76 files changed, 1440 insertions, 767 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 0b6815504e6d..b660085dcc69 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -2438,7 +2438,7 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2438 | topology informations if the hardware supports these. | 2438 | topology informations if the hardware supports these. |
2439 | The scheduler will make use of these informations and | 2439 | The scheduler will make use of these informations and |
2440 | e.g. base its process migration decisions on it. | 2440 | e.g. base its process migration decisions on it. |
2441 | Default is off. | 2441 | Default is on. |
2442 | 2442 | ||
2443 | tp720= [HW,PS2] | 2443 | tp720= [HW,PS2] |
2444 | 2444 | ||
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild new file mode 100644 index 000000000000..ae4b01060edd --- /dev/null +++ b/arch/s390/Kbuild | |||
@@ -0,0 +1,6 @@ | |||
1 | obj-y += kernel/ | ||
2 | obj-y += mm/ | ||
3 | obj-y += crypto/ | ||
4 | obj-y += appldata/ | ||
5 | obj-y += hypfs/ | ||
6 | obj-y += kvm/ | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 75976a141947..068e55d1bba8 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -60,6 +60,9 @@ config NO_IOMEM | |||
60 | config NO_DMA | 60 | config NO_DMA |
61 | def_bool y | 61 | def_bool y |
62 | 62 | ||
63 | config ARCH_DMA_ADDR_T_64BIT | ||
64 | def_bool 64BIT | ||
65 | |||
63 | config GENERIC_LOCKBREAK | 66 | config GENERIC_LOCKBREAK |
64 | bool | 67 | bool |
65 | default y | 68 | default y |
@@ -101,6 +104,7 @@ config S390 | |||
101 | select HAVE_KERNEL_BZIP2 | 104 | select HAVE_KERNEL_BZIP2 |
102 | select HAVE_KERNEL_LZMA | 105 | select HAVE_KERNEL_LZMA |
103 | select HAVE_KERNEL_LZO | 106 | select HAVE_KERNEL_LZO |
107 | select HAVE_GET_USER_PAGES_FAST | ||
104 | select ARCH_INLINE_SPIN_TRYLOCK | 108 | select ARCH_INLINE_SPIN_TRYLOCK |
105 | select ARCH_INLINE_SPIN_TRYLOCK_BH | 109 | select ARCH_INLINE_SPIN_TRYLOCK_BH |
106 | select ARCH_INLINE_SPIN_LOCK | 110 | select ARCH_INLINE_SPIN_LOCK |
@@ -286,6 +290,14 @@ config MARCH_Z10 | |||
286 | machines such as the z990, z890, z900, z800, z9-109, z9-ec | 290 | machines such as the z990, z890, z900, z800, z9-109, z9-ec |
287 | and z9-bc. | 291 | and z9-bc. |
288 | 292 | ||
293 | config MARCH_Z196 | ||
294 | bool "IBM zEnterprise 196" | ||
295 | help | ||
296 | Select this to enable optimizations for IBM zEnterprise 196. | ||
297 | The kernel will be slightly faster but will not work on older | ||
298 | machines such as the z990, z890, z900, z800, z9-109, z9-ec, | ||
299 | z9-bc, z10-ec and z10-bc. | ||
300 | |||
289 | endchoice | 301 | endchoice |
290 | 302 | ||
291 | config PACK_STACK | 303 | config PACK_STACK |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 0c9e6c6d2a64..d5b8a6ade525 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -40,6 +40,7 @@ cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) | |||
40 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) | 40 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) |
41 | cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) | 41 | cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) |
42 | cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10) | 42 | cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10) |
43 | cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196) | ||
43 | 44 | ||
44 | #KBUILD_IMAGE is necessary for make rpm | 45 | #KBUILD_IMAGE is necessary for make rpm |
45 | KBUILD_IMAGE :=arch/s390/boot/image | 46 | KBUILD_IMAGE :=arch/s390/boot/image |
@@ -94,8 +95,8 @@ head-y := arch/s390/kernel/head.o | |||
94 | head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) | 95 | head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) |
95 | head-y += arch/s390/kernel/init_task.o | 96 | head-y += arch/s390/kernel/init_task.o |
96 | 97 | ||
97 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 98 | # See arch/s390/Kbuild for content of core part of the kernel |
98 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ | 99 | core-y += arch/s390/ |
99 | 100 | ||
100 | libs-y += arch/s390/lib/ | 101 | libs-y += arch/s390/lib/ |
101 | drivers-y += drivers/s390/ | 102 | drivers-y += drivers/s390/ |
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index 0ef9829f2ad6..7ee9a1b4ad9f 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h | |||
@@ -297,7 +297,7 @@ static inline int crypt_s390_func_available(int func) | |||
297 | int ret; | 297 | int ret; |
298 | 298 | ||
299 | /* check if CPACF facility (bit 17) is available */ | 299 | /* check if CPACF facility (bit 17) is available */ |
300 | if (!(stfl() & 1ULL << (31 - 17))) | 300 | if (!test_facility(17)) |
301 | return 0; | 301 | return 0; |
302 | 302 | ||
303 | switch (func & CRYPT_S390_OP_MASK) { | 303 | switch (func & CRYPT_S390_OP_MASK) { |
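Note: the old and the new check test the same thing. stfl() returned the first 32-bit facility word with bit 0 as the most significant bit, so CPACF (facility 17) corresponds to the mask 1 << (31 - 17). A minimal sketch of that equivalence, for illustration only and not part of the patch:

	/* Illustrative only: both forms check facility bit 17 (CPACF),
	 * counted MSB-first as in the Principles of Operation. */
	static inline int cpacf_available(void)
	{
		return test_facility(17);	/* was: stfl() & (1UL << (31 - 17)) */
	}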
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index f3ba0fa98de6..e8501115eca8 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h | |||
@@ -92,6 +92,16 @@ struct ccw_device { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * Possible events used by the path_event notifier. | ||
96 | */ | ||
97 | #define PE_NONE 0x0 | ||
98 | #define PE_PATH_GONE 0x1 /* A path is no longer available. */ | ||
99 | #define PE_PATH_AVAILABLE 0x2 /* A path has become available and | ||
100 | was successfully verified. */ | ||
101 | #define PE_PATHGROUP_ESTABLISHED 0x4 /* A pathgroup was reset and had | ||
102 | to be established again. */ | ||
103 | |||
104 | /* | ||
95 | * Possible CIO actions triggered by the unit check handler. | 105 | * Possible CIO actions triggered by the unit check handler. |
96 | */ | 106 | */ |
97 | enum uc_todo { | 107 | enum uc_todo { |
@@ -109,6 +119,7 @@ enum uc_todo { | |||
109 | * @set_online: called when setting device online | 119 | * @set_online: called when setting device online |
110 | * @set_offline: called when setting device offline | 120 | * @set_offline: called when setting device offline |
111 | * @notify: notify driver of device state changes | 121 | * @notify: notify driver of device state changes |
122 | * @path_event: notify driver of channel path events | ||
112 | * @shutdown: called at device shutdown | 123 | * @shutdown: called at device shutdown |
113 | * @prepare: prepare for pm state transition | 124 | * @prepare: prepare for pm state transition |
114 | * @complete: undo work done in @prepare | 125 | * @complete: undo work done in @prepare |
@@ -127,6 +138,7 @@ struct ccw_driver { | |||
127 | int (*set_online) (struct ccw_device *); | 138 | int (*set_online) (struct ccw_device *); |
128 | int (*set_offline) (struct ccw_device *); | 139 | int (*set_offline) (struct ccw_device *); |
129 | int (*notify) (struct ccw_device *, int); | 140 | int (*notify) (struct ccw_device *, int); |
141 | void (*path_event) (struct ccw_device *, int *); | ||
130 | void (*shutdown) (struct ccw_device *); | 142 | void (*shutdown) (struct ccw_device *); |
131 | int (*prepare) (struct ccw_device *); | 143 | int (*prepare) (struct ccw_device *); |
132 | void (*complete) (struct ccw_device *); | 144 | void (*complete) (struct ccw_device *); |
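The new path_event callback receives what appears to be a mask of the PE_* flags defined above, passed by reference as an int. A hypothetical handler could look like the sketch below; the function name and the messages are made up, only the callback signature and the PE_* constants come from this header.

	/* Hypothetical ccw_driver path_event handler (sketch only). */
	static void sample_path_event(struct ccw_device *cdev, int *event)
	{
		if (*event & PE_PATH_GONE)
			dev_info(&cdev->dev, "a channel path went away\n");
		if (*event & PE_PATH_AVAILABLE)
			dev_info(&cdev->dev, "a channel path became available\n");
		if (*event & PE_PATHGROUP_ESTABLISHED)
			dev_info(&cdev->dev, "path group was re-established\n");
	}

A driver would then hook this up with .path_event = sample_path_event in its struct ccw_driver.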
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h index 471234b90574..e0b69540216f 100644 --- a/arch/s390/include/asm/cpu.h +++ b/arch/s390/include/asm/cpu.h | |||
@@ -20,7 +20,7 @@ struct cpuid | |||
20 | unsigned int ident : 24; | 20 | unsigned int ident : 24; |
21 | unsigned int machine : 16; | 21 | unsigned int machine : 16; |
22 | unsigned int unused : 16; | 22 | unsigned int unused : 16; |
23 | } __packed; | 23 | } __attribute__ ((packed, aligned(8))); |
24 | 24 | ||
25 | #endif /* __ASSEMBLY__ */ | 25 | #endif /* __ASSEMBLY__ */ |
26 | #endif /* _ASM_S390_CPU_H */ | 26 | #endif /* _ASM_S390_CPU_H */ |
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index bb8343d157bc..b56403c2df28 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h | |||
@@ -37,32 +37,6 @@ static inline int prepare_hugepage_range(struct file *file, | |||
37 | int arch_prepare_hugepage(struct page *page); | 37 | int arch_prepare_hugepage(struct page *page); |
38 | void arch_release_hugepage(struct page *page); | 38 | void arch_release_hugepage(struct page *page); |
39 | 39 | ||
40 | static inline pte_t pte_mkhuge(pte_t pte) | ||
41 | { | ||
42 | /* | ||
43 | * PROT_NONE needs to be remapped from the pte type to the ste type. | ||
44 | * The HW invalid bit is also different for pte and ste. The pte | ||
45 | * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE | ||
46 | * bit, so we don't have to clear it. | ||
47 | */ | ||
48 | if (pte_val(pte) & _PAGE_INVALID) { | ||
49 | if (pte_val(pte) & _PAGE_SWT) | ||
50 | pte_val(pte) |= _HPAGE_TYPE_NONE; | ||
51 | pte_val(pte) |= _SEGMENT_ENTRY_INV; | ||
52 | } | ||
53 | /* | ||
54 | * Clear SW pte bits SWT and SWX, there are no SW bits in a segment | ||
55 | * table entry. | ||
56 | */ | ||
57 | pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX); | ||
58 | /* | ||
59 | * Also set the change-override bit because we don't need dirty bit | ||
60 | * tracking for hugetlbfs pages. | ||
61 | */ | ||
62 | pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); | ||
63 | return pte; | ||
64 | } | ||
65 | |||
66 | static inline pte_t huge_pte_wrprotect(pte_t pte) | 40 | static inline pte_t huge_pte_wrprotect(pte_t pte) |
67 | { | 41 | { |
68 | pte_val(pte) |= _PAGE_RO; | 42 | pte_val(pte) |= _PAGE_RO; |
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 0f97ef2d92ac..65e172f8209d 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h | |||
@@ -150,9 +150,10 @@ struct _lowcore { | |||
150 | */ | 150 | */ |
151 | __u32 ipib; /* 0x0e00 */ | 151 | __u32 ipib; /* 0x0e00 */ |
152 | __u32 ipib_checksum; /* 0x0e04 */ | 152 | __u32 ipib_checksum; /* 0x0e04 */ |
153 | __u8 pad_0x0e08[0x0f00-0x0e08]; /* 0x0e08 */ | ||
153 | 154 | ||
154 | /* Align to the top 1k of prefix area */ | 155 | /* Extended facility list */ |
155 | __u8 pad_0x0e08[0x1000-0x0e08]; /* 0x0e08 */ | 156 | __u64 stfle_fac_list[32]; /* 0x0f00 */ |
156 | } __packed; | 157 | } __packed; |
157 | 158 | ||
158 | #else /* CONFIG_32BIT */ | 159 | #else /* CONFIG_32BIT */ |
@@ -285,7 +286,11 @@ struct _lowcore { | |||
285 | */ | 286 | */ |
286 | __u64 ipib; /* 0x0e00 */ | 287 | __u64 ipib; /* 0x0e00 */ |
287 | __u32 ipib_checksum; /* 0x0e08 */ | 288 | __u32 ipib_checksum; /* 0x0e08 */ |
288 | __u8 pad_0x0e0c[0x11b8-0x0e0c]; /* 0x0e0c */ | 289 | __u8 pad_0x0e0c[0x0f00-0x0e0c]; /* 0x0e0c */ |
290 | |||
291 | /* Extended facility list */ | ||
292 | __u64 stfle_fac_list[32]; /* 0x0f00 */ | ||
293 | __u8 pad_0x1000[0x11b8-0x1000]; /* 0x1000 */ | ||
289 | 294 | ||
290 | /* 64 bit extparam used for pfault/diag 250: defined by architecture */ | 295 | /* 64 bit extparam used for pfault/diag 250: defined by architecture */ |
291 | __u64 ext_params2; /* 0x11B8 */ | 296 | __u64 ext_params2; /* 0x11B8 */ |
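Size check for the new field: 32 doublewords of 8 bytes are 0x100 bytes, so stfle_fac_list covers 0x0f00-0x0fff and, in the 64-bit lowcore, the padding resumes exactly at 0x1000, matching the offsets in both hunks. Illustrative build-time assertions (not part of the patch; they would have to sit in a function body):

	BUILD_BUG_ON(offsetof(struct _lowcore, stfle_fac_list) != 0x0f00);
	BUILD_BUG_ON(sizeof(((struct _lowcore *)0)->stfle_fac_list) != 0x100);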
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index af650fb47206..a8729ea7e9ac 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -108,9 +108,13 @@ typedef pte_t *pgtable_t; | |||
108 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 108 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
109 | 109 | ||
110 | static inline void | 110 | static inline void |
111 | page_set_storage_key(unsigned long addr, unsigned int skey) | 111 | page_set_storage_key(unsigned long addr, unsigned int skey, int mapped) |
112 | { | 112 | { |
113 | asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); | 113 | if (!mapped) |
114 | asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0" | ||
115 | : : "d" (skey), "a" (addr)); | ||
116 | else | ||
117 | asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); | ||
114 | } | 118 | } |
115 | 119 | ||
116 | static inline unsigned int | 120 | static inline unsigned int |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 68940d0bad91..082eb4e50e8b 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -21,9 +21,11 @@ | |||
21 | 21 | ||
22 | unsigned long *crst_table_alloc(struct mm_struct *, int); | 22 | unsigned long *crst_table_alloc(struct mm_struct *, int); |
23 | void crst_table_free(struct mm_struct *, unsigned long *); | 23 | void crst_table_free(struct mm_struct *, unsigned long *); |
24 | void crst_table_free_rcu(struct mm_struct *, unsigned long *); | ||
24 | 25 | ||
25 | unsigned long *page_table_alloc(struct mm_struct *); | 26 | unsigned long *page_table_alloc(struct mm_struct *); |
26 | void page_table_free(struct mm_struct *, unsigned long *); | 27 | void page_table_free(struct mm_struct *, unsigned long *); |
28 | void page_table_free_rcu(struct mm_struct *, unsigned long *); | ||
27 | void disable_noexec(struct mm_struct *, struct task_struct *); | 29 | void disable_noexec(struct mm_struct *, struct task_struct *); |
28 | 30 | ||
29 | static inline void clear_table(unsigned long *s, unsigned long val, size_t n) | 31 | static inline void clear_table(unsigned long *s, unsigned long val, size_t n) |
@@ -176,4 +178,6 @@ static inline void pmd_populate(struct mm_struct *mm, | |||
176 | #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) | 178 | #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) |
177 | #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) | 179 | #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) |
178 | 180 | ||
181 | extern void rcu_table_freelist_finish(void); | ||
182 | |||
179 | #endif /* _S390_PGALLOC_H */ | 183 | #endif /* _S390_PGALLOC_H */ |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 3157441ee1da..986dc9476c21 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -38,6 +38,7 @@ | |||
38 | extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); | 38 | extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); |
39 | extern void paging_init(void); | 39 | extern void paging_init(void); |
40 | extern void vmem_map_init(void); | 40 | extern void vmem_map_init(void); |
41 | extern void fault_init(void); | ||
41 | 42 | ||
42 | /* | 43 | /* |
43 | * The S390 doesn't have any external MMU info: the kernel page | 44 | * The S390 doesn't have any external MMU info: the kernel page |
@@ -46,11 +47,27 @@ extern void vmem_map_init(void); | |||
46 | #define update_mmu_cache(vma, address, ptep) do { } while (0) | 47 | #define update_mmu_cache(vma, address, ptep) do { } while (0) |
47 | 48 | ||
48 | /* | 49 | /* |
49 | * ZERO_PAGE is a global shared page that is always zero: used | 50 | * ZERO_PAGE is a global shared page that is always zero; used |
50 | * for zero-mapped memory areas etc.. | 51 | * for zero-mapped memory areas etc.. |
51 | */ | 52 | */ |
52 | extern char empty_zero_page[PAGE_SIZE]; | 53 | |
53 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | 54 | extern unsigned long empty_zero_page; |
55 | extern unsigned long zero_page_mask; | ||
56 | |||
57 | #define ZERO_PAGE(vaddr) \ | ||
58 | (virt_to_page((void *)(empty_zero_page + \ | ||
59 | (((unsigned long)(vaddr)) &zero_page_mask)))) | ||
60 | |||
61 | #define is_zero_pfn is_zero_pfn | ||
62 | static inline int is_zero_pfn(unsigned long pfn) | ||
63 | { | ||
64 | extern unsigned long zero_pfn; | ||
65 | unsigned long offset_from_zero_pfn = pfn - zero_pfn; | ||
66 | return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); | ||
67 | } | ||
68 | |||
69 | #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) | ||
70 | |||
54 | #endif /* !__ASSEMBLY__ */ | 71 | #endif /* !__ASSEMBLY__ */ |
55 | 72 | ||
56 | /* | 73 | /* |
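The rewritten ZERO_PAGE() allows more than one zero page: zero_page_mask selects, within a contiguous block starting at empty_zero_page, the page whose low address bits match the requesting virtual address (presumably to spread accesses across the cache). A worked example with a purely illustrative mask value:

	/* Assume four 4KB zero pages, i.e. zero_page_mask == 0x3000 (made up). */
	unsigned long vaddr = 0x2345a000UL;
	unsigned long zaddr = empty_zero_page + (vaddr & 0x3000); /* offset 0x2000 */
	struct page *zpage  = virt_to_page((void *) zaddr);	   /* == ZERO_PAGE(vaddr) */

is_zero_pfn() inverts the mapping: any pfn within zero_page_mask >> PAGE_SHIFT of zero_pfn counts as a zero page.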
@@ -300,6 +317,7 @@ extern unsigned long VMALLOC_START; | |||
300 | 317 | ||
301 | /* Bits in the segment table entry */ | 318 | /* Bits in the segment table entry */ |
302 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ | 319 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ |
320 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | ||
303 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | 321 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ |
304 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ | 322 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ |
305 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ | 323 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ |
@@ -572,7 +590,7 @@ static inline void rcp_unlock(pte_t *ptep) | |||
572 | } | 590 | } |
573 | 591 | ||
574 | /* forward declaration for SetPageUptodate in page-flags.h*/ | 592 | /* forward declaration for SetPageUptodate in page-flags.h*/ |
575 | static inline void page_clear_dirty(struct page *page); | 593 | static inline void page_clear_dirty(struct page *page, int mapped); |
576 | #include <linux/page-flags.h> | 594 | #include <linux/page-flags.h> |
577 | 595 | ||
578 | static inline void ptep_rcp_copy(pte_t *ptep) | 596 | static inline void ptep_rcp_copy(pte_t *ptep) |
@@ -754,6 +772,34 @@ static inline pte_t pte_mkspecial(pte_t pte) | |||
754 | return pte; | 772 | return pte; |
755 | } | 773 | } |
756 | 774 | ||
775 | #ifdef CONFIG_HUGETLB_PAGE | ||
776 | static inline pte_t pte_mkhuge(pte_t pte) | ||
777 | { | ||
778 | /* | ||
779 | * PROT_NONE needs to be remapped from the pte type to the ste type. | ||
780 | * The HW invalid bit is also different for pte and ste. The pte | ||
781 | * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE | ||
782 | * bit, so we don't have to clear it. | ||
783 | */ | ||
784 | if (pte_val(pte) & _PAGE_INVALID) { | ||
785 | if (pte_val(pte) & _PAGE_SWT) | ||
786 | pte_val(pte) |= _HPAGE_TYPE_NONE; | ||
787 | pte_val(pte) |= _SEGMENT_ENTRY_INV; | ||
788 | } | ||
789 | /* | ||
790 | * Clear SW pte bits SWT and SWX, there are no SW bits in a segment | ||
791 | * table entry. | ||
792 | */ | ||
793 | pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX); | ||
794 | /* | ||
795 | * Also set the change-override bit because we don't need dirty bit | ||
796 | * tracking for hugetlbfs pages. | ||
797 | */ | ||
798 | pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); | ||
799 | return pte; | ||
800 | } | ||
801 | #endif | ||
802 | |||
757 | #ifdef CONFIG_PGSTE | 803 | #ifdef CONFIG_PGSTE |
758 | /* | 804 | /* |
759 | * Get (and clear) the user dirty bit for a PTE. | 805 | * Get (and clear) the user dirty bit for a PTE. |
@@ -782,7 +828,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm, | |||
782 | } | 828 | } |
783 | dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste); | 829 | dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste); |
784 | if (skey & _PAGE_CHANGED) | 830 | if (skey & _PAGE_CHANGED) |
785 | page_clear_dirty(page); | 831 | page_clear_dirty(page, 1); |
786 | rcp_unlock(ptep); | 832 | rcp_unlock(ptep); |
787 | return dirty; | 833 | return dirty; |
788 | } | 834 | } |
@@ -957,9 +1003,9 @@ static inline int page_test_dirty(struct page *page) | |||
957 | } | 1003 | } |
958 | 1004 | ||
959 | #define __HAVE_ARCH_PAGE_CLEAR_DIRTY | 1005 | #define __HAVE_ARCH_PAGE_CLEAR_DIRTY |
960 | static inline void page_clear_dirty(struct page *page) | 1006 | static inline void page_clear_dirty(struct page *page, int mapped) |
961 | { | 1007 | { |
962 | page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); | 1008 | page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped); |
963 | } | 1009 | } |
964 | 1010 | ||
965 | /* | 1011 | /* |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 73e259834e10..8d6f87169577 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -82,8 +82,6 @@ struct thread_struct { | |||
82 | unsigned long prot_addr; /* address of protection-excep. */ | 82 | unsigned long prot_addr; /* address of protection-excep. */ |
83 | unsigned int trap_no; | 83 | unsigned int trap_no; |
84 | per_struct per_info; | 84 | per_struct per_info; |
85 | /* Used to give failing instruction back to user for ieee exceptions */ | ||
86 | unsigned long ieee_instruction_pointer; | ||
87 | /* pfault_wait is used to block the process on a pfault event */ | 85 | /* pfault_wait is used to block the process on a pfault event */ |
88 | unsigned long pfault_wait; | 86 | unsigned long pfault_wait; |
89 | }; | 87 | }; |
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index e2c218dc68a6..d9d42b1e46fa 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h | |||
@@ -481,8 +481,7 @@ struct user_regs_struct | |||
481 | * watchpoints. This is the way intel does it. | 481 | * watchpoints. This is the way intel does it. |
482 | */ | 482 | */ |
483 | per_struct per_info; | 483 | per_struct per_info; |
484 | unsigned long ieee_instruction_pointer; | 484 | unsigned long ieee_instruction_pointer; /* obsolete, always 0 */ |
485 | /* Used to give failing instruction back to user for ieee exceptions */ | ||
486 | }; | 485 | }; |
487 | 486 | ||
488 | #ifdef __KERNEL__ | 487 | #ifdef __KERNEL__ |
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h index 2afc060266a2..1a9307e70842 100644 --- a/arch/s390/include/asm/s390_ext.h +++ b/arch/s390/include/asm/s390_ext.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | 14 | ||
15 | typedef void (*ext_int_handler_t)(__u16 code); | 15 | typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); |
16 | 16 | ||
17 | typedef struct ext_int_info_t { | 17 | typedef struct ext_int_info_t { |
18 | struct ext_int_info_t *next; | 18 | struct ext_int_info_t *next; |
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h index 35d786fe93ae..6d45ef6c12a7 100644 --- a/arch/s390/include/asm/scatterlist.h +++ b/arch/s390/include/asm/scatterlist.h | |||
@@ -1 +1,3 @@ | |||
1 | #include <asm-generic/scatterlist.h> | 1 | #include <asm-generic/scatterlist.h> |
2 | |||
3 | #define ARCH_HAS_SG_CHAIN | ||
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 25e831d58e1e..d5e2ef10537d 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h | |||
@@ -73,6 +73,7 @@ extern unsigned int user_mode; | |||
73 | #define MACHINE_FLAG_PFMF (1UL << 11) | 73 | #define MACHINE_FLAG_PFMF (1UL << 11) |
74 | #define MACHINE_FLAG_LPAR (1UL << 12) | 74 | #define MACHINE_FLAG_LPAR (1UL << 12) |
75 | #define MACHINE_FLAG_SPP (1UL << 13) | 75 | #define MACHINE_FLAG_SPP (1UL << 13) |
76 | #define MACHINE_FLAG_TOPOLOGY (1UL << 14) | ||
76 | 77 | ||
77 | #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) | 78 | #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) |
78 | #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) | 79 | #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) |
@@ -90,6 +91,7 @@ extern unsigned int user_mode; | |||
90 | #define MACHINE_HAS_HPAGE (0) | 91 | #define MACHINE_HAS_HPAGE (0) |
91 | #define MACHINE_HAS_PFMF (0) | 92 | #define MACHINE_HAS_PFMF (0) |
92 | #define MACHINE_HAS_SPP (0) | 93 | #define MACHINE_HAS_SPP (0) |
94 | #define MACHINE_HAS_TOPOLOGY (0) | ||
93 | #else /* __s390x__ */ | 95 | #else /* __s390x__ */ |
94 | #define MACHINE_HAS_IEEE (1) | 96 | #define MACHINE_HAS_IEEE (1) |
95 | #define MACHINE_HAS_CSP (1) | 97 | #define MACHINE_HAS_CSP (1) |
@@ -100,6 +102,7 @@ extern unsigned int user_mode; | |||
100 | #define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE) | 102 | #define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE) |
101 | #define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) | 103 | #define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) |
102 | #define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) | 104 | #define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) |
105 | #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) | ||
103 | #endif /* __s390x__ */ | 106 | #endif /* __s390x__ */ |
104 | 107 | ||
105 | #define ZFCPDUMP_HSA_SIZE (32UL<<20) | 108 | #define ZFCPDUMP_HSA_SIZE (32UL<<20) |
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 8429686951f9..5c0246b955d8 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h | |||
@@ -65,8 +65,6 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
65 | if (test_tsk_thread_flag(task, TIF_31BIT)) | 65 | if (test_tsk_thread_flag(task, TIF_31BIT)) |
66 | mask = 0xffffffff; | 66 | mask = 0xffffffff; |
67 | #endif | 67 | #endif |
68 | if (i + n == 6) | ||
69 | args[--n] = regs->args[0] & mask; | ||
70 | while (n-- > 0) | 68 | while (n-- > 0) |
71 | if (i + n > 0) | 69 | if (i + n > 0) |
72 | args[n] = regs->gprs[2 + i + n] & mask; | 70 | args[n] = regs->gprs[2 + i + n] & mask; |
@@ -80,8 +78,6 @@ static inline void syscall_set_arguments(struct task_struct *task, | |||
80 | const unsigned long *args) | 78 | const unsigned long *args) |
81 | { | 79 | { |
82 | BUG_ON(i + n > 6); | 80 | BUG_ON(i + n > 6); |
83 | if (i + n == 6) | ||
84 | regs->args[0] = args[--n]; | ||
85 | while (n-- > 0) | 81 | while (n-- > 0) |
86 | if (i + n > 0) | 82 | if (i + n > 0) |
87 | regs->gprs[2 + i + n] = args[n]; | 83 | regs->gprs[2 + i + n] = args[n]; |
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index 22bdb2a0ee5f..79d3d6e2e9c5 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h | |||
@@ -14,8 +14,13 @@ | |||
14 | #ifndef __ASM_S390_SYSINFO_H | 14 | #ifndef __ASM_S390_SYSINFO_H |
15 | #define __ASM_S390_SYSINFO_H | 15 | #define __ASM_S390_SYSINFO_H |
16 | 16 | ||
17 | #include <asm/bitsperlong.h> | ||
18 | |||
17 | struct sysinfo_1_1_1 { | 19 | struct sysinfo_1_1_1 { |
18 | char reserved_0[32]; | 20 | unsigned short :16; |
21 | unsigned char ccr; | ||
22 | unsigned char cai; | ||
23 | char reserved_0[28]; | ||
19 | char manufacturer[16]; | 24 | char manufacturer[16]; |
20 | char type[4]; | 25 | char type[4]; |
21 | char reserved_1[12]; | 26 | char reserved_1[12]; |
@@ -104,6 +109,39 @@ struct sysinfo_3_2_2 { | |||
104 | char reserved_544[3552]; | 109 | char reserved_544[3552]; |
105 | }; | 110 | }; |
106 | 111 | ||
112 | #define TOPOLOGY_CPU_BITS 64 | ||
113 | #define TOPOLOGY_NR_MAG 6 | ||
114 | |||
115 | struct topology_cpu { | ||
116 | unsigned char reserved0[4]; | ||
117 | unsigned char :6; | ||
118 | unsigned char pp:2; | ||
119 | unsigned char reserved1; | ||
120 | unsigned short origin; | ||
121 | unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG]; | ||
122 | }; | ||
123 | |||
124 | struct topology_container { | ||
125 | unsigned char reserved[7]; | ||
126 | unsigned char id; | ||
127 | }; | ||
128 | |||
129 | union topology_entry { | ||
130 | unsigned char nl; | ||
131 | struct topology_cpu cpu; | ||
132 | struct topology_container container; | ||
133 | }; | ||
134 | |||
135 | struct sysinfo_15_1_x { | ||
136 | unsigned char reserved0[2]; | ||
137 | unsigned short length; | ||
138 | unsigned char mag[TOPOLOGY_NR_MAG]; | ||
139 | unsigned char reserved1; | ||
140 | unsigned char mnest; | ||
141 | unsigned char reserved2[4]; | ||
142 | union topology_entry tle[0]; | ||
143 | }; | ||
144 | |||
107 | static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) | 145 | static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) |
108 | { | 146 | { |
109 | register int r0 asm("0") = (fc << 28) | sel1; | 147 | register int r0 asm("0") = (fc << 28) | sel1; |
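struct sysinfo_15_1_x is a variable-length record: length holds the total size in bytes and tle[] mixes container and CPU entries, told apart by the nesting level in the first byte. A minimal walker in the spirit of the topology code, as a sketch only (the helper names are invented, and it assumes entries are packed back to back with nl == 0 meaning a CPU entry):

	static union topology_entry *next_tle(union topology_entry *tle)
	{
		if (!tle->nl)	/* nesting level 0: CPU entry */
			return (union topology_entry *)((struct topology_cpu *) tle + 1);
		return (union topology_entry *)((struct topology_container *) tle + 1);
	}

	static void walk_tles(struct sysinfo_15_1_x *info)
	{
		union topology_entry *tle = info->tle;
		union topology_entry *end =
			(union topology_entry *)((unsigned long) info + info->length);

		while (tle < end)
			tle = next_tle(tle);	/* inspect tle->cpu / tle->container here */
	}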
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 1f2ebc4afd82..3ad16dbf622e 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h | |||
@@ -85,14 +85,16 @@ static inline void restore_access_regs(unsigned int *acrs) | |||
85 | asm volatile("lam 0,15,%0" : : "Q" (*acrs)); | 85 | asm volatile("lam 0,15,%0" : : "Q" (*acrs)); |
86 | } | 86 | } |
87 | 87 | ||
88 | #define switch_to(prev,next,last) do { \ | 88 | #define switch_to(prev,next,last) do { \ |
89 | if (prev == next) \ | 89 | if (prev->mm) { \ |
90 | break; \ | 90 | save_fp_regs(&prev->thread.fp_regs); \ |
91 | save_fp_regs(&prev->thread.fp_regs); \ | 91 | save_access_regs(&prev->thread.acrs[0]); \ |
92 | restore_fp_regs(&next->thread.fp_regs); \ | 92 | } \ |
93 | save_access_regs(&prev->thread.acrs[0]); \ | 93 | if (next->mm) { \ |
94 | restore_access_regs(&next->thread.acrs[0]); \ | 94 | restore_fp_regs(&next->thread.fp_regs); \ |
95 | prev = __switch_to(prev,next); \ | 95 | restore_access_regs(&next->thread.acrs[0]); \ |
96 | } \ | ||
97 | prev = __switch_to(prev,next); \ | ||
96 | } while (0) | 98 | } while (0) |
97 | 99 | ||
98 | extern void account_vtime(struct task_struct *, struct task_struct *); | 100 | extern void account_vtime(struct task_struct *, struct task_struct *); |
@@ -418,30 +420,21 @@ extern void smp_ctl_clear_bit(int cr, int bit); | |||
418 | 420 | ||
419 | #endif /* CONFIG_SMP */ | 421 | #endif /* CONFIG_SMP */ |
420 | 422 | ||
421 | static inline unsigned int stfl(void) | 423 | #define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */ |
422 | { | ||
423 | asm volatile( | ||
424 | " .insn s,0xb2b10000,0(0)\n" /* stfl */ | ||
425 | "0:\n" | ||
426 | EX_TABLE(0b,0b)); | ||
427 | return S390_lowcore.stfl_fac_list; | ||
428 | } | ||
429 | 424 | ||
430 | static inline int __stfle(unsigned long long *list, int doublewords) | 425 | /* |
426 | * The test_facility function uses the bit ordering where the MSB is bit 0. | ||
427 | * That makes it easier to query facility bits with the bit number as | ||
428 | * documented in the Principles of Operation. | ||
429 | */ | ||
430 | static inline int test_facility(unsigned long nr) | ||
431 | { | 431 | { |
432 | typedef struct { unsigned long long _[doublewords]; } addrtype; | 432 | unsigned char *ptr; |
433 | register unsigned long __nr asm("0") = doublewords - 1; | ||
434 | |||
435 | asm volatile(".insn s,0xb2b00000,%0" /* stfle */ | ||
436 | : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); | ||
437 | return __nr + 1; | ||
438 | } | ||
439 | 433 | ||
440 | static inline int stfle(unsigned long long *list, int doublewords) | 434 | if (nr >= MAX_FACILITY_BIT) |
441 | { | 435 | return 0; |
442 | if (!(stfl() & (1UL << 24))) | 436 | ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3); |
443 | return -EOPNOTSUPP; | 437 | return (*ptr & (0x80 >> (nr & 7))) != 0; |
444 | return __stfle(list, doublewords); | ||
445 | } | 438 | } |
446 | 439 | ||
447 | static inline unsigned short stap(void) | 440 | static inline unsigned short stap(void) |
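test_facility() indexes the 256-byte stfle_fac_list copy in the lowcore with MSB-first numbering: facility nr lives in byte nr >> 3 under mask 0x80 >> (nr & 7). For facility 17 (used in crypt_s390.h above) that is byte 2, mask 0x40. A stand-alone sketch of the same lookup, for illustration only:

	/* Same MSB-first bit lookup, detached from the lowcore. */
	static int facility_bit_set(const unsigned char *fac_list, unsigned long nr)
	{
		return (fac_list[nr >> 3] & (0x80 >> (nr & 7))) != 0;
	}
	/* nr == 17: byte 17 >> 3 == 2, mask 0x80 >> (17 & 7) == 0x40 */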
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index fd1c00d08bf5..f1f644f2240a 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -64,10 +64,9 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb, | |||
64 | if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS)) | 64 | if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS)) |
65 | __tlb_flush_mm(tlb->mm); | 65 | __tlb_flush_mm(tlb->mm); |
66 | while (tlb->nr_ptes > 0) | 66 | while (tlb->nr_ptes > 0) |
67 | pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]); | 67 | page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]); |
68 | while (tlb->nr_pxds < TLB_NR_PTRS) | 68 | while (tlb->nr_pxds < TLB_NR_PTRS) |
69 | /* pgd_free frees the pointer as region or segment table */ | 69 | crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]); |
70 | pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]); | ||
71 | } | 70 | } |
72 | 71 | ||
73 | static inline void tlb_finish_mmu(struct mmu_gather *tlb, | 72 | static inline void tlb_finish_mmu(struct mmu_gather *tlb, |
@@ -75,6 +74,8 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb, | |||
75 | { | 74 | { |
76 | tlb_flush_mmu(tlb, start, end); | 75 | tlb_flush_mmu(tlb, start, end); |
77 | 76 | ||
77 | rcu_table_freelist_finish(); | ||
78 | |||
78 | /* keep the page table cache within bounds */ | 79 | /* keep the page table cache within bounds */ |
79 | check_pgt_cache(); | 80 | check_pgt_cache(); |
80 | 81 | ||
@@ -103,7 +104,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
103 | if (tlb->nr_ptes >= tlb->nr_pxds) | 104 | if (tlb->nr_ptes >= tlb->nr_pxds) |
104 | tlb_flush_mmu(tlb, 0, 0); | 105 | tlb_flush_mmu(tlb, 0, 0); |
105 | } else | 106 | } else |
106 | pte_free(tlb->mm, pte); | 107 | page_table_free(tlb->mm, (unsigned long *) pte); |
107 | } | 108 | } |
108 | 109 | ||
109 | /* | 110 | /* |
@@ -124,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | |||
124 | if (tlb->nr_ptes >= tlb->nr_pxds) | 125 | if (tlb->nr_ptes >= tlb->nr_pxds) |
125 | tlb_flush_mmu(tlb, 0, 0); | 126 | tlb_flush_mmu(tlb, 0, 0); |
126 | } else | 127 | } else |
127 | pmd_free(tlb->mm, pmd); | 128 | crst_table_free(tlb->mm, (unsigned long *) pmd); |
128 | #endif | 129 | #endif |
129 | } | 130 | } |
130 | 131 | ||
@@ -146,7 +147,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, | |||
146 | if (tlb->nr_ptes >= tlb->nr_pxds) | 147 | if (tlb->nr_ptes >= tlb->nr_pxds) |
147 | tlb_flush_mmu(tlb, 0, 0); | 148 | tlb_flush_mmu(tlb, 0, 0); |
148 | } else | 149 | } else |
149 | pud_free(tlb->mm, pud); | 150 | crst_table_free(tlb->mm, (unsigned long *) pud); |
150 | #endif | 151 | #endif |
151 | } | 152 | } |
152 | 153 | ||
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 051107a2c5e2..c5338834ddbd 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_S390_TOPOLOGY_H | 2 | #define _ASM_S390_TOPOLOGY_H |
3 | 3 | ||
4 | #include <linux/cpumask.h> | 4 | #include <linux/cpumask.h> |
5 | #include <asm/sysinfo.h> | ||
5 | 6 | ||
6 | extern unsigned char cpu_core_id[NR_CPUS]; | 7 | extern unsigned char cpu_core_id[NR_CPUS]; |
7 | extern cpumask_t cpu_core_map[NR_CPUS]; | 8 | extern cpumask_t cpu_core_map[NR_CPUS]; |
@@ -32,6 +33,7 @@ static inline const struct cpumask *cpu_book_mask(unsigned int cpu) | |||
32 | 33 | ||
33 | int topology_set_cpu_management(int fc); | 34 | int topology_set_cpu_management(int fc); |
34 | void topology_schedule_update(void); | 35 | void topology_schedule_update(void); |
36 | void store_topology(struct sysinfo_15_1_x *info); | ||
35 | 37 | ||
36 | #define POLARIZATION_UNKNWN (-1) | 38 | #define POLARIZATION_UNKNWN (-1) |
37 | #define POLARIZATION_HRZ (0) | 39 | #define POLARIZATION_HRZ (0) |
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 5232278d79ad..f3c1b823c9a8 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -84,6 +84,7 @@ int main(void) | |||
84 | DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); | 84 | DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); |
85 | DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); | 85 | DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); |
86 | DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); | 86 | DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); |
87 | DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); | ||
87 | DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); | 88 | DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); |
88 | DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); | 89 | DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); |
89 | DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); | 90 | DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); |
@@ -142,10 +143,8 @@ int main(void) | |||
142 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); | 143 | DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); |
143 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); | 144 | DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); |
144 | #ifdef CONFIG_32BIT | 145 | #ifdef CONFIG_32BIT |
145 | DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params)); | ||
146 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); | 146 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); |
147 | #else /* CONFIG_32BIT */ | 147 | #else /* CONFIG_32BIT */ |
148 | DEFINE(__LC_PFAULT_INTPARM, offsetof(struct _lowcore, ext_params2)); | ||
149 | DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); | 148 | DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); |
150 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); | 149 | DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); |
151 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); | 150 | DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); |
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h index 123dd660d7fb..3141025724f4 100644 --- a/arch/s390/kernel/compat_ptrace.h +++ b/arch/s390/kernel/compat_ptrace.h | |||
@@ -51,8 +51,7 @@ struct user_regs_struct32 | |||
51 | * watchpoints. This is the way intel does it. | 51 | * watchpoints. This is the way intel does it. |
52 | */ | 52 | */ |
53 | per_struct32 per_info; | 53 | per_struct32 per_info; |
54 | u32 ieee_instruction_pointer; | 54 | u32 ieee_instruction_pointer; /* obsolete, always 0 */ |
55 | /* Used to give failing instruction back to user for ieee exceptions */ | ||
56 | }; | 55 | }; |
57 | 56 | ||
58 | struct user32 { | 57 | struct user32 { |
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index b39b27d68b45..c83726c9fe03 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -113,7 +113,7 @@ enum { | |||
113 | INSTR_INVALID, | 113 | INSTR_INVALID, |
114 | INSTR_E, | 114 | INSTR_E, |
115 | INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, | 115 | INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, |
116 | INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, | 116 | INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0, |
117 | INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, | 117 | INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, |
118 | INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU, | 118 | INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU, |
119 | INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, | 119 | INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, |
@@ -122,13 +122,14 @@ enum { | |||
122 | INSTR_RRE_RR, INSTR_RRE_RR_OPT, | 122 | INSTR_RRE_RR, INSTR_RRE_RR_OPT, |
123 | INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, | 123 | INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, |
124 | INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, | 124 | INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, |
125 | INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, | 125 | INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, |
126 | INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, | 126 | INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, |
127 | INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, | 127 | INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, |
128 | INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, | 128 | INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, |
129 | INSTR_RSI_RRP, | 129 | INSTR_RSI_RRP, |
130 | INSTR_RSL_R0RD, | 130 | INSTR_RSL_R0RD, |
131 | INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, | 131 | INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, |
132 | INSTR_RSY_RDRM, | ||
132 | INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, | 133 | INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, |
133 | INSTR_RS_RURD, | 134 | INSTR_RS_RURD, |
134 | INSTR_RXE_FRRD, INSTR_RXE_RRRD, | 135 | INSTR_RXE_FRRD, INSTR_RXE_RRRD, |
@@ -139,7 +140,7 @@ enum { | |||
139 | INSTR_SIY_IRD, INSTR_SIY_URD, | 140 | INSTR_SIY_IRD, INSTR_SIY_URD, |
140 | INSTR_SI_URD, | 141 | INSTR_SI_URD, |
141 | INSTR_SSE_RDRD, | 142 | INSTR_SSE_RDRD, |
142 | INSTR_SSF_RRDRD, | 143 | INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2, |
143 | INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, | 144 | INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, |
144 | INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, | 145 | INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, |
145 | INSTR_S_00, INSTR_S_RD, | 146 | INSTR_S_00, INSTR_S_RD, |
@@ -152,7 +153,7 @@ struct operand { | |||
152 | }; | 153 | }; |
153 | 154 | ||
154 | struct insn { | 155 | struct insn { |
155 | const char name[6]; | 156 | const char name[5]; |
156 | unsigned char opfrag; | 157 | unsigned char opfrag; |
157 | unsigned char format; | 158 | unsigned char format; |
158 | }; | 159 | }; |
@@ -217,6 +218,7 @@ static const unsigned char formats[][7] = { | |||
217 | [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, | 218 | [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, |
218 | [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, | 219 | [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, |
219 | [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, | 220 | [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, |
221 | [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 }, | ||
220 | [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, | 222 | [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, |
221 | [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, | 223 | [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, |
222 | [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, | 224 | [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, |
@@ -248,6 +250,7 @@ static const unsigned char formats[][7] = { | |||
248 | [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, | 250 | [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, |
249 | [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, | 251 | [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, |
250 | [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, | 252 | [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, |
253 | [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 }, | ||
251 | [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, | 254 | [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, |
252 | [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, | 255 | [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, |
253 | [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, | 256 | [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, |
@@ -269,6 +272,7 @@ static const unsigned char formats[][7] = { | |||
269 | [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, | 272 | [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, |
270 | [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, | 273 | [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, |
271 | [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, | 274 | [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, |
275 | [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 }, | ||
272 | [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, | 276 | [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, |
273 | [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, | 277 | [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, |
274 | [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, | 278 | [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, |
@@ -290,6 +294,7 @@ static const unsigned char formats[][7] = { | |||
290 | [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, | 294 | [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, |
291 | [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, | 295 | [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, |
292 | [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, | 296 | [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, |
297 | [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 }, | ||
293 | [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, | 298 | [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, |
294 | [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, | 299 | [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, |
295 | [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, | 300 | [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, |
@@ -300,6 +305,36 @@ static const unsigned char formats[][7] = { | |||
300 | [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, | 305 | [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, |
301 | }; | 306 | }; |
302 | 307 | ||
308 | enum { | ||
309 | LONG_INSN_ALGHSIK, | ||
310 | LONG_INSN_ALHSIK, | ||
311 | LONG_INSN_CLFHSI, | ||
312 | LONG_INSN_CLGFRL, | ||
313 | LONG_INSN_CLGHRL, | ||
314 | LONG_INSN_CLGHSI, | ||
315 | LONG_INSN_CLHHSI, | ||
316 | LONG_INSN_LLGFRL, | ||
317 | LONG_INSN_LLGHRL, | ||
318 | LONG_INSN_POPCNT, | ||
319 | LONG_INSN_RISBHG, | ||
320 | LONG_INSN_RISBLG, | ||
321 | }; | ||
322 | |||
323 | static char *long_insn_name[] = { | ||
324 | [LONG_INSN_ALGHSIK] = "alghsik", | ||
325 | [LONG_INSN_ALHSIK] = "alhsik", | ||
326 | [LONG_INSN_CLFHSI] = "clfhsi", | ||
327 | [LONG_INSN_CLGFRL] = "clgfrl", | ||
328 | [LONG_INSN_CLGHRL] = "clghrl", | ||
329 | [LONG_INSN_CLGHSI] = "clghsi", | ||
330 | [LONG_INSN_CLHHSI] = "clhhsi", | ||
331 | [LONG_INSN_LLGFRL] = "llgfrl", | ||
332 | [LONG_INSN_LLGHRL] = "llghrl", | ||
333 | [LONG_INSN_POPCNT] = "popcnt", | ||
334 | [LONG_INSN_RISBHG] = "risbhg", | ||
335 | [LONG_INSN_RISBLG] = "risblg", | ||
336 | }; | ||
337 | |||
303 | static struct insn opcode[] = { | 338 | static struct insn opcode[] = { |
304 | #ifdef CONFIG_64BIT | 339 | #ifdef CONFIG_64BIT |
305 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, | 340 | { "lmd", 0xef, INSTR_SS_RRRDRD3 }, |
@@ -881,6 +916,35 @@ static struct insn opcode_b9[] = { | |||
881 | { "pfmf", 0xaf, INSTR_RRE_RR }, | 916 | { "pfmf", 0xaf, INSTR_RRE_RR }, |
882 | { "trte", 0xbf, INSTR_RRF_M0RR }, | 917 | { "trte", 0xbf, INSTR_RRF_M0RR }, |
883 | { "trtre", 0xbd, INSTR_RRF_M0RR }, | 918 | { "trtre", 0xbd, INSTR_RRF_M0RR }, |
919 | { "ahhhr", 0xc8, INSTR_RRF_R0RR2 }, | ||
920 | { "shhhr", 0xc9, INSTR_RRF_R0RR2 }, | ||
921 | { "alhhh", 0xca, INSTR_RRF_R0RR2 }, | ||
922 | { "alhhl", 0xca, INSTR_RRF_R0RR2 }, | ||
923 | { "slhhh", 0xcb, INSTR_RRF_R0RR2 }, | ||
924 | { "chhr ", 0xcd, INSTR_RRE_RR }, | ||
925 | { "clhhr", 0xcf, INSTR_RRE_RR }, | ||
926 | { "ahhlr", 0xd8, INSTR_RRF_R0RR2 }, | ||
927 | { "shhlr", 0xd9, INSTR_RRF_R0RR2 }, | ||
928 | { "slhhl", 0xdb, INSTR_RRF_R0RR2 }, | ||
929 | { "chlr", 0xdd, INSTR_RRE_RR }, | ||
930 | { "clhlr", 0xdf, INSTR_RRE_RR }, | ||
931 | { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR }, | ||
932 | { "locgr", 0xe2, INSTR_RRF_M0RR }, | ||
933 | { "ngrk", 0xe4, INSTR_RRF_R0RR2 }, | ||
934 | { "ogrk", 0xe6, INSTR_RRF_R0RR2 }, | ||
935 | { "xgrk", 0xe7, INSTR_RRF_R0RR2 }, | ||
936 | { "agrk", 0xe8, INSTR_RRF_R0RR2 }, | ||
937 | { "sgrk", 0xe9, INSTR_RRF_R0RR2 }, | ||
938 | { "algrk", 0xea, INSTR_RRF_R0RR2 }, | ||
939 | { "slgrk", 0xeb, INSTR_RRF_R0RR2 }, | ||
940 | { "locr", 0xf2, INSTR_RRF_M0RR }, | ||
941 | { "nrk", 0xf4, INSTR_RRF_R0RR2 }, | ||
942 | { "ork", 0xf6, INSTR_RRF_R0RR2 }, | ||
943 | { "xrk", 0xf7, INSTR_RRF_R0RR2 }, | ||
944 | { "ark", 0xf8, INSTR_RRF_R0RR2 }, | ||
945 | { "srk", 0xf9, INSTR_RRF_R0RR2 }, | ||
946 | { "alrk", 0xfa, INSTR_RRF_R0RR2 }, | ||
947 | { "slrk", 0xfb, INSTR_RRF_R0RR2 }, | ||
884 | #endif | 948 | #endif |
885 | { "kmac", 0x1e, INSTR_RRE_RR }, | 949 | { "kmac", 0x1e, INSTR_RRE_RR }, |
886 | { "lrvr", 0x1f, INSTR_RRE_RR }, | 950 | { "lrvr", 0x1f, INSTR_RRE_RR }, |
@@ -949,9 +1013,9 @@ static struct insn opcode_c4[] = { | |||
949 | { "lgfrl", 0x0c, INSTR_RIL_RP }, | 1013 | { "lgfrl", 0x0c, INSTR_RIL_RP }, |
950 | { "lhrl", 0x05, INSTR_RIL_RP }, | 1014 | { "lhrl", 0x05, INSTR_RIL_RP }, |
951 | { "lghrl", 0x04, INSTR_RIL_RP }, | 1015 | { "lghrl", 0x04, INSTR_RIL_RP }, |
952 | { "llgfrl", 0x0e, INSTR_RIL_RP }, | 1016 | { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, |
953 | { "llhrl", 0x02, INSTR_RIL_RP }, | 1017 | { "llhrl", 0x02, INSTR_RIL_RP }, |
954 | { "llghrl", 0x06, INSTR_RIL_RP }, | 1018 | { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP }, |
955 | { "strl", 0x0f, INSTR_RIL_RP }, | 1019 | { "strl", 0x0f, INSTR_RIL_RP }, |
956 | { "stgrl", 0x0b, INSTR_RIL_RP }, | 1020 | { "stgrl", 0x0b, INSTR_RIL_RP }, |
957 | { "sthrl", 0x07, INSTR_RIL_RP }, | 1021 | { "sthrl", 0x07, INSTR_RIL_RP }, |
@@ -968,9 +1032,9 @@ static struct insn opcode_c6[] = { | |||
968 | { "cghrl", 0x04, INSTR_RIL_RP }, | 1032 | { "cghrl", 0x04, INSTR_RIL_RP }, |
969 | { "clrl", 0x0f, INSTR_RIL_RP }, | 1033 | { "clrl", 0x0f, INSTR_RIL_RP }, |
970 | { "clgrl", 0x0a, INSTR_RIL_RP }, | 1034 | { "clgrl", 0x0a, INSTR_RIL_RP }, |
971 | { "clgfrl", 0x0e, INSTR_RIL_RP }, | 1035 | { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, |
972 | { "clhrl", 0x07, INSTR_RIL_RP }, | 1036 | { "clhrl", 0x07, INSTR_RIL_RP }, |
973 | { "clghrl", 0x06, INSTR_RIL_RP }, | 1037 | { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP }, |
974 | { "pfdrl", 0x02, INSTR_RIL_UP }, | 1038 | { "pfdrl", 0x02, INSTR_RIL_UP }, |
975 | { "exrl", 0x00, INSTR_RIL_RP }, | 1039 | { "exrl", 0x00, INSTR_RIL_RP }, |
976 | #endif | 1040 | #endif |
@@ -982,6 +1046,20 @@ static struct insn opcode_c8[] = { | |||
982 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, | 1046 | { "mvcos", 0x00, INSTR_SSF_RRDRD }, |
983 | { "ectg", 0x01, INSTR_SSF_RRDRD }, | 1047 | { "ectg", 0x01, INSTR_SSF_RRDRD }, |
984 | { "csst", 0x02, INSTR_SSF_RRDRD }, | 1048 | { "csst", 0x02, INSTR_SSF_RRDRD }, |
1049 | { "lpd", 0x04, INSTR_SSF_RRDRD2 }, | ||
1050 | { "lpdg ", 0x05, INSTR_SSF_RRDRD2 }, | ||
1051 | #endif | ||
1052 | { "", 0, INSTR_INVALID } | ||
1053 | }; | ||
1054 | |||
1055 | static struct insn opcode_cc[] = { | ||
1056 | #ifdef CONFIG_64BIT | ||
1057 | { "brcth", 0x06, INSTR_RIL_RP }, | ||
1058 | { "aih", 0x08, INSTR_RIL_RI }, | ||
1059 | { "alsih", 0x0a, INSTR_RIL_RI }, | ||
1060 | { "alsih", 0x0b, INSTR_RIL_RI }, | ||
1061 | { "cih", 0x0d, INSTR_RIL_RI }, | ||
1062 | { "clih ", 0x0f, INSTR_RIL_RI }, | ||
985 | #endif | 1063 | #endif |
986 | { "", 0, INSTR_INVALID } | 1064 | { "", 0, INSTR_INVALID } |
987 | }; | 1065 | }; |
@@ -1063,6 +1141,16 @@ static struct insn opcode_e3[] = { | |||
1063 | { "mfy", 0x5c, INSTR_RXY_RRRD }, | 1141 | { "mfy", 0x5c, INSTR_RXY_RRRD }, |
1064 | { "mhy", 0x7c, INSTR_RXY_RRRD }, | 1142 | { "mhy", 0x7c, INSTR_RXY_RRRD }, |
1065 | { "pfd", 0x36, INSTR_RXY_URRD }, | 1143 | { "pfd", 0x36, INSTR_RXY_URRD }, |
1144 | { "lbh", 0xc0, INSTR_RXY_RRRD }, | ||
1145 | { "llch", 0xc2, INSTR_RXY_RRRD }, | ||
1146 | { "stch", 0xc3, INSTR_RXY_RRRD }, | ||
1147 | { "lhh", 0xc4, INSTR_RXY_RRRD }, | ||
1148 | { "llhh", 0xc6, INSTR_RXY_RRRD }, | ||
1149 | { "sthh", 0xc7, INSTR_RXY_RRRD }, | ||
1150 | { "lfh", 0xca, INSTR_RXY_RRRD }, | ||
1151 | { "stfh", 0xcb, INSTR_RXY_RRRD }, | ||
1152 | { "chf", 0xcd, INSTR_RXY_RRRD }, | ||
1153 | { "clhf", 0xcf, INSTR_RXY_RRRD }, | ||
1066 | #endif | 1154 | #endif |
1067 | { "lrv", 0x1e, INSTR_RXY_RRRD }, | 1155 | { "lrv", 0x1e, INSTR_RXY_RRRD }, |
1068 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, | 1156 | { "lrvh", 0x1f, INSTR_RXY_RRRD }, |
@@ -1080,9 +1168,9 @@ static struct insn opcode_e5[] = { | |||
1080 | { "chhsi", 0x54, INSTR_SIL_RDI }, | 1168 | { "chhsi", 0x54, INSTR_SIL_RDI }, |
1081 | { "chsi", 0x5c, INSTR_SIL_RDI }, | 1169 | { "chsi", 0x5c, INSTR_SIL_RDI }, |
1082 | { "cghsi", 0x58, INSTR_SIL_RDI }, | 1170 | { "cghsi", 0x58, INSTR_SIL_RDI }, |
1083 | { "clhhsi", 0x55, INSTR_SIL_RDU }, | 1171 | { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU }, |
1084 | { "clfhsi", 0x5d, INSTR_SIL_RDU }, | 1172 | { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, |
1085 | { "clghsi", 0x59, INSTR_SIL_RDU }, | 1173 | { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU }, |
1086 | { "mvhhi", 0x44, INSTR_SIL_RDI }, | 1174 | { "mvhhi", 0x44, INSTR_SIL_RDI }, |
1087 | { "mvhi", 0x4c, INSTR_SIL_RDI }, | 1175 | { "mvhi", 0x4c, INSTR_SIL_RDI }, |
1088 | { "mvghi", 0x48, INSTR_SIL_RDI }, | 1176 | { "mvghi", 0x48, INSTR_SIL_RDI }, |
@@ -1137,6 +1225,24 @@ static struct insn opcode_eb[] = { | |||
1137 | { "alsi", 0x6e, INSTR_SIY_IRD }, | 1225 | { "alsi", 0x6e, INSTR_SIY_IRD }, |
1138 | { "algsi", 0x7e, INSTR_SIY_IRD }, | 1226 | { "algsi", 0x7e, INSTR_SIY_IRD }, |
1139 | { "ecag", 0x4c, INSTR_RSY_RRRD }, | 1227 | { "ecag", 0x4c, INSTR_RSY_RRRD }, |
1228 | { "srak", 0xdc, INSTR_RSY_RRRD }, | ||
1229 | { "slak", 0xdd, INSTR_RSY_RRRD }, | ||
1230 | { "srlk", 0xde, INSTR_RSY_RRRD }, | ||
1231 | { "sllk", 0xdf, INSTR_RSY_RRRD }, | ||
1232 | { "locg", 0xe2, INSTR_RSY_RDRM }, | ||
1233 | { "stocg", 0xe3, INSTR_RSY_RDRM }, | ||
1234 | { "lang", 0xe4, INSTR_RSY_RRRD }, | ||
1235 | { "laog", 0xe6, INSTR_RSY_RRRD }, | ||
1236 | { "laxg", 0xe7, INSTR_RSY_RRRD }, | ||
1237 | { "laag", 0xe8, INSTR_RSY_RRRD }, | ||
1238 | { "laalg", 0xea, INSTR_RSY_RRRD }, | ||
1239 | { "loc", 0xf2, INSTR_RSY_RDRM }, | ||
1240 | { "stoc", 0xf3, INSTR_RSY_RDRM }, | ||
1241 | { "lan", 0xf4, INSTR_RSY_RRRD }, | ||
1242 | { "lao", 0xf6, INSTR_RSY_RRRD }, | ||
1243 | { "lax", 0xf7, INSTR_RSY_RRRD }, | ||
1244 | { "laa", 0xf8, INSTR_RSY_RRRD }, | ||
1245 | { "laal", 0xfa, INSTR_RSY_RRRD }, | ||
1140 | #endif | 1246 | #endif |
1141 | { "rll", 0x1d, INSTR_RSY_RRRD }, | 1247 | { "rll", 0x1d, INSTR_RSY_RRRD }, |
1142 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, | 1248 | { "mvclu", 0x8e, INSTR_RSY_RRRD }, |
@@ -1172,6 +1278,12 @@ static struct insn opcode_ec[] = { | |||
1172 | { "rxsbg", 0x57, INSTR_RIE_RRUUU }, | 1278 | { "rxsbg", 0x57, INSTR_RIE_RRUUU }, |
1173 | { "rosbg", 0x56, INSTR_RIE_RRUUU }, | 1279 | { "rosbg", 0x56, INSTR_RIE_RRUUU }, |
1174 | { "risbg", 0x55, INSTR_RIE_RRUUU }, | 1280 | { "risbg", 0x55, INSTR_RIE_RRUUU }, |
1281 | { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, | ||
1282 | { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU }, | ||
1283 | { "ahik", 0xd8, INSTR_RIE_RRI0 }, | ||
1284 | { "aghik", 0xd9, INSTR_RIE_RRI0 }, | ||
1285 | { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 }, | ||
1286 | { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 }, | ||
1175 | #endif | 1287 | #endif |
1176 | { "", 0, INSTR_INVALID } | 1288 | { "", 0, INSTR_INVALID } |
1177 | }; | 1289 | }; |
@@ -1321,6 +1433,9 @@ static struct insn *find_insn(unsigned char *code) | |||
1321 | case 0xc8: | 1433 | case 0xc8: |
1322 | table = opcode_c8; | 1434 | table = opcode_c8; |
1323 | break; | 1435 | break; |
1436 | case 0xcc: | ||
1437 | table = opcode_cc; | ||
1438 | break; | ||
1324 | case 0xe3: | 1439 | case 0xe3: |
1325 | table = opcode_e3; | 1440 | table = opcode_e3; |
1326 | opfrag = code[5]; | 1441 | opfrag = code[5]; |
@@ -1367,7 +1482,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) | |||
1367 | ptr = buffer; | 1482 | ptr = buffer; |
1368 | insn = find_insn(code); | 1483 | insn = find_insn(code); |
1369 | if (insn) { | 1484 | if (insn) { |
1370 | ptr += sprintf(ptr, "%.5s\t", insn->name); | 1485 | if (insn->name[0] == '\0') |
1486 | ptr += sprintf(ptr, "%s\t", | ||
1487 | long_insn_name[(int) insn->name[1]]); | ||
1488 | else | ||
1489 | ptr += sprintf(ptr, "%.5s\t", insn->name); | ||
1371 | /* Extract the operands. */ | 1490 | /* Extract the operands. */ |
1372 | separator = 0; | 1491 | separator = 0; |
1373 | for (ops = formats[insn->format] + 1, i = 0; | 1492 | for (ops = formats[insn->format] + 1, i = 0; |
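The long_insn_name table works around the five-byte name field in struct insn: an entry such as { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU } puts a NUL into name[0] and the table index into name[1], and print_insn() (last hunk above) then prints long_insn_name[name[1]] instead of the %.5s short form. The dispatch boils down to this sketch (illustrative helper, not in the patch):

	static const char *insn_mnemonic(const struct insn *insn)
	{
		if (insn->name[0] == '\0')	/* mnemonic longer than 5 characters */
			return long_insn_name[(int) insn->name[1]];
		return insn->name;		/* short name, printed with "%.5s" */
	}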
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index c00856ad4e5a..d149609e46e6 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -208,7 +208,8 @@ static noinline __init void init_kernel_storage_key(void) | |||
208 | end_pfn = PFN_UP(__pa(&_end)); | 208 | end_pfn = PFN_UP(__pa(&_end)); |
209 | 209 | ||
210 | for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) | 210 | for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) |
211 | page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); | 211 | page_set_storage_key(init_pfn << PAGE_SHIFT, |
212 | PAGE_DEFAULT_KEY, 0); | ||
212 | } | 213 | } |
213 | 214 | ||
214 | static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); | 215 | static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); |
@@ -255,13 +256,35 @@ static noinline __init void setup_lowcore_early(void) | |||
255 | s390_base_pgm_handler_fn = early_pgm_check_handler; | 256 | s390_base_pgm_handler_fn = early_pgm_check_handler; |
256 | } | 257 | } |
257 | 258 | ||
259 | static noinline __init void setup_facility_list(void) | ||
260 | { | ||
261 | unsigned long nr; | ||
262 | |||
263 | S390_lowcore.stfl_fac_list = 0; | ||
264 | asm volatile( | ||
265 | " .insn s,0xb2b10000,0(0)\n" /* stfl */ | ||
266 | "0:\n" | ||
267 | EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list)); | ||
268 | memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); | ||
269 | nr = 4; /* # bytes stored by stfl */ | ||
270 | if (test_facility(7)) { | ||
271 | /* More facility bits available with stfle */ | ||
272 | register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1; | ||
273 | asm volatile(".insn s,0xb2b00000,%0" /* stfle */ | ||
274 | : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0) | ||
275 | : : "cc"); | ||
276 | nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ | ||
277 | } | ||
278 | memset((char *) S390_lowcore.stfle_fac_list + nr, 0, | ||
279 | MAX_FACILITY_BIT/8 - nr); | ||
280 | } | ||
281 | |||
258 | static noinline __init void setup_hpage(void) | 282 | static noinline __init void setup_hpage(void) |
259 | { | 283 | { |
260 | #ifndef CONFIG_DEBUG_PAGEALLOC | 284 | #ifndef CONFIG_DEBUG_PAGEALLOC |
261 | unsigned int facilities; | 285 | unsigned int facilities; |
262 | 286 | ||
263 | facilities = stfl(); | 287 | if (!test_facility(2) || !test_facility(8)) |
264 | if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) | ||
265 | return; | 288 | return; |
266 | S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; | 289 | S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; |
267 | __ctl_set_bit(0, 23); | 290 | __ctl_set_bit(0, 23); |
@@ -355,18 +378,15 @@ static __init void detect_diag44(void) | |||
355 | static __init void detect_machine_facilities(void) | 378 | static __init void detect_machine_facilities(void) |
356 | { | 379 | { |
357 | #ifdef CONFIG_64BIT | 380 | #ifdef CONFIG_64BIT |
358 | unsigned int facilities; | 381 | if (test_facility(3)) |
359 | unsigned long long facility_bits; | ||
360 | |||
361 | facilities = stfl(); | ||
362 | if (facilities & (1 << 28)) | ||
363 | S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; | 382 | S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; |
364 | if (facilities & (1 << 23)) | 383 | if (test_facility(8)) |
365 | S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; | 384 | S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; |
366 | if (facilities & (1 << 4)) | 385 | if (test_facility(11)) |
386 | S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY; | ||
387 | if (test_facility(27)) | ||
367 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; | 388 | S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; |
368 | if ((stfle(&facility_bits, 1) > 0) && | 389 | if (test_facility(40)) |
369 | (facility_bits & (1ULL << (63 - 40)))) | ||
370 | S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; | 390 | S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; |
371 | #endif | 391 | #endif |
372 | } | 392 | } |
@@ -447,6 +467,7 @@ void __init startup_init(void) | |||
447 | lockdep_off(); | 467 | lockdep_off(); |
448 | sort_main_extable(); | 468 | sort_main_extable(); |
449 | setup_lowcore_early(); | 469 | setup_lowcore_early(); |
470 | setup_facility_list(); | ||
450 | detect_machine_type(); | 471 | detect_machine_type(); |
451 | ipl_update_parameters(); | 472 | ipl_update_parameters(); |
452 | setup_boot_command_line(); | 473 | setup_boot_command_line(); |
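
setup_facility_list() executes stfl, and stfle when facility 7 indicates that more bits are available, and caches the resulting bit string in the lowcore; later code such as setup_hpage() and detect_machine_facilities() then only consults test_facility(nr). Facility bits are numbered big-endian, so bit 0 is the most significant bit of the first byte stored. A user-space model of that lookup; the list size and the sample bits are assumptions:

#include <stdio.h>

#define MAX_FACILITY_BIT	(8 * 32)	/* assumed size of the cached list */

/* Facility list as stfl/stfle store it: facility 0 is the MSB of byte 0. */
static unsigned char stfle_fac_list[MAX_FACILITY_BIT / 8];

static int test_facility(unsigned long nr)
{
	if (nr >= MAX_FACILITY_BIT)
		return 0;
	return (stfle_fac_list[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}

int main(void)
{
	/* Pretend facilities 2, 3 and 8 are installed. */
	stfle_fac_list[0] = 0x30;	/* bits 2 and 3 */
	stfle_fac_list[1] = 0x80;	/* bit 8 */

	printf("facility 2:  %d\n", test_facility(2));
	printf("facility 8:  %d\n", test_facility(8));
	printf("facility 40: %d\n", test_facility(40));
	return 0;
}
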
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bea9ee37ac9d..5efce7202984 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -72,25 +72,9 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
72 | l %r1,BASED(.Ltrace_irq_off_caller) | 72 | l %r1,BASED(.Ltrace_irq_off_caller) |
73 | basr %r14,%r1 | 73 | basr %r14,%r1 |
74 | .endm | 74 | .endm |
75 | |||
76 | .macro TRACE_IRQS_CHECK_ON | ||
77 | tm SP_PSW(%r15),0x03 # irqs enabled? | ||
78 | bz BASED(0f) | ||
79 | TRACE_IRQS_ON | ||
80 | 0: | ||
81 | .endm | ||
82 | |||
83 | .macro TRACE_IRQS_CHECK_OFF | ||
84 | tm SP_PSW(%r15),0x03 # irqs enabled? | ||
85 | bz BASED(0f) | ||
86 | TRACE_IRQS_OFF | ||
87 | 0: | ||
88 | .endm | ||
89 | #else | 75 | #else |
90 | #define TRACE_IRQS_ON | 76 | #define TRACE_IRQS_ON |
91 | #define TRACE_IRQS_OFF | 77 | #define TRACE_IRQS_OFF |
92 | #define TRACE_IRQS_CHECK_ON | ||
93 | #define TRACE_IRQS_CHECK_OFF | ||
94 | #endif | 78 | #endif |
95 | 79 | ||
96 | #ifdef CONFIG_LOCKDEP | 80 | #ifdef CONFIG_LOCKDEP |
@@ -198,6 +182,12 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
198 | lpsw \psworg # back to caller | 182 | lpsw \psworg # back to caller |
199 | .endm | 183 | .endm |
200 | 184 | ||
185 | .macro REENABLE_IRQS | ||
186 | mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) | ||
187 | ni __SF_EMPTY(%r15),0xbf | ||
188 | ssm __SF_EMPTY(%r15) | ||
189 | .endm | ||
190 | |||
201 | /* | 191 | /* |
202 | * Scheduler resume function, called by switch_to | 192 | * Scheduler resume function, called by switch_to |
203 | * gpr2 = (task_struct *) prev | 193 | * gpr2 = (task_struct *) prev |
@@ -264,12 +254,11 @@ sysc_do_svc: | |||
264 | bnl BASED(sysc_nr_ok) | 254 | bnl BASED(sysc_nr_ok) |
265 | lr %r7,%r1 # copy svc number to %r7 | 255 | lr %r7,%r1 # copy svc number to %r7 |
266 | sysc_nr_ok: | 256 | sysc_nr_ok: |
267 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | ||
268 | sysc_do_restart: | ||
269 | sth %r7,SP_SVCNR(%r15) | 257 | sth %r7,SP_SVCNR(%r15) |
270 | sll %r7,2 # svc number *4 | 258 | sll %r7,2 # svc number *4 |
271 | l %r8,BASED(.Lsysc_table) | 259 | l %r8,BASED(.Lsysc_table) |
272 | tm __TI_flags+2(%r9),_TIF_SYSCALL | 260 | tm __TI_flags+2(%r9),_TIF_SYSCALL |
261 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | ||
273 | l %r8,0(%r7,%r8) # get system call addr. | 262 | l %r8,0(%r7,%r8) # get system call addr. |
274 | bnz BASED(sysc_tracesys) | 263 | bnz BASED(sysc_tracesys) |
275 | basr %r14,%r8 # call sys_xxxx | 264 | basr %r14,%r8 # call sys_xxxx |
@@ -357,7 +346,7 @@ sysc_restart: | |||
357 | l %r7,SP_R2(%r15) # load new svc number | 346 | l %r7,SP_R2(%r15) # load new svc number |
358 | mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument | 347 | mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument |
359 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | 348 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
360 | b BASED(sysc_do_restart) # restart svc | 349 | b BASED(sysc_nr_ok) # restart svc |
361 | 350 | ||
362 | # | 351 | # |
363 | # _TIF_SINGLE_STEP is set, call do_single_step | 352 | # _TIF_SINGLE_STEP is set, call do_single_step |
@@ -390,6 +379,7 @@ sysc_tracesys: | |||
390 | l %r8,0(%r7,%r8) | 379 | l %r8,0(%r7,%r8) |
391 | sysc_tracego: | 380 | sysc_tracego: |
392 | lm %r3,%r6,SP_R3(%r15) | 381 | lm %r3,%r6,SP_R3(%r15) |
382 | mvc SP_ARGS(4,%r15),SP_R7(%r15) | ||
393 | l %r2,SP_ORIG_R2(%r15) | 383 | l %r2,SP_ORIG_R2(%r15) |
394 | basr %r14,%r8 # call sys_xxx | 384 | basr %r14,%r8 # call sys_xxx |
395 | st %r2,SP_R2(%r15) # store return value | 385 | st %r2,SP_R2(%r15) # store return value |
@@ -440,13 +430,11 @@ kernel_execve: | |||
440 | br %r14 | 430 | br %r14 |
441 | # execve succeeded. | 431 | # execve succeeded. |
442 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | 432 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts |
443 | TRACE_IRQS_OFF | ||
444 | l %r15,__LC_KERNEL_STACK # load ksp | 433 | l %r15,__LC_KERNEL_STACK # load ksp |
445 | s %r15,BASED(.Lc_spsize) # make room for registers & psw | 434 | s %r15,BASED(.Lc_spsize) # make room for registers & psw |
446 | l %r9,__LC_THREAD_INFO | 435 | l %r9,__LC_THREAD_INFO |
447 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | 436 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs |
448 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 437 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
449 | TRACE_IRQS_ON | ||
450 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 438 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
451 | l %r1,BASED(.Lexecve_tail) | 439 | l %r1,BASED(.Lexecve_tail) |
452 | basr %r14,%r1 | 440 | basr %r14,%r1 |
@@ -483,9 +471,10 @@ pgm_check_handler: | |||
483 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 471 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
484 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 472 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
485 | pgm_no_vtime: | 473 | pgm_no_vtime: |
486 | TRACE_IRQS_CHECK_OFF | ||
487 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 474 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
488 | l %r3,__LC_PGM_ILC # load program interruption code | 475 | l %r3,__LC_PGM_ILC # load program interruption code |
476 | l %r4,__LC_TRANS_EXC_CODE | ||
477 | REENABLE_IRQS | ||
489 | la %r8,0x7f | 478 | la %r8,0x7f |
490 | nr %r8,%r3 | 479 | nr %r8,%r3 |
491 | pgm_do_call: | 480 | pgm_do_call: |
@@ -495,7 +484,6 @@ pgm_do_call: | |||
495 | la %r2,SP_PTREGS(%r15) # address of register-save area | 484 | la %r2,SP_PTREGS(%r15) # address of register-save area |
496 | basr %r14,%r7 # branch to interrupt-handler | 485 | basr %r14,%r7 # branch to interrupt-handler |
497 | pgm_exit: | 486 | pgm_exit: |
498 | TRACE_IRQS_CHECK_ON | ||
499 | b BASED(sysc_return) | 487 | b BASED(sysc_return) |
500 | 488 | ||
501 | # | 489 | # |
@@ -523,7 +511,6 @@ pgm_per_std: | |||
523 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 511 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
524 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 512 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
525 | pgm_no_vtime2: | 513 | pgm_no_vtime2: |
526 | TRACE_IRQS_CHECK_OFF | ||
527 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 514 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
528 | l %r1,__TI_task(%r9) | 515 | l %r1,__TI_task(%r9) |
529 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | 516 | tm SP_PSW+1(%r15),0x01 # kernel per event ? |
@@ -533,6 +520,8 @@ pgm_no_vtime2: | |||
533 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | 520 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID |
534 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 521 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
535 | l %r3,__LC_PGM_ILC # load program interruption code | 522 | l %r3,__LC_PGM_ILC # load program interruption code |
523 | l %r4,__LC_TRANS_EXC_CODE | ||
524 | REENABLE_IRQS | ||
536 | la %r8,0x7f | 525 | la %r8,0x7f |
537 | nr %r8,%r3 # clear per-event-bit and ilc | 526 | nr %r8,%r3 # clear per-event-bit and ilc |
538 | be BASED(pgm_exit2) # only per or per+check ? | 527 | be BASED(pgm_exit2) # only per or per+check ? |
@@ -542,8 +531,6 @@ pgm_no_vtime2: | |||
542 | la %r2,SP_PTREGS(%r15) # address of register-save area | 531 | la %r2,SP_PTREGS(%r15) # address of register-save area |
543 | basr %r14,%r7 # branch to interrupt-handler | 532 | basr %r14,%r7 # branch to interrupt-handler |
544 | pgm_exit2: | 533 | pgm_exit2: |
545 | TRACE_IRQS_ON | ||
546 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
547 | b BASED(sysc_return) | 534 | b BASED(sysc_return) |
548 | 535 | ||
549 | # | 536 | # |
@@ -557,13 +544,11 @@ pgm_svcper: | |||
557 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 544 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
558 | lh %r7,0x8a # get svc number from lowcore | 545 | lh %r7,0x8a # get svc number from lowcore |
559 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 546 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
560 | TRACE_IRQS_OFF | ||
561 | l %r8,__TI_task(%r9) | 547 | l %r8,__TI_task(%r9) |
562 | mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID | 548 | mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID |
563 | mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS | 549 | mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS |
564 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | 550 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID |
565 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 551 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
566 | TRACE_IRQS_ON | ||
567 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 552 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
568 | lm %r2,%r6,SP_R2(%r15) # load svc arguments | 553 | lm %r2,%r6,SP_R2(%r15) # load svc arguments |
569 | b BASED(sysc_do_svc) | 554 | b BASED(sysc_do_svc) |
@@ -737,7 +722,8 @@ ext_no_vtime: | |||
737 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 722 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
738 | TRACE_IRQS_OFF | 723 | TRACE_IRQS_OFF |
739 | la %r2,SP_PTREGS(%r15) # address of register-save area | 724 | la %r2,SP_PTREGS(%r15) # address of register-save area |
740 | lh %r3,__LC_EXT_INT_CODE # get interruption code | 725 | l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code |
726 | l %r4,__LC_EXT_PARAMS # get external parameters | ||
741 | l %r1,BASED(.Ldo_extint) | 727 | l %r1,BASED(.Ldo_extint) |
742 | basr %r14,%r1 | 728 | basr %r14,%r1 |
743 | b BASED(io_return) | 729 | b BASED(io_return) |
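
The TRACE_IRQS_CHECK_ON/OFF macros disappear because the program check path no longer unconditionally re-enables interrupts. The new REENABLE_IRQS macro copies byte 0 of the interrupted PSW into a scratch byte, clears the 0x40 bit (which appears to be the PER mask) with ni and loads the result with ssm, so the handler continues with whatever interrupt mask the interrupted context had. A rough C model of that decision; the bit values are assumptions for illustration:

#include <stdio.h>

#define PSW_PER_MASK	0x40	/* assumed: PER bit in PSW byte 0 */
#define PSW_IRQ_MASK	0x03	/* assumed: I/O + external mask bits */

/* Model of REENABLE_IRQS: reuse the saved mask, but never with PER set. */
static unsigned int reenable_irqs(unsigned char saved_psw_byte0)
{
	return saved_psw_byte0 & ~PSW_PER_MASK;
}

int main(void)
{
	unsigned char user_psw = 0x03;		/* irqs were enabled */
	unsigned char kernel_psw = 0x40;	/* irqs off, PER on */

	printf("from user:   mask %#04x -> irqs %s\n", reenable_irqs(user_psw),
	       (reenable_irqs(user_psw) & PSW_IRQ_MASK) ? "on" : "off");
	printf("from kernel: mask %#04x -> irqs %s\n", reenable_irqs(kernel_psw),
	       (reenable_irqs(kernel_psw) & PSW_IRQ_MASK) ? "on" : "off");
	return 0;
}
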
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index ff579b6bde06..95c1dfc4ef31 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,7 +5,7 @@ | |||
5 | #include <linux/signal.h> | 5 | #include <linux/signal.h> |
6 | #include <asm/ptrace.h> | 6 | #include <asm/ptrace.h> |
7 | 7 | ||
8 | typedef void pgm_check_handler_t(struct pt_regs *, long); | 8 | typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long); |
9 | extern pgm_check_handler_t *pgm_check_table[128]; | 9 | extern pgm_check_handler_t *pgm_check_table[128]; |
10 | pgm_check_handler_t do_protection_exception; | 10 | pgm_check_handler_t do_protection_exception; |
11 | pgm_check_handler_t do_dat_exception; | 11 | pgm_check_handler_t do_dat_exception; |
@@ -19,7 +19,7 @@ void do_signal(struct pt_regs *regs); | |||
19 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, | 19 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, |
20 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); | 20 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); |
21 | 21 | ||
22 | void do_extint(struct pt_regs *regs, unsigned short code); | 22 | void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); |
23 | int __cpuinit start_secondary(void *cpuvoid); | 23 | int __cpuinit start_secondary(void *cpuvoid); |
24 | void __init startup_init(void); | 24 | void __init startup_init(void); |
25 | void die(const char * str, struct pt_regs * regs, long err); | 25 | void die(const char * str, struct pt_regs * regs, long err); |
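
With the widened typedef every program check handler receives the translation exception code from lowcore as a third argument instead of fetching it itself, which is what lets the entry code above capture it before interrupts are re-enabled. A minimal handler matching the new pgm_check_handler_t shape might look like the sketch below; the handler name, the simplified pt_regs layout and the address mask are assumptions:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct pt_regs. */
struct pt_regs {
	unsigned long psw_mask;
	unsigned long psw_addr;
};

/* A handler with the new three-argument shape. */
static void example_pgm_handler(struct pt_regs *regs, long pgm_int_code,
				unsigned long trans_exc_code)
{
	/* Assumption: address in the upper bits, flag bits in the low 12. */
	unsigned long fault_addr = trans_exc_code & ~0xfffUL;

	printf("pgm code %ld at psw %#lx, fault address %#lx\n",
	       pgm_int_code & 0x7f, regs->psw_addr, fault_addr);
}

int main(void)
{
	struct pt_regs regs = { 0, 0x10000 };

	example_pgm_handler(&regs, 0x0004, 0x20003007UL);
	return 0;
}
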
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 8bccec15ea90..a2be23922f43 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -79,25 +79,9 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | |||
79 | basr %r2,%r0 | 79 | basr %r2,%r0 |
80 | brasl %r14,trace_hardirqs_off_caller | 80 | brasl %r14,trace_hardirqs_off_caller |
81 | .endm | 81 | .endm |
82 | |||
83 | .macro TRACE_IRQS_CHECK_ON | ||
84 | tm SP_PSW(%r15),0x03 # irqs enabled? | ||
85 | jz 0f | ||
86 | TRACE_IRQS_ON | ||
87 | 0: | ||
88 | .endm | ||
89 | |||
90 | .macro TRACE_IRQS_CHECK_OFF | ||
91 | tm SP_PSW(%r15),0x03 # irqs enabled? | ||
92 | jz 0f | ||
93 | TRACE_IRQS_OFF | ||
94 | 0: | ||
95 | .endm | ||
96 | #else | 82 | #else |
97 | #define TRACE_IRQS_ON | 83 | #define TRACE_IRQS_ON |
98 | #define TRACE_IRQS_OFF | 84 | #define TRACE_IRQS_OFF |
99 | #define TRACE_IRQS_CHECK_ON | ||
100 | #define TRACE_IRQS_CHECK_OFF | ||
101 | #endif | 85 | #endif |
102 | 86 | ||
103 | #ifdef CONFIG_LOCKDEP | 87 | #ifdef CONFIG_LOCKDEP |
@@ -207,6 +191,12 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ | |||
207 | 0: | 191 | 0: |
208 | .endm | 192 | .endm |
209 | 193 | ||
194 | .macro REENABLE_IRQS | ||
195 | mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) | ||
196 | ni __SF_EMPTY(%r15),0xbf | ||
197 | ssm __SF_EMPTY(%r15) | ||
198 | .endm | ||
199 | |||
210 | /* | 200 | /* |
211 | * Scheduler resume function, called by switch_to | 201 | * Scheduler resume function, called by switch_to |
212 | * gpr2 = (task_struct *) prev | 202 | * gpr2 = (task_struct *) prev |
@@ -256,7 +246,6 @@ sysc_saveall: | |||
256 | CREATE_STACK_FRAME __LC_SAVE_AREA | 246 | CREATE_STACK_FRAME __LC_SAVE_AREA |
257 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | 247 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW |
258 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | 248 | mvc SP_ILC(4,%r15),__LC_SVC_ILC |
259 | stg %r7,SP_ARGS(%r15) | ||
260 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct | 249 | lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct |
261 | sysc_vtime: | 250 | sysc_vtime: |
262 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 251 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
@@ -284,6 +273,7 @@ sysc_nr_ok: | |||
284 | sysc_noemu: | 273 | sysc_noemu: |
285 | #endif | 274 | #endif |
286 | tm __TI_flags+6(%r12),_TIF_SYSCALL | 275 | tm __TI_flags+6(%r12),_TIF_SYSCALL |
276 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | ||
287 | lgf %r8,0(%r7,%r10) # load address of system call routine | 277 | lgf %r8,0(%r7,%r10) # load address of system call routine |
288 | jnz sysc_tracesys | 278 | jnz sysc_tracesys |
289 | basr %r14,%r8 # call sys_xxxx | 279 | basr %r14,%r8 # call sys_xxxx |
@@ -397,6 +387,7 @@ sysc_tracesys: | |||
397 | lgf %r8,0(%r7,%r10) | 387 | lgf %r8,0(%r7,%r10) |
398 | sysc_tracego: | 388 | sysc_tracego: |
399 | lmg %r3,%r6,SP_R3(%r15) | 389 | lmg %r3,%r6,SP_R3(%r15) |
390 | mvc SP_ARGS(8,%r15),SP_R7(%r15) | ||
400 | lg %r2,SP_ORIG_R2(%r15) | 391 | lg %r2,SP_ORIG_R2(%r15) |
401 | basr %r14,%r8 # call sys_xxx | 392 | basr %r14,%r8 # call sys_xxx |
402 | stg %r2,SP_R2(%r15) # store return value | 393 | stg %r2,SP_R2(%r15) # store return value |
@@ -443,14 +434,12 @@ kernel_execve: | |||
443 | br %r14 | 434 | br %r14 |
444 | # execve succeeded. | 435 | # execve succeeded. |
445 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts | 436 | 0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts |
446 | # TRACE_IRQS_OFF | ||
447 | lg %r15,__LC_KERNEL_STACK # load ksp | 437 | lg %r15,__LC_KERNEL_STACK # load ksp |
448 | aghi %r15,-SP_SIZE # make room for registers & psw | 438 | aghi %r15,-SP_SIZE # make room for registers & psw |
449 | lg %r13,__LC_SVC_NEW_PSW+8 | 439 | lg %r13,__LC_SVC_NEW_PSW+8 |
450 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs | 440 | mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs |
451 | lg %r12,__LC_THREAD_INFO | 441 | lg %r12,__LC_THREAD_INFO |
452 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 442 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
453 | # TRACE_IRQS_ON | ||
454 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 443 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
455 | brasl %r14,execve_tail | 444 | brasl %r14,execve_tail |
456 | j sysc_return | 445 | j sysc_return |
@@ -490,19 +479,18 @@ pgm_check_handler: | |||
490 | LAST_BREAK | 479 | LAST_BREAK |
491 | pgm_no_vtime: | 480 | pgm_no_vtime: |
492 | HANDLE_SIE_INTERCEPT | 481 | HANDLE_SIE_INTERCEPT |
493 | TRACE_IRQS_CHECK_OFF | ||
494 | stg %r11,SP_ARGS(%r15) | 482 | stg %r11,SP_ARGS(%r15) |
495 | lgf %r3,__LC_PGM_ILC # load program interruption code | 483 | lgf %r3,__LC_PGM_ILC # load program interruption code |
484 | lg %r4,__LC_TRANS_EXC_CODE | ||
485 | REENABLE_IRQS | ||
496 | lghi %r8,0x7f | 486 | lghi %r8,0x7f |
497 | ngr %r8,%r3 | 487 | ngr %r8,%r3 |
498 | pgm_do_call: | ||
499 | sll %r8,3 | 488 | sll %r8,3 |
500 | larl %r1,pgm_check_table | 489 | larl %r1,pgm_check_table |
501 | lg %r1,0(%r8,%r1) # load address of handler routine | 490 | lg %r1,0(%r8,%r1) # load address of handler routine |
502 | la %r2,SP_PTREGS(%r15) # address of register-save area | 491 | la %r2,SP_PTREGS(%r15) # address of register-save area |
503 | basr %r14,%r1 # branch to interrupt-handler | 492 | basr %r14,%r1 # branch to interrupt-handler |
504 | pgm_exit: | 493 | pgm_exit: |
505 | TRACE_IRQS_CHECK_ON | ||
506 | j sysc_return | 494 | j sysc_return |
507 | 495 | ||
508 | # | 496 | # |
@@ -533,7 +521,6 @@ pgm_per_std: | |||
533 | LAST_BREAK | 521 | LAST_BREAK |
534 | pgm_no_vtime2: | 522 | pgm_no_vtime2: |
535 | HANDLE_SIE_INTERCEPT | 523 | HANDLE_SIE_INTERCEPT |
536 | TRACE_IRQS_CHECK_OFF | ||
537 | lg %r1,__TI_task(%r12) | 524 | lg %r1,__TI_task(%r12) |
538 | tm SP_PSW+1(%r15),0x01 # kernel per event ? | 525 | tm SP_PSW+1(%r15),0x01 # kernel per event ? |
539 | jz kernel_per | 526 | jz kernel_per |
@@ -542,6 +529,8 @@ pgm_no_vtime2: | |||
542 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID | 529 | mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID |
543 | oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 530 | oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
544 | lgf %r3,__LC_PGM_ILC # load program interruption code | 531 | lgf %r3,__LC_PGM_ILC # load program interruption code |
532 | lg %r4,__LC_TRANS_EXC_CODE | ||
533 | REENABLE_IRQS | ||
545 | lghi %r8,0x7f | 534 | lghi %r8,0x7f |
546 | ngr %r8,%r3 # clear per-event-bit and ilc | 535 | ngr %r8,%r3 # clear per-event-bit and ilc |
547 | je pgm_exit2 | 536 | je pgm_exit2 |
@@ -551,8 +540,6 @@ pgm_no_vtime2: | |||
551 | la %r2,SP_PTREGS(%r15) # address of register-save area | 540 | la %r2,SP_PTREGS(%r15) # address of register-save area |
552 | basr %r14,%r1 # branch to interrupt-handler | 541 | basr %r14,%r1 # branch to interrupt-handler |
553 | pgm_exit2: | 542 | pgm_exit2: |
554 | TRACE_IRQS_ON | ||
555 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
556 | j sysc_return | 543 | j sysc_return |
557 | 544 | ||
558 | # | 545 | # |
@@ -568,13 +555,11 @@ pgm_svcper: | |||
568 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 555 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
569 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 556 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
570 | LAST_BREAK | 557 | LAST_BREAK |
571 | TRACE_IRQS_OFF | ||
572 | lg %r8,__TI_task(%r12) | 558 | lg %r8,__TI_task(%r12) |
573 | mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID | 559 | mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID |
574 | mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS | 560 | mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS |
575 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | 561 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID |
576 | oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | 562 | oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP |
577 | TRACE_IRQS_ON | ||
578 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 563 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
579 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments | 564 | lmg %r2,%r6,SP_R2(%r15) # load svc arguments |
580 | j sysc_do_svc | 565 | j sysc_do_svc |
@@ -743,8 +728,11 @@ ext_int_handler: | |||
743 | ext_no_vtime: | 728 | ext_no_vtime: |
744 | HANDLE_SIE_INTERCEPT | 729 | HANDLE_SIE_INTERCEPT |
745 | TRACE_IRQS_OFF | 730 | TRACE_IRQS_OFF |
731 | lghi %r1,4096 | ||
746 | la %r2,SP_PTREGS(%r15) # address of register-save area | 732 | la %r2,SP_PTREGS(%r15) # address of register-save area |
747 | llgh %r3,__LC_EXT_INT_CODE # get interruption code | 733 | llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code |
734 | llgf %r4,__LC_EXT_PARAMS # get external parameter | ||
735 | lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter | ||
748 | brasl %r14,do_extint | 736 | brasl %r14,do_extint |
749 | j io_return | 737 | j io_return |
750 | 738 | ||
@@ -966,7 +954,6 @@ cleanup_system_call: | |||
966 | CREATE_STACK_FRAME __LC_SAVE_AREA | 954 | CREATE_STACK_FRAME __LC_SAVE_AREA |
967 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW | 955 | mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW |
968 | mvc SP_ILC(4,%r15),__LC_SVC_ILC | 956 | mvc SP_ILC(4,%r15),__LC_SVC_ILC |
969 | stg %r7,SP_ARGS(%r15) | ||
970 | mvc 8(8,%r12),__LC_THREAD_INFO | 957 | mvc 8(8,%r12),__LC_THREAD_INFO |
971 | cleanup_vtime: | 958 | cleanup_vtime: |
972 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 959 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index db1696e210af..7061398341d5 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -488,7 +488,9 @@ startup: | |||
488 | .align 16 | 488 | .align 16 |
489 | 2: .long 0x000a0000,0x8badcccc | 489 | 2: .long 0x000a0000,0x8badcccc |
490 | #if defined(CONFIG_64BIT) | 490 | #if defined(CONFIG_64BIT) |
491 | #if defined(CONFIG_MARCH_Z10) | 491 | #if defined(CONFIG_MARCH_Z196) |
492 | .long 0xc100efe3, 0xf46c0000 | ||
493 | #elif defined(CONFIG_MARCH_Z10) | ||
492 | .long 0xc100efe3, 0xf0680000 | 494 | .long 0xc100efe3, 0xf0680000 |
493 | #elif defined(CONFIG_MARCH_Z9_109) | 495 | #elif defined(CONFIG_MARCH_Z9_109) |
494 | .long 0xc100efc3, 0x00000000 | 496 | .long 0xc100efc3, 0x00000000 |
@@ -498,7 +500,9 @@ startup: | |||
498 | .long 0xc0000000, 0x00000000 | 500 | .long 0xc0000000, 0x00000000 |
499 | #endif | 501 | #endif |
500 | #else | 502 | #else |
501 | #if defined(CONFIG_MARCH_Z10) | 503 | #if defined(CONFIG_MARCH_Z196) |
504 | .long 0x8100c880, 0x00000000 | ||
505 | #elif defined(CONFIG_MARCH_Z10) | ||
502 | .long 0x8100c880, 0x00000000 | 506 | .long 0x8100c880, 0x00000000 |
503 | #elif defined(CONFIG_MARCH_Z9_109) | 507 | #elif defined(CONFIG_MARCH_Z9_109) |
504 | .long 0x8100c880, 0x00000000 | 508 | .long 0x8100c880, 0x00000000 |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d3a2d1c6438e..ec2e03b22ead 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -76,17 +76,17 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
76 | static void default_idle(void) | 76 | static void default_idle(void) |
77 | { | 77 | { |
78 | /* CPU is going idle. */ | 78 | /* CPU is going idle. */ |
79 | local_irq_disable(); | ||
80 | if (need_resched()) { | ||
81 | local_irq_enable(); | ||
82 | return; | ||
83 | } | ||
84 | #ifdef CONFIG_HOTPLUG_CPU | 79 | #ifdef CONFIG_HOTPLUG_CPU |
85 | if (cpu_is_offline(smp_processor_id())) { | 80 | if (cpu_is_offline(smp_processor_id())) { |
86 | preempt_enable_no_resched(); | 81 | preempt_enable_no_resched(); |
87 | cpu_die(); | 82 | cpu_die(); |
88 | } | 83 | } |
89 | #endif | 84 | #endif |
85 | local_irq_disable(); | ||
86 | if (need_resched()) { | ||
87 | local_irq_enable(); | ||
88 | return; | ||
89 | } | ||
90 | local_mcck_disable(); | 90 | local_mcck_disable(); |
91 | if (test_thread_flag(TIF_MCCK_PENDING)) { | 91 | if (test_thread_flag(TIF_MCCK_PENDING)) { |
92 | local_mcck_enable(); | 92 | local_mcck_enable(); |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index ecb2d02b02e4..644548e615c6 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -42,7 +42,7 @@ void __cpuinit print_cpu_info(void) | |||
42 | struct cpuid *id = &per_cpu(cpu_id, smp_processor_id()); | 42 | struct cpuid *id = &per_cpu(cpu_id, smp_processor_id()); |
43 | 43 | ||
44 | pr_info("Processor %d started, address %d, identification %06X\n", | 44 | pr_info("Processor %d started, address %d, identification %06X\n", |
45 | S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident); | 45 | S390_lowcore.cpu_nr, stap(), id->ident); |
46 | } | 46 | } |
47 | 47 | ||
48 | /* | 48 | /* |
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 9ce641b5291f..bd1db508e8af 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -113,12 +113,15 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler, | |||
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
115 | 115 | ||
116 | void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) | 116 | void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, |
117 | unsigned int param32, unsigned long param64) | ||
117 | { | 118 | { |
119 | struct pt_regs *old_regs; | ||
120 | unsigned short code; | ||
118 | ext_int_info_t *p; | 121 | ext_int_info_t *p; |
119 | int index; | 122 | int index; |
120 | struct pt_regs *old_regs; | ||
121 | 123 | ||
124 | code = (unsigned short) ext_int_code; | ||
122 | old_regs = set_irq_regs(regs); | 125 | old_regs = set_irq_regs(regs); |
123 | s390_idle_check(regs, S390_lowcore.int_clock, | 126 | s390_idle_check(regs, S390_lowcore.int_clock, |
124 | S390_lowcore.async_enter_timer); | 127 | S390_lowcore.async_enter_timer); |
@@ -132,7 +135,7 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) | |||
132 | index = ext_hash(code); | 135 | index = ext_hash(code); |
133 | for (p = ext_int_hash[index]; p; p = p->next) { | 136 | for (p = ext_int_hash[index]; p; p = p->next) { |
134 | if (likely(p->code == code)) | 137 | if (likely(p->code == code)) |
135 | p->handler(code); | 138 | p->handler(ext_int_code, param32, param64); |
136 | } | 139 | } |
137 | irq_exit(); | 140 | irq_exit(); |
138 | set_irq_regs(old_regs); | 141 | set_irq_regs(old_regs); |
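
do_extint() now receives the full 32-bit word that the entry code loads from __LC_CPU_ADDRESS, i.e. the address of the signalling CPU in the upper halfword and the external interruption code in the lower halfword, plus the 32-bit parameter and, on 64 bit, the 64-bit parameter; registered handlers get the same three values instead of just the 16-bit code. A small model of how a handler can take the word apart; the handler name and the sample values are made up:

#include <stdio.h>

/* New-style external interrupt handler: full code word plus both parameters. */
static void sample_ext_handler(unsigned int ext_int_code,
			       unsigned int param32, unsigned long param64)
{
	unsigned short code = (unsigned short) ext_int_code;	/* low half  */
	unsigned int cpu_addr = ext_int_code >> 16;		/* high half */

	printf("ext irq %#06x from cpu %u, param32=%#x param64=%#lx\n",
	       code, cpu_addr, param32, param64);
}

int main(void)
{
	/* Assumed example: emergency-signal code 0x1201 sent by CPU 3. */
	sample_ext_handler((3u << 16) | 0x1201, 0, 0);
	return 0;
}
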
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c8e8e1354e1d..e3ceb911dc75 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -409,6 +409,9 @@ setup_lowcore(void) | |||
409 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; | 409 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; |
410 | lc->thread_info = (unsigned long) &init_thread_union; | 410 | lc->thread_info = (unsigned long) &init_thread_union; |
411 | lc->machine_flags = S390_lowcore.machine_flags; | 411 | lc->machine_flags = S390_lowcore.machine_flags; |
412 | lc->stfl_fac_list = S390_lowcore.stfl_fac_list; | ||
413 | memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, | ||
414 | MAX_FACILITY_BIT/8); | ||
412 | #ifndef CONFIG_64BIT | 415 | #ifndef CONFIG_64BIT |
413 | if (MACHINE_HAS_IEEE) { | 416 | if (MACHINE_HAS_IEEE) { |
414 | lc->extended_save_area_addr = (__u32) | 417 | lc->extended_save_area_addr = (__u32) |
@@ -627,7 +630,8 @@ setup_memory(void) | |||
627 | add_active_range(0, start_chunk, end_chunk); | 630 | add_active_range(0, start_chunk, end_chunk); |
628 | pfn = max(start_chunk, start_pfn); | 631 | pfn = max(start_chunk, start_pfn); |
629 | for (; pfn < end_chunk; pfn++) | 632 | for (; pfn < end_chunk; pfn++) |
630 | page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); | 633 | page_set_storage_key(PFN_PHYS(pfn), |
634 | PAGE_DEFAULT_KEY, 0); | ||
631 | } | 635 | } |
632 | 636 | ||
633 | psw_set_key(PAGE_DEFAULT_KEY); | 637 | psw_set_key(PAGE_DEFAULT_KEY); |
@@ -674,12 +678,9 @@ setup_memory(void) | |||
674 | static void __init setup_hwcaps(void) | 678 | static void __init setup_hwcaps(void) |
675 | { | 679 | { |
676 | static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; | 680 | static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; |
677 | unsigned long long facility_list_extended; | ||
678 | unsigned int facility_list; | ||
679 | struct cpuid cpu_id; | 681 | struct cpuid cpu_id; |
680 | int i; | 682 | int i; |
681 | 683 | ||
682 | facility_list = stfl(); | ||
683 | /* | 684 | /* |
684 | * The store facility list bits numbers as found in the principles | 685 | * The store facility list bits numbers as found in the principles |
685 | * of operation are numbered with bit 1UL<<31 as number 0 to | 686 | * of operation are numbered with bit 1UL<<31 as number 0 to |
@@ -699,11 +700,10 @@ static void __init setup_hwcaps(void) | |||
699 | * HWCAP_S390_ETF3EH bit 8 (22 && 30). | 700 | * HWCAP_S390_ETF3EH bit 8 (22 && 30). |
700 | */ | 701 | */ |
701 | for (i = 0; i < 6; i++) | 702 | for (i = 0; i < 6; i++) |
702 | if (facility_list & (1UL << (31 - stfl_bits[i]))) | 703 | if (test_facility(stfl_bits[i])) |
703 | elf_hwcap |= 1UL << i; | 704 | elf_hwcap |= 1UL << i; |
704 | 705 | ||
705 | if ((facility_list & (1UL << (31 - 22))) | 706 | if (test_facility(22) && test_facility(30)) |
706 | && (facility_list & (1UL << (31 - 30)))) | ||
707 | elf_hwcap |= HWCAP_S390_ETF3EH; | 707 | elf_hwcap |= HWCAP_S390_ETF3EH; |
708 | 708 | ||
709 | /* | 709 | /* |
@@ -719,12 +719,8 @@ static void __init setup_hwcaps(void) | |||
719 | * translated to: | 719 | * translated to: |
720 | * HWCAP_S390_DFP bit 6 (42 && 44). | 720 | * HWCAP_S390_DFP bit 6 (42 && 44). |
721 | */ | 721 | */ |
722 | if ((elf_hwcap & (1UL << 2)) && | 722 | if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44)) |
723 | __stfle(&facility_list_extended, 1) > 0) { | 723 | elf_hwcap |= HWCAP_S390_DFP; |
724 | if ((facility_list_extended & (1ULL << (63 - 42))) | ||
725 | && (facility_list_extended & (1ULL << (63 - 44)))) | ||
726 | elf_hwcap |= HWCAP_S390_DFP; | ||
727 | } | ||
728 | 724 | ||
729 | /* | 725 | /* |
730 | * Huge page support HWCAP_S390_HPAGE is bit 7. | 726 | * Huge page support HWCAP_S390_HPAGE is bit 7. |
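
setup_hwcaps() now derives the ELF hwcap bits directly with test_facility() instead of decoding stfl/stfle result words by hand. A hedged, self-contained sketch of that mapping; the facility numbers and hwcap bit positions are the ones mentioned in the hunk, everything else is scaffolding for illustration:

#include <stdio.h>

#define HWCAP_S390_DFP		(1UL << 6)	/* bit 6, per the hunk comment */
#define HWCAP_S390_ETF3EH	(1UL << 8)	/* bit 8, per the hunk comment */

/* Pretend facility list, same MSB-first numbering as in the early.c note. */
static unsigned char fac[32];

static int test_facility(unsigned long nr)
{
	return (fac[nr >> 3] & (0x80 >> (nr & 7))) != 0;
}

static void set_facility(unsigned long nr)
{
	fac[nr >> 3] |= 0x80 >> (nr & 7);
}

int main(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	unsigned long elf_hwcap = 0;
	int i;

	/* Assume a machine announcing facilities 0, 2, 7, 22, 30, 42 and 44. */
	set_facility(0);
	set_facility(2);
	set_facility(7);
	set_facility(22);
	set_facility(30);
	set_facility(42);
	set_facility(44);

	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	printf("elf_hwcap = %#lx\n", elf_hwcap);
	return 0;
}
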
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8127ebd59c4d..94cf510b8fe1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -156,7 +156,8 @@ void smp_send_stop(void) | |||
156 | * cpus are handled. | 156 | * cpus are handled. |
157 | */ | 157 | */ |
158 | 158 | ||
159 | static void do_ext_call_interrupt(__u16 code) | 159 | static void do_ext_call_interrupt(unsigned int ext_int_code, |
160 | unsigned int param32, unsigned long param64) | ||
160 | { | 161 | { |
161 | unsigned long bits; | 162 | unsigned long bits; |
162 | 163 | ||
@@ -593,6 +594,8 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
593 | cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; | 594 | cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; |
594 | cpu_lowcore->machine_flags = S390_lowcore.machine_flags; | 595 | cpu_lowcore->machine_flags = S390_lowcore.machine_flags; |
595 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; | 596 | cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; |
597 | memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list, | ||
598 | MAX_FACILITY_BIT/8); | ||
596 | eieio(); | 599 | eieio(); |
597 | 600 | ||
598 | while (sigp(cpu, sigp_restart) == sigp_busy) | 601 | while (sigp(cpu, sigp_restart) == sigp_busy) |
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index a0ffc7717ed6..f04d93aa48ec 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/ebcdic.h> | 15 | #include <asm/ebcdic.h> |
16 | #include <asm/sysinfo.h> | 16 | #include <asm/sysinfo.h> |
17 | #include <asm/cpcmd.h> | 17 | #include <asm/cpcmd.h> |
18 | #include <asm/topology.h> | ||
18 | 19 | ||
19 | /* Sigh, math-emu. Don't ask. */ | 20 | /* Sigh, math-emu. Don't ask. */ |
20 | #include <asm/sfp-util.h> | 21 | #include <asm/sfp-util.h> |
@@ -74,6 +75,42 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len) | |||
74 | "Model Temp. Capacity: %-16.16s %08u\n", | 75 | "Model Temp. Capacity: %-16.16s %08u\n", |
75 | info->model_temp_cap, | 76 | info->model_temp_cap, |
76 | *(u32 *) info->model_temp_cap_rating); | 77 | *(u32 *) info->model_temp_cap_rating); |
78 | if (info->cai) { | ||
79 | len += sprintf(page + len, | ||
80 | "Capacity Adj. Ind.: %d\n", | ||
81 | info->cai); | ||
82 | len += sprintf(page + len, "Capacity Ch. Reason: %d\n", | ||
83 | info->ccr); | ||
84 | } | ||
85 | return len; | ||
86 | } | ||
87 | |||
88 | static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len) | ||
89 | { | ||
90 | static int max_mnest; | ||
91 | int i, rc; | ||
92 | |||
93 | len += sprintf(page + len, "\n"); | ||
94 | if (!MACHINE_HAS_TOPOLOGY) | ||
95 | return len; | ||
96 | if (max_mnest) { | ||
97 | stsi(info, 15, 1, max_mnest); | ||
98 | } else { | ||
99 | for (max_mnest = 6; max_mnest > 1; max_mnest--) { | ||
100 | rc = stsi(info, 15, 1, max_mnest); | ||
101 | if (rc != -ENOSYS) | ||
102 | break; | ||
103 | } | ||
104 | } | ||
105 | len += sprintf(page + len, "CPU Topology HW: "); | ||
106 | for (i = 0; i < TOPOLOGY_NR_MAG; i++) | ||
107 | len += sprintf(page + len, " %d", info->mag[i]); | ||
108 | len += sprintf(page + len, "\n"); | ||
109 | store_topology(info); | ||
110 | len += sprintf(page + len, "CPU Topology SW: "); | ||
111 | for (i = 0; i < TOPOLOGY_NR_MAG; i++) | ||
112 | len += sprintf(page + len, " %d", info->mag[i]); | ||
113 | len += sprintf(page + len, "\n"); | ||
77 | return len; | 114 | return len; |
78 | } | 115 | } |
79 | 116 | ||
@@ -87,7 +124,6 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) | |||
87 | ext = (struct sysinfo_1_2_2_extension *) | 124 | ext = (struct sysinfo_1_2_2_extension *) |
88 | ((unsigned long) info + info->acc_offset); | 125 | ((unsigned long) info + info->acc_offset); |
89 | 126 | ||
90 | len += sprintf(page + len, "\n"); | ||
91 | len += sprintf(page + len, "CPUs Total: %d\n", | 127 | len += sprintf(page + len, "CPUs Total: %d\n", |
92 | info->cpus_total); | 128 | info->cpus_total); |
93 | len += sprintf(page + len, "CPUs Configured: %d\n", | 129 | len += sprintf(page + len, "CPUs Configured: %d\n", |
@@ -217,6 +253,9 @@ static int proc_read_sysinfo(char *page, char **start, | |||
217 | len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); | 253 | len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); |
218 | 254 | ||
219 | if (level >= 1) | 255 | if (level >= 1) |
256 | len = stsi_15_1_x((struct sysinfo_15_1_x *) info, page, len); | ||
257 | |||
258 | if (level >= 1) | ||
220 | len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); | 259 | len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); |
221 | 260 | ||
222 | if (level >= 2) | 261 | if (level >= 2) |
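
The new stsi_15_1_x() handler for /proc/sysinfo has to find the deepest topology nesting level the machine supports: it tries stsi 15.1.6 first and steps the selector down until the call stops failing with -ENOSYS, then caches the winning level in max_mnest for later reads. A stripped-down model of that probing loop; stsi() here is only a stub mimicking the return convention:

#include <stdio.h>
#include <errno.h>

/* Stub standing in for the stsi instruction wrapper: pretend the machine
 * only supports selector-2 values up to 3. */
static int stsi(void *buf, int fc, int sel1, int sel2)
{
	(void) buf;
	if (fc == 15 && sel1 == 1 && sel2 <= 3)
		return 0;
	return -ENOSYS;
}

int main(void)
{
	static int max_mnest;	/* cached across calls, as in the kernel */
	char buf[4096];
	int rc = -ENOSYS;

	if (!max_mnest) {
		for (max_mnest = 6; max_mnest > 1; max_mnest--) {
			rc = stsi(buf, 15, 1, max_mnest);
			if (rc != -ENOSYS)
				break;
		}
	}
	printf("usable STSI 15.1.x level: %d (rc=%d)\n", max_mnest, rc);
	return 0;
}
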
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 2896cac9c14a..f754a6dc4f94 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -155,7 +155,9 @@ void init_cpu_timer(void) | |||
155 | __ctl_set_bit(0, 4); | 155 | __ctl_set_bit(0, 4); |
156 | } | 156 | } |
157 | 157 | ||
158 | static void clock_comparator_interrupt(__u16 code) | 158 | static void clock_comparator_interrupt(unsigned int ext_int_code, |
159 | unsigned int param32, | ||
160 | unsigned long param64) | ||
159 | { | 161 | { |
160 | if (S390_lowcore.clock_comparator == -1ULL) | 162 | if (S390_lowcore.clock_comparator == -1ULL) |
161 | set_clock_comparator(S390_lowcore.clock_comparator); | 163 | set_clock_comparator(S390_lowcore.clock_comparator); |
@@ -164,14 +166,13 @@ static void clock_comparator_interrupt(__u16 code) | |||
164 | static void etr_timing_alert(struct etr_irq_parm *); | 166 | static void etr_timing_alert(struct etr_irq_parm *); |
165 | static void stp_timing_alert(struct stp_irq_parm *); | 167 | static void stp_timing_alert(struct stp_irq_parm *); |
166 | 168 | ||
167 | static void timing_alert_interrupt(__u16 code) | 169 | static void timing_alert_interrupt(unsigned int ext_int_code, |
170 | unsigned int param32, unsigned long param64) | ||
168 | { | 171 | { |
169 | if (S390_lowcore.ext_params & 0x00c40000) | 172 | if (param32 & 0x00c40000) |
170 | etr_timing_alert((struct etr_irq_parm *) | 173 | etr_timing_alert((struct etr_irq_parm *) ¶m32); |
171 | &S390_lowcore.ext_params); | 174 | if (param32 & 0x00038000) |
172 | if (S390_lowcore.ext_params & 0x00038000) | 175 | stp_timing_alert((struct stp_irq_parm *) ¶m32); |
173 | stp_timing_alert((struct stp_irq_parm *) | ||
174 | &S390_lowcore.ext_params); | ||
175 | } | 176 | } |
176 | 177 | ||
177 | static void etr_reset(void); | 178 | static void etr_reset(void); |
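
timing_alert_interrupt() no longer reads S390_lowcore.ext_params; with the new calling convention the 32-bit external interrupt parameter arrives as param32 and is dispatched on the ETR and STP sub-mask bits shown in the hunk. A compact, runnable model of that dispatch; the interruption code and parameter values in main() are assumptions for demonstration:

#include <stdio.h>

static void etr_timing_alert(unsigned int parm)
{
	printf("ETR timing alert, parm=%#010x\n", parm);
}

static void stp_timing_alert(unsigned int parm)
{
	printf("STP timing alert, parm=%#010x\n", parm);
}

/* Dispatch as in timing_alert_interrupt(): ETR bits 0x00c40000,
 * STP bits 0x00038000 of the 32-bit external interrupt parameter. */
static void timing_alert(unsigned int ext_int_code, unsigned int param32,
			 unsigned long param64)
{
	(void) ext_int_code;
	(void) param64;
	if (param32 & 0x00c40000)
		etr_timing_alert(param32);
	if (param32 & 0x00038000)
		stp_timing_alert(param32);
}

int main(void)
{
	timing_alert(0x1406, 0x00400000, 0);	/* assumed ETR-only alert */
	timing_alert(0x1406, 0x00020000, 0);	/* assumed STP-only alert */
	return 0;
}
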
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 13559c993847..a9dee9048ee5 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -18,55 +18,20 @@ | |||
18 | #include <linux/cpuset.h> | 18 | #include <linux/cpuset.h> |
19 | #include <asm/delay.h> | 19 | #include <asm/delay.h> |
20 | #include <asm/s390_ext.h> | 20 | #include <asm/s390_ext.h> |
21 | #include <asm/sysinfo.h> | ||
22 | |||
23 | #define CPU_BITS 64 | ||
24 | #define NR_MAG 6 | ||
25 | 21 | ||
26 | #define PTF_HORIZONTAL (0UL) | 22 | #define PTF_HORIZONTAL (0UL) |
27 | #define PTF_VERTICAL (1UL) | 23 | #define PTF_VERTICAL (1UL) |
28 | #define PTF_CHECK (2UL) | 24 | #define PTF_CHECK (2UL) |
29 | 25 | ||
30 | struct tl_cpu { | ||
31 | unsigned char reserved0[4]; | ||
32 | unsigned char :6; | ||
33 | unsigned char pp:2; | ||
34 | unsigned char reserved1; | ||
35 | unsigned short origin; | ||
36 | unsigned long mask[CPU_BITS / BITS_PER_LONG]; | ||
37 | }; | ||
38 | |||
39 | struct tl_container { | ||
40 | unsigned char reserved[7]; | ||
41 | unsigned char id; | ||
42 | }; | ||
43 | |||
44 | union tl_entry { | ||
45 | unsigned char nl; | ||
46 | struct tl_cpu cpu; | ||
47 | struct tl_container container; | ||
48 | }; | ||
49 | |||
50 | struct tl_info { | ||
51 | unsigned char reserved0[2]; | ||
52 | unsigned short length; | ||
53 | unsigned char mag[NR_MAG]; | ||
54 | unsigned char reserved1; | ||
55 | unsigned char mnest; | ||
56 | unsigned char reserved2[4]; | ||
57 | union tl_entry tle[0]; | ||
58 | }; | ||
59 | |||
60 | struct mask_info { | 26 | struct mask_info { |
61 | struct mask_info *next; | 27 | struct mask_info *next; |
62 | unsigned char id; | 28 | unsigned char id; |
63 | cpumask_t mask; | 29 | cpumask_t mask; |
64 | }; | 30 | }; |
65 | 31 | ||
66 | static int topology_enabled; | 32 | static int topology_enabled = 1; |
67 | static void topology_work_fn(struct work_struct *work); | 33 | static void topology_work_fn(struct work_struct *work); |
68 | static struct tl_info *tl_info; | 34 | static struct sysinfo_15_1_x *tl_info; |
69 | static int machine_has_topology; | ||
70 | static struct timer_list topology_timer; | 35 | static struct timer_list topology_timer; |
71 | static void set_topology_timer(void); | 36 | static void set_topology_timer(void); |
72 | static DECLARE_WORK(topology_work, topology_work_fn); | 37 | static DECLARE_WORK(topology_work, topology_work_fn); |
@@ -88,7 +53,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) | |||
88 | cpumask_t mask; | 53 | cpumask_t mask; |
89 | 54 | ||
90 | cpus_clear(mask); | 55 | cpus_clear(mask); |
91 | if (!topology_enabled || !machine_has_topology) | 56 | if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) |
92 | return cpu_possible_map; | 57 | return cpu_possible_map; |
93 | while (info) { | 58 | while (info) { |
94 | if (cpu_isset(cpu, info->mask)) { | 59 | if (cpu_isset(cpu, info->mask)) { |
@@ -102,18 +67,18 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) | |||
102 | return mask; | 67 | return mask; |
103 | } | 68 | } |
104 | 69 | ||
105 | static void add_cpus_to_mask(struct tl_cpu *tl_cpu, struct mask_info *book, | 70 | static void add_cpus_to_mask(struct topology_cpu *tl_cpu, |
106 | struct mask_info *core) | 71 | struct mask_info *book, struct mask_info *core) |
107 | { | 72 | { |
108 | unsigned int cpu; | 73 | unsigned int cpu; |
109 | 74 | ||
110 | for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS); | 75 | for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS); |
111 | cpu < CPU_BITS; | 76 | cpu < TOPOLOGY_CPU_BITS; |
112 | cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) | 77 | cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1)) |
113 | { | 78 | { |
114 | unsigned int rcpu, lcpu; | 79 | unsigned int rcpu, lcpu; |
115 | 80 | ||
116 | rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; | 81 | rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; |
117 | for_each_present_cpu(lcpu) { | 82 | for_each_present_cpu(lcpu) { |
118 | if (cpu_logical_map(lcpu) != rcpu) | 83 | if (cpu_logical_map(lcpu) != rcpu) |
119 | continue; | 84 | continue; |
@@ -146,15 +111,14 @@ static void clear_masks(void) | |||
146 | #endif | 111 | #endif |
147 | } | 112 | } |
148 | 113 | ||
149 | static union tl_entry *next_tle(union tl_entry *tle) | 114 | static union topology_entry *next_tle(union topology_entry *tle) |
150 | { | 115 | { |
151 | if (tle->nl) | 116 | if (!tle->nl) |
152 | return (union tl_entry *)((struct tl_container *)tle + 1); | 117 | return (union topology_entry *)((struct topology_cpu *)tle + 1); |
153 | else | 118 | return (union topology_entry *)((struct topology_container *)tle + 1); |
154 | return (union tl_entry *)((struct tl_cpu *)tle + 1); | ||
155 | } | 119 | } |
156 | 120 | ||
157 | static void tl_to_cores(struct tl_info *info) | 121 | static void tl_to_cores(struct sysinfo_15_1_x *info) |
158 | { | 122 | { |
159 | #ifdef CONFIG_SCHED_BOOK | 123 | #ifdef CONFIG_SCHED_BOOK |
160 | struct mask_info *book = &book_info; | 124 | struct mask_info *book = &book_info; |
@@ -162,13 +126,13 @@ static void tl_to_cores(struct tl_info *info) | |||
162 | struct mask_info *book = NULL; | 126 | struct mask_info *book = NULL; |
163 | #endif | 127 | #endif |
164 | struct mask_info *core = &core_info; | 128 | struct mask_info *core = &core_info; |
165 | union tl_entry *tle, *end; | 129 | union topology_entry *tle, *end; |
166 | 130 | ||
167 | 131 | ||
168 | spin_lock_irq(&topology_lock); | 132 | spin_lock_irq(&topology_lock); |
169 | clear_masks(); | 133 | clear_masks(); |
170 | tle = info->tle; | 134 | tle = info->tle; |
171 | end = (union tl_entry *)((unsigned long)info + info->length); | 135 | end = (union topology_entry *)((unsigned long)info + info->length); |
172 | while (tle < end) { | 136 | while (tle < end) { |
173 | switch (tle->nl) { | 137 | switch (tle->nl) { |
174 | #ifdef CONFIG_SCHED_BOOK | 138 | #ifdef CONFIG_SCHED_BOOK |
@@ -186,7 +150,6 @@ static void tl_to_cores(struct tl_info *info) | |||
186 | break; | 150 | break; |
187 | default: | 151 | default: |
188 | clear_masks(); | 152 | clear_masks(); |
189 | machine_has_topology = 0; | ||
190 | goto out; | 153 | goto out; |
191 | } | 154 | } |
192 | tle = next_tle(tle); | 155 | tle = next_tle(tle); |
@@ -223,7 +186,7 @@ int topology_set_cpu_management(int fc) | |||
223 | int cpu; | 186 | int cpu; |
224 | int rc; | 187 | int rc; |
225 | 188 | ||
226 | if (!machine_has_topology) | 189 | if (!MACHINE_HAS_TOPOLOGY) |
227 | return -EOPNOTSUPP; | 190 | return -EOPNOTSUPP; |
228 | if (fc) | 191 | if (fc) |
229 | rc = ptf(PTF_VERTICAL); | 192 | rc = ptf(PTF_VERTICAL); |
@@ -251,7 +214,7 @@ static void update_cpu_core_map(void) | |||
251 | spin_unlock_irqrestore(&topology_lock, flags); | 214 | spin_unlock_irqrestore(&topology_lock, flags); |
252 | } | 215 | } |
253 | 216 | ||
254 | static void store_topology(struct tl_info *info) | 217 | void store_topology(struct sysinfo_15_1_x *info) |
255 | { | 218 | { |
256 | #ifdef CONFIG_SCHED_BOOK | 219 | #ifdef CONFIG_SCHED_BOOK |
257 | int rc; | 220 | int rc; |
@@ -265,11 +228,11 @@ static void store_topology(struct tl_info *info) | |||
265 | 228 | ||
266 | int arch_update_cpu_topology(void) | 229 | int arch_update_cpu_topology(void) |
267 | { | 230 | { |
268 | struct tl_info *info = tl_info; | 231 | struct sysinfo_15_1_x *info = tl_info; |
269 | struct sys_device *sysdev; | 232 | struct sys_device *sysdev; |
270 | int cpu; | 233 | int cpu; |
271 | 234 | ||
272 | if (!machine_has_topology) { | 235 | if (!MACHINE_HAS_TOPOLOGY) { |
273 | update_cpu_core_map(); | 236 | update_cpu_core_map(); |
274 | topology_update_polarization_simple(); | 237 | topology_update_polarization_simple(); |
275 | return 0; | 238 | return 0; |
@@ -311,9 +274,9 @@ static void set_topology_timer(void) | |||
311 | 274 | ||
312 | static int __init early_parse_topology(char *p) | 275 | static int __init early_parse_topology(char *p) |
313 | { | 276 | { |
314 | if (strncmp(p, "on", 2)) | 277 | if (strncmp(p, "off", 3)) |
315 | return 0; | 278 | return 0; |
316 | topology_enabled = 1; | 279 | topology_enabled = 0; |
317 | return 0; | 280 | return 0; |
318 | } | 281 | } |
319 | early_param("topology", early_parse_topology); | 282 | early_param("topology", early_parse_topology); |
@@ -323,7 +286,7 @@ static int __init init_topology_update(void) | |||
323 | int rc; | 286 | int rc; |
324 | 287 | ||
325 | rc = 0; | 288 | rc = 0; |
326 | if (!machine_has_topology) { | 289 | if (!MACHINE_HAS_TOPOLOGY) { |
327 | topology_update_polarization_simple(); | 290 | topology_update_polarization_simple(); |
328 | goto out; | 291 | goto out; |
329 | } | 292 | } |
@@ -335,13 +298,14 @@ out: | |||
335 | } | 298 | } |
336 | __initcall(init_topology_update); | 299 | __initcall(init_topology_update); |
337 | 300 | ||
338 | static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset) | 301 | static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, |
302 | int offset) | ||
339 | { | 303 | { |
340 | int i, nr_masks; | 304 | int i, nr_masks; |
341 | 305 | ||
342 | nr_masks = info->mag[NR_MAG - offset]; | 306 | nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; |
343 | for (i = 0; i < info->mnest - offset; i++) | 307 | for (i = 0; i < info->mnest - offset; i++) |
344 | nr_masks *= info->mag[NR_MAG - offset - 1 - i]; | 308 | nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; |
345 | nr_masks = max(nr_masks, 1); | 309 | nr_masks = max(nr_masks, 1); |
346 | for (i = 0; i < nr_masks; i++) { | 310 | for (i = 0; i < nr_masks; i++) { |
347 | mask->next = alloc_bootmem(sizeof(struct mask_info)); | 311 | mask->next = alloc_bootmem(sizeof(struct mask_info)); |
@@ -351,21 +315,16 @@ static void alloc_masks(struct tl_info *info, struct mask_info *mask, int offset | |||
351 | 315 | ||
352 | void __init s390_init_cpu_topology(void) | 316 | void __init s390_init_cpu_topology(void) |
353 | { | 317 | { |
354 | unsigned long long facility_bits; | 318 | struct sysinfo_15_1_x *info; |
355 | struct tl_info *info; | ||
356 | int i; | 319 | int i; |
357 | 320 | ||
358 | if (stfle(&facility_bits, 1) <= 0) | 321 | if (!MACHINE_HAS_TOPOLOGY) |
359 | return; | ||
360 | if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61))) | ||
361 | return; | 322 | return; |
362 | machine_has_topology = 1; | ||
363 | |||
364 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | 323 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
365 | info = tl_info; | 324 | info = tl_info; |
366 | store_topology(info); | 325 | store_topology(info); |
367 | pr_info("The CPU configuration topology of the machine is:"); | 326 | pr_info("The CPU configuration topology of the machine is:"); |
368 | for (i = 0; i < NR_MAG; i++) | 327 | for (i = 0; i < TOPOLOGY_NR_MAG; i++) |
369 | printk(" %d", info->mag[i]); | 328 | printk(" %d", info->mag[i]); |
370 | printk(" / %d\n", info->mnest); | 329 | printk(" / %d\n", info->mnest); |
371 | alloc_masks(info, &core_info, 2); | 330 | alloc_masks(info, &core_info, 2); |
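
Besides replacing the private tl_* structures with the shared sysinfo_15_1_x/topology_* types and folding the machine_has_topology flag into MACHINE_HAS_TOPOLOGY, the hunks above invert the topology= early parameter: topology support is now enabled by default and only "topology=off" disables it. A tiny extract of that parsing logic, runnable on its own:

#include <stdio.h>
#include <string.h>

static int topology_enabled = 1;	/* the default is now on */

/* Same logic as the reworked early_parse_topology(). */
static int early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}

int main(void)
{
	early_parse_topology("on");	/* ignored: already the default */
	printf("topology=on  -> enabled=%d\n", topology_enabled);
	early_parse_topology("off");
	printf("topology=off -> enabled=%d\n", topology_enabled);
	return 0;
}
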
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 5d8f0f3d0250..70640822621a 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -329,27 +329,19 @@ int is_valid_bugaddr(unsigned long addr) | |||
329 | return 1; | 329 | return 1; |
330 | } | 330 | } |
331 | 331 | ||
332 | static void __kprobes inline do_trap(long interruption_code, int signr, | 332 | static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str, |
333 | char *str, struct pt_regs *regs, | 333 | struct pt_regs *regs, siginfo_t *info) |
334 | siginfo_t *info) | ||
335 | { | 334 | { |
336 | /* | 335 | if (notify_die(DIE_TRAP, str, regs, pgm_int_code, |
337 | * We got all needed information from the lowcore and can | 336 | pgm_int_code, signr) == NOTIFY_STOP) |
338 | * now safely switch on interrupts. | ||
339 | */ | ||
340 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
341 | local_irq_enable(); | ||
342 | |||
343 | if (notify_die(DIE_TRAP, str, regs, interruption_code, | ||
344 | interruption_code, signr) == NOTIFY_STOP) | ||
345 | return; | 337 | return; |
346 | 338 | ||
347 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 339 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
348 | struct task_struct *tsk = current; | 340 | struct task_struct *tsk = current; |
349 | 341 | ||
350 | tsk->thread.trap_no = interruption_code & 0xffff; | 342 | tsk->thread.trap_no = pgm_int_code & 0xffff; |
351 | force_sig_info(signr, info, tsk); | 343 | force_sig_info(signr, info, tsk); |
352 | report_user_fault(regs, interruption_code, signr); | 344 | report_user_fault(regs, pgm_int_code, signr); |
353 | } else { | 345 | } else { |
354 | const struct exception_table_entry *fixup; | 346 | const struct exception_table_entry *fixup; |
355 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | 347 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); |
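
do_trap() no longer enables interrupts itself (the entry code now does that through REENABLE_IRQS) and works on the pgm_int_code argument rather than on lowcore fields. That argument packs two values: the program interruption code in the low halfword (hence the & 0xffff for trap_no) and the instruction length in the upper halfword, which get_psw_address() in the next hunk subtracts from the PSW address to locate the failing instruction. A small illustration of that unpacking; the values in main() are invented:

#include <stdio.h>

/* Model of the pgm_int_code layout used by do_trap()/get_psw_address():
 * low 16 bits = program interruption code, upper bits = instruction length. */
static void show_pgm_int_code(unsigned long psw_addr, long pgm_int_code)
{
	unsigned int code = pgm_int_code & 0xffff;
	unsigned int ilen = pgm_int_code >> 16;

	printf("code %#06x, instruction length %u, failing instruction at %#lx\n",
	       code, ilen, psw_addr - ilen);
}

int main(void)
{
	/* Assumed example: operation exception (0x0001) after a 2-byte opcode. */
	show_pgm_int_code(0x8000a002UL, (2L << 16) | 0x0001);
	return 0;
}
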
@@ -361,14 +353,16 @@ static void __kprobes inline do_trap(long interruption_code, int signr, | |||
361 | btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); | 353 | btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); |
362 | if (btt == BUG_TRAP_TYPE_WARN) | 354 | if (btt == BUG_TRAP_TYPE_WARN) |
363 | return; | 355 | return; |
364 | die(str, regs, interruption_code); | 356 | die(str, regs, pgm_int_code); |
365 | } | 357 | } |
366 | } | 358 | } |
367 | } | 359 | } |
368 | 360 | ||
369 | static inline void __user *get_check_address(struct pt_regs *regs) | 361 | static inline void __user *get_psw_address(struct pt_regs *regs, |
362 | long pgm_int_code) | ||
370 | { | 363 | { |
371 | return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); | 364 | return (void __user *) |
365 | ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); | ||
372 | } | 366 | } |
373 | 367 | ||
374 | void __kprobes do_single_step(struct pt_regs *regs) | 368 | void __kprobes do_single_step(struct pt_regs *regs) |
@@ -381,57 +375,57 @@ void __kprobes do_single_step(struct pt_regs *regs) | |||
381 | force_sig(SIGTRAP, current); | 375 | force_sig(SIGTRAP, current); |
382 | } | 376 | } |
383 | 377 | ||
384 | static void default_trap_handler(struct pt_regs * regs, long interruption_code) | 378 | static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, |
379 | unsigned long trans_exc_code) | ||
385 | { | 380 | { |
386 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 381 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
387 | local_irq_enable(); | 382 | report_user_fault(regs, pgm_int_code, SIGSEGV); |
388 | report_user_fault(regs, interruption_code, SIGSEGV); | ||
389 | do_exit(SIGSEGV); | 383 | do_exit(SIGSEGV); |
390 | } else | 384 | } else |
391 | die("Unknown program exception", regs, interruption_code); | 385 | die("Unknown program exception", regs, pgm_int_code); |
392 | } | 386 | } |
393 | 387 | ||
394 | #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ | 388 | #define DO_ERROR_INFO(name, signr, sicode, str) \ |
395 | static void name(struct pt_regs * regs, long interruption_code) \ | 389 | static void name(struct pt_regs *regs, long pgm_int_code, \ |
390 | unsigned long trans_exc_code) \ | ||
396 | { \ | 391 | { \ |
397 | siginfo_t info; \ | 392 | siginfo_t info; \ |
398 | info.si_signo = signr; \ | 393 | info.si_signo = signr; \ |
399 | info.si_errno = 0; \ | 394 | info.si_errno = 0; \ |
400 | info.si_code = sicode; \ | 395 | info.si_code = sicode; \ |
401 | info.si_addr = siaddr; \ | 396 | info.si_addr = get_psw_address(regs, pgm_int_code); \ |
402 | do_trap(interruption_code, signr, str, regs, &info); \ | 397 | do_trap(pgm_int_code, signr, str, regs, &info); \ |
403 | } | 398 | } |
404 | 399 | ||
405 | DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception, | 400 | DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, |
406 | ILL_ILLADR, get_check_address(regs)) | 401 | "addressing exception") |
407 | DO_ERROR_INFO(SIGILL, "execute exception", execute_exception, | 402 | DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, |
408 | ILL_ILLOPN, get_check_address(regs)) | 403 | "execute exception") |
409 | DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception, | 404 | DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, |
410 | FPE_INTDIV, get_check_address(regs)) | 405 | "fixpoint divide exception") |
411 | DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception, | 406 | DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, |
412 | FPE_INTOVF, get_check_address(regs)) | 407 | "fixpoint overflow exception") |
413 | DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception, | 408 | DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, |
414 | FPE_FLTOVF, get_check_address(regs)) | 409 | "HFP overflow exception") |
415 | DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception, | 410 | DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, |
416 | FPE_FLTUND, get_check_address(regs)) | 411 | "HFP underflow exception") |
417 | DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception, | 412 | DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, |
418 | FPE_FLTRES, get_check_address(regs)) | 413 | "HFP significance exception") |
419 | DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception, | 414 | DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, |
420 | FPE_FLTDIV, get_check_address(regs)) | 415 | "HFP divide exception") |
421 | DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception, | 416 | DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV, |
422 | FPE_FLTINV, get_check_address(regs)) | 417 | "HFP square root exception") |
423 | DO_ERROR_INFO(SIGILL, "operand exception", operand_exception, | 418 | DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, |
424 | ILL_ILLOPN, get_check_address(regs)) | 419 | "operand exception") |
425 | DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op, | 420 | DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, |
426 | ILL_PRVOPC, get_check_address(regs)) | 421 | "privileged operation") |
427 | DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception, | 422 | DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, |
428 | ILL_ILLOPN, get_check_address(regs)) | 423 | "special operation exception") |
429 | DO_ERROR_INFO(SIGILL, "translation exception", translation_exception, | 424 | DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, |
430 | ILL_ILLOPN, get_check_address(regs)) | 425 | "translation exception") |
431 | 426 | ||
432 | static inline void | 427 | static inline void do_fp_trap(struct pt_regs *regs, void __user *location, |
433 | do_fp_trap(struct pt_regs *regs, void __user *location, | 428 | int fpc, long pgm_int_code) |
434 | int fpc, long interruption_code) | ||
435 | { | 429 | { |
436 | siginfo_t si; | 430 | siginfo_t si; |
437 | 431 | ||
@@ -453,26 +447,19 @@ do_fp_trap(struct pt_regs *regs, void __user *location, | |||
453 | else if (fpc & 0x0800) /* inexact */ | 447 | else if (fpc & 0x0800) /* inexact */ |
454 | si.si_code = FPE_FLTRES; | 448 | si.si_code = FPE_FLTRES; |
455 | } | 449 | } |
456 | current->thread.ieee_instruction_pointer = (addr_t) location; | 450 | do_trap(pgm_int_code, SIGFPE, |
457 | do_trap(interruption_code, SIGFPE, | ||
458 | "floating point exception", regs, &si); | 451 | "floating point exception", regs, &si); |
459 | } | 452 | } |
460 | 453 | ||
461 | static void illegal_op(struct pt_regs * regs, long interruption_code) | 454 | static void illegal_op(struct pt_regs *regs, long pgm_int_code, |
455 | unsigned long trans_exc_code) | ||
462 | { | 456 | { |
463 | siginfo_t info; | 457 | siginfo_t info; |
464 | __u8 opcode[6]; | 458 | __u8 opcode[6]; |
465 | __u16 __user *location; | 459 | __u16 __user *location; |
466 | int signal = 0; | 460 | int signal = 0; |
467 | 461 | ||
468 | location = get_check_address(regs); | 462 | location = get_psw_address(regs, pgm_int_code); |
469 | |||
470 | /* | ||
471 | * We got all needed information from the lowcore and can | ||
472 | * now safely switch on interrupts. | ||
473 | */ | ||
474 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
475 | local_irq_enable(); | ||
476 | 463 | ||
477 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 464 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
478 | if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) | 465 | if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) |
@@ -512,7 +499,7 @@ static void illegal_op(struct pt_regs * regs, long interruption_code) | |||
512 | * If we get an illegal op in kernel mode, send it through the | 499 | * If we get an illegal op in kernel mode, send it through the |
513 | * kprobes notifier. If kprobes doesn't pick it up, SIGILL | 500 | * kprobes notifier. If kprobes doesn't pick it up, SIGILL |
514 | */ | 501 | */ |
515 | if (notify_die(DIE_BPT, "bpt", regs, interruption_code, | 502 | if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code, |
516 | 3, SIGTRAP) != NOTIFY_STOP) | 503 | 3, SIGTRAP) != NOTIFY_STOP) |
517 | signal = SIGILL; | 504 | signal = SIGILL; |
518 | } | 505 | } |
@@ -520,13 +507,13 @@ static void illegal_op(struct pt_regs * regs, long interruption_code) | |||
520 | #ifdef CONFIG_MATHEMU | 507 | #ifdef CONFIG_MATHEMU |
521 | if (signal == SIGFPE) | 508 | if (signal == SIGFPE) |
522 | do_fp_trap(regs, location, | 509 | do_fp_trap(regs, location, |
523 | current->thread.fp_regs.fpc, interruption_code); | 510 | current->thread.fp_regs.fpc, pgm_int_code); |
524 | else if (signal == SIGSEGV) { | 511 | else if (signal == SIGSEGV) { |
525 | info.si_signo = signal; | 512 | info.si_signo = signal; |
526 | info.si_errno = 0; | 513 | info.si_errno = 0; |
527 | info.si_code = SEGV_MAPERR; | 514 | info.si_code = SEGV_MAPERR; |
528 | info.si_addr = (void __user *) location; | 515 | info.si_addr = (void __user *) location; |
529 | do_trap(interruption_code, signal, | 516 | do_trap(pgm_int_code, signal, |
530 | "user address fault", regs, &info); | 517 | "user address fault", regs, &info); |
531 | } else | 518 | } else |
532 | #endif | 519 | #endif |
@@ -535,28 +522,22 @@ static void illegal_op(struct pt_regs * regs, long interruption_code) | |||
535 | info.si_errno = 0; | 522 | info.si_errno = 0; |
536 | info.si_code = ILL_ILLOPC; | 523 | info.si_code = ILL_ILLOPC; |
537 | info.si_addr = (void __user *) location; | 524 | info.si_addr = (void __user *) location; |
538 | do_trap(interruption_code, signal, | 525 | do_trap(pgm_int_code, signal, |
539 | "illegal operation", regs, &info); | 526 | "illegal operation", regs, &info); |
540 | } | 527 | } |
541 | } | 528 | } |
542 | 529 | ||
543 | 530 | ||
544 | #ifdef CONFIG_MATHEMU | 531 | #ifdef CONFIG_MATHEMU |
545 | asmlinkage void | 532 | asmlinkage void specification_exception(struct pt_regs *regs, |
546 | specification_exception(struct pt_regs * regs, long interruption_code) | 533 | long pgm_int_code, |
534 | unsigned long trans_exc_code) | ||
547 | { | 535 | { |
548 | __u8 opcode[6]; | 536 | __u8 opcode[6]; |
549 | __u16 __user *location = NULL; | 537 | __u16 __user *location = NULL; |
550 | int signal = 0; | 538 | int signal = 0; |
551 | 539 | ||
552 | location = (__u16 __user *) get_check_address(regs); | 540 | location = (__u16 __user *) get_psw_address(regs, pgm_int_code); |
553 | |||
554 | /* | ||
555 | * We got all needed information from the lowcore and can | ||
556 | * now safely switch on interrupts. | ||
557 | */ | ||
558 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
559 | local_irq_enable(); | ||
560 | 541 | ||
561 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 542 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
562 | get_user(*((__u16 *) opcode), location); | 543 | get_user(*((__u16 *) opcode), location); |
@@ -592,35 +573,29 @@ specification_exception(struct pt_regs * regs, long interruption_code) | |||
592 | 573 | ||
593 | if (signal == SIGFPE) | 574 | if (signal == SIGFPE) |
594 | do_fp_trap(regs, location, | 575 | do_fp_trap(regs, location, |
595 | current->thread.fp_regs.fpc, interruption_code); | 576 | current->thread.fp_regs.fpc, pgm_int_code); |
596 | else if (signal) { | 577 | else if (signal) { |
597 | siginfo_t info; | 578 | siginfo_t info; |
598 | info.si_signo = signal; | 579 | info.si_signo = signal; |
599 | info.si_errno = 0; | 580 | info.si_errno = 0; |
600 | info.si_code = ILL_ILLOPN; | 581 | info.si_code = ILL_ILLOPN; |
601 | info.si_addr = location; | 582 | info.si_addr = location; |
602 | do_trap(interruption_code, signal, | 583 | do_trap(pgm_int_code, signal, |
603 | "specification exception", regs, &info); | 584 | "specification exception", regs, &info); |
604 | } | 585 | } |
605 | } | 586 | } |
606 | #else | 587 | #else |
607 | DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, | 588 | DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, |
608 | ILL_ILLOPN, get_check_address(regs)); | 589 | "specification exception"); |
609 | #endif | 590 | #endif |
610 | 591 | ||
611 | static void data_exception(struct pt_regs * regs, long interruption_code) | 592 | static void data_exception(struct pt_regs *regs, long pgm_int_code, |
593 | unsigned long trans_exc_code) | ||
612 | { | 594 | { |
613 | __u16 __user *location; | 595 | __u16 __user *location; |
614 | int signal = 0; | 596 | int signal = 0; |
615 | 597 | ||
616 | location = get_check_address(regs); | 598 | location = get_psw_address(regs, pgm_int_code); |
617 | |||
618 | /* | ||
619 | * We got all needed information from the lowcore and can | ||
620 | * now safely switch on interrupts. | ||
621 | */ | ||
622 | if (regs->psw.mask & PSW_MASK_PSTATE) | ||
623 | local_irq_enable(); | ||
624 | 599 | ||
625 | if (MACHINE_HAS_IEEE) | 600 | if (MACHINE_HAS_IEEE) |
626 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); | 601 | asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); |
@@ -686,19 +661,19 @@ static void data_exception(struct pt_regs * regs, long interruption_code) | |||
686 | signal = SIGILL; | 661 | signal = SIGILL; |
687 | if (signal == SIGFPE) | 662 | if (signal == SIGFPE) |
688 | do_fp_trap(regs, location, | 663 | do_fp_trap(regs, location, |
689 | current->thread.fp_regs.fpc, interruption_code); | 664 | current->thread.fp_regs.fpc, pgm_int_code); |
690 | else if (signal) { | 665 | else if (signal) { |
691 | siginfo_t info; | 666 | siginfo_t info; |
692 | info.si_signo = signal; | 667 | info.si_signo = signal; |
693 | info.si_errno = 0; | 668 | info.si_errno = 0; |
694 | info.si_code = ILL_ILLOPN; | 669 | info.si_code = ILL_ILLOPN; |
695 | info.si_addr = location; | 670 | info.si_addr = location; |
696 | do_trap(interruption_code, signal, | 671 | do_trap(pgm_int_code, signal, "data exception", regs, &info); |
697 | "data exception", regs, &info); | ||
698 | } | 672 | } |
699 | } | 673 | } |
700 | 674 | ||
701 | static void space_switch_exception(struct pt_regs * regs, long int_code) | 675 | static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, |
676 | unsigned long trans_exc_code) | ||
702 | { | 677 | { |
703 | siginfo_t info; | 678 | siginfo_t info; |
704 | 679 | ||
@@ -709,8 +684,8 @@ static void space_switch_exception(struct pt_regs * regs, long int_code) | |||
709 | info.si_signo = SIGILL; | 684 | info.si_signo = SIGILL; |
710 | info.si_errno = 0; | 685 | info.si_errno = 0; |
711 | info.si_code = ILL_PRVOPC; | 686 | info.si_code = ILL_PRVOPC; |
712 | info.si_addr = get_check_address(regs); | 687 | info.si_addr = get_psw_address(regs, pgm_int_code); |
713 | do_trap(int_code, SIGILL, "space switch event", regs, &info); | 688 | do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); |
714 | } | 689 | } |
715 | 690 | ||
716 | asmlinkage void kernel_stack_overflow(struct pt_regs * regs) | 691 | asmlinkage void kernel_stack_overflow(struct pt_regs * regs) |
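The traps.c hunk above replaces get_check_address(), which read the instruction length from S390_lowcore.pgm_ilc, with get_psw_address(), which takes the length from the upper half of the pgm_int_code argument now passed in by the low-level entry code. A minimal standalone sketch of that address calculation, assuming a 31-bit PSW_ADDR_INSN mask purely for illustration:

#include <stdio.h>

#define PSW_ADDR_INSN 0x7fffffffUL  /* 31-bit mask, chosen only for this example */

/* The PSW already points past the faulting instruction; stepping back by the
 * instruction length (kept in the upper 16 bits of pgm_int_code) recovers the
 * address of the instruction that raised the program check. */
static unsigned long get_psw_address(unsigned long psw_addr, long pgm_int_code)
{
        return (psw_addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN;
}

int main(void)
{
        /* A 4-byte instruction at 0x10000 caused the program check. */
        printf("%#lx\n", get_psw_address(0x10004, 4L << 16));   /* 0x10000 */
        return 0;
}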
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 6b83870507d5..e3150dd2fe74 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
@@ -84,11 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data; | |||
84 | */ | 84 | */ |
85 | static void vdso_init_data(struct vdso_data *vd) | 85 | static void vdso_init_data(struct vdso_data *vd) |
86 | { | 86 | { |
87 | unsigned int facility_list; | 87 | vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31); |
88 | |||
89 | facility_list = stfl(); | ||
90 | vd->ectg_available = | ||
91 | user_mode != HOME_SPACE_MODE && (facility_list & 1); | ||
92 | } | 88 | } |
93 | 89 | ||
94 | #ifdef CONFIG_64BIT | 90 | #ifdef CONFIG_64BIT |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 3479f1b0d4e0..56c8687b29b3 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -314,7 +314,8 @@ static void do_callbacks(struct list_head *cb_list) | |||
314 | /* | 314 | /* |
315 | * Handler for the virtual CPU timer. | 315 | * Handler for the virtual CPU timer. |
316 | */ | 316 | */ |
317 | static void do_cpu_timer_interrupt(__u16 error_code) | 317 | static void do_cpu_timer_interrupt(unsigned int ext_int_code, |
318 | unsigned int param32, unsigned long param64) | ||
318 | { | 319 | { |
319 | struct vtimer_queue *vq; | 320 | struct vtimer_queue *vq; |
320 | struct vtimer_list *event, *tmp; | 321 | struct vtimer_list *event, *tmp; |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4fe68650535c..985d825494f1 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -740,8 +740,8 @@ static int __init kvm_s390_init(void) | |||
740 | kvm_exit(); | 740 | kvm_exit(); |
741 | return -ENOMEM; | 741 | return -ENOMEM; |
742 | } | 742 | } |
743 | stfle(facilities, 1); | 743 | memcpy(facilities, S390_lowcore.stfle_fac_list, 16); |
744 | facilities[0] &= 0xff00fff3f0700000ULL; | 744 | facilities[0] &= 0xff00fff3f47c0000ULL; |
745 | return 0; | 745 | return 0; |
746 | } | 746 | } |
747 | 747 | ||
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 44205507717c..9194a4b52b22 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -154,12 +154,12 @@ static int handle_chsc(struct kvm_vcpu *vcpu) | |||
154 | 154 | ||
155 | static int handle_stfl(struct kvm_vcpu *vcpu) | 155 | static int handle_stfl(struct kvm_vcpu *vcpu) |
156 | { | 156 | { |
157 | unsigned int facility_list = stfl(); | 157 | unsigned int facility_list; |
158 | int rc; | 158 | int rc; |
159 | 159 | ||
160 | vcpu->stat.instruction_stfl++; | 160 | vcpu->stat.instruction_stfl++; |
161 | /* only pass the facility bits, which we can handle */ | 161 | /* only pass the facility bits, which we can handle */ |
162 | facility_list &= 0xff00fff3; | 162 | facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3; |
163 | 163 | ||
164 | rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 164 | rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
165 | &facility_list, sizeof(facility_list)); | 165 | &facility_list, sizeof(facility_list)); |
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index eec054484419..6fbc6f3fbdf2 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile | |||
@@ -3,6 +3,6 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ | 5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ |
6 | page-states.o | 6 | page-states.o gup.o |
7 | obj-$(CONFIG_CMM) += cmm.o | 7 | obj-$(CONFIG_CMM) += cmm.o |
8 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 8 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index a9550dca3e4b..c66ffd8dbbb7 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -23,7 +23,10 @@ | |||
23 | #include <asm/pgalloc.h> | 23 | #include <asm/pgalloc.h> |
24 | #include <asm/diag.h> | 24 | #include <asm/diag.h> |
25 | 25 | ||
26 | static char *sender = "VMRMSVM"; | 26 | #ifdef CONFIG_CMM_IUCV |
27 | static char *cmm_default_sender = "VMRMSVM"; | ||
28 | #endif | ||
29 | static char *sender; | ||
27 | module_param(sender, charp, 0400); | 30 | module_param(sender, charp, 0400); |
28 | MODULE_PARM_DESC(sender, | 31 | MODULE_PARM_DESC(sender, |
29 | "Guest name that may send SMSG messages (default VMRMSVM)"); | 32 | "Guest name that may send SMSG messages (default VMRMSVM)"); |
@@ -440,6 +443,8 @@ static int __init cmm_init(void) | |||
440 | int len = strlen(sender); | 443 | int len = strlen(sender); |
441 | while (len--) | 444 | while (len--) |
442 | sender[len] = toupper(sender[len]); | 445 | sender[len] = toupper(sender[len]); |
446 | } else { | ||
447 | sender = cmm_default_sender; | ||
443 | } | 448 | } |
444 | 449 | ||
445 | rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); | 450 | rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 2505b2ea0ef1..fe5701e9efbf 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -52,6 +52,14 @@ | |||
52 | #define VM_FAULT_BADMAP 0x020000 | 52 | #define VM_FAULT_BADMAP 0x020000 |
53 | #define VM_FAULT_BADACCESS 0x040000 | 53 | #define VM_FAULT_BADACCESS 0x040000 |
54 | 54 | ||
55 | static unsigned long store_indication; | ||
56 | |||
57 | void fault_init(void) | ||
58 | { | ||
59 | if (test_facility(2) && test_facility(75)) | ||
60 | store_indication = 0xc00; | ||
61 | } | ||
62 | |||
55 | static inline int notify_page_fault(struct pt_regs *regs) | 63 | static inline int notify_page_fault(struct pt_regs *regs) |
56 | { | 64 | { |
57 | int ret = 0; | 65 | int ret = 0; |
@@ -199,14 +207,21 @@ static noinline void do_sigbus(struct pt_regs *regs, long int_code, | |||
199 | unsigned long trans_exc_code) | 207 | unsigned long trans_exc_code) |
200 | { | 208 | { |
201 | struct task_struct *tsk = current; | 209 | struct task_struct *tsk = current; |
210 | unsigned long address; | ||
211 | struct siginfo si; | ||
202 | 212 | ||
203 | /* | 213 | /* |
204 | * Send a sigbus, regardless of whether we were in kernel | 214 | * Send a sigbus, regardless of whether we were in kernel |
205 | * or user mode. | 215 | * or user mode. |
206 | */ | 216 | */ |
207 | tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK; | 217 | address = trans_exc_code & __FAIL_ADDR_MASK; |
218 | tsk->thread.prot_addr = address; | ||
208 | tsk->thread.trap_no = int_code; | 219 | tsk->thread.trap_no = int_code; |
209 | force_sig(SIGBUS, tsk); | 220 | si.si_signo = SIGBUS; |
221 | si.si_errno = 0; | ||
222 | si.si_code = BUS_ADRERR; | ||
223 | si.si_addr = (void __user *) address; | ||
224 | force_sig_info(SIGBUS, &si, tsk); | ||
210 | } | 225 | } |
211 | 226 | ||
212 | #ifdef CONFIG_S390_EXEC_PROTECT | 227 | #ifdef CONFIG_S390_EXEC_PROTECT |
@@ -266,10 +281,11 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code, | |||
266 | if (fault & VM_FAULT_OOM) | 281 | if (fault & VM_FAULT_OOM) |
267 | pagefault_out_of_memory(); | 282 | pagefault_out_of_memory(); |
268 | else if (fault & VM_FAULT_SIGBUS) { | 283 | else if (fault & VM_FAULT_SIGBUS) { |
269 | do_sigbus(regs, int_code, trans_exc_code); | ||
270 | /* Kernel mode? Handle exceptions or die */ | 284 | /* Kernel mode? Handle exceptions or die */ |
271 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 285 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
272 | do_no_context(regs, int_code, trans_exc_code); | 286 | do_no_context(regs, int_code, trans_exc_code); |
287 | else | ||
288 | do_sigbus(regs, int_code, trans_exc_code); | ||
273 | } else | 289 | } else |
274 | BUG(); | 290 | BUG(); |
275 | break; | 291 | break; |
@@ -294,7 +310,7 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
294 | struct mm_struct *mm; | 310 | struct mm_struct *mm; |
295 | struct vm_area_struct *vma; | 311 | struct vm_area_struct *vma; |
296 | unsigned long address; | 312 | unsigned long address; |
297 | int fault; | 313 | int fault, write; |
298 | 314 | ||
299 | if (notify_page_fault(regs)) | 315 | if (notify_page_fault(regs)) |
300 | return 0; | 316 | return 0; |
@@ -312,12 +328,6 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
312 | goto out; | 328 | goto out; |
313 | 329 | ||
314 | address = trans_exc_code & __FAIL_ADDR_MASK; | 330 | address = trans_exc_code & __FAIL_ADDR_MASK; |
315 | /* | ||
316 | * When we get here, the fault happened in the current | ||
317 | * task's user address space, so we can switch on the | ||
318 | * interrupts again and then search the VMAs | ||
319 | */ | ||
320 | local_irq_enable(); | ||
321 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 331 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); |
322 | down_read(&mm->mmap_sem); | 332 | down_read(&mm->mmap_sem); |
323 | 333 | ||
@@ -348,8 +358,10 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
348 | * make sure we exit gracefully rather than endlessly redo | 358 | * make sure we exit gracefully rather than endlessly redo |
349 | * the fault. | 359 | * the fault. |
350 | */ | 360 | */ |
351 | fault = handle_mm_fault(mm, vma, address, | 361 | write = (access == VM_WRITE || |
352 | (access == VM_WRITE) ? FAULT_FLAG_WRITE : 0); | 362 | (trans_exc_code & store_indication) == 0x400) ? |
363 | FAULT_FLAG_WRITE : 0; | ||
364 | fault = handle_mm_fault(mm, vma, address, write); | ||
353 | if (unlikely(fault & VM_FAULT_ERROR)) | 365 | if (unlikely(fault & VM_FAULT_ERROR)) |
354 | goto out_up; | 366 | goto out_up; |
355 | 367 | ||
@@ -374,20 +386,20 @@ out: | |||
374 | return fault; | 386 | return fault; |
375 | } | 387 | } |
376 | 388 | ||
377 | void __kprobes do_protection_exception(struct pt_regs *regs, long int_code) | 389 | void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, |
390 | unsigned long trans_exc_code) | ||
378 | { | 391 | { |
379 | unsigned long trans_exc_code = S390_lowcore.trans_exc_code; | ||
380 | int fault; | 392 | int fault; |
381 | 393 | ||
382 | /* Protection exception is suppressing, decrement psw address. */ | 394 | /* Protection exception is suppressing, decrement psw address. */ |
383 | regs->psw.addr -= (int_code >> 16); | 395 | regs->psw.addr -= (pgm_int_code >> 16); |
384 | /* | 396 | /* |
385 | * Check for low-address protection. This needs to be treated | 397 | * Check for low-address protection. This needs to be treated |
386 | * as a special case because the translation exception code | 398 | * as a special case because the translation exception code |
387 | * field is not guaranteed to contain valid data in this case. | 399 | * field is not guaranteed to contain valid data in this case. |
388 | */ | 400 | */ |
389 | if (unlikely(!(trans_exc_code & 4))) { | 401 | if (unlikely(!(trans_exc_code & 4))) { |
390 | do_low_address(regs, int_code, trans_exc_code); | 402 | do_low_address(regs, pgm_int_code, trans_exc_code); |
391 | return; | 403 | return; |
392 | } | 404 | } |
393 | fault = do_exception(regs, VM_WRITE, trans_exc_code); | 405 | fault = do_exception(regs, VM_WRITE, trans_exc_code); |
@@ -395,9 +407,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs, long int_code) | |||
395 | do_fault_error(regs, 4, trans_exc_code, fault); | 407 | do_fault_error(regs, 4, trans_exc_code, fault); |
396 | } | 408 | } |
397 | 409 | ||
398 | void __kprobes do_dat_exception(struct pt_regs *regs, long int_code) | 410 | void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code, |
411 | unsigned long trans_exc_code) | ||
399 | { | 412 | { |
400 | unsigned long trans_exc_code = S390_lowcore.trans_exc_code; | ||
401 | int access, fault; | 413 | int access, fault; |
402 | 414 | ||
403 | access = VM_READ | VM_EXEC | VM_WRITE; | 415 | access = VM_READ | VM_EXEC | VM_WRITE; |
@@ -408,21 +420,19 @@ void __kprobes do_dat_exception(struct pt_regs *regs, long int_code) | |||
408 | #endif | 420 | #endif |
409 | fault = do_exception(regs, access, trans_exc_code); | 421 | fault = do_exception(regs, access, trans_exc_code); |
410 | if (unlikely(fault)) | 422 | if (unlikely(fault)) |
411 | do_fault_error(regs, int_code & 255, trans_exc_code, fault); | 423 | do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault); |
412 | } | 424 | } |
413 | 425 | ||
414 | #ifdef CONFIG_64BIT | 426 | #ifdef CONFIG_64BIT |
415 | void __kprobes do_asce_exception(struct pt_regs *regs, long int_code) | 427 | void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code, |
428 | unsigned long trans_exc_code) | ||
416 | { | 429 | { |
417 | unsigned long trans_exc_code = S390_lowcore.trans_exc_code; | ||
418 | struct mm_struct *mm = current->mm; | 430 | struct mm_struct *mm = current->mm; |
419 | struct vm_area_struct *vma; | 431 | struct vm_area_struct *vma; |
420 | 432 | ||
421 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) | 433 | if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) |
422 | goto no_context; | 434 | goto no_context; |
423 | 435 | ||
424 | local_irq_enable(); | ||
425 | |||
426 | down_read(&mm->mmap_sem); | 436 | down_read(&mm->mmap_sem); |
427 | vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); | 437 | vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); |
428 | up_read(&mm->mmap_sem); | 438 | up_read(&mm->mmap_sem); |
@@ -434,16 +444,16 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long int_code) | |||
434 | 444 | ||
435 | /* User mode accesses just cause a SIGSEGV */ | 445 | /* User mode accesses just cause a SIGSEGV */ |
436 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 446 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
437 | do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code); | 447 | do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code); |
438 | return; | 448 | return; |
439 | } | 449 | } |
440 | 450 | ||
441 | no_context: | 451 | no_context: |
442 | do_no_context(regs, int_code, trans_exc_code); | 452 | do_no_context(regs, pgm_int_code, trans_exc_code); |
443 | } | 453 | } |
444 | #endif | 454 | #endif |
445 | 455 | ||
446 | int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user) | 456 | int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) |
447 | { | 457 | { |
448 | struct pt_regs regs; | 458 | struct pt_regs regs; |
449 | int access, fault; | 459 | int access, fault; |
@@ -454,14 +464,14 @@ int __handle_fault(unsigned long uaddr, unsigned long int_code, int write_user) | |||
454 | regs.psw.addr = (unsigned long) __builtin_return_address(0); | 464 | regs.psw.addr = (unsigned long) __builtin_return_address(0); |
455 | regs.psw.addr |= PSW_ADDR_AMODE; | 465 | regs.psw.addr |= PSW_ADDR_AMODE; |
456 | uaddr &= PAGE_MASK; | 466 | uaddr &= PAGE_MASK; |
457 | access = write_user ? VM_WRITE : VM_READ; | 467 | access = write ? VM_WRITE : VM_READ; |
458 | fault = do_exception(®s, access, uaddr | 2); | 468 | fault = do_exception(®s, access, uaddr | 2); |
459 | if (unlikely(fault)) { | 469 | if (unlikely(fault)) { |
460 | if (fault & VM_FAULT_OOM) { | 470 | if (fault & VM_FAULT_OOM) { |
461 | pagefault_out_of_memory(); | 471 | pagefault_out_of_memory(); |
462 | fault = 0; | 472 | fault = 0; |
463 | } else if (fault & VM_FAULT_SIGBUS) | 473 | } else if (fault & VM_FAULT_SIGBUS) |
464 | do_sigbus(®s, int_code, uaddr); | 474 | do_sigbus(®s, pgm_int_code, uaddr); |
465 | } | 475 | } |
466 | return fault ? -EFAULT : 0; | 476 | return fault ? -EFAULT : 0; |
467 | } | 477 | } |
@@ -527,7 +537,8 @@ void pfault_fini(void) | |||
527 | : : "a" (&refbk), "m" (refbk) : "cc"); | 537 | : : "a" (&refbk), "m" (refbk) : "cc"); |
528 | } | 538 | } |
529 | 539 | ||
530 | static void pfault_interrupt(__u16 int_code) | 540 | static void pfault_interrupt(unsigned int ext_int_code, |
541 | unsigned int param32, unsigned long param64) | ||
531 | { | 542 | { |
532 | struct task_struct *tsk; | 543 | struct task_struct *tsk; |
533 | __u16 subcode; | 544 | __u16 subcode; |
@@ -538,14 +549,18 @@ static void pfault_interrupt(__u16 int_code) | |||
538 | * in the 'cpu address' field associated with the | 549 | * in the 'cpu address' field associated with the |
539 | * external interrupt. | 550 | * external interrupt. |
540 | */ | 551 | */ |
541 | subcode = S390_lowcore.cpu_addr; | 552 | subcode = ext_int_code >> 16; |
542 | if ((subcode & 0xff00) != __SUBCODE_MASK) | 553 | if ((subcode & 0xff00) != __SUBCODE_MASK) |
543 | return; | 554 | return; |
544 | 555 | ||
545 | /* | 556 | /* |
546 | * Get the token (= address of the task structure of the affected task). | 557 | * Get the token (= address of the task structure of the affected task). |
547 | */ | 558 | */ |
548 | tsk = *(struct task_struct **) __LC_PFAULT_INTPARM; | 559 | #ifdef CONFIG_64BIT |
560 | tsk = *(struct task_struct **) param64; | ||
561 | #else | ||
562 | tsk = *(struct task_struct **) param32; | ||
563 | #endif | ||
549 | 564 | ||
550 | if (subcode & 0x0080) { | 565 | if (subcode & 0x0080) { |
551 | /* signal bit is set -> a page has been swapped in by VM */ | 566 | /* signal bit is set -> a page has been swapped in by VM */ |
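The fault.c changes add fault_init(), which sets store_indication to 0xc00 when facility bits 2 and 75 are present, and then use the translation-exception code itself to decide whether a fault was caused by a store. The standalone sketch below mirrors only that decision; the constants are taken from the hunk above, not verified against architecture documentation:

#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE 0x2                       /* illustrative value */

static unsigned long store_indication;     /* 0xc00 once fault_init() has run */

/* Either the caller explicitly asked for write access (protection exception
 * path) or the hardware flagged the access as a store in trans_exc_code. */
static bool fault_is_write(int access, unsigned long trans_exc_code)
{
        return access == VM_WRITE ||
               (trans_exc_code & store_indication) == 0x400;
}

int main(void)
{
        store_indication = 0xc00;                    /* facilities 2 and 75 available */
        printf("%d\n", fault_is_write(0, 0x400));    /* store-indicated -> 1 */
        printf("%d\n", fault_is_write(0, 0x800));    /* fetch            -> 0 */
        return 0;
}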
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c new file mode 100644 index 000000000000..38e641cdd977 --- /dev/null +++ b/arch/s390/mm/gup.c | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * Lockless get_user_pages_fast for s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2010 | ||
5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
6 | */ | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/mm.h> | ||
9 | #include <linux/hugetlb.h> | ||
10 | #include <linux/vmstat.h> | ||
11 | #include <linux/pagemap.h> | ||
12 | #include <linux/rwsem.h> | ||
13 | #include <asm/pgtable.h> | ||
14 | |||
15 | /* | ||
16 | * The performance critical leaf functions are made noinline otherwise gcc | ||
17 | * inlines everything into a single function which results in too much | ||
18 | * register pressure. | ||
19 | */ | ||
20 | static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | ||
21 | unsigned long end, int write, struct page **pages, int *nr) | ||
22 | { | ||
23 | unsigned long mask, result; | ||
24 | pte_t *ptep, pte; | ||
25 | struct page *page; | ||
26 | |||
27 | result = write ? 0 : _PAGE_RO; | ||
28 | mask = result | _PAGE_INVALID | _PAGE_SPECIAL; | ||
29 | |||
30 | ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); | ||
31 | do { | ||
32 | pte = *ptep; | ||
33 | barrier(); | ||
34 | if ((pte_val(pte) & mask) != result) | ||
35 | return 0; | ||
36 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | ||
37 | page = pte_page(pte); | ||
38 | if (!page_cache_get_speculative(page)) | ||
39 | return 0; | ||
40 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { | ||
41 | put_page(page); | ||
42 | return 0; | ||
43 | } | ||
44 | pages[*nr] = page; | ||
45 | (*nr)++; | ||
46 | |||
47 | } while (ptep++, addr += PAGE_SIZE, addr != end); | ||
48 | |||
49 | return 1; | ||
50 | } | ||
51 | |||
52 | static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | ||
53 | unsigned long end, int write, struct page **pages, int *nr) | ||
54 | { | ||
55 | unsigned long mask, result; | ||
56 | struct page *head, *page; | ||
57 | int refs; | ||
58 | |||
59 | result = write ? 0 : _SEGMENT_ENTRY_RO; | ||
60 | mask = result | _SEGMENT_ENTRY_INV; | ||
61 | if ((pmd_val(pmd) & mask) != result) | ||
62 | return 0; | ||
63 | VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); | ||
64 | |||
65 | refs = 0; | ||
66 | head = pmd_page(pmd); | ||
67 | page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); | ||
68 | do { | ||
69 | VM_BUG_ON(compound_head(page) != head); | ||
70 | pages[*nr] = page; | ||
71 | (*nr)++; | ||
72 | page++; | ||
73 | refs++; | ||
74 | } while (addr += PAGE_SIZE, addr != end); | ||
75 | |||
76 | if (!page_cache_add_speculative(head, refs)) { | ||
77 | *nr -= refs; | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) { | ||
82 | *nr -= refs; | ||
83 | while (refs--) | ||
84 | put_page(head); | ||
85 | } | ||
86 | |||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | |||
91 | static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, | ||
92 | unsigned long end, int write, struct page **pages, int *nr) | ||
93 | { | ||
94 | unsigned long next; | ||
95 | pmd_t *pmdp, pmd; | ||
96 | |||
97 | pmdp = (pmd_t *) pudp; | ||
98 | #ifdef CONFIG_64BIT | ||
99 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) | ||
100 | pmdp = (pmd_t *) pud_deref(pud); | ||
101 | pmdp += pmd_index(addr); | ||
102 | #endif | ||
103 | do { | ||
104 | pmd = *pmdp; | ||
105 | barrier(); | ||
106 | next = pmd_addr_end(addr, end); | ||
107 | if (pmd_none(pmd)) | ||
108 | return 0; | ||
109 | if (unlikely(pmd_huge(pmd))) { | ||
110 | if (!gup_huge_pmd(pmdp, pmd, addr, next, | ||
111 | write, pages, nr)) | ||
112 | return 0; | ||
113 | } else if (!gup_pte_range(pmdp, pmd, addr, next, | ||
114 | write, pages, nr)) | ||
115 | return 0; | ||
116 | } while (pmdp++, addr = next, addr != end); | ||
117 | |||
118 | return 1; | ||
119 | } | ||
120 | |||
121 | static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, | ||
122 | unsigned long end, int write, struct page **pages, int *nr) | ||
123 | { | ||
124 | unsigned long next; | ||
125 | pud_t *pudp, pud; | ||
126 | |||
127 | pudp = (pud_t *) pgdp; | ||
128 | #ifdef CONFIG_64BIT | ||
129 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) | ||
130 | pudp = (pud_t *) pgd_deref(pgd); | ||
131 | pudp += pud_index(addr); | ||
132 | #endif | ||
133 | do { | ||
134 | pud = *pudp; | ||
135 | barrier(); | ||
136 | next = pud_addr_end(addr, end); | ||
137 | if (pud_none(pud)) | ||
138 | return 0; | ||
139 | if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr)) | ||
140 | return 0; | ||
141 | } while (pudp++, addr = next, addr != end); | ||
142 | |||
143 | return 1; | ||
144 | } | ||
145 | |||
146 | /** | ||
147 | * get_user_pages_fast() - pin user pages in memory | ||
148 | * @start: starting user address | ||
149 | * @nr_pages: number of pages from start to pin | ||
150 | * @write: whether pages will be written to | ||
151 | * @pages: array that receives pointers to the pages pinned. | ||
152 | * Should be at least nr_pages long. | ||
153 | * | ||
154 | * Attempt to pin user pages in memory without taking mm->mmap_sem. | ||
155 | * If not successful, it will fall back to taking the lock and | ||
156 | * calling get_user_pages(). | ||
157 | * | ||
158 | * Returns number of pages pinned. This may be fewer than the number | ||
159 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | ||
160 | * were pinned, returns -errno. | ||
161 | */ | ||
162 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | ||
163 | struct page **pages) | ||
164 | { | ||
165 | struct mm_struct *mm = current->mm; | ||
166 | unsigned long addr, len, end; | ||
167 | unsigned long next; | ||
168 | pgd_t *pgdp, pgd; | ||
169 | int nr = 0; | ||
170 | |||
171 | start &= PAGE_MASK; | ||
172 | addr = start; | ||
173 | len = (unsigned long) nr_pages << PAGE_SHIFT; | ||
174 | end = start + len; | ||
175 | if (end < start) | ||
176 | goto slow_irqon; | ||
177 | |||
178 | /* | ||
179 | * local_irq_disable() doesn't prevent pagetable teardown, but does | ||
180 | * prevent the pagetables from being freed on s390. | ||
181 | * | ||
182 | * So long as we atomically load page table pointers versus teardown, | ||
183 | * we can follow the address down to the page and take a ref on it. | ||

184 | */ | ||
185 | local_irq_disable(); | ||
186 | pgdp = pgd_offset(mm, addr); | ||
187 | do { | ||
188 | pgd = *pgdp; | ||
189 | barrier(); | ||
190 | next = pgd_addr_end(addr, end); | ||
191 | if (pgd_none(pgd)) | ||
192 | goto slow; | ||
193 | if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr)) | ||
194 | goto slow; | ||
195 | } while (pgdp++, addr = next, addr != end); | ||
196 | local_irq_enable(); | ||
197 | |||
198 | VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); | ||
199 | return nr; | ||
200 | |||
201 | { | ||
202 | int ret; | ||
203 | slow: | ||
204 | local_irq_enable(); | ||
205 | slow_irqon: | ||
206 | /* Try to get the remaining pages with get_user_pages */ | ||
207 | start += nr << PAGE_SHIFT; | ||
208 | pages += nr; | ||
209 | |||
210 | down_read(&mm->mmap_sem); | ||
211 | ret = get_user_pages(current, mm, start, | ||
212 | (end - start) >> PAGE_SHIFT, write, 0, pages, NULL); | ||
213 | up_read(&mm->mmap_sem); | ||
214 | |||
215 | /* Have to be a bit careful with return values */ | ||
216 | if (nr > 0) { | ||
217 | if (ret < 0) | ||
218 | ret = nr; | ||
219 | else | ||
220 | ret += nr; | ||
221 | } | ||
222 | |||
223 | return ret; | ||
224 | } | ||
225 | } | ||
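Since the new gup.c provides get_user_pages_fast() with the signature documented in the comment block above, a caller that wants to pin a user buffer could look roughly like the sketch below. This is a hypothetical in-kernel caller shown for illustration only, with error handling trimmed; pin_user_buffer and its arguments are invented names:

#include <linux/mm.h>

static int pin_user_buffer(unsigned long uaddr, int nr_pages,
                           struct page **pages)
{
        int pinned, i;

        /* Try the lockless fast path first; it falls back to
         * get_user_pages() internally if it cannot pin everything. */
        pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
        if (pinned < 0)
                return pinned;              /* nothing was pinned */

        /* ... use the pinned pages ... */

        for (i = 0; i < pinned; i++)
                put_page(pages[i]);
        return pinned == nr_pages ? 0 : -EFAULT;
}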
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index f28c43d2f61d..639cd21f2218 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c | |||
@@ -68,7 +68,7 @@ void arch_release_hugepage(struct page *page) | |||
68 | ptep = (pte_t *) page[1].index; | 68 | ptep = (pte_t *) page[1].index; |
69 | if (!ptep) | 69 | if (!ptep) |
70 | return; | 70 | return; |
71 | pte_free(&init_mm, ptep); | 71 | page_table_free(&init_mm, (unsigned long *) ptep); |
72 | page[1].index = 0; | 72 | page[1].index = 0; |
73 | } | 73 | } |
74 | 74 | ||
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 94b8ba2ec857..bb409332a484 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -38,13 +38,54 @@ | |||
38 | #include <asm/tlbflush.h> | 38 | #include <asm/tlbflush.h> |
39 | #include <asm/sections.h> | 39 | #include <asm/sections.h> |
40 | 40 | ||
41 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
42 | |||
43 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); | 41 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); |
44 | 42 | ||
45 | char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | 43 | unsigned long empty_zero_page, zero_page_mask; |
46 | EXPORT_SYMBOL(empty_zero_page); | 44 | EXPORT_SYMBOL(empty_zero_page); |
47 | 45 | ||
46 | static unsigned long setup_zero_pages(void) | ||
47 | { | ||
48 | struct cpuid cpu_id; | ||
49 | unsigned int order; | ||
50 | unsigned long size; | ||
51 | struct page *page; | ||
52 | int i; | ||
53 | |||
54 | get_cpu_id(&cpu_id); | ||
55 | switch (cpu_id.machine) { | ||
56 | case 0x9672: /* g5 */ | ||
57 | case 0x2064: /* z900 */ | ||
58 | case 0x2066: /* z900 */ | ||
59 | case 0x2084: /* z990 */ | ||
60 | case 0x2086: /* z990 */ | ||
61 | case 0x2094: /* z9-109 */ | ||
62 | case 0x2096: /* z9-109 */ | ||
63 | order = 0; | ||
64 | break; | ||
65 | case 0x2097: /* z10 */ | ||
66 | case 0x2098: /* z10 */ | ||
67 | default: | ||
68 | order = 2; | ||
69 | break; | ||
70 | } | ||
71 | |||
72 | empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | ||
73 | if (!empty_zero_page) | ||
74 | panic("Out of memory in setup_zero_pages"); | ||
75 | |||
76 | page = virt_to_page((void *) empty_zero_page); | ||
77 | split_page(page, order); | ||
78 | for (i = 1 << order; i > 0; i--) { | ||
79 | SetPageReserved(page); | ||
80 | page++; | ||
81 | } | ||
82 | |||
83 | size = PAGE_SIZE << order; | ||
84 | zero_page_mask = (size - 1) & PAGE_MASK; | ||
85 | |||
86 | return 1UL << order; | ||
87 | } | ||
88 | |||
48 | /* | 89 | /* |
49 | * paging_init() sets up the page tables | 90 | * paging_init() sets up the page tables |
50 | */ | 91 | */ |
@@ -83,6 +124,7 @@ void __init paging_init(void) | |||
83 | #endif | 124 | #endif |
84 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 125 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
85 | free_area_init_nodes(max_zone_pfns); | 126 | free_area_init_nodes(max_zone_pfns); |
127 | fault_init(); | ||
86 | } | 128 | } |
87 | 129 | ||
88 | void __init mem_init(void) | 130 | void __init mem_init(void) |
@@ -92,14 +134,12 @@ void __init mem_init(void) | |||
92 | max_mapnr = num_physpages = max_low_pfn; | 134 | max_mapnr = num_physpages = max_low_pfn; |
93 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 135 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
94 | 136 | ||
95 | /* clear the zero-page */ | ||
96 | memset(empty_zero_page, 0, PAGE_SIZE); | ||
97 | |||
98 | /* Setup guest page hinting */ | 137 | /* Setup guest page hinting */ |
99 | cmma_init(); | 138 | cmma_init(); |
100 | 139 | ||
101 | /* this will put all low memory onto the freelists */ | 140 | /* this will put all low memory onto the freelists */ |
102 | totalram_pages += free_all_bootmem(); | 141 | totalram_pages += free_all_bootmem(); |
142 | totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */ | ||
103 | 143 | ||
104 | reservedpages = 0; | 144 | reservedpages = 0; |
105 | 145 | ||
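setup_zero_pages() above allocates one zero page on older machines and four on z10 and later, recording zero_page_mask so that different user addresses can be backed by different zero pages (reducing cache contention on read faults). How the mask is consumed is not part of this diff, so the standalone model below is only a guess at the intended selection logic:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long empty_zero_page;   /* base of the zeroed area */
static unsigned long zero_page_mask;    /* (size - 1) & PAGE_MASK  */

/* Pick one of the pre-zeroed pages based on the faulting virtual address. */
static unsigned long zero_page_for(unsigned long vaddr)
{
        return empty_zero_page + (vaddr & zero_page_mask);
}

int main(void)
{
        unsigned int order = 2;                     /* z10 case: 4 pages  */
        unsigned long size = PAGE_SIZE << order;

        empty_zero_page = 0x100000;                 /* pretend allocation */
        zero_page_mask = (size - 1) & PAGE_MASK;    /* -> 0x3000          */

        printf("%#lx\n", zero_page_for(0x7000));    /* -> 0x103000        */
        printf("%#lx\n", zero_page_for(0x8000));    /* -> 0x100000        */
        return 0;
}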
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 8d999249d357..0c719c61972e 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/quicklist.h> | 17 | #include <linux/quicklist.h> |
18 | #include <linux/rcupdate.h> | ||
18 | 19 | ||
19 | #include <asm/system.h> | 20 | #include <asm/system.h> |
20 | #include <asm/pgtable.h> | 21 | #include <asm/pgtable.h> |
@@ -23,6 +24,67 @@ | |||
23 | #include <asm/tlbflush.h> | 24 | #include <asm/tlbflush.h> |
24 | #include <asm/mmu_context.h> | 25 | #include <asm/mmu_context.h> |
25 | 26 | ||
27 | struct rcu_table_freelist { | ||
28 | struct rcu_head rcu; | ||
29 | struct mm_struct *mm; | ||
30 | unsigned int pgt_index; | ||
31 | unsigned int crst_index; | ||
32 | unsigned long *table[0]; | ||
33 | }; | ||
34 | |||
35 | #define RCU_FREELIST_SIZE \ | ||
36 | ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \ | ||
37 | / sizeof(unsigned long)) | ||
38 | |||
39 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
40 | static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist); | ||
41 | |||
42 | static void __page_table_free(struct mm_struct *mm, unsigned long *table); | ||
43 | static void __crst_table_free(struct mm_struct *mm, unsigned long *table); | ||
44 | |||
45 | static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm) | ||
46 | { | ||
47 | struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist); | ||
48 | struct rcu_table_freelist *batch = *batchp; | ||
49 | |||
50 | if (batch) | ||
51 | return batch; | ||
52 | batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC); | ||
53 | if (batch) { | ||
54 | batch->mm = mm; | ||
55 | batch->pgt_index = 0; | ||
56 | batch->crst_index = RCU_FREELIST_SIZE; | ||
57 | *batchp = batch; | ||
58 | } | ||
59 | return batch; | ||
60 | } | ||
61 | |||
62 | static void rcu_table_freelist_callback(struct rcu_head *head) | ||
63 | { | ||
64 | struct rcu_table_freelist *batch = | ||
65 | container_of(head, struct rcu_table_freelist, rcu); | ||
66 | |||
67 | while (batch->pgt_index > 0) | ||
68 | __page_table_free(batch->mm, batch->table[--batch->pgt_index]); | ||
69 | while (batch->crst_index < RCU_FREELIST_SIZE) | ||
70 | __crst_table_free(batch->mm, batch->table[batch->crst_index++]); | ||
71 | free_page((unsigned long) batch); | ||
72 | } | ||
73 | |||
74 | void rcu_table_freelist_finish(void) | ||
75 | { | ||
76 | struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist); | ||
77 | |||
78 | if (!batch) | ||
79 | return; | ||
80 | call_rcu(&batch->rcu, rcu_table_freelist_callback); | ||
81 | __get_cpu_var(rcu_table_freelist) = NULL; | ||
82 | } | ||
83 | |||
84 | static void smp_sync(void *arg) | ||
85 | { | ||
86 | } | ||
87 | |||
26 | #ifndef CONFIG_64BIT | 88 | #ifndef CONFIG_64BIT |
27 | #define ALLOC_ORDER 1 | 89 | #define ALLOC_ORDER 1 |
28 | #define TABLES_PER_PAGE 4 | 90 | #define TABLES_PER_PAGE 4 |
@@ -78,25 +140,55 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) | |||
78 | } | 140 | } |
79 | page->index = page_to_phys(shadow); | 141 | page->index = page_to_phys(shadow); |
80 | } | 142 | } |
81 | spin_lock(&mm->context.list_lock); | 143 | spin_lock_bh(&mm->context.list_lock); |
82 | list_add(&page->lru, &mm->context.crst_list); | 144 | list_add(&page->lru, &mm->context.crst_list); |
83 | spin_unlock(&mm->context.list_lock); | 145 | spin_unlock_bh(&mm->context.list_lock); |
84 | return (unsigned long *) page_to_phys(page); | 146 | return (unsigned long *) page_to_phys(page); |
85 | } | 147 | } |
86 | 148 | ||
87 | void crst_table_free(struct mm_struct *mm, unsigned long *table) | 149 | static void __crst_table_free(struct mm_struct *mm, unsigned long *table) |
88 | { | 150 | { |
89 | unsigned long *shadow = get_shadow_table(table); | 151 | unsigned long *shadow = get_shadow_table(table); |
90 | struct page *page = virt_to_page(table); | ||
91 | 152 | ||
92 | spin_lock(&mm->context.list_lock); | ||
93 | list_del(&page->lru); | ||
94 | spin_unlock(&mm->context.list_lock); | ||
95 | if (shadow) | 153 | if (shadow) |
96 | free_pages((unsigned long) shadow, ALLOC_ORDER); | 154 | free_pages((unsigned long) shadow, ALLOC_ORDER); |
97 | free_pages((unsigned long) table, ALLOC_ORDER); | 155 | free_pages((unsigned long) table, ALLOC_ORDER); |
98 | } | 156 | } |
99 | 157 | ||
158 | void crst_table_free(struct mm_struct *mm, unsigned long *table) | ||
159 | { | ||
160 | struct page *page = virt_to_page(table); | ||
161 | |||
162 | spin_lock_bh(&mm->context.list_lock); | ||
163 | list_del(&page->lru); | ||
164 | spin_unlock_bh(&mm->context.list_lock); | ||
165 | __crst_table_free(mm, table); | ||
166 | } | ||
167 | |||
168 | void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table) | ||
169 | { | ||
170 | struct rcu_table_freelist *batch; | ||
171 | struct page *page = virt_to_page(table); | ||
172 | |||
173 | spin_lock_bh(&mm->context.list_lock); | ||
174 | list_del(&page->lru); | ||
175 | spin_unlock_bh(&mm->context.list_lock); | ||
176 | if (atomic_read(&mm->mm_users) < 2 && | ||
177 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { | ||
178 | __crst_table_free(mm, table); | ||
179 | return; | ||
180 | } | ||
181 | batch = rcu_table_freelist_get(mm); | ||
182 | if (!batch) { | ||
183 | smp_call_function(smp_sync, NULL, 1); | ||
184 | __crst_table_free(mm, table); | ||
185 | return; | ||
186 | } | ||
187 | batch->table[--batch->crst_index] = table; | ||
188 | if (batch->pgt_index >= batch->crst_index) | ||
189 | rcu_table_freelist_finish(); | ||
190 | } | ||
191 | |||
100 | #ifdef CONFIG_64BIT | 192 | #ifdef CONFIG_64BIT |
101 | int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) | 193 | int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) |
102 | { | 194 | { |
@@ -108,7 +200,7 @@ repeat: | |||
108 | table = crst_table_alloc(mm, mm->context.noexec); | 200 | table = crst_table_alloc(mm, mm->context.noexec); |
109 | if (!table) | 201 | if (!table) |
110 | return -ENOMEM; | 202 | return -ENOMEM; |
111 | spin_lock(&mm->page_table_lock); | 203 | spin_lock_bh(&mm->page_table_lock); |
112 | if (mm->context.asce_limit < limit) { | 204 | if (mm->context.asce_limit < limit) { |
113 | pgd = (unsigned long *) mm->pgd; | 205 | pgd = (unsigned long *) mm->pgd; |
114 | if (mm->context.asce_limit <= (1UL << 31)) { | 206 | if (mm->context.asce_limit <= (1UL << 31)) { |
@@ -130,7 +222,7 @@ repeat: | |||
130 | mm->task_size = mm->context.asce_limit; | 222 | mm->task_size = mm->context.asce_limit; |
131 | table = NULL; | 223 | table = NULL; |
132 | } | 224 | } |
133 | spin_unlock(&mm->page_table_lock); | 225 | spin_unlock_bh(&mm->page_table_lock); |
134 | if (table) | 226 | if (table) |
135 | crst_table_free(mm, table); | 227 | crst_table_free(mm, table); |
136 | if (mm->context.asce_limit < limit) | 228 | if (mm->context.asce_limit < limit) |
@@ -182,7 +274,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
182 | unsigned long bits; | 274 | unsigned long bits; |
183 | 275 | ||
184 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 276 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
185 | spin_lock(&mm->context.list_lock); | 277 | spin_lock_bh(&mm->context.list_lock); |
186 | page = NULL; | 278 | page = NULL; |
187 | if (!list_empty(&mm->context.pgtable_list)) { | 279 | if (!list_empty(&mm->context.pgtable_list)) { |
188 | page = list_first_entry(&mm->context.pgtable_list, | 280 | page = list_first_entry(&mm->context.pgtable_list, |
@@ -191,7 +283,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
191 | page = NULL; | 283 | page = NULL; |
192 | } | 284 | } |
193 | if (!page) { | 285 | if (!page) { |
194 | spin_unlock(&mm->context.list_lock); | 286 | spin_unlock_bh(&mm->context.list_lock); |
195 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); | 287 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); |
196 | if (!page) | 288 | if (!page) |
197 | return NULL; | 289 | return NULL; |
@@ -202,7 +294,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
202 | clear_table_pgstes(table); | 294 | clear_table_pgstes(table); |
203 | else | 295 | else |
204 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | 296 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); |
205 | spin_lock(&mm->context.list_lock); | 297 | spin_lock_bh(&mm->context.list_lock); |
206 | list_add(&page->lru, &mm->context.pgtable_list); | 298 | list_add(&page->lru, &mm->context.pgtable_list); |
207 | } | 299 | } |
208 | table = (unsigned long *) page_to_phys(page); | 300 | table = (unsigned long *) page_to_phys(page); |
@@ -213,10 +305,25 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
213 | page->flags |= bits; | 305 | page->flags |= bits; |
214 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) | 306 | if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) |
215 | list_move_tail(&page->lru, &mm->context.pgtable_list); | 307 | list_move_tail(&page->lru, &mm->context.pgtable_list); |
216 | spin_unlock(&mm->context.list_lock); | 308 | spin_unlock_bh(&mm->context.list_lock); |
217 | return table; | 309 | return table; |
218 | } | 310 | } |
219 | 311 | ||
312 | static void __page_table_free(struct mm_struct *mm, unsigned long *table) | ||
313 | { | ||
314 | struct page *page; | ||
315 | unsigned long bits; | ||
316 | |||
317 | bits = ((unsigned long) table) & 15; | ||
318 | table = (unsigned long *)(((unsigned long) table) ^ bits); | ||
319 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | ||
320 | page->flags ^= bits; | ||
321 | if (!(page->flags & FRAG_MASK)) { | ||
322 | pgtable_page_dtor(page); | ||
323 | __free_page(page); | ||
324 | } | ||
325 | } | ||
326 | |||
220 | void page_table_free(struct mm_struct *mm, unsigned long *table) | 327 | void page_table_free(struct mm_struct *mm, unsigned long *table) |
221 | { | 328 | { |
222 | struct page *page; | 329 | struct page *page; |
@@ -225,7 +332,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
225 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | 332 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; |
226 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | 333 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); |
227 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 334 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
228 | spin_lock(&mm->context.list_lock); | 335 | spin_lock_bh(&mm->context.list_lock); |
229 | page->flags ^= bits; | 336 | page->flags ^= bits; |
230 | if (page->flags & FRAG_MASK) { | 337 | if (page->flags & FRAG_MASK) { |
231 | /* Page now has some free pgtable fragments. */ | 338 | /* Page now has some free pgtable fragments. */ |
@@ -234,18 +341,48 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) | |||
234 | } else | 341 | } else |
235 | /* All fragments of the 4K page have been freed. */ | 342 | /* All fragments of the 4K page have been freed. */ |
236 | list_del(&page->lru); | 343 | list_del(&page->lru); |
237 | spin_unlock(&mm->context.list_lock); | 344 | spin_unlock_bh(&mm->context.list_lock); |
238 | if (page) { | 345 | if (page) { |
239 | pgtable_page_dtor(page); | 346 | pgtable_page_dtor(page); |
240 | __free_page(page); | 347 | __free_page(page); |
241 | } | 348 | } |
242 | } | 349 | } |
243 | 350 | ||
351 | void page_table_free_rcu(struct mm_struct *mm, unsigned long *table) | ||
352 | { | ||
353 | struct rcu_table_freelist *batch; | ||
354 | struct page *page; | ||
355 | unsigned long bits; | ||
356 | |||
357 | if (atomic_read(&mm->mm_users) < 2 && | ||
358 | cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { | ||
359 | page_table_free(mm, table); | ||
360 | return; | ||
361 | } | ||
362 | batch = rcu_table_freelist_get(mm); | ||
363 | if (!batch) { | ||
364 | smp_call_function(smp_sync, NULL, 1); | ||
365 | page_table_free(mm, table); | ||
366 | return; | ||
367 | } | ||
368 | bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; | ||
369 | bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); | ||
370 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | ||
371 | spin_lock_bh(&mm->context.list_lock); | ||
372 | /* Delayed freeing with rcu prevents reuse of pgtable fragments */ | ||
373 | list_del_init(&page->lru); | ||
374 | spin_unlock_bh(&mm->context.list_lock); | ||
375 | table = (unsigned long *)(((unsigned long) table) | bits); | ||
376 | batch->table[batch->pgt_index++] = table; | ||
377 | if (batch->pgt_index >= batch->crst_index) | ||
378 | rcu_table_freelist_finish(); | ||
379 | } | ||
380 | |||
244 | void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | 381 | void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) |
245 | { | 382 | { |
246 | struct page *page; | 383 | struct page *page; |
247 | 384 | ||
248 | spin_lock(&mm->context.list_lock); | 385 | spin_lock_bh(&mm->context.list_lock); |
249 | /* Free shadow region and segment tables. */ | 386 | /* Free shadow region and segment tables. */ |
250 | list_for_each_entry(page, &mm->context.crst_list, lru) | 387 | list_for_each_entry(page, &mm->context.crst_list, lru) |
251 | if (page->index) { | 388 | if (page->index) { |
@@ -255,7 +392,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) | |||
255 | /* "Free" second halves of page tables. */ | 392 | /* "Free" second halves of page tables. */ |
256 | list_for_each_entry(page, &mm->context.pgtable_list, lru) | 393 | list_for_each_entry(page, &mm->context.pgtable_list, lru) |
257 | page->flags &= ~SECOND_HALVES; | 394 | page->flags &= ~SECOND_HALVES; |
258 | spin_unlock(&mm->context.list_lock); | 395 | spin_unlock_bh(&mm->context.list_lock); |
259 | mm->context.noexec = 0; | 396 | mm->context.noexec = 0; |
260 | update_mm(mm, tsk); | 397 | update_mm(mm, tsk); |
261 | } | 398 | } |
@@ -312,6 +449,8 @@ int s390_enable_sie(void) | |||
312 | tsk->mm = tsk->active_mm = mm; | 449 | tsk->mm = tsk->active_mm = mm; |
313 | preempt_disable(); | 450 | preempt_disable(); |
314 | update_mm(mm, tsk); | 451 | update_mm(mm, tsk); |
452 | atomic_inc(&mm->context.attach_count); | ||
453 | atomic_dec(&old_mm->context.attach_count); | ||
315 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | 454 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); |
316 | preempt_enable(); | 455 | preempt_enable(); |
317 | task_unlock(tsk); | 456 | task_unlock(tsk); |
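The rcu_table_freelist introduced in pgtable.c batches page-table pointers from the front of a page-sized array and crst-table pointers from the back, flushing the whole batch through call_rcu() once the two indices meet. A toy standalone model of that two-ended batching, with the RCU callback replaced by a plain flush() for illustration:

#include <stdio.h>

#define FREELIST_SIZE 8                 /* the real size is derived from PAGE_SIZE */

struct batch {
        unsigned int pgt_index;         /* grows upward from 0       */
        unsigned int crst_index;        /* shrinks downward from end */
        void *table[FREELIST_SIZE];
};

static void flush(struct batch *b)      /* real code: call_rcu() the batch */
{
        printf("flush: %u pgt + %u crst entries\n",
               b->pgt_index, FREELIST_SIZE - b->crst_index);
        b->pgt_index = 0;
        b->crst_index = FREELIST_SIZE;
}

static void add_pgt(struct batch *b, void *table)
{
        b->table[b->pgt_index++] = table;
        if (b->pgt_index >= b->crst_index)
                flush(b);
}

static void add_crst(struct batch *b, void *table)
{
        b->table[--b->crst_index] = table;
        if (b->pgt_index >= b->crst_index)
                flush(b);
}

int main(void)
{
        struct batch b = { .pgt_index = 0, .crst_index = FREELIST_SIZE };
        int i;

        for (i = 0; i < 10; i++)
                add_pgt(&b, NULL);      /* flushes once after 8 entries */
        add_crst(&b, NULL);
        flush(&b);                      /* drain the remainder          */
        return 0;
}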
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index 7b01bc609de3..c3425bb3a1f6 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c | |||
@@ -1303,13 +1303,11 @@ static int __init hvc_iucv_init(void) | |||
1303 | if (rc) { | 1303 | if (rc) { |
1304 | pr_err("Registering IUCV handlers failed with error code=%d\n", | 1304 | pr_err("Registering IUCV handlers failed with error code=%d\n", |
1305 | rc); | 1305 | rc); |
1306 | goto out_error_iucv; | 1306 | goto out_error_hvc; |
1307 | } | 1307 | } |
1308 | 1308 | ||
1309 | return 0; | 1309 | return 0; |
1310 | 1310 | ||
1311 | out_error_iucv: | ||
1312 | iucv_unregister(&hvc_iucv_handler, 0); | ||
1313 | out_error_hvc: | 1311 | out_error_hvc: |
1314 | for (i = 0; i < hvc_iucv_devices; i++) | 1312 | for (i = 0; i < hvc_iucv_devices; i++) |
1315 | if (hvc_iucv_table[i]) | 1313 | if (hvc_iucv_table[i]) |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index aa95f1001761..fb613d70c2cb 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1099,16 +1099,30 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1099 | cqr = (struct dasd_ccw_req *) intparm; | 1099 | cqr = (struct dasd_ccw_req *) intparm; |
1100 | if (!cqr || ((scsw_cc(&irb->scsw) == 1) && | 1100 | if (!cqr || ((scsw_cc(&irb->scsw) == 1) && |
1101 | (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && | 1101 | (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && |
1102 | (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) { | 1102 | ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) || |
1103 | (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND | | ||
1104 | SCSW_STCTL_ALERT_STATUS))))) { | ||
1103 | if (cqr && cqr->status == DASD_CQR_IN_IO) | 1105 | if (cqr && cqr->status == DASD_CQR_IN_IO) |
1104 | cqr->status = DASD_CQR_QUEUED; | 1106 | cqr->status = DASD_CQR_QUEUED; |
1107 | if (cqr) | ||
1108 | memcpy(&cqr->irb, irb, sizeof(*irb)); | ||
1105 | device = dasd_device_from_cdev_locked(cdev); | 1109 | device = dasd_device_from_cdev_locked(cdev); |
1106 | if (!IS_ERR(device)) { | 1110 | if (IS_ERR(device)) |
1107 | dasd_device_clear_timer(device); | 1111 | return; |
1108 | device->discipline->handle_unsolicited_interrupt(device, | 1112 | /* ignore unsolicited interrupts for DIAG discipline */ |
1109 | irb); | 1113 | if (device->discipline == dasd_diag_discipline_pointer) { |
1110 | dasd_put_device(device); | 1114 | dasd_put_device(device); |
1115 | return; | ||
1111 | } | 1116 | } |
1117 | device->discipline->dump_sense_dbf(device, irb, | ||
1118 | "unsolicited"); | ||
1119 | if ((device->features & DASD_FEATURE_ERPLOG)) | ||
1120 | device->discipline->dump_sense(device, cqr, | ||
1121 | irb); | ||
1122 | dasd_device_clear_timer(device); | ||
1123 | device->discipline->handle_unsolicited_interrupt(device, | ||
1124 | irb); | ||
1125 | dasd_put_device(device); | ||
1112 | return; | 1126 | return; |
1113 | } | 1127 | } |
1114 | 1128 | ||
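The new memcpy above preserves the interrupt response block in the request before it is put back on the queue, so the status information is still available when the request is evaluated later. A hedged sketch of that idea with invented types (only struct irb is the real s390 type from <asm/cio.h>):

#include <linux/string.h>
#include <asm/cio.h>                    /* struct irb */

enum { DEMO_CQR_QUEUED = 1 };           /* stand-in for the driver's state */

struct demo_cqr {
        int status;
        struct irb irb;                 /* saved copy of the interrupt data */
};

static void demo_requeue(struct demo_cqr *cqr, struct irb *irb)
{
        cqr->status = DEMO_CQR_QUEUED;
        memcpy(&cqr->irb, irb, sizeof(*irb));   /* keep status for later */
}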
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index e82d427ff5eb..968c76cf7127 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -221,6 +221,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) | |||
221 | ccw->cmd_code = CCW_CMD_DCTL; | 221 | ccw->cmd_code = CCW_CMD_DCTL; |
222 | ccw->count = 4; | 222 | ccw->count = 4; |
223 | ccw->cda = (__u32)(addr_t) DCTL_data; | 223 | ccw->cda = (__u32)(addr_t) DCTL_data; |
224 | dctl_cqr->flags = erp->flags; | ||
224 | dctl_cqr->function = dasd_3990_erp_DCTL; | 225 | dctl_cqr->function = dasd_3990_erp_DCTL; |
225 | dctl_cqr->refers = erp; | 226 | dctl_cqr->refers = erp; |
226 | dctl_cqr->startdev = device; | 227 | dctl_cqr->startdev = device; |
@@ -1710,6 +1711,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) | |||
1710 | ccw->cda = cpa; | 1711 | ccw->cda = cpa; |
1711 | 1712 | ||
1712 | /* fill erp related fields */ | 1713 | /* fill erp related fields */ |
1714 | erp->flags = default_erp->flags; | ||
1713 | erp->function = dasd_3990_erp_action_1B_32; | 1715 | erp->function = dasd_3990_erp_action_1B_32; |
1714 | erp->refers = default_erp->refers; | 1716 | erp->refers = default_erp->refers; |
1715 | erp->startdev = device; | 1717 | erp->startdev = device; |
@@ -2354,6 +2356,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr) | |||
2354 | ccw->cda = (long)(cqr->cpaddr); | 2356 | ccw->cda = (long)(cqr->cpaddr); |
2355 | } | 2357 | } |
2356 | 2358 | ||
2359 | erp->flags = cqr->flags; | ||
2357 | erp->function = dasd_3990_erp_add_erp; | 2360 | erp->function = dasd_3990_erp_add_erp; |
2358 | erp->refers = cqr; | 2361 | erp->refers = cqr; |
2359 | erp->startdev = device; | 2362 | erp->startdev = device; |
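The erp->flags assignments added above make each recovery request inherit the options of the request it is built for (for example failfast behaviour) instead of starting with empty flags. Sketched generically with invented types:

struct demo_req {
        unsigned long flags;            /* request options */
        struct demo_req *refers;        /* request being recovered */
};

static void demo_add_erp(struct demo_req *erp, struct demo_req *failed)
{
        erp->flags = failed->flags;     /* carry options into the retry */
        erp->refers = failed;
}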
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 2b3bc3ec0541..266b34b55403 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -228,25 +228,22 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr) | |||
228 | } | 228 | } |
229 | 229 | ||
230 | /* Handle external interruption. */ | 230 | /* Handle external interruption. */ |
231 | static void | 231 | static void dasd_ext_handler(unsigned int ext_int_code, |
232 | dasd_ext_handler(__u16 code) | 232 | unsigned int param32, unsigned long param64) |
233 | { | 233 | { |
234 | struct dasd_ccw_req *cqr, *next; | 234 | struct dasd_ccw_req *cqr, *next; |
235 | struct dasd_device *device; | 235 | struct dasd_device *device; |
236 | unsigned long long expires; | 236 | unsigned long long expires; |
237 | unsigned long flags; | 237 | unsigned long flags; |
238 | u8 int_code, status; | ||
239 | addr_t ip; | 238 | addr_t ip; |
240 | int rc; | 239 | int rc; |
241 | 240 | ||
242 | int_code = *((u8 *) DASD_DIAG_LC_INT_CODE); | 241 | switch (ext_int_code >> 24) { |
243 | status = *((u8 *) DASD_DIAG_LC_INT_STATUS); | ||
244 | switch (int_code) { | ||
245 | case DASD_DIAG_CODE_31BIT: | 242 | case DASD_DIAG_CODE_31BIT: |
246 | ip = (addr_t) *((u32 *) DASD_DIAG_LC_INT_PARM_31BIT); | 243 | ip = (addr_t) param32; |
247 | break; | 244 | break; |
248 | case DASD_DIAG_CODE_64BIT: | 245 | case DASD_DIAG_CODE_64BIT: |
249 | ip = (addr_t) *((u64 *) DASD_DIAG_LC_INT_PARM_64BIT); | 246 | ip = (addr_t) param64; |
250 | break; | 247 | break; |
251 | default: | 248 | default: |
252 | return; | 249 | return; |
@@ -281,7 +278,7 @@ dasd_ext_handler(__u16 code) | |||
281 | cqr->stopclk = get_clock(); | 278 | cqr->stopclk = get_clock(); |
282 | 279 | ||
283 | expires = 0; | 280 | expires = 0; |
284 | if (status == 0) { | 281 | if ((ext_int_code & 0xff0000) == 0) { |
285 | cqr->status = DASD_CQR_SUCCESS; | 282 | cqr->status = DASD_CQR_SUCCESS; |
286 | /* Start first request on queue if possible -> fast_io. */ | 283 | /* Start first request on queue if possible -> fast_io. */ |
287 | if (!list_empty(&device->ccw_queue)) { | 284 | if (!list_empty(&device->ccw_queue)) { |
@@ -296,8 +293,8 @@ dasd_ext_handler(__u16 code) | |||
296 | } else { | 293 | } else { |
297 | cqr->status = DASD_CQR_QUEUED; | 294 | cqr->status = DASD_CQR_QUEUED; |
298 | DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " | 295 | DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " |
299 | "request %p was %d (%d retries left)", cqr, status, | 296 | "request %p was %d (%d retries left)", cqr, |
300 | cqr->retries); | 297 | (ext_int_code >> 16) & 0xff, cqr->retries); |
301 | dasd_diag_erp(device); | 298 | dasd_diag_erp(device); |
302 | } | 299 | } |
303 | 300 | ||
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h index b8c78267ff3e..4f71fbe60c82 100644 --- a/drivers/s390/block/dasd_diag.h +++ b/drivers/s390/block/dasd_diag.h | |||
@@ -18,10 +18,6 @@ | |||
18 | #define DEV_CLASS_FBA 0x01 | 18 | #define DEV_CLASS_FBA 0x01 |
19 | #define DEV_CLASS_ECKD 0x04 | 19 | #define DEV_CLASS_ECKD 0x04 |
20 | 20 | ||
21 | #define DASD_DIAG_LC_INT_CODE 132 | ||
22 | #define DASD_DIAG_LC_INT_STATUS 133 | ||
23 | #define DASD_DIAG_LC_INT_PARM_31BIT 128 | ||
24 | #define DASD_DIAG_LC_INT_PARM_64BIT 4536 | ||
25 | #define DASD_DIAG_CODE_31BIT 0x03 | 21 | #define DASD_DIAG_CODE_31BIT 0x03 |
26 | #define DASD_DIAG_CODE_64BIT 0x07 | 22 | #define DASD_DIAG_CODE_64BIT 0x07 |
27 | 23 | ||
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 59b4ecfb967b..50cf96389d2c 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -1776,13 +1776,13 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, | |||
1776 | } | 1776 | } |
1777 | 1777 | ||
1778 | /* summary unit check */ | 1778 | /* summary unit check */ |
1779 | if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && | 1779 | sense = dasd_get_sense(irb); |
1780 | (irb->ecw[7] == 0x0D)) { | 1780 | if (sense && (sense[7] == 0x0D) && |
1781 | (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { | ||
1781 | dasd_alias_handle_summary_unit_check(device, irb); | 1782 | dasd_alias_handle_summary_unit_check(device, irb); |
1782 | return; | 1783 | return; |
1783 | } | 1784 | } |
1784 | 1785 | ||
1785 | sense = dasd_get_sense(irb); | ||
1786 | /* service information message SIM */ | 1786 | /* service information message SIM */ |
1787 | if (sense && !(sense[27] & DASD_SENSE_BIT_0) && | 1787 | if (sense && !(sense[27] & DASD_SENSE_BIT_0) && |
1788 | ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { | 1788 | ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { |
@@ -1791,26 +1791,15 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, | |||
1791 | return; | 1791 | return; |
1792 | } | 1792 | } |
1793 | 1793 | ||
1794 | if ((scsw_cc(&irb->scsw) == 1) && | 1794 | if ((scsw_cc(&irb->scsw) == 1) && !sense && |
1795 | (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && | 1795 | (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) && |
1796 | (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) && | 1796 | (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) && |
1797 | (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) { | 1797 | (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) { |
1798 | /* fake irb do nothing, they are handled elsewhere */ | 1798 | /* fake irb do nothing, they are handled elsewhere */ |
1799 | dasd_schedule_device_bh(device); | 1799 | dasd_schedule_device_bh(device); |
1800 | return; | 1800 | return; |
1801 | } | 1801 | } |
1802 | 1802 | ||
1803 | if (!sense) { | ||
1804 | /* just report other unsolicited interrupts */ | ||
1805 | DBF_DEV_EVENT(DBF_ERR, device, "%s", | ||
1806 | "unsolicited interrupt received"); | ||
1807 | } else { | ||
1808 | DBF_DEV_EVENT(DBF_ERR, device, "%s", | ||
1809 | "unsolicited interrupt received " | ||
1810 | "(sense available)"); | ||
1811 | device->discipline->dump_sense_dbf(device, irb, "unsolicited"); | ||
1812 | } | ||
1813 | |||
1814 | dasd_schedule_device_bh(device); | 1803 | dasd_schedule_device_bh(device); |
1815 | return; | 1804 | return; |
1816 | }; | 1805 | }; |
@@ -3093,19 +3082,19 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, | |||
3093 | char *reason) | 3082 | char *reason) |
3094 | { | 3083 | { |
3095 | u64 *sense; | 3084 | u64 *sense; |
3085 | u64 *stat; | ||
3096 | 3086 | ||
3097 | sense = (u64 *) dasd_get_sense(irb); | 3087 | sense = (u64 *) dasd_get_sense(irb); |
3088 | stat = (u64 *) &irb->scsw; | ||
3098 | if (sense) { | 3089 | if (sense) { |
3099 | DBF_DEV_EVENT(DBF_EMERG, device, | 3090 | DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " |
3100 | "%s: %s %02x%02x%02x %016llx %016llx %016llx " | 3091 | "%016llx %016llx %016llx %016llx", |
3101 | "%016llx", reason, | 3092 | reason, *stat, *((u32 *) (stat + 1)), |
3102 | scsw_is_tm(&irb->scsw) ? "t" : "c", | 3093 | sense[0], sense[1], sense[2], sense[3]); |
3103 | scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw), | ||
3104 | scsw_dstat(&irb->scsw), sense[0], sense[1], | ||
3105 | sense[2], sense[3]); | ||
3106 | } else { | 3094 | } else { |
3107 | DBF_DEV_EVENT(DBF_EMERG, device, "%s", | 3095 | DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", |
3108 | "SORRY - NO VALID SENSE AVAILABLE\n"); | 3096 | reason, *stat, *((u32 *) (stat + 1)), |
3097 | "NO VALID SENSE"); | ||
3109 | } | 3098 | } |
3110 | } | 3099 | } |
3111 | 3100 | ||
@@ -3131,9 +3120,12 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, | |||
3131 | " I/O status report for device %s:\n", | 3120 | " I/O status report for device %s:\n", |
3132 | dev_name(&device->cdev->dev)); | 3121 | dev_name(&device->cdev->dev)); |
3133 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3122 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3134 | " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", | 3123 | " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " |
3135 | req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), | 3124 | "CS:%02X RC:%d\n", |
3136 | scsw_cc(&irb->scsw), req ? req->intrc : 0); | 3125 | req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), |
3126 | scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), | ||
3127 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), | ||
3128 | req ? req->intrc : 0); | ||
3137 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3129 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3138 | " device %s: Failing CCW: %p\n", | 3130 | " device %s: Failing CCW: %p\n", |
3139 | dev_name(&device->cdev->dev), | 3131 | dev_name(&device->cdev->dev), |
@@ -3234,11 +3226,13 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, | |||
3234 | " I/O status report for device %s:\n", | 3226 | " I/O status report for device %s:\n", |
3235 | dev_name(&device->cdev->dev)); | 3227 | dev_name(&device->cdev->dev)); |
3236 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3228 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3237 | " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d " | 3229 | " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " |
3238 | "fcxs: 0x%02X schxs: 0x%02X\n", req, | 3230 | "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", |
3239 | scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), | 3231 | req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), |
3240 | scsw_cc(&irb->scsw), req->intrc, | 3232 | scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), |
3241 | irb->scsw.tm.fcxs, irb->scsw.tm.schxs); | 3233 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), |
3234 | irb->scsw.tm.fcxs, irb->scsw.tm.schxs, | ||
3235 | req ? req->intrc : 0); | ||
3242 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | 3236 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER |
3243 | " device %s: Failing TCW: %p\n", | 3237 | " device %s: Failing TCW: %p\n", |
3244 | dev_name(&device->cdev->dev), | 3238 | dev_name(&device->cdev->dev), |
@@ -3246,7 +3240,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, | |||
3246 | 3240 | ||
3247 | tsb = NULL; | 3241 | tsb = NULL; |
3248 | sense = NULL; | 3242 | sense = NULL; |
3249 | if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01)) | 3243 | if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) |
3250 | tsb = tcw_get_tsb( | 3244 | tsb = tcw_get_tsb( |
3251 | (struct tcw *)(unsigned long)irb->scsw.tm.tcw); | 3245 | (struct tcw *)(unsigned long)irb->scsw.tm.tcw); |
3252 | 3246 | ||
@@ -3344,7 +3338,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, | |||
3344 | static void dasd_eckd_dump_sense(struct dasd_device *device, | 3338 | static void dasd_eckd_dump_sense(struct dasd_device *device, |
3345 | struct dasd_ccw_req *req, struct irb *irb) | 3339 | struct dasd_ccw_req *req, struct irb *irb) |
3346 | { | 3340 | { |
3347 | if (req && scsw_is_tm(&req->irb.scsw)) | 3341 | if (scsw_is_tm(&irb->scsw)) |
3348 | dasd_eckd_dump_sense_tcw(device, req, irb); | 3342 | dasd_eckd_dump_sense_tcw(device, req, irb); |
3349 | else | 3343 | else |
3350 | dasd_eckd_dump_sense_ccw(device, req, irb); | 3344 | dasd_eckd_dump_sense_ccw(device, req, irb); |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 2eb025592809..c4a6a31bd9cd 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -251,7 +251,6 @@ static ssize_t dasd_stats_proc_write(struct file *file, | |||
251 | buffer = dasd_get_user_string(user_buf, user_len); | 251 | buffer = dasd_get_user_string(user_buf, user_len); |
252 | if (IS_ERR(buffer)) | 252 | if (IS_ERR(buffer)) |
253 | return PTR_ERR(buffer); | 253 | return PTR_ERR(buffer); |
254 | DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer); | ||
255 | 254 | ||
256 | /* check for valid verbs */ | 255 | /* check for valid verbs */ |
257 | str = skip_spaces(buffer); | 256 | str = skip_spaces(buffer); |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 5707a80b96b6..35cc4686b99b 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -395,16 +395,16 @@ __sclp_find_req(u32 sccb) | |||
395 | /* Handler for external interruption. Perform request post-processing. | 395 | /* Handler for external interruption. Perform request post-processing. |
396 | * Prepare read event data request if necessary. Start processing of next | 396 | * Prepare read event data request if necessary. Start processing of next |
397 | * request on queue. */ | 397 | * request on queue. */ |
398 | static void | 398 | static void sclp_interrupt_handler(unsigned int ext_int_code, |
399 | sclp_interrupt_handler(__u16 code) | 399 | unsigned int param32, unsigned long param64) |
400 | { | 400 | { |
401 | struct sclp_req *req; | 401 | struct sclp_req *req; |
402 | u32 finished_sccb; | 402 | u32 finished_sccb; |
403 | u32 evbuf_pending; | 403 | u32 evbuf_pending; |
404 | 404 | ||
405 | spin_lock(&sclp_lock); | 405 | spin_lock(&sclp_lock); |
406 | finished_sccb = S390_lowcore.ext_params & 0xfffffff8; | 406 | finished_sccb = param32 & 0xfffffff8; |
407 | evbuf_pending = S390_lowcore.ext_params & 0x3; | 407 | evbuf_pending = param32 & 0x3; |
408 | if (finished_sccb) { | 408 | if (finished_sccb) { |
409 | del_timer(&sclp_request_timer); | 409 | del_timer(&sclp_request_timer); |
410 | sclp_running_state = sclp_running_state_reset_pending; | 410 | sclp_running_state = sclp_running_state_reset_pending; |
@@ -819,12 +819,12 @@ EXPORT_SYMBOL(sclp_reactivate); | |||
819 | 819 | ||
820 | /* Handler for external interruption used during initialization. Modify | 820 | /* Handler for external interruption used during initialization. Modify |
821 | * request state to done. */ | 821 | * request state to done. */ |
822 | static void | 822 | static void sclp_check_handler(unsigned int ext_int_code, |
823 | sclp_check_handler(__u16 code) | 823 | unsigned int param32, unsigned long param64) |
824 | { | 824 | { |
825 | u32 finished_sccb; | 825 | u32 finished_sccb; |
826 | 826 | ||
827 | finished_sccb = S390_lowcore.ext_params & 0xfffffff8; | 827 | finished_sccb = param32 & 0xfffffff8; |
828 | /* Is this the interrupt we are waiting for? */ | 828 | /* Is this the interrupt we are waiting for? */ |
829 | if (finished_sccb == 0) | 829 | if (finished_sccb == 0) |
830 | return; | 830 | return; |
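The handlers above move from reading lowcore fields to taking the interrupt code and parameters as arguments. Below is a hedged sketch of a handler in the new style; the registration call, header and the 0x2401 service-signal subclass code are assumptions based on <asm/s390_ext.h> of this kernel series and are not taken from this patch.

#include <linux/init.h>
#include <linux/types.h>
#include <asm/s390_ext.h>       /* assumed: register_external_interrupt() */

static void demo_ext_handler(unsigned int ext_int_code,
                             unsigned int param32, unsigned long param64)
{
        u32 sccb = param32 & 0xfffffff8;        /* aligned SCCB address */

        if (!sccb)
                return;
        /* ... look up and complete the request identified by sccb ... */
}

static int __init demo_register(void)
{
        /* 0x2401 = service-signal external interrupt (assumption) */
        return register_external_interrupt(0x2401, demo_ext_handler);
}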
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 0d6dc4b92cc2..9f661426e4a1 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -215,7 +215,7 @@ static void vmlogrdr_iucv_message_pending(struct iucv_path *path, | |||
215 | 215 | ||
216 | static int vmlogrdr_get_recording_class_AB(void) | 216 | static int vmlogrdr_get_recording_class_AB(void) |
217 | { | 217 | { |
218 | char cp_command[]="QUERY COMMAND RECORDING "; | 218 | static const char cp_command[] = "QUERY COMMAND RECORDING "; |
219 | char cp_response[80]; | 219 | char cp_response[80]; |
220 | char *tail; | 220 | char *tail; |
221 | int len,i; | 221 | int len,i; |
@@ -638,7 +638,7 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver, | |||
638 | char *buf) | 638 | char *buf) |
639 | { | 639 | { |
640 | 640 | ||
641 | char cp_command[] = "QUERY RECORDING "; | 641 | static const char cp_command[] = "QUERY RECORDING "; |
642 | int len; | 642 | int len; |
643 | 643 | ||
644 | cpcmd(cp_command, buf, 4096, NULL); | 644 | cpcmd(cp_command, buf, 4096, NULL); |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 13cb60162e42..76058a5166ed 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -79,17 +79,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit, | |||
79 | int max_digit, int max_val) | 79 | int max_digit, int max_val) |
80 | { | 80 | { |
81 | int diff; | 81 | int diff; |
82 | unsigned int value; | ||
83 | 82 | ||
84 | diff = 0; | 83 | diff = 0; |
85 | *val = 0; | 84 | *val = 0; |
86 | 85 | ||
87 | while (isxdigit(**cp) && (diff <= max_digit)) { | 86 | while (diff <= max_digit) { |
87 | int value = hex_to_bin(**cp); | ||
88 | 88 | ||
89 | if (isdigit(**cp)) | 89 | if (value < 0) |
90 | value = **cp - '0'; | 90 | break; |
91 | else | ||
92 | value = tolower(**cp) - 'a' + 10; | ||
93 | *val = *val * 16 + value; | 91 | *val = *val * 16 + value; |
94 | (*cp)++; | 92 | (*cp)++; |
95 | diff++; | 93 | diff++; |
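hex_to_bin() (declared in <linux/kernel.h>) returns 0-15 for a valid hexadecimal digit and a negative value otherwise, so a single call both validates and converts, replacing the isxdigit()/isdigit()/tolower() sequence. A small sketch of the same loop shape with invented names:

#include <linux/kernel.h>       /* hex_to_bin() */

static int demo_parse_hex(const char *s, unsigned int *val, int max_digits)
{
        int digits = 0;

        *val = 0;
        while (digits < max_digits) {
                int v = hex_to_bin(*s);

                if (v < 0)
                        break;                  /* not a hex digit */
                *val = *val * 16 + v;
                s++;
                digits++;
        }
        return digits;                          /* digits consumed */
}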
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 6c9fa15aac7b..2d32233943a9 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/cio/chp.c | 2 | * drivers/s390/cio/chp.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999,2007 | 4 | * Copyright IBM Corp. 1999,2010 |
5 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) | 5 | * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) |
6 | * Arnd Bergmann (arndb@de.ibm.com) | 6 | * Arnd Bergmann (arndb@de.ibm.com) |
7 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | 7 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> |
@@ -54,12 +54,6 @@ static struct work_struct cfg_work; | |||
54 | /* Wait queue for configure completion events. */ | 54 | /* Wait queue for configure completion events. */ |
55 | static wait_queue_head_t cfg_wait_queue; | 55 | static wait_queue_head_t cfg_wait_queue; |
56 | 56 | ||
57 | /* Return channel_path struct for given chpid. */ | ||
58 | static inline struct channel_path *chpid_to_chp(struct chp_id chpid) | ||
59 | { | ||
60 | return channel_subsystems[chpid.cssid]->chps[chpid.id]; | ||
61 | } | ||
62 | |||
63 | /* Set vary state for given chpid. */ | 57 | /* Set vary state for given chpid. */ |
64 | static void set_chp_logically_online(struct chp_id chpid, int onoff) | 58 | static void set_chp_logically_online(struct chp_id chpid, int onoff) |
65 | { | 59 | { |
@@ -241,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev, | |||
241 | struct device_attribute *attr, char *buf) | 235 | struct device_attribute *attr, char *buf) |
242 | { | 236 | { |
243 | struct channel_path *chp = to_channelpath(dev); | 237 | struct channel_path *chp = to_channelpath(dev); |
238 | int status; | ||
244 | 239 | ||
245 | if (!chp) | 240 | mutex_lock(&chp->lock); |
246 | return 0; | 241 | status = chp->state; |
247 | return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") : | 242 | mutex_unlock(&chp->lock); |
248 | sprintf(buf, "offline\n")); | 243 | |
244 | return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n"); | ||
249 | } | 245 | } |
250 | 246 | ||
251 | static ssize_t chp_status_write(struct device *dev, | 247 | static ssize_t chp_status_write(struct device *dev, |
@@ -261,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev, | |||
261 | if (!num_args) | 257 | if (!num_args) |
262 | return count; | 258 | return count; |
263 | 259 | ||
264 | if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) | 260 | if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) { |
261 | mutex_lock(&cp->lock); | ||
265 | error = s390_vary_chpid(cp->chpid, 1); | 262 | error = s390_vary_chpid(cp->chpid, 1); |
266 | else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) | 263 | mutex_unlock(&cp->lock); |
264 | } else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) { | ||
265 | mutex_lock(&cp->lock); | ||
267 | error = s390_vary_chpid(cp->chpid, 0); | 266 | error = s390_vary_chpid(cp->chpid, 0); |
268 | else | 267 | mutex_unlock(&cp->lock); |
268 | } else | ||
269 | error = -EINVAL; | 269 | error = -EINVAL; |
270 | 270 | ||
271 | return error < 0 ? error : count; | 271 | return error < 0 ? error : count; |
272 | |||
273 | } | 272 | } |
274 | 273 | ||
275 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | 274 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); |
@@ -315,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, | |||
315 | char *buf) | 314 | char *buf) |
316 | { | 315 | { |
317 | struct channel_path *chp = to_channelpath(dev); | 316 | struct channel_path *chp = to_channelpath(dev); |
317 | u8 type; | ||
318 | 318 | ||
319 | if (!chp) | 319 | mutex_lock(&chp->lock); |
320 | return 0; | 320 | type = chp->desc.desc; |
321 | return sprintf(buf, "%x\n", chp->desc.desc); | 321 | mutex_unlock(&chp->lock); |
322 | return sprintf(buf, "%x\n", type); | ||
322 | } | 323 | } |
323 | 324 | ||
324 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | 325 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); |
@@ -395,6 +396,7 @@ int chp_new(struct chp_id chpid) | |||
395 | chp->state = 1; | 396 | chp->state = 1; |
396 | chp->dev.parent = &channel_subsystems[chpid.cssid]->device; | 397 | chp->dev.parent = &channel_subsystems[chpid.cssid]->device; |
397 | chp->dev.release = chp_release; | 398 | chp->dev.release = chp_release; |
399 | mutex_init(&chp->lock); | ||
398 | 400 | ||
399 | /* Obtain channel path description and fill it in. */ | 401 | /* Obtain channel path description and fill it in. */ |
400 | ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); | 402 | ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); |
@@ -464,7 +466,10 @@ void *chp_get_chp_desc(struct chp_id chpid) | |||
464 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | 466 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); |
465 | if (!desc) | 467 | if (!desc) |
466 | return NULL; | 468 | return NULL; |
469 | |||
470 | mutex_lock(&chp->lock); | ||
467 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | 471 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); |
472 | mutex_unlock(&chp->lock); | ||
468 | return desc; | 473 | return desc; |
469 | } | 474 | } |
470 | 475 | ||
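The sysfs show routines above now take chp->lock, copy the field of interest into a local, drop the lock, and only then format the output, so the mutex is never held across sprintf(). A hedged sketch of that pattern with invented names:

#include <linux/mutex.h>
#include <linux/kernel.h>       /* sprintf() */
#include <linux/types.h>

struct demo_path {
        struct mutex lock;      /* serializes state/desc updates */
        int state;
};

static ssize_t demo_status_show(struct demo_path *p, char *buf)
{
        int status;

        mutex_lock(&p->lock);
        status = p->state;      /* snapshot under the lock */
        mutex_unlock(&p->lock);

        return sprintf(buf, status ? "online\n" : "offline\n");
}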
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 26c3d2246176..12b4903d6fe3 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/cio/chp.h | 2 | * drivers/s390/cio/chp.h |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2007 | 4 | * Copyright IBM Corp. 2007,2010 |
5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | 5 | * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> |
6 | */ | 6 | */ |
7 | 7 | ||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/mutex.h> | ||
13 | #include <asm/chpid.h> | 14 | #include <asm/chpid.h> |
14 | #include "chsc.h" | 15 | #include "chsc.h" |
15 | #include "css.h" | 16 | #include "css.h" |
@@ -40,16 +41,23 @@ static inline int chp_test_bit(u8 *bitmap, int num) | |||
40 | 41 | ||
41 | 42 | ||
42 | struct channel_path { | 43 | struct channel_path { |
44 | struct device dev; | ||
43 | struct chp_id chpid; | 45 | struct chp_id chpid; |
46 | struct mutex lock; /* Serialize access to below members. */ | ||
44 | int state; | 47 | int state; |
45 | struct channel_path_desc desc; | 48 | struct channel_path_desc desc; |
46 | /* Channel-measurement related stuff: */ | 49 | /* Channel-measurement related stuff: */ |
47 | int cmg; | 50 | int cmg; |
48 | int shared; | 51 | int shared; |
49 | void *cmg_chars; | 52 | void *cmg_chars; |
50 | struct device dev; | ||
51 | }; | 53 | }; |
52 | 54 | ||
55 | /* Return channel_path struct for given chpid. */ | ||
56 | static inline struct channel_path *chpid_to_chp(struct chp_id chpid) | ||
57 | { | ||
58 | return channel_subsystems[chpid.cssid]->chps[chpid.id]; | ||
59 | } | ||
60 | |||
53 | int chp_get_status(struct chp_id chpid); | 61 | int chp_get_status(struct chp_id chpid); |
54 | u8 chp_get_sch_opm(struct subchannel *sch); | 62 | u8 chp_get_sch_opm(struct subchannel *sch); |
55 | int chp_is_registered(struct chp_id chpid); | 63 | int chp_is_registered(struct chp_id chpid); |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 4cbb1a6ca33c..1aaddea673e0 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/cio/chsc.c | 2 | * drivers/s390/cio/chsc.c |
3 | * S/390 common I/O routines -- channel subsystem call | 3 | * S/390 common I/O routines -- channel subsystem call |
4 | * | 4 | * |
5 | * Copyright IBM Corp. 1999,2008 | 5 | * Copyright IBM Corp. 1999,2010 |
6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | 6 | * Author(s): Ingo Adlung (adlung@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
@@ -29,8 +29,8 @@ | |||
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | 30 | ||
31 | static void *sei_page; | 31 | static void *sei_page; |
32 | static DEFINE_SPINLOCK(siosl_lock); | 32 | static void *chsc_page; |
33 | static DEFINE_SPINLOCK(sda_lock); | 33 | static DEFINE_SPINLOCK(chsc_page_lock); |
34 | 34 | ||
35 | /** | 35 | /** |
36 | * chsc_error_from_response() - convert a chsc response to an error | 36 | * chsc_error_from_response() - convert a chsc response to an error |
@@ -85,17 +85,15 @@ struct chsc_ssd_area { | |||
85 | 85 | ||
86 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | 86 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
87 | { | 87 | { |
88 | unsigned long page; | ||
89 | struct chsc_ssd_area *ssd_area; | 88 | struct chsc_ssd_area *ssd_area; |
90 | int ccode; | 89 | int ccode; |
91 | int ret; | 90 | int ret; |
92 | int i; | 91 | int i; |
93 | int mask; | 92 | int mask; |
94 | 93 | ||
95 | page = get_zeroed_page(GFP_KERNEL | GFP_DMA); | 94 | spin_lock_irq(&chsc_page_lock); |
96 | if (!page) | 95 | memset(chsc_page, 0, PAGE_SIZE); |
97 | return -ENOMEM; | 96 | ssd_area = chsc_page; |
98 | ssd_area = (struct chsc_ssd_area *) page; | ||
99 | ssd_area->request.length = 0x0010; | 97 | ssd_area->request.length = 0x0010; |
100 | ssd_area->request.code = 0x0004; | 98 | ssd_area->request.code = 0x0004; |
101 | ssd_area->ssid = schid.ssid; | 99 | ssd_area->ssid = schid.ssid; |
@@ -106,25 +104,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | |||
106 | /* Check response. */ | 104 | /* Check response. */ |
107 | if (ccode > 0) { | 105 | if (ccode > 0) { |
108 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | 106 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
109 | goto out_free; | 107 | goto out; |
110 | } | 108 | } |
111 | ret = chsc_error_from_response(ssd_area->response.code); | 109 | ret = chsc_error_from_response(ssd_area->response.code); |
112 | if (ret != 0) { | 110 | if (ret != 0) { |
113 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", | 111 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", |
114 | schid.ssid, schid.sch_no, | 112 | schid.ssid, schid.sch_no, |
115 | ssd_area->response.code); | 113 | ssd_area->response.code); |
116 | goto out_free; | 114 | goto out; |
117 | } | 115 | } |
118 | if (!ssd_area->sch_valid) { | 116 | if (!ssd_area->sch_valid) { |
119 | ret = -ENODEV; | 117 | ret = -ENODEV; |
120 | goto out_free; | 118 | goto out; |
121 | } | 119 | } |
122 | /* Copy data */ | 120 | /* Copy data */ |
123 | ret = 0; | 121 | ret = 0; |
124 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | 122 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
125 | if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && | 123 | if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && |
126 | (ssd_area->st != SUBCHANNEL_TYPE_MSG)) | 124 | (ssd_area->st != SUBCHANNEL_TYPE_MSG)) |
127 | goto out_free; | 125 | goto out; |
128 | ssd->path_mask = ssd_area->path_mask; | 126 | ssd->path_mask = ssd_area->path_mask; |
129 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; | 127 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; |
130 | for (i = 0; i < 8; i++) { | 128 | for (i = 0; i < 8; i++) { |
@@ -136,8 +134,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | |||
136 | if (ssd_area->fla_valid_mask & mask) | 134 | if (ssd_area->fla_valid_mask & mask) |
137 | ssd->fla[i] = ssd_area->fla[i]; | 135 | ssd->fla[i] = ssd_area->fla[i]; |
138 | } | 136 | } |
139 | out_free: | 137 | out: |
140 | free_page(page); | 138 | spin_unlock_irq(&chsc_page_lock); |
141 | return ret; | 139 | return ret; |
142 | } | 140 | } |
143 | 141 | ||
@@ -497,6 +495,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) | |||
497 | */ | 495 | */ |
498 | int chsc_chp_vary(struct chp_id chpid, int on) | 496 | int chsc_chp_vary(struct chp_id chpid, int on) |
499 | { | 497 | { |
498 | struct channel_path *chp = chpid_to_chp(chpid); | ||
500 | struct chp_link link; | 499 | struct chp_link link; |
501 | 500 | ||
502 | memset(&link, 0, sizeof(struct chp_link)); | 501 | memset(&link, 0, sizeof(struct chp_link)); |
@@ -506,11 +505,12 @@ int chsc_chp_vary(struct chp_id chpid, int on) | |||
506 | /* | 505 | /* |
507 | * Redo PathVerification on the devices the chpid connects to | 506 | * Redo PathVerification on the devices the chpid connects to |
508 | */ | 507 | */ |
509 | 508 | if (on) { | |
510 | if (on) | 509 | /* Try to update the channel path descriptor. */ |
510 | chsc_determine_base_channel_path_desc(chpid, &chp->desc); | ||
511 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, | 511 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, |
512 | __s390_vary_chpid_on, &link); | 512 | __s390_vary_chpid_on, &link); |
513 | else | 513 | } else |
514 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, | 514 | for_each_subchannel_staged(s390_subchannel_vary_chpid_off, |
515 | NULL, &link); | 515 | NULL, &link); |
516 | 516 | ||
@@ -552,7 +552,7 @@ cleanup: | |||
552 | return ret; | 552 | return ret; |
553 | } | 553 | } |
554 | 554 | ||
555 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | 555 | int __chsc_do_secm(struct channel_subsystem *css, int enable) |
556 | { | 556 | { |
557 | struct { | 557 | struct { |
558 | struct chsc_header request; | 558 | struct chsc_header request; |
@@ -573,7 +573,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
573 | } __attribute__ ((packed)) *secm_area; | 573 | } __attribute__ ((packed)) *secm_area; |
574 | int ret, ccode; | 574 | int ret, ccode; |
575 | 575 | ||
576 | secm_area = page; | 576 | spin_lock_irq(&chsc_page_lock); |
577 | memset(chsc_page, 0, PAGE_SIZE); | ||
578 | secm_area = chsc_page; | ||
577 | secm_area->request.length = 0x0050; | 579 | secm_area->request.length = 0x0050; |
578 | secm_area->request.code = 0x0016; | 580 | secm_area->request.code = 0x0016; |
579 | 581 | ||
@@ -584,8 +586,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
584 | secm_area->operation_code = enable ? 0 : 1; | 586 | secm_area->operation_code = enable ? 0 : 1; |
585 | 587 | ||
586 | ccode = chsc(secm_area); | 588 | ccode = chsc(secm_area); |
587 | if (ccode > 0) | 589 | if (ccode > 0) { |
588 | return (ccode == 3) ? -ENODEV : -EBUSY; | 590 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
591 | goto out; | ||
592 | } | ||
589 | 593 | ||
590 | switch (secm_area->response.code) { | 594 | switch (secm_area->response.code) { |
591 | case 0x0102: | 595 | case 0x0102: |
@@ -598,37 +602,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
598 | if (ret != 0) | 602 | if (ret != 0) |
599 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", | 603 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", |
600 | secm_area->response.code); | 604 | secm_area->response.code); |
605 | out: | ||
606 | spin_unlock_irq(&chsc_page_lock); | ||
601 | return ret; | 607 | return ret; |
602 | } | 608 | } |
603 | 609 | ||
604 | int | 610 | int |
605 | chsc_secm(struct channel_subsystem *css, int enable) | 611 | chsc_secm(struct channel_subsystem *css, int enable) |
606 | { | 612 | { |
607 | void *secm_area; | ||
608 | int ret; | 613 | int ret; |
609 | 614 | ||
610 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
611 | if (!secm_area) | ||
612 | return -ENOMEM; | ||
613 | |||
614 | if (enable && !css->cm_enabled) { | 615 | if (enable && !css->cm_enabled) { |
615 | css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 616 | css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
616 | css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 617 | css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
617 | if (!css->cub_addr1 || !css->cub_addr2) { | 618 | if (!css->cub_addr1 || !css->cub_addr2) { |
618 | free_page((unsigned long)css->cub_addr1); | 619 | free_page((unsigned long)css->cub_addr1); |
619 | free_page((unsigned long)css->cub_addr2); | 620 | free_page((unsigned long)css->cub_addr2); |
620 | free_page((unsigned long)secm_area); | ||
621 | return -ENOMEM; | 621 | return -ENOMEM; |
622 | } | 622 | } |
623 | } | 623 | } |
624 | ret = __chsc_do_secm(css, enable, secm_area); | 624 | ret = __chsc_do_secm(css, enable); |
625 | if (!ret) { | 625 | if (!ret) { |
626 | css->cm_enabled = enable; | 626 | css->cm_enabled = enable; |
627 | if (css->cm_enabled) { | 627 | if (css->cm_enabled) { |
628 | ret = chsc_add_cmg_attr(css); | 628 | ret = chsc_add_cmg_attr(css); |
629 | if (ret) { | 629 | if (ret) { |
630 | memset(secm_area, 0, PAGE_SIZE); | 630 | __chsc_do_secm(css, 0); |
631 | __chsc_do_secm(css, 0, secm_area); | ||
632 | css->cm_enabled = 0; | 631 | css->cm_enabled = 0; |
633 | } | 632 | } |
634 | } else | 633 | } else |
@@ -638,44 +637,24 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
638 | free_page((unsigned long)css->cub_addr1); | 637 | free_page((unsigned long)css->cub_addr1); |
639 | free_page((unsigned long)css->cub_addr2); | 638 | free_page((unsigned long)css->cub_addr2); |
640 | } | 639 | } |
641 | free_page((unsigned long)secm_area); | ||
642 | return ret; | 640 | return ret; |
643 | } | 641 | } |
644 | 642 | ||
645 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | 643 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
646 | int c, int m, | 644 | int c, int m, void *page) |
647 | struct chsc_response_struct *resp) | ||
648 | { | 645 | { |
646 | struct chsc_scpd *scpd_area; | ||
649 | int ccode, ret; | 647 | int ccode, ret; |
650 | 648 | ||
651 | struct { | ||
652 | struct chsc_header request; | ||
653 | u32 : 2; | ||
654 | u32 m : 1; | ||
655 | u32 c : 1; | ||
656 | u32 fmt : 4; | ||
657 | u32 cssid : 8; | ||
658 | u32 : 4; | ||
659 | u32 rfmt : 4; | ||
660 | u32 first_chpid : 8; | ||
661 | u32 : 24; | ||
662 | u32 last_chpid : 8; | ||
663 | u32 zeroes1; | ||
664 | struct chsc_header response; | ||
665 | u8 data[PAGE_SIZE - 20]; | ||
666 | } __attribute__ ((packed)) *scpd_area; | ||
667 | |||
668 | if ((rfmt == 1) && !css_general_characteristics.fcs) | 649 | if ((rfmt == 1) && !css_general_characteristics.fcs) |
669 | return -EINVAL; | 650 | return -EINVAL; |
670 | if ((rfmt == 2) && !css_general_characteristics.cib) | 651 | if ((rfmt == 2) && !css_general_characteristics.cib) |
671 | return -EINVAL; | 652 | return -EINVAL; |
672 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
673 | if (!scpd_area) | ||
674 | return -ENOMEM; | ||
675 | 653 | ||
654 | memset(page, 0, PAGE_SIZE); | ||
655 | scpd_area = page; | ||
676 | scpd_area->request.length = 0x0010; | 656 | scpd_area->request.length = 0x0010; |
677 | scpd_area->request.code = 0x0002; | 657 | scpd_area->request.code = 0x0002; |
678 | |||
679 | scpd_area->cssid = chpid.cssid; | 658 | scpd_area->cssid = chpid.cssid; |
680 | scpd_area->first_chpid = chpid.id; | 659 | scpd_area->first_chpid = chpid.id; |
681 | scpd_area->last_chpid = chpid.id; | 660 | scpd_area->last_chpid = chpid.id; |
@@ -685,20 +664,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | |||
685 | scpd_area->rfmt = rfmt; | 664 | scpd_area->rfmt = rfmt; |
686 | 665 | ||
687 | ccode = chsc(scpd_area); | 666 | ccode = chsc(scpd_area); |
688 | if (ccode > 0) { | 667 | if (ccode > 0) |
689 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | 668 | return (ccode == 3) ? -ENODEV : -EBUSY; |
690 | goto out; | ||
691 | } | ||
692 | 669 | ||
693 | ret = chsc_error_from_response(scpd_area->response.code); | 670 | ret = chsc_error_from_response(scpd_area->response.code); |
694 | if (ret == 0) | 671 | if (ret) |
695 | /* Success. */ | ||
696 | memcpy(resp, &scpd_area->response, scpd_area->response.length); | ||
697 | else | ||
698 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", | 672 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", |
699 | scpd_area->response.code); | 673 | scpd_area->response.code); |
700 | out: | ||
701 | free_page((unsigned long)scpd_area); | ||
702 | return ret; | 674 | return ret; |
703 | } | 675 | } |
704 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); | 676 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); |
@@ -707,17 +679,19 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid, | |||
707 | struct channel_path_desc *desc) | 679 | struct channel_path_desc *desc) |
708 | { | 680 | { |
709 | struct chsc_response_struct *chsc_resp; | 681 | struct chsc_response_struct *chsc_resp; |
682 | struct chsc_scpd *scpd_area; | ||
683 | unsigned long flags; | ||
710 | int ret; | 684 | int ret; |
711 | 685 | ||
712 | chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL); | 686 | spin_lock_irqsave(&chsc_page_lock, flags); |
713 | if (!chsc_resp) | 687 | scpd_area = chsc_page; |
714 | return -ENOMEM; | 688 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area); |
715 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp); | ||
716 | if (ret) | 689 | if (ret) |
717 | goto out_free; | 690 | goto out; |
691 | chsc_resp = (void *)&scpd_area->response; | ||
718 | memcpy(desc, &chsc_resp->data, sizeof(*desc)); | 692 | memcpy(desc, &chsc_resp->data, sizeof(*desc)); |
719 | out_free: | 693 | out: |
720 | kfree(chsc_resp); | 694 | spin_unlock_irqrestore(&chsc_page_lock, flags); |
721 | return ret; | 695 | return ret; |
722 | } | 696 | } |
723 | 697 | ||
@@ -725,33 +699,22 @@ static void | |||
725 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | 699 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, |
726 | struct cmg_chars *chars) | 700 | struct cmg_chars *chars) |
727 | { | 701 | { |
728 | switch (chp->cmg) { | 702 | struct cmg_chars *cmg_chars; |
729 | case 2: | 703 | int i, mask; |
730 | case 3: | 704 | |
731 | chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), | 705 | cmg_chars = chp->cmg_chars; |
732 | GFP_KERNEL); | 706 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { |
733 | if (chp->cmg_chars) { | 707 | mask = 0x80 >> (i + 3); |
734 | int i, mask; | 708 | if (cmcv & mask) |
735 | struct cmg_chars *cmg_chars; | 709 | cmg_chars->values[i] = chars->values[i]; |
736 | 710 | else | |
737 | cmg_chars = chp->cmg_chars; | 711 | cmg_chars->values[i] = 0; |
738 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { | ||
739 | mask = 0x80 >> (i + 3); | ||
740 | if (cmcv & mask) | ||
741 | cmg_chars->values[i] = chars->values[i]; | ||
742 | else | ||
743 | cmg_chars->values[i] = 0; | ||
744 | } | ||
745 | } | ||
746 | break; | ||
747 | default: | ||
748 | /* No cmg-dependent data. */ | ||
749 | break; | ||
750 | } | 712 | } |
751 | } | 713 | } |
752 | 714 | ||
753 | int chsc_get_channel_measurement_chars(struct channel_path *chp) | 715 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
754 | { | 716 | { |
717 | struct cmg_chars *cmg_chars; | ||
755 | int ccode, ret; | 718 | int ccode, ret; |
756 | 719 | ||
757 | struct { | 720 | struct { |
@@ -775,13 +738,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
775 | u32 data[NR_MEASUREMENT_CHARS]; | 738 | u32 data[NR_MEASUREMENT_CHARS]; |
776 | } __attribute__ ((packed)) *scmc_area; | 739 | } __attribute__ ((packed)) *scmc_area; |
777 | 740 | ||
778 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 741 | chp->cmg_chars = NULL; |
779 | if (!scmc_area) | 742 | cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL); |
743 | if (!cmg_chars) | ||
780 | return -ENOMEM; | 744 | return -ENOMEM; |
781 | 745 | ||
746 | spin_lock_irq(&chsc_page_lock); | ||
747 | memset(chsc_page, 0, PAGE_SIZE); | ||
748 | scmc_area = chsc_page; | ||
782 | scmc_area->request.length = 0x0010; | 749 | scmc_area->request.length = 0x0010; |
783 | scmc_area->request.code = 0x0022; | 750 | scmc_area->request.code = 0x0022; |
784 | |||
785 | scmc_area->first_chpid = chp->chpid.id; | 751 | scmc_area->first_chpid = chp->chpid.id; |
786 | scmc_area->last_chpid = chp->chpid.id; | 752 | scmc_area->last_chpid = chp->chpid.id; |
787 | 753 | ||
@@ -792,53 +758,65 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
792 | } | 758 | } |
793 | 759 | ||
794 | ret = chsc_error_from_response(scmc_area->response.code); | 760 | ret = chsc_error_from_response(scmc_area->response.code); |
795 | if (ret == 0) { | 761 | if (ret) { |
796 | /* Success. */ | ||
797 | if (!scmc_area->not_valid) { | ||
798 | chp->cmg = scmc_area->cmg; | ||
799 | chp->shared = scmc_area->shared; | ||
800 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, | ||
801 | (struct cmg_chars *) | ||
802 | &scmc_area->data); | ||
803 | } else { | ||
804 | chp->cmg = -1; | ||
805 | chp->shared = -1; | ||
806 | } | ||
807 | } else { | ||
808 | CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", | 762 | CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", |
809 | scmc_area->response.code); | 763 | scmc_area->response.code); |
764 | goto out; | ||
765 | } | ||
766 | if (scmc_area->not_valid) { | ||
767 | chp->cmg = -1; | ||
768 | chp->shared = -1; | ||
769 | goto out; | ||
810 | } | 770 | } |
771 | chp->cmg = scmc_area->cmg; | ||
772 | chp->shared = scmc_area->shared; | ||
773 | if (chp->cmg != 2 && chp->cmg != 3) { | ||
774 | /* No cmg-dependent data. */ | ||
775 | goto out; | ||
776 | } | ||
777 | chp->cmg_chars = cmg_chars; | ||
778 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, | ||
779 | (struct cmg_chars *) &scmc_area->data); | ||
811 | out: | 780 | out: |
812 | free_page((unsigned long)scmc_area); | 781 | spin_unlock_irq(&chsc_page_lock); |
782 | if (!chp->cmg_chars) | ||
783 | kfree(cmg_chars); | ||
784 | |||
813 | return ret; | 785 | return ret; |
814 | } | 786 | } |
815 | 787 | ||
816 | int __init chsc_alloc_sei_area(void) | 788 | int __init chsc_init(void) |
817 | { | 789 | { |
818 | int ret; | 790 | int ret; |
819 | 791 | ||
820 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 792 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
821 | if (!sei_page) { | 793 | chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
822 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " | 794 | if (!sei_page || !chsc_page) { |
823 | "chsc machine checks!\n"); | 795 | ret = -ENOMEM; |
824 | return -ENOMEM; | 796 | goto out_err; |
825 | } | 797 | } |
826 | ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); | 798 | ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); |
827 | if (ret) | 799 | if (ret) |
828 | kfree(sei_page); | 800 | goto out_err; |
801 | return ret; | ||
802 | out_err: | ||
803 | free_page((unsigned long)chsc_page); | ||
804 | free_page((unsigned long)sei_page); | ||
829 | return ret; | 805 | return ret; |
830 | } | 806 | } |
831 | 807 | ||
832 | void __init chsc_free_sei_area(void) | 808 | void __init chsc_init_cleanup(void) |
833 | { | 809 | { |
834 | crw_unregister_handler(CRW_RSC_CSS); | 810 | crw_unregister_handler(CRW_RSC_CSS); |
835 | kfree(sei_page); | 811 | free_page((unsigned long)chsc_page); |
812 | free_page((unsigned long)sei_page); | ||
836 | } | 813 | } |
837 | 814 | ||
838 | int chsc_enable_facility(int operation_code) | 815 | int chsc_enable_facility(int operation_code) |
839 | { | 816 | { |
817 | unsigned long flags; | ||
840 | int ret; | 818 | int ret; |
841 | static struct { | 819 | struct { |
842 | struct chsc_header request; | 820 | struct chsc_header request; |
843 | u8 reserved1:4; | 821 | u8 reserved1:4; |
844 | u8 format:4; | 822 | u8 format:4; |
@@ -851,32 +829,33 @@ int chsc_enable_facility(int operation_code) | |||
851 | u32 reserved5:4; | 829 | u32 reserved5:4; |
852 | u32 format2:4; | 830 | u32 format2:4; |
853 | u32 reserved6:24; | 831 | u32 reserved6:24; |
854 | } __attribute__ ((packed, aligned(4096))) sda_area; | 832 | } __attribute__ ((packed)) *sda_area; |
855 | 833 | ||
856 | spin_lock(&sda_lock); | 834 | spin_lock_irqsave(&chsc_page_lock, flags); |
857 | memset(&sda_area, 0, sizeof(sda_area)); | 835 | memset(chsc_page, 0, PAGE_SIZE); |
858 | sda_area.request.length = 0x0400; | 836 | sda_area = chsc_page; |
859 | sda_area.request.code = 0x0031; | 837 | sda_area->request.length = 0x0400; |
860 | sda_area.operation_code = operation_code; | 838 | sda_area->request.code = 0x0031; |
839 | sda_area->operation_code = operation_code; | ||
861 | 840 | ||
862 | ret = chsc(&sda_area); | 841 | ret = chsc(sda_area); |
863 | if (ret > 0) { | 842 | if (ret > 0) { |
864 | ret = (ret == 3) ? -ENODEV : -EBUSY; | 843 | ret = (ret == 3) ? -ENODEV : -EBUSY; |
865 | goto out; | 844 | goto out; |
866 | } | 845 | } |
867 | 846 | ||
868 | switch (sda_area.response.code) { | 847 | switch (sda_area->response.code) { |
869 | case 0x0101: | 848 | case 0x0101: |
870 | ret = -EOPNOTSUPP; | 849 | ret = -EOPNOTSUPP; |
871 | break; | 850 | break; |
872 | default: | 851 | default: |
873 | ret = chsc_error_from_response(sda_area.response.code); | 852 | ret = chsc_error_from_response(sda_area->response.code); |
874 | } | 853 | } |
875 | if (ret != 0) | 854 | if (ret != 0) |
876 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | 855 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", |
877 | operation_code, sda_area.response.code); | 856 | operation_code, sda_area->response.code); |
878 | out: | 857 | out: |
879 | spin_unlock(&sda_lock); | 858 | spin_unlock_irqrestore(&chsc_page_lock, flags); |
880 | return ret; | 859 | return ret; |
881 | } | 860 | } |
882 | 861 | ||
@@ -895,13 +874,12 @@ chsc_determine_css_characteristics(void) | |||
895 | struct chsc_header response; | 874 | struct chsc_header response; |
896 | u32 reserved4; | 875 | u32 reserved4; |
897 | u32 general_char[510]; | 876 | u32 general_char[510]; |
898 | u32 chsc_char[518]; | 877 | u32 chsc_char[508]; |
899 | } __attribute__ ((packed)) *scsc_area; | 878 | } __attribute__ ((packed)) *scsc_area; |
900 | 879 | ||
901 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 880 | spin_lock_irq(&chsc_page_lock); |
902 | if (!scsc_area) | 881 | memset(chsc_page, 0, PAGE_SIZE); |
903 | return -ENOMEM; | 882 | scsc_area = chsc_page; |
904 | |||
905 | scsc_area->request.length = 0x0010; | 883 | scsc_area->request.length = 0x0010; |
906 | scsc_area->request.code = 0x0010; | 884 | scsc_area->request.code = 0x0010; |
907 | 885 | ||
@@ -921,7 +899,7 @@ chsc_determine_css_characteristics(void) | |||
921 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", | 899 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", |
922 | scsc_area->response.code); | 900 | scsc_area->response.code); |
923 | exit: | 901 | exit: |
924 | free_page ((unsigned long) scsc_area); | 902 | spin_unlock_irq(&chsc_page_lock); |
925 | return result; | 903 | return result; |
926 | } | 904 | } |
927 | 905 | ||
@@ -976,29 +954,29 @@ int chsc_sstpi(void *page, void *result, size_t size) | |||
976 | return (rr->response.code == 0x0001) ? 0 : -EIO; | 954 | return (rr->response.code == 0x0001) ? 0 : -EIO; |
977 | } | 955 | } |
978 | 956 | ||
979 | static struct { | ||
980 | struct chsc_header request; | ||
981 | u32 word1; | ||
982 | struct subchannel_id sid; | ||
983 | u32 word3; | ||
984 | struct chsc_header response; | ||
985 | u32 word[11]; | ||
986 | } __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE))); | ||
987 | |||
988 | int chsc_siosl(struct subchannel_id schid) | 957 | int chsc_siosl(struct subchannel_id schid) |
989 | { | 958 | { |
959 | struct { | ||
960 | struct chsc_header request; | ||
961 | u32 word1; | ||
962 | struct subchannel_id sid; | ||
963 | u32 word3; | ||
964 | struct chsc_header response; | ||
965 | u32 word[11]; | ||
966 | } __attribute__ ((packed)) *siosl_area; | ||
990 | unsigned long flags; | 967 | unsigned long flags; |
991 | int ccode; | 968 | int ccode; |
992 | int rc; | 969 | int rc; |
993 | 970 | ||
994 | spin_lock_irqsave(&siosl_lock, flags); | 971 | spin_lock_irqsave(&chsc_page_lock, flags); |
995 | memset(&siosl_area, 0, sizeof(siosl_area)); | 972 | memset(chsc_page, 0, PAGE_SIZE); |
996 | siosl_area.request.length = 0x0010; | 973 | siosl_area = chsc_page; |
997 | siosl_area.request.code = 0x0046; | 974 | siosl_area->request.length = 0x0010; |
998 | siosl_area.word1 = 0x80000000; | 975 | siosl_area->request.code = 0x0046; |
999 | siosl_area.sid = schid; | 976 | siosl_area->word1 = 0x80000000; |
977 | siosl_area->sid = schid; | ||
1000 | 978 | ||
1001 | ccode = chsc(&siosl_area); | 979 | ccode = chsc(siosl_area); |
1002 | if (ccode > 0) { | 980 | if (ccode > 0) { |
1003 | if (ccode == 3) | 981 | if (ccode == 3) |
1004 | rc = -ENODEV; | 982 | rc = -ENODEV; |
@@ -1008,17 +986,16 @@ int chsc_siosl(struct subchannel_id schid) | |||
1008 | schid.ssid, schid.sch_no, ccode); | 986 | schid.ssid, schid.sch_no, ccode); |
1009 | goto out; | 987 | goto out; |
1010 | } | 988 | } |
1011 | rc = chsc_error_from_response(siosl_area.response.code); | 989 | rc = chsc_error_from_response(siosl_area->response.code); |
1012 | if (rc) | 990 | if (rc) |
1013 | CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", | 991 | CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", |
1014 | schid.ssid, schid.sch_no, | 992 | schid.ssid, schid.sch_no, |
1015 | siosl_area.response.code); | 993 | siosl_area->response.code); |
1016 | else | 994 | else |
1017 | CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", | 995 | CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", |
1018 | schid.ssid, schid.sch_no); | 996 | schid.ssid, schid.sch_no); |
1019 | out: | 997 | out: |
1020 | spin_unlock_irqrestore(&siosl_lock, flags); | 998 | spin_unlock_irqrestore(&chsc_page_lock, flags); |
1021 | |||
1022 | return rc; | 999 | return rc; |
1023 | } | 1000 | } |
1024 | EXPORT_SYMBOL_GPL(chsc_siosl); | 1001 | EXPORT_SYMBOL_GPL(chsc_siosl); |
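Throughout chsc.c the per-call get_zeroed_page() allocations are replaced by one statically allocated DMA page (chsc_page) that every request builder reuses under chsc_page_lock. A hedged, generic sketch of that single-buffer pattern follows; the names and the fill/submit callback split are invented for illustration only.

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/page.h>

static void *demo_cmd_page;                     /* shared request buffer */
static DEFINE_SPINLOCK(demo_cmd_lock);

static int demo_issue(void (*fill)(void *page), int (*submit)(void *page))
{
        int rc;

        spin_lock_irq(&demo_cmd_lock);
        memset(demo_cmd_page, 0, PAGE_SIZE);    /* fresh buffer per call */
        fill(demo_cmd_page);                    /* build the request block */
        rc = submit(demo_cmd_page);             /* perform the call */
        spin_unlock_irq(&demo_cmd_lock);
        return rc;
}

static int demo_setup(void)
{
        demo_cmd_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        return demo_cmd_page ? 0 : -ENOMEM;
}

The trade-off of this pattern is that callers serialize on one buffer and must not sleep while holding it, which is why the real code uses the _irq/_irqsave lock variants around each request.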
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 5453013f094b..6693f5e3176f 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -57,21 +57,39 @@ struct chsc_ssd_info { | |||
57 | struct chp_id chpid[8]; | 57 | struct chp_id chpid[8]; |
58 | u16 fla[8]; | 58 | u16 fla[8]; |
59 | }; | 59 | }; |
60 | |||
61 | struct chsc_scpd { | ||
62 | struct chsc_header request; | ||
63 | u32:2; | ||
64 | u32 m:1; | ||
65 | u32 c:1; | ||
66 | u32 fmt:4; | ||
67 | u32 cssid:8; | ||
68 | u32:4; | ||
69 | u32 rfmt:4; | ||
70 | u32 first_chpid:8; | ||
71 | u32:24; | ||
72 | u32 last_chpid:8; | ||
73 | u32 zeroes1; | ||
74 | struct chsc_header response; | ||
75 | u8 data[PAGE_SIZE - 20]; | ||
76 | } __attribute__ ((packed)); | ||
77 | |||
78 | |||
60 | extern int chsc_get_ssd_info(struct subchannel_id schid, | 79 | extern int chsc_get_ssd_info(struct subchannel_id schid, |
61 | struct chsc_ssd_info *ssd); | 80 | struct chsc_ssd_info *ssd); |
62 | extern int chsc_determine_css_characteristics(void); | 81 | extern int chsc_determine_css_characteristics(void); |
63 | extern int chsc_alloc_sei_area(void); | 82 | extern int chsc_init(void); |
64 | extern void chsc_free_sei_area(void); | 83 | extern void chsc_init_cleanup(void); |
65 | 84 | ||
66 | extern int chsc_enable_facility(int); | 85 | extern int chsc_enable_facility(int); |
67 | struct channel_subsystem; | 86 | struct channel_subsystem; |
68 | extern int chsc_secm(struct channel_subsystem *, int); | 87 | extern int chsc_secm(struct channel_subsystem *, int); |
69 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); | 88 | int __chsc_do_secm(struct channel_subsystem *css, int enable); |
70 | 89 | ||
71 | int chsc_chp_vary(struct chp_id chpid, int on); | 90 | int chsc_chp_vary(struct chp_id chpid, int on); |
72 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | 91 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
73 | int c, int m, | 92 | int c, int m, void *page); |
74 | struct chsc_response_struct *resp); | ||
75 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, | 93 | int chsc_determine_base_channel_path_desc(struct chp_id chpid, |
76 | struct channel_path_desc *desc); | 94 | struct channel_path_desc *desc); |
77 | void chsc_chp_online(struct chp_id chpid); | 95 | void chsc_chp_online(struct chp_id chpid); |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index f2b77e7bfc6f..3c3f3ffe2179 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -688,25 +688,31 @@ out_free: | |||
688 | 688 | ||
689 | static int chsc_ioctl_chpd(void __user *user_chpd) | 689 | static int chsc_ioctl_chpd(void __user *user_chpd) |
690 | { | 690 | { |
691 | struct chsc_scpd *scpd_area; | ||
691 | struct chsc_cpd_info *chpd; | 692 | struct chsc_cpd_info *chpd; |
692 | int ret; | 693 | int ret; |
693 | 694 | ||
694 | chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); | 695 | chpd = kzalloc(sizeof(*chpd), GFP_KERNEL); |
695 | if (!chpd) | 696 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
696 | return -ENOMEM; | 697 | if (!scpd_area || !chpd) { |
698 | ret = -ENOMEM; | ||
699 | goto out_free; | ||
700 | } | ||
697 | if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { | 701 | if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) { |
698 | ret = -EFAULT; | 702 | ret = -EFAULT; |
699 | goto out_free; | 703 | goto out_free; |
700 | } | 704 | } |
701 | ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, | 705 | ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, |
702 | chpd->rfmt, chpd->c, chpd->m, | 706 | chpd->rfmt, chpd->c, chpd->m, |
703 | &chpd->chpdb); | 707 | scpd_area); |
704 | if (ret) | 708 | if (ret) |
705 | goto out_free; | 709 | goto out_free; |
710 | memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length); | ||
706 | if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) | 711 | if (copy_to_user(user_chpd, chpd, sizeof(*chpd))) |
707 | ret = -EFAULT; | 712 | ret = -EFAULT; |
708 | out_free: | 713 | out_free: |
709 | kfree(chpd); | 714 | kfree(chpd); |
715 | free_page((unsigned long)scpd_area); | ||
710 | return ret; | 716 | return ret; |
711 | } | 717 | } |
712 | 718 | ||
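Together with the chsc.h prototype change above, the scratch-buffer handling moves to the caller: chsc_determine_channel_path_desc() now fills a caller-supplied DMA-capable page, and the caller copies out the length-prefixed response block itself. A condensed sketch of that calling convention, mirroring the ioctl path in this hunk; the helper name is illustrative and the chsc structure definitions are assumed to come from the cio chsc headers.

#include <linux/gfp.h>
#include <linux/string.h>

/* Hypothetical helper; 'chpd' is a struct chsc_cpd_info as in chsc_ioctl_chpd(). */
static int sample_get_chp_desc(struct chsc_cpd_info *chpd)
{
	struct chsc_scpd *scpd_area;
	int ret;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;
	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt, chpd->rfmt,
					       chpd->c, chpd->m, scpd_area);
	if (!ret)
		memcpy(&chpd->chpdb, &scpd_area->response,
		       scpd_area->response.length);
	free_page((unsigned long)scpd_area);
	return ret;
}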
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index ca8e1c240c3c..a5050e217150 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * driver for channel subsystem | 2 | * driver for channel subsystem |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2002, 2009 | 4 | * Copyright IBM Corp. 2002, 2010 |
5 | * | 5 | * |
6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
@@ -577,7 +577,7 @@ static int __unset_registered(struct device *dev, void *data) | |||
577 | return 0; | 577 | return 0; |
578 | } | 578 | } |
579 | 579 | ||
580 | void css_schedule_eval_all_unreg(void) | 580 | static void css_schedule_eval_all_unreg(void) |
581 | { | 581 | { |
582 | unsigned long flags; | 582 | unsigned long flags; |
583 | struct idset *unreg_set; | 583 | struct idset *unreg_set; |
@@ -790,7 +790,6 @@ static struct notifier_block css_reboot_notifier = { | |||
790 | static int css_power_event(struct notifier_block *this, unsigned long event, | 790 | static int css_power_event(struct notifier_block *this, unsigned long event, |
791 | void *ptr) | 791 | void *ptr) |
792 | { | 792 | { |
793 | void *secm_area; | ||
794 | int ret, i; | 793 | int ret, i; |
795 | 794 | ||
796 | switch (event) { | 795 | switch (event) { |
@@ -806,15 +805,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, | |||
806 | mutex_unlock(&css->mutex); | 805 | mutex_unlock(&css->mutex); |
807 | continue; | 806 | continue; |
808 | } | 807 | } |
809 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | 808 | if (__chsc_do_secm(css, 0)) |
810 | GFP_DMA); | ||
811 | if (secm_area) { | ||
812 | if (__chsc_do_secm(css, 0, secm_area)) | ||
813 | ret = NOTIFY_BAD; | ||
814 | free_page((unsigned long)secm_area); | ||
815 | } else | ||
816 | ret = NOTIFY_BAD; | 809 | ret = NOTIFY_BAD; |
817 | |||
818 | mutex_unlock(&css->mutex); | 810 | mutex_unlock(&css->mutex); |
819 | } | 811 | } |
820 | break; | 812 | break; |
@@ -830,15 +822,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, | |||
830 | mutex_unlock(&css->mutex); | 822 | mutex_unlock(&css->mutex); |
831 | continue; | 823 | continue; |
832 | } | 824 | } |
833 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | 825 | if (__chsc_do_secm(css, 1)) |
834 | GFP_DMA); | ||
835 | if (secm_area) { | ||
836 | if (__chsc_do_secm(css, 1, secm_area)) | ||
837 | ret = NOTIFY_BAD; | ||
838 | free_page((unsigned long)secm_area); | ||
839 | } else | ||
840 | ret = NOTIFY_BAD; | 826 | ret = NOTIFY_BAD; |
841 | |||
842 | mutex_unlock(&css->mutex); | 827 | mutex_unlock(&css->mutex); |
843 | } | 828 | } |
844 | /* search for subchannels, which appeared during hibernation */ | 829 | /* search for subchannels, which appeared during hibernation */ |
@@ -863,14 +848,11 @@ static int __init css_bus_init(void) | |||
863 | { | 848 | { |
864 | int ret, i; | 849 | int ret, i; |
865 | 850 | ||
866 | ret = chsc_determine_css_characteristics(); | 851 | ret = chsc_init(); |
867 | if (ret == -ENOMEM) | ||
868 | goto out; | ||
869 | |||
870 | ret = chsc_alloc_sei_area(); | ||
871 | if (ret) | 852 | if (ret) |
872 | goto out; | 853 | return ret; |
873 | 854 | ||
855 | chsc_determine_css_characteristics(); | ||
874 | /* Try to enable MSS. */ | 856 | /* Try to enable MSS. */ |
875 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); | 857 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); |
876 | if (ret) | 858 | if (ret) |
@@ -956,9 +938,9 @@ out_unregister: | |||
956 | } | 938 | } |
957 | bus_unregister(&css_bus_type); | 939 | bus_unregister(&css_bus_type); |
958 | out: | 940 | out: |
959 | crw_unregister_handler(CRW_RSC_CSS); | 941 | crw_unregister_handler(CRW_RSC_SCH); |
960 | chsc_free_sei_area(); | ||
961 | idset_free(slow_subchannel_set); | 942 | idset_free(slow_subchannel_set); |
943 | chsc_init_cleanup(); | ||
962 | pr_alert("The CSS device driver initialization failed with " | 944 | pr_alert("The CSS device driver initialization failed with " |
963 | "errno=%d\n", ret); | 945 | "errno=%d\n", ret); |
964 | return ret; | 946 | return ret; |
@@ -978,9 +960,9 @@ static void __init css_bus_cleanup(void) | |||
978 | device_unregister(&css->device); | 960 | device_unregister(&css->device); |
979 | } | 961 | } |
980 | bus_unregister(&css_bus_type); | 962 | bus_unregister(&css_bus_type); |
981 | crw_unregister_handler(CRW_RSC_CSS); | 963 | crw_unregister_handler(CRW_RSC_SCH); |
982 | chsc_free_sei_area(); | ||
983 | idset_free(slow_subchannel_set); | 964 | idset_free(slow_subchannel_set); |
965 | chsc_init_cleanup(); | ||
984 | isc_unregister(IO_SCH_ISC); | 966 | isc_unregister(IO_SCH_ISC); |
985 | } | 967 | } |
986 | 968 | ||
@@ -1048,7 +1030,16 @@ subsys_initcall_sync(channel_subsystem_init_sync); | |||
1048 | 1030 | ||
1049 | void channel_subsystem_reinit(void) | 1031 | void channel_subsystem_reinit(void) |
1050 | { | 1032 | { |
1033 | struct channel_path *chp; | ||
1034 | struct chp_id chpid; | ||
1035 | |||
1051 | chsc_enable_facility(CHSC_SDA_OC_MSS); | 1036 | chsc_enable_facility(CHSC_SDA_OC_MSS); |
1037 | chp_id_for_each(&chpid) { | ||
1038 | chp = chpid_to_chp(chpid); | ||
1039 | if (!chp) | ||
1040 | continue; | ||
1041 | chsc_determine_base_channel_path_desc(chpid, &chp->desc); | ||
1042 | } | ||
1052 | } | 1043 | } |
1053 | 1044 | ||
1054 | #ifdef CONFIG_PROC_FS | 1045 | #ifdef CONFIG_PROC_FS |
@@ -1200,6 +1191,7 @@ static int css_pm_restore(struct device *dev) | |||
1200 | struct subchannel *sch = to_subchannel(dev); | 1191 | struct subchannel *sch = to_subchannel(dev); |
1201 | struct css_driver *drv; | 1192 | struct css_driver *drv; |
1202 | 1193 | ||
1194 | css_update_ssd_info(sch); | ||
1203 | if (!sch->dev.driver) | 1195 | if (!sch->dev.driver) |
1204 | return 0; | 1196 | return 0; |
1205 | drv = to_cssdriver(sch->dev.driver); | 1197 | drv = to_cssdriver(sch->dev.driver); |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 51bd3687d163..2ff8a22d4257 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -1147,6 +1147,7 @@ err: | |||
1147 | static int io_subchannel_chp_event(struct subchannel *sch, | 1147 | static int io_subchannel_chp_event(struct subchannel *sch, |
1148 | struct chp_link *link, int event) | 1148 | struct chp_link *link, int event) |
1149 | { | 1149 | { |
1150 | struct ccw_device *cdev = sch_get_cdev(sch); | ||
1150 | int mask; | 1151 | int mask; |
1151 | 1152 | ||
1152 | mask = chp_ssd_get_mask(&sch->ssd_info, link); | 1153 | mask = chp_ssd_get_mask(&sch->ssd_info, link); |
@@ -1156,22 +1157,30 @@ static int io_subchannel_chp_event(struct subchannel *sch, | |||
1156 | case CHP_VARY_OFF: | 1157 | case CHP_VARY_OFF: |
1157 | sch->opm &= ~mask; | 1158 | sch->opm &= ~mask; |
1158 | sch->lpm &= ~mask; | 1159 | sch->lpm &= ~mask; |
1160 | if (cdev) | ||
1161 | cdev->private->path_gone_mask |= mask; | ||
1159 | io_subchannel_terminate_path(sch, mask); | 1162 | io_subchannel_terminate_path(sch, mask); |
1160 | break; | 1163 | break; |
1161 | case CHP_VARY_ON: | 1164 | case CHP_VARY_ON: |
1162 | sch->opm |= mask; | 1165 | sch->opm |= mask; |
1163 | sch->lpm |= mask; | 1166 | sch->lpm |= mask; |
1167 | if (cdev) | ||
1168 | cdev->private->path_new_mask |= mask; | ||
1164 | io_subchannel_verify(sch); | 1169 | io_subchannel_verify(sch); |
1165 | break; | 1170 | break; |
1166 | case CHP_OFFLINE: | 1171 | case CHP_OFFLINE: |
1167 | if (cio_update_schib(sch)) | 1172 | if (cio_update_schib(sch)) |
1168 | return -ENODEV; | 1173 | return -ENODEV; |
1174 | if (cdev) | ||
1175 | cdev->private->path_gone_mask |= mask; | ||
1169 | io_subchannel_terminate_path(sch, mask); | 1176 | io_subchannel_terminate_path(sch, mask); |
1170 | break; | 1177 | break; |
1171 | case CHP_ONLINE: | 1178 | case CHP_ONLINE: |
1172 | if (cio_update_schib(sch)) | 1179 | if (cio_update_schib(sch)) |
1173 | return -ENODEV; | 1180 | return -ENODEV; |
1174 | sch->lpm |= mask & sch->opm; | 1181 | sch->lpm |= mask & sch->opm; |
1182 | if (cdev) | ||
1183 | cdev->private->path_new_mask |= mask; | ||
1175 | io_subchannel_verify(sch); | 1184 | io_subchannel_verify(sch); |
1176 | break; | 1185 | break; |
1177 | } | 1186 | } |
@@ -1196,6 +1205,7 @@ static void io_subchannel_quiesce(struct subchannel *sch) | |||
1196 | cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); | 1205 | cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); |
1197 | while (ret == -EBUSY) { | 1206 | while (ret == -EBUSY) { |
1198 | cdev->private->state = DEV_STATE_QUIESCE; | 1207 | cdev->private->state = DEV_STATE_QUIESCE; |
1208 | cdev->private->iretry = 255; | ||
1199 | ret = ccw_device_cancel_halt_clear(cdev); | 1209 | ret = ccw_device_cancel_halt_clear(cdev); |
1200 | if (ret == -EBUSY) { | 1210 | if (ret == -EBUSY) { |
1201 | ccw_device_set_timeout(cdev, HZ/10); | 1211 | ccw_device_set_timeout(cdev, HZ/10); |
@@ -1468,9 +1478,13 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) | |||
1468 | goto out; | 1478 | goto out; |
1469 | break; | 1479 | break; |
1470 | case IO_SCH_UNREG_ATTACH: | 1480 | case IO_SCH_UNREG_ATTACH: |
1481 | if (cdev->private->flags.resuming) { | ||
1482 | /* Device will be handled later. */ | ||
1483 | rc = 0; | ||
1484 | goto out; | ||
1485 | } | ||
1471 | /* Unregister ccw device. */ | 1486 | /* Unregister ccw device. */ |
1472 | if (!cdev->private->flags.resuming) | 1487 | ccw_device_unregister(cdev); |
1473 | ccw_device_unregister(cdev); | ||
1474 | break; | 1488 | break; |
1475 | default: | 1489 | default: |
1476 | break; | 1490 | break; |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index c9b852647f01..a845695ac314 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -174,7 +174,10 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
174 | ret = cio_clear (sch); | 174 | ret = cio_clear (sch); |
175 | return (ret == 0) ? -EBUSY : ret; | 175 | return (ret == 0) ? -EBUSY : ret; |
176 | } | 176 | } |
177 | panic("Can't stop i/o on subchannel.\n"); | 177 | /* Function was unsuccessful */ |
178 | CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n", | ||
179 | cdev->private->dev_id.ssid, cdev->private->dev_id.devno); | ||
180 | return -EIO; | ||
178 | } | 181 | } |
179 | 182 | ||
180 | void ccw_device_update_sense_data(struct ccw_device *cdev) | 183 | void ccw_device_update_sense_data(struct ccw_device *cdev) |
@@ -349,9 +352,13 @@ out: | |||
349 | 352 | ||
350 | static void ccw_device_oper_notify(struct ccw_device *cdev) | 353 | static void ccw_device_oper_notify(struct ccw_device *cdev) |
351 | { | 354 | { |
355 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
356 | |||
352 | if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { | 357 | if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { |
353 | /* Reenable channel measurements, if needed. */ | 358 | /* Reenable channel measurements, if needed. */ |
354 | ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); | 359 | ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); |
360 | /* Save indication for new paths. */ | ||
361 | cdev->private->path_new_mask = sch->vpm; | ||
355 | return; | 362 | return; |
356 | } | 363 | } |
357 | /* Driver doesn't want device back. */ | 364 | /* Driver doesn't want device back. */ |
@@ -462,6 +469,32 @@ static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e) | |||
462 | } | 469 | } |
463 | } | 470 | } |
464 | 471 | ||
472 | static void ccw_device_report_path_events(struct ccw_device *cdev) | ||
473 | { | ||
474 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
475 | int path_event[8]; | ||
476 | int chp, mask; | ||
477 | |||
478 | for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) { | ||
479 | path_event[chp] = PE_NONE; | ||
480 | if (mask & cdev->private->path_gone_mask & ~(sch->vpm)) | ||
481 | path_event[chp] |= PE_PATH_GONE; | ||
482 | if (mask & cdev->private->path_new_mask & sch->vpm) | ||
483 | path_event[chp] |= PE_PATH_AVAILABLE; | ||
484 | if (mask & cdev->private->pgid_reset_mask & sch->vpm) | ||
485 | path_event[chp] |= PE_PATHGROUP_ESTABLISHED; | ||
486 | } | ||
487 | if (cdev->online && cdev->drv->path_event) | ||
488 | cdev->drv->path_event(cdev, path_event); | ||
489 | } | ||
490 | |||
491 | static void ccw_device_reset_path_events(struct ccw_device *cdev) | ||
492 | { | ||
493 | cdev->private->path_gone_mask = 0; | ||
494 | cdev->private->path_new_mask = 0; | ||
495 | cdev->private->pgid_reset_mask = 0; | ||
496 | } | ||
497 | |||
465 | void | 498 | void |
466 | ccw_device_verify_done(struct ccw_device *cdev, int err) | 499 | ccw_device_verify_done(struct ccw_device *cdev, int err) |
467 | { | 500 | { |
@@ -498,6 +531,7 @@ callback: | |||
498 | &cdev->private->irb); | 531 | &cdev->private->irb); |
499 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 532 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
500 | } | 533 | } |
534 | ccw_device_report_path_events(cdev); | ||
501 | break; | 535 | break; |
502 | case -ETIME: | 536 | case -ETIME: |
503 | case -EUSERS: | 537 | case -EUSERS: |
@@ -516,6 +550,7 @@ callback: | |||
516 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | 550 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); |
517 | break; | 551 | break; |
518 | } | 552 | } |
553 | ccw_device_reset_path_events(cdev); | ||
519 | } | 554 | } |
520 | 555 | ||
521 | /* | 556 | /* |
@@ -734,13 +769,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
734 | int ret; | 769 | int ret; |
735 | 770 | ||
736 | ccw_device_set_timeout(cdev, 0); | 771 | ccw_device_set_timeout(cdev, 0); |
772 | cdev->private->iretry = 255; | ||
737 | ret = ccw_device_cancel_halt_clear(cdev); | 773 | ret = ccw_device_cancel_halt_clear(cdev); |
738 | if (ret == -EBUSY) { | 774 | if (ret == -EBUSY) { |
739 | ccw_device_set_timeout(cdev, 3*HZ); | 775 | ccw_device_set_timeout(cdev, 3*HZ); |
740 | cdev->private->state = DEV_STATE_TIMEOUT_KILL; | 776 | cdev->private->state = DEV_STATE_TIMEOUT_KILL; |
741 | return; | 777 | return; |
742 | } | 778 | } |
743 | if (ret == -ENODEV) | 779 | if (ret) |
744 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | 780 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
745 | else if (cdev->handler) | 781 | else if (cdev->handler) |
746 | cdev->handler(cdev, cdev->private->intparm, | 782 | cdev->handler(cdev, cdev->private->intparm, |
@@ -837,6 +873,7 @@ void ccw_device_kill_io(struct ccw_device *cdev) | |||
837 | { | 873 | { |
838 | int ret; | 874 | int ret; |
839 | 875 | ||
876 | cdev->private->iretry = 255; | ||
840 | ret = ccw_device_cancel_halt_clear(cdev); | 877 | ret = ccw_device_cancel_halt_clear(cdev); |
841 | if (ret == -EBUSY) { | 878 | if (ret == -EBUSY) { |
842 | ccw_device_set_timeout(cdev, 3*HZ); | 879 | ccw_device_set_timeout(cdev, 3*HZ); |
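The ccw_device_report_path_events() helper added above is the producer side of the new path-event notification: after path verification it folds the path_gone/path_new/pgid_reset masks into a per-channel-path event array and hands it to the driver's path_event callback. A minimal consumer sketch for a hypothetical ccw driver follows; the driver and function names are made up, while the callback shape and the PE_* flags are taken from the call in ccw_device_report_path_events() above.

#include <linux/device.h>
#include <asm/ccwdev.h>

static void sample_path_event(struct ccw_device *cdev, int *event)
{
	int chp;

	for (chp = 0; chp < 8; chp++) {		/* one slot per channel path */
		if (event[chp] == PE_NONE)
			continue;
		if (event[chp] & PE_PATH_GONE)
			dev_warn(&cdev->dev, "channel path %d gone\n", chp);
		if (event[chp] & PE_PATH_AVAILABLE)
			dev_info(&cdev->dev, "channel path %d available\n", chp);
		if (event[chp] & PE_PATHGROUP_ESTABLISHED)
			dev_info(&cdev->dev, "path group re-established on path %d\n", chp);
	}
}

A driver would wire this up through its struct ccw_driver (e.g. .path_event = sample_path_event), assuming the driver structure gains such a member as part of this interface.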
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 82a5ad0d63f6..07a4fd29f096 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -213,6 +213,17 @@ static void spid_start(struct ccw_device *cdev) | |||
213 | spid_do(cdev); | 213 | spid_do(cdev); |
214 | } | 214 | } |
215 | 215 | ||
216 | static int pgid_is_reset(struct pgid *p) | ||
217 | { | ||
218 | char *c; | ||
219 | |||
220 | for (c = (char *)p + 1; c < (char *)(p + 1); c++) { | ||
221 | if (*c != 0) | ||
222 | return 0; | ||
223 | } | ||
224 | return 1; | ||
225 | } | ||
226 | |||
216 | static int pgid_cmp(struct pgid *p1, struct pgid *p2) | 227 | static int pgid_cmp(struct pgid *p1, struct pgid *p2) |
217 | { | 228 | { |
218 | return memcmp((char *) p1 + 1, (char *) p2 + 1, | 229 | return memcmp((char *) p1 + 1, (char *) p2 + 1, |
@@ -223,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2) | |||
223 | * Determine pathgroup state from PGID data. | 234 | * Determine pathgroup state from PGID data. |
224 | */ | 235 | */ |
225 | static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, | 236 | static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, |
226 | int *mismatch, int *reserved, int *reset) | 237 | int *mismatch, int *reserved, u8 *reset) |
227 | { | 238 | { |
228 | struct pgid *pgid = &cdev->private->pgid[0]; | 239 | struct pgid *pgid = &cdev->private->pgid[0]; |
229 | struct pgid *first = NULL; | 240 | struct pgid *first = NULL; |
@@ -238,9 +249,8 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, | |||
238 | continue; | 249 | continue; |
239 | if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) | 250 | if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) |
240 | *reserved = 1; | 251 | *reserved = 1; |
241 | if (pgid->inf.ps.state1 == SNID_STATE1_RESET) { | 252 | if (pgid_is_reset(pgid)) { |
242 | /* A PGID was reset. */ | 253 | *reset |= lpm; |
243 | *reset = 1; | ||
244 | continue; | 254 | continue; |
245 | } | 255 | } |
246 | if (!first) { | 256 | if (!first) { |
@@ -307,7 +317,7 @@ static void snid_done(struct ccw_device *cdev, int rc) | |||
307 | struct pgid *pgid; | 317 | struct pgid *pgid; |
308 | int mismatch = 0; | 318 | int mismatch = 0; |
309 | int reserved = 0; | 319 | int reserved = 0; |
310 | int reset = 0; | 320 | u8 reset = 0; |
311 | u8 donepm; | 321 | u8 donepm; |
312 | 322 | ||
313 | if (rc) | 323 | if (rc) |
@@ -321,11 +331,12 @@ static void snid_done(struct ccw_device *cdev, int rc) | |||
321 | donepm = pgid_to_donepm(cdev); | 331 | donepm = pgid_to_donepm(cdev); |
322 | sch->vpm = donepm & sch->opm; | 332 | sch->vpm = donepm & sch->opm; |
323 | cdev->private->pgid_todo_mask &= ~donepm; | 333 | cdev->private->pgid_todo_mask &= ~donepm; |
334 | cdev->private->pgid_reset_mask |= reset; | ||
324 | pgid_fill(cdev, pgid); | 335 | pgid_fill(cdev, pgid); |
325 | } | 336 | } |
326 | out: | 337 | out: |
327 | CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " | 338 | CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " |
328 | "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid, | 339 | "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid, |
329 | id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, | 340 | id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, |
330 | cdev->private->pgid_todo_mask, mismatch, reserved, reset); | 341 | cdev->private->pgid_todo_mask, mismatch, reserved, reset); |
331 | switch (rc) { | 342 | switch (rc) { |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 469ef93f2302..d024d2c21897 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -151,8 +151,11 @@ struct ccw_device_private { | |||
151 | struct subchannel_id schid; /* subchannel number */ | 151 | struct subchannel_id schid; /* subchannel number */ |
152 | struct ccw_request req; /* internal I/O request */ | 152 | struct ccw_request req; /* internal I/O request */ |
153 | int iretry; | 153 | int iretry; |
154 | u8 pgid_valid_mask; /* mask of valid PGIDs */ | 154 | u8 pgid_valid_mask; /* mask of valid PGIDs */ |
155 | u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ | 155 | u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ |
156 | u8 pgid_reset_mask; /* mask of PGIDs which were reset */ | ||
157 | u8 path_gone_mask; /* mask of paths, that became unavailable */ | ||
158 | u8 path_new_mask; /* mask of paths, that became available */ | ||
156 | struct { | 159 | struct { |
157 | unsigned int fast:1; /* post with "channel end" */ | 160 | unsigned int fast:1; /* post with "channel end" */ |
158 | unsigned int repall:1; /* report every interrupt status */ | 161 | unsigned int repall:1; /* report every interrupt status */ |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 91c6028d7b74..8fd8c62455e9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -154,14 +154,7 @@ static inline int ap_instructions_available(void) | |||
154 | */ | 154 | */ |
155 | static int ap_interrupts_available(void) | 155 | static int ap_interrupts_available(void) |
156 | { | 156 | { |
157 | unsigned long long facility_bits[2]; | 157 | return test_facility(1) && test_facility(2); |
158 | |||
159 | if (stfle(facility_bits, 2) <= 1) | ||
160 | return 0; | ||
161 | if (!(facility_bits[0] & (1ULL << 61)) || | ||
162 | !(facility_bits[1] & (1ULL << 62))) | ||
163 | return 0; | ||
164 | return 1; | ||
165 | } | 158 | } |
166 | 159 | ||
167 | /** | 160 | /** |
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 5a46b8c5d68a..375aeeaf9ea5 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c | |||
@@ -372,21 +372,22 @@ static void hotplug_devices(struct work_struct *dummy) | |||
372 | /* | 372 | /* |
373 | * we emulate the request_irq behaviour on top of s390 extints | 373 | * we emulate the request_irq behaviour on top of s390 extints |
374 | */ | 374 | */ |
375 | static void kvm_extint_handler(u16 code) | 375 | static void kvm_extint_handler(unsigned int ext_int_code, |
376 | unsigned int param32, unsigned long param64) | ||
376 | { | 377 | { |
377 | struct virtqueue *vq; | 378 | struct virtqueue *vq; |
378 | u16 subcode; | 379 | u16 subcode; |
379 | u32 param; | 380 | u32 param; |
380 | 381 | ||
381 | subcode = S390_lowcore.cpu_addr; | 382 | subcode = ext_int_code >> 16; |
382 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) | 383 | if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) |
383 | return; | 384 | return; |
384 | 385 | ||
385 | /* The LSB might be overloaded, we have to mask it */ | 386 | /* The LSB might be overloaded, we have to mask it */ |
386 | vq = (struct virtqueue *)(S390_lowcore.ext_params2 & ~1UL); | 387 | vq = (struct virtqueue *)(param64 & ~1UL); |
387 | 388 | ||
388 | /* We use ext_params to decide what this interrupt means */ | 389 | /* We use ext_params to decide what this interrupt means */ |
389 | param = S390_lowcore.ext_params & VIRTIO_PARAM_MASK; | 390 | param = param32 & VIRTIO_PARAM_MASK; |
390 | 391 | ||
391 | switch (param) { | 392 | switch (param) { |
392 | case VIRTIO_PARAM_CONFIG_CHANGED: | 393 | case VIRTIO_PARAM_CONFIG_CHANGED: |
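This hunk, like the iucv change at the end of this section, converts an external interrupt consumer to the new handler prototype: the interruption code and both parameters are passed to the handler directly instead of being read from the lowcore. A minimal sketch of a handler in the new style; the function name is illustrative and registration is omitted.

#include <linux/types.h>

static void sample_extint_handler(unsigned int ext_int_code,
				  unsigned int param32, unsigned long param64)
{
	u16 subcode = ext_int_code >> 16;	/* formerly S390_lowcore.cpu_addr */
	u32 param = param32;			/* formerly S390_lowcore.ext_params */
	void *cookie = (void *)param64;		/* formerly S390_lowcore.ext_params2 */

	/* A real handler dispatches on subcode/param and uses the cookie;
	   kvm_extint_handler() above additionally masks an overloaded LSB. */
	(void)subcode;
	(void)param;
	(void)cookie;
}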
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f4d4120e5128..6f3c6ae4fe03 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -108,7 +108,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres | |||
108 | #endif | 108 | #endif |
109 | 109 | ||
110 | #ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY | 110 | #ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY |
111 | #define page_clear_dirty(page) do { } while (0) | 111 | #define page_clear_dirty(page, mapped) do { } while (0) |
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY | 114 | #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6fa317801e1c..5f38c460367e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -310,7 +310,7 @@ static inline void SetPageUptodate(struct page *page) | |||
310 | { | 310 | { |
311 | #ifdef CONFIG_S390 | 311 | #ifdef CONFIG_S390 |
312 | if (!test_and_set_bit(PG_uptodate, &page->flags)) | 312 | if (!test_and_set_bit(PG_uptodate, &page->flags)) |
313 | page_clear_dirty(page); | 313 | page_clear_dirty(page, 0); |
314 | #else | 314 | #else |
315 | /* | 315 | /* |
316 | * Memory barrier must be issued before setting the PG_uptodate bit, | 316 | * Memory barrier must be issued before setting the PG_uptodate bit, |
@@ -745,7 +745,7 @@ int page_mkclean(struct page *page) | |||
745 | if (mapping) { | 745 | if (mapping) { |
746 | ret = page_mkclean_file(mapping, page); | 746 | ret = page_mkclean_file(mapping, page); |
747 | if (page_test_dirty(page)) { | 747 | if (page_test_dirty(page)) { |
748 | page_clear_dirty(page); | 748 | page_clear_dirty(page, 1); |
749 | ret = 1; | 749 | ret = 1; |
750 | } | 750 | } |
751 | } | 751 | } |
@@ -942,7 +942,7 @@ void page_remove_rmap(struct page *page) | |||
942 | * containing the swap entry, but page not yet written to swap. | 942 | * containing the swap entry, but page not yet written to swap. |
943 | */ | 943 | */ |
944 | if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { | 944 | if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { |
945 | page_clear_dirty(page); | 945 | page_clear_dirty(page, 1); |
946 | set_page_dirty(page); | 946 | set_page_dirty(page); |
947 | } | 947 | } |
948 | /* | 948 | /* |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 499c045d6910..f7db676de77d 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -1798,7 +1798,8 @@ static void iucv_work_fn(struct work_struct *work) | |||
1798 | * Handles external interrupts coming in from CP. | 1798 | * Handles external interrupts coming in from CP. |
1799 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). | 1799 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). |
1800 | */ | 1800 | */ |
1801 | static void iucv_external_interrupt(u16 code) | 1801 | static void iucv_external_interrupt(unsigned int ext_int_code, |
1802 | unsigned int param32, unsigned long param64) | ||
1802 | { | 1803 | { |
1803 | struct iucv_irq_data *p; | 1804 | struct iucv_irq_data *p; |
1804 | struct iucv_irq_list *work; | 1805 | struct iucv_irq_list *work; |