Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/pgtable.h | 66
-rw-r--r--  arch/s390/include/asm/setup.h | 9
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/crash_dump.c | 6
-rw-r--r--  arch/s390/kernel/early.c | 1
-rw-r--r--  arch/s390/kernel/mem_detect.c | 145
-rw-r--r--  arch/s390/kernel/setup.c | 65
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 3
-rw-r--r--  arch/s390/mm/Makefile | 2
-rw-r--r--  arch/s390/mm/init.c | 11
-rw-r--r--  arch/s390/mm/mem_detect.c | 134
-rw-r--r--  arch/s390/mm/pgtable.c | 121
-rw-r--r--  arch/s390/mm/vmem.c | 8
13 files changed, 356 insertions(+), 217 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b4622915bd15..4105b8221fdd 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -306,6 +306,7 @@ extern unsigned long MODULES_END;
306 | #define RCP_HC_BIT 0x00200000UL | 306 | #define RCP_HC_BIT 0x00200000UL |
307 | #define RCP_GR_BIT 0x00040000UL | 307 | #define RCP_GR_BIT 0x00040000UL |
308 | #define RCP_GC_BIT 0x00020000UL | 308 | #define RCP_GC_BIT 0x00020000UL |
309 | #define RCP_IN_BIT 0x00008000UL /* IPTE notify bit */ | ||
309 | 310 | ||
310 | /* User dirty / referenced bit for KVM's migration feature */ | 311 | /* User dirty / referenced bit for KVM's migration feature */ |
311 | #define KVM_UR_BIT 0x00008000UL | 312 | #define KVM_UR_BIT 0x00008000UL |
@@ -373,6 +374,7 @@ extern unsigned long MODULES_END;
373 | #define RCP_HC_BIT 0x0020000000000000UL | 374 | #define RCP_HC_BIT 0x0020000000000000UL |
374 | #define RCP_GR_BIT 0x0004000000000000UL | 375 | #define RCP_GR_BIT 0x0004000000000000UL |
375 | #define RCP_GC_BIT 0x0002000000000000UL | 376 | #define RCP_GC_BIT 0x0002000000000000UL |
377 | #define RCP_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ | ||
376 | 378 | ||
377 | /* User dirty / referenced bit for KVM's migration feature */ | 379 | /* User dirty / referenced bit for KVM's migration feature */ |
378 | #define KVM_UR_BIT 0x0000800000000000UL | 380 | #define KVM_UR_BIT 0x0000800000000000UL |
@@ -746,30 +748,42 @@ struct gmap {
746 | 748 | ||
747 | /** | 749 | /** |
748 | * struct gmap_rmap - reverse mapping for segment table entries | 750 | * struct gmap_rmap - reverse mapping for segment table entries |
749 | * @next: pointer to the next gmap_rmap structure in the list | 751 | * @gmap: pointer to the gmap_struct |
750 | * @entry: pointer to a segment table entry | 752 | * @entry: pointer to a segment table entry |
753 | * @vmaddr: virtual address in the guest address space | ||
751 | */ | 754 | */ |
752 | struct gmap_rmap { | 755 | struct gmap_rmap { |
753 | struct list_head list; | 756 | struct list_head list; |
757 | struct gmap *gmap; | ||
754 | unsigned long *entry; | 758 | unsigned long *entry; |
759 | unsigned long vmaddr; | ||
755 | }; | 760 | }; |
756 | 761 | ||
757 | /** | 762 | /** |
758 | * struct gmap_pgtable - gmap information attached to a page table | 763 | * struct gmap_pgtable - gmap information attached to a page table |
759 | * @vmaddr: address of the 1MB segment in the process virtual memory | 764 | * @vmaddr: address of the 1MB segment in the process virtual memory |
760 | * @mapper: list of segment table entries maping a page table | 765 | * @mapper: list of segment table entries mapping a page table |
761 | */ | 766 | */ |
762 | struct gmap_pgtable { | 767 | struct gmap_pgtable { |
763 | unsigned long vmaddr; | 768 | unsigned long vmaddr; |
764 | struct list_head mapper; | 769 | struct list_head mapper; |
765 | }; | 770 | }; |
766 | 771 | ||
772 | /** | ||
773 | * struct gmap_notifier - notify function block for page invalidation | ||
774 | * @notifier_call: address of callback function | ||
775 | */ | ||
776 | struct gmap_notifier { | ||
777 | struct list_head list; | ||
778 | void (*notifier_call)(struct gmap *gmap, unsigned long address); | ||
779 | }; | ||
780 | |||
767 | struct gmap *gmap_alloc(struct mm_struct *mm); | 781 | struct gmap *gmap_alloc(struct mm_struct *mm); |
768 | void gmap_free(struct gmap *gmap); | 782 | void gmap_free(struct gmap *gmap); |
769 | void gmap_enable(struct gmap *gmap); | 783 | void gmap_enable(struct gmap *gmap); |
770 | void gmap_disable(struct gmap *gmap); | 784 | void gmap_disable(struct gmap *gmap); |
771 | int gmap_map_segment(struct gmap *gmap, unsigned long from, | 785 | int gmap_map_segment(struct gmap *gmap, unsigned long from, |
772 | unsigned long to, unsigned long length); | 786 | unsigned long to, unsigned long len); |
773 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); | 787 | int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); |
774 | unsigned long __gmap_translate(unsigned long address, struct gmap *); | 788 | unsigned long __gmap_translate(unsigned long address, struct gmap *); |
775 | unsigned long gmap_translate(unsigned long address, struct gmap *); | 789 | unsigned long gmap_translate(unsigned long address, struct gmap *); |
@@ -777,6 +791,24 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *);
777 | unsigned long gmap_fault(unsigned long address, struct gmap *); | 791 | unsigned long gmap_fault(unsigned long address, struct gmap *); |
778 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *); | 792 | void gmap_discard(unsigned long from, unsigned long to, struct gmap *); |
779 | 793 | ||
794 | void gmap_register_ipte_notifier(struct gmap_notifier *); | ||
795 | void gmap_unregister_ipte_notifier(struct gmap_notifier *); | ||
796 | int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len); | ||
797 | void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *); | ||
798 | |||
799 | static inline pgste_t pgste_ipte_notify(struct mm_struct *mm, | ||
800 | unsigned long addr, | ||
801 | pte_t *ptep, pgste_t pgste) | ||
802 | { | ||
803 | #ifdef CONFIG_PGSTE | ||
804 | if (pgste_val(pgste) & RCP_IN_BIT) { | ||
805 | pgste_val(pgste) &= ~RCP_IN_BIT; | ||
806 | gmap_do_ipte_notify(mm, addr, ptep); | ||
807 | } | ||
808 | #endif | ||
809 | return pgste; | ||
810 | } | ||
811 | |||
780 | /* | 812 | /* |
781 | * Certain architectures need to do special things when PTEs | 813 | * Certain architectures need to do special things when PTEs |
782 | * within a page table are directly modified. Thus, the following | 814 | * within a page table are directly modified. Thus, the following |
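Taken together, these declarations form the consumer side of the new notifier interface, KVM being the natural consumer of the gmap infrastructure. A minimal sketch of how a consumer might hook it up; the function and variable names here are illustrative, not part of this patch:

#include <linux/list.h>
#include <asm/pgtable.h>

/* Hypothetical callback: invoked under gmap_notifier_lock whenever a pte
 * that was armed via gmap_ipte_notify() gets invalidated. */
static void example_ipte_callback(struct gmap *gmap, unsigned long address)
{
	/* e.g. mark the guest page dirty or kick the affected vcpu */
}

static struct gmap_notifier example_notifier = {
	.notifier_call = example_ipte_callback,
};

/* Arm notification for a single guest page, then register the callback. */
static int example_watch_guest_page(struct gmap *gmap, unsigned long gaddr)
{
	gmap_register_ipte_notifier(&example_notifier);
	return gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);
}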
@@ -1032,8 +1064,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1032 | pte_t pte; | 1064 | pte_t pte; |
1033 | 1065 | ||
1034 | mm->context.flush_mm = 1; | 1066 | mm->context.flush_mm = 1; |
1035 | if (mm_has_pgste(mm)) | 1067 | if (mm_has_pgste(mm)) { |
1036 | pgste = pgste_get_lock(ptep); | 1068 | pgste = pgste_get_lock(ptep); |
1069 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); | ||
1070 | } | ||
1037 | 1071 | ||
1038 | pte = *ptep; | 1072 | pte = *ptep; |
1039 | if (!mm_exclusive(mm)) | 1073 | if (!mm_exclusive(mm)) |
@@ -1052,11 +1086,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
1052 | unsigned long address, | 1086 | unsigned long address, |
1053 | pte_t *ptep) | 1087 | pte_t *ptep) |
1054 | { | 1088 | { |
1089 | pgste_t pgste; | ||
1055 | pte_t pte; | 1090 | pte_t pte; |
1056 | 1091 | ||
1057 | mm->context.flush_mm = 1; | 1092 | mm->context.flush_mm = 1; |
1058 | if (mm_has_pgste(mm)) | 1093 | if (mm_has_pgste(mm)) { |
1059 | pgste_get_lock(ptep); | 1094 | pgste = pgste_get_lock(ptep); |
1095 | pgste_ipte_notify(mm, address, ptep, pgste); | ||
1096 | } | ||
1060 | 1097 | ||
1061 | pte = *ptep; | 1098 | pte = *ptep; |
1062 | if (!mm_exclusive(mm)) | 1099 | if (!mm_exclusive(mm)) |
@@ -1082,8 +1119,10 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1082 | pgste_t pgste; | 1119 | pgste_t pgste; |
1083 | pte_t pte; | 1120 | pte_t pte; |
1084 | 1121 | ||
1085 | if (mm_has_pgste(vma->vm_mm)) | 1122 | if (mm_has_pgste(vma->vm_mm)) { |
1086 | pgste = pgste_get_lock(ptep); | 1123 | pgste = pgste_get_lock(ptep); |
1124 | pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); | ||
1125 | } | ||
1087 | 1126 | ||
1088 | pte = *ptep; | 1127 | pte = *ptep; |
1089 | __ptep_ipte(address, ptep); | 1128 | __ptep_ipte(address, ptep); |
@@ -1111,8 +1150,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1111 | pgste_t pgste; | 1150 | pgste_t pgste; |
1112 | pte_t pte; | 1151 | pte_t pte; |
1113 | 1152 | ||
1114 | if (mm_has_pgste(mm)) | 1153 | if (mm_has_pgste(mm)) { |
1115 | pgste = pgste_get_lock(ptep); | 1154 | pgste = pgste_get_lock(ptep); |
1155 | if (!full) | ||
1156 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); | ||
1157 | } | ||
1116 | 1158 | ||
1117 | pte = *ptep; | 1159 | pte = *ptep; |
1118 | if (!full) | 1160 | if (!full) |
@@ -1135,8 +1177,10 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
1135 | 1177 | ||
1136 | if (pte_write(pte)) { | 1178 | if (pte_write(pte)) { |
1137 | mm->context.flush_mm = 1; | 1179 | mm->context.flush_mm = 1; |
1138 | if (mm_has_pgste(mm)) | 1180 | if (mm_has_pgste(mm)) { |
1139 | pgste = pgste_get_lock(ptep); | 1181 | pgste = pgste_get_lock(ptep); |
1182 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); | ||
1183 | } | ||
1140 | 1184 | ||
1141 | if (!mm_exclusive(mm)) | 1185 | if (!mm_exclusive(mm)) |
1142 | __ptep_ipte(address, ptep); | 1186 | __ptep_ipte(address, ptep); |
@@ -1160,8 +1204,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1160 | 1204 | ||
1161 | if (pte_same(*ptep, entry)) | 1205 | if (pte_same(*ptep, entry)) |
1162 | return 0; | 1206 | return 0; |
1163 | if (mm_has_pgste(vma->vm_mm)) | 1207 | if (mm_has_pgste(vma->vm_mm)) { |
1164 | pgste = pgste_get_lock(ptep); | 1208 | pgste = pgste_get_lock(ptep); |
1209 | pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); | ||
1210 | } | ||
1165 | 1211 | ||
1166 | __ptep_ipte(address, ptep); | 1212 | __ptep_ipte(address, ptep); |
1167 | 1213 | ||
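The six hunks above all apply one pattern: every primitive that invalidates a pte now calls pgste_ipte_notify() while the pgste lock is held, and ptep_get_and_clear_full() skips the notification in the "full" case, i.e. during address space teardown, where there is nothing left to notify. A condensed, illustrative helper (not a function added by this patch) showing where the notification sits in the invalidation sequence:

static inline pte_t example_invalidate_pte(struct mm_struct *mm,
					   unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		/* clears RCP_IN_BIT and fires the gmap callbacks if it was set */
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}
	pte = *ptep;
	__ptep_ipte(address, ptep);		/* invalidate pte, flush TLB */
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);	/* carry dirty/referenced */
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}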
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index ff67d730c00c..59880dbaf360 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -33,8 +33,6 @@
33 | 33 | ||
34 | #define CHUNK_READ_WRITE 0 | 34 | #define CHUNK_READ_WRITE 0 |
35 | #define CHUNK_READ_ONLY 1 | 35 | #define CHUNK_READ_ONLY 1 |
36 | #define CHUNK_OLDMEM 4 | ||
37 | #define CHUNK_CRASHK 5 | ||
38 | 36 | ||
39 | struct mem_chunk { | 37 | struct mem_chunk { |
40 | unsigned long addr; | 38 | unsigned long addr; |
@@ -43,13 +41,12 @@ struct mem_chunk {
43 | }; | 41 | }; |
44 | 42 | ||
45 | extern struct mem_chunk memory_chunk[]; | 43 | extern struct mem_chunk memory_chunk[]; |
46 | extern unsigned long real_memory_size; | ||
47 | extern int memory_end_set; | 44 | extern int memory_end_set; |
48 | extern unsigned long memory_end; | 45 | extern unsigned long memory_end; |
49 | 46 | ||
50 | void detect_memory_layout(struct mem_chunk chunk[]); | 47 | void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize); |
51 | void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr, | 48 | void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, |
52 | unsigned long size, int type); | 49 | unsigned long size); |
53 | 50 | ||
54 | #define PRIMARY_SPACE_MODE 0 | 51 | #define PRIMARY_SPACE_MODE 0 |
55 | #define ACCESS_REGISTER_MODE 1 | 52 | #define ACCESS_REGISTER_MODE 1 |
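The reworked interface drops both the chunk type argument of create_mem_hole() and the exported real_memory_size variable, and lets callers bound detection with maxsize. A minimal usage sketch under the new signatures (names illustrative), matching what the crash-dump code further down does:

#include <asm/setup.h>

static struct mem_chunk chunks[MEMORY_CHUNKS];

static void example_detect_and_reserve(void)
{
	detect_memory_layout(chunks, 0);	/* maxsize == 0: no upper bound */
	/* Punch the kdump oldmem region out of the layout; holes no longer
	 * carry a type, they are simply cut out of the array. */
	create_mem_hole(chunks, OLDMEM_BASE, OLDMEM_SIZE);
}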
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1386fcaf4ef6..4bb2a4656163 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -30,7 +30,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
30 | 30 | ||
31 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o | 31 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o |
32 | obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o | 32 | obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o |
33 | obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o | 33 | obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o |
34 | obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o | 34 | obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o |
35 | obj-y += dumpstack.o | 35 | obj-y += dumpstack.o |
36 | 36 | ||
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index fb8d8781a011..f703d91bf720 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -88,8 +88,8 @@ static struct mem_chunk *get_memory_layout(void)
88 | struct mem_chunk *chunk_array; | 88 | struct mem_chunk *chunk_array; |
89 | 89 | ||
90 | chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk)); | 90 | chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk)); |
91 | detect_memory_layout(chunk_array); | 91 | detect_memory_layout(chunk_array, 0); |
92 | create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK); | 92 | create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE); |
93 | return chunk_array; | 93 | return chunk_array; |
94 | } | 94 | } |
95 | 95 | ||
@@ -344,7 +344,7 @@ static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
344 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 344 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
345 | mem_chunk = &chunk_array[i]; | 345 | mem_chunk = &chunk_array[i]; |
346 | if (mem_chunk->size == 0) | 346 | if (mem_chunk->size == 0) |
347 | break; | 347 | continue; |
348 | if (chunk_array[i].type != CHUNK_READ_WRITE && | 348 | if (chunk_array[i].type != CHUNK_READ_WRITE && |
349 | chunk_array[i].type != CHUNK_READ_ONLY) | 349 | chunk_array[i].type != CHUNK_READ_ONLY) |
350 | continue; | 350 | continue; |
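The switch from break to continue is needed because create_mem_hole() can now zero out an entry in the middle of the array (when a hole swallows a chunk completely), so a zero-size slot no longer marks the end of the list. An illustrative array state, assuming three detected chunks, and the matching scan:

/*
 * chunk_array after a hole erased chunk 1 completely:
 *   [0] addr=0x00000000 size=0x10000000 type=CHUNK_READ_WRITE
 *   [1] addr=0x00000000 size=0x00000000                (erased)
 *   [2] addr=0x20000000 size=0x10000000 type=CHUNK_READ_WRITE
 */
for (i = 0; i < MEMORY_CHUNKS; i++) {
	if (chunk_array[i].size == 0)
		continue;	/* skip holes instead of stopping the scan */
	/* ... process chunk_array[i] ... */
}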
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index bda011e2f8ae..dc8770d7173c 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -482,7 +482,6 @@ void __init startup_init(void)
482 | detect_machine_facilities(); | 482 | detect_machine_facilities(); |
483 | setup_topology(); | 483 | setup_topology(); |
484 | sclp_facilities_detect(); | 484 | sclp_facilities_detect(); |
485 | detect_memory_layout(memory_chunk); | ||
486 | #ifdef CONFIG_DYNAMIC_FTRACE | 485 | #ifdef CONFIG_DYNAMIC_FTRACE |
487 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; | 486 | S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; |
488 | #endif | 487 | #endif |
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
deleted file mode 100644
index 22d502e885ed..000000000000
--- a/arch/s390/kernel/mem_detect.c
+++ /dev/null
@@ -1,145 +0,0 @@
1 | /* | ||
2 | * Copyright IBM Corp. 2008, 2009 | ||
3 | * | ||
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <asm/ipl.h> | ||
10 | #include <asm/sclp.h> | ||
11 | #include <asm/setup.h> | ||
12 | |||
13 | #define ADDR2G (1ULL << 31) | ||
14 | |||
15 | static void find_memory_chunks(struct mem_chunk chunk[]) | ||
16 | { | ||
17 | unsigned long long memsize, rnmax, rzm; | ||
18 | unsigned long addr = 0, size; | ||
19 | int i = 0, type; | ||
20 | |||
21 | rzm = sclp_get_rzm(); | ||
22 | rnmax = sclp_get_rnmax(); | ||
23 | memsize = rzm * rnmax; | ||
24 | if (!rzm) | ||
25 | rzm = 1ULL << 17; | ||
26 | if (sizeof(long) == 4) { | ||
27 | rzm = min(ADDR2G, rzm); | ||
28 | memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; | ||
29 | } | ||
30 | do { | ||
31 | size = 0; | ||
32 | type = tprot(addr); | ||
33 | do { | ||
34 | size += rzm; | ||
35 | if (memsize && addr + size >= memsize) | ||
36 | break; | ||
37 | } while (type == tprot(addr + size)); | ||
38 | if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { | ||
39 | chunk[i].addr = addr; | ||
40 | chunk[i].size = size; | ||
41 | chunk[i].type = type; | ||
42 | i++; | ||
43 | } | ||
44 | addr += size; | ||
45 | } while (addr < memsize && i < MEMORY_CHUNKS); | ||
46 | } | ||
47 | |||
48 | void detect_memory_layout(struct mem_chunk chunk[]) | ||
49 | { | ||
50 | unsigned long flags, cr0; | ||
51 | |||
52 | memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); | ||
53 | /* Disable IRQs, DAT and low address protection so tprot does the | ||
54 | * right thing and we don't get scheduled away with low address | ||
55 | * protection disabled. | ||
56 | */ | ||
57 | flags = __arch_local_irq_stnsm(0xf8); | ||
58 | __ctl_store(cr0, 0, 0); | ||
59 | __ctl_clear_bit(0, 28); | ||
60 | find_memory_chunks(chunk); | ||
61 | __ctl_load(cr0, 0, 0); | ||
62 | arch_local_irq_restore(flags); | ||
63 | } | ||
64 | EXPORT_SYMBOL(detect_memory_layout); | ||
65 | |||
66 | /* | ||
67 | * Move memory chunks array from index "from" to index "to" | ||
68 | */ | ||
69 | static void mem_chunk_move(struct mem_chunk chunk[], int to, int from) | ||
70 | { | ||
71 | int cnt = MEMORY_CHUNKS - to; | ||
72 | |||
73 | memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk)); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Initialize memory chunk | ||
78 | */ | ||
79 | static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr, | ||
80 | unsigned long size, int type) | ||
81 | { | ||
82 | chunk->type = type; | ||
83 | chunk->addr = addr; | ||
84 | chunk->size = size; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Create memory hole with given address, size, and type | ||
89 | */ | ||
90 | void create_mem_hole(struct mem_chunk chunk[], unsigned long addr, | ||
91 | unsigned long size, int type) | ||
92 | { | ||
93 | unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size; | ||
94 | int i, ch_type; | ||
95 | |||
96 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
97 | if (chunk[i].size == 0) | ||
98 | continue; | ||
99 | |||
100 | /* Define chunk properties */ | ||
101 | ch_start = chunk[i].addr; | ||
102 | ch_size = chunk[i].size; | ||
103 | ch_end = ch_start + ch_size - 1; | ||
104 | ch_type = chunk[i].type; | ||
105 | |||
106 | /* Is memory chunk hit by memory hole? */ | ||
107 | if (addr + size <= ch_start) | ||
108 | continue; /* No: memory hole in front of chunk */ | ||
109 | if (addr > ch_end) | ||
110 | continue; /* No: memory hole after chunk */ | ||
111 | |||
112 | /* Yes: Define local hole properties */ | ||
113 | lh_start = max(addr, chunk[i].addr); | ||
114 | lh_end = min(addr + size - 1, ch_end); | ||
115 | lh_size = lh_end - lh_start + 1; | ||
116 | |||
117 | if (lh_start == ch_start && lh_end == ch_end) { | ||
118 | /* Hole covers complete memory chunk */ | ||
119 | mem_chunk_init(&chunk[i], lh_start, lh_size, type); | ||
120 | } else if (lh_end == ch_end) { | ||
121 | /* Hole starts in memory chunk and convers chunk end */ | ||
122 | mem_chunk_move(chunk, i + 1, i); | ||
123 | mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size, | ||
124 | ch_type); | ||
125 | mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); | ||
126 | i += 1; | ||
127 | } else if (lh_start == ch_start) { | ||
128 | /* Hole ends in memory chunk */ | ||
129 | mem_chunk_move(chunk, i + 1, i); | ||
130 | mem_chunk_init(&chunk[i], lh_start, lh_size, type); | ||
131 | mem_chunk_init(&chunk[i + 1], lh_end + 1, | ||
132 | ch_size - lh_size, ch_type); | ||
133 | break; | ||
134 | } else { | ||
135 | /* Hole splits memory chunk */ | ||
136 | mem_chunk_move(chunk, i + 2, i); | ||
137 | mem_chunk_init(&chunk[i], ch_start, | ||
138 | lh_start - ch_start, ch_type); | ||
139 | mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); | ||
140 | mem_chunk_init(&chunk[i + 2], lh_end + 1, | ||
141 | ch_end - lh_end, ch_type); | ||
142 | break; | ||
143 | } | ||
144 | } | ||
145 | } | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0f419c5765c8..0a49095104c9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -226,25 +226,17 @@ static void __init conmode_default(void)
226 | } | 226 | } |
227 | 227 | ||
228 | #ifdef CONFIG_ZFCPDUMP | 228 | #ifdef CONFIG_ZFCPDUMP |
229 | static void __init setup_zfcpdump(unsigned int console_devno) | 229 | static void __init setup_zfcpdump(void) |
230 | { | 230 | { |
231 | static char str[41]; | ||
232 | |||
233 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 231 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
234 | return; | 232 | return; |
235 | if (OLDMEM_BASE) | 233 | if (OLDMEM_BASE) |
236 | return; | 234 | return; |
237 | if (console_devno != -1) | 235 | strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev"); |
238 | sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x", | ||
239 | ipl_info.data.fcp.dev_id.devno, console_devno); | ||
240 | else | ||
241 | sprintf(str, " cio_ignore=all,!0.0.%04x", | ||
242 | ipl_info.data.fcp.dev_id.devno); | ||
243 | strcat(boot_command_line, str); | ||
244 | console_loglevel = 2; | 236 | console_loglevel = 2; |
245 | } | 237 | } |
246 | #else | 238 | #else |
247 | static inline void setup_zfcpdump(unsigned int console_devno) {} | 239 | static inline void setup_zfcpdump(void) {} |
248 | #endif /* CONFIG_ZFCPDUMP */ | 240 | #endif /* CONFIG_ZFCPDUMP */ |
249 | 241 | ||
250 | /* | 242 | /* |
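The simplified helper relies on the ipldev and condev keywords of the cio_ignore= parameter, which the common-I/O layer resolves to the IPL device and the console device during boot, so the console device number no longer has to be threaded through setup_arch(). The appended option is now one fixed literal; a hypothetical dump kernel command line would end up as:

root=/dev/ram0 cio_ignore=all,!ipldev,!condev

so every CCW device except the dump (IPL) device and the console starts out on the ignore list.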
@@ -471,14 +463,10 @@ static void __init setup_resources(void)
471 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 463 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
472 | if (!memory_chunk[i].size) | 464 | if (!memory_chunk[i].size) |
473 | continue; | 465 | continue; |
474 | if (memory_chunk[i].type == CHUNK_OLDMEM || | ||
475 | memory_chunk[i].type == CHUNK_CRASHK) | ||
476 | continue; | ||
477 | res = alloc_bootmem_low(sizeof(*res)); | 466 | res = alloc_bootmem_low(sizeof(*res)); |
478 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | 467 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; |
479 | switch (memory_chunk[i].type) { | 468 | switch (memory_chunk[i].type) { |
480 | case CHUNK_READ_WRITE: | 469 | case CHUNK_READ_WRITE: |
481 | case CHUNK_CRASHK: | ||
482 | res->name = "System RAM"; | 470 | res->name = "System RAM"; |
483 | break; | 471 | break; |
484 | case CHUNK_READ_ONLY: | 472 | case CHUNK_READ_ONLY: |
@@ -510,12 +498,10 @@ static void __init setup_resources(void)
510 | } | 498 | } |
511 | } | 499 | } |
512 | 500 | ||
513 | unsigned long real_memory_size; | ||
514 | EXPORT_SYMBOL_GPL(real_memory_size); | ||
515 | |||
516 | static void __init setup_memory_end(void) | 501 | static void __init setup_memory_end(void) |
517 | { | 502 | { |
518 | unsigned long vmax, vmalloc_size, tmp; | 503 | unsigned long vmax, vmalloc_size, tmp; |
504 | unsigned long real_memory_size = 0; | ||
519 | int i; | 505 | int i; |
520 | 506 | ||
521 | 507 | ||
@@ -525,7 +511,6 @@ static void __init setup_memory_end(void)
525 | memory_end_set = 1; | 511 | memory_end_set = 1; |
526 | } | 512 | } |
527 | #endif | 513 | #endif |
528 | real_memory_size = 0; | ||
529 | memory_end &= PAGE_MASK; | 514 | memory_end &= PAGE_MASK; |
530 | 515 | ||
531 | /* | 516 | /* |
@@ -538,6 +523,8 @@ static void __init setup_memory_end(void)
538 | unsigned long align; | 523 | unsigned long align; |
539 | 524 | ||
540 | chunk = &memory_chunk[i]; | 525 | chunk = &memory_chunk[i]; |
526 | if (!chunk->size) | ||
527 | continue; | ||
541 | align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); | 528 | align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); |
542 | start = (chunk->addr + align - 1) & ~(align - 1); | 529 | start = (chunk->addr + align - 1) & ~(align - 1); |
543 | end = (chunk->addr + chunk->size) & ~(align - 1); | 530 | end = (chunk->addr + chunk->size) & ~(align - 1); |
@@ -588,6 +575,8 @@ static void __init setup_memory_end(void)
588 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 575 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
589 | struct mem_chunk *chunk = &memory_chunk[i]; | 576 | struct mem_chunk *chunk = &memory_chunk[i]; |
590 | 577 | ||
578 | if (!chunk->size) | ||
579 | continue; | ||
591 | if (chunk->addr >= memory_end) { | 580 | if (chunk->addr >= memory_end) { |
592 | memset(chunk, 0, sizeof(*chunk)); | 581 | memset(chunk, 0, sizeof(*chunk)); |
593 | continue; | 582 | continue; |
@@ -688,15 +677,6 @@ static int __init verify_crash_base(unsigned long crash_base,
688 | } | 677 | } |
689 | 678 | ||
690 | /* | 679 | /* |
691 | * Reserve kdump memory by creating a memory hole in the mem_chunk array | ||
692 | */ | ||
693 | static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size, | ||
694 | int type) | ||
695 | { | ||
696 | create_mem_hole(memory_chunk, addr, size, type); | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * When kdump is enabled, we have to ensure that no memory from | 680 | * When kdump is enabled, we have to ensure that no memory from |
701 | * the area [0 - crashkernel memory size] and | 681 | * the area [0 - crashkernel memory size] and |
702 | * [crashk_res.start - crashk_res.end] is set offline. | 682 | * [crashk_res.start - crashk_res.end] is set offline. |
@@ -727,16 +707,22 @@ static struct notifier_block kdump_mem_nb = {
727 | static void reserve_oldmem(void) | 707 | static void reserve_oldmem(void) |
728 | { | 708 | { |
729 | #ifdef CONFIG_CRASH_DUMP | 709 | #ifdef CONFIG_CRASH_DUMP |
710 | unsigned long real_size = 0; | ||
711 | int i; | ||
712 | |||
730 | if (!OLDMEM_BASE) | 713 | if (!OLDMEM_BASE) |
731 | return; | 714 | return; |
715 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
716 | struct mem_chunk *chunk = &memory_chunk[i]; | ||
732 | 717 | ||
733 | reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM); | 718 | real_size = max(real_size, chunk->addr + chunk->size); |
734 | reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE, | 719 | } |
735 | CHUNK_OLDMEM); | 720 | create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE); |
736 | if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size) | 721 | create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE); |
722 | if (OLDMEM_BASE + OLDMEM_SIZE == real_size) | ||
737 | saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; | 723 | saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; |
738 | else | 724 | else |
739 | saved_max_pfn = PFN_DOWN(real_memory_size) - 1; | 725 | saved_max_pfn = PFN_DOWN(real_size) - 1; |
740 | #endif | 726 | #endif |
741 | } | 727 | } |
742 | 728 | ||
@@ -775,7 +761,7 @@ static void __init reserve_crashkernel(void)
775 | crashk_res.start = crash_base; | 761 | crashk_res.start = crash_base; |
776 | crashk_res.end = crash_base + crash_size - 1; | 762 | crashk_res.end = crash_base + crash_size - 1; |
777 | insert_resource(&iomem_resource, &crashk_res); | 763 | insert_resource(&iomem_resource, &crashk_res); |
778 | reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK); | 764 | create_mem_hole(memory_chunk, crash_base, crash_size); |
779 | pr_info("Reserving %lluMB of memory at %lluMB " | 765 | pr_info("Reserving %lluMB of memory at %lluMB " |
780 | "for crashkernel (System RAM: %luMB)\n", | 766 | "for crashkernel (System RAM: %luMB)\n", |
781 | crash_size >> 20, crash_base >> 20, memory_end >> 20); | 767 | crash_size >> 20, crash_base >> 20, memory_end >> 20); |
@@ -847,11 +833,10 @@ static void __init setup_memory(void)
847 | * Register RAM areas with the bootmem allocator. | 833 | * Register RAM areas with the bootmem allocator. |
848 | */ | 834 | */ |
849 | 835 | ||
850 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 836 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
851 | unsigned long start_chunk, end_chunk, pfn; | 837 | unsigned long start_chunk, end_chunk, pfn; |
852 | 838 | ||
853 | if (memory_chunk[i].type != CHUNK_READ_WRITE && | 839 | if (!memory_chunk[i].size) |
854 | memory_chunk[i].type != CHUNK_CRASHK) | ||
855 | continue; | 840 | continue; |
856 | start_chunk = PFN_DOWN(memory_chunk[i].addr); | 841 | start_chunk = PFN_DOWN(memory_chunk[i].addr); |
857 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); | 842 | end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); |
@@ -1067,12 +1052,12 @@ void __init setup_arch(char **cmdline_p)
1067 | memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); | 1052 | memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); |
1068 | 1053 | ||
1069 | parse_early_param(); | 1054 | parse_early_param(); |
1070 | 1055 | detect_memory_layout(memory_chunk, memory_end); | |
1071 | os_info_init(); | 1056 | os_info_init(); |
1072 | setup_ipl(); | 1057 | setup_ipl(); |
1058 | reserve_oldmem(); | ||
1073 | setup_memory_end(); | 1059 | setup_memory_end(); |
1074 | setup_addressing_mode(); | 1060 | setup_addressing_mode(); |
1075 | reserve_oldmem(); | ||
1076 | reserve_crashkernel(); | 1061 | reserve_crashkernel(); |
1077 | setup_memory(); | 1062 | setup_memory(); |
1078 | setup_resources(); | 1063 | setup_resources(); |
@@ -1097,5 +1082,5 @@ void __init setup_arch(char **cmdline_p)
1097 | set_preferred_console(); | 1082 | set_preferred_console(); |
1098 | 1083 | ||
1099 | /* Setup zfcpdump support */ | 1084 | /* Setup zfcpdump support */ |
1100 | setup_zfcpdump(console_devno); | 1085 | setup_zfcpdump(); |
1101 | } | 1086 | } |
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 466fb3383960..50ea137a2d3c 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -89,16 +89,19 @@ static unsigned long follow_table(struct mm_struct *mm,
89 | if (unlikely(*table & _REGION_ENTRY_INV)) | 89 | if (unlikely(*table & _REGION_ENTRY_INV)) |
90 | return -0x39UL; | 90 | return -0x39UL; |
91 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 91 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
92 | /* fallthrough */ | ||
92 | case _ASCE_TYPE_REGION2: | 93 | case _ASCE_TYPE_REGION2: |
93 | table = table + ((address >> 42) & 0x7ff); | 94 | table = table + ((address >> 42) & 0x7ff); |
94 | if (unlikely(*table & _REGION_ENTRY_INV)) | 95 | if (unlikely(*table & _REGION_ENTRY_INV)) |
95 | return -0x3aUL; | 96 | return -0x3aUL; |
96 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 97 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
98 | /* fallthrough */ | ||
97 | case _ASCE_TYPE_REGION3: | 99 | case _ASCE_TYPE_REGION3: |
98 | table = table + ((address >> 31) & 0x7ff); | 100 | table = table + ((address >> 31) & 0x7ff); |
99 | if (unlikely(*table & _REGION_ENTRY_INV)) | 101 | if (unlikely(*table & _REGION_ENTRY_INV)) |
100 | return -0x3bUL; | 102 | return -0x3bUL; |
101 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 103 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
104 | /* fallthrough */ | ||
102 | case _ASCE_TYPE_SEGMENT: | 105 | case _ASCE_TYPE_SEGMENT: |
103 | table = table + ((address >> 20) & 0x7ff); | 106 | table = table + ((address >> 20) & 0x7ff); |
104 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) | 107 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) |
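The added fallthrough comments document that the switch is an intentional cascade: translation enters at the address space's top table level and falls through every lower level. For reference, the per-level index extraction matching the shifts above (the REGION1 step and the final page-table step sit just outside this hunk):

/* 64-bit address decomposition as used by follow_table():
 *   region-first  index: (address >> 53) & 0x7ff
 *   region-second index: (address >> 42) & 0x7ff
 *   region-third  index: (address >> 31) & 0x7ff
 *   segment       index: (address >> 20) & 0x7ff
 *   page          index: (address >> 12) & 0xff
 * e.g. with an _ASCE_TYPE_REGION3 address space, address 0x4080304000
 * walks region-third entry 0x81, segment entry 0x003, page entry 0x04.
 */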
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 640bea12303c..839592ca265c 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -3,7 +3,7 @@
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o | 5 | obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o |
6 | obj-y += page-states.o gup.o extable.o pageattr.o | 6 | obj-y += page-states.o gup.o extable.o pageattr.o mem_detect.o |
7 | 7 | ||
8 | obj-$(CONFIG_CMM) += cmm.o | 8 | obj-$(CONFIG_CMM) += cmm.o |
9 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 9 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 0b09b2342302..89ebae4008f2 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -21,6 +21,7 @@
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/memory.h> | ||
24 | #include <linux/pfn.h> | 25 | #include <linux/pfn.h> |
25 | #include <linux/poison.h> | 26 | #include <linux/poison.h> |
26 | #include <linux/initrd.h> | 27 | #include <linux/initrd.h> |
@@ -36,6 +37,7 @@
36 | #include <asm/tlbflush.h> | 37 | #include <asm/tlbflush.h> |
37 | #include <asm/sections.h> | 38 | #include <asm/sections.h> |
38 | #include <asm/ctl_reg.h> | 39 | #include <asm/ctl_reg.h> |
40 | #include <asm/sclp.h> | ||
39 | 41 | ||
40 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); | 42 | pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); |
41 | 43 | ||
@@ -214,6 +216,15 @@ int arch_add_memory(int nid, u64 start, u64 size)
214 | return rc; | 216 | return rc; |
215 | } | 217 | } |
216 | 218 | ||
219 | unsigned long memory_block_size_bytes(void) | ||
220 | { | ||
221 | /* | ||
222 | * Make sure the memory block size is always greater | ||
223 | * or equal than the memory increment size. | ||
224 | */ | ||
225 | return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp_get_rzm()); | ||
226 | } | ||
227 | |||
217 | #ifdef CONFIG_MEMORY_HOTREMOVE | 228 | #ifdef CONFIG_MEMORY_HOTREMOVE |
218 | int arch_remove_memory(u64 start, u64 size) | 229 | int arch_remove_memory(u64 start, u64 size) |
219 | { | 230 | { |
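The new memory_block_size_bytes() overrides the weak default in the memory hotplug core, which uses it to size the /sys/devices/system/memory/ blocks. A worked example with assumed values:

/* Assume MIN_MEMORY_BLOCK_SIZE (sparsemem section size) = 256MB:
 *   sclp_get_rzm() == 512MB  ->  memory_block_size_bytes() == 512MB
 *   sclp_get_rzm() == 128MB  ->  memory_block_size_bytes() == 256MB
 * Either way a memory block covers whole storage increments, so
 * block-level online/offline always operates on increment boundaries.
 */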
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
new file mode 100644
index 000000000000..3cbd3b8bf311
--- /dev/null
+++ b/arch/s390/mm/mem_detect.c
@@ -0,0 +1,134 @@
1 | /* | ||
2 | * Copyright IBM Corp. 2008, 2009 | ||
3 | * | ||
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <asm/ipl.h> | ||
10 | #include <asm/sclp.h> | ||
11 | #include <asm/setup.h> | ||
12 | |||
13 | #define ADDR2G (1ULL << 31) | ||
14 | |||
15 | static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize) | ||
16 | { | ||
17 | unsigned long long memsize, rnmax, rzm; | ||
18 | unsigned long addr = 0, size; | ||
19 | int i = 0, type; | ||
20 | |||
21 | rzm = sclp_get_rzm(); | ||
22 | rnmax = sclp_get_rnmax(); | ||
23 | memsize = rzm * rnmax; | ||
24 | if (!rzm) | ||
25 | rzm = 1ULL << 17; | ||
26 | if (sizeof(long) == 4) { | ||
27 | rzm = min(ADDR2G, rzm); | ||
28 | memsize = memsize ? min(ADDR2G, memsize) : ADDR2G; | ||
29 | } | ||
30 | if (maxsize) | ||
31 | memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize; | ||
32 | do { | ||
33 | size = 0; | ||
34 | type = tprot(addr); | ||
35 | do { | ||
36 | size += rzm; | ||
37 | if (memsize && addr + size >= memsize) | ||
38 | break; | ||
39 | } while (type == tprot(addr + size)); | ||
40 | if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { | ||
41 | if (memsize && (addr + size > memsize)) | ||
42 | size = memsize - addr; | ||
43 | chunk[i].addr = addr; | ||
44 | chunk[i].size = size; | ||
45 | chunk[i].type = type; | ||
46 | i++; | ||
47 | } | ||
48 | addr += size; | ||
49 | } while (addr < memsize && i < MEMORY_CHUNKS); | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * detect_memory_layout - fill mem_chunk array with memory layout data | ||
54 | * @chunk: mem_chunk array to be filled | ||
55 | * @maxsize: maximum address where memory detection should stop | ||
56 | * | ||
57 | * Fills the passed in memory chunk array with the memory layout of the | ||
58 | * machine. The array must have a size of at least MEMORY_CHUNKS and will | ||
59 | * be fully initialized afterwards. | ||
60 | * If the maxsize parameter has a value > 0 memory detection will stop at | ||
61 | * that address. It is guaranteed that all chunks have an ending address | ||
62 | * that is smaller than maxsize. | ||
63 | * If maxsize is 0 all memory will be detected. | ||
64 | */ | ||
65 | void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize) | ||
66 | { | ||
67 | unsigned long flags, flags_dat, cr0; | ||
68 | |||
69 | memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk)); | ||
70 | /* | ||
71 | * Disable IRQs, DAT and low address protection so tprot does the | ||
72 | * right thing and we don't get scheduled away with low address | ||
73 | * protection disabled. | ||
74 | */ | ||
75 | local_irq_save(flags); | ||
76 | flags_dat = __arch_local_irq_stnsm(0xfb); | ||
77 | /* | ||
78 | * In case DAT was enabled, make sure chunk doesn't reside in vmalloc | ||
79 | * space. We have disabled DAT and any access to vmalloc area will | ||
80 | * cause an exception. | ||
81 | * If DAT was disabled we are called from early ipl code. | ||
82 | */ | ||
83 | if (test_bit(5, &flags_dat)) { | ||
84 | if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk))) | ||
85 | goto out; | ||
86 | } | ||
87 | __ctl_store(cr0, 0, 0); | ||
88 | __ctl_clear_bit(0, 28); | ||
89 | find_memory_chunks(chunk, maxsize); | ||
90 | __ctl_load(cr0, 0, 0); | ||
91 | out: | ||
92 | __arch_local_irq_ssm(flags_dat); | ||
93 | local_irq_restore(flags); | ||
94 | } | ||
95 | EXPORT_SYMBOL(detect_memory_layout); | ||
96 | |||
97 | /* | ||
98 | * Create memory hole with given address and size. | ||
99 | */ | ||
100 | void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, | ||
101 | unsigned long size) | ||
102 | { | ||
103 | int i; | ||
104 | |||
105 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
106 | struct mem_chunk *chunk = &mem_chunk[i]; | ||
107 | |||
108 | if (chunk->size == 0) | ||
109 | continue; | ||
110 | if (addr > chunk->addr + chunk->size) | ||
111 | continue; | ||
112 | if (addr + size <= chunk->addr) | ||
113 | continue; | ||
114 | /* Split */ | ||
115 | if ((addr > chunk->addr) && | ||
116 | (addr + size < chunk->addr + chunk->size)) { | ||
117 | struct mem_chunk *new = chunk + 1; | ||
118 | |||
119 | memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new)); | ||
120 | new->addr = addr + size; | ||
121 | new->size = chunk->addr + chunk->size - new->addr; | ||
122 | chunk->size = addr - chunk->addr; | ||
123 | continue; | ||
124 | } else if ((addr <= chunk->addr) && | ||
125 | (addr + size >= chunk->addr + chunk->size)) { | ||
126 | memset(chunk, 0 , sizeof(*chunk)); | ||
127 | } else if (addr + size < chunk->addr + chunk->size) { | ||
128 | chunk->size = chunk->addr + chunk->size - addr - size; | ||
129 | chunk->addr = addr + size; | ||
130 | } else if (addr > chunk->addr) { | ||
131 | chunk->size = addr - chunk->addr; | ||
132 | } | ||
133 | } | ||
134 | } | ||
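The rewritten create_mem_hole() replaces the old helper functions with four overlap cases: erase a fully covered chunk, trim its head, trim its tail, or split it. A worked example of the split case under assumed values:

/* One 1GB chunk, then punch a 128MB hole at the 256MB mark:
 *
 *   before: [0] addr=0x00000000 size=0x40000000
 *
 *   create_mem_hole(chunk, 0x10000000, 0x08000000);
 *
 *   after:  [0] addr=0x00000000 size=0x10000000
 *           [1] addr=0x18000000 size=0x28000000
 *
 * The memmove() shifts all following entries down one slot and drops
 * the last array element, so MEMORY_CHUNKS must leave headroom for
 * the chunks created by splits. */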
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index bd954e96f51c..7805ddca833d 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -454,9 +454,8 @@ unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
454 | } | 454 | } |
455 | EXPORT_SYMBOL_GPL(gmap_translate); | 455 | EXPORT_SYMBOL_GPL(gmap_translate); |
456 | 456 | ||
457 | static int gmap_connect_pgtable(unsigned long segment, | 457 | static int gmap_connect_pgtable(unsigned long address, unsigned long segment, |
458 | unsigned long *segment_ptr, | 458 | unsigned long *segment_ptr, struct gmap *gmap) |
459 | struct gmap *gmap) | ||
460 | { | 459 | { |
461 | unsigned long vmaddr; | 460 | unsigned long vmaddr; |
462 | struct vm_area_struct *vma; | 461 | struct vm_area_struct *vma; |
@@ -491,7 +490,9 @@ static int gmap_connect_pgtable(unsigned long segment,
491 | /* Link gmap segment table entry location to page table. */ | 490 | /* Link gmap segment table entry location to page table. */ |
492 | page = pmd_page(*pmd); | 491 | page = pmd_page(*pmd); |
493 | mp = (struct gmap_pgtable *) page->index; | 492 | mp = (struct gmap_pgtable *) page->index; |
493 | rmap->gmap = gmap; | ||
494 | rmap->entry = segment_ptr; | 494 | rmap->entry = segment_ptr; |
495 | rmap->vmaddr = address; | ||
495 | spin_lock(&mm->page_table_lock); | 496 | spin_lock(&mm->page_table_lock); |
496 | if (*segment_ptr == segment) { | 497 | if (*segment_ptr == segment) { |
497 | list_add(&rmap->list, &mp->mapper); | 498 | list_add(&rmap->list, &mp->mapper); |
@@ -553,7 +554,7 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
553 | if (!(segment & _SEGMENT_ENTRY_RO)) | 554 | if (!(segment & _SEGMENT_ENTRY_RO)) |
554 | /* Nothing mapped in the gmap address space. */ | 555 | /* Nothing mapped in the gmap address space. */ |
555 | break; | 556 | break; |
556 | rc = gmap_connect_pgtable(segment, segment_ptr, gmap); | 557 | rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap); |
557 | if (rc) | 558 | if (rc) |
558 | return rc; | 559 | return rc; |
559 | } | 560 | } |
@@ -619,6 +620,118 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
619 | } | 620 | } |
620 | EXPORT_SYMBOL_GPL(gmap_discard); | 621 | EXPORT_SYMBOL_GPL(gmap_discard); |
621 | 622 | ||
623 | static LIST_HEAD(gmap_notifier_list); | ||
624 | static DEFINE_SPINLOCK(gmap_notifier_lock); | ||
625 | |||
626 | /** | ||
627 | * gmap_register_ipte_notifier - register a pte invalidation callback | ||
628 | * @nb: pointer to the gmap notifier block | ||
629 | */ | ||
630 | void gmap_register_ipte_notifier(struct gmap_notifier *nb) | ||
631 | { | ||
632 | spin_lock(&gmap_notifier_lock); | ||
633 | list_add(&nb->list, &gmap_notifier_list); | ||
634 | spin_unlock(&gmap_notifier_lock); | ||
635 | } | ||
636 | EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier); | ||
637 | |||
638 | /** | ||
639 | * gmap_unregister_ipte_notifier - remove a pte invalidation callback | ||
640 | * @nb: pointer to the gmap notifier block | ||
641 | */ | ||
642 | void gmap_unregister_ipte_notifier(struct gmap_notifier *nb) | ||
643 | { | ||
644 | spin_lock(&gmap_notifier_lock); | ||
645 | list_del_init(&nb->list); | ||
646 | spin_unlock(&gmap_notifier_lock); | ||
647 | } | ||
648 | EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier); | ||
649 | |||
650 | /** | ||
651 | * gmap_ipte_notify - mark a range of ptes for invalidation notification | ||
652 | * @gmap: pointer to guest mapping meta data structure | ||
653 | * @address: virtual address in the guest address space | ||
654 | * @len: size of area | ||
655 | * | ||
656 | * Returns 0 if for each page in the given range a gmap mapping exists and | ||
657 | * the invalidation notification could be set. If the gmap mapping is missing | ||
658 | * for one or more pages -EFAULT is returned. If no memory could be allocated | ||
659 | * -ENOMEM is returned. This function establishes missing page table entries. | ||
660 | */ | ||
661 | int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len) | ||
662 | { | ||
663 | unsigned long addr; | ||
664 | spinlock_t *ptl; | ||
665 | pte_t *ptep, entry; | ||
666 | pgste_t pgste; | ||
667 | int rc = 0; | ||
668 | |||
669 | if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK)) | ||
670 | return -EINVAL; | ||
671 | down_read(&gmap->mm->mmap_sem); | ||
672 | while (len) { | ||
673 | /* Convert gmap address and connect the page tables */ | ||
674 | addr = __gmap_fault(start, gmap); | ||
675 | if (IS_ERR_VALUE(addr)) { | ||
676 | rc = addr; | ||
677 | break; | ||
678 | } | ||
679 | /* Get the page mapped */ | ||
680 | if (get_user_pages(current, gmap->mm, addr, 1, 1, 0, | ||
681 | NULL, NULL) != 1) { | ||
682 | rc = -EFAULT; | ||
683 | break; | ||
684 | } | ||
685 | /* Walk the process page table, lock and get pte pointer */ | ||
686 | ptep = get_locked_pte(gmap->mm, addr, &ptl); | ||
687 | if (unlikely(!ptep)) | ||
688 | continue; | ||
689 | /* Set notification bit in the pgste of the pte */ | ||
690 | entry = *ptep; | ||
691 | if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) { | ||
692 | pgste = pgste_get_lock(ptep); | ||
693 | pgste_val(pgste) |= RCP_IN_BIT; | ||
694 | pgste_set_unlock(ptep, pgste); | ||
695 | start += PAGE_SIZE; | ||
696 | len -= PAGE_SIZE; | ||
697 | } | ||
698 | spin_unlock(ptl); | ||
699 | } | ||
700 | up_read(&gmap->mm->mmap_sem); | ||
701 | return rc; | ||
702 | } | ||
703 | EXPORT_SYMBOL_GPL(gmap_ipte_notify); | ||
704 | |||
705 | /** | ||
706 | * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte. | ||
707 | * @mm: pointer to the process mm_struct | ||
708 | * @addr: virtual address in the process address space | ||
709 | * @pte: pointer to the page table entry | ||
710 | * | ||
711 | * This function is assumed to be called with the page table lock held | ||
712 | * for the pte to notify. | ||
713 | */ | ||
714 | void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte) | ||
715 | { | ||
716 | unsigned long segment_offset; | ||
717 | struct gmap_notifier *nb; | ||
718 | struct gmap_pgtable *mp; | ||
719 | struct gmap_rmap *rmap; | ||
720 | struct page *page; | ||
721 | |||
722 | segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t)); | ||
723 | segment_offset = segment_offset * (4096 / sizeof(pte_t)); | ||
724 | page = pfn_to_page(__pa(pte) >> PAGE_SHIFT); | ||
725 | mp = (struct gmap_pgtable *) page->index; | ||
726 | spin_lock(&gmap_notifier_lock); | ||
727 | list_for_each_entry(rmap, &mp->mapper, list) { | ||
728 | list_for_each_entry(nb, &gmap_notifier_list, list) | ||
729 | nb->notifier_call(rmap->gmap, | ||
730 | rmap->vmaddr + segment_offset); | ||
731 | } | ||
732 | spin_unlock(&gmap_notifier_lock); | ||
733 | } | ||
734 | |||
622 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | 735 | static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, |
623 | unsigned long vmaddr) | 736 | unsigned long vmaddr) |
624 | { | 737 | { |
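The two segment_offset lines in gmap_do_ipte_notify() recover the index of the pte within its 256-entry page table and scale it to a byte offset inside the guest's 1MB segment. A worked example with assumed numbers:

/* A pte at byte offset 0x18 inside its page table (entry index 3):
 *
 *   segment_offset = 0x18 & (255 * 8)  == 0x18
 *   segment_offset = 0x18 * (4096 / 8) == 0x3000
 *
 * so every rmap on the mapper list is notified at
 * rmap->vmaddr + 0x3000, i.e. guest page index 3 of that segment. */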
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 35837054f734..8b268fcc4612 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -375,9 +375,8 @@
375 | 375 | ||
376 | ro_start = PFN_ALIGN((unsigned long)&_stext); | 376 | ro_start = PFN_ALIGN((unsigned long)&_stext); |
377 | ro_end = (unsigned long)&_eshared & PAGE_MASK; | 377 | ro_end = (unsigned long)&_eshared & PAGE_MASK; |
378 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | 378 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
379 | if (memory_chunk[i].type == CHUNK_CRASHK || | 379 | if (!memory_chunk[i].size) |
380 | memory_chunk[i].type == CHUNK_OLDMEM) | ||
381 | continue; | 380 | continue; |
382 | start = memory_chunk[i].addr; | 381 | start = memory_chunk[i].addr; |
383 | end = memory_chunk[i].addr + memory_chunk[i].size; | 382 | end = memory_chunk[i].addr + memory_chunk[i].size; |
@@ -412,9 +411,6 @@ static int __init vmem_convert_memory_chunk(void)
412 | for (i = 0; i < MEMORY_CHUNKS; i++) { | 411 | for (i = 0; i < MEMORY_CHUNKS; i++) { |
413 | if (!memory_chunk[i].size) | 412 | if (!memory_chunk[i].size) |
414 | continue; | 413 | continue; |
415 | if (memory_chunk[i].type == CHUNK_CRASHK || | ||
416 | memory_chunk[i].type == CHUNK_OLDMEM) | ||
417 | continue; | ||
418 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); | 414 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); |
419 | if (!seg) | 415 | if (!seg) |
420 | panic("Out of memory...\n"); | 416 | panic("Out of memory...\n"); |