author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-27 17:16:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-27 17:16:30 -0400
commit		55bea71ed549398133732425a631b2268446526c (patch)
tree		7546e1508c295345c5a30d0f7732b03e8502ac54 /arch/s390/mm
parent		7618c6a17f6afae973e5c7b73e1ca80d0c8b8c2a (diff)
parent		4a65429457a5d271dd3b00598b3ec75fe8b5103c (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 fixes from Martin Schwidefsky:
"A few more s390 patches for 4.9:
- a fix for an overflow in the dasd driver reported by UBSAN
- fix a regression so that hotplug memory is added to ZONE_MOVABLE again
- add ignore defines for the pkey system calls
- fix the output of the merged stack tracer
- replace printk with pr_cont in arch/s390 where appropriate (the pattern is sketched below the commit list)
- remove the arch specific return_address function again
- ignore reserved channel paths at boot time
- add a missing hugetlb_bad_size call to the arch backend"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/mm: fix zone calculation in arch_add_memory()
s390/dumpstack: use pr_cont within show_stack and die
s390/dumpstack: get rid of return_address again
s390/disassambler: use pr_cont where appropriate
s390/dumpstack: use pr_cont where appropriate
s390/dumpstack: restore reliable indicator for call traces
s390/mm: use hugetlb_bad_size()
s390/cio: don't register chpids in reserved state
s390: ignore pkey system calls
s390/dasd: avoid undefined behaviour
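As referenced in the pull message above, several of these commits convert fragmented console output from printk to pr_cont. pr_cont() expands to printk(KERN_CONT ...), appending to the log line currently being assembled instead of opening a new one. A minimal sketch of the conversion pattern follows; the function and its register array are invented for illustration and are not code from these commits:

/*
 * Hypothetical illustration of the printk -> pr_cont pattern used by
 * the dumpstack and disassembler commits: open the line once with
 * printk(), then append fragments with pr_cont() so they end up on a
 * single line of output.
 */
#include <linux/printk.h>

static void show_gprs(unsigned long *gprs, int num)
{
	int i;

	printk(KERN_DEFAULT "GPRS:");		/* opens the log line */
	for (i = 0; i < num; i++)
		pr_cont(" %016lx", gprs[i]);	/* appends to the same line */
	pr_cont("\n");				/* terminates the line */
}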
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/hugetlbpage.c	 1
-rw-r--r--	arch/s390/mm/init.c	38
2 files changed, 22 insertions, 17 deletions
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index cd404aa3931c..4a0c5bce3552 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
 	} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	} else {
+		hugetlb_bad_size();
 		pr_err("hugepagesz= specifies an unsupported page size %s\n",
 			string);
 		return 0;
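The single added line ties the s390 backend into generic hugetlb command-line validation: hugetlb_bad_size() flags the hugepagesz= value as unsupported, so a hugepages= count that follows it is discarded instead of being applied to the default huge page size. For reference, the generic helper in mm/hugetlb.c in this era is roughly the following (quoted from memory, treat as approximate):

/*
 * Approximate shape of the generic helper from mm/hugetlb.c (v4.9 era).
 * The flag is consulted by the hugepages= parser before it applies a
 * page count, so an unsupported hugepagesz= makes the next hugepages=
 * argument a no-op.
 */
static bool parsed_valid_hugepagesz __initdata = true;

void __init hugetlb_bad_size(void)
{
	parsed_valid_hugepagesz = false;
}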
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f56a39bd8ba6..b3e9d18f2ec6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
-	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
-	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
+	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
-	unsigned long nr_pages;
-	int rc, zone_enum;
+	pg_data_t *pgdat = NODE_DATA(nid);
+	struct zone *zone;
+	int rc, i;
 
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
 
-	while (size_pages > 0) {
-		if (start_pfn < dma_end_pfn) {
-			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
-				dma_end_pfn - start_pfn : size_pages;
-			zone_enum = ZONE_DMA;
-		} else if (start_pfn < normal_end_pfn) {
-			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
-				normal_end_pfn - start_pfn : size_pages;
-			zone_enum = ZONE_NORMAL;
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		zone = pgdat->node_zones + i;
+		if (zone_idx(zone) != ZONE_MOVABLE) {
+			/* Add range within existing zone limits, if possible */
+			zone_start_pfn = zone->zone_start_pfn;
+			zone_end_pfn = zone->zone_start_pfn +
+				       zone->spanned_pages;
 		} else {
-			nr_pages = size_pages;
-			zone_enum = ZONE_MOVABLE;
+			/* Add remaining range to ZONE_MOVABLE */
+			zone_start_pfn = start_pfn;
+			zone_end_pfn = start_pfn + size_pages;
 		}
-		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
-				 start_pfn, size_pages);
+		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+			continue;
+		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+			   zone_end_pfn - start_pfn : size_pages;
+		rc = __add_pages(nid, zone, start_pfn, nr_pages);
 		if (rc)
 			break;
 		start_pfn += nr_pages;
 		size_pages -= nr_pages;
+		if (!size_pages)
+			break;
 	}
 	if (rc)
 		vmem_remove_mapping(start, size);
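The old loop derived zone boundaries from memblock_end_of_DRAM() and MAX_DMA_ADDRESS, which stops matching reality once standby memory is supposed to land in ZONE_MOVABLE; the new loop clamps the hotplugged pfn range against each zone's actual span (zone_start_pfn .. zone_start_pfn + spanned_pages) and lets only the remainder fall through to ZONE_MOVABLE. The standalone program below sketches that splitting logic; the zone table, names, and pfn values are invented for illustration and are not s390 values:

/*
 * Standalone sketch of the range-splitting logic from the patch above.
 * The zone table is invented for illustration; the kernel reads
 * zone_start_pfn and spanned_pages from the node's struct zone array.
 */
#include <stdio.h>

struct zone_span {
	const char *name;
	unsigned long start_pfn;	/* first pfn of the zone */
	unsigned long end_pfn;		/* one past the last pfn; 0 marks the movable catch-all */
};

int main(void)
{
	struct zone_span zones[] = {
		{ "DMA",     0x00000,  0x80000 },
		{ "Normal",  0x80000, 0x100000 },
		{ "Movable", 0,       0 },	/* absorbs whatever remains */
	};
	unsigned long start_pfn = 0xc0000;	/* hotplugged range... */
	unsigned long size_pages = 0x80000;	/* ...straddles Normal and Movable */

	for (int i = 0; i < 3 && size_pages; i++) {
		unsigned long zstart = zones[i].start_pfn;
		unsigned long zend = zones[i].end_pfn;

		if (!zend) {			/* movable: take the rest */
			zstart = start_pfn;
			zend = start_pfn + size_pages;
		}
		if (start_pfn < zstart || start_pfn >= zend)
			continue;		/* range does not start in this zone */

		unsigned long nr = (start_pfn + size_pages > zend) ?
				   zend - start_pfn : size_pages;
		printf("add %#lx pages at pfn %#lx to %s\n",
		       nr, start_pfn, zones[i].name);
		start_pfn += nr;
		size_pages -= nr;
	}
	return 0;
}

With the sample values it prints one "add ..." line per overlapped zone, mirroring how the patched arch_add_memory() now calls __add_pages() once per affected zone.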