path: root/arch/s390/mm/vmem.c
author	Heiko Carstens <heiko.carstens@de.ibm.com>	2008-05-15 10:52:31 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-05-15 10:52:38 -0400
commit	2069e978d5a6e7b45d58027e3de7f879b8c5e488 (patch)
tree	e2fba2169e6d745b4cdb2e252b66dcaaacdadaeb	/arch/s390/mm/vmem.c
parent	e0a45ee0b922b998f8d6737cf6e9e69a791252b7 (diff)
[S390] sparsemem vmemmap: initialize memmap.
Let's just use the generic vmemmap_alloc_block() function, which always
returns initialized memory.

Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
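The generic helper referenced above lives in mm/sparse-vmemmap.c. As a rough, paraphrased sketch of its behaviour at the time (not the verbatim upstream source): once the slab allocator is available it allocates with __GFP_ZERO, and before that it falls back to bootmem, which also hands out zeroed memory. That is why the returned memmap blocks arrive initialized, unlike pages from the __get_free_pages() path of the s390-private vmem_alloc_pages() removed below.

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>

/*
 * Paraphrased sketch of the generic allocator in mm/sparse-vmemmap.c
 * (circa 2.6.26), not the verbatim upstream code: both paths return
 * zeroed memory, whereas the removed s390 vmem_alloc_pages() did not
 * zero pages obtained via __get_free_pages(GFP_KERNEL, order).
 */
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use it and request zeroed pages. */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	}
	/* Early boot: bootmem allocations are zeroed as well. */
	return __alloc_bootmem_node(NODE_DATA(node), size, size,
				    __pa(MAX_DMA_ADDRESS));
}

Note that the converted s390 callers request PAGE_SIZE * 4, which matches the four pages (order 2) the region and segment tables previously got from vmem_alloc_pages(2).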
Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r--	arch/s390/mm/vmem.c	19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index beccacf907f3..ea2804808f39 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,19 +27,12 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static void __ref *vmem_alloc_pages(unsigned int order)
-{
-	if (slab_is_available())
-		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
-}
-
-static inline pud_t *vmem_pud_alloc(void)
+static pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmem_alloc_pages(2);
+	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -47,12 +40,12 @@ static inline pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static inline pmd_t *vmem_pmd_alloc(void)
+static pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmem_alloc_pages(2);
+	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -60,7 +53,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	return pmd;
 }
 
-static pte_t __init_refok *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(void)
 {
 	pte_t *pte;
 
@@ -214,7 +207,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmem_alloc_pages(0));
+			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);