Diffstat (limited to 'mm/sparse-vmemmap.c')

 -rw-r--r--  mm/sparse-vmemmap.c | 159
 1 file changed, 63 insertions(+), 96 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 7bb7a4b96d74..4f2d4854f840 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -14,21 +14,8 @@
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
  *
- * Special Kconfig settings:
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
- *
- * The architecture has its own functions to populate the memory
- * map and provides a vmemmap_populate function.
- *
- * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
- *
- * The architecture provides functions to populate the pmd level
- * of the vmemmap mappings. Allowing mappings using large pages
- * where available.
- *
- * If neither are set then PAGE_SIZE mappings are generated which
- * require one PTE/TLB per PAGE_SIZE chunk of the virtual memory map.
+ * The architecture is expected to provide a vmemmap_populate() function
+ * to instantiate the mapping.
  */
 #include <linux/mm.h>
 #include <linux/mmzone.h>
@@ -60,7 +47,6 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
                 __pa(MAX_DMA_ADDRESS));
 }
 
-#ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
 void __meminit vmemmap_verify(pte_t *pte, int node,
                 unsigned long start, unsigned long end)
 {
@@ -72,103 +58,84 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
                 "page_structs\n", start, end - 1);
 }
 
-#ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
-static int __meminit vmemmap_populate_pte(pmd_t *pmd, unsigned long addr,
-                                unsigned long end, int node)
+pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 {
-        pte_t *pte;
-
-        for (pte = pte_offset_kernel(pmd, addr); addr < end;
-                                pte++, addr += PAGE_SIZE)
-                if (pte_none(*pte)) {
-                        pte_t entry;
-                        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                        if (!p)
-                                return -ENOMEM;
-
-                        entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-                        set_pte(pte, entry);
-
-                } else
-                        vmemmap_verify(pte, node, addr + PAGE_SIZE, end);
-
-        return 0;
+        pte_t *pte = pte_offset_kernel(pmd, addr);
+        if (pte_none(*pte)) {
+                pte_t entry;
+                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                if (!p)
+                        return 0;
+                entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+                set_pte_at(&init_mm, addr, pte, entry);
+        }
+        return pte;
 }
 
-int __meminit vmemmap_populate_pmd(pud_t *pud, unsigned long addr,
-                                unsigned long end, int node)
+pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
-        pmd_t *pmd;
-        int error = 0;
-        unsigned long next;
-
-        for (pmd = pmd_offset(pud, addr); addr < end && !error;
-                                pmd++, addr = next) {
-                if (pmd_none(*pmd)) {
-                        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                        if (!p)
-                                return -ENOMEM;
-
-                        pmd_populate_kernel(&init_mm, pmd, p);
-                } else
-                        vmemmap_verify((pte_t *)pmd, node,
-                                        pmd_addr_end(addr, end), end);
-                next = pmd_addr_end(addr, end);
-                error = vmemmap_populate_pte(pmd, addr, next, node);
+        pmd_t *pmd = pmd_offset(pud, addr);
+        if (pmd_none(*pmd)) {
+                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                if (!p)
+                        return 0;
+                pmd_populate_kernel(&init_mm, pmd, p);
         }
-        return error;
+        return pmd;
 }
-#endif /* CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD */
 
-static int __meminit vmemmap_populate_pud(pgd_t *pgd, unsigned long addr,
-                                unsigned long end, int node)
+pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
 {
-        pud_t *pud;
-        int error = 0;
-        unsigned long next;
-
-        for (pud = pud_offset(pgd, addr); addr < end && !error;
-                                pud++, addr = next) {
-                if (pud_none(*pud)) {
-                        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                        if (!p)
-                                return -ENOMEM;
+        pud_t *pud = pud_offset(pgd, addr);
+        if (pud_none(*pud)) {
+                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                if (!p)
+                        return 0;
+                pud_populate(&init_mm, pud, p);
+        }
+        return pud;
+}
 
-                        pud_populate(&init_mm, pud, p);
-                }
-                next = pud_addr_end(addr, end);
-                error = vmemmap_populate_pmd(pud, addr, next, node);
+pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
+{
+        pgd_t *pgd = pgd_offset_k(addr);
+        if (pgd_none(*pgd)) {
+                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                if (!p)
+                        return 0;
+                pgd_populate(&init_mm, pgd, p);
         }
-        return error;
+        return pgd;
 }
 
-int __meminit vmemmap_populate(struct page *start_page,
-                                                unsigned long nr, int node)
+int __meminit vmemmap_populate_basepages(struct page *start_page,
+                                                unsigned long size, int node)
 {
-        pgd_t *pgd;
         unsigned long addr = (unsigned long)start_page;
-        unsigned long end = (unsigned long)(start_page + nr);
-        unsigned long next;
-        int error = 0;
-
-        printk(KERN_DEBUG "[%lx-%lx] Virtual memory section"
-                " (%ld pages) node %d\n", addr, end - 1, nr, node);
-
-        for (pgd = pgd_offset_k(addr); addr < end && !error;
-                                pgd++, addr = next) {
-                if (pgd_none(*pgd)) {
-                        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
-                        if (!p)
-                                return -ENOMEM;
+        unsigned long end = (unsigned long)(start_page + size);
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
 
-                        pgd_populate(&init_mm, pgd, p);
-                }
-                next = pgd_addr_end(addr,end);
-                error = vmemmap_populate_pud(pgd, addr, next, node);
+        for (; addr < end; addr += PAGE_SIZE) {
+                pgd = vmemmap_pgd_populate(addr, node);
+                if (!pgd)
+                        return -ENOMEM;
+                pud = vmemmap_pud_populate(pgd, addr, node);
+                if (!pud)
+                        return -ENOMEM;
+                pmd = vmemmap_pmd_populate(pud, addr, node);
+                if (!pmd)
+                        return -ENOMEM;
+                pte = vmemmap_pte_populate(pmd, addr, node);
+                if (!pte)
+                        return -ENOMEM;
+                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
         }
-        return error;
+
+        return 0;
 }
-#endif /* !CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP */
 
 struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
 {
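
The updated header comment makes the contract explicit: an architecture selecting SPARSEMEM_VMEMMAP must now supply vmemmap_populate() itself. As a rough sketch (illustrative only, not part of this patch), an architecture that is content with PAGE_SIZE mappings could satisfy that contract in its arch mm code by forwarding to the generic walker added above:

/*
 * Illustrative sketch only -- not taken from this commit. An architecture
 * with no need for large-page vmemmap mappings can simply delegate to the
 * generic base-page population loop introduced in mm/sparse-vmemmap.c.
 */
int __meminit vmemmap_populate(struct page *start_page,
                                unsigned long size, int node)
{
        /* Walk pgd -> pud -> pmd -> pte, backing the map with PAGE_SIZE pages. */
        return vmemmap_populate_basepages(start_page, size, node);
}

Architectures that previously relied on CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD for large-page mappings can instead provide a vmemmap_populate() that uses vmemmap_pgd_populate() and vmemmap_pud_populate() to build the upper levels and then installs its own PMD-sized entries.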