Diffstat (limited to 'arch/powerpc/mm/pgtable-book3s64.c')
-rw-r--r--	arch/powerpc/mm/pgtable-book3s64.c	| 88
1 file changed, 3 insertions(+), 85 deletions(-)
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 9f93c9f985c5..f3c31f5e1026 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -244,6 +244,9 @@ static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
 {
 	void *pmd_frag, *ret;
 
+	if (PMD_FRAG_NR == 1)
+		return NULL;
+
 	spin_lock(&mm->page_table_lock);
 	ret = mm->context.pmd_frag;
 	if (ret) {
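The three added lines give get_pmd_from_cache() an early exit for configurations where a single PMD fragment spans the whole backing page. In that case the allocator never parks leftover fragments in mm->context.pmd_frag (the PTE-side twin of that logic is visible in the code removed below), so the lookup can only miss and the spin_lock()/spin_unlock() round trip on mm->page_table_lock is pure overhead. The user-space sketch below models the fragment handout with illustrative sizes (64K pages carved into 4K fragments), not the real PMD_FRAG_SIZE/PMD_FRAG_NR constants from the book3s64 headers:

/*
 * Minimal user-space model of the fragment cache, assuming illustrative
 * PAGE_SIZE/FRAG_SIZE values rather than the kernel's PMD_FRAG_* macros.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE	0x10000UL	/* 64K, illustrative */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define FRAG_SIZE	0x1000UL	/* 4K fragments, illustrative */
#define FRAG_NR		(PAGE_SIZE / FRAG_SIZE)

static void *frag_cache;		/* stands in for mm->context.pmd_frag */

/* Mirrors get_pmd_from_cache(): hand out the next slice of the page. */
static void *get_frag_from_cache(void)
{
	void *ret = frag_cache;

	if (ret) {
		void *next = (char *)ret + FRAG_SIZE;
		/* Page exhausted once the next slice wraps to a page boundary. */
		if (((uintptr_t)next & ~PAGE_MASK) == 0)
			next = NULL;
		frag_cache = next;
	}
	return ret;
}

int main(void)
{
	/* Stands in for allocating a fresh, page-aligned backing page. */
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!page)
		return 1;
	/* First fragment goes to the caller; the rest are parked in the
	 * cache. With FRAG_NR == 1 there is nothing left to park, which is
	 * exactly the case the new early return short-circuits. */
	printf("fragment 0: %p\n", page);
	if (FRAG_NR > 1)
		frag_cache = (char *)page + FRAG_SIZE;

	for (unsigned int i = 1; i <= FRAG_NR; i++)
		printf("fragment %u: %p\n", i, get_frag_from_cache());

	free(page);
	return 0;
}

Running this prints fifteen cache hits followed by a NULL miss once the page is used up, at which point the kernel would fall back to allocating a fresh backing page.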
@@ -322,91 +325,6 @@ void pmd_fragment_free(unsigned long *pmd)
 	}
 }
 
-static pte_t *get_pte_from_cache(struct mm_struct *mm)
-{
-	void *pte_frag, *ret;
-
-	spin_lock(&mm->page_table_lock);
-	ret = mm->context.pte_frag;
-	if (ret) {
-		pte_frag = ret + PTE_FRAG_SIZE;
-		/*
-		 * If we have taken up all the fragments mark PTE page NULL
-		 */
-		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
-			pte_frag = NULL;
-		mm->context.pte_frag = pte_frag;
-	}
-	spin_unlock(&mm->page_table_lock);
-	return (pte_t *)ret;
-}
-
-static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
-{
-	void *ret = NULL;
-	struct page *page;
-
-	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
-			return NULL;
-		if (!pgtable_page_ctor(page)) {
-			__free_page(page);
-			return NULL;
-		}
-	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
-			return NULL;
-	}
-
-	atomic_set(&page->pt_frag_refcount, 1);
-
-	ret = page_address(page);
-	/*
-	 * if we support only one fragment just return the
-	 * allocated page.
-	 */
-	if (PTE_FRAG_NR == 1)
-		return ret;
-	spin_lock(&mm->page_table_lock);
-	/*
-	 * If we find pgtable_page set, we return
-	 * the allocated page with single fragement
-	 * count.
-	 */
-	if (likely(!mm->context.pte_frag)) {
-		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
-		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
-	}
-	spin_unlock(&mm->page_table_lock);
-
-	return (pte_t *)ret;
-}
-
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
-{
-	pte_t *pte;
-
-	pte = get_pte_from_cache(mm);
-	if (pte)
-		return pte;
-
-	return __alloc_for_ptecache(mm, kernel);
-}
-
-void pte_fragment_free(unsigned long *table, int kernel)
-{
-	struct page *page = virt_to_page(table);
-
-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
-		if (!kernel)
-			pgtable_page_dtor(page);
-		__free_page(page);
-	}
-}
-
 static inline void pgtable_free(void *table, int index)
 {
 	switch (index) {
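
The removed functions implemented PTE fragment allocation: one backing page is shared by PTE_FRAG_NR page-table fragments, with page->pt_frag_refcount tracking how many are live. This view is limited to pgtable-book3s64.c, so the diff shows only the removal, not where the code moves to. The sketch below models that refcount lifecycle with C11 atomics standing in for the kernel's atomic_t; the struct, function names, and FRAG_NR value are illustrative, not the kernel API:

/*
 * User-space model of the removed refcounting scheme, assuming an
 * illustrative FRAG_NR and C11 atomics in place of atomic_t.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAG_NR 4	/* illustrative; the kernel derives this from the page size */

struct frag_page {
	atomic_int pt_frag_refcount;	/* models page->pt_frag_refcount */
	char slices[FRAG_NR][1024];	/* the fragments themselves */
};

/* Mirrors __alloc_for_ptecache(): a new page starts with one reference. */
static struct frag_page *alloc_frag_page(int parked_in_cache)
{
	struct frag_page *page = calloc(1, sizeof(*page));

	if (!page)
		return NULL;
	atomic_init(&page->pt_frag_refcount, 1);
	/* If the leftover fragments go into the per-mm cache, take a
	 * reference for each of them up front. */
	if (parked_in_cache && FRAG_NR > 1)
		atomic_store(&page->pt_frag_refcount, FRAG_NR);
	return page;
}

/* Mirrors pte_fragment_free(): the last reference frees the backing page. */
static void frag_free(struct frag_page *page)
{
	if (atomic_fetch_sub(&page->pt_frag_refcount, 1) == 1) {
		printf("refcount hit zero, freeing backing page\n");
		free(page);
	}
}

int main(void)
{
	struct frag_page *page = alloc_frag_page(1);

	/* Each fragment user drops one reference; the final drop frees. */
	for (int i = 0; i < FRAG_NR; i++)
		frag_free(page);
	return 0;
}

The asymmetric refcount initialisation is the notable design point: the count starts at 1 for the fragment handed straight back to the caller and is only raised to FRAG_NR when the remaining fragments are actually parked in the per-mm cache, so a page that loses the race to be cached is simply freed by its single user.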