Diffstat (limited to 'arch/arm/mm/pgd.c')
-rw-r--r--  arch/arm/mm/pgd.c  101
1 file changed, 101 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
new file mode 100644
index 000000000000..20c1b0df75f2
--- /dev/null
+++ b/arch/arm/mm/pgd.c
@@ -0,0 +1,101 @@
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

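/*
 * Index of the first pgd entry that maps kernel space: entries below
 * this cover user space and are private to each mm; entries from here
 * up are shared with init_mm's kernel and I/O mappings.
 */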
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * Need to allocate a 16K (order-2) block of pages for the ARM
 * level 1 page table.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

        /*
         * Copy over the kernel and IO PGD entries
         */
        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
               (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

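        /*
         * Write the copied entries back out of the data cache so the
         * MMU's hardware page table walk sees them in memory.
         */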
        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        if (!vectors_high()) {
                /*
                 * On ARM, the first page must always be allocated since
                 * it contains the machine vectors (when they are not
                 * remapped to the high vectors page at 0xffff0000).
                 */
                new_pmd = pmd_alloc(mm, new_pgd, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pmd = pmd_offset(init_pgd, 0);
                init_pte = pte_offset_map_nested(init_pmd, 0);
                set_pte(new_pte, *init_pte);
                pte_unmap_nested(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(new_pmd);
no_pmd:
        free_pages((unsigned long)new_pgd, 2);
no_pgd:
        return NULL;
}

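/*
 * Free a pgd allocated by get_pgd_slow(), releasing the first-page
 * pte/pmd if they were set up for low vectors.
 */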
void free_pgd_slow(pgd_t *pgd)
{
        pmd_t *pmd;
        struct page *pte;

        if (!pgd)
                return;

        /* pgd is always present and good */
        pmd = pmd_off(pgd, 0);
        if (pmd_none(*pmd))
                goto free;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto free;
        }

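        /*
         * Tear down the first-page mapping installed by get_pgd_slow():
         * clear the pmd, drop the page table accounting, and free the
         * pte page and pmd before releasing the pgd itself.
         */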
        pte = pmd_page(*pmd);
        pmd_clear(pmd);
        dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
        pte_lock_deinit(pte);
        pte_free(pte);
        pmd_free(pmd);
free:
        free_pages((unsigned long) pgd, 2);
}
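
For context, these two slow-path helpers are what the generic pgd_alloc()/pgd_free() hooks resolve to on ARM. The sketch below shows how kernels of this vintage are assumed to wire them up in include/asm-arm/pgalloc.h; it is illustrative only and not part of this commit.

        /* Assumed wiring (not part of this change): route the generic hooks
         * to the slow-path allocator above, so every new mm gets its own
         * 16K first-level table with the kernel entries pre-copied. */
        #define pgd_alloc(mm)		get_pgd_slow(mm)
        #define pgd_free(pgd)		free_pgd_slow(pgd)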