author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
---|---|---
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/arm/mm/pgd.c |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/arm/mm/pgd.c')
-rw-r--r-- | arch/arm/mm/pgd.c | 63
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index be5f58e153bf..b2027c154b2a 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -17,14 +17,13 @@
 
 #include "mm.h"
 
-#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
-
 /*
  * need to get a 16k page for level 1
  */
-pgd_t *get_pgd_slow(struct mm_struct *mm)
+pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *new_pgd, *init_pgd;
+	pud_t *new_pud, *init_pud;
 	pmd_t *new_pmd, *init_pmd;
 	pte_t *new_pte, *init_pte;
 
@@ -32,14 +31,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	if (!new_pgd)
 		goto no_pgd;
 
-	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 
 	/*
 	 * Copy over the kernel and IO PGD entries
 	 */
 	init_pgd = pgd_offset_k(0);
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 
 	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
@@ -48,18 +47,23 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	 * On ARM, first page must always be allocated since it
 	 * contains the machine vectors.
 	 */
-	new_pmd = pmd_alloc(mm, new_pgd, 0);
+	new_pud = pud_alloc(mm, new_pgd, 0);
+	if (!new_pud)
+		goto no_pud;
+
+	new_pmd = pmd_alloc(mm, new_pud, 0);
 	if (!new_pmd)
 		goto no_pmd;
 
-	new_pte = pte_alloc_map(mm, new_pmd, 0);
+	new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
 	if (!new_pte)
 		goto no_pte;
 
-	init_pmd = pmd_offset(init_pgd, 0);
-	init_pte = pte_offset_map_nested(init_pmd, 0);
+	init_pud = pud_offset(init_pgd, 0);
+	init_pmd = pmd_offset(init_pud, 0);
+	init_pte = pte_offset_map(init_pmd, 0);
 	set_pte_ext(new_pte, *init_pte, 0);
-	pte_unmap_nested(init_pte);
+	pte_unmap(init_pte);
 	pte_unmap(new_pte);
 	}
 
@@ -68,33 +72,44 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 no_pte:
 	pmd_free(mm, new_pmd);
 no_pmd:
+	pud_free(mm, new_pud);
+no_pud:
 	free_pages((unsigned long)new_pgd, 2);
 no_pgd:
 	return NULL;
 }
 
-void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 {
+	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pgtable_t pte;
 
-	if (!pgd)
+	if (!pgd_base)
 		return;
 
-	/* pgd is always present and good */
-	pmd = pmd_off(pgd, 0);
-	if (pmd_none(*pmd))
-		goto free;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		goto free;
-	}
+	pgd = pgd_base + pgd_index(0);
+	if (pgd_none_or_clear_bad(pgd))
+		goto no_pgd;
+
+	pud = pud_offset(pgd, 0);
+	if (pud_none_or_clear_bad(pud))
+		goto no_pud;
+
+	pmd = pmd_offset(pud, 0);
+	if (pmd_none_or_clear_bad(pmd))
+		goto no_pmd;
 
 	pte = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
 	pte_free(mm, pte);
+no_pmd:
+	pud_clear(pud);
 	pmd_free(mm, pmd);
-free:
-	free_pages((unsigned long) pgd, 2);
+no_pud:
+	pgd_clear(pgd);
+	pud_free(mm, pud);
+no_pgd:
+	free_pages((unsigned long) pgd_base, 2);
 }
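
For reference, the pattern the rewritten pgd_alloc() follows is: allocate each page-table level in order (pgd, then the new pud level, then pmd, then pte), and on any failure jump to a goto label that frees only the levels already allocated, in reverse order (no_pte/no_pmd/no_pud/no_pgd). The sketch below is a minimal user-space analogue of that unwinding structure, not kernel code; alloc_level() and build_tables() are hypothetical stand-ins for pud_alloc()/pmd_alloc()/pte_alloc_map().

/* Minimal user-space sketch of the goto-based unwinding in the patch. */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_level(const char *name)
{
	void *p = calloc(1, 64);	/* placeholder for one table level */
	if (p)
		printf("allocated %s\n", name);
	return p;
}

static int build_tables(void)
{
	void *pgd, *pud, *pmd, *pte;

	pgd = alloc_level("pgd");
	if (!pgd)
		goto no_pgd;

	pud = alloc_level("pud");	/* the intermediate level the patch introduces */
	if (!pud)
		goto no_pud;

	pmd = alloc_level("pmd");
	if (!pmd)
		goto no_pmd;

	pte = alloc_level("pte");
	if (!pte)
		goto no_pte;

	/* a real pgd_alloc() would hand the tables back; here we just clean up */
	free(pte);
	free(pmd);
	free(pud);
	free(pgd);
	return 0;

no_pte:				/* unwind in reverse allocation order */
	free(pmd);
no_pmd:
	free(pud);
no_pud:
	free(pgd);
no_pgd:
	return -1;
}

int main(void)
{
	return build_tables() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The reworked pgd_free() in the patch walks the same levels top-down (pgd_index/pud_offset/pmd_offset), then clears and frees each one from the bottom up, mirroring the allocation order.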