author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2007-10-22 06:52:47 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2007-10-22 06:52:49 -0400
commit     3610cce87af0693603db171d5b6f6735f5e3dc5b (patch)
tree       9aa7d9a0924b2f075c1b95ed57bb63ed512165c9 /arch/s390/mm
parent     e4aa402e7a3b6b87d8df6243a37171cdcd2f01c2 (diff)
[S390] Cleanup page table definitions.
- De-confuse the defines for the address-space-control-elements
and the segment/region table entries.
- Create out of line functions for page table allocation / freeing.
- Simplify get_shadow_xxx functions.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
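
The simplified shadow handling mentioned in the last bullet shows up in the new pgtable.c below: the allocators record the physical address of an optional shadow table in the head page's page->index field, so looking the shadow up needs no table walk. A rough sketch of that idea, for illustration only (the real get_shadow_table()/get_shadow_pte() helpers live in the s390 headers, which are outside this diffstat, and may differ in detail):

static inline unsigned long *get_shadow_pte(unsigned long *table)
{
        /* page_table_alloc() below stores the shadow table's physical
         * address in page->index; it stays 0 when no shadow exists. */
        struct page *page = virt_to_page(table);

        return (unsigned long *) page->index;
}

The crst variant works the same way, reading the index field of the head page of the order-ALLOC_ORDER allocation.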
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--   arch/s390/mm/Makefile   |  2
-rw-r--r--   arch/s390/mm/init.c     | 28
-rw-r--r--   arch/s390/mm/pgtable.c  | 94
-rw-r--r--   arch/s390/mm/vmem.c     | 19
4 files changed, 114 insertions, 29 deletions
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index f95449b29fa5..66401930f83e 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y    := init.o fault.o extmem.o mmap.o vmem.o
+obj-y    := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
 obj-$(CONFIG_CMM) += cmm.o
 
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3a25bbf2eb0a..90ec058aa7db 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -103,32 +103,28 @@ static void __init setup_ro_region(void)
  */
 void __init paging_init(void)
 {
-        pgd_t *pg_dir;
-        int i;
-        unsigned long pgdir_k;
         static const int ssm_mask = 0x04000000L;
         unsigned long max_zone_pfns[MAX_NR_ZONES];
+        unsigned long pgd_type;
 
-        pg_dir = swapper_pg_dir;
-
+        init_mm.pgd = swapper_pg_dir;
+        S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
 #ifdef CONFIG_64BIT
-        pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
-        for (i = 0; i < PTRS_PER_PGD; i++)
-                pgd_clear_kernel(pg_dir + i);
+        S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+        pgd_type = _REGION3_ENTRY_EMPTY;
 #else
-        pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-        for (i = 0; i < PTRS_PER_PGD; i++)
-                pmd_clear_kernel((pmd_t *)(pg_dir + i));
+        S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+        pgd_type = _SEGMENT_ENTRY_EMPTY;
 #endif
+        clear_table((unsigned long *) init_mm.pgd, pgd_type,
+                    sizeof(unsigned long)*2048);
         vmem_map_init();
         setup_ro_region();
 
-        S390_lowcore.kernel_asce = pgdir_k;
-
         /* enable virtual mapping in kernel mode */
-        __ctl_load(pgdir_k, 1, 1);
-        __ctl_load(pgdir_k, 7, 7);
-        __ctl_load(pgdir_k, 13, 13);
+        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
         __raw_local_irq_ssm(ssm_mask);
 
         memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
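
Here and in vmem.c further down, the old per-entry loops (pgd_clear_kernel(), pmd_clear_kernel(), pte[i] = empty_pte) are replaced by clear_table() calls that fill a whole table with the matching *_EMPTY value. A plain-C equivalent of what that helper does, shown only for illustration (the real asm-s390 routine is an optimized implementation and is not part of this diff):

static inline void clear_table_equiv(unsigned long *table, unsigned long entry,
                                     unsigned long size)
{
        /* Fill 'size' bytes of table slots with the "empty" entry value,
         * e.g. _REGION3_ENTRY_EMPTY, _SEGMENT_ENTRY_EMPTY or
         * _PAGE_TYPE_EMPTY. */
        unsigned long *end = table + size / sizeof(unsigned long);

        while (table < end)
                *table++ = entry;
}

For the kernel pgd above that is sizeof(unsigned long) * 2048 bytes, i.e. 16KB (four pages) on 64-bit and 8KB (two pages) on 31-bit, which matches the ALLOC_ORDER values of 2 and 1 in the new pgtable.c.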
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
new file mode 100644
index 000000000000..e60e0ae13402
--- /dev/null
+++ b/arch/s390/mm/pgtable.c
@@ -0,0 +1,94 @@
+/*
+ *  arch/s390/mm/pgtable.c
+ *
+ *    Copyright IBM Corp. 2007
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/quicklist.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+#ifndef CONFIG_64BIT
+#define ALLOC_ORDER     1
+#else
+#define ALLOC_ORDER     2
+#endif
+
+unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
+{
+        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+
+        if (!page)
+                return NULL;
+        page->index = 0;
+        if (noexec) {
+                struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+                if (!shadow) {
+                        __free_pages(page, ALLOC_ORDER);
+                        return NULL;
+                }
+                page->index = page_to_phys(shadow);
+        }
+        return (unsigned long *) page_to_phys(page);
+}
+
+void crst_table_free(unsigned long *table)
+{
+        unsigned long *shadow = get_shadow_table(table);
+
+        if (shadow)
+                free_pages((unsigned long) shadow, ALLOC_ORDER);
+        free_pages((unsigned long) table, ALLOC_ORDER);
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(int noexec)
+{
+        struct page *page = alloc_page(GFP_KERNEL);
+        unsigned long *table;
+
+        if (!page)
+                return NULL;
+        page->index = 0;
+        if (noexec) {
+                struct page *shadow = alloc_page(GFP_KERNEL);
+                if (!shadow) {
+                        __free_page(page);
+                        return NULL;
+                }
+                table = (unsigned long *) page_to_phys(shadow);
+                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+                page->index = (addr_t) table;
+        }
+        table = (unsigned long *) page_to_phys(page);
+        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+        return table;
+}
+
+void page_table_free(unsigned long *table)
+{
+        unsigned long *shadow = get_shadow_pte(table);
+
+        if (shadow)
+                free_page((unsigned long) shadow);
+        free_page((unsigned long) table);
+
+}
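
These out-of-line helpers are consumed by the pgalloc wrappers in the s390 headers; those header changes are outside this diffstat (limited to arch/s390/mm), so the following is only a sketch of how such wrappers might look, with the exact signatures and the s390_noexec flag assumed rather than taken from this patch:

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        /* crst tables back the pgd; a non-zero noexec flag also allocates
         * the shadow table used for execute protection. */
        return (pgd_t *) crst_table_alloc(mm, s390_noexec);
}

static inline void pgd_free(pgd_t *pgd)
{
        crst_table_free((unsigned long *) pgd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return (pte_t *) page_table_alloc(s390_noexec);
}

static inline void pte_free_kernel(pte_t *pte)
{
        page_table_free((unsigned long *) pte);
}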
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fd594d5fe142..1bd51d840484 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -75,29 +75,24 @@ static void __init_refok *vmem_alloc_pages(unsigned int order)
 
 static inline pmd_t *vmem_pmd_alloc(void)
 {
-        pmd_t *pmd;
-        int i;
+        pmd_t *pmd = NULL;
 
-        pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+#ifdef CONFIG_64BIT
+        pmd = vmem_alloc_pages(2);
         if (!pmd)
                 return NULL;
-        for (i = 0; i < PTRS_PER_PMD; i++)
-                pmd_clear_kernel(pmd + i);
+        clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+#endif
         return pmd;
 }
 
 static inline pte_t *vmem_pte_alloc(void)
 {
-        pte_t *pte;
-        pte_t empty_pte;
-        int i;
+        pte_t *pte = vmem_alloc_pages(0);
 
-        pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
         if (!pte)
                 return NULL;
-        pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
-        for (i = 0; i < PTRS_PER_PTE; i++)
-                pte[i] = empty_pte;
+        clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
         return pte;
 }
 