author     Joe Perches <joe@perches.com>              2008-03-23 04:03:09 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-04-17 11:41:26 -0400
commit     cf840147d48626d5d86d617cbc5b7cddc1bcae14
tree       f1f4082589633318fabc01eeae63f1bbc1d7e3e4
parent     65e05d15edfdd6ecb4426894cf6e6b5ae97602e4
include/asm-x86/pgtable_32.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   include/asm-x86/pgtable_32.h   102
1 files changed, 53 insertions, 49 deletions
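For context, the cleanups below are the usual checkpatch.pl formatting fixes and do not change behaviour: spaces around binary operators, a space after commas, macro arguments wrapped in an extra pair of parentheses, and definitions that run past 80 columns split onto continuation lines. A minimal before/after sketch of the same conventions, using made-up macro names rather than anything from this header:

/* Hypothetical helpers so the sketch stands alone. */
#define EXAMPLE_SIZE    4096UL
#define EXAMPLE_FLAG    0x1UL
#define example_val(x)  ((unsigned long)(x))

/* Before: no spaces around binary operators, argument passed on
 * without parentheses, everything jammed onto one long line. */
#define EXAMPLE_MASK_OLD(x) ((example_val(x)&(EXAMPLE_SIZE-1))|EXAMPLE_FLAG)

/* After: spaces around operators, argument parenthesized, and the
 * definition wrapped with a continuation backslash. */
#define EXAMPLE_MASK_NEW(x) \
        ((example_val((x)) & (EXAMPLE_SIZE - 1)) | EXAMPLE_FLAG)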
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 1e2c0d839528..c4a643674458 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -40,13 +40,13 @@ void paging_init(void);
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level-defs.h>
 # define PMD_SIZE (1UL << PMD_SHIFT)
-# define PMD_MASK (~(PMD_SIZE-1))
+# define PMD_MASK (~(PMD_SIZE - 1))
 #else
 # include <asm/pgtable-2level-defs.h>
 #endif
 
 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
@@ -58,21 +58,22 @@ void paging_init(void);
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long) high_memory + \
-		2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
+		& ~(VMALLOC_OFFSET - 1))
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
+		& PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
 /*
@@ -88,16 +89,16 @@ extern unsigned long pg0[];
 #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
 /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x) (!(unsigned long)pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_none(x) (!(unsigned long)pmd_val((x)))
+#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
 
 extern int pmd_bad(pmd_t pmd);
 
-#define pmd_bad_v1(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-#define pmd_bad_v2(x) ((pmd_val(x) \
-		& ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \
-		!= _KERNPG_TABLE)
-
+#define pmd_bad_v1(x) \
+	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER)))
+#define pmd_bad_v2(x) \
+	(_KERNPG_TABLE != (pmd_val((x)) & ~(PAGE_MASK | _PAGE_USER | \
+		_PAGE_PSE | _PAGE_NX)))
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
@@ -123,17 +124,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 }
 
 /*
- * Macro to mark a page protection value as "uncacheable". On processors which do not support
- * it, this is a no-op.
+ * Macro to mark a page protection value as "uncacheable".
+ * On processors which do not support it, this is a no-op.
  */
-#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
-		? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+#define pgprot_noncached(prot) \
+	((boot_cpu_data.x86 > 3) \
+	 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) \
+	 : (prot))
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-
 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
 
 /*
@@ -142,20 +144,20 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
  * this macro returns the index of the entry in the pgd page which would
  * control the given virtual address
  */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_index_k(addr) pgd_index((addr))
 
 /*
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
 
 /*
  * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
 
 static inline int pud_large(pud_t pud) { return 0; }
 
@@ -165,8 +167,8 @@ static inline int pud_large(pud_t pud) { return 0; }
  * this macro returns the index of the entry in the pmd page which would
  * control the given virtual address
  */
 #define pmd_index(address) \
-		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
 /*
  * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -174,43 +176,45 @@ static inline int pud_large(pud_t pud) { return 0; }
  * this macro returns the index of the entry in the pte page which would
  * control the given virtual address
  */
 #define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) \
-		((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+		((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
 
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
 
 #define pmd_page_vaddr(pmd) \
-		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+		((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK))
 
 #if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
-		((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-		((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+#define pte_offset_map(dir, address) \
+		((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
+		 pte_index((address)))
+#define pte_offset_map_nested(dir, address) \
+		((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
+		 pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
 #else
 #define pte_offset_map(dir, address) \
-		((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+		((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
+#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
 #endif
 
 /* Clear a kernel PTE and flush it from the TLB */
 #define kpte_clear_flush(ptep, vaddr) \
 do { \
-	pte_clear(&init_mm, vaddr, ptep); \
-	__flush_tlb_one(vaddr); \
+	pte_clear(&init_mm, (vaddr), (ptep)); \
+	__flush_tlb_one((vaddr)); \
 } while (0)
 
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
  */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
+#define update_mmu_cache(vma, address, pte) do { } while (0)
 
 void native_pagetable_setup_start(pgd_t *base);
 void native_pagetable_setup_done(pgd_t *base);
@@ -239,7 +243,7 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 #define kern_addr_valid(kaddr) (0)
 #endif
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #endif /* _I386_PGTABLE_H */
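On the question of why checkpatch and the kernel coding style prefer macro arguments wrapped in parentheses (the pmd_val((x)), pte_index((address)) and pte_offset_map((dir), (address)) changes above), the usual rationale is operator precedence when a caller passes a compound expression instead of a plain variable. A small stand-alone illustration, with invented macro names that are not part of this header:

#include <stdio.h>

/* Argument used without parentheses: precedence can regroup it. */
#define WORDS_TO_BYTES_BAD(n)  (n * 4)
/* Argument parenthesized, as checkpatch prefers. */
#define WORDS_TO_BYTES_OK(n)   ((n) * 4)

int main(void)
{
	int base = 2, extra = 3;

	/* Expands to (base + extra * 4): only 'extra' is scaled, prints 14. */
	printf("bad: %d\n", WORDS_TO_BYTES_BAD(base + extra));
	/* Expands to ((base + extra) * 4): the whole sum is scaled, prints 20. */
	printf("ok:  %d\n", WORDS_TO_BYTES_OK(base + extra));
	return 0;
}

The extra parentheses are redundant for a simple argument such as x, but they make a macro robust against expression arguments, which is presumably why the cleanup adds them uniformly.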