author	Ingo Molnar <mingo@elte.hu>	2009-02-09 05:19:29 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-09 05:19:29 -0500
commit	790c7ebbe9dc3ec428a5e982af492a14dd1b565f (patch)
tree	952d1e59cc58b62b331a1744003f94710b3f3277 /arch
parent	d5b562330ec766292a3ac54ae5e0673610bd5b3d (diff)
parent	fb08b20fe7c8491a35a4369cce60fcb886d7609d (diff)
Merge branch 'jsgf/x86/unify' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen into x86/headers
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/io.h              90
-rw-r--r--  arch/x86/include/asm/io_32.h           86
-rw-r--r--  arch/x86/include/asm/io_64.h           59
-rw-r--r--  arch/x86/include/asm/page.h            15
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h   2
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h  35
-rw-r--r--  arch/x86/include/asm/pgtable.h        176
-rw-r--r--  arch/x86/include/asm/pgtable_32.h      46
-rw-r--r--  arch/x86/include/asm/pgtable_64.h      64
-rw-r--r--  arch/x86/kernel/early_printk.c          1
10 files changed, 280 insertions(+), 294 deletions(-)
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1dbbdf4be9b4..f150b1ecf920 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -5,6 +5,7 @@
 
 #include <linux/compiler.h>
 #include <asm-generic/int-ll64.h>
+#include <asm/page.h>
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,95 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
 #define readq readq
 #define writeq writeq
 
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+	return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void *phys_to_virt(phys_addr_t address)
+{
+	return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ */
+#define isa_virt_to_bus virt_to_phys
+#define isa_page_to_bus page_to_phys
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
+ */
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+				unsigned long prot_val);
+
+/*
+ * The default ioremap() behavior is non-cached:
+ */
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+{
+	return ioremap_nocache(offset, size);
+}
+
+extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
+
+
 #ifdef CONFIG_X86_32
 # include "io_32.h"
 #else
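
The consolidated block above is the mapping API most drivers see. As a usage illustration, a minimal sketch of an MMIO probe built from these helpers (the 0xfebf0000 base, 0x1000 size and 0x04 register offset are made-up placeholders, not values from this patch):

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	static int example_mmio_probe(void)
	{
		/* Map a hypothetical 4 KB device register window;
		 * a real driver would take this from a PCI BAR. */
		void __iomem *regs = ioremap(0xfebf0000, 0x1000);
		u32 status;

		if (!regs)
			return -ENOMEM;

		status = readl(regs + 0x04);		/* MMIO read */
		writel(status | 0x1, regs + 0x04);	/* MMIO write */

		iounmap(regs);
		return 0;
	}
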
diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
index d8e242e1b396..2fbe7dd26bb8 100644
--- a/arch/x86/include/asm/io_32.h
+++ b/arch/x86/include/asm/io_32.h
@@ -53,92 +53,6 @@
  */
 #define xlate_dev_kmem_ptr(p)	p
 
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void *address)
-{
-	return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void *phys_to_virt(unsigned long address)
-{
-	return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
-/**
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * If the area you are trying to map is a PCI BAR you should have a
- * look at pci_iomap().
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
-				unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-	return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
 static inline void
 memset_io(volatile void __iomem *addr, unsigned char val, int count)
 {
diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
index 563c16270ba6..0424c07246f4 100644
--- a/arch/x86/include/asm/io_64.h
+++ b/arch/x86/include/asm/io_64.h
@@ -142,67 +142,8 @@ __OUTS(l)
 
 #include <linux/vmalloc.h>
 
-#ifndef __i386__
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void *address)
-{
-	return __pa(address);
-}
-
-static inline void *phys_to_virt(unsigned long address)
-{
-	return __va(address);
-}
-#endif
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
 #include <asm-generic/iomap.h>
 
-/*
- * This one maps high address device memory and turns off caching for that area.
- * it's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
-				unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-	return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
 void __memcpy_fromio(void *, unsigned long, unsigned);
 void __memcpy_toio(unsigned long, const void *, unsigned);
 
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index e9873a2e8695..0b16b64a8fe7 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -95,6 +95,11 @@ static inline pgdval_t native_pgd_val(pgd_t pgd)
 	return pgd.pgd;
 }
 
+static inline pgdval_t pgd_flags(pgd_t pgd)
+{
+	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+}
+
 #if PAGETABLE_LEVELS >= 3
 #if PAGETABLE_LEVELS == 4
 typedef struct { pudval_t pud; } pud_t;
@@ -117,6 +122,11 @@ static inline pudval_t native_pud_val(pud_t pud)
 }
 #endif	/* PAGETABLE_LEVELS == 4 */
 
+static inline pudval_t pud_flags(pud_t pud)
+{
+	return native_pud_val(pud) & PTE_FLAGS_MASK;
+}
+
 typedef struct { pmdval_t pmd; } pmd_t;
 
 static inline pmd_t native_make_pmd(pmdval_t val)
@@ -128,6 +138,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
 	return pmd.pmd;
 }
+
+static inline pmdval_t pmd_flags(pmd_t pmd)
+{
+	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
+}
 #else  /* PAGETABLE_LEVELS == 2 */
 #include <asm-generic/pgtable-nopmd.h>
 
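
All three new *_flags() accessors rely on the same split: PTE_PFN_MASK selects the physical-frame bits of an entry, and PTE_FLAGS_MASK (its complement) selects the attribute bits such as _PAGE_PRESENT, _PAGE_RW and _PAGE_PSE. A sketch of that split, as an illustrative helper rather than anything added by this patch:

	/* Illustration only: separate a pmd entry into its frame
	 * and attribute parts using the kernel's real masks. */
	static inline void pmd_split_sketch(pmd_t pmd, pmdval_t *pfn_bits,
					    pmdval_t *flag_bits)
	{
		*pfn_bits  = native_pmd_val(pmd) & PTE_PFN_MASK;
		*flag_bits = native_pmd_val(pmd) & PTE_FLAGS_MASK;
	}

This is the pattern behind the pmd_large() change in pgtable.h below, which only needs to inspect the _PAGE_PSE and _PAGE_PRESENT attribute bits.
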
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index e0d199fe1d83..c1774ac9da7a 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_none(x)		(!(x).pte_low)
-
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 447da43cddb3..3f13cdf61156 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -18,21 +18,6 @@
 	printk("%s:%d: bad pgd %p(%016Lx).\n", \
 	       __FILE__, __LINE__, &(e), pgd_val(e))
 
-static inline int pud_none(pud_t pud)
-{
-	return pud_val(pud) == 0;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-	return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
-}
-
-static inline int pud_present(pud_t pud)
-{
-	return pud_val(pud) & _PAGE_PRESENT;
-}
-
 /* Rules for using set_pte: the pte being assigned *must* be
  * either not present or in a state where the hardware will
  * not attempt to update the pte. In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
 	write_cr3(pgd);
 }
 
-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
-
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
-				  pmd_index(address))
-
 #ifdef CONFIG_SMP
 static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t a, pte_t b)
-{
-	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
-}
-
-static inline int pte_none(pte_t pte)
-{
-	return !pte.pte_low && !pte.pte_high;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 06bbcbd66e9c..a80a956ae655 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -236,7 +236,7 @@ static inline unsigned long pte_pfn(pte_t pte)
 
 static inline int pmd_large(pmd_t pte)
 {
-	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
 		(_PAGE_PSE | _PAGE_PRESENT);
 }
 
@@ -437,6 +437,180 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
 # include "pgtable_64.h"
 #endif
 
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
+
+static inline int pte_none(pte_t pte)
+{
+	return !pte.pte;
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+	return a.pte == b.pte;
+}
+
+static inline int pte_present(pte_t a)
+{
+	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_PRESENT;
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+	/* Only check low word on 32-bit platforms, since it might be
+	   out of sync with upper half. */
+	return (unsigned long)native_pmd_val(pmd) == 0;
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+	return pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT);
+}
+
+/*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+static inline unsigned pmd_index(unsigned long address)
+{
+	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * (Currently stuck as a macro because of indirect forward reference
+ * to linux/mm.h:page_to_nid())
+ */
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this function returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+static inline unsigned pte_index(unsigned long address)
+{
+	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline unsigned long pages_to_mb(unsigned long npg)
+{
+	return npg >> (20 - PAGE_SHIFT);
+}
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#if PAGETABLE_LEVELS > 2
+static inline int pud_none(pud_t pud)
+{
+	return native_pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+	return pud_flags(pud) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+}
+
+static inline struct page *pud_page(pud_t pud)
+{
+	return pfn_to_page(pud_val(pud) >> PAGE_SHIFT);
+}
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline int pud_large(pud_t pud)
+{
+	return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+		(_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+#endif	/* PAGETABLE_LEVELS > 2 */
+
+#if PAGETABLE_LEVELS > 3
+static inline int pgd_present(pgd_t pgd)
+{
+	return pgd_flags(pgd) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
+}
+
+static inline struct page *pgd_page(pgd_t pgd)
+{
+	return pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT);
+}
+
+/* to find an entry in a page-table-directory. */
+static inline unsigned pud_index(unsigned long address)
+{
+	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+	return !native_pgd_val(pgd);
+}
+#endif	/* PAGETABLE_LEVELS > 3 */
+
+#endif	/* __ASSEMBLY__ */
+
 /*
  * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
  *
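
Together, the helpers unified above compose into the classic software page-table walk. A minimal sketch of resolving an address to its pte (assumes a mapping with small pages and that pgd_offset() is provided elsewhere in this header; huge-page handling is deliberately omitted):

	static pte_t *walk_to_pte_sketch(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);	/* top level */
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);		/* level 3 */
		if (pud_none(*pud) || pud_bad(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);		/* level 2 */
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* level 1 */
	}
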
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 72b020deb46b..1952bb762aac 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -85,55 +85,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 /* The boot page tables (all created as a single array) */
 extern unsigned long pg0[];
 
-#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x)	(!(unsigned long)pmd_val((x)))
-#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
-#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
 # include <asm/pgtable-2level.h>
 #endif
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-
-
-static inline int pud_large(pud_t pud) { return 0; }
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address)				\
-	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address)					\
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address)				\
-	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
-
-#define pmd_page(pmd)	(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_page_vaddr(pmd)					\
-	((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address)					\
 	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +		\
@@ -176,7 +133,4 @@ do { \
 #define kern_addr_valid(kaddr)	(0)
 #endif
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #endif /* _ASM_X86_PGTABLE_32_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ba09289accaa..100ac483a0ba 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -67,9 +67,6 @@ extern void paging_init(void);
 	printk("%s:%d: bad pgd %p(%016lx).\n", \
 	       __FILE__, __LINE__, &(e), pgd_val(e))
 
-#define pgd_none(x)	(!pgd_val(x))
-#define pud_none(x)	(!pud_val(x))
-
 struct mm_struct;
 
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -134,8 +131,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
 	native_set_pgd(pgd, native_make_pgd(0));
 }
 
-#define pte_same(a, b)		((a).pte == (b).pte)
-
 #endif /* !__ASSEMBLY__ */
 
 #define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
@@ -156,26 +151,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
 
 #ifndef __ASSEMBLY__
 
-static inline int pgd_bad(pgd_t pgd)
-{
-	return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-	return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pmd_bad(pmd_t pmd)
-{
-	return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-#define pte_none(x)	(!pte_val((x)))
-#define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))	/* FIXME: is this right? */
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -184,41 +159,12 @@ static inline int pmd_bad(pmd_t pmd)
 /*
  * Level 4 access.
  */
-#define pgd_page_vaddr(pgd)						\
-	((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
-#define pgd_page(pgd)		(pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
 static inline int pgd_large(pgd_t pgd) { return 0; }
 #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
 
 /* PUD - Level3 access */
-/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud)						\
-	((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud)	(pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgd, address)					\
-	((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
-#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
-
-static inline int pud_large(pud_t pte)
-{
-	return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-		(_PAGE_PSE | _PAGE_PRESENT);
-}
 
 /* PMD - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
-#define pmd_page(pmd)	(pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
-				  pmd_index(address))
-#define pmd_none(x)	(!pmd_val((x)))
-#define pmd_present(x)	(pmd_val((x)) & _PAGE_PRESENT)
-#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
-#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
 #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
 #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
 					_PAGE_FILE })
@@ -226,13 +172,6 @@ static inline int pud_large(pud_t pte)
 
 /* PTE - Level 1 access. */
 
-/* page, protection -> pte */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn((page)), (pgprot))
-
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) +	\
-					 pte_index((address)))
-
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
 #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -266,9 +205,6 @@ extern int direct_gbpages;
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 504ad198e4ad..6a36dd228b69 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -13,7 +13,6 @@
 #include <asm/setup.h>
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
-#include <asm/pgtable.h>
 #include <asm/fixmap.h>
 #include <linux/usb/ehci_def.h>
 