 arch/i386/mm/ioremap.c | 84
 1 file changed, 6 insertions(+), 78 deletions(-)
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index 247fde76aaed..fff08ae7b5ed 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/fixmap.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -21,82 +21,6 @@
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
 
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pte_t *pte;
-	unsigned long pfn;
-
-	pfn = phys_addr >> PAGE_SHIFT;
-	pte = pte_alloc_kernel(pmd, addr);
-	if (!pte)
-		return -ENOMEM;
-	do {
-		BUG_ON(!pte_none(*pte));
-		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
-		pfn++;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	return 0;
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pmd_t *pmd;
-	unsigned long next;
-
-	phys_addr -= addr;
-	pmd = pmd_alloc(&init_mm, pud, addr);
-	if (!pmd)
-		return -ENOMEM;
-	do {
-		next = pmd_addr_end(addr, end);
-		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
-			return -ENOMEM;
-	} while (pmd++, addr = next, addr != end);
-	return 0;
-}
-
-static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pud_t *pud;
-	unsigned long next;
-
-	phys_addr -= addr;
-	pud = pud_alloc(&init_mm, pgd, addr);
-	if (!pud)
-		return -ENOMEM;
-	do {
-		next = pud_addr_end(addr, end);
-		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
-			return -ENOMEM;
-	} while (pud++, addr = next, addr != end);
-	return 0;
-}
-
-static int ioremap_page_range(unsigned long addr,
-		unsigned long end, unsigned long phys_addr, unsigned long flags)
-{
-	pgd_t *pgd;
-	unsigned long next;
-	int err;
-
-	BUG_ON(addr >= end);
-	flush_cache_all();
-	phys_addr -= addr;
-	pgd = pgd_offset_k(addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
-		if (err)
-			break;
-	} while (pgd++, addr = next, addr != end);
-	flush_tlb_all();
-	return err;
-}
-
 /*
  * Generic mapping function (not visible outside):
  */
@@ -115,6 +39,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	void __iomem * addr;
 	struct vm_struct * area;
 	unsigned long offset, last_addr;
+	pgprot_t prot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -142,6 +67,9 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		return NULL;
 	}
 
+	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
+			| _PAGE_ACCESSED | flags);
+
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -158,7 +86,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	area->phys_addr = phys_addr;
 	addr = (void __iomem *) area->addr;
 	if (ioremap_page_range((unsigned long) addr,
-			(unsigned long) addr + size, phys_addr, flags)) {
+			(unsigned long) addr + size, phys_addr, prot)) {
 		vunmap((void __force *) addr);
 		return NULL;
 	}