Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--	arch/x86_64/mm/ioremap.c	111
1 file changed, 7 insertions(+), 104 deletions(-)
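
This commit drops x86_64's private remap_area_{pte,pmd,pud,pages}() page-table walkers and converts __ioremap() to the generic ioremap_page_range() helper (hence the switch from <asm/io.h> to <linux/io.h> below). For reference, the generic helper of this era was declared roughly as follows; treat this as a sketch of the declaration in include/linux/io.h, and note that later kernels widened phys_addr to phys_addr_t:

	int ioremap_page_range(unsigned long addr, unsigned long end,
			       unsigned long phys_addr, pgprot_t prot);
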
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 45d7d823c3b8..c6e5e8d401a4 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -12,117 +12,16 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
-#include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 #include <asm/proto.h>
 
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
 
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-	unsigned long pfn;
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	if (address >= end)
-		BUG();
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-			_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-
-	address &= ~PUD_MASK;
-	end = address + size;
-	if (end > PUD_SIZE)
-		end = PUD_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			return -ENOMEM;
-		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
-		address = (address + PUD_SIZE) & PUD_MASK;
-		pud++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-			    unsigned long size, unsigned long flags)
-{
-	int error;
-	pgd_t *pgd;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	pgd = pgd_offset_k(address);
-	flush_cache_all();
-	if (address >= end)
-		BUG();
-	do {
-		pud_t *pud;
-		pud = pud_alloc(&init_mm, pgd, address);
-		error = -ENOMEM;
-		if (!pud)
-			break;
-		if (remap_area_pud(pud, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		pgd++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return error;
-}
-
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
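
The roughly one hundred lines removed above duplicated the generic pgd -> pud -> pmd -> pte walk. For comparison, the leaf level of the generic walker in lib/ioremap.c looked roughly like this at the time (quoted from memory, a sketch rather than the verbatim source):

	static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, unsigned long phys_addr, pgprot_t prot)
	{
		pte_t *pte;
		unsigned long pfn;

		pfn = phys_addr >> PAGE_SHIFT;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			return -ENOMEM;
		do {
			/* same "page already exists" overlap check as the deleted code */
			BUG_ON(!pte_none(*pte));
			set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
			pfn++;
		} while (pte++, addr += PAGE_SIZE, addr != end);
		return 0;
	}

Unlike the deleted remap_area_pte(), it takes the complete pgprot_t from the caller rather than hard-coding the PTE flag bits, which is why the second half of this patch builds a pgprot in __ioremap().
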
@@ -165,6 +64,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	void * addr;
 	struct vm_struct * area;
 	unsigned long offset, last_addr;
+	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -194,6 +94,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	}
 #endif
 
+	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
+			  | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
 	/*
 	 * Mappings have to be page-aligned
 	 */
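
The pgprot assembled here carries exactly the bits the deleted remap_area_pte() hard-coded (_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED, plus the caller's flags), so the resulting PTEs are identical; only the page-table walking moves to the generic helper.
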
@@ -209,7 +111,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		return NULL;
 	area->phys_addr = phys_addr;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+				phys_addr, pgprot)) {
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
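
Callers of ioremap() and friends are unaffected by the conversion. A hypothetical usage sketch (the MMIO base address, size, and register offset below are made-up example values, not taken from this patch):

	#include <linux/io.h>

	static int example_probe(void)
	{
		void __iomem *regs;

		/* hypothetical device MMIO window */
		regs = ioremap_nocache(0xfebf0000UL, 0x1000);
		if (!regs)
			return -ENOMEM;
		writel(0x1, regs + 0x04);	/* hypothetical enable register */
		iounmap(regs);
		return 0;
	}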