Diffstat (limited to 'arch/sh/mm/ioremap.c')
 -rw-r--r--  arch/sh/mm/ioremap.c  163
 1 file changed, 163 insertions(+), 0 deletions(-)

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
new file mode 100644
index 000000000000..9f490c2742f0
--- /dev/null
+++ b/arch/sh/mm/ioremap.c
@@ -0,0 +1,163 @@
/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
				   _PAGE_DIRTY | _PAGE_ACCESSED |
				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}
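The loop above programs one PTE per page: the page frame number is the physical address shifted right by PAGE_SHIFT, and the low bits become the in-page offset that p3_ioremap() adds back to the returned pointer. A minimal standalone sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) and a hypothetical device address; it is illustrative only and not part of the patch:

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long phys_addr = 0xa4000150UL;	/* hypothetical register address */
	unsigned long pfn = phys_addr >> EXAMPLE_PAGE_SHIFT;
	unsigned long offset = phys_addr & ((1UL << EXAMPLE_PAGE_SHIFT) - 1);

	/* Prints pfn=0xa4000 offset=0x150: the frame is what gets mapped,
	 * the offset is re-added to the virtual address by the caller. */
	printf("pfn=%#lx offset=%#lx\n", pfn, offset);
	return 0;
}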

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
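remap_area_pages() walks one pgd entry per iteration; `(address + PGDIR_SIZE) & PGDIR_MASK` rounds the working address up to the next pgd-aligned boundary, and remap_area_pmd() does the same per-pmd with PMD_SIZE/PMD_MASK. A standalone sketch of that rounding using a hypothetical 4 MB pgd span; the shift value is an assumption for illustration, not taken from the sh headers:

#include <stdio.h>

#define EX_PGDIR_SHIFT	22			/* assumption: 4 MB covered per pgd entry */
#define EX_PGDIR_SIZE	(1UL << EX_PGDIR_SHIFT)
#define EX_PGDIR_MASK	(~(EX_PGDIR_SIZE - 1))

int main(void)
{
	unsigned long address = 0x01234000UL;	/* hypothetical address inside a pgd region */

	/* Same arithmetic as the loop above: advance to the next pgd
	 * boundary, which is 0x01400000 for this input. */
	address = (address + EX_PGDIR_SIZE) & EX_PGDIR_MASK;
	printf("next pgd boundary: %#lx\n", address);
	return 0;
}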

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}

void p3_iounmap(void *addr)
{
	if (addr > high_memory)
		vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
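A minimal sketch of how a caller might use the two entry points above; the physical address, window size and flags value are hypothetical, and the snippet is illustrative only, not part of the patch:

/* Sketch only: hypothetical device window and flags. */
static int example_map_device(void)
{
	void *regs;

	/* Map one page of device space; extra pgprot flags left at 0 here. */
	regs = p3_ioremap(0xfd000000UL, PAGE_SIZE, 0);
	if (!regs)
		return -ENOMEM;

	/* ... access the hardware through the returned pointer ... */

	p3_iounmap(regs);
	return 0;
}

Note that p3_ioremap() returns the mapped address with the sub-page offset already added, and p3_iounmap() masks that offset back off with PAGE_MASK before freeing the vmalloc area, so the pointer returned by p3_ioremap() can be passed back unmodified.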