aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/kernel/io_trapped.c
diff options
context:
space:
mode:
authorMagnus Damm <magnus.damm@gmail.com>2008-02-07 06:18:21 -0500
committerPaul Mundt <lethal@linux-sh.org>2008-02-14 00:22:09 -0500
commite7cc9a7340b8ec018caa9eb1d035fdaef1f2fc51 (patch)
treea797888f8d3f95734288978351c33af3c965494c /arch/sh/kernel/io_trapped.c
parent2ade1a9b425c24037327197ea97db054395b536b (diff)
sh: trapped io support V2
The idea is that we want to get rid of the in/out/readb/writeb callbacks from the machvec and replace that with simple inline read and write operations to memory. Fast and simple for most hardware devices (think pci). Some devices require special treatment though - like 16-bit only CF devices - so we need to have some method to hook in callbacks. This patch makes it possible to add a per-device trap generating filter. This way we can get maximum performance of sane hardware - which doesn't need this filter - and crappy hardware works but gets punished by a performance hit. V2 changes things around a bit and replaces io access callbacks with a simple minimum_bus_width value. In the future we can add stride as well. Signed-off-by: Magnus Damm <damm@igel.co.jp> Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/io_trapped.c')
-rw-r--r--arch/sh/kernel/io_trapped.c269
1 files changed, 269 insertions, 0 deletions
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
new file mode 100644
index 000000000000..0bfdc9a34e1a
--- /dev/null
+++ b/arch/sh/kernel/io_trapped.c
@@ -0,0 +1,269 @@
/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <linux/bitops.h>
15#include <linux/vmalloc.h>
16#include <asm/system.h>
17#include <asm/mmu_context.h>
18#include <asm/uaccess.h>
19#include <asm/io.h>
20#include <asm/io_trapped.h>
21
/* Upper bound on the number of alias pages one descriptor may need */
#define TRAPPED_PAGES_MAX	16

/* Local max helper; only ever used with simple, side-effect-free args */
#define MAX(a, b)	(((a) >= (b)) ? (a) : (b))

/* Registered trapped windows, one list per resource type */
#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
#endif

/* Protects both lists above */
static DEFINE_SPINLOCK(trapped_lock);
32
33int __init register_trapped_io(struct trapped_io *tiop)
34{
35 struct resource *res;
36 unsigned long len = 0, flags = 0;
37 struct page *pages[TRAPPED_PAGES_MAX];
38 int k, n;
39
40 /* structure must be page aligned */
41 if ((unsigned long)tiop & (PAGE_SIZE - 1))
42 goto bad;
43
44 for (k = 0; k < tiop->num_resources; k++) {
45 res = tiop->resource + k;
46 len += roundup((res->end - res->start) + 1, PAGE_SIZE);
47 flags |= res->flags;
48 }
49
50 /* support IORESOURCE_IO _or_ MEM, not both */
51 if (hweight_long(flags) != 1)
52 goto bad;
53
54 n = len >> PAGE_SHIFT;
55
56 if (n >= TRAPPED_PAGES_MAX)
57 goto bad;
58
59 for (k = 0; k < n; k++)
60 pages[k] = virt_to_page(tiop);
61
62 tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
63 if (!tiop->virt_base)
64 goto bad;
65
66 len = 0;
67 for (k = 0; k < tiop->num_resources; k++) {
68 res = tiop->resource + k;
69 pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
70 (unsigned long)(tiop->virt_base + len),
71 res->flags & IORESOURCE_IO ? "io" : "mmio",
72 (unsigned long)res->start);
73 len += roundup((res->end - res->start) + 1, PAGE_SIZE);
74 }
75
76 tiop->magic = IO_TRAPPED_MAGIC;
77 INIT_LIST_HEAD(&tiop->list);
78 spin_lock_irq(&trapped_lock);
79 if (flags & IORESOURCE_IO)
80 list_add(&tiop->list, &trapped_io);
81 if (flags & IORESOURCE_MEM)
82 list_add(&tiop->list, &trapped_mem);
83 spin_unlock_irq(&trapped_lock);
84
85 return 0;
86 bad:
87 pr_warning("unable to install trapped io filter\n");
88 return -1;
89}
90
91void __iomem *match_trapped_io_handler(struct list_head *list,
92 unsigned long offset,
93 unsigned long size)
94{
95 unsigned long voffs;
96 struct trapped_io *tiop;
97 struct resource *res;
98 int k, len;
99
100 spin_lock_irq(&trapped_lock);
101 list_for_each_entry(tiop, list, list) {
102 voffs = 0;
103 for (k = 0; k < tiop->num_resources; k++) {
104 res = tiop->resource + k;
105 if (res->start == offset) {
106 spin_unlock_irq(&trapped_lock);
107 return tiop->virt_base + voffs;
108 }
109
110 len = (res->end - res->start) + 1;
111 voffs += roundup(len, PAGE_SIZE);
112 }
113 }
114 spin_unlock_irq(&trapped_lock);
115 return NULL;
116}
117
118static struct trapped_io *lookup_tiop(unsigned long address)
119{
120 pgd_t *pgd_k;
121 pud_t *pud_k;
122 pmd_t *pmd_k;
123 pte_t *pte_k;
124 pte_t entry;
125
126 pgd_k = swapper_pg_dir + pgd_index(address);
127 if (!pgd_present(*pgd_k))
128 return NULL;
129
130 pud_k = pud_offset(pgd_k, address);
131 if (!pud_present(*pud_k))
132 return NULL;
133
134 pmd_k = pmd_offset(pud_k, address);
135 if (!pmd_present(*pmd_k))
136 return NULL;
137
138 pte_k = pte_offset_kernel(pmd_k, address);
139 entry = *pte_k;
140
141 return pfn_to_kaddr(pte_pfn(entry));
142}
143
144static unsigned long lookup_address(struct trapped_io *tiop,
145 unsigned long address)
146{
147 struct resource *res;
148 unsigned long vaddr = (unsigned long)tiop->virt_base;
149 unsigned long len;
150 int k;
151
152 for (k = 0; k < tiop->num_resources; k++) {
153 res = tiop->resource + k;
154 len = roundup((res->end - res->start) + 1, PAGE_SIZE);
155 if (address < (vaddr + len))
156 return res->start + (address - vaddr);
157 vaddr += len;
158 }
159 return 0;
160}
161
/*
 * copy_word - perform one emulated device access
 * @src_addr: address to read from
 * @src_len:  read width in bytes (1/2/4/8; anything else reads nothing)
 * @dst_addr: address to write to
 * @dst_len:  write width in bytes (1/2/4/8; anything else writes nothing)
 *
 * Reads src_len bytes, then writes the (possibly narrowed or widened)
 * value with dst_len bytes; this is how minimum_bus_width is honored.
 * Returns the value transferred, mainly for debug logging.
 */
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long word = 0;

	if (src_len == 1)
		word = ctrl_inb(src_addr);
	else if (src_len == 2)
		word = ctrl_inw(src_addr);
	else if (src_len == 4)
		word = ctrl_inl(src_addr);
	else if (src_len == 8)
		word = ctrl_inq(src_addr);

	if (dst_len == 1)
		ctrl_outb(word, dst_addr);
	else if (dst_len == 2)
		ctrl_outw(word, dst_addr);
	else if (dst_len == 4)
		ctrl_outl(word, dst_addr);
	else if (dst_len == 8)
		ctrl_outq(word, dst_addr);

	return word;
}
199
200static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
201{
202 struct trapped_io *tiop;
203 unsigned long src_addr = (unsigned long)src;
204 unsigned long long tmp;
205
206 pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
207 tiop = lookup_tiop(src_addr);
208 WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
209
210 src_addr = lookup_address(tiop, src_addr);
211 if (!src_addr)
212 return cnt;
213
214 tmp = copy_word(src_addr, MAX(cnt, (tiop->minimum_bus_width / 8)),
215 (unsigned long)dst, cnt);
216
217 pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
218 return 0;
219}
220
221static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
222{
223 struct trapped_io *tiop;
224 unsigned long dst_addr = (unsigned long)dst;
225 unsigned long long tmp;
226
227 pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
228 tiop = lookup_tiop(dst_addr);
229 WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
230
231 dst_addr = lookup_address(tiop, dst_addr);
232 if (!dst_addr)
233 return cnt;
234
235 tmp = copy_word((unsigned long)src, cnt,
236 dst_addr, MAX(cnt, (tiop->minimum_bus_width / 8)));
237
238 pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
239 return 0;
240}
241
242static struct mem_access trapped_io_access = {
243 from_device,
244 to_device,
245};
246
247int handle_trapped_io(struct pt_regs *regs, unsigned long address)
248{
249 mm_segment_t oldfs;
250 opcode_t instruction;
251 int tmp;
252
253 if (!lookup_tiop(address))
254 return 0;
255
256 WARN_ON(user_mode(regs));
257
258 oldfs = get_fs();
259 set_fs(KERNEL_DS);
260 if (copy_from_user(&instruction, (void *)(regs->pc),
261 sizeof(instruction))) {
262 set_fs(oldfs);
263 return 0;
264 }
265
266 tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
267 set_fs(oldfs);
268 return tmp == 0;
269}