path: root/arch/powerpc/mm
author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2006-11-11 01:25:10 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-12-04 04:38:52 -0500
commit	4cb3cee03d558fd457cb58f56c80a2a09a66110c (patch)
tree	fe903107d098871a7babc1e3432448758c542cde /arch/powerpc/mm
parent	d03f387eb321189bc2ba278b6ca82f1a45cf19d6 (diff)
[POWERPC] Allow hooking of PCI MMIO & PIO accessors on 64 bits
This patch reworks the way iSeries hooks the PCI IO operations (both MMIO and PIO) and provides a generic way for other platforms to do so (we need to do that for various other platforms as well).

While reworking the IO ops, I ended up doing some spring cleaning in io.h and eeh.h which I might want to split into 2 or 3 patches (among other things, eeh.h had a lot of useless stuff in it).

A side effect is that EEH for PIO should work now (it used to pass IO ports down to the eeh address check functions, which is bogus).

Also new are the MMIO "repeat" ops, which other archs like ARM already had and which we now have too: readsb, readsw, readsl, writesb, writesw, writesl.

In the long run, I might also make EEH use the hooks instead of wrapping at the top level, which would make things even cleaner and relegate EEH completely to platforms/iseries, but we have to measure the performance impact there (though it really only affects MMIO reads).

Since I also need to hook ioremap, I shuffled the functions a bit there. I introduced ioremap_flags() for use by drivers that want to pass explicit flags to ioremap (and it can be hooked). The old __ioremap() is still there as a low-level primitive and cannot be hooked, so drivers that use it should migrate unless they know they want the low-level version.

The patch "arch provides generic iomap missing accessors" (should be number 4 in this series) is a prerequisite for full iomap API support with this patch.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
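To make the new hook points a bit more concrete, here is a minimal sketch (not part of the patch) of how a platform might install them. The myplat_* names and the init-time assignment are hypothetical; only ppc_md.ioremap, ppc_md.iounmap, ioremap_flags(), __ioremap() and __iounmap() come from the patch itself.

#include <linux/init.h>
#include <asm/machdep.h>
#include <asm/io.h>

/*
 * Hypothetical platform hooks: intercept the mapping, then fall back to the
 * generic low-level implementations kept in pgtable_64.c.
 */
static void __iomem *myplat_ioremap(unsigned long addr, unsigned long size,
				    unsigned long flags)
{
	/* e.g. translate the address or record the mapping here */
	return __ioremap(addr, size, flags);
}

static void myplat_iounmap(void __iomem *token)
{
	__iounmap(token);
}

static void __init myplat_init(void)
{
	/* ioremap(), ioremap_flags() and iounmap() consult these hooks first */
	ppc_md.ioremap = myplat_ioremap;
	ppc_md.iounmap = myplat_iounmap;
}

When the hooks are left unset, the generic paths behave as before, with the iSeries special cases now removed from the common code.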
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	46
1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4aaa509..e9b21846ccbd 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
 void __iomem * __ioremap(unsigned long addr, unsigned long size,
 			 unsigned long flags)
 {
 	unsigned long pa, ea;
 	void __iomem *ret;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return (void __iomem *)addr;
-
 	/*
 	 * Choose an address to map it to.
 	 * Once the imalloc system is running, we use it.
@@ -178,6 +168,25 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 	return ret;
 }
 
+
+void __iomem * ioremap(unsigned long addr, unsigned long size)
+{
+	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(unsigned long addr, unsigned long size,
+			     unsigned long flags)
+{
+	if (ppc_md.ioremap)
+		return ppc_md.ioremap(addr, size, flags);
+	return __ioremap(addr, size, flags);
+}
+
+
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
 int __ioremap_explicit(unsigned long pa, unsigned long ea,
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
-void iounmap(volatile void __iomem *token)
+void __iounmap(void __iomem *token)
 {
 	void *addr;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	if (!mem_init_done)
 		return;
 
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
 	im_free(addr);
 }
 
+void iounmap(void __iomem *token)
+{
+	if (ppc_md.iounmap)
+		ppc_md.iounmap(token);
+	else
+		__iounmap(token);
+}
+
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 {
 	struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 	return 0;
 }
 
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(void __iomem *start, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
 }
 
 EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
 
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
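For completeness, an illustrative driver-side use of the old and new entry points (again an assumption-level sketch, not from this commit; the mydev_* names are made up, while ioremap(), ioremap_flags(), iounmap() and _PAGE_NO_CACHE appear in the patch):

#include <linux/errno.h>
#include <asm/pgtable.h>
#include <asm/io.h>

static void __iomem *mydev_regs;	/* hypothetical register window */
static void __iomem *mydev_buf;		/* hypothetical buffer window */

static int mydev_map(unsigned long reg_base, unsigned long reg_size,
		     unsigned long buf_base, unsigned long buf_size)
{
	/* default attributes: non-cacheable + guarded, hookable via ppc_md */
	mydev_regs = ioremap(reg_base, reg_size);
	if (!mydev_regs)
		return -ENOMEM;

	/* explicit attributes: here non-cacheable but not guarded */
	mydev_buf = ioremap_flags(buf_base, buf_size, _PAGE_NO_CACHE);
	if (!mydev_buf) {
		iounmap(mydev_regs);
		return -ENOMEM;
	}
	return 0;
}

static void mydev_unmap(void)
{
	iounmap(mydev_buf);
	iounmap(mydev_regs);
}

Drivers that used __ioremap() directly only to pass non-default flags can switch to ioremap_flags() so that a platform's ppc_md hooks still see their mappings.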