author	James Bottomley <James.Bottomley@HansenPartnership.com>	2010-12-22 11:22:11 -0500
committer	James Bottomley <James.Bottomley@suse.de>	2011-01-15 09:44:40 -0500
commit	f311847c2fcebd81912e2f0caf8a461dec28db41 (patch)
tree	802ef2aa01bd0d662e60412366a40c827fd3e875 /arch
parent	38567333a6dabd0f2b4150e9fb6dd8e3ba2985e5 (diff)
parisc: flush pages through tmpalias space
The kernel has an 8M tmpalias space (originally designed for copying and
clearing pages but now only used for clearing).  The idea is to place zeros
into the cache above a physical page rather than into the physical page
itself, and then flush the cache, because often the zeros end up being
replaced quickly anyway.

We can also use the tmpalias space for flushing a page.  The difference here
is that we have to do tmpalias processing in the non-access data and
instruction traps.  The principle is the same: as long as we know the
physical address and have a virtual address congruent to the real one, the
flush will be effective.

In order to use the tmpalias space, the icache miss path has to be enhanced
to check for the alias region to make the fic instruction effective.

Signed-off-by: James Bottomley <James.Bottomley@suse.de>
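For orientation, here is a rough C sketch (not part of the patch) of what a
flush through the tmpalias space amounts to.  It assumes the parisc
definitions of TMPALIAS_MAP_START, SHMLBA, PAGE_MASK and PAGE_SIZE, uses the
dcache_stride variable this patch exports, and invents a
flush_data_cache_line() helper as a stand-in for the fdc,m loop in
flush_dcache_page_asm():

/* Illustrative sketch only -- not code from this patch. */
static void tmpalias_flush_dcache_sketch(unsigned long physaddr, unsigned long vmaddr)
{
	unsigned long vaddr, end;

	(void)physaddr;	/* in the real asm this stays in %r26 for the trap handler */

	/*
	 * Pick the page in the tmpalias window that is congruent with the
	 * user mapping (same offset modulo SHMLBA), so it indexes the same
	 * cache lines as the real mapping.
	 */
	vaddr = TMPALIAS_MAP_START | (vmaddr & (SHMLBA - 1) & PAGE_MASK);
	end = vaddr + PAGE_SIZE;

	/*
	 * Touching vaddr raises a non-access data TLB miss; the handler
	 * (do_alias in entry.S) recognises the tmpalias range and inserts a
	 * translation to the physical page, so the flush reaches the right
	 * data without borrowing the other process's space register and pgd.
	 */
	for (; vaddr < end; vaddr += dcache_stride)
		flush_data_cache_line(vaddr);	/* hypothetical stand-in for fdc,m */
}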
Diffstat (limited to 'arch')
-rw-r--r--	arch/parisc/include/asm/cacheflush.h	7
-rw-r--r--	arch/parisc/kernel/cache.c		109
-rw-r--r--	arch/parisc/kernel/entry.S		194
-rw-r--r--	arch/parisc/kernel/pacache.S		245
4 files changed, 272 insertions, 283 deletions
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f388a85bba11..dc9286a4dcc7 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -26,8 +26,6 @@ void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
-void flush_user_dcache_page(unsigned long);
-void flush_user_icache_page(unsigned long);
 void flush_user_dcache_range(unsigned long, unsigned long);
 void flush_user_icache_range(unsigned long, unsigned long);
 
@@ -90,12 +88,15 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
 
+/* defined in pacache.S exported in cache.c used by flush_anon_page */
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void
 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
 	if (PageAnon(page))
-		flush_user_dcache_page(vmaddr);
+		flush_dcache_page_asm(page_to_phys(page), vmaddr);
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index d054f3da3ff5..3f11331c2775 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -27,12 +27,17 @@
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
+#include <asm/shmparam.h>
 
 int split_tlb __read_mostly;
 int dcache_stride __read_mostly;
 int icache_stride __read_mostly;
 EXPORT_SYMBOL(dcache_stride);
 
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
 
 /* On some machines (e.g. ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
@@ -259,81 +264,13 @@ void disable_sr_hashing(void)
 	panic("SpaceID hashing is still on!\n");
 }
 
-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long pfn)
-{
-	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
-	pmd_t *pmd;
-	pte_t pte;
-
-	if(pgd_none(*pgd))
-		return 0;
-
-	pmd = pmd_offset(pgd, addr);
-	if(pmd_none(*pmd) || pmd_bad(*pmd))
-		return 0;
-
-	/* We cannot take the pte lock here: flush_cache_page is usually
-	 * called with pte lock already held.  Whereas flush_dcache_page
-	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
-	 * the vma itself is secure, but the pte might come or go racily.
-	 */
-	pte = *pte_offset_map(pmd, addr);
-	/* But pte_unmap() does nothing on this architecture */
-
-	/* Filter out coincidental file entries and swap entries */
-	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
-		return 0;
-
-	return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process.  cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current.  We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
-static inline void
-flush_user_cache_page_non_current(struct vm_area_struct *vma,
-				  unsigned long vmaddr)
-{
-	/* save the current process space and pgd */
-	unsigned long space = mfsp(3), pgd = mfctl(25);
-
-	/* we don't mind taking interrupts since they may not
-	 * do anything with user space, but we can't
-	 * be preempted here */
-	preempt_disable();
-
-	/* make us current */
-	mtctl(__pa(vma->vm_mm->pgd), 25);
-	mtsp(vma->vm_mm->context, 3);
-
-	flush_user_dcache_page(vmaddr);
-	if(vma->vm_flags & VM_EXEC)
-		flush_user_icache_page(vmaddr);
-
-	/* put the old current process back */
-	mtsp(space, 3);
-	mtctl(pgd, 25);
-	preempt_enable();
-}
-
-
-static inline void
-__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-	if (likely(vma->vm_mm->context == mfsp(3))) {
-		flush_user_dcache_page(vmaddr);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_page(vmaddr);
-	} else {
-		flush_user_cache_page_non_current(vma, vmaddr);
-	}
-}
+static inline void
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+		   unsigned long physaddr)
+{
+	flush_dcache_page_asm(physaddr, vmaddr);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_page_asm(physaddr, vmaddr);
+}
 
 void flush_dcache_page(struct page *page)
@@ -342,10 +279,8 @@ void flush_dcache_page(struct page *page)
 	struct vm_area_struct *mpnt;
 	struct prio_tree_iter iter;
 	unsigned long offset;
-	unsigned long addr;
+	unsigned long addr, old_addr = 0;
 	pgoff_t pgoff;
-	unsigned long pfn = page_to_pfn(page);
-
 
 	if (mapping && !mapping_mapped(mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
@@ -369,20 +304,11 @@ void flush_dcache_page(struct page *page)
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 
-		/* Flush instructions produce non access tlb misses.
-		 * On PA, we nullify these instructions rather than
-		 * taking a page fault if the pte doesn't exist.
-		 * This is just for speed.  If the page translation
-		 * isn't there, there's no point exciting the
-		 * nadtlb handler into a nullification frenzy.
-		 *
-		 * Make sure we really have this page: the private
-		 * mappings may cover this area but have COW'd this
-		 * particular page.
-		 */
-		if (translation_exists(mpnt, addr, pfn)) {
-			__flush_cache_page(mpnt, addr);
-			break;
-		}
+		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+			__flush_cache_page(mpnt, addr, page_to_phys(page));
+			if (old_addr)
+				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+			old_addr = addr;
 		}
 	}
 	flush_dcache_mmap_unlock(mapping);
@@ -573,7 +499,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	if (likely(translation_exists(vma, vmaddr, pfn)))
-		__flush_cache_page(vma, vmaddr);
+	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
 
 }
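A side note on the SHMLBA test added to flush_dcache_page() above: two user
mappings of the same page need only one tmpalias flush when their addresses
are congruent modulo SHMLBA (same cache colour).  A restatement of that
condition in plain C, with a helper name invented here for illustration:

/* Sketch only: true when addr needs its own flush relative to the last one. */
static int needs_new_flush(unsigned long old_addr, unsigned long addr)
{
	/* first mapping seen, or an inequivalent alias (different colour) */
	return old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1));
}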
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 6337adef30f6..e8c119b61fc7 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -225,22 +225,13 @@
 #ifndef CONFIG_64BIT
 	/*
 	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
-	 *
-	 * Note: naitlb misses will be treated
-	 * as an ordinary itlb miss for now.
-	 * However, note that naitlb misses
-	 * have the faulting address in the
-	 * IOR/ISR.
 	 */
 
 	.macro	naitlb_11 code
 
 	mfctl	%isr,spc
-	b	itlb_miss_11
+	b	naitlb_miss_11
 	mfctl	%ior,va
-	/* FIXME: If user causes a naitlb miss, the priv level may not be in
-	 * lower bits of va, where the itlb miss handler is expecting them
-	 */
 
 	.align		32
 	.endm
@@ -248,26 +239,17 @@
 
 	/*
 	 * naitlb miss interruption handler (parisc 2.0)
-	 *
-	 * Note: naitlb misses will be treated
-	 * as an ordinary itlb miss for now.
-	 * However, note that naitlb misses
-	 * have the faulting address in the
-	 * IOR/ISR.
 	 */
 
 	.macro	naitlb_20 code
 
 	mfctl	%isr,spc
 #ifdef CONFIG_64BIT
-	b	itlb_miss_20w
+	b	naitlb_miss_20w
 #else
-	b	itlb_miss_20
+	b	naitlb_miss_20
 #endif
 	mfctl	%ior,va
-	/* FIXME: If user causes a naitlb miss, the priv level may not be in
-	 * lower bits of va, where the itlb miss handler is expecting them
-	 */
 
 	.align		32
 	.endm
@@ -581,7 +563,24 @@
 	copy		\va,\tmp1
 	depi		0,31,23,\tmp1
 	cmpb,COND(<>),n	\tmp,\tmp1,\fault
-	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
+	mfctl		%cr19,\tmp	/* iir */
+	/* get the opcode (first six bits) into \tmp */
+	extrw,u		\tmp,5,6,\tmp
+	/*
+	 * Only setting the T bit prevents data cache movein
+	 * Setting access rights to zero prevents instruction cache movein
+	 *
+	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
+	 * to type field and _PAGE_READ goes to top bit of PL1
+	 */
+	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
+	/*
+	 * so if the opcode is one (i.e. this is a memory management
+	 * instruction) nullify the next load so \prot is only T.
+	 * Otherwise this is a normal data operation
+	 */
+	cmpiclr,=	0x01,\tmp,%r0
+	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
 	depd,z		\prot,8,7,\prot
 	/*
 	 * OK, it is in the temp alias region, check whether "from" or "to".
@@ -631,11 +630,7 @@ ENTRY(fault_vector_20)
 	def		13
 	def		14
 	dtlb_20		15
-#if 0
 	naitlb_20	16
-#else
-	def		16
-#endif
 	nadtlb_20	17
 	def		18
 	def		19
@@ -678,11 +673,7 @@ ENTRY(fault_vector_11)
 	def		13
 	def		14
 	dtlb_11		15
-#if 0
 	naitlb_11	16
-#else
-	def		16
-#endif
 	nadtlb_11	17
 	def		18
 	def		19
@@ -1203,7 +1194,7 @@ nadtlb_miss_20w:
 	get_pgd		spc,ptp
 	space_check	spc,t0,nadtlb_fault
 
-	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w
+	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
 
 	update_ptep	ptp,pte,t0,t1
 
@@ -1214,6 +1205,14 @@ nadtlb_miss_20w:
 	rfir
 	nop
 
+nadtlb_check_alias_20w:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_check_flush_20w
+
+	idtlbt		pte,prot
+
+	rfir
+	nop
+
 nadtlb_check_flush_20w:
 	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate
 
@@ -1255,25 +1254,7 @@ dtlb_miss_11:
 	nop
 
 dtlb_check_alias_11:
-
-	/* Check to see if fault is in the temporary alias region */
-
-	cmpib,<>,n	0,spc,dtlb_fault /* forward */
-	ldil		L%(TMPALIAS_MAP_START),t0
-	copy		va,t1
-	depwi		0,31,23,t1
-	cmpb,<>,n	t0,t1,dtlb_fault /* forward */
-	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
-	depw,z		prot,8,7,prot
-
-	/*
-	 * OK, it is in the temp alias region, check whether "from" or "to".
-	 * Check "subtle" note in pacache.S re: r23/r26.
-	 */
-
-	extrw,u,=	va,9,1,r0
-	or,tr		%r23,%r0,pte	/* If "from" use "from" page */
-	or		%r26,%r0,pte	/* else "to", use "to" page */
+	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
 
 	idtlba		pte,(va)
 	idtlbp		prot,(va)
@@ -1286,7 +1267,7 @@ nadtlb_miss_11:
 
 	space_check	spc,t0,nadtlb_fault
 
-	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11
+	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
 
 	update_ptep	ptp,pte,t0,t1
 
@@ -1304,6 +1285,15 @@ nadtlb_miss_11:
 	rfir
 	nop
 
+nadtlb_check_alias_11:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_check_flush_11
+
+	idtlba		pte,(va)
+	idtlbp		prot,(va)
+
+	rfir
+	nop
+
 nadtlb_check_flush_11:
 	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate
 
@@ -1359,7 +1349,7 @@ nadtlb_miss_20:
 
 	space_check	spc,t0,nadtlb_fault
 
-	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20
+	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
 
 	update_ptep	ptp,pte,t0,t1
 
@@ -1372,6 +1362,14 @@ nadtlb_miss_20:
 	rfir
 	nop
 
+nadtlb_check_alias_20:
+	do_alias	spc,t0,t1,va,pte,prot,nadtlb_check_flush_20
+
+	idtlbt		pte,prot
+
+	rfir
+	nop
+
 nadtlb_check_flush_20:
 	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate
 
@@ -1484,6 +1482,36 @@ itlb_miss_20w:
 	rfir
 	nop
 
+naitlb_miss_20w:
+
+	/*
+	 * I miss is a little different, since we allow users to fault
+	 * on the gateway page which is in the kernel address space.
+	 */
+
+	space_adjust	spc,va,t0
+	get_pgd		spc,ptp
+	space_check	spc,t0,naitlb_fault
+
+	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
+
+	update_ptep	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
+naitlb_check_alias_20w:
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
 #else
 
 itlb_miss_11:
@@ -1508,6 +1536,38 @@ itlb_miss_11:
 	rfir
 	nop
 
+naitlb_miss_11:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,naitlb_fault
+
+	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
+
+	update_ptep	ptp,pte,t0,t1
+
+	make_insert_tlb_11	spc,pte,prot
+
+	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
+	mtsp		spc,%sr1
+
+	iitlba		pte,(%sr1,va)
+	iitlbp		prot,(%sr1,va)
+
+	mtsp		t0, %sr1	/* Restore sr1 */
+
+	rfir
+	nop
+
+naitlb_check_alias_11:
+	do_alias	spc,t0,t1,va,pte,prot,itlb_fault
+
+	iitlba		pte,(%sr0, va)
+	iitlbp		prot,(%sr0, va)
+
+	rfir
+	nop
+
+
 itlb_miss_20:
 	get_pgd		spc,ptp
 
@@ -1526,6 +1586,32 @@ itlb_miss_20:
 	rfir
 	nop
 
+naitlb_miss_20:
+	get_pgd		spc,ptp
+
+	space_check	spc,t0,naitlb_fault
+
+	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
+
+	update_ptep	ptp,pte,t0,t1
+
+	make_insert_tlb	spc,pte,prot
+
+	f_extend	pte,t0
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
+naitlb_check_alias_20:
+	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault
+
+	iitlbt		pte,prot
+
+	rfir
+	nop
+
 #endif
 
 #ifdef CONFIG_64BIT
@@ -1662,6 +1748,10 @@ nadtlb_fault:
 	b		intr_save
 	ldi		17,%r8
 
+naitlb_fault:
+	b		intr_save
+	ldi		16,%r8
+
 dtlb_fault:
 	b		intr_save
 	ldi		15,%r8
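The trickiest part of the entry.S change is the protection value that the
do_alias macro installs on a tmpalias fault, explained in the comment block
added to the macro above.  A C restatement (names invented here for
illustration; the real code is the ldi/cmpiclr/ldi sequence in the macro):

/* Sketch only: which prot bits the alias fault handler would install. */
static unsigned long alias_prot_sketch(unsigned int opcode)
{
	/*
	 * Opcode 0x01 is a memory-management instruction (fdc, fic, ...):
	 * keep only the T (reftrap) protection so neither cache moves the
	 * line in while the flush itself walks the alias page.
	 */
	if (opcode == 0x01)
		return _PAGE_REFTRAP | _PAGE_READ | _PAGE_WRITE;

	/* Normal load/store through the alias (copy/clear of a user page). */
	return _PAGE_DIRTY | _PAGE_READ | _PAGE_WRITE;
}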
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 09b77b2553c6..a85823668cba 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -608,93 +608,131 @@ ENTRY(__clear_user_page_asm)
 	.procend
 ENDPROC(__clear_user_page_asm)
 
-ENTRY(flush_kernel_dcache_page_asm)
+ENTRY(flush_dcache_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
 
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+	/* FIXME: page size dependend */
+#endif
+	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,12, %r28		/* Clear any offset bits */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,12, %r28		/* Clear any offset bits */
+#endif
+
+	/* Purge any old translation */
+
+	pdtlb		0(%r28)
+
 	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r23
+	ldw		R%dcache_stride(%r1), %r1
 
 #ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
 #else
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
 #endif
-	add		%r26, %r25, %r25
-	sub		%r25, %r23, %r25
+	add		%r28, %r25, %r25
+	sub		%r25, %r1, %r25
 
 
-1:	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	fdc,m		%r23(%r26)
-	cmpb,COND(<<)	%r26, %r25,1b
-	fdc,m		%r23(%r26)
+1:	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	fdc,m		%r1(%r28)
+	cmpb,COND(<<)	%r28, %r25,1b
+	fdc,m		%r1(%r28)
 
 	sync
 	bv		%r0(%r2)
-	nop
+	pdtlb		(%r25)
 	.exit
 
 	.procend
-ENDPROC(flush_kernel_dcache_page_asm)
+ENDPROC(flush_dcache_page_asm)
 
-ENTRY(flush_user_dcache_page)
+ENTRY(flush_icache_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
 
-	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r23
-
+	ldil		L%(TMPALIAS_MAP_START), %r28
 #ifdef CONFIG_64BIT
-	depdi,z		1,63-PAGE_SHIFT,1, %r25
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+	/* FIXME: page size dependend */
+#endif
+	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,12, %r28		/* Clear any offset bits */
 #else
-	depwi,z		1,31-PAGE_SHIFT,1, %r25
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,12, %r28		/* Clear any offset bits */
 #endif
-	add		%r26, %r25, %r25
-	sub		%r25, %r23, %r25
 
+	/* Purge any old translation */
 
-1:	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	fdc,m		%r23(%sr3, %r26)
-	cmpb,COND(<<)	%r26, %r25,1b
-	fdc,m		%r23(%sr3, %r26)
+	pitlb		(%sr0,%r28)
+
+	ldil		L%icache_stride, %r1
+	ldw		R%icache_stride(%r1), %r1
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r28, %r25, %r25
+	sub		%r25, %r1, %r25
+
+
+1:	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	fic,m		%r1(%r28)
+	cmpb,COND(<<)	%r28, %r25,1b
+	fic,m		%r1(%r28)
 
 	sync
 	bv		%r0(%r2)
-	nop
+	pitlb		(%sr0,%r25)
 	.exit
 
 	.procend
-ENDPROC(flush_user_dcache_page)
+ENDPROC(flush_icache_page_asm)
 
-ENTRY(flush_user_icache_page)
+ENTRY(flush_kernel_dcache_page_asm)
 	.proc
 	.callinfo NO_CALLS
 	.entry
@@ -711,23 +749,23 @@ ENTRY(flush_user_icache_page)
 	sub		%r25, %r23, %r25
 
 
-1:	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
-	fic,m		%r23(%sr3, %r26)
+1:	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
 	cmpb,COND(<<)	%r26, %r25,1b
-	fic,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%r26)
 
 	sync
 	bv		%r0(%r2)
@@ -735,8 +773,7 @@ ENTRY(flush_user_icache_page)
 	.exit
 
 	.procend
-ENDPROC(flush_user_icache_page)
-
+ENDPROC(flush_kernel_dcache_page_asm)
 
 ENTRY(purge_kernel_dcache_page)
 	.proc
@@ -780,69 +817,6 @@ ENTRY(purge_kernel_dcache_page)
 	.procend
 ENDPROC(purge_kernel_dcache_page)
 
-#if 0
-	/* Currently not used, but it still is a possible alternate
-	 * solution.
-	 */
-
-ENTRY(flush_alias_page)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-
-	tophys_r1		%r26
-
-	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
-	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
-	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,12, %r28		/* Clear any offset bits */
-#else
-	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
-	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
-	depwi		0, 31,12, %r28		/* Clear any offset bits */
-#endif
-
-	/* Purge any old translation */
-
-	pdtlb		0(%r28)
-
-	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r23
-
-#ifdef CONFIG_64BIT
-	depdi,z		1, 63-PAGE_SHIFT,1, %r29
-#else
-	depwi,z		1, 31-PAGE_SHIFT,1, %r29
-#endif
-	add		%r28, %r29, %r29
-	sub		%r29, %r23, %r29
-
-1:	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	fdc,m		%r23(%r28)
-	cmpb,COND(<<)	%r28, %r29, 1b
-	fdc,m		%r23(%r28)
-
-	sync
-	bv		%r0(%r2)
-	nop
-	.exit
-
-	.procend
-#endif
 
 	.export flush_user_dcache_range_asm
 
@@ -865,7 +839,6 @@ flush_user_dcache_range_asm:
 	.exit
 
 	.procend
-ENDPROC(flush_alias_page)
 
 ENTRY(flush_kernel_dcache_range_asm)
 	.proc