author		David S. Miller <davem@davemloft.net>	2006-01-31 21:29:18 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:13 -0500
commit		74bf4312fff083ab25c3f357cc653ada7995e5f6 (patch)
tree		c23dea461e32485f4cd7ca4b8c33c632655eb906 /arch/sparc64/mm/tsb.c
parent		30d4d1ffed7098afe2641536d67eef150499da02 (diff)
[SPARC64]: Move away from virtual page tables, part 1.
We now use the TSB hardware assist features of the UltraSPARC MMUs.

SMP is currently knowingly broken; we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-)

Another known broken case is non-8KB base page size.

We also noticed that flush_tlb_all() is not referenced anywhere; only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all().

The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8KB TSB. Later we can add code to dynamically increase the size of the per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses.

We even accumulate OBP translations into the kernel TSB.

Another area for refinement is large page size support. We could use a secondary address space TSB to handle those.

Signed-off-by: David S. Miller <davem@davemloft.net>
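As a rough sanity check on the 4MB figure: the TSB is direct-mapped, so its reach is simply (table size / entry size) * base page size. Below is a minimal user-space sketch of that arithmetic; the constants mirror the new tsb.c, but the program itself is illustrative only and not part of the patch.

#include <stdio.h>

/* Constants mirroring the new arch/sparc64/mm/tsb.c (8KB base pages). */
#define KERNEL_TSB_SIZE_BYTES 8192UL
#define TSB_ENTRY_BYTES       16UL    /* sizeof(struct tsb): tag + pte */
#define BASE_PAGE_BYTES       8192UL  /* 8KB base page size */

int main(void)
{
	unsigned long nentries = KERNEL_TSB_SIZE_BYTES / TSB_ENTRY_BYTES;
	unsigned long reach = nentries * BASE_PAGE_BYTES;

	/* 512 entries, each able to cache one 8KB translation: about 4MB
	 * of RSS before capacity and conflict misses start to dominate. */
	printf("entries=%lu, reach=%luMB\n", nentries, reach >> 20);
	return 0;
}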
Diffstat (limited to 'arch/sparc64/mm/tsb.c')
-rw-r--r--	arch/sparc64/mm/tsb.c	|	84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
new file mode 100644
index 000000000000..15e8af58b1d2
--- /dev/null
+++ b/arch/sparc64/mm/tsb.c
@@ -0,0 +1,84 @@
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#define TSB_ENTRY_ALIGNMENT	16

struct tsb {
	unsigned long tag;
	unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));

/* We use an 8K TSB for the whole kernel; this lets us handle
 * about 4MB of modules and vmalloc mappings without incurring
 * many hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	8192
#define KERNEL_TSB_NENTRIES \
	(KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (KERNEL_TSB_NENTRIES - 1);
}

static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
{
	if (context == ~0UL)
		return 1;

	return (entry->tag == ((vaddr >> 22) | (context << 48)));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		struct tsb *ent = &swapper_tsb[tsb_hash(v)];

		if (tag_compare(ent, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb;
	unsigned long ctx = ~0UL;
	int i;

	if (CTX_VALID(mm->context))
		ctx = CTX_HWBITS(mm->context);

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		struct tsb *ent;

		v &= ~0x1UL;

		ent = &tsb[tsb_hash(v)];
		if (tag_compare(ent, v, ctx)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}
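To see how the pieces above fit together: tsb_hash() picks a direct-mapped slot from the virtual-address bits just above the page offset, and tag_compare() matches a tag built as (vaddr >> 22) | (context << 48). The following is a minimal user-space sketch of that lookup, assuming 8KB pages (PAGE_SHIFT == 13) and a 512-entry table; the demo_* names and the sample address/context are illustrative, not kernel symbols.

#include <stdio.h>

#define DEMO_PAGE_SHIFT   13UL   /* assumes 8KB base pages, as in the patch */
#define DEMO_TSB_NENTRIES 512UL  /* 8192-byte TSB / 16-byte entries */

struct demo_tsb {
	unsigned long tag;
	unsigned long pte;
};

/* Direct-mapped index: VA bits just above the page offset. */
static unsigned long demo_tsb_hash(unsigned long vaddr)
{
	return (vaddr >> DEMO_PAGE_SHIFT) & (DEMO_TSB_NENTRIES - 1);
}

/* Tag layout used by tag_compare(): high VA bits plus the context number. */
static unsigned long demo_tsb_tag(unsigned long vaddr, unsigned long context)
{
	return (vaddr >> 22) | (context << 48);
}

int main(void)
{
	static struct demo_tsb tsb[DEMO_TSB_NENTRIES];
	unsigned long vaddr = 0x0000070000402000UL;  /* arbitrary sample VA */
	unsigned long ctx = 5;                       /* arbitrary sample context */
	struct demo_tsb *ent = &tsb[demo_tsb_hash(vaddr)];

	/* "Insert" a translation, then check it the way the flush paths do. */
	ent->tag = demo_tsb_tag(vaddr, ctx);
	printf("slot=%lu hit=%d\n", demo_tsb_hash(vaddr),
	       ent->tag == demo_tsb_tag(vaddr, ctx));
	return 0;
}

Because the table is direct-mapped, each flushed virtual address maps to exactly one slot, which is why both flush loops above can simply clear that slot's tag and move on.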