author     Mikael Starvik <mikael.starvik@axis.com>  2005-07-27 14:44:44 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>     2005-07-27 19:26:01 -0400
commit     51533b615e605d86154ec1b4e585c8ca1b0b15b7 (patch)
tree       4a6d7d8494d2017632d83624fb71b36031e0e7e5 /arch/cris/arch-v32/mm
parent     5d01e6ce785884a5db5792cd2e5bb36fa82fe23c (diff)
[PATCH] CRIS update: new subarchitecture v32
New CRIS sub architecture named v32.

From: Dave Jones <davej@redhat.com>

Fix swapped kmalloc args

Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/cris/arch-v32/mm')
-rw-r--r--  arch/cris/arch-v32/mm/Makefile |   3
-rw-r--r--  arch/cris/arch-v32/mm/init.c   | 174
-rw-r--r--  arch/cris/arch-v32/mm/intmem.c | 139
-rw-r--r--  arch/cris/arch-v32/mm/mmu.S    | 141
-rw-r--r--  arch/cris/arch-v32/mm/tlb.c    | 208
5 files changed, 665 insertions(+), 0 deletions(-)
diff --git a/arch/cris/arch-v32/mm/Makefile b/arch/cris/arch-v32/mm/Makefile
new file mode 100644
index 000000000000..9146f88484b1
--- /dev/null
+++ b/arch/cris/arch-v32/mm/Makefile
@@ -0,0 +1,3 @@
1# Makefile for the Linux/cris parts of the memory manager.
2
3obj-y := mmu.o init.o tlb.o intmem.o
diff --git a/arch/cris/arch-v32/mm/init.c b/arch/cris/arch-v32/mm/init.c
new file mode 100644
index 000000000000..f2fba27d822c
--- /dev/null
+++ b/arch/cris/arch-v32/mm/init.c
@@ -0,0 +1,174 @@
1/*
2 * Set up paging and the MMU.
3 *
4 * Copyright (C) 2000-2003, Axis Communications AB.
5 *
6 * Authors: Bjorn Wesen <bjornw@axis.com>
7 * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
8 */
9#include <linux/config.h>
10#include <linux/mmzone.h>
11#include <linux/init.h>
12#include <linux/bootmem.h>
13#include <linux/mm.h>
14#include <linux/config.h>
15#include <asm/pgtable.h>
16#include <asm/page.h>
17#include <asm/types.h>
18#include <asm/mmu.h>
19#include <asm/io.h>
20#include <asm/mmu_context.h>
21#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
22#include <asm/arch/hwregs/supp_reg.h>
23
24extern void tlb_init(void);
25
26/*
27 * The kernel is already mapped with linear mapping at kseg_c so there's no
28 * need to map it with a page table. However, head.S also temporarily mapped it
29 * at kseg_4, so the ksegs are set up again here. Also clear the TLB and do various
30 * other paging stuff.
31 */
32void __init
33cris_mmu_init(void)
34{
35 unsigned long mmu_config;
36 unsigned long mmu_kbase_hi;
37 unsigned long mmu_kbase_lo;
38 unsigned short mmu_page_id;
39
40 /*
41 * Make sure the current pgd table points to something sane, even if it
42 * is most probably not used until the next switch_mm.
43 */
44 per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
45
46#ifdef CONFIG_SMP
47 {
48 pgd_t **pgd;
49 pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
50 SUPP_BANK_SEL(1);
51 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
52 SUPP_BANK_SEL(2);
53 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
54 }
55#endif
56
57 /* Initialise the TLB. Function found in tlb.c. */
58 tlb_init();
59
60 /* Enable exceptions and initialize the kernel segments. */
61 mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |
62 REG_STATE(mmu, rw_mm_cfg, acc, on) |
63 REG_STATE(mmu, rw_mm_cfg, ex, on) |
64 REG_STATE(mmu, rw_mm_cfg, inv, on) |
65 REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
66 REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
67 REG_STATE(mmu, rw_mm_cfg, seg_d, page) |
68 REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
69 REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
70#ifndef CONFIG_ETRAXFS_SIM
71 REG_STATE(mmu, rw_mm_cfg, seg_a, page) |
72#else
73 REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
74#endif
75 REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
76 REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
77 REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
78 REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
79 REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
80 REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
81 REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
82 REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
83 REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
84 REG_STATE(mmu, rw_mm_cfg, seg_0, page));
85
86 mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
87 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
88 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
89#ifndef CONFIG_ETRAXFS_SIM
90 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
91#else
92 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x0) |
93#endif
94 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
95#ifndef CONFIG_ETRAXFS_SIM
96 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
97#else
98 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
99#endif
100 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
101 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));
102
103 mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
104 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
105 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
106 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
107 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
108 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
109 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
110 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));
111
112 mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);
113
114 /* Update the instruction MMU. */
115 SUPP_BANK_SEL(BANK_IM);
116 SUPP_REG_WR(RW_MM_CFG, mmu_config);
117 SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
118 SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
119 SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
120
121 /* Update the data MMU. */
122 SUPP_BANK_SEL(BANK_DM);
123 SUPP_REG_WR(RW_MM_CFG, mmu_config);
124 SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
125 SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
126 SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
127
128 SPEC_REG_WR(SPEC_REG_PID, 0);
129
130 /*
131 * The MMU has been enabled ever since head.S but just to make it
132	 * totally obvious, enable it here as well.
133 */
134 SUPP_BANK_SEL(BANK_GC);
135 SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
136}
137
138void __init
139paging_init(void)
140{
141 int i;
142 unsigned long zones_size[MAX_NR_ZONES];
143
144 printk("Setting up paging and the MMU.\n");
145
146 /* Clear out the init_mm.pgd that will contain the kernel's mappings. */
147 for(i = 0; i < PTRS_PER_PGD; i++)
148 swapper_pg_dir[i] = __pgd(0);
149
150 cris_mmu_init();
151
152 /*
153	 * Allocate and zero the empty_zero_page used to back read-only
154	 * zero-filled mappings.
155 */
156 empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
157 memset((void *) empty_zero_page, 0, PAGE_SIZE);
158
159 /* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
160 zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
161
162 for (i = 1; i < MAX_NR_ZONES; i++)
163 zones_size[i] = 0;
164
165 /*
166 * Use free_area_init_node instead of free_area_init, because it is
167 * designed for systems where the DRAM starts at an address
168 * substantially higher than 0, like us (we start at PAGE_OFFSET). This
169 * saves space in the mem_map page array.
170 */
171 free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
172
173 mem_map = contig_page_data.node_mem_map;
174}
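A note on the segment programming in cris_mmu_init() above: each of the sixteen 256 MB segments that rw_mm_cfg marks as "linear" is translated by substituting the 4-bit base_* value from rw_mm_kbase_hi/lo for the top nibble of the virtual address. The following is only a minimal stand-alone C sketch of that translation, assuming the non-simulator values written above (seg_c with base 0x4, seg_e with base 0x8); the helper name is made up for illustration and is not part of the patch.

#include <stdint.h>

/* Illustrative only: translate a kernel virtual address that falls in a
 * linearly mapped segment, given the 4-bit base nibble programmed for
 * that segment in rw_mm_kbase_hi/lo. */
static uint32_t kseg_linear_to_phys(uint32_t vaddr, uint32_t base_nibble)
{
	return (base_nibble << 28) | (vaddr & 0x0fffffff);
}

/* With the values above: an address in kseg_c (base 0x4) such as
 * 0xc0001000 maps to physical 0x40001000, and an address in kseg_e
 * (base 0x8) such as 0xe0000400 maps to 0x80000400. */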
diff --git a/arch/cris/arch-v32/mm/intmem.c b/arch/cris/arch-v32/mm/intmem.c
new file mode 100644
index 000000000000..41ee7f7997fd
--- /dev/null
+++ b/arch/cris/arch-v32/mm/intmem.c
@@ -0,0 +1,139 @@
1/*
2 * Simple allocator for internal RAM in ETRAX FS
3 *
4 * Copyright (c) 2004 Axis Communications AB.
5 */
6
7#include <linux/list.h>
8#include <linux/slab.h>
9#include <asm/io.h>
10#include <asm/arch/memmap.h>
11
12#define STATUS_FREE 0
13#define STATUS_ALLOCATED 1
14
15struct intmem_allocation {
16 struct list_head entry;
17 unsigned int size;
18 unsigned offset;
19 char status;
20};
21
22
23static struct list_head intmem_allocations;
24static void* intmem_virtual;
25
26static void crisv32_intmem_init(void)
27{
28 static int initiated = 0;
29 if (!initiated) {
30 struct intmem_allocation* alloc =
31 (struct intmem_allocation*)kmalloc(sizeof *alloc, GFP_KERNEL);
32 INIT_LIST_HEAD(&intmem_allocations);
33 intmem_virtual = ioremap(MEM_INTMEM_START, MEM_INTMEM_SIZE);
34 initiated = 1;
35 alloc->size = MEM_INTMEM_SIZE;
36 alloc->offset = 0;
37 alloc->status = STATUS_FREE;
38 list_add_tail(&alloc->entry, &intmem_allocations);
39 }
40}
41
42void* crisv32_intmem_alloc(unsigned size, unsigned align)
43{
44 struct intmem_allocation* allocation;
45 struct intmem_allocation* tmp;
46 void* ret = NULL;
47
48 preempt_disable();
49 crisv32_intmem_init();
50
51 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
52 int alignment = allocation->offset % align;
53 alignment = alignment ? align - alignment : alignment;
54
55 if (allocation->status == STATUS_FREE &&
56 allocation->size >= size + alignment) {
57 if (allocation->size > size + alignment) {
58 struct intmem_allocation* alloc =
59 (struct intmem_allocation*)
60 kmalloc(sizeof *alloc, GFP_ATOMIC);
61 alloc->status = STATUS_FREE;
62 alloc->size = allocation->size - size - alignment;
63 alloc->offset = allocation->offset + size;
64 list_add(&alloc->entry, &allocation->entry);
65
66 if (alignment) {
67 struct intmem_allocation* tmp;
68 tmp = (struct intmem_allocation*)
69 kmalloc(sizeof *tmp, GFP_ATOMIC);
70 tmp->offset = allocation->offset;
71 tmp->size = alignment;
72 tmp->status = STATUS_FREE;
73 allocation->offset += alignment;
74 list_add_tail(&tmp->entry, &allocation->entry);
75 }
76 }
77 allocation->status = STATUS_ALLOCATED;
78 allocation->size = size;
79 ret = (void*)((int)intmem_virtual + allocation->offset);
80 }
81 }
82 preempt_enable();
83 return ret;
84}
85
86void crisv32_intmem_free(void* addr)
87{
88 struct intmem_allocation* allocation;
89 struct intmem_allocation* tmp;
90
91 if (addr == NULL)
92 return;
93
94 preempt_disable();
95 crisv32_intmem_init();
96
97 list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
98 if (allocation->offset == (int)(addr - intmem_virtual)) {
99 struct intmem_allocation* prev =
100 list_entry(allocation->entry.prev,
101 struct intmem_allocation, entry);
102 struct intmem_allocation* next =
103 list_entry(allocation->entry.next,
104 struct intmem_allocation, entry);
105
106 allocation->status = STATUS_FREE;
107 /* Join with prev and/or next if also free */
108 if (prev->status == STATUS_FREE) {
109 prev->size += allocation->size;
110 list_del(&allocation->entry);
111 kfree(allocation);
112 allocation = prev;
113 }
114 if (next->status == STATUS_FREE) {
115 allocation->size += next->size;
116 list_del(&next->entry);
117 kfree(next);
118 }
119 preempt_enable();
120 return;
121 }
122 }
123 preempt_enable();
124}
125
126void* crisv32_intmem_phys_to_virt(unsigned long addr)
127{
128 return (void*)(addr - MEM_INTMEM_START+
129 (unsigned long)intmem_virtual);
130}
131
132unsigned long crisv32_intmem_virt_to_phys(void* addr)
133{
134 return (unsigned long)((unsigned long )addr -
135 (unsigned long)intmem_virtual + MEM_INTMEM_START);
136}
137
138
139
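The internal-RAM allocator above is a first-fit scan over an ordered region list: an allocation may split a free region into alignment padding, the allocation itself, and a remainder, and crisv32_intmem_free() later coalesces a freed region with free neighbours. Below is a small stand-alone sketch of the padding arithmetic used at the top of crisv32_intmem_alloc(); the function name pad_to_align and the example values are hypothetical.

#include <assert.h>

/* Padding needed to bring `offset` up to the next multiple of `align`,
 * mirroring the "alignment" computation in crisv32_intmem_alloc(). */
static unsigned pad_to_align(unsigned offset, unsigned align)
{
	unsigned rem = offset % align;
	return rem ? align - rem : 0;
}

int main(void)
{
	assert(pad_to_align(12, 32) == 20);	/* free region at offset 12, 32-byte alignment */
	assert(pad_to_align(64, 32) == 0);	/* already aligned, no padding needed */
	return 0;
}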
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
new file mode 100644
index 000000000000..27b70e5006af
--- /dev/null
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -0,0 +1,141 @@
1/*
2 * Copyright (C) 2003 Axis Communications AB
3 *
4 * Authors: Mikael Starvik (starvik@axis.com)
5 *
6 * Code for the fault low-level handling routines.
7 *
8 */
9
10#include <asm/page.h>
11#include <asm/pgtable.h>
12
13; Save all registers. Must save in the same order as struct pt_regs.
14.macro SAVE_ALL
15 subq 12, $sp
16 move $erp, [$sp]
17 subq 4, $sp
18 move $srp, [$sp]
19 subq 4, $sp
20 move $ccs, [$sp]
21 subq 4, $sp
22 move $spc, [$sp]
23 subq 4, $sp
24 move $mof, [$sp]
25 subq 4, $sp
26 move $srs, [$sp]
27 subq 4, $sp
28 move.d $acr, [$sp]
29 subq 14*4, $sp
30 movem $r13, [$sp]
31 subq 4, $sp
32 move.d $r10, [$sp]
33.endm
34
35; Bus fault handler. Extracts relevant information and calls mm subsystem
36; to handle the fault.
37.macro MMU_BUS_FAULT_HANDLER handler, mmu, we, ex
38 .globl \handler
39\handler:
40 SAVE_ALL
41 move \mmu, $srs ; Select MMU support register bank
42 move.d $sp, $r11 ; regs
43 moveq 1, $r12 ; protection fault
44 moveq \we, $r13 ; write exception?
45 orq \ex << 1, $r13 ; execute?
46 move $s3, $r10 ; rw_mm_cause
47 and.d ~8191, $r10 ; Get faulting page start address
48
49 jsr do_page_fault
50 nop
51 ba ret_from_intr
52 nop
53.endm
54
55; Refill handler. Three cases may occur:
56; 1. PMD and PTE exist in the mm subsystem but not in the TLB
57; 2. PMD exists but not PTE
58; 3. PMD doesn't exist
59; The code below handles case 1 and calls the mm subsystem for cases 2 and 3.
60; Do not touch this code without very good reasons and extensive testing.
61; Note that the code is optimized to minimize stalls (makes the code harder
62; to read).
63;
64; Each page is 8 KB. Each PMD holds 8192/4 PTEs (each PTE is 4 bytes) so each
65; PMD holds 16 MB of virtual memory.
66; Bits 0-12 : Offset within a page
67; Bits 13-23 : PTE offset within a PMD
68; Bits 24-31 : PMD offset within the PGD
69
70.macro MMU_REFILL_HANDLER handler, mmu
71 .globl \handler
72\handler:
73 subq 4, $sp
74; (The pipeline stalls for one cycle; $sp used as address in the next cycle.)
75 move $srs, [$sp]
76 subq 4, $sp
77 move \mmu, $srs ; Select MMU support register bank
78 move.d $acr, [$sp]
79 subq 4, $sp
80 move.d $r0, [$sp]
81#ifdef CONFIG_SMP
82 move $s7, $acr ; PGD
83#else
84 move.d per_cpu__current_pgd, $acr ; PGD
85#endif
86 ; Look up PMD in PGD
87 move $s3, $r0 ; rw_mm_cause
88 lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
89 move.d [$acr], $acr ; PGD for the current process
90 addi $r0.d, $acr, $acr
91 move $s3, $r0 ; rw_mm_cause
92 move.d [$acr], $acr ; Get PMD
93 beq 1f
94 ; Look up PTE in PMD
95 lsrq PAGE_SHIFT, $r0
96 and.w PAGE_MASK, $acr ; Remove PMD flags
97 and.d 0x7ff, $r0 ; Get PTE index into PMD (bit 13-23)
98 addi $r0.d, $acr, $acr
99 move.d [$acr], $acr ; Get PTE
100 beq 2f
101 move.d [$sp+], $r0 ; Pop r0 in delayslot
102 ; Store in TLB
103 move $acr, $s5
104 ; Return
105 move.d [$sp+], $acr
106 move [$sp], $srs
107 addq 4, $sp
108 rete
109 rfe
1101: ; PMD missing, let the mm subsystem fix it up.
111 move.d [$sp+], $r0 ; Pop r0
1122: ; PTE missing, let the mm subsystem fix it up.
113 move.d [$sp+], $acr
114 move [$sp], $srs
115 addq 4, $sp
116 SAVE_ALL
117 move \mmu, $srs
118 move.d $sp, $r11 ; regs
119 clear.d $r12 ; Not a protection fault
120 move.w PAGE_MASK, $acr
121 move $s3, $r10 ; rw_mm_cause
122 btstq 9, $r10 ; Check if write access
123 smi $r13
124 and.w PAGE_MASK, $r10 ; Get VPN (virtual address)
125 jsr do_page_fault
126 and.w $acr, $r10
127 ; Return
128 ba ret_from_intr
129 nop
130.endm
131
132	; These are the MMU refill and bus fault handlers.
133
134MMU_REFILL_HANDLER i_mmu_refill, 1
135MMU_BUS_FAULT_HANDLER i_mmu_invalid, 1, 0, 0
136MMU_BUS_FAULT_HANDLER i_mmu_access, 1, 0, 0
137MMU_BUS_FAULT_HANDLER i_mmu_execute, 1, 0, 1
138MMU_REFILL_HANDLER d_mmu_refill, 2
139MMU_BUS_FAULT_HANDLER d_mmu_invalid, 2, 0, 0
140MMU_BUS_FAULT_HANDLER d_mmu_access, 2, 0, 0
141MMU_BUS_FAULT_HANDLER d_mmu_write, 2, 1, 0
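The bit layout described in the MMU_REFILL_HANDLER comment above (8 KB pages, so bits 0-12 are the page offset, bits 13-23 the PTE index, and bits 24-31 the PMD index) can be pictured with the following stand-alone C sketch. It illustrates the same arithmetic the assembly performs with lsrq 24 and lsrq PAGE_SHIFT / and.d 0x7ff; it is not code from the patch.

#include <stdint.h>
#include <stdio.h>

#define V32_PAGE_SHIFT 13	/* 8 KB pages */

static void split_vaddr(uint32_t vaddr)
{
	uint32_t pmd_idx = vaddr >> 24;				/* bits 24-31 */
	uint32_t pte_idx = (vaddr >> V32_PAGE_SHIFT) & 0x7ff;	/* bits 13-23 */
	uint32_t offset  = vaddr & ((1u << V32_PAGE_SHIFT) - 1);	/* bits 0-12 */

	printf("pmd=%u pte=%u off=0x%x\n", pmd_idx, pte_idx, offset);
}

int main(void)
{
	split_vaddr(0x12345678);	/* prints: pmd=18 pte=418 off=0x1678 */
	return 0;
}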
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
new file mode 100644
index 000000000000..8233406798d3
--- /dev/null
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -0,0 +1,208 @@
1/*
2 * Low level TLB handling.
3 *
4 * Copyright (C) 2000-2003, Axis Communications AB.
5 *
6 * Authors: Bjorn Wesen <bjornw@axis.com>
7 * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
8 */
9
10#include <asm/tlb.h>
11#include <asm/mmu_context.h>
12#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
13#include <asm/arch/hwregs/supp_reg.h>
14
15#define UPDATE_TLB_SEL_IDX(val) \
16do { \
17 unsigned long tlb_sel; \
18 \
19 tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val); \
20 SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel); \
21} while(0)
22
23#define UPDATE_TLB_HILO(tlb_hi, tlb_lo) \
24do { \
25 SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi); \
26 SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo); \
27} while(0)
28
29/*
30 * The TLB can host up to 256 different mm contexts at the same time. The running
31 * context is found in the PID register. Each TLB entry contains a page_id that
32 * has to match the PID register to give a hit. page_id_map keeps track of which
33 * mm is assigned to which page_id, making sure it's known when to
34 * invalidate TLB entries.
35 *
36 * The last page_id is never used by a running context; it is used as an invalid
37 * page_id so that it's possible to make TLB entries that will never match.
38 *
39 * Note: the flushes need to be atomic, otherwise an interrupt handler that uses
40 * vmalloc'ed memory might cause a TLB load in the middle of a flush.
41 */
42
43/* Flush all TLB entries. */
44void
45__flush_tlb_all(void)
46{
47 int i;
48 int mmu;
49 unsigned long flags;
50 unsigned long mmu_tlb_hi;
51 unsigned long mmu_tlb_sel;
52
53 /*
54 * Mask with 0xf so similar TLB entries aren't written in the same 4-way
55 * entry group.
56 */
57 local_save_flags(flags);
58 local_irq_disable();
59
60 for (mmu = 1; mmu <= 2; mmu++) {
61 SUPP_BANK_SEL(mmu); /* Select the MMU */
62 for (i = 0; i < NUM_TLB_ENTRIES; i++) {
63 /* Store invalid entry */
64 mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i);
65
66 mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID)
67 | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf));
68
69 SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel);
70 SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi);
71 SUPP_REG_WR(RW_MM_TLB_LO, 0);
72 }
73 }
74
75 local_irq_restore(flags);
76}
77
78/* Flush an entire user address space. */
79void
80__flush_tlb_mm(struct mm_struct *mm)
81{
82 int i;
83 int mmu;
84 unsigned long flags;
85 unsigned long page_id;
86 unsigned long tlb_hi;
87 unsigned long mmu_tlb_hi;
88
89 page_id = mm->context.page_id;
90
91 if (page_id == NO_CONTEXT)
92 return;
93
94 /* Mark the TLB entries that match the page_id as invalid. */
95 local_save_flags(flags);
96 local_irq_disable();
97
98 for (mmu = 1; mmu <= 2; mmu++) {
99 SUPP_BANK_SEL(mmu);
100 for (i = 0; i < NUM_TLB_ENTRIES; i++) {
101 UPDATE_TLB_SEL_IDX(i);
102
103 /* Get the page_id */
104 SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);
105
106			/* Check if the page_id matches. */
107 if ((tlb_hi & 0xff) == page_id) {
108 mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid,
109 INVALID_PAGEID)
110 | REG_FIELD(mmu, rw_mm_tlb_hi, vpn,
111 i & 0xf));
112
113 UPDATE_TLB_HILO(mmu_tlb_hi, 0);
114 }
115 }
116 }
117
118 local_irq_restore(flags);
119}
120
121/* Invalidate a single page. */
122void
123__flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
124{
125 int i;
126 int mmu;
127 unsigned long page_id;
128 unsigned long flags;
129 unsigned long tlb_hi;
130 unsigned long mmu_tlb_hi;
131
132 page_id = vma->vm_mm->context.page_id;
133
134 if (page_id == NO_CONTEXT)
135 return;
136
137 addr &= PAGE_MASK;
138
139 /*
140 * Invalidate those TLB entries that match both the mm context and the
141 * requested virtual address.
142 */
143 local_save_flags(flags);
144 local_irq_disable();
145
146 for (mmu = 1; mmu <= 2; mmu++) {
147 SUPP_BANK_SEL(mmu);
148 for (i = 0; i < NUM_TLB_ENTRIES; i++) {
149 UPDATE_TLB_SEL_IDX(i);
150 SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi);
151
152			/* Check if page_id and address match */
153 if (((tlb_hi & 0xff) == page_id) &&
154 ((tlb_hi & PAGE_MASK) == addr)) {
155 mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid,
156 INVALID_PAGEID) | addr;
157
158 UPDATE_TLB_HILO(mmu_tlb_hi, 0);
159 }
160 }
161 }
162
163 local_irq_restore(flags);
164}
165
166/*
167 * Initialize the context related info for a new mm_struct
168 * instance.
169 */
170
171int
172init_new_context(struct task_struct *tsk, struct mm_struct *mm)
173{
174 mm->context.page_id = NO_CONTEXT;
175 return 0;
176}
177
178/* Called in schedule() just before actually doing the switch_to. */
179void
180switch_mm(struct mm_struct *prev, struct mm_struct *next,
181 struct task_struct *tsk)
182{
183 int cpu = smp_processor_id();
184
185 /* Make sure there is a MMU context. */
186 spin_lock(&next->page_table_lock);
187 get_mmu_context(next);
188 cpu_set(cpu, next->cpu_vm_mask);
189 spin_unlock(&next->page_table_lock);
190
191 /*
192	 * Remember the pgd for the fault handlers. Keep a separate copy of it
193 * because current and active_mm might be invalid at points where
194	 * there's still a need to dereference the pgd.
195 */
196 per_cpu(current_pgd, cpu) = next->pgd;
197
198 /* Switch context in the MMU. */
199 if (tsk && tsk->thread_info)
200 {
201 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | tsk->thread_info->tls);
202 }
203 else
204 {
205 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
206 }
207}
208
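As a closing note on the flush loops above: each rw_mm_tlb_hi entry carries the 8-bit page_id in its low bits and the VPN in the page-number bits, and an entry hits only when both agree with the running PID register and the looked-up address, so writing INVALID_PAGEID retires an entry permanently. The following is a hedged C sketch of that match test; the constants are assumptions made for the illustration, not values taken from the hardware headers.

#include <stdint.h>
#include <stdbool.h>

#define V32_PAGE_MASK	0xffffe000u	/* assumed: 8 KB pages */
#define INVALID_PID	255u		/* assumed: the last, never-assigned page_id */

/* An entry hits only if both the pid and the VPN agree, which is why the
 * flush routines above overwrite the pid field with an invalid value. */
static bool tlb_hi_matches(uint32_t tlb_hi, uint32_t pid, uint32_t vaddr)
{
	return (tlb_hi & 0xff) == pid &&
	       (tlb_hi & V32_PAGE_MASK) == (vaddr & V32_PAGE_MASK);
}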