author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m32r/mm
tags      Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/m32r/mm')
-rw-r--r--  arch/m32r/mm/Makefile        |  12
-rw-r--r--  arch/m32r/mm/cache.c         |  65
-rw-r--r--  arch/m32r/mm/discontig.c     | 171
-rw-r--r--  arch/m32r/mm/extable.c       |  22
-rw-r--r--  arch/m32r/mm/fault-nommu.c   | 165
-rw-r--r--  arch/m32r/mm/fault.c         | 583
-rw-r--r--  arch/m32r/mm/init.c          | 247
-rw-r--r--  arch/m32r/mm/ioremap-nommu.c |  52
-rw-r--r--  arch/m32r/mm/ioremap.c       | 192
-rw-r--r--  arch/m32r/mm/mmu.S           | 350
-rw-r--r--  arch/m32r/mm/page.S          |  82
11 files changed, 1941 insertions, 0 deletions
diff --git a/arch/m32r/mm/Makefile b/arch/m32r/mm/Makefile
new file mode 100644
index 000000000000..c51c1c3b4a5f
--- /dev/null
+++ b/arch/m32r/mm/Makefile
@@ -0,0 +1,12 @@
#
# Makefile for the Linux M32R-specific parts of the memory manager.
#

ifdef CONFIG_MMU
obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o
else
obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
endif

obj-$(CONFIG_DISCONTIGMEM) += discontig.o
diff --git a/arch/m32r/mm/cache.c b/arch/m32r/mm/cache.c
new file mode 100644
index 000000000000..31b0789c1992
--- /dev/null
+++ b/arch/m32r/mm/cache.c
@@ -0,0 +1,65 @@
/*
 * linux/arch/m32r/mm/cache.c
 *
 * Copyright (C) 2002  Hirokazu Takata
 */

#include <linux/config.h>
#include <asm/pgtable.h>

#undef MCCR

#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP)
/* Cache Control Register */
#define MCCR            ((volatile unsigned long *)0xfffffffc)
#define MCCR_CC         (1UL << 7)      /* Cache mode modify bit */
#define MCCR_IIV        (1UL << 6)      /* I-cache invalidate */
#define MCCR_DIV        (1UL << 5)      /* D-cache invalidate */
#define MCCR_DCB        (1UL << 4)      /* D-cache copy back */
#define MCCR_ICM        (1UL << 1)      /* I-cache mode [0:off, 1:on] */
#define MCCR_DCM        (1UL << 0)      /* D-cache mode [0:off, 1:on] */
#define MCCR_ICACHE_INV         (MCCR_CC|MCCR_IIV)
#define MCCR_DCACHE_CB          (MCCR_CC|MCCR_DCB)
#define MCCR_DCACHE_CBINV       (MCCR_CC|MCCR_DIV|MCCR_DCB)
#define CHECK_MCCR(mccr)        (mccr = *MCCR)
#elif defined(CONFIG_CHIP_M32102)
#define MCCR            ((volatile unsigned char *)0xfffffffe)
#define MCCR_IIV        (1UL << 0)      /* I-cache invalidate */
#define MCCR_ICACHE_INV         MCCR_IIV
#endif /* CONFIG_CHIP_XNUX2 || CONFIG_CHIP_M32700 */

#ifndef MCCR
#error Unknown cache type.
#endif

/* Copy back and invalidate the whole D-cache, and invalidate the whole I-cache */
void _flush_cache_all(void)
{
#if defined(CONFIG_CHIP_M32102)
        *MCCR = MCCR_ICACHE_INV;
#else
        unsigned long mccr;

        /* Copy back and invalidate D-cache */
        /* Invalidate I-cache */
        *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV;
        while ((mccr = *MCCR) & MCCR_IIV);      /* loop while invalidating... */
#endif
}

/* Copy back the whole D-cache and invalidate the whole I-cache */
void _flush_cache_copyback_all(void)
{
#if defined(CONFIG_CHIP_M32102)
        *MCCR = MCCR_ICACHE_INV;
#else
        unsigned long mccr;

        /* Copy back D-cache */
        /* Invalidate I-cache */
        *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB;
        while ((mccr = *MCCR) & MCCR_IIV);      /* loop while invalidating... */
#endif
}
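
To sanity-check the flush commands above: a minimal standalone sketch (userspace C, not part of the commit) that recomputes the MCCR command words from the same bit definitions in cache.c. The hardware clears MCCR_IIV when invalidation completes, which is why both routines spin on that bit.

/* Minimal sketch, assuming only the bit layout defined in cache.c above. */
#include <stdio.h>

#define MCCR_CC  (1UL << 7)     /* Cache mode modify bit */
#define MCCR_IIV (1UL << 6)     /* I-cache invalidate */
#define MCCR_DIV (1UL << 5)     /* D-cache invalidate */
#define MCCR_DCB (1UL << 4)     /* D-cache copy back */

int main(void)
{
        /* _flush_cache_all(): copy back + invalidate D, invalidate I (0xf0) */
        printf("flush_all command:    0x%02lx\n",
               (MCCR_CC | MCCR_IIV) | (MCCR_CC | MCCR_DIV | MCCR_DCB));
        /* _flush_cache_copyback_all(): copy back D, invalidate I (0xd0) */
        printf("copyback_all command: 0x%02lx\n",
               (MCCR_CC | MCCR_IIV) | (MCCR_CC | MCCR_DCB));
        return 0;
}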
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
new file mode 100644
index 000000000000..1d1a01e54b3f
--- /dev/null
+++ b/arch/m32r/mm/discontig.c
@@ -0,0 +1,171 @@
/*
 * linux/arch/m32r/mm/discontig.c
 *
 * Discontig memory support
 *
 * Copyright (c) 2003  Hitoshi Yamamoto
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>

#include <asm/setup.h>

extern char _end[];

struct pglist_data *node_data[MAX_NUMNODES];
static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata;

pg_data_t m32r_node_data[MAX_NUMNODES];

/* Memory profile */
typedef struct {
        unsigned long start_pfn;
        unsigned long pages;
        unsigned long holes;
        unsigned long free_pfn;
} mem_prof_t;
static mem_prof_t mem_prof[MAX_NUMNODES];

static void __init mem_prof_init(void)
{
        unsigned long start_pfn, holes, free_pfn;
        const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);
        unsigned long ul;
        mem_prof_t *mp;

        /* Node#0 SDRAM */
        mp = &mem_prof[0];
        mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
        mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
        mp->holes = 0;
        mp->free_pfn = PFN_UP(__pa(_end));

        /* Node#1 internal SRAM */
        mp = &mem_prof[1];
        start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START);
        holes = 0;
        if (start_pfn & (zone_alignment - 1)) {
                ul = zone_alignment;
                while (start_pfn >= ul)
                        ul += zone_alignment;

                start_pfn = ul - zone_alignment;
                holes = free_pfn - start_pfn;
        }

        mp->start_pfn = start_pfn;
        mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes;
        mp->holes = holes;
        mp->free_pfn = PFN_UP(CONFIG_IRAM_START);
}

unsigned long __init setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long min_pfn;
        int nid;
        mem_prof_t *mp;

        max_low_pfn = 0;
        min_low_pfn = -1;

        mem_prof_init();

        for_each_online_node(nid) {
                mp = &mem_prof[nid];
                NODE_DATA(nid) = (pg_data_t *)&m32r_node_data[nid];
                NODE_DATA(nid)->bdata = &node_bdata[nid];
                min_pfn = mp->start_pfn;
                max_pfn = mp->start_pfn + mp->pages;
                bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
                        mp->start_pfn, max_pfn);

                free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                        PFN_PHYS(mp->pages));

                reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
                        PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size);

                if (max_low_pfn < max_pfn)
                        max_low_pfn = max_pfn;

                if (min_low_pfn > min_pfn)
                        min_low_pfn = min_pfn;
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
                        reserve_bootmem_node(NODE_DATA(0), INITRD_START,
                                INITRD_SIZE);
                        initrd_start = INITRD_START ?
                                INITRD_START + PAGE_OFFSET : 0;

                        initrd_end = initrd_start + INITRD_SIZE;
                        printk("initrd:start[%08lx],size[%08lx]\n",
                                initrd_start, INITRD_SIZE);
                } else {
                        printk("initrd extends beyond end of memory "
                                "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                                INITRD_START + INITRD_SIZE,
                                PFN_PHYS(max_low_pfn));

                        initrd_start = 0;
                }
        }
#endif /* CONFIG_BLK_DEV_INITRD */

        return max_low_pfn;
}

#define START_PFN(nid)  \
        (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN(nid)        (NODE_DATA(nid)->bdata->node_low_pfn)

unsigned long __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
        unsigned long low, start_pfn;
        unsigned long holes = 0;
        int nid, i;
        mem_prof_t *mp;

        pgdat_list = NULL;
        for (nid = num_online_nodes() - 1; nid >= 0; nid--) {
                NODE_DATA(nid)->pgdat_next = pgdat_list;
                pgdat_list = NODE_DATA(nid);
        }

        for_each_online_node(nid) {
                mp = &mem_prof[nid];
                for (i = 0; i < MAX_NR_ZONES; i++) {
                        zones_size[i] = 0;
                        zholes_size[i] = 0;
                }
                start_pfn = START_PFN(nid);
                low = MAX_LOW_PFN(nid);
                zones_size[ZONE_DMA] = low - start_pfn;
                zholes_size[ZONE_DMA] = mp->holes;
                holes += zholes_size[ZONE_DMA];

                free_area_init_node(nid, NODE_DATA(nid), zones_size,
                        start_pfn, zholes_size);
        }

        /*
         * For test
         * Use all area of internal RAM.
         * see __alloc_pages()
         */
        NODE_DATA(1)->node_zones->pages_min = 0;
        NODE_DATA(1)->node_zones->pages_low = 0;
        NODE_DATA(1)->node_zones->pages_high = 0;

        return holes;
}
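
The node#1 setup in mem_prof_init() above rounds the SRAM's first PFN down to a MAX_ORDER zone-alignment boundary and books the skipped pages as holes. A minimal standalone sketch of that rounding with made-up sample values; MAX_ORDER 11 is an assumption (a common default), not taken from this commit.

#include <stdio.h>

#define MAX_ORDER 11    /* assumption: common default, not from this commit */

int main(void)
{
        unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);  /* 1024 pages */
        unsigned long free_pfn = 0xff00;        /* hypothetical IRAM start PFN */
        unsigned long start_pfn = free_pfn, holes = 0, ul;

        if (start_pfn & (zone_alignment - 1)) {
                ul = zone_alignment;
                while (start_pfn >= ul)
                        ul += zone_alignment;

                start_pfn = ul - zone_alignment;        /* round down */
                holes = free_pfn - start_pfn;           /* nonexistent pages */
        }
        /* prints: start_pfn=0xfc00 holes=768 */
        printf("start_pfn=0x%lx holes=%lu\n", start_pfn, holes);
        return 0;
}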
diff --git a/arch/m32r/mm/extable.c b/arch/m32r/mm/extable.c
new file mode 100644
index 000000000000..9a97363b6524
--- /dev/null
+++ b/arch/m32r/mm/extable.c
@@ -0,0 +1,22 @@
/*
 * linux/arch/i386/mm/extable.c
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>

int fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        fixup = search_exception_tables(regs->bpc);
        if (fixup) {
                regs->bpc = fixup->fixup;
                return 1;
        }

        return 0;
}
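
fixup_exception() above resolves the faulting instruction address (regs->bpc) through the kernel's exception tables and, on a hit, redirects execution to the recorded fixup address. A toy model of that lookup with hypothetical names (the real kernel keeps the table sorted and binary-searches it); a conceptual sketch, not the kernel implementation.

#include <stddef.h>

/* Hypothetical stand-in for the kernel's exception_table_entry. */
struct toy_extable_entry {
        unsigned long insn;     /* address of an instruction allowed to fault */
        unsigned long fixup;    /* where to resume if it does */
};

/* Linear scan for clarity; the kernel uses a sorted table + bsearch. */
static const struct toy_extable_entry *
toy_search(const struct toy_extable_entry *tbl, size_t n, unsigned long addr)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (tbl[i].insn == addr)
                        return &tbl[i];
        return NULL;    /* no match: treat as a genuine kernel fault */
}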
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c
new file mode 100644
index 000000000000..d9d488d782e8
--- /dev/null
+++ b/arch/m32r/mm/fault-nommu.c
@@ -0,0 +1,165 @@
/*
 * linux/arch/m32r/mm/fault.c
 *
 * Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 *
 * Some code taken from i386 version.
 *  Copyright (C) 1995  Linus Torvalds
 */

/* $Id: fault-nommu.c,v 1.1 2004/03/30 06:40:59 sakugawa Exp $ */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */

#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>

extern void die(const char *, struct pt_regs *, long);

#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
        int loglevel_save = console_loglevel;

        if (yes) {
                oops_in_progress = 1;
                return;
        }
#ifdef CONFIG_VT
        unblank_screen();
#endif
        oops_in_progress = 0;
        /*
         * OK, the message is on the console.  Now we call printk()
         * without oops_in_progress set so that printk will give klogd
         * a poke.  Hold onto your hats...
         */
        console_loglevel = 15;  /* NMI oopser may have shut the console up */
        printk(" ");
        console_loglevel = loglevel_save;
}

void do_BUG(const char *file, int line)
{
        bust_spinlocks(1);
        printk("kernel BUG at %s:%d!\n", file, line);
}

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code:
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *======================================================================*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
        unsigned long address)
{
        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(" printing bpc:\n");
        printk(KERN_ALERT "bpc = %08lx\n", regs->bpc);

        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
        pte_t pte)
{
        BUG();
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        BUG();
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        BUG();
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context's TLB entries
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
        BUG();
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes' TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
        BUG();
}
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
new file mode 100644
index 000000000000..bf7fb58ef02c
--- /dev/null
+++ b/arch/m32r/mm/fault.c
@@ -0,0 +1,583 @@
/*
 * linux/arch/m32r/mm/fault.c
 *
 * Copyright (c) 2001, 2002  Hitoshi Yamamoto, and H. Kondo
 * Copyright (c) 2004  Naoto Sugai, NIIBE Yutaka
 *
 * Some code taken from i386 version.
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

extern void die(const char *, struct pt_regs *, long);

#ifndef CONFIG_SMP
asmlinkage unsigned int tlb_entry_i_dat;
asmlinkage unsigned int tlb_entry_d_dat;
#define tlb_entry_i tlb_entry_i_dat
#define tlb_entry_d tlb_entry_d_dat
#else
unsigned int tlb_entry_i_dat[NR_CPUS];
unsigned int tlb_entry_d_dat[NR_CPUS];
#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
#endif

extern void init_tlb(void);

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
        int loglevel_save = console_loglevel;

        if (yes) {
                oops_in_progress = 1;
                return;
        }
#ifdef CONFIG_VT
        unblank_screen();
#endif
        oops_in_progress = 0;
        /*
         * OK, the message is on the console.  Now we call printk()
         * without oops_in_progress set so that printk will give klogd
         * a poke.  Hold onto your hats...
         */
        console_loglevel = 15;  /* NMI oopser may have shut the console up */
        printk(" ");
        console_loglevel = loglevel_save;
}

/*======================================================================*
 * do_page_fault()
 *======================================================================*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * ARGUMENT:
 *  regs       : M32R SP reg.
 *  error_code : See below
 *  address    : M32R MMU MDEVA reg. (Operand ACE)
 *             : M32R BPC reg. (Instruction ACE)
 *
 * error_code:
 *  bit 0 == 0 means no page found, 1 means protection fault
 *  bit 1 == 0 means read, 1 means write
 *  bit 2 == 0 means kernel, 1 means user-mode
 *  bit 3 == 0 means data, 1 means instruction
 *======================================================================*/
#define ACE_PROTECTION          1
#define ACE_WRITE               2
#define ACE_USERMODE            4
#define ACE_INSTRUCTION         8

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
        unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long page, addr;
        int write;
        siginfo_t info;

        /*
         * If the BPSW IE bit is set, re-enable interrupts
         * (set the PSW IE bit).
         */
        if (regs->psw & M32R_PSW_BIE)
                local_irq_enable();

        tsk = current;

        info.si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & ACE_USERMODE) == 0, and that the fault was not a
         * protection error (error_code & ACE_PROTECTION) == 0.
         */
        if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
                goto vmalloc_fault;

        mm = tsk->mm;

        /*
         * If we're in an interrupt, have no user context, or are running
         * in an atomic region, then we must not take the fault.
         */
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of
         * an erroneous fault occurring in a code path which already holds
         * mmap_sem we will deadlock attempting to validate the fault against
         * the address space.  Luckily the kernel only validly references user
         * space from well-defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a
         * deadlock.  Attempt to lock the address space; if we cannot, then
         * validate the source.  If this is invalid we can skip the address
         * space check, thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((error_code & ACE_USERMODE) == 0 &&
                    !search_exception_tables(regs->psw))
                        goto bad_area_nosemaphore;
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
#if 0
        if (error_code & ACE_USERMODE) {
                /*
                 * Accessing the stack below "spu" is always a bug.
                 * The "+ 4" is there due to the push instruction
                 * doing pre-decrement on the stack and that
                 * doesn't show up until later..
                 */
                if (address + 4 < regs->spu)
                        goto bad_area;
        }
#endif
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        info.si_code = SEGV_ACCERR;
        write = 0;
        switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
                default:        /* 3: write, present */
                        /* fall through */
                case ACE_WRITE: /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                        write++;
                        break;
                case ACE_PROTECTION:    /* read, present */
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                                goto bad_area;
        }

        /*
         * For an instruction access exception, check if the area is
         * executable.
         */
        if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
                goto bad_area;

survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        addr = (address & PAGE_MASK);
        set_thread_fault_code(error_code);
        switch (handle_mm_fault(mm, vma, addr, write)) {
                case VM_FAULT_MINOR:
                        tsk->min_flt++;
                        break;
                case VM_FAULT_MAJOR:
                        tsk->maj_flt++;
                        break;
                case VM_FAULT_SIGBUS:
                        goto do_sigbus;
                case VM_FAULT_OOM:
                        goto out_of_memory;
                default:
                        BUG();
        }
        set_thread_fault_code(0);
        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & ACE_USERMODE) {
                tsk->thread.address = address;
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void __user *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */

        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT " printing bpc:\n");
        printk("%08lx\n", regs->bpc);
        page = *(unsigned long *)MPTB;
        page = ((unsigned long *)page)[address >> PGDIR_SHIFT];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & _PAGE_PRESENT) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (tsk->pid == 1) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (error_code & ACE_USERMODE)
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exception or die */
        if (!(error_code & ACE_USERMODE))
                goto no_context;

        tsk->thread.address = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGBUS, &info, tsk);
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = (pgd_t *)*(unsigned long *)MPTB;
                pgd = offset + (pgd_t *)pgd;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd_k))
                        goto no_context;

                /*
                 * set_pgd(pgd, *pgd_k); here would be useless on PAE
                 * and redundant with the set_pmd() on non-PAE.
                 */

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

                addr = (address & PAGE_MASK) | (error_code & ACE_INSTRUCTION);
                update_mmu_cache(NULL, addr, *pte_k);
                return;
        }
}

/*======================================================================*
 * update_mmu_cache()
 *======================================================================*/
#define TLB_MASK        (NR_TLB_ENTRIES - 1)
#define ITLB_END        (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END        (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
        pte_t pte)
{
        unsigned long *entry1, *entry2;
        unsigned long pte_data, flags;
        unsigned int *entry_dat;
        int inst = get_thread_fault_code() & ACE_INSTRUCTION;
        int i;

        /* Ptrace may call this routine. */
        if (vma && current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        vaddr = (vaddr & PAGE_MASK) | get_asid();

#ifdef CONFIG_CHIP_OPSP
        entry1 = (unsigned long *)ITLB_BASE;
        for (i = 0; i < NR_TLB_ENTRIES; i++) {
                if (*entry1++ == vaddr) {
                        pte_data = pte_val(pte);
                        set_tlb_data(entry1, pte_data);
                        break;
                }
                entry1++;
        }
        entry2 = (unsigned long *)DTLB_BASE;
        for (i = 0; i < NR_TLB_ENTRIES; i++) {
                if (*entry2++ == vaddr) {
                        pte_data = pte_val(pte);
                        set_tlb_data(entry2, pte_data);
                        break;
                }
                entry2++;
        }
        local_irq_restore(flags);
        return;
#else
        pte_data = pte_val(pte);

        /*
         * Update TLB entries
         *  entry1: ITLB entry address
         *  entry2: DTLB entry address
         */
        __asm__ __volatile__ (
                "seth   %0, #high(%4)   \n\t"
                "st     %2, @(%5, %0)   \n\t"
                "ldi    %1, #1          \n\t"
                "st     %1, @(%6, %0)   \n\t"
                "add3   r4, %0, %7      \n\t"
                ".fillinsn              \n"
                "1:                     \n\t"
                "ld     %1, @(%6, %0)   \n\t"
                "bnez   %1, 1b          \n\t"
                "ld     %0, @r4+        \n\t"
                "ld     %1, @r4         \n\t"
                "st     %3, @+%0        \n\t"
                "st     %3, @+%1        \n\t"
                : "=&r" (entry1), "=&r" (entry2)
                : "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
                  "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
                : "r4", "memory"
        );

        if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
                goto notfound;

found:
        local_irq_restore(flags);

        return;

        /* Valid entry not found */
notfound:
        /*
         * Update ITLB or DTLB entry
         *  entry1: TLB entry address
         *  entry2: TLB base address
         */
        if (!inst) {
                entry2 = (unsigned long *)DTLB_BASE;
                entry_dat = &tlb_entry_d;
        } else {
                entry2 = (unsigned long *)ITLB_BASE;
                entry_dat = &tlb_entry_i;
        }
        entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);

        for (i = 0; i < NR_TLB_ENTRIES; i++) {
                if (!(entry1[1] & 2))   /* Valid bit check */
                        break;

                if (entry1 != entry2)
                        entry1 -= 2;
                else
                        entry1 += TLB_MASK << 1;
        }

        if (i >= NR_TLB_ENTRIES) {      /* Empty entry not found */
                entry1 = entry2 + (*entry_dat << 1);
                *entry_dat = (*entry_dat + 1) & TLB_MASK;
        }
        *entry1++ = vaddr;      /* Set TLB tag */
        set_tlb_data(entry1, pte_data);

        goto found;
#endif
}

/*======================================================================*
 * flush_tlb_page() : flushes one page
 *======================================================================*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                page &= PAGE_MASK;
                page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(page);
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * flush_tlb_range() : flushes a range of pages
 *======================================================================*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm;

        mm = vma->vm_mm;
        if (mm_context(mm) != NO_CONTEXT) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
                        mm_context(mm) = NO_CONTEXT;
                        if (mm == current->mm)
                                activate_context(mm);
                } else {
                        unsigned long asid;

                        asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
                        start &= PAGE_MASK;
                        end += (PAGE_SIZE - 1);
                        end &= PAGE_MASK;

                        start |= asid;
                        end |= asid;
                        while (start < end) {
                                __flush_tlb_page(start);
                                start += PAGE_SIZE;
                        }
                }
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * flush_tlb_mm() : flushes the specified mm context's TLB entries
 *======================================================================*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
        /* Invalidate all TLB entries of this process. */
        /* Instead of invalidating each TLB entry, we get a new MMU context. */
        if (mm_context(mm) != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                mm_context(mm) = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                local_irq_restore(flags);
        }
}

/*======================================================================*
 * flush_tlb_all() : flushes all processes' TLBs
 *======================================================================*/
void local_flush_tlb_all(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
}

/*======================================================================*
 * init_mmu()
 *======================================================================*/
void __init init_mmu(void)
{
        tlb_entry_i = 0;
        tlb_entry_d = 0;
        mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
        set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
        *(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
}
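
The ACE_* bits above fully describe a fault. A minimal standalone sketch (userspace C, not part of the commit) decoding an error_code value exactly as documented in the do_page_fault() header comment:

#include <stdio.h>

#define ACE_PROTECTION  1
#define ACE_WRITE       2
#define ACE_USERMODE    4
#define ACE_INSTRUCTION 8

static void decode_ace(unsigned long error_code)
{
        printf("%s %s from %s mode: %s\n",
               (error_code & ACE_INSTRUCTION) ? "instruction" : "data",
               (error_code & ACE_WRITE) ? "write" : "read",
               (error_code & ACE_USERMODE) ? "user" : "kernel",
               (error_code & ACE_PROTECTION) ? "protection fault"
                                             : "page not present");
}

int main(void)
{
        decode_ace(ACE_USERMODE | ACE_WRITE);   /* user write, page not present */
        return 0;
}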
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
new file mode 100644
index 000000000000..bc423d838fb8
--- /dev/null
+++ b/arch/m32r/mm/init.c
@@ -0,0 +1,247 @@
/*
 * linux/arch/m32r/mm/init.c
 *
 * Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 * Some code taken from sh version.
 *  Copyright (C) 1999  Niibe Yutaka
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/nodemask.h>
#include <asm/types.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tlb.h>

/* References to section boundaries */
extern char _text, _etext, _edata;
extern char __init_begin, __init_end;

pgd_t swapper_pg_dir[1024];

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat->node_mem_map + i;
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/*
 * Cache of the MMU context last used.
 */
#ifndef CONFIG_SMP
unsigned long mmu_context_cache_dat;
#else
unsigned long mmu_context_cache_dat[NR_CPUS];
#endif
static unsigned long hole_pages;

/*
 * Function prototypes
 */
void __init paging_init(void);
void __init mem_init(void);
void free_initmem(void);
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long, unsigned long);
#endif

/* It'd be good if these lines were in the standard header file. */
#define START_PFN(nid)  \
        (NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN(nid)        (NODE_DATA(nid)->bdata->node_low_pfn)

#ifndef CONFIG_DISCONTIGMEM
unsigned long __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned long max_dma;
        unsigned long low;
        unsigned long start_pfn;

#ifdef CONFIG_MMU
        start_pfn = START_PFN(0);
        max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        low = MAX_LOW_PFN(0);

        if (low < max_dma) {
                zones_size[ZONE_DMA] = low - start_pfn;
                zones_size[ZONE_NORMAL] = 0;
        } else {
                zones_size[ZONE_DMA] = low - start_pfn;
                zones_size[ZONE_NORMAL] = low - max_dma;
        }
#else
        zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
        zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
        start_pfn = __MEMORY_START >> PAGE_SHIFT;
#endif /* CONFIG_MMU */

        free_area_init_node(0, NODE_DATA(0), zones_size, start_pfn, 0);

        return 0;
}
#else /* CONFIG_DISCONTIGMEM */
extern unsigned long zone_sizes_init(void);
#endif /* CONFIG_DISCONTIGMEM */

/*======================================================================*
 * paging_init() : sets up the page tables
 *======================================================================*/
void __init paging_init(void)
{
#ifdef CONFIG_MMU
        int i;
        pgd_t *pg_dir;

        /* We don't need a kernel mapping, as the hardware supports that. */
        pg_dir = swapper_pg_dir;

        for (i = 0; i < USER_PTRS_PER_PGD * 2; i++)
                pgd_val(pg_dir[i]) = 0;
#endif /* CONFIG_MMU */
        hole_pages = zone_sizes_init();
}

int __init reservedpages_count(void)
{
        int reservedpages, nid, i;

        reservedpages = 0;
        for_each_online_node(nid)
                for (i = 0; i < MAX_LOW_PFN(nid) - START_PFN(nid); i++)
                        if (PageReserved(NODE_DATA(nid)->node_mem_map + i))
                                reservedpages++;

        return reservedpages;
}

/*======================================================================*
 * mem_init() :
 *  orig : arch/sh/mm/init.c
 *======================================================================*/
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int nid;
#ifndef CONFIG_MMU
        extern unsigned long memory_end;
#endif

        num_physpages = 0;
        for_each_online_node(nid)
                num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1;

        num_physpages -= hole_pages;

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr = num_physpages;
#endif /* CONFIG_DISCONTIGMEM */

#ifdef CONFIG_MMU
        high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
#else
        high_memory = (void *)(memory_end & PAGE_MASK);
#endif /* CONFIG_MMU */

        /* Clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /* This will put all low memory onto the freelists */
        for_each_online_node(nid)
                totalram_pages += free_all_bootmem_node(NODE_DATA(nid));

        reservedpages = reservedpages_count() - hole_pages;
        codesize = (unsigned long)&_etext - (unsigned long)&_text;
        datasize = (unsigned long)&_edata - (unsigned long)&_etext;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                "%dk reserved, %dk data, %dk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
                num_physpages << (PAGE_SHIFT - 10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT - 10),
                datasize >> 10,
                initsize >> 10);
}

/*======================================================================*
 * free_initmem() :
 *  orig : arch/sh/mm/init.c
 *======================================================================*/
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n",
                (int)(&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
/*======================================================================*
 * free_initrd_mem() :
 *  orig : arch/sh/mm/init.c
 *======================================================================*/
void free_initrd_mem(unsigned long start, unsigned long end)
{
        unsigned long p;

        for (p = start; p < end; p += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(p));
                set_page_count(virt_to_page(p), 1);
                free_page(p);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
                (end - start) >> 10);
}
#endif
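
The printouts in mem_init() and free_initmem() above convert page counts to kilobytes by shifting with (PAGE_SHIFT - 10): a page is 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes. A minimal standalone sketch of that conversion; the 4 KB page size is an assumption, not taken from this commit.

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KB pages */

int main(void)
{
        unsigned long pages = 4096;     /* hypothetical: 16 MB of RAM */

        /* prints: 4096 pages = 16384k */
        printf("%lu pages = %luk\n", pages, pages << (PAGE_SHIFT - 10));
        return 0;
}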
diff --git a/arch/m32r/mm/ioremap-nommu.c b/arch/m32r/mm/ioremap-nommu.c
new file mode 100644
index 000000000000..2759f2d48384
--- /dev/null
+++ b/arch/m32r/mm/ioremap-nommu.c
@@ -0,0 +1,52 @@
/*
 * linux/arch/m32r/mm/ioremap-nommu.c
 *
 * Copyright (c) 2001, 2002  Hiroyuki Kondo
 *
 * Taken from mips version.
 *  (C) Copyright 1995 1996 Linus Torvalds
 *  (C) Copyright 2001 Ralf Baechle
 */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))

void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        return (void *)phys_addr;
}

#define IS_KSEG1(addr)  (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

void iounmap(volatile void __iomem *addr)
{
}
diff --git a/arch/m32r/mm/ioremap.c b/arch/m32r/mm/ioremap.c
new file mode 100644
index 000000000000..70c59055c19c
--- /dev/null
+++ b/arch/m32r/mm/ioremap.c
@@ -0,0 +1,192 @@
/*
 * linux/arch/m32r/mm/ioremap.c
 *
 * Copyright (c) 2001, 2002  Hiroyuki Kondo
 *
 * Taken from mips version.
 *  (C) Copyright 1995 1996 Linus Torvalds
 *  (C) Copyright 2001 Ralf Baechle
 */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        unsigned long pfn;
        pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
                                   | _PAGE_WRITE | flags);

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, pfn_pte(pfn, pgprot));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}

static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
        unsigned long size, unsigned long flags)
{
        int error;
        pgd_t *dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))

void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Map objects in the low 512MB of address space using KSEG1,
         * otherwise map using page tables.
         */
        if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
                return (void *)KSEG1ADDR(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *)area->addr;
        if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
                vunmap((void __force *)addr);
                return NULL;
        }

        return (void __iomem *)(offset + (char __iomem *)addr);
}

#define IS_KSEG1(addr)  (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

void iounmap(volatile void __iomem *addr)
{
        if (!IS_KSEG1(addr))
                vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
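
__ioremap() above short-circuits physical addresses in the low 512 MB through the uncached KSEG1 window instead of building page tables; IS_LOW512() just checks that no address bit above bit 28 is set. A minimal standalone sketch of that fast path; the KSEG1 base value here is an assumption (a MIPS-style constant), not taken from this commit.

#include <stdio.h>

#define KSEG1           0xa0000000UL    /* assumed uncached-window base */
#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))
#define KSEG1ADDR(a)    (KSEG1 | ((unsigned long)(a) & 0x1fffffffUL))

int main(void)
{
        unsigned long phys = 0x04000000UL;      /* 64 MB: inside the low 512 MB */

        if (IS_LOW512(phys))
                printf("fast path via KSEG1: 0x%08lx\n", KSEG1ADDR(phys));
        else
                printf("slow path: remap_area_pages()\n");
        return 0;
}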
diff --git a/arch/m32r/mm/mmu.S b/arch/m32r/mm/mmu.S new file mode 100644 index 000000000000..0c28f11d6677 --- /dev/null +++ b/arch/m32r/mm/mmu.S | |||
@@ -0,0 +1,350 @@ | |||
1 | /* | ||
2 | * linux/arch/m32r/mm/mmu.S | ||
3 | * | ||
4 | * Copyright (C) 2001 by Hiroyuki Kondo | ||
5 | */ | ||
6 | |||
7 | /* $Id: mmu.S,v 1.15 2004/03/16 02:56:27 takata Exp $ */ | ||
8 | |||
9 | #include <linux/config.h> /* CONFIG_MMU */ | ||
10 | #include <linux/linkage.h> | ||
11 | #include <asm/assembler.h> | ||
12 | #include <asm/smp.h> | ||
13 | |||
14 | .text | ||
15 | #ifdef CONFIG_MMU | ||
16 | |||
17 | #include <asm/mmu_context.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/m32r.h> | ||
21 | |||
22 | /* | ||
23 | * TLB Miss Exception handler | ||
24 | */ | ||
25 | .balign 16 | ||
26 | ENTRY(tme_handler) | ||
27 | .global tlb_entry_i_dat | ||
28 | .global tlb_entry_d_dat | ||
29 | |||
30 | SWITCH_TO_KERNEL_STACK | ||
31 | |||
32 | #if defined(CONFIG_ISA_M32R2) | ||
33 | st r0, @-sp | ||
34 | st r1, @-sp | ||
35 | st r2, @-sp | ||
36 | st r3, @-sp | ||
37 | |||
38 | seth r3, #high(MMU_REG_BASE) | ||
39 | ld r1, @(MESTS_offset, r3) ; r1: status (MESTS reg.) | ||
40 | ld r0, @(MDEVP_offset, r3) ; r0: PFN + ASID (MDEVP reg.) | ||
41 | st r1, @(MESTS_offset, r3) ; clear status (MESTS reg.) | ||
42 | and3 r1, r1, #(MESTS_IT) | ||
43 | bnez r1, 1f ; instruction TLB miss? | ||
44 | |||
45 | ;; data TLB miss | ||
46 | ;; input | ||
47 | ;; r0: PFN + ASID (MDEVP reg.) | ||
48 | ;; r1 - r3: free | ||
49 | ;; output | ||
50 | ;; r0: PFN + ASID | ||
51 | ;; r1: TLB entry base address | ||
52 | ;; r2: &tlb_entry_{i|d}_dat | ||
53 | ;; r3: free | ||
54 | |||
55 | #ifndef CONFIG_SMP | ||
56 | seth r2, #high(tlb_entry_d_dat) | ||
57 | or3 r2, r2, #low(tlb_entry_d_dat) | ||
58 | #else /* CONFIG_SMP */ | ||
59 | ldi r1, #-8192 | ||
60 | seth r2, #high(tlb_entry_d_dat) | ||
61 | or3 r2, r2, #low(tlb_entry_d_dat) | ||
62 | and r1, sp | ||
63 | ld r1, @(16, r1) ; current_thread_info->cpu | ||
64 | slli r1, #2 | ||
65 | add r2, r1 | ||
66 | #endif /* !CONFIG_SMP */ | ||
67 | seth r1, #high(DTLB_BASE) | ||
68 | or3 r1, r1, #low(DTLB_BASE) | ||
69 | bra 2f | ||
70 | |||
71 | .balign 16 | ||
72 | .fillinsn | ||
73 | 1: | ||
74 | ;; instruction TLB miss | ||
75 | ;; input | ||
76 | ;; r0: MDEVP reg. (ASID included) | ||
77 | ;; r1 - r3: free | ||
78 | ;; output | ||
79 | ;; r0: PFN + ASID | ||
80 | ;; r1: TLB entry base address | ||
81 | ;; r2: &tlb_entry_{i|d}_dat | ||
82 | ;; r3: free | ||
83 | ldi r3, #-4096 | ||
84 | and3 r0, r0, #(MMU_CONTEXT_ASID_MASK) | ||
85 | mvfc r1, bpc | ||
86 | and r1, r3 | ||
87 | or r0, r1 ; r0: PFN + ASID | ||
88 | #ifndef CONFIG_SMP | ||
89 | seth r2, #high(tlb_entry_i_dat) | ||
90 | or3 r2, r2, #low(tlb_entry_i_dat) | ||
91 | #else /* CONFIG_SMP */ | ||
92 | ldi r1, #-8192 | ||
93 | seth r2, #high(tlb_entry_i_dat) | ||
94 | or3 r2, r2, #low(tlb_entry_i_dat) | ||
95 | and r1, sp | ||
96 | ld r1, @(16, r1) ; current_thread_info->cpu | ||
97 | slli r1, #2 | ||
98 | add r2, r1 | ||
99 | #endif /* !CONFIG_SMP */ | ||
100 | seth r1, #high(ITLB_BASE) | ||
101 | or3 r1, r1, #low(ITLB_BASE) | ||
102 | |||
103 | .fillinsn | ||
104 | 2: | ||
105 | ;; select TLB entry | ||
106 | ;; input | ||
107 | ;; r0: PFN + ASID | ||
108 | ;; r1: TLB entry base address | ||
109 | ;; r2: &tlb_entry_{i|d}_dat | ||
110 | ;; r3: free | ||
111 | ;; output | ||
112 | ;; r0: PFN + ASID | ||
113 | ;; r1: TLB entry address | ||
114 | ;; r2, r3: free | ||
115 | #ifdef CONFIG_ISA_DUAL_ISSUE | ||
116 | ld r3, @r2 || srli r1, #3 | ||
117 | #else | ||
118 | ld r3, @r2 | ||
119 | srli r1, #3 | ||
120 | #endif | ||
121 | add r1, r3 | ||
122 | ; tlb_entry_{d|i}_dat++; | ||
123 | addi r3, #1 | ||
124 | and3 r3, r3, #(NR_TLB_ENTRIES - 1) | ||
125 | #ifdef CONFIG_ISA_DUAL_ISSUE | ||
126 | st r3, @r2 || slli r1, #3 | ||
127 | #else | ||
128 | st r3, @r2 | ||
129 | slli r1, #3 | ||
130 | #endif | ||
131 | |||
132 | ;; load pte | ||
133 | ;; input | ||
134 | ;; r0: PFN + ASID | ||
135 | ;; r1: TLB entry address | ||
136 | ;; r2, r3: free | ||
137 | ;; output | ||
138 | ;; r0: PFN + ASID | ||
139 | ;; r1: TLB entry address | ||
140 | ;; r2: pte_data | ||
141 | ;; r3: free | ||
142 | ; pgd = *(unsigned long *)MPTB; | ||
143 | ld24 r2, #(-MPTB - 1) | ||
144 | srl3 r3, r0, #22 | ||
145 | #ifdef CONFIG_ISA_DUAL_ISSUE | ||
146 | not r2, r2 || slli r3, #2 ; r3: pgd offset | ||
147 | #else | ||
148 | not r2, r2 | ||
149 | slli r3, #2 | ||
150 | #endif | ||
151 | ld r2, @r2 ; r2: pgd base addr (MPTB reg.) | ||
152 | or r3, r2 ; r3: pmd addr | ||
153 | |||
154 | ; pmd = pmd_offset(pgd, address); | ||
155 | ld r3, @r3 ; r3: pmd data | ||
156 | ldi r2, #-4096 | ||
157 | beqz r3, 3f ; pmd_none(*pmd) ? | ||
158 | |||
159 | ; pte = pte_offset(pmd, address); | ||
160 | and r2, r3 ; r2: pte base addr | ||
161 | srl3 r3, r0, #10 | ||
162 | and3 r3, r3, #0xffc ; r3: pte offset | ||
163 | or r3, r2 | ||
164 | seth r2, #0x8000 | ||
165 | or r3, r2 ; r3: pte addr | ||
166 | |||
167 | ; pte_data = (unsigned long)pte_val(*pte); | ||
168 | ld r2, @r3 ; r2: pte data | ||
169 | or3 r2, r2, #2 ; _PAGE_PRESENT(=2) | ||
170 | |||
171 | .fillinsn | ||
172 | 5: | ||
173 | ;; set tlb | ||
174 | ;; input | ||
175 | ;; r0: PFN + ASID | ||
176 | ;; r1: TLB entry address | ||
177 | ;; r2: pte_data | ||
178 | ;; r3: free | ||
179 | st r0, @r1 ; set_tlb_tag(entry++, address); | ||
180 | st r2, @+r1 ; set_tlb_data(entry, pte_data); | ||
181 | |||
182 | .fillinsn | ||
183 | 6: | ||
184 | ld r3, @sp+ | ||
185 | ld r2, @sp+ | ||
186 | ld r1, @sp+ | ||
187 | ld r0, @sp+ | ||
188 | rte | ||
189 | |||
190 | .fillinsn | ||
191 | 3: | ||
192 | ;; error | ||
193 | ;; input | ||
194 | ;; r0: PFN + ASID | ||
195 | ;; r1: TLB entry address | ||
196 | ;; r2, r3: free | ||
197 | ;; output | ||
198 | ;; r0: PFN + ASID | ||
199 | ;; r1: TLB entry address | ||
200 | ;; r2: pte_data | ||
201 | ;; r3: free | ||
202 | #ifdef CONFIG_ISA_DUAL_ISSUE | ||
203 | bra 5b || ldi r2, #2 | ||
204 | #else | ||
205 | ldi r2, #2 ; r2: pte_data = 0 | _PAGE_PRESENT(=2) | ||
206 | bra 5b | ||
207 | #endif | ||
208 | |||
209 | #elif defined (CONFIG_ISA_M32R) | ||
210 | |||
211 | st sp, @-sp | ||
212 | st r0, @-sp | ||
213 | st r1, @-sp | ||
214 | st r2, @-sp | ||
215 | st r3, @-sp | ||
216 | st r4, @-sp | ||
217 | |||
218 | seth r3, #high(MMU_REG_BASE) | ||
219 | ld r0, @(MDEVA_offset,r3) ; r0: address (MDEVA reg.) | ||
220 | mvfc r2, bpc ; r2: bpc | ||
221 | ld r1, @(MESTS_offset,r3) ; r1: status (MESTS reg.) | ||
222 | st r1, @(MESTS_offset,r3) ; clear status (MESTS reg.) | ||
223 | and3 r1, r1, #(MESTS_IT) | ||
224 | beqz r1, 1f ; data TLB miss? | ||
225 | |||
226 | ;; instruction TLB miss | ||
227 | mv r0, r2 ; address = bpc; | ||
228 | ; entry = (unsigned long *)ITLB_BASE+tlb_entry_i*2; | ||
229 | seth r3, #shigh(tlb_entry_i_dat) | ||
230 | ld r4, @(low(tlb_entry_i_dat),r3) | ||
231 | sll3 r2, r4, #3 | ||
232 | seth r1, #high(ITLB_BASE) | ||
233 | or3 r1, r1, #low(ITLB_BASE) | ||
234 | add r2, r1 ; r2: entry | ||
235 | addi r4, #1 ; tlb_entry_i++; | ||
236 | and3 r4, r4, #(NR_TLB_ENTRIES-1) | ||
237 | st r4, @(low(tlb_entry_i_dat),r3) | ||
238 | bra 2f | ||
239 | .fillinsn | ||
240 | 1: | ||
241 | ;; data TLB miss | ||
242 | ; entry = (unsigned long *)DTLB_BASE+tlb_entry_d*2; | ||
243 | seth r3, #shigh(tlb_entry_d_dat) | ||
244 | ld r4, @(low(tlb_entry_d_dat),r3) | ||
245 | sll3 r2, r4, #3 | ||
246 | seth r1, #high(DTLB_BASE) | ||
247 | or3 r1, r1, #low(DTLB_BASE) | ||
248 | add r2, r1 ; r2: entry | ||
249 | addi r4, #1 ; tlb_entry_d++; | ||
250 | and3 r4, r4, #(NR_TLB_ENTRIES-1) | ||
251 | st r4, @(low(tlb_entry_d_dat),r3) | ||
252 | .fillinsn | ||
253 | 2: | ||
254 | ;; load pte | ||
255 | ; r0: address, r2: entry | ||
256 | ; r1,r3,r4: (free) | ||
257 | ; pgd = *(unsigned long *)MPTB; | ||
258 | ld24 r1, #(-MPTB-1) | ||
259 | not r1, r1 | ||
260 | ld r1, @r1 | ||
261 | srl3 r4, r0, #22 | ||
262 | sll3 r3, r4, #2 | ||
263 | add r3, r1 ; r3: pgd | ||
264 | ; pmd = pmd_offset(pgd, address); | ||
265 | ld r1, @r3 ; r1: pmd | ||
266 | beqz r1, 3f ; pmd_none(*pmd) ? | ||
267 | ; | ||
268 | and3 r1, r1, #0xeff | ||
269 | ldi r4, #611 ; _KERNPG_TABLE(=611) | ||
270 | beq r1, r4, 4f ; !pmd_bad(*pmd) ? | ||
271 | .fillinsn | ||
272 | 3: | ||
273 | ldi r1, #0 ; r1: pte_data = 0 | ||
274 | bra 5f | ||
275 | .fillinsn | ||
276 | 4: | ||
277 | ; pte = pte_offset(pmd, address); | ||
278 | ld r4, @r3 ; r4: pte | ||
279 | ldi r3, #-4096 | ||
280 | and r4, r3 | ||
281 | srl3 r3, r0, #10 | ||
282 | and3 r3, r3, #0xffc | ||
283 | add r4, r3 | ||
284 | seth r3, #0x8000 | ||
285 | add r4, r3 ; r4: pte | ||
286 | ; pte_data = (unsigned long)pte_val(*pte); | ||
287 | ld r1, @r4 ; r1: pte_data | ||
288 | .fillinsn | ||
289 | |||
290 | ;; set tlb | ||
291 | ; r0: address, r1: pte_data, r2: entry | ||
292 | ; r3,r4: (free) | ||
293 | 5: | ||
294 | ldi r3, #-4096 ; set_tlb_tag(entry++, address); | ||
295 | and r3, r0 | ||
296 | seth r4, #shigh(MASID) | ||
297 | ld r4, @(low(MASID),r4) ; r4: MASID | ||
298 | and3 r4, r4, #(MMU_CONTEXT_ASID_MASK) | ||
299 | or r3, r4 | ||
300 | st r3, @r2 | ||
301 | or3 r4, r1, #2 ; _PAGE_PRESENT(=2) | ||
302 | st r4, @(4,r2) ; set_tlb_data(entry, pte_data); | ||
303 | |||
304 | ld r4, @sp+ | ||
305 | ld r3, @sp+ | ||
306 | ld r2, @sp+ | ||
307 | ld r1, @sp+ | ||
308 | ld r0, @sp+ | ||
309 | ld sp, @sp+ | ||
310 | rte | ||
311 | |||
312 | #else | ||
313 | #error unknown isa configuration | ||
314 | #endif | ||
315 | |||
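Pulling the inline C comments of both ISA variants together, the refill performed by tme_handler amounts to the following sketch. This is illustrative only, not code the kernel compiles: tlb_refill() and current_asid() are made-up names (the ASID actually comes from the MDEVP or MASID register), while MPTB, NR_TLB_ENTRIES, set_tlb_tag() and set_tlb_data() are the names the comments above already use.

	static void tlb_refill(unsigned long address, unsigned long *tlb_base,
			       unsigned long *cursor)
	{
		unsigned long *entry, *pgd, pmd, *pte, pte_data = 0;

		/* Round-robin victim selection: two words (tag + data)
		 * per TLB entry, cursor wraps at NR_TLB_ENTRIES. */
		entry = tlb_base + *cursor * 2;
		*cursor = (*cursor + 1) & (NR_TLB_ENTRIES - 1);

		/* pgd = *(unsigned long *)MPTB; then the two-level walk. */
		pgd = *(unsigned long **)MPTB;
		pmd = pgd[address >> 22];
		if (pmd != 0) {
			/* pte table sits in the kernel segment (| 0x80000000). */
			pte = (unsigned long *)((pmd & PAGE_MASK) | 0x80000000);
			pte_data = pte[(address >> PAGE_SHIFT) & 0x3ff];
		}

		/* set_tlb_tag(entry++, address); set_tlb_data(entry, pte_data); */
		entry[0] = (address & PAGE_MASK) | current_asid();	/* tag  */
		entry[1] = pte_data | 2 /* _PAGE_PRESENT */;		/* data */
	}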
316 | ENTRY(init_tlb) | ||
317 | ;; Set MMU Register | ||
318 | seth r0, #high(MMU_REG_BASE) ; Set MMU_REG_BASE higher | ||
319 | or3 r0, r0, #low(MMU_REG_BASE) ; Set MMU_REG_BASE lower | ||
320 | ldi r1, #0 | ||
321 | st r1, @(MPSZ_offset,r0) ; Set MPSZ reg. (page size 0:4KB, 1:16KB, 2:64KB) | ||
322 | ldi r1, #0 | ||
323 | st r1, @(MASID_offset,r0) ; Set ASID Zero | ||
324 | |||
325 | ;; Set TLB | ||
326 | seth r0, #high(ITLB_BASE) ; Set ITLB_BASE higher | ||
327 | or3 r0, r0, #low(ITLB_BASE) ; Set ITLB_BASE lower | ||
328 | seth r1, #high(DTLB_BASE) ; Set DTLB_BASE higher | ||
329 | or3 r1, r1, #low(DTLB_BASE) ; Set DTLB_BASE lower | ||
330 | ldi r2, #0 | ||
331 | ldi r3, #NR_TLB_ENTRIES | ||
332 | addi r0, #-4 | ||
333 | addi r1, #-4 | ||
334 | clear_tlb: | ||
335 | st r2, @+r0 ; VPA <- 0 | ||
336 | st r2, @+r0 ; PPA <- 0 | ||
337 | st r2, @+r1 ; VPA <- 0 | ||
338 | st r2, @+r1 ; PPA <- 0 | ||
339 | addi r3, #-1 | ||
340 | bnez r3, clear_tlb | ||
341 | ;; | ||
342 | jmp r14 | ||
343 | |||
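In C, init_tlb reduces to the sketch below (init_tlb_sketch is an illustrative name; ITLB_BASE, DTLB_BASE, and NR_TLB_ENTRIES come from the headers included above). The MPSZ and MASID register writes are noted in a comment rather than modeled.

	static void init_tlb_sketch(void)
	{
		volatile unsigned long *itlb = (volatile unsigned long *)ITLB_BASE;
		volatile unsigned long *dtlb = (volatile unsigned long *)DTLB_BASE;
		int i;

		/* MPSZ <- 0 (4KB pages) and MASID <- 0 precede this loop. */
		for (i = 0; i < NR_TLB_ENTRIES; i++) {
			*itlb++ = 0;	/* VPA (tag)  <- 0 */
			*itlb++ = 0;	/* PPA (data) <- 0 */
			*dtlb++ = 0;	/* VPA (tag)  <- 0 */
			*dtlb++ = 0;	/* PPA (data) <- 0 */
		}
	}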
344 | ENTRY(m32r_itlb_entrys) | ||
345 | ENTRY(m32r_otlb_entrys) | ||
346 | |||
347 | #endif /* CONFIG_MMU */ | ||
348 | |||
349 | .end | ||
350 | |||
diff --git a/arch/m32r/mm/page.S b/arch/m32r/mm/page.S new file mode 100644 index 000000000000..a2e9367dbf79 --- /dev/null +++ b/arch/m32r/mm/page.S | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * linux/arch/m32r/mm/page.S | ||
3 | * | ||
4 | * Clear/Copy page with CPU | ||
5 | * | ||
6 | * Copyright (C) 2004 The Free Software Initiative of Japan | ||
7 | * | ||
8 | * Written by Niibe Yutaka | ||
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU General Public | ||
11 | * License. See the file "COPYING" in the main directory of this archive | ||
12 | * for more details. | ||
13 | * | ||
14 | */ | ||
15 | .text | ||
16 | .global copy_page | ||
17 | /* | ||
18 | * copy_page (to, from) | ||
19 | * | ||
20 | * PAGE_SIZE = 4096 bytes | ||
21 | * Cache line = 16 bytes | ||
22 | * 4096 bytes = 16 bytes x 256 lines | ||
23 | */ | ||
24 | .align 4 | ||
25 | copy_page: | ||
26 | ldi r2, #255 | ||
27 | ld r3, @r0 /* cache line allocate */ | ||
28 | ld r4, @r1+ | ||
29 | ld r5, @r1+ | ||
30 | ld r6, @r1+ | ||
31 | ld r7, @r1+ | ||
32 | .fillinsn | ||
33 | 0: | ||
34 | st r4, @r0 | ||
35 | st r5, @+r0 | ||
36 | st r6, @+r0 | ||
37 | st r7, @+r0 | ||
38 | ld r4, @r1+ | ||
39 | addi r0, #4 | ||
40 | ld r5, @r1+ | ||
41 | ld r6, @r1+ | ||
42 | ld r7, @r1+ | ||
43 | ld r3, @r0 /* cache line allocate */ | ||
44 | addi r2, #-1 | ||
45 | bnez r2, 0b | ||
46 | |||
47 | st r4, @r0 | ||
48 | st r5, @+r0 | ||
49 | st r6, @+r0 | ||
50 | st r7, @+r0 | ||
51 | jmp r14 | ||
52 | |||
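The loop above, restated in C (a sketch under an assumed 32-bit unsigned long, not a drop-in replacement: the assembly additionally software-pipelines the loads of the next line ahead of the stores):

	void copy_page_sketch(void *to, const void *from)
	{
		unsigned long *t = to;
		const unsigned long *f = from;
		int i;

		for (i = 0; i < 4096 / 16; i++) {
			/* Touch the destination line first so the four
			 * stores hit an already-allocated cache line
			 * ("cache line allocate" in the assembly). */
			(void)*(volatile unsigned long *)t;
			t[0] = f[0];
			t[1] = f[1];
			t[2] = f[2];
			t[3] = f[3];
			t += 4;
			f += 4;
		}
	}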
53 | .text | ||
54 | .global clear_page | ||
55 | /* | ||
56 | * clear_page (to) | ||
57 | * | ||
58 | * PAGE_SIZE = 4096 bytes | ||
59 | * Cache line = 16 bytes | ||
60 | * 4096 bytes = 16 bytes x 256 lines | ||
61 | */ | ||
62 | .align 4 | ||
63 | clear_page: | ||
64 | ldi r2, #255 | ||
65 | ldi r4, #0 | ||
66 | ld r3, @r0 /* cache line allocate */ | ||
67 | .fillinsn | ||
68 | 0: | ||
69 | st r4, @r0 | ||
70 | st r4, @+r0 | ||
71 | st r4, @+r0 | ||
72 | st r4, @+r0 | ||
73 | addi r0, #4 | ||
74 | ld r3, @r0 /* cache line allocate */ | ||
75 | addi r2, #-1 | ||
76 | bnez r2, 0b | ||
77 | |||
78 | st r4, @r0 | ||
79 | st r4, @+r0 | ||
80 | st r4, @+r0 | ||
81 | st r4, @+r0 | ||
82 | jmp r14 | ||
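clear_page has the same shape, storing zeros instead of copied words; in C (same caveats and assumed word size as the copy_page sketch):

	void clear_page_sketch(void *to)
	{
		unsigned long *t = to;
		int i;

		for (i = 0; i < 4096 / 16; i++) {
			(void)*(volatile unsigned long *)t;	/* line allocate */
			t[0] = t[1] = t[2] = t[3] = 0;
			t += 4;
		}
	}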