author     Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /arch/sh/mm
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/sh/mm')
34 files changed, 1773 insertions, 1386 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 7f7b52f9beba..1445ca6257df 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -75,57 +75,25 @@ config MEMORY_SIZE | |||
75 | config 29BIT | 75 | config 29BIT |
76 | def_bool !32BIT | 76 | def_bool !32BIT |
77 | depends on SUPERH32 | 77 | depends on SUPERH32 |
78 | select UNCACHED_MAPPING | ||
78 | 79 | ||
79 | config 32BIT | 80 | config 32BIT |
80 | bool | 81 | bool |
81 | default y if CPU_SH5 | 82 | default y if CPU_SH5 |
82 | 83 | ||
83 | config PMB_ENABLE | ||
84 | bool "Support 32-bit physical addressing through PMB" | ||
85 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | ||
86 | select 32BIT | ||
87 | default y | ||
88 | help | ||
89 | If you say Y here, physical addressing will be extended to | ||
90 | 32-bits through the SH-4A PMB. If this is not set, legacy | ||
91 | 29-bit physical addressing will be used. | ||
92 | |||
93 | choice | ||
94 | prompt "PMB handling type" | ||
95 | depends on PMB_ENABLE | ||
96 | default PMB_FIXED | ||
97 | |||
98 | config PMB | 84 | config PMB |
99 | bool "PMB" | 85 | bool "Support 32-bit physical addressing through PMB" |
100 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 86 | depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP |
101 | select 32BIT | 87 | select 32BIT |
88 | select UNCACHED_MAPPING | ||
102 | help | 89 | help |
103 | If you say Y here, physical addressing will be extended to | 90 | If you say Y here, physical addressing will be extended to |
104 | 32-bits through the SH-4A PMB. If this is not set, legacy | 91 | 32-bits through the SH-4A PMB. If this is not set, legacy |
105 | 29-bit physical addressing will be used. | 92 | 29-bit physical addressing will be used. |
106 | 93 | ||
107 | config PMB_FIXED | ||
108 | bool "fixed PMB" | ||
109 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \ | ||
110 | CPU_SUBTYPE_SH7780 || \ | ||
111 | CPU_SUBTYPE_SH7785) | ||
112 | select 32BIT | ||
113 | help | ||
114 | If this option is enabled, fixed PMB mappings are inherited | ||
115 | from the boot loader, and the kernel does not attempt dynamic | ||
116 | management. This is the closest to legacy 29-bit physical mode, | ||
117 | and allows systems to support up to 512MiB of system memory. | ||
118 | |||
119 | endchoice | ||
120 | |||
121 | config X2TLB | 94 | config X2TLB |
122 | bool "Enable extended TLB mode" | 95 | def_bool y |
123 | depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL | 96 | depends on (CPU_SHX2 || CPU_SHX3) && MMU |
124 | help | ||
125 | Selecting this option will enable the extended mode of the SH-X2 | ||
126 | TLB. For legacy SH-X behaviour and interoperability, say N. For | ||
127 | all of the fun new features and a willingless to submit bug reports, | ||
128 | say Y. | ||
129 | 97 | ||
130 | config VSYSCALL | 98 | config VSYSCALL |
131 | bool "Support vsyscall page" | 99 | bool "Support vsyscall page" |
@@ -193,14 +161,19 @@ config ARCH_MEMORY_PROBE | |||
193 | def_bool y | 161 | def_bool y |
194 | depends on MEMORY_HOTPLUG | 162 | depends on MEMORY_HOTPLUG |
195 | 163 | ||
164 | config IOREMAP_FIXED | ||
165 | def_bool y | ||
166 | depends on X2TLB || SUPERH64 | ||
167 | |||
168 | config UNCACHED_MAPPING | ||
169 | bool | ||
170 | |||
196 | choice | 171 | choice |
197 | prompt "Kernel page size" | 172 | prompt "Kernel page size" |
198 | default PAGE_SIZE_8KB if X2TLB | ||
199 | default PAGE_SIZE_4KB | 173 | default PAGE_SIZE_4KB |
200 | 174 | ||
201 | config PAGE_SIZE_4KB | 175 | config PAGE_SIZE_4KB |
202 | bool "4kB" | 176 | bool "4kB" |
203 | depends on !MMU || !X2TLB | ||
204 | help | 177 | help |
205 | This is the default page size used by all SuperH CPUs. | 178 | This is the default page size used by all SuperH CPUs. |
206 | 179 | ||
@@ -258,6 +231,15 @@ endchoice | |||
258 | 231 | ||
259 | source "mm/Kconfig" | 232 | source "mm/Kconfig" |
260 | 233 | ||
234 | config SCHED_MC | ||
235 | bool "Multi-core scheduler support" | ||
236 | depends on SMP | ||
237 | default y | ||
238 | help | ||
239 | Multi-core scheduler support improves the CPU scheduler's decision | ||
240 | making when dealing with multi-core CPU chips at a cost of slightly | ||
241 | increased overhead in some places. If unsure say N here. | ||
242 | |||
261 | endmenu | 243 | endmenu |
262 | 244 | ||
263 | menu "Cache configuration" | 245 | menu "Cache configuration" |
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 3759bf853293..3dc8a8a63822 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the Linux SuperH-specific parts of the memory manager. | 2 | # Makefile for the Linux SuperH-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := cache.o init.o consistent.o mmap.o | 5 | obj-y := alignment.o cache.o init.o consistent.o mmap.o |
6 | 6 | ||
7 | cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o | 7 | cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o |
8 | cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o | 8 | cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o |
@@ -15,7 +15,7 @@ obj-y += $(cacheops-y) | |||
15 | 15 | ||
16 | mmu-y := nommu.o extable_32.o | 16 | mmu-y := nommu.o extable_32.o |
17 | mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ | 17 | mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \ |
18 | ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o | 18 | ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o |
19 | 19 | ||
20 | obj-y += $(mmu-y) | 20 | obj-y += $(mmu-y) |
21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o | 21 | obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o |
@@ -26,16 +26,17 @@ endif | |||
26 | 26 | ||
27 | ifdef CONFIG_MMU | 27 | ifdef CONFIG_MMU |
28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o | 28 | tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o |
29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o | 29 | tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o |
30 | tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o | 30 | tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o |
31 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o | 31 | tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o |
32 | obj-y += $(tlb-y) | 32 | obj-y += $(tlb-y) |
33 | endif | 33 | endif |
34 | 34 | ||
35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
36 | obj-$(CONFIG_PMB) += pmb.o | 36 | obj-$(CONFIG_PMB) += pmb.o |
37 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
38 | obj-$(CONFIG_NUMA) += numa.o | 37 | obj-$(CONFIG_NUMA) += numa.o |
38 | obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o | ||
39 | obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o | ||
39 | 40 | ||
40 | # Special flags for fault_64.o. This puts restrictions on the number of | 41 | # Special flags for fault_64.o. This puts restrictions on the number of |
41 | # caller-save registers that the compiler can target when building this file. | 42 | # caller-save registers that the compiler can target when building this file. |
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
new file mode 100644
index 000000000000..b2595b8548ee
--- /dev/null
+++ b/arch/sh/mm/alignment.c
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Alignment access counters and corresponding user-space interfaces. | ||
3 | * | ||
4 | * Copyright (C) 2009 ST Microelectronics | ||
5 | * Copyright (C) 2009 - 2010 Paul Mundt | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/seq_file.h> | ||
14 | #include <linux/proc_fs.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | #include <asm/alignment.h> | ||
17 | #include <asm/processor.h> | ||
18 | |||
19 | static unsigned long se_user; | ||
20 | static unsigned long se_sys; | ||
21 | static unsigned long se_half; | ||
22 | static unsigned long se_word; | ||
23 | static unsigned long se_dword; | ||
24 | static unsigned long se_multi; | ||
25 | /* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not | ||
26 | valid! */ | ||
27 | static int se_usermode = UM_WARN | UM_FIXUP; | ||
28 | /* 0: no warning 1: print a warning message, disabled by default */ | ||
29 | static int se_kernmode_warn; | ||
30 | |||
31 | core_param(alignment, se_usermode, int, 0600); | ||
32 | |||
33 | void inc_unaligned_byte_access(void) | ||
34 | { | ||
35 | se_half++; | ||
36 | } | ||
37 | |||
38 | void inc_unaligned_word_access(void) | ||
39 | { | ||
40 | se_word++; | ||
41 | } | ||
42 | |||
43 | void inc_unaligned_dword_access(void) | ||
44 | { | ||
45 | se_dword++; | ||
46 | } | ||
47 | |||
48 | void inc_unaligned_multi_access(void) | ||
49 | { | ||
50 | se_multi++; | ||
51 | } | ||
52 | |||
53 | void inc_unaligned_user_access(void) | ||
54 | { | ||
55 | se_user++; | ||
56 | } | ||
57 | |||
58 | void inc_unaligned_kernel_access(void) | ||
59 | { | ||
60 | se_sys++; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * This defaults to the global policy which can be set from the command | ||
65 | * line, while processes can overload their preferences via prctl(). | ||
66 | */ | ||
67 | unsigned int unaligned_user_action(void) | ||
68 | { | ||
69 | unsigned int action = se_usermode; | ||
70 | |||
71 | if (current->thread.flags & SH_THREAD_UAC_SIGBUS) { | ||
72 | action &= ~UM_FIXUP; | ||
73 | action |= UM_SIGNAL; | ||
74 | } | ||
75 | |||
76 | if (current->thread.flags & SH_THREAD_UAC_NOPRINT) | ||
77 | action &= ~UM_WARN; | ||
78 | |||
79 | return action; | ||
80 | } | ||
81 | |||
82 | int get_unalign_ctl(struct task_struct *tsk, unsigned long addr) | ||
83 | { | ||
84 | return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK, | ||
85 | (unsigned int __user *)addr); | ||
86 | } | ||
87 | |||
88 | int set_unalign_ctl(struct task_struct *tsk, unsigned int val) | ||
89 | { | ||
90 | tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) | | ||
91 | (val & SH_THREAD_UAC_MASK); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn, | ||
96 | struct pt_regs *regs) | ||
97 | { | ||
98 | if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit()) | ||
99 | pr_notice("Fixing up unaligned userspace access " | ||
100 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
101 | tsk->comm, task_pid_nr(tsk), | ||
102 | (void *)instruction_pointer(regs), insn); | ||
103 | else if (se_kernmode_warn && printk_ratelimit()) | ||
104 | pr_notice("Fixing up unaligned kernel access " | ||
105 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
106 | tsk->comm, task_pid_nr(tsk), | ||
107 | (void *)instruction_pointer(regs), insn); | ||
108 | } | ||
109 | |||
110 | static const char *se_usermode_action[] = { | ||
111 | "ignored", | ||
112 | "warn", | ||
113 | "fixup", | ||
114 | "fixup+warn", | ||
115 | "signal", | ||
116 | "signal+warn" | ||
117 | }; | ||
118 | |||
119 | static int alignment_proc_show(struct seq_file *m, void *v) | ||
120 | { | ||
121 | seq_printf(m, "User:\t\t%lu\n", se_user); | ||
122 | seq_printf(m, "System:\t\t%lu\n", se_sys); | ||
123 | seq_printf(m, "Half:\t\t%lu\n", se_half); | ||
124 | seq_printf(m, "Word:\t\t%lu\n", se_word); | ||
125 | seq_printf(m, "DWord:\t\t%lu\n", se_dword); | ||
126 | seq_printf(m, "Multi:\t\t%lu\n", se_multi); | ||
127 | seq_printf(m, "User faults:\t%i (%s)\n", se_usermode, | ||
128 | se_usermode_action[se_usermode]); | ||
129 | seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, | ||
130 | se_kernmode_warn ? "+warn" : ""); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int alignment_proc_open(struct inode *inode, struct file *file) | ||
135 | { | ||
136 | return single_open(file, alignment_proc_show, NULL); | ||
137 | } | ||
138 | |||
139 | static ssize_t alignment_proc_write(struct file *file, | ||
140 | const char __user *buffer, size_t count, loff_t *pos) | ||
141 | { | ||
142 | int *data = PDE(file->f_path.dentry->d_inode)->data; | ||
143 | char mode; | ||
144 | |||
145 | if (count > 0) { | ||
146 | if (get_user(mode, buffer)) | ||
147 | return -EFAULT; | ||
148 | if (mode >= '0' && mode <= '5') | ||
149 | *data = mode - '0'; | ||
150 | } | ||
151 | return count; | ||
152 | } | ||
153 | |||
154 | static const struct file_operations alignment_proc_fops = { | ||
155 | .owner = THIS_MODULE, | ||
156 | .open = alignment_proc_open, | ||
157 | .read = seq_read, | ||
158 | .llseek = seq_lseek, | ||
159 | .release = single_release, | ||
160 | .write = alignment_proc_write, | ||
161 | }; | ||
162 | |||
163 | /* | ||
164 | * This needs to be done after sysctl_init, otherwise sys/ will be | ||
165 | * overwritten. Actually, this shouldn't be in sys/ at all since | ||
166 | * it isn't a sysctl, and it doesn't contain sysctl information. | ||
167 | * We now locate it in /proc/cpu/alignment instead. | ||
168 | */ | ||
169 | static int __init alignment_init(void) | ||
170 | { | ||
171 | struct proc_dir_entry *dir, *res; | ||
172 | |||
173 | dir = proc_mkdir("cpu", NULL); | ||
174 | if (!dir) | ||
175 | return -ENOMEM; | ||
176 | |||
177 | res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir, | ||
178 | &alignment_proc_fops, &se_usermode); | ||
179 | if (!res) | ||
180 | return -ENOMEM; | ||
181 | |||
182 | res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir, | ||
183 | &alignment_proc_fops, &se_kernmode_warn); | ||
184 | if (!res) | ||
185 | return -ENOMEM; | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | fs_initcall(alignment_init); | ||
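The new alignment.c wires the counters and policy above into two user-visible interfaces: per-process control through prctl() (backed by set_unalign_ctl/get_unalign_ctl) and a global knob in /proc/cpu/alignment, which accepts a single digit '0'..'5' indexing se_usermode_action. A hedged userspace sketch of both, assuming the standard <linux/prctl.h> constants; '3' selects "fixup+warn", matching the UM_WARN | UM_FIXUP default:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* per-process: deliver SIGBUS instead of fixing up */
            if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) < 0)
                    perror("prctl(PR_SET_UNALIGN)");

            /* global policy: index 3 in se_usermode_action ("fixup+warn") */
            FILE *f = fopen("/proc/cpu/alignment", "w");
            if (f) {
                    fputc('3', f);
                    fclose(f);
            }
            return 0;
    }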
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 5ba067b26591..690ed010d002 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -22,8 +22,7 @@ enum cache_type { | |||
22 | CACHE_TYPE_UNIFIED, | 22 | CACHE_TYPE_UNIFIED, |
23 | }; | 23 | }; |
24 | 24 | ||
25 | static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file, | 25 | static int cache_seq_show(struct seq_file *file, void *iter) |
26 | void *iter) | ||
27 | { | 26 | { |
28 | unsigned int cache_type = (unsigned int)file->private; | 27 | unsigned int cache_type = (unsigned int)file->private; |
29 | struct cache_info *cache; | 28 | struct cache_info *cache; |
@@ -37,7 +36,7 @@ static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file, | |||
37 | */ | 36 | */ |
38 | jump_to_uncached(); | 37 | jump_to_uncached(); |
39 | 38 | ||
40 | ccr = ctrl_inl(CCR); | 39 | ccr = __raw_readl(CCR); |
41 | if ((ccr & CCR_CACHE_ENABLE) == 0) { | 40 | if ((ccr & CCR_CACHE_ENABLE) == 0) { |
42 | back_to_cached(); | 41 | back_to_cached(); |
43 | 42 | ||
@@ -90,7 +89,7 @@ static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file, | |||
90 | for (addr = addrstart, line = 0; | 89 | for (addr = addrstart, line = 0; |
91 | addr < addrstart + waysize; | 90 | addr < addrstart + waysize; |
92 | addr += cache->linesz, line++) { | 91 | addr += cache->linesz, line++) { |
93 | unsigned long data = ctrl_inl(addr); | 92 | unsigned long data = __raw_readl(addr); |
94 | 93 | ||
95 | /* Check the V bit, ignore invalid cachelines */ | 94 | /* Check the V bit, ignore invalid cachelines */ |
96 | if ((data & 1) == 0) | 95 | if ((data & 1) == 0) |
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 699a71f46327..defcf719f2e8 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -28,10 +28,10 @@ static void sh2__flush_wback_region(void *start, int size) | |||
28 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0); | 28 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0); |
29 | int way; | 29 | int way; |
30 | for (way = 0; way < 4; way++) { | 30 | for (way = 0; way < 4; way++) { |
31 | unsigned long data = ctrl_inl(addr | (way << 12)); | 31 | unsigned long data = __raw_readl(addr | (way << 12)); |
32 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 32 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { |
33 | data &= ~SH_CACHE_UPDATED; | 33 | data &= ~SH_CACHE_UPDATED; |
34 | ctrl_outl(data, addr | (way << 12)); | 34 | __raw_writel(data, addr | (way << 12)); |
35 | } | 35 | } |
36 | } | 36 | } |
37 | } | 37 | } |
@@ -47,7 +47,7 @@ static void sh2__flush_purge_region(void *start, int size) | |||
47 | & ~(L1_CACHE_BYTES-1); | 47 | & ~(L1_CACHE_BYTES-1); |
48 | 48 | ||
49 | for (v = begin; v < end; v+=L1_CACHE_BYTES) | 49 | for (v = begin; v < end; v+=L1_CACHE_BYTES) |
50 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 50 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 51 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
52 | } | 52 | } |
53 | 53 | ||
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size) | |||
63 | local_irq_save(flags); | 63 | local_irq_save(flags); |
64 | jump_to_uncached(); | 64 | jump_to_uncached(); |
65 | 65 | ||
66 | ccr = ctrl_inl(CCR); | 66 | ccr = __raw_readl(CCR); |
67 | ccr |= CCR_CACHE_INVALIDATE; | 67 | ccr |= CCR_CACHE_INVALIDATE; |
68 | ctrl_outl(ccr, CCR); | 68 | __raw_writel(ccr, CCR); |
69 | 69 | ||
70 | back_to_cached(); | 70 | back_to_cached(); |
71 | local_irq_restore(flags); | 71 | local_irq_restore(flags); |
@@ -78,7 +78,7 @@ static void sh2__flush_invalidate_region(void *start, int size) | |||
78 | & ~(L1_CACHE_BYTES-1); | 78 | & ~(L1_CACHE_BYTES-1); |
79 | 79 | ||
80 | for (v = begin; v < end; v+=L1_CACHE_BYTES) | 80 | for (v = begin; v < end; v+=L1_CACHE_BYTES) |
81 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 81 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); | 82 | CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008); |
83 | #endif | 83 | #endif |
84 | } | 84 | } |
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 975899d83564..1f51225426a2 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -32,10 +32,10 @@ static void sh2a__flush_wback_region(void *start, int size) | |||
32 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0); | 32 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0); |
33 | int way; | 33 | int way; |
34 | for (way = 0; way < 4; way++) { | 34 | for (way = 0; way < 4; way++) { |
35 | unsigned long data = ctrl_inl(addr | (way << 11)); | 35 | unsigned long data = __raw_readl(addr | (way << 11)); |
36 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 36 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { |
37 | data &= ~SH_CACHE_UPDATED; | 37 | data &= ~SH_CACHE_UPDATED; |
38 | ctrl_outl(data, addr | (way << 11)); | 38 | __raw_writel(data, addr | (way << 11)); |
39 | } | 39 | } |
40 | } | 40 | } |
41 | } | 41 | } |
@@ -58,7 +58,7 @@ static void sh2a__flush_purge_region(void *start, int size) | |||
58 | jump_to_uncached(); | 58 | jump_to_uncached(); |
59 | 59 | ||
60 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 60 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
61 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 61 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
62 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 62 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
63 | } | 63 | } |
64 | back_to_cached(); | 64 | back_to_cached(); |
@@ -78,17 +78,17 @@ static void sh2a__flush_invalidate_region(void *start, int size) | |||
78 | jump_to_uncached(); | 78 | jump_to_uncached(); |
79 | 79 | ||
80 | #ifdef CONFIG_CACHE_WRITEBACK | 80 | #ifdef CONFIG_CACHE_WRITEBACK |
81 | ctrl_outl(ctrl_inl(CCR) | CCR_OCACHE_INVALIDATE, CCR); | 81 | __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); |
82 | /* I-cache invalidate */ | 82 | /* I-cache invalidate */ |
83 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 83 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
84 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 84 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
85 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 85 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
86 | } | 86 | } |
87 | #else | 87 | #else |
88 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | 88 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
89 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 89 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
90 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 90 | CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
91 | ctrl_outl((v & CACHE_PHYSADDR_MASK), | 91 | __raw_writel((v & CACHE_PHYSADDR_MASK), |
92 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); | 92 | CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008); |
93 | } | 93 | } |
94 | #endif | 94 | #endif |
@@ -115,14 +115,14 @@ static void sh2a_flush_icache_range(void *args) | |||
115 | int way; | 115 | int way; |
116 | /* O-Cache writeback */ | 116 | /* O-Cache writeback */ |
117 | for (way = 0; way < 4; way++) { | 117 | for (way = 0; way < 4; way++) { |
118 | unsigned long data = ctrl_inl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); | 118 | unsigned long data = __raw_readl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); |
119 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { | 119 | if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) { |
120 | data &= ~SH_CACHE_UPDATED; | 120 | data &= ~SH_CACHE_UPDATED; |
121 | ctrl_outl(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); | 121 | __raw_writel(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11)); |
122 | } | 122 | } |
123 | } | 123 | } |
124 | /* I-Cache invalidate */ | 124 | /* I-Cache invalidate */ |
125 | ctrl_outl(addr, | 125 | __raw_writel(addr, |
126 | CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008); | 126 | CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008); |
127 | } | 127 | } |
128 | 128 | ||
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index faef80c98134..e37523f65195 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -50,12 +50,12 @@ static void sh3__flush_wback_region(void *start, int size) | |||
50 | p = __pa(v); | 50 | p = __pa(v); |
51 | addr = addrstart | (v & current_cpu_data.dcache.entry_mask); | 51 | addr = addrstart | (v & current_cpu_data.dcache.entry_mask); |
52 | local_irq_save(flags); | 52 | local_irq_save(flags); |
53 | data = ctrl_inl(addr); | 53 | data = __raw_readl(addr); |
54 | 54 | ||
55 | if ((data & CACHE_PHYSADDR_MASK) == | 55 | if ((data & CACHE_PHYSADDR_MASK) == |
56 | (p & CACHE_PHYSADDR_MASK)) { | 56 | (p & CACHE_PHYSADDR_MASK)) { |
57 | data &= ~SH_CACHE_UPDATED; | 57 | data &= ~SH_CACHE_UPDATED; |
58 | ctrl_outl(data, addr); | 58 | __raw_writel(data, addr); |
59 | local_irq_restore(flags); | 59 | local_irq_restore(flags); |
60 | break; | 60 | break; |
61 | } | 61 | } |
@@ -86,7 +86,7 @@ static void sh3__flush_purge_region(void *start, int size) | |||
86 | data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */ | 86 | data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */ |
87 | addr = CACHE_OC_ADDRESS_ARRAY | | 87 | addr = CACHE_OC_ADDRESS_ARRAY | |
88 | (v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC; | 88 | (v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC; |
89 | ctrl_outl(data, addr); | 89 | __raw_writel(data, addr); |
90 | } | 90 | } |
91 | } | 91 | } |
92 | 92 | ||
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b7f235c74d66..2cfae81914aa 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,7 +2,7 @@ | |||
2 | * arch/sh/mm/cache-sh4.c | 2 | * arch/sh/mm/cache-sh4.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2001 - 2007 Paul Mundt | 5 | * Copyright (C) 2001 - 2009 Paul Mundt |
6 | * Copyright (C) 2003 Richard Curnow | 6 | * Copyright (C) 2003 Richard Curnow |
7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. | 7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. |
8 | * | 8 | * |
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/highmem.h> | ||
19 | #include <asm/pgtable.h> | ||
18 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
19 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
20 | 22 | ||
@@ -23,27 +25,18 @@ | |||
23 | * flushing. Anything exceeding this will simply flush the dcache in its | 25 | * flushing. Anything exceeding this will simply flush the dcache in its |
24 | * entirety. | 26 | * entirety. |
25 | */ | 27 | */ |
26 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ | ||
27 | #define MAX_ICACHE_PAGES 32 | 28 | #define MAX_ICACHE_PAGES 32 |
28 | 29 | ||
29 | static void __flush_cache_one(unsigned long addr, unsigned long phys, | 30 | static void __flush_cache_one(unsigned long addr, unsigned long phys, |
30 | unsigned long exec_offset); | 31 | unsigned long exec_offset); |
31 | 32 | ||
32 | /* | 33 | /* |
33 | * This is initialised here to ensure that it is not placed in the BSS. If | ||
34 | * that were to happen, note that cache_init gets called before the BSS is | ||
35 | * cleared, so this would get nulled out which would be hopeless. | ||
36 | */ | ||
37 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = | ||
38 | (void (*)(unsigned long, unsigned long))0xdeadbeef; | ||
39 | |||
40 | /* | ||
41 | * Write back the range of D-cache, and purge the I-cache. | 34 | * Write back the range of D-cache, and purge the I-cache. |
42 | * | 35 | * |
43 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | 36 | * Called from kernel/module.c:sys_init_module and routine for a.out format, |
44 | * signal handler code and kprobes code | 37 | * signal handler code and kprobes code |
45 | */ | 38 | */ |
46 | static void __uses_jump_to_uncached sh4_flush_icache_range(void *args) | 39 | static void sh4_flush_icache_range(void *args) |
47 | { | 40 | { |
48 | struct flusher_data *data = args; | 41 | struct flusher_data *data = args; |
49 | unsigned long start, end; | 42 | unsigned long start, end; |
@@ -97,15 +90,15 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys) | |||
97 | unsigned long flags, exec_offset = 0; | 90 | unsigned long flags, exec_offset = 0; |
98 | 91 | ||
99 | /* | 92 | /* |
100 | * All types of SH-4 require PC to be in P2 to operate on the I-cache. | 93 | * All types of SH-4 require PC to be uncached to operate on the I-cache. |
101 | * Some types of SH-4 require PC to be in P2 to operate on the D-cache. | 94 | * Some types of SH-4 require PC to be uncached to operate on the D-cache. |
102 | */ | 95 | */ |
103 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || | 96 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || |
104 | (start < CACHE_OC_ADDRESS_ARRAY)) | 97 | (start < CACHE_OC_ADDRESS_ARRAY)) |
105 | exec_offset = 0x20000000; | 98 | exec_offset = cached_to_uncached; |
106 | 99 | ||
107 | local_irq_save(flags); | 100 | local_irq_save(flags); |
108 | __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset); | 101 | __flush_cache_one(start, phys, exec_offset); |
109 | local_irq_restore(flags); | 102 | local_irq_restore(flags); |
110 | } | 103 | } |
111 | 104 | ||
@@ -116,6 +109,7 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys) | |||
116 | static void sh4_flush_dcache_page(void *arg) | 109 | static void sh4_flush_dcache_page(void *arg) |
117 | { | 110 | { |
118 | struct page *page = arg; | 111 | struct page *page = arg; |
112 | unsigned long addr = (unsigned long)page_address(page); | ||
119 | #ifndef CONFIG_SMP | 113 | #ifndef CONFIG_SMP |
120 | struct address_space *mapping = page_mapping(page); | 114 | struct address_space *mapping = page_mapping(page); |
121 | 115 | ||
@@ -123,22 +117,14 @@ static void sh4_flush_dcache_page(void *arg) | |||
123 | set_bit(PG_dcache_dirty, &page->flags); | 117 | set_bit(PG_dcache_dirty, &page->flags); |
124 | else | 118 | else |
125 | #endif | 119 | #endif |
126 | { | 120 | flush_cache_one(CACHE_OC_ADDRESS_ARRAY | |
127 | unsigned long phys = PHYSADDR(page_address(page)); | 121 | (addr & shm_align_mask), page_to_phys(page)); |
128 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; | ||
129 | int i, n; | ||
130 | |||
131 | /* Loop all the D-cache */ | ||
132 | n = boot_cpu_data.dcache.n_aliases; | ||
133 | for (i = 0; i < n; i++, addr += PAGE_SIZE) | ||
134 | flush_cache_one(addr, phys); | ||
135 | } | ||
136 | 122 | ||
137 | wmb(); | 123 | wmb(); |
138 | } | 124 | } |
139 | 125 | ||
140 | /* TODO: Selective icache invalidation through IC address array.. */ | 126 | /* TODO: Selective icache invalidation through IC address array.. */ |
141 | static void __uses_jump_to_uncached flush_icache_all(void) | 127 | static void flush_icache_all(void) |
142 | { | 128 | { |
143 | unsigned long flags, ccr; | 129 | unsigned long flags, ccr; |
144 | 130 | ||
@@ -146,9 +132,9 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
146 | jump_to_uncached(); | 132 | jump_to_uncached(); |
147 | 133 | ||
148 | /* Flush I-cache */ | 134 | /* Flush I-cache */ |
149 | ccr = ctrl_inl(CCR); | 135 | ccr = __raw_readl(CCR); |
150 | ccr |= CCR_CACHE_ICI; | 136 | ccr |= CCR_CACHE_ICI; |
151 | ctrl_outl(ccr, CCR); | 137 | __raw_writel(ccr, CCR); |
152 | 138 | ||
153 | /* | 139 | /* |
154 | * back_to_cached() will take care of the barrier for us, don't add | 140 | * back_to_cached() will take care of the barrier for us, don't add |
@@ -159,10 +145,27 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
159 | local_irq_restore(flags); | 145 | local_irq_restore(flags); |
160 | } | 146 | } |
161 | 147 | ||
162 | static inline void flush_dcache_all(void) | 148 | static void flush_dcache_all(void) |
163 | { | 149 | { |
164 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); | 150 | unsigned long addr, end_addr, entry_offset; |
165 | wmb(); | 151 | |
152 | end_addr = CACHE_OC_ADDRESS_ARRAY + | ||
153 | (current_cpu_data.dcache.sets << | ||
154 | current_cpu_data.dcache.entry_shift) * | ||
155 | current_cpu_data.dcache.ways; | ||
156 | |||
157 | entry_offset = 1 << current_cpu_data.dcache.entry_shift; | ||
158 | |||
159 | for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) { | ||
160 | __raw_writel(0, addr); addr += entry_offset; | ||
161 | __raw_writel(0, addr); addr += entry_offset; | ||
162 | __raw_writel(0, addr); addr += entry_offset; | ||
163 | __raw_writel(0, addr); addr += entry_offset; | ||
164 | __raw_writel(0, addr); addr += entry_offset; | ||
165 | __raw_writel(0, addr); addr += entry_offset; | ||
166 | __raw_writel(0, addr); addr += entry_offset; | ||
167 | __raw_writel(0, addr); addr += entry_offset; | ||
168 | } | ||
166 | } | 169 | } |
167 | 170 | ||
168 | static void sh4_flush_cache_all(void *unused) | 171 | static void sh4_flush_cache_all(void *unused) |
@@ -171,89 +174,13 @@ static void sh4_flush_cache_all(void *unused) | |||
171 | flush_icache_all(); | 174 | flush_icache_all(); |
172 | } | 175 | } |
173 | 176 | ||
174 | static void __flush_cache_mm(struct mm_struct *mm, unsigned long start, | ||
175 | unsigned long end) | ||
176 | { | ||
177 | unsigned long d = 0, p = start & PAGE_MASK; | ||
178 | unsigned long alias_mask = boot_cpu_data.dcache.alias_mask; | ||
179 | unsigned long n_aliases = boot_cpu_data.dcache.n_aliases; | ||
180 | unsigned long select_bit; | ||
181 | unsigned long all_aliases_mask; | ||
182 | unsigned long addr_offset; | ||
183 | pgd_t *dir; | ||
184 | pmd_t *pmd; | ||
185 | pud_t *pud; | ||
186 | pte_t *pte; | ||
187 | int i; | ||
188 | |||
189 | dir = pgd_offset(mm, p); | ||
190 | pud = pud_offset(dir, p); | ||
191 | pmd = pmd_offset(pud, p); | ||
192 | end = PAGE_ALIGN(end); | ||
193 | |||
194 | all_aliases_mask = (1 << n_aliases) - 1; | ||
195 | |||
196 | do { | ||
197 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { | ||
198 | p &= PMD_MASK; | ||
199 | p += PMD_SIZE; | ||
200 | pmd++; | ||
201 | |||
202 | continue; | ||
203 | } | ||
204 | |||
205 | pte = pte_offset_kernel(pmd, p); | ||
206 | |||
207 | do { | ||
208 | unsigned long phys; | ||
209 | pte_t entry = *pte; | ||
210 | |||
211 | if (!(pte_val(entry) & _PAGE_PRESENT)) { | ||
212 | pte++; | ||
213 | p += PAGE_SIZE; | ||
214 | continue; | ||
215 | } | ||
216 | |||
217 | phys = pte_val(entry) & PTE_PHYS_MASK; | ||
218 | |||
219 | if ((p ^ phys) & alias_mask) { | ||
220 | d |= 1 << ((p & alias_mask) >> PAGE_SHIFT); | ||
221 | d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT); | ||
222 | |||
223 | if (d == all_aliases_mask) | ||
224 | goto loop_exit; | ||
225 | } | ||
226 | |||
227 | pte++; | ||
228 | p += PAGE_SIZE; | ||
229 | } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); | ||
230 | pmd++; | ||
231 | } while (p < end); | ||
232 | |||
233 | loop_exit: | ||
234 | addr_offset = 0; | ||
235 | select_bit = 1; | ||
236 | |||
237 | for (i = 0; i < n_aliases; i++) { | ||
238 | if (d & select_bit) { | ||
239 | (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE); | ||
240 | wmb(); | ||
241 | } | ||
242 | |||
243 | select_bit <<= 1; | ||
244 | addr_offset += PAGE_SIZE; | ||
245 | } | ||
246 | } | ||
247 | |||
248 | /* | 177 | /* |
249 | * Note : (RPC) since the caches are physically tagged, the only point | 178 | * Note : (RPC) since the caches are physically tagged, the only point |
250 | * of flush_cache_mm for SH-4 is to get rid of aliases from the | 179 | * of flush_cache_mm for SH-4 is to get rid of aliases from the |
251 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that | 180 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that |
252 | * lines can stay resident so long as the virtual address they were | 181 | * lines can stay resident so long as the virtual address they were |
253 | * accessed with (hence cache set) is in accord with the physical | 182 | * accessed with (hence cache set) is in accord with the physical |
254 | * address (i.e. tag). It's no different here. So I reckon we don't | 183 | * address (i.e. tag). It's no different here. |
255 | * need to flush the I-cache, since aliases don't matter for that. We | ||
256 | * should try that. | ||
257 | * | 184 | * |
258 | * Caller takes mm->mmap_sem. | 185 | * Caller takes mm->mmap_sem. |
259 | */ | 186 | */ |
@@ -264,33 +191,7 @@ static void sh4_flush_cache_mm(void *arg) | |||
264 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) | 191 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) |
265 | return; | 192 | return; |
266 | 193 | ||
267 | /* | 194 | flush_dcache_all(); |
268 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | ||
269 | * the cache is physically tagged, the data can just be left in there. | ||
270 | */ | ||
271 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
272 | return; | ||
273 | |||
274 | /* | ||
275 | * Don't bother groveling around the dcache for the VMA ranges | ||
276 | * if there are too many PTEs to make it worthwhile. | ||
277 | */ | ||
278 | if (mm->nr_ptes >= MAX_DCACHE_PAGES) | ||
279 | flush_dcache_all(); | ||
280 | else { | ||
281 | struct vm_area_struct *vma; | ||
282 | |||
283 | /* | ||
284 | * In this case there are reasonably sized ranges to flush, | ||
285 | * iterate through the VMA list and take care of any aliases. | ||
286 | */ | ||
287 | for (vma = mm->mmap; vma; vma = vma->vm_next) | ||
288 | __flush_cache_mm(mm, vma->vm_start, vma->vm_end); | ||
289 | } | ||
290 | |||
291 | /* Only touch the icache if one of the VMAs has VM_EXEC set. */ | ||
292 | if (mm->exec_vm) | ||
293 | flush_icache_all(); | ||
294 | } | 195 | } |
295 | 196 | ||
296 | /* | 197 | /* |
@@ -303,44 +204,62 @@ static void sh4_flush_cache_page(void *args) | |||
303 | { | 204 | { |
304 | struct flusher_data *data = args; | 205 | struct flusher_data *data = args; |
305 | struct vm_area_struct *vma; | 206 | struct vm_area_struct *vma; |
207 | struct page *page; | ||
306 | unsigned long address, pfn, phys; | 208 | unsigned long address, pfn, phys; |
307 | unsigned int alias_mask; | 209 | int map_coherent = 0; |
210 | pgd_t *pgd; | ||
211 | pud_t *pud; | ||
212 | pmd_t *pmd; | ||
213 | pte_t *pte; | ||
214 | void *vaddr; | ||
308 | 215 | ||
309 | vma = data->vma; | 216 | vma = data->vma; |
310 | address = data->addr1; | 217 | address = data->addr1 & PAGE_MASK; |
311 | pfn = data->addr2; | 218 | pfn = data->addr2; |
312 | phys = pfn << PAGE_SHIFT; | 219 | phys = pfn << PAGE_SHIFT; |
220 | page = pfn_to_page(pfn); | ||
313 | 221 | ||
314 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | 222 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) |
315 | return; | 223 | return; |
316 | 224 | ||
317 | alias_mask = boot_cpu_data.dcache.alias_mask; | 225 | pgd = pgd_offset(vma->vm_mm, address); |
318 | 226 | pud = pud_offset(pgd, address); | |
319 | /* We only need to flush D-cache when we have alias */ | 227 | pmd = pmd_offset(pud, address); |
320 | if ((address^phys) & alias_mask) { | 228 | pte = pte_offset_kernel(pmd, address); |
321 | /* Loop 4K of the D-cache */ | 229 | |
322 | flush_cache_one( | 230 | /* If the page isn't present, there is nothing to do here. */ |
323 | CACHE_OC_ADDRESS_ARRAY | (address & alias_mask), | 231 | if (!(pte_val(*pte) & _PAGE_PRESENT)) |
324 | phys); | 232 | return; |
325 | /* Loop another 4K of the D-cache */ | ||
326 | flush_cache_one( | ||
327 | CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask), | ||
328 | phys); | ||
329 | } | ||
330 | 233 | ||
331 | alias_mask = boot_cpu_data.icache.alias_mask; | 234 | if ((vma->vm_mm == current->active_mm)) |
332 | if (vma->vm_flags & VM_EXEC) { | 235 | vaddr = NULL; |
236 | else { | ||
333 | /* | 237 | /* |
334 | * Evict entries from the portion of the cache from which code | 238 | * Use kmap_coherent or kmap_atomic to do flushes for |
335 | * may have been executed at this address (virtual). There's | 239 | * another ASID than the current one. |
336 | * no need to evict from the portion corresponding to the | ||
337 | * physical address as for the D-cache, because we know the | ||
338 | * kernel has never executed the code through its identity | ||
339 | * translation. | ||
340 | */ | 240 | */ |
341 | flush_cache_one( | 241 | map_coherent = (current_cpu_data.dcache.n_aliases && |
342 | CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), | 242 | !test_bit(PG_dcache_dirty, &page->flags) && |
343 | phys); | 243 | page_mapped(page)); |
244 | if (map_coherent) | ||
245 | vaddr = kmap_coherent(page, address); | ||
246 | else | ||
247 | vaddr = kmap_atomic(page, KM_USER0); | ||
248 | |||
249 | address = (unsigned long)vaddr; | ||
250 | } | ||
251 | |||
252 | flush_cache_one(CACHE_OC_ADDRESS_ARRAY | | ||
253 | (address & shm_align_mask), phys); | ||
254 | |||
255 | if (vma->vm_flags & VM_EXEC) | ||
256 | flush_icache_all(); | ||
257 | |||
258 | if (vaddr) { | ||
259 | if (map_coherent) | ||
260 | kunmap_coherent(vaddr); | ||
261 | else | ||
262 | kunmap_atomic(vaddr, KM_USER0); | ||
344 | } | 263 | } |
345 | } | 264 | } |
346 | 265 | ||
@@ -373,24 +292,10 @@ static void sh4_flush_cache_range(void *args) | |||
373 | if (boot_cpu_data.dcache.n_aliases == 0) | 292 | if (boot_cpu_data.dcache.n_aliases == 0) |
374 | return; | 293 | return; |
375 | 294 | ||
376 | /* | 295 | flush_dcache_all(); |
377 | * Don't bother with the lookup and alias check if we have a | ||
378 | * wide range to cover, just blow away the dcache in its | ||
379 | * entirety instead. -- PFM. | ||
380 | */ | ||
381 | if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES) | ||
382 | flush_dcache_all(); | ||
383 | else | ||
384 | __flush_cache_mm(vma->vm_mm, start, end); | ||
385 | 296 | ||
386 | if (vma->vm_flags & VM_EXEC) { | 297 | if (vma->vm_flags & VM_EXEC) |
387 | /* | ||
388 | * TODO: Is this required??? Need to look at how I-cache | ||
389 | * coherency is assured when new programs are loaded to see if | ||
390 | * this matters. | ||
391 | */ | ||
392 | flush_icache_all(); | 298 | flush_icache_all(); |
393 | } | ||
394 | } | 299 | } |
395 | 300 | ||
396 | /** | 301 | /** |
@@ -464,245 +369,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys, | |||
464 | } while (--way_count != 0); | 369 | } while (--way_count != 0); |
465 | } | 370 | } |
466 | 371 | ||
467 | /* | ||
468 | * Break the 1, 2 and 4 way variants of this out into separate functions to | ||
469 | * avoid nearly all the overhead of having the conditional stuff in the function | ||
470 | * bodies (+ the 1 and 2 way cases avoid saving any registers too). | ||
471 | * | ||
472 | * We want to eliminate unnecessary bus transactions, so this code uses | ||
473 | * a non-obvious technique. | ||
474 | * | ||
475 | * Loop over a cache way sized block of, one cache line at a time. For each | ||
476 | * line, use movca.a to cause the current cache line contents to be written | ||
477 | * back, but without reading anything from main memory. However this has the | ||
478 | * side effect that the cache is now caching that memory location. So follow | ||
479 | * this with a cache invalidate to mark the cache line invalid. And do all | ||
480 | * this with interrupts disabled, to avoid the cache line being accidently | ||
481 | * evicted while it is holding garbage. | ||
482 | * | ||
483 | * This also breaks in a number of circumstances: | ||
484 | * - if there are modifications to the region of memory just above | ||
485 | * empty_zero_page (for example because a breakpoint has been placed | ||
486 | * there), then these can be lost. | ||
487 | * | ||
488 | * This is because the the memory address which the cache temporarily | ||
489 | * caches in the above description is empty_zero_page. So the | ||
490 | * movca.l hits the cache (it is assumed that it misses, or at least | ||
491 | * isn't dirty), modifies the line and then invalidates it, losing the | ||
492 | * required change. | ||
493 | * | ||
494 | * - If caches are disabled or configured in write-through mode, then | ||
495 | * the movca.l writes garbage directly into memory. | ||
496 | */ | ||
497 | static void __flush_dcache_segment_writethrough(unsigned long start, | ||
498 | unsigned long extent_per_way) | ||
499 | { | ||
500 | unsigned long addr; | ||
501 | int i; | ||
502 | |||
503 | addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask); | ||
504 | |||
505 | while (extent_per_way) { | ||
506 | for (i = 0; i < cpu_data->dcache.ways; i++) | ||
507 | __raw_writel(0, addr + cpu_data->dcache.way_incr * i); | ||
508 | |||
509 | addr += cpu_data->dcache.linesz; | ||
510 | extent_per_way -= cpu_data->dcache.linesz; | ||
511 | } | ||
512 | } | ||
513 | |||
514 | static void __flush_dcache_segment_1way(unsigned long start, | ||
515 | unsigned long extent_per_way) | ||
516 | { | ||
517 | unsigned long orig_sr, sr_with_bl; | ||
518 | unsigned long base_addr; | ||
519 | unsigned long way_incr, linesz, way_size; | ||
520 | struct cache_info *dcache; | ||
521 | register unsigned long a0, a0e; | ||
522 | |||
523 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
524 | sr_with_bl = orig_sr | (1<<28); | ||
525 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
526 | |||
527 | /* | ||
528 | * The previous code aligned base_addr to 16k, i.e. the way_size of all | ||
529 | * existing SH-4 D-caches. Whilst I don't see a need to have this | ||
530 | * aligned to any better than the cache line size (which it will be | ||
531 | * anyway by construction), let's align it to at least the way_size of | ||
532 | * any existing or conceivable SH-4 D-cache. -- RPC | ||
533 | */ | ||
534 | base_addr = ((base_addr >> 16) << 16); | ||
535 | base_addr |= start; | ||
536 | |||
537 | dcache = &boot_cpu_data.dcache; | ||
538 | linesz = dcache->linesz; | ||
539 | way_incr = dcache->way_incr; | ||
540 | way_size = dcache->way_size; | ||
541 | |||
542 | a0 = base_addr; | ||
543 | a0e = base_addr + extent_per_way; | ||
544 | do { | ||
545 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
546 | asm volatile("movca.l r0, @%0\n\t" | ||
547 | "ocbi @%0" : : "r" (a0)); | ||
548 | a0 += linesz; | ||
549 | asm volatile("movca.l r0, @%0\n\t" | ||
550 | "ocbi @%0" : : "r" (a0)); | ||
551 | a0 += linesz; | ||
552 | asm volatile("movca.l r0, @%0\n\t" | ||
553 | "ocbi @%0" : : "r" (a0)); | ||
554 | a0 += linesz; | ||
555 | asm volatile("movca.l r0, @%0\n\t" | ||
556 | "ocbi @%0" : : "r" (a0)); | ||
557 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
558 | a0 += linesz; | ||
559 | } while (a0 < a0e); | ||
560 | } | ||
561 | |||
562 | static void __flush_dcache_segment_2way(unsigned long start, | ||
563 | unsigned long extent_per_way) | ||
564 | { | ||
565 | unsigned long orig_sr, sr_with_bl; | ||
566 | unsigned long base_addr; | ||
567 | unsigned long way_incr, linesz, way_size; | ||
568 | struct cache_info *dcache; | ||
569 | register unsigned long a0, a1, a0e; | ||
570 | |||
571 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
572 | sr_with_bl = orig_sr | (1<<28); | ||
573 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
574 | |||
575 | /* See comment under 1-way above */ | ||
576 | base_addr = ((base_addr >> 16) << 16); | ||
577 | base_addr |= start; | ||
578 | |||
579 | dcache = &boot_cpu_data.dcache; | ||
580 | linesz = dcache->linesz; | ||
581 | way_incr = dcache->way_incr; | ||
582 | way_size = dcache->way_size; | ||
583 | |||
584 | a0 = base_addr; | ||
585 | a1 = a0 + way_incr; | ||
586 | a0e = base_addr + extent_per_way; | ||
587 | do { | ||
588 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
589 | asm volatile("movca.l r0, @%0\n\t" | ||
590 | "movca.l r0, @%1\n\t" | ||
591 | "ocbi @%0\n\t" | ||
592 | "ocbi @%1" : : | ||
593 | "r" (a0), "r" (a1)); | ||
594 | a0 += linesz; | ||
595 | a1 += linesz; | ||
596 | asm volatile("movca.l r0, @%0\n\t" | ||
597 | "movca.l r0, @%1\n\t" | ||
598 | "ocbi @%0\n\t" | ||
599 | "ocbi @%1" : : | ||
600 | "r" (a0), "r" (a1)); | ||
601 | a0 += linesz; | ||
602 | a1 += linesz; | ||
603 | asm volatile("movca.l r0, @%0\n\t" | ||
604 | "movca.l r0, @%1\n\t" | ||
605 | "ocbi @%0\n\t" | ||
606 | "ocbi @%1" : : | ||
607 | "r" (a0), "r" (a1)); | ||
608 | a0 += linesz; | ||
609 | a1 += linesz; | ||
610 | asm volatile("movca.l r0, @%0\n\t" | ||
611 | "movca.l r0, @%1\n\t" | ||
612 | "ocbi @%0\n\t" | ||
613 | "ocbi @%1" : : | ||
614 | "r" (a0), "r" (a1)); | ||
615 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
616 | a0 += linesz; | ||
617 | a1 += linesz; | ||
618 | } while (a0 < a0e); | ||
619 | } | ||
620 | |||
621 | static void __flush_dcache_segment_4way(unsigned long start, | ||
622 | unsigned long extent_per_way) | ||
623 | { | ||
624 | unsigned long orig_sr, sr_with_bl; | ||
625 | unsigned long base_addr; | ||
626 | unsigned long way_incr, linesz, way_size; | ||
627 | struct cache_info *dcache; | ||
628 | register unsigned long a0, a1, a2, a3, a0e; | ||
629 | |||
630 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
631 | sr_with_bl = orig_sr | (1<<28); | ||
632 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
633 | |||
634 | /* See comment under 1-way above */ | ||
635 | base_addr = ((base_addr >> 16) << 16); | ||
636 | base_addr |= start; | ||
637 | |||
638 | dcache = &boot_cpu_data.dcache; | ||
639 | linesz = dcache->linesz; | ||
640 | way_incr = dcache->way_incr; | ||
641 | way_size = dcache->way_size; | ||
642 | |||
643 | a0 = base_addr; | ||
644 | a1 = a0 + way_incr; | ||
645 | a2 = a1 + way_incr; | ||
646 | a3 = a2 + way_incr; | ||
647 | a0e = base_addr + extent_per_way; | ||
648 | do { | ||
649 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
650 | asm volatile("movca.l r0, @%0\n\t" | ||
651 | "movca.l r0, @%1\n\t" | ||
652 | "movca.l r0, @%2\n\t" | ||
653 | "movca.l r0, @%3\n\t" | ||
654 | "ocbi @%0\n\t" | ||
655 | "ocbi @%1\n\t" | ||
656 | "ocbi @%2\n\t" | ||
657 | "ocbi @%3\n\t" : : | ||
658 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
659 | a0 += linesz; | ||
660 | a1 += linesz; | ||
661 | a2 += linesz; | ||
662 | a3 += linesz; | ||
663 | asm volatile("movca.l r0, @%0\n\t" | ||
664 | "movca.l r0, @%1\n\t" | ||
665 | "movca.l r0, @%2\n\t" | ||
666 | "movca.l r0, @%3\n\t" | ||
667 | "ocbi @%0\n\t" | ||
668 | "ocbi @%1\n\t" | ||
669 | "ocbi @%2\n\t" | ||
670 | "ocbi @%3\n\t" : : | ||
671 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
672 | a0 += linesz; | ||
673 | a1 += linesz; | ||
674 | a2 += linesz; | ||
675 | a3 += linesz; | ||
676 | asm volatile("movca.l r0, @%0\n\t" | ||
677 | "movca.l r0, @%1\n\t" | ||
678 | "movca.l r0, @%2\n\t" | ||
679 | "movca.l r0, @%3\n\t" | ||
680 | "ocbi @%0\n\t" | ||
681 | "ocbi @%1\n\t" | ||
682 | "ocbi @%2\n\t" | ||
683 | "ocbi @%3\n\t" : : | ||
684 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
685 | a0 += linesz; | ||
686 | a1 += linesz; | ||
687 | a2 += linesz; | ||
688 | a3 += linesz; | ||
689 | asm volatile("movca.l r0, @%0\n\t" | ||
690 | "movca.l r0, @%1\n\t" | ||
691 | "movca.l r0, @%2\n\t" | ||
692 | "movca.l r0, @%3\n\t" | ||
693 | "ocbi @%0\n\t" | ||
694 | "ocbi @%1\n\t" | ||
695 | "ocbi @%2\n\t" | ||
696 | "ocbi @%3\n\t" : : | ||
697 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
698 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
699 | a0 += linesz; | ||
700 | a1 += linesz; | ||
701 | a2 += linesz; | ||
702 | a3 += linesz; | ||
703 | } while (a0 < a0e); | ||
704 | } | ||
705 | |||
706 | extern void __weak sh4__flush_region_init(void); | 372 | extern void __weak sh4__flush_region_init(void); |
707 | 373 | ||
708 | /* | 374 | /* |
@@ -710,31 +376,10 @@ extern void __weak sh4__flush_region_init(void); | |||
710 | */ | 376 | */ |
711 | void __init sh4_cache_init(void) | 377 | void __init sh4_cache_init(void) |
712 | { | 378 | { |
713 | unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); | ||
714 | |||
715 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | 379 | printk("PVR=%08x CVR=%08x PRR=%08x\n", |
716 | ctrl_inl(CCN_PVR), | 380 | __raw_readl(CCN_PVR), |
717 | ctrl_inl(CCN_CVR), | 381 | __raw_readl(CCN_CVR), |
718 | ctrl_inl(CCN_PRR)); | 382 | __raw_readl(CCN_PRR)); |
719 | |||
720 | if (wt_enabled) | ||
721 | __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; | ||
722 | else { | ||
723 | switch (boot_cpu_data.dcache.ways) { | ||
724 | case 1: | ||
725 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
726 | break; | ||
727 | case 2: | ||
728 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
729 | break; | ||
730 | case 4: | ||
731 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
732 | break; | ||
733 | default: | ||
734 | panic("unknown number of cache ways\n"); | ||
735 | break; | ||
736 | } | ||
737 | } | ||
738 | 383 | ||
739 | local_flush_icache_range = sh4_flush_icache_range; | 384 | local_flush_icache_range = sh4_flush_icache_range; |
740 | local_flush_dcache_page = sh4_flush_dcache_page; | 385 | local_flush_dcache_page = sh4_flush_dcache_page; |
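The new flush_dcache_all() above drops the per-way movca.l/ocbi routines in favour of direct writes to the operand-cache address array, unrolled eight entries per iteration; as I understand the SH-4, storing 0 to an entry clears its V and U bits, and clearing U on a dirty line triggers write-back. A worked sketch of the sizing arithmetic, under assumed geometry (512 sets, 32-byte lines so entry_shift = 5, 1 way; 0xf4000000 is the usual SH-4 CACHE_OC_ADDRESS_ARRAY base):

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0xf4000000UL;   /* assumed CACHE_OC_ADDRESS_ARRAY */
            unsigned int sets = 512, entry_shift = 5, ways = 1;
            unsigned long end = base + ((unsigned long)sets << entry_shift) * ways;

            /* one address-array entry per line: 512 * 32 = 16384 bytes spanned */
            printf("entries=%lu span=%lu bytes\n",
                   (end - base) >> entry_shift, end - base);
            return 0;
    }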
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 467ff8e260f7..eb4cc4ec7952 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -563,7 +563,7 @@ static void sh5_flush_cache_page(void *args) | |||
563 | 563 | ||
564 | static void sh5_flush_dcache_page(void *page) | 564 | static void sh5_flush_dcache_page(void *page) |
565 | { | 565 | { |
566 | sh64_dcache_purge_phy_page(page_to_phys(page)); | 566 | sh64_dcache_purge_phy_page(page_to_phys((struct page *)page)); |
567 | wmb(); | 567 | wmb(); |
568 | } | 568 | } |
569 | 569 | ||
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 2601935eb589..f498da1cce7a 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -48,10 +48,10 @@ static inline void cache_wback_all(void) | |||
48 | unsigned long data; | 48 | unsigned long data; |
49 | int v = SH_CACHE_UPDATED | SH_CACHE_VALID; | 49 | int v = SH_CACHE_UPDATED | SH_CACHE_VALID; |
50 | 50 | ||
51 | data = ctrl_inl(addr); | 51 | data = __raw_readl(addr); |
52 | 52 | ||
53 | if ((data & v) == v) | 53 | if ((data & v) == v) |
54 | ctrl_outl(data & ~v, addr); | 54 | __raw_writel(data & ~v, addr); |
55 | 55 | ||
56 | } | 56 | } |
57 | 57 | ||
@@ -78,7 +78,7 @@ static void sh7705_flush_icache_range(void *args) | |||
78 | /* | 78 | /* |
79 | * Writeback&Invalidate the D-cache of the page | 79 | * Writeback&Invalidate the D-cache of the page |
80 | */ | 80 | */ |
81 | static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | 81 | static void __flush_dcache_page(unsigned long phys) |
82 | { | 82 | { |
83 | unsigned long ways, waysize, addrstart; | 83 | unsigned long ways, waysize, addrstart; |
84 | unsigned long flags; | 84 | unsigned long flags; |
@@ -115,10 +115,10 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys) | |||
115 | addr += current_cpu_data.dcache.linesz) { | 115 | addr += current_cpu_data.dcache.linesz) { |
116 | unsigned long data; | 116 | unsigned long data; |
117 | 117 | ||
118 | data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID); | 118 | data = __raw_readl(addr) & (0x1ffffC00 | SH_CACHE_VALID); |
119 | if (data == phys) { | 119 | if (data == phys) { |
120 | data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED); | 120 | data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED); |
121 | ctrl_outl(data, addr); | 121 | __raw_writel(data, addr); |
122 | } | 122 | } |
123 | } | 123 | } |
124 | 124 | ||
@@ -141,10 +141,10 @@ static void sh7705_flush_dcache_page(void *arg) | |||
141 | if (mapping && !mapping_mapped(mapping)) | 141 | if (mapping && !mapping_mapped(mapping)) |
142 | set_bit(PG_dcache_dirty, &page->flags); | 142 | set_bit(PG_dcache_dirty, &page->flags); |
143 | else | 143 | else |
144 | __flush_dcache_page(PHYSADDR(page_address(page))); | 144 | __flush_dcache_page(__pa(page_address(page))); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) | 147 | static void sh7705_flush_cache_all(void *args) |
148 | { | 148 | { |
149 | unsigned long flags; | 149 | unsigned long flags; |
150 | 150 | ||
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a2dc7f9ecc51..0f4095d7ac8b 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -2,7 +2,7 @@ | |||
2 | * arch/sh/mm/cache.c | 2 | * arch/sh/mm/cache.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2002 - 2009 Paul Mundt | 5 | * Copyright (C) 2002 - 2010 Paul Mundt |
6 | * | 6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | 7 | * Released under the terms of the GNU GPL v2.0. |
8 | */ | 8 | */ |
@@ -27,8 +27,11 @@ void (*local_flush_icache_page)(void *args) = cache_noop; | |||
27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; | 27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; |
28 | 28 | ||
29 | void (*__flush_wback_region)(void *start, int size); | 29 | void (*__flush_wback_region)(void *start, int size); |
30 | EXPORT_SYMBOL(__flush_wback_region); | ||
30 | void (*__flush_purge_region)(void *start, int size); | 31 | void (*__flush_purge_region)(void *start, int size); |
32 | EXPORT_SYMBOL(__flush_purge_region); | ||
31 | void (*__flush_invalidate_region)(void *start, int size); | 33 | void (*__flush_invalidate_region)(void *start, int size); |
34 | EXPORT_SYMBOL(__flush_invalidate_region); | ||
32 | 35 | ||
33 | static inline void noop__flush_region(void *start, int size) | 36 | static inline void noop__flush_region(void *start, int size) |
34 | { | 37 | { |
@@ -38,8 +41,17 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info, | |||
38 | int wait) | 41 | int wait) |
39 | { | 42 | { |
40 | preempt_disable(); | 43 | preempt_disable(); |
41 | smp_call_function(func, info, wait); | 44 | |
45 | /* | ||
46 | * It's possible that this gets called early on when IRQs are | ||
47 | * still disabled due to ioremapping by the boot CPU, so don't | ||
48 | * even attempt IPIs unless there are other CPUs online. | ||
49 | */ | ||
50 | if (num_online_cpus() > 1) | ||
51 | smp_call_function(func, info, wait); | ||
52 | |||
42 | func(info); | 53 | func(info); |
54 | |||
43 | preempt_enable(); | 55 | preempt_enable(); |
44 | } | 56 | } |
45 | 57 | ||
@@ -130,12 +142,8 @@ void __update_cache(struct vm_area_struct *vma, | |||
130 | page = pfn_to_page(pfn); | 142 | page = pfn_to_page(pfn); |
131 | if (pfn_valid(pfn)) { | 143 | if (pfn_valid(pfn)) { |
132 | int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); | 144 | int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); |
133 | if (dirty) { | 145 | if (dirty) |
134 | unsigned long addr = (unsigned long)page_address(page); | 146 | __flush_purge_region(page_address(page), PAGE_SIZE); |
135 | |||
136 | if (pages_do_alias(addr, address & PAGE_MASK)) | ||
137 | __flush_purge_region((void *)addr, PAGE_SIZE); | ||
138 | } | ||
139 | } | 147 | } |
140 | } | 148 | } |
141 | 149 | ||
@@ -161,14 +169,21 @@ void flush_cache_all(void) | |||
161 | { | 169 | { |
162 | cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); | 170 | cacheop_on_each_cpu(local_flush_cache_all, NULL, 1); |
163 | } | 171 | } |
172 | EXPORT_SYMBOL(flush_cache_all); | ||
164 | 173 | ||
165 | void flush_cache_mm(struct mm_struct *mm) | 174 | void flush_cache_mm(struct mm_struct *mm) |
166 | { | 175 | { |
176 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
177 | return; | ||
178 | |||
167 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); | 179 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); |
168 | } | 180 | } |
169 | 181 | ||
170 | void flush_cache_dup_mm(struct mm_struct *mm) | 182 | void flush_cache_dup_mm(struct mm_struct *mm) |
171 | { | 183 | { |
184 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
185 | return; | ||
186 | |||
172 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); | 187 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); |
173 | } | 188 | } |
174 | 189 | ||
@@ -195,11 +210,13 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |||
195 | 210 | ||
196 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); | 211 | cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1); |
197 | } | 212 | } |
213 | EXPORT_SYMBOL(flush_cache_range); | ||
198 | 214 | ||
199 | void flush_dcache_page(struct page *page) | 215 | void flush_dcache_page(struct page *page) |
200 | { | 216 | { |
201 | cacheop_on_each_cpu(local_flush_dcache_page, page, 1); | 217 | cacheop_on_each_cpu(local_flush_dcache_page, page, 1); |
202 | } | 218 | } |
219 | EXPORT_SYMBOL(flush_dcache_page); | ||
203 | 220 | ||
204 | void flush_icache_range(unsigned long start, unsigned long end) | 221 | void flush_icache_range(unsigned long start, unsigned long end) |
205 | { | 222 | { |
@@ -265,7 +282,11 @@ static void __init emit_cache_params(void) | |||
265 | 282 | ||
266 | void __init cpu_cache_init(void) | 283 | void __init cpu_cache_init(void) |
267 | { | 284 | { |
268 | unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); | 285 | unsigned int cache_disabled = 0; |
286 | |||
287 | #ifdef CCR | ||
288 | cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); | ||
289 | #endif | ||
269 | 290 | ||
270 | compute_alias(&boot_cpu_data.icache); | 291 | compute_alias(&boot_cpu_data.icache); |
271 | compute_alias(&boot_cpu_data.dcache); | 292 | compute_alias(&boot_cpu_data.dcache); |
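The cacheop_on_each_cpu() hunk above adds a guard so cross-CPU IPIs are only attempted when more than one CPU is online; as the new comment notes, early ioremap() activity on the boot CPU can reach this path with IRQs still disabled, where an IPI would wedge. A sketch of the guarded-broadcast pattern, with stand-in names for the SMP primitives:

    #include <stdio.h>

    /* Stand-ins for the SMP primitives the real function uses. */
    static int num_online_cpus(void) { return 1; }   /* boot CPU only */
    static void ipi_broadcast(void (*f)(void *), void *arg)
    {
        (void)f; (void)arg;   /* would kick the other CPUs */
    }

    static void cacheop_on_each_cpu(void (*func)(void *), void *info)
    {
        /*
         * Skip the IPI path while only the boot CPU is online; issuing
         * IPIs with IRQs disabled would spin forever waiting for acks.
         */
        if (num_online_cpus() > 1)
            ipi_broadcast(func, info);

        func(info);   /* the op always runs locally */
    }

    static void flush_op(void *info) { (void)info; puts("local flush"); }

    int main(void)
    {
        cacheop_on_each_cpu(flush_op, NULL);
        return 0;
    }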
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index e098ec158ddb..c86a08540258 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
@@ -15,11 +15,16 @@ | |||
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | #include <linux/dma-debug.h> | 16 | #include <linux/dma-debug.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/module.h> | ||
19 | #include <linux/gfp.h> | ||
18 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
19 | #include <asm/addrspace.h> | 21 | #include <asm/addrspace.h> |
20 | 22 | ||
21 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 23 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
22 | 24 | ||
25 | struct dma_map_ops *dma_ops; | ||
26 | EXPORT_SYMBOL(dma_ops); | ||
27 | |||
23 | static int __init dma_init(void) | 28 | static int __init dma_init(void) |
24 | { | 29 | { |
25 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 30 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
@@ -27,15 +32,12 @@ static int __init dma_init(void) | |||
27 | } | 32 | } |
28 | fs_initcall(dma_init); | 33 | fs_initcall(dma_init); |
29 | 34 | ||
30 | void *dma_alloc_coherent(struct device *dev, size_t size, | 35 | void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
31 | dma_addr_t *dma_handle, gfp_t gfp) | 36 | dma_addr_t *dma_handle, gfp_t gfp) |
32 | { | 37 | { |
33 | void *ret, *ret_nocache; | 38 | void *ret, *ret_nocache; |
34 | int order = get_order(size); | 39 | int order = get_order(size); |
35 | 40 | ||
36 | if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) | ||
37 | return ret; | ||
38 | |||
39 | ret = (void *)__get_free_pages(gfp, order); | 41 | ret = (void *)__get_free_pages(gfp, order); |
40 | if (!ret) | 42 | if (!ret) |
41 | return NULL; | 43 | return NULL; |
@@ -57,35 +59,26 @@ void *dma_alloc_coherent(struct device *dev, size_t size, | |||
57 | 59 | ||
58 | *dma_handle = virt_to_phys(ret); | 60 | *dma_handle = virt_to_phys(ret); |
59 | 61 | ||
60 | debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache); | ||
61 | |||
62 | return ret_nocache; | 62 | return ret_nocache; |
63 | } | 63 | } |
64 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
65 | 64 | ||
66 | void dma_free_coherent(struct device *dev, size_t size, | 65 | void dma_generic_free_coherent(struct device *dev, size_t size, |
67 | void *vaddr, dma_addr_t dma_handle) | 66 | void *vaddr, dma_addr_t dma_handle) |
68 | { | 67 | { |
69 | int order = get_order(size); | 68 | int order = get_order(size); |
70 | unsigned long pfn = dma_handle >> PAGE_SHIFT; | 69 | unsigned long pfn = dma_handle >> PAGE_SHIFT; |
71 | int k; | 70 | int k; |
72 | 71 | ||
73 | WARN_ON(irqs_disabled()); /* for portability */ | ||
74 | |||
75 | if (dma_release_from_coherent(dev, order, vaddr)) | ||
76 | return; | ||
77 | |||
78 | debug_dma_free_coherent(dev, size, vaddr, dma_handle); | ||
79 | for (k = 0; k < (1 << order); k++) | 72 | for (k = 0; k < (1 << order); k++) |
80 | __free_pages(pfn_to_page(pfn + k), 0); | 73 | __free_pages(pfn_to_page(pfn + k), 0); |
74 | |||
81 | iounmap(vaddr); | 75 | iounmap(vaddr); |
82 | } | 76 | } |
83 | EXPORT_SYMBOL(dma_free_coherent); | ||
84 | 77 | ||
85 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 78 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
86 | enum dma_data_direction direction) | 79 | enum dma_data_direction direction) |
87 | { | 80 | { |
88 | #ifdef CONFIG_CPU_SH5 | 81 | #if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB) |
89 | void *p1addr = vaddr; | 82 | void *p1addr = vaddr; |
90 | #else | 83 | #else |
91 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); | 84 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); |
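dma_generic_alloc_coherent() above grabs 1 << get_order(size) pages, flushes the cached alias, and hands back an uncached view while *dma_handle carries the physical address; the free side releases the same 1 << order pages. A userspace sketch of the get_order() rounding this implies (order_for() is a stand-in, not the kernel helper):

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for get_order(): smallest power-of-two page count >= size. */
    static int order_for(size_t size, size_t page_size)
    {
        int order = 0;
        size_t span = page_size;

        while (span < size) {
            span <<= 1;
            order++;
        }
        return order;
    }

    int main(void)
    {
        /* A 5 KiB request on 4 KiB pages costs two pages (order 1),
         * and all 1 << order pages are freed again on the release side. */
        printf("order(5120) = %d\n", order_for(5120, 4096));
        printf("order(4096) = %d\n", order_for(4096, 4096));
        return 0;
    }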
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 47530104e0ad..8bf79e3b7bdd 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c | |||
@@ -53,6 +53,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | |||
53 | if (!pud_present(*pud_k)) | 53 | if (!pud_present(*pud_k)) |
54 | return NULL; | 54 | return NULL; |
55 | 55 | ||
56 | if (!pud_present(*pud)) | ||
57 | set_pud(pud, *pud_k); | ||
58 | |||
56 | pmd = pmd_offset(pud, address); | 59 | pmd = pmd_offset(pud, address); |
57 | pmd_k = pmd_offset(pud_k, address); | 60 | pmd_k = pmd_offset(pud_k, address); |
58 | if (!pmd_present(*pmd_k)) | 61 | if (!pmd_present(*pmd_k)) |
@@ -371,7 +374,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, | |||
371 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | 374 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); |
372 | #endif | 375 | #endif |
373 | 376 | ||
374 | update_mmu_cache(NULL, address, entry); | 377 | update_mmu_cache(NULL, address, pte); |
375 | 378 | ||
376 | return 0; | 379 | return 0; |
377 | } | 380 | } |
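The first fault_32.c hunk makes vmalloc_sync_one() copy a missing pud entry from the reference (init_mm) tables into the faulting tables before walking down to the pmd; the second passes the freshly looked-up pte, not the stale entry value, to update_mmu_cache(). A userspace model of the copy-missing-upper-entry step:

    #include <stdio.h>
    #include <stddef.h>

    #define ENTRIES 4

    /* Sync one upper-level slot from the reference (kernel) table into a
     * task's table, as vmalloc_sync_one() now does for the pud. */
    static int sync_one(unsigned long *task_tbl, const unsigned long *ref_tbl,
                        size_t idx)
    {
        if (!ref_tbl[idx])        /* nothing mapped there at all */
            return -1;

        if (!task_tbl[idx])       /* task lags behind: copy the entry */
            task_tbl[idx] = ref_tbl[idx];

        return 0;
    }

    int main(void)
    {
        unsigned long ref[ENTRIES]  = { 0, 0x1000, 0, 0x3000 };
        unsigned long task[ENTRIES] = { 0, 0,      0, 0x3000 };

        sync_one(task, ref, 1);
        printf("task[1] = %#lx\n", task[1]);   /* now 0x1000 */
        return 0;
    }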
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 9304117039c4..9163db3e8d15 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/hugetlb.h> | 14 | #include <linux/hugetlb.h> |
15 | #include <linux/pagemap.h> | 15 | #include <linux/pagemap.h> |
16 | #include <linux/slab.h> | ||
17 | #include <linux/sysctl.h> | 16 | #include <linux/sysctl.h> |
18 | 17 | ||
19 | #include <asm/mman.h> | 18 | #include <asm/mman.h> |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 8173e38afd38..c505de61a5ca 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -10,35 +10,25 @@ | |||
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/swap.h> | 11 | #include <linux/swap.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/gfp.h> | ||
13 | #include <linux/bootmem.h> | 14 | #include <linux/bootmem.h> |
14 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
15 | #include <linux/pagemap.h> | 16 | #include <linux/pagemap.h> |
16 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
17 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/dma-mapping.h> | ||
18 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
19 | #include <asm/tlb.h> | 21 | #include <asm/tlb.h> |
20 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
21 | #include <asm/sections.h> | 23 | #include <asm/sections.h> |
22 | #include <asm/cache.h> | 24 | #include <asm/cache.h> |
25 | #include <asm/sizes.h> | ||
23 | 26 | ||
24 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 27 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
25 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; | 28 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
26 | 29 | ||
27 | #ifdef CONFIG_SUPERH32 | ||
28 | /* | ||
29 | * Handle trivial transitions between cached and uncached | ||
30 | * segments, making use of the 1:1 mapping relationship in | ||
31 | * 512MB lowmem. | ||
32 | * | ||
33 | * This is the offset of the uncached section from its cached alias. | ||
34 | * Default value only valid in 29 bit mode, in 32bit mode will be | ||
35 | * overridden in pmb_init. | ||
36 | */ | ||
37 | unsigned long cached_to_uncached = P2SEG - P1SEG; | ||
38 | #endif | ||
39 | |||
40 | #ifdef CONFIG_MMU | 30 | #ifdef CONFIG_MMU |
41 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | 31 | static pte_t *__get_pte_phys(unsigned long addr) |
42 | { | 32 | { |
43 | pgd_t *pgd; | 33 | pgd_t *pgd; |
44 | pud_t *pud; | 34 | pud_t *pud; |
@@ -48,22 +38,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | |||
48 | pgd = pgd_offset_k(addr); | 38 | pgd = pgd_offset_k(addr); |
49 | if (pgd_none(*pgd)) { | 39 | if (pgd_none(*pgd)) { |
50 | pgd_ERROR(*pgd); | 40 | pgd_ERROR(*pgd); |
51 | return; | 41 | return NULL; |
52 | } | 42 | } |
53 | 43 | ||
54 | pud = pud_alloc(NULL, pgd, addr); | 44 | pud = pud_alloc(NULL, pgd, addr); |
55 | if (unlikely(!pud)) { | 45 | if (unlikely(!pud)) { |
56 | pud_ERROR(*pud); | 46 | pud_ERROR(*pud); |
57 | return; | 47 | return NULL; |
58 | } | 48 | } |
59 | 49 | ||
60 | pmd = pmd_alloc(NULL, pud, addr); | 50 | pmd = pmd_alloc(NULL, pud, addr); |
61 | if (unlikely(!pmd)) { | 51 | if (unlikely(!pmd)) { |
62 | pmd_ERROR(*pmd); | 52 | pmd_ERROR(*pmd); |
63 | return; | 53 | return NULL; |
64 | } | 54 | } |
65 | 55 | ||
66 | pte = pte_offset_kernel(pmd, addr); | 56 | pte = pte_offset_kernel(pmd, addr); |
57 | return pte; | ||
58 | } | ||
59 | |||
60 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | ||
61 | { | ||
62 | pte_t *pte; | ||
63 | |||
64 | pte = __get_pte_phys(addr); | ||
67 | if (!pte_none(*pte)) { | 65 | if (!pte_none(*pte)) { |
68 | pte_ERROR(*pte); | 66 | pte_ERROR(*pte); |
69 | return; | 67 | return; |
@@ -71,23 +69,24 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | |||
71 | 69 | ||
72 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | 70 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); |
73 | local_flush_tlb_one(get_asid(), addr); | 71 | local_flush_tlb_one(get_asid(), addr); |
72 | |||
73 | if (pgprot_val(prot) & _PAGE_WIRED) | ||
74 | tlb_wire_entry(NULL, addr, *pte); | ||
75 | } | ||
76 | |||
77 | static void clear_pte_phys(unsigned long addr, pgprot_t prot) | ||
78 | { | ||
79 | pte_t *pte; | ||
80 | |||
81 | pte = __get_pte_phys(addr); | ||
82 | |||
83 | if (pgprot_val(prot) & _PAGE_WIRED) | ||
84 | tlb_unwire_entry(); | ||
85 | |||
86 | set_pte(pte, pfn_pte(0, __pgprot(0))); | ||
87 | local_flush_tlb_one(get_asid(), addr); | ||
74 | } | 88 | } |
75 | 89 | ||
76 | /* | ||
77 | * As a performance optimization, other platforms preserve the fixmap mapping | ||
78 | * across a context switch, we don't presently do this, but this could be done | ||
79 | * in a similar fashion as to the wired TLB interface that sh64 uses (by way | ||
80 | * of the memory mapped UTLB configuration) -- this unfortunately forces us to | ||
81 | * give up a TLB entry for each mapping we want to preserve. While this may be | ||
82 | * viable for a small number of fixmaps, it's not particularly useful for | ||
83 | * everything and needs to be carefully evaluated. (ie, we may want this for | ||
84 | * the vsyscall page). | ||
85 | * | ||
86 | * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass | ||
87 | * in at __set_fixmap() time to determine the appropriate behavior to follow. | ||
88 | * | ||
89 | * -- PFM. | ||
90 | */ | ||
91 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | 90 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) |
92 | { | 91 | { |
93 | unsigned long address = __fix_to_virt(idx); | 92 | unsigned long address = __fix_to_virt(idx); |
@@ -100,6 +99,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | |||
100 | set_pte_phys(address, phys, prot); | 99 | set_pte_phys(address, phys, prot); |
101 | } | 100 | } |
102 | 101 | ||
102 | void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot) | ||
103 | { | ||
104 | unsigned long address = __fix_to_virt(idx); | ||
105 | |||
106 | if (idx >= __end_of_fixed_addresses) { | ||
107 | BUG(); | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | clear_pte_phys(address, prot); | ||
112 | } | ||
113 | |||
103 | void __init page_table_range_init(unsigned long start, unsigned long end, | 114 | void __init page_table_range_init(unsigned long start, unsigned long end, |
104 | pgd_t *pgd_base) | 115 | pgd_t *pgd_base) |
105 | { | 116 | { |
@@ -119,7 +130,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end, | |||
119 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { | 130 | for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { |
120 | pud = (pud_t *)pgd; | 131 | pud = (pud_t *)pgd; |
121 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { | 132 | for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { |
133 | #ifdef __PAGETABLE_PMD_FOLDED | ||
122 | pmd = (pmd_t *)pud; | 134 | pmd = (pmd_t *)pud; |
135 | #else | ||
136 | pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); | ||
137 | pud_populate(&init_mm, pud, pmd); | ||
138 | pmd += k; | ||
139 | #endif | ||
123 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { | 140 | for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { |
124 | if (pmd_none(*pmd)) { | 141 | if (pmd_none(*pmd)) { |
125 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | 142 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); |
@@ -181,16 +198,25 @@ void __init paging_init(void) | |||
181 | } | 198 | } |
182 | 199 | ||
183 | free_area_init_nodes(max_zone_pfns); | 200 | free_area_init_nodes(max_zone_pfns); |
201 | } | ||
184 | 202 | ||
185 | /* Set up the uncached fixmap */ | 203 | /* |
186 | set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start)); | 204 | * Early initialization for any I/O MMUs we might have. |
205 | */ | ||
206 | static void __init iommu_init(void) | ||
207 | { | ||
208 | no_iommu_init(); | ||
187 | } | 209 | } |
188 | 210 | ||
211 | unsigned int mem_init_done = 0; | ||
212 | |||
189 | void __init mem_init(void) | 213 | void __init mem_init(void) |
190 | { | 214 | { |
191 | int codesize, datasize, initsize; | 215 | int codesize, datasize, initsize; |
192 | int nid; | 216 | int nid; |
193 | 217 | ||
218 | iommu_init(); | ||
219 | |||
194 | num_physpages = 0; | 220 | num_physpages = 0; |
195 | high_memory = NULL; | 221 | high_memory = NULL; |
196 | 222 | ||
@@ -220,6 +246,8 @@ void __init mem_init(void) | |||
220 | memset(empty_zero_page, 0, PAGE_SIZE); | 246 | memset(empty_zero_page, 0, PAGE_SIZE); |
221 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | 247 | __flush_wback_region(empty_zero_page, PAGE_SIZE); |
222 | 248 | ||
249 | vsyscall_init(); | ||
250 | |||
223 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 251 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
224 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | 252 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; |
225 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 253 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
@@ -232,8 +260,48 @@ void __init mem_init(void) | |||
232 | datasize >> 10, | 260 | datasize >> 10, |
233 | initsize >> 10); | 261 | initsize >> 10); |
234 | 262 | ||
235 | /* Initialize the vDSO */ | 263 | printk(KERN_INFO "virtual kernel memory layout:\n" |
236 | vsyscall_init(); | 264 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
265 | #ifdef CONFIG_HIGHMEM | ||
266 | " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
267 | #endif | ||
268 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
269 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n" | ||
270 | #ifdef CONFIG_UNCACHED_MAPPING | ||
271 | " : 0x%08lx - 0x%08lx (%4ld MB) (uncached)\n" | ||
272 | #endif | ||
273 | " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
274 | " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" | ||
275 | " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", | ||
276 | FIXADDR_START, FIXADDR_TOP, | ||
277 | (FIXADDR_TOP - FIXADDR_START) >> 10, | ||
278 | |||
279 | #ifdef CONFIG_HIGHMEM | ||
280 | PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, | ||
281 | (LAST_PKMAP*PAGE_SIZE) >> 10, | ||
282 | #endif | ||
283 | |||
284 | (unsigned long)VMALLOC_START, VMALLOC_END, | ||
285 | (VMALLOC_END - VMALLOC_START) >> 20, | ||
286 | |||
287 | (unsigned long)memory_start, (unsigned long)high_memory, | ||
288 | ((unsigned long)high_memory - (unsigned long)memory_start) >> 20, | ||
289 | |||
290 | #ifdef CONFIG_UNCACHED_MAPPING | ||
291 | uncached_start, uncached_end, uncached_size >> 20, | ||
292 | #endif | ||
293 | |||
294 | (unsigned long)&__init_begin, (unsigned long)&__init_end, | ||
295 | ((unsigned long)&__init_end - | ||
296 | (unsigned long)&__init_begin) >> 10, | ||
297 | |||
298 | (unsigned long)&_etext, (unsigned long)&_edata, | ||
299 | ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, | ||
300 | |||
301 | (unsigned long)&_text, (unsigned long)&_etext, | ||
302 | ((unsigned long)&_etext - (unsigned long)&_text) >> 10); | ||
303 | |||
304 | mem_init_done = 1; | ||
237 | } | 305 | } |
238 | 306 | ||
239 | void free_initmem(void) | 307 | void free_initmem(void) |
@@ -266,35 +334,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) | |||
266 | } | 334 | } |
267 | #endif | 335 | #endif |
268 | 336 | ||
269 | #if THREAD_SHIFT < PAGE_SHIFT | ||
270 | static struct kmem_cache *thread_info_cache; | ||
271 | |||
272 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | ||
273 | { | ||
274 | struct thread_info *ti; | ||
275 | |||
276 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | ||
277 | if (unlikely(ti == NULL)) | ||
278 | return NULL; | ||
279 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
280 | memset(ti, 0, THREAD_SIZE); | ||
281 | #endif | ||
282 | return ti; | ||
283 | } | ||
284 | |||
285 | void free_thread_info(struct thread_info *ti) | ||
286 | { | ||
287 | kmem_cache_free(thread_info_cache, ti); | ||
288 | } | ||
289 | |||
290 | void thread_info_cache_init(void) | ||
291 | { | ||
292 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | ||
293 | THREAD_SIZE, 0, NULL); | ||
294 | BUG_ON(thread_info_cache == NULL); | ||
295 | } | ||
296 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | ||
297 | |||
298 | #ifdef CONFIG_MEMORY_HOTPLUG | 337 | #ifdef CONFIG_MEMORY_HOTPLUG |
299 | int arch_add_memory(int nid, u64 start, u64 size) | 338 | int arch_add_memory(int nid, u64 start, u64 size) |
300 | { | 339 | { |
@@ -323,4 +362,5 @@ int memory_add_physaddr_to_nid(u64 addr) | |||
323 | } | 362 | } |
324 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | 363 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); |
325 | #endif | 364 | #endif |
365 | |||
326 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 366 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
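The init.c rework above funnels both set_pte_phys() and the new clear_pte_phys() through a shared __get_pte_phys() walker, and wires or unwires a TLB entry whenever the protection bits carry _PAGE_WIRED; __clear_fixmap() exposes the teardown half to iounmap_fixed(). A userspace model of that symmetric wire/unwire bookkeeping (PAGE_WIRED and the counter below are stand-ins):

    #include <stdio.h>

    #define PAGE_WIRED (1u << 0)   /* stand-in for _PAGE_WIRED */

    static int wired_slots;        /* models the wired-TLB entry count */

    static void set_entry(unsigned long *pte, unsigned long phys,
                          unsigned int prot)
    {
        *pte = phys | prot;
        if (prot & PAGE_WIRED)
            wired_slots++;         /* tlb_wire_entry() in the real code */
    }

    static void clear_entry(unsigned long *pte, unsigned int prot)
    {
        if (prot & PAGE_WIRED)
            wired_slots--;         /* tlb_unwire_entry() */
        *pte = 0;
    }

    int main(void)
    {
        unsigned long pte = 0;

        set_entry(&pte, 0x40000000, PAGE_WIRED);
        printf("wired=%d pte=%#lx\n", wired_slots, pte);
        clear_entry(&pte, PAGE_WIRED);
        printf("wired=%d pte=%#lx\n", wired_slots, pte);
        return 0;
    }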
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c new file mode 100644 index 000000000000..0c99ec2e7ed8 --- /dev/null +++ b/arch/sh/mm/ioremap.c | |||
@@ -0,0 +1,137 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/ioremap.c | ||
3 | * | ||
4 | * (C) Copyright 1995 1996 Linus Torvalds | ||
5 | * (C) Copyright 2005 - 2010 Paul Mundt | ||
6 | * | ||
7 | * Re-map IO memory to kernel address space so that we can access it. | ||
8 | * This is needed for high PCI addresses that aren't mapped in the | ||
9 | * 640k-1MB IO memory area on PC's | ||
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General | ||
12 | * Public License. See the file "COPYING" in the main directory of this | ||
13 | * archive for more details. | ||
14 | */ | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <asm/pgalloc.h> | ||
23 | #include <asm/addrspace.h> | ||
24 | #include <asm/cacheflush.h> | ||
25 | #include <asm/tlbflush.h> | ||
26 | #include <asm/mmu.h> | ||
27 | |||
28 | /* | ||
29 | * Remap an arbitrary physical address space into the kernel virtual | ||
30 | * address space. Needed when the kernel wants to access high addresses | ||
31 | * directly. | ||
32 | * | ||
33 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
34 | * have to convert them into an offset in a page-aligned mapping, but the | ||
35 | * caller shouldn't need to know that small detail. | ||
36 | */ | ||
37 | void __iomem * __init_refok | ||
38 | __ioremap_caller(phys_addr_t phys_addr, unsigned long size, | ||
39 | pgprot_t pgprot, void *caller) | ||
40 | { | ||
41 | struct vm_struct *area; | ||
42 | unsigned long offset, last_addr, addr, orig_addr; | ||
43 | void __iomem *mapped; | ||
44 | |||
45 | /* Don't allow wraparound or zero size */ | ||
46 | last_addr = phys_addr + size - 1; | ||
47 | if (!size || last_addr < phys_addr) | ||
48 | return NULL; | ||
49 | |||
50 | /* | ||
51 | * If we can't yet use the regular approach, go the fixmap route. | ||
52 | */ | ||
53 | if (!mem_init_done) | ||
54 | return ioremap_fixed(phys_addr, size, pgprot); | ||
55 | |||
56 | /* | ||
57 | * First try to remap through the PMB. | ||
58 | * PMB entries are all pre-faulted. | ||
59 | */ | ||
60 | mapped = pmb_remap_caller(phys_addr, size, pgprot, caller); | ||
61 | if (mapped && !IS_ERR(mapped)) | ||
62 | return mapped; | ||
63 | |||
64 | /* | ||
65 | * Mappings have to be page-aligned | ||
66 | */ | ||
67 | offset = phys_addr & ~PAGE_MASK; | ||
68 | phys_addr &= PAGE_MASK; | ||
69 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | ||
70 | |||
71 | /* | ||
72 | * Ok, go for it.. | ||
73 | */ | ||
74 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | ||
75 | if (!area) | ||
76 | return NULL; | ||
77 | area->phys_addr = phys_addr; | ||
78 | orig_addr = addr = (unsigned long)area->addr; | ||
79 | |||
80 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { | ||
81 | vunmap((void *)orig_addr); | ||
82 | return NULL; | ||
83 | } | ||
84 | |||
85 | return (void __iomem *)(offset + (char *)orig_addr); | ||
86 | } | ||
87 | EXPORT_SYMBOL(__ioremap_caller); | ||
88 | |||
89 | /* | ||
90 | * Simple checks for non-translatable mappings. | ||
91 | */ | ||
92 | static inline int iomapping_nontranslatable(unsigned long offset) | ||
93 | { | ||
94 | #ifdef CONFIG_29BIT | ||
95 | /* | ||
96 | * In 29-bit mode this includes the fixed P1/P2 areas, as well as | ||
97 | * parts of P3. | ||
98 | */ | ||
99 | if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX) | ||
100 | return 1; | ||
101 | #endif | ||
102 | |||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | void __iounmap(void __iomem *addr) | ||
107 | { | ||
108 | unsigned long vaddr = (unsigned long __force)addr; | ||
109 | struct vm_struct *p; | ||
110 | |||
111 | /* | ||
112 | * Nothing to do if there is no translatable mapping. | ||
113 | */ | ||
114 | if (iomapping_nontranslatable(vaddr)) | ||
115 | return; | ||
116 | |||
117 | /* | ||
118 | * There's no VMA if it's from an early fixed mapping. | ||
119 | */ | ||
120 | if (iounmap_fixed(addr) == 0) | ||
121 | return; | ||
122 | |||
123 | /* | ||
124 | * If the PMB handled it, there's nothing else to do. | ||
125 | */ | ||
126 | if (pmb_unmap(addr) == 0) | ||
127 | return; | ||
128 | |||
129 | p = remove_vm_area((void *)(vaddr & PAGE_MASK)); | ||
130 | if (!p) { | ||
131 | printk(KERN_ERR "%s: bad address %p\n", __func__, addr); | ||
132 | return; | ||
133 | } | ||
134 | |||
135 | kfree(p); | ||
136 | } | ||
137 | EXPORT_SYMBOL(__iounmap); | ||
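The new __ioremap_caller() above tries three strategies in a fixed order: the fixmap path while mem_init_done is still clear, then a pre-faulted PMB mapping, then conventional page tables via get_vm_area_caller()/ioremap_page_range(). A sketch of that dispatch, with stub mappers standing in for the three real paths:

    #include <stdio.h>

    static int mem_init_done;   /* set once the core VM is up */

    /* Stubs standing in for the three real mapping paths. */
    static void *map_fixed(unsigned long p, unsigned long s)
    { (void)p; (void)s; return (void *)0xe0000000UL; }
    static void *map_pmb(unsigned long p, unsigned long s)
    { (void)p; (void)s; return NULL; }      /* pretend no PMB slot fits */
    static void *map_pages(unsigned long p, unsigned long s)
    { (void)p; (void)s; return (void *)0xc0000000UL; }

    static void *ioremap_model(unsigned long phys, unsigned long size)
    {
        if (!mem_init_done)                 /* early boot: fixmap only */
            return map_fixed(phys, size);

        void *p = map_pmb(phys, size);      /* PMB entries are pre-faulted */
        if (p)
            return p;

        return map_pages(phys, size);       /* fall back to page tables */
    }

    int main(void)
    {
        printf("early: %p\n", ioremap_model(0xfd000000UL, 0x1000));
        mem_init_done = 1;
        printf("late:  %p\n", ioremap_model(0xfd000000UL, 0x1000));
        return 0;
    }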
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c deleted file mode 100644 index a86eaa9d75a5..000000000000 --- a/arch/sh/mm/ioremap_32.c +++ /dev/null | |||
@@ -1,145 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/ioremap.c | ||
3 | * | ||
4 | * Re-map IO memory to kernel address space so that we can access it. | ||
5 | * This is needed for high PCI addresses that aren't mapped in the | ||
6 | * 640k-1MB IO memory area on PC's | ||
7 | * | ||
8 | * (C) Copyright 1995 1996 Linus Torvalds | ||
9 | * (C) Copyright 2005, 2006 Paul Mundt | ||
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General | ||
12 | * Public License. See the file "COPYING" in the main directory of this | ||
13 | * archive for more details. | ||
14 | */ | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/addrspace.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | #include <asm/mmu.h> | ||
26 | |||
27 | /* | ||
28 | * Remap an arbitrary physical address space into the kernel virtual | ||
29 | * address space. Needed when the kernel wants to access high addresses | ||
30 | * directly. | ||
31 | * | ||
32 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
33 | * have to convert them into an offset in a page-aligned mapping, but the | ||
34 | * caller shouldn't need to know that small detail. | ||
35 | */ | ||
36 | void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, | ||
37 | unsigned long flags) | ||
38 | { | ||
39 | struct vm_struct * area; | ||
40 | unsigned long offset, last_addr, addr, orig_addr; | ||
41 | pgprot_t pgprot; | ||
42 | |||
43 | /* Don't allow wraparound or zero size */ | ||
44 | last_addr = phys_addr + size - 1; | ||
45 | if (!size || last_addr < phys_addr) | ||
46 | return NULL; | ||
47 | |||
48 | /* | ||
49 | * If we're in the fixed PCI memory range, mapping through page | ||
50 | * tables is not only pointless, but also fundamentally broken. | ||
51 | * Just return the physical address instead. | ||
52 | * | ||
53 | * For boards that map a small PCI memory aperture somewhere in | ||
54 | * P1/P2 space, ioremap() will already do the right thing, | ||
55 | * and we'll never get this far. | ||
56 | */ | ||
57 | if (is_pci_memory_fixed_range(phys_addr, size)) | ||
58 | return (void __iomem *)phys_addr; | ||
59 | |||
60 | /* | ||
61 | * Mappings have to be page-aligned | ||
62 | */ | ||
63 | offset = phys_addr & ~PAGE_MASK; | ||
64 | phys_addr &= PAGE_MASK; | ||
65 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | ||
66 | |||
67 | /* | ||
68 | * Ok, go for it.. | ||
69 | */ | ||
70 | area = get_vm_area(size, VM_IOREMAP); | ||
71 | if (!area) | ||
72 | return NULL; | ||
73 | area->phys_addr = phys_addr; | ||
74 | orig_addr = addr = (unsigned long)area->addr; | ||
75 | |||
76 | #ifdef CONFIG_PMB | ||
77 | /* | ||
78 | * First try to remap through the PMB once a valid VMA has been | ||
79 | * established. Smaller allocations (or the rest of the size | ||
80 | * remaining after a PMB mapping due to the size not being | ||
81 | * perfectly aligned on a PMB size boundary) are then mapped | ||
82 | * through the UTLB using conventional page tables. | ||
83 | * | ||
84 | * PMB entries are all pre-faulted. | ||
85 | */ | ||
86 | if (unlikely(phys_addr >= P1SEG)) { | ||
87 | unsigned long mapped = pmb_remap(addr, phys_addr, size, flags); | ||
88 | |||
89 | if (likely(mapped)) { | ||
90 | addr += mapped; | ||
91 | phys_addr += mapped; | ||
92 | size -= mapped; | ||
93 | } | ||
94 | } | ||
95 | #endif | ||
96 | |||
97 | pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); | ||
98 | if (likely(size)) | ||
99 | if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { | ||
100 | vunmap((void *)orig_addr); | ||
101 | return NULL; | ||
102 | } | ||
103 | |||
104 | return (void __iomem *)(offset + (char *)orig_addr); | ||
105 | } | ||
106 | EXPORT_SYMBOL(__ioremap); | ||
107 | |||
108 | void __iounmap(void __iomem *addr) | ||
109 | { | ||
110 | unsigned long vaddr = (unsigned long __force)addr; | ||
111 | unsigned long seg = PXSEG(vaddr); | ||
112 | struct vm_struct *p; | ||
113 | |||
114 | if (seg < P3SEG || vaddr >= P3_ADDR_MAX) | ||
115 | return; | ||
116 | if (is_pci_memory_fixed_range(vaddr, 0)) | ||
117 | return; | ||
118 | |||
119 | #ifdef CONFIG_PMB | ||
120 | /* | ||
121 | * Purge any PMB entries that may have been established for this | ||
122 | * mapping, then proceed with conventional VMA teardown. | ||
123 | * | ||
124 | * XXX: Note that due to the way that remove_vm_area() does | ||
125 | * matching of the resultant VMA, we aren't able to fast-forward | ||
126 | * the address past the PMB space until the end of the VMA where | ||
127 | * the page tables reside. As such, unmap_vm_area() will be | ||
128 | * forced to linearly scan over the area until it finds the page | ||
129 | * tables where PTEs that need to be unmapped actually reside, | ||
130 | * which is far from optimal. Perhaps we need to use a separate | ||
131 | * VMA for the PMB mappings? | ||
132 | * -- PFM. | ||
133 | */ | ||
134 | pmb_unmap(vaddr); | ||
135 | #endif | ||
136 | |||
137 | p = remove_vm_area((void *)(vaddr & PAGE_MASK)); | ||
138 | if (!p) { | ||
139 | printk(KERN_ERR "%s: bad address %p\n", __func__, addr); | ||
140 | return; | ||
141 | } | ||
142 | |||
143 | kfree(p); | ||
144 | } | ||
145 | EXPORT_SYMBOL(__iounmap); | ||
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c deleted file mode 100644 index b16843d02b76..000000000000 --- a/arch/sh/mm/ioremap_64.c +++ /dev/null | |||
@@ -1,326 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/ioremap_64.c | ||
3 | * | ||
4 | * Copyright (C) 2000, 2001 Paolo Alberelli | ||
5 | * Copyright (C) 2003 - 2007 Paul Mundt | ||
6 | * | ||
7 | * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly | ||
8 | * derived from arch/i386/mm/ioremap.c . | ||
9 | * | ||
10 | * (C) Copyright 1995 1996 Linus Torvalds | ||
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/proc_fs.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <asm/page.h> | ||
25 | #include <asm/pgalloc.h> | ||
26 | #include <asm/addrspace.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | #include <asm/tlbflush.h> | ||
29 | #include <asm/mmu.h> | ||
30 | |||
31 | static struct resource shmedia_iomap = { | ||
32 | .name = "shmedia_iomap", | ||
33 | .start = IOBASE_VADDR + PAGE_SIZE, | ||
34 | .end = IOBASE_END - 1, | ||
35 | }; | ||
36 | |||
37 | static void shmedia_mapioaddr(unsigned long pa, unsigned long va, | ||
38 | unsigned long flags); | ||
39 | static void shmedia_unmapioaddr(unsigned long vaddr); | ||
40 | static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, | ||
41 | int sz, unsigned long flags); | ||
42 | |||
43 | /* | ||
44 | * We have the same problem as the SPARC, so lets have the same comment: | ||
45 | * Our mini-allocator... | ||
46 | * Boy this is gross! We need it because we must map I/O for | ||
47 | * timers and interrupt controller before the kmalloc is available. | ||
48 | */ | ||
49 | |||
50 | #define XNMLN 15 | ||
51 | #define XNRES 10 | ||
52 | |||
53 | struct xresource { | ||
54 | struct resource xres; /* Must be first */ | ||
55 | int xflag; /* 1 == used */ | ||
56 | char xname[XNMLN+1]; | ||
57 | }; | ||
58 | |||
59 | static struct xresource xresv[XNRES]; | ||
60 | |||
61 | static struct xresource *xres_alloc(void) | ||
62 | { | ||
63 | struct xresource *xrp; | ||
64 | int n; | ||
65 | |||
66 | xrp = xresv; | ||
67 | for (n = 0; n < XNRES; n++) { | ||
68 | if (xrp->xflag == 0) { | ||
69 | xrp->xflag = 1; | ||
70 | return xrp; | ||
71 | } | ||
72 | xrp++; | ||
73 | } | ||
74 | return NULL; | ||
75 | } | ||
76 | |||
77 | static void xres_free(struct xresource *xrp) | ||
78 | { | ||
79 | xrp->xflag = 0; | ||
80 | } | ||
81 | |||
82 | static struct resource *shmedia_find_resource(struct resource *root, | ||
83 | unsigned long vaddr) | ||
84 | { | ||
85 | struct resource *res; | ||
86 | |||
87 | for (res = root->child; res; res = res->sibling) | ||
88 | if (res->start <= vaddr && res->end >= vaddr) | ||
89 | return res; | ||
90 | |||
91 | return NULL; | ||
92 | } | ||
93 | |||
94 | static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size, | ||
95 | const char *name, unsigned long flags) | ||
96 | { | ||
97 | struct xresource *xres; | ||
98 | struct resource *res; | ||
99 | char *tack; | ||
100 | int tlen; | ||
101 | |||
102 | if (name == NULL) | ||
103 | name = "???"; | ||
104 | |||
105 | xres = xres_alloc(); | ||
106 | if (xres != 0) { | ||
107 | tack = xres->xname; | ||
108 | res = &xres->xres; | ||
109 | } else { | ||
110 | printk_once(KERN_NOTICE "%s: done with statics, " | ||
111 | "switching to kmalloc\n", __func__); | ||
112 | tlen = strlen(name); | ||
113 | tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL); | ||
114 | if (!tack) | ||
115 | return NULL; | ||
116 | memset(tack, 0, sizeof(struct resource)); | ||
117 | res = (struct resource *) tack; | ||
118 | tack += sizeof(struct resource); | ||
119 | } | ||
120 | |||
121 | strncpy(tack, name, XNMLN); | ||
122 | tack[XNMLN] = 0; | ||
123 | res->name = tack; | ||
124 | |||
125 | return shmedia_ioremap(res, phys, size, flags); | ||
126 | } | ||
127 | |||
128 | static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz, | ||
129 | unsigned long flags) | ||
130 | { | ||
131 | unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK); | ||
132 | unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK; | ||
133 | unsigned long va; | ||
134 | unsigned int psz; | ||
135 | |||
136 | if (allocate_resource(&shmedia_iomap, res, round_sz, | ||
137 | shmedia_iomap.start, shmedia_iomap.end, | ||
138 | PAGE_SIZE, NULL, NULL) != 0) { | ||
139 | panic("alloc_io_res(%s): cannot occupy\n", | ||
140 | (res->name != NULL) ? res->name : "???"); | ||
141 | } | ||
142 | |||
143 | va = res->start; | ||
144 | pa &= PAGE_MASK; | ||
145 | |||
146 | psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE; | ||
147 | |||
148 | for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) { | ||
149 | shmedia_mapioaddr(pa, va, flags); | ||
150 | va += PAGE_SIZE; | ||
151 | pa += PAGE_SIZE; | ||
152 | } | ||
153 | |||
154 | return (void __iomem *)(unsigned long)(res->start + offset); | ||
155 | } | ||
156 | |||
157 | static void shmedia_free_io(struct resource *res) | ||
158 | { | ||
159 | unsigned long len = res->end - res->start + 1; | ||
160 | |||
161 | BUG_ON((len & (PAGE_SIZE - 1)) != 0); | ||
162 | |||
163 | while (len) { | ||
164 | len -= PAGE_SIZE; | ||
165 | shmedia_unmapioaddr(res->start + len); | ||
166 | } | ||
167 | |||
168 | release_resource(res); | ||
169 | } | ||
170 | |||
171 | static __init_refok void *sh64_get_page(void) | ||
172 | { | ||
173 | void *page; | ||
174 | |||
175 | if (slab_is_available()) | ||
176 | page = (void *)get_zeroed_page(GFP_KERNEL); | ||
177 | else | ||
178 | page = alloc_bootmem_pages(PAGE_SIZE); | ||
179 | |||
180 | if (!page || ((unsigned long)page & ~PAGE_MASK)) | ||
181 | panic("sh64_get_page: Out of memory already?\n"); | ||
182 | |||
183 | return page; | ||
184 | } | ||
185 | |||
186 | static void shmedia_mapioaddr(unsigned long pa, unsigned long va, | ||
187 | unsigned long flags) | ||
188 | { | ||
189 | pgd_t *pgdp; | ||
190 | pud_t *pudp; | ||
191 | pmd_t *pmdp; | ||
192 | pte_t *ptep, pte; | ||
193 | pgprot_t prot; | ||
194 | |||
195 | pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va); | ||
196 | |||
197 | if (!flags) | ||
198 | flags = 1; /* 1 = CB0-1 device */ | ||
199 | |||
200 | pgdp = pgd_offset_k(va); | ||
201 | if (pgd_none(*pgdp) || !pgd_present(*pgdp)) { | ||
202 | pudp = (pud_t *)sh64_get_page(); | ||
203 | set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE)); | ||
204 | } | ||
205 | |||
206 | pudp = pud_offset(pgdp, va); | ||
207 | if (pud_none(*pudp) || !pud_present(*pudp)) { | ||
208 | pmdp = (pmd_t *)sh64_get_page(); | ||
209 | set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE)); | ||
210 | } | ||
211 | |||
212 | pmdp = pmd_offset(pudp, va); | ||
213 | if (pmd_none(*pmdp) || !pmd_present(*pmdp)) { | ||
214 | ptep = (pte_t *)sh64_get_page(); | ||
215 | set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE)); | ||
216 | } | ||
217 | |||
218 | prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | | ||
219 | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags); | ||
220 | |||
221 | pte = pfn_pte(pa >> PAGE_SHIFT, prot); | ||
222 | ptep = pte_offset_kernel(pmdp, va); | ||
223 | |||
224 | if (!pte_none(*ptep) && | ||
225 | pte_val(*ptep) != pte_val(pte)) | ||
226 | pte_ERROR(*ptep); | ||
227 | |||
228 | set_pte(ptep, pte); | ||
229 | |||
230 | flush_tlb_kernel_range(va, PAGE_SIZE); | ||
231 | } | ||
232 | |||
233 | static void shmedia_unmapioaddr(unsigned long vaddr) | ||
234 | { | ||
235 | pgd_t *pgdp; | ||
236 | pud_t *pudp; | ||
237 | pmd_t *pmdp; | ||
238 | pte_t *ptep; | ||
239 | |||
240 | pgdp = pgd_offset_k(vaddr); | ||
241 | if (pgd_none(*pgdp) || pgd_bad(*pgdp)) | ||
242 | return; | ||
243 | |||
244 | pudp = pud_offset(pgdp, vaddr); | ||
245 | if (pud_none(*pudp) || pud_bad(*pudp)) | ||
246 | return; | ||
247 | |||
248 | pmdp = pmd_offset(pudp, vaddr); | ||
249 | if (pmd_none(*pmdp) || pmd_bad(*pmdp)) | ||
250 | return; | ||
251 | |||
252 | ptep = pte_offset_kernel(pmdp, vaddr); | ||
253 | |||
254 | if (pte_none(*ptep) || !pte_present(*ptep)) | ||
255 | return; | ||
256 | |||
257 | clear_page((void *)ptep); | ||
258 | pte_clear(&init_mm, vaddr, ptep); | ||
259 | } | ||
260 | |||
261 | void __iomem *__ioremap(unsigned long offset, unsigned long size, | ||
262 | unsigned long flags) | ||
263 | { | ||
264 | char name[14]; | ||
265 | |||
266 | sprintf(name, "phys_%08x", (u32)offset); | ||
267 | return shmedia_alloc_io(offset, size, name, flags); | ||
268 | } | ||
269 | EXPORT_SYMBOL(__ioremap); | ||
270 | |||
271 | void __iounmap(void __iomem *virtual) | ||
272 | { | ||
273 | unsigned long vaddr = (unsigned long)virtual & PAGE_MASK; | ||
274 | struct resource *res; | ||
275 | unsigned int psz; | ||
276 | |||
277 | res = shmedia_find_resource(&shmedia_iomap, vaddr); | ||
278 | if (!res) { | ||
279 | printk(KERN_ERR "%s: Failed to free 0x%08lx\n", | ||
280 | __func__, vaddr); | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE; | ||
285 | |||
286 | shmedia_free_io(res); | ||
287 | |||
288 | if ((char *)res >= (char *)xresv && | ||
289 | (char *)res < (char *)&xresv[XNRES]) { | ||
290 | xres_free((struct xresource *)res); | ||
291 | } else { | ||
292 | kfree(res); | ||
293 | } | ||
294 | } | ||
295 | EXPORT_SYMBOL(__iounmap); | ||
296 | |||
297 | static int | ||
298 | ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, | ||
299 | void *data) | ||
300 | { | ||
301 | char *p = buf, *e = buf + length; | ||
302 | struct resource *r; | ||
303 | const char *nm; | ||
304 | |||
305 | for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { | ||
306 | if (p + 32 >= e) /* Better than nothing */ | ||
307 | break; | ||
308 | nm = r->name; | ||
309 | if (nm == NULL) | ||
310 | nm = "???"; | ||
311 | |||
312 | p += sprintf(p, "%08lx-%08lx: %s\n", | ||
313 | (unsigned long)r->start, | ||
314 | (unsigned long)r->end, nm); | ||
315 | } | ||
316 | |||
317 | return p-buf; | ||
318 | } | ||
319 | |||
320 | static int __init register_proc_onchip(void) | ||
321 | { | ||
322 | create_proc_read_entry("io_map", 0, 0, ioremap_proc_info, | ||
323 | &shmedia_iomap); | ||
324 | return 0; | ||
325 | } | ||
326 | late_initcall(register_proc_onchip); | ||
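The deleted ioremap_64.c carried sh64's resource mini-allocator: a small static pool of xresource slots usable before kmalloc is up, spilling to kmalloc once the statics run out, with frees routed back by an address-range check. A userspace model of that static-pool-then-heap pattern:

    #include <stdio.h>
    #include <stdlib.h>

    #define NSTATIC 2

    struct res { int used; };
    static struct res static_pool[NSTATIC];

    /* Take a slot from the static pool first; spill to the heap once the
     * pool is exhausted (kmalloc in the original). */
    static struct res *res_alloc(void)
    {
        for (int i = 0; i < NSTATIC; i++) {
            if (!static_pool[i].used) {
                static_pool[i].used = 1;
                return &static_pool[i];
            }
        }
        return calloc(1, sizeof(struct res));
    }

    static void res_free(struct res *r)
    {
        if (r >= static_pool && r < static_pool + NSTATIC)
            r->used = 0;       /* back into the pool */
        else
            free(r);           /* heap allocation */
    }

    int main(void)
    {
        struct res *a = res_alloc(), *b = res_alloc(), *c = res_alloc();
        printf("c from heap: %d\n",
               !(c >= static_pool && c < static_pool + NSTATIC));
        res_free(c); res_free(b); res_free(a);
        return 0;
    }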
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c new file mode 100644 index 000000000000..efbe84af9983 --- /dev/null +++ b/arch/sh/mm/ioremap_fixed.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * Re-map IO memory to kernel address space so that we can access it. | ||
3 | * | ||
4 | * These functions should only be used when it is necessary to map a | ||
5 | * physical address space into the kernel address space before ioremap() | ||
6 | * can be used, e.g. early in boot before paging_init(). | ||
7 | * | ||
8 | * Copyright (C) 2009 Matt Fleming | ||
9 | */ | ||
10 | |||
11 | #include <linux/vmalloc.h> | ||
12 | #include <linux/ioport.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/bootmem.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <asm/fixmap.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgalloc.h> | ||
21 | #include <asm/addrspace.h> | ||
22 | #include <asm/cacheflush.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <asm/mmu.h> | ||
25 | #include <asm/mmu_context.h> | ||
26 | |||
27 | struct ioremap_map { | ||
28 | void __iomem *addr; | ||
29 | unsigned long size; | ||
30 | unsigned long fixmap_addr; | ||
31 | }; | ||
32 | |||
33 | static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS]; | ||
34 | |||
35 | void __init ioremap_fixed_init(void) | ||
36 | { | ||
37 | struct ioremap_map *map; | ||
38 | int i; | ||
39 | |||
40 | for (i = 0; i < FIX_N_IOREMAPS; i++) { | ||
41 | map = &ioremap_maps[i]; | ||
42 | map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i); | ||
43 | } | ||
44 | } | ||
45 | |||
46 | void __init __iomem * | ||
47 | ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) | ||
48 | { | ||
49 | enum fixed_addresses idx0, idx; | ||
50 | struct ioremap_map *map; | ||
51 | unsigned int nrpages; | ||
52 | unsigned long offset; | ||
53 | int i, slot; | ||
54 | |||
55 | /* | ||
56 | * Mappings have to be page-aligned | ||
57 | */ | ||
58 | offset = phys_addr & ~PAGE_MASK; | ||
59 | phys_addr &= PAGE_MASK; | ||
60 | size = PAGE_ALIGN(phys_addr + size) - phys_addr; | ||
61 | |||
62 | slot = -1; | ||
63 | for (i = 0; i < FIX_N_IOREMAPS; i++) { | ||
64 | map = &ioremap_maps[i]; | ||
65 | if (!map->addr) { | ||
66 | map->size = size; | ||
67 | slot = i; | ||
68 | break; | ||
69 | } | ||
70 | } | ||
71 | |||
72 | if (slot < 0) | ||
73 | return NULL; | ||
74 | |||
75 | /* | ||
76 | * Mappings have to fit in the FIX_IOREMAP area. | ||
77 | */ | ||
78 | nrpages = size >> PAGE_SHIFT; | ||
79 | if (nrpages > FIX_N_IOREMAPS) | ||
80 | return NULL; | ||
81 | |||
82 | /* | ||
83 | * Ok, go for it.. | ||
84 | */ | ||
85 | idx0 = FIX_IOREMAP_BEGIN + slot; | ||
86 | idx = idx0; | ||
87 | while (nrpages > 0) { | ||
88 | pgprot_val(prot) |= _PAGE_WIRED; | ||
89 | __set_fixmap(idx, phys_addr, prot); | ||
90 | phys_addr += PAGE_SIZE; | ||
91 | idx++; | ||
92 | --nrpages; | ||
93 | } | ||
94 | |||
95 | map->addr = (void __iomem *)(offset + map->fixmap_addr); | ||
96 | return map->addr; | ||
97 | } | ||
98 | |||
99 | int iounmap_fixed(void __iomem *addr) | ||
100 | { | ||
101 | enum fixed_addresses idx; | ||
102 | struct ioremap_map *map; | ||
103 | unsigned int nrpages; | ||
104 | int i, slot; | ||
105 | |||
106 | slot = -1; | ||
107 | for (i = 0; i < FIX_N_IOREMAPS; i++) { | ||
108 | map = &ioremap_maps[i]; | ||
109 | if (map->addr == addr) { | ||
110 | slot = i; | ||
111 | break; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * If we don't match, it's not for us. | ||
117 | */ | ||
118 | if (slot < 0) | ||
119 | return -EINVAL; | ||
120 | |||
121 | nrpages = map->size >> PAGE_SHIFT; | ||
122 | |||
123 | idx = FIX_IOREMAP_BEGIN + slot + nrpages - 1; | ||
124 | while (nrpages > 0) { | ||
125 | __clear_fixmap(idx, __pgprot(_PAGE_WIRED)); | ||
126 | --idx; | ||
127 | --nrpages; | ||
128 | } | ||
129 | |||
130 | map->size = 0; | ||
131 | map->addr = NULL; | ||
132 | |||
133 | return 0; | ||
134 | } | ||
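ioremap_fixed() above hands out one of FIX_N_IOREMAPS pre-reserved fixmap windows: page-align the request, claim the first slot whose addr is still NULL, and wire one fixmap pte per page; iounmap_fixed() finds the slot by address and unwinds in reverse. A userspace model of the slot bookkeeping (the window size and slot count here are arbitrary):

    #include <stdio.h>

    #define NSLOTS 4           /* FIX_N_IOREMAPS stand-in */
    #define WINDOW 0x100000UL  /* arbitrary per-slot window size */

    struct slot { void *addr; unsigned long size; };
    static struct slot slots[NSLOTS];

    static void *slot_map(unsigned long base, unsigned long size)
    {
        for (int i = 0; i < NSLOTS; i++) {
            if (!slots[i].addr) {              /* first free window */
                slots[i].size = size;
                slots[i].addr = (void *)(base + (unsigned long)i * WINDOW);
                return slots[i].addr;
            }
        }
        return NULL;                           /* all windows in use */
    }

    static int slot_unmap(void *addr)
    {
        for (int i = 0; i < NSLOTS; i++) {
            if (slots[i].addr == addr) {       /* found our slot */
                slots[i].addr = NULL;
                slots[i].size = 0;
                return 0;
            }
        }
        return -1;                             /* not ours: -EINVAL */
    }

    int main(void)
    {
        void *a = slot_map(0xe0000000UL, 0x2000);
        printf("mapped at %p, unmap=%d\n", a, slot_unmap(a));
        return 0;
    }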
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c index 16e01b5fed04..15d74ea42094 100644 --- a/arch/sh/mm/kmap.c +++ b/arch/sh/mm/kmap.c | |||
@@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
39 | pagefault_disable(); | 39 | pagefault_disable(); |
40 | 40 | ||
41 | idx = FIX_CMAP_END - | 41 | idx = FIX_CMAP_END - |
42 | ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT); | 42 | (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) + |
43 | (FIX_N_COLOURS * smp_processor_id())); | ||
44 | |||
43 | vaddr = __fix_to_virt(idx); | 45 | vaddr = __fix_to_virt(idx); |
44 | 46 | ||
45 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); | 47 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); |
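The kmap_coherent() hunk above derives the fixmap index from the page colour of the target address plus a per-CPU block of FIX_N_COLOURS slots, replacing the dcache alias-mask form so that each CPU owns its own colour slots. The arithmetic, with assumed values for FIX_N_COLOURS and FIX_CMAP_END:

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define FIX_N_COLOURS 8     /* assumed slots per CPU */
    #define FIX_CMAP_END  127   /* assumed top fixmap index */

    /* Fixmap index for a coherent kmap of 'addr' on CPU 'cpu': the page
     * colour of the address, within that CPU's block of colour slots. */
    static int cmap_index(unsigned long addr, int cpu)
    {
        return FIX_CMAP_END -
               (int)(((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
                     FIX_N_COLOURS * (unsigned)cpu);
    }

    int main(void)
    {
        printf("cpu0, colour 3 -> idx %d\n", cmap_index(0x3000, 0)); /* 124 */
        printf("cpu1, colour 3 -> idx %d\n", cmap_index(0x3000, 1)); /* 116 */
        return 0;
    }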
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index d2984fa42d3d..afeb710ec5c3 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c | |||
@@ -54,7 +54,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
54 | /* We do not accept a shared mapping if it would violate | 54 | /* We do not accept a shared mapping if it would violate |
55 | * cache aliasing constraints. | 55 | * cache aliasing constraints. |
56 | */ | 56 | */ |
57 | if ((flags & MAP_SHARED) && (addr & shm_align_mask)) | 57 | if ((flags & MAP_SHARED) && |
58 | ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
58 | return -EINVAL; | 59 | return -EINVAL; |
59 | return addr; | 60 | return addr; |
60 | } | 61 | } |
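The mmap.c hunk loosens the shared-mapping colour check: rather than rejecting any MAP_SHARED hint with colour bits set, it rejects only when the hint's colour disagrees with the colour implied by the file offset, since only that mismatch can create an illegal dcache alias. A model of the corrected predicate (the alignment mask is an assumed value):

    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define SHM_ALIGN_MASK 0x3fffUL   /* assumed 16 KiB alias span - 1 */

    /* A shared mapping hint is fine at any colour, as long as it agrees
     * with the colour implied by the file offset. */
    static int shared_addr_ok(unsigned long addr, unsigned long pgoff)
    {
        return ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK) == 0;
    }

    int main(void)
    {
        printf("%d\n", shared_addr_ok(0x40001000UL, 1)); /* colours agree */
        printf("%d\n", shared_addr_ok(0x40001000UL, 0)); /* mismatch */
        return 0;
    }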
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c index ac16c05917ef..7694f50c9034 100644 --- a/arch/sh/mm/nommu.c +++ b/arch/sh/mm/nommu.c | |||
@@ -94,3 +94,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end, | |||
94 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | 94 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) |
95 | { | 95 | { |
96 | } | 96 | } |
97 | |||
98 | void pgtable_cache_init(void) | ||
99 | { | ||
100 | } | ||
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 9b784fdb947c..961b34085e3b 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c | |||
@@ -28,7 +28,7 @@ void __init setup_memory(void) | |||
28 | { | 28 | { |
29 | unsigned long free_pfn = PFN_UP(__pa(_end)); | 29 | unsigned long free_pfn = PFN_UP(__pa(_end)); |
30 | u64 base = min_low_pfn << PAGE_SHIFT; | 30 | u64 base = min_low_pfn << PAGE_SHIFT; |
31 | u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn; | 31 | u64 size = (max_low_pfn << PAGE_SHIFT) - base; |
32 | 32 | ||
33 | lmb_add(base, size); | 33 | lmb_add(base, size); |
34 | 34 | ||
@@ -38,6 +38,15 @@ void __init setup_memory(void) | |||
38 | (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); | 38 | (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. | ||
42 | */ | ||
43 | if (CONFIG_ZERO_PAGE_OFFSET != 0) | ||
44 | lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); | ||
45 | |||
46 | lmb_analyze(); | ||
47 | lmb_dump_all(); | ||
48 | |||
49 | /* | ||
41 | * Node 0 sets up its pgdat at the first available pfn, | 50 | * Node 0 sets up its pgdat at the first available pfn, |
42 | * and bumps it up before setting up the bootmem allocator. | 51 | * and bumps it up before setting up the bootmem allocator. |
43 | */ | 52 | */ |
@@ -60,18 +69,21 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | |||
60 | unsigned long bootmem_paddr; | 69 | unsigned long bootmem_paddr; |
61 | 70 | ||
62 | /* Don't allow bogus node assignment */ | 71 | /* Don't allow bogus node assignment */ |
63 | BUG_ON(nid > MAX_NUMNODES || nid == 0); | 72 | BUG_ON(nid > MAX_NUMNODES || nid <= 0); |
64 | 73 | ||
65 | start_pfn = start >> PAGE_SHIFT; | 74 | start_pfn = start >> PAGE_SHIFT; |
66 | end_pfn = end >> PAGE_SHIFT; | 75 | end_pfn = end >> PAGE_SHIFT; |
67 | 76 | ||
77 | pmb_bolt_mapping((unsigned long)__va(start), start, end - start, | ||
78 | PAGE_KERNEL); | ||
79 | |||
68 | lmb_add(start, end - start); | 80 | lmb_add(start, end - start); |
69 | 81 | ||
70 | __add_active_range(nid, start_pfn, end_pfn); | 82 | __add_active_range(nid, start_pfn, end_pfn); |
71 | 83 | ||
72 | /* Node-local pgdat */ | 84 | /* Node-local pgdat */ |
73 | NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data), | 85 | NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data), |
74 | SMP_CACHE_BYTES, end_pfn)); | 86 | SMP_CACHE_BYTES, end)); |
75 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); | 87 | memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); |
76 | 88 | ||
77 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; | 89 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; |
@@ -81,7 +93,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) | |||
81 | /* Node-local bootmap */ | 93 | /* Node-local bootmap */ |
82 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); | 94 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); |
83 | bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT, | 95 | bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT, |
84 | PAGE_SIZE, end_pfn); | 96 | PAGE_SIZE, end); |
85 | init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, | 97 | init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, |
86 | start_pfn, end_pfn); | 98 | start_pfn, end_pfn); |
87 | 99 | ||
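The numa.c changes above are unit corrections: the lmb region size now subtracts the byte base rather than a raw pfn, the pages below CONFIG_ZERO_PAGE_OFFSET get reserved, and lmb_alloc_base() is bounded by the byte address end rather than end_pfn. The first bug is easiest to see numerically:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long long min_low_pfn = 0x10000, max_low_pfn = 0x20000;
        unsigned long long base = min_low_pfn << PAGE_SHIFT;

        /* Old form mixed units: a byte address minus a pfn. */
        unsigned long long bad  = (max_low_pfn << PAGE_SHIFT) - min_low_pfn;
        /* Fixed form stays in bytes end to end. */
        unsigned long long good = (max_low_pfn << PAGE_SHIFT) - base;

        printf("bad=%#llx good=%#llx\n", bad, good);
        return 0;
    }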
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c new file mode 100644 index 000000000000..26e03a1f7ca4 --- /dev/null +++ b/arch/sh/mm/pgtable.c | |||
@@ -0,0 +1,57 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/slab.h> | ||
3 | |||
4 | #define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO | ||
5 | |||
6 | static struct kmem_cache *pgd_cachep; | ||
7 | #if PAGETABLE_LEVELS > 2 | ||
8 | static struct kmem_cache *pmd_cachep; | ||
9 | #endif | ||
10 | |||
11 | void pgd_ctor(void *x) | ||
12 | { | ||
13 | pgd_t *pgd = x; | ||
14 | |||
15 | memcpy(pgd + USER_PTRS_PER_PGD, | ||
16 | swapper_pg_dir + USER_PTRS_PER_PGD, | ||
17 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
18 | } | ||
19 | |||
20 | void pgtable_cache_init(void) | ||
21 | { | ||
22 | pgd_cachep = kmem_cache_create("pgd_cache", | ||
23 | PTRS_PER_PGD * (1<<PTE_MAGNITUDE), | ||
24 | PAGE_SIZE, SLAB_PANIC, pgd_ctor); | ||
25 | #if PAGETABLE_LEVELS > 2 | ||
26 | pmd_cachep = kmem_cache_create("pmd_cache", | ||
27 | PTRS_PER_PMD * (1<<PTE_MAGNITUDE), | ||
28 | PAGE_SIZE, SLAB_PANIC, NULL); | ||
29 | #endif | ||
30 | } | ||
31 | |||
32 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
33 | { | ||
34 | return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP); | ||
35 | } | ||
36 | |||
37 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
38 | { | ||
39 | kmem_cache_free(pgd_cachep, pgd); | ||
40 | } | ||
41 | |||
42 | #if PAGETABLE_LEVELS > 2 | ||
43 | void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | ||
44 | { | ||
45 | set_pud(pud, __pud((unsigned long)pmd)); | ||
46 | } | ||
47 | |||
48 | pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | ||
49 | { | ||
50 | return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP); | ||
51 | } | ||
52 | |||
53 | void pmd_free(struct mm_struct *mm, pmd_t *pmd) | ||
54 | { | ||
55 | kmem_cache_free(pmd_cachep, pmd); | ||
56 | } | ||
57 | #endif /* PAGETABLE_LEVELS > 2 */ | ||
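The new pgtable.c backs pgd (and, with more than two paging levels, pmd) allocation with slab caches; pgd_ctor() seeds the kernel half of swapper_pg_dir into each object so new address spaces inherit the kernel mappings. A userspace model of the constructor-backed allocator (note the real slab ctor runs as objects are set up in the cache, not on every allocation as modeled here):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PTRS      8   /* toy PTRS_PER_PGD */
    #define USER_PTRS 4   /* toy USER_PTRS_PER_PGD */

    static unsigned long swapper_pg_dir[PTRS] =
        { 0, 0, 0, 0, 0x8001, 0x8002, 0x8003, 0x8004 };

    /* Seed the kernel half of a new pgd, as pgd_ctor() does. */
    static void pgd_ctor(unsigned long *pgd)
    {
        memcpy(pgd + USER_PTRS, swapper_pg_dir + USER_PTRS,
               (PTRS - USER_PTRS) * sizeof(*pgd));
    }

    static unsigned long *pgd_alloc(void)
    {
        unsigned long *pgd = calloc(PTRS, sizeof(*pgd));

        if (pgd)
            pgd_ctor(pgd);   /* modeled per-allocation for simplicity */
        return pgd;
    }

    int main(void)
    {
        unsigned long *pgd = pgd_alloc();

        printf("pgd[5] = %#lx\n", pgd[5]);   /* kernel entry inherited */
        free(pgd);
        return 0;
    }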
diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c deleted file mode 100644 index 43c8eac4d8a1..000000000000 --- a/arch/sh/mm/pmb-fixed.c +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/fixed_pmb.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Renesas Solutions Corp. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <asm/mmu.h> | ||
14 | #include <asm/mmu_context.h> | ||
15 | |||
16 | static int __uses_jump_to_uncached fixed_pmb_init(void) | ||
17 | { | ||
18 | int i; | ||
19 | unsigned long addr, data; | ||
20 | |||
21 | jump_to_uncached(); | ||
22 | |||
23 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | ||
24 | addr = PMB_DATA + (i << PMB_E_SHIFT); | ||
25 | data = ctrl_inl(addr); | ||
26 | if (!(data & PMB_V)) | ||
27 | continue; | ||
28 | |||
29 | if (data & PMB_C) { | ||
30 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
31 | data |= PMB_WT; | ||
32 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
33 | data &= ~PMB_WT; | ||
34 | #else | ||
35 | data &= ~(PMB_C | PMB_WT); | ||
36 | #endif | ||
37 | } | ||
38 | ctrl_outl(data, addr); | ||
39 | } | ||
40 | |||
41 | back_to_cached(); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | arch_initcall(fixed_pmb_init); | ||
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index aade31102112..e43ec600afcf 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c | |||
@@ -3,11 +3,8 @@ | |||
3 | * | 3 | * |
4 | * Privileged Space Mapping Buffer (PMB) Support. | 4 | * Privileged Space Mapping Buffer (PMB) Support. |
5 | * | 5 | * |
6 | * Copyright (C) 2005, 2006, 2007 Paul Mundt | 6 | * Copyright (C) 2005 - 2010 Paul Mundt |
7 | * | 7 | * Copyright (C) 2010 Matt Fleming |
8 | * P1/P2 Section mapping definitions from map32.h, which was: | ||
9 | * | ||
10 | * Copyright 2003 (c) Lineo Solutions,Inc. | ||
11 | * | 8 | * |
12 | * This file is subject to the terms and conditions of the GNU General Public | 9 | * This file is subject to the terms and conditions of the GNU General Public |
13 | * License. See the file "COPYING" in the main directory of this archive | 10 | * License. See the file "COPYING" in the main directory of this archive |
@@ -18,355 +15,802 @@ | |||
18 | #include <linux/sysdev.h> | 15 | #include <linux/sysdev.h> |
19 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
20 | #include <linux/module.h> | 17 | #include <linux/module.h> |
21 | #include <linux/slab.h> | ||
22 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
23 | #include <linux/debugfs.h> | 19 | #include <linux/debugfs.h> |
24 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
25 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
26 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/io.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/vmalloc.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/sizes.h> | ||
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | #include <asm/page.h> | ||
30 | #include <asm/mmu.h> | 32 | #include <asm/mmu.h> |
31 | #include <asm/io.h> | ||
32 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
33 | 34 | ||
34 | #define NR_PMB_ENTRIES 16 | 35 | struct pmb_entry; |
35 | 36 | ||
36 | static void __pmb_unmap(struct pmb_entry *); | 37 | struct pmb_entry { |
38 | unsigned long vpn; | ||
39 | unsigned long ppn; | ||
40 | unsigned long flags; | ||
41 | unsigned long size; | ||
37 | 42 | ||
38 | static struct kmem_cache *pmb_cache; | 43 | spinlock_t lock; |
39 | static unsigned long pmb_map; | ||
40 | 44 | ||
41 | static struct pmb_entry pmb_init_map[] = { | 45 | /* |
42 | /* vpn ppn flags (ub/sz/c/wt) */ | 46 | * 0 .. NR_PMB_ENTRIES for specific entry selection, or |
47 | * PMB_NO_ENTRY to search for a free one | ||
48 | */ | ||
49 | int entry; | ||
43 | 50 | ||
44 | /* P1 Section Mappings */ | 51 | /* Adjacent entry link for contiguous multi-entry mappings */ |
45 | { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, }, | 52 | struct pmb_entry *link; |
46 | { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, }, | 53 | }; |
47 | { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, }, | ||
48 | { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, }, | ||
49 | { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, }, | ||
50 | { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, }, | ||
51 | 54 | ||
52 | /* P2 Section Mappings */ | 55 | static struct { |
53 | { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | 56 | unsigned long size; |
54 | { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | 57 | int flag; |
55 | { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, }, | 58 | } pmb_sizes[] = { |
56 | { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | 59 | { .size = SZ_512M, .flag = PMB_SZ_512M, }, |
57 | { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | 60 | { .size = SZ_128M, .flag = PMB_SZ_128M, }, |
58 | { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | 61 | { .size = SZ_64M, .flag = PMB_SZ_64M, }, |
62 | { .size = SZ_16M, .flag = PMB_SZ_16M, }, | ||
59 | }; | 63 | }; |
60 | 64 | ||
61 | static inline unsigned long mk_pmb_entry(unsigned int entry) | 65 | static void pmb_unmap_entry(struct pmb_entry *, int depth); |
66 | |||
67 | static DEFINE_RWLOCK(pmb_rwlock); | ||
68 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; | ||
69 | static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); | ||
70 | |||
71 | static unsigned int pmb_iomapping_enabled; | ||
72 | |||
73 | static __always_inline unsigned long mk_pmb_entry(unsigned int entry) | ||
62 | { | 74 | { |
63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 75 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
64 | } | 76 | } |
65 | 77 | ||
66 | static inline unsigned long mk_pmb_addr(unsigned int entry) | 78 | static __always_inline unsigned long mk_pmb_addr(unsigned int entry) |
67 | { | 79 | { |
68 | return mk_pmb_entry(entry) | PMB_ADDR; | 80 | return mk_pmb_entry(entry) | PMB_ADDR; |
69 | } | 81 | } |
70 | 82 | ||
71 | static inline unsigned long mk_pmb_data(unsigned int entry) | 83 | static __always_inline unsigned long mk_pmb_data(unsigned int entry) |
72 | { | 84 | { |
73 | return mk_pmb_entry(entry) | PMB_DATA; | 85 | return mk_pmb_entry(entry) | PMB_DATA; |
74 | } | 86 | } |
75 | 87 | ||
76 | static DEFINE_SPINLOCK(pmb_list_lock); | 88 | static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) |
77 | static struct pmb_entry *pmb_list; | 89 | { |
90 | return ppn >= __pa(memory_start) && ppn < __pa(memory_end); | ||
91 | } | ||
78 | 92 | ||
79 | static inline void pmb_list_add(struct pmb_entry *pmbe) | 93 | /* |
94 | * Ensure that the PMB entries match our cache configuration. | ||
95 | * | ||
96 | * When we are in 32-bit address extended mode, CCR.CB becomes | ||
97 | * invalid, so care must be taken to manually adjust cacheable | ||
98 | * translations. | ||
99 | */ | ||
100 | static __always_inline unsigned long pmb_cache_flags(void) | ||
80 | { | 101 | { |
81 | struct pmb_entry **p, *tmp; | 102 | unsigned long flags = 0; |
103 | |||
104 | #if defined(CONFIG_CACHE_OFF) | ||
105 | flags |= PMB_WT | PMB_UB; | ||
106 | #elif defined(CONFIG_CACHE_WRITETHROUGH) | ||
107 | flags |= PMB_C | PMB_WT | PMB_UB; | ||
108 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
109 | flags |= PMB_C; | ||
110 | #endif | ||
82 | 111 | ||
83 | p = &pmb_list; | 112 | return flags; |
84 | while ((tmp = *p) != NULL) | 113 | } |
85 | p = &tmp->next; | ||
86 | 114 | ||
87 | pmbe->next = tmp; | 115 | /* |
88 | *p = pmbe; | 116 | * Convert typical pgprot value to the PMB equivalent |
117 | */ | ||
118 | static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot) | ||
119 | { | ||
120 | unsigned long pmb_flags = 0; | ||
121 | u64 flags = pgprot_val(prot); | ||
122 | |||
123 | if (flags & _PAGE_CACHABLE) | ||
124 | pmb_flags |= PMB_C; | ||
125 | if (flags & _PAGE_WT) | ||
126 | pmb_flags |= PMB_WT | PMB_UB; | ||
127 | |||
128 | return pmb_flags; | ||
89 | } | 129 | } |
90 | 130 | ||
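Note: pgprot_to_pmb_flags() above inspects exactly two page-table bits. A hedged usage sketch (the constructed pgprot values are illustrative, not taken from this diff):

    /* Plain cacheable mapping: _PAGE_CACHABLE -> PMB_C */
    unsigned long f1 = pgprot_to_pmb_flags(__pgprot(_PAGE_CACHABLE));

    /* Write-through mapping: _PAGE_CACHABLE | _PAGE_WT
     * -> PMB_C | PMB_WT | PMB_UB (unbuffered write-through) */
    unsigned long f2 = pgprot_to_pmb_flags(__pgprot(_PAGE_CACHABLE | _PAGE_WT));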
91 | static inline void pmb_list_del(struct pmb_entry *pmbe) | 131 | static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) |
92 | { | 132 | { |
93 | struct pmb_entry **p, *tmp; | 133 | return (b->vpn == (a->vpn + a->size)) && |
134 | (b->ppn == (a->ppn + a->size)) && | ||
135 | (b->flags == a->flags); | ||
136 | } | ||
137 | |||
138 | static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys, | ||
139 | unsigned long size) | ||
140 | { | ||
141 | int i; | ||
142 | |||
143 | read_lock(&pmb_rwlock); | ||
144 | |||
145 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
146 | struct pmb_entry *pmbe, *iter; | ||
147 | unsigned long span; | ||
148 | |||
149 | if (!test_bit(i, pmb_map)) | ||
150 | continue; | ||
151 | |||
152 | pmbe = &pmb_entry_list[i]; | ||
153 | |||
154 | /* | ||
155 | * See if VPN and PPN are bounded by an existing mapping. | ||
156 | */ | ||
157 | if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) | ||
158 | continue; | ||
159 | if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size))) | ||
160 | continue; | ||
161 | |||
162 | /* | ||
163 | * Now see if we're in range of a simple mapping. | ||
164 | */ | ||
165 | if (size <= pmbe->size) { | ||
166 | read_unlock(&pmb_rwlock); | ||
167 | return true; | ||
168 | } | ||
169 | |||
170 | span = pmbe->size; | ||
94 | 171 | ||
95 | for (p = &pmb_list; (tmp = *p); p = &tmp->next) | 172 | /* |
96 | if (tmp == pmbe) { | 173 | * Finally for sizes that involve compound mappings, walk |
97 | *p = tmp->next; | 174 | * the chain. |
98 | return; | 175 | */ |
176 | for (iter = pmbe->link; iter; iter = iter->link) | ||
177 | span += iter->size; | ||
178 | |||
179 | /* | ||
180 | * Nothing else to do if the range requirements are met. | ||
181 | */ | ||
182 | if (size <= span) { | ||
183 | read_unlock(&pmb_rwlock); | ||
184 | return true; | ||
99 | } | 185 | } |
186 | } | ||
187 | |||
188 | read_unlock(&pmb_rwlock); | ||
189 | return false; | ||
190 | } | ||
191 | |||
192 | static bool pmb_size_valid(unsigned long size) | ||
193 | { | ||
194 | int i; | ||
195 | |||
196 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
197 | if (pmb_sizes[i].size == size) | ||
198 | return true; | ||
199 | |||
200 | return false; | ||
201 | } | ||
202 | |||
203 | static inline bool pmb_addr_valid(unsigned long addr, unsigned long size) | ||
204 | { | ||
205 | return (addr >= P1SEG && (addr + size - 1) < P3SEG); | ||
206 | } | ||
207 | |||
208 | static inline bool pmb_prot_valid(pgprot_t prot) | ||
209 | { | ||
210 | return (pgprot_val(prot) & _PAGE_USER) == 0; | ||
211 | } | ||
212 | |||
213 | static int pmb_size_to_flags(unsigned long size) | ||
214 | { | ||
215 | int i; | ||
216 | |||
217 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) | ||
218 | if (pmb_sizes[i].size == size) | ||
219 | return pmb_sizes[i].flag; | ||
220 | |||
221 | return 0; | ||
100 | } | 222 | } |
101 | 223 | ||
102 | struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | 224 | static int pmb_alloc_entry(void) |
103 | unsigned long flags) | 225 | { |
226 | int pos; | ||
227 | |||
228 | pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); | ||
229 | if (pos >= 0 && pos < NR_PMB_ENTRIES) | ||
230 | __set_bit(pos, pmb_map); | ||
231 | else | ||
232 | pos = -ENOSPC; | ||
233 | |||
234 | return pos; | ||
235 | } | ||
236 | |||
237 | static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | ||
238 | unsigned long flags, int entry) | ||
104 | { | 239 | { |
105 | struct pmb_entry *pmbe; | 240 | struct pmb_entry *pmbe; |
241 | unsigned long irqflags; | ||
242 | void *ret = NULL; | ||
243 | int pos; | ||
244 | |||
245 | write_lock_irqsave(&pmb_rwlock, irqflags); | ||
246 | |||
247 | if (entry == PMB_NO_ENTRY) { | ||
248 | pos = pmb_alloc_entry(); | ||
249 | if (unlikely(pos < 0)) { | ||
250 | ret = ERR_PTR(pos); | ||
251 | goto out; | ||
252 | } | ||
253 | } else { | ||
254 | if (__test_and_set_bit(entry, pmb_map)) { | ||
255 | ret = ERR_PTR(-ENOSPC); | ||
256 | goto out; | ||
257 | } | ||
258 | |||
259 | pos = entry; | ||
260 | } | ||
261 | |||
262 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
106 | 263 | ||
107 | pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); | 264 | pmbe = &pmb_entry_list[pos]; |
108 | if (!pmbe) | 265 | |
109 | return ERR_PTR(-ENOMEM); | 266 | memset(pmbe, 0, sizeof(struct pmb_entry)); |
267 | |||
268 | spin_lock_init(&pmbe->lock); | ||
110 | 269 | ||
111 | pmbe->vpn = vpn; | 270 | pmbe->vpn = vpn; |
112 | pmbe->ppn = ppn; | 271 | pmbe->ppn = ppn; |
113 | pmbe->flags = flags; | 272 | pmbe->flags = flags; |
114 | 273 | pmbe->entry = pos; | |
115 | spin_lock_irq(&pmb_list_lock); | ||
116 | pmb_list_add(pmbe); | ||
117 | spin_unlock_irq(&pmb_list_lock); | ||
118 | 274 | ||
119 | return pmbe; | 275 | return pmbe; |
276 | |||
277 | out: | ||
278 | write_unlock_irqrestore(&pmb_rwlock, irqflags); | ||
279 | return ret; | ||
120 | } | 280 | } |
121 | 281 | ||
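Note: pmb_alloc() above supports two modes through its `entry` argument, both backed by the pmb_map bitmap under the write lock. A hedged usage sketch (addresses and slot number are illustrative):

    /* Let the allocator pick any free slot via find_first_zero_bit()... */
    pmbe = pmb_alloc(0xa0000000, 0x08000000, PMB_SZ_64M | PMB_C, PMB_NO_ENTRY);

    /* ...or claim hardware slot 3 explicitly, as pmb_synchronize() does
     * when resyncing a boot-loader-established mapping; this fails with
     * -ENOSPC if the slot is already taken. */
    pmbe = pmb_alloc(0xa4000000, 0x0c000000, PMB_SZ_64M | PMB_C, 3);
    if (IS_ERR(pmbe))
            return PTR_ERR(pmbe);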
122 | void pmb_free(struct pmb_entry *pmbe) | 282 | static void pmb_free(struct pmb_entry *pmbe) |
123 | { | 283 | { |
124 | spin_lock_irq(&pmb_list_lock); | 284 | __clear_bit(pmbe->entry, pmb_map); |
125 | pmb_list_del(pmbe); | ||
126 | spin_unlock_irq(&pmb_list_lock); | ||
127 | 285 | ||
128 | kmem_cache_free(pmb_cache, pmbe); | 286 | pmbe->entry = PMB_NO_ENTRY; |
287 | pmbe->link = NULL; | ||
129 | } | 288 | } |
130 | 289 | ||
131 | /* | 290 | /* |
132 | * Must be in P2 for __set_pmb_entry() | 291 | * Must be run uncached. |
133 | */ | 292 | */ |
134 | int __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 293 | static void __set_pmb_entry(struct pmb_entry *pmbe) |
135 | unsigned long flags, int *entry) | ||
136 | { | 294 | { |
137 | unsigned int pos = *entry; | 295 | unsigned long addr, data; |
138 | 296 | ||
139 | if (unlikely(pos == PMB_NO_ENTRY)) | 297 | addr = mk_pmb_addr(pmbe->entry); |
140 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | 298 | data = mk_pmb_data(pmbe->entry); |
141 | 299 | ||
142 | repeat: | 300 | jump_to_uncached(); |
143 | if (unlikely(pos > NR_PMB_ENTRIES)) | ||
144 | return -ENOSPC; | ||
145 | 301 | ||
146 | if (test_and_set_bit(pos, &pmb_map)) { | 302 | /* Set V-bit */ |
147 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | 303 | __raw_writel(pmbe->vpn | PMB_V, addr); |
148 | goto repeat; | 304 | __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data); |
149 | } | ||
150 | 305 | ||
151 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); | 306 | back_to_cached(); |
307 | } | ||
152 | 308 | ||
153 | #ifdef CONFIG_CACHE_WRITETHROUGH | 309 | static void __clear_pmb_entry(struct pmb_entry *pmbe) |
154 | /* | 310 | { |
155 | * When we are in 32-bit address extended mode, CCR.CB becomes | 311 | unsigned long addr, data; |
156 | * invalid, so care must be taken to manually adjust cacheable | 312 | unsigned long addr_val, data_val; |
157 | * translations. | ||
158 | */ | ||
159 | if (likely(flags & PMB_C)) | ||
160 | flags |= PMB_WT; | ||
161 | #endif | ||
162 | 313 | ||
163 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); | 314 | addr = mk_pmb_addr(pmbe->entry); |
315 | data = mk_pmb_data(pmbe->entry); | ||
164 | 316 | ||
165 | *entry = pos; | 317 | addr_val = __raw_readl(addr); |
318 | data_val = __raw_readl(data); | ||
166 | 319 | ||
167 | return 0; | 320 | /* Clear V-bit */ |
321 | writel_uncached(addr_val & ~PMB_V, addr); | ||
322 | writel_uncached(data_val & ~PMB_V, data); | ||
168 | } | 323 | } |
169 | 324 | ||
170 | int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) | 325 | #ifdef CONFIG_PM |
326 | static void set_pmb_entry(struct pmb_entry *pmbe) | ||
171 | { | 327 | { |
172 | int ret; | 328 | unsigned long flags; |
173 | 329 | ||
174 | jump_to_uncached(); | 330 | spin_lock_irqsave(&pmbe->lock, flags); |
175 | ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry); | 331 | __set_pmb_entry(pmbe); |
176 | back_to_cached(); | 332 | spin_unlock_irqrestore(&pmbe->lock, flags); |
333 | } | ||
334 | #endif /* CONFIG_PM */ | ||
177 | 335 | ||
178 | return ret; | 336 | int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, |
337 | unsigned long size, pgprot_t prot) | ||
338 | { | ||
339 | struct pmb_entry *pmbp, *pmbe; | ||
340 | unsigned long orig_addr, orig_size; | ||
341 | unsigned long flags, pmb_flags; | ||
342 | int i, mapped; | ||
343 | |||
344 | if (!pmb_addr_valid(vaddr, size)) | ||
345 | return -EFAULT; | ||
346 | if (pmb_mapping_exists(vaddr, phys, size)) | ||
347 | return 0; | ||
348 | |||
349 | orig_addr = vaddr; | ||
350 | orig_size = size; | ||
351 | |||
352 | flush_tlb_kernel_range(vaddr, vaddr + size); | ||
353 | |||
354 | pmb_flags = pgprot_to_pmb_flags(prot); | ||
355 | pmbp = NULL; | ||
356 | |||
357 | do { | ||
358 | for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | ||
359 | if (size < pmb_sizes[i].size) | ||
360 | continue; | ||
361 | |||
362 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | | ||
363 | pmb_sizes[i].flag, PMB_NO_ENTRY); | ||
364 | if (IS_ERR(pmbe)) { | ||
365 | pmb_unmap_entry(pmbp, mapped); | ||
366 | return PTR_ERR(pmbe); | ||
367 | } | ||
368 | |||
369 | spin_lock_irqsave(&pmbe->lock, flags); | ||
370 | |||
371 | pmbe->size = pmb_sizes[i].size; | ||
372 | |||
373 | __set_pmb_entry(pmbe); | ||
374 | |||
375 | phys += pmbe->size; | ||
376 | vaddr += pmbe->size; | ||
377 | size -= pmbe->size; | ||
378 | |||
379 | /* | ||
380 | * Link adjacent entries that span multiple PMB | ||
381 | * entries for easier tear-down. | ||
382 | */ | ||
383 | if (likely(pmbp)) { | ||
384 | spin_lock(&pmbp->lock); | ||
385 | pmbp->link = pmbe; | ||
386 | spin_unlock(&pmbp->lock); | ||
387 | } | ||
388 | |||
389 | pmbp = pmbe; | ||
390 | |||
391 | /* | ||
392 | * Instead of trying smaller sizes on every | ||
393 | * iteration (even if we succeed in allocating | ||
394 | * space), try using pmb_sizes[i].size again. | ||
395 | */ | ||
396 | i--; | ||
397 | mapped++; | ||
398 | |||
399 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
400 | } | ||
401 | } while (size >= SZ_16M); | ||
402 | |||
403 | flush_cache_vmap(orig_addr, orig_addr + orig_size); | ||
404 | |||
405 | return 0; | ||
179 | } | 406 | } |
180 | 407 | ||
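Note: the size-descending loop in pmb_bolt_mapping() retries the same pmb_sizes[] slot (the `i--`) until the remainder drops below it. A worked example, bolting 192MB at virt 0x80000000 / phys 0x08000000 (addresses illustrative):

    /* pmb_sizes[] = { 512M, 128M, 64M, 16M }
     *
     * i = 0: 192M < 512M, skip; i = 1: 192M >= 128M
     *   -> entry #0: 0x80000000 -> 0x08000000, 128M; size = 64M
     *      i-- retries 128M: 64M < 128M, fall through
     * i = 2: 64M >= 64M
     *   -> entry #1: 0x88000000 -> 0x10000000, 64M; size = 0
     * outer loop exits (size < SZ_16M); entry #0 links to entry #1
     * so the compound mapping can be torn down as a unit.
     */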
181 | void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | 408 | void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, |
409 | pgprot_t prot, void *caller) | ||
182 | { | 410 | { |
183 | unsigned int entry = pmbe->entry; | 411 | unsigned long vaddr; |
184 | unsigned long addr; | 412 | phys_addr_t offset, last_addr; |
413 | phys_addr_t align_mask; | ||
414 | unsigned long aligned; | ||
415 | struct vm_struct *area; | ||
416 | int i, ret; | ||
417 | |||
418 | if (!pmb_iomapping_enabled) | ||
419 | return NULL; | ||
185 | 420 | ||
186 | /* | 421 | /* |
187 | * Don't allow clearing of wired init entries, P1 or P2 access | 422 | * Small mappings need to go through the TLB. |
188 | * without a corresponding mapping in the PMB will lead to reset | ||
189 | * by the TLB. | ||
190 | */ | 423 | */ |
191 | if (unlikely(entry < ARRAY_SIZE(pmb_init_map) || | 424 | if (size < SZ_16M) |
192 | entry >= NR_PMB_ENTRIES)) | 425 | return ERR_PTR(-EINVAL); |
193 | return; | 426 | if (!pmb_prot_valid(prot)) |
427 | return ERR_PTR(-EINVAL); | ||
194 | 428 | ||
195 | jump_to_uncached(); | 429 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) |
430 | if (size >= pmb_sizes[i].size) | ||
431 | break; | ||
196 | 432 | ||
197 | /* Clear V-bit */ | 433 | last_addr = phys + size; |
198 | addr = mk_pmb_addr(entry); | 434 | align_mask = ~(pmb_sizes[i].size - 1); |
199 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 435 | offset = phys & ~align_mask; |
436 | phys &= align_mask; | ||
437 | aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys; | ||
200 | 438 | ||
201 | addr = mk_pmb_data(entry); | 439 | /* |
202 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 440 | * XXX: This should really start from uncached_end, but this |
441 | * causes the MMU to reset, so for now we restrict it to the | ||
442 | * 0xb000...0xc000 range. | ||
443 | */ | ||
444 | area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000, | ||
445 | P3SEG, caller); | ||
446 | if (!area) | ||
447 | return NULL; | ||
203 | 448 | ||
204 | back_to_cached(); | 449 | area->phys_addr = phys; |
450 | vaddr = (unsigned long)area->addr; | ||
451 | |||
452 | ret = pmb_bolt_mapping(vaddr, phys, size, prot); | ||
453 | if (unlikely(ret != 0)) | ||
454 | return ERR_PTR(ret); | ||
205 | 455 | ||
206 | clear_bit(entry, &pmb_map); | 456 | return (void __iomem *)(offset + (char *)vaddr); |
207 | } | 457 | } |
208 | 458 | ||
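Note: the alignment arithmetic in pmb_remap_caller() widens an unaligned request to PMB-sized granules and hides the adjustment behind the returned cookie. A worked example, assuming a 20MB request at phys 0x08801000 (values illustrative):

    /* best fit: 16M (20M >= 16M, 20M < 64M) -> align_mask = ~(SZ_16M - 1)
     * offset  = 0x08801000 & (SZ_16M - 1)   = 0x00801000
     * phys    = 0x08801000 & align_mask     = 0x08000000
     * last    = 0x08801000 + 0x01400000     = 0x09c01000
     * aligned = ALIGN(last, SZ_16M) - phys  = 0x0a000000 - 0x08000000
     *                                       = 32MB vm area
     * The caller gets vaddr + offset back, so the unaligned request is
     * transparent to users of the returned __iomem pointer.
     */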
459 | int pmb_unmap(void __iomem *addr) | ||
460 | { | ||
461 | struct pmb_entry *pmbe = NULL; | ||
462 | unsigned long vaddr = (unsigned long __force)addr; | ||
463 | int i, found = 0; | ||
464 | |||
465 | read_lock(&pmb_rwlock); | ||
466 | |||
467 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
468 | if (test_bit(i, pmb_map)) { | ||
469 | pmbe = &pmb_entry_list[i]; | ||
470 | if (pmbe->vpn == vaddr) { | ||
471 | found = 1; | ||
472 | break; | ||
473 | } | ||
474 | } | ||
475 | } | ||
209 | 476 | ||
210 | static struct { | 477 | read_unlock(&pmb_rwlock); |
211 | unsigned long size; | 478 | |
212 | int flag; | 479 | if (found) { |
213 | } pmb_sizes[] = { | 480 | pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); |
214 | { .size = 0x20000000, .flag = PMB_SZ_512M, }, | 481 | return 0; |
215 | { .size = 0x08000000, .flag = PMB_SZ_128M, }, | 482 | } |
216 | { .size = 0x04000000, .flag = PMB_SZ_64M, }, | 483 | |
217 | { .size = 0x01000000, .flag = PMB_SZ_16M, }, | 484 | return -EINVAL; |
218 | }; | 485 | } |
219 | 486 | ||
220 | long pmb_remap(unsigned long vaddr, unsigned long phys, | 487 | static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) |
221 | unsigned long size, unsigned long flags) | ||
222 | { | 488 | { |
223 | struct pmb_entry *pmbp, *pmbe; | 489 | do { |
224 | unsigned long wanted; | 490 | struct pmb_entry *pmblink = pmbe; |
225 | int pmb_flags, i; | ||
226 | long err; | ||
227 | |||
228 | /* Convert typical pgprot value to the PMB equivalent */ | ||
229 | if (flags & _PAGE_CACHABLE) { | ||
230 | if (flags & _PAGE_WT) | ||
231 | pmb_flags = PMB_WT; | ||
232 | else | ||
233 | pmb_flags = PMB_C; | ||
234 | } else | ||
235 | pmb_flags = PMB_WT | PMB_UB; | ||
236 | 491 | ||
237 | pmbp = NULL; | 492 | /* |
238 | wanted = size; | 493 | * We may be called before this pmb_entry has been |
494 | * entered into the PMB table via set_pmb_entry(), but | ||
495 | * that's OK because we've allocated a unique slot for | ||
496 | * this entry in pmb_alloc() (even if we haven't filled | ||
497 | * it yet). | ||
498 | * | ||
499 | * Therefore, calling __clear_pmb_entry() is safe as no | ||
500 | * other mapping can be using that slot. | ||
501 | */ | ||
502 | __clear_pmb_entry(pmbe); | ||
503 | |||
504 | flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size); | ||
505 | |||
506 | pmbe = pmblink->link; | ||
239 | 507 | ||
240 | again: | 508 | pmb_free(pmblink); |
241 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 509 | } while (pmbe && --depth); |
242 | int ret; | 510 | } |
511 | |||
512 | static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) | ||
513 | { | ||
514 | unsigned long flags; | ||
515 | |||
516 | if (unlikely(!pmbe)) | ||
517 | return; | ||
518 | |||
519 | write_lock_irqsave(&pmb_rwlock, flags); | ||
520 | __pmb_unmap_entry(pmbe, depth); | ||
521 | write_unlock_irqrestore(&pmb_rwlock, flags); | ||
522 | } | ||
243 | 523 | ||
244 | if (size < pmb_sizes[i].size) | 524 | static void __init pmb_notify(void) |
525 | { | ||
526 | int i; | ||
527 | |||
528 | pr_info("PMB: boot mappings:\n"); | ||
529 | |||
530 | read_lock(&pmb_rwlock); | ||
531 | |||
532 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { | ||
533 | struct pmb_entry *pmbe; | ||
534 | |||
535 | if (!test_bit(i, pmb_map)) | ||
245 | continue; | 536 | continue; |
246 | 537 | ||
247 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag); | 538 | pmbe = &pmb_entry_list[i]; |
248 | if (IS_ERR(pmbe)) { | ||
249 | err = PTR_ERR(pmbe); | ||
250 | goto out; | ||
251 | } | ||
252 | 539 | ||
253 | ret = set_pmb_entry(pmbe); | 540 | pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", |
254 | if (ret != 0) { | 541 | pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, |
255 | pmb_free(pmbe); | 542 | pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); |
256 | err = -EBUSY; | 543 | } |
257 | goto out; | ||
258 | } | ||
259 | 544 | ||
260 | phys += pmb_sizes[i].size; | 545 | read_unlock(&pmb_rwlock); |
261 | vaddr += pmb_sizes[i].size; | 546 | } |
262 | size -= pmb_sizes[i].size; | 547 | |
548 | /* | ||
549 | * Sync our software copy of the PMB mappings with those in hardware. The | ||
550 | * mappings in the hardware PMB were either set up by the bootloader or | ||
551 | * very early on by the kernel. | ||
552 | */ | ||
553 | static void __init pmb_synchronize(void) | ||
554 | { | ||
555 | struct pmb_entry *pmbp = NULL; | ||
556 | int i, j; | ||
557 | |||
558 | /* | ||
559 | * Run through the initial boot mappings, log the established | ||
560 | * ones, and blow away anything that falls outside of the valid | ||
561 | * PPN range. Specifically, we only care about existing mappings | ||
562 | * that impact the cached/uncached sections. | ||
563 | * | ||
564 | * Note that touching these can be a bit of a minefield; the boot | ||
565 | * loader can establish multi-page mappings with the same caching | ||
566 | * attributes, so we need to ensure that we aren't modifying a | ||
567 | * mapping that we're presently executing from, or may execute | ||
568 | * from in the case of straddling page boundaries. | ||
569 | * | ||
570 | * In the future we will have to tidy up after the boot loader by | ||
571 | * jumping between the cached and uncached mappings and tearing | ||
572 | * down alternating mappings while executing from the other. | ||
573 | */ | ||
574 | for (i = 0; i < NR_PMB_ENTRIES; i++) { | ||
575 | unsigned long addr, data; | ||
576 | unsigned long addr_val, data_val; | ||
577 | unsigned long ppn, vpn, flags; | ||
578 | unsigned long irqflags; | ||
579 | unsigned int size; | ||
580 | struct pmb_entry *pmbe; | ||
581 | |||
582 | addr = mk_pmb_addr(i); | ||
583 | data = mk_pmb_data(i); | ||
584 | |||
585 | addr_val = __raw_readl(addr); | ||
586 | data_val = __raw_readl(data); | ||
263 | 587 | ||
264 | /* | 588 | /* |
265 | * Link adjacent entries that span multiple PMB entries | 589 | * Skip over any bogus entries |
266 | * for easier tear-down. | ||
267 | */ | 590 | */ |
268 | if (likely(pmbp)) | 591 | if (!(data_val & PMB_V) || !(addr_val & PMB_V)) |
269 | pmbp->link = pmbe; | 592 | continue; |
270 | 593 | ||
271 | pmbp = pmbe; | 594 | ppn = data_val & PMB_PFN_MASK; |
595 | vpn = addr_val & PMB_PFN_MASK; | ||
272 | 596 | ||
273 | /* | 597 | /* |
274 | * Instead of trying smaller sizes on every iteration | 598 | * Only preserve in-range mappings. |
275 | * (even if we succeed in allocating space), try using | ||
276 | * pmb_sizes[i].size again. | ||
277 | */ | 599 | */ |
278 | i--; | 600 | if (!pmb_ppn_in_range(ppn)) { |
279 | } | 601 | /* |
602 | * Invalidate anything out of bounds. | ||
603 | */ | ||
604 | writel_uncached(addr_val & ~PMB_V, addr); | ||
605 | writel_uncached(data_val & ~PMB_V, data); | ||
606 | continue; | ||
607 | } | ||
608 | |||
609 | /* | ||
610 | * Update the caching attributes if necessary | ||
611 | */ | ||
612 | if (data_val & PMB_C) { | ||
613 | data_val &= ~PMB_CACHE_MASK; | ||
614 | data_val |= pmb_cache_flags(); | ||
280 | 615 | ||
281 | if (size >= 0x1000000) | 616 | writel_uncached(data_val, data); |
282 | goto again; | 617 | } |
283 | 618 | ||
284 | return wanted - size; | 619 | size = data_val & PMB_SZ_MASK; |
620 | flags = size | (data_val & PMB_CACHE_MASK); | ||
285 | 621 | ||
286 | out: | 622 | pmbe = pmb_alloc(vpn, ppn, flags, i); |
287 | if (pmbp) | 623 | if (IS_ERR(pmbe)) { |
288 | __pmb_unmap(pmbp); | 624 | WARN_ON_ONCE(1); |
625 | continue; | ||
626 | } | ||
627 | |||
628 | spin_lock_irqsave(&pmbe->lock, irqflags); | ||
629 | |||
630 | for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) | ||
631 | if (pmb_sizes[j].flag == size) | ||
632 | pmbe->size = pmb_sizes[j].size; | ||
633 | |||
634 | if (pmbp) { | ||
635 | spin_lock(&pmbp->lock); | ||
289 | 636 | ||
290 | return err; | 637 | /* |
638 | * Compare the previous entry against the current one to | ||
639 | * see if the entries span a contiguous mapping. If so, | ||
640 | * setup the entry links accordingly. Compound mappings | ||
641 | * are later coalesced. | ||
642 | */ | ||
643 | if (pmb_can_merge(pmbp, pmbe)) | ||
644 | pmbp->link = pmbe; | ||
645 | |||
646 | spin_unlock(&pmbp->lock); | ||
647 | } | ||
648 | |||
649 | pmbp = pmbe; | ||
650 | |||
651 | spin_unlock_irqrestore(&pmbe->lock, irqflags); | ||
652 | } | ||
291 | } | 653 | } |
292 | 654 | ||
293 | void pmb_unmap(unsigned long addr) | 655 | static void __init pmb_merge(struct pmb_entry *head) |
294 | { | 656 | { |
295 | struct pmb_entry **p, *pmbe; | 657 | unsigned long span, newsize; |
658 | struct pmb_entry *tail; | ||
659 | int i = 1, depth = 0; | ||
660 | |||
661 | span = newsize = head->size; | ||
662 | |||
663 | tail = head->link; | ||
664 | while (tail) { | ||
665 | span += tail->size; | ||
296 | 666 | ||
297 | for (p = &pmb_list; (pmbe = *p); p = &pmbe->next) | 667 | if (pmb_size_valid(span)) { |
298 | if (pmbe->vpn == addr) | 668 | newsize = span; |
669 | depth = i; | ||
670 | } | ||
671 | |||
672 | /* This is the end of the line.. */ | ||
673 | if (!tail->link) | ||
299 | break; | 674 | break; |
300 | 675 | ||
301 | if (unlikely(!pmbe)) | 676 | tail = tail->link; |
677 | i++; | ||
678 | } | ||
679 | |||
680 | /* | ||
681 | * The merged page size must be valid. | ||
682 | */ | ||
683 | if (!pmb_size_valid(newsize)) | ||
302 | return; | 684 | return; |
303 | 685 | ||
304 | __pmb_unmap(pmbe); | 686 | head->flags &= ~PMB_SZ_MASK; |
687 | head->flags |= pmb_size_to_flags(newsize); | ||
688 | |||
689 | head->size = newsize; | ||
690 | |||
691 | __pmb_unmap_entry(head->link, depth); | ||
692 | __set_pmb_entry(head); | ||
305 | } | 693 | } |
306 | 694 | ||
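Note: pmb_merge() only commits a coalesce when the running span lands exactly on a valid PMB page size. For example, with four linked 128MB entries covering a contiguous 512MB region:

    /* head = 128M
     * + link 1: span = 256M -> not in {16M, 64M, 128M, 512M}, keep going
     * + link 2: span = 384M -> invalid, keep going
     * + link 3: span = 512M -> valid: newsize = 512M, depth = 3
     *
     * head is rewritten with PMB_SZ_512M, the three trailing entries
     * are unmapped via __pmb_unmap_entry(head->link, 3), and one PMB
     * slot now covers what previously took four.
     */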
307 | static void __pmb_unmap(struct pmb_entry *pmbe) | 695 | static void __init pmb_coalesce(void) |
308 | { | 696 | { |
309 | WARN_ON(!test_bit(pmbe->entry, &pmb_map)); | 697 | unsigned long flags; |
698 | int i; | ||
310 | 699 | ||
311 | do { | 700 | write_lock_irqsave(&pmb_rwlock, flags); |
312 | struct pmb_entry *pmblink = pmbe; | ||
313 | 701 | ||
314 | if (pmbe->entry != PMB_NO_ENTRY) | 702 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
315 | clear_pmb_entry(pmbe); | 703 | struct pmb_entry *pmbe; |
316 | 704 | ||
317 | pmbe = pmblink->link; | 705 | if (!test_bit(i, pmb_map)) |
706 | continue; | ||
318 | 707 | ||
319 | pmb_free(pmblink); | 708 | pmbe = &pmb_entry_list[i]; |
320 | } while (pmbe); | 709 | |
710 | /* | ||
711 | * We're only interested in compound mappings | ||
712 | */ | ||
713 | if (!pmbe->link) | ||
714 | continue; | ||
715 | |||
716 | /* | ||
717 | * Nothing to do if it already uses the largest possible | ||
718 | * page size. | ||
719 | */ | ||
720 | if (pmbe->size == SZ_512M) | ||
721 | continue; | ||
722 | |||
723 | pmb_merge(pmbe); | ||
724 | } | ||
725 | |||
726 | write_unlock_irqrestore(&pmb_rwlock, flags); | ||
321 | } | 727 | } |
322 | 728 | ||
323 | static void pmb_cache_ctor(void *pmb) | 729 | #ifdef CONFIG_UNCACHED_MAPPING |
730 | static void __init pmb_resize(void) | ||
324 | { | 731 | { |
325 | struct pmb_entry *pmbe = pmb; | 732 | int i; |
733 | |||
734 | /* | ||
735 | * If the uncached mapping was constructed by the kernel, it will | ||
736 | * already be a reasonable size. | ||
737 | */ | ||
738 | if (uncached_size == SZ_16M) | ||
739 | return; | ||
740 | |||
741 | read_lock(&pmb_rwlock); | ||
326 | 742 | ||
327 | memset(pmb, 0, sizeof(struct pmb_entry)); | 743 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
744 | struct pmb_entry *pmbe; | ||
745 | unsigned long flags; | ||
328 | 746 | ||
329 | pmbe->entry = PMB_NO_ENTRY; | 747 | if (!test_bit(i, pmb_map)) |
748 | continue; | ||
749 | |||
750 | pmbe = &pmb_entry_list[i]; | ||
751 | |||
752 | if (pmbe->vpn != uncached_start) | ||
753 | continue; | ||
754 | |||
755 | /* | ||
756 | * Found it, now resize it. | ||
757 | */ | ||
758 | spin_lock_irqsave(&pmbe->lock, flags); | ||
759 | |||
760 | pmbe->size = SZ_16M; | ||
761 | pmbe->flags &= ~PMB_SZ_MASK; | ||
762 | pmbe->flags |= pmb_size_to_flags(pmbe->size); | ||
763 | |||
764 | uncached_resize(pmbe->size); | ||
765 | |||
766 | __set_pmb_entry(pmbe); | ||
767 | |||
768 | spin_unlock_irqrestore(&pmbe->lock, flags); | ||
769 | } | ||
770 | |||
771 | read_unlock(&pmb_rwlock); | ||
330 | } | 772 | } |
773 | #endif | ||
331 | 774 | ||
332 | static int __uses_jump_to_uncached pmb_init(void) | 775 | static int __init early_pmb(char *p) |
333 | { | 776 | { |
334 | unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); | 777 | if (!p) |
335 | unsigned int entry, i; | 778 | return 0; |
336 | 779 | ||
337 | BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); | 780 | if (strstr(p, "iomap")) |
781 | pmb_iomapping_enabled = 1; | ||
338 | 782 | ||
339 | pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, | 783 | return 0; |
340 | SLAB_PANIC, pmb_cache_ctor); | 784 | } |
785 | early_param("pmb", early_pmb); | ||
341 | 786 | ||
342 | jump_to_uncached(); | 787 | void __init pmb_init(void) |
788 | { | ||
789 | /* Synchronize software state */ | ||
790 | pmb_synchronize(); | ||
343 | 791 | ||
344 | /* | 792 | /* Attempt to combine compound mappings */ |
345 | * Ordering is important, P2 must be mapped in the PMB before we | 793 | pmb_coalesce(); |
346 | * can set PMB.SE, and P1 must be mapped before we jump back to | ||
347 | * P1 space. | ||
348 | */ | ||
349 | for (entry = 0; entry < nr_entries; entry++) { | ||
350 | struct pmb_entry *pmbe = pmb_init_map + entry; | ||
351 | 794 | ||
352 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry); | 795 | #ifdef CONFIG_UNCACHED_MAPPING |
353 | } | 796 | /* Resize initial mappings, if necessary */ |
797 | pmb_resize(); | ||
798 | #endif | ||
354 | 799 | ||
355 | ctrl_outl(0, PMB_IRMCR); | 800 | /* Log them */ |
801 | pmb_notify(); | ||
356 | 802 | ||
357 | /* PMB.SE and UB[7] */ | 803 | writel_uncached(0, PMB_IRMCR); |
358 | ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR); | ||
359 | 804 | ||
360 | /* Flush out the TLB */ | 805 | /* Flush out the TLB */ |
361 | i = ctrl_inl(MMUCR); | 806 | local_flush_tlb_all(); |
362 | i |= MMUCR_TI; | 807 | ctrl_barrier(); |
363 | ctrl_outl(i, MMUCR); | 808 | } |
364 | |||
365 | back_to_cached(); | ||
366 | 809 | ||
367 | return 0; | 810 | bool __in_29bit_mode(void) |
811 | { | ||
812 | return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0; | ||
368 | } | 813 | } |
369 | arch_initcall(pmb_init); | ||
370 | 814 | ||
371 | static int pmb_seq_show(struct seq_file *file, void *iter) | 815 | static int pmb_seq_show(struct seq_file *file, void *iter) |
372 | { | 816 | { |
@@ -381,8 +825,8 @@ static int pmb_seq_show(struct seq_file *file, void *iter) | |||
381 | unsigned int size; | 825 | unsigned int size; |
382 | char *sz_str = NULL; | 826 | char *sz_str = NULL; |
383 | 827 | ||
384 | addr = ctrl_inl(mk_pmb_addr(i)); | 828 | addr = __raw_readl(mk_pmb_addr(i)); |
385 | data = ctrl_inl(mk_pmb_data(i)); | 829 | data = __raw_readl(mk_pmb_data(i)); |
386 | 830 | ||
387 | size = data & PMB_SZ_MASK; | 831 | size = data & PMB_SZ_MASK; |
388 | sz_str = (size == PMB_SZ_16M) ? " 16MB": | 832 | sz_str = (size == PMB_SZ_16M) ? " 16MB": |
@@ -428,23 +872,33 @@ static int __init pmb_debugfs_init(void) | |||
428 | 872 | ||
429 | return 0; | 873 | return 0; |
430 | } | 874 | } |
431 | postcore_initcall(pmb_debugfs_init); | 875 | subsys_initcall(pmb_debugfs_init); |
432 | 876 | ||
433 | #ifdef CONFIG_PM | 877 | #ifdef CONFIG_PM |
434 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) | 878 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) |
435 | { | 879 | { |
436 | static pm_message_t prev_state; | 880 | static pm_message_t prev_state; |
881 | int i; | ||
437 | 882 | ||
438 | /* Restore the PMB after a resume from hibernation */ | 883 | /* Restore the PMB after a resume from hibernation */ |
439 | if (state.event == PM_EVENT_ON && | 884 | if (state.event == PM_EVENT_ON && |
440 | prev_state.event == PM_EVENT_FREEZE) { | 885 | prev_state.event == PM_EVENT_FREEZE) { |
441 | struct pmb_entry *pmbe; | 886 | struct pmb_entry *pmbe; |
442 | spin_lock_irq(&pmb_list_lock); | 887 | |
443 | for (pmbe = pmb_list; pmbe; pmbe = pmbe->next) | 888 | read_lock(&pmb_rwlock); |
444 | set_pmb_entry(pmbe); | 889 | |
445 | spin_unlock_irq(&pmb_list_lock); | 890 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
891 | if (test_bit(i, pmb_map)) { | ||
892 | pmbe = &pmb_entry_list[i]; | ||
893 | set_pmb_entry(pmbe); | ||
894 | } | ||
895 | } | ||
896 | |||
897 | read_unlock(&pmb_rwlock); | ||
446 | } | 898 | } |
899 | |||
447 | prev_state = state; | 900 | prev_state = state; |
901 | |||
448 | return 0; | 902 | return 0; |
449 | } | 903 | } |
450 | 904 | ||
@@ -462,6 +916,5 @@ static int __init pmb_sysdev_init(void) | |||
462 | { | 916 | { |
463 | return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); | 917 | return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); |
464 | } | 918 | } |
465 | |||
466 | subsys_initcall(pmb_sysdev_init); | 919 | subsys_initcall(pmb_sysdev_init); |
467 | #endif | 920 | #endif |
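Note: pmb_remap_caller() bails out unless pmb_iomapping_enabled is set, so PMB-backed remapping of large (>= 16MB) regions is strictly opt-in via the early parameter parsed by early_pmb() above. On the kernel command line:

    pmb=iomap

Smaller requests are rejected with -EINVAL either way and continue to go through the ordinary TLB-backed ioremap path, as the "Small mappings need to go through the TLB" comment notes.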
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index 409b7c2b4b9d..b71db6af8060 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c | |||
@@ -68,11 +68,40 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
68 | * in extended mode, the legacy 8-bit ASID field in address array 1 has | 68 | * in extended mode, the legacy 8-bit ASID field in address array 1 has |
69 | * undefined behaviour. | 69 | * undefined behaviour. |
70 | */ | 70 | */ |
71 | void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, | 71 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
72 | unsigned long page) | ||
73 | { | 72 | { |
74 | jump_to_uncached(); | 73 | jump_to_uncached(); |
75 | __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); | 74 | __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); |
76 | __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); | 75 | __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); |
76 | __raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT); | ||
77 | __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT); | ||
77 | back_to_cached(); | 78 | back_to_cached(); |
78 | } | 79 | } |
80 | |||
81 | void local_flush_tlb_all(void) | ||
82 | { | ||
83 | unsigned long flags, status; | ||
84 | int i; | ||
85 | |||
86 | /* | ||
87 | * Flush all the TLB. | ||
88 | */ | ||
89 | local_irq_save(flags); | ||
90 | jump_to_uncached(); | ||
91 | |||
92 | status = __raw_readl(MMUCR); | ||
93 | status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT); | ||
94 | |||
95 | if (status == 0) | ||
96 | status = MMUCR_URB_NENTRIES; | ||
97 | |||
98 | for (i = 0; i < status; i++) | ||
99 | __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8)); | ||
100 | |||
101 | for (i = 0; i < 4; i++) | ||
102 | __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8)); | ||
103 | |||
104 | back_to_cached(); | ||
105 | ctrl_barrier(); | ||
106 | local_irq_restore(flags); | ||
107 | } | ||
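Note: the new local_flush_tlb_all() sizes its loop from MMUCR.URB, the replacement boundary, so entries wired via tlb_wire_entry() (see tlb-urb.c below) survive the flush. A sketch of the indexing, with an illustrative URB value on a 64-entry UTLB:

    /* Entry i of the UTLB address array sits at a fixed stride:
     *   MMU_UTLB_ADDRESS_ARRAY | (i << 8)
     * Writing 0 clears the valid bit. With URB = 60, entries 0..59
     * are invalidated while the wired slots 60..63 are preserved;
     * URB = 0 means nothing is wired, so all entries are flushed.
     */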
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index ace8e6d2f59d..7a940dbfc2e9 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c | |||
@@ -41,14 +41,14 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
41 | 41 | ||
42 | /* Set PTEH register */ | 42 | /* Set PTEH register */ |
43 | vpn = (address & MMU_VPN_MASK) | get_asid(); | 43 | vpn = (address & MMU_VPN_MASK) | get_asid(); |
44 | ctrl_outl(vpn, MMU_PTEH); | 44 | __raw_writel(vpn, MMU_PTEH); |
45 | 45 | ||
46 | pteval = pte_val(pte); | 46 | pteval = pte_val(pte); |
47 | 47 | ||
48 | /* Set PTEL register */ | 48 | /* Set PTEL register */ |
49 | pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ | 49 | pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ |
50 | /* conveniently, we want all the software flags to be 0 anyway */ | 50 | /* conveniently, we want all the software flags to be 0 anyway */ |
51 | ctrl_outl(pteval, MMU_PTEL); | 51 | __raw_writel(pteval, MMU_PTEL); |
52 | 52 | ||
53 | /* Load the TLB */ | 53 | /* Load the TLB */ |
54 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); | 54 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); |
@@ -75,5 +75,24 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) | |||
75 | } | 75 | } |
76 | 76 | ||
77 | for (i = 0; i < ways; i++) | 77 | for (i = 0; i < ways; i++) |
78 | ctrl_outl(data, addr + (i << 8)); | 78 | __raw_writel(data, addr + (i << 8)); |
79 | } | ||
80 | |||
81 | void local_flush_tlb_all(void) | ||
82 | { | ||
83 | unsigned long flags, status; | ||
84 | |||
85 | /* | ||
86 | * Flush all the TLB. | ||
87 | * | ||
88 | * Write to the MMU control register's bit: | ||
89 | * TF-bit for SH-3, TI-bit for SH-4. | ||
90 | * It's same position, bit #2. | ||
91 | */ | ||
92 | local_irq_save(flags); | ||
93 | status = __raw_readl(MMUCR); | ||
94 | status |= 0x04; | ||
95 | __raw_writel(status, MMUCR); | ||
96 | ctrl_barrier(); | ||
97 | local_irq_restore(flags); | ||
79 | } | 98 | } |
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index 8cf550e2570f..cfdf7930d294 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c | |||
@@ -29,7 +29,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
29 | 29 | ||
30 | /* Set PTEH register */ | 30 | /* Set PTEH register */ |
31 | vpn = (address & MMU_VPN_MASK) | get_asid(); | 31 | vpn = (address & MMU_VPN_MASK) | get_asid(); |
32 | ctrl_outl(vpn, MMU_PTEH); | 32 | __raw_writel(vpn, MMU_PTEH); |
33 | 33 | ||
34 | pteval = pte.pte_low; | 34 | pteval = pte.pte_low; |
35 | 35 | ||
@@ -41,13 +41,13 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
41 | * the protection bits (with the exception of the compat-mode SZ | 41 | * the protection bits (with the exception of the compat-mode SZ |
42 | * and PR bits, which are cleared) being written out in PTEL. | 42 | * and PR bits, which are cleared) being written out in PTEL. |
43 | */ | 43 | */ |
44 | ctrl_outl(pte.pte_high, MMU_PTEA); | 44 | __raw_writel(pte.pte_high, MMU_PTEA); |
45 | #else | 45 | #else |
46 | if (cpu_data->flags & CPU_HAS_PTEA) { | 46 | if (cpu_data->flags & CPU_HAS_PTEA) { |
47 | /* The last 3 bits and the first one of pteval contains | 47 | /* The last 3 bits and the first one of pteval contains |
48 | * the PTEA timing control and space attribute bits | 48 | * the PTEA timing control and space attribute bits |
49 | */ | 49 | */ |
50 | ctrl_outl(copy_ptea_attributes(pteval), MMU_PTEA); | 50 | __raw_writel(copy_ptea_attributes(pteval), MMU_PTEA); |
51 | } | 51 | } |
52 | #endif | 52 | #endif |
53 | 53 | ||
@@ -57,15 +57,14 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
57 | pteval |= _PAGE_WT; | 57 | pteval |= _PAGE_WT; |
58 | #endif | 58 | #endif |
59 | /* conveniently, we want all the software flags to be 0 anyway */ | 59 | /* conveniently, we want all the software flags to be 0 anyway */ |
60 | ctrl_outl(pteval, MMU_PTEL); | 60 | __raw_writel(pteval, MMU_PTEL); |
61 | 61 | ||
62 | /* Load the TLB */ | 62 | /* Load the TLB */ |
63 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); | 63 | asm volatile("ldtlb": /* no output */ : /* no input */ : "memory"); |
64 | local_irq_restore(flags); | 64 | local_irq_restore(flags); |
65 | } | 65 | } |
66 | 66 | ||
67 | void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, | 67 | void local_flush_tlb_one(unsigned long asid, unsigned long page) |
68 | unsigned long page) | ||
69 | { | 68 | { |
70 | unsigned long addr, data; | 69 | unsigned long addr, data; |
71 | 70 | ||
@@ -78,6 +77,34 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid, | |||
78 | addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT; | 77 | addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT; |
79 | data = page | asid; /* VALID bit is off */ | 78 | data = page | asid; /* VALID bit is off */ |
80 | jump_to_uncached(); | 79 | jump_to_uncached(); |
81 | ctrl_outl(data, addr); | 80 | __raw_writel(data, addr); |
82 | back_to_cached(); | 81 | back_to_cached(); |
83 | } | 82 | } |
83 | |||
84 | void local_flush_tlb_all(void) | ||
85 | { | ||
86 | unsigned long flags, status; | ||
87 | int i; | ||
88 | |||
89 | /* | ||
90 | * Flush all the TLB. | ||
91 | */ | ||
92 | local_irq_save(flags); | ||
93 | jump_to_uncached(); | ||
94 | |||
95 | status = __raw_readl(MMUCR); | ||
96 | status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT); | ||
97 | |||
98 | if (status == 0) | ||
99 | status = MMUCR_URB_NENTRIES; | ||
100 | |||
101 | for (i = 0; i < status; i++) | ||
102 | __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8)); | ||
103 | |||
104 | for (i = 0; i < 4; i++) | ||
105 | __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8)); | ||
106 | |||
107 | back_to_cached(); | ||
108 | ctrl_barrier(); | ||
109 | local_irq_restore(flags); | ||
110 | } | ||
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c index fdb64e41ec50..f27dbe1c1599 100644 --- a/arch/sh/mm/tlb-sh5.c +++ b/arch/sh/mm/tlb-sh5.c | |||
@@ -143,3 +143,42 @@ void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, | |||
143 | */ | 143 | */ |
144 | void sh64_teardown_tlb_slot(unsigned long long config_addr) | 144 | void sh64_teardown_tlb_slot(unsigned long long config_addr) |
145 | __attribute__ ((alias("__flush_tlb_slot"))); | 145 | __attribute__ ((alias("__flush_tlb_slot"))); |
146 | |||
147 | static int dtlb_entry; | ||
148 | static unsigned long long dtlb_entries[64]; | ||
149 | |||
150 | void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | ||
151 | { | ||
152 | unsigned long long entry; | ||
153 | unsigned long paddr, flags; | ||
154 | |||
155 | BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries)); | ||
156 | |||
157 | local_irq_save(flags); | ||
158 | |||
159 | entry = sh64_get_wired_dtlb_entry(); | ||
160 | dtlb_entries[dtlb_entry++] = entry; | ||
161 | |||
162 | paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK; | ||
163 | paddr &= ~PAGE_MASK; | ||
164 | |||
165 | sh64_setup_tlb_slot(entry, addr, get_asid(), paddr); | ||
166 | |||
167 | local_irq_restore(flags); | ||
168 | } | ||
169 | |||
170 | void tlb_unwire_entry(void) | ||
171 | { | ||
172 | unsigned long long entry; | ||
173 | unsigned long flags; | ||
174 | |||
175 | BUG_ON(!dtlb_entry); | ||
176 | |||
177 | local_irq_save(flags); | ||
178 | entry = dtlb_entries[--dtlb_entry]; | ||
179 | |||
180 | sh64_teardown_tlb_slot(entry); | ||
181 | sh64_put_wired_dtlb_entry(entry); | ||
182 | |||
183 | local_irq_restore(flags); | ||
184 | } | ||
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c new file mode 100644 index 000000000000..c92ce20db39b --- /dev/null +++ b/arch/sh/mm/tlb-urb.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/tlb-urb.c | ||
3 | * | ||
4 | * TLB entry wiring helpers for URB-equipped parts. | ||
5 | * | ||
6 | * Copyright (C) 2010 Matt Fleming | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <asm/tlb.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | |||
17 | /* | ||
18 | * Load the entry for 'addr' into the TLB and wire the entry. | ||
19 | */ | ||
20 | void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | ||
21 | { | ||
22 | unsigned long status, flags; | ||
23 | int urb; | ||
24 | |||
25 | local_irq_save(flags); | ||
26 | |||
27 | status = __raw_readl(MMUCR); | ||
28 | urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT; | ||
29 | status &= ~MMUCR_URC; | ||
30 | |||
31 | /* | ||
32 | * Make sure we're not trying to wire the last TLB entry slot. | ||
33 | */ | ||
34 | BUG_ON(!--urb); | ||
35 | |||
36 | urb = urb % MMUCR_URB_NENTRIES; | ||
37 | |||
38 | /* | ||
39 | * Insert this entry into the highest non-wired TLB slot (via | ||
40 | * the URC field). | ||
41 | */ | ||
42 | status |= (urb << MMUCR_URC_SHIFT); | ||
43 | __raw_writel(status, MMUCR); | ||
44 | ctrl_barrier(); | ||
45 | |||
46 | /* Load the entry into the TLB */ | ||
47 | __update_tlb(vma, addr, pte); | ||
48 | |||
49 | /* ... and wire it up. */ | ||
50 | status = __raw_readl(MMUCR); | ||
51 | |||
52 | status &= ~MMUCR_URB; | ||
53 | status |= (urb << MMUCR_URB_SHIFT); | ||
54 | |||
55 | __raw_writel(status, MMUCR); | ||
56 | ctrl_barrier(); | ||
57 | |||
58 | local_irq_restore(flags); | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * Unwire the last wired TLB entry. | ||
63 | * | ||
64 | * It should also be noted that it is not possible to wire and unwire | ||
65 | * TLB entries in an arbitrary order. If you wire TLB entry N, followed | ||
66 | * by entry N+1, you must unwire entry N+1 first, then entry N. In this | ||
67 | * respect, it works like a stack or LIFO queue. | ||
68 | */ | ||
69 | void tlb_unwire_entry(void) | ||
70 | { | ||
71 | unsigned long status, flags; | ||
72 | int urb; | ||
73 | |||
74 | local_irq_save(flags); | ||
75 | |||
76 | status = __raw_readl(MMUCR); | ||
77 | urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT; | ||
78 | status &= ~MMUCR_URB; | ||
79 | |||
80 | /* | ||
81 | * Make sure we're not trying to unwire a TLB entry when none | ||
82 | * have been wired. | ||
83 | */ | ||
84 | BUG_ON(urb++ == MMUCR_URB_NENTRIES); | ||
85 | |||
86 | urb = urb % MMUCR_URB_NENTRIES; | ||
87 | |||
88 | status |= (urb << MMUCR_URB_SHIFT); | ||
89 | __raw_writel(status, MMUCR); | ||
90 | ctrl_barrier(); | ||
91 | |||
92 | local_irq_restore(flags); | ||
93 | } | ||
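Note: as the comment above spells out, wiring is strictly LIFO, since each wire/unwire simply moves the URB boundary by one. A hedged usage sketch (vma/pte arguments are placeholders; illustration only):

    tlb_wire_entry(vma, addr_a, pte_a);   /* wires slot N   */
    tlb_wire_entry(vma, addr_b, pte_b);   /* wires slot N+1 */
    /* ... critical section that must not fault on a or b ... */
    tlb_unwire_entry();                   /* must drop N+1 first */
    tlb_unwire_entry();                   /* then N */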
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c index 6f45c1f8a7fe..3fbe03ce8fe3 100644 --- a/arch/sh/mm/tlbflush_32.c +++ b/arch/sh/mm/tlbflush_32.c | |||
@@ -119,22 +119,3 @@ void local_flush_tlb_mm(struct mm_struct *mm) | |||
119 | local_irq_restore(flags); | 119 | local_irq_restore(flags); |
120 | } | 120 | } |
121 | } | 121 | } |
122 | |||
123 | void local_flush_tlb_all(void) | ||
124 | { | ||
125 | unsigned long flags, status; | ||
126 | |||
127 | /* | ||
128 | * Flush all the TLB. | ||
129 | * | ||
130 | * Write to the MMU control register's bit: | ||
131 | * TF-bit for SH-3, TI-bit for SH-4. | ||
132 | * It's same position, bit #2. | ||
133 | */ | ||
134 | local_irq_save(flags); | ||
135 | status = ctrl_inl(MMUCR); | ||
136 | status |= 0x04; | ||
137 | ctrl_outl(status, MMUCR); | ||
138 | ctrl_barrier(); | ||
139 | local_irq_restore(flags); | ||
140 | } | ||
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index de0b0e881823..706da1d3a67a 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c | |||
@@ -36,7 +36,7 @@ extern void die(const char *,struct pt_regs *,long); | |||
36 | 36 | ||
37 | static inline void print_prots(pgprot_t prot) | 37 | static inline void print_prots(pgprot_t prot) |
38 | { | 38 | { |
39 | printk("prot is 0x%08lx\n",pgprot_val(prot)); | 39 | printk("prot is 0x%016llx\n",pgprot_val(prot)); |
40 | 40 | ||
41 | printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ), | 41 | printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ), |
42 | PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER)); | 42 | PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER)); |
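Note: the format fix above matters because pgprot_val() is a 64-bit quantity with the sh64 PTE layout; a 32-bit "%08lx" conversion reads only half of the varargs slot and misaligns anything printed after it. A minimal sketch:

    /* pgprot_val() is 64 bits wide here, so the conversion must be
     * a 64-bit one; "%016llx" prints the full value. */
    printk("prot is 0x%016llx\n", pgprot_val(prot));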
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c new file mode 100644 index 000000000000..8a4eca551fc0 --- /dev/null +++ b/arch/sh/mm/uncached.c | |||
@@ -0,0 +1,43 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <asm/sizes.h> | ||
4 | #include <asm/page.h> | ||
5 | #include <asm/addrspace.h> | ||
6 | |||
7 | /* | ||
8 | * This is the offset of the uncached section from its cached alias. | ||
9 | * | ||
10 | * Legacy platforms handle trivial transitions between cached and | ||
11 | * uncached segments by making use of the 1:1 mapping relationship in | ||
12 | * 512MB lowmem, others via a special uncached mapping. | ||
13 | * | ||
14 | * The default value is only valid in 29-bit mode; in 32-bit mode it is | ||
15 | * updated by the early PMB initialization code. | ||
16 | */ | ||
17 | unsigned long cached_to_uncached = SZ_512M; | ||
18 | unsigned long uncached_size = SZ_512M; | ||
19 | unsigned long uncached_start, uncached_end; | ||
20 | EXPORT_SYMBOL(uncached_start); | ||
21 | EXPORT_SYMBOL(uncached_end); | ||
22 | |||
23 | int virt_addr_uncached(unsigned long kaddr) | ||
24 | { | ||
25 | return (kaddr >= uncached_start) && (kaddr < uncached_end); | ||
26 | } | ||
27 | EXPORT_SYMBOL(virt_addr_uncached); | ||
28 | |||
29 | void __init uncached_init(void) | ||
30 | { | ||
31 | #ifdef CONFIG_29BIT | ||
32 | uncached_start = P2SEG; | ||
33 | #else | ||
34 | uncached_start = memory_end; | ||
35 | #endif | ||
36 | uncached_end = uncached_start + uncached_size; | ||
37 | } | ||
38 | |||
39 | void __init uncached_resize(unsigned long size) | ||
40 | { | ||
41 | uncached_size = size; | ||
42 | uncached_end = uncached_start + uncached_size; | ||
43 | } | ||
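Note: uncached.c keeps the cached-to-uncached relationship as a simple offset, which makes alias translation a one-line add. A hedged sketch of a helper this enables (the function name is hypothetical; cached_to_uncached is the variable defined above):

    /* Convert a cached kernel address to its uncached alias. In legacy
     * 29-bit mode this is the fixed 512MB P1 -> P2 distance, e.g.
     * 0x80001000 + 0x20000000 = 0xa0001000; in 32-bit PMB mode the
     * offset is rewritten early to point at the dedicated uncached
     * mapping instead. */
    static inline unsigned long to_uncached(unsigned long cached_vaddr)
    {
            return cached_vaddr + cached_to_uncached;
    }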