Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/boards/mach-landisk/gio.c  |  10
-rw-r--r--  arch/sh/mm/cache-sh4.c             |  26
-rw-r--r--  arch/sh/mm/cache.c                 |  10
3 files changed, 26 insertions, 20 deletions
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c
index 25cdf7358000..528013188196 100644
--- a/arch/sh/boards/mach-landisk/gio.c
+++ b/arch/sh/boards/mach-landisk/gio.c
@@ -14,7 +14,6 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/kdev_t.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
@@ -35,7 +34,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 	int minor;
 	int ret = -ENOENT;
 
-	lock_kernel();
+	preempt_disable();
 	minor = MINOR(inode->i_rdev);
 	if (minor < DEVCOUNT) {
 		if (openCnt > 0) {
@@ -45,7 +44,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 			ret = 0;
 		}
 	}
-	unlock_kernel();
+	preempt_enable();
 	return ret;
 }
 
@@ -60,8 +59,7 @@ static int gio_close(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int gio_ioctl(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
+static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int data;
 	static unsigned int addr = 0;
@@ -129,7 +127,7 @@ static const struct file_operations gio_fops = {
 	.owner = THIS_MODULE,
 	.open = gio_open,		/* open */
 	.release = gio_close,		/* release */
-	.ioctl = gio_ioctl,		/* ioctl */
+	.unlocked_ioctl = gio_ioctl,
 };
 
 static int __init gio_init(void)
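
The gio.c conversion follows the standard move away from the Big Kernel Lock: the old locked .ioctl hook (which took an inode argument and ran with the BKL held) becomes .unlocked_ioctl, and the lock_kernel()/unlock_kernel() pair in the open path is replaced by preempt_disable()/preempt_enable(). A minimal sketch of the resulting shape, using hypothetical demo_* names rather than the driver's own:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>

/* New-style handler: no inode argument and no BKL around the call;
 * any serialization it needs is its own responsibility. */
static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	default:
		return -ENOTTY;		/* hypothetical: no commands handled */
	}
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_ioctl,	/* replaces the locked .ioctl hook */
};
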
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 56dd55a1b13e..4a2fbf2864de 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -27,7 +27,7 @@
  */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
 /*
@@ -82,8 +82,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
 	local_irq_restore(flags);
 }
 
-static inline void flush_cache_4096(unsigned long start,
-				    unsigned long phys)
+static inline void flush_cache_one(unsigned long start, unsigned long phys)
 {
 	unsigned long flags, exec_offset = 0;
 
@@ -96,8 +95,8 @@ static inline void flush_cache_4096(unsigned long start,
 		exec_offset = cached_to_uncached;
 
 	local_irq_save(flags);
-	__flush_cache_4096(start | SH_CACHE_ASSOC,
-			   virt_to_phys(phys), exec_offset);
+	__flush_cache_one(start | SH_CACHE_ASSOC,
+			  virt_to_phys(phys), exec_offset);
 	local_irq_restore(flags);
 }
 
@@ -121,9 +120,9 @@ static void sh4_flush_dcache_page(void *arg)
 		int i, n;
 
 		/* Loop all the D-cache */
-		n = boot_cpu_data.dcache.way_incr >> 12;
-		for (i = 0; i < n; i++, addr += 4096)
-			flush_cache_4096(addr, phys);
+		n = boot_cpu_data.dcache.n_aliases;
+		for (i = 0; i <= n; i++, addr += PAGE_SIZE)
+			flush_cache_one(addr, phys);
 	}
 
 	wmb();
@@ -220,7 +219,7 @@ static void sh4_flush_cache_page(void *args)
 	void *vaddr;
 
 	vma = data->vma;
-	address = data->addr1;
+	address = data->addr1 & PAGE_MASK;
 	pfn = data->addr2;
 	phys = pfn << PAGE_SHIFT;
 	page = pfn_to_page(pfn);
@@ -228,7 +227,6 @@ static void sh4_flush_cache_page(void *args)
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
-	address &= PAGE_MASK;
 	pgd = pgd_offset(vma->vm_mm, address);
 	pud = pud_offset(pgd, address);
 	pmd = pmd_offset(pud, address);
@@ -257,7 +255,7 @@ static void sh4_flush_cache_page(void *args)
 	}
 
 	if (pages_do_alias(address, phys))
-		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY |
+		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
 				 (address & shm_align_mask), phys);
 
 	if (vma->vm_flags & VM_EXEC)
@@ -307,7 +305,7 @@ static void sh4_flush_cache_range(void *args)
 }
 
 /**
- * __flush_cache_4096
+ * __flush_cache_one
  *
  * @addr:  address in memory mapped cache array
  * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
@@ -320,7 +318,7 @@ static void sh4_flush_cache_range(void *args)
  * operation (purge/write-back) is selected by the lower 2 bits of
  * 'phys'.
  */
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset)
 {
 	int way_count;
@@ -357,7 +355,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 	 * pointless nead-of-loop check for 0 iterations.
 	 */
 	do {
-		ea = base_addr + 4096;
+		ea = base_addr + PAGE_SIZE;
 		a = base_addr;
 		p = phys;
 
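
Behaviourally, the main change in cache-sh4.c is the D-cache loop in sh4_flush_dcache_page(): rather than hard-coding 4096-byte steps, it now advances by PAGE_SIZE once per alias colour, which is what the *_one naming reflects (one page worth of cache per call). A standalone illustration of such an alias walk; the array base, colour count and physical address below are assumptions chosen for the example, not values read from hardware:

#include <stdio.h>

#define DEMO_OC_ADDRESS_ARRAY	0xf4000000UL	/* assumed operand-cache array base */
#define DEMO_PAGE_SHIFT		12
#define DEMO_PAGE_SIZE		(1UL << DEMO_PAGE_SHIFT)
#define DEMO_N_COLOURS		4		/* assumed number of D-cache alias colours */

int main(void)
{
	unsigned long phys = 0x0c123000UL;	/* assumed physical page to flush */
	unsigned long addr = DEMO_OC_ADDRESS_ARRAY;
	int i;

	/* One flush pass per page colour, one PAGE_SIZE step apart. */
	for (i = 0; i < DEMO_N_COLOURS; i++, addr += DEMO_PAGE_SIZE)
		printf("pass %d: cache array slot 0x%08lx, phys 0x%08lx\n",
		       i, addr, phys);

	return 0;
}
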
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e8810f7fc7ea..fc372a1d3132 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -271,6 +271,8 @@ static void __init emit_cache_params(void)
 
 void __init cpu_cache_init(void)
 {
+	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+
 	compute_alias(&boot_cpu_data.icache);
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
@@ -279,6 +281,13 @@ void __init cpu_cache_init(void)
 	__flush_purge_region		= noop__flush_region;
 	__flush_invalidate_region	= noop__flush_region;
 
+	/*
+	 * No flushing is necessary in the disabled cache case so we can
+	 * just keep the noop functions in local_flush_..() and __flush_..()
+	 */
+	if (unlikely(cache_disabled))
+		goto skip;
+
 	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
 		extern void __weak sh2_cache_init(void);
 
@@ -318,5 +327,6 @@ void __init cpu_cache_init(void)
 		sh5_cache_init();
 	}
 
+skip:
 	emit_cache_params();
 }
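
The cache.c hunks short-circuit cpu_cache_init(): when CCR reports the cache as disabled, the no-op flush handlers installed just above stay in place and the per-family setup is skipped via the new skip label. A condensed, self-contained sketch of that pattern; the demo_* names and the region-handler signature are illustrative assumptions, only the CCR test mirrors the diff:

#include <linux/io.h>
/* CCR and CCR_CACHE_ENABLE come from the SH cache headers, as in cache.c. */

static void demo_noop_flush_region(void *start, int size)
{
	/* nothing to write back or invalidate */
}

static void demo_sh4_flush_region(void *start, int size)
{
	/* stand-in for a real, CPU-specific writeback routine */
}

static void (*demo_flush_wback_region)(void *start, int size);

void demo_cache_init(void)
{
	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);

	/* Default the hook to the no-op... */
	demo_flush_wback_region = demo_noop_flush_region;

	/* ...and only install a real implementation when caching is on;
	 * with the cache disabled the no-op is already the right answer. */
	if (!cache_disabled)
		demo_flush_wback_region = demo_sh4_flush_region;
}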