author	Paul Mundt <lethal@linux-sh.org>	2009-08-24 09:49:17 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-24 09:49:17 -0400
commit	12cceb6251c2cd23e936b25eca66be99ba41b081 (patch)
tree	b7f62853e67b305519c375162760422fbfc81b8e /arch
parent	f13327864f94c3a0e6acca923df537d20059639f (diff)
parent	05ecd5a1f76c183cca381705b3adb7d77c9a0439 (diff)
Merge branch 'sh/st-integration'
Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/Kconfig	27
-rw-r--r--	arch/sh/boot/compressed/head_32.S	2
-rw-r--r--	arch/sh/drivers/pci/pci.c	4
-rw-r--r--	arch/sh/include/asm/Kbuild	2
-rw-r--r--	arch/sh/include/asm/cachectl.h	19
-rw-r--r--	arch/sh/include/asm/entry-macros.S	2
-rw-r--r--	arch/sh/include/asm/io.h	16
-rw-r--r--	arch/sh/include/asm/unistd_32.h	2
-rw-r--r--	arch/sh/include/asm/unistd_64.h	2
-rw-r--r--	arch/sh/kernel/cpu/irq/ipr.c	1
-rw-r--r--	arch/sh/kernel/cpu/sh3/entry.S	2
-rw-r--r--	arch/sh/kernel/entry-common.S	5
-rw-r--r--	arch/sh/kernel/io.c	97
-rw-r--r--	arch/sh/kernel/io_generic.c	50
-rw-r--r--	arch/sh/kernel/irq.c	2
-rw-r--r--	arch/sh/kernel/kgdb.c	2
-rw-r--r--	arch/sh/kernel/process_32.c	20
-rw-r--r--	arch/sh/kernel/setup.c	6
-rw-r--r--	arch/sh/kernel/signal_32.c	12
-rw-r--r--	arch/sh/kernel/sys_sh.c	43
-rw-r--r--	arch/sh/kernel/syscalls_32.S	2
-rw-r--r--	arch/sh/kernel/syscalls_64.S	2
-rw-r--r--	arch/sh/kernel/traps_32.c	188
-rw-r--r--	arch/sh/lib/clear_page.S	2
-rw-r--r--	arch/sh/lib/delay.c	5
-rw-r--r--	arch/sh/mm/cache-sh4.c	45
-rw-r--r--	arch/sh/mm/ioremap_32.c	8
27 files changed, 452 insertions, 116 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 80b4f9a743a1..2f5352c06a0e 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -767,12 +767,31 @@ config UBC_WAKEUP
 
 	  If unsure, say N.
 
-config CMDLINE_BOOL
-	bool "Default bootloader kernel arguments"
+choice
+	prompt "Kernel command line"
+	optional
+	default CMDLINE_OVERWRITE
+	help
+	  Setting this option allows the kernel command line arguments
+	  to be set.
+
+config CMDLINE_OVERWRITE
+	bool "Overwrite bootloader kernel arguments"
+	help
+	  Given string will overwrite any arguments passed in by
+	  a bootloader.
+
+config CMDLINE_EXTEND
+	bool "Extend bootloader kernel arguments"
+	help
+	  Given string will be concatenated with arguments passed in
+	  by a bootloader.
+
+endchoice
 
 config CMDLINE
-	string "Initial kernel command string"
-	depends on CMDLINE_BOOL
+	string "Kernel command line arguments string"
+	depends on CMDLINE_OVERWRITE || CMDLINE_EXTEND
 	default "console=ttySC1,115200"
 
 endmenu
diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S
index 06ac31f3be88..02a30935f0b9 100644
--- a/arch/sh/boot/compressed/head_32.S
+++ b/arch/sh/boot/compressed/head_32.S
@@ -22,7 +22,7 @@ startup:
 	bt	clear_bss
 	sub	r0, r2
 	mov.l	bss_start_addr, r0
-	mov	#0xe0, r1
+	mov	#0xffffffe0, r1
 	and	r1, r0			! align cache line
 	mov.l	text_start_addr, r3
 	mov	r0, r1
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 9a1c423ad167..c481df639022 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -295,6 +295,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 				  vma->vm_page_prot);
 }
 
+#ifndef CONFIG_GENERIC_IOMAP
+
 static void __iomem *ioport_map_pci(struct pci_dev *dev,
 				    unsigned long port, unsigned int nr)
 {
@@ -346,6 +348,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 }
 EXPORT_SYMBOL(pci_iounmap);
 
+#endif /* CONFIG_GENERIC_IOMAP */
+
 #ifdef CONFIG_HOTPLUG
 EXPORT_SYMBOL(pcibios_resource_to_bus);
 EXPORT_SYMBOL(pcibios_bus_to_resource);
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 43910cdf78a5..e121c30f797d 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,6 +1,6 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += cpu-features.h
+header-y += cachectl.h cpu-features.h
 
 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
diff --git a/arch/sh/include/asm/cachectl.h b/arch/sh/include/asm/cachectl.h
new file mode 100644
index 000000000000..6ffb4b7a212e
--- /dev/null
+++ b/arch/sh/include/asm/cachectl.h
@@ -0,0 +1,19 @@
+#ifndef _SH_CACHECTL_H
+#define _SH_CACHECTL_H
+
+/* Definitions for the cacheflush system call. */
+
+#define CACHEFLUSH_D_INVAL	0x1	/* invalidate (without write back) */
+#define CACHEFLUSH_D_WB		0x2	/* write back (without invalidate) */
+#define CACHEFLUSH_D_PURGE	0x3	/* writeback and invalidate */
+
+#define CACHEFLUSH_I		0x4
+
+/*
+ * Options for cacheflush system call
+ */
+#define ICACHE	CACHEFLUSH_I		/* flush instruction cache */
+#define DCACHE	CACHEFLUSH_D_PURGE	/* writeback and flush data cache */
+#define BCACHE	(ICACHE|DCACHE)		/* flush both caches */
+
+#endif /* _SH_CACHECTL_H */
diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S
index 64fd0de24daf..cc43a55e1fcf 100644
--- a/arch/sh/include/asm/entry-macros.S
+++ b/arch/sh/include/asm/entry-macros.S
@@ -7,7 +7,7 @@
 	.endm
 
 	.macro	sti
-	mov	#0xf0, r11
+	mov	#0xfffffff0, r11
 	extu.b	r11, r11
 	not	r11, r11
 	stc	sr, r10
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 25348141674b..5be45ea4dfec 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -92,8 +92,12 @@
 
 static inline void ctrl_delay(void)
 {
-#ifdef P2SEG
+#ifdef CONFIG_CPU_SH4
+	__raw_readw(CCN_PVR);
+#elif defined(P2SEG)
 	__raw_readw(P2SEG);
+#else
+#error "Need a dummy address for delay"
 #endif
 }
 
@@ -146,6 +150,7 @@ __BUILD_MEMORY_STRING(q, u64)
 #define readl_relaxed(a)	readl(a)
 #define readq_relaxed(a)	readq(a)
 
+#ifndef CONFIG_GENERIC_IOMAP
 /* Simple MMIO */
 #define ioread8(a)		__raw_readb(a)
 #define ioread16(a)		__raw_readw(a)
@@ -166,6 +171,15 @@ __BUILD_MEMORY_STRING(q, u64)
 #define iowrite8_rep(a, s, c)	__raw_writesb((a), (s), (c))
 #define iowrite16_rep(a, s, c)	__raw_writesw((a), (s), (c))
 #define iowrite32_rep(a, s, c)	__raw_writesl((a), (s), (c))
+#endif
+
+#define mmio_insb(p,d,c)	__raw_readsb(p,d,c)
+#define mmio_insw(p,d,c)	__raw_readsw(p,d,c)
+#define mmio_insl(p,d,c)	__raw_readsl(p,d,c)
+
+#define mmio_outsb(p,s,c)	__raw_writesb(p,s,c)
+#define mmio_outsw(p,s,c)	__raw_writesw(p,s,c)
+#define mmio_outsl(p,s,c)	__raw_writesl(p,s,c)
 
 /* synco on SH-4A, otherwise a nop */
 #define mmiowb()		wmb()
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 61d6ad93d786..925dd40d9d55 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -132,7 +132,7 @@
 #define __NR_clone		120
 #define __NR_setdomainname	121
 #define __NR_uname		122
-#define __NR_modify_ldt		123
+#define __NR_cacheflush		123
 #define __NR_adjtimex		124
 #define __NR_mprotect		125
 #define __NR_sigprocmask	126
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index a751699afda3..2b84bc916bc5 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -137,7 +137,7 @@
 #define __NR_clone		120
 #define __NR_setdomainname	121
 #define __NR_uname		122
-#define __NR_modify_ldt		123
+#define __NR_cacheflush		123
 #define __NR_adjtimex		124
 #define __NR_mprotect		125
 #define __NR_sigprocmask	126
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 808d99a48efb..c1508a90fc6a 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -35,6 +35,7 @@ static void disable_ipr_irq(unsigned int irq)
 	unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
 	/* Set the priority in IPR to 0 */
 	__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
+	(void)__raw_readw(addr);	/* Read back to flush write posting */
 }
 
 static void enable_ipr_irq(unsigned int irq)
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 8c19e21847d7..9421ec715fd2 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -257,7 +257,7 @@ restore_all:
 	!
 	! Calculate new SR value
 	mov	k3, k2			! original SR value
-	mov	#0xf0, k1
+	mov	#0xfffffff0, k1
 	extu.b	k1, k1
 	not	k1, k1
 	and	k1, k2			! Mask original SR value
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 700477601c6f..68d9223b145e 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -98,8 +98,9 @@ need_resched:
 
 	mov	#OFF_SR, r0
 	mov.l	@(r0,r15), r0		! get status register
-	and	#0xf0, r0		! interrupts off (exception path)?
-	cmp/eq	#0xf0, r0
+	shlr	r0
+	and	#(0xf0>>1), r0		! interrupts off (exception path)?
+	cmp/eq	#(0xf0>>1), r0
 	bt	noresched
 	mov.l	3f, r0
 	jsr	@r0			! call preempt_schedule_irq
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 4f85fffaa557..4770c241c679 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -1,12 +1,9 @@
 /*
- * linux/arch/sh/kernel/io.c
+ * arch/sh/kernel/io.c - Machine independent I/O functions.
  *
- * Copyright (C) 2000 Stuart Menefy
+ * Copyright (C) 2000 - 2009 Stuart Menefy
  * Copyright (C) 2005 Paul Mundt
  *
- * Provide real functions which expand to whatever the header file defined.
- * Also definitions of machine independent IO functions.
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -18,33 +15,87 @@
 
 /*
  * Copy data from IO memory space to "real" memory space.
- * This needs to be optimized.
  */
 void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
 {
-	unsigned char *p = to;
-	while (count) {
-		count--;
-		*p = readb(from);
-		p++;
-		from++;
-	}
+	/*
+	 * Would it be worthwhile doing byte and long transfers first
+	 * to try and get aligned?
+	 */
+#ifdef CONFIG_CPU_SH4
+	if ((count >= 0x20) &&
+	    (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
+		int tmp2, tmp3, tmp4, tmp5, tmp6;
+
+		__asm__ __volatile__(
+			"1:			\n\t"
+			"mov.l	@%7+, r0	\n\t"
+			"mov.l	@%7+, %2	\n\t"
+			"movca.l r0, @%0	\n\t"
+			"mov.l	@%7+, %3	\n\t"
+			"mov.l	@%7+, %4	\n\t"
+			"mov.l	@%7+, %5	\n\t"
+			"mov.l	@%7+, %6	\n\t"
+			"mov.l	@%7+, r7	\n\t"
+			"mov.l	@%7+, r0	\n\t"
+			"mov.l	%2, @(0x04,%0)	\n\t"
+			"mov	#0x20, %2	\n\t"
+			"mov.l	%3, @(0x08,%0)	\n\t"
+			"sub	%2, %1		\n\t"
+			"mov.l	%4, @(0x0c,%0)	\n\t"
+			"cmp/hi	%1, %2		! T if 32 > count	\n\t"
+			"mov.l	%5, @(0x10,%0)	\n\t"
+			"mov.l	%6, @(0x14,%0)	\n\t"
+			"mov.l	r7, @(0x18,%0)	\n\t"
+			"mov.l	r0, @(0x1c,%0)	\n\t"
+			"bf.s	1b		\n\t"
+			" add	#0x20, %0	\n\t"
+			: "=&r" (to), "=&r" (count),
+			  "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
+			  "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
+			: "7"(from), "0" (to), "1" (count)
+			: "r0", "r7", "t", "memory");
+	}
+#endif
+
+	if ((((u32)to | (u32)from) & 0x3) == 0) {
+		for (; count > 3; count -= 4) {
+			*(u32 *)to = *(volatile u32 *)from;
+			to += 4;
+			from += 4;
+		}
+	}
+
+	for (; count > 0; count--) {
+		*(u8 *)to = *(volatile u8 *)from;
+		to++;
+		from++;
+	}
+
+	mb();
 }
 EXPORT_SYMBOL(memcpy_fromio);
 
 /*
  * Copy data from "real" memory space to IO memory space.
- * This needs to be optimized.
  */
 void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
 {
-	const unsigned char *p = from;
-	while (count) {
-		count--;
-		writeb(*p, to);
-		p++;
-		to++;
-	}
+	if ((((u32)to | (u32)from) & 0x3) == 0) {
+		for ( ; count > 3; count -= 4) {
+			*(volatile u32 *)to = *(u32 *)from;
+			to += 4;
+			from += 4;
+		}
+	}
+
+	for (; count > 0; count--) {
+		*(volatile u8 *)to = *(u8 *)from;
+		to++;
+		from++;
+	}
+
+	mb();
 }
 EXPORT_SYMBOL(memcpy_toio);
 
@@ -62,6 +113,8 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
 }
 EXPORT_SYMBOL(memset_io);
 
+#ifndef CONFIG_GENERIC_IOMAP
+
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
 	void __iomem *ret;
@@ -79,3 +132,5 @@ void ioport_unmap(void __iomem *addr)
 	sh_mv.mv_ioport_unmap(addr);
 }
 EXPORT_SYMBOL(ioport_unmap);
+
+#endif /* CONFIG_GENERIC_IOMAP */
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
index 5a7f554d9ca1..4ff507239286 100644
--- a/arch/sh/kernel/io_generic.c
+++ b/arch/sh/kernel/io_generic.c
@@ -73,35 +73,19 @@ u32 generic_inl_p(unsigned long port)
 
 void generic_insb(unsigned long port, void *dst, unsigned long count)
 {
-	volatile u8 *port_addr;
-	u8 *buf = dst;
-
-	port_addr = (volatile u8 __force *)__ioport_map(port, 1);
-	while (count--)
-		*buf++ = *port_addr;
+	__raw_readsb(__ioport_map(port, 1), dst, count);
+	dummy_read();
 }
 
 void generic_insw(unsigned long port, void *dst, unsigned long count)
 {
-	volatile u16 *port_addr;
-	u16 *buf = dst;
-
-	port_addr = (volatile u16 __force *)__ioport_map(port, 2);
-	while (count--)
-		*buf++ = *port_addr;
-
+	__raw_readsw(__ioport_map(port, 2), dst, count);
 	dummy_read();
 }
 
 void generic_insl(unsigned long port, void *dst, unsigned long count)
 {
-	volatile u32 *port_addr;
-	u32 *buf = dst;
-
-	port_addr = (volatile u32 __force *)__ioport_map(port, 4);
-	while (count--)
-		*buf++ = *port_addr;
-
+	__raw_readsl(__ioport_map(port, 4), dst, count);
 	dummy_read();
 }
 
@@ -145,37 +129,19 @@ void generic_outl_p(u32 b, unsigned long port)
  */
 void generic_outsb(unsigned long port, const void *src, unsigned long count)
 {
-	volatile u8 *port_addr;
-	const u8 *buf = src;
-
-	port_addr = (volatile u8 __force *)__ioport_map(port, 1);
-
-	while (count--)
-		*port_addr = *buf++;
+	__raw_writesb(__ioport_map(port, 1), src, count);
+	dummy_read();
 }
 
 void generic_outsw(unsigned long port, const void *src, unsigned long count)
 {
-	volatile u16 *port_addr;
-	const u16 *buf = src;
-
-	port_addr = (volatile u16 __force *)__ioport_map(port, 2);
-
-	while (count--)
-		*port_addr = *buf++;
-
+	__raw_writesw(__ioport_map(port, 2), src, count);
 	dummy_read();
 }
 
 void generic_outsl(unsigned long port, const void *src, unsigned long count)
 {
-	volatile u32 *port_addr;
-	const u32 *buf = src;
-
-	port_addr = (volatile u32 __force *)__ioport_map(port, 4);
-	while (count--)
-		*port_addr = *buf++;
-
+	__raw_writesl(__ioport_map(port, 4), src, count);
 	dummy_read();
 }
 
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 278c68c60488..d1053392e287 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -114,7 +114,7 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
 #endif
 
 	irq_enter();
-	irq = irq_demux(intc_evt2irq(irq));
+	irq = irq_demux(evt2irq(irq));
 
 #ifdef CONFIG_IRQSTACKS
 	curctx = (union irq_ctx *)current_thread_info();
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 305aad742aec..d29de7864f32 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -195,8 +195,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	regs->gbr = gdb_regs[GDB_GBR];
 	regs->mach = gdb_regs[GDB_MACH];
 	regs->macl = gdb_regs[GDB_MACL];
-
-	__asm__ __volatile__ ("ldc %0, vbr" : : "r" (gdb_regs[GDB_VBR]));
 }
 
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 9fee977f176b..0673c4746be3 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -32,15 +32,35 @@
 #include <asm/ubc.h>
 #include <asm/fpu.h>
 #include <asm/syscalls.h>
+#include <asm/watchdog.h>
 
 int ubc_usercnt = 0;
 
+#ifdef CONFIG_32BIT
+static void watchdog_trigger_immediate(void)
+{
+	sh_wdt_write_cnt(0xFF);
+	sh_wdt_write_csr(0xC2);
+}
+
+void machine_restart(char * __unused)
+{
+	local_irq_disable();
+
+	/* Use watchdog timer to trigger reset */
+	watchdog_trigger_immediate();
+
+	while (1)
+		cpu_sleep();
+}
+#else
 void machine_restart(char * __unused)
 {
 	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
 	asm volatile("ldc %0, sr\n\t"
 		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
 }
+#endif
 
 void machine_halt(void)
 {
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 212e6bddaeb8..d13bbafb4e1b 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -404,10 +404,14 @@ void __init setup_arch(char **cmdline_p)
 	if (!memory_end)
 		memory_end = memory_start + __MEMORY_SIZE;
 
-#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERWRITE
 	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
 #else
 	strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
+#ifdef CONFIG_CMDLINE_EXTEND
+	strlcat(command_line, " ", sizeof(command_line));
+	strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#endif
 #endif
 
 	/* Save unparsed command line copy for /proc/cmdline */
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index b5afbec1db59..6010750c90b4 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -41,6 +41,16 @@ struct fdpic_func_descriptor {
 };
 
 /*
+ * The following define adds a 64 byte gap between the signal
+ * stack frame and previous contents of the stack. This allows
+ * frame unwinding in a function epilogue but only if a frame
+ * pointer is used in the function. This is necessary because
+ * current gcc compilers (<4.3) do not generate unwind info on
+ * SH for function epilogues.
+ */
+#define UNWINDGUARD 64
+
+/*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
 asmlinkage int
@@ -327,7 +337,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
 		sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
-	return (void __user *)((sp - frame_size) & -8ul);
+	return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul);
 }
 
 /* These symbols are defined with the addresses in the vsyscall page.
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 90d00e47264d..8aa5d1ceaf14 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -25,6 +25,8 @@
 #include <asm/syscalls.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
 
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
@@ -179,6 +181,47 @@ asmlinkage int sys_ipc(uint call, int first, int second,
 	return -EINVAL;
 }
 
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
+{
+	struct vm_area_struct *vma;
+
+	if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
+		return -EINVAL;
+
+	/*
+	 * Verify that the specified address region actually belongs
+	 * to this process.
+	 */
+	if (addr + len < addr)
+		return -EFAULT;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma (current->mm, addr);
+	if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+		up_read(&current->mm->mmap_sem);
+		return -EFAULT;
+	}
+
+	switch (op & CACHEFLUSH_D_PURGE) {
+		case CACHEFLUSH_D_INVAL:
+			__flush_invalidate_region((void *)addr, len);
+			break;
+		case CACHEFLUSH_D_WB:
+			__flush_wback_region((void *)addr, len);
+			break;
+		case CACHEFLUSH_D_PURGE:
+			__flush_purge_region((void *)addr, len);
+			break;
+	}
+
+	if (op & CACHEFLUSH_I)
+		flush_cache_all();
+
+	up_read(&current->mm->mmap_sem);
+	return 0;
+}
+
 asmlinkage int sys_uname(struct old_utsname __user *name)
 {
 	int err;
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index f9e21fa2f592..16ba225ede89 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -139,7 +139,7 @@ ENTRY(sys_call_table)
 	.long sys_clone		/* 120 */
 	.long sys_setdomainname
 	.long sys_newuname
-	.long sys_ni_syscall	/* sys_modify_ldt */
+	.long sys_cacheflush	/* x86: sys_modify_ldt */
 	.long sys_adjtimex
 	.long sys_mprotect	/* 125 */
 	.long sys_sigprocmask
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index bf420b616ae0..af6fb7410c21 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -143,7 +143,7 @@ sys_call_table:
 	.long sys_clone		/* 120 */
 	.long sys_setdomainname
 	.long sys_newuname
-	.long sys_ni_syscall	/* sys_modify_ldt */
+	.long sys_cacheflush	/* x86: sys_modify_ldt */
 	.long sys_adjtimex
 	.long sys_mprotect	/* 125 */
 	.long sys_sigprocmask
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 05a04b6df844..c581dc31d92a 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -24,6 +24,7 @@
 #include <linux/kdebug.h>
 #include <linux/kexec.h>
 #include <linux/limits.h>
+#include <linux/proc_fs.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/fpu.h>
@@ -44,6 +45,87 @@
 #define TRAP_ILLEGAL_SLOT_INST	13
 #endif
 
+static unsigned long se_user;
+static unsigned long se_sys;
+static unsigned long se_skipped;
+static unsigned long se_half;
+static unsigned long se_word;
+static unsigned long se_dword;
+static unsigned long se_multi;
+/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
+   valid! */
+static int se_usermode = 3;
+/* 0: no warning 1: print a warning message */
+static int se_kernmode_warn = 1;
+
+#ifdef CONFIG_PROC_FS
+static const char *se_usermode_action[] = {
+	"ignored",
+	"warn",
+	"fixup",
+	"fixup+warn",
+	"signal",
+	"signal+warn"
+};
+
+static int
+proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
+		    void *data)
+{
+	char *p = page;
+	int len;
+
+	p += sprintf(p, "User:\t\t%lu\n", se_user);
+	p += sprintf(p, "System:\t\t%lu\n", se_sys);
+	p += sprintf(p, "Skipped:\t%lu\n", se_skipped);
+	p += sprintf(p, "Half:\t\t%lu\n", se_half);
+	p += sprintf(p, "Word:\t\t%lu\n", se_word);
+	p += sprintf(p, "DWord:\t\t%lu\n", se_dword);
+	p += sprintf(p, "Multi:\t\t%lu\n", se_multi);
+	p += sprintf(p, "User faults:\t%i (%s)\n", se_usermode,
+			se_usermode_action[se_usermode]);
+	p += sprintf(p, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+			se_kernmode_warn ? "+warn" : "");
+
+	len = (p - page) - off;
+	if (len < 0)
+		len = 0;
+
+	*eof = (len <= count) ? 1 : 0;
+	*start = page + off;
+
+	return len;
+}
+
+static int proc_alignment_write(struct file *file, const char __user *buffer,
+				unsigned long count, void *data)
+{
+	char mode;
+
+	if (count > 0) {
+		if (get_user(mode, buffer))
+			return -EFAULT;
+		if (mode >= '0' && mode <= '5')
+			se_usermode = mode - '0';
+	}
+	return count;
+}
+
+static int proc_alignment_kern_write(struct file *file, const char __user *buffer,
+				     unsigned long count, void *data)
+{
+	char mode;
+
+	if (count > 0) {
+		if (get_user(mode, buffer))
+			return -EFAULT;
+		if (mode >= '0' && mode <= '1')
+			se_kernmode_warn = mode - '0';
+	}
+	return count;
+}
+#endif
+
 static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
 {
 	unsigned long p;
@@ -194,6 +276,13 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
 
 	count = 1<<(instruction&3);
 
+	switch (count) {
+	case 1: se_half  += 1; break;
+	case 2: se_word  += 1; break;
+	case 4: se_dword += 1; break;
+	case 8: se_multi += 1; break; /* ??? */
+	}
+
 	ret = -EFAULT;
 	switch (instruction>>12) {
 	case 0: /* mov.[bwl] to/from memory via r0+rn */
@@ -359,13 +448,6 @@ static inline int handle_delayslot(struct pt_regs *regs,
 #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
 #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
 
-/*
- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
- * opcodes..
- */
-
-static int handle_unaligned_notify_count = 10;
-
 int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 			    struct mem_access *ma)
 {
@@ -375,15 +457,13 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 	index = (instruction>>8)&15;	/* 0x0F00 */
 	rm = regs->regs[index];
 
-	/* shout about the first ten userspace fixups */
-	if (user_mode(regs) && handle_unaligned_notify_count>0) {
-		handle_unaligned_notify_count--;
-
-		printk(KERN_NOTICE "Fixing up unaligned userspace access "
+	/* shout about fixups */
+	if (printk_ratelimit())
+		printk(KERN_NOTICE "Fixing up unaligned %s access "
 		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+		       user_mode(regs) ? "userspace" : "kernel",
 		       current->comm, task_pid_nr(current),
 		       (void *)regs->pc, instruction);
-	}
 
 	ret = -EFAULT;
 	switch (instruction&0xF000) {
@@ -539,6 +619,36 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
 		local_irq_enable();
 
+		se_user += 1;
+
+#ifndef CONFIG_CPU_SH2A
+		set_fs(USER_DS);
+		if (copy_from_user(&instruction, (u16 *)(regs->pc & ~1), 2)) {
+			set_fs(oldfs);
+			goto uspace_segv;
+		}
+		set_fs(oldfs);
+
+		/* shout about userspace fixups */
+		if (se_usermode & 1)
+			printk(KERN_NOTICE "Unaligned userspace access "
+			       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+			       current->comm, current->pid, (void *)regs->pc,
+			       instruction);
+#endif
+
+		if (se_usermode & 2)
+			goto fixup;
+
+		if (se_usermode & 4)
+			goto uspace_segv;
+		else {
+			/* ignore */
+			regs->pc += instruction_size(instruction);
+			return;
+		}
+
+fixup:
 		/* bad PC is not something we can fix */
 		if (regs->pc & 1) {
 			si_code = BUS_ADRALN;
@@ -546,15 +656,6 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 		}
 
 		set_fs(USER_DS);
-		if (copy_from_user(&instruction, (void __user *)(regs->pc),
-				   sizeof(instruction))) {
-			/* Argh. Fault on the instruction itself.
-			   This should never happen non-SMP
-			*/
-			set_fs(oldfs);
-			goto uspace_segv;
-		}
-
 		tmp = handle_unaligned_access(instruction, regs,
 					      &user_mem_access);
 		set_fs(oldfs);
@@ -572,6 +673,14 @@ uspace_segv:
 		info.si_addr = (void __user *)address;
 		force_sig_info(SIGBUS, &info, current);
 	} else {
+		se_sys += 1;
+
+		if (se_kernmode_warn)
+			printk(KERN_NOTICE "Unaligned kernel access "
+			       "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+			       current->comm, current->pid, (void *)regs->pc,
+			       instruction);
+
 		if (regs->pc & 1)
 			die("unaligned program counter", regs, error_code);
 
@@ -881,3 +990,38 @@ void dump_stack(void)
 	show_stack(NULL, NULL);
 }
 EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_PROC_FS
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten. Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ * We now locate it in /proc/cpu/alignment instead.
+ */
+static int __init alignment_init(void)
+{
+	struct proc_dir_entry *dir, *res;
+
+	dir = proc_mkdir("cpu", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, dir);
+	if (!res)
+		return -ENOMEM;
+
+	res->read_proc = proc_alignment_read;
+	res->write_proc = proc_alignment_write;
+
+	res = create_proc_entry("kernel_alignment", S_IWUSR | S_IRUGO, dir);
+	if (!res)
+		return -ENOMEM;
+
+	res->read_proc = proc_alignment_read;
+	res->write_proc = proc_alignment_kern_write;
+
+	return 0;
+}
+
+fs_initcall(alignment_init);
+#endif
diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S
index 8342bfbde64c..c92244d4ff9d 100644
--- a/arch/sh/lib/clear_page.S
+++ b/arch/sh/lib/clear_page.S
@@ -57,7 +57,7 @@ ENTRY(clear_page)
 ENTRY(__clear_user)
 	!
 	mov	#0, r0
-	mov	#0xe0, r1	! 0xffffffe0
+	mov	#0xffffffe0, r1
 	!
 	! r4..(r4+31)&~32	   -------- not aligned	[ Area 0 ]
 	! (r4+31)&~32..(r4+r5)&~32 -------- aligned	[ Area 1 ]
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index f3ddd2133e6f..faa8f86c0db4 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -21,13 +21,14 @@ void __delay(unsigned long loops)
 
 inline void __const_udelay(unsigned long xloops)
 {
+	xloops *= 4;
 	__asm__("dmulu.l	%0, %2\n\t"
 		"sts	mach, %0"
 		: "=r" (xloops)
 		: "0" (xloops),
-		  "r" (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy)
+		  "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))
 		: "macl", "mach");
-	__delay(xloops);
+	__delay(++xloops);
 }
 
 void __udelay(unsigned long usecs)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 5cfe08dbb59e..397c1030c7a6 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -581,6 +581,31 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
  * Break the 1, 2 and 4 way variants of this out into separate functions to
  * avoid nearly all the overhead of having the conditional stuff in the function
  * bodies (+ the 1 and 2 way cases avoid saving any registers too).
+ *
+ * We want to eliminate unnecessary bus transactions, so this code uses
+ * a non-obvious technique.
+ *
+ * Loop over a cache way sized block of, one cache line at a time. For each
+ * line, use movca.a to cause the current cache line contents to be written
+ * back, but without reading anything from main memory. However this has the
+ * side effect that the cache is now caching that memory location. So follow
+ * this with a cache invalidate to mark the cache line invalid. And do all
+ * this with interrupts disabled, to avoid the cache line being accidently
+ * evicted while it is holding garbage.
+ *
+ * This also breaks in a number of circumstances:
+ * - if there are modifications to the region of memory just above
+ *   empty_zero_page (for example because a breakpoint has been placed
+ *   there), then these can be lost.
+ *
+ *   This is because the the memory address which the cache temporarily
+ *   caches in the above description is empty_zero_page. So the
+ *   movca.l hits the cache (it is assumed that it misses, or at least
+ *   isn't dirty), modifies the line and then invalidates it, losing the
+ *   required change.
+ *
+ * - If caches are disabled or configured in write-through mode, then
+ *   the movca.l writes garbage directly into memory.
  */
 static void __flush_dcache_segment_1way(unsigned long start,
 					unsigned long extent_per_way)
@@ -630,6 +655,25 @@ static void __flush_dcache_segment_1way(unsigned long start,
 	} while (a0 < a0e);
 }
 
+#ifdef CONFIG_CACHE_WRITETHROUGH
+/* This method of cache flushing avoids the problems discussed
+ * in the comment above if writethrough caches are enabled. */
+static void __flush_dcache_segment_2way(unsigned long start,
+					unsigned long extent_per_way)
+{
+	unsigned long array_addr;
+
+	array_addr = CACHE_OC_ADDRESS_ARRAY |
+		(start & cpu_data->dcache.entry_mask);
+
+	while (extent_per_way) {
+		ctrl_outl(0, array_addr);
+		ctrl_outl(0, array_addr + cpu_data->dcache.way_incr);
+		array_addr += cpu_data->dcache.linesz;
+		extent_per_way -= cpu_data->dcache.linesz;
+	}
+}
+#else
 static void __flush_dcache_segment_2way(unsigned long start,
 					unsigned long extent_per_way)
 {
@@ -688,6 +732,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
 		a1 += linesz;
 	} while (a0 < a0e);
 }
+#endif
 
 static void __flush_dcache_segment_4way(unsigned long start,
 					unsigned long extent_per_way)
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index da2f4186f2cd..c3250614e3ae 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -57,14 +57,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (is_pci_memory_fixed_range(phys_addr, size))
 		return (void __iomem *)phys_addr;
 
-#if !defined(CONFIG_PMB_FIXED)
-	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
-	 */
-	if (phys_addr < virt_to_phys(high_memory))
-		return NULL;
-#endif
-
 	/*
 	 * Mappings have to be page-aligned
 	 */
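Editorial note (not part of the commit): the merge above wires up sys_cacheflush as syscall 123 and exports <asm/cachectl.h>, so userspace can request explicit cache maintenance on a mapped range. The sketch below is a minimal, hypothetical illustration of how a program on such a kernel might call it through syscall(2); the local cacheflush() wrapper and the nop-filled buffer are illustrative assumptions, not code from this tree.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>	/* ICACHE, DCACHE, BCACHE from the header added above */

#ifndef __NR_cacheflush
#define __NR_cacheflush 123	/* slot reused from sys_modify_ldt in unistd_32.h/unistd_64.h */
#endif

/* Thin wrapper; the kernel returns -EINVAL for a bad op mask and -EFAULT
 * if the range is not mapped in this process (see sys_cacheflush above). */
static long cacheflush(void *addr, size_t len, int op)
{
	return syscall(__NR_cacheflush, (unsigned long)addr, len, op);
}

int main(void)
{
	static unsigned char code[64];

	memset(code, 0x09, sizeof(code));	/* 0x0009 is a nop on SH */

	/* Write back the data cache and invalidate the instruction cache
	 * for the buffer, e.g. before jumping to freshly generated code. */
	if (cacheflush(code, sizeof(code), BCACHE) != 0)
		perror("cacheflush");

	return 0;
}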