path: root/arch/sh/include/asm
Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--  arch/sh/include/asm/addrspace.h      |    4
-rw-r--r--  arch/sh/include/asm/atomic-irq.h     |   16
-rw-r--r--  arch/sh/include/asm/bitops-llsc.h    |   72
-rw-r--r--  arch/sh/include/asm/clock.h          |    1
-rw-r--r--  arch/sh/include/asm/cmpxchg-llsc.h   |   38
-rw-r--r--  arch/sh/include/asm/cpu-features.h   |    1
-rw-r--r--  arch/sh/include/asm/dma-sh.h         |  118
-rw-r--r--  arch/sh/include/asm/dma.h            |    4
-rw-r--r--  arch/sh/include/asm/entry-macros.S   |    5
-rw-r--r--  arch/sh/include/asm/gpio.h           |   70
-rw-r--r--  arch/sh/include/asm/hd64461.h        |    1
-rw-r--r--  arch/sh/include/asm/io.h             |    4
-rw-r--r--  arch/sh/include/asm/kprobes.h        |    2
-rw-r--r--  arch/sh/include/asm/mmu_context.h    |   15
-rw-r--r--  arch/sh/include/asm/mmu_context_32.h |   12
-rw-r--r--  arch/sh/include/asm/page.h           |    7
-rw-r--r--  arch/sh/include/asm/processor.h      |    2
-rw-r--r--  arch/sh/include/asm/processor_32.h   |   15
-rw-r--r--  arch/sh/include/asm/processor_64.h   |   14
-rw-r--r--  arch/sh/include/asm/ptrace.h         |    8
-rw-r--r--  arch/sh/include/asm/sections.h       |    1
-rw-r--r--  arch/sh/include/asm/suspend.h        |   22
-rw-r--r--  arch/sh/include/asm/timer.h          |    4
-rw-r--r--  arch/sh/include/asm/tlb.h            |  100
24 files changed, 400 insertions, 136 deletions
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 36736c7e93db..80d40813e057 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -31,7 +31,7 @@
 /* Returns the physical address of a PnSEG (n=1,2) address */
 #define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
 
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
 /*
  * Map an address to a certain privileged segment
  */
@@ -43,7 +43,7 @@
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
 #define P4SEGADDR(a)	\
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT */
+#endif /* 29BIT || PMB_FIXED */
 #endif /* P1SEG */
 
 /* Check if an address can be reached in 29 bits */
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 74f7943cff6f..a0b348068cae 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -11,7 +11,7 @@ static inline void atomic_add(int i, atomic_t *v)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	*(long *)v += i;
+	v->counter += i;
 	local_irq_restore(flags);
 }
 
@@ -20,7 +20,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	*(long *)v -= i;
+	v->counter -= i;
 	local_irq_restore(flags);
 }
 
@@ -29,9 +29,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	unsigned long temp, flags;
 
 	local_irq_save(flags);
-	temp = *(long *)v;
+	temp = v->counter;
 	temp += i;
-	*(long *)v = temp;
+	v->counter = temp;
 	local_irq_restore(flags);
 
 	return temp;
@@ -42,9 +42,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	unsigned long temp, flags;
 
 	local_irq_save(flags);
-	temp = *(long *)v;
+	temp = v->counter;
 	temp -= i;
-	*(long *)v = temp;
+	v->counter = temp;
 	local_irq_restore(flags);
 
 	return temp;
@@ -55,7 +55,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	*(long *)v &= ~mask;
+	v->counter &= ~mask;
 	local_irq_restore(flags);
 }
 
@@ -64,7 +64,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	*(long *)v |= mask;
+	v->counter |= mask;
 	local_irq_restore(flags);
 }
 
diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h
index 1d2fc0b010ad..d8328be06191 100644
--- a/arch/sh/include/asm/bitops-llsc.h
+++ b/arch/sh/include/asm/bitops-llsc.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_SH_BITOPS_LLSC_H
 #define __ASM_SH_BITOPS_LLSC_H
 
-static inline void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
 {
 	int mask;
 	volatile unsigned int *a = addr;
@@ -13,16 +13,16 @@ static inline void set_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1: \n\t"
 		"movli.l @%1, %0 ! set_bit \n\t"
-		"or %3, %0 \n\t"
+		"or %2, %0 \n\t"
 		"movco.l %0, @%1 \n\t"
 		"bf 1b \n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 }
 
-static inline void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
 {
 	int mask;
 	volatile unsigned int *a = addr;
@@ -34,16 +34,16 @@ static inline void clear_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1: \n\t"
 		"movli.l @%1, %0 ! clear_bit \n\t"
-		"and %3, %0 \n\t"
+		"and %2, %0 \n\t"
 		"movco.l %0, @%1 \n\t"
 		"bf 1b \n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (~mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (~mask)
 		: "t", "memory"
 	);
 }
 
-static inline void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
 {
 	int mask;
 	volatile unsigned int *a = addr;
@@ -55,16 +55,16 @@ static inline void change_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1: \n\t"
 		"movli.l @%1, %0 ! change_bit \n\t"
-		"xor %3, %0 \n\t"
+		"xor %2, %0 \n\t"
 		"movco.l %0, @%1 \n\t"
 		"bf 1b \n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 }
 
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = addr;
@@ -75,21 +75,21 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! test_and_set_bit \n\t"
-		"mov %0, %2 \n\t"
-		"or %4, %0 \n\t"
-		"movco.l %0, @%1 \n\t"
+		"movli.l @%2, %0 ! test_and_set_bit \n\t"
+		"mov %0, %1 \n\t"
+		"or %3, %0 \n\t"
+		"movco.l %0, @%2 \n\t"
 		"bf 1b \n\t"
-		"and %4, %2 \n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask)
+		"and %3, %1 \n\t"
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 
 	return retval != 0;
 }
 
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = addr;
@@ -100,22 +100,22 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! test_and_clear_bit \n\t"
-		"mov %0, %2 \n\t"
-		"and %5, %0 \n\t"
-		"movco.l %0, @%1 \n\t"
+		"movli.l @%2, %0 ! test_and_clear_bit \n\t"
+		"mov %0, %1 \n\t"
+		"and %4, %0 \n\t"
+		"movco.l %0, @%2 \n\t"
 		"bf 1b \n\t"
-		"and %4, %2 \n\t"
+		"and %3, %1 \n\t"
 		"synco \n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask), "r" (~mask)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask), "r" (~mask)
 		: "t", "memory"
 	);
 
 	return retval != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
 {
 	int mask, retval;
 	volatile unsigned int *a = addr;
@@ -126,15 +126,15 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! test_and_change_bit \n\t"
-		"mov %0, %2 \n\t"
-		"xor %4, %0 \n\t"
-		"movco.l %0, @%1 \n\t"
+		"movli.l @%2, %0 ! test_and_change_bit \n\t"
+		"mov %0, %1 \n\t"
+		"xor %3, %0 \n\t"
+		"movco.l %0, @%2 \n\t"
 		"bf 1b \n\t"
-		"and %4, %2 \n\t"
+		"and %3, %1 \n\t"
 		"synco \n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 
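
The routines above keep the usual kernel bitops semantics: the test_and_* variants return whether the bit was previously set, and the plain variants just modify it atomically. A minimal usage sketch, with hypothetical names and not part of this patch:

	/* Hypothetical consumer of the primitives above. */
	static unsigned long example_state;	/* bit 0 = "resource busy" */
	#define EXAMPLE_BUSY	0

	static int example_try_claim(void)
	{
		/* test_and_set_bit() returns non-zero if the bit was already set */
		return test_and_set_bit(EXAMPLE_BUSY, &example_state) == 0;
	}

	static void example_release(void)
	{
		clear_bit(EXAMPLE_BUSY, &example_state);
	}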
diff --git a/arch/sh/include/asm/clock.h b/arch/sh/include/asm/clock.h
index f9c88583d90a..2f6c9627bc1f 100644
--- a/arch/sh/include/asm/clock.h
+++ b/arch/sh/include/asm/clock.h
@@ -15,6 +15,7 @@ struct clk_ops {
 	void (*disable)(struct clk *clk);
 	void (*recalc)(struct clk *clk);
 	int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id);
+	int (*set_parent)(struct clk *clk, struct clk *parent);
 	long (*round_rate)(struct clk *clk, unsigned long rate);
 };
 
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index aee3bf286581..0fac3da536ca 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -8,14 +8,14 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! xchg_u32 \n\t"
-		"mov %0, %2 \n\t"
-		"mov %4, %0 \n\t"
-		"movco.l %0, @%1 \n\t"
+		"movli.l @%2, %0 ! xchg_u32 \n\t"
+		"mov %0, %1 \n\t"
+		"mov %3, %0 \n\t"
+		"movco.l %0, @%2 \n\t"
 		"bf 1b \n\t"
 		"synco \n\t"
-		: "=&z"(tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (val)
+		: "=&z"(tmp), "=&r" (retval)
+		: "r" (m), "r" (val)
 		: "t", "memory"
 	);
 
@@ -29,14 +29,14 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! xchg_u8 \n\t"
-		"mov %0, %2 \n\t"
-		"mov %4, %0 \n\t"
-		"movco.l %0, @%1 \n\t"
+		"movli.l @%2, %0 ! xchg_u8 \n\t"
+		"mov %0, %1 \n\t"
+		"mov %3, %0 \n\t"
+		"movco.l %0, @%2 \n\t"
 		"bf 1b \n\t"
 		"synco \n\t"
-		: "=&z"(tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (val & 0xff)
+		: "=&z"(tmp), "=&r" (retval)
+		: "r" (m), "r" (val & 0xff)
 		: "t", "memory"
 	);
 
@@ -51,17 +51,17 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! __cmpxchg_u32 \n\t"
-		"mov %0, %2 \n\t"
-		"cmp/eq %2, %4 \n\t"
+		"movli.l @%2, %0 ! __cmpxchg_u32 \n\t"
+		"mov %0, %1 \n\t"
+		"cmp/eq %1, %3 \n\t"
 		"bf 2f \n\t"
-		"mov %5, %0 \n\t"
+		"mov %3, %0 \n\t"
 		"2: \n\t"
-		"movco.l %0, @%1 \n\t"
+		"movco.l %0, @%2 \n\t"
 		"bf 1b \n\t"
 		"synco \n\t"
-		: "=&z" (tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (old), "r" (new)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (m), "r" (old), "r" (new)
 		: "t", "memory"
 	);
 
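
__cmpxchg_u32() returns the value that was in *m before the exchange, so callers can layer a retry loop on top of it; a rough sketch with a hypothetical helper name, not from this patch:

	/* Add 'delta' to *p without locks, relying on the __cmpxchg_u32() above. */
	static inline void example_atomic_add(volatile int *p, int delta)
	{
		unsigned long old;

		do {
			old = *p;	/* snapshot the current value */
		} while (__cmpxchg_u32(p, old, old + delta) != old);
	}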
diff --git a/arch/sh/include/asm/cpu-features.h b/arch/sh/include/asm/cpu-features.h
index 86308aa39731..694abe490edb 100644
--- a/arch/sh/include/asm/cpu-features.h
+++ b/arch/sh/include/asm/cpu-features.h
@@ -21,5 +21,6 @@
 #define CPU_HAS_LLSC		0x0040	/* movli.l/movco.l */
 #define CPU_HAS_L2_CACHE	0x0080	/* Secondary cache / URAM */
 #define CPU_HAS_OP32		0x0100	/* 32-bit instruction support */
+#define CPU_HAS_PTEAEX		0x0200	/* PTE ASID Extension support */
 
 #endif /* __ASM_SH_CPU_FEATURES_H */
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
new file mode 100644
index 000000000000..0c8f8e14622a
--- /dev/null
+++ b/arch/sh/include/asm/dma-sh.h
@@ -0,0 +1,118 @@
+/*
+ * arch/sh/include/asm/dma-sh.h
+ *
+ * Copyright (C) 2000 Takashi YOSHII
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __DMA_SH_H
+#define __DMA_SH_H
+
+#include <asm/dma.h>
+#include <cpu/dma.h>
+
+/* DMAOR contorl: The DMAOR access size is different by CPU.*/
+#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7785)
+#define dmaor_read_reg(n) \
+	(n ? ctrl_inw(SH_DMAC_BASE1 + DMAOR) \
+	   : ctrl_inw(SH_DMAC_BASE0 + DMAOR))
+#define dmaor_write_reg(n, data) \
+	(n ? ctrl_outw(data, SH_DMAC_BASE1 + DMAOR) \
+	   : ctrl_outw(data, SH_DMAC_BASE0 + DMAOR))
+#else /* Other CPU */
+#define dmaor_read_reg(n) ctrl_inw(SH_DMAC_BASE0 + DMAOR)
+#define dmaor_write_reg(n, data) ctrl_outw(data, SH_DMAC_BASE0 + DMAOR)
+#endif
+
+static int dmte_irq_map[] __maybe_unused = {
+#if (MAX_DMA_CHANNELS >= 4)
+	DMTE0_IRQ,
+	DMTE0_IRQ + 1,
+	DMTE0_IRQ + 2,
+	DMTE0_IRQ + 3,
+#endif
+#if (MAX_DMA_CHANNELS >= 6)
+	DMTE4_IRQ,
+	DMTE4_IRQ + 1,
+#endif
+#if (MAX_DMA_CHANNELS >= 8)
+	DMTE6_IRQ,
+	DMTE6_IRQ + 1,
+#endif
+#if (MAX_DMA_CHANNELS >= 12)
+	DMTE8_IRQ,
+	DMTE9_IRQ,
+	DMTE10_IRQ,
+	DMTE11_IRQ,
+#endif
+};
+
+/* Definitions for the SuperH DMAC */
+#define REQ_L	0x00000000
+#define REQ_E	0x00080000
+#define RACK_H	0x00000000
+#define RACK_L	0x00040000
+#define ACK_R	0x00000000
+#define ACK_W	0x00020000
+#define ACK_H	0x00000000
+#define ACK_L	0x00010000
+#define DM_INC	0x00004000
+#define DM_DEC	0x00008000
+#define SM_INC	0x00001000
+#define SM_DEC	0x00002000
+#define RS_IN	0x00000200
+#define RS_OUT	0x00000300
+#define TS_BLK	0x00000040
+#define TM_BUR	0x00000020
+#define CHCR_DE	0x00000001
+#define CHCR_TE	0x00000002
+#define CHCR_IE	0x00000004
+
+/* DMAOR definitions */
+#define DMAOR_AE	0x00000004
+#define DMAOR_NMIF	0x00000002
+#define DMAOR_DME	0x00000001
+
+/*
+ * Define the default configuration for dual address memory-memory transfer.
+ * The 0x400 value represents auto-request, external->external.
+ */
+#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_32)
+
+/* DMA base address */
+static u32 dma_base_addr[] __maybe_unused = {
+#if (MAX_DMA_CHANNELS >= 4)
+	SH_DMAC_BASE0 + 0x00,	/* channel 0 */
+	SH_DMAC_BASE0 + 0x10,
+	SH_DMAC_BASE0 + 0x20,
+	SH_DMAC_BASE0 + 0x30,
+#endif
+#if (MAX_DMA_CHANNELS >= 6)
+	SH_DMAC_BASE0 + 0x50,
+	SH_DMAC_BASE0 + 0x60,
+#endif
+#if (MAX_DMA_CHANNELS >= 8)
+	SH_DMAC_BASE1 + 0x00,
+	SH_DMAC_BASE1 + 0x10,
+#endif
+#if (MAX_DMA_CHANNELS >= 12)
+	SH_DMAC_BASE1 + 0x20,
+	SH_DMAC_BASE1 + 0x30,
+	SH_DMAC_BASE1 + 0x50,
+	SH_DMAC_BASE1 + 0x60,	/* channel 11 */
+#endif
+};
+
+/* DMA register */
+#define SAR	0x00
+#define DAR	0x04
+#define TCR	0x08
+#define CHCR	0x0C
+#define DMAOR	0x40
+
+#endif /* __DMA_SH_H */
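
The dmaor_read_reg()/dmaor_write_reg() accessors and the DMAOR_* bits defined above are typically combined along these lines when bringing a controller up; a sketch with a hypothetical helper name, not taken from the kernel sources:

	/* Clear latched error flags and master-enable DMAC 'n' (illustrative only). */
	static void example_dmaor_init(int n)
	{
		unsigned short dmaor = dmaor_read_reg(n);

		dmaor &= ~(DMAOR_AE | DMAOR_NMIF);	/* clear address-error / NMI flags */
		dmaor_write_reg(n, dmaor | DMAOR_DME);	/* set the master enable bit */
	}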
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index beca7128e2ab..6bd178473878 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -25,9 +25,9 @@
 #define MAX_DMA_ADDRESS	(PAGE_OFFSET+0x10000000)
 
 #ifdef CONFIG_NR_DMA_CHANNELS
 # define MAX_DMA_CHANNELS	(CONFIG_NR_DMA_CHANNELS)
 #else
 # define MAX_DMA_CHANNELS	(CONFIG_NR_ONCHIP_DMA_CHANNELS)
 #endif
 
 /*
diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S
index 2dab0b8d9454..3a4752a65722 100644
--- a/arch/sh/include/asm/entry-macros.S
+++ b/arch/sh/include/asm/entry-macros.S
@@ -31,3 +31,8 @@
 #endif
 	.endm
 
+#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
+# define PREF(x)	pref	@x
+#else
+# define PREF(x)	nop
+#endif
diff --git a/arch/sh/include/asm/gpio.h b/arch/sh/include/asm/gpio.h
index 90673658eb14..61f93da2c62e 100644
--- a/arch/sh/include/asm/gpio.h
+++ b/arch/sh/include/asm/gpio.h
@@ -19,8 +19,42 @@
 #include <cpu/gpio.h>
 #endif
 
+#define ARCH_NR_GPIOS 512
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+static inline int gpio_get_value(unsigned gpio)
+{
+	return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+	__gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+	return __gpio_cansleep(gpio);
+}
+
+static inline int gpio_to_irq(unsigned gpio)
+{
+	WARN_ON(1);
+	return -ENOSYS;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+	WARN_ON(1);
+	return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
 typedef unsigned short pinmux_enum_t;
-typedef unsigned char pinmux_flag_t;
+typedef unsigned short pinmux_flag_t;
 
 #define PINMUX_TYPE_NONE		0
 #define PINMUX_TYPE_FUNCTION		1
@@ -34,6 +68,11 @@ typedef unsigned char pinmux_flag_t;
 #define PINMUX_FLAG_WANT_PULLUP		(1 << 3)
 #define PINMUX_FLAG_WANT_PULLDOWN	(1 << 4)
 
+#define PINMUX_FLAG_DBIT_SHIFT		5
+#define PINMUX_FLAG_DBIT		(0x1f << PINMUX_FLAG_DBIT_SHIFT)
+#define PINMUX_FLAG_DREG_SHIFT		10
+#define PINMUX_FLAG_DREG		(0x3f << PINMUX_FLAG_DREG_SHIFT)
+
 struct pinmux_gpio {
 	pinmux_enum_t enum_id;
 	pinmux_flag_t flags;
@@ -54,7 +93,7 @@ struct pinmux_cfg_reg {
 	.enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)]) \
 
 struct pinmux_data_reg {
-	unsigned long reg, reg_width;
+	unsigned long reg, reg_width, reg_shadow;
 	pinmux_enum_t *enum_ids;
 };
 
@@ -89,34 +128,9 @@ struct pinmux_info {
 	unsigned int gpio_data_size;
 
 	unsigned long *gpio_in_use;
+	struct gpio_chip chip;
 };
 
 int register_pinmux(struct pinmux_info *pip);
 
-int __gpio_request(unsigned gpio);
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-	return __gpio_request(gpio);
-}
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-
-/* IRQ modes are unspported */
-static inline int gpio_to_irq(unsigned gpio)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-#include <asm-generic/gpio.h>
-
 #endif /* __ASM_SH_GPIO_H */
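
With the header now routed through gpiolib (asm-generic/gpio.h plus the CONFIG_GPIOLIB wrappers added above), boards and drivers use the generic GPIO calls; a minimal sketch with a made-up pin number, purely for illustration:

	#include <linux/gpio.h>

	#define EXAMPLE_LED_GPIO	42	/* hypothetical pin number */

	static int example_led_setup(void)
	{
		int ret;

		ret = gpio_request(EXAMPLE_LED_GPIO, "status-led");
		if (ret)
			return ret;

		ret = gpio_direction_output(EXAMPLE_LED_GPIO, 0);
		if (ret) {
			gpio_free(EXAMPLE_LED_GPIO);
			return ret;
		}

		gpio_set_value(EXAMPLE_LED_GPIO, 1);	/* drive the pin high */
		return 0;
	}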
diff --git a/arch/sh/include/asm/hd64461.h b/arch/sh/include/asm/hd64461.h
index 8c1353baf00f..52b4b6238277 100644
--- a/arch/sh/include/asm/hd64461.h
+++ b/arch/sh/include/asm/hd64461.h
@@ -242,7 +242,6 @@
 #include <asm/io_generic.h>
 
 /* arch/sh/cchips/hd6446x/hd64461/setup.c */
-int hd64461_irq_demux(int irq);
 void hd64461_register_irq_demux(int irq,
 		int (*demux) (int irq, void *dev), void *dev);
 void hd64461_unregister_irq_demux(int irq);
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 61f6dae40534..0454f8d68059 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -238,7 +238,7 @@ extern void onchip_unmap(unsigned long vaddr);
 static inline void __iomem *
 __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
 	unsigned long last_addr = offset + size - 1;
 #endif
 	void __iomem *ret;
@@ -247,7 +247,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
 	 * mapped. Uncached access for P1 addresses are done through P2.
diff --git a/arch/sh/include/asm/kprobes.h b/arch/sh/include/asm/kprobes.h
index 6078d8e551d4..613644a758e8 100644
--- a/arch/sh/include/asm/kprobes.h
+++ b/arch/sh/include/asm/kprobes.h
@@ -16,7 +16,7 @@ typedef u16 kprobe_opcode_t;
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
-#define regs_return_value(regs)	((regs)->regs[0])
+#define regs_return_value(_regs)	((_regs)->regs[0])
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
 
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 5d9157bd474d..2a9c55f1a83f 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -19,13 +19,18 @@
  * (a) TLB cache version (or round, cycle whatever expression you like)
  * (b) ASID (Address Space IDentifier)
  */
+#ifdef CONFIG_CPU_HAS_PTEAEX
+#define MMU_CONTEXT_ASID_MASK		0x0000ffff
+#else
 #define MMU_CONTEXT_ASID_MASK		0x000000ff
-#define MMU_CONTEXT_VERSION_MASK	0xffffff00
-#define MMU_CONTEXT_FIRST_VERSION	0x00000100
-#define NO_CONTEXT			0UL
+#endif
 
-/* ASID is 8-bit value, so it can't be 0x100 */
-#define MMU_NO_ASID			0x100
+#define MMU_CONTEXT_VERSION_MASK	(~0UL & ~MMU_CONTEXT_ASID_MASK)
+#define MMU_CONTEXT_FIRST_VERSION	(MMU_CONTEXT_ASID_MASK + 1)
+
+/* Impossible ASID value, to differentiate from NO_CONTEXT. */
+#define MMU_NO_ASID			MMU_CONTEXT_FIRST_VERSION
+#define NO_CONTEXT			0UL
 
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 
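
With the masks now derived from each other, the split of a context value into a generation ("version") part and an ASID part follows directly from the definitions above; a small illustration with a hypothetical helper name:

	/*
	 * Illustration only: without PTEAEX the ASID field is 8 bits wide, so
	 * MMU_CONTEXT_FIRST_VERSION is 0x100; with PTEAEX it is 16 bits wide,
	 * making the first version 0x10000.
	 */
	static inline unsigned long example_asid_of(unsigned int cpu)
	{
		unsigned long ctx = asid_cache(cpu);	/* per-CPU context counter */

		return ctx & MMU_CONTEXT_ASID_MASK;	/* strip the version bits */
	}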
diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h
index f4f9aebd68b7..8ef800c549ab 100644
--- a/arch/sh/include/asm/mmu_context_32.h
+++ b/arch/sh/include/asm/mmu_context_32.h
@@ -10,6 +10,17 @@ static inline void destroy_context(struct mm_struct *mm)
 	/* Do nothing */
 }
 
+#ifdef CONFIG_CPU_HAS_PTEAEX
+static inline void set_asid(unsigned long asid)
+{
+	__raw_writel(asid, MMU_PTEAEX);
+}
+
+static inline unsigned long get_asid(void)
+{
+	return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
+}
+#else
 static inline void set_asid(unsigned long asid)
 {
 	unsigned long __dummy;
@@ -33,6 +44,7 @@ static inline unsigned long get_asid(void)
 	asid &= MMU_CONTEXT_ASID_MASK;
 	return asid;
 }
+#endif /* CONFIG_CPU_HAS_PTEAEX */
 
 /* MMU_TTB is used for optimizing the fault handling. */
 static inline void set_TTB(pgd_t *pgd)
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 5871d78e47e5..9c6d21ec0240 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -129,7 +129,12 @@ typedef struct page *pgtable_t;
  * is not visible (it is part of the PMB mapping) and so needs to be
  * added or subtracted as required.
  */
-#ifdef CONFIG_32BIT
+#if defined(CONFIG_PMB_FIXED)
+/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
+#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
+#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
+#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
+#elif defined(CONFIG_32BIT)
 #define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
 #else
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 1ef4b24d7619..1fd58b421438 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -31,7 +31,7 @@ enum cpu_type {
 	CPU_SH7760, CPU_SH4_202, CPU_SH4_501,
 
 	/* SH-4A types */
-	CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785,
+	CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SH7786,
 	CPU_SH7723, CPU_SHX3,
 
 	/* SH4AL-DSP types */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index d79063c5eb9c..efdd78a53b11 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -108,12 +108,12 @@ extern int ubc_usercnt;
 /*
  * Do necessary setup to start up a newly executed thread.
  */
-#define start_thread(regs, new_pc, new_sp)	\
+#define start_thread(_regs, new_pc, new_sp)	\
 	set_fs(USER_DS);			\
-	regs->pr = 0;				\
-	regs->sr = SR_FD;	/* User mode. */ \
-	regs->pc = new_pc;			\
-	regs->regs[15] = new_sp
+	_regs->pr = 0;				\
+	_regs->sr = SR_FD;	/* User mode. */ \
+	_regs->pc = new_pc;			\
+	_regs->regs[15] = new_sp
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -189,10 +189,9 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
 
-#define user_stack_pointer(regs) ((regs)->regs[15])
+#define user_stack_pointer(_regs) ((_regs)->regs[15])
 
-#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \
-    defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
 #define PREFETCH_STRIDE		L1_CACHE_BYTES
 #define ARCH_HAS_PREFETCH
 #define ARCH_HAS_PREFETCHW
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 803177fcf086..5727d31b0ccf 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -145,13 +145,13 @@ struct thread_struct {
  */
 #define SR_USER (SR_MMU | SR_FD)
 
-#define start_thread(regs, new_pc, new_sp)			\
+#define start_thread(_regs, new_pc, new_sp)			\
 	set_fs(USER_DS);					\
-	regs->sr = SR_USER;	/* User mode. */		\
-	regs->pc = new_pc - 4;	/* Compensate syscall exit */	\
-	regs->pc |= 1;		/* Set SHmedia ! */		\
-	regs->regs[18] = 0;					\
-	regs->regs[15] = new_sp
+	_regs->sr = SR_USER;	/* User mode. */		\
+	_regs->pc = new_pc - 4;	/* Compensate syscall exit */	\
+	_regs->pc |= 1;		/* Set SHmedia ! */		\
+	_regs->regs[18] = 0;					\
+	_regs->regs[15] = new_sp
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -226,7 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)  ((tsk)->thread.pc)
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
 
-#define user_stack_pointer(regs) ((regs)->regs[15])
+#define user_stack_pointer(_regs) ((_regs)->regs[15])
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SH_PROCESSOR_64_H */
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index 12912ab80c15..81c6568fdb3e 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -122,14 +122,12 @@ extern void user_disable_single_step(struct task_struct *);
 #ifdef CONFIG_SH_DSP
 #define task_pt_regs(task) \
 	((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
-		- sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
+		- sizeof(struct pt_dspregs)) - 1)
 #define task_pt_dspregs(task) \
-	((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE \
-		- sizeof(unsigned long)) - 1)
+	((struct pt_dspregs *) (task_stack_page(task) + THREAD_SIZE) - 1)
 #else
 #define task_pt_regs(task) \
-	((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
-		- sizeof(unsigned long)) - 1)
+	((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE) - 1)
 #endif
 
 static inline unsigned long profile_pc(struct pt_regs *regs)
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 8f8f4ad400df..01a4076a3719 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,6 +3,7 @@
 
 #include <asm-generic/sections.h>
 
+extern void __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char _ebss[];
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
new file mode 100644
index 000000000000..b1b995370e79
--- /dev/null
+++ b/arch/sh/include/asm/suspend.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_SH_SUSPEND_H
+#define _ASM_SH_SUSPEND_H
+
+#ifndef __ASSEMBLY__
+static inline int arch_prepare_suspend(void) { return 0; }
+
+#include <asm/ptrace.h>
+
+struct swsusp_arch_regs {
+	struct pt_regs user_regs;
+	unsigned long bank1_regs[8];
+};
+#endif
+
+/* flags passed to assembly suspend code */
+#define SUSP_SH_SLEEP		(1 << 0) /* Regular sleep mode */
+#define SUSP_SH_STANDBY		(1 << 1) /* SH-Mobile Software standby mode */
+#define SUSP_SH_RSTANDBY	(1 << 2) /* SH-Mobile R-standby mode */
+#define SUSP_SH_USTANDBY	(1 << 3) /* SH-Mobile U-standby mode */
+#define SUSP_SH_SF		(1 << 4) /* Enable self-refresh */
+
+#endif /* _ASM_SH_SUSPEND_H */
diff --git a/arch/sh/include/asm/timer.h b/arch/sh/include/asm/timer.h
index a7ca3a195bb5..4c3b66e30af2 100644
--- a/arch/sh/include/asm/timer.h
+++ b/arch/sh/include/asm/timer.h
@@ -9,7 +9,6 @@ struct sys_timer_ops {
 	int (*init)(void);
 	int (*start)(void);
 	int (*stop)(void);
-	cycle_t (*read)(void);
 #ifndef CONFIG_GENERIC_TIME
 	unsigned long (*get_offset)(void);
 #endif
@@ -39,6 +38,7 @@ struct sys_timer *get_sys_timer(void);
 
 /* arch/sh/kernel/time.c */
 void handle_timer_tick(void);
-extern unsigned long sh_hpt_frequency;
+
+extern struct clocksource clocksource_sh;
 
 #endif /* __ASM_SH_TIMER_H */
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 88ff1ae8a6b8..9c16f737074a 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -6,22 +6,106 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#include <linux/pagemap.h>
+
+#ifdef CONFIG_MMU
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * TLB handling. This allows us to remove pages from the page
+ * tables, and efficiently handle the TLB issues.
+ */
+struct mmu_gather {
+	struct mm_struct	*mm;
+	unsigned int		fullmm;
+	unsigned long		start, end;
+};
 
-#define tlb_start_vma(tlb, vma) \
-	flush_cache_range(vma, vma->vm_start, vma->vm_end)
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-#define tlb_end_vma(tlb, vma)	\
-	flush_tlb_range(vma, vma->vm_start, vma->vm_end)
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+	tlb->start = TASK_SIZE;
+	tlb->end = 0;
 
-#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
+	if (tlb->fullmm) {
+		tlb->start = 0;
+		tlb->end = TASK_SIZE;
+	}
+}
+
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+	tlb->mm = mm;
+	tlb->fullmm = full_mm_flush;
+
+	init_tlb_gather(tlb);
+
+	return tlb;
+}
+
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (tlb->fullmm)
+		flush_tlb_mm(tlb->mm);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
+}
+
+static inline void
+tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
+{
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + PAGE_SIZE)
+		tlb->end = address + PAGE_SIZE;
+}
 
 /*
- * Flush whole TLBs for MM
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
  */
-#define tlb_flush(tlb)				flush_tlb_mm((tlb)->mm)
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm)
+		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm && tlb->end) {
+		flush_tlb_range(vma, tlb->start, tlb->end);
+		init_tlb_gather(tlb);
+	}
+}
+
+#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
+#define pte_free_tlb(tlb, ptep)		pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp)		pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, pudp)		pud_free((tlb)->mm, pudp)
+
+#define tlb_migrate_finish(mm)		do { } while (0)
+
+#else /* CONFIG_MMU */
+
+#define tlb_start_vma(tlb, vma)				do { } while (0)
+#define tlb_end_vma(tlb, vma)				do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
+#define tlb_flush(tlb)					do { } while (0)
 
-#include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
 
+#endif /* CONFIG_MMU */
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SH_TLB_H */
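
The mmu_gather helpers above are driven by the core VM during unmap; roughly, the call sequence looks like the following sketch. The pte lookup is a stand-in, not a real helper, and the whole function is illustrative rather than kernel code.

	/* Stand-in for whatever page-table walk the caller has already done. */
	extern pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr);

	/* Illustrative unmap path, assuming the helpers defined above. */
	static void example_unmap_range(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
	{
		struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* 0 = not a full-mm teardown */
		unsigned long addr;

		tlb_start_vma(tlb, vma);			/* flush caches for the VMA */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			pte_t *ptep = example_lookup_pte(mm, addr);

			tlb_remove_tlb_entry(tlb, ptep, addr);	/* widens tlb->start/end */
		}
		tlb_end_vma(tlb, vma);				/* flush_tlb_range() over the span */
		tlb_finish_mmu(tlb, start, end);		/* full-mm flush + put_cpu_var() */
	}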