Diffstat (limited to 'arch/blackfin/include/asm')
44 files changed, 2356 insertions, 707 deletions
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 25776c19064b..94b2a9b19451 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h | |||
@@ -15,104 +15,159 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define ATOMIC_INIT(i) { (i) } | 17 | #define ATOMIC_INIT(i) { (i) } |
18 | |||
19 | #define atomic_read(v) ((v)->counter) | ||
20 | #define atomic_set(v, i) (((v)->counter) = i) | 18 | #define atomic_set(v, i) (((v)->counter) = i) |
21 | 19 | ||
22 | static __inline__ void atomic_add(int i, atomic_t * v) | 20 | #ifdef CONFIG_SMP |
21 | |||
22 | #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter) | ||
23 | |||
24 | asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); | ||
25 | |||
26 | asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); | ||
27 | |||
28 | asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value); | ||
29 | |||
30 | asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value); | ||
31 | |||
32 | asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value); | ||
33 | |||
34 | asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value); | ||
35 | |||
36 | static inline void atomic_add(int i, atomic_t *v) | ||
37 | { | ||
38 | __raw_atomic_update_asm(&v->counter, i); | ||
39 | } | ||
40 | |||
41 | static inline void atomic_sub(int i, atomic_t *v) | ||
42 | { | ||
43 | __raw_atomic_update_asm(&v->counter, -i); | ||
44 | } | ||
45 | |||
46 | static inline int atomic_add_return(int i, atomic_t *v) | ||
47 | { | ||
48 | return __raw_atomic_update_asm(&v->counter, i); | ||
49 | } | ||
50 | |||
51 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
52 | { | ||
53 | return __raw_atomic_update_asm(&v->counter, -i); | ||
54 | } | ||
55 | |||
56 | static inline void atomic_inc(volatile atomic_t *v) | ||
57 | { | ||
58 | __raw_atomic_update_asm(&v->counter, 1); | ||
59 | } | ||
60 | |||
61 | static inline void atomic_dec(volatile atomic_t *v) | ||
62 | { | ||
63 | __raw_atomic_update_asm(&v->counter, -1); | ||
64 | } | ||
65 | |||
66 | static inline void atomic_clear_mask(int mask, atomic_t *v) | ||
67 | { | ||
68 | __raw_atomic_clear_asm(&v->counter, mask); | ||
69 | } | ||
70 | |||
71 | static inline void atomic_set_mask(int mask, atomic_t *v) | ||
72 | { | ||
73 | __raw_atomic_set_asm(&v->counter, mask); | ||
74 | } | ||
75 | |||
76 | static inline int atomic_test_mask(int mask, atomic_t *v) | ||
77 | { | ||
78 | return __raw_atomic_test_asm(&v->counter, mask); | ||
79 | } | ||
80 | |||
81 | /* Atomic operations are already serializing */ | ||
82 | #define smp_mb__before_atomic_dec() barrier() | ||
83 | #define smp_mb__after_atomic_dec() barrier() | ||
84 | #define smp_mb__before_atomic_inc() barrier() | ||
85 | #define smp_mb__after_atomic_inc() barrier() | ||
86 | |||
87 | #else /* !CONFIG_SMP */ | ||
88 | |||
89 | #define atomic_read(v) ((v)->counter) | ||
90 | |||
91 | static inline void atomic_add(int i, atomic_t *v) | ||
23 | { | 92 | { |
24 | long flags; | 93 | long flags; |
25 | 94 | ||
26 | local_irq_save(flags); | 95 | local_irq_save_hw(flags); |
27 | v->counter += i; | 96 | v->counter += i; |
28 | local_irq_restore(flags); | 97 | local_irq_restore_hw(flags); |
29 | } | 98 | } |
30 | 99 | ||
31 | static __inline__ void atomic_sub(int i, atomic_t * v) | 100 | static inline void atomic_sub(int i, atomic_t *v) |
32 | { | 101 | { |
33 | long flags; | 102 | long flags; |
34 | 103 | ||
35 | local_irq_save(flags); | 104 | local_irq_save_hw(flags); |
36 | v->counter -= i; | 105 | v->counter -= i; |
37 | local_irq_restore(flags); | 106 | local_irq_restore_hw(flags); |
38 | 107 | ||
39 | } | 108 | } |
40 | 109 | ||
41 | static inline int atomic_add_return(int i, atomic_t * v) | 110 | static inline int atomic_add_return(int i, atomic_t *v) |
42 | { | 111 | { |
43 | int __temp = 0; | 112 | int __temp = 0; |
44 | long flags; | 113 | long flags; |
45 | 114 | ||
46 | local_irq_save(flags); | 115 | local_irq_save_hw(flags); |
47 | v->counter += i; | 116 | v->counter += i; |
48 | __temp = v->counter; | 117 | __temp = v->counter; |
49 | local_irq_restore(flags); | 118 | local_irq_restore_hw(flags); |
50 | 119 | ||
51 | 120 | ||
52 | return __temp; | 121 | return __temp; |
53 | } | 122 | } |
54 | 123 | ||
55 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | 124 | static inline int atomic_sub_return(int i, atomic_t *v) |
56 | static inline int atomic_sub_return(int i, atomic_t * v) | ||
57 | { | 125 | { |
58 | int __temp = 0; | 126 | int __temp = 0; |
59 | long flags; | 127 | long flags; |
60 | 128 | ||
61 | local_irq_save(flags); | 129 | local_irq_save_hw(flags); |
62 | v->counter -= i; | 130 | v->counter -= i; |
63 | __temp = v->counter; | 131 | __temp = v->counter; |
64 | local_irq_restore(flags); | 132 | local_irq_restore_hw(flags); |
65 | 133 | ||
66 | return __temp; | 134 | return __temp; |
67 | } | 135 | } |
68 | 136 | ||
69 | static __inline__ void atomic_inc(volatile atomic_t * v) | 137 | static inline void atomic_inc(volatile atomic_t *v) |
70 | { | 138 | { |
71 | long flags; | 139 | long flags; |
72 | 140 | ||
73 | local_irq_save(flags); | 141 | local_irq_save_hw(flags); |
74 | v->counter++; | 142 | v->counter++; |
75 | local_irq_restore(flags); | 143 | local_irq_restore_hw(flags); |
76 | } | 144 | } |
77 | 145 | ||
78 | #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) | 146 | static inline void atomic_dec(volatile atomic_t *v) |
79 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | ||
80 | |||
81 | #define atomic_add_unless(v, a, u) \ | ||
82 | ({ \ | ||
83 | int c, old; \ | ||
84 | c = atomic_read(v); \ | ||
85 | while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ | ||
86 | c = old; \ | ||
87 | c != (u); \ | ||
88 | }) | ||
89 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
90 | |||
91 | static __inline__ void atomic_dec(volatile atomic_t * v) | ||
92 | { | 147 | { |
93 | long flags; | 148 | long flags; |
94 | 149 | ||
95 | local_irq_save(flags); | 150 | local_irq_save_hw(flags); |
96 | v->counter--; | 151 | v->counter--; |
97 | local_irq_restore(flags); | 152 | local_irq_restore_hw(flags); |
98 | } | 153 | } |
99 | 154 | ||
100 | static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v) | 155 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) |
101 | { | 156 | { |
102 | long flags; | 157 | long flags; |
103 | 158 | ||
104 | local_irq_save(flags); | 159 | local_irq_save_hw(flags); |
105 | v->counter &= ~mask; | 160 | v->counter &= ~mask; |
106 | local_irq_restore(flags); | 161 | local_irq_restore_hw(flags); |
107 | } | 162 | } |
108 | 163 | ||
109 | static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v) | 164 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) |
110 | { | 165 | { |
111 | long flags; | 166 | long flags; |
112 | 167 | ||
113 | local_irq_save(flags); | 168 | local_irq_save_hw(flags); |
114 | v->counter |= mask; | 169 | v->counter |= mask; |
115 | local_irq_restore(flags); | 170 | local_irq_restore_hw(flags); |
116 | } | 171 | } |
117 | 172 | ||
118 | /* Atomic operations are already serializing */ | 173 | /* Atomic operations are already serializing */ |
@@ -121,9 +176,25 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v) | |||
121 | #define smp_mb__before_atomic_inc() barrier() | 176 | #define smp_mb__before_atomic_inc() barrier() |
122 | #define smp_mb__after_atomic_inc() barrier() | 177 | #define smp_mb__after_atomic_inc() barrier() |
123 | 178 | ||
179 | #endif /* !CONFIG_SMP */ | ||
180 | |||
181 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | ||
124 | #define atomic_dec_return(v) atomic_sub_return(1,(v)) | 182 | #define atomic_dec_return(v) atomic_sub_return(1,(v)) |
125 | #define atomic_inc_return(v) atomic_add_return(1,(v)) | 183 | #define atomic_inc_return(v) atomic_add_return(1,(v)) |
126 | 184 | ||
185 | #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) | ||
186 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | ||
187 | |||
188 | #define atomic_add_unless(v, a, u) \ | ||
189 | ({ \ | ||
190 | int c, old; \ | ||
191 | c = atomic_read(v); \ | ||
192 | while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ | ||
193 | c = old; \ | ||
194 | c != (u); \ | ||
195 | }) | ||
196 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
197 | |||
127 | /* | 198 | /* |
128 | * atomic_inc_and_test - increment and test | 199 | * atomic_inc_and_test - increment and test |
129 | * @v: pointer of type atomic_t | 200 | * @v: pointer of type atomic_t |
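The atomic_add_unless() macro consolidated above is the standard compare-and-swap retry loop. As a reading aid, the same logic written out as a plain C function; the helper name is hypothetical and not part of this patch:

static inline int example_atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost the race; retry with the value another CPU wrote */
	return c != u;		/* non-zero if the add actually happened */
}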
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h index 77295666c34b..daffc0684e75 100644 --- a/arch/blackfin/include/asm/bfin-global.h +++ b/arch/blackfin/include/asm/bfin-global.h | |||
@@ -47,6 +47,9 @@ | |||
47 | # define DMA_UNCACHED_REGION (0) | 47 | # define DMA_UNCACHED_REGION (0) |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | extern void bfin_setup_caches(unsigned int cpu); | ||
51 | extern void bfin_setup_cpudata(unsigned int cpu); | ||
52 | |||
50 | extern unsigned long get_cclk(void); | 53 | extern unsigned long get_cclk(void); |
51 | extern unsigned long get_sclk(void); | 54 | extern unsigned long get_sclk(void); |
52 | extern unsigned long sclk_to_usecs(unsigned long sclk); | 55 | extern unsigned long sclk_to_usecs(unsigned long sclk); |
@@ -58,8 +61,6 @@ extern void dump_bfin_trace_buffer(void); | |||
58 | 61 | ||
59 | /* init functions only */ | 62 | /* init functions only */ |
60 | extern int init_arch_irq(void); | 63 | extern int init_arch_irq(void); |
61 | extern void bfin_icache_init(void); | ||
62 | extern void bfin_dcache_init(void); | ||
63 | extern void init_exception_vectors(void); | 64 | extern void init_exception_vectors(void); |
64 | extern void program_IAR(void); | 65 | extern void program_IAR(void); |
65 | 66 | ||
@@ -110,7 +111,7 @@ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; | |||
110 | 111 | ||
111 | #ifdef CONFIG_BFIN_ICACHE_LOCK | 112 | #ifdef CONFIG_BFIN_ICACHE_LOCK |
112 | extern void cache_grab_lock(int way); | 113 | extern void cache_grab_lock(int way); |
113 | extern void cache_lock(int way); | 114 | extern void bfin_cache_lock(int way); |
114 | #endif | 115 | #endif |
115 | 116 | ||
116 | #endif | 117 | #endif |
diff --git a/arch/blackfin/include/asm/bfin5xx_spi.h b/arch/blackfin/include/asm/bfin5xx_spi.h index 9fa19158e38d..1306e6b22946 100644 --- a/arch/blackfin/include/asm/bfin5xx_spi.h +++ b/arch/blackfin/include/asm/bfin5xx_spi.h | |||
@@ -1,22 +1,12 @@ | |||
1 | /************************************************************ | 1 | /* |
2 | 2 | * Blackfin On-Chip SPI Driver | |
3 | * Copyright (C) 2006-2008, Analog Devices. All Rights Reserved | 3 | * |
4 | * | 4 | * Copyright 2004-2008 Analog Devices Inc. |
5 | * FILE bfin5xx_spi.h | 5 | * |
6 | * PROGRAMMER(S): Luke Yang (Analog Devices Inc.) | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
7 | * | 7 | * |
8 | * | 8 | * Licensed under the GPL-2 or later. |
9 | * DATE OF CREATION: March. 10th 2006 | 9 | */ |
10 | * | ||
11 | * SYNOPSIS: | ||
12 | * | ||
13 | * DESCRIPTION: header file for SPI controller driver for Blackfin5xx. | ||
14 | ************************************************************** | ||
15 | |||
16 | * MODIFICATION HISTORY: | ||
17 | * March 10, 2006 bfin5xx_spi.h Created. (Luke Yang) | ||
18 | |||
19 | ************************************************************/ | ||
20 | 10 | ||
21 | #ifndef _SPI_CHANNEL_H_ | 11 | #ifndef _SPI_CHANNEL_H_ |
22 | #define _SPI_CHANNEL_H_ | 12 | #define _SPI_CHANNEL_H_ |
diff --git a/arch/blackfin/include/asm/bfin_sdh.h b/arch/blackfin/include/asm/bfin_sdh.h new file mode 100644 index 000000000000..d61d5497c590 --- /dev/null +++ b/arch/blackfin/include/asm/bfin_sdh.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * bfin_sdh.h - Blackfin SDH definitions | ||
3 | * | ||
4 | * Copyright 2008 Analog Devices Inc. | ||
5 | * | ||
6 | * Licensed under the GPL-2 or later. | ||
7 | */ | ||
8 | |||
9 | #ifndef __BFIN_SDH_H__ | ||
10 | #define __BFIN_SDH_H__ | ||
11 | |||
12 | struct bfin_sd_host { | ||
13 | int dma_chan; | ||
14 | int irq_int0; | ||
15 | int irq_int1; | ||
16 | u16 pin_req[7]; | ||
17 | }; | ||
18 | |||
19 | #endif | ||
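struct bfin_sd_host is the platform data a board file would attach to the SDH platform device. A hedged sketch of such board code follows; the device name, channel, IRQ, and pin constants are illustrative assumptions, not taken from this patch:

/* Hypothetical board code: all constants below are placeholders. */
static struct bfin_sd_host bfin_sdh_data = {
	.dma_chan = CH_SDH,			/* assumed DMA channel define */
	.irq_int0 = IRQ_SDH_MASK0,		/* assumed IRQ defines */
	.irq_int1 = IRQ_SDH_MASK1,
	.pin_req  = {P_SD_D0, P_SD_D1, P_SD_D2, P_SD_D3, P_SD_CLK, P_SD_CMD, 0},
};

static struct platform_device bfin_sdh_device = {
	.name = "bfin-sdh",			/* driver name assumed */
	.id   = 0,
	.dev  = { .platform_data = &bfin_sdh_data },
};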
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h index c76ed8def302..fe88a2c19213 100644 --- a/arch/blackfin/include/asm/bfin_sport.h +++ b/arch/blackfin/include/asm/bfin_sport.h | |||
@@ -120,9 +120,6 @@ struct sport_register { | |||
120 | #define SPORT_IOC_MAGIC 'P' | 120 | #define SPORT_IOC_MAGIC 'P' |
121 | #define SPORT_IOC_CONFIG _IOWR('P', 0x01, struct sport_config) | 121 | #define SPORT_IOC_CONFIG _IOWR('P', 0x01, struct sport_config) |
122 | 122 | ||
123 | /* Test purpose */ | ||
124 | #define ENABLE_AD73311 _IOWR('P', 0x02, int) | ||
125 | |||
126 | struct sport_dev { | 123 | struct sport_dev { |
127 | struct cdev cdev; /* Char device structure */ | 124 | struct cdev cdev; /* Char device structure */ |
128 | 125 | ||
diff --git a/arch/blackfin/include/asm/bfrom.h b/arch/blackfin/include/asm/bfrom.h index cfe8024c3b2f..9e4be5e5e767 100644 --- a/arch/blackfin/include/asm/bfrom.h +++ b/arch/blackfin/include/asm/bfrom.h | |||
@@ -43,6 +43,11 @@ __attribute__((__noreturn__)) | |||
43 | static inline void bfrom_SoftReset(void *new_stack) | 43 | static inline void bfrom_SoftReset(void *new_stack) |
44 | { | 44 | { |
45 | while (1) | 45 | while (1) |
46 | /* | ||
47 | * We don't declare the SP as clobbered on purpose, since | ||
48 | * it confuses the heck out of the compiler, and this function | ||
49 | * never returns | ||
50 | */ | ||
46 | __asm__ __volatile__( | 51 | __asm__ __volatile__( |
47 | "sp = %[stack];" | 52 | "sp = %[stack];" |
48 | "jump (%[bfrom_syscontrol]);" | 53 | "jump (%[bfrom_syscontrol]);" |
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index c428e4106f89..21b036eadab1 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h | |||
@@ -7,7 +7,6 @@ | |||
7 | 7 | ||
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | #include <asm/byteorder.h> /* swab32 */ | 9 | #include <asm/byteorder.h> /* swab32 */ |
10 | #include <asm/system.h> /* save_flags */ | ||
11 | 10 | ||
12 | #ifdef __KERNEL__ | 11 | #ifdef __KERNEL__ |
13 | 12 | ||
@@ -20,80 +19,107 @@ | |||
20 | #include <asm-generic/bitops/sched.h> | 19 | #include <asm-generic/bitops/sched.h> |
21 | #include <asm-generic/bitops/ffz.h> | 20 | #include <asm-generic/bitops/ffz.h> |
22 | 21 | ||
23 | static __inline__ void set_bit(int nr, volatile unsigned long *addr) | 22 | #ifdef CONFIG_SMP |
23 | |||
24 | #include <linux/linkage.h> | ||
25 | |||
26 | asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr); | ||
27 | |||
28 | asmlinkage int __raw_bit_clear_asm(volatile unsigned long *addr, int nr); | ||
29 | |||
30 | asmlinkage int __raw_bit_toggle_asm(volatile unsigned long *addr, int nr); | ||
31 | |||
32 | asmlinkage int __raw_bit_test_set_asm(volatile unsigned long *addr, int nr); | ||
33 | |||
34 | asmlinkage int __raw_bit_test_clear_asm(volatile unsigned long *addr, int nr); | ||
35 | |||
36 | asmlinkage int __raw_bit_test_toggle_asm(volatile unsigned long *addr, int nr); | ||
37 | |||
38 | asmlinkage int __raw_bit_test_asm(const volatile unsigned long *addr, int nr); | ||
39 | |||
40 | static inline void set_bit(int nr, volatile unsigned long *addr) | ||
24 | { | 41 | { |
25 | int *a = (int *)addr; | 42 | volatile unsigned long *a = addr + (nr >> 5); |
26 | int mask; | 43 | __raw_bit_set_asm(a, nr & 0x1f); |
27 | unsigned long flags; | 44 | } |
28 | 45 | ||
29 | a += nr >> 5; | 46 | static inline void clear_bit(int nr, volatile unsigned long *addr) |
30 | mask = 1 << (nr & 0x1f); | 47 | { |
31 | local_irq_save(flags); | 48 | volatile unsigned long *a = addr + (nr >> 5); |
32 | *a |= mask; | 49 | __raw_bit_clear_asm(a, nr & 0x1f); |
33 | local_irq_restore(flags); | ||
34 | } | 50 | } |
35 | 51 | ||
36 | static __inline__ void __set_bit(int nr, volatile unsigned long *addr) | 52 | static inline void change_bit(int nr, volatile unsigned long *addr) |
37 | { | 53 | { |
38 | int *a = (int *)addr; | 54 | volatile unsigned long *a = addr + (nr >> 5); |
39 | int mask; | 55 | __raw_bit_toggle_asm(a, nr & 0x1f); |
56 | } | ||
40 | 57 | ||
41 | a += nr >> 5; | 58 | static inline int test_bit(int nr, const volatile unsigned long *addr) |
42 | mask = 1 << (nr & 0x1f); | 59 | { |
43 | *a |= mask; | 60 | volatile const unsigned long *a = addr + (nr >> 5); |
61 | return __raw_bit_test_asm(a, nr & 0x1f) != 0; | ||
44 | } | 62 | } |
45 | 63 | ||
46 | /* | 64 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
47 | * clear_bit() doesn't provide any barrier for the compiler. | 65 | { |
48 | */ | 66 | volatile unsigned long *a = addr + (nr >> 5); |
49 | #define smp_mb__before_clear_bit() barrier() | 67 | return __raw_bit_test_set_asm(a, nr & 0x1f); |
50 | #define smp_mb__after_clear_bit() barrier() | 68 | } |
69 | |||
70 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
71 | { | ||
72 | volatile unsigned long *a = addr + (nr >> 5); | ||
73 | return __raw_bit_test_clear_asm(a, nr & 0x1f); | ||
74 | } | ||
75 | |||
76 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | ||
77 | { | ||
78 | volatile unsigned long *a = addr + (nr >> 5); | ||
79 | return __raw_bit_test_toggle_asm(a, nr & 0x1f); | ||
80 | } | ||
81 | |||
82 | #else /* !CONFIG_SMP */ | ||
83 | |||
84 | #include <asm/system.h> /* save_flags */ | ||
51 | 85 | ||
52 | static __inline__ void clear_bit(int nr, volatile unsigned long *addr) | 86 | static inline void set_bit(int nr, volatile unsigned long *addr) |
53 | { | 87 | { |
54 | int *a = (int *)addr; | 88 | int *a = (int *)addr; |
55 | int mask; | 89 | int mask; |
56 | unsigned long flags; | 90 | unsigned long flags; |
57 | a += nr >> 5; | 91 | a += nr >> 5; |
58 | mask = 1 << (nr & 0x1f); | 92 | mask = 1 << (nr & 0x1f); |
59 | local_irq_save(flags); | 93 | local_irq_save_hw(flags); |
60 | *a &= ~mask; | 94 | *a |= mask; |
61 | local_irq_restore(flags); | 95 | local_irq_restore_hw(flags); |
62 | } | 96 | } |
63 | 97 | ||
64 | static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) | 98 | static inline void clear_bit(int nr, volatile unsigned long *addr) |
65 | { | 99 | { |
66 | int *a = (int *)addr; | 100 | int *a = (int *)addr; |
67 | int mask; | 101 | int mask; |
68 | 102 | unsigned long flags; | |
69 | a += nr >> 5; | 103 | a += nr >> 5; |
70 | mask = 1 << (nr & 0x1f); | 104 | mask = 1 << (nr & 0x1f); |
105 | local_irq_save_hw(flags); | ||
71 | *a &= ~mask; | 106 | *a &= ~mask; |
107 | local_irq_restore_hw(flags); | ||
72 | } | 108 | } |
73 | 109 | ||
74 | static __inline__ void change_bit(int nr, volatile unsigned long *addr) | 110 | static inline void change_bit(int nr, volatile unsigned long *addr) |
75 | { | 111 | { |
76 | int mask, flags; | 112 | int mask, flags; |
77 | unsigned long *ADDR = (unsigned long *)addr; | 113 | unsigned long *ADDR = (unsigned long *)addr; |
78 | 114 | ||
79 | ADDR += nr >> 5; | 115 | ADDR += nr >> 5; |
80 | mask = 1 << (nr & 31); | 116 | mask = 1 << (nr & 31); |
81 | local_irq_save(flags); | 117 | local_irq_save_hw(flags); |
82 | *ADDR ^= mask; | ||
83 | local_irq_restore(flags); | ||
84 | } | ||
85 | |||
86 | static __inline__ void __change_bit(int nr, volatile unsigned long *addr) | ||
87 | { | ||
88 | int mask; | ||
89 | unsigned long *ADDR = (unsigned long *)addr; | ||
90 | |||
91 | ADDR += nr >> 5; | ||
92 | mask = 1 << (nr & 31); | ||
93 | *ADDR ^= mask; | 118 | *ADDR ^= mask; |
119 | local_irq_restore_hw(flags); | ||
94 | } | 120 | } |
95 | 121 | ||
96 | static __inline__ int test_and_set_bit(int nr, void *addr) | 122 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) |
97 | { | 123 | { |
98 | int mask, retval; | 124 | int mask, retval; |
99 | volatile unsigned int *a = (volatile unsigned int *)addr; | 125 | volatile unsigned int *a = (volatile unsigned int *)addr; |
@@ -101,27 +127,31 @@ static __inline__ int test_and_set_bit(int nr, void *addr) | |||
101 | 127 | ||
102 | a += nr >> 5; | 128 | a += nr >> 5; |
103 | mask = 1 << (nr & 0x1f); | 129 | mask = 1 << (nr & 0x1f); |
104 | local_irq_save(flags); | 130 | local_irq_save_hw(flags); |
105 | retval = (mask & *a) != 0; | 131 | retval = (mask & *a) != 0; |
106 | *a |= mask; | 132 | *a |= mask; |
107 | local_irq_restore(flags); | 133 | local_irq_restore_hw(flags); |
108 | 134 | ||
109 | return retval; | 135 | return retval; |
110 | } | 136 | } |
111 | 137 | ||
112 | static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr) | 138 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) |
113 | { | 139 | { |
114 | int mask, retval; | 140 | int mask, retval; |
115 | volatile unsigned int *a = (volatile unsigned int *)addr; | 141 | volatile unsigned int *a = (volatile unsigned int *)addr; |
142 | unsigned long flags; | ||
116 | 143 | ||
117 | a += nr >> 5; | 144 | a += nr >> 5; |
118 | mask = 1 << (nr & 0x1f); | 145 | mask = 1 << (nr & 0x1f); |
146 | local_irq_save_hw(flags); | ||
119 | retval = (mask & *a) != 0; | 147 | retval = (mask & *a) != 0; |
120 | *a |= mask; | 148 | *a &= ~mask; |
149 | local_irq_restore_hw(flags); | ||
150 | |||
121 | return retval; | 151 | return retval; |
122 | } | 152 | } |
123 | 153 | ||
124 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr) | 154 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) |
125 | { | 155 | { |
126 | int mask, retval; | 156 | int mask, retval; |
127 | volatile unsigned int *a = (volatile unsigned int *)addr; | 157 | volatile unsigned int *a = (volatile unsigned int *)addr; |
@@ -129,15 +159,52 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr) | |||
129 | 159 | ||
130 | a += nr >> 5; | 160 | a += nr >> 5; |
131 | mask = 1 << (nr & 0x1f); | 161 | mask = 1 << (nr & 0x1f); |
132 | local_irq_save(flags); | 162 | local_irq_save_hw(flags); |
133 | retval = (mask & *a) != 0; | 163 | retval = (mask & *a) != 0; |
164 | *a ^= mask; | ||
165 | local_irq_restore_hw(flags); | ||
166 | return retval; | ||
167 | } | ||
168 | |||
169 | #endif /* CONFIG_SMP */ | ||
170 | |||
171 | /* | ||
172 | * clear_bit() doesn't provide any barrier for the compiler. | ||
173 | */ | ||
174 | #define smp_mb__before_clear_bit() barrier() | ||
175 | #define smp_mb__after_clear_bit() barrier() | ||
176 | |||
177 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
178 | { | ||
179 | int *a = (int *)addr; | ||
180 | int mask; | ||
181 | |||
182 | a += nr >> 5; | ||
183 | mask = 1 << (nr & 0x1f); | ||
184 | *a |= mask; | ||
185 | } | ||
186 | |||
187 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
188 | { | ||
189 | int *a = (int *)addr; | ||
190 | int mask; | ||
191 | |||
192 | a += nr >> 5; | ||
193 | mask = 1 << (nr & 0x1f); | ||
134 | *a &= ~mask; | 194 | *a &= ~mask; |
135 | local_irq_restore(flags); | 195 | } |
136 | 196 | ||
137 | return retval; | 197 | static inline void __change_bit(int nr, volatile unsigned long *addr) |
198 | { | ||
199 | int mask; | ||
200 | unsigned long *ADDR = (unsigned long *)addr; | ||
201 | |||
202 | ADDR += nr >> 5; | ||
203 | mask = 1 << (nr & 31); | ||
204 | *ADDR ^= mask; | ||
138 | } | 205 | } |
139 | 206 | ||
140 | static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) | 207 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) |
141 | { | 208 | { |
142 | int mask, retval; | 209 | int mask, retval; |
143 | volatile unsigned int *a = (volatile unsigned int *)addr; | 210 | volatile unsigned int *a = (volatile unsigned int *)addr; |
@@ -145,26 +212,23 @@ static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) | |||
145 | a += nr >> 5; | 212 | a += nr >> 5; |
146 | mask = 1 << (nr & 0x1f); | 213 | mask = 1 << (nr & 0x1f); |
147 | retval = (mask & *a) != 0; | 214 | retval = (mask & *a) != 0; |
148 | *a &= ~mask; | 215 | *a |= mask; |
149 | return retval; | 216 | return retval; |
150 | } | 217 | } |
151 | 218 | ||
152 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr) | 219 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) |
153 | { | 220 | { |
154 | int mask, retval; | 221 | int mask, retval; |
155 | volatile unsigned int *a = (volatile unsigned int *)addr; | 222 | volatile unsigned int *a = (volatile unsigned int *)addr; |
156 | unsigned long flags; | ||
157 | 223 | ||
158 | a += nr >> 5; | 224 | a += nr >> 5; |
159 | mask = 1 << (nr & 0x1f); | 225 | mask = 1 << (nr & 0x1f); |
160 | local_irq_save(flags); | ||
161 | retval = (mask & *a) != 0; | 226 | retval = (mask & *a) != 0; |
162 | *a ^= mask; | 227 | *a &= ~mask; |
163 | local_irq_restore(flags); | ||
164 | return retval; | 228 | return retval; |
165 | } | 229 | } |
166 | 230 | ||
167 | static __inline__ int __test_and_change_bit(int nr, | 231 | static inline int __test_and_change_bit(int nr, |
168 | volatile unsigned long *addr) | 232 | volatile unsigned long *addr) |
169 | { | 233 | { |
170 | int mask, retval; | 234 | int mask, retval; |
@@ -177,16 +241,7 @@ static __inline__ int __test_and_change_bit(int nr, | |||
177 | return retval; | 241 | return retval; |
178 | } | 242 | } |
179 | 243 | ||
180 | /* | 244 | static inline int __test_bit(int nr, const void *addr) |
181 | * This routine doesn't need to be atomic. | ||
182 | */ | ||
183 | static __inline__ int __constant_test_bit(int nr, const void *addr) | ||
184 | { | ||
185 | return ((1UL << (nr & 31)) & | ||
186 | (((const volatile unsigned int *)addr)[nr >> 5])) != 0; | ||
187 | } | ||
188 | |||
189 | static __inline__ int __test_bit(int nr, const void *addr) | ||
190 | { | 245 | { |
191 | int *a = (int *)addr; | 246 | int *a = (int *)addr; |
192 | int mask; | 247 | int mask; |
@@ -196,10 +251,16 @@ static __inline__ int __test_bit(int nr, const void *addr) | |||
196 | return ((mask & *a) != 0); | 251 | return ((mask & *a) != 0); |
197 | } | 252 | } |
198 | 253 | ||
199 | #define test_bit(nr,addr) \ | 254 | #ifndef CONFIG_SMP |
200 | (__builtin_constant_p(nr) ? \ | 255 | /* |
201 | __constant_test_bit((nr),(addr)) : \ | 256 | * This routine doesn't need irq save and restore ops in UP |
202 | __test_bit((nr),(addr))) | 257 | * context. |
258 | */ | ||
259 | static inline int test_bit(int nr, const void *addr) | ||
260 | { | ||
261 | return __test_bit(nr, addr); | ||
262 | } | ||
263 | #endif | ||
203 | 264 | ||
204 | #include <asm-generic/bitops/find.h> | 265 | #include <asm-generic/bitops/find.h> |
205 | #include <asm-generic/bitops/hweight.h> | 266 | #include <asm-generic/bitops/hweight.h> |
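Both the SMP and UP variants above split the bit number the same way: nr >> 5 selects the 32-bit word and nr & 0x1f the bit within it. A minimal standalone illustration (names hypothetical, not part of this patch):

static inline void example_set(unsigned long *bitmap, int nr)
{
	unsigned long *word = bitmap + (nr >> 5);	/* which 32-bit word */
	unsigned long mask  = 1UL << (nr & 0x1f);	/* which bit inside it */

	*word |= mask;	/* non-atomic, like __set_bit(); the atomic variants
			 * wrap this in irq save/restore, or call the
			 * __raw_bit_*_asm helpers on SMP */
}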
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h index 8749b0e321ab..8bb2cb139756 100644 --- a/arch/blackfin/include/asm/blackfin.h +++ b/arch/blackfin/include/asm/blackfin.h | |||
@@ -6,11 +6,6 @@ | |||
6 | #ifndef _BLACKFIN_H_ | 6 | #ifndef _BLACKFIN_H_ |
7 | #define _BLACKFIN_H_ | 7 | #define _BLACKFIN_H_ |
8 | 8 | ||
9 | #define LO(con32) ((con32) & 0xFFFF) | ||
10 | #define lo(con32) ((con32) & 0xFFFF) | ||
11 | #define HI(con32) (((con32) >> 16) & 0xFFFF) | ||
12 | #define hi(con32) (((con32) >> 16) & 0xFFFF) | ||
13 | |||
14 | #include <mach/anomaly.h> | 9 | #include <mach/anomaly.h> |
15 | 10 | ||
16 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
@@ -65,6 +60,11 @@ static inline void CSYNC(void) | |||
65 | 60 | ||
66 | #else /* __ASSEMBLY__ */ | 61 | #else /* __ASSEMBLY__ */ |
67 | 62 | ||
63 | #define LO(con32) ((con32) & 0xFFFF) | ||
64 | #define lo(con32) ((con32) & 0xFFFF) | ||
65 | #define HI(con32) (((con32) >> 16) & 0xFFFF) | ||
66 | #define hi(con32) (((con32) >> 16) & 0xFFFF) | ||
67 | |||
68 | /* SSYNC & CSYNC implementations for assembly files */ | 68 | /* SSYNC & CSYNC implementations for assembly files */ |
69 | 69 | ||
70 | #define ssync(x) SSYNC(x) | 70 | #define ssync(x) SSYNC(x) |
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h index 023d72133b5a..86637814cf25 100644 --- a/arch/blackfin/include/asm/cache.h +++ b/arch/blackfin/include/asm/cache.h | |||
@@ -12,6 +12,11 @@ | |||
12 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | 12 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) |
13 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | 13 | #define SMP_CACHE_BYTES L1_CACHE_BYTES |
14 | 14 | ||
15 | #ifdef CONFIG_SMP | ||
16 | #define __cacheline_aligned | ||
17 | #else | ||
18 | #define ____cacheline_aligned | ||
19 | |||
15 | /* | 20 | /* |
16 | * Put cacheline_aliged data to L1 data memory | 21 | * Put cacheline_aliged data to L1 data memory |
17 | */ | 22 | */ |
@@ -21,9 +26,33 @@ | |||
21 | __section__(".data_l1.cacheline_aligned"))) | 26 | __section__(".data_l1.cacheline_aligned"))) |
22 | #endif | 27 | #endif |
23 | 28 | ||
29 | #endif | ||
30 | |||
24 | /* | 31 | /* |
25 | * largest L1 which this arch supports | 32 | * largest L1 which this arch supports |
26 | */ | 33 | */ |
27 | #define L1_CACHE_SHIFT_MAX 5 | 34 | #define L1_CACHE_SHIFT_MAX 5 |
28 | 35 | ||
36 | #if defined(CONFIG_SMP) && \ | ||
37 | !defined(CONFIG_BFIN_CACHE_COHERENT) && \ | ||
38 | defined(CONFIG_BFIN_DCACHE) | ||
39 | #define __ARCH_SYNC_CORE_DCACHE | ||
40 | #ifndef __ASSEMBLY__ | ||
41 | asmlinkage void __raw_smp_mark_barrier_asm(void); | ||
42 | asmlinkage void __raw_smp_check_barrier_asm(void); | ||
43 | |||
44 | static inline void smp_mark_barrier(void) | ||
45 | { | ||
46 | __raw_smp_mark_barrier_asm(); | ||
47 | } | ||
48 | static inline void smp_check_barrier(void) | ||
49 | { | ||
50 | __raw_smp_check_barrier_asm(); | ||
51 | } | ||
52 | |||
53 | void resync_core_dcache(void); | ||
54 | #endif | ||
55 | #endif | ||
56 | |||
57 | |||
29 | #endif | 58 | #endif |
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h index 4403415583fa..1b040f5b4feb 100644 --- a/arch/blackfin/include/asm/cacheflush.h +++ b/arch/blackfin/include/asm/cacheflush.h | |||
@@ -35,6 +35,7 @@ extern void blackfin_icache_flush_range(unsigned long start_address, unsigned lo | |||
35 | extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address); | 35 | extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address); |
36 | extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); | 36 | extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); |
37 | extern void blackfin_dflush_page(void *page); | 37 | extern void blackfin_dflush_page(void *page); |
38 | extern void blackfin_invalidate_entire_dcache(void); | ||
38 | 39 | ||
39 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 40 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
40 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 41 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
@@ -44,12 +45,20 @@ extern void blackfin_dflush_page(void *page); | |||
44 | #define flush_cache_vmap(start, end) do { } while (0) | 45 | #define flush_cache_vmap(start, end) do { } while (0) |
45 | #define flush_cache_vunmap(start, end) do { } while (0) | 46 | #define flush_cache_vunmap(start, end) do { } while (0) |
46 | 47 | ||
48 | #ifdef CONFIG_SMP | ||
49 | #define flush_icache_range_others(start, end) \ | ||
50 | smp_icache_flush_range_others((start), (end)) | ||
51 | #else | ||
52 | #define flush_icache_range_others(start, end) do { } while (0) | ||
53 | #endif | ||
54 | |||
47 | static inline void flush_icache_range(unsigned start, unsigned end) | 55 | static inline void flush_icache_range(unsigned start, unsigned end) |
48 | { | 56 | { |
49 | #if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE) | 57 | #if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_ICACHE) |
50 | 58 | ||
51 | # if defined(CONFIG_BFIN_WT) | 59 | # if defined(CONFIG_BFIN_WT) |
52 | blackfin_icache_flush_range((start), (end)); | 60 | blackfin_icache_flush_range((start), (end)); |
61 | flush_icache_range_others(start, end); | ||
53 | # else | 62 | # else |
54 | blackfin_icache_dcache_flush_range((start), (end)); | 63 | blackfin_icache_dcache_flush_range((start), (end)); |
55 | # endif | 64 | # endif |
@@ -58,6 +67,7 @@ static inline void flush_icache_range(unsigned start, unsigned end) | |||
58 | 67 | ||
59 | # if defined(CONFIG_BFIN_ICACHE) | 68 | # if defined(CONFIG_BFIN_ICACHE) |
60 | blackfin_icache_flush_range((start), (end)); | 69 | blackfin_icache_flush_range((start), (end)); |
70 | flush_icache_range_others(start, end); | ||
61 | # endif | 71 | # endif |
62 | # if defined(CONFIG_BFIN_DCACHE) | 72 | # if defined(CONFIG_BFIN_DCACHE) |
63 | blackfin_dcache_flush_range((start), (end)); | 73 | blackfin_dcache_flush_range((start), (end)); |
@@ -66,10 +76,12 @@ static inline void flush_icache_range(unsigned start, unsigned end) | |||
66 | #endif | 76 | #endif |
67 | } | 77 | } |
68 | 78 | ||
69 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 79 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
70 | do { memcpy(dst, src, len); \ | 80 | do { memcpy(dst, src, len); \ |
71 | flush_icache_range ((unsigned) (dst), (unsigned) (dst) + (len)); \ | 81 | flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \ |
82 | flush_icache_range_others((unsigned long) (dst), (unsigned long) (dst) + (len));\ | ||
72 | } while (0) | 83 | } while (0) |
84 | |||
73 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) | 85 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) memcpy(dst, src, len) |
74 | 86 | ||
75 | #if defined(CONFIG_BFIN_DCACHE) | 87 | #if defined(CONFIG_BFIN_DCACHE) |
@@ -82,7 +94,7 @@ do { memcpy(dst, src, len); \ | |||
82 | # define flush_dcache_page(page) blackfin_dflush_page(page_address(page)) | 94 | # define flush_dcache_page(page) blackfin_dflush_page(page_address(page)) |
83 | #else | 95 | #else |
84 | # define flush_dcache_range(start,end) do { } while (0) | 96 | # define flush_dcache_range(start,end) do { } while (0) |
85 | # define flush_dcache_page(page) do { } while (0) | 97 | # define flush_dcache_page(page) do { } while (0) |
86 | #endif | 98 | #endif |
87 | 99 | ||
88 | extern unsigned long reserved_mem_dcache_on; | 100 | extern unsigned long reserved_mem_dcache_on; |
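The practical consequence of flush_icache_range() (and, on SMP, flush_icache_range_others()) is that any code which writes instructions into memory must flush the affected range before jumping to it. A minimal, hedged illustration using only the function declared above; the helper name is hypothetical:

static void example_patch_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}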
diff --git a/arch/blackfin/include/asm/checksum.h b/arch/blackfin/include/asm/checksum.h index 6f6af2b8e9e0..f67289a0d8d2 100644 --- a/arch/blackfin/include/asm/checksum.h +++ b/arch/blackfin/include/asm/checksum.h | |||
@@ -78,7 +78,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, | |||
78 | "%0 = %0 + %4;\n\t" | 78 | "%0 = %0 + %4;\n\t" |
79 | "NOP;\n\t" | 79 | "NOP;\n\t" |
80 | : "=d" (sum) | 80 | : "=d" (sum) |
81 | : "d" (daddr), "d" (saddr), "d" ((ntohs(len)<<16)+proto*256), "d" (1), "0"(sum)); | 81 | : "d" (daddr), "d" (saddr), "d" ((ntohs(len)<<16)+proto*256), "d" (1), "0"(sum) |
82 | : "CC"); | ||
82 | 83 | ||
83 | return (sum); | 84 | return (sum); |
84 | } | 85 | } |
diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S index c0e630edfb9a..16561ab18b38 100644 --- a/arch/blackfin/include/asm/context.S +++ b/arch/blackfin/include/asm/context.S | |||
@@ -303,9 +303,14 @@ | |||
303 | RETI = [sp++]; | 303 | RETI = [sp++]; |
304 | RETS = [sp++]; | 304 | RETS = [sp++]; |
305 | 305 | ||
306 | p0.h = _irq_flags; | 306 | #ifdef CONFIG_SMP |
307 | p0.l = _irq_flags; | 307 | GET_PDA(p0, r0); |
308 | r0 = [p0 + PDA_IRQFLAGS]; | ||
309 | #else | ||
310 | p0.h = _bfin_irq_flags; | ||
311 | p0.l = _bfin_irq_flags; | ||
308 | r0 = [p0]; | 312 | r0 = [p0]; |
313 | #endif | ||
309 | sti r0; | 314 | sti r0; |
310 | 315 | ||
311 | sp += 4; /* Skip Reserved */ | 316 | sp += 4; /* Skip Reserved */ |
@@ -353,3 +358,41 @@ | |||
353 | csync; | 358 | csync; |
354 | .endm | 359 | .endm |
355 | 360 | ||
361 | .macro save_context_cplb | ||
362 | [--sp] = (R7:0, P5:0); | ||
363 | [--sp] = fp; | ||
364 | |||
365 | [--sp] = a0.x; | ||
366 | [--sp] = a0.w; | ||
367 | [--sp] = a1.x; | ||
368 | [--sp] = a1.w; | ||
369 | |||
370 | [--sp] = LC0; | ||
371 | [--sp] = LC1; | ||
372 | [--sp] = LT0; | ||
373 | [--sp] = LT1; | ||
374 | [--sp] = LB0; | ||
375 | [--sp] = LB1; | ||
376 | |||
377 | [--sp] = RETS; | ||
378 | .endm | ||
379 | |||
380 | .macro restore_context_cplb | ||
381 | RETS = [sp++]; | ||
382 | |||
383 | LB1 = [sp++]; | ||
384 | LB0 = [sp++]; | ||
385 | LT1 = [sp++]; | ||
386 | LT0 = [sp++]; | ||
387 | LC1 = [sp++]; | ||
388 | LC0 = [sp++]; | ||
389 | |||
390 | a1.w = [sp++]; | ||
391 | a1.x = [sp++]; | ||
392 | a0.w = [sp++]; | ||
393 | a0.x = [sp++]; | ||
394 | |||
395 | fp = [sp++]; | ||
396 | |||
397 | (R7:0, P5:0) = [SP++]; | ||
398 | .endm | ||
diff --git a/arch/blackfin/include/asm/cplb-mpu.h b/arch/blackfin/include/asm/cplb-mpu.h deleted file mode 100644 index 75c67b99d607..000000000000 --- a/arch/blackfin/include/asm/cplb-mpu.h +++ /dev/null | |||
@@ -1,61 +0,0 @@ | |||
1 | /* | ||
2 | * File: include/asm-blackfin/cplbinit.h | ||
3 | * Based on: | ||
4 | * Author: | ||
5 | * | ||
6 | * Created: | ||
7 | * Description: | ||
8 | * | ||
9 | * Modified: | ||
10 | * Copyright 2004-2006 Analog Devices Inc. | ||
11 | * | ||
12 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, see the file COPYING, or write | ||
26 | * to the Free Software Foundation, Inc., | ||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
28 | */ | ||
29 | #ifndef __ASM_BFIN_CPLB_MPU_H | ||
30 | #define __ASM_BFIN_CPLB_MPU_H | ||
31 | |||
32 | struct cplb_entry { | ||
33 | unsigned long data, addr; | ||
34 | }; | ||
35 | |||
36 | struct mem_region { | ||
37 | unsigned long start, end; | ||
38 | unsigned long dcplb_data; | ||
39 | unsigned long icplb_data; | ||
40 | }; | ||
41 | |||
42 | extern struct cplb_entry dcplb_tbl[MAX_CPLBS]; | ||
43 | extern struct cplb_entry icplb_tbl[MAX_CPLBS]; | ||
44 | extern int first_switched_icplb; | ||
45 | extern int first_mask_dcplb; | ||
46 | extern int first_switched_dcplb; | ||
47 | |||
48 | extern int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot; | ||
49 | extern int nr_cplb_flush; | ||
50 | |||
51 | extern int page_mask_order; | ||
52 | extern int page_mask_nelts; | ||
53 | |||
54 | extern unsigned long *current_rwx_mask; | ||
55 | |||
56 | extern void flush_switched_cplbs(void); | ||
57 | extern void set_mask_dcplbs(unsigned long *); | ||
58 | |||
59 | extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *); | ||
60 | |||
61 | #endif /* __ASM_BFIN_CPLB_MPU_H */ | ||
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h index 9e8b4035fcec..ad566ff9ad16 100644 --- a/arch/blackfin/include/asm/cplb.h +++ b/arch/blackfin/include/asm/cplb.h | |||
@@ -30,7 +30,6 @@ | |||
30 | #ifndef _CPLB_H | 30 | #ifndef _CPLB_H |
31 | #define _CPLB_H | 31 | #define _CPLB_H |
32 | 32 | ||
33 | #include <asm/blackfin.h> | ||
34 | #include <mach/anomaly.h> | 33 | #include <mach/anomaly.h> |
35 | 34 | ||
36 | #define SDRAM_IGENERIC (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO) | 35 | #define SDRAM_IGENERIC (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | CPLB_PORTPRIO) |
@@ -55,13 +54,24 @@ | |||
55 | #endif | 54 | #endif |
56 | 55 | ||
57 | #define L1_DMEMORY (CPLB_LOCK | CPLB_COMMON) | 56 | #define L1_DMEMORY (CPLB_LOCK | CPLB_COMMON) |
57 | |||
58 | #ifdef CONFIG_SMP | ||
59 | #define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB) | ||
60 | #define L2_IMEMORY (CPLB_COMMON | CPLB_LOCK) | ||
61 | #define L2_DMEMORY (CPLB_COMMON | CPLB_LOCK) | ||
62 | |||
63 | #else | ||
58 | #ifdef CONFIG_BFIN_L2_CACHEABLE | 64 | #ifdef CONFIG_BFIN_L2_CACHEABLE |
59 | #define L2_IMEMORY (SDRAM_IGENERIC) | 65 | #define L2_IMEMORY (SDRAM_IGENERIC) |
60 | #define L2_DMEMORY (SDRAM_DGENERIC) | 66 | #define L2_DMEMORY (SDRAM_DGENERIC) |
61 | #else | 67 | #else |
62 | #define L2_IMEMORY (CPLB_COMMON) | 68 | #define L2_IMEMORY (CPLB_COMMON) |
63 | #define L2_DMEMORY (CPLB_COMMON) | 69 | #define L2_DMEMORY (CPLB_COMMON) |
64 | #endif | 70 | #endif /* CONFIG_BFIN_L2_CACHEABLE */ |
71 | |||
72 | #define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB) | ||
73 | #endif /* CONFIG_SMP */ | ||
74 | |||
65 | #define SDRAM_DNON_CHBL (CPLB_COMMON) | 75 | #define SDRAM_DNON_CHBL (CPLB_COMMON) |
66 | #define SDRAM_EBIU (CPLB_COMMON) | 76 | #define SDRAM_EBIU (CPLB_COMMON) |
67 | #define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY) | 77 | #define SDRAM_OOPS (CPLB_VALID | ANOMALY_05000158_WORKAROUND | CPLB_LOCK | CPLB_DIRTY) |
@@ -71,14 +81,7 @@ | |||
71 | #define SIZE_1M 0x00100000 /* 1M */ | 81 | #define SIZE_1M 0x00100000 /* 1M */ |
72 | #define SIZE_4M 0x00400000 /* 4M */ | 82 | #define SIZE_4M 0x00400000 /* 4M */ |
73 | 83 | ||
74 | #ifdef CONFIG_MPU | ||
75 | #define MAX_CPLBS 16 | 84 | #define MAX_CPLBS 16 |
76 | #else | ||
77 | #define MAX_CPLBS (16 * 2) | ||
78 | #endif | ||
79 | |||
80 | #define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \ | ||
81 | ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M) | ||
82 | 85 | ||
83 | #define CPLB_ENABLE_ICACHE_P 0 | 86 | #define CPLB_ENABLE_ICACHE_P 0 |
84 | #define CPLB_ENABLE_DCACHE_P 1 | 87 | #define CPLB_ENABLE_DCACHE_P 1 |
@@ -113,4 +116,8 @@ | |||
113 | #define CPLB_INOCACHE CPLB_USER_RD | CPLB_VALID | 116 | #define CPLB_INOCACHE CPLB_USER_RD | CPLB_VALID |
114 | #define CPLB_IDOCACHE CPLB_INOCACHE | CPLB_L1_CHBL | 117 | #define CPLB_IDOCACHE CPLB_INOCACHE | CPLB_L1_CHBL |
115 | 118 | ||
119 | #define FAULT_RW (1 << 16) | ||
120 | #define FAULT_USERSUPV (1 << 17) | ||
121 | #define FAULT_CPLBBITS 0x0000ffff | ||
122 | |||
116 | #endif /* _CPLB_H */ | 123 | #endif /* _CPLB_H */ |
diff --git a/arch/blackfin/include/asm/cplbinit.h b/arch/blackfin/include/asm/cplbinit.h index f845b41147ba..05b14a631d0c 100644 --- a/arch/blackfin/include/asm/cplbinit.h +++ b/arch/blackfin/include/asm/cplbinit.h | |||
@@ -32,61 +32,56 @@ | |||
32 | 32 | ||
33 | #include <asm/blackfin.h> | 33 | #include <asm/blackfin.h> |
34 | #include <asm/cplb.h> | 34 | #include <asm/cplb.h> |
35 | #include <linux/threads.h> | ||
35 | 36 | ||
36 | #ifdef CONFIG_MPU | 37 | #ifdef CONFIG_CPLB_SWITCH_TAB_L1 |
37 | 38 | # define PDT_ATTR __attribute__((l1_data)) | |
38 | #include <asm/cplb-mpu.h> | ||
39 | |||
40 | #else | 39 | #else |
40 | # define PDT_ATTR | ||
41 | #endif | ||
41 | 42 | ||
42 | #define INITIAL_T 0x1 | 43 | struct cplb_entry { |
43 | #define SWITCH_T 0x2 | 44 | unsigned long data, addr; |
44 | #define I_CPLB 0x4 | ||
45 | #define D_CPLB 0x8 | ||
46 | |||
47 | #define IN_KERNEL 1 | ||
48 | |||
49 | enum | ||
50 | {ZERO_P, L1I_MEM, L1D_MEM, SDRAM_KERN , SDRAM_RAM_MTD, SDRAM_DMAZ, RES_MEM, ASYNC_MEM, L2_MEM}; | ||
51 | |||
52 | struct cplb_desc { | ||
53 | u32 start; /* start address */ | ||
54 | u32 end; /* end address */ | ||
55 | u32 psize; /* prefered size if any otherwise 1MB or 4MB*/ | ||
56 | u16 attr;/* attributes */ | ||
57 | u16 i_conf;/* I-CPLB DATA */ | ||
58 | u16 d_conf;/* D-CPLB DATA */ | ||
59 | u16 valid;/* valid */ | ||
60 | const s8 name[30];/* name */ | ||
61 | }; | 45 | }; |
62 | 46 | ||
63 | struct cplb_tab { | 47 | struct cplb_boundary { |
64 | u_long *tab; | 48 | unsigned long eaddr; /* End of this region. */ |
65 | u16 pos; | 49 | unsigned long data; /* CPLB data value. */ |
66 | u16 size; | ||
67 | }; | 50 | }; |
68 | 51 | ||
69 | extern u_long icplb_table[]; | 52 | extern struct cplb_boundary dcplb_bounds[]; |
70 | extern u_long dcplb_table[]; | 53 | extern struct cplb_boundary icplb_bounds[]; |
54 | extern int dcplb_nr_bounds, icplb_nr_bounds; | ||
71 | 55 | ||
72 | /* Till here we are discussing about the static memory management model. | 56 | extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS]; |
73 | * However, the operating envoronments commonly define more CPLB | 57 | extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS]; |
74 | * descriptors to cover the entire addressable memory than will fit into | 58 | extern int first_switched_icplb; |
75 | * the available on-chip 16 CPLB MMRs. When this happens, the below table | 59 | extern int first_switched_dcplb; |
76 | * will be used which will hold all the potentially required CPLB descriptors | ||
77 | * | ||
78 | * This is how Page descriptor Table is implemented in uClinux/Blackfin. | ||
79 | */ | ||
80 | 60 | ||
81 | extern u_long ipdt_table[]; | 61 | extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[]; |
82 | extern u_long dpdt_table[]; | 62 | extern int nr_dcplb_prot[], nr_cplb_flush[]; |
83 | #ifdef CONFIG_CPLB_INFO | 63 | |
84 | extern u_long ipdt_swapcount_table[]; | 64 | #ifdef CONFIG_MPU |
85 | extern u_long dpdt_swapcount_table[]; | 65 | |
86 | #endif | 66 | extern int first_mask_dcplb; |
67 | |||
68 | extern int page_mask_order; | ||
69 | extern int page_mask_nelts; | ||
70 | |||
71 | extern unsigned long *current_rwx_mask[NR_CPUS]; | ||
72 | |||
73 | extern void flush_switched_cplbs(unsigned int); | ||
74 | extern void set_mask_dcplbs(unsigned long *, unsigned int); | ||
75 | |||
76 | extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *); | ||
87 | 77 | ||
88 | #endif /* CONFIG_MPU */ | 78 | #endif /* CONFIG_MPU */ |
89 | 79 | ||
90 | extern void generate_cplb_tables(void); | 80 | extern void bfin_icache_init(struct cplb_entry *icplb_tbl); |
81 | extern void bfin_dcache_init(struct cplb_entry *icplb_tbl); | ||
91 | 82 | ||
83 | #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) | ||
84 | extern void generate_cplb_tables_all(void); | ||
85 | extern void generate_cplb_tables_cpu(unsigned int cpu); | ||
86 | #endif | ||
92 | #endif | 87 | #endif |
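The new cplb_boundary tables describe the address space as an ordered list of regions, each ending at eaddr and carrying the CPLB data value to install for addresses below that boundary. A hedged sketch of how such a table is consulted (function name hypothetical, not from this patch):

static unsigned long example_dcplb_data_for(unsigned long addr)
{
	int i;

	for (i = 0; i < dcplb_nr_bounds; i++)
		if (addr < dcplb_bounds[i].eaddr)
			return dcplb_bounds[i].data;	/* first region covering addr */
	return 0;	/* address falls outside every described region */
}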
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h new file mode 100644 index 000000000000..c2594ef877f6 --- /dev/null +++ b/arch/blackfin/include/asm/cpu.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * File: arch/blackfin/include/asm/cpu.h. | ||
3 | * Author: Philippe Gerum <rpm@xenomai.org> | ||
4 | * | ||
5 | * Copyright 2007 Analog Devices Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, see the file COPYING, or write | ||
19 | * to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #ifndef __ASM_BLACKFIN_CPU_H | ||
24 | #define __ASM_BLACKFIN_CPU_H | ||
25 | |||
26 | #include <linux/percpu.h> | ||
27 | |||
28 | struct task_struct; | ||
29 | |||
30 | struct blackfin_cpudata { | ||
31 | struct cpu cpu; | ||
32 | struct task_struct *idle; | ||
33 | unsigned int imemctl; | ||
34 | unsigned int dmemctl; | ||
35 | unsigned long loops_per_jiffy; | ||
36 | unsigned long dcache_invld_count; | ||
37 | }; | ||
38 | |||
39 | DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data); | ||
40 | |||
41 | #endif | ||
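blackfin_cpudata is declared as a per-CPU variable, so consumers reach it through the usual per_cpu() accessor. A minimal sketch (helper name hypothetical, not part of this patch):

static unsigned long example_cpu_lpj(unsigned int cpu)
{
	struct blackfin_cpudata *d = &per_cpu(cpu_data, cpu);

	return d->loops_per_jiffy;
}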
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h index 6509733bb0f6..e4f7b8043f02 100644 --- a/arch/blackfin/include/asm/dma.h +++ b/arch/blackfin/include/asm/dma.h | |||
@@ -1,44 +1,17 @@ | |||
1 | /* | 1 | /* |
2 | * File: include/asm-blackfin/simple_bf533_dma.h | 2 | * dma.h - Blackfin DMA defines/structures/etc... |
3 | * Based on: none - original work | ||
4 | * Author: LG Soft India | ||
5 | * Copyright (C) 2004-2005 Analog Devices Inc. | ||
6 | * Created: Tue Sep 21 2004 | ||
7 | * Description: This file contains the major Data structures and constants | ||
8 | * used for DMA Implementation in BF533 | ||
9 | * Modified: | ||
10 | * | 3 | * |
11 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 4 | * Copyright 2004-2008 Analog Devices Inc. |
12 | * | 5 | * Licensed under the GPL-2 or later. |
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | * GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; see the file COPYING. | ||
25 | * If not, write to the Free Software Foundation, | ||
26 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
27 | */ | 6 | */ |
28 | 7 | ||
29 | #ifndef _BLACKFIN_DMA_H_ | 8 | #ifndef _BLACKFIN_DMA_H_ |
30 | #define _BLACKFIN_DMA_H_ | 9 | #define _BLACKFIN_DMA_H_ |
31 | 10 | ||
32 | #include <asm/io.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/signal.h> | ||
36 | |||
37 | #include <linux/kernel.h> | ||
38 | #include <mach/dma.h> | ||
39 | #include <linux/mm.h> | ||
40 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <mach/dma.h> | ||
41 | #include <asm/blackfin.h> | 13 | #include <asm/blackfin.h> |
14 | #include <asm/page.h> | ||
42 | 15 | ||
43 | #define MAX_DMA_ADDRESS PAGE_OFFSET | 16 | #define MAX_DMA_ADDRESS PAGE_OFFSET |
44 | 17 | ||
@@ -79,7 +52,7 @@ enum dma_chan_status { | |||
79 | #define DMA_SYNC_RESTART 1 | 52 | #define DMA_SYNC_RESTART 1 |
80 | 53 | ||
81 | struct dmasg { | 54 | struct dmasg { |
82 | unsigned long next_desc_addr; | 55 | void *next_desc_addr; |
83 | unsigned long start_addr; | 56 | unsigned long start_addr; |
84 | unsigned short cfg; | 57 | unsigned short cfg; |
85 | unsigned short x_count; | 58 | unsigned short x_count; |
@@ -89,7 +62,7 @@ struct dmasg { | |||
89 | } __attribute__((packed)); | 62 | } __attribute__((packed)); |
90 | 63 | ||
91 | struct dma_register { | 64 | struct dma_register { |
92 | unsigned long next_desc_ptr; /* DMA Next Descriptor Pointer register */ | 65 | void *next_desc_ptr; /* DMA Next Descriptor Pointer register */ |
93 | unsigned long start_addr; /* DMA Start address register */ | 66 | unsigned long start_addr; /* DMA Start address register */ |
94 | 67 | ||
95 | unsigned short cfg; /* DMA Configuration register */ | 68 | unsigned short cfg; /* DMA Configuration register */ |
@@ -109,7 +82,7 @@ struct dma_register { | |||
109 | short y_modify; /* DMA y_modify register */ | 82 | short y_modify; /* DMA y_modify register */ |
110 | unsigned short dummy5; | 83 | unsigned short dummy5; |
111 | 84 | ||
112 | unsigned long curr_desc_ptr; /* DMA Current Descriptor Pointer | 85 | void *curr_desc_ptr; /* DMA Current Descriptor Pointer |
113 | register */ | 86 | register */ |
114 | unsigned long curr_addr_ptr; /* DMA Current Address Pointer | 87 | unsigned long curr_addr_ptr; /* DMA Current Address Pointer |
115 | register */ | 88 | register */ |
@@ -131,19 +104,15 @@ struct dma_register { | |||
131 | 104 | ||
132 | }; | 105 | }; |
133 | 106 | ||
134 | typedef irqreturn_t(*dma_interrupt_t) (int irq, void *dev_id); | 107 | struct mutex; |
135 | |||
136 | struct dma_channel { | 108 | struct dma_channel { |
137 | struct mutex dmalock; | 109 | struct mutex dmalock; |
138 | char *device_id; | 110 | const char *device_id; |
139 | enum dma_chan_status chan_status; | 111 | enum dma_chan_status chan_status; |
140 | struct dma_register *regs; | 112 | volatile struct dma_register *regs; |
141 | struct dmasg *sg; /* large mode descriptor */ | 113 | struct dmasg *sg; /* large mode descriptor */ |
142 | unsigned int ctrl_num; /* controller number */ | 114 | unsigned int irq; |
143 | dma_interrupt_t irq_callback; | ||
144 | void *data; | 115 | void *data; |
145 | unsigned int dma_enable_flag; | ||
146 | unsigned int loopback_flag; | ||
147 | #ifdef CONFIG_PM | 116 | #ifdef CONFIG_PM |
148 | unsigned short saved_peripheral_map; | 117 | unsigned short saved_peripheral_map; |
149 | #endif | 118 | #endif |
@@ -157,49 +126,132 @@ void blackfin_dma_resume(void); | |||
157 | /******************************************************************************* | 126 | /******************************************************************************* |
158 | * DMA API's | 127 | * DMA API's |
159 | *******************************************************************************/ | 128 | *******************************************************************************/ |
160 | /* functions to set register mode */ | 129 | extern struct dma_channel dma_ch[MAX_DMA_CHANNELS]; |
161 | void set_dma_start_addr(unsigned int channel, unsigned long addr); | 130 | extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS]; |
162 | void set_dma_next_desc_addr(unsigned int channel, unsigned long addr); | 131 | extern int channel2irq(unsigned int channel); |
163 | void set_dma_curr_desc_addr(unsigned int channel, unsigned long addr); | 132 | |
164 | void set_dma_x_count(unsigned int channel, unsigned short x_count); | 133 | static inline void set_dma_start_addr(unsigned int channel, unsigned long addr) |
165 | void set_dma_x_modify(unsigned int channel, short x_modify); | 134 | { |
166 | void set_dma_y_count(unsigned int channel, unsigned short y_count); | 135 | dma_ch[channel].regs->start_addr = addr; |
167 | void set_dma_y_modify(unsigned int channel, short y_modify); | 136 | } |
168 | void set_dma_config(unsigned int channel, unsigned short config); | 137 | static inline void set_dma_next_desc_addr(unsigned int channel, void *addr) |
169 | unsigned short set_bfin_dma_config(char direction, char flow_mode, | 138 | { |
170 | char intr_mode, char dma_mode, char width, | 139 | dma_ch[channel].regs->next_desc_ptr = addr; |
171 | char syncmode); | 140 | } |
172 | void set_dma_curr_addr(unsigned int channel, unsigned long addr); | 141 | static inline void set_dma_curr_desc_addr(unsigned int channel, void *addr) |
173 | 142 | { | |
174 | /* get curr status for polling */ | 143 | dma_ch[channel].regs->curr_desc_ptr = addr; |
175 | unsigned short get_dma_curr_irqstat(unsigned int channel); | 144 | } |
176 | unsigned short get_dma_curr_xcount(unsigned int channel); | 145 | static inline void set_dma_x_count(unsigned int channel, unsigned short x_count) |
177 | unsigned short get_dma_curr_ycount(unsigned int channel); | 146 | { |
178 | unsigned long get_dma_next_desc_ptr(unsigned int channel); | 147 | dma_ch[channel].regs->x_count = x_count; |
179 | unsigned long get_dma_curr_desc_ptr(unsigned int channel); | 148 | } |
180 | unsigned long get_dma_curr_addr(unsigned int channel); | 149 | static inline void set_dma_y_count(unsigned int channel, unsigned short y_count) |
181 | 150 | { | |
182 | /* set large DMA mode descriptor */ | 151 | dma_ch[channel].regs->y_count = y_count; |
183 | void set_dma_sg(unsigned int channel, struct dmasg *sg, int nr_sg); | 152 | } |
184 | 153 | static inline void set_dma_x_modify(unsigned int channel, short x_modify) | |
185 | /* check if current channel is in use */ | 154 | { |
186 | int dma_channel_active(unsigned int channel); | 155 | dma_ch[channel].regs->x_modify = x_modify; |
187 | 156 | } | |
188 | /* common functions must be called in any mode */ | 157 | static inline void set_dma_y_modify(unsigned int channel, short y_modify) |
158 | { | ||
159 | dma_ch[channel].regs->y_modify = y_modify; | ||
160 | } | ||
161 | static inline void set_dma_config(unsigned int channel, unsigned short config) | ||
162 | { | ||
163 | dma_ch[channel].regs->cfg = config; | ||
164 | } | ||
165 | static inline void set_dma_curr_addr(unsigned int channel, unsigned long addr) | ||
166 | { | ||
167 | dma_ch[channel].regs->curr_addr_ptr = addr; | ||
168 | } | ||
169 | |||
170 | static inline unsigned short | ||
171 | set_bfin_dma_config(char direction, char flow_mode, | ||
172 | char intr_mode, char dma_mode, char width, char syncmode) | ||
173 | { | ||
174 | return (direction << 1) | (width << 2) | (dma_mode << 4) | | ||
175 | (intr_mode << 6) | (flow_mode << 12) | (syncmode << 5); | ||
176 | } | ||
177 | |||
178 | static inline unsigned short get_dma_curr_irqstat(unsigned int channel) | ||
179 | { | ||
180 | return dma_ch[channel].regs->irq_status; | ||
181 | } | ||
182 | static inline unsigned short get_dma_curr_xcount(unsigned int channel) | ||
183 | { | ||
184 | return dma_ch[channel].regs->curr_x_count; | ||
185 | } | ||
186 | static inline unsigned short get_dma_curr_ycount(unsigned int channel) | ||
187 | { | ||
188 | return dma_ch[channel].regs->curr_y_count; | ||
189 | } | ||
190 | static inline void *get_dma_next_desc_ptr(unsigned int channel) | ||
191 | { | ||
192 | return dma_ch[channel].regs->next_desc_ptr; | ||
193 | } | ||
194 | static inline void *get_dma_curr_desc_ptr(unsigned int channel) | ||
195 | { | ||
196 | return dma_ch[channel].regs->curr_desc_ptr; | ||
197 | } | ||
198 | static inline unsigned short get_dma_config(unsigned int channel) | ||
199 | { | ||
200 | return dma_ch[channel].regs->cfg; | ||
201 | } | ||
202 | static inline unsigned long get_dma_curr_addr(unsigned int channel) | ||
203 | { | ||
204 | return dma_ch[channel].regs->curr_addr_ptr; | ||
205 | } | ||
206 | |||
207 | static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize) | ||
208 | { | ||
209 | dma_ch[channel].regs->cfg = | ||
210 | (dma_ch[channel].regs->cfg & ~(0xf << 8)) | | ||
211 | ((ndsize & 0xf) << 8); | ||
212 | dma_ch[channel].regs->next_desc_ptr = sg; | ||
213 | } | ||
214 | |||
215 | static inline int dma_channel_active(unsigned int channel) | ||
216 | { | ||
217 | if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE) | ||
218 | return 0; | ||
219 | else | ||
220 | return 1; | ||
221 | } | ||
222 | |||
223 | static inline void disable_dma(unsigned int channel) | ||
224 | { | ||
225 | dma_ch[channel].regs->cfg &= ~DMAEN; | ||
226 | SSYNC(); | ||
227 | dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED; | ||
228 | } | ||
229 | static inline void enable_dma(unsigned int channel) | ||
230 | { | ||
231 | dma_ch[channel].regs->curr_x_count = 0; | ||
232 | dma_ch[channel].regs->curr_y_count = 0; | ||
233 | dma_ch[channel].regs->cfg |= DMAEN; | ||
234 | dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED; | ||
235 | } | ||
189 | void free_dma(unsigned int channel); | 236 | void free_dma(unsigned int channel); |
190 | int dma_channel_active(unsigned int channel); /* check if a channel is in use */ | 237 | int request_dma(unsigned int channel, const char *device_id); |
191 | void disable_dma(unsigned int channel); | 238 | int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data); |
192 | void enable_dma(unsigned int channel); | 239 | |
193 | int request_dma(unsigned int channel, char *device_id); | 240 | static inline void dma_disable_irq(unsigned int channel) |
194 | int set_dma_callback(unsigned int channel, dma_interrupt_t callback, | 241 | { |
195 | void *data); | 242 | disable_irq(dma_ch[channel].irq); |
196 | void dma_disable_irq(unsigned int channel); | 243 | } |
197 | void dma_enable_irq(unsigned int channel); | 244 | static inline void dma_enable_irq(unsigned int channel) |
198 | void clear_dma_irqstat(unsigned int channel); | 245 | { |
246 | enable_irq(dma_ch[channel].irq); | ||
247 | } | ||
248 | static inline void clear_dma_irqstat(unsigned int channel) | ||
249 | { | ||
250 | dma_ch[channel].regs->irq_status = DMA_DONE | DMA_ERR; | ||
251 | } | ||
252 | |||
199 | void *dma_memcpy(void *dest, const void *src, size_t count); | 253 | void *dma_memcpy(void *dest, const void *src, size_t count); |
200 | void *safe_dma_memcpy(void *dest, const void *src, size_t count); | 254 | void *safe_dma_memcpy(void *dest, const void *src, size_t count); |
201 | 255 | void blackfin_dma_early_init(void); | |
202 | extern int channel2irq(unsigned int channel); | ||
203 | extern struct dma_register *dma_io_base_addr[MAX_BLACKFIN_DMA_CHANNEL]; | ||
204 | 256 | ||
205 | #endif | 257 | #endif |
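Reviewer note: with the accessors above now inlined against dma_ch[], a one-shot 1-D transfer reduces to a handful of register writes. The sketch below is not part of this patch; the channel number, buffer handling, and the symbolic mode values passed to set_bfin_dma_config() (DIR_WRITE, FLOW_STOP, INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_16, DMA_SYNC_RESTART) are illustrative assumptions, and only the helper names come from the header shown here. It also assumes <linux/interrupt.h> and <asm/dma.h> are included.

/* Hedged usage sketch, not taken from this patch. */
static irqreturn_t example_dma_done(int irq, void *dev_id)
{
        unsigned int channel = (unsigned int)(unsigned long)dev_id;

        clear_dma_irqstat(channel);             /* ack DMA_DONE / DMA_ERR */
        return IRQ_HANDLED;
}

static int example_start_rx(unsigned int channel, void *buf, size_t len)
{
        int ret;

        ret = request_dma(channel, "example-rx");
        if (ret)
                return ret;
        ret = set_dma_callback(channel, example_dma_done,
                               (void *)(unsigned long)channel);
        if (ret) {
                free_dma(channel);
                return ret;
        }

        set_dma_start_addr(channel, (unsigned long)buf);
        set_dma_x_count(channel, len / 2);      /* 16-bit element count */
        set_dma_x_modify(channel, 2);           /* advance 2 bytes per element */
        /* mode constants below are assumptions, not shown in this diff */
        set_dma_config(channel,
                       set_bfin_dma_config(DIR_WRITE, FLOW_STOP, INTR_ON_BUF,
                                           DIMENSION_LINEAR, DATA_SIZE_16,
                                           DMA_SYNC_RESTART));
        enable_dma(channel);
        return 0;
}

The callback runs as an ordinary IRQ handler, so dma_disable_irq()/dma_enable_irq() can bracket any reconfiguration of the same channel.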
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h index c4f721e0d00d..b30a2968e274 100644 --- a/arch/blackfin/include/asm/entry.h +++ b/arch/blackfin/include/asm/entry.h | |||
@@ -27,6 +27,14 @@ | |||
27 | #define SAVE_ALL_SYS save_context_no_interrupts | 27 | #define SAVE_ALL_SYS save_context_no_interrupts |
28 | /* This is used for all normal interrupts. It saves a minimum of registers | 28 | /* This is used for all normal interrupts. It saves a minimum of registers |
29 | to the stack, loads the IRQ number, and jumps to common code. */ | 29 | to the stack, loads the IRQ number, and jumps to common code. */ |
30 | #ifdef CONFIG_IPIPE | ||
31 | # define LOAD_IPIPE_IPEND \ | ||
32 | P0.l = lo(IPEND); \ | ||
33 | P0.h = hi(IPEND); \ | ||
34 | R1 = [P0]; | ||
35 | #else | ||
36 | # define LOAD_IPIPE_IPEND | ||
37 | #endif | ||
30 | #define INTERRUPT_ENTRY(N) \ | 38 | #define INTERRUPT_ENTRY(N) \ |
31 | [--sp] = SYSCFG; \ | 39 | [--sp] = SYSCFG; \ |
32 | \ | 40 | \ |
@@ -34,6 +42,7 @@ | |||
34 | [--sp] = R0; /*orig_r0*/ \ | 42 | [--sp] = R0; /*orig_r0*/ \ |
35 | [--sp] = (R7:0,P5:0); \ | 43 | [--sp] = (R7:0,P5:0); \ |
36 | R0 = (N); \ | 44 | R0 = (N); \ |
45 | LOAD_IPIPE_IPEND \ | ||
37 | jump __common_int_entry; | 46 | jump __common_int_entry; |
38 | 47 | ||
39 | /* For timer interrupts, we need to save IPEND, since the user_mode | 48 | /* For timer interrupts, we need to save IPEND, since the user_mode |
@@ -53,9 +62,11 @@ | |||
53 | /* This one pushes RETI without using CLI. Interrupts are enabled. */ | 62 | /* This one pushes RETI without using CLI. Interrupts are enabled. */ |
54 | #define SAVE_CONTEXT_SYSCALL save_context_syscall | 63 | #define SAVE_CONTEXT_SYSCALL save_context_syscall |
55 | #define SAVE_CONTEXT save_context_with_interrupts | 64 | #define SAVE_CONTEXT save_context_with_interrupts |
65 | #define SAVE_CONTEXT_CPLB save_context_cplb | ||
56 | 66 | ||
57 | #define RESTORE_ALL_SYS restore_context_no_interrupts | 67 | #define RESTORE_ALL_SYS restore_context_no_interrupts |
58 | #define RESTORE_CONTEXT restore_context_with_interrupts | 68 | #define RESTORE_CONTEXT restore_context_with_interrupts |
69 | #define RESTORE_CONTEXT_CPLB restore_context_cplb | ||
59 | 70 | ||
60 | #endif /* __ASSEMBLY__ */ | 71 | #endif /* __ASSEMBLY__ */ |
61 | #endif /* __BFIN_ENTRY_H */ | 72 | #endif /* __BFIN_ENTRY_H */ |
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h index ad33ac271fd9..9477d82fcad2 100644 --- a/arch/blackfin/include/asm/gpio.h +++ b/arch/blackfin/include/asm/gpio.h | |||
@@ -84,11 +84,14 @@ | |||
84 | #ifndef __ARCH_BLACKFIN_GPIO_H__ | 84 | #ifndef __ARCH_BLACKFIN_GPIO_H__ |
85 | #define __ARCH_BLACKFIN_GPIO_H__ | 85 | #define __ARCH_BLACKFIN_GPIO_H__ |
86 | 86 | ||
87 | #define gpio_bank(x) ((x) >> 4) | 87 | #define gpio_bank(x) ((x) >> 4) |
88 | #define gpio_bit(x) (1<<((x) & 0xF)) | 88 | #define gpio_bit(x) (1<<((x) & 0xF)) |
89 | #define gpio_sub_n(x) ((x) & 0xF) | 89 | #define gpio_sub_n(x) ((x) & 0xF) |
90 | 90 | ||
91 | #define GPIO_BANKSIZE 16 | 91 | #define GPIO_BANKSIZE 16 |
92 | #define GPIO_BANK_NUM DIV_ROUND_UP(MAX_BLACKFIN_GPIOS, GPIO_BANKSIZE) | ||
93 | |||
94 | #include <mach/gpio.h> | ||
92 | 95 | ||
93 | #define GPIO_0 0 | 96 | #define GPIO_0 0 |
94 | #define GPIO_1 1 | 97 | #define GPIO_1 1 |
@@ -139,151 +142,9 @@ | |||
139 | #define GPIO_46 46 | 142 | #define GPIO_46 46 |
140 | #define GPIO_47 47 | 143 | #define GPIO_47 47 |
141 | 144 | ||
142 | |||
143 | #define PERIPHERAL_USAGE 1 | 145 | #define PERIPHERAL_USAGE 1 |
144 | #define GPIO_USAGE 0 | 146 | #define GPIO_USAGE 0 |
145 | 147 | ||
146 | #ifdef BF533_FAMILY | ||
147 | #define MAX_BLACKFIN_GPIOS 16 | ||
148 | |||
149 | #define GPIO_PF0 0 | ||
150 | #define GPIO_PF1 1 | ||
151 | #define GPIO_PF2 2 | ||
152 | #define GPIO_PF3 3 | ||
153 | #define GPIO_PF4 4 | ||
154 | #define GPIO_PF5 5 | ||
155 | #define GPIO_PF6 6 | ||
156 | #define GPIO_PF7 7 | ||
157 | #define GPIO_PF8 8 | ||
158 | #define GPIO_PF9 9 | ||
159 | #define GPIO_PF10 10 | ||
160 | #define GPIO_PF11 11 | ||
161 | #define GPIO_PF12 12 | ||
162 | #define GPIO_PF13 13 | ||
163 | #define GPIO_PF14 14 | ||
164 | #define GPIO_PF15 15 | ||
165 | |||
166 | #endif | ||
167 | |||
168 | #if defined(BF527_FAMILY) || defined(BF537_FAMILY) | ||
169 | #define MAX_BLACKFIN_GPIOS 48 | ||
170 | |||
171 | #define GPIO_PF0 0 | ||
172 | #define GPIO_PF1 1 | ||
173 | #define GPIO_PF2 2 | ||
174 | #define GPIO_PF3 3 | ||
175 | #define GPIO_PF4 4 | ||
176 | #define GPIO_PF5 5 | ||
177 | #define GPIO_PF6 6 | ||
178 | #define GPIO_PF7 7 | ||
179 | #define GPIO_PF8 8 | ||
180 | #define GPIO_PF9 9 | ||
181 | #define GPIO_PF10 10 | ||
182 | #define GPIO_PF11 11 | ||
183 | #define GPIO_PF12 12 | ||
184 | #define GPIO_PF13 13 | ||
185 | #define GPIO_PF14 14 | ||
186 | #define GPIO_PF15 15 | ||
187 | #define GPIO_PG0 16 | ||
188 | #define GPIO_PG1 17 | ||
189 | #define GPIO_PG2 18 | ||
190 | #define GPIO_PG3 19 | ||
191 | #define GPIO_PG4 20 | ||
192 | #define GPIO_PG5 21 | ||
193 | #define GPIO_PG6 22 | ||
194 | #define GPIO_PG7 23 | ||
195 | #define GPIO_PG8 24 | ||
196 | #define GPIO_PG9 25 | ||
197 | #define GPIO_PG10 26 | ||
198 | #define GPIO_PG11 27 | ||
199 | #define GPIO_PG12 28 | ||
200 | #define GPIO_PG13 29 | ||
201 | #define GPIO_PG14 30 | ||
202 | #define GPIO_PG15 31 | ||
203 | #define GPIO_PH0 32 | ||
204 | #define GPIO_PH1 33 | ||
205 | #define GPIO_PH2 34 | ||
206 | #define GPIO_PH3 35 | ||
207 | #define GPIO_PH4 36 | ||
208 | #define GPIO_PH5 37 | ||
209 | #define GPIO_PH6 38 | ||
210 | #define GPIO_PH7 39 | ||
211 | #define GPIO_PH8 40 | ||
212 | #define GPIO_PH9 41 | ||
213 | #define GPIO_PH10 42 | ||
214 | #define GPIO_PH11 43 | ||
215 | #define GPIO_PH12 44 | ||
216 | #define GPIO_PH13 45 | ||
217 | #define GPIO_PH14 46 | ||
218 | #define GPIO_PH15 47 | ||
219 | |||
220 | #define PORT_F GPIO_PF0 | ||
221 | #define PORT_G GPIO_PG0 | ||
222 | #define PORT_H GPIO_PH0 | ||
223 | |||
224 | #endif | ||
225 | |||
226 | #ifdef BF548_FAMILY | ||
227 | #include <mach/gpio.h> | ||
228 | #endif | ||
229 | |||
230 | #ifdef BF561_FAMILY | ||
231 | #define MAX_BLACKFIN_GPIOS 48 | ||
232 | |||
233 | #define GPIO_PF0 0 | ||
234 | #define GPIO_PF1 1 | ||
235 | #define GPIO_PF2 2 | ||
236 | #define GPIO_PF3 3 | ||
237 | #define GPIO_PF4 4 | ||
238 | #define GPIO_PF5 5 | ||
239 | #define GPIO_PF6 6 | ||
240 | #define GPIO_PF7 7 | ||
241 | #define GPIO_PF8 8 | ||
242 | #define GPIO_PF9 9 | ||
243 | #define GPIO_PF10 10 | ||
244 | #define GPIO_PF11 11 | ||
245 | #define GPIO_PF12 12 | ||
246 | #define GPIO_PF13 13 | ||
247 | #define GPIO_PF14 14 | ||
248 | #define GPIO_PF15 15 | ||
249 | #define GPIO_PF16 16 | ||
250 | #define GPIO_PF17 17 | ||
251 | #define GPIO_PF18 18 | ||
252 | #define GPIO_PF19 19 | ||
253 | #define GPIO_PF20 20 | ||
254 | #define GPIO_PF21 21 | ||
255 | #define GPIO_PF22 22 | ||
256 | #define GPIO_PF23 23 | ||
257 | #define GPIO_PF24 24 | ||
258 | #define GPIO_PF25 25 | ||
259 | #define GPIO_PF26 26 | ||
260 | #define GPIO_PF27 27 | ||
261 | #define GPIO_PF28 28 | ||
262 | #define GPIO_PF29 29 | ||
263 | #define GPIO_PF30 30 | ||
264 | #define GPIO_PF31 31 | ||
265 | #define GPIO_PF32 32 | ||
266 | #define GPIO_PF33 33 | ||
267 | #define GPIO_PF34 34 | ||
268 | #define GPIO_PF35 35 | ||
269 | #define GPIO_PF36 36 | ||
270 | #define GPIO_PF37 37 | ||
271 | #define GPIO_PF38 38 | ||
272 | #define GPIO_PF39 39 | ||
273 | #define GPIO_PF40 40 | ||
274 | #define GPIO_PF41 41 | ||
275 | #define GPIO_PF42 42 | ||
276 | #define GPIO_PF43 43 | ||
277 | #define GPIO_PF44 44 | ||
278 | #define GPIO_PF45 45 | ||
279 | #define GPIO_PF46 46 | ||
280 | #define GPIO_PF47 47 | ||
281 | |||
282 | #define PORT_FIO0 GPIO_0 | ||
283 | #define PORT_FIO1 GPIO_16 | ||
284 | #define PORT_FIO2 GPIO_32 | ||
285 | #endif | ||
286 | |||
287 | #ifndef __ASSEMBLY__ | 148 | #ifndef __ASSEMBLY__ |
288 | 149 | ||
289 | /*********************************************************** | 150 | /*********************************************************** |
@@ -425,20 +286,77 @@ struct gpio_port_s { | |||
425 | * MODIFICATION HISTORY : | 286 | * MODIFICATION HISTORY : |
426 | **************************************************************/ | 287 | **************************************************************/ |
427 | 288 | ||
428 | int gpio_request(unsigned, const char *); | 289 | int bfin_gpio_request(unsigned gpio, const char *label); |
429 | void gpio_free(unsigned); | 290 | void bfin_gpio_free(unsigned gpio); |
430 | 291 | int bfin_gpio_irq_request(unsigned gpio, const char *label); | |
431 | void gpio_set_value(unsigned gpio, int arg); | 292 | void bfin_gpio_irq_free(unsigned gpio); |
432 | int gpio_get_value(unsigned gpio); | 293 | int bfin_gpio_direction_input(unsigned gpio); |
294 | int bfin_gpio_direction_output(unsigned gpio, int value); | ||
295 | int bfin_gpio_get_value(unsigned gpio); | ||
296 | void bfin_gpio_set_value(unsigned gpio, int value); | ||
433 | 297 | ||
434 | #ifndef BF548_FAMILY | 298 | #ifndef BF548_FAMILY |
435 | #define gpio_set_value(gpio, value) set_gpio_data(gpio, value) | 299 | #define bfin_gpio_set_value(gpio, value) set_gpio_data(gpio, value) |
436 | #endif | 300 | #endif |
437 | 301 | ||
438 | int gpio_direction_input(unsigned gpio); | 302 | #ifdef CONFIG_GPIOLIB |
439 | int gpio_direction_output(unsigned gpio, int value); | 303 | #include <asm-generic/gpio.h> /* cansleep wrappers */ |
304 | |||
305 | static inline int gpio_get_value(unsigned int gpio) | ||
306 | { | ||
307 | if (gpio < MAX_BLACKFIN_GPIOS) | ||
308 | return bfin_gpio_get_value(gpio); | ||
309 | else | ||
310 | return __gpio_get_value(gpio); | ||
311 | } | ||
312 | |||
313 | static inline void gpio_set_value(unsigned int gpio, int value) | ||
314 | { | ||
315 | if (gpio < MAX_BLACKFIN_GPIOS) | ||
316 | bfin_gpio_set_value(gpio, value); | ||
317 | else | ||
318 | __gpio_set_value(gpio, value); | ||
319 | } | ||
320 | |||
321 | static inline int gpio_cansleep(unsigned int gpio) | ||
322 | { | ||
323 | return __gpio_cansleep(gpio); | ||
324 | } | ||
325 | |||
326 | #else /* !CONFIG_GPIOLIB */ | ||
327 | |||
328 | static inline int gpio_request(unsigned gpio, const char *label) | ||
329 | { | ||
330 | return bfin_gpio_request(gpio, label); | ||
331 | } | ||
332 | |||
333 | static inline void gpio_free(unsigned gpio) | ||
334 | { | ||
335 | return bfin_gpio_free(gpio); | ||
336 | } | ||
337 | |||
338 | static inline int gpio_direction_input(unsigned gpio) | ||
339 | { | ||
340 | return bfin_gpio_direction_input(gpio); | ||
341 | } | ||
342 | |||
343 | static inline int gpio_direction_output(unsigned gpio, int value) | ||
344 | { | ||
345 | return bfin_gpio_direction_output(gpio, value); | ||
346 | } | ||
347 | |||
348 | static inline int gpio_get_value(unsigned gpio) | ||
349 | { | ||
350 | return bfin_gpio_get_value(gpio); | ||
351 | } | ||
352 | |||
353 | static inline void gpio_set_value(unsigned gpio, int value) | ||
354 | { | ||
355 | return bfin_gpio_set_value(gpio, value); | ||
356 | } | ||
440 | 357 | ||
441 | #include <asm-generic/gpio.h> /* cansleep wrappers */ | 358 | #include <asm-generic/gpio.h> /* cansleep wrappers */ |
359 | #endif /* !CONFIG_GPIOLIB */ | ||
442 | #include <asm/irq.h> | 360 | #include <asm/irq.h> |
443 | 361 | ||
444 | static inline int gpio_to_irq(unsigned gpio) | 362 | static inline int gpio_to_irq(unsigned gpio) |
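Reviewer note: after this split, generic consumers stay on the gpio_* wrappers and the header routes them either to gpiolib or straight to the bfin_gpio_* backends. A minimal sketch (not part of the patch) follows; GPIO_PF5 and the label string are arbitrary choices made for illustration.

static int example_led_init(void)
{
        int ret;

        ret = gpio_request(GPIO_PF5, "example-led");
        if (ret)
                return ret;

        ret = gpio_direction_output(GPIO_PF5, 0);       /* start driven low */
        if (ret) {
                gpio_free(GPIO_PF5);
                return ret;
        }

        gpio_set_value(GPIO_PF5, 1);                    /* LED on */
        return 0;
}

static void example_led_exit(void)
{
        gpio_set_value(GPIO_PF5, 0);
        gpio_free(GPIO_PF5);
}

Pins below MAX_BLACKFIN_GPIOS take the bfin_gpio_set_value() fast path; higher numbers fall through to __gpio_set_value() when gpiolib is enabled.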
diff --git a/arch/blackfin/include/asm/hardirq.h b/arch/blackfin/include/asm/hardirq.h index b6b19f1b9dab..717181a1749b 100644 --- a/arch/blackfin/include/asm/hardirq.h +++ b/arch/blackfin/include/asm/hardirq.h | |||
@@ -42,4 +42,6 @@ typedef struct { | |||
42 | 42 | ||
43 | #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 | 43 | #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 |
44 | 44 | ||
45 | extern void ack_bad_irq(unsigned int irq); | ||
46 | |||
45 | #endif | 47 | #endif |
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h index 7dc77a21fdf3..63b2d8c78570 100644 --- a/arch/blackfin/include/asm/io.h +++ b/arch/blackfin/include/asm/io.h | |||
@@ -94,12 +94,12 @@ static inline unsigned int readl(const volatile void __iomem *addr) | |||
94 | #define outw_p(x,addr) outw(x,addr) | 94 | #define outw_p(x,addr) outw(x,addr) |
95 | #define outl_p(x,addr) outl(x,addr) | 95 | #define outl_p(x,addr) outl(x,addr) |
96 | 96 | ||
97 | #define ioread8_rep(a,d,c) insb(a,d,c) | 97 | #define ioread8_rep(a,d,c) readsb(a,d,c) |
98 | #define ioread16_rep(a,d,c) insw(a,d,c) | 98 | #define ioread16_rep(a,d,c) readsw(a,d,c) |
99 | #define ioread32_rep(a,d,c) insl(a,d,c) | 99 | #define ioread32_rep(a,d,c) readsl(a,d,c) |
100 | #define iowrite8_rep(a,s,c) outsb(a,s,c) | 100 | #define iowrite8_rep(a,s,c) writesb(a,s,c) |
101 | #define iowrite16_rep(a,s,c) outsw(a,s,c) | 101 | #define iowrite16_rep(a,s,c) writesw(a,s,c) |
102 | #define iowrite32_rep(a,s,c) outsl(a,s,c) | 102 | #define iowrite32_rep(a,s,c) writesl(a,s,c) |
103 | 103 | ||
104 | #define ioread8(X) readb(X) | 104 | #define ioread8(X) readb(X) |
105 | #define ioread16(X) readw(X) | 105 | #define ioread16(X) readw(X) |
@@ -108,6 +108,8 @@ static inline unsigned int readl(const volatile void __iomem *addr) | |||
108 | #define iowrite16(val,X) writew(val,X) | 108 | #define iowrite16(val,X) writew(val,X) |
109 | #define iowrite32(val,X) writel(val,X) | 109 | #define iowrite32(val,X) writel(val,X) |
110 | 110 | ||
111 | #define mmiowb() wmb() | ||
112 | |||
111 | #define IO_SPACE_LIMIT 0xffffffff | 113 | #define IO_SPACE_LIMIT 0xffffffff |
112 | 114 | ||
113 | /* Values for nocacheflag and cmode */ | 115 | /* Values for nocacheflag and cmode */ |
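Reviewer note: the _rep accessors now resolve to the readsX()/writesX() string helpers, so repeated 16-bit FIFO accesses become single calls. A small sketch, assuming a hypothetical memory-mapped FIFO register and that <linux/io.h> and <linux/types.h> are included:

static void example_drain_fifo(void __iomem *fifo, u16 *buf, unsigned int words)
{
        ioread16_rep(fifo, buf, words);         /* read 'words' 16-bit values */
}

static void example_fill_fifo(void __iomem *fifo, const u16 *buf, unsigned int words)
{
        iowrite16_rep(fifo, buf, words);
        mmiowb();       /* new macro above: orders MMIO writes via wmb() */
}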
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h new file mode 100644 index 000000000000..76f53d8b9a0d --- /dev/null +++ b/arch/blackfin/include/asm/ipipe.h | |||
@@ -0,0 +1,278 @@ | |||
1 | /* -*- linux-c -*- | ||
2 | * include/asm-blackfin/ipipe.h | ||
3 | * | ||
4 | * Copyright (C) 2002-2007 Philippe Gerum. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | ||
9 | * USA; either version 2 of the License, or (at your option) any later | ||
10 | * version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ASM_BLACKFIN_IPIPE_H | ||
23 | #define __ASM_BLACKFIN_IPIPE_H | ||
24 | |||
25 | #ifdef CONFIG_IPIPE | ||
26 | |||
27 | #include <linux/cpumask.h> | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/threads.h> | ||
30 | #include <linux/irq.h> | ||
31 | #include <linux/ipipe_percpu.h> | ||
32 | #include <asm/ptrace.h> | ||
33 | #include <asm/irq.h> | ||
34 | #include <asm/bitops.h> | ||
35 | #include <asm/atomic.h> | ||
36 | #include <asm/traps.h> | ||
37 | |||
38 | #define IPIPE_ARCH_STRING "1.8-00" | ||
39 | #define IPIPE_MAJOR_NUMBER 1 | ||
40 | #define IPIPE_MINOR_NUMBER 8 | ||
41 | #define IPIPE_PATCH_NUMBER 0 | ||
42 | |||
43 | #ifdef CONFIG_SMP | ||
44 | #error "I-pipe/blackfin: SMP not implemented" | ||
45 | #else /* !CONFIG_SMP */ | ||
46 | #define ipipe_processor_id() 0 | ||
47 | #endif /* CONFIG_SMP */ | ||
48 | |||
49 | #define prepare_arch_switch(next) \ | ||
50 | do { \ | ||
51 | ipipe_schedule_notify(current, next); \ | ||
52 | local_irq_disable_hw(); \ | ||
53 | } while (0) | ||
54 | |||
55 | #define task_hijacked(p) \ | ||
56 | ({ \ | ||
57 | int __x__ = ipipe_current_domain != ipipe_root_domain; \ | ||
58 | /* We would need to clear the SYNC flag for the root domain */ \ | ||
59 | /* over the current processor in SMP mode. */ \ | ||
60 | local_irq_enable_hw(); __x__; \ | ||
61 | }) | ||
62 | |||
63 | struct ipipe_domain; | ||
64 | |||
65 | struct ipipe_sysinfo { | ||
66 | |||
67 | int ncpus; /* Number of CPUs on board */ | ||
68 | u64 cpufreq; /* CPU frequency (in Hz) */ | ||
69 | |||
70 | /* Arch-dependent block */ | ||
71 | |||
72 | struct { | ||
73 | unsigned tmirq; /* Timer tick IRQ */ | ||
74 | u64 tmfreq; /* Timer frequency */ | ||
75 | } archdep; | ||
76 | }; | ||
77 | |||
78 | #define ipipe_read_tsc(t) \ | ||
79 | ({ \ | ||
80 | unsigned long __cy2; \ | ||
81 | __asm__ __volatile__ ("1: %0 = CYCLES2\n" \ | ||
82 | "%1 = CYCLES\n" \ | ||
83 | "%2 = CYCLES2\n" \ | ||
84 | "CC = %2 == %0\n" \ | ||
85 | "if ! CC jump 1b\n" \ | ||
86 | : "=r" (((unsigned long *)&t)[1]), \ | ||
87 | "=r" (((unsigned long *)&t)[0]), \ | ||
88 | "=r" (__cy2) \ | ||
89 | : /*no input*/ : "CC"); \ | ||
90 | t; \ | ||
91 | }) | ||
92 | |||
93 | #define ipipe_cpu_freq() __ipipe_core_clock | ||
94 | #define ipipe_tsc2ns(_t) (((unsigned long)(_t)) * __ipipe_freq_scale) | ||
95 | #define ipipe_tsc2us(_t) (ipipe_tsc2ns(_t) / 1000 + 1) | ||
96 | |||
97 | /* Private interface -- Internal use only */ | ||
98 | |||
99 | #define __ipipe_check_platform() do { } while (0) | ||
100 | |||
101 | #define __ipipe_init_platform() do { } while (0) | ||
102 | |||
103 | extern atomic_t __ipipe_irq_lvdepth[IVG15 + 1]; | ||
104 | |||
105 | extern unsigned long __ipipe_irq_lvmask; | ||
106 | |||
107 | extern struct ipipe_domain ipipe_root; | ||
108 | |||
109 | /* enable/disable_irqdesc _must_ be used in pairs. */ | ||
110 | |||
111 | void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, | ||
112 | unsigned irq); | ||
113 | |||
114 | void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, | ||
115 | unsigned irq); | ||
116 | |||
117 | #define __ipipe_enable_irq(irq) (irq_desc[irq].chip->unmask(irq)) | ||
118 | |||
119 | #define __ipipe_disable_irq(irq) (irq_desc[irq].chip->mask(irq)) | ||
120 | |||
121 | #define __ipipe_lock_root() \ | ||
122 | set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags) | ||
123 | |||
124 | #define __ipipe_unlock_root() \ | ||
125 | clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags) | ||
126 | |||
127 | void __ipipe_enable_pipeline(void); | ||
128 | |||
129 | #define __ipipe_hook_critical_ipi(ipd) do { } while (0) | ||
130 | |||
131 | #define __ipipe_sync_pipeline(syncmask) \ | ||
132 | do { \ | ||
133 | struct ipipe_domain *ipd = ipipe_current_domain; \ | ||
134 | if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \ | ||
135 | __ipipe_sync_stage(syncmask); \ | ||
136 | } while (0) | ||
137 | |||
138 | void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs); | ||
139 | |||
140 | int __ipipe_get_irq_priority(unsigned irq); | ||
141 | |||
142 | int __ipipe_get_irqthread_priority(unsigned irq); | ||
143 | |||
144 | void __ipipe_stall_root_raw(void); | ||
145 | |||
146 | void __ipipe_unstall_root_raw(void); | ||
147 | |||
148 | void __ipipe_serial_debug(const char *fmt, ...); | ||
149 | |||
150 | DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs); | ||
151 | |||
152 | extern unsigned long __ipipe_core_clock; | ||
153 | |||
154 | extern unsigned long __ipipe_freq_scale; | ||
155 | |||
156 | extern unsigned long __ipipe_irq_tail_hook; | ||
157 | |||
158 | static inline unsigned long __ipipe_ffnz(unsigned long ul) | ||
159 | { | ||
160 | return ffs(ul) - 1; | ||
161 | } | ||
162 | |||
163 | #define __ipipe_run_irqtail() /* Must be a macro */ \ | ||
164 | do { \ | ||
165 | asmlinkage void __ipipe_call_irqtail(void); \ | ||
166 | unsigned long __pending; \ | ||
167 | CSYNC(); \ | ||
168 | __pending = bfin_read_IPEND(); \ | ||
169 | if (__pending & 0x8000) { \ | ||
170 | __pending &= ~0x8010; \ | ||
171 | if (__pending && (__pending & (__pending - 1)) == 0) \ | ||
172 | __ipipe_call_irqtail(); \ | ||
173 | } \ | ||
174 | } while (0) | ||
175 | |||
176 | #define __ipipe_run_isr(ipd, irq) \ | ||
177 | do { \ | ||
178 | if (ipd == ipipe_root_domain) { \ | ||
179 | /* \ | ||
180 | * Note: the I-pipe implements a threaded interrupt model on \ | ||
181 | * this arch for Linux external IRQs. The interrupt handler we \ | ||
182 | * call here only wakes up the associated IRQ thread. \ | ||
183 | */ \ | ||
184 | if (ipipe_virtual_irq_p(irq)) { \ | ||
185 | /* No irqtail here; virtual interrupts have no effect \ | ||
186 | on IPEND so there is no need for processing \ | ||
187 | deferral. */ \ | ||
188 | local_irq_enable_nohead(ipd); \ | ||
189 | ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ | ||
190 | local_irq_disable_nohead(ipd); \ | ||
191 | } else \ | ||
192 | /* \ | ||
193 | * No need to run the irqtail here either; \ | ||
194 | * we can't be preempted by hw IRQs, so \ | ||
195 | * non-Linux IRQs cannot stack over the short \ | ||
196 | * thread wakeup code. Which in turn means \ | ||
197 | * that no irqtail condition could be pending \ | ||
198 | * for domains above Linux in the pipeline. \ | ||
199 | */ \ | ||
200 | ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \ | ||
201 | } else { \ | ||
202 | __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ | ||
203 | local_irq_enable_nohead(ipd); \ | ||
204 | ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ | ||
205 | /* Attempt to exit the outer interrupt level before \ | ||
206 | * starting the deferred IRQ processing. */ \ | ||
207 | local_irq_disable_nohead(ipd); \ | ||
208 | __ipipe_run_irqtail(); \ | ||
209 | __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ | ||
210 | } \ | ||
211 | } while (0) | ||
212 | |||
213 | #define __ipipe_syscall_watched_p(p, sc) \ | ||
214 | (((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls) | ||
215 | |||
216 | void ipipe_init_irq_threads(void); | ||
217 | |||
218 | int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc); | ||
219 | |||
220 | #define IS_SYSIRQ(irq) ((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS) | ||
221 | #define IS_GPIOIRQ(irq) ((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS) | ||
222 | |||
223 | #define IRQ_SYSTMR IRQ_TIMER0 | ||
224 | #define IRQ_PRIOTMR CONFIG_IRQ_TIMER0 | ||
225 | |||
226 | #if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533) | ||
227 | #define PRIO_GPIODEMUX(irq) CONFIG_PFA | ||
228 | #elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537) | ||
229 | #define PRIO_GPIODEMUX(irq) CONFIG_IRQ_PROG_INTA | ||
230 | #elif defined(CONFIG_BF52x) | ||
231 | #define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \ | ||
232 | (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \ | ||
233 | (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \ | ||
234 | -1) | ||
235 | #elif defined(CONFIG_BF561) | ||
236 | #define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \ | ||
237 | (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \ | ||
238 | (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \ | ||
239 | -1) | ||
240 | #define bfin_write_TIMER_DISABLE(val) bfin_write_TMRS8_DISABLE(val) | ||
241 | #define bfin_write_TIMER_ENABLE(val) bfin_write_TMRS8_ENABLE(val) | ||
242 | #define bfin_write_TIMER_STATUS(val) bfin_write_TMRS8_STATUS(val) | ||
243 | #define bfin_read_TIMER_STATUS() bfin_read_TMRS8_STATUS() | ||
244 | #elif defined(CONFIG_BF54x) | ||
245 | #define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \ | ||
246 | (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \ | ||
247 | (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \ | ||
248 | (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \ | ||
249 | -1) | ||
250 | #define bfin_write_TIMER_DISABLE(val) bfin_write_TIMER_DISABLE0(val) | ||
251 | #define bfin_write_TIMER_ENABLE(val) bfin_write_TIMER_ENABLE0(val) | ||
252 | #define bfin_write_TIMER_STATUS(val) bfin_write_TIMER_STATUS0(val) | ||
253 | #define bfin_read_TIMER_STATUS(val) bfin_read_TIMER_STATUS0(val) | ||
254 | #else | ||
255 | # error "no PRIO_GPIODEMUX() for this part" | ||
256 | #endif | ||
257 | |||
258 | #define __ipipe_root_tick_p(regs) ((regs->ipend & 0x10) != 0) | ||
259 | |||
260 | #else /* !CONFIG_IPIPE */ | ||
261 | |||
262 | #define task_hijacked(p) 0 | ||
263 | #define ipipe_trap_notify(t, r) 0 | ||
264 | |||
265 | #define __ipipe_stall_root_raw() do { } while (0) | ||
266 | #define __ipipe_unstall_root_raw() do { } while (0) | ||
267 | |||
268 | #define ipipe_init_irq_threads() do { } while (0) | ||
269 | #define ipipe_start_irq_thread(irq, desc) 0 | ||
270 | |||
271 | #define IRQ_SYSTMR IRQ_CORETMR | ||
272 | #define IRQ_PRIOTMR IRQ_CORETMR | ||
273 | |||
274 | #define __ipipe_root_tick_p(regs) 1 | ||
275 | |||
276 | #endif /* !CONFIG_IPIPE */ | ||
277 | |||
278 | #endif /* !__ASM_BLACKFIN_IPIPE_H */ | ||
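Reviewer note: ipipe_read_tsc() snapshots the 64-bit CYCLES/CYCLES2 pair with a retry loop, and ipipe_tsc2ns()/ipipe_tsc2us() scale the result by __ipipe_freq_scale. A rough latency-measurement sketch under those assumptions (the function and variable names are made up, and the ns conversion truncates to 32 bits, so it only suits short intervals):

static unsigned long example_measure_ns(void (*fn)(void))
{
        unsigned long long t0 = 0, t1 = 0;

        ipipe_read_tsc(t0);
        fn();
        ipipe_read_tsc(t1);

        return ipipe_tsc2ns(t1 - t0);
}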
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h new file mode 100644 index 000000000000..cb1025aeabcf --- /dev/null +++ b/arch/blackfin/include/asm/ipipe_base.h | |||
@@ -0,0 +1,80 @@ | |||
1 | /* -*- linux-c -*- | ||
2 | * include/asm-blackfin/ipipe_base.h | ||
3 | * | ||
4 | * Copyright (C) 2007 Philippe Gerum. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, | ||
9 | * USA; either version 2 of the License, or (at your option) any later | ||
10 | * version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ASM_BLACKFIN_IPIPE_BASE_H | ||
23 | #define __ASM_BLACKFIN_IPIPE_BASE_H | ||
24 | |||
25 | #ifdef CONFIG_IPIPE | ||
26 | |||
27 | #define IPIPE_NR_XIRQS NR_IRQS | ||
28 | #define IPIPE_IRQ_ISHIFT 5 /* 2^5 for a 32-bit arch. */ | ||
29 | |||
30 | /* Blackfin-specific, global domain flags */ | ||
31 | #define IPIPE_ROOTLOCK_FLAG 1 /* Lock pipeline for root */ | ||
32 | |||
33 | /* Blackfin traps -- i.e. exception vector numbers */ | ||
34 | #define IPIPE_NR_FAULTS 52 /* We leave a gap after VEC_ILL_RES. */ | ||
35 | /* Pseudo-vectors used for kernel events */ | ||
36 | #define IPIPE_FIRST_EVENT IPIPE_NR_FAULTS | ||
37 | #define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT) | ||
38 | #define IPIPE_EVENT_SCHEDULE (IPIPE_FIRST_EVENT + 1) | ||
39 | #define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 2) | ||
40 | #define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 3) | ||
41 | #define IPIPE_EVENT_INIT (IPIPE_FIRST_EVENT + 4) | ||
42 | #define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 5) | ||
43 | #define IPIPE_EVENT_CLEANUP (IPIPE_FIRST_EVENT + 6) | ||
44 | #define IPIPE_LAST_EVENT IPIPE_EVENT_CLEANUP | ||
45 | #define IPIPE_NR_EVENTS (IPIPE_LAST_EVENT + 1) | ||
46 | |||
47 | #define IPIPE_TIMER_IRQ IRQ_CORETMR | ||
48 | |||
49 | #ifndef __ASSEMBLY__ | ||
50 | |||
51 | #include <linux/bitops.h> | ||
52 | |||
53 | extern int test_bit(int nr, const void *addr); | ||
54 | |||
55 | |||
56 | extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */ | ||
57 | |||
58 | static inline void __ipipe_stall_root(void) | ||
59 | { | ||
60 | volatile unsigned long *p = &__ipipe_root_status; | ||
61 | set_bit(0, p); | ||
62 | } | ||
63 | |||
64 | static inline unsigned long __ipipe_test_and_stall_root(void) | ||
65 | { | ||
66 | volatile unsigned long *p = &__ipipe_root_status; | ||
67 | return test_and_set_bit(0, p); | ||
68 | } | ||
69 | |||
70 | static inline unsigned long __ipipe_test_root(void) | ||
71 | { | ||
72 | const unsigned long *p = &__ipipe_root_status; | ||
73 | return test_bit(0, p); | ||
74 | } | ||
75 | |||
76 | #endif /* !__ASSEMBLY__ */ | ||
77 | |||
78 | #endif /* CONFIG_IPIPE */ | ||
79 | |||
80 | #endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */ | ||
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h index 89f59e18af93..3d977909ce7d 100644 --- a/arch/blackfin/include/asm/irq.h +++ b/arch/blackfin/include/asm/irq.h | |||
@@ -17,56 +17,272 @@ | |||
17 | #ifndef _BFIN_IRQ_H_ | 17 | #ifndef _BFIN_IRQ_H_ |
18 | #define _BFIN_IRQ_H_ | 18 | #define _BFIN_IRQ_H_ |
19 | 19 | ||
20 | /* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/ | ||
20 | #include <mach/irq.h> | 21 | #include <mach/irq.h> |
21 | #include <asm/ptrace.h> | 22 | #include <asm/pda.h> |
22 | 23 | #include <asm/processor.h> | |
23 | /******************************************************************************* | ||
24 | ***** INTRODUCTION *********** | ||
25 | * On the Blackfin, the interrupt structure allows remapping of the hardware | ||
26 | * levels. | ||
27 | * - I'm going to assume that the H/W level is going to stay at the default | ||
28 | * settings. If someone wants to go through and abstract this out, feel free | ||
29 | * to mod the interrupt numbering scheme. | ||
30 | * - I'm abstracting the interrupts so that uClinux does not know anything | ||
31 | * about the H/W levels. If you want to change the H/W AND keep the abstracted | ||
32 | * levels that uClinux sees, you should be able to do most of it here. | ||
33 | * - I've left the "abstract" numbering sparse in case someone wants to pull the | ||
34 | * interrupts apart (just the TX/RX for the various devices) | ||
35 | *******************************************************************************/ | ||
36 | 24 | ||
37 | /* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/ | 25 | #ifdef CONFIG_SMP |
26 | /* Forward decl needed due to cdef interdependencies */ | ||
27 | static inline uint32_t __pure bfin_dspid(void); | ||
28 | # define blackfin_core_id() (bfin_dspid() & 0xff) | ||
29 | # define bfin_irq_flags cpu_pda[blackfin_core_id()].imask | ||
30 | #else | ||
31 | extern unsigned long bfin_irq_flags; | ||
32 | #endif | ||
38 | 33 | ||
39 | /* | 34 | #ifdef CONFIG_IPIPE |
40 | * Machine specific interrupt sources. | 35 | |
41 | * | 36 | #include <linux/ipipe_trace.h> |
42 | * Adding an interrupt service routine for a source with this bit | 37 | |
43 | * set indicates a special machine specific interrupt source. | 38 | void __ipipe_unstall_root(void); |
44 | * The machine specific files define these sources. | 39 | |
45 | * | 40 | void __ipipe_restore_root(unsigned long flags); |
46 | * The IRQ_MACHSPEC bit is now gone - the only thing it did was to | 41 | |
47 | * introduce unnecessary overhead. | 42 | #ifdef CONFIG_DEBUG_HWERR |
48 | * | 43 | # define __all_masked_irq_flags 0x3f |
49 | * All interrupt handling is actually machine specific so it is better | 44 | # define __save_and_cli_hw(x) \ |
50 | * to use function pointers, as used by the Sparc port, and select the | 45 | __asm__ __volatile__( \ |
51 | * interrupt handling functions when initializing the kernel. This way | 46 | "cli %0;" \ |
52 | * we save some unnecessary overhead at run-time. | 47 | "sti %1;" \ |
53 | * 01/11/97 - Jes | 48 | : "=&d"(x) \ |
54 | */ | 49 | : "d" (0x3F) \ |
50 | ) | ||
51 | #else | ||
52 | # define __all_masked_irq_flags 0x1f | ||
53 | # define __save_and_cli_hw(x) \ | ||
54 | __asm__ __volatile__( \ | ||
55 | "cli %0;" \ | ||
56 | : "=&d"(x) \ | ||
57 | ) | ||
58 | #endif | ||
59 | |||
60 | #define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags) | ||
61 | #define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags)) | ||
62 | #define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x) | ||
55 | 63 | ||
56 | extern void ack_bad_irq(unsigned int irq); | 64 | #define local_save_flags(x) \ |
65 | do { \ | ||
66 | (x) = __ipipe_test_root() ? \ | ||
67 | __all_masked_irq_flags : bfin_irq_flags; \ | ||
68 | } while (0) | ||
57 | 69 | ||
58 | static __inline__ int irq_canonicalize(int irq) | 70 | #define local_irq_save(x) \ |
71 | do { \ | ||
72 | (x) = __ipipe_test_and_stall_root(); \ | ||
73 | } while (0) | ||
74 | |||
75 | #define local_irq_restore(x) __ipipe_restore_root(x) | ||
76 | #define local_irq_disable() __ipipe_stall_root() | ||
77 | #define local_irq_enable() __ipipe_unstall_root() | ||
78 | #define irqs_disabled() __ipipe_test_root() | ||
79 | |||
80 | #define local_save_flags_hw(x) \ | ||
81 | __asm__ __volatile__( \ | ||
82 | "cli %0;" \ | ||
83 | "sti %0;" \ | ||
84 | : "=d"(x) \ | ||
85 | ) | ||
86 | |||
87 | #define irqs_disabled_hw() \ | ||
88 | ({ \ | ||
89 | unsigned long flags; \ | ||
90 | local_save_flags_hw(flags); \ | ||
91 | !irqs_enabled_from_flags_hw(flags); \ | ||
92 | }) | ||
93 | |||
94 | static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real) | ||
59 | { | 95 | { |
60 | return irq; | 96 | /* Merge virtual and real interrupt mask bits into a single |
97 | 32bit word. */ | ||
98 | return (real & ~(1 << 31)) | ((virt != 0) << 31); | ||
99 | } | ||
100 | |||
101 | static inline int raw_demangle_irq_bits(unsigned long *x) | ||
102 | { | ||
103 | int virt = (*x & (1 << 31)) != 0; | ||
104 | *x &= ~(1L << 31); | ||
105 | return virt; | ||
61 | } | 106 | } |
62 | 107 | ||
63 | /* count of spurious interrupts */ | 108 | #ifdef CONFIG_IPIPE_TRACE_IRQSOFF |
64 | /* extern volatile unsigned int num_spurious; */ | 109 | |
110 | #define local_irq_disable_hw() \ | ||
111 | do { \ | ||
112 | int _tmp_dummy; \ | ||
113 | if (!irqs_disabled_hw()) \ | ||
114 | ipipe_trace_begin(0x80000000); \ | ||
115 | __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \ | ||
116 | } while (0) | ||
117 | |||
118 | #define local_irq_enable_hw() \ | ||
119 | do { \ | ||
120 | if (irqs_disabled_hw()) \ | ||
121 | ipipe_trace_end(0x80000000); \ | ||
122 | __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \ | ||
123 | } while (0) | ||
124 | |||
125 | #define local_irq_save_hw(x) \ | ||
126 | do { \ | ||
127 | __save_and_cli_hw(x); \ | ||
128 | if (local_test_iflag_hw(x)) \ | ||
129 | ipipe_trace_begin(0x80000001); \ | ||
130 | } while (0) | ||
131 | |||
132 | #define local_irq_restore_hw(x) \ | ||
133 | do { \ | ||
134 | if (local_test_iflag_hw(x)) { \ | ||
135 | ipipe_trace_end(0x80000001); \ | ||
136 | local_irq_enable_hw_notrace(); \ | ||
137 | } \ | ||
138 | } while (0) | ||
139 | |||
140 | #define local_irq_disable_hw_notrace() \ | ||
141 | do { \ | ||
142 | int _tmp_dummy; \ | ||
143 | __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \ | ||
144 | } while (0) | ||
145 | |||
146 | #define local_irq_enable_hw_notrace() \ | ||
147 | __asm__ __volatile__( \ | ||
148 | "sti %0;" \ | ||
149 | : \ | ||
150 | : "d"(bfin_irq_flags) \ | ||
151 | ) | ||
65 | 152 | ||
66 | #ifndef NO_IRQ | 153 | #define local_irq_save_hw_notrace(x) __save_and_cli_hw(x) |
67 | #define NO_IRQ ((unsigned int)(-1)) | 154 | |
155 | #define local_irq_restore_hw_notrace(x) \ | ||
156 | do { \ | ||
157 | if (local_test_iflag_hw(x)) \ | ||
158 | local_irq_enable_hw_notrace(); \ | ||
159 | } while (0) | ||
160 | |||
161 | #else /* CONFIG_IPIPE_TRACE_IRQSOFF */ | ||
162 | |||
163 | #define local_irq_enable_hw() \ | ||
164 | __asm__ __volatile__( \ | ||
165 | "sti %0;" \ | ||
166 | : \ | ||
167 | : "d"(bfin_irq_flags) \ | ||
168 | ) | ||
169 | |||
170 | #define local_irq_disable_hw() \ | ||
171 | do { \ | ||
172 | int _tmp_dummy; \ | ||
173 | __asm__ __volatile__ ( \ | ||
174 | "cli %0;" \ | ||
175 | : "=d" (_tmp_dummy)); \ | ||
176 | } while (0) | ||
177 | |||
178 | #define local_irq_restore_hw(x) \ | ||
179 | do { \ | ||
180 | if (irqs_enabled_from_flags_hw(x)) \ | ||
181 | local_irq_enable_hw(); \ | ||
182 | } while (0) | ||
183 | |||
184 | #define local_irq_save_hw(x) __save_and_cli_hw(x) | ||
185 | |||
186 | #define local_irq_disable_hw_notrace() local_irq_disable_hw() | ||
187 | #define local_irq_enable_hw_notrace() local_irq_enable_hw() | ||
188 | #define local_irq_save_hw_notrace(x) local_irq_save_hw(x) | ||
189 | #define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x) | ||
190 | |||
191 | #endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ | ||
192 | |||
193 | #else /* !CONFIG_IPIPE */ | ||
194 | |||
195 | /* | ||
196 | * Interrupt configuring macros. | ||
197 | */ | ||
198 | #define local_irq_disable() \ | ||
199 | do { \ | ||
200 | int __tmp_dummy; \ | ||
201 | __asm__ __volatile__( \ | ||
202 | "cli %0;" \ | ||
203 | : "=d" (__tmp_dummy) \ | ||
204 | ); \ | ||
205 | } while (0) | ||
206 | |||
207 | #define local_irq_enable() \ | ||
208 | __asm__ __volatile__( \ | ||
209 | "sti %0;" \ | ||
210 | : \ | ||
211 | : "d" (bfin_irq_flags) \ | ||
212 | ) | ||
213 | |||
214 | #ifdef CONFIG_DEBUG_HWERR | ||
215 | # define __save_and_cli(x) \ | ||
216 | __asm__ __volatile__( \ | ||
217 | "cli %0;" \ | ||
218 | "sti %1;" \ | ||
219 | : "=&d" (x) \ | ||
220 | : "d" (0x3F) \ | ||
221 | ) | ||
222 | #else | ||
223 | # define __save_and_cli(x) \ | ||
224 | __asm__ __volatile__( \ | ||
225 | "cli %0;" \ | ||
226 | : "=&d" (x) \ | ||
227 | ) | ||
68 | #endif | 228 | #endif |
69 | 229 | ||
70 | #define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) | 230 | #define local_save_flags(x) \ |
231 | __asm__ __volatile__( \ | ||
232 | "cli %0;" \ | ||
233 | "sti %0;" \ | ||
234 | : "=d" (x) \ | ||
235 | ) | ||
236 | |||
237 | #ifdef CONFIG_DEBUG_HWERR | ||
238 | #define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0) | ||
239 | #else | ||
240 | #define irqs_enabled_from_flags(x) ((x) != 0x1f) | ||
241 | #endif | ||
242 | |||
243 | #define local_irq_restore(x) \ | ||
244 | do { \ | ||
245 | if (irqs_enabled_from_flags(x)) \ | ||
246 | local_irq_enable(); \ | ||
247 | } while (0) | ||
248 | |||
249 | /* For spinlocks etc */ | ||
250 | #define local_irq_save(x) __save_and_cli(x) | ||
251 | |||
252 | #define irqs_disabled() \ | ||
253 | ({ \ | ||
254 | unsigned long flags; \ | ||
255 | local_save_flags(flags); \ | ||
256 | !irqs_enabled_from_flags(flags); \ | ||
257 | }) | ||
258 | |||
259 | #define local_irq_save_hw(x) local_irq_save(x) | ||
260 | #define local_irq_restore_hw(x) local_irq_restore(x) | ||
261 | #define local_irq_enable_hw() local_irq_enable() | ||
262 | #define local_irq_disable_hw() local_irq_disable() | ||
263 | #define irqs_disabled_hw() irqs_disabled() | ||
264 | |||
265 | #endif /* !CONFIG_IPIPE */ | ||
266 | |||
267 | #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE) | ||
268 | # define NOP_PAD_ANOMALY_05000244 "nop; nop;" | ||
269 | #else | ||
270 | # define NOP_PAD_ANOMALY_05000244 | ||
271 | #endif | ||
272 | |||
273 | #define idle_with_irq_disabled() \ | ||
274 | __asm__ __volatile__( \ | ||
275 | NOP_PAD_ANOMALY_05000244 \ | ||
276 | ".align 8;" \ | ||
277 | "sti %0;" \ | ||
278 | "idle;" \ | ||
279 | : \ | ||
280 | : "d" (bfin_irq_flags) \ | ||
281 | ) | ||
282 | |||
283 | static inline int irq_canonicalize(int irq) | ||
284 | { | ||
285 | return irq; | ||
286 | } | ||
71 | 287 | ||
72 | #endif /* _BFIN_IRQ_H_ */ | 288 | #endif /* _BFIN_IRQ_H_ */ |
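Reviewer note: under CONFIG_IPIPE the root domain's virtual stall state and the hardware IMASK flags travel in a single word, with the virtual bit packed into bit 31 by raw_mangle_irq_bits() and stripped again by raw_demangle_irq_bits(). The round trip below is only an illustration of the encoding; the pipeline core's actual save/restore ordering may differ.

static inline unsigned long example_save_combined(void)
{
        unsigned long hw, virt;

        local_save_flags_hw(hw);                /* hardware interrupt state */
        virt = __ipipe_test_root();             /* root domain stall bit */
        return raw_mangle_irq_bits(virt, hw);   /* virt lands in bit 31 */
}

static inline void example_restore_combined(unsigned long combined)
{
        int virt = raw_demangle_irq_bits(&combined);    /* clears bit 31 */

        local_irq_restore_hw(combined);
        if (!virt)
                __ipipe_unstall_root();
}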
diff --git a/arch/blackfin/include/asm/l1layout.h b/arch/blackfin/include/asm/l1layout.h index c13ded777828..79dbefaa5bef 100644 --- a/arch/blackfin/include/asm/l1layout.h +++ b/arch/blackfin/include/asm/l1layout.h | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <asm/blackfin.h> | 9 | #include <asm/blackfin.h> |
10 | 10 | ||
11 | #ifndef CONFIG_SMP | ||
11 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
12 | 13 | ||
13 | /* Data that is "mapped" into the process VM at the start of the L1 scratch | 14 | /* Data that is "mapped" into the process VM at the start of the L1 scratch |
@@ -24,8 +25,10 @@ struct l1_scratch_task_info | |||
24 | }; | 25 | }; |
25 | 26 | ||
26 | /* A pointer to the structure in memory. */ | 27 | /* A pointer to the structure in memory. */ |
27 | #define L1_SCRATCH_TASK_INFO ((struct l1_scratch_task_info *)L1_SCRATCH_START) | 28 | #define L1_SCRATCH_TASK_INFO ((struct l1_scratch_task_info *)\ |
29 | get_l1_scratch_start()) | ||
28 | 30 | ||
29 | #endif | 31 | #endif |
32 | #endif | ||
30 | 33 | ||
31 | #endif | 34 | #endif |
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h new file mode 100644 index 000000000000..255a9316ad36 --- /dev/null +++ b/arch/blackfin/include/asm/mem_init.h | |||
@@ -0,0 +1,364 @@ | |||
1 | /* | ||
2 | * arch/blackfin/include/asm/mem_init.h - reprogram clocks / memory | ||
3 | * | ||
4 | * Copyright 2004-2008 Analog Devices Inc. | ||
5 | * | ||
6 | * Licensed under the GPL-2 or later. | ||
7 | */ | ||
8 | |||
9 | #if defined(EBIU_SDGCTL) | ||
10 | #if defined(CONFIG_MEM_MT48LC16M16A2TG_75) || \ | ||
11 | defined(CONFIG_MEM_MT48LC64M4A2FB_7E) || \ | ||
12 | defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \ | ||
13 | defined(CONFIG_MEM_GENERIC_BOARD) || \ | ||
14 | defined(CONFIG_MEM_MT48LC32M8A2_75) || \ | ||
15 | defined(CONFIG_MEM_MT48LC8M32B2B5_7) || \ | ||
16 | defined(CONFIG_MEM_MT48LC32M16A2TG_75) || \ | ||
17 | defined(CONFIG_MEM_MT48LC32M8A2_75) | ||
18 | #if (CONFIG_SCLK_HZ > 119402985) | ||
19 | #define SDRAM_tRP TRP_2 | ||
20 | #define SDRAM_tRP_num 2 | ||
21 | #define SDRAM_tRAS TRAS_7 | ||
22 | #define SDRAM_tRAS_num 7 | ||
23 | #define SDRAM_tRCD TRCD_2 | ||
24 | #define SDRAM_tWR TWR_2 | ||
25 | #endif | ||
26 | #if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985) | ||
27 | #define SDRAM_tRP TRP_2 | ||
28 | #define SDRAM_tRP_num 2 | ||
29 | #define SDRAM_tRAS TRAS_6 | ||
30 | #define SDRAM_tRAS_num 6 | ||
31 | #define SDRAM_tRCD TRCD_2 | ||
32 | #define SDRAM_tWR TWR_2 | ||
33 | #endif | ||
34 | #if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612) | ||
35 | #define SDRAM_tRP TRP_2 | ||
36 | #define SDRAM_tRP_num 2 | ||
37 | #define SDRAM_tRAS TRAS_5 | ||
38 | #define SDRAM_tRAS_num 5 | ||
39 | #define SDRAM_tRCD TRCD_2 | ||
40 | #define SDRAM_tWR TWR_2 | ||
41 | #endif | ||
42 | #if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239) | ||
43 | #define SDRAM_tRP TRP_2 | ||
44 | #define SDRAM_tRP_num 2 | ||
45 | #define SDRAM_tRAS TRAS_4 | ||
46 | #define SDRAM_tRAS_num 4 | ||
47 | #define SDRAM_tRCD TRCD_2 | ||
48 | #define SDRAM_tWR TWR_2 | ||
49 | #endif | ||
50 | #if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866) | ||
51 | #define SDRAM_tRP TRP_2 | ||
52 | #define SDRAM_tRP_num 2 | ||
53 | #define SDRAM_tRAS TRAS_3 | ||
54 | #define SDRAM_tRAS_num 3 | ||
55 | #define SDRAM_tRCD TRCD_2 | ||
56 | #define SDRAM_tWR TWR_2 | ||
57 | #endif | ||
58 | #if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667) | ||
59 | #define SDRAM_tRP TRP_1 | ||
60 | #define SDRAM_tRP_num 1 | ||
61 | #define SDRAM_tRAS TRAS_4 | ||
62 | #define SDRAM_tRAS_num 4 | ||
63 | #define SDRAM_tRCD TRCD_1 | ||
64 | #define SDRAM_tWR TWR_2 | ||
65 | #endif | ||
66 | #if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493) | ||
67 | #define SDRAM_tRP TRP_1 | ||
68 | #define SDRAM_tRP_num 1 | ||
69 | #define SDRAM_tRAS TRAS_3 | ||
70 | #define SDRAM_tRAS_num 3 | ||
71 | #define SDRAM_tRCD TRCD_1 | ||
72 | #define SDRAM_tWR TWR_2 | ||
73 | #endif | ||
74 | #if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119) | ||
75 | #define SDRAM_tRP TRP_1 | ||
76 | #define SDRAM_tRP_num 1 | ||
77 | #define SDRAM_tRAS TRAS_2 | ||
78 | #define SDRAM_tRAS_num 2 | ||
79 | #define SDRAM_tRCD TRCD_1 | ||
80 | #define SDRAM_tWR TWR_2 | ||
81 | #endif | ||
82 | #if (CONFIG_SCLK_HZ <= 29850746) | ||
83 | #define SDRAM_tRP TRP_1 | ||
84 | #define SDRAM_tRP_num 1 | ||
85 | #define SDRAM_tRAS TRAS_1 | ||
86 | #define SDRAM_tRAS_num 1 | ||
87 | #define SDRAM_tRCD TRCD_1 | ||
88 | #define SDRAM_tWR TWR_2 | ||
89 | #endif | ||
90 | #endif | ||
91 | |||
92 | #if defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \ | ||
93 | defined(CONFIG_MEM_MT48LC8M32B2B5_7) | ||
94 | /*SDRAM INFORMATION: */ | ||
95 | #define SDRAM_Tref 64 /* Refresh period in milliseconds */ | ||
96 | #define SDRAM_NRA 4096 /* Number of row addresses in SDRAM */ | ||
97 | #define SDRAM_CL CL_3 | ||
98 | #endif | ||
99 | |||
100 | #if defined(CONFIG_MEM_MT48LC32M8A2_75) || \ | ||
101 | defined(CONFIG_MEM_MT48LC64M4A2FB_7E) || \ | ||
102 | defined(CONFIG_MEM_GENERIC_BOARD) || \ | ||
103 | defined(CONFIG_MEM_MT48LC32M16A2TG_75) || \ | ||
104 | defined(CONFIG_MEM_MT48LC16M16A2TG_75) || \ | ||
105 | defined(CONFIG_MEM_MT48LC32M8A2_75) | ||
106 | /*SDRAM INFORMATION: */ | ||
107 | #define SDRAM_Tref 64 /* Refresh period in milliseconds */ | ||
108 | #define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */ | ||
109 | #define SDRAM_CL CL_3 | ||
110 | #endif | ||
111 | |||
112 | |||
113 | #ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC | ||
114 | /* Equation from section 17 (p17-46) of BF533 HRM */ | ||
115 | #define mem_SDRRC (((CONFIG_SCLK_HZ / 1000) * SDRAM_Tref) / SDRAM_NRA) - (SDRAM_tRAS_num + SDRAM_tRP_num) | ||
116 | |||
117 | /* Enable SCLK Out */ | ||
118 | #define mem_SDGCTL (0x80000000 | SCTLE | SDRAM_CL | SDRAM_tRAS | SDRAM_tRP | SDRAM_tRCD | SDRAM_tWR | PSS) | ||
119 | #else | ||
120 | #define mem_SDRRC CONFIG_MEM_SDRRC | ||
121 | #define mem_SDGCTL CONFIG_MEM_SDGCTL | ||
122 | #endif | ||
123 | #endif | ||
124 | |||
125 | |||
126 | #if defined(EBIU_DDRCTL0) | ||
127 | #define MIN_DDR_SCLK(x) (x*(CONFIG_SCLK_HZ/1000/1000)/1000 + 1) | ||
128 | #define MAX_DDR_SCLK(x) (x*(CONFIG_SCLK_HZ/1000/1000)/1000) | ||
129 | #define DDR_CLK_HZ(x) (1000*1000*1000/x) | ||
130 | |||
131 | #if defined(CONFIG_MEM_MT46V32M16_6T) | ||
132 | #define DDR_SIZE DEVSZ_512 | ||
133 | #define DDR_WIDTH DEVWD_16 | ||
134 | #define DDR_MAX_tCK 13 | ||
135 | |||
136 | #define DDR_tRC DDR_TRC(MIN_DDR_SCLK(60)) | ||
137 | #define DDR_tRAS DDR_TRAS(MIN_DDR_SCLK(42)) | ||
138 | #define DDR_tRP DDR_TRP(MIN_DDR_SCLK(15)) | ||
139 | #define DDR_tRFC DDR_TRFC(MIN_DDR_SCLK(72)) | ||
140 | #define DDR_tREFI DDR_TREFI(MAX_DDR_SCLK(7800)) | ||
141 | |||
142 | #define DDR_tRCD DDR_TRCD(MIN_DDR_SCLK(15)) | ||
143 | #define DDR_tWTR DDR_TWTR(1) | ||
144 | #define DDR_tMRD DDR_TMRD(MIN_DDR_SCLK(12)) | ||
145 | #define DDR_tWR DDR_TWR(MIN_DDR_SCLK(15)) | ||
146 | #endif | ||
147 | |||
148 | #if defined(CONFIG_MEM_MT46V32M16_5B) | ||
149 | #define DDR_SIZE DEVSZ_512 | ||
150 | #define DDR_WIDTH DEVWD_16 | ||
151 | #define DDR_MAX_tCK 13 | ||
152 | |||
153 | #define DDR_tRC DDR_TRC(MIN_DDR_SCLK(55)) | ||
154 | #define DDR_tRAS DDR_TRAS(MIN_DDR_SCLK(40)) | ||
155 | #define DDR_tRP DDR_TRP(MIN_DDR_SCLK(15)) | ||
156 | #define DDR_tRFC DDR_TRFC(MIN_DDR_SCLK(70)) | ||
157 | #define DDR_tREFI DDR_TREFI(MAX_DDR_SCLK(7800)) | ||
158 | |||
159 | #define DDR_tRCD DDR_TRCD(MIN_DDR_SCLK(15)) | ||
160 | #define DDR_tWTR DDR_TWTR(2) | ||
161 | #define DDR_tMRD DDR_TMRD(MIN_DDR_SCLK(10)) | ||
162 | #define DDR_tWR DDR_TWR(MIN_DDR_SCLK(15)) | ||
163 | #endif | ||
164 | |||
165 | #if defined(CONFIG_MEM_GENERIC_BOARD) | ||
166 | #define DDR_SIZE DEVSZ_512 | ||
167 | #define DDR_WIDTH DEVWD_16 | ||
168 | #define DDR_MAX_tCK 13 | ||
169 | |||
170 | #define DDR_tRCD DDR_TRCD(3) | ||
171 | #define DDR_tWTR DDR_TWTR(2) | ||
172 | #define DDR_tWR DDR_TWR(2) | ||
173 | #define DDR_tMRD DDR_TMRD(2) | ||
174 | #define DDR_tRP DDR_TRP(3) | ||
175 | #define DDR_tRAS DDR_TRAS(7) | ||
176 | #define DDR_tRC DDR_TRC(10) | ||
177 | #define DDR_tRFC DDR_TRFC(12) | ||
178 | #define DDR_tREFI DDR_TREFI(1288) | ||
179 | #endif | ||
180 | |||
181 | #if (CONFIG_SCLK_HZ < DDR_CLK_HZ(DDR_MAX_tCK)) | ||
182 | # error "CONFIG_SCLK_HZ is too small (<DDR_CLK_HZ(DDR_MAX_tCK) Hz)." | ||
183 | #elif (CONFIG_SCLK_HZ <= 133333333) | ||
184 | # define DDR_CL CL_2 | ||
185 | #else | ||
186 | # error "CONFIG_SCLK_HZ is too large (>133333333 Hz)." | ||
187 | #endif | ||
188 | |||
189 | #ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC | ||
190 | #define mem_DDRCTL0 (DDR_tRP | DDR_tRAS | DDR_tRC | DDR_tRFC | DDR_tREFI) | ||
191 | #define mem_DDRCTL1 (DDR_DATWIDTH | EXTBANK_1 | DDR_SIZE | DDR_WIDTH | DDR_tWTR \ | ||
192 | | DDR_tMRD | DDR_tWR | DDR_tRCD) | ||
193 | #define mem_DDRCTL2 DDR_CL | ||
194 | #else | ||
195 | #define mem_DDRCTL0 CONFIG_MEM_DDRCTL0 | ||
196 | #define mem_DDRCTL1 CONFIG_MEM_DDRCTL1 | ||
197 | #define mem_DDRCTL2 CONFIG_MEM_DDRCTL2 | ||
198 | #endif | ||
199 | #endif | ||
200 | |||
201 | #if defined CONFIG_CLKIN_HALF | ||
202 | #define CLKIN_HALF 1 | ||
203 | #else | ||
204 | #define CLKIN_HALF 0 | ||
205 | #endif | ||
206 | |||
207 | #if defined CONFIG_PLL_BYPASS | ||
208 | #define PLL_BYPASS 1 | ||
209 | #else | ||
210 | #define PLL_BYPASS 0 | ||
211 | #endif | ||
212 | |||
213 | /***************************************Currently Not Being Used *********************************/ | ||
214 | |||
215 | #if defined(CONFIG_FLASH_SPEED_BWAT) && \ | ||
216 | defined(CONFIG_FLASH_SPEED_BRAT) && \ | ||
217 | defined(CONFIG_FLASH_SPEED_BHT) && \ | ||
218 | defined(CONFIG_FLASH_SPEED_BST) && \ | ||
219 | defined(CONFIG_FLASH_SPEED_BTT) | ||
220 | |||
221 | #define flash_EBIU_AMBCTL_WAT ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 | ||
222 | #define flash_EBIU_AMBCTL_RAT ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 | ||
223 | #define flash_EBIU_AMBCTL_HT ((CONFIG_FLASH_SPEED_BHT * 4) / (4000000000 / CONFIG_SCLK_HZ)) | ||
224 | #define flash_EBIU_AMBCTL_ST ((CONFIG_FLASH_SPEED_BST * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 | ||
225 | #define flash_EBIU_AMBCTL_TT ((CONFIG_FLASH_SPEED_BTT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1 | ||
226 | |||
227 | #if (flash_EBIU_AMBCTL_TT > 3) | ||
228 | #define flash_EBIU_AMBCTL0_TT B0TT_4 | ||
229 | #endif | ||
230 | #if (flash_EBIU_AMBCTL_TT == 3) | ||
231 | #define flash_EBIU_AMBCTL0_TT B0TT_3 | ||
232 | #endif | ||
233 | #if (flash_EBIU_AMBCTL_TT == 2) | ||
234 | #define flash_EBIU_AMBCTL0_TT B0TT_2 | ||
235 | #endif | ||
236 | #if (flash_EBIU_AMBCTL_TT < 2) | ||
237 | #define flash_EBIU_AMBCTL0_TT B0TT_1 | ||
238 | #endif | ||
239 | |||
240 | #if (flash_EBIU_AMBCTL_ST > 3) | ||
241 | #define flash_EBIU_AMBCTL0_ST B0ST_4 | ||
242 | #endif | ||
243 | #if (flash_EBIU_AMBCTL_ST == 3) | ||
244 | #define flash_EBIU_AMBCTL0_ST B0ST_3 | ||
245 | #endif | ||
246 | #if (flash_EBIU_AMBCTL_ST == 2) | ||
247 | #define flash_EBIU_AMBCTL0_ST B0ST_2 | ||
248 | #endif | ||
249 | #if (flash_EBIU_AMBCTL_ST < 2) | ||
250 | #define flash_EBIU_AMBCTL0_ST B0ST_1 | ||
251 | #endif | ||
252 | |||
253 | #if (flash_EBIU_AMBCTL_HT > 2) | ||
254 | #define flash_EBIU_AMBCTL0_HT B0HT_3 | ||
255 | #endif | ||
256 | #if (flash_EBIU_AMBCTL_HT == 2) | ||
257 | #define flash_EBIU_AMBCTL0_HT B0HT_2 | ||
258 | #endif | ||
259 | #if (flash_EBIU_AMBCTL_HT == 1) | ||
260 | #define flash_EBIU_AMBCTL0_HT B0HT_1 | ||
261 | #endif | ||
262 | #if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0) | ||
263 | #define flash_EBIU_AMBCTL0_HT B0HT_0 | ||
264 | #endif | ||
265 | #if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0) | ||
266 | #define flash_EBIU_AMBCTL0_HT B0HT_1 | ||
267 | #endif | ||
268 | |||
269 | #if (flash_EBIU_AMBCTL_WAT > 14) | ||
270 | #define flash_EBIU_AMBCTL0_WAT B0WAT_15 | ||
271 | #endif | ||
272 | #if (flash_EBIU_AMBCTL_WAT == 14) | ||
273 | #define flash_EBIU_AMBCTL0_WAT B0WAT_14 | ||
274 | #endif | ||
275 | #if (flash_EBIU_AMBCTL_WAT == 13) | ||
276 | #define flash_EBIU_AMBCTL0_WAT B0WAT_13 | ||
277 | #endif | ||
278 | #if (flash_EBIU_AMBCTL_WAT == 12) | ||
279 | #define flash_EBIU_AMBCTL0_WAT B0WAT_12 | ||
280 | #endif | ||
281 | #if (flash_EBIU_AMBCTL_WAT == 11) | ||
282 | #define flash_EBIU_AMBCTL0_WAT B0WAT_11 | ||
283 | #endif | ||
284 | #if (flash_EBIU_AMBCTL_WAT == 10) | ||
285 | #define flash_EBIU_AMBCTL0_WAT B0WAT_10 | ||
286 | #endif | ||
287 | #if (flash_EBIU_AMBCTL_WAT == 9) | ||
288 | #define flash_EBIU_AMBCTL0_WAT B0WAT_9 | ||
289 | #endif | ||
290 | #if (flash_EBIU_AMBCTL_WAT == 8) | ||
291 | #define flash_EBIU_AMBCTL0_WAT B0WAT_8 | ||
292 | #endif | ||
293 | #if (flash_EBIU_AMBCTL_WAT == 7) | ||
294 | #define flash_EBIU_AMBCTL0_WAT B0WAT_7 | ||
295 | #endif | ||
296 | #if (flash_EBIU_AMBCTL_WAT == 6) | ||
297 | #define flash_EBIU_AMBCTL0_WAT B0WAT_6 | ||
298 | #endif | ||
299 | #if (flash_EBIU_AMBCTL_WAT == 5) | ||
300 | #define flash_EBIU_AMBCTL0_WAT B0WAT_5 | ||
301 | #endif | ||
302 | #if (flash_EBIU_AMBCTL_WAT == 4) | ||
303 | #define flash_EBIU_AMBCTL0_WAT B0WAT_4 | ||
304 | #endif | ||
305 | #if (flash_EBIU_AMBCTL_WAT == 3) | ||
306 | #define flash_EBIU_AMBCTL0_WAT B0WAT_3 | ||
307 | #endif | ||
308 | #if (flash_EBIU_AMBCTL_WAT == 2) | ||
309 | #define flash_EBIU_AMBCTL0_WAT B0WAT_2 | ||
310 | #endif | ||
311 | #if (flash_EBIU_AMBCTL_WAT == 1) | ||
312 | #define flash_EBIU_AMBCTL0_WAT B0WAT_1 | ||
313 | #endif | ||
314 | |||
315 | #if (flash_EBIU_AMBCTL_RAT > 14) | ||
316 | #define flash_EBIU_AMBCTL0_RAT B0RAT_15 | ||
317 | #endif | ||
318 | #if (flash_EBIU_AMBCTL_RAT == 14) | ||
319 | #define flash_EBIU_AMBCTL0_RAT B0RAT_14 | ||
320 | #endif | ||
321 | #if (flash_EBIU_AMBCTL_RAT == 13) | ||
322 | #define flash_EBIU_AMBCTL0_RAT B0RAT_13 | ||
323 | #endif | ||
324 | #if (flash_EBIU_AMBCTL_RAT == 12) | ||
325 | #define flash_EBIU_AMBCTL0_RAT B0RAT_12 | ||
326 | #endif | ||
327 | #if (flash_EBIU_AMBCTL_RAT == 11) | ||
328 | #define flash_EBIU_AMBCTL0_RAT B0RAT_11 | ||
329 | #endif | ||
330 | #if (flash_EBIU_AMBCTL_RAT == 10) | ||
331 | #define flash_EBIU_AMBCTL0_RAT B0RAT_10 | ||
332 | #endif | ||
333 | #if (flash_EBIU_AMBCTL_RAT == 9) | ||
334 | #define flash_EBIU_AMBCTL0_RAT B0RAT_9 | ||
335 | #endif | ||
336 | #if (flash_EBIU_AMBCTL_RAT == 8) | ||
337 | #define flash_EBIU_AMBCTL0_RAT B0RAT_8 | ||
338 | #endif | ||
339 | #if (flash_EBIU_AMBCTL_RAT == 7) | ||
340 | #define flash_EBIU_AMBCTL0_RAT B0RAT_7 | ||
341 | #endif | ||
342 | #if (flash_EBIU_AMBCTL_RAT == 6) | ||
343 | #define flash_EBIU_AMBCTL0_RAT B0RAT_6 | ||
344 | #endif | ||
345 | #if (flash_EBIU_AMBCTL_RAT == 5) | ||
346 | #define flash_EBIU_AMBCTL0_RAT B0RAT_5 | ||
347 | #endif | ||
348 | #if (flash_EBIU_AMBCTL_RAT == 4) | ||
349 | #define flash_EBIU_AMBCTL0_RAT B0RAT_4 | ||
350 | #endif | ||
351 | #if (flash_EBIU_AMBCTL_RAT == 3) | ||
352 | #define flash_EBIU_AMBCTL0_RAT B0RAT_3 | ||
353 | #endif | ||
354 | #if (flash_EBIU_AMBCTL_RAT == 2) | ||
355 | #define flash_EBIU_AMBCTL0_RAT B0RAT_2 | ||
356 | #endif | ||
357 | #if (flash_EBIU_AMBCTL_RAT == 1) | ||
358 | #define flash_EBIU_AMBCTL0_RAT B0RAT_1 | ||
359 | #endif | ||
360 | |||
361 | #define flash_EBIU_AMBCTL0 \ | ||
362 | (flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \ | ||
363 | flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN) | ||
364 | #endif | ||
diff --git a/arch/blackfin/include/asm/mem_map.h b/arch/blackfin/include/asm/mem_map.h index 88d04a707708..e92b31051bb7 100644 --- a/arch/blackfin/include/asm/mem_map.h +++ b/arch/blackfin/include/asm/mem_map.h | |||
@@ -9,4 +9,79 @@ | |||
9 | 9 | ||
10 | #include <mach/mem_map.h> | 10 | #include <mach/mem_map.h> |
11 | 11 | ||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
14 | #ifdef CONFIG_SMP | ||
15 | static inline ulong get_l1_scratch_start_cpu(int cpu) | ||
16 | { | ||
17 | return (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START; | ||
18 | } | ||
19 | static inline ulong get_l1_code_start_cpu(int cpu) | ||
20 | { | ||
21 | return (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START; | ||
22 | } | ||
23 | static inline ulong get_l1_data_a_start_cpu(int cpu) | ||
24 | { | ||
25 | return (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START; | ||
26 | } | ||
27 | static inline ulong get_l1_data_b_start_cpu(int cpu) | ||
28 | { | ||
29 | return (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START; | ||
30 | } | ||
31 | |||
32 | static inline ulong get_l1_scratch_start(void) | ||
33 | { | ||
34 | return get_l1_scratch_start_cpu(blackfin_core_id()); | ||
35 | } | ||
36 | static inline ulong get_l1_code_start(void) | ||
37 | { | ||
38 | return get_l1_code_start_cpu(blackfin_core_id()); | ||
39 | } | ||
40 | static inline ulong get_l1_data_a_start(void) | ||
41 | { | ||
42 | return get_l1_data_a_start_cpu(blackfin_core_id()); | ||
43 | } | ||
44 | static inline ulong get_l1_data_b_start(void) | ||
45 | { | ||
46 | return get_l1_data_b_start_cpu(blackfin_core_id()); | ||
47 | } | ||
48 | |||
49 | #else /* !CONFIG_SMP */ | ||
50 | |||
51 | static inline ulong get_l1_scratch_start_cpu(int cpu) | ||
52 | { | ||
53 | return L1_SCRATCH_START; | ||
54 | } | ||
55 | static inline ulong get_l1_code_start_cpu(int cpu) | ||
56 | { | ||
57 | return L1_CODE_START; | ||
58 | } | ||
59 | static inline ulong get_l1_data_a_start_cpu(int cpu) | ||
60 | { | ||
61 | return L1_DATA_A_START; | ||
62 | } | ||
63 | static inline ulong get_l1_data_b_start_cpu(int cpu) | ||
64 | { | ||
65 | return L1_DATA_B_START; | ||
66 | } | ||
67 | static inline ulong get_l1_scratch_start(void) | ||
68 | { | ||
69 | return get_l1_scratch_start_cpu(0); | ||
70 | } | ||
71 | static inline ulong get_l1_code_start(void) | ||
72 | { | ||
73 | return get_l1_code_start_cpu(0); | ||
74 | } | ||
75 | static inline ulong get_l1_data_a_start(void) | ||
76 | { | ||
77 | return get_l1_data_a_start_cpu(0); | ||
78 | } | ||
79 | static inline ulong get_l1_data_b_start(void) | ||
80 | { | ||
81 | return get_l1_data_b_start_cpu(0); | ||
82 | } | ||
83 | |||
84 | #endif /* CONFIG_SMP */ | ||
85 | #endif /* __ASSEMBLY__ */ | ||
86 | |||
12 | #endif /* _MEM_MAP_H_ */ | 87 | #endif /* _MEM_MAP_H_ */ |
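The accessors added above let common code ask for a core's private L1 regions without sprinkling COREA_/COREB_ constants around; under CONFIG_SMP the per-CPU variants select the core A or core B base, while on UP builds every variant collapses to the single L1_* constant. A small hypothetical illustration of a caller (dump_l1_layout() is a made-up helper for this sketch, assuming the usual <linux/smp.h>/<linux/kernel.h> context; it is not part of the patch):

/* Hypothetical helper: report where each core's private L1 regions live. */
static void dump_l1_layout(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                printk(KERN_INFO "cpu%d: L1 scratch @ 0x%08lx, L1 code @ 0x%08lx\n",
                       cpu, get_l1_scratch_start_cpu(cpu),
                       get_l1_code_start_cpu(cpu));
}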
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h index 35593dda2a4d..944e29faae48 100644 --- a/arch/blackfin/include/asm/mmu_context.h +++ b/arch/blackfin/include/asm/mmu_context.h | |||
@@ -37,6 +37,10 @@ | |||
37 | #include <asm/pgalloc.h> | 37 | #include <asm/pgalloc.h> |
38 | #include <asm/cplbinit.h> | 38 | #include <asm/cplbinit.h> |
39 | 39 | ||
40 | /* Note: L1 stacks are CPU-private things, so we bluntly disable this | ||
41 | feature in SMP mode, and use the per-CPU scratch SRAM bank only to | ||
42 | store the PDA instead. */ | ||
43 | |||
40 | extern void *current_l1_stack_save; | 44 | extern void *current_l1_stack_save; |
41 | extern int nr_l1stack_tasks; | 45 | extern int nr_l1stack_tasks; |
42 | extern void *l1_stack_base; | 46 | extern void *l1_stack_base; |
@@ -88,12 +92,15 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base) | |||
88 | static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, | 92 | static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, |
89 | struct task_struct *tsk) | 93 | struct task_struct *tsk) |
90 | { | 94 | { |
95 | #ifdef CONFIG_MPU | ||
96 | unsigned int cpu = smp_processor_id(); | ||
97 | #endif | ||
91 | if (prev_mm == next_mm) | 98 | if (prev_mm == next_mm) |
92 | return; | 99 | return; |
93 | #ifdef CONFIG_MPU | 100 | #ifdef CONFIG_MPU |
94 | if (prev_mm->context.page_rwx_mask == current_rwx_mask) { | 101 | if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) { |
95 | flush_switched_cplbs(); | 102 | flush_switched_cplbs(cpu); |
96 | set_mask_dcplbs(next_mm->context.page_rwx_mask); | 103 | set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu); |
97 | } | 104 | } |
98 | #endif | 105 | #endif |
99 | 106 | ||
@@ -138,9 +145,10 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr, | |||
138 | 145 | ||
139 | static inline void update_protections(struct mm_struct *mm) | 146 | static inline void update_protections(struct mm_struct *mm) |
140 | { | 147 | { |
141 | if (mm->context.page_rwx_mask == current_rwx_mask) { | 148 | unsigned int cpu = smp_processor_id(); |
142 | flush_switched_cplbs(); | 149 | if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) { |
143 | set_mask_dcplbs(mm->context.page_rwx_mask); | 150 | flush_switched_cplbs(cpu); |
151 | set_mask_dcplbs(mm->context.page_rwx_mask, cpu); | ||
144 | } | 152 | } |
145 | } | 153 | } |
146 | #endif | 154 | #endif |
@@ -165,6 +173,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
165 | static inline void destroy_context(struct mm_struct *mm) | 173 | static inline void destroy_context(struct mm_struct *mm) |
166 | { | 174 | { |
167 | struct sram_list_struct *tmp; | 175 | struct sram_list_struct *tmp; |
176 | #ifdef CONFIG_MPU | ||
177 | unsigned int cpu = smp_processor_id(); | ||
178 | #endif | ||
168 | 179 | ||
169 | #ifdef CONFIG_APP_STACK_L1 | 180 | #ifdef CONFIG_APP_STACK_L1 |
170 | if (current_l1_stack_save == mm->context.l1_stack_save) | 181 | if (current_l1_stack_save == mm->context.l1_stack_save) |
@@ -179,8 +190,8 @@ static inline void destroy_context(struct mm_struct *mm) | |||
179 | kfree(tmp); | 190 | kfree(tmp); |
180 | } | 191 | } |
181 | #ifdef CONFIG_MPU | 192 | #ifdef CONFIG_MPU |
182 | if (current_rwx_mask == mm->context.page_rwx_mask) | 193 | if (current_rwx_mask[cpu] == mm->context.page_rwx_mask) |
183 | current_rwx_mask = NULL; | 194 | current_rwx_mask[cpu] = NULL; |
184 | free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order); | 195 | free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order); |
185 | #endif | 196 | #endif |
186 | } | 197 | } |
diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h new file mode 100644 index 000000000000..0134151656af --- /dev/null +++ b/arch/blackfin/include/asm/mutex-dec.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * include/asm-generic/mutex-dec.h | ||
3 | * | ||
4 | * Generic implementation of the mutex fastpath, based on atomic | ||
5 | * decrement/increment. | ||
6 | */ | ||
7 | #ifndef _ASM_GENERIC_MUTEX_DEC_H | ||
8 | #define _ASM_GENERIC_MUTEX_DEC_H | ||
9 | |||
10 | /** | ||
11 | * __mutex_fastpath_lock - try to take the lock by moving the count | ||
12 | * from 1 to a 0 value | ||
13 | * @count: pointer of type atomic_t | ||
14 | * @fail_fn: function to call if the original value was not 1 | ||
15 | * | ||
16 | * Change the count from 1 to a value lower than 1, and call <fail_fn> if | ||
17 | * it wasn't 1 originally. This function MUST leave the value lower than | ||
18 | * 1 even when the "1" assertion wasn't true. | ||
19 | */ | ||
20 | static inline void | ||
21 | __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) | ||
22 | { | ||
23 | if (unlikely(atomic_dec_return(count) < 0)) | ||
24 | fail_fn(count); | ||
25 | else | ||
26 | smp_mb(); | ||
27 | } | ||
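For context, this fastpath is not called directly by drivers; the generic mutex code drives it. Roughly (a simplified sketch of kernel/mutex.c from this era, omitting debug variants; not part of the patch):

void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /* take the slowpath only if the count did not go 1 -> 0 */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

void __sched mutex_unlock(struct mutex *lock)
{
        /* wake waiters via the slowpath only if the count did not go 0 -> 1 */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}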
28 | |||
29 | /** | ||
30 | * __mutex_fastpath_lock_retval - try to take the lock by moving the count | ||
31 | * from 1 to a 0 value | ||
32 | * @count: pointer of type atomic_t | ||
33 | * @fail_fn: function to call if the original value was not 1 | ||
34 | * | ||
35 | * Change the count from 1 to a value lower than 1, and call <fail_fn> if | ||
36 | * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, | ||
37 | * or anything the slow path function returns. | ||
38 | */ | ||
39 | static inline int | ||
40 | __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | ||
41 | { | ||
42 | if (unlikely(atomic_dec_return(count) < 0)) | ||
43 | return fail_fn(count); | ||
44 | else { | ||
45 | smp_mb(); | ||
46 | return 0; | ||
47 | } | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * __mutex_fastpath_unlock - try to promote the count from 0 to 1 | ||
52 | * @count: pointer of type atomic_t | ||
53 | * @fail_fn: function to call if the original value was not 0 | ||
54 | * | ||
55 | * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. | ||
56 | * In the failure case, this function is allowed to either set the value to | ||
57 | * 1, or to set it to a value lower than 1. | ||
58 | * | ||
59 | * If the implementation sets it to a value of lower than 1, then the | ||
60 | * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs | ||
61 | * to return 0 otherwise. | ||
62 | */ | ||
63 | static inline void | ||
64 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) | ||
65 | { | ||
66 | smp_mb(); | ||
67 | if (unlikely(atomic_inc_return(count) <= 0)) | ||
68 | fail_fn(count); | ||
69 | } | ||
70 | |||
71 | #define __mutex_slowpath_needs_to_unlock() 1 | ||
72 | |||
73 | /** | ||
74 | * __mutex_fastpath_trylock - try to acquire the mutex, without waiting | ||
75 | * | ||
76 | * @count: pointer of type atomic_t | ||
77 | * @fail_fn: fallback function | ||
78 | * | ||
79 | * Change the count from 1 to a value lower than 1, and return 0 (failure) | ||
80 | * if it wasn't 1 originally, or return 1 (success) otherwise. This function | ||
81 | * MUST leave the value lower than 1 even when the "1" assertion wasn't true. | ||
82 | * Additionally, if the value was < 0 originally, this function must not leave | ||
83 | * it to 0 on failure. | ||
84 | * | ||
85 | * If the architecture has no effective trylock variant, it should call the | ||
86 | * <fail_fn> spinlock-based trylock variant unconditionally. | ||
87 | */ | ||
88 | static inline int | ||
89 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | ||
90 | { | ||
91 | /* | ||
92 | * We have two variants here. The cmpxchg based one is the best one | ||
92 | * because it never induces a false contention state. It is included | ||
94 | * here because architectures using the inc/dec algorithms over the | ||
95 | * xchg ones are much more likely to support cmpxchg natively. | ||
96 | * | ||
96 | * If not, we fall back to the spinlock-based variant - that is | ||
98 | * just as efficient (and simpler) as a 'destructive' probing of | ||
99 | * the mutex state would be. | ||
100 | */ | ||
101 | #ifdef __HAVE_ARCH_CMPXCHG | ||
102 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) { | ||
103 | smp_mb(); | ||
104 | return 1; | ||
105 | } | ||
106 | return 0; | ||
107 | #else | ||
108 | return fail_fn(count); | ||
109 | #endif | ||
110 | } | ||
111 | |||
112 | #endif | ||
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h index 458c1f7fbc18..5d399256bf06 100644 --- a/arch/blackfin/include/asm/mutex.h +++ b/arch/blackfin/include/asm/mutex.h | |||
@@ -6,4 +6,67 @@ | |||
6 | * implementation. (see asm-generic/mutex-xchg.h for details) | 6 | * implementation. (see asm-generic/mutex-xchg.h for details) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef _ASM_MUTEX_H | ||
10 | #define _ASM_MUTEX_H | ||
11 | |||
12 | #ifndef CONFIG_SMP | ||
9 | #include <asm-generic/mutex-dec.h> | 13 | #include <asm-generic/mutex-dec.h> |
14 | #else | ||
15 | |||
16 | static inline void | ||
17 | __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) | ||
18 | { | ||
19 | if (unlikely(atomic_dec_return(count) < 0)) | ||
20 | fail_fn(count); | ||
21 | else | ||
22 | smp_mb(); | ||
23 | } | ||
24 | |||
25 | static inline int | ||
26 | __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | ||
27 | { | ||
28 | if (unlikely(atomic_dec_return(count) < 0)) | ||
29 | return fail_fn(count); | ||
30 | else { | ||
31 | smp_mb(); | ||
32 | return 0; | ||
33 | } | ||
34 | } | ||
35 | |||
36 | static inline void | ||
37 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) | ||
38 | { | ||
39 | smp_mb(); | ||
40 | if (unlikely(atomic_inc_return(count) <= 0)) | ||
41 | fail_fn(count); | ||
42 | } | ||
43 | |||
44 | #define __mutex_slowpath_needs_to_unlock() 1 | ||
45 | |||
46 | static inline int | ||
47 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | ||
48 | { | ||
49 | /* | ||
50 | * We have two variants here. The cmpxchg based one is the best one | ||
51 | * because it never induces a false contention state. It is included | ||
52 | * here because architectures using the inc/dec algorithms over the | ||
53 | * xchg ones are much more likely to support cmpxchg natively. | ||
54 | * | ||
55 | * If not, we fall back to the spinlock-based variant - that is | ||
56 | * just as efficient (and simpler) as a 'destructive' probing of | ||
57 | * the mutex state would be. | ||
58 | */ | ||
59 | #ifdef __HAVE_ARCH_CMPXCHG | ||
60 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) { | ||
61 | smp_mb(); | ||
62 | return 1; | ||
63 | } | ||
64 | return 0; | ||
65 | #else | ||
66 | return fail_fn(count); | ||
67 | #endif | ||
68 | } | ||
69 | |||
70 | #endif | ||
71 | |||
72 | #endif | ||
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h new file mode 100644 index 000000000000..bd8d4a7efeb2 --- /dev/null +++ b/arch/blackfin/include/asm/pda.h | |||
@@ -0,0 +1,70 @@ | |||
1 | /* | ||
2 | * File: arch/blackfin/include/asm/pda.h | ||
3 | * Author: Philippe Gerum <rpm@xenomai.org> | ||
4 | * | ||
5 | * Copyright 2007 Analog Devices Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, see the file COPYING, or write | ||
19 | * to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_BLACKFIN_PDA_H | ||
24 | #define _ASM_BLACKFIN_PDA_H | ||
25 | |||
26 | #include <mach/anomaly.h> | ||
27 | |||
28 | #ifndef __ASSEMBLY__ | ||
29 | |||
30 | struct blackfin_pda { /* Per-processor Data Area */ | ||
31 | struct blackfin_pda *next; | ||
32 | |||
33 | unsigned long syscfg; | ||
34 | #ifdef CONFIG_SMP | ||
35 | unsigned long imask; /* Current IMASK value */ | ||
36 | #endif | ||
37 | |||
38 | unsigned long *ipdt; /* Start of switchable I-CPLB table */ | ||
39 | unsigned long *ipdt_swapcount; /* Number of swaps in ipdt */ | ||
40 | unsigned long *dpdt; /* Start of switchable D-CPLB table */ | ||
41 | unsigned long *dpdt_swapcount; /* Number of swaps in dpdt */ | ||
42 | |||
43 | /* | ||
44 | * Single instructions can have multiple faults, which | ||
45 | * need to be handled by traps.c in irq5. We store | ||
46 | * the exception cause to ensure we don't miss a | ||
47 | * double fault condition. | ||
48 | */ | ||
49 | unsigned long ex_iptr; | ||
50 | unsigned long ex_optr; | ||
51 | unsigned long ex_buf[4]; | ||
52 | unsigned long ex_imask; /* Saved imask from exception */ | ||
53 | unsigned long *ex_stack; /* Exception stack space */ | ||
54 | |||
55 | #ifdef ANOMALY_05000261 | ||
56 | unsigned long last_cplb_fault_retx; | ||
57 | #endif | ||
58 | unsigned long dcplb_fault_addr; | ||
59 | unsigned long icplb_fault_addr; | ||
60 | unsigned long retx; | ||
61 | unsigned long seqstat; | ||
62 | }; | ||
63 | |||
64 | extern struct blackfin_pda cpu_pda[]; | ||
65 | |||
66 | void reserve_pda(void); | ||
67 | |||
68 | #endif /* __ASSEMBLY__ */ | ||
69 | |||
70 | #endif /* _ASM_BLACKFIN_PDA_H */ | ||
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h index 78dd61f6b39f..797c0c165069 100644 --- a/arch/blackfin/include/asm/percpu.h +++ b/arch/blackfin/include/asm/percpu.h | |||
@@ -3,4 +3,14 @@ | |||
3 | 3 | ||
4 | #include <asm-generic/percpu.h> | 4 | #include <asm-generic/percpu.h> |
5 | 5 | ||
6 | #endif /* __ARCH_BLACKFIN_PERCPU__ */ | 6 | #ifdef CONFIG_MODULES |
7 | #define PERCPU_MODULE_RESERVE 8192 | ||
8 | #else | ||
9 | #define PERCPU_MODULE_RESERVE 0 | ||
10 | #endif | ||
11 | |||
12 | #define PERCPU_ENOUGH_ROOM \ | ||
13 | (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ | ||
14 | PERCPU_MODULE_RESERVE) | ||
15 | |||
16 | #endif /* __ARCH_BLACKFIN_PERCPU__ */ | ||
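PERCPU_ENOUGH_ROOM sizes the per-CPU allocation: the static per-CPU section is rounded up to a cache line, and 8 KiB of headroom is added for modules' per-CPU data when CONFIG_MODULES is set. A worked example with assumed numbers (the static size and line size are hypothetical, not taken from the patch):

#include <stdio.h>

#define SMP_CACHE_BYTES         32      /* assumed Blackfin L1 line size */
#define PERCPU_MODULE_RESERVE   8192    /* CONFIG_MODULES=y case */
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long static_size = 12345;      /* hypothetical __per_cpu_end - __per_cpu_start */
        unsigned long room = ALIGN(static_size, SMP_CACHE_BYTES) + PERCPU_MODULE_RESERVE;

        printf("PERCPU_ENOUGH_ROOM = %lu bytes\n", room);      /* 12352 + 8192 = 20544 */
        return 0;
}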
diff --git a/arch/blackfin/include/asm/pgtable.h b/arch/blackfin/include/asm/pgtable.h index f11684e4ade7..783c8f7f8f8c 100644 --- a/arch/blackfin/include/asm/pgtable.h +++ b/arch/blackfin/include/asm/pgtable.h | |||
@@ -29,6 +29,7 @@ typedef pte_t *pte_addr_t; | |||
29 | #define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */ | 29 | #define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */ |
30 | #define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */ | 30 | #define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */ |
31 | #define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */ | 31 | #define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */ |
32 | #define pgprot_noncached(prot) (prot) | ||
32 | 33 | ||
33 | extern void paging_init(void); | 34 | extern void paging_init(void); |
34 | 35 | ||
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h index e3e9b41fa8db..0eece23b41c7 100644 --- a/arch/blackfin/include/asm/processor.h +++ b/arch/blackfin/include/asm/processor.h | |||
@@ -24,6 +24,14 @@ static inline void wrusp(unsigned long usp) | |||
24 | __asm__ __volatile__("usp = %0;\n\t"::"da"(usp)); | 24 | __asm__ __volatile__("usp = %0;\n\t"::"da"(usp)); |
25 | } | 25 | } |
26 | 26 | ||
27 | static inline unsigned long __get_SP(void) | ||
28 | { | ||
29 | unsigned long sp; | ||
30 | |||
31 | __asm__ __volatile__("%0 = sp;\n\t" : "=da"(sp)); | ||
32 | return sp; | ||
33 | } | ||
34 | |||
27 | /* | 35 | /* |
28 | * User space process size: 1st byte beyond user address space. | 36 | * User space process size: 1st byte beyond user address space. |
29 | * Fairly meaningless on nommu. Parts of user programs can be scattered | 37 | * Fairly meaningless on nommu. Parts of user programs can be scattered |
@@ -57,6 +65,7 @@ struct thread_struct { | |||
57 | * pass the data segment into user programs if it exists, | 65 | * pass the data segment into user programs if it exists, |
58 | * it can't hurt anything as far as I can tell | 66 | * it can't hurt anything as far as I can tell |
59 | */ | 67 | */ |
68 | #ifndef CONFIG_SMP | ||
60 | #define start_thread(_regs, _pc, _usp) \ | 69 | #define start_thread(_regs, _pc, _usp) \ |
61 | do { \ | 70 | do { \ |
62 | set_fs(USER_DS); \ | 71 | set_fs(USER_DS); \ |
@@ -70,6 +79,16 @@ do { \ | |||
70 | sizeof(*L1_SCRATCH_TASK_INFO)); \ | 79 | sizeof(*L1_SCRATCH_TASK_INFO)); \ |
71 | wrusp(_usp); \ | 80 | wrusp(_usp); \ |
72 | } while(0) | 81 | } while(0) |
82 | #else | ||
83 | #define start_thread(_regs, _pc, _usp) \ | ||
84 | do { \ | ||
85 | set_fs(USER_DS); \ | ||
86 | (_regs)->pc = (_pc); \ | ||
87 | if (current->mm) \ | ||
88 | (_regs)->p5 = current->mm->start_data; \ | ||
89 | wrusp(_usp); \ | ||
90 | } while (0) | ||
91 | #endif | ||
73 | 92 | ||
74 | /* Forward declaration, a strange C thing */ | 93 | /* Forward declaration, a strange C thing */ |
75 | struct task_struct; | 94 | struct task_struct; |
@@ -106,7 +125,8 @@ unsigned long get_wchan(struct task_struct *p); | |||
106 | eip; }) | 125 | eip; }) |
107 | #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) | 126 | #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) |
108 | 127 | ||
109 | #define cpu_relax() barrier() | 128 | #define cpu_relax() smp_mb() |
129 | |||
110 | 130 | ||
111 | /* Get the Silicon Revision of the chip */ | 131 | /* Get the Silicon Revision of the chip */ |
112 | static inline uint32_t __pure bfin_revid(void) | 132 | static inline uint32_t __pure bfin_revid(void) |
@@ -137,7 +157,11 @@ static inline uint32_t __pure bfin_revid(void) | |||
137 | static inline uint16_t __pure bfin_cpuid(void) | 157 | static inline uint16_t __pure bfin_cpuid(void) |
138 | { | 158 | { |
139 | return (bfin_read_CHIPID() & CHIPID_FAMILY) >> 12; | 159 | return (bfin_read_CHIPID() & CHIPID_FAMILY) >> 12; |
160 | } | ||
140 | 161 | ||
162 | static inline uint32_t __pure bfin_dspid(void) | ||
163 | { | ||
164 | return bfin_read_DSPID(); | ||
141 | } | 165 | } |
142 | 166 | ||
143 | static inline uint32_t __pure bfin_compiled_revid(void) | 167 | static inline uint32_t __pure bfin_compiled_revid(void) |
@@ -154,6 +178,8 @@ static inline uint32_t __pure bfin_compiled_revid(void) | |||
154 | return 4; | 178 | return 4; |
155 | #elif defined(CONFIG_BF_REV_0_5) | 179 | #elif defined(CONFIG_BF_REV_0_5) |
156 | return 5; | 180 | return 5; |
181 | #elif defined(CONFIG_BF_REV_0_6) | ||
182 | return 6; | ||
157 | #elif defined(CONFIG_BF_REV_ANY) | 183 | #elif defined(CONFIG_BF_REV_ANY) |
158 | return 0xffff; | 184 | return 0xffff; |
159 | #else | 185 | #else |
diff --git a/arch/blackfin/include/asm/reboot.h b/arch/blackfin/include/asm/reboot.h index 6d448b5f5985..4856d62b7467 100644 --- a/arch/blackfin/include/asm/reboot.h +++ b/arch/blackfin/include/asm/reboot.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-blackfin/reboot.h - shutdown/reboot header | 2 | * reboot.h - shutdown/reboot header |
3 | * | 3 | * |
4 | * Copyright 2004-2007 Analog Devices Inc. | 4 | * Copyright 2004-2008 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Licensed under the GPL-2 or later. | 6 | * Licensed under the GPL-2 or later. |
7 | */ | 7 | */ |
diff --git a/arch/blackfin/include/asm/rwlock.h b/arch/blackfin/include/asm/rwlock.h new file mode 100644 index 000000000000..4a724b378971 --- /dev/null +++ b/arch/blackfin/include/asm/rwlock.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_BLACKFIN_RWLOCK_H | ||
2 | #define _ASM_BLACKFIN_RWLOCK_H | ||
3 | |||
4 | #define RW_LOCK_BIAS 0x01000000 | ||
5 | |||
6 | #endif | ||
diff --git a/arch/blackfin/include/asm/serial.h b/arch/blackfin/include/asm/serial.h index 994dd869558c..3a47606c858b 100644 --- a/arch/blackfin/include/asm/serial.h +++ b/arch/blackfin/include/asm/serial.h | |||
@@ -3,3 +3,4 @@ | |||
3 | */ | 3 | */ |
4 | 4 | ||
5 | #define SERIAL_EXTRA_IRQ_FLAGS IRQF_TRIGGER_HIGH | 5 | #define SERIAL_EXTRA_IRQ_FLAGS IRQF_TRIGGER_HIGH |
6 | #define BASE_BAUD (1843200 / 16) | ||
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h new file mode 100644 index 000000000000..118deeeae7c0 --- /dev/null +++ b/arch/blackfin/include/asm/smp.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * File: arch/blackfin/include/asm/smp.h | ||
3 | * Author: Philippe Gerum <rpm@xenomai.org> | ||
4 | * | ||
5 | * Copyright 2007 Analog Devices Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, see the file COPYING, or write | ||
19 | * to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #ifndef __ASM_BLACKFIN_SMP_H | ||
24 | #define __ASM_BLACKFIN_SMP_H | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/threads.h> | ||
28 | #include <linux/cpumask.h> | ||
29 | #include <linux/cache.h> | ||
30 | #include <asm/blackfin.h> | ||
31 | #include <mach/smp.h> | ||
32 | |||
33 | #define raw_smp_processor_id() blackfin_core_id() | ||
34 | |||
35 | extern char coreb_trampoline_start, coreb_trampoline_end; | ||
36 | |||
37 | struct corelock_slot { | ||
38 | int lock; | ||
39 | }; | ||
40 | |||
41 | void smp_icache_flush_range_others(unsigned long start, | ||
42 | unsigned long end); | ||
43 | |||
44 | #endif /* !__ASM_BLACKFIN_SMP_H */ | ||
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index 64e908a50646..0249ac319476 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h | |||
@@ -1,6 +1,89 @@ | |||
1 | #ifndef __BFIN_SPINLOCK_H | 1 | #ifndef __BFIN_SPINLOCK_H |
2 | #define __BFIN_SPINLOCK_H | 2 | #define __BFIN_SPINLOCK_H |
3 | 3 | ||
4 | #error blackfin architecture does not support SMP spin lock yet | 4 | #include <asm/atomic.h> |
5 | 5 | ||
6 | #endif | 6 | asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr); |
7 | asmlinkage void __raw_spin_lock_asm(volatile int *ptr); | ||
8 | asmlinkage int __raw_spin_trylock_asm(volatile int *ptr); | ||
9 | asmlinkage void __raw_spin_unlock_asm(volatile int *ptr); | ||
10 | asmlinkage void __raw_read_lock_asm(volatile int *ptr); | ||
11 | asmlinkage int __raw_read_trylock_asm(volatile int *ptr); | ||
12 | asmlinkage void __raw_read_unlock_asm(volatile int *ptr); | ||
13 | asmlinkage void __raw_write_lock_asm(volatile int *ptr); | ||
14 | asmlinkage int __raw_write_trylock_asm(volatile int *ptr); | ||
15 | asmlinkage void __raw_write_unlock_asm(volatile int *ptr); | ||
16 | |||
17 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | ||
18 | { | ||
19 | return __raw_spin_is_locked_asm(&lock->lock); | ||
20 | } | ||
21 | |||
22 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | ||
23 | { | ||
24 | __raw_spin_lock_asm(&lock->lock); | ||
25 | } | ||
26 | |||
27 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | ||
28 | |||
29 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
30 | { | ||
31 | return __raw_spin_trylock_asm(&lock->lock); | ||
32 | } | ||
33 | |||
34 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | ||
35 | { | ||
36 | __raw_spin_unlock_asm(&lock->lock); | ||
37 | } | ||
38 | |||
39 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | ||
40 | { | ||
41 | while (__raw_spin_is_locked(lock)) | ||
42 | cpu_relax(); | ||
43 | } | ||
44 | |||
45 | static inline int __raw_read_can_lock(raw_rwlock_t *rw) | ||
46 | { | ||
47 | return __raw_uncached_fetch_asm(&rw->lock) > 0; | ||
48 | } | ||
49 | |||
50 | static inline int __raw_write_can_lock(raw_rwlock_t *rw) | ||
51 | { | ||
52 | return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; | ||
53 | } | ||
54 | |||
55 | static inline void __raw_read_lock(raw_rwlock_t *rw) | ||
56 | { | ||
57 | __raw_read_lock_asm(&rw->lock); | ||
58 | } | ||
59 | |||
60 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | ||
61 | { | ||
62 | return __raw_read_trylock_asm(&rw->lock); | ||
63 | } | ||
64 | |||
65 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | ||
66 | { | ||
67 | __raw_read_unlock_asm(&rw->lock); | ||
68 | } | ||
69 | |||
70 | static inline void __raw_write_lock(raw_rwlock_t *rw) | ||
71 | { | ||
72 | __raw_write_lock_asm(&rw->lock); | ||
73 | } | ||
74 | |||
75 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | ||
76 | { | ||
77 | return __raw_write_trylock_asm(&rw->lock); | ||
78 | } | ||
79 | |||
80 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | ||
81 | { | ||
82 | __raw_write_unlock_asm(&rw->lock); | ||
83 | } | ||
84 | |||
85 | #define _raw_spin_relax(lock) cpu_relax() | ||
86 | #define _raw_read_relax(lock) cpu_relax() | ||
87 | #define _raw_write_relax(lock) cpu_relax() | ||
88 | |||
89 | #endif /* !__BFIN_SPINLOCK_H */ | ||
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h new file mode 100644 index 000000000000..b1e3c4c7b382 --- /dev/null +++ b/arch/blackfin/include/asm/spinlock_types.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef __ASM_SPINLOCK_TYPES_H | ||
2 | #define __ASM_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | #include <asm/rwlock.h> | ||
9 | |||
10 | typedef struct { | ||
11 | volatile unsigned int lock; | ||
12 | } raw_spinlock_t; | ||
13 | |||
14 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
15 | |||
16 | typedef struct { | ||
17 | volatile unsigned int lock; | ||
18 | } raw_rwlock_t; | ||
19 | |||
20 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | ||
21 | |||
22 | #endif | ||
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h index 8f1627d8bf09..a4c8254bec55 100644 --- a/arch/blackfin/include/asm/system.h +++ b/arch/blackfin/include/asm/system.h | |||
@@ -37,114 +37,98 @@ | |||
37 | #include <linux/linkage.h> | 37 | #include <linux/linkage.h> |
38 | #include <linux/compiler.h> | 38 | #include <linux/compiler.h> |
39 | #include <mach/anomaly.h> | 39 | #include <mach/anomaly.h> |
40 | #include <asm/pda.h> | ||
41 | #include <asm/processor.h> | ||
42 | #include <asm/irq.h> | ||
40 | 43 | ||
41 | /* | 44 | /* |
42 | * Interrupt configuring macros. | 45 | * Force strict CPU ordering. |
43 | */ | 46 | */ |
47 | #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) | ||
48 | #define mb() __asm__ __volatile__ ("" : : : "memory") | ||
49 | #define rmb() __asm__ __volatile__ ("" : : : "memory") | ||
50 | #define wmb() __asm__ __volatile__ ("" : : : "memory") | ||
51 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | ||
52 | #define read_barrier_depends() do { } while(0) | ||
44 | 53 | ||
45 | extern unsigned long irq_flags; | 54 | #ifdef CONFIG_SMP |
46 | 55 | asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); | |
47 | #define local_irq_enable() \ | 56 | asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value); |
48 | __asm__ __volatile__( \ | 57 | asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value); |
49 | "sti %0;" \ | 58 | asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr, |
50 | : \ | 59 | unsigned long new, unsigned long old); |
51 | : "d" (irq_flags) \ | 60 | asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr, |
52 | ) | 61 | unsigned long new, unsigned long old); |
53 | 62 | asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, | |
54 | #define local_irq_disable() \ | 63 | unsigned long new, unsigned long old); |
55 | do { \ | 64 | |
56 | int __tmp_dummy; \ | 65 | #ifdef __ARCH_SYNC_CORE_DCACHE |
57 | __asm__ __volatile__( \ | 66 | # define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) |
58 | "cli %0;" \ | 67 | # define smp_rmb() do { barrier(); smp_check_barrier(); } while (0) |
59 | : "=d" (__tmp_dummy) \ | 68 | # define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0) |
60 | ); \ | 69 | #define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) |
61 | } while (0) | ||
62 | |||
63 | #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE) | ||
64 | # define NOP_PAD_ANOMALY_05000244 "nop; nop;" | ||
65 | #else | ||
66 | # define NOP_PAD_ANOMALY_05000244 | ||
67 | #endif | ||
68 | |||
69 | #define idle_with_irq_disabled() \ | ||
70 | __asm__ __volatile__( \ | ||
71 | NOP_PAD_ANOMALY_05000244 \ | ||
72 | ".align 8;" \ | ||
73 | "sti %0;" \ | ||
74 | "idle;" \ | ||
75 | : \ | ||
76 | : "d" (irq_flags) \ | ||
77 | ) | ||
78 | |||
79 | #ifdef CONFIG_DEBUG_HWERR | ||
80 | # define __save_and_cli(x) \ | ||
81 | __asm__ __volatile__( \ | ||
82 | "cli %0;" \ | ||
83 | "sti %1;" \ | ||
84 | : "=&d" (x) \ | ||
85 | : "d" (0x3F) \ | ||
86 | ) | ||
87 | #else | ||
88 | # define __save_and_cli(x) \ | ||
89 | __asm__ __volatile__( \ | ||
90 | "cli %0;" \ | ||
91 | : "=&d" (x) \ | ||
92 | ) | ||
93 | #endif | ||
94 | |||
95 | #define local_save_flags(x) \ | ||
96 | __asm__ __volatile__( \ | ||
97 | "cli %0;" \ | ||
98 | "sti %0;" \ | ||
99 | : "=d" (x) \ | ||
100 | ) | ||
101 | 70 | ||
102 | #ifdef CONFIG_DEBUG_HWERR | ||
103 | #define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0) | ||
104 | #else | 71 | #else |
105 | #define irqs_enabled_from_flags(x) ((x) != 0x1f) | 72 | # define smp_mb() barrier() |
73 | # define smp_rmb() barrier() | ||
74 | # define smp_wmb() barrier() | ||
75 | #define smp_read_barrier_depends() barrier() | ||
106 | #endif | 76 | #endif |
107 | 77 | ||
108 | #define local_irq_restore(x) \ | 78 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
109 | do { \ | 79 | int size) |
110 | if (irqs_enabled_from_flags(x)) \ | 80 | { |
111 | local_irq_enable(); \ | 81 | unsigned long tmp; |
112 | } while (0) | ||
113 | 82 | ||
114 | /* For spinlocks etc */ | 83 | switch (size) { |
115 | #define local_irq_save(x) __save_and_cli(x) | 84 | case 1: |
85 | tmp = __raw_xchg_1_asm(ptr, x); | ||
86 | break; | ||
87 | case 2: | ||
88 | tmp = __raw_xchg_2_asm(ptr, x); | ||
89 | break; | ||
90 | case 4: | ||
91 | tmp = __raw_xchg_4_asm(ptr, x); | ||
92 | break; | ||
93 | } | ||
116 | 94 | ||
117 | #define irqs_disabled() \ | 95 | return tmp; |
118 | ({ \ | 96 | } |
119 | unsigned long flags; \ | ||
120 | local_save_flags(flags); \ | ||
121 | !irqs_enabled_from_flags(flags); \ | ||
122 | }) | ||
123 | 97 | ||
124 | /* | 98 | /* |
125 | * Force strict CPU ordering. | 99 | * Atomic compare and exchange. Compare OLD with MEM, if identical, |
100 | * store NEW in MEM. Return the initial value in MEM. Success is | ||
101 | * indicated by comparing RETURN with OLD. | ||
126 | */ | 102 | */ |
127 | #define nop() asm volatile ("nop;\n\t"::) | 103 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, |
128 | #define mb() asm volatile ("" : : :"memory") | 104 | unsigned long new, int size) |
129 | #define rmb() asm volatile ("" : : :"memory") | 105 | { |
130 | #define wmb() asm volatile ("" : : :"memory") | 106 | unsigned long tmp; |
131 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | ||
132 | 107 | ||
133 | #define read_barrier_depends() do { } while(0) | 108 | switch (size) { |
109 | case 1: | ||
110 | tmp = __raw_cmpxchg_1_asm(ptr, new, old); | ||
111 | break; | ||
112 | case 2: | ||
113 | tmp = __raw_cmpxchg_2_asm(ptr, new, old); | ||
114 | break; | ||
115 | case 4: | ||
116 | tmp = __raw_cmpxchg_4_asm(ptr, new, old); | ||
117 | break; | ||
118 | } | ||
119 | |||
120 | return tmp; | ||
121 | } | ||
122 | #define cmpxchg(ptr, o, n) \ | ||
123 | ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ | ||
124 | (unsigned long)(n), sizeof(*(ptr)))) | ||
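The comment above states the contract: cmpxchg() returns the value that was in memory, and the caller knows it won the race only when that return value equals the 'old' it passed in. A hedged illustration of the usual retry loop built on top of this primitive (atomic_or_word() is a made-up name for the sketch, not an API added by the patch):

static inline void atomic_or_word(volatile unsigned long *p, unsigned long bits)
{
        unsigned long old, new;

        do {
                old = *p;               /* snapshot the current value        */
                new = old | bits;       /* compute the desired new value     */
                /* retry if another CPU changed *p between the load and here */
        } while (cmpxchg(p, old, new) != old);
}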
125 | |||
126 | #else /* !CONFIG_SMP */ | ||
134 | 127 | ||
135 | #ifdef CONFIG_SMP | ||
136 | #define smp_mb() mb() | ||
137 | #define smp_rmb() rmb() | ||
138 | #define smp_wmb() wmb() | ||
139 | #define smp_read_barrier_depends() read_barrier_depends() | ||
140 | #else | ||
141 | #define smp_mb() barrier() | 128 | #define smp_mb() barrier() |
142 | #define smp_rmb() barrier() | 129 | #define smp_rmb() barrier() |
143 | #define smp_wmb() barrier() | 130 | #define smp_wmb() barrier() |
144 | #define smp_read_barrier_depends() do { } while(0) | 131 | #define smp_read_barrier_depends() do { } while(0) |
145 | #endif | ||
146 | |||
147 | #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | ||
148 | 132 | ||
149 | struct __xchg_dummy { | 133 | struct __xchg_dummy { |
150 | unsigned long a[100]; | 134 | unsigned long a[100]; |
@@ -157,7 +141,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | |||
157 | unsigned long tmp = 0; | 141 | unsigned long tmp = 0; |
158 | unsigned long flags = 0; | 142 | unsigned long flags = 0; |
159 | 143 | ||
160 | local_irq_save(flags); | 144 | local_irq_save_hw(flags); |
161 | 145 | ||
162 | switch (size) { | 146 | switch (size) { |
163 | case 1: | 147 | case 1: |
@@ -179,7 +163,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | |||
179 | : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); | 163 | : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); |
180 | break; | 164 | break; |
181 | } | 165 | } |
182 | local_irq_restore(flags); | 166 | local_irq_restore_hw(flags); |
183 | return tmp; | 167 | return tmp; |
184 | } | 168 | } |
185 | 169 | ||
@@ -194,9 +178,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | |||
194 | (unsigned long)(n), sizeof(*(ptr)))) | 178 | (unsigned long)(n), sizeof(*(ptr)))) |
195 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) | 179 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) |
196 | 180 | ||
197 | #ifndef CONFIG_SMP | ||
198 | #include <asm-generic/cmpxchg.h> | 181 | #include <asm-generic/cmpxchg.h> |
199 | #endif | 182 | |
183 | #endif /* !CONFIG_SMP */ | ||
184 | |||
185 | #define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) | ||
186 | #define tas(ptr) ((void)xchg((ptr), 1)) | ||
200 | 187 | ||
201 | #define prepare_to_switch() do { } while(0) | 188 | #define prepare_to_switch() do { } while(0) |
202 | 189 | ||
@@ -205,10 +192,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | |||
205 | * ptr isn't the current task, in which case it does nothing. | 192 | * ptr isn't the current task, in which case it does nothing. |
206 | */ | 193 | */ |
207 | 194 | ||
208 | #include <asm/blackfin.h> | 195 | #include <asm/l1layout.h> |
196 | #include <asm/mem_map.h> | ||
209 | 197 | ||
210 | asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next); | 198 | asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next); |
211 | 199 | ||
200 | #ifndef CONFIG_SMP | ||
212 | #define switch_to(prev,next,last) \ | 201 | #define switch_to(prev,next,last) \ |
213 | do { \ | 202 | do { \ |
214 | memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \ | 203 | memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \ |
@@ -217,5 +206,11 @@ do { \ | |||
217 | sizeof *L1_SCRATCH_TASK_INFO); \ | 206 | sizeof *L1_SCRATCH_TASK_INFO); \ |
218 | (last) = resume (prev, next); \ | 207 | (last) = resume (prev, next); \ |
219 | } while (0) | 208 | } while (0) |
209 | #else | ||
210 | #define switch_to(prev, next, last) \ | ||
211 | do { \ | ||
212 | (last) = resume(prev, next); \ | ||
213 | } while (0) | ||
214 | #endif | ||
220 | 215 | ||
221 | #endif /* _BLACKFIN_SYSTEM_H */ | 216 | #endif /* _BLACKFIN_SYSTEM_H */ |
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index 642769329d12..e721ce55956c 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h | |||
@@ -44,6 +44,7 @@ | |||
44 | */ | 44 | */ |
45 | #define THREAD_SIZE_ORDER 1 | 45 | #define THREAD_SIZE_ORDER 1 |
46 | #define THREAD_SIZE 8192 /* 2 pages */ | 46 | #define THREAD_SIZE 8192 /* 2 pages */ |
47 | #define STACK_WARN (THREAD_SIZE/8) | ||
47 | 48 | ||
48 | #ifndef __ASSEMBLY__ | 49 | #ifndef __ASSEMBLY__ |
49 | 50 | ||
@@ -62,7 +63,9 @@ struct thread_info { | |||
62 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 63 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
63 | mm_segment_t addr_limit; /* address limit */ | 64 | mm_segment_t addr_limit; /* address limit */ |
64 | struct restart_block restart_block; | 65 | struct restart_block restart_block; |
66 | #ifndef CONFIG_SMP | ||
65 | struct l1_scratch_task_info l1_task_info; | 67 | struct l1_scratch_task_info l1_task_info; |
68 | #endif | ||
66 | }; | 69 | }; |
67 | 70 | ||
68 | /* | 71 | /* |
@@ -90,7 +93,7 @@ __attribute_const__ | |||
90 | static inline struct thread_info *current_thread_info(void) | 93 | static inline struct thread_info *current_thread_info(void) |
91 | { | 94 | { |
92 | struct thread_info *ti; | 95 | struct thread_info *ti; |
93 | __asm__("%0 = sp;": "=&d"(ti): | 96 | __asm__("%0 = sp;" : "=da"(ti) : |
94 | ); | 97 | ); |
95 | return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1)); | 98 | return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1)); |
96 | } | 99 | } |
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index d928b8099056..3248033531e6 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h | |||
@@ -149,54 +149,42 @@ static inline int bad_user_access_length(void) | |||
149 | : /* no outputs */ \ | 149 | : /* no outputs */ \ |
150 | :"d" (x),"a" (__ptr(p)) : "memory") | 150 | :"d" (x),"a" (__ptr(p)) : "memory") |
151 | 151 | ||
152 | #define get_user(x,p) \ | 152 | #define get_user(x, ptr) \ |
153 | ({ \ | 153 | ({ \ |
154 | int _err = 0; \ | 154 | int _err = 0; \ |
155 | typeof(*(p)) *_p = (p); \ | 155 | unsigned long _val = 0; \ |
156 | if (!access_ok(VERIFY_READ, _p, sizeof(*(_p)))) { \ | 156 | const typeof(*(ptr)) __user *_p = (ptr); \ |
157 | _err = -EFAULT; \ | 157 | const size_t ptr_size = sizeof(*(_p)); \ |
158 | } \ | 158 | if (likely(access_ok(VERIFY_READ, _p, ptr_size))) { \ |
159 | else { \ | 159 | BUILD_BUG_ON(ptr_size >= 8); \ |
160 | switch (sizeof(*(_p))) { \ | 160 | switch (ptr_size) { \ |
161 | case 1: \ | 161 | case 1: \ |
162 | __get_user_asm(x, _p, B,(Z)); \ | 162 | __get_user_asm(_val, _p, B,(Z)); \ |
163 | break; \ | 163 | break; \ |
164 | case 2: \ | 164 | case 2: \ |
165 | __get_user_asm(x, _p, W,(Z)); \ | 165 | __get_user_asm(_val, _p, W,(Z)); \ |
166 | break; \ | 166 | break; \ |
167 | case 4: \ | 167 | case 4: \ |
168 | __get_user_asm(x, _p, , ); \ | 168 | __get_user_asm(_val, _p, , ); \ |
169 | break; \ | 169 | break; \ |
170 | case 8: { \ | 170 | } \ |
171 | unsigned long _xl, _xh; \ | 171 | } else \ |
172 | __get_user_asm(_xl, ((unsigned long *)_p)+0, , ); \ | 172 | _err = -EFAULT; \ |
173 | __get_user_asm(_xh, ((unsigned long *)_p)+1, , ); \ | 173 | x = (typeof(*(ptr)))_val; \ |
174 | ((unsigned long *)&x)[0] = _xl; \ | 174 | _err; \ |
175 | ((unsigned long *)&x)[1] = _xh; \ | 175 | }) |
176 | } break; \ | ||
177 | default: \ | ||
178 | x = 0; \ | ||
179 | printk(KERN_INFO "get_user_bad: %s:%d %s\n", \ | ||
180 | __FILE__, __LINE__, __func__); \ | ||
181 | _err = __get_user_bad(); \ | ||
182 | break; \ | ||
183 | } \ | ||
184 | } \ | ||
185 | _err; \ | ||
186 | }) | ||
187 | 176 | ||
188 | #define __get_user(x,p) get_user(x,p) | 177 | #define __get_user(x,p) get_user(x,p) |
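After this rework, get_user() evaluates to 0 on success or -EFAULT on a bad pointer, and x is always assigned (it ends up 0 on failure, since the read is only performed when access_ok() passes). A hedged usage sketch (read_user_int() is a hypothetical caller, not part of the patch):

static int read_user_int(const int __user *uptr, int *out)
{
        int val;

        if (get_user(val, uptr))        /* -EFAULT when the pointer is bad */
                return -EFAULT;

        *out = val;
        return 0;
}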
189 | 178 | ||
190 | #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) | 179 | #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) |
191 | 180 | ||
192 | #define __get_user_asm(x,p,bhw,option) \ | 181 | #define __get_user_asm(x, ptr, bhw, option) \ |
193 | { \ | 182 | ({ \ |
194 | unsigned long _tmp; \ | 183 | __asm__ __volatile__ ( \ |
195 | __asm__ ("%0 =" #bhw "[%1]"#option";\n\t" \ | 184 | "%0 =" #bhw "[%1]" #option ";" \ |
196 | : "=d" (_tmp) \ | 185 | : "=d" (x) \ |
197 | : "a" (__ptr(p))); \ | 186 | : "a" (__ptr(ptr))); \ |
198 | (x) = (__typeof__(*(p))) _tmp; \ | 187 | }) |
199 | } | ||
200 | 188 | ||
201 | #define __copy_from_user(to, from, n) copy_from_user(to, from, n) | 189 | #define __copy_from_user(to, from, n) copy_from_user(to, from, n) |
202 | #define __copy_to_user(to, from, n) copy_to_user(to, from, n) | 190 | #define __copy_to_user(to, from, n) copy_to_user(to, from, n) |
@@ -209,8 +197,8 @@ static inline int bad_user_access_length(void) | |||
209 | #define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\ | 197 | #define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\ |
210 | return retval; }) | 198 | return retval; }) |
211 | 199 | ||
212 | static inline long copy_from_user(void *to, | 200 | static inline unsigned long __must_check |
213 | const void __user * from, unsigned long n) | 201 | copy_from_user(void *to, const void __user *from, unsigned long n) |
214 | { | 202 | { |
215 | if (access_ok(VERIFY_READ, from, n)) | 203 | if (access_ok(VERIFY_READ, from, n)) |
216 | memcpy(to, from, n); | 204 | memcpy(to, from, n); |
@@ -219,8 +207,8 @@ static inline long copy_from_user(void *to, | |||
219 | return 0; | 207 | return 0; |
220 | } | 208 | } |
221 | 209 | ||
222 | static inline long copy_to_user(void *to, | 210 | static inline unsigned long __must_check |
223 | const void __user * from, unsigned long n) | 211 | copy_to_user(void *to, const void __user *from, unsigned long n) |
224 | { | 212 | { |
225 | if (access_ok(VERIFY_WRITE, to, n)) | 213 | if (access_ok(VERIFY_WRITE, to, n)) |
226 | memcpy(to, from, n); | 214 | memcpy(to, from, n); |
@@ -233,8 +221,8 @@ static inline long copy_to_user(void *to, | |||
233 | * Copy a null terminated string from userspace. | 221 | * Copy a null terminated string from userspace. |
234 | */ | 222 | */ |
235 | 223 | ||
236 | static inline long strncpy_from_user(char *dst, | 224 | static inline long __must_check |
237 | const char *src, long count) | 225 | strncpy_from_user(char *dst, const char *src, long count) |
238 | { | 226 | { |
239 | char *tmp; | 227 | char *tmp; |
240 | if (!access_ok(VERIFY_READ, src, 1)) | 228 | if (!access_ok(VERIFY_READ, src, 1)) |
@@ -260,7 +248,8 @@ static inline long strnlen_user(const char *src, long n) | |||
260 | * Zero Userspace | 248 | * Zero Userspace |
261 | */ | 249 | */ |
262 | 250 | ||
263 | static inline unsigned long __clear_user(void *to, unsigned long n) | 251 | static inline unsigned long __must_check |
252 | __clear_user(void *to, unsigned long n) | ||
264 | { | 253 | { |
265 | memset(to, 0, n); | 254 | memset(to, 0, n); |
266 | return 0; | 255 | return 0; |
diff --git a/arch/blackfin/include/asm/xor.h b/arch/blackfin/include/asm/xor.h new file mode 100644 index 000000000000..c82eb12a5b18 --- /dev/null +++ b/arch/blackfin/include/asm/xor.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/xor.h> | |||