author	Mikael Starvik <mikael.starvik@axis.com>	2005-07-27 14:44:42 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 19:26:01 -0400
commit	8d20a541b089ecb67a88a673548161b686ed7b85 (patch)
tree	53bac804d538068c80684becb76cd76937956502
parent	21783c9746619a782c21be606f6498bbd4d4615e (diff)
[PATCH] CRIS update: SMP
Patches to support SMP.

* Each CPU has its own current_pgd.
* flush_tlb_range is implemented as flush_tlb_mm.
* Atomic operations implemented with spinlocks.
* Semaphores implemented with spinlocks.

Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
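(Editorial note, not part of the patch.) The arch-v10 header added below makes cris_atomic_save()/cris_atomic_restore() plain interrupt-disable wrappers, which is only enough on a uniprocessor chip. For an SMP-capable CRIS variant, "atomic operations implemented with spinlocks" from the message above would mean backing the same two hooks with a lock as well, roughly as in the sketch below; the names cris_atomic_locks, LOCK_COUNT and HASH_ADDR are hypothetical and do not come from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  An SMP-capable
 * CRIS variant could back the same hooks with a small hash of spinlocks
 * keyed on the atomic variable's address.
 */
#include <linux/spinlock.h>	/* spinlock_t, spin_lock(), spin_unlock() */
#include <asm/system.h>		/* local_irq_save()/local_irq_restore() in this kernel era */

#define LOCK_COUNT 128

extern spinlock_t cris_atomic_locks[LOCK_COUNT];

/* Map an atomic_t address onto one of the LOCK_COUNT locks. */
#define HASH_ADDR(a) (((unsigned long)(a)) & (LOCK_COUNT - 1))

/* Block local interrupts, then serialize against other CPUs. */
#define cris_atomic_save(addr, flags) \
	do { \
		local_irq_save(flags); \
		spin_lock(&cris_atomic_locks[HASH_ADDR(addr)]); \
	} while (0)

/* Drop the lock, then restore the saved interrupt state. */
#define cris_atomic_restore(addr, flags) \
	do { \
		spin_unlock(&cris_atomic_locks[HASH_ADDR(addr)]); \
		local_irq_restore(flags); \
	} while (0)

Hashing on the variable's address keeps the lock array small while still serializing concurrent read-modify-write sequences on the same atomic_t across CPUs.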
-rw-r--r--	arch/cris/arch-v10/mm/fault.c	26
-rw-r--r--	arch/cris/arch-v10/mm/init.c	2
-rw-r--r--	arch/cris/arch-v10/mm/tlb.c	49
-rw-r--r--	include/asm-cris/arch-v10/atomic.h	7
-rw-r--r--	include/asm-cris/atomic.h	66
-rw-r--r--	include/asm-cris/mmu_context.h	2
-rw-r--r--	include/asm-cris/semaphore.h	21
-rw-r--r--	include/asm-cris/smp.h	7
-rw-r--r--	include/asm-cris/spinlock.h	1
-rw-r--r--	include/asm-cris/tlbflush.h	19
10 files changed, 69 insertions, 131 deletions
diff --git a/arch/cris/arch-v10/mm/fault.c b/arch/cris/arch-v10/mm/fault.c
index 6805cdb25a53..fe2615022b97 100644
--- a/arch/cris/arch-v10/mm/fault.c
+++ b/arch/cris/arch-v10/mm/fault.c
@@ -14,6 +14,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/arch/svinto.h>
+#include <asm/mmu_context.h>
 
 /* debug of low-level TLB reload */
 #undef DEBUG
@@ -24,8 +25,6 @@
 #define D(x)
 #endif
 
-extern volatile pgd_t *current_pgd;
-
 extern const struct exception_table_entry
 	*search_exception_tables(unsigned long addr);
 
@@ -46,7 +45,7 @@ handle_mmu_bus_fault(struct pt_regs *regs)
 	int page_id;
 	int acc, inv;
 #endif
-	pgd_t* pgd = (pgd_t*)current_pgd;
+	pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id());
 	pmd_t *pmd;
 	pte_t pte;
 	int miss, we, writeac;
@@ -94,24 +93,3 @@ handle_mmu_bus_fault(struct pt_regs *regs)
 	*R_TLB_LO = pte_val(pte);
 	local_irq_restore(flags);
 }
-
-/* Called from arch/cris/mm/fault.c to find fixup code. */
-int
-find_fixup_code(struct pt_regs *regs)
-{
-	const struct exception_table_entry *fixup;
-
-	if ((fixup = search_exception_tables(regs->irp)) != 0) {
-		/* Adjust the instruction pointer in the stackframe. */
-		regs->irp = fixup->fixup;
-
-		/*
-		 * Don't return by restoring the CPU state, so switch
-		 * frame-type.
-		 */
-		regs->frametype = CRIS_FRAME_NORMAL;
-		return 1;
-	}
-
-	return 0;
-}
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
index a9f975a9cfb5..ff3481e76dd4 100644
--- a/arch/cris/arch-v10/mm/init.c
+++ b/arch/cris/arch-v10/mm/init.c
@@ -42,7 +42,7 @@ paging_init(void)
 	 * switch_mm)
 	 */
 
-	current_pgd = init_mm.pgd;
+	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
 
 	/* initialise the TLB (tlb.c) */
 
diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c
index 9d06125ff5a2..70a5523eff78 100644
--- a/arch/cris/arch-v10/mm/tlb.c
+++ b/arch/cris/arch-v10/mm/tlb.c
@@ -139,53 +139,6 @@ flush_tlb_page(struct vm_area_struct *vma,
 	local_irq_restore(flags);
 }
 
-/* invalidate a page range */
-
-void
-flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start,
-		unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int page_id = mm->context.page_id;
-	int i;
-	unsigned long flags;
-
-	D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
-		 start, end, page_id, mm));
-
-	if(page_id == NO_CONTEXT)
-		return;
-
-	start &= PAGE_MASK;  /* probably not necessary */
-	end &= PAGE_MASK;    /* dito */
-
-	/* invalidate those TLB entries that match both the mm context
-	 * and the virtual address range
-	 */
-
-	local_save_flags(flags);
-	local_irq_disable();
-	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
-		unsigned long tlb_hi, vpn;
-		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
-		tlb_hi = *R_TLB_HI;
-		vpn = tlb_hi & PAGE_MASK;
-		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
-		    vpn >= start && vpn < end) {
-			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
-				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
-
-			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no  ) |
-				      IO_STATE(R_TLB_LO, valid, no  ) |
-				      IO_STATE(R_TLB_LO, kernel,no  ) |
-				      IO_STATE(R_TLB_LO, we,    no  ) |
-				      IO_FIELD(R_TLB_LO, pfn,   0   ) );
-		}
-	}
-	local_irq_restore(flags);
-}
-
 /* dump the entire TLB for debug purposes */
 
 #if 0
@@ -237,7 +190,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * the pgd.
 	 */
 
-	current_pgd = next->pgd;
+	per_cpu(current_pgd, smp_processor_id()) = next->pgd;
 
 	/* switch context in the MMU */
 
diff --git a/include/asm-cris/arch-v10/atomic.h b/include/asm-cris/arch-v10/atomic.h
new file mode 100644
index 000000000000..6ef5e7d09024
--- /dev/null
+++ b/include/asm-cris/arch-v10/atomic.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_CRIS_ARCH_ATOMIC__
+#define __ASM_CRIS_ARCH_ATOMIC__
+
+#define cris_atomic_save(addr, flags) local_irq_save(flags);
+#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
+
+#endif
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index b3dfea5a71e4..70605b09e8b7 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -4,21 +4,14 @@
 #define __ASM_CRIS_ATOMIC__
 
 #include <asm/system.h>
+#include <asm/arch/atomic.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
  */
 
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-
-#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
-
-typedef struct { int counter; } atomic_t;
+typedef struct { volatile int counter; } atomic_t;
 
 #define ATOMIC_INIT(i)  { (i) }
 
@@ -30,29 +23,26 @@ typedef struct { int counter; } atomic_t;
 extern __inline__ void atomic_add(int i, volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	v->counter += i;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ void atomic_sub(int i, volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	v->counter -= i;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ int atomic_add_return(int i, volatile atomic_t *v)
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter += i);
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -62,10 +52,9 @@ extern __inline__ int atomic_sub_return(int i, volatile atomic_t *v)
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter -= i);
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -73,39 +62,35 @@ extern __inline__ int atomic_sub_and_test(int i, volatile atomic_t *v)
 {
 	int retval;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter -= i) == 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
 extern __inline__ void atomic_inc(volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	(v->counter)++;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ void atomic_dec(volatile atomic_t *v)
 {
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	(v->counter)--;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 }
 
 extern __inline__ int atomic_inc_return(volatile atomic_t *v)
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter)++;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -113,20 +98,18 @@ extern __inline__ int atomic_dec_return(volatile atomic_t *v)
 {
 	unsigned long flags;
 	int retval;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = (v->counter)--;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 extern __inline__ int atomic_dec_and_test(volatile atomic_t *v)
 {
 	int retval;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = --(v->counter) == 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
@@ -134,10 +117,9 @@ extern __inline__ int atomic_inc_and_test(volatile atomic_t *v)
 {
 	int retval;
 	unsigned long flags;
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(v, flags);
 	retval = ++(v->counter) == 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(v, flags);
 	return retval;
 }
 
diff --git a/include/asm-cris/mmu_context.h b/include/asm-cris/mmu_context.h
index f9308c5bbd99..e6e659dc757b 100644
--- a/include/asm-cris/mmu_context.h
+++ b/include/asm-cris/mmu_context.h
@@ -15,7 +15,7 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  * registers like cr3 on the i386
  */
 
-extern volatile pgd_t *current_pgd; /* defined in arch/cris/mm/fault.c */
+extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 605aa7eaaaf8..8ed7636ab311 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -72,10 +72,9 @@ extern inline void down(struct semaphore * sem)
 	might_sleep();
 
 	/* atomically decrement the semaphores count, and if its negative, we wait */
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	failed = --(sem->count.counter) < 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(failed) {
 		__down(sem);
 	}
@@ -95,10 +94,9 @@ extern inline int down_interruptible(struct semaphore * sem)
 	might_sleep();
 
 	/* atomically decrement the semaphores count, and if its negative, we wait */
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	failed = --(sem->count.counter) < 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(failed)
 		failed = __down_interruptible(sem);
 	return(failed);
@@ -109,13 +107,13 @@ extern inline int down_trylock(struct semaphore * sem)
 	unsigned long flags;
 	int failed;
 
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	failed = --(sem->count.counter) < 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(failed)
 		failed = __down_trylock(sem);
 	return(failed);
+
 }
 
 /*
@@ -130,10 +128,9 @@ extern inline void up(struct semaphore * sem)
 	int wakeup;
 
 	/* atomically increment the semaphores count, and if it was negative, we wake people */
-	local_save_flags(flags);
-	local_irq_disable();
+	cris_atomic_save(sem, flags);
 	wakeup = ++(sem->count.counter) <= 0;
-	local_irq_restore(flags);
+	cris_atomic_restore(sem, flags);
 	if(wakeup) {
 		__up(sem);
 	}
diff --git a/include/asm-cris/smp.h b/include/asm-cris/smp.h
index c2f4feaa041d..dca5ef1d8c97 100644
--- a/include/asm-cris/smp.h
+++ b/include/asm-cris/smp.h
@@ -1,4 +1,11 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <linux/cpumask.h>
+
+extern cpumask_t phys_cpu_present_map;
+#define cpu_possible_map phys_cpu_present_map
+
+#define __smp_processor_id() (current_thread_info()->cpu)
+
 #endif
diff --git a/include/asm-cris/spinlock.h b/include/asm-cris/spinlock.h
new file mode 100644
index 000000000000..2e8ba8afc7af
--- /dev/null
+++ b/include/asm-cris/spinlock.h
@@ -0,0 +1 @@
+#include <asm/arch/spinlock.h>
diff --git a/include/asm-cris/tlbflush.h b/include/asm-cris/tlbflush.h
index 1781fe1a32f6..6ed7d9ae90db 100644
--- a/include/asm-cris/tlbflush.h
+++ b/include/asm-cris/tlbflush.h
@@ -18,13 +18,26 @@
  *
  */
 
+extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(struct mm_struct *mm);
+extern void __flush_tlb_page(struct vm_area_struct *vma,
+			   unsigned long addr);
+
+#ifdef CONFIG_SMP
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma,
 			   unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-			    unsigned long start,
-			    unsigned long end);
+#else
+#define flush_tlb_all __flush_tlb_all
+#define flush_tlb_mm __flush_tlb_mm
+#define flush_tlb_page __flush_tlb_page
+#endif
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
 
 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
 		unsigned long start, unsigned long end)