Diffstat (limited to 'arch/arc/include')
-rw-r--r-- | arch/arc/include/asm/cache.h          |   2 |
-rw-r--r-- | arch/arc/include/asm/delay.h          |   5 |
-rw-r--r-- | arch/arc/include/asm/entry.h          |  24 |
-rw-r--r-- | arch/arc/include/asm/io.h             |   4 |
-rw-r--r-- | arch/arc/include/asm/irqflags.h       |   7 |
-rw-r--r-- | arch/arc/include/asm/mmu.h            |  11 |
-rw-r--r-- | arch/arc/include/asm/mmu_context.h    | 161 |
-rw-r--r-- | arch/arc/include/asm/pgtable.h        |  61 |
-rw-r--r-- | arch/arc/include/asm/ptrace.h         |  36 |
-rw-r--r-- | arch/arc/include/asm/spinlock_types.h |   6 |
10 files changed, 116 insertions, 201 deletions
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 5802849a6cae..e4abdaac6f9f 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -57,7 +57,7 @@
57 | 57 | ||
58 | extern void arc_cache_init(void); | 58 | extern void arc_cache_init(void); |
59 | extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); | 59 | extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); |
60 | extern void __init read_decode_cache_bcr(void); | 60 | extern void read_decode_cache_bcr(void); |
61 | 61 | ||
62 | #endif /* !__ASSEMBLY__ */ | 62 | #endif /* !__ASSEMBLY__ */ |
63 | 63 | ||
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 442ce5d0f709..43de30256981 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
53 | { | 53 | { |
54 | unsigned long loops; | 54 | unsigned long loops; |
55 | 55 | ||
56 | /* (long long) cast ensures 64 bit MPY - real or emulated | 56 | /* (u64) cast ensures 64 bit MPY - real or emulated |
57 | * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops | 57 | * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops |
58 | */ | 58 | */ |
59 | loops = ((long long)(usecs * 4295 * HZ) * | 59 | loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32; |
60 | (long long)(loops_per_jiffy)) >> 32; | ||
61 | 60 | ||
62 | __delay(loops); | 61 | __delay(loops); |
63 | } | 62 | } |
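Aside: the (u64) cast above works because 4295 is roughly 2^32 / 10^6, so multiplying by 4295 * HZ * loops_per_jiffy and shifting right by 32 approximates usecs * HZ * loops_per_jiffy / 1000000 with a single 64-bit multiply and no division. A minimal user-space sketch of the same arithmetic follows; HZ and the loops_per_jiffy value are made-up stand-ins chosen only so the example prints something, they are not values from this patch.

#include <stdio.h>
#include <stdint.h>

#define HZ              100         /* assumed tick rate - illustration only */
#define LOOPS_PER_JIFFY 50000UL     /* made-up calibration value */

/* Same math as the new __udelay(): one 64-bit multiply, no division */
static unsigned long usecs_to_loops(unsigned long usecs)
{
        return ((uint64_t)usecs * 4295 * HZ * LOOPS_PER_JIFFY) >> 32;
}

int main(void)
{
        /* exact answer would be usecs * HZ * LOOPS_PER_JIFFY / 1000000 */
        printf("100us -> %lu loops (exact: %lu)\n",
               usecs_to_loops(100),
               100UL * HZ * LOOPS_PER_JIFFY / 1000000);
        return 0;
}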
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index df57611652e5..884081099f80 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -365,7 +365,7 @@
365 | * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP). | 365 | * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP). |
366 | * | 366 | * |
367 | * Before saving the full regfile - this reg is restored back, only | 367 | * Before saving the full regfile - this reg is restored back, only |
368 | * to be saved again on kernel mode stack, as part of ptregs. | 368 | * to be saved again on kernel mode stack, as part of pt_regs. |
369 | *-------------------------------------------------------------*/ | 369 | *-------------------------------------------------------------*/ |
370 | .macro EXCPN_PROLOG_FREEUP_REG reg | 370 | .macro EXCPN_PROLOG_FREEUP_REG reg |
371 | #ifdef CONFIG_SMP | 371 | #ifdef CONFIG_SMP |
@@ -384,6 +384,28 @@
384 | .endm | 384 | .endm |
385 | 385 | ||
386 | /*-------------------------------------------------------------- | 386 | /*-------------------------------------------------------------- |
387 | * Exception Entry prologue | ||
388 | * -Switches stack to K mode (if not already) | ||
389 | * -Saves the register file | ||
390 | * | ||
391 | * After this it is safe to call the "C" handlers | ||
392 | *-------------------------------------------------------------*/ | ||
393 | .macro EXCEPTION_PROLOGUE | ||
394 | |||
395 | /* Need at least 1 reg to code the early exception prologue */ | ||
396 | EXCPN_PROLOG_FREEUP_REG r9 | ||
397 | |||
398 | /* U/K mode at time of exception (stack not switched if already K) */ | ||
399 | lr r9, [erstatus] | ||
400 | |||
401 | /* ARC700 doesn't provide auto-stack switching */ | ||
402 | SWITCH_TO_KERNEL_STK | ||
403 | |||
404 | /* save the regfile */ | ||
405 | SAVE_ALL_SYS | ||
406 | .endm | ||
407 | |||
408 | /*-------------------------------------------------------------- | ||
387 | * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc) | 409 | * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc) |
388 | * Requires SP to be already switched to kernel mode Stack | 410 | * Requires SP to be already switched to kernel mode Stack |
389 | * sp points to the next free element on the stack at exit of this macro. | 411 | * sp points to the next free element on the stack at exit of this macro. |
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 473424d7528b..334ce7017a18 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -100,6 +100,10 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
100 | 100 | ||
101 | } | 101 | } |
102 | 102 | ||
103 | #define readb_relaxed readb | ||
104 | #define readw_relaxed readw | ||
105 | #define readl_relaxed readl | ||
106 | |||
103 | #include <asm-generic/io.h> | 107 | #include <asm-generic/io.h> |
104 | 108 | ||
105 | #endif /* _ASM_ARC_IO_H */ | 109 | #endif /* _ASM_ARC_IO_H */ |
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index d99f79bcf865..b68b53f458d1 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -157,13 +157,6 @@ static inline void arch_unmask_irq(unsigned int irq)
157 | flag \scratch | 157 | flag \scratch |
158 | .endm | 158 | .endm |
159 | 159 | ||
160 | .macro IRQ_DISABLE_SAVE scratch, save | ||
161 | lr \scratch, [status32] | ||
162 | mov \save, \scratch /* Make a copy */ | ||
163 | bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) | ||
164 | flag \scratch | ||
165 | .endm | ||
166 | |||
167 | .macro IRQ_ENABLE scratch | 160 | .macro IRQ_ENABLE scratch |
168 | lr \scratch, [status32] | 161 | lr \scratch, [status32] |
169 | or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) | 162 | or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) |
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 7c03fe61759c..c2663b32866b 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -32,6 +32,8 @@
32 | /* Error code if probe fails */ | 32 | /* Error code if probe fails */ |
33 | #define TLB_LKUP_ERR 0x80000000 | 33 | #define TLB_LKUP_ERR 0x80000000 |
34 | 34 | ||
35 | #define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001) | ||
36 | |||
35 | /* TLB Commands */ | 37 | /* TLB Commands */ |
36 | #define TLBWrite 0x1 | 38 | #define TLBWrite 0x1 |
37 | #define TLBRead 0x2 | 39 | #define TLBRead 0x2 |
@@ -46,21 +48,18 @@ | |||
46 | #ifndef __ASSEMBLY__ | 48 | #ifndef __ASSEMBLY__ |
47 | 49 | ||
48 | typedef struct { | 50 | typedef struct { |
49 | unsigned long asid; /* Pvt Addr-Space ID for mm */ | 51 | unsigned long asid; /* 8 bit MMU PID + Generation cycle */ |
50 | #ifdef CONFIG_ARC_TLB_DBG | ||
51 | struct task_struct *tsk; | ||
52 | #endif | ||
53 | } mm_context_t; | 52 | } mm_context_t; |
54 | 53 | ||
55 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA | 54 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA |
56 | void tlb_paranoid_check(unsigned int pid_sw, unsigned long address); | 55 | void tlb_paranoid_check(unsigned int mm_asid, unsigned long address); |
57 | #else | 56 | #else |
58 | #define tlb_paranoid_check(a, b) | 57 | #define tlb_paranoid_check(a, b) |
59 | #endif | 58 | #endif |
60 | 59 | ||
61 | void arc_mmu_init(void); | 60 | void arc_mmu_init(void); |
62 | extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); | 61 | extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); |
63 | void __init read_decode_mmu_bcr(void); | 62 | void read_decode_mmu_bcr(void); |
64 | 63 | ||
65 | #endif /* !__ASSEMBLY__ */ | 64 | #endif /* !__ASSEMBLY__ */ |
66 | 65 | ||
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 0d71fb11b57c..43a1b51bb8cc 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -34,95 +34,65 @@
34 | * When it reaches max 255, the allocation cycle starts afresh by flushing | 34 | * When it reaches max 255, the allocation cycle starts afresh by flushing |
35 | * the entire TLB and wrapping ASID back to zero. | 35 | * the entire TLB and wrapping ASID back to zero. |
36 | * | 36 | * |
37 | * For book-keeping, Linux uses a couple of data-structures: | 37 | * A new allocation cycle, post rollover, could potentially reassign an ASID |
38 | * -mm_struct has an @asid field to keep a note of task's ASID (needed at the | 38 | * to a different task. Thus the rule is to refresh the ASID in a new cycle. |
39 | * time of say switch_mm( ) | 39 | * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits |
40 | * -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping, | 40 | * serve as cycle/generation indicator and natural 32 bit unsigned math |
41 | * given an ASID, finding the mm struct associated. | 41 | * automagically increments the generation when lower 8 bits rollover. |
42 | * | ||
43 | * The round-robin allocation algorithm allows for ASID stealing. | ||
44 | * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was | ||
45 | * already assigned to another (switched-out) task. Obviously the prev owner | ||
46 | * is marked with an invalid ASID to make it request for a new ASID when it | ||
47 | * gets scheduled next time. However its TLB entries (with ASID "x") could | ||
48 | * exist, which must be cleared before the same ASID is used by the new owner. | ||
49 | * Flushing them would be plausible but costly solution. Instead we force a | ||
50 | * allocation policy quirk, which ensures that a stolen ASID won't have any | ||
51 | * TLB entries associates, alleviating the need to flush. | ||
52 | * The quirk essentially is not allowing ASID allocated in prev cycle | ||
53 | * to be used past a roll-over in the next cycle. | ||
54 | * When this happens (i.e. task ASID > asid tracker), task needs to refresh | ||
55 | * its ASID, aligning it to current value of tracker. If the task doesn't get | ||
56 | * scheduled past a roll-over, hence its ASID is not yet realigned with | ||
57 | * tracker, such ASID is anyways safely reusable because it is | ||
58 | * gauranteed that TLB entries with that ASID wont exist. | ||
59 | */ | 42 | */ |
60 | 43 | ||
61 | #define FIRST_ASID 0 | 44 | #define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */ |
62 | #define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */ | 45 | #define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK) |
63 | #define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */ | 46 | |
64 | #define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1) | 47 | #define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1) |
48 | #define MM_CTXT_NO_ASID 0UL | ||
65 | 49 | ||
66 | /* ASID to mm struct mapping */ | 50 | #define hw_pid(mm) (mm->context.asid & MM_CTXT_ASID_MASK) |
67 | extern struct mm_struct *asid_mm_map[NUM_ASID + 1]; | ||
68 | 51 | ||
69 | extern int asid_cache; | 52 | extern unsigned int asid_cache; |
70 | 53 | ||
71 | /* | 54 | /* |
72 | * Assign a new ASID to task. If the task already has an ASID, it is | 55 | * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle) |
73 | * relinquished. | 56 | * Also set the MMU PID register to existing/updated ASID |
74 | */ | 57 | */ |
75 | static inline void get_new_mmu_context(struct mm_struct *mm) | 58 | static inline void get_new_mmu_context(struct mm_struct *mm) |
76 | { | 59 | { |
77 | struct mm_struct *prev_owner; | ||
78 | unsigned long flags; | 60 | unsigned long flags; |
79 | 61 | ||
80 | local_irq_save(flags); | 62 | local_irq_save(flags); |
81 | 63 | ||
82 | /* | 64 | /* |
83 | * Relinquish the currently owned ASID (if any). | 65 | * Move to new ASID if it was not from current alloc-cycle/generation. |
84 | * Doing unconditionally saves a cmp-n-branch; for already unused | 66 | * This is done by ensuring that the generation bits in both mm->ASID |
85 | * ASID slot, the value was/remains NULL | 67 | * and cpu's ASID counter are exactly the same. |
68 | * | ||
69 | * Note: Callers needing new ASID unconditionally, independent of | ||
70 | * generation, e.g. local_flush_tlb_mm() for forking parent, | ||
71 | * first need to destroy the context, setting it to invalid | ||
72 | * value. | ||
86 | */ | 73 | */ |
87 | asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL; | 74 | if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK)) |
75 | goto set_hw; | ||
76 | |||
77 | /* move to new ASID and handle rollover */ | ||
78 | if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) { | ||
88 | 79 | ||
89 | /* move to new ASID */ | ||
90 | if (++asid_cache > MAX_ASID) { /* ASID roll-over */ | ||
91 | asid_cache = FIRST_ASID; | ||
92 | flush_tlb_all(); | 80 | flush_tlb_all(); |
93 | } | ||
94 | 81 | ||
95 | /* | 82 | /* |
96 | * Is next ASID already owned by some-one else (we are stealing it). | 83 | * Above check for rollover of 8 bit ASID in 32 bit container. |
97 | * If so, let the orig owner be aware of this, so when it runs, it | 84 | * If the container itself wrapped around, set it to a non zero |
98 | * asks for a brand new ASID. This would only happen for a long-lived | 85 | * "generation" to distinguish from no context |
99 | * task with ASID from prev allocation cycle (before ASID roll-over). | 86 | */ |
100 | * | 87 | if (!asid_cache) |
101 | * This might look wrong - if we are re-using some other task's ASID, | 88 | asid_cache = MM_CTXT_FIRST_CYCLE; |
102 | * won't we use it's stale TLB entries too. Actually switch_mm( ) takes | 89 | } |
103 | * care of such a case: it ensures that task with ASID from prev alloc | ||
104 | * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below | ||
105 | * The stealing scenario described here will only happen if that task | ||
106 | * didn't get a chance to refresh it's ASID - implying stale entries | ||
107 | * won't exist. | ||
108 | */ | ||
109 | prev_owner = asid_mm_map[asid_cache]; | ||
110 | if (prev_owner) | ||
111 | prev_owner->context.asid = NO_ASID; | ||
112 | 90 | ||
113 | /* Assign new ASID to tsk */ | 91 | /* Assign new ASID to tsk */ |
114 | asid_mm_map[asid_cache] = mm; | ||
115 | mm->context.asid = asid_cache; | 92 | mm->context.asid = asid_cache; |
116 | 93 | ||
117 | #ifdef CONFIG_ARC_TLB_DBG | 94 | set_hw: |
118 | pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s," | 95 | write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE); |
119 | " pid:%u, assigned asid:%lu\n", | ||
120 | (unsigned int)mm, (unsigned int)prev_owner, | ||
121 | (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm, | ||
122 | (mm->context.tsk)->pid, mm->context.asid); | ||
123 | #endif | ||
124 | |||
125 | write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE); | ||
126 | 96 | ||
127 | local_irq_restore(flags); | 97 | local_irq_restore(flags); |
128 | } | 98 | } |
@@ -134,10 +104,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
134 | static inline int | 104 | static inline int |
135 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 105 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
136 | { | 106 | { |
137 | mm->context.asid = NO_ASID; | 107 | mm->context.asid = MM_CTXT_NO_ASID; |
138 | #ifdef CONFIG_ARC_TLB_DBG | ||
139 | mm->context.tsk = tsk; | ||
140 | #endif | ||
141 | return 0; | 108 | return 0; |
142 | } | 109 | } |
143 | 110 | ||
@@ -152,40 +119,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
152 | write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); | 119 | write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); |
153 | #endif | 120 | #endif |
154 | 121 | ||
155 | /* | 122 | get_new_mmu_context(next); |
156 | * Get a new ASID if task doesn't have a valid one. Possible when | ||
157 | * -task never had an ASID (fresh after fork) | ||
158 | * -it's ASID was stolen - past an ASID roll-over. | ||
159 | * -There's a third obscure scenario (if this task is running for the | ||
160 | * first time afer an ASID rollover), where despite having a valid | ||
161 | * ASID, we force a get for new ASID - see comments at top. | ||
162 | * | ||
163 | * Both the non-alloc scenario and first-use-after-rollover can be | ||
164 | * detected using the single condition below: NO_ASID = 256 | ||
165 | * while asid_cache is always a valid ASID value (0-255). | ||
166 | */ | ||
167 | if (next->context.asid > asid_cache) { | ||
168 | get_new_mmu_context(next); | ||
169 | } else { | ||
170 | /* | ||
171 | * XXX: This will never happen given the chks above | ||
172 | * BUG_ON(next->context.asid > MAX_ASID); | ||
173 | */ | ||
174 | write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE); | ||
175 | } | ||
176 | |||
177 | } | 123 | } |
178 | 124 | ||
125 | /* | ||
126 | * Called at the time of execve() to get a new ASID | ||
127 | * Note the subtlety here: get_new_mmu_context() behaves differently here | ||
128 | * vs. in switch_mm(). Here it always returns a new ASID, because mm has | ||
129 | * an unallocated "initial" value, while in latter, it moves to a new ASID, | ||
130 | * only if it was unallocated | ||
131 | */ | ||
132 | #define activate_mm(prev, next) switch_mm(prev, next, NULL) | ||
133 | |||
179 | static inline void destroy_context(struct mm_struct *mm) | 134 | static inline void destroy_context(struct mm_struct *mm) |
180 | { | 135 | { |
181 | unsigned long flags; | 136 | mm->context.asid = MM_CTXT_NO_ASID; |
182 | |||
183 | local_irq_save(flags); | ||
184 | |||
185 | asid_mm_map[mm->context.asid] = NULL; | ||
186 | mm->context.asid = NO_ASID; | ||
187 | |||
188 | local_irq_restore(flags); | ||
189 | } | 137 | } |
190 | 138 | ||
191 | /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping | 139 | /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping |
@@ -197,17 +145,6 @@ static inline void destroy_context(struct mm_struct *mm)
197 | */ | 145 | */ |
198 | #define deactivate_mm(tsk, mm) do { } while (0) | 146 | #define deactivate_mm(tsk, mm) do { } while (0) |
199 | 147 | ||
200 | static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) | ||
201 | { | ||
202 | #ifndef CONFIG_SMP | ||
203 | write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); | ||
204 | #endif | ||
205 | |||
206 | /* Unconditionally get a new ASID */ | ||
207 | get_new_mmu_context(next); | ||
208 | |||
209 | } | ||
210 | |||
211 | #define enter_lazy_tlb(mm, tsk) | 148 | #define enter_lazy_tlb(mm, tsk) |
212 | 149 | ||
213 | #endif /* __ASM_ARC_MMU_CONTEXT_H */ | 150 | #endif /* __ASM_ARC_MMU_CONTEXT_H */ |
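The rewritten allocator above packs the 8-bit hardware PID and a 24-bit generation/cycle count into one 32-bit value, so a single XOR of the cycle bits tells whether an mm's ASID is still from the current generation. Below is a minimal user-space sketch of that bookkeeping, reusing the MM_CTXT_* constants from the hunk; flush_tlb_all() is stubbed out and the MMU PID register write is omitted, because the point is only the generation arithmetic, not the real kernel interfaces.

#include <stdio.h>

#define MM_CTXT_ASID_MASK   0x000000ffU
#define MM_CTXT_CYCLE_MASK  (~MM_CTXT_ASID_MASK)
#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID     0U

static unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;

static void flush_tlb_all(void) { puts("  (TLB flushed)"); }  /* stub */

/* Mirrors get_new_mmu_context(): reuse the ASID if it is from the
 * current generation, otherwise allocate the next one. */
static unsigned int get_asid(unsigned int mm_asid)
{
        if (!((mm_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
                return mm_asid;                     /* still valid this cycle */

        if (!(++asid_cache & MM_CTXT_ASID_MASK)) {  /* 8-bit PID rolled over */
                flush_tlb_all();
                if (!asid_cache)                    /* 32-bit container wrapped */
                        asid_cache = MM_CTXT_FIRST_CYCLE;
        }
        return asid_cache;
}

int main(void)
{
        unsigned int asid = MM_CTXT_NO_ASID;

        for (int i = 0; i < 3; i++) {
                asid = get_asid(asid);
                printf("asid=0x%08x hw_pid=%u cycle=0x%06x\n",
                       asid, asid & MM_CTXT_ASID_MASK, asid >> 8);
        }
        return 0;
}

The first call allocates a fresh ASID (generation bits differ), and the later calls simply reuse it, which is exactly the fast path taken by switch_mm() after this change.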
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 4749a0eee1cf..6b0b7f7ef783 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -57,43 +57,31 @@
57 | 57 | ||
58 | #define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */ | 58 | #define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */ |
59 | #define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */ | 59 | #define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */ |
60 | #define _PAGE_U_EXECUTE (1<<3) /* Page has user execute perm (H) */ | 60 | #define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */ |
61 | #define _PAGE_U_WRITE (1<<4) /* Page has user write perm (H) */ | 61 | #define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */ |
62 | #define _PAGE_U_READ (1<<5) /* Page has user read perm (H) */ | 62 | #define _PAGE_READ (1<<5) /* Page has user read perm (H) */ |
63 | #define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */ | 63 | #define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */ |
64 | #define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */ | 64 | #define _PAGE_FILE (1<<7) /* page cache/ swap (S) */ |
65 | #define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */ | 65 | #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ |
66 | #define _PAGE_GLOBAL (1<<9) /* Page is global (H) */ | 66 | #define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */ |
67 | #define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */ | ||
68 | #define _PAGE_FILE (1<<10) /* page cache/ swap (S) */ | ||
69 | #define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */ | ||
70 | 67 | ||
71 | #else | 68 | #else /* MMU v3 onwards */ |
72 | 69 | ||
73 | /* PD1 */ | ||
74 | #define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ | 70 | #define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ |
75 | #define _PAGE_U_EXECUTE (1<<1) /* Page has user execute perm (H) */ | 71 | #define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */ |
76 | #define _PAGE_U_WRITE (1<<2) /* Page has user write perm (H) */ | 72 | #define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ |
77 | #define _PAGE_U_READ (1<<3) /* Page has user read perm (H) */ | 73 | #define _PAGE_READ (1<<3) /* Page has user read perm (H) */ |
78 | #define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */ | 74 | #define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */ |
79 | #define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */ | 75 | #define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */ |
80 | #define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */ | 76 | #define _PAGE_FILE (1<<6) /* page cache/ swap (S) */ |
81 | #define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */ | ||
82 | |||
83 | /* PD0 */ | ||
84 | #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ | 77 | #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ |
85 | #define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */ | 78 | #define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */ |
86 | #define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr | 79 | #define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr |
87 | usable for shared TLB entries (H) */ | 80 | usable for shared TLB entries (H) */ |
88 | |||
89 | #define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */ | ||
90 | #define _PAGE_FILE (1<<12) /* page cache/ swap (S) */ | ||
91 | |||
92 | #define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */ | ||
93 | #endif | 81 | #endif |
94 | 82 | ||
95 | /* Kernel allowed all permissions for all pages */ | 83 | /* vmalloc permissions */ |
96 | #define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \ | 84 | #define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \ |
97 | _PAGE_GLOBAL | _PAGE_PRESENT) | 85 | _PAGE_GLOBAL | _PAGE_PRESENT) |
98 | 86 | ||
99 | #ifdef CONFIG_ARC_CACHE_PAGES | 87 | #ifdef CONFIG_ARC_CACHE_PAGES |
@@ -109,10 +97,6 @@
109 | */ | 97 | */ |
110 | #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE) | 98 | #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE) |
111 | 99 | ||
112 | #define _PAGE_READ (_PAGE_U_READ | _PAGE_K_READ) | ||
113 | #define _PAGE_WRITE (_PAGE_U_WRITE | _PAGE_K_WRITE) | ||
114 | #define _PAGE_EXECUTE (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE) | ||
115 | |||
116 | /* Set of bits not changed in pte_modify */ | 100 | /* Set of bits not changed in pte_modify */ |
117 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED) | 101 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED) |
118 | 102 | ||
@@ -126,8 +110,8 @@
126 | 110 | ||
127 | #define PAGE_SHARED PAGE_U_W_R | 111 | #define PAGE_SHARED PAGE_U_W_R |
128 | 112 | ||
129 | /* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of | 113 | /* While kernel runs out of untranslated space, vmalloc/modules use a chunk of |
130 | * kernel vaddr space - visible in all addr spaces, but kernel mode only | 114 | * user vaddr space - visible in all addr spaces, but kernel mode only |
131 | * Thus Global, all-kernel-access, no-user-access, cached | 115 | * Thus Global, all-kernel-access, no-user-access, cached |
132 | */ | 116 | */ |
133 | #define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) | 117 | #define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) |
@@ -136,10 +120,9 @@
136 | #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) | 120 | #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) |
137 | 121 | ||
138 | /* Masks for actual TLB "PD"s */ | 122 | /* Masks for actual TLB "PD"s */ |
139 | #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) | 123 | #define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) |
140 | #define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ | 124 | #define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ) |
141 | _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \ | 125 | #define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE) |
142 | _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) | ||
143 | 126 | ||
144 | /************************************************************************** | 127 | /************************************************************************** |
145 | * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) | 128 | * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) |
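With the separate kernel/user permission bits folded away, a kernel mapping is just the unified RWX bits plus Global and Present, and the old PD1 mask splits cleanly into an RWX part and a non-RWX part. A small sketch, reusing the MMU v3 bit positions from the hunk above, shows how the composite masks work out numerically; the hex values in the comments are simply the OR of those #defines, nothing more.

#include <stdio.h>

/* MMU v3 bit assignments, copied from the new pgtable.h above */
#define _PAGE_CACHEABLE (1<<0)
#define _PAGE_EXECUTE   (1<<1)
#define _PAGE_WRITE     (1<<2)
#define _PAGE_READ      (1<<3)
#define _PAGE_GLOBAL    (1<<8)
#define _PAGE_PRESENT   (1<<9)

#define _K_PAGE_PERMS   (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
                         _PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_RWX    (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

int main(void)
{
        printf("_K_PAGE_PERMS   = 0x%03x\n", _K_PAGE_PERMS);    /* 0x30e */
        printf("PTE_BITS_IN_PD0 = 0x%03x\n", PTE_BITS_IN_PD0);  /* 0x300 */
        printf("PTE_BITS_RWX    = 0x%03x\n", PTE_BITS_RWX);     /* 0x00e */
        return 0;
}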
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index c9938e7a7dbd..1bfeec2c0558 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,27 +20,17 @@ struct pt_regs {
20 | 20 | ||
21 | /* Real registers */ | 21 | /* Real registers */ |
22 | long bta; /* bta_l1, bta_l2, erbta */ | 22 | long bta; /* bta_l1, bta_l2, erbta */ |
23 | long lp_start; | 23 | |
24 | long lp_end; | 24 | long lp_start, lp_end, lp_count; |
25 | long lp_count; | 25 | |
26 | long status32; /* status32_l1, status32_l2, erstatus */ | 26 | long status32; /* status32_l1, status32_l2, erstatus */ |
27 | long ret; /* ilink1, ilink2 or eret */ | 27 | long ret; /* ilink1, ilink2 or eret */ |
28 | long blink; | 28 | long blink; |
29 | long fp; | 29 | long fp; |
30 | long r26; /* gp */ | 30 | long r26; /* gp */ |
31 | long r12; | 31 | |
32 | long r11; | 32 | long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; |
33 | long r10; | 33 | |
34 | long r9; | ||
35 | long r8; | ||
36 | long r7; | ||
37 | long r6; | ||
38 | long r5; | ||
39 | long r4; | ||
40 | long r3; | ||
41 | long r2; | ||
42 | long r1; | ||
43 | long r0; | ||
44 | long sp; /* user/kernel sp depending on where we came from */ | 34 | long sp; /* user/kernel sp depending on where we came from */ |
45 | long orig_r0; | 35 | long orig_r0; |
46 | 36 | ||
@@ -70,19 +60,7 @@
70 | /* Callee saved registers - need to be saved only when you are scheduled out */ | 60 | /* Callee saved registers - need to be saved only when you are scheduled out */ |
71 | 61 | ||
72 | struct callee_regs { | 62 | struct callee_regs { |
73 | long r25; | 63 | long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; |
74 | long r24; | ||
75 | long r23; | ||
76 | long r22; | ||
77 | long r21; | ||
78 | long r20; | ||
79 | long r19; | ||
80 | long r18; | ||
81 | long r17; | ||
82 | long r16; | ||
83 | long r15; | ||
84 | long r14; | ||
85 | long r13; | ||
86 | }; | 64 | }; |
87 | 65 | ||
88 | #define instruction_pointer(regs) ((regs)->ret) | 66 | #define instruction_pointer(regs) ((regs)->ret) |
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 8276bfd61704..662627ced4f2 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -20,9 +20,9 @@ typedef struct {
20 | #define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ } | 20 | #define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ } |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Unlocked: 0x01_00_00_00 | 23 | * Unlocked : 0x0100_0000 |
24 | * Read lock(s): 0x00_FF_00_00 to say 0x01 | 24 | * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it) |
25 | * Write lock: 0x0, but only possible if prior value "unlocked" 0x0100_0000 | 25 | * Write lock : 0x0, but only if prior value is "unlocked" 0x0100_0000 |
26 | */ | 26 | */ |
27 | typedef struct { | 27 | typedef struct { |
28 | volatile unsigned int counter; | 28 | volatile unsigned int counter; |
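The corrected comment describes the usual counter-based rwlock encoding: the lock starts at 0x0100_0000, every reader decrements it, and a writer can only claim it by swinging the full unlocked value to zero. A minimal, non-atomic sketch of that state machine follows; the __ARCH_RW_LOCK_UNLOCKED__ name is used here only as an illustrative constant, and the real ARC code naturally uses atomic/exchange primitives rather than plain loads and stores.

#include <stdio.h>
#include <stdbool.h>

#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000

static unsigned int counter = __ARCH_RW_LOCK_UNLOCKED__;

/* A reader only needs the counter to stay non-negative after decrementing */
static bool read_trylock(void)
{
        if (counter > 0) {
                counter--;
                return true;
        }
        return false;
}

/* A writer needs exclusive ownership: only possible from the unlocked value */
static bool write_trylock(void)
{
        if (counter == __ARCH_RW_LOCK_UNLOCKED__) {
                counter = 0;
                return true;
        }
        return false;
}

int main(void)
{
        read_trylock();                                     /* counter: 0x00FFFFFF */
        printf("after 1 reader : 0x%08x\n", counter);
        printf("write_trylock  : %d\n", write_trylock());   /* 0: a reader is active */
        counter++;                                          /* reader unlocks */
        printf("write_trylock  : %d\n", write_trylock());   /* 1: now exclusive */
        return 0;
}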