author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-09 12:05:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-09 12:05:33 -0400
commit		89c5a9461d02613c829cf9beffdc3d3c6c3df401 (patch)
tree		e0c6ada024a32da3cd6bb4b4f0469d66e5df45c9
parent		833ae40b517a99e05fc4aea399e71f633f3348d2 (diff)
parent		07b9b65147d1d7cc03b9ff1e1f3b1c163ba4d067 (diff)
Merge tag 'arc-v3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC changes from Vineet Gupta:
- ARC MM changes:
- preparation for MMUv4 (accommodate new PTE bits, new cmds)
- Rework the ASID allocation algorithm to remove asid-mm reverse map
- Boilerplate code consolidation in Exception Handlers
- Disable FRAME_POINTER for ARC
- Unaligned Access Emulation for Big-Endian from Noam
- Bunch of fixes (udelay, missing accessors) from Mischa
* tag 'arc-v3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
ARC: fix new Section mismatches in build (post __cpuinit cleanup)
Kconfig.debug: Add FRAME_POINTER anti-dependency for ARC
ARC: Fix __udelay calculation
ARC: remove console_verbose() from setup_arch()
ARC: Add read*_relaxed to asm/io.h
ARC: Handle un-aligned user space access in BE.
ARC: [ASID] Track ASID allocation cycles/generations
ARC: [ASID] activate_mm() == switch_mm()
ARC: [ASID] get_new_mmu_context() to conditionally allocate new ASID
ARC: [ASID] Refactor the TLB paranoid debug code
ARC: [ASID] Remove legacy/unused debug code
ARC: No need to flush the TLB in early boot
ARC: MMUv4 preps/3 - Abstract out TLB Insert/Delete
ARC: MMUv4 preps/2 - Reshuffle PTE bits
ARC: MMUv4 preps/1 - Fold PTE K/U access flags
ARC: Code cosmetics (Nothing semantical)
ARC: Entry Handler tweaks: Optimize away redundant IRQ_DISABLE_SAVE
ARC: Exception Handlers Code consolidation
ARC: Add some .gitignore entries
-rw-r--r--	arch/arc/boot/.gitignore		1
-rw-r--r--	arch/arc/include/asm/cache.h		2
-rw-r--r--	arch/arc/include/asm/delay.h		5
-rw-r--r--	arch/arc/include/asm/entry.h		24
-rw-r--r--	arch/arc/include/asm/io.h		4
-rw-r--r--	arch/arc/include/asm/irqflags.h		7
-rw-r--r--	arch/arc/include/asm/mmu.h		11
-rw-r--r--	arch/arc/include/asm/mmu_context.h	161
-rw-r--r--	arch/arc/include/asm/pgtable.h		61
-rw-r--r--	arch/arc/include/asm/ptrace.h		36
-rw-r--r--	arch/arc/include/asm/spinlock_types.h	6
-rw-r--r--	arch/arc/kernel/.gitignore		1
-rw-r--r--	arch/arc/kernel/entry.S			66
-rw-r--r--	arch/arc/kernel/setup.c			2
-rw-r--r--	arch/arc/kernel/unaligned.c		26
-rw-r--r--	arch/arc/mm/cache_arc700.c		8
-rw-r--r--	arch/arc/mm/tlb.c			174
-rw-r--r--	arch/arc/mm/tlbex.S			205
-rw-r--r--	lib/Kconfig.debug			6
19 files changed, 352 insertions, 454 deletions
diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore
new file mode 100644
index 000000000000..5d65b54bf17a
--- /dev/null
+++ b/arch/arc/boot/.gitignore
@@ -0,0 +1 @@
+*.dtb*
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 5802849a6cae..e4abdaac6f9f 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -57,7 +57,7 @@
 
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
-extern void __init read_decode_cache_bcr(void);
+extern void read_decode_cache_bcr(void);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 442ce5d0f709..43de30256981 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
 {
 	unsigned long loops;
 
-	/* (long long) cast ensures 64 bit MPY - real or emulated
+	/* (u64) cast ensures 64 bit MPY - real or emulated
 	 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
 	 */
-	loops = ((long long)(usecs * 4295 * HZ) *
-		 (long long)(loops_per_jiffy)) >> 32;
+	loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
 
 	__delay(loops);
 }
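The fix here is overflow avoidance: 4295 is roughly 2^32/10^6, so the final >> 32 converts loops-per-second into loops-per-microsecond with no 64-bit divide. The old expression did the usecs * 4295 * HZ multiply in 32 bits before widening, which can wrap. A standalone sketch of the arithmetic (HZ and loops_per_jiffy values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t HZ = 100, loops_per_jiffy = 500000;	/* assumed values */
	uint32_t usecs = 20000;

	/* old form: usecs * 4295 * HZ wraps in 32-bit math before widening */
	uint32_t inner = usecs * 4295 * HZ;
	uint64_t buggy = ((uint64_t)inner * loops_per_jiffy) >> 32;

	/* patched form: widen first, as the (u64) cast in the diff does */
	uint64_t fixed = ((uint64_t)usecs * 4295 * HZ * loops_per_jiffy) >> 32;

	printf("buggy=%llu fixed=%llu expected~%llu\n",
	       (unsigned long long)buggy, (unsigned long long)fixed,
	       (unsigned long long)usecs * HZ * loops_per_jiffy / 1000000);
	return 0;
}

With these numbers the old form yields ~7 loops instead of the expected ~1000000, which is the __udelay miscalculation this pull fixes.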
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index df57611652e5..884081099f80 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -365,7 +365,7 @@
  * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
  *
  * Before saving the full regfile - this reg is restored back, only
- * to be saved again on kernel mode stack, as part of ptregs.
+ * to be saved again on kernel mode stack, as part of pt_regs.
  *-------------------------------------------------------------*/
 .macro EXCPN_PROLOG_FREEUP_REG	reg
 #ifdef CONFIG_SMP
@@ -384,6 +384,28 @@
 .endm
 
 /*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+	/* Need at least 1 reg to code the early exception prologue */
+	EXCPN_PROLOG_FREEUP_REG	r9
+
+	/* U/K mode at time of exception (stack not switched if already K) */
+	lr  r9, [erstatus]
+
+	/* ARC700 doesn't provide auto-stack switching */
+	SWITCH_TO_KERNEL_STK
+
+	/* save the regfile */
+	SAVE_ALL_SYS
+.endm
+
+/*--------------------------------------------------------------
  * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
  * Requires SP to be already switched to kernel mode Stack
  * sp points to the next free element on the stack at exit of this macro.
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 473424d7528b..334ce7017a18 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -100,6 +100,10 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
 #include <asm-generic/io.h>
 
 #endif /* _ASM_ARC_IO_H */
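The aliases simply make the _relaxed read forms available by reusing the regular accessors, unbreaking drivers that use them. A hypothetical polling use (the register offset and bit are made-up for illustration, not from this patch):

#include <linux/io.h>
#include <asm/processor.h>	/* cpu_relax() */

/* sketch of a driver poll loop using the newly-available accessor */
static void wait_for_done(void __iomem *base)
{
	/* relaxed access suffices while spinning on a single register */
	while (!(readl_relaxed(base + 0x04) & 0x1))
		cpu_relax();
}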
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index d99f79bcf865..b68b53f458d1 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -157,13 +157,6 @@ static inline void arch_unmask_irq(unsigned int irq)
 	flag	\scratch
 .endm
 
-.macro IRQ_DISABLE_SAVE scratch, save
-	lr	\scratch, [status32]
-	mov	\save, \scratch		/* Make a copy */
-	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
-	flag	\scratch
-.endm
-
 .macro IRQ_ENABLE  scratch
 	lr	\scratch, [status32]
 	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 7c03fe61759c..c2663b32866b 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -32,6 +32,8 @@
 /* Error code if probe fails */
 #define TLB_LKUP_ERR		0x80000000
 
+#define TLB_DUP_ERR	(TLB_LKUP_ERR | 0x00000001)
+
 /* TLB Commands */
 #define TLBWrite    0x1
 #define TLBRead     0x2
@@ -46,21 +48,18 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
-	unsigned long asid;	/* Pvt Addr-Space ID for mm */
-#ifdef CONFIG_ARC_TLB_DBG
-	struct task_struct *tsk;
-#endif
+	unsigned long asid;	/* 8 bit MMU PID + Generation cycle */
 } mm_context_t;
 
 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
-void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+void tlb_paranoid_check(unsigned int mm_asid, unsigned long address);
 #else
 #define tlb_paranoid_check(a, b)
 #endif
 
 void arc_mmu_init(void);
 extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
-void __init read_decode_mmu_bcr(void);
+void read_decode_mmu_bcr(void);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 0d71fb11b57c..43a1b51bb8cc 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -34,95 +34,65 @@
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
- * For book-keeping, Linux uses a couple of data-structures:
- *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- *   time of say switch_mm( )
- *  -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- *   given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * serve as cycle/generation indicator and natural 32 bit unsigned math
+ * automagically increments the generation when lower 8 bits rollover.
  */
 
-#define FIRST_ASID  0
-#define MAX_ASID    255			/* 8 bit PID field in PID Aux reg */
-#define NO_ASID     (MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
-#define NUM_ASID    ((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID		0UL
 
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK)
 
-extern int asid_cache;
+extern unsigned int asid_cache;
 
 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
-	struct mm_struct *prev_owner;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
 	/*
-	 * Relinquish the currently owned ASID (if any).
-	 * Doing unconditionally saves a cmp-n-branch; for already unused
-	 * ASID slot, the value was/remains NULL
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 * This is done by ensuring that the generation bits in both mm->ASID
+	 * and cpu's ASID counter are exactly same.
+	 *
+	 * Note: Callers needing new ASID unconditionally, independent of
+	 * 	 generation, e.g. local_flush_tlb_mm() for forking parent,
+	 * 	 first need to destroy the context, setting it to invalid
+	 * 	 value.
 	 */
-	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
+		goto set_hw;
+
+	/* move to new ASID and handle rollover */
+	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
 
-	/* move to new ASID */
-	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
-		asid_cache = FIRST_ASID;
 		flush_tlb_all();
-	}
 
-	/*
-	 * Is next ASID already owned by some-one else (we are stealing it).
-	 * If so, let the orig owner be aware of this, so when it runs, it
-	 * asks for a brand new ASID. This would only happen for a long-lived
-	 * task with ASID from prev allocation cycle (before ASID roll-over).
-	 *
-	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
-	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
-	 * The stealing scenario described here will only happen if that task
-	 * didn't get a chance to refresh it's ASID - implying stale entries
-	 * won't exist.
-	 */
-	prev_owner = asid_mm_map[asid_cache];
-	if (prev_owner)
-		prev_owner->context.asid = NO_ASID;
+		/*
+		 * Above checke for rollover of 8 bit ASID in 32 bit container.
+		 * If the container itself wrapped around, set it to a non zero
+		 * "generation" to distinguish from no context
+		 */
+		if (!asid_cache)
+			asid_cache = MM_CTXT_FIRST_CYCLE;
+	}
 
 	/* Assign new ASID to tsk */
-	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;
 
-#ifdef CONFIG_ARC_TLB_DBG
-	pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
-	       " pid:%u, assigned asid:%lu\n",
-	       (unsigned int)mm, (unsigned int)prev_owner,
-	       (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
-	       (mm->context.tsk)->pid, mm->context.asid);
-#endif
-
-	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -134,10 +104,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	mm->context.asid = NO_ASID;
-#ifdef CONFIG_ARC_TLB_DBG
-	mm->context.tsk = tsk;
-#endif
+	mm->context.asid = MM_CTXT_NO_ASID;
 	return 0;
 }
 
@@ -152,40 +119,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif
 
-	/*
-	 * Get a new ASID if task doesn't have a valid one. Possible when
-	 *  -task never had an ASID (fresh after fork)
-	 *  -it's ASID was stolen - past an ASID roll-over.
-	 *  -There's a third obscure scenario (if this task is running for the
-	 *   first time afer an ASID rollover), where despite having a valid
-	 *   ASID, we force a get for new ASID - see comments at top.
-	 *
-	 * Both the non-alloc scenario and first-use-after-rollover can be
-	 * detected using the single condition below: NO_ASID = 256
-	 * while asid_cache is always a valid ASID value (0-255).
-	 */
-	if (next->context.asid > asid_cache) {
-		get_new_mmu_context(next);
-	} else {
-		/*
-		 * XXX: This will never happen given the chks above
-		 * BUG_ON(next->context.asid > MAX_ASID);
-		 */
-		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-	}
-
+	get_new_mmu_context(next);
 }
 
+/*
+ * Called at the time of execve() to get a new ASID
+ * Note the subtlety here: get_new_mmu_context() behaves differently here
+ * vs. in switch_mm(). Here it always returns a new ASID, because mm has
+ * an unallocated "initial" value, while in latter, it moves to a new ASID,
+ * only if it was unallocated
+ */
+#define activate_mm(prev, next)		switch_mm(prev, next, NULL)
+
 static inline void destroy_context(struct mm_struct *mm)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	asid_mm_map[mm->context.asid] = NULL;
-	mm->context.asid = NO_ASID;
-
-	local_irq_restore(flags);
+	mm->context.asid = MM_CTXT_NO_ASID;
 }
 
 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
@@ -197,17 +145,6 @@ static inline void destroy_context(struct mm_struct *mm)
  */
 #define deactivate_mm(tsk, mm)   do { } while (0)
 
-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-#ifndef CONFIG_SMP
-	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
-
-	/* Unconditionally get a new ASID */
-	get_new_mmu_context(next);
-
-}
-
 #define enter_lazy_tlb(mm, tsk)
 
 #endif	/* __ASM_ARC_MMU_CONTEXT_H */
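The generation scheme above can be exercised outside the kernel; a minimal user-space model (the constants mirror the header, but the harness is invented):

#include <assert.h>
#include <stdio.h>

#define ASID_MASK	0x000000ffu		/* MM_CTXT_ASID_MASK */
#define CYCLE_MASK	(~ASID_MASK)		/* MM_CTXT_CYCLE_MASK */
#define FIRST_CYCLE	(ASID_MASK + 1)		/* MM_CTXT_FIRST_CYCLE */
#define NO_ASID		0u			/* MM_CTXT_NO_ASID */

static unsigned int asid_cache = FIRST_CYCLE;

/* returns the (possibly refreshed) value for mm->context.asid */
static unsigned int get_new_asid(unsigned int cur)
{
	/* same generation as the CPU counter: current ASID is still good */
	if (!((cur ^ asid_cache) & CYCLE_MASK))
		return cur;

	/* low 8 bits wrapping == new generation: whole TLB must go */
	if (!(++asid_cache & ASID_MASK)) {
		printf("flush_tlb_all()\n");
		if (!asid_cache)	/* full 32-bit wrap: skip NO_ASID */
			asid_cache = FIRST_CYCLE;
	}
	return asid_cache;
}

int main(void)
{
	unsigned int a = get_new_asid(NO_ASID);	/* fresh mm: allocated */
	assert(get_new_asid(a) == a);		/* same cycle: kept */
	asid_cache += FIRST_CYCLE;		/* pretend a rollover happened */
	assert(get_new_asid(a) != a);		/* stale cycle: refreshed */
	return 0;
}

Note how destroy_context() setting MM_CTXT_NO_ASID forces the unconditional-refresh case: generation 0 can never match a live asid_cache, which always carries a non-zero generation.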
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 4749a0eee1cf..6b0b7f7ef783 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -57,43 +57,31 @@
 
 #define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
 #define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<3)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<4)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<5)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<6)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<7)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<8)	/* Page has kernel perm (H) */
-#define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
-#define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<10)	/* page cache/ swap (S) */
-#define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */
+#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
+#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<7)	/* page cache/ swap (S) */
+#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
+#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */
 
-#else
+#else	/* MMU v3 onwards */
 
-/* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<1)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<2)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<3)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<4)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<5)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<6)	/* Page has kernel perm (H) */
-#define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */
-
-/* PD0 */
+#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
+#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
+#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<6)	/* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE   (1<<10)	/* Shared Code page with cmn vaddr
+#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
 					   usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED      (1<<11)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<12)	/* page cache/ swap (S) */
-
-#define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */
 #endif
 
-/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+/* vmalloc permissions */
+#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
 			_PAGE_GLOBAL | _PAGE_PRESENT)
 
 #ifdef CONFIG_ARC_CACHE_PAGES
@@ -109,10 +97,6 @@
  */
 #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
 
-#define _PAGE_READ	(_PAGE_U_READ | _PAGE_K_READ)
-#define _PAGE_WRITE	(_PAGE_U_WRITE | _PAGE_K_WRITE)
-#define _PAGE_EXECUTE	(_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
-
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
 
@@ -126,8 +110,8 @@
 
 #define PAGE_SHARED	PAGE_U_W_R
 
-/* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of
- * kernel vaddr space - visible in all addr spaces, but kernel mode only
+/* While kernel runs out of unstranslated space, vmalloc/modules use a chunk of
+ * user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
 #define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
@@ -136,10 +120,9 @@
 #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
 
 /* Masks for actual TLB "PD"s */
 #define PTE_BITS_IN_PD0	(_PAGE_GLOBAL | _PAGE_PRESENT)
-#define PTE_BITS_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE | \
-			 _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
-			 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index c9938e7a7dbd..1bfeec2c0558 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,27 +20,17 @@ struct pt_regs {
 
 	/* Real registers */
 	long bta;	/* bta_l1, bta_l2, erbta */
-	long lp_start;
-	long lp_end;
-	long lp_count;
+
+	long lp_start, lp_end, lp_count;
+
 	long status32;	/* status32_l1, status32_l2, erstatus */
 	long ret;	/* ilink1, ilink2 or eret */
 	long blink;
 	long fp;
 	long r26;	/* gp */
-	long r12;
-	long r11;
-	long r10;
-	long r9;
-	long r8;
-	long r7;
-	long r6;
-	long r5;
-	long r4;
-	long r3;
-	long r2;
-	long r1;
-	long r0;
+
+	long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
 	long sp;	/* user/kernel sp depending on where we came from  */
 	long orig_r0;
 
@@ -70,19 +60,7 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-	long r25;
-	long r24;
-	long r23;
-	long r22;
-	long r21;
-	long r20;
-	long r19;
-	long r18;
-	long r17;
-	long r16;
-	long r15;
-	long r14;
-	long r13;
+	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
 #define instruction_pointer(regs)	((regs)->ret)
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 8276bfd61704..662627ced4f2 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -20,9 +20,9 @@ typedef struct {
 #define __ARCH_SPIN_LOCK_LOCKED		{ __ARCH_SPIN_LOCK_LOCKED__ }
 
 /*
- * Unlocked:     0x01_00_00_00
- * Read lock(s): 0x00_FF_00_00 to say 0x01
- * Write lock:   0x0, but only possible if prior value "unlocked" 0x0100_0000
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
 */
 typedef struct {
 	volatile unsigned int counter;
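The corrected comment describes a counter-based rwlock. A toy single-threaded model of the counter protocol (the real code uses atomic llock/scond sequences; this only shows the arithmetic):

#define RW_UNLOCKED	0x01000000	/* mirrors the "Unlocked" value above */

static int read_trylock_model(unsigned int *counter)
{
	if (*counter > 0) {		/* a writer zeroes it; else room left */
		(*counter)--;		/* 0x0100_0000 -> 0x00FF_FFFF -> ... */
		return 1;
	}
	return 0;
}

static int write_trylock_model(unsigned int *counter)
{
	if (*counter == RW_UNLOCKED) {	/* writers need the fully idle state */
		*counter = 0;
		return 1;
	}
	return 0;
}

Releases are the mirror image: each reader increments the counter back, and the writer stores RW_UNLOCKED.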
diff --git a/arch/arc/kernel/.gitignore b/arch/arc/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/arc/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 1d7165156e17..b908dde8a331 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -267,12 +267,7 @@ ARC_EXIT handle_interrupt_level1
 
 ARC_ENTRY instr_service
 
-	EXCPN_PROLOG_FREEUP_REG r9
-
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE
 
 	lr  r0, [efa]
 	mov r1, sp
@@ -289,15 +284,13 @@ ARC_EXIT instr_service
 
 ARC_ENTRY mem_service
 
-	EXCPN_PROLOG_FREEUP_REG r9
-
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE
 
 	lr  r0, [efa]
 	mov r1, sp
+
+	FAKE_RET_FROM_EXCPN r9
+
 	bl  do_memory_error
 	b   ret_from_exception
 ARC_EXIT mem_service
@@ -308,11 +301,7 @@ ARC_EXIT mem_service
 
 ARC_ENTRY EV_MachineCheck
 
-	EXCPN_PROLOG_FREEUP_REG r9
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE
 
 	lr  r2, [ecr]
 	lr  r0, [efa]
@@ -342,13 +331,7 @@ ARC_EXIT EV_MachineCheck
 
 ARC_ENTRY EV_TLBProtV
 
-	EXCPN_PROLOG_FREEUP_REG r9
-
-	;Which mode (user/kernel) was the system in when Exception occured
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE
 
 	;---------(3) Save some more regs-----------------
 	;  vineetg: Mar 6th: Random Seg Fault issue #1
@@ -406,12 +389,7 @@ ARC_EXIT EV_TLBProtV
 ; ---------------------------------------------
 ARC_ENTRY EV_PrivilegeV
 
-	EXCPN_PROLOG_FREEUP_REG r9
-
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE
 
 	lr  r0, [efa]
 	mov r1, sp
@@ -427,14 +405,13 @@ ARC_EXIT EV_PrivilegeV
 ; ---------------------------------------------
 ARC_ENTRY EV_Extension
 
-	EXCPN_PROLOG_FREEUP_REG r9
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE
 
 	lr  r0, [efa]
 	mov r1, sp
+
+	FAKE_RET_FROM_EXCPN r9
+
 	bl  do_extension_fault
 	b   ret_from_exception
 ARC_EXIT EV_Extension
@@ -526,14 +503,7 @@ trap_with_param:
 
 ARC_ENTRY EV_Trap
 
-	; Need at least 1 reg to code the early exception prolog
-	EXCPN_PROLOG_FREEUP_REG r9
-
-	;Which mode (user/kernel) was the system in when intr occured
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE
 
 	;------- (4) What caused the Trap --------------
 	lr     r12, [ecr]
@@ -642,6 +612,9 @@ resume_kernel_mode:
 
 #ifdef CONFIG_PREEMPT
 
+	; This is a must for preempt_schedule_irq()
+	IRQ_DISABLE	r9
+
 	; Can't preempt if preemption disabled
 	GET_CURR_THR_INFO_FROM_SP   r10
 	ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -651,8 +624,6 @@ resume_kernel_mode:
 	ld  r9, [r10, THREAD_INFO_FLAGS]
 	bbit0  r9, TIF_NEED_RESCHED, restore_regs
 
-	IRQ_DISABLE	r9
-
 	; Invoke PREEMPTION
 	bl      preempt_schedule_irq
 
@@ -665,12 +636,11 @@ resume_kernel_mode:
 ;
 ; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
 ; IRQ shd definitely not happen between now and rtie
+; All 2 entry points to here already disable interrupts
 
 restore_regs :
 
-	; Disable Interrupts while restoring reg-file back
-	; XXX can this be optimised out
-	IRQ_DISABLE_SAVE    r9, r10	;@r10 has prisitine (pre-disable) copy
+	lr	r10, [status32]
 
 	; Restore REG File. In case multiple Events outstanding,
 	; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 6b083454d039..b011f8c164a1 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -357,8 +357,6 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	root_mountflags &= ~MS_RDONLY;
 
-	console_verbose();
-
 #if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
 	conswitchp = &dummy_con;
 #endif
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index c0f832f595d3..28d170060747 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -16,6 +16,16 @@
 #include <linux/uaccess.h>
 #include <asm/disasm.h>
 
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define BE		1
+#define FIRST_BYTE_16	"swap %1, %1\n swape %1, %1\n"
+#define FIRST_BYTE_32	"swape %1, %1\n"
+#else
+#define BE		0
+#define FIRST_BYTE_16
+#define FIRST_BYTE_32
+#endif
+
 #define __get8_unaligned_check(val, addr, err)		\
 	__asm__(					\
 	"1:	ldb.ab  %1, [%2, 1]\n"			\
@@ -36,9 +46,9 @@
 	do {						\
 		unsigned int err = 0, v, a = addr;	\
 		__get8_unaligned_check(v, a, err);	\
-		val =  v ;				\
+		val =  v << ((BE) ? 8 : 0);		\
 		__get8_unaligned_check(v, a, err);	\
-		val |= v << 8;				\
+		val |= v << ((BE) ? 0 : 8);		\
 		if (err)				\
 			goto fault;			\
 	} while (0)
@@ -47,13 +57,13 @@
 	do {						\
 		unsigned int err = 0, v, a = addr;	\
 		__get8_unaligned_check(v, a, err);	\
-		val =  v << 0;				\
+		val =  v << ((BE) ? 24 : 0);		\
 		__get8_unaligned_check(v, a, err);	\
-		val |= v << 8;				\
+		val |= v << ((BE) ? 16 : 8);		\
 		__get8_unaligned_check(v, a, err);	\
-		val |= v << 16;				\
+		val |= v << ((BE) ? 8 : 16);		\
 		__get8_unaligned_check(v, a, err);	\
-		val |= v << 24;				\
+		val |= v << ((BE) ? 0 : 24);		\
 		if (err)				\
 			goto fault;			\
 	} while (0)
@@ -63,6 +73,7 @@
 	unsigned int err = 0, v = val, a = addr;\
 						\
 	__asm__(				\
+	FIRST_BYTE_16				\
 	"1:	stb.ab  %1, [%2, 1]\n"		\
 	"	lsr   %1, %1, 8\n"		\
 	"2:	stb   %1, [%2]\n"		\
@@ -87,8 +98,9 @@
 #define put32_unaligned_check(val, addr)	\
 	do {					\
 		unsigned int err = 0, v = val, a = addr;\
-	__asm__(				\
 						\
+	__asm__(				\
+	FIRST_BYTE_32				\
 	"1:	stb.ab  %1, [%2, 1]\n"		\
 	"	lsr   %1, %1, 8\n"		\
 	"2:	stb.ab  %1, [%2, 1]\n"		\
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index f415d851b765..5a1259cd948c 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -622,12 +622,12 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
 /*
  * General purpose helper to make I and D cache lines consistent.
  * @paddr is phy addr of region
- * @vaddr is typically user or kernel vaddr (vmalloc)
- * Howver in one instance, flush_icache_range() by kprobe (for a breakpt in
+ * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
+ * However in one instance, when called by kprobe (for a breakpt in
  * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
  * use a paddr to index the cache (despite VIPT). This is fine since since a
- * built-in kernel page will not have any virtual mappings (not even kernel)
- * kprobe on loadable module is different as it will have kvaddr.
+ * builtin kernel page will not have any virtual mappings.
+ * kprobe on loadable module will be kernel vaddr.
 */
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 7957dc4e4d4a..71cb26df4255 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -52,6 +52,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/bug.h>
 #include <asm/arcregs.h>
 #include <asm/setup.h>
 #include <asm/mmu_context.h>
@@ -99,48 +100,45 @@
 
 
 /* A copy of the ASID from the PID reg is kept in asid_cache */
-int asid_cache = FIRST_ASID;
-
-/* ASID to mm struct mapping. We have one extra entry corresponding to
- * NO_ASID to save us a compare when clearing the mm entry for old asid
- * see get_new_mmu_context (asm-arc/mmu_context.h)
- */
-struct mm_struct *asid_mm_map[NUM_ASID + 1];
+unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;
 
 /*
  * Utility Routine to erase a J-TLB entry
- * The procedure is to look it up in the MMU. If found, ERASE it by
- *  issuing a TlbWrite CMD with PD0 = PD1 = 0
+ * Caller needs to setup Index Reg (manually or via getIndex)
  */
-
-static void __tlb_entry_erase(void)
+static inline void __tlb_entry_erase(void)
 {
 	write_aux_reg(ARC_REG_TLBPD1, 0);
 	write_aux_reg(ARC_REG_TLBPD0, 0);
 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
 
-static void tlb_entry_erase(unsigned int vaddr_n_asid)
+static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
 {
 	unsigned int idx;
 
-	/* Locate the TLB entry for this vaddr + ASID */
 	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+
 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
 	idx = read_aux_reg(ARC_REG_TLBINDEX);
 
+	return idx;
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+	unsigned int idx;
+
+	/* Locate the TLB entry for this vaddr + ASID */
+	idx = tlb_entry_lkup(vaddr_n_asid);
+
 	/* No error means entry found, zero it out */
 	if (likely(!(idx & TLB_LKUP_ERR))) {
 		__tlb_entry_erase();
-	} else {		/* Some sort of Error */
-
+	} else {
 		/* Duplicate entry error */
-		if (idx & 0x1) {
-			/* TODO we need to handle this case too */
-			pr_emerg("unhandled Duplicate flush for %x\n",
-			       vaddr_n_asid);
-		}
-		/* else entry not found so nothing to do */
+		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
+			  vaddr_n_asid);
 	}
 }
 
@@ -159,7 +157,7 @@ static void utlb_invalidate(void)
 {
 #if (CONFIG_ARC_MMU_VER >= 2)
 
-#if (CONFIG_ARC_MMU_VER < 3)
+#if (CONFIG_ARC_MMU_VER == 2)
 	/* MMU v2 introduced the uTLB Flush command.
 	 * There was however an obscure hardware bug, where uTLB flush would
 	 * fail when a prior probe for J-TLB (both totally unrelated) would
@@ -182,6 +180,36 @@ static void utlb_invalidate(void)
 
 }
 
+static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+{
+	unsigned int idx;
+
+	/*
+	 * First verify if entry for this vaddr+ASID already exists
+	 * This also sets up PD0 (vaddr, ASID..) for final commit
+	 */
+	idx = tlb_entry_lkup(pd0);
+
+	/*
+	 * If Not already present get a free slot from MMU.
+	 * Otherwise, Probe would have located the entry and set INDEX Reg
+	 * with existing location. This will cause Write CMD to over-write
+	 * existing entry with new PD0 and PD1
+	 */
+	if (likely(idx & TLB_LKUP_ERR))
+		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+	/* setup the other half of TLB entry (pfn, rwx..) */
+	write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+	/*
+	 * Commit the Entry to MMU
+	 * It doesnt sound safe to use the TLBWriteNI cmd here
+	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
+	 */
+	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
 /*
  * Un-conditionally (without lookup) erase the entire MMU contents
  */
@@ -224,13 +252,14 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 		return;
 
 	/*
-	 * Workaround for Android weirdism:
-	 * A binder VMA could end up in a task such that vma->mm != tsk->mm
-	 * old code would cause h/w - s/w ASID to get out of sync
+	 * - Move to a new ASID, but only if the mm is still wired in
+	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
+	 *    causing h/w - s/w ASID to get out of sync)
+	 * - Also get_new_mmu_context() new implementation allocates a new
+	 *   ASID only if it is not allocated already - so unallocate first
 	 */
-	if (current->mm != mm)
-		destroy_context(mm);
-	else
+	destroy_context(mm);
+	if (current->mm == mm)
 		get_new_mmu_context(mm);
 }
 
@@ -246,7 +275,6 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			   unsigned long end)
 {
 	unsigned long flags;
-	unsigned int asid;
 
 	/* If range @start to @end is more than 32 TLB entries deep,
 	 * its better to move to a new ASID rather than searching for
@@ -268,11 +296,10 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	start &= PAGE_MASK;
 
 	local_irq_save(flags);
-	asid = vma->vm_mm->context.asid;
 
-	if (asid != NO_ASID) {
+	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
 		while (start < end) {
-			tlb_entry_erase(start | (asid & 0xff));
+			tlb_entry_erase(start | hw_pid(vma->vm_mm));
 			start += PAGE_SIZE;
 		}
 	}
@@ -326,9 +353,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	 */
 	local_irq_save(flags);
 
-	if (vma->vm_mm->context.asid != NO_ASID) {
-		tlb_entry_erase((page & PAGE_MASK) |
-				(vma->vm_mm->context.asid & 0xff));
+	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
+		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm));
 		utlb_invalidate();
 	}
 
@@ -341,8 +367,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
-	unsigned int idx, asid_or_sasid;
-	unsigned long pd0_flags;
+	unsigned int asid_or_sasid, rwx;
+	unsigned long pd0, pd1;
 
 	/*
 	 * create_tlb() assumes that current->mm == vma->mm, since
@@ -381,40 +407,30 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 	/* update this PTE credentials */
 	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
 
-	/* Create HW TLB entry Flags (in PD0) from PTE Flags */
-#if (CONFIG_ARC_MMU_VER <= 2)
-	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
-#else
-	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
-#endif
+	/* Create HW TLB(PD0,PD1) from PTE  */
 
 	/* ASID for this task */
 	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
-
-	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
-	write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
-
-	/* First verify if entry for this vaddr+ASID already exists */
-	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
-	idx = read_aux_reg(ARC_REG_TLBINDEX);
+	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
 
 	/*
-	 * If Not already present get a free slot from MMU.
-	 * Otherwise, Probe would have located the entry and set INDEX Reg
-	 * with existing location. This will cause Write CMD to over-write
-	 * existing entry with new PD0 and PD1
+	 * ARC MMU provides fully orthogonal access bits for K/U mode,
+	 * however Linux only saves 1 set to save PTE real-estate
+	 * Here we convert 3 PTE bits into 6 MMU bits:
+	 *   -Kernel only entries have Kr Kw Kx 0 0 0
+	 *   -User entries have mirrored K and U bits
 	 */
-	if (likely(idx & TLB_LKUP_ERR))
-		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+	rwx = pte_val(*ptep) & PTE_BITS_RWX;
 
-	/*
-	 * Commit the Entry to MMU
-	 * It doesnt sound safe to use the TLBWriteNI cmd here
-	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
-	 */
-	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+	if (pte_val(*ptep) & _PAGE_GLOBAL)
+		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
+	else
+		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
+
+	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
+
+	tlb_entry_insert(pd0, pd1);
 
 	local_irq_restore(flags);
 }
@@ -553,13 +569,6 @@ void arc_mmu_init(void)
 	if (mmu->pg_sz != PAGE_SIZE)
 		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
 
-	/*
-	 * ASID mgmt data structures are compile time init
-	 *   asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
-	 */
-
-	local_flush_tlb_all();
-
 	/* Enable the MMU */
 	write_aux_reg(ARC_REG_PID, MMU_ENABLE);
 
@@ -671,25 +680,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
 * don't match
 */
-void print_asid_mismatch(int is_fast_path)
+void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
 {
-	int pid_sw, pid_hw;
-	pid_sw = current->active_mm->context.asid;
-	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
-
 	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
-	       is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
+		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);
 
 	__asm__ __volatile__("flag 1");
 }
 
-void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
+void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
 {
-	unsigned int pid_hw;
+	unsigned int mmu_asid;
 
-	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-	if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
-		print_asid_mismatch(0);
+	/*
+	 * At the time of a TLB miss/installation
+	 *   - HW version needs to match SW version
+	 *   - SW needs to have a valid ASID
+	 */
+	if (addr < 0x70000000 &&
+	    ((mm_asid == MM_CTXT_NO_ASID) ||
+	      (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
+		print_asid_mismatch(mm_asid, mmu_asid, 0);
 }
 #endif
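The 3-into-6 permission fold in create_tlb() above is easy to sanity-check in isolation. A stand-alone sketch using the MMUv3 bit positions from pgtable.h (the test harness is invented, not kernel code):

#include <assert.h>

#define _PAGE_EXECUTE	(1 << 1)
#define _PAGE_WRITE	(1 << 2)
#define _PAGE_READ	(1 << 3)
#define _PAGE_GLOBAL	(1 << 8)
#define PTE_BITS_RWX	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

/* fold Linux's single rwx copy into the MMU's separate K and U sets */
static unsigned int pd1_perms(unsigned int pte)
{
	unsigned int rwx = pte & PTE_BITS_RWX;

	if (pte & _PAGE_GLOBAL)
		return rwx << 3;	/* kernel page: Kr Kw Kx, U bits 0 */

	return rwx | (rwx << 3);	/* user page: K and U mirrored */
}

int main(void)
{
	/* global (kernel) RW page: only the K copies are set */
	assert(pd1_perms(_PAGE_GLOBAL | _PAGE_READ | _PAGE_WRITE) ==
	       ((_PAGE_READ | _PAGE_WRITE) << 3));

	/* user RX page: identical K and U copies */
	unsigned int rx = _PAGE_READ | _PAGE_EXECUTE;
	assert(pd1_perms(rx) == (rx | (rx << 3)));
	return 0;
}

The << 3 lands exactly on the positions the old _PAGE_K_* flags occupied (bits 4..6), which is what lets the new PTE layout drop one whole set of permission bits.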
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index 5c5bb23001b0..cf7d7d9ad695 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S | |||
@@ -44,17 +44,36 @@ | |||
44 | #include <asm/arcregs.h> | 44 | #include <asm/arcregs.h> |
45 | #include <asm/cache.h> | 45 | #include <asm/cache.h> |
46 | #include <asm/processor.h> | 46 | #include <asm/processor.h> |
47 | #if (CONFIG_ARC_MMU_VER == 1) | ||
48 | #include <asm/tlb-mmu1.h> | 47 | #include <asm/tlb-mmu1.h> |
49 | #endif | ||
50 | 48 | ||
51 | ;-------------------------------------------------------------------------- | 49 | ;----------------------------------------------------------------- |
52 | ; scratch memory to save the registers (r0-r3) used to code TLB refill Handler | 50 | ; ARC700 Exception Handling doesn't auto-switch stack and it only provides |
53 | ; For details refer to comments before TLBMISS_FREEUP_REGS below | 51 | ; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0" |
52 | ; | ||
53 | ; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a | ||
54 | ; "global" is used to free-up FIRST core reg to be able to code the rest of | ||
55 | ; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe). | ||
56 | ; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3 | ||
57 | ; need to be saved as well by extending the "global" to be 4 words. Hence | ||
58 | ; ".size ex_saved_reg1, 16" | ||
59 | ; [All of this dance is to avoid stack switching for each TLB Miss, since we | ||
60 | ; only need to save a handful of regs, as opposed to the complete reg file] | ||
61 | ; | ||
62 | ; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST | ||
63 | ; core reg as it will not be SMP safe. | ||
64 | ; Thus scratch AUX reg is used (and no longer used to cache task PGD). | ||
65 | ; To save the remaining 3 regs per cpu, the global is made "per-cpu". | ||
66 | ; Epilogue thus has to locate the "per-cpu" storage for regs. | ||
67 | ; To avoid cache line bouncing the per-cpu global is aligned/sized per | ||
68 | ; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence | ||
69 | ; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)" | ||
70 | |||
71 | ; As simple as that.... | ||
54 | ;-------------------------------------------------------------------------- | 72 | ;-------------------------------------------------------------------------- |
55 | 73 | ||
74 | ; scratch memory to save [r0-r3] used to code TLB refill Handler | ||
56 | ARCFP_DATA ex_saved_reg1 | 75 | ARCFP_DATA ex_saved_reg1 |
57 | .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned | 76 | .align 1 << L1_CACHE_SHIFT |
58 | .type ex_saved_reg1, @object | 77 | .type ex_saved_reg1, @object |
59 | #ifdef CONFIG_SMP | 78 | #ifdef CONFIG_SMP |
60 | .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT) | 79 | .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT) |
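
The SMP sizing above follows directly from the relocated comment block: one
cache line per CPU, found by shifting the CPU id. A small C model of that
address computation; the L1_CACHE_SHIFT and NR_CPUS values here are
assumptions for the sketch, not the kernel's configuration:

#define L1_CACHE_SHIFT	6	/* assumed: 64-byte L1 lines */
#define NR_CPUS		4	/* assumed */

/* one cache line per CPU, though only 16 bytes of each are used */
static char ex_saved_reg1[NR_CPUS << L1_CACHE_SHIFT]
	__attribute__((aligned(1 << L1_CACHE_SHIFT)));

static void *cpu_save_slot(unsigned int cpu_id)
{
	/* mirrors: lsl r0, r0, L1_CACHE_SHIFT ; add r0, @ex_saved_reg1, r0 */
	return &ex_saved_reg1[(unsigned long)cpu_id << L1_CACHE_SHIFT];
}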
@@ -66,6 +85,44 @@ ex_saved_reg1: | |||
66 | .zero 16 | 85 | .zero 16 |
67 | #endif | 86 | #endif |
68 | 87 | ||
88 | .macro TLBMISS_FREEUP_REGS | ||
89 | #ifdef CONFIG_SMP | ||
90 | sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with | ||
91 | GET_CPU_ID r0 ; get to per cpu scratch mem, | ||
92 | lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu | ||
93 | add r0, @ex_saved_reg1, r0 | ||
94 | #else | ||
95 | st r0, [@ex_saved_reg1] | ||
96 | mov_s r0, @ex_saved_reg1 | ||
97 | #endif | ||
98 | st_s r1, [r0, 4] | ||
99 | st_s r2, [r0, 8] | ||
100 | st_s r3, [r0, 12] | ||
101 | |||
102 | ; VERIFY if the ASID in MMU-PID Reg is same as | ||
103 | ; one in Linux data structures | ||
104 | |||
105 | tlb_paranoid_check_asm | ||
106 | .endm | ||
107 | |||
108 | .macro TLBMISS_RESTORE_REGS | ||
109 | #ifdef CONFIG_SMP | ||
110 | GET_CPU_ID r0 ; get to per cpu scratch mem | ||
111 | lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide | ||
112 | add r0, @ex_saved_reg1, r0 | ||
113 | ld_s r3, [r0,12] | ||
114 | ld_s r2, [r0, 8] | ||
115 | ld_s r1, [r0, 4] | ||
116 | lr r0, [ARC_REG_SCRATCH_DATA0] | ||
117 | #else | ||
118 | mov_s r0, @ex_saved_reg1 | ||
119 | ld_s r3, [r0,12] | ||
120 | ld_s r2, [r0, 8] | ||
121 | ld_s r1, [r0, 4] | ||
122 | ld_s r0, [r0] | ||
123 | #endif | ||
124 | .endm | ||
125 | |||
69 | ;============================================================================ | 126 | ;============================================================================ |
70 | ; Troubleshooting Stuff | 127 | ; Troubleshooting Stuff |
71 | ;============================================================================ | 128 | ;============================================================================ |
@@ -76,34 +133,35 @@ ex_saved_reg1: | |||
76 | ; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble. | 133 | ; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble. |
77 | ; So we try to detect this in TLB Miss handler | 134 | ; So we try to detect this in TLB Miss handler |
78 | 135 | ||
79 | 136 | .macro tlb_paranoid_check_asm | |
80 | .macro DBG_ASID_MISMATCH | ||
81 | 137 | ||
82 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA | 138 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA |
83 | 139 | ||
84 | ; make sure h/w ASID is same as s/w ASID | ||
85 | |||
86 | GET_CURR_TASK_ON_CPU r3 | 140 | GET_CURR_TASK_ON_CPU r3 |
87 | ld r0, [r3, TASK_ACT_MM] | 141 | ld r0, [r3, TASK_ACT_MM] |
88 | ld r0, [r0, MM_CTXT+MM_CTXT_ASID] | 142 | ld r0, [r0, MM_CTXT+MM_CTXT_ASID] |
143 | breq r0, 0, 55f ; Error if no ASID allocated | ||
89 | 144 | ||
90 | lr r1, [ARC_REG_PID] | 145 | lr r1, [ARC_REG_PID] |
91 | and r1, r1, 0xFF | 146 | and r1, r1, 0xFF |
92 | breq r1, r0, 5f | ||
93 | 147 | ||
148 | and r2, r0, 0xFF ; MMU PID bits only for comparison | ||
149 | breq r1, r2, 5f | ||
150 | |||
151 | 55: | ||
94 | ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode | 152 | ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode |
95 | lr r0, [erstatus] | 153 | lr r2, [erstatus] |
96 | bbit0 r0, STATUS_U_BIT, 5f | 154 | bbit0 r2, STATUS_U_BIT, 5f |
97 | 155 | ||
98 | ; We sure are in troubled waters, Flag the error, but to do so | 156 | ; We sure are in troubled waters, Flag the error, but to do so |
99 | ; need to switch to kernel mode stack to call error routine | 157 | ; need to switch to kernel mode stack to call error routine |
100 | GET_TSK_STACK_BASE r3, sp | 158 | GET_TSK_STACK_BASE r3, sp |
101 | 159 | ||
102 | ; Call printk to shoutout aloud | 160 | ; Call printk to shoutout aloud |
103 | mov r0, 1 | 161 | mov r2, 1 |
104 | j print_asid_mismatch | 162 | j print_asid_mismatch |
105 | 163 | ||
106 | 5: ; ASIDs match so proceed normally | 164 | 5: ; ASIDs match so proceed normally |
107 | nop | 165 | nop |
108 | 166 | ||
109 | #endif | 167 | #endif |
@@ -161,13 +219,17 @@ ex_saved_reg1: | |||
161 | ; IN: r0 = PTE, r1 = ptr to PTE | 219 | ; IN: r0 = PTE, r1 = ptr to PTE |
162 | 220 | ||
163 | .macro CONV_PTE_TO_TLB | 221 | .macro CONV_PTE_TO_TLB |
164 | and r3, r0, PTE_BITS_IN_PD1 ; Extract permission flags+PFN from PTE | 222 | and r3, r0, PTE_BITS_RWX ; r w x |
165 | sr r3, [ARC_REG_TLBPD1] ; these go in PD1 | 223 | lsl r2, r3, 3 ; r w x 0 0 0 |
224 | and.f 0, r0, _PAGE_GLOBAL | ||
225 | or.z r2, r2, r3 ; r w x r w x | ||
226 | |||
227 | and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE | ||
228 | or r3, r3, r2 | ||
229 | |||
230 | sr r3, [ARC_REG_TLBPD1] ; these go in PD1 | ||
166 | 231 | ||
167 | and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb | 232 | and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb |
168 | #if (CONFIG_ARC_MMU_VER <= 2) /* Neednot be done with v3 onwards */ | ||
169 | lsr r2, r2 ; shift PTE flags to match layout in PD0 | ||
170 | #endif | ||
171 | 233 | ||
172 | lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid | 234 | lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid |
173 | 235 | ||
@@ -191,68 +253,6 @@ ex_saved_reg1: | |||
191 | #endif | 253 | #endif |
192 | .endm | 254 | .endm |
193 | 255 | ||
194 | ;----------------------------------------------------------------- | ||
195 | ; ARC700 Exception Handling doesn't auto-switch stack and it only provides | ||
196 | ; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0" | ||
197 | ; | ||
198 | ; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a | ||
199 | ; "global" is used to free-up FIRST core reg to be able to code the rest of | ||
200 | ; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe). | ||
201 | ; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3 | ||
202 | ; need to be saved as well by extending the "global" to be 4 words. Hence | ||
203 | ; ".size ex_saved_reg1, 16" | ||
204 | ; [All of this dance is to avoid stack switching for each TLB Miss, since we | ||
205 | ; only need to save a handful of regs, as opposed to the complete reg file] | ||
206 | ; | ||
207 | ; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST | ||
208 | ; core reg as it will not be SMP safe. | ||
209 | ; Thus scratch AUX reg is used (and no longer used to cache task PGD). | ||
210 | ; To save the remaining 3 regs per cpu, the global is made "per-cpu". | ||
211 | ; Epilogue thus has to locate the "per-cpu" storage for regs. | ||
212 | ; To avoid cache line bouncing the per-cpu global is aligned/sized per | ||
213 | ; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence | ||
214 | ; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)" | ||
215 | |||
216 | ; As simple as that.... | ||
217 | |||
218 | .macro TLBMISS_FREEUP_REGS | ||
219 | #ifdef CONFIG_SMP | ||
220 | sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with | ||
221 | GET_CPU_ID r0 ; get to per cpu scratch mem, | ||
222 | lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu | ||
223 | add r0, @ex_saved_reg1, r0 | ||
224 | #else | ||
225 | st r0, [@ex_saved_reg1] | ||
226 | mov_s r0, @ex_saved_reg1 | ||
227 | #endif | ||
228 | st_s r1, [r0, 4] | ||
229 | st_s r2, [r0, 8] | ||
230 | st_s r3, [r0, 12] | ||
231 | |||
232 | ; VERIFY if the ASID in MMU-PID Reg is same as | ||
233 | ; one in Linux data structures | ||
234 | |||
235 | DBG_ASID_MISMATCH | ||
236 | .endm | ||
237 | |||
238 | ;----------------------------------------------------------------- | ||
239 | .macro TLBMISS_RESTORE_REGS | ||
240 | #ifdef CONFIG_SMP | ||
241 | GET_CPU_ID r0 ; get to per cpu scratch mem | ||
242 | lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide | ||
243 | add r0, @ex_saved_reg1, r0 | ||
244 | ld_s r3, [r0,12] | ||
245 | ld_s r2, [r0, 8] | ||
246 | ld_s r1, [r0, 4] | ||
247 | lr r0, [ARC_REG_SCRATCH_DATA0] | ||
248 | #else | ||
249 | mov_s r0, @ex_saved_reg1 | ||
250 | ld_s r3, [r0,12] | ||
251 | ld_s r2, [r0, 8] | ||
252 | ld_s r1, [r0, 4] | ||
253 | ld_s r0, [r0] | ||
254 | #endif | ||
255 | .endm | ||
256 | 256 | ||
257 | ARCFP_CODE ;Fast Path Code, candidate for ICCM | 257 | ARCFP_CODE ;Fast Path Code, candidate for ICCM |
258 | 258 | ||
@@ -277,8 +277,8 @@ ARC_ENTRY EV_TLBMissI | |||
277 | ;---------------------------------------------------------------- | 277 | ;---------------------------------------------------------------- |
278 | ; VERIFY_PTE: Check if PTE permissions approp for executing code | 278 | ; VERIFY_PTE: Check if PTE permissions approp for executing code |
279 | cmp_s r2, VMALLOC_START | 279 | cmp_s r2, VMALLOC_START |
280 | mov.lo r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE) | 280 | mov_s r2, (_PAGE_PRESENT | _PAGE_EXECUTE) |
281 | mov.hs r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE) | 281 | or.hs r2, r2, _PAGE_GLOBAL |
282 | 282 | ||
283 | and r3, r0, r2 ; Mask out NON Flag bits from PTE | 283 | and r3, r0, r2 ; Mask out NON Flag bits from PTE |
284 | xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) | 284 | xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) |
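
The VERIFY_PTE test above is an all-required-bits check: AND the PTE with the
demand mask and make sure nothing demanded is missing, where code in vmalloc
space additionally demands _PAGE_GLOBAL. Restated in C, with flag values that
are assumptions for illustration:

#define PAGE_PRESENT	0x1u	/* assumed values, illustration only */
#define PAGE_EXECUTE	0x8u
#define PAGE_GLOBAL	0x100u

static int itlb_perm_ok(unsigned int pte, unsigned long vaddr,
			unsigned long vmalloc_start)
{
	unsigned int need = PAGE_PRESENT | PAGE_EXECUTE;

	if (vaddr >= vmalloc_start)	/* cmp_s r2, VMALLOC_START / or.hs */
		need |= PAGE_GLOBAL;	/* kernel text must be a global page */

	/* and r3,r0,r2 ; xor.f r3,r3,r2 -- zero iff all needed bits set */
	return (pte & need) == need;
}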
@@ -317,26 +317,21 @@ ARC_ENTRY EV_TLBMissD | |||
317 | ;---------------------------------------------------------------- | 317 | ;---------------------------------------------------------------- |
318 | ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W) | 318 | ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W) |
319 | 319 | ||
320 | mov_s r2, 0 | 320 | cmp_s r2, VMALLOC_START |
321 | mov_s r2, _PAGE_PRESENT ; common bit for K/U PTE | ||
322 | or.hs r2, r2, _PAGE_GLOBAL ; kernel PTE only | ||
323 | |||
324 | ; Linux PTE [RWX] bits are semantically overloaded: | ||
325 | ; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc) | ||
326 | ; -Otherwise they are user-mode permissions, and those are exactly | ||
327 | ; same for kernel mode as well (e.g. copy_(to|from)_user) | ||
328 | |||
321 | lr r3, [ecr] | 329 | lr r3, [ecr] |
322 | btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access | 330 | btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access |
323 | or.nz r2, r2, _PAGE_U_READ ; chk for Read flag in PTE | 331 | or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE |
324 | btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access | 332 | btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access |
325 | or.nz r2, r2, _PAGE_U_WRITE ; chk for Write flag in PTE | 333 | or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE |
326 | ; Above laddering takes care of XCHG access | 334 | ; Above laddering takes care of XCHG access (both R and W) |
327 | ; which is both Read and Write | ||
328 | |||
329 | ; If kernel mode access, ; make _PAGE_xx flags as _PAGE_K_xx | ||
330 | ; For copy_(to|from)_user, despite exception taken in kernel mode, | ||
331 | ; this code is not hit, because EFA would still be the user mode | ||
332 | ; address (EFA < 0x6000_0000). | ||
333 | ; This code is for legit kernel mode faults, vmalloc specifically | ||
334 | ; (EFA: 0x7000_0000 to 0x7FFF_FFFF) | ||
335 | |||
336 | lr r3, [efa] | ||
337 | cmp r3, VMALLOC_START - 1 ; If kernel mode access | ||
338 | asl.hi r2, r2, 3 ; make _PAGE_xx flags as _PAGE_K_xx | ||
339 | or r2, r2, _PAGE_PRESENT ; Common flag for K/U mode | ||
340 | 335 | ||
341 | ; By now, r2 setup with all the Flags we need to check in PTE | 336 | ; By now, r2 setup with all the Flags we need to check in PTE |
342 | and r3, r0, r2 ; Mask out NON Flag bits from PTE | 337 | and r3, r0, r2 ; Mask out NON Flag bits from PTE |
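
The data-side ladder builds its demand mask from the exception cause register
instead: _PAGE_READ for a load miss, _PAGE_WRITE for a store miss, both for
XCHG, plus _PAGE_GLOBAL when the faulting address sits in vmalloc space. A
sketch under the same assumed flag values as the earlier ones:

#define PAGE_PRESENT	0x1u	/* assumed values, illustration only */
#define PAGE_READ	0x2u
#define PAGE_WRITE	0x4u
#define PAGE_GLOBAL	0x100u

static unsigned int dtlb_demand_mask(int ld_miss, int st_miss,
				     unsigned long vaddr,
				     unsigned long vmalloc_start)
{
	unsigned int need = PAGE_PRESENT;	/* common bit for K/U PTEs */

	if (vaddr >= vmalloc_start)
		need |= PAGE_GLOBAL;		/* kernel-only (vmalloc) mapping */
	if (ld_miss)
		need |= PAGE_READ;
	if (st_miss)
		need |= PAGE_WRITE;		/* XCHG raises both R and W */

	return need;	/* caller then applies (pte & need) == need */
}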
@@ -371,13 +366,7 @@ do_slow_path_pf: | |||
371 | 366 | ||
372 | ; Slow path TLB Miss handled as a regular ARC Exception | 367 | ; Slow path TLB Miss handled as a regular ARC Exception |
373 | ; (stack switching / save the complete reg-file). | 368 | ; (stack switching / save the complete reg-file). |
374 | ; That requires freeing up r9 | 369 | EXCEPTION_PROLOGUE |
375 | EXCPN_PROLOG_FREEUP_REG r9 | ||
376 | |||
377 | lr r9, [erstatus] | ||
378 | |||
379 | SWITCH_TO_KERNEL_STK | ||
380 | SAVE_ALL_SYS | ||
381 | 370 | ||
382 | ; ------- setup args for Linux Page fault Handler --------- | 371 | ; ------- setup args for Linux Page fault Handler --------- |
383 | mov_s r0, sp | 372 | mov_s r0, sp |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 444e1c12fea9..652bea9054f0 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -908,7 +908,7 @@ config LOCKDEP | |||
908 | bool | 908 | bool |
909 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 909 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
910 | select STACKTRACE | 910 | select STACKTRACE |
911 | select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE | 911 | select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC |
912 | select KALLSYMS | 912 | select KALLSYMS |
913 | select KALLSYMS_ALL | 913 | select KALLSYMS_ALL |
914 | 914 | ||
@@ -1366,7 +1366,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
1366 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT | 1366 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT |
1367 | depends on !X86_64 | 1367 | depends on !X86_64 |
1368 | select STACKTRACE | 1368 | select STACKTRACE |
1369 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND | 1369 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC |
1370 | help | 1370 | help |
1371 | Provide stacktrace filter for fault-injection capabilities | 1371 | Provide stacktrace filter for fault-injection capabilities |
1372 | 1372 | ||
@@ -1376,7 +1376,7 @@ config LATENCYTOP | |||
1376 | depends on DEBUG_KERNEL | 1376 | depends on DEBUG_KERNEL |
1377 | depends on STACKTRACE_SUPPORT | 1377 | depends on STACKTRACE_SUPPORT |
1378 | depends on PROC_FS | 1378 | depends on PROC_FS |
1379 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND | 1379 | select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC |
1380 | select KALLSYMS | 1380 | select KALLSYMS |
1381 | select KALLSYMS_ALL | 1381 | select KALLSYMS_ALL |
1382 | select STACKTRACE | 1382 | select STACKTRACE |