author Linus Torvalds <torvalds@linux-foundation.org> 2014-09-21 15:11:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2014-09-21 15:11:52 -0400
commit dae0af783ddce6d4f6f3d09ee8d71cc8c92c8d9f (patch)
tree c1bd322d02897fa43cf7304bb5811b1b814a8ecb
parent c1f03b486429633597bccbee413fb621b53603a8 (diff)
parent 505013bc9065391f09a51d51cd3bf0b06dfb570a (diff)
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "Fixes for ARM, the most notable being the fix from Nathan Lynch to
  fix the state of various registers during execve, to ensure that data
  can't be leaked between two executables.

  Fixes from Victor Kamensky for get_user() on big endian platforms,
  since the addition of 8-byte get_user() support broke these fairly
  badly.

  A fix from Sudeep Holla for affinity setting when hotplugging CPU 0.

  A fix from Stephen Boyd for a perf-induced sleep attempt while
  atomic.

  Lastly, a correctness fix for emulation of the SWP instruction on
  ARMv7+, and a fix for wrong carry handling when updating the
  translation table base address on LPAE platforms"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8149/1: perf: Don't sleep while atomic when enabling per-cpu interrupts
  ARM: 8148/1: flush TLS and thumbee register state during exec
  ARM: 8151/1: add missing exports for asm functions required by get_user macro
  ARM: 8137/1: fix get_user BE behavior for target variable with size of 8 bytes
  ARM: 8135/1: Fix in-correct barrier usage in SWP{B} emulation
  ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs
  ARM: 8132/1: LPAE: drop wrong carry flag correction after adding TTBR1_OFFSET
 arch/arm/include/asm/tls.h       | 62
 arch/arm/include/asm/uaccess.h   | 48
 arch/arm/kernel/armksyms.c       |  8
 arch/arm/kernel/irq.c            |  2
 arch/arm/kernel/perf_event_cpu.c | 14
 arch/arm/kernel/process.c        |  2
 arch/arm/kernel/swp_emulate.c    | 15
 arch/arm/kernel/thumbee.c        |  2
 arch/arm/kernel/traps.c          | 17
 arch/arm/lib/getuser.S           | 38
 arch/arm/mm/proc-v7-3level.S     |  1
 11 files changed, 154 insertions(+), 55 deletions(-)
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 83259b873333..36172adda9d0 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H
 
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
 	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
@@ -50,6 +53,47 @@
 #endif
 
 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+	struct thread_info *thread;
+
+	thread = current_thread_info();
+
+	thread->tp_value[0] = val;
+
+	/*
+	 * This code runs with preemption enabled and therefore must
+	 * be reentrant with respect to switch_tls.
+	 *
+	 * We need to ensure ordering between the shadow state and the
+	 * hardware state, so that we don't corrupt the hardware state
+	 * with a stale shadow state during context switch.
+	 *
+	 * If we're preempted here, switch_tls will load TPIDRURO from
+	 * thread_info upon resuming execution and the following mcr
+	 * is merely redundant.
+	 */
+	barrier();
+
+	if (!tls_emu) {
+		if (has_tls_reg) {
+			asm("mcr p15, 0, %0, c13, c0, 3"
+			    : : "r" (val));
+		} else {
+			/*
+			 * User space must never try to access this
+			 * directly. Expect your app to break
+			 * eventually if you do so. The user helper
+			 * at 0xffff0fe0 must be used instead. (see
+			 * entry-armv.S for details)
+			 */
+			*((unsigned int *)0xffff0ff0) = val;
+		}
+
+	}
+}
+
 static inline unsigned long get_tpuser(void)
 {
 	unsigned long reg = 0;
@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void)
 
 	return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+	 * we need not update thread_info.
+	 */
+	if (has_tls_reg && !tls_emu) {
+		asm("mcr p15, 0, %0, c13, c0, 2"
+		    : : "r" (val));
+	}
+}
+
+static inline void flush_tls(void)
+{
+	set_tls(0);
+	set_tpuser(0);
+}
+
 #endif
 #endif /* __ASMARM_TLS_H */
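
Reviewer note: as the comment in set_tls() says, user space must go through the kuser helper rather than poking the 0xffff0ff0 word. A hedged user-space sketch of the read side, not part of this patch; it assumes a kernel with kuser helpers enabled (the get_tls helper documented at 0xffff0fe0):

	/* Hedged sketch: read the TLS value via the ARM kuser helper.
	 * The address is the documented __kuser_get_tls entry point. */
	typedef unsigned long (*kuser_get_tls_t)(void);
	#define kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

	static unsigned long read_tls_pointer(void)
	{
		return kuser_get_tls();	/* never deref 0xffff0ff0 directly */
	}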
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a4cd7af475e9..4767eb9caa78 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_lo8(void *);
+extern int __get_user_32t_8(void *);
 extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
 
 #define __GUP_CLOBBER_1	"lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -117,7 +120,7 @@ extern int __get_user_8(void *);
 #define __GUP_CLOBBER_2	"lr", "cc"
 #endif
 #define __GUP_CLOBBER_4	"lr", "cc"
-#define __GUP_CLOBBER_lo8 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8	"lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)				\
@@ -131,12 +134,30 @@ extern int __get_user_8(void *);
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_xb(__r2, __p, __e, __l, __s)			\
-	__get_user_x(__r2, __p, __e, __l, lo8)
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)		\
+	__get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
-#define __get_user_xb __get_user_x
+#define __get_user_x_32t __get_user_x
 #endif
 
+/*
+ * storing result into proper least significant word of 64bit target var,
+ * different only for big endian case where 64 bit __r2 lsw is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s)		\
+	__asm__ __volatile__ (					\
+		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
+		__asmeq("%3", "r1")				\
+		"bl	__get_user_64t_" #__s			\
+		: "=&r" (__e), "=r" (__r2)			\
+		: "0" (__p), "r" (__l)				\
+		: __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
 #define __get_user_check(x,p)					\
 	({							\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
@@ -146,17 +167,26 @@ extern int __get_user_8(void *);
 		register int __e asm("r0");			\
 		switch (sizeof(*(__p))) {			\
 		case 1:						\
-			__get_user_x(__r2, __p, __e, __l, 1);	\
+			if (sizeof((x)) >= 8)			\
+				__get_user_x_64t(__r2, __p, __e, __l, 1); \
+			else					\
+				__get_user_x(__r2, __p, __e, __l, 1); \
 			break;					\
 		case 2:						\
-			__get_user_x(__r2, __p, __e, __l, 2);	\
+			if (sizeof((x)) >= 8)			\
+				__get_user_x_64t(__r2, __p, __e, __l, 2); \
+			else					\
+				__get_user_x(__r2, __p, __e, __l, 2); \
 			break;					\
 		case 4:						\
-			__get_user_x(__r2, __p, __e, __l, 4);	\
+			if (sizeof((x)) >= 8)			\
+				__get_user_x_64t(__r2, __p, __e, __l, 4); \
+			else					\
+				__get_user_x(__r2, __p, __e, __l, 4); \
 			break;					\
 		case 8:						\
 			if (sizeof((x)) < 8)			\
-				__get_user_xb(__r2, __p, __e, __l, 4); \
+				__get_user_x_32t(__r2, __p, __e, __l, 4); \
 			else					\
 				__get_user_x(__r2, __p, __e, __l, 8); \
 			break;					\
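
Reviewer note: the __get_user_x_64t/__get_user_x_32t paths exist for get_user() calls where the destination variable is wider (or narrower) than the user object; on big-endian the least significant word of the 64-bit r2/r3 pair is r3, so a plain 32-bit load would land in the wrong half. A hedged caller sketch of the widening case, with a hypothetical helper name:

	/* Hypothetical caller: a 4-byte user load into a 64-bit variable.
	 * sizeof(*p) selects case 4 above, and sizeof(x) >= 8 routes the
	 * load through __get_user_x_64t so the value reaches the low word
	 * of the 64-bit destination on both endiannesses. */
	#include <linux/types.h>
	#include <linux/uaccess.h>

	static int load_u32_widened(u32 __user *p, u64 *out)
	{
		u64 x;

		if (get_user(x, p))
			return -EFAULT;
		*out = x;
		return 0;
	}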
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f7b450f97e68..a88671cfe1ff 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif
 
 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2c4257604513..5c4d38e32a51 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index e6a6edbec613..4bf4cce759fe 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 
 static void cpu_pmu_enable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
 	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 }
 
 static void cpu_pmu_disable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
-	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 	disable_percpu_irq(irq);
 }
 
@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 				irq);
 			return err;
 		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			err = 0;
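
Reviewer note: the cross-call handlers above run on each CPU with interrupts disabled, so anything that might sleep (such as resolving the IRQ from the platform device) has to happen before on_each_cpu(); that is the shape of the 8149/1 fix. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/smp.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	/* Runs on every CPU in atomic context: touch plain data only. */
	static void enable_pmu_irq_on_cpu(void *data)
	{
		enable_percpu_irq(*(int *)data, IRQ_TYPE_NONE);
	}

	/* Hypothetical setup path: resolve the IRQ once, in sleepable
	 * context, then broadcast just the integer to the handlers. */
	static int setup_percpu_pmu_irq(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;
		on_each_cpu(enable_pmu_irq_on_cpu, &irq, 1);
		return 0;
	}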
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 81ef686a91ca..a35f6ebbd2c2 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -334,6 +334,8 @@ void flush_thread(void)
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 	memset(&thread->fpstate, 0, sizeof(union fp_state));
 
+	flush_tls();
+
 	thread_notify(THREAD_NOTIFY_FLUSH, thread);
 }
 
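
Reviewer note: this flush_tls() call is what keeps the old image's TLS registers from leaking into the freshly exec'd one. A hypothetical user-space regression check, reusing the kuser helper from the earlier sketch; the private syscall number and the syscall() invocation are assumptions for illustration, not part of the patch:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define __ARM_NR_set_tls 0x0f0005	/* ARM private syscall, see traps.c */

	typedef unsigned long (*kuser_get_tls_t)(void);
	#define kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

	int main(int argc, char **argv)
	{
		if (argc > 1) {	/* second life, after execve */
			printf("TLS after exec: %#lx (expect 0)\n",
			       kuser_get_tls());
			return 0;
		}
		syscall(__ARM_NR_set_tls, 0xdeadbeefUL);	/* taint old image */
		execl("/proc/self/exe", argv[0], "child", (char *)NULL);
		perror("execl");
		return 1;
	}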
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 67ca8578c6d8..587fdfe1a72c 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	while (1) {
 		unsigned long temp;
 
-		/*
-		 * Barrier required between accessing protected resource and
-		 * releasing a lock for it. Legacy code might not have done
-		 * this, and we cannot determine that this is not the case
-		 * being emulated, so insert always.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
@@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	}
 
 	if (res == 0) {
-		/*
-		 * Barrier also required between acquiring a lock for a
-		 * protected resource and accessing the resource. Inserted for
-		 * same reason as above.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			swpbcounter++;
 		else
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 7b8403b76666..80f0d69205e7 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
 
 	switch (cmd) {
 	case THREAD_NOTIFY_FLUSH:
-		thread->thumbee_state = 0;
+		teehbr_write(0);
 		break;
 	case THREAD_NOTIFY_SWITCH:
 		current_thread_info()->thumbee_state = teehbr_read();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c8e4bb714944..a964c9f40f87 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 {
-	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
 	if ((no >> 16) != (__ARM_NR_BASE>> 16))
@@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		return regs->ARM_r0;
 
 	case NR(set_tls):
-		thread->tp_value[0] = regs->ARM_r0;
-		if (tls_emu)
-			return 0;
-		if (has_tls_reg) {
-			asm ("mcr p15, 0, %0, c13, c0, 3"
-				: : "r" (regs->ARM_r0));
-		} else {
-			/*
-			 * User space must never try to access this directly.
-			 * Expect your app to break eventually if you do so.
-			 * The user helper at 0xffff0fe0 must be used instead.
-			 * (see entry-armv.S for details)
-			 */
-			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-		}
+		set_tls(regs->ARM_r0);
 		return 0;
 
 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 938600098b88..8ecfd15c3a02 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -80,7 +80,7 @@ ENTRY(__get_user_8)
 ENDPROC(__get_user_8)
 
 #ifdef __ARMEB__
-ENTRY(__get_user_lo8)
+ENTRY(__get_user_32t_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad
 #ifdef CONFIG_CPU_USE_DOMAINS
 	add	r0, r0, #4
@@ -90,7 +90,37 @@ ENTRY(__get_user_lo8)
 #endif
 	mov	r0, #0
 	ret	lr
-ENDPROC(__get_user_lo8)
+ENDPROC(__get_user_32t_8)
+
+ENTRY(__get_user_64t_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad8
+8: TUSER(ldrb)	r3, [r0]
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_1)
+
+ENTRY(__get_user_64t_2)
+	check_uaccess r0, 2, r1, r2, __get_user_bad8
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+9:	ldrbt	r3, [r0], #1
+10:	ldrbt	rb, [r0], #0
+#else
+rb	.req	r0
+9:	ldrb	r3, [r0]
+10:	ldrb	rb, [r0, #1]
+#endif
+	orr	r3, rb, r3, lsl #8
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_2)
+
+ENTRY(__get_user_64t_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad8
+11: TUSER(ldr)	r3, [r0]
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8)
 	.long	6b, __get_user_bad8
 #ifdef __ARMEB__
 	.long	7b, __get_user_bad
+	.long	8b, __get_user_bad8
+	.long	9b, __get_user_bad8
+	.long	10b, __get_user_bad8
+	.long	11b, __get_user_bad8
 #endif
 .popsection
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 1a24e9232ec8..b64e67c7f176 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext)
 	mov	\tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
 	mov	\ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT		@ lower bits
 	addls	\ttbr1, \ttbr1, #TTBR1_OFFSET
-	adcls	\tmp, \tmp, #0
 	mcrr	p15, 1, \ttbr1, \tmp, c2			@ load TTBR1
 	mov	\tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
 	mov	\ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT		@ lower bits