author     David S. Miller <davem@davemloft.net>  2014-09-23 12:09:27 -0400
committer  David S. Miller <davem@davemloft.net>  2014-09-23 12:09:27 -0400
commit     1f6d80358dc9bbbeb56cb43384fa11fd645d9289 (patch)
tree       152bfa5165292a8e4f06d536b6d222a68480e573 /arch
parent     a2aeb02a8e6a9fef397c344245a54eeae67341f6 (diff)
parent     98f75b8291a89ba6bf73e322ee467ce0bfeb91c1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	arch/mips/net/bpf_jit.c
	drivers/net/can/flexcan.c
Both the flexcan and MIPS bpf_jit conflicts were cases of simple
overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
91 files changed, 1034 insertions, 398 deletions
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..4361777a08d8 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -93,7 +93,7 @@ | |||
93 | }; | 93 | }; |
94 | 94 | ||
95 | tv: connector { | 95 | tv: connector { |
96 | compatible = "composite-connector"; | 96 | compatible = "composite-video-connector"; |
97 | label = "tv"; | 97 | label = "tv"; |
98 | 98 | ||
99 | port { | 99 | port { |
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 83259b873333..36172adda9d0 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -1,6 +1,9 @@ | |||
1 | #ifndef __ASMARM_TLS_H | 1 | #ifndef __ASMARM_TLS_H |
2 | #define __ASMARM_TLS_H | 2 | #define __ASMARM_TLS_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | ||
5 | #include <asm/thread_info.h> | ||
6 | |||
4 | #ifdef __ASSEMBLY__ | 7 | #ifdef __ASSEMBLY__ |
5 | #include <asm/asm-offsets.h> | 8 | #include <asm/asm-offsets.h> |
6 | .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 | 9 | .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 |
@@ -50,6 +53,47 @@ | |||
50 | #endif | 53 | #endif |
51 | 54 | ||
52 | #ifndef __ASSEMBLY__ | 55 | #ifndef __ASSEMBLY__ |
56 | |||
57 | static inline void set_tls(unsigned long val) | ||
58 | { | ||
59 | struct thread_info *thread; | ||
60 | |||
61 | thread = current_thread_info(); | ||
62 | |||
63 | thread->tp_value[0] = val; | ||
64 | |||
65 | /* | ||
66 | * This code runs with preemption enabled and therefore must | ||
67 | * be reentrant with respect to switch_tls. | ||
68 | * | ||
69 | * We need to ensure ordering between the shadow state and the | ||
70 | * hardware state, so that we don't corrupt the hardware state | ||
71 | * with a stale shadow state during context switch. | ||
72 | * | ||
73 | * If we're preempted here, switch_tls will load TPIDRURO from | ||
74 | * thread_info upon resuming execution and the following mcr | ||
75 | * is merely redundant. | ||
76 | */ | ||
77 | barrier(); | ||
78 | |||
79 | if (!tls_emu) { | ||
80 | if (has_tls_reg) { | ||
81 | asm("mcr p15, 0, %0, c13, c0, 3" | ||
82 | : : "r" (val)); | ||
83 | } else { | ||
84 | /* | ||
85 | * User space must never try to access this | ||
86 | * directly. Expect your app to break | ||
87 | * eventually if you do so. The user helper | ||
88 | * at 0xffff0fe0 must be used instead. (see | ||
89 | * entry-armv.S for details) | ||
90 | */ | ||
91 | *((unsigned int *)0xffff0ff0) = val; | ||
92 | } | ||
93 | |||
94 | } | ||
95 | } | ||
96 | |||
53 | static inline unsigned long get_tpuser(void) | 97 | static inline unsigned long get_tpuser(void) |
54 | { | 98 | { |
55 | unsigned long reg = 0; | 99 | unsigned long reg = 0; |
@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void) | |||
59 | 103 | ||
60 | return reg; | 104 | return reg; |
61 | } | 105 | } |
106 | |||
107 | static inline void set_tpuser(unsigned long val) | ||
108 | { | ||
109 | /* Since TPIDRURW is fully context-switched (unlike TPIDRURO), | ||
110 | * we need not update thread_info. | ||
111 | */ | ||
112 | if (has_tls_reg && !tls_emu) { | ||
113 | asm("mcr p15, 0, %0, c13, c0, 2" | ||
114 | : : "r" (val)); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | static inline void flush_tls(void) | ||
119 | { | ||
120 | set_tls(0); | ||
121 | set_tpuser(0); | ||
122 | } | ||
123 | |||
62 | #endif | 124 | #endif |
63 | #endif /* __ASMARM_TLS_H */ | 125 | #endif /* __ASMARM_TLS_H */ |
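
For context on the new helpers above (not part of the patch): set_tls() updates the shadow copy in thread_info first, and only after a compiler barrier writes TPIDRURO (or, on CPUs without the TLS register, the kuser word at 0xffff0ff0), so being preempted between the two stores at worst makes the later mcr redundant. The sketch below is an illustrative user-space counterpart only; it assumes 32-bit ARM Linux with kuser helpers enabled and reads the value back through the helper at 0xffff0fe0 mentioned in the comment.

/*
 * Illustrative user-space counterpart, not part of the patch.
 * Assumes 32-bit ARM Linux with CONFIG_KUSER_HELPERS: the helper at
 * 0xffff0fe0 returns whatever the kernel last installed via set_tls().
 */
#include <stdio.h>

typedef unsigned long (*kuser_get_tls_t)(void);
#define kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)

int main(void)
{
	unsigned long tp = kuser_get_tls();	/* TPIDRURO, or the 0xffff0ff0 word */

	printf("TLS pointer: %#lx\n", tp);
	return 0;
}
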
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a4cd7af475e9..4767eb9caa78 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs) | |||
107 | extern int __get_user_1(void *); | 107 | extern int __get_user_1(void *); |
108 | extern int __get_user_2(void *); | 108 | extern int __get_user_2(void *); |
109 | extern int __get_user_4(void *); | 109 | extern int __get_user_4(void *); |
110 | extern int __get_user_lo8(void *); | 110 | extern int __get_user_32t_8(void *); |
111 | extern int __get_user_8(void *); | 111 | extern int __get_user_8(void *); |
112 | extern int __get_user_64t_1(void *); | ||
113 | extern int __get_user_64t_2(void *); | ||
114 | extern int __get_user_64t_4(void *); | ||
112 | 115 | ||
113 | #define __GUP_CLOBBER_1 "lr", "cc" | 116 | #define __GUP_CLOBBER_1 "lr", "cc" |
114 | #ifdef CONFIG_CPU_USE_DOMAINS | 117 | #ifdef CONFIG_CPU_USE_DOMAINS |
@@ -117,7 +120,7 @@ extern int __get_user_8(void *); | |||
117 | #define __GUP_CLOBBER_2 "lr", "cc" | 120 | #define __GUP_CLOBBER_2 "lr", "cc" |
118 | #endif | 121 | #endif |
119 | #define __GUP_CLOBBER_4 "lr", "cc" | 122 | #define __GUP_CLOBBER_4 "lr", "cc" |
120 | #define __GUP_CLOBBER_lo8 "lr", "cc" | 123 | #define __GUP_CLOBBER_32t_8 "lr", "cc" |
121 | #define __GUP_CLOBBER_8 "lr", "cc" | 124 | #define __GUP_CLOBBER_8 "lr", "cc" |
122 | 125 | ||
123 | #define __get_user_x(__r2,__p,__e,__l,__s) \ | 126 | #define __get_user_x(__r2,__p,__e,__l,__s) \ |
@@ -131,12 +134,30 @@ extern int __get_user_8(void *); | |||
131 | 134 | ||
132 | /* narrowing a double-word get into a single 32bit word register: */ | 135 | /* narrowing a double-word get into a single 32bit word register: */ |
133 | #ifdef __ARMEB__ | 136 | #ifdef __ARMEB__ |
134 | #define __get_user_xb(__r2, __p, __e, __l, __s) \ | 137 | #define __get_user_x_32t(__r2, __p, __e, __l, __s) \ |
135 | __get_user_x(__r2, __p, __e, __l, lo8) | 138 | __get_user_x(__r2, __p, __e, __l, 32t_8) |
136 | #else | 139 | #else |
137 | #define __get_user_xb __get_user_x | 140 | #define __get_user_x_32t __get_user_x |
138 | #endif | 141 | #endif |
139 | 142 | ||
143 | /* | ||
144 | * storing result into proper least significant word of 64bit target var, | ||
145 | * different only for big endian case where 64 bit __r2 lsw is r3: | ||
146 | */ | ||
147 | #ifdef __ARMEB__ | ||
148 | #define __get_user_x_64t(__r2, __p, __e, __l, __s) \ | ||
149 | __asm__ __volatile__ ( \ | ||
150 | __asmeq("%0", "r0") __asmeq("%1", "r2") \ | ||
151 | __asmeq("%3", "r1") \ | ||
152 | "bl __get_user_64t_" #__s \ | ||
153 | : "=&r" (__e), "=r" (__r2) \ | ||
154 | : "0" (__p), "r" (__l) \ | ||
155 | : __GUP_CLOBBER_##__s) | ||
156 | #else | ||
157 | #define __get_user_x_64t __get_user_x | ||
158 | #endif | ||
159 | |||
160 | |||
140 | #define __get_user_check(x,p) \ | 161 | #define __get_user_check(x,p) \ |
141 | ({ \ | 162 | ({ \ |
142 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ | 163 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ |
@@ -146,17 +167,26 @@ extern int __get_user_8(void *); | |||
146 | register int __e asm("r0"); \ | 167 | register int __e asm("r0"); \ |
147 | switch (sizeof(*(__p))) { \ | 168 | switch (sizeof(*(__p))) { \ |
148 | case 1: \ | 169 | case 1: \ |
149 | __get_user_x(__r2, __p, __e, __l, 1); \ | 170 | if (sizeof((x)) >= 8) \ |
171 | __get_user_x_64t(__r2, __p, __e, __l, 1); \ | ||
172 | else \ | ||
173 | __get_user_x(__r2, __p, __e, __l, 1); \ | ||
150 | break; \ | 174 | break; \ |
151 | case 2: \ | 175 | case 2: \ |
152 | __get_user_x(__r2, __p, __e, __l, 2); \ | 176 | if (sizeof((x)) >= 8) \ |
177 | __get_user_x_64t(__r2, __p, __e, __l, 2); \ | ||
178 | else \ | ||
179 | __get_user_x(__r2, __p, __e, __l, 2); \ | ||
153 | break; \ | 180 | break; \ |
154 | case 4: \ | 181 | case 4: \ |
155 | __get_user_x(__r2, __p, __e, __l, 4); \ | 182 | if (sizeof((x)) >= 8) \ |
183 | __get_user_x_64t(__r2, __p, __e, __l, 4); \ | ||
184 | else \ | ||
185 | __get_user_x(__r2, __p, __e, __l, 4); \ | ||
156 | break; \ | 186 | break; \ |
157 | case 8: \ | 187 | case 8: \ |
158 | if (sizeof((x)) < 8) \ | 188 | if (sizeof((x)) < 8) \ |
159 | __get_user_xb(__r2, __p, __e, __l, 4); \ | 189 | __get_user_x_32t(__r2, __p, __e, __l, 4); \ |
160 | else \ | 190 | else \ |
161 | __get_user_x(__r2, __p, __e, __l, 8); \ | 191 | __get_user_x(__r2, __p, __e, __l, 8); \ |
162 | break; \ | 192 | break; \ |
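
To make the compile-time dispatch above easier to follow, here is a rough user-space analogue; the fetch_* helpers are invented and only stand in for __get_user_x / __get_user_x_64t. The point of the change: a get_user() whose destination is 64 bits wide but whose __user pointer is 1, 2 or 4 bytes now takes the *_64t path, which on big-endian kernels calls the new __get_user_64t_* routines so the value lands in the least-significant half of the 64-bit target; little-endian builds keep using plain __get_user_x.

/*
 * Toy model of the dispatch above; fetch_32() / fetch_32_into_64() are
 * invented stand-ins for __get_user_x / __get_user_x_64t. The sizeof()
 * test is a compile-time constant, so only one call is ever executed.
 */
#include <stdint.h>
#include <stdio.h>

static int fetch_32(uint32_t *dst, const uint32_t *src)
{
	*dst = *src;
	return 0;
}

static int fetch_32_into_64(uint64_t *dst, const uint32_t *src)
{
	*dst = *src;		/* zero-extends into the full 64-bit target */
	return 0;
}

#define fetch_user(x, p)						\
	(sizeof(x) >= 8							\
		? fetch_32_into_64((uint64_t *)(void *)&(x), (p))	\
		: fetch_32((uint32_t *)(void *)&(x), (p)))

int main(void)
{
	uint32_t src = 0x12345678, narrow = 0;
	uint64_t wide = ~0ULL;

	fetch_user(wide, &src);		/* 64-bit destination: *_into_64 path */
	fetch_user(narrow, &src);	/* 32-bit destination: plain path */
	printf("wide=%llx narrow=%x\n", (unsigned long long)wide, narrow);
	return 0;
}
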
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | |||
26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | 26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); |
27 | } | 27 | } |
28 | 28 | ||
29 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 29 | void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
30 | size_t size, enum dma_data_direction dir, | 30 | size_t size, enum dma_data_direction dir, |
31 | struct dma_attrs *attrs) | 31 | struct dma_attrs *attrs); |
32 | { | ||
33 | if (__generic_dma_ops(hwdev)->unmap_page) | ||
34 | __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
35 | } | ||
36 | 32 | ||
37 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | 33 | void xen_dma_sync_single_for_cpu(struct device *hwdev, |
38 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 34 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
39 | { | 35 | |
40 | if (__generic_dma_ops(hwdev)->sync_single_for_cpu) | 36 | void xen_dma_sync_single_for_device(struct device *hwdev, |
41 | __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | 37 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
42 | } | ||
43 | 38 | ||
44 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | ||
45 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
46 | { | ||
47 | if (__generic_dma_ops(hwdev)->sync_single_for_device) | ||
48 | __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
49 | } | ||
50 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ | 39 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ |
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr { | |||
33 | #define INVALID_P2M_ENTRY (~0UL) | 33 | #define INVALID_P2M_ENTRY (~0UL) |
34 | 34 | ||
35 | unsigned long __pfn_to_mfn(unsigned long pfn); | 35 | unsigned long __pfn_to_mfn(unsigned long pfn); |
36 | unsigned long __mfn_to_pfn(unsigned long mfn); | ||
37 | extern struct rb_root phys_to_mach; | 36 | extern struct rb_root phys_to_mach; |
38 | 37 | ||
39 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | 38 | static inline unsigned long pfn_to_mfn(unsigned long pfn) |
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) | |||
51 | 50 | ||
52 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | 51 | static inline unsigned long mfn_to_pfn(unsigned long mfn) |
53 | { | 52 | { |
54 | unsigned long pfn; | ||
55 | |||
56 | if (phys_to_mach.rb_node != NULL) { | ||
57 | pfn = __mfn_to_pfn(mfn); | ||
58 | if (pfn != INVALID_P2M_ENTRY) | ||
59 | return pfn; | ||
60 | } | ||
61 | |||
62 | return mfn; | 53 | return mfn; |
63 | } | 54 | } |
64 | 55 | ||
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f7b450f97e68..a88671cfe1ff 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user); | |||
98 | EXPORT_SYMBOL(__get_user_1); | 98 | EXPORT_SYMBOL(__get_user_1); |
99 | EXPORT_SYMBOL(__get_user_2); | 99 | EXPORT_SYMBOL(__get_user_2); |
100 | EXPORT_SYMBOL(__get_user_4); | 100 | EXPORT_SYMBOL(__get_user_4); |
101 | EXPORT_SYMBOL(__get_user_8); | ||
102 | |||
103 | #ifdef __ARMEB__ | ||
104 | EXPORT_SYMBOL(__get_user_64t_1); | ||
105 | EXPORT_SYMBOL(__get_user_64t_2); | ||
106 | EXPORT_SYMBOL(__get_user_64t_4); | ||
107 | EXPORT_SYMBOL(__get_user_32t_8); | ||
108 | #endif | ||
101 | 109 | ||
102 | EXPORT_SYMBOL(__put_user_1); | 110 | EXPORT_SYMBOL(__put_user_1); |
103 | EXPORT_SYMBOL(__put_user_2); | 111 | EXPORT_SYMBOL(__put_user_2); |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2c4257604513..5c4d38e32a51 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc) | |||
175 | c = irq_data_get_irq_chip(d); | 175 | c = irq_data_get_irq_chip(d); |
176 | if (!c->irq_set_affinity) | 176 | if (!c->irq_set_affinity) |
177 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | 177 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); |
178 | else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) | 178 | else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) |
179 | cpumask_copy(d->affinity, affinity); | 179 | cpumask_copy(d->affinity, affinity); |
180 | 180 | ||
181 | return ret; | 181 | return ret; |
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index e6a6edbec613..4bf4cce759fe 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) | |||
76 | 76 | ||
77 | static void cpu_pmu_enable_percpu_irq(void *data) | 77 | static void cpu_pmu_enable_percpu_irq(void *data) |
78 | { | 78 | { |
79 | struct arm_pmu *cpu_pmu = data; | 79 | int irq = *(int *)data; |
80 | struct platform_device *pmu_device = cpu_pmu->plat_device; | ||
81 | int irq = platform_get_irq(pmu_device, 0); | ||
82 | 80 | ||
83 | enable_percpu_irq(irq, IRQ_TYPE_NONE); | 81 | enable_percpu_irq(irq, IRQ_TYPE_NONE); |
84 | cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs); | ||
85 | } | 82 | } |
86 | 83 | ||
87 | static void cpu_pmu_disable_percpu_irq(void *data) | 84 | static void cpu_pmu_disable_percpu_irq(void *data) |
88 | { | 85 | { |
89 | struct arm_pmu *cpu_pmu = data; | 86 | int irq = *(int *)data; |
90 | struct platform_device *pmu_device = cpu_pmu->plat_device; | ||
91 | int irq = platform_get_irq(pmu_device, 0); | ||
92 | 87 | ||
93 | cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs); | ||
94 | disable_percpu_irq(irq); | 88 | disable_percpu_irq(irq); |
95 | } | 89 | } |
96 | 90 | ||
@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) | |||
103 | 97 | ||
104 | irq = platform_get_irq(pmu_device, 0); | 98 | irq = platform_get_irq(pmu_device, 0); |
105 | if (irq >= 0 && irq_is_percpu(irq)) { | 99 | if (irq >= 0 && irq_is_percpu(irq)) { |
106 | on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1); | 100 | on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); |
107 | free_percpu_irq(irq, &percpu_pmu); | 101 | free_percpu_irq(irq, &percpu_pmu); |
108 | } else { | 102 | } else { |
109 | for (i = 0; i < irqs; ++i) { | 103 | for (i = 0; i < irqs; ++i) { |
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) | |||
138 | irq); | 132 | irq); |
139 | return err; | 133 | return err; |
140 | } | 134 | } |
141 | on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1); | 135 | on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); |
142 | } else { | 136 | } else { |
143 | for (i = 0; i < irqs; ++i) { | 137 | for (i = 0; i < irqs; ++i) { |
144 | err = 0; | 138 | err = 0; |
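
A short stand-alone sketch of the callback pattern adopted above (on_each_cpu_demo() and the other names are invented): the IRQ number is passed by address through the void * argument, which is safe here because the real on_each_cpu(..., 1) waits for every CPU to finish before the caller's stack frame, and hence &irq, goes away.

/*
 * User-space stand-in for the on_each_cpu(..., &irq, 1) pattern above.
 * on_each_cpu_demo() is invented for illustration; the real call waits
 * for completion, which is what makes the &irq stack pointer safe.
 */
#include <stdio.h>

static void enable_percpu_irq_demo(void *data)
{
	int irq = *(int *)data;		/* same unpacking as the kernel callback */

	printf("enabling per-CPU IRQ %d on this CPU\n", irq);
}

static void on_each_cpu_demo(void (*fn)(void *), void *arg, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++)	/* stand-in for cross-CPU IPIs */
		fn(arg);
}

int main(void)
{
	int irq = 42;

	on_each_cpu_demo(enable_percpu_irq_demo, &irq, 4);
	return 0;
}
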
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 81ef686a91ca..a35f6ebbd2c2 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -334,6 +334,8 @@ void flush_thread(void) | |||
334 | memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); | 334 | memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); |
335 | memset(&thread->fpstate, 0, sizeof(union fp_state)); | 335 | memset(&thread->fpstate, 0, sizeof(union fp_state)); |
336 | 336 | ||
337 | flush_tls(); | ||
338 | |||
337 | thread_notify(THREAD_NOTIFY_FLUSH, thread); | 339 | thread_notify(THREAD_NOTIFY_FLUSH, thread); |
338 | } | 340 | } |
339 | 341 | ||
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 67ca8578c6d8..587fdfe1a72c 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, | |||
142 | while (1) { | 142 | while (1) { |
143 | unsigned long temp; | 143 | unsigned long temp; |
144 | 144 | ||
145 | /* | ||
146 | * Barrier required between accessing protected resource and | ||
147 | * releasing a lock for it. Legacy code might not have done | ||
148 | * this, and we cannot determine that this is not the case | ||
149 | * being emulated, so insert always. | ||
150 | */ | ||
151 | smp_mb(); | ||
152 | |||
153 | if (type == TYPE_SWPB) | 145 | if (type == TYPE_SWPB) |
154 | __user_swpb_asm(*data, address, res, temp); | 146 | __user_swpb_asm(*data, address, res, temp); |
155 | else | 147 | else |
@@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data, | |||
162 | } | 154 | } |
163 | 155 | ||
164 | if (res == 0) { | 156 | if (res == 0) { |
165 | /* | ||
166 | * Barrier also required between acquiring a lock for a | ||
167 | * protected resource and accessing the resource. Inserted for | ||
168 | * same reason as above. | ||
169 | */ | ||
170 | smp_mb(); | ||
171 | |||
172 | if (type == TYPE_SWPB) | 157 | if (type == TYPE_SWPB) |
173 | swpbcounter++; | 158 | swpbcounter++; |
174 | else | 159 | else |
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 7b8403b76666..80f0d69205e7 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void | |||
45 | 45 | ||
46 | switch (cmd) { | 46 | switch (cmd) { |
47 | case THREAD_NOTIFY_FLUSH: | 47 | case THREAD_NOTIFY_FLUSH: |
48 | thread->thumbee_state = 0; | 48 | teehbr_write(0); |
49 | break; | 49 | break; |
50 | case THREAD_NOTIFY_SWITCH: | 50 | case THREAD_NOTIFY_SWITCH: |
51 | current_thread_info()->thumbee_state = teehbr_read(); | 51 | current_thread_info()->thumbee_state = teehbr_read(); |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c8e4bb714944..a964c9f40f87 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags) | |||
581 | #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) | 581 | #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) |
582 | asmlinkage int arm_syscall(int no, struct pt_regs *regs) | 582 | asmlinkage int arm_syscall(int no, struct pt_regs *regs) |
583 | { | 583 | { |
584 | struct thread_info *thread = current_thread_info(); | ||
585 | siginfo_t info; | 584 | siginfo_t info; |
586 | 585 | ||
587 | if ((no >> 16) != (__ARM_NR_BASE>> 16)) | 586 | if ((no >> 16) != (__ARM_NR_BASE>> 16)) |
@@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
632 | return regs->ARM_r0; | 631 | return regs->ARM_r0; |
633 | 632 | ||
634 | case NR(set_tls): | 633 | case NR(set_tls): |
635 | thread->tp_value[0] = regs->ARM_r0; | 634 | set_tls(regs->ARM_r0); |
636 | if (tls_emu) | ||
637 | return 0; | ||
638 | if (has_tls_reg) { | ||
639 | asm ("mcr p15, 0, %0, c13, c0, 3" | ||
640 | : : "r" (regs->ARM_r0)); | ||
641 | } else { | ||
642 | /* | ||
643 | * User space must never try to access this directly. | ||
644 | * Expect your app to break eventually if you do so. | ||
645 | * The user helper at 0xffff0fe0 must be used instead. | ||
646 | * (see entry-armv.S for details) | ||
647 | */ | ||
648 | *((unsigned int *)0xffff0ff0) = regs->ARM_r0; | ||
649 | } | ||
650 | return 0; | 635 | return 0; |
651 | 636 | ||
652 | #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG | 637 | #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG |
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 938600098b88..8ecfd15c3a02 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -80,7 +80,7 @@ ENTRY(__get_user_8) | |||
80 | ENDPROC(__get_user_8) | 80 | ENDPROC(__get_user_8) |
81 | 81 | ||
82 | #ifdef __ARMEB__ | 82 | #ifdef __ARMEB__ |
83 | ENTRY(__get_user_lo8) | 83 | ENTRY(__get_user_32t_8) |
84 | check_uaccess r0, 8, r1, r2, __get_user_bad | 84 | check_uaccess r0, 8, r1, r2, __get_user_bad |
85 | #ifdef CONFIG_CPU_USE_DOMAINS | 85 | #ifdef CONFIG_CPU_USE_DOMAINS |
86 | add r0, r0, #4 | 86 | add r0, r0, #4 |
@@ -90,7 +90,37 @@ ENTRY(__get_user_lo8) | |||
90 | #endif | 90 | #endif |
91 | mov r0, #0 | 91 | mov r0, #0 |
92 | ret lr | 92 | ret lr |
93 | ENDPROC(__get_user_lo8) | 93 | ENDPROC(__get_user_32t_8) |
94 | |||
95 | ENTRY(__get_user_64t_1) | ||
96 | check_uaccess r0, 1, r1, r2, __get_user_bad8 | ||
97 | 8: TUSER(ldrb) r3, [r0] | ||
98 | mov r0, #0 | ||
99 | ret lr | ||
100 | ENDPROC(__get_user_64t_1) | ||
101 | |||
102 | ENTRY(__get_user_64t_2) | ||
103 | check_uaccess r0, 2, r1, r2, __get_user_bad8 | ||
104 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
105 | rb .req ip | ||
106 | 9: ldrbt r3, [r0], #1 | ||
107 | 10: ldrbt rb, [r0], #0 | ||
108 | #else | ||
109 | rb .req r0 | ||
110 | 9: ldrb r3, [r0] | ||
111 | 10: ldrb rb, [r0, #1] | ||
112 | #endif | ||
113 | orr r3, rb, r3, lsl #8 | ||
114 | mov r0, #0 | ||
115 | ret lr | ||
116 | ENDPROC(__get_user_64t_2) | ||
117 | |||
118 | ENTRY(__get_user_64t_4) | ||
119 | check_uaccess r0, 4, r1, r2, __get_user_bad8 | ||
120 | 11: TUSER(ldr) r3, [r0] | ||
121 | mov r0, #0 | ||
122 | ret lr | ||
123 | ENDPROC(__get_user_64t_4) | ||
94 | #endif | 124 | #endif |
95 | 125 | ||
96 | __get_user_bad8: | 126 | __get_user_bad8: |
@@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8) | |||
111 | .long 6b, __get_user_bad8 | 141 | .long 6b, __get_user_bad8 |
112 | #ifdef __ARMEB__ | 142 | #ifdef __ARMEB__ |
113 | .long 7b, __get_user_bad | 143 | .long 7b, __get_user_bad |
144 | .long 8b, __get_user_bad8 | ||
145 | .long 9b, __get_user_bad8 | ||
146 | .long 10b, __get_user_bad8 | ||
147 | .long 11b, __get_user_bad8 | ||
114 | #endif | 148 | #endif |
115 | .popsection | 149 | .popsection |
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 1a24e9232ec8..b64e67c7f176 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
146 | mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits | 146 | mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
147 | mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits | 147 | mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits |
148 | addls \ttbr1, \ttbr1, #TTBR1_OFFSET | 148 | addls \ttbr1, \ttbr1, #TTBR1_OFFSET |
149 | adcls \tmp, \tmp, #0 | ||
150 | mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1 | 149 | mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1 |
151 | mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits | 150 | mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
152 | mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits | 151 | mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits |
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..1f85bfe6b470 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 98544c5f86e9..0e15f011f9c8 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void) | |||
260 | xen_domain_type = XEN_HVM_DOMAIN; | 260 | xen_domain_type = XEN_HVM_DOMAIN; |
261 | 261 | ||
262 | xen_setup_features(); | 262 | xen_setup_features(); |
263 | |||
264 | if (!xen_feature(XENFEAT_grant_map_identity)) { | ||
265 | pr_warn("Please upgrade your Xen.\n" | ||
266 | "If your platform has any non-coherent DMA devices, they won't work properly.\n"); | ||
267 | } | ||
268 | |||
263 | if (xen_feature(XENFEAT_dom0)) | 269 | if (xen_feature(XENFEAT_dom0)) |
264 | xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; | 270 | xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; |
265 | else | 271 | else |
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644
index 000000000000..3b99860fd7ae
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@ | |||
1 | #include <linux/cpu.h> | ||
2 | #include <linux/dma-mapping.h> | ||
3 | #include <linux/gfp.h> | ||
4 | #include <linux/highmem.h> | ||
5 | |||
6 | #include <xen/features.h> | ||
7 | |||
8 | static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt); | ||
9 | static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep); | ||
10 | |||
11 | static int alloc_xen_mm32_scratch_page(int cpu) | ||
12 | { | ||
13 | struct page *page; | ||
14 | unsigned long virt; | ||
15 | pmd_t *pmdp; | ||
16 | pte_t *ptep; | ||
17 | |||
18 | if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL) | ||
19 | return 0; | ||
20 | |||
21 | page = alloc_page(GFP_KERNEL); | ||
22 | if (page == NULL) { | ||
23 | pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu); | ||
24 | return -ENOMEM; | ||
25 | } | ||
26 | |||
27 | virt = (unsigned long)__va(page_to_phys(page)); | ||
28 | pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); | ||
29 | ptep = pte_offset_kernel(pmdp, virt); | ||
30 | |||
31 | per_cpu(xen_mm32_scratch_virt, cpu) = virt; | ||
32 | per_cpu(xen_mm32_scratch_ptep, cpu) = ptep; | ||
33 | |||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | static int xen_mm32_cpu_notify(struct notifier_block *self, | ||
38 | unsigned long action, void *hcpu) | ||
39 | { | ||
40 | int cpu = (long)hcpu; | ||
41 | switch (action) { | ||
42 | case CPU_UP_PREPARE: | ||
43 | if (alloc_xen_mm32_scratch_page(cpu)) | ||
44 | return NOTIFY_BAD; | ||
45 | break; | ||
46 | default: | ||
47 | break; | ||
48 | } | ||
49 | return NOTIFY_OK; | ||
50 | } | ||
51 | |||
52 | static struct notifier_block xen_mm32_cpu_notifier = { | ||
53 | .notifier_call = xen_mm32_cpu_notify, | ||
54 | }; | ||
55 | |||
56 | static void* xen_mm32_remap_page(dma_addr_t handle) | ||
57 | { | ||
58 | unsigned long virt = get_cpu_var(xen_mm32_scratch_virt); | ||
59 | pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep); | ||
60 | |||
61 | *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL); | ||
62 | local_flush_tlb_kernel_page(virt); | ||
63 | |||
64 | return (void*)virt; | ||
65 | } | ||
66 | |||
67 | static void xen_mm32_unmap(void *vaddr) | ||
68 | { | ||
69 | put_cpu_var(xen_mm32_scratch_virt); | ||
70 | } | ||
71 | |||
72 | |||
73 | /* functions called by SWIOTLB */ | ||
74 | |||
75 | static void dma_cache_maint(dma_addr_t handle, unsigned long offset, | ||
76 | size_t size, enum dma_data_direction dir, | ||
77 | void (*op)(const void *, size_t, int)) | ||
78 | { | ||
79 | unsigned long pfn; | ||
80 | size_t left = size; | ||
81 | |||
82 | pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE; | ||
83 | offset %= PAGE_SIZE; | ||
84 | |||
85 | do { | ||
86 | size_t len = left; | ||
87 | void *vaddr; | ||
88 | |||
89 | if (!pfn_valid(pfn)) | ||
90 | { | ||
91 | /* Cannot map the page, we don't know its physical address. | ||
92 | * Return and hope for the best */ | ||
93 | if (!xen_feature(XENFEAT_grant_map_identity)) | ||
94 | return; | ||
95 | vaddr = xen_mm32_remap_page(handle) + offset; | ||
96 | op(vaddr, len, dir); | ||
97 | xen_mm32_unmap(vaddr - offset); | ||
98 | } else { | ||
99 | struct page *page = pfn_to_page(pfn); | ||
100 | |||
101 | if (PageHighMem(page)) { | ||
102 | if (len + offset > PAGE_SIZE) | ||
103 | len = PAGE_SIZE - offset; | ||
104 | |||
105 | if (cache_is_vipt_nonaliasing()) { | ||
106 | vaddr = kmap_atomic(page); | ||
107 | op(vaddr + offset, len, dir); | ||
108 | kunmap_atomic(vaddr); | ||
109 | } else { | ||
110 | vaddr = kmap_high_get(page); | ||
111 | if (vaddr) { | ||
112 | op(vaddr + offset, len, dir); | ||
113 | kunmap_high(page); | ||
114 | } | ||
115 | } | ||
116 | } else { | ||
117 | vaddr = page_address(page) + offset; | ||
118 | op(vaddr, len, dir); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | offset = 0; | ||
123 | pfn++; | ||
124 | left -= len; | ||
125 | } while (left); | ||
126 | } | ||
127 | |||
128 | static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle, | ||
129 | size_t size, enum dma_data_direction dir) | ||
130 | { | ||
131 | /* Cannot use __dma_page_dev_to_cpu because we don't have a | ||
132 | * struct page for handle */ | ||
133 | |||
134 | if (dir != DMA_TO_DEVICE) | ||
135 | outer_inv_range(handle, handle + size); | ||
136 | |||
137 | dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area); | ||
138 | } | ||
139 | |||
140 | static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, | ||
141 | size_t size, enum dma_data_direction dir) | ||
142 | { | ||
143 | |||
144 | dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area); | ||
145 | |||
146 | if (dir == DMA_FROM_DEVICE) { | ||
147 | outer_inv_range(handle, handle + size); | ||
148 | } else { | ||
149 | outer_clean_range(handle, handle + size); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
154 | size_t size, enum dma_data_direction dir, | ||
155 | struct dma_attrs *attrs) | ||
156 | |||
157 | { | ||
158 | if (!__generic_dma_ops(hwdev)->unmap_page) | ||
159 | return; | ||
160 | if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
161 | return; | ||
162 | |||
163 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); | ||
164 | } | ||
165 | |||
166 | void xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
167 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
168 | { | ||
169 | if (!__generic_dma_ops(hwdev)->sync_single_for_cpu) | ||
170 | return; | ||
171 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); | ||
172 | } | ||
173 | |||
174 | void xen_dma_sync_single_for_device(struct device *hwdev, | ||
175 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
176 | { | ||
177 | if (!__generic_dma_ops(hwdev)->sync_single_for_device) | ||
178 | return; | ||
179 | __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir); | ||
180 | } | ||
181 | |||
182 | int __init xen_mm32_init(void) | ||
183 | { | ||
184 | int cpu; | ||
185 | |||
186 | if (!xen_initial_domain()) | ||
187 | return 0; | ||
188 | |||
189 | register_cpu_notifier(&xen_mm32_cpu_notifier); | ||
190 | get_online_cpus(); | ||
191 | for_each_online_cpu(cpu) { | ||
192 | if (alloc_xen_mm32_scratch_page(cpu)) { | ||
193 | put_online_cpus(); | ||
194 | unregister_cpu_notifier(&xen_mm32_cpu_notifier); | ||
195 | return -ENOMEM; | ||
196 | } | ||
197 | } | ||
198 | put_online_cpus(); | ||
199 | |||
200 | return 0; | ||
201 | } | ||
202 | arch_initcall(xen_mm32_init); | ||
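
For context (illustrative only, not part of the patch): dma_cache_maint() above walks the DMA range page by page because highmem and foreign pages have to be mapped one at a time before the cache maintenance op can run on them, and only the first page carries the sub-page offset. The sketch below models just that chunking, with printf() standing in for the maintenance op and a 4K page size assumed.

/*
 * Page-by-page chunking as in dma_cache_maint(); printf() stands in for
 * the per-chunk cache maintenance. Purely illustrative, 4K pages assumed.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void cache_maint_demo(unsigned long handle, size_t size)
{
	unsigned long pfn = (handle & PAGE_MASK) / PAGE_SIZE;
	unsigned long offset = handle & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;

		if (len + offset > PAGE_SIZE)	/* clamp to the current page */
			len = PAGE_SIZE - offset;
		printf("pfn %#lx: offset %lu, len %zu\n", pfn, offset, len);
		offset = 0;			/* only the first page is offset */
		pfn++;
		left -= len;
	} while (left);
}

int main(void)
{
	cache_maint_demo(0x10000f00UL, 0x1200);	/* starts mid-page, spans three pages */
	return 0;
}
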
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 97baf4427817..054857776254 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -21,14 +21,12 @@ struct xen_p2m_entry { | |||
21 | unsigned long pfn; | 21 | unsigned long pfn; |
22 | unsigned long mfn; | 22 | unsigned long mfn; |
23 | unsigned long nr_pages; | 23 | unsigned long nr_pages; |
24 | struct rb_node rbnode_mach; | ||
25 | struct rb_node rbnode_phys; | 24 | struct rb_node rbnode_phys; |
26 | }; | 25 | }; |
27 | 26 | ||
28 | static rwlock_t p2m_lock; | 27 | static rwlock_t p2m_lock; |
29 | struct rb_root phys_to_mach = RB_ROOT; | 28 | struct rb_root phys_to_mach = RB_ROOT; |
30 | EXPORT_SYMBOL_GPL(phys_to_mach); | 29 | EXPORT_SYMBOL_GPL(phys_to_mach); |
31 | static struct rb_root mach_to_phys = RB_ROOT; | ||
32 | 30 | ||
33 | static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) | 31 | static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) |
34 | { | 32 | { |
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) | |||
41 | parent = *link; | 39 | parent = *link; |
42 | entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); | 40 | entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); |
43 | 41 | ||
44 | if (new->mfn == entry->mfn) | ||
45 | goto err_out; | ||
46 | if (new->pfn == entry->pfn) | 42 | if (new->pfn == entry->pfn) |
47 | goto err_out; | 43 | goto err_out; |
48 | 44 | ||
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn) | |||
88 | } | 84 | } |
89 | EXPORT_SYMBOL_GPL(__pfn_to_mfn); | 85 | EXPORT_SYMBOL_GPL(__pfn_to_mfn); |
90 | 86 | ||
91 | static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new) | ||
92 | { | ||
93 | struct rb_node **link = &mach_to_phys.rb_node; | ||
94 | struct rb_node *parent = NULL; | ||
95 | struct xen_p2m_entry *entry; | ||
96 | int rc = 0; | ||
97 | |||
98 | while (*link) { | ||
99 | parent = *link; | ||
100 | entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach); | ||
101 | |||
102 | if (new->mfn == entry->mfn) | ||
103 | goto err_out; | ||
104 | if (new->pfn == entry->pfn) | ||
105 | goto err_out; | ||
106 | |||
107 | if (new->mfn < entry->mfn) | ||
108 | link = &(*link)->rb_left; | ||
109 | else | ||
110 | link = &(*link)->rb_right; | ||
111 | } | ||
112 | rb_link_node(&new->rbnode_mach, parent, link); | ||
113 | rb_insert_color(&new->rbnode_mach, &mach_to_phys); | ||
114 | goto out; | ||
115 | |||
116 | err_out: | ||
117 | rc = -EINVAL; | ||
118 | pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n", | ||
119 | __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); | ||
120 | out: | ||
121 | return rc; | ||
122 | } | ||
123 | |||
124 | unsigned long __mfn_to_pfn(unsigned long mfn) | ||
125 | { | ||
126 | struct rb_node *n = mach_to_phys.rb_node; | ||
127 | struct xen_p2m_entry *entry; | ||
128 | unsigned long irqflags; | ||
129 | |||
130 | read_lock_irqsave(&p2m_lock, irqflags); | ||
131 | while (n) { | ||
132 | entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach); | ||
133 | if (entry->mfn <= mfn && | ||
134 | entry->mfn + entry->nr_pages > mfn) { | ||
135 | read_unlock_irqrestore(&p2m_lock, irqflags); | ||
136 | return entry->pfn + (mfn - entry->mfn); | ||
137 | } | ||
138 | if (mfn < entry->mfn) | ||
139 | n = n->rb_left; | ||
140 | else | ||
141 | n = n->rb_right; | ||
142 | } | ||
143 | read_unlock_irqrestore(&p2m_lock, irqflags); | ||
144 | |||
145 | return INVALID_P2M_ENTRY; | ||
146 | } | ||
147 | EXPORT_SYMBOL_GPL(__mfn_to_pfn); | ||
148 | |||
149 | int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, | 87 | int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, |
150 | struct gnttab_map_grant_ref *kmap_ops, | 88 | struct gnttab_map_grant_ref *kmap_ops, |
151 | struct page **pages, unsigned int count) | 89 | struct page **pages, unsigned int count) |
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn, | |||
192 | p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); | 130 | p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); |
193 | if (p2m_entry->pfn <= pfn && | 131 | if (p2m_entry->pfn <= pfn && |
194 | p2m_entry->pfn + p2m_entry->nr_pages > pfn) { | 132 | p2m_entry->pfn + p2m_entry->nr_pages > pfn) { |
195 | rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys); | ||
196 | rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach); | 133 | rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach); |
197 | write_unlock_irqrestore(&p2m_lock, irqflags); | 134 | write_unlock_irqrestore(&p2m_lock, irqflags); |
198 | kfree(p2m_entry); | 135 | kfree(p2m_entry); |
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn, | |||
217 | p2m_entry->mfn = mfn; | 154 | p2m_entry->mfn = mfn; |
218 | 155 | ||
219 | write_lock_irqsave(&p2m_lock, irqflags); | 156 | write_lock_irqsave(&p2m_lock, irqflags); |
220 | if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) || | 157 | if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) { |
221 | (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) { | ||
222 | write_unlock_irqrestore(&p2m_lock, irqflags); | 158 | write_unlock_irqrestore(&p2m_lock, irqflags); |
223 | return false; | 159 | return false; |
224 | } | 160 | } |
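
Besides dropping the second tree, the last hunk above fixes the parenthesisation: in the old form the < 0 comparison bound tighter than the assignment, so rc received the comparison result rather than the function's return value. A stand-alone illustration, with add_entry() as an invented stand-in for xen_add_phys_to_mach_entry():

/*
 * Stand-alone illustration of the precedence fix; add_entry() is an
 * invented stand-in that fails with a negative errno-style value.
 */
#include <stdio.h>

static int add_entry(void)
{
	return -22;	/* pretend -EINVAL */
}

int main(void)
{
	int rc;

	rc = (add_entry() < 0);		/* old parenthesisation: rc == 1, code lost */
	printf("old style: rc = %d\n", rc);

	if ((rc = add_entry()) < 0)	/* new parenthesisation: rc == -22 */
		printf("new style: rc = %d\n", rc);

	return 0;
}
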
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..dfa6e3e74fdd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc) | |||
97 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | 97 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) |
98 | return false; | 98 | return false; |
99 | 99 | ||
100 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) | 100 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
101 | affinity = cpu_online_mask; | ||
101 | ret = true; | 102 | ret = true; |
103 | } | ||
102 | 104 | ||
103 | /* | ||
104 | * when using forced irq_set_affinity we must ensure that the cpu | ||
105 | * being offlined is not present in the affinity mask, it may be | ||
106 | * selected as the target CPU otherwise | ||
107 | */ | ||
108 | affinity = cpu_online_mask; | ||
109 | c = irq_data_get_irq_chip(d); | 105 | c = irq_data_get_irq_chip(d); |
110 | if (!c->irq_set_affinity) | 106 | if (!c->irq_set_affinity) |
111 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | 107 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); |
112 | else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) | 108 | else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) |
113 | cpumask_copy(d->affinity, affinity); | 109 | cpumask_copy(d->affinity, affinity); |
114 | 110 | ||
115 | return ret; | 111 | return ret; |
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1309d64aa926..29d48690f2ac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -230,9 +230,27 @@ void exit_thread(void) | |||
230 | { | 230 | { |
231 | } | 231 | } |
232 | 232 | ||
233 | static void tls_thread_flush(void) | ||
234 | { | ||
235 | asm ("msr tpidr_el0, xzr"); | ||
236 | |||
237 | if (is_compat_task()) { | ||
238 | current->thread.tp_value = 0; | ||
239 | |||
240 | /* | ||
241 | * We need to ensure ordering between the shadow state and the | ||
242 | * hardware state, so that we don't corrupt the hardware state | ||
243 | * with a stale shadow state during context switch. | ||
244 | */ | ||
245 | barrier(); | ||
246 | asm ("msr tpidrro_el0, xzr"); | ||
247 | } | ||
248 | } | ||
249 | |||
233 | void flush_thread(void) | 250 | void flush_thread(void) |
234 | { | 251 | { |
235 | fpsimd_flush_thread(); | 252 | fpsimd_flush_thread(); |
253 | tls_thread_flush(); | ||
236 | flush_ptrace_hw_breakpoint(current); | 254 | flush_ptrace_hw_breakpoint(current); |
237 | } | 255 | } |
238 | 256 | ||
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index de2b0226e06d..dc47e53e9e28 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs) | |||
79 | 79 | ||
80 | case __ARM_NR_compat_set_tls: | 80 | case __ARM_NR_compat_set_tls: |
81 | current->thread.tp_value = regs->regs[0]; | 81 | current->thread.tp_value = regs->regs[0]; |
82 | |||
83 | /* | ||
84 | * Protect against register corruption from context switch. | ||
85 | * See comment in tls_thread_flush. | ||
86 | */ | ||
87 | barrier(); | ||
82 | asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); | 88 | asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); |
83 | return 0; | 89 | return 0; |
84 | 90 | ||
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5472c2401876..a83061f37e43 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -149,8 +149,7 @@ void __init arm64_memblock_init(void) | |||
149 | memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); | 149 | memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); |
150 | #endif | 150 | #endif |
151 | 151 | ||
152 | if (!efi_enabled(EFI_MEMMAP)) | 152 | early_init_fdt_scan_reserved_mem(); |
153 | early_init_fdt_scan_reserved_mem(); | ||
154 | 153 | ||
155 | /* 4GB maximum for 32-bit only capable devices */ | 154 | /* 4GB maximum for 32-bit only capable devices */ |
156 | if (IS_ENABLED(CONFIG_ZONE_DMA)) | 155 | if (IS_ENABLED(CONFIG_ZONE_DMA)) |
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 6a65bb7d0657..18026b2eb582 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -329,6 +329,6 @@ | |||
329 | #define __NR_sched_getattr 1337 | 329 | #define __NR_sched_getattr 1337 |
330 | #define __NR_renameat2 1338 | 330 | #define __NR_renameat2 1338 |
331 | #define __NR_getrandom 1339 | 331 | #define __NR_getrandom 1339 |
332 | #define __NR_memfd_create 1339 | 332 | #define __NR_memfd_create 1340 |
333 | 333 | ||
334 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ | 334 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ |
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c
index ec73b2cf912a..fc505d58f078 100644
--- a/arch/ia64/pci/fixup.c
+++ b/arch/ia64/pci/fixup.c
@@ -38,27 +38,6 @@ static void pci_fixup_video(struct pci_dev *pdev) | |||
38 | return; | 38 | return; |
39 | /* Maybe, this machine supports legacy memory map. */ | 39 | /* Maybe, this machine supports legacy memory map. */ |
40 | 40 | ||
41 | if (!vga_default_device()) { | ||
42 | resource_size_t start, end; | ||
43 | int i; | ||
44 | |||
45 | /* Does firmware framebuffer belong to us? */ | ||
46 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
47 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) | ||
48 | continue; | ||
49 | |||
50 | start = pci_resource_start(pdev, i); | ||
51 | end = pci_resource_end(pdev, i); | ||
52 | |||
53 | if (!start || !end) | ||
54 | continue; | ||
55 | |||
56 | if (screen_info.lfb_base >= start && | ||
57 | (screen_info.lfb_base + screen_info.lfb_size) < end) | ||
58 | vga_set_default_device(pdev); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | /* Is VGA routed to us? */ | 41 | /* Is VGA routed to us? */ |
63 | bus = pdev->bus; | 42 | bus = pdev->bus; |
64 | while (bus) { | 43 | while (bus) { |
@@ -83,8 +62,7 @@ static void pci_fixup_video(struct pci_dev *pdev) | |||
83 | pci_read_config_word(pdev, PCI_COMMAND, &config); | 62 | pci_read_config_word(pdev, PCI_COMMAND, &config); |
84 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { | 63 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { |
85 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; | 64 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; |
86 | dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n"); | 65 | dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n"); |
87 | vga_set_default_device(pdev); | ||
88 | } | 66 | } |
89 | } | 67 | } |
90 | } | 68 | } |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 40e1c1dd0e24..6feded3b0c4c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -127,7 +127,7 @@ config SECCOMP | |||
127 | 127 | ||
128 | endmenu | 128 | endmenu |
129 | 129 | ||
130 | menu "Advanced setup" | 130 | menu "Kernel features" |
131 | 131 | ||
132 | config ADVANCED_OPTIONS | 132 | config ADVANCED_OPTIONS |
133 | bool "Prompt for advanced kernel configuration options" | 133 | bool "Prompt for advanced kernel configuration options" |
@@ -248,10 +248,10 @@ config MICROBLAZE_64K_PAGES | |||
248 | 248 | ||
249 | endchoice | 249 | endchoice |
250 | 250 | ||
251 | endmenu | ||
252 | |||
253 | source "mm/Kconfig" | 251 | source "mm/Kconfig" |
254 | 252 | ||
253 | endmenu | ||
254 | |||
255 | menu "Executable file formats" | 255 | menu "Executable file formats" |
256 | 256 | ||
257 | source "fs/Kconfig.binfmt" | 257 | source "fs/Kconfig.binfmt" |
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index b4a4cb150aa9..596e485ae707 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/percpu.h> | 16 | #include <asm/percpu.h> |
17 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
18 | #include <linux/linkage.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * These are per-cpu variables required in entry.S, among other | 21 | * These are per-cpu variables required in entry.S, among other |
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 0aa005703a0b..59a89a64a865 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -98,13 +98,13 @@ static inline int access_ok(int type, const void __user *addr, | |||
98 | 98 | ||
99 | if ((get_fs().seg < ((unsigned long)addr)) || | 99 | if ((get_fs().seg < ((unsigned long)addr)) || |
100 | (get_fs().seg < ((unsigned long)addr + size - 1))) { | 100 | (get_fs().seg < ((unsigned long)addr + size - 1))) { |
101 | pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", | 101 | pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", |
102 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, | 102 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, |
103 | (u32)get_fs().seg); | 103 | (u32)get_fs().seg); |
104 | return 0; | 104 | return 0; |
105 | } | 105 | } |
106 | ok: | 106 | ok: |
107 | pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", | 107 | pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", |
108 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, | 108 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, |
109 | (u32)get_fs().seg); | 109 | (u32)get_fs().seg); |
110 | return 1; | 110 | return 1; |
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index fd56a8f66489..ea4b233647c1 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@ | |||
38 | 38 | ||
39 | #endif /* __ASSEMBLY__ */ | 39 | #endif /* __ASSEMBLY__ */ |
40 | 40 | ||
41 | #define __NR_syscalls 381 | 41 | #define __NR_syscalls 387 |
42 | 42 | ||
43 | #endif /* _ASM_MICROBLAZE_UNISTD_H */ | 43 | #endif /* _ASM_MICROBLAZE_UNISTD_H */ |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 900c7e5333b6..574c43000699 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -546,6 +546,7 @@ config SGI_IP28 | |||
546 | # select SYS_HAS_EARLY_PRINTK | 546 | # select SYS_HAS_EARLY_PRINTK |
547 | select SYS_SUPPORTS_64BIT_KERNEL | 547 | select SYS_SUPPORTS_64BIT_KERNEL |
548 | select SYS_SUPPORTS_BIG_ENDIAN | 548 | select SYS_SUPPORTS_BIG_ENDIAN |
549 | select MIPS_L1_CACHE_SHIFT_7 | ||
549 | help | 550 | help |
550 | This is the SGI Indigo2 with R10000 processor. To compile a Linux | 551 | This is the SGI Indigo2 with R10000 processor. To compile a Linux |
551 | kernel that runs on these, say Y here. | 552 | kernel that runs on these, say Y here. |
@@ -2029,7 +2030,9 @@ config MIPS_CMP | |||
2029 | bool "MIPS CMP framework support (DEPRECATED)" | 2030 | bool "MIPS CMP framework support (DEPRECATED)" |
2030 | depends on SYS_SUPPORTS_MIPS_CMP | 2031 | depends on SYS_SUPPORTS_MIPS_CMP |
2031 | select MIPS_GIC_IPI | 2032 | select MIPS_GIC_IPI |
2033 | select SMP | ||
2032 | select SYNC_R4K | 2034 | select SYNC_R4K |
2035 | select SYS_SUPPORTS_SMP | ||
2033 | select WEAK_ORDERING | 2036 | select WEAK_ORDERING |
2034 | default n | 2037 | default n |
2035 | help | 2038 | help |
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 9336509f47ad..bbac51e11179 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -113,7 +113,16 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__ | |||
113 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) | 113 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) |
114 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) | 114 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) |
115 | 115 | ||
116 | cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) | 116 | # For smartmips configurations, there are hundreds of warnings due to ISA overrides |
117 | # in assembly and header files. smartmips is only supported for MIPS32r1 onwards | ||
118 | # and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or | ||
119 | # similar directives in the kernel will spam the build logs with the following warnings: | ||
120 | # Warning: the `smartmips' extension requires MIPS32 revision 1 or greater | ||
121 | # or | ||
122 | # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension | ||
123 | # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has | ||
124 | # been fixed properly. | ||
125 | cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn | ||
117 | cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) | 126 | cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) |
118 | 127 | ||
119 | cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ | 128 | cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ |
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index 37eb2d1fa69a..b94bf44d8d8e 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -434,7 +434,7 @@ static void bcm63xx_init_irq(void) | |||
434 | irq_stat_addr[0] += PERF_IRQSTAT_3368_REG; | 434 | irq_stat_addr[0] += PERF_IRQSTAT_3368_REG; |
435 | irq_mask_addr[0] += PERF_IRQMASK_3368_REG; | 435 | irq_mask_addr[0] += PERF_IRQMASK_3368_REG; |
436 | irq_stat_addr[1] = 0; | 436 | irq_stat_addr[1] = 0; |
437 | irq_stat_addr[1] = 0; | 437 | irq_mask_addr[1] = 0; |
438 | irq_bits = 32; | 438 | irq_bits = 32; |
439 | ext_irq_count = 4; | 439 | ext_irq_count = 4; |
440 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; | 440 | ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; |
@@ -443,7 +443,7 @@ static void bcm63xx_init_irq(void) | |||
443 | irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0); | 443 | irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0); |
444 | irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0); | 444 | irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0); |
445 | irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1); | 445 | irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1); |
446 | irq_stat_addr[1] += PERF_IRQMASK_6328_REG(1); | 446 | irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1); |
447 | irq_bits = 64; | 447 | irq_bits = 64; |
448 | ext_irq_count = 4; | 448 | ext_irq_count = 4; |
449 | is_ext_irq_cascaded = 1; | 449 | is_ext_irq_cascaded = 1; |
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index b49c7adbfa89..31903cf9709d 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/string.h> | ||
16 | 17 | ||
17 | #include <asm/addrspace.h> | 18 | #include <asm/addrspace.h> |
18 | 19 | ||
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
index d0352983b94d..51f80bd36fcc 100644
--- a/arch/mips/include/asm/cop2.h
+++ b/arch/mips/include/asm/cop2.h
@@ -16,8 +16,8 @@ | |||
16 | extern void octeon_cop2_save(struct octeon_cop2_state *); | 16 | extern void octeon_cop2_save(struct octeon_cop2_state *); |
17 | extern void octeon_cop2_restore(struct octeon_cop2_state *); | 17 | extern void octeon_cop2_restore(struct octeon_cop2_state *); |
18 | 18 | ||
19 | #define cop2_save(r) octeon_cop2_save(r) | 19 | #define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2) |
20 | #define cop2_restore(r) octeon_cop2_restore(r) | 20 | #define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2) |
21 | 21 | ||
22 | #define cop2_present 1 | 22 | #define cop2_present 1 |
23 | #define cop2_lazy_restore 1 | 23 | #define cop2_lazy_restore 1 |
@@ -26,26 +26,26 @@ extern void octeon_cop2_restore(struct octeon_cop2_state *); | |||
26 | 26 | ||
27 | extern void nlm_cop2_save(struct nlm_cop2_state *); | 27 | extern void nlm_cop2_save(struct nlm_cop2_state *); |
28 | extern void nlm_cop2_restore(struct nlm_cop2_state *); | 28 | extern void nlm_cop2_restore(struct nlm_cop2_state *); |
29 | #define cop2_save(r) nlm_cop2_save(r) | 29 | |
30 | #define cop2_restore(r) nlm_cop2_restore(r) | 30 | #define cop2_save(r) nlm_cop2_save(&(r)->thread.cp2) |
31 | #define cop2_restore(r) nlm_cop2_restore(&(r)->thread.cp2) | ||
31 | 32 | ||
32 | #define cop2_present 1 | 33 | #define cop2_present 1 |
33 | #define cop2_lazy_restore 0 | 34 | #define cop2_lazy_restore 0 |
34 | 35 | ||
35 | #elif defined(CONFIG_CPU_LOONGSON3) | 36 | #elif defined(CONFIG_CPU_LOONGSON3) |
36 | 37 | ||
37 | #define cop2_save(r) | ||
38 | #define cop2_restore(r) | ||
39 | |||
40 | #define cop2_present 1 | 38 | #define cop2_present 1 |
41 | #define cop2_lazy_restore 1 | 39 | #define cop2_lazy_restore 1 |
40 | #define cop2_save(r) do { (r); } while (0) | ||
41 | #define cop2_restore(r) do { (r); } while (0) | ||
42 | 42 | ||
43 | #else | 43 | #else |
44 | 44 | ||
45 | #define cop2_present 0 | 45 | #define cop2_present 0 |
46 | #define cop2_lazy_restore 0 | 46 | #define cop2_lazy_restore 0 |
47 | #define cop2_save(r) | 47 | #define cop2_save(r) do { (r); } while (0) |
48 | #define cop2_restore(r) | 48 | #define cop2_restore(r) do { (r); } while (0) |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | enum cu2_ops { | 51 | enum cu2_ops { |
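
A side note on the pattern adopted for the no-op variants above (this reasoning is an assumption, not stated in the patch): expanding to do { (r); } while (0) instead of nothing makes the macro behave as exactly one statement next to if/else and keeps the argument evaluated and type-checked even when COP2 support is compiled out. A small user-space sketch with invented names; (void) is added here so it builds warning-free:

/*
 * Why statement-like no-op macros expand to do { ... } while (0): with
 * the empty form the if-body below collapses to a bare ';' (gcc's
 * -Wempty-body warns) and the argument is never type-checked; the
 * do/while form is exactly one statement and still "uses" its argument.
 * Invented names, user-space only.
 */
#include <stdio.h>

#define cop2_save_empty(r)				/* expands to nothing */
#define cop2_save_stmt(r)	do { (void)(r); } while (0)

int main(void)
{
	int cop2_present = 0;
	int state = 42;

	if (cop2_present)
		cop2_save_stmt(&state);
	else
		printf("no COP2 state to save (state=%d untouched)\n", state);

	return 0;
}
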
diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h
index 5d6a76434d00..c4a912733b65 100644
--- a/arch/mips/include/asm/mach-ip28/spaces.h
+++ b/arch/mips/include/asm/mach-ip28/spaces.h
@@ -11,15 +11,8 @@ | |||
11 | #ifndef _ASM_MACH_IP28_SPACES_H | 11 | #ifndef _ASM_MACH_IP28_SPACES_H |
12 | #define _ASM_MACH_IP28_SPACES_H | 12 | #define _ASM_MACH_IP28_SPACES_H |
13 | 13 | ||
14 | #define CAC_BASE _AC(0xa800000000000000, UL) | ||
15 | |||
16 | #define HIGHMEM_START (~0UL) | ||
17 | |||
18 | #define PHYS_OFFSET _AC(0x20000000, UL) | 14 | #define PHYS_OFFSET _AC(0x20000000, UL) |
19 | 15 | ||
20 | #define UNCAC_BASE _AC(0xc0000000, UL) /* 0xa0000000 + PHYS_OFFSET */ | ||
21 | #define IO_BASE UNCAC_BASE | ||
22 | |||
23 | #include <asm/mach-generic/spaces.h> | 16 | #include <asm/mach-generic/spaces.h> |
24 | 17 | ||
25 | #endif /* _ASM_MACH_IP28_SPACES_H */ | 18 | #endif /* _ASM_MACH_IP28_SPACES_H */ |
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 5699ec3a71af..3be81803595d 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -37,7 +37,7 @@ | |||
37 | 37 | ||
38 | /* | 38 | /* |
39 | * This is used for calculating the real page sizes | 39 | * This is used for calculating the real page sizes |
40 | * for FTLB or VTLB + FTLB confugrations. | 40 | * for FTLB or VTLB + FTLB configurations. |
41 | */ | 41 | */ |
42 | static inline unsigned int page_size_ftlb(unsigned int mmuextdef) | 42 | static inline unsigned int page_size_ftlb(unsigned int mmuextdef) |
43 | { | 43 | { |
@@ -223,7 +223,8 @@ static inline int pfn_valid(unsigned long pfn) | |||
223 | 223 | ||
224 | #endif | 224 | #endif |
225 | 225 | ||
226 | #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) | 226 | #define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys((void *) \ |
227 | (kaddr)))) | ||
227 | 228 | ||
228 | extern int __virt_addr_valid(const volatile void *kaddr); | 229 | extern int __virt_addr_valid(const volatile void *kaddr); |
229 | #define virt_addr_valid(kaddr) \ | 230 | #define virt_addr_valid(kaddr) \ |
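
The extra (void *) cast above is presumably there so that callers which pass an unsigned long kernel address, rather than a pointer, no longer trip "pointer from integer" diagnostics on their way into virt_to_phys(); treat that as an assumption. A minimal user-space model with invented names and a fake identity mapping:

/*
 * Toy model of the cast above: to_phys_demo() takes a pointer (like
 * virt_to_phys()), and the (void *) cast inside the macro lets callers
 * hand in either a pointer or an unsigned long. Names and the mapping
 * are invented.
 */
#include <stdio.h>

static unsigned long to_phys_demo(const volatile void *addr)
{
	return (unsigned long)addr;		/* pretend identity mapping */
}

#define virt_to_pfn_demo(kaddr)	(to_phys_demo((void *)(kaddr)) >> 12)

int main(void)
{
	char buf[16];
	unsigned long as_ulong = (unsigned long)buf;

	/* both forms compile cleanly because of the (void *) cast: */
	printf("%lu %lu\n", virt_to_pfn_demo(buf), virt_to_pfn_demo(as_ulong));
	return 0;
}
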
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 1e0f20a9cdda..eacf865d21c2 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -37,11 +37,6 @@ extern int __cpu_logical_map[NR_CPUS]; | |||
37 | 37 | ||
38 | #define NO_PROC_ID (-1) | 38 | #define NO_PROC_ID (-1) |
39 | 39 | ||
40 | #define topology_physical_package_id(cpu) (cpu_data[cpu].package) | ||
41 | #define topology_core_id(cpu) (cpu_data[cpu].core) | ||
42 | #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) | ||
43 | #define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) | ||
44 | |||
45 | #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ | 40 | #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ |
46 | #define SMP_CALL_FUNCTION 0x2 | 41 | #define SMP_CALL_FUNCTION 0x2 |
47 | /* Octeon - Tell another core to flush its icache */ | 42 | /* Octeon - Tell another core to flush its icache */ |
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 495c1041a2cc..b928b6f898cd 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h | |||
@@ -92,7 +92,7 @@ do { \ | |||
92 | KSTK_STATUS(prev) &= ~ST0_CU2; \ | 92 | KSTK_STATUS(prev) &= ~ST0_CU2; \ |
93 | __c0_stat = read_c0_status(); \ | 93 | __c0_stat = read_c0_status(); \ |
94 | write_c0_status(__c0_stat | ST0_CU2); \ | 94 | write_c0_status(__c0_stat | ST0_CU2); \ |
95 | cop2_save(&prev->thread.cp2); \ | 95 | cop2_save(prev); \ |
96 | write_c0_status(__c0_stat & ~ST0_CU2); \ | 96 | write_c0_status(__c0_stat & ~ST0_CU2); \ |
97 | } \ | 97 | } \ |
98 | __clear_software_ll_bit(); \ | 98 | __clear_software_ll_bit(); \ |
@@ -111,7 +111,7 @@ do { \ | |||
111 | (KSTK_STATUS(current) & ST0_CU2)) { \ | 111 | (KSTK_STATUS(current) & ST0_CU2)) { \ |
112 | __c0_stat = read_c0_status(); \ | 112 | __c0_stat = read_c0_status(); \ |
113 | write_c0_status(__c0_stat | ST0_CU2); \ | 113 | write_c0_status(__c0_stat | ST0_CU2); \ |
114 | cop2_restore(¤t->thread.cp2); \ | 114 | cop2_restore(current); \ |
115 | write_c0_status(__c0_stat & ~ST0_CU2); \ | 115 | write_c0_status(__c0_stat & ~ST0_CU2); \ |
116 | } \ | 116 | } \ |
117 | if (cpu_has_dsp) \ | 117 | if (cpu_has_dsp) \ |
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h index 20ea4859c822..3e307ec2afba 100644 --- a/arch/mips/include/asm/topology.h +++ b/arch/mips/include/asm/topology.h | |||
@@ -9,5 +9,13 @@ | |||
9 | #define __ASM_TOPOLOGY_H | 9 | #define __ASM_TOPOLOGY_H |
10 | 10 | ||
11 | #include <topology.h> | 11 | #include <topology.h> |
12 | #include <linux/smp.h> | ||
13 | |||
14 | #ifdef CONFIG_SMP | ||
15 | #define topology_physical_package_id(cpu) (cpu_data[cpu].package) | ||
16 | #define topology_core_id(cpu) (cpu_data[cpu].core) | ||
17 | #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) | ||
18 | #define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) | ||
19 | #endif | ||
12 | 20 | ||
13 | #endif /* __ASM_TOPOLOGY_H */ | 21 | #endif /* __ASM_TOPOLOGY_H */ |
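The topology accessors move out of smp.h and are now provided by asm/topology.h only on CONFIG_SMP builds, where cpu_data, cpu_core_map and cpu_sibling_map actually exist. A hedged, self-contained sketch of the accessor pattern these macros implement (the cpu_data table here is fabricated for the example):

/* Hedged sketch of the per-CPU topology accessor pattern; data is made up. */
#include <stdio.h>

struct cpu_info { int package; int core; };

static struct cpu_info cpu_data[4] = {
        { .package = 0, .core = 0 }, { .package = 0, .core = 1 },
        { .package = 1, .core = 0 }, { .package = 1, .core = 1 },
};

#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
#define topology_core_id(cpu)             (cpu_data[cpu].core)

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu%d: package %d core %d\n", cpu,
                       topology_physical_package_id(cpu), topology_core_id(cpu));
        return 0;
}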
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 9bc13eaf9d67..fdb4923777d1 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h | |||
@@ -373,16 +373,18 @@ | |||
373 | #define __NR_sched_getattr (__NR_Linux + 350) | 373 | #define __NR_sched_getattr (__NR_Linux + 350) |
374 | #define __NR_renameat2 (__NR_Linux + 351) | 374 | #define __NR_renameat2 (__NR_Linux + 351) |
375 | #define __NR_seccomp (__NR_Linux + 352) | 375 | #define __NR_seccomp (__NR_Linux + 352) |
376 | #define __NR_getrandom (__NR_Linux + 353) | ||
377 | #define __NR_memfd_create (__NR_Linux + 354) | ||
376 | 378 | ||
377 | /* | 379 | /* |
378 | * Offset of the last Linux o32 flavoured syscall | 380 | * Offset of the last Linux o32 flavoured syscall |
379 | */ | 381 | */ |
380 | #define __NR_Linux_syscalls 352 | 382 | #define __NR_Linux_syscalls 354 |
381 | 383 | ||
382 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 384 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
383 | 385 | ||
384 | #define __NR_O32_Linux 4000 | 386 | #define __NR_O32_Linux 4000 |
385 | #define __NR_O32_Linux_syscalls 352 | 387 | #define __NR_O32_Linux_syscalls 354 |
386 | 388 | ||
387 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 389 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
388 | 390 | ||
@@ -703,16 +705,18 @@ | |||
703 | #define __NR_sched_getattr (__NR_Linux + 310) | 705 | #define __NR_sched_getattr (__NR_Linux + 310) |
704 | #define __NR_renameat2 (__NR_Linux + 311) | 706 | #define __NR_renameat2 (__NR_Linux + 311) |
705 | #define __NR_seccomp (__NR_Linux + 312) | 707 | #define __NR_seccomp (__NR_Linux + 312) |
708 | #define __NR_getrandom (__NR_Linux + 313) | ||
709 | #define __NR_memfd_create (__NR_Linux + 314) | ||
706 | 710 | ||
707 | /* | 711 | /* |
708 | * Offset of the last Linux 64-bit flavoured syscall | 712 | * Offset of the last Linux 64-bit flavoured syscall |
709 | */ | 713 | */ |
710 | #define __NR_Linux_syscalls 312 | 714 | #define __NR_Linux_syscalls 314 |
711 | 715 | ||
712 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 716 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
713 | 717 | ||
714 | #define __NR_64_Linux 5000 | 718 | #define __NR_64_Linux 5000 |
715 | #define __NR_64_Linux_syscalls 312 | 719 | #define __NR_64_Linux_syscalls 314 |
716 | 720 | ||
717 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 721 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
718 | 722 | ||
@@ -1037,15 +1041,17 @@ | |||
1037 | #define __NR_sched_getattr (__NR_Linux + 314) | 1041 | #define __NR_sched_getattr (__NR_Linux + 314) |
1038 | #define __NR_renameat2 (__NR_Linux + 315) | 1042 | #define __NR_renameat2 (__NR_Linux + 315) |
1039 | #define __NR_seccomp (__NR_Linux + 316) | 1043 | #define __NR_seccomp (__NR_Linux + 316) |
1044 | #define __NR_getrandom (__NR_Linux + 317) | ||
1045 | #define __NR_memfd_create (__NR_Linux + 318) | ||
1040 | 1046 | ||
1041 | /* | 1047 | /* |
1042 | * Offset of the last N32 flavoured syscall | 1048 | * Offset of the last N32 flavoured syscall |
1043 | */ | 1049 | */ |
1044 | #define __NR_Linux_syscalls 316 | 1050 | #define __NR_Linux_syscalls 318 |
1045 | 1051 | ||
1046 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 1052 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
1047 | 1053 | ||
1048 | #define __NR_N32_Linux 6000 | 1054 | #define __NR_N32_Linux 6000 |
1049 | #define __NR_N32_Linux_syscalls 316 | 1055 | #define __NR_N32_Linux_syscalls 318 |
1050 | 1056 | ||
1051 | #endif /* _UAPI_ASM_UNISTD_H */ | 1057 | #endif /* _UAPI_ASM_UNISTD_H */ |
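getrandom(2) and memfd_create(2) are wired up here for all three MIPS ABIs (o32, n64, n32). Until a libc wrapper is available they can be exercised through syscall(2); a hedged userspace sketch, assuming the installed headers already expose the new __NR_ values:

/* Hedged userspace sketch: call the newly wired syscalls via syscall(2). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned char buf[16];
        long n = syscall(__NR_getrandom, buf, sizeof(buf), 0);   /* flags = 0 */
        if (n < 0)
                perror("getrandom");

        long fd = syscall(__NR_memfd_create, "demo", 0);         /* no MFD_* flags */
        if (fd < 0)
                perror("memfd_create");
        else if (write((int)fd, buf, (size_t)(n > 0 ? n : 0)) < 0)
                perror("write");                                 /* anonymous file */

        return 0;
}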
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 992e18474da5..50980bf3983e 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c | |||
@@ -71,8 +71,12 @@ machine_kexec(struct kimage *image) | |||
71 | kexec_start_address = | 71 | kexec_start_address = |
72 | (unsigned long) phys_to_virt(image->start); | 72 | (unsigned long) phys_to_virt(image->start); |
73 | 73 | ||
74 | kexec_indirection_page = | 74 | if (image->type == KEXEC_TYPE_DEFAULT) { |
75 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); | 75 | kexec_indirection_page = |
76 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); | ||
77 | } else { | ||
78 | kexec_indirection_page = (unsigned long)&image->head; | ||
79 | } | ||
76 | 80 | ||
77 | memcpy((void*)reboot_code_buffer, relocate_new_kernel, | 81 | memcpy((void*)reboot_code_buffer, relocate_new_kernel, |
78 | relocate_new_kernel_size); | 82 | relocate_new_kernel_size); |
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index f93b4cbec739..744cd10ba599 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -577,3 +577,5 @@ EXPORT(sys_call_table) | |||
577 | PTR sys_sched_getattr /* 4350 */ | 577 | PTR sys_sched_getattr /* 4350 */ |
578 | PTR sys_renameat2 | 578 | PTR sys_renameat2 |
579 | PTR sys_seccomp | 579 | PTR sys_seccomp |
580 | PTR sys_getrandom | ||
581 | PTR sys_memfd_create | ||
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 03ebd9979ad2..002b1bc09c38 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -432,4 +432,6 @@ EXPORT(sys_call_table) | |||
432 | PTR sys_sched_getattr /* 5310 */ | 432 | PTR sys_sched_getattr /* 5310 */ |
433 | PTR sys_renameat2 | 433 | PTR sys_renameat2 |
434 | PTR sys_seccomp | 434 | PTR sys_seccomp |
435 | PTR sys_getrandom | ||
436 | PTR sys_memfd_create | ||
435 | .size sys_call_table,.-sys_call_table | 437 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index ebc9228e2e15..ca6cbbe9805b 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -425,4 +425,6 @@ EXPORT(sysn32_call_table) | |||
425 | PTR sys_sched_getattr | 425 | PTR sys_sched_getattr |
426 | PTR sys_renameat2 /* 6315 */ | 426 | PTR sys_renameat2 /* 6315 */ |
427 | PTR sys_seccomp | 427 | PTR sys_seccomp |
428 | PTR sys_getrandom | ||
429 | PTR sys_memfd_create | ||
428 | .size sysn32_call_table,.-sysn32_call_table | 430 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 25bb8400156d..9e10d11fbb84 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -562,4 +562,6 @@ EXPORT(sys32_call_table) | |||
562 | PTR sys_sched_getattr /* 4350 */ | 562 | PTR sys_sched_getattr /* 4350 */ |
563 | PTR sys_renameat2 | 563 | PTR sys_renameat2 |
564 | PTR sys_seccomp | 564 | PTR sys_seccomp |
565 | PTR sys_getrandom | ||
566 | PTR sys_memfd_create | ||
565 | .size sys32_call_table,.-sys32_call_table | 567 | .size sys32_call_table,.-sys32_call_table |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 571aab064936..f42e35e42790 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -53,6 +53,7 @@ | |||
53 | */ | 53 | */ |
54 | unsigned long empty_zero_page, zero_page_mask; | 54 | unsigned long empty_zero_page, zero_page_mask; |
55 | EXPORT_SYMBOL_GPL(empty_zero_page); | 55 | EXPORT_SYMBOL_GPL(empty_zero_page); |
56 | EXPORT_SYMBOL(zero_page_mask); | ||
56 | 57 | ||
57 | /* | 58 | /* |
58 | * Not static inline because used by IP27 special magic initialization code | 59 | * Not static inline because used by IP27 special magic initialization code |
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 7edc08398c4a..64ecf9a4e526 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c | |||
@@ -772,6 +772,7 @@ static int build_body(struct jit_ctx *ctx) | |||
772 | const struct sock_filter *inst; | 772 | const struct sock_filter *inst; |
773 | unsigned int i, off, load_order, condt; | 773 | unsigned int i, off, load_order, condt; |
774 | u32 k, b_off __maybe_unused; | 774 | u32 k, b_off __maybe_unused; |
775 | int tmp; | ||
775 | 776 | ||
776 | for (i = 0; i < prog->len; i++) { | 777 | for (i = 0; i < prog->len; i++) { |
777 | u16 code; | 778 | u16 code; |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 6e75e2030927..1554a6f2a5bb 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -321,6 +321,22 @@ source "fs/Kconfig" | |||
321 | 321 | ||
322 | source "arch/parisc/Kconfig.debug" | 322 | source "arch/parisc/Kconfig.debug" |
323 | 323 | ||
324 | config SECCOMP | ||
325 | def_bool y | ||
326 | prompt "Enable seccomp to safely compute untrusted bytecode" | ||
327 | ---help--- | ||
328 | This kernel feature is useful for number crunching applications | ||
329 | that may need to compute untrusted bytecode during their | ||
330 | execution. By using pipes or other transports made available to | ||
331 | the process as file descriptors supporting the read/write | ||
332 | syscalls, it's possible to isolate those applications in | ||
333 | their own address space using seccomp. Once seccomp is | ||
334 | enabled via prctl(PR_SET_SECCOMP), it cannot be disabled | ||
335 | and the task is only allowed to execute a few safe syscalls | ||
336 | defined by each seccomp mode. | ||
337 | |||
338 | If unsure, say Y. Only embedded should say N here. | ||
339 | |||
324 | source "security/Kconfig" | 340 | source "security/Kconfig" |
325 | 341 | ||
326 | source "crypto/Kconfig" | 342 | source "crypto/Kconfig" |
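With CONFIG_SECCOMP enabled, a task opts in via prctl(PR_SET_SECCOMP); in strict mode only read, write, exit and sigreturn remain usable, matching the __NR_seccomp_* mapping added in asm/seccomp.h later in this patch. A hedged userspace sketch of strict mode (the raw SYS_exit call avoids libc exit paths that would use other syscalls):

/* Hedged userspace sketch: enter strict seccomp, then use only permitted calls. */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>

int main(void)
{
        const char msg[] = "now confined to read/write/exit/sigreturn\n";

        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0) {
                perror("prctl(PR_SET_SECCOMP)");
                return 1;
        }
        if (write(STDOUT_FILENO, msg, sizeof(msg) - 1) < 0)   /* still permitted */
                return 1;
        syscall(SYS_exit, 0);   /* raw exit; any other syscall would be SIGKILLed */
        return 0;               /* not reached */
}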
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index d9dc6cd3b7d2..e5c4da035810 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c | |||
@@ -456,7 +456,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2) | |||
456 | } | 456 | } |
457 | 457 | ||
458 | /* String could be altered by userspace after strlen_user() */ | 458 | /* String could be altered by userspace after strlen_user() */ |
459 | fsname[len] = '\0'; | 459 | fsname[len - 1] = '\0'; |
460 | 460 | ||
461 | printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname); | 461 | printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname); |
462 | if ( !strcmp(fsname, "hfs") ) { | 462 | if ( !strcmp(fsname, "hfs") ) { |
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h new file mode 100644 index 000000000000..015f7887aa29 --- /dev/null +++ b/arch/parisc/include/asm/seccomp.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _ASM_PARISC_SECCOMP_H | ||
2 | #define _ASM_PARISC_SECCOMP_H | ||
3 | |||
4 | #include <linux/unistd.h> | ||
5 | |||
6 | #define __NR_seccomp_read __NR_read | ||
7 | #define __NR_seccomp_write __NR_write | ||
8 | #define __NR_seccomp_exit __NR_exit | ||
9 | #define __NR_seccomp_sigreturn __NR_rt_sigreturn | ||
10 | |||
11 | #define __NR_seccomp_read_32 __NR_read | ||
12 | #define __NR_seccomp_write_32 __NR_write | ||
13 | #define __NR_seccomp_exit_32 __NR_exit | ||
14 | #define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn | ||
15 | |||
16 | #endif /* _ASM_PARISC_SECCOMP_H */ | ||
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 4b9b10ce1f9d..a84611835549 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -60,6 +60,7 @@ struct thread_info { | |||
60 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | 60 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ |
61 | #define TIF_SINGLESTEP 9 /* single stepping? */ | 61 | #define TIF_SINGLESTEP 9 /* single stepping? */ |
62 | #define TIF_BLOCKSTEP 10 /* branch stepping? */ | 62 | #define TIF_BLOCKSTEP 10 /* branch stepping? */ |
63 | #define TIF_SECCOMP 11 /* secure computing */ | ||
63 | 64 | ||
64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 65 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 66 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
@@ -70,11 +71,13 @@ struct thread_info { | |||
70 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
71 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 72 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
72 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) | 73 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) |
74 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | ||
73 | 75 | ||
74 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ | 76 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
75 | _TIF_NEED_RESCHED) | 77 | _TIF_NEED_RESCHED) |
76 | #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ | 78 | #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ |
77 | _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT) | 79 | _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \ |
80 | _TIF_SECCOMP) | ||
78 | 81 | ||
79 | #ifdef CONFIG_64BIT | 82 | #ifdef CONFIG_64BIT |
80 | # ifdef CONFIG_COMPAT | 83 | # ifdef CONFIG_COMPAT |
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 47e0e21d2272..8667f18be238 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h | |||
@@ -830,8 +830,11 @@ | |||
830 | #define __NR_sched_getattr (__NR_Linux + 335) | 830 | #define __NR_sched_getattr (__NR_Linux + 335) |
831 | #define __NR_utimes (__NR_Linux + 336) | 831 | #define __NR_utimes (__NR_Linux + 336) |
832 | #define __NR_renameat2 (__NR_Linux + 337) | 832 | #define __NR_renameat2 (__NR_Linux + 337) |
833 | #define __NR_seccomp (__NR_Linux + 338) | ||
834 | #define __NR_getrandom (__NR_Linux + 339) | ||
835 | #define __NR_memfd_create (__NR_Linux + 340) | ||
833 | 836 | ||
834 | #define __NR_Linux_syscalls (__NR_renameat2 + 1) | 837 | #define __NR_Linux_syscalls (__NR_memfd_create + 1) |
835 | 838 | ||
836 | 839 | ||
837 | #define __IGNORE_select /* newselect */ | 840 | #define __IGNORE_select /* newselect */ |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index e842ee233db4..3bab72462ab5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -270,6 +270,12 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
270 | { | 270 | { |
271 | long ret = 0; | 271 | long ret = 0; |
272 | 272 | ||
273 | /* Do the secure computing check first. */ | ||
274 | if (secure_computing(regs->gr[20])) { | ||
275 | /* seccomp failures shouldn't expose any additional code. */ | ||
276 | return -1; | ||
277 | } | ||
278 | |||
273 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 279 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
274 | tracehook_report_syscall_entry(regs)) | 280 | tracehook_report_syscall_entry(regs)) |
275 | ret = -1L; | 281 | ret = -1L; |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 838786011037..7ef22e3387e0 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page) | |||
74 | /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ | 74 | /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ |
75 | /* Light-weight-syscall entry must always be located at 0xb0 */ | 75 | /* Light-weight-syscall entry must always be located at 0xb0 */ |
76 | /* WARNING: Keep this number updated with table size changes */ | 76 | /* WARNING: Keep this number updated with table size changes */ |
77 | #define __NR_lws_entries (2) | 77 | #define __NR_lws_entries (3) |
78 | 78 | ||
79 | lws_entry: | 79 | lws_entry: |
80 | gate lws_start, %r0 /* increase privilege */ | 80 | gate lws_start, %r0 /* increase privilege */ |
@@ -502,7 +502,7 @@ lws_exit: | |||
502 | 502 | ||
503 | 503 | ||
504 | /*************************************************** | 504 | /*************************************************** |
505 | Implementing CAS as an atomic operation: | 505 | Implementing 32bit CAS as an atomic operation: |
506 | 506 | ||
507 | %r26 - Address to examine | 507 | %r26 - Address to examine |
508 | %r25 - Old value to check (old) | 508 | %r25 - Old value to check (old) |
@@ -659,6 +659,230 @@ cas_action: | |||
659 | ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) | 659 | ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) |
660 | 660 | ||
661 | 661 | ||
662 | /*************************************************** | ||
663 | New CAS implementation which uses pointers and variable size | ||
664 | information. The values pointed to by old and new MUST NOT change | ||
665 | while performing CAS. The lock only protects the value at %r26. | ||
666 | |||
667 | %r26 - Address to examine | ||
668 | %r25 - Pointer to the value to check (old) | ||
669 | %r24 - Pointer to the value to set (new) | ||
670 | %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit) | ||
671 | %r28 - Return non-zero on failure | ||
672 | %r21 - Kernel error code | ||
673 | |||
674 | %r21 has the following meanings: | ||
675 | |||
676 | EAGAIN - CAS is busy, ldcw failed, try again. | ||
677 | EFAULT - Read or write failed. | ||
678 | |||
679 | Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only) | ||
680 | |||
681 | ****************************************************/ | ||
682 | |||
683 | /* ELF32 Process entry path */ | ||
684 | lws_compare_and_swap_2: | ||
685 | #ifdef CONFIG_64BIT | ||
686 | /* Clip the input registers */ | ||
687 | depdi 0, 31, 32, %r26 | ||
688 | depdi 0, 31, 32, %r25 | ||
689 | depdi 0, 31, 32, %r24 | ||
690 | depdi 0, 31, 32, %r23 | ||
691 | #endif | ||
692 | |||
693 | /* Check the validity of the size pointer */ | ||
694 | subi,>>= 4, %r23, %r0 | ||
695 | b,n lws_exit_nosys | ||
696 | |||
697 | /* Jump to the functions which will load the old and new values into | ||
698 | registers depending on their size */ | ||
699 | shlw %r23, 2, %r29 | ||
700 | blr %r29, %r0 | ||
701 | nop | ||
702 | |||
703 | /* 8bit load */ | ||
704 | 4: ldb 0(%sr3,%r25), %r25 | ||
705 | b cas2_lock_start | ||
706 | 5: ldb 0(%sr3,%r24), %r24 | ||
707 | nop | ||
708 | nop | ||
709 | nop | ||
710 | nop | ||
711 | nop | ||
712 | |||
713 | /* 16bit load */ | ||
714 | 6: ldh 0(%sr3,%r25), %r25 | ||
715 | b cas2_lock_start | ||
716 | 7: ldh 0(%sr3,%r24), %r24 | ||
717 | nop | ||
718 | nop | ||
719 | nop | ||
720 | nop | ||
721 | nop | ||
722 | |||
723 | /* 32bit load */ | ||
724 | 8: ldw 0(%sr3,%r25), %r25 | ||
725 | b cas2_lock_start | ||
726 | 9: ldw 0(%sr3,%r24), %r24 | ||
727 | nop | ||
728 | nop | ||
729 | nop | ||
730 | nop | ||
731 | nop | ||
732 | |||
733 | /* 64bit load */ | ||
734 | #ifdef CONFIG_64BIT | ||
735 | 10: ldd 0(%sr3,%r25), %r25 | ||
736 | 11: ldd 0(%sr3,%r24), %r24 | ||
737 | #else | ||
738 | /* Load new value into r22/r23 - high/low */ | ||
739 | 10: ldw 0(%sr3,%r25), %r22 | ||
740 | 11: ldw 4(%sr3,%r25), %r23 | ||
741 | /* Load new value into fr4 for atomic store later */ | ||
742 | 12: flddx 0(%sr3,%r24), %fr4 | ||
743 | #endif | ||
744 | |||
745 | cas2_lock_start: | ||
746 | /* Load start of lock table */ | ||
747 | ldil L%lws_lock_start, %r20 | ||
748 | ldo R%lws_lock_start(%r20), %r28 | ||
749 | |||
750 | /* Extract four bits from r26 and hash lock (Bits 4-7) */ | ||
751 | extru %r26, 27, 4, %r20 | ||
752 | |||
753 | /* Find lock to use, the hash is either one of 0 to | ||
754 | 15, multiplied by 16 (keep it 16-byte aligned) | ||
755 | and add to the lock table offset. */ | ||
756 | shlw %r20, 4, %r20 | ||
757 | add %r20, %r28, %r20 | ||
758 | |||
759 | rsm PSW_SM_I, %r0 /* Disable interrupts */ | ||
760 | /* COW breaks can cause contention on UP systems */ | ||
761 | LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ | ||
762 | cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */ | ||
763 | cas2_wouldblock: | ||
764 | ldo 2(%r0), %r28 /* 2nd case */ | ||
765 | ssm PSW_SM_I, %r0 | ||
766 | b lws_exit /* Contended... */ | ||
767 | ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ | ||
768 | |||
769 | /* | ||
770 | prev = *addr; | ||
771 | if ( prev == old ) | ||
772 | *addr = new; | ||
773 | return prev; | ||
774 | */ | ||
775 | |||
776 | /* NOTES: | ||
777 | This all works because intr_do_signal | ||
778 | and schedule both check the return iasq | ||
779 | and see that we are on the kernel page | ||
780 | so this process is never scheduled off | ||
781 | or is ever sent any signal of any sort, | ||
782 | thus it is wholly atomic from userspace's | ||
783 | perspective | ||
784 | */ | ||
785 | cas2_action: | ||
786 | /* Jump to the correct function */ | ||
787 | blr %r29, %r0 | ||
788 | /* Set %r28 as non-zero for now */ | ||
789 | ldo 1(%r0),%r28 | ||
790 | |||
791 | /* 8bit CAS */ | ||
792 | 13: ldb,ma 0(%sr3,%r26), %r29 | ||
793 | sub,= %r29, %r25, %r0 | ||
794 | b,n cas2_end | ||
795 | 14: stb,ma %r24, 0(%sr3,%r26) | ||
796 | b cas2_end | ||
797 | copy %r0, %r28 | ||
798 | nop | ||
799 | nop | ||
800 | |||
801 | /* 16bit CAS */ | ||
802 | 15: ldh,ma 0(%sr3,%r26), %r29 | ||
803 | sub,= %r29, %r25, %r0 | ||
804 | b,n cas2_end | ||
805 | 16: sth,ma %r24, 0(%sr3,%r26) | ||
806 | b cas2_end | ||
807 | copy %r0, %r28 | ||
808 | nop | ||
809 | nop | ||
810 | |||
811 | /* 32bit CAS */ | ||
812 | 17: ldw,ma 0(%sr3,%r26), %r29 | ||
813 | sub,= %r29, %r25, %r0 | ||
814 | b,n cas2_end | ||
815 | 18: stw,ma %r24, 0(%sr3,%r26) | ||
816 | b cas2_end | ||
817 | copy %r0, %r28 | ||
818 | nop | ||
819 | nop | ||
820 | |||
821 | /* 64bit CAS */ | ||
822 | #ifdef CONFIG_64BIT | ||
823 | 19: ldd,ma 0(%sr3,%r26), %r29 | ||
824 | sub,= %r29, %r25, %r0 | ||
825 | b,n cas2_end | ||
826 | 20: std,ma %r24, 0(%sr3,%r26) | ||
827 | copy %r0, %r28 | ||
828 | #else | ||
829 | /* Compare first word */ | ||
830 | 19: ldw,ma 0(%sr3,%r26), %r29 | ||
831 | sub,= %r29, %r22, %r0 | ||
832 | b,n cas2_end | ||
833 | /* Compare second word */ | ||
834 | 20: ldw,ma 4(%sr3,%r26), %r29 | ||
835 | sub,= %r29, %r23, %r0 | ||
836 | b,n cas2_end | ||
837 | /* Perform the store */ | ||
838 | 21: fstdx %fr4, 0(%sr3,%r26) | ||
839 | copy %r0, %r28 | ||
840 | #endif | ||
841 | |||
842 | cas2_end: | ||
843 | /* Free lock */ | ||
844 | stw,ma %r20, 0(%sr2,%r20) | ||
845 | /* Enable interrupts */ | ||
846 | ssm PSW_SM_I, %r0 | ||
847 | /* Return to userspace, set no error */ | ||
848 | b lws_exit | ||
849 | copy %r0, %r21 | ||
850 | |||
851 | 22: | ||
852 | /* Error occurred on load or store */ | ||
853 | /* Free lock */ | ||
854 | stw %r20, 0(%sr2,%r20) | ||
855 | ssm PSW_SM_I, %r0 | ||
856 | ldo 1(%r0),%r28 | ||
857 | b lws_exit | ||
858 | ldo -EFAULT(%r0),%r21 /* set errno */ | ||
859 | nop | ||
860 | nop | ||
861 | nop | ||
862 | |||
863 | /* Exception table entries, for the load and store, return EFAULT. | ||
864 | Each of the entries must be relocated. */ | ||
865 | ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page) | ||
866 | ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page) | ||
867 | ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page) | ||
868 | ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page) | ||
869 | ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page) | ||
870 | ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page) | ||
871 | ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page) | ||
872 | ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page) | ||
873 | ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page) | ||
874 | ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page) | ||
875 | ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page) | ||
876 | ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page) | ||
877 | ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page) | ||
878 | ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page) | ||
879 | ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page) | ||
880 | ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page) | ||
881 | #ifndef CONFIG_64BIT | ||
882 | ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page) | ||
883 | ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page) | ||
884 | #endif | ||
885 | |||
662 | /* Make sure nothing else is placed on this page */ | 886 | /* Make sure nothing else is placed on this page */ |
663 | .align PAGE_SIZE | 887 | .align PAGE_SIZE |
664 | END(linux_gateway_page) | 888 | END(linux_gateway_page) |
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page) | |||
675 | /* Light-weight-syscall table */ | 899 | /* Light-weight-syscall table */ |
676 | /* Start of lws table. */ | 900 | /* Start of lws table. */ |
677 | ENTRY(lws_table) | 901 | ENTRY(lws_table) |
678 | LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */ | 902 | LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */ |
679 | LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */ | 903 | LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */ |
904 | LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */ | ||
680 | END(lws_table) | 905 | END(lws_table) |
681 | /* End of lws table */ | 906 | /* End of lws table */ |
682 | 907 | ||
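The block comment introducing lws_compare_and_swap_2 above defines the interface: %r26 holds the address, %r25 and %r24 point to the old and new values, %r23 selects the width (0/1/2/3 for 8/16/32/64 bit), %r28 returns non-zero on failure and %r21 carries -EAGAIN or -EFAULT. Stripped of the gateway-page entry sequence and the hashed lock, the operation it performs is a plain compare-and-swap; a hedged C restatement of that semantics (lws_cas2_sketch is an illustrative name, 64-bit width shown):

/* Hedged C restatement of the cas2 semantics documented above; the real code
 * runs on the gateway page under a hashed lock with interrupts disabled. */
#include <stdio.h>

static int lws_cas2_sketch(unsigned long long *addr,
                           const unsigned long long *old,
                           const unsigned long long *new_val)
{
        if (*addr != *old)
                return 1;       /* mismatch: %r28 comes back non-zero */
        *addr = *new_val;       /* prev == old: perform the store */
        return 0;               /* success: %r28 == 0, %r21 == 0 */
}

int main(void)
{
        unsigned long long word = 5, old = 5, new_val = 9;

        printf("cas ok=%d word=%llu\n",
               lws_cas2_sketch(&word, &old, &new_val) == 0, word);
        return 0;
}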
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 84c5d3a58fa1..b563d9c8268b 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -433,6 +433,9 @@ | |||
433 | ENTRY_SAME(sched_getattr) /* 335 */ | 433 | ENTRY_SAME(sched_getattr) /* 335 */ |
434 | ENTRY_COMP(utimes) | 434 | ENTRY_COMP(utimes) |
435 | ENTRY_SAME(renameat2) | 435 | ENTRY_SAME(renameat2) |
436 | ENTRY_SAME(seccomp) | ||
437 | ENTRY_SAME(getrandom) | ||
438 | ENTRY_SAME(memfd_create) /* 340 */ | ||
436 | 439 | ||
437 | /* Nothing yet */ | 440 | /* Nothing yet */ |
438 | 441 | ||
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 4bee1a6d41d0..45fd06cdc3e8 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=4 | 5 | CONFIG_NR_CPUS=4 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
7 | CONFIG_SYSVIPC=y | 7 | CONFIG_SYSVIPC=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_IKCONFIG=y | 9 | CONFIG_IKCONFIG=y |
9 | CONFIG_IKCONFIG_PROC=y | 10 | CONFIG_IKCONFIG_PROC=y |
10 | CONFIG_LOG_BUF_SHIFT=15 | 11 | CONFIG_LOG_BUF_SHIFT=15 |
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig index 6d7b22f41b50..77d7bf3ca2ac 100644 --- a/arch/powerpc/configs/celleb_defconfig +++ b/arch/powerpc/configs/celleb_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=4 | 5 | CONFIG_NR_CPUS=4 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
7 | CONFIG_SYSVIPC=y | 7 | CONFIG_SYSVIPC=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_IKCONFIG=y | 9 | CONFIG_IKCONFIG=y |
9 | CONFIG_IKCONFIG_PROC=y | 10 | CONFIG_IKCONFIG_PROC=y |
10 | CONFIG_LOG_BUF_SHIFT=15 | 11 | CONFIG_LOG_BUF_SHIFT=15 |
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 4b07bade1ba9..269d6e47c67d 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_ALTIVEC=y | |||
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=24 | 5 | CONFIG_NR_CPUS=24 |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_FHANDLE=y | ||
7 | CONFIG_IRQ_DOMAIN_DEBUG=y | 8 | CONFIG_IRQ_DOMAIN_DEBUG=y |
8 | CONFIG_NO_HZ=y | 9 | CONFIG_NO_HZ=y |
9 | CONFIG_HIGH_RES_TIMERS=y | 10 | CONFIG_HIGH_RES_TIMERS=y |
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 3c72fa615bd9..7594c5ac6481 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_NR_CPUS=4 | |||
5 | CONFIG_EXPERIMENTAL=y | 5 | CONFIG_EXPERIMENTAL=y |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_POSIX_MQUEUE=y | 7 | CONFIG_POSIX_MQUEUE=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_IKCONFIG=y | 9 | CONFIG_IKCONFIG=y |
9 | CONFIG_IKCONFIG_PROC=y | 10 | CONFIG_IKCONFIG_PROC=y |
10 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 95e545d9f25c..c8b6a9ddb21b 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_NR_CPUS=4 | |||
4 | CONFIG_EXPERIMENTAL=y | 4 | CONFIG_EXPERIMENTAL=y |
5 | CONFIG_SYSVIPC=y | 5 | CONFIG_SYSVIPC=y |
6 | CONFIG_POSIX_MQUEUE=y | 6 | CONFIG_POSIX_MQUEUE=y |
7 | CONFIG_FHANDLE=y | ||
7 | CONFIG_IKCONFIG=y | 8 | CONFIG_IKCONFIG=y |
8 | CONFIG_IKCONFIG_PROC=y | 9 | CONFIG_IKCONFIG_PROC=y |
9 | # CONFIG_COMPAT_BRK is not set | 10 | # CONFIG_COMPAT_BRK is not set |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index cec044a3ff69..e5e7838af008 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -3,6 +3,7 @@ CONFIG_ALTIVEC=y | |||
3 | CONFIG_SMP=y | 3 | CONFIG_SMP=y |
4 | CONFIG_NR_CPUS=2 | 4 | CONFIG_NR_CPUS=2 |
5 | CONFIG_SYSVIPC=y | 5 | CONFIG_SYSVIPC=y |
6 | CONFIG_FHANDLE=y | ||
6 | CONFIG_NO_HZ=y | 7 | CONFIG_NO_HZ=y |
7 | CONFIG_HIGH_RES_TIMERS=y | 8 | CONFIG_HIGH_RES_TIMERS=y |
8 | CONFIG_BLK_DEV_INITRD=y | 9 | CONFIG_BLK_DEV_INITRD=y |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f26b267eb71f..f6c02f8cdc62 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_VSX=y | |||
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_SYSVIPC=y | 5 | CONFIG_SYSVIPC=y |
6 | CONFIG_POSIX_MQUEUE=y | 6 | CONFIG_POSIX_MQUEUE=y |
7 | CONFIG_FHANDLE=y | ||
7 | CONFIG_IRQ_DOMAIN_DEBUG=y | 8 | CONFIG_IRQ_DOMAIN_DEBUG=y |
8 | CONFIG_NO_HZ=y | 9 | CONFIG_NO_HZ=y |
9 | CONFIG_HIGH_RES_TIMERS=y | 10 | CONFIG_HIGH_RES_TIMERS=y |
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 438e813dc9cb..587f5514f9b1 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig | |||
@@ -3,6 +3,7 @@ CONFIG_PPC_BOOK3E_64=y | |||
3 | CONFIG_SMP=y | 3 | CONFIG_SMP=y |
4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
5 | CONFIG_POSIX_MQUEUE=y | 5 | CONFIG_POSIX_MQUEUE=y |
6 | CONFIG_FHANDLE=y | ||
6 | CONFIG_NO_HZ=y | 7 | CONFIG_NO_HZ=y |
7 | CONFIG_HIGH_RES_TIMERS=y | 8 | CONFIG_HIGH_RES_TIMERS=y |
8 | CONFIG_TASKSTATS=y | 9 | CONFIG_TASKSTATS=y |
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index fdee37fab81c..2e637c881d2b 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=2 | 5 | CONFIG_NR_CPUS=2 |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_POSIX_MQUEUE=y | 7 | CONFIG_POSIX_MQUEUE=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_HIGH_RES_TIMERS=y | 9 | CONFIG_HIGH_RES_TIMERS=y |
9 | CONFIG_BLK_DEV_INITRD=y | 10 | CONFIG_BLK_DEV_INITRD=y |
10 | CONFIG_RD_LZMA=y | 11 | CONFIG_RD_LZMA=y |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index a905063281cc..50375f1f59e7 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=2048 | 5 | CONFIG_NR_CPUS=2048 |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_POSIX_MQUEUE=y | 7 | CONFIG_POSIX_MQUEUE=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_AUDIT=y | 9 | CONFIG_AUDIT=y |
9 | CONFIG_AUDITSYSCALL=y | 10 | CONFIG_AUDITSYSCALL=y |
10 | CONFIG_IRQ_DOMAIN_DEBUG=y | 11 | CONFIG_IRQ_DOMAIN_DEBUG=y |
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index 58e3dbf43ca4..4428ee428f4e 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig | |||
@@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2048 | |||
6 | CONFIG_CPU_LITTLE_ENDIAN=y | 6 | CONFIG_CPU_LITTLE_ENDIAN=y |
7 | CONFIG_SYSVIPC=y | 7 | CONFIG_SYSVIPC=y |
8 | CONFIG_POSIX_MQUEUE=y | 8 | CONFIG_POSIX_MQUEUE=y |
9 | CONFIG_FHANDLE=y | ||
9 | CONFIG_AUDIT=y | 10 | CONFIG_AUDIT=y |
10 | CONFIG_AUDITSYSCALL=y | 11 | CONFIG_AUDITSYSCALL=y |
11 | CONFIG_IRQ_DOMAIN_DEBUG=y | 12 | CONFIG_IRQ_DOMAIN_DEBUG=y |
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 279b80f3bb29..c0c61fa9cd9e 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
@@ -47,6 +47,12 @@ | |||
47 | STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) | 47 | STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) |
48 | #define STACK_FRAME_MARKER 12 | 48 | #define STACK_FRAME_MARKER 12 |
49 | 49 | ||
50 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
51 | #define STACK_FRAME_MIN_SIZE 32 | ||
52 | #else | ||
53 | #define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD | ||
54 | #endif | ||
55 | |||
50 | /* Size of dummy stack frame allocated when calling signal handler. */ | 56 | /* Size of dummy stack frame allocated when calling signal handler. */ |
51 | #define __SIGNAL_FRAMESIZE 128 | 57 | #define __SIGNAL_FRAMESIZE 128 |
52 | #define __SIGNAL_FRAMESIZE32 64 | 58 | #define __SIGNAL_FRAMESIZE32 64 |
@@ -60,6 +66,7 @@ | |||
60 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) | 66 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) |
61 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) | 67 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) |
62 | #define STACK_FRAME_MARKER 2 | 68 | #define STACK_FRAME_MARKER 2 |
69 | #define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD | ||
63 | 70 | ||
64 | /* Size of stack frame allocated when calling signal handler. */ | 71 | /* Size of stack frame allocated when calling signal handler. */ |
65 | #define __SIGNAL_FRAMESIZE 64 | 72 | #define __SIGNAL_FRAMESIZE 64 |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 542bc0f0673f..7d8a60068805 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
@@ -362,3 +362,6 @@ SYSCALL(ni_syscall) /* sys_kcmp */ | |||
362 | SYSCALL_SPU(sched_setattr) | 362 | SYSCALL_SPU(sched_setattr) |
363 | SYSCALL_SPU(sched_getattr) | 363 | SYSCALL_SPU(sched_getattr) |
364 | SYSCALL_SPU(renameat2) | 364 | SYSCALL_SPU(renameat2) |
365 | SYSCALL_SPU(seccomp) | ||
366 | SYSCALL_SPU(getrandom) | ||
367 | SYSCALL_SPU(memfd_create) | ||
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 5ce5552ab9f5..4e9af3fd43e7 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <uapi/asm/unistd.h> | 12 | #include <uapi/asm/unistd.h> |
13 | 13 | ||
14 | 14 | ||
15 | #define __NR_syscalls 358 | 15 | #define __NR_syscalls 361 |
16 | 16 | ||
17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
18 | #define NR_syscalls __NR_syscalls | 18 | #define NR_syscalls __NR_syscalls |
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 2d526f7b48da..0688fc06e183 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
@@ -380,5 +380,8 @@ | |||
380 | #define __NR_sched_setattr 355 | 380 | #define __NR_sched_setattr 355 |
381 | #define __NR_sched_getattr 356 | 381 | #define __NR_sched_getattr 356 |
382 | #define __NR_renameat2 357 | 382 | #define __NR_renameat2 357 |
383 | #define __NR_seccomp 358 | ||
384 | #define __NR_getrandom 359 | ||
385 | #define __NR_memfd_create 360 | ||
383 | 386 | ||
384 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 387 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 74d1e780748b..2396dda282cd 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c | |||
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | |||
35 | return 0; /* must be 16-byte aligned */ | 35 | return 0; /* must be 16-byte aligned */ |
36 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | 36 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) |
37 | return 0; | 37 | return 0; |
38 | if (sp >= prev_sp + STACK_FRAME_OVERHEAD) | 38 | if (sp >= prev_sp + STACK_FRAME_MIN_SIZE) |
39 | return 1; | 39 | return 1; |
40 | /* | 40 | /* |
41 | * sp could decrease when we jump off an interrupt stack | 41 | * sp could decrease when we jump off an interrupt stack |
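STACK_FRAME_MIN_SIZE exists because the ELFv2 ABI allows a 32-byte minimum stack frame, smaller than STACK_FRAME_OVERHEAD, so the perf back-trace walker above must accept a next frame that is only that minimum above the previous one or it silently drops valid frames. A hedged sketch of the check with the frame sizes written out as assumptions (32 for ELFv2, 112 for ELFv1 64-bit):

/* Hedged sketch of the frame-walk check; the kernel uses STACK_FRAME_MIN_SIZE
 * and STACK_FRAME_OVERHEAD, the literal values below are assumptions. */
#include <stdio.h>

#define SKETCH_FRAME_MIN_ELFV2   32UL
#define SKETCH_FRAME_MIN_ELFV1  112UL

static int next_sp_plausible(unsigned long sp, unsigned long prev_sp, int elfv2)
{
        unsigned long min = elfv2 ? SKETCH_FRAME_MIN_ELFV2 : SKETCH_FRAME_MIN_ELFV1;

        if (sp & 0xf)
                return 0;                 /* frames stay 16-byte aligned */
        return sp >= prev_sp + min;       /* must advance by at least one minimum frame */
}

int main(void)
{
        /* A 32-byte step is a valid ELFv2 frame but would be rejected under ELFv1. */
        printf("elfv2: %d  elfv1: %d\n",
               next_sp_plausible(0x1020, 0x1000, 1),
               next_sp_plausible(0x1020, 0x1000, 0));
        return 0;
}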
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index 97ac8dc33667..5e1ed1575aab 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <asm/opal.h> | 29 | #include <asm/opal.h> |
30 | #include <asm/cputable.h> | 30 | #include <asm/cputable.h> |
31 | #include <asm/machdep.h> | ||
31 | 32 | ||
32 | static int opal_hmi_handler_nb_init; | 33 | static int opal_hmi_handler_nb_init; |
33 | struct OpalHmiEvtNode { | 34 | struct OpalHmiEvtNode { |
@@ -185,4 +186,4 @@ static int __init opal_hmi_handler_init(void) | |||
185 | } | 186 | } |
186 | return 0; | 187 | return 0; |
187 | } | 188 | } |
188 | subsys_initcall(opal_hmi_handler_init); | 189 | machine_subsys_initcall(powernv, opal_hmi_handler_init); |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index c904583baf4b..17ee193960a0 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -113,7 +113,7 @@ out: | |||
113 | static int pseries_remove_mem_node(struct device_node *np) | 113 | static int pseries_remove_mem_node(struct device_node *np) |
114 | { | 114 | { |
115 | const char *type; | 115 | const char *type; |
116 | const unsigned int *regs; | 116 | const __be32 *regs; |
117 | unsigned long base; | 117 | unsigned long base; |
118 | unsigned int lmb_size; | 118 | unsigned int lmb_size; |
119 | int ret = -EINVAL; | 119 | int ret = -EINVAL; |
@@ -132,8 +132,8 @@ static int pseries_remove_mem_node(struct device_node *np) | |||
132 | if (!regs) | 132 | if (!regs) |
133 | return ret; | 133 | return ret; |
134 | 134 | ||
135 | base = *(unsigned long *)regs; | 135 | base = be64_to_cpu(*(unsigned long *)regs); |
136 | lmb_size = regs[3]; | 136 | lmb_size = be32_to_cpu(regs[3]); |
137 | 137 | ||
138 | pseries_remove_memblock(base, lmb_size); | 138 | pseries_remove_memblock(base, lmb_size); |
139 | return 0; | 139 | return 0; |
@@ -153,7 +153,7 @@ static inline int pseries_remove_mem_node(struct device_node *np) | |||
153 | static int pseries_add_mem_node(struct device_node *np) | 153 | static int pseries_add_mem_node(struct device_node *np) |
154 | { | 154 | { |
155 | const char *type; | 155 | const char *type; |
156 | const unsigned int *regs; | 156 | const __be32 *regs; |
157 | unsigned long base; | 157 | unsigned long base; |
158 | unsigned int lmb_size; | 158 | unsigned int lmb_size; |
159 | int ret = -EINVAL; | 159 | int ret = -EINVAL; |
@@ -172,8 +172,8 @@ static int pseries_add_mem_node(struct device_node *np) | |||
172 | if (!regs) | 172 | if (!regs) |
173 | return ret; | 173 | return ret; |
174 | 174 | ||
175 | base = *(unsigned long *)regs; | 175 | base = be64_to_cpu(*(unsigned long *)regs); |
176 | lmb_size = regs[3]; | 176 | lmb_size = be32_to_cpu(regs[3]); |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * Update memory region to represent the memory add | 179 | * Update memory region to represent the memory add |
@@ -187,14 +187,14 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) | |||
187 | struct of_drconf_cell *new_drmem, *old_drmem; | 187 | struct of_drconf_cell *new_drmem, *old_drmem; |
188 | unsigned long memblock_size; | 188 | unsigned long memblock_size; |
189 | u32 entries; | 189 | u32 entries; |
190 | u32 *p; | 190 | __be32 *p; |
191 | int i, rc = -EINVAL; | 191 | int i, rc = -EINVAL; |
192 | 192 | ||
193 | memblock_size = pseries_memory_block_size(); | 193 | memblock_size = pseries_memory_block_size(); |
194 | if (!memblock_size) | 194 | if (!memblock_size) |
195 | return -EINVAL; | 195 | return -EINVAL; |
196 | 196 | ||
197 | p = (u32 *) pr->old_prop->value; | 197 | p = (__be32 *) pr->old_prop->value; |
198 | if (!p) | 198 | if (!p) |
199 | return -EINVAL; | 199 | return -EINVAL; |
200 | 200 | ||
@@ -203,28 +203,30 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) | |||
203 | * entries. Get the number of entries and skip to the array of | 203 | * entries. Get the number of entries and skip to the array of |
204 | * of_drconf_cell's. | 204 | * of_drconf_cell's. |
205 | */ | 205 | */ |
206 | entries = *p++; | 206 | entries = be32_to_cpu(*p++); |
207 | old_drmem = (struct of_drconf_cell *)p; | 207 | old_drmem = (struct of_drconf_cell *)p; |
208 | 208 | ||
209 | p = (u32 *)pr->prop->value; | 209 | p = (__be32 *)pr->prop->value; |
210 | p++; | 210 | p++; |
211 | new_drmem = (struct of_drconf_cell *)p; | 211 | new_drmem = (struct of_drconf_cell *)p; |
212 | 212 | ||
213 | for (i = 0; i < entries; i++) { | 213 | for (i = 0; i < entries; i++) { |
214 | if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) && | 214 | if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) && |
215 | (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) { | 215 | (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) { |
216 | rc = pseries_remove_memblock(old_drmem[i].base_addr, | 216 | rc = pseries_remove_memblock( |
217 | be64_to_cpu(old_drmem[i].base_addr), | ||
217 | memblock_size); | 218 | memblock_size); |
218 | break; | 219 | break; |
219 | } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) && | 220 | } else if ((!(be32_to_cpu(old_drmem[i].flags) & |
220 | (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) { | 221 | DRCONF_MEM_ASSIGNED)) && |
221 | rc = memblock_add(old_drmem[i].base_addr, | 222 | (be32_to_cpu(new_drmem[i].flags) & |
223 | DRCONF_MEM_ASSIGNED)) { | ||
224 | rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr), | ||
222 | memblock_size); | 225 | memblock_size); |
223 | rc = (rc < 0) ? -EINVAL : 0; | 226 | rc = (rc < 0) ? -EINVAL : 0; |
224 | break; | 227 | break; |
225 | } | 228 | } |
226 | } | 229 | } |
227 | |||
228 | return rc; | 230 | return rc; |
229 | } | 231 | } |
230 | 232 | ||
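The hotplug-memory properties arrive from the device tree in big-endian byte order, so on a little-endian kernel every cell has to pass through be32_to_cpu()/be64_to_cpu() before it is used as a base address or block size, which is what the hunk above adds. A hedged userspace illustration of the same decoding, using <endian.h> be64toh/be32toh in place of the kernel helpers and a fabricated property buffer:

/* Hedged userspace illustration of decoding a big-endian DT-style property. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Fake property: a 64-bit base address followed by one 32-bit cell,
         * stored big-endian as firmware would provide it. */
        uint8_t prop[12] = { 0, 0, 0, 1, 0, 0, 0, 0,       /* base = 0x100000000 */
                             0x10, 0x00, 0x00, 0x00 };      /* size cell          */
        uint64_t base_be;
        uint32_t lmb_be;

        memcpy(&base_be, prop, sizeof(base_be));
        memcpy(&lmb_be, prop + 8, sizeof(lmb_be));

        printf("base=0x%llx lmb_size=0x%x\n",
               (unsigned long long)be64toh(base_be), (unsigned)be32toh(lmb_be));
        return 0;
}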
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 2fcccc0c997c..c81661e756a0 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h | |||
@@ -17,12 +17,12 @@ | |||
17 | #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ | 17 | #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ |
18 | sizeof(struct ipl_block_fcp)) | 18 | sizeof(struct ipl_block_fcp)) |
19 | 19 | ||
20 | #define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8) | 20 | #define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16) |
21 | 21 | ||
22 | #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \ | 22 | #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \ |
23 | sizeof(struct ipl_block_ccw)) | 23 | sizeof(struct ipl_block_ccw)) |
24 | 24 | ||
25 | #define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8) | 25 | #define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16) |
26 | 26 | ||
27 | #define IPL_MAX_SUPPORTED_VERSION (0) | 27 | #define IPL_MAX_SUPPORTED_VERSION (0) |
28 | 28 | ||
@@ -38,10 +38,11 @@ struct ipl_list_hdr { | |||
38 | u8 pbt; | 38 | u8 pbt; |
39 | u8 flags; | 39 | u8 flags; |
40 | u16 reserved2; | 40 | u16 reserved2; |
41 | u8 loadparm[8]; | ||
41 | } __attribute__((packed)); | 42 | } __attribute__((packed)); |
42 | 43 | ||
43 | struct ipl_block_fcp { | 44 | struct ipl_block_fcp { |
44 | u8 reserved1[313-1]; | 45 | u8 reserved1[305-1]; |
45 | u8 opt; | 46 | u8 opt; |
46 | u8 reserved2[3]; | 47 | u8 reserved2[3]; |
47 | u16 reserved3; | 48 | u16 reserved3; |
@@ -62,7 +63,6 @@ struct ipl_block_fcp { | |||
62 | offsetof(struct ipl_block_fcp, scp_data))) | 63 | offsetof(struct ipl_block_fcp, scp_data))) |
63 | 64 | ||
64 | struct ipl_block_ccw { | 65 | struct ipl_block_ccw { |
65 | u8 load_parm[8]; | ||
66 | u8 reserved1[84]; | 66 | u8 reserved1[84]; |
67 | u8 reserved2[2]; | 67 | u8 reserved2[2]; |
68 | u16 devno; | 68 | u16 devno; |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 22aac5885ba2..39badb9ca0b3 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -455,22 +455,6 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long) | |||
455 | DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long) | 455 | DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long) |
456 | IPL_PARMBLOCK_START->ipl_info.fcp.br_lba); | 456 | IPL_PARMBLOCK_START->ipl_info.fcp.br_lba); |
457 | 457 | ||
458 | static struct attribute *ipl_fcp_attrs[] = { | ||
459 | &sys_ipl_type_attr.attr, | ||
460 | &sys_ipl_device_attr.attr, | ||
461 | &sys_ipl_fcp_wwpn_attr.attr, | ||
462 | &sys_ipl_fcp_lun_attr.attr, | ||
463 | &sys_ipl_fcp_bootprog_attr.attr, | ||
464 | &sys_ipl_fcp_br_lba_attr.attr, | ||
465 | NULL, | ||
466 | }; | ||
467 | |||
468 | static struct attribute_group ipl_fcp_attr_group = { | ||
469 | .attrs = ipl_fcp_attrs, | ||
470 | }; | ||
471 | |||
472 | /* CCW ipl device attributes */ | ||
473 | |||
474 | static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, | 458 | static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, |
475 | struct kobj_attribute *attr, char *page) | 459 | struct kobj_attribute *attr, char *page) |
476 | { | 460 | { |
@@ -487,6 +471,23 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, | |||
487 | static struct kobj_attribute sys_ipl_ccw_loadparm_attr = | 471 | static struct kobj_attribute sys_ipl_ccw_loadparm_attr = |
488 | __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); | 472 | __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); |
489 | 473 | ||
474 | static struct attribute *ipl_fcp_attrs[] = { | ||
475 | &sys_ipl_type_attr.attr, | ||
476 | &sys_ipl_device_attr.attr, | ||
477 | &sys_ipl_fcp_wwpn_attr.attr, | ||
478 | &sys_ipl_fcp_lun_attr.attr, | ||
479 | &sys_ipl_fcp_bootprog_attr.attr, | ||
480 | &sys_ipl_fcp_br_lba_attr.attr, | ||
481 | &sys_ipl_ccw_loadparm_attr.attr, | ||
482 | NULL, | ||
483 | }; | ||
484 | |||
485 | static struct attribute_group ipl_fcp_attr_group = { | ||
486 | .attrs = ipl_fcp_attrs, | ||
487 | }; | ||
488 | |||
489 | /* CCW ipl device attributes */ | ||
490 | |||
490 | static struct attribute *ipl_ccw_attrs_vm[] = { | 491 | static struct attribute *ipl_ccw_attrs_vm[] = { |
491 | &sys_ipl_type_attr.attr, | 492 | &sys_ipl_type_attr.attr, |
492 | &sys_ipl_device_attr.attr, | 493 | &sys_ipl_device_attr.attr, |
@@ -765,28 +766,10 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n", | |||
765 | DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", | 766 | DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", |
766 | reipl_block_fcp->ipl_info.fcp.devno); | 767 | reipl_block_fcp->ipl_info.fcp.devno); |
767 | 768 | ||
768 | static struct attribute *reipl_fcp_attrs[] = { | ||
769 | &sys_reipl_fcp_device_attr.attr, | ||
770 | &sys_reipl_fcp_wwpn_attr.attr, | ||
771 | &sys_reipl_fcp_lun_attr.attr, | ||
772 | &sys_reipl_fcp_bootprog_attr.attr, | ||
773 | &sys_reipl_fcp_br_lba_attr.attr, | ||
774 | NULL, | ||
775 | }; | ||
776 | |||
777 | static struct attribute_group reipl_fcp_attr_group = { | ||
778 | .attrs = reipl_fcp_attrs, | ||
779 | }; | ||
780 | |||
781 | /* CCW reipl device attributes */ | ||
782 | |||
783 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
784 | reipl_block_ccw->ipl_info.ccw.devno); | ||
785 | |||
786 | static void reipl_get_ascii_loadparm(char *loadparm, | 769 | static void reipl_get_ascii_loadparm(char *loadparm, |
787 | struct ipl_parameter_block *ibp) | 770 | struct ipl_parameter_block *ibp) |
788 | { | 771 | { |
789 | memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN); | 772 | memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN); |
790 | EBCASC(loadparm, LOADPARM_LEN); | 773 | EBCASC(loadparm, LOADPARM_LEN); |
791 | loadparm[LOADPARM_LEN] = 0; | 774 | loadparm[LOADPARM_LEN] = 0; |
792 | strim(loadparm); | 775 | strim(loadparm); |
@@ -821,13 +804,50 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb, | |||
821 | return -EINVAL; | 804 | return -EINVAL; |
822 | } | 805 | } |
823 | /* initialize loadparm with blanks */ | 806 | /* initialize loadparm with blanks */ |
824 | memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN); | 807 | memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN); |
825 | /* copy and convert to ebcdic */ | 808 | /* copy and convert to ebcdic */ |
826 | memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len); | 809 | memcpy(ipb->hdr.loadparm, buf, lp_len); |
827 | ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN); | 810 | ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN); |
828 | return len; | 811 | return len; |
829 | } | 812 | } |
830 | 813 | ||
814 | /* FCP wrapper */ | ||
815 | static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj, | ||
816 | struct kobj_attribute *attr, char *page) | ||
817 | { | ||
818 | return reipl_generic_loadparm_show(reipl_block_fcp, page); | ||
819 | } | ||
820 | |||
821 | static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj, | ||
822 | struct kobj_attribute *attr, | ||
823 | const char *buf, size_t len) | ||
824 | { | ||
825 | return reipl_generic_loadparm_store(reipl_block_fcp, buf, len); | ||
826 | } | ||
827 | |||
828 | static struct kobj_attribute sys_reipl_fcp_loadparm_attr = | ||
829 | __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show, | ||
830 | reipl_fcp_loadparm_store); | ||
831 | |||
832 | static struct attribute *reipl_fcp_attrs[] = { | ||
833 | &sys_reipl_fcp_device_attr.attr, | ||
834 | &sys_reipl_fcp_wwpn_attr.attr, | ||
835 | &sys_reipl_fcp_lun_attr.attr, | ||
836 | &sys_reipl_fcp_bootprog_attr.attr, | ||
837 | &sys_reipl_fcp_br_lba_attr.attr, | ||
838 | &sys_reipl_fcp_loadparm_attr.attr, | ||
839 | NULL, | ||
840 | }; | ||
841 | |||
842 | static struct attribute_group reipl_fcp_attr_group = { | ||
843 | .attrs = reipl_fcp_attrs, | ||
844 | }; | ||
845 | |||
846 | /* CCW reipl device attributes */ | ||
847 | |||
848 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
849 | reipl_block_ccw->ipl_info.ccw.devno); | ||
850 | |||
831 | /* NSS wrapper */ | 851 | /* NSS wrapper */ |
832 | static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, | 852 | static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, |
833 | struct kobj_attribute *attr, char *page) | 853 | struct kobj_attribute *attr, char *page) |
@@ -1125,11 +1145,10 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb) | |||
1125 | /* LOADPARM */ | 1145 | /* LOADPARM */ |
1126 | /* check if read scp info worked and set loadparm */ | 1146 | /* check if read scp info worked and set loadparm */ |
1127 | if (sclp_ipl_info.is_valid) | 1147 | if (sclp_ipl_info.is_valid) |
1128 | memcpy(ipb->ipl_info.ccw.load_parm, | 1148 | memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN); |
1129 | &sclp_ipl_info.loadparm, LOADPARM_LEN); | ||
1130 | else | 1149 | else |
1131 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ | 1150 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ |
1132 | memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN); | 1151 | memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN); |
1133 | ipb->hdr.flags = DIAG308_FLAGS_LP_VALID; | 1152 | ipb->hdr.flags = DIAG308_FLAGS_LP_VALID; |
1134 | 1153 | ||
1135 | /* VM PARM */ | 1154 | /* VM PARM */ |
@@ -1251,9 +1270,16 @@ static int __init reipl_fcp_init(void) | |||
1251 | return rc; | 1270 | return rc; |
1252 | } | 1271 | } |
1253 | 1272 | ||
1254 | if (ipl_info.type == IPL_TYPE_FCP) | 1273 | if (ipl_info.type == IPL_TYPE_FCP) { |
1255 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); | 1274 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); |
1256 | else { | 1275 | /* |
1276 | * Fix loadparm: There are systems where the (SCSI) LOADPARM | ||
1277 | * is invalid in the SCSI IPL parameter block, so take it | ||
1278 | * always from sclp_ipl_info. | ||
1279 | */ | ||
1280 | memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm, | ||
1281 | LOADPARM_LEN); | ||
1282 | } else { | ||
1257 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; | 1283 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; |
1258 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; | 1284 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; |
1259 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; | 1285 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; |
@@ -1864,7 +1890,23 @@ static void __init shutdown_actions_init(void) | |||
1864 | 1890 | ||
1865 | static int __init s390_ipl_init(void) | 1891 | static int __init s390_ipl_init(void) |
1866 | { | 1892 | { |
1893 | char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40}; | ||
1894 | |||
1867 | sclp_get_ipl_info(&sclp_ipl_info); | 1895 | sclp_get_ipl_info(&sclp_ipl_info); |
1896 | /* | ||
1897 | * Fix loadparm: There are systems where the (SCSI) LOADPARM | ||
1898 | * returned by read SCP info is invalid (contains EBCDIC blanks) | ||
1899 | * when the system has been booted via diag308. In that case we use | ||
1900 | * the value from diag308, if available. | ||
1901 | * | ||
1902 | * There are also systems where diag308 store does not work in | ||
1903 | * case the system is booted from HMC. Fortunately in this case | ||
1904 | * READ SCP info provides the correct value. | ||
1905 | */ | ||
1906 | if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && | ||
1907 | diag308_set_works) | ||
1908 | memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, | ||
1909 | LOADPARM_LEN); | ||
1868 | shutdown_actions_init(); | 1910 | shutdown_actions_init(); |
1869 | shutdown_triggers_init(); | 1911 | shutdown_triggers_init(); |
1870 | return 0; | 1912 | return 0; |
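
The ipl.c hunks above move the CCW loadparm into the common parameter-block header and add two LOADPARM fallbacks: reipl_fcp_init() now always takes the FCP loadparm from sclp_ipl_info, and s390_ipl_init() replaces an all-EBCDIC-blank SCP loadparm with the diag308 value when diag308 store is known to work. Reduced to a standalone C sketch (illustrative helper name, not the kernel's):

#include <string.h>

#define LOADPARM_LEN	8
#define EBCDIC_BLANK	0x40

/* Prefer the SCLP-provided LOADPARM; fall back to the diag308 value
 * when SCLP returned only EBCDIC blanks and diag308 store works. */
static void pick_loadparm(unsigned char *dst,
                          const unsigned char *sclp_lp,
                          const unsigned char *diag308_lp,
                          int diag308_works)
{
        unsigned char blanks[LOADPARM_LEN];

        memset(blanks, EBCDIC_BLANK, sizeof(blanks));

        if (!memcmp(sclp_lp, blanks, LOADPARM_LEN) && diag308_works)
                memcpy(dst, diag308_lp, LOADPARM_LEN);
        else
                memcpy(dst, sclp_lp, LOADPARM_LEN);
}
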
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index 65fc3979c2f1..7cf18f8d4cb4 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S | |||
@@ -22,13 +22,11 @@ __kernel_clock_gettime: | |||
22 | basr %r5,0 | 22 | basr %r5,0 |
23 | 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ | 23 | 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ |
24 | chi %r2,__CLOCK_REALTIME | 24 | chi %r2,__CLOCK_REALTIME |
25 | je 10f | 25 | je 11f |
26 | chi %r2,__CLOCK_MONOTONIC | 26 | chi %r2,__CLOCK_MONOTONIC |
27 | jne 19f | 27 | jne 19f |
28 | 28 | ||
29 | /* CLOCK_MONOTONIC */ | 29 | /* CLOCK_MONOTONIC */ |
30 | ltr %r3,%r3 | ||
31 | jz 9f /* tp == NULL */ | ||
32 | 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | 30 | 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ |
33 | tml %r4,0x0001 /* pending update ? loop */ | 31 | tml %r4,0x0001 /* pending update ? loop */ |
34 | jnz 1b | 32 | jnz 1b |
@@ -67,12 +65,10 @@ __kernel_clock_gettime: | |||
67 | j 6b | 65 | j 6b |
68 | 8: st %r2,0(%r3) /* store tp->tv_sec */ | 66 | 8: st %r2,0(%r3) /* store tp->tv_sec */ |
69 | st %r1,4(%r3) /* store tp->tv_nsec */ | 67 | st %r1,4(%r3) /* store tp->tv_nsec */ |
70 | 9: lhi %r2,0 | 68 | lhi %r2,0 |
71 | br %r14 | 69 | br %r14 |
72 | 70 | ||
73 | /* CLOCK_REALTIME */ | 71 | /* CLOCK_REALTIME */ |
74 | 10: ltr %r3,%r3 /* tp == NULL */ | ||
75 | jz 18f | ||
76 | 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | 72 | 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ |
77 | tml %r4,0x0001 /* pending update ? loop */ | 73 | tml %r4,0x0001 /* pending update ? loop */ |
78 | jnz 11b | 74 | jnz 11b |
@@ -111,7 +107,7 @@ __kernel_clock_gettime: | |||
111 | j 15b | 107 | j 15b |
112 | 17: st %r2,0(%r3) /* store tp->tv_sec */ | 108 | 17: st %r2,0(%r3) /* store tp->tv_sec */ |
113 | st %r1,4(%r3) /* store tp->tv_nsec */ | 109 | st %r1,4(%r3) /* store tp->tv_nsec */ |
114 | 18: lhi %r2,0 | 110 | lhi %r2,0 |
115 | br %r14 | 111 | br %r14 |
116 | 112 | ||
117 | /* Fallback to system call */ | 113 | /* Fallback to system call */ |
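
The clock_gettime hunks (here and in the 64-bit variant below) drop the explicit tp == NULL early exits, so both CLOCK_REALTIME and CLOCK_MONOTONIC fall straight into the update-counter read loop. That loop is the interesting part; a hedged C rendering of the general reader-side pattern, with illustrative field names and memory barriers omitted:

#include <stdint.h>

/* The kernel bumps an update counter to odd before touching the time
 * data and back to even afterwards; a reader spins while an update is
 * pending and retries if the counter changed underneath it. */
struct fake_vdso_data {
        volatile uint64_t upd_count;    /* odd while an update is in flight */
        uint64_t xtime_sec;
        uint64_t xtime_nsec;
};

static void read_time(const struct fake_vdso_data *vd,
                      uint64_t *sec, uint64_t *nsec)
{
        uint64_t start;

        do {
                do {
                        start = vd->upd_count;
                } while (start & 1);            /* pending update ? loop */
                *sec  = vd->xtime_sec;
                *nsec = vd->xtime_nsec;
        } while (vd->upd_count != start);       /* changed underneath ? retry */
}
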
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 91940ed33a4a..3f34e09db5f4 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S | |||
@@ -21,7 +21,7 @@ __kernel_clock_gettime: | |||
21 | .cfi_startproc | 21 | .cfi_startproc |
22 | larl %r5,_vdso_data | 22 | larl %r5,_vdso_data |
23 | cghi %r2,__CLOCK_REALTIME | 23 | cghi %r2,__CLOCK_REALTIME |
24 | je 4f | 24 | je 5f |
25 | cghi %r2,__CLOCK_THREAD_CPUTIME_ID | 25 | cghi %r2,__CLOCK_THREAD_CPUTIME_ID |
26 | je 9f | 26 | je 9f |
27 | cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ | 27 | cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ |
@@ -30,8 +30,6 @@ __kernel_clock_gettime: | |||
30 | jne 12f | 30 | jne 12f |
31 | 31 | ||
32 | /* CLOCK_MONOTONIC */ | 32 | /* CLOCK_MONOTONIC */ |
33 | ltgr %r3,%r3 | ||
34 | jz 3f /* tp == NULL */ | ||
35 | 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | 33 | 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ |
36 | tmll %r4,0x0001 /* pending update ? loop */ | 34 | tmll %r4,0x0001 /* pending update ? loop */ |
37 | jnz 0b | 35 | jnz 0b |
@@ -53,12 +51,10 @@ __kernel_clock_gettime: | |||
53 | j 1b | 51 | j 1b |
54 | 2: stg %r0,0(%r3) /* store tp->tv_sec */ | 52 | 2: stg %r0,0(%r3) /* store tp->tv_sec */ |
55 | stg %r1,8(%r3) /* store tp->tv_nsec */ | 53 | stg %r1,8(%r3) /* store tp->tv_nsec */ |
56 | 3: lghi %r2,0 | 54 | lghi %r2,0 |
57 | br %r14 | 55 | br %r14 |
58 | 56 | ||
59 | /* CLOCK_REALTIME */ | 57 | /* CLOCK_REALTIME */ |
60 | 4: ltr %r3,%r3 /* tp == NULL */ | ||
61 | jz 8f | ||
62 | 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | 58 | 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ |
63 | tmll %r4,0x0001 /* pending update ? loop */ | 59 | tmll %r4,0x0001 /* pending update ? loop */ |
64 | jnz 5b | 60 | jnz 5b |
@@ -80,7 +76,7 @@ __kernel_clock_gettime: | |||
80 | j 6b | 76 | j 6b |
81 | 7: stg %r0,0(%r3) /* store tp->tv_sec */ | 77 | 7: stg %r0,0(%r3) /* store tp->tv_sec */ |
82 | stg %r1,8(%r3) /* store tp->tv_nsec */ | 78 | stg %r1,8(%r3) /* store tp->tv_nsec */ |
83 | 8: lghi %r2,0 | 79 | lghi %r2,0 |
84 | br %r14 | 80 | br %r14 |
85 | 81 | ||
86 | /* CLOCK_THREAD_CPUTIME_ID for this thread */ | 82 | /* CLOCK_THREAD_CPUTIME_ID for this thread */ |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0c1073ed1e84..c7235e01fd67 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -43,6 +43,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); | |||
43 | 43 | ||
44 | unsigned long empty_zero_page, zero_page_mask; | 44 | unsigned long empty_zero_page, zero_page_mask; |
45 | EXPORT_SYMBOL(empty_zero_page); | 45 | EXPORT_SYMBOL(empty_zero_page); |
46 | EXPORT_SYMBOL(zero_page_mask); | ||
46 | 47 | ||
47 | static void __init setup_zero_pages(void) | 48 | static void __init setup_zero_pages(void) |
48 | { | 49 | { |
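
The init.c hunk exports zero_page_mask next to empty_zero_page. On s390 the ZERO_PAGE() macro indexes the zero-page area with zero_page_mask, so a module that merely uses ZERO_PAGE() needs both symbols. A hypothetical module-style user (illustrative, not part of the patch):

#include <linux/module.h>
#include <linux/mm.h>

static int __init zp_demo_init(void)
{
        struct page *zp = ZERO_PAGE(0); /* expands via zero_page_mask on s390 */

        pr_info("zero page at pfn %lx\n", page_to_pfn(zp));
        return 0;
}

static void __exit zp_demo_exit(void)
{
}

module_init(zp_demo_init);
module_exit(zp_demo_exit);
MODULE_LICENSE("GPL");
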
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c index bf8daf9d9c9b..37458f38b220 100644 --- a/arch/sh/mm/gup.c +++ b/arch/sh/mm/gup.c | |||
@@ -105,6 +105,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
105 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 105 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
106 | page = pte_page(pte); | 106 | page = pte_page(pte); |
107 | get_page(page); | 107 | get_page(page); |
108 | __flush_anon_page(page, addr); | ||
109 | flush_dcache_page(page); | ||
108 | pages[*nr] = page; | 110 | pages[*nr] = page; |
109 | (*nr)++; | 111 | (*nr)++; |
110 | 112 | ||
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 2dde48bdcc42..7c06f18150ab 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c | |||
@@ -234,12 +234,18 @@ do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \ | |||
234 | __emit_load8(BASE, STRUCT, FIELD, DEST); \ | 234 | __emit_load8(BASE, STRUCT, FIELD, DEST); \ |
235 | } while (0) | 235 | } while (0) |
236 | 236 | ||
237 | #define emit_ldmem(OFF, DEST) \ | 237 | #ifdef CONFIG_SPARC64 |
238 | do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \ | 238 | #define BIAS (STACK_BIAS - 4) |
239 | #else | ||
240 | #define BIAS (-4) | ||
241 | #endif | ||
242 | |||
243 | #define emit_ldmem(OFF, DEST) \ | ||
244 | do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \ | ||
239 | } while (0) | 245 | } while (0) |
240 | 246 | ||
241 | #define emit_stmem(OFF, SRC) \ | 247 | #define emit_stmem(OFF, SRC) \ |
242 | do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \ | 248 | do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \ |
243 | } while (0) | 249 | } while (0) |
244 | 250 | ||
245 | #ifdef CONFIG_SMP | 251 | #ifdef CONFIG_SMP |
@@ -610,10 +616,11 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
610 | case BPF_ANC | SKF_AD_VLAN_TAG: | 616 | case BPF_ANC | SKF_AD_VLAN_TAG: |
611 | case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: | 617 | case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: |
612 | emit_skb_load16(vlan_tci, r_A); | 618 | emit_skb_load16(vlan_tci, r_A); |
613 | if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { | 619 | if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) { |
614 | emit_andi(r_A, VLAN_VID_MASK, r_A); | 620 | emit_alu_K(SRL, 12); |
621 | emit_andi(r_A, 1, r_A); | ||
615 | } else { | 622 | } else { |
616 | emit_loadimm(VLAN_TAG_PRESENT, r_TMP); | 623 | emit_loadimm(~VLAN_TAG_PRESENT, r_TMP); |
617 | emit_and(r_A, r_TMP, r_A); | 624 | emit_and(r_A, r_TMP, r_A); |
618 | } | 625 | } |
619 | break; | 626 | break; |
@@ -625,15 +632,19 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
625 | emit_loadimm(K, r_X); | 632 | emit_loadimm(K, r_X); |
626 | break; | 633 | break; |
627 | case BPF_LD | BPF_MEM: | 634 | case BPF_LD | BPF_MEM: |
635 | seen |= SEEN_MEM; | ||
628 | emit_ldmem(K * 4, r_A); | 636 | emit_ldmem(K * 4, r_A); |
629 | break; | 637 | break; |
630 | case BPF_LDX | BPF_MEM: | 638 | case BPF_LDX | BPF_MEM: |
639 | seen |= SEEN_MEM | SEEN_XREG; | ||
631 | emit_ldmem(K * 4, r_X); | 640 | emit_ldmem(K * 4, r_X); |
632 | break; | 641 | break; |
633 | case BPF_ST: | 642 | case BPF_ST: |
643 | seen |= SEEN_MEM; | ||
634 | emit_stmem(K * 4, r_A); | 644 | emit_stmem(K * 4, r_A); |
635 | break; | 645 | break; |
636 | case BPF_STX: | 646 | case BPF_STX: |
647 | seen |= SEEN_MEM | SEEN_XREG; | ||
637 | emit_stmem(K * 4, r_X); | 648 | emit_stmem(K * 4, r_X); |
638 | break; | 649 | break; |
639 | 650 | ||
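
The sparc JIT hunks fix classic-BPF scratch memory: M[K] loads and stores are now addressed off %sp with the proper stack bias, the store macro actually emits a store opcode (the old one reused LD32I), and the LD/LDX/ST/STX cases set SEEN_MEM/SEEN_XREG so the prologue reserves the space and loads X. The VLAN ancillary loads are corrected as well. A small C sketch of the resulting address math and tag extraction; STACK_BIAS and the present bit are stated assumptions for the sketch:

#include <stdint.h>

#define STACK_BIAS              2047                    /* assumed sparc64 bias */
#define BIAS                    (STACK_BIAS - 4)        /* sparc32 would use -4 */
#define VLAN_TAG_PRESENT        0x1000                  /* bit 12 of vlan_tci */

/* Where classic-BPF scratch slot M[k] now lives relative to %sp. */
static intptr_t scratch_slot_addr(intptr_t sp, int k)
{
        return sp + BIAS - (k * 4);     /* mirrors emit_ldmem(K * 4, ...) */
}

/* The corrected VLAN ancillary loads, in C. */
static uint32_t vlan_tag(uint16_t vlan_tci)
{
        return vlan_tci & ~VLAN_TAG_PRESENT;    /* keep tag, strip present bit */
}

static uint32_t vlan_tag_present(uint16_t vlan_tci)
{
        return (vlan_tci >> 12) & 1;            /* just the present bit */
}
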
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 778178f4c7d1..36327438caf0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -23,6 +23,7 @@ config X86 | |||
23 | def_bool y | 23 | def_bool y |
24 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI | 24 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI |
25 | select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS | 25 | select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS |
26 | select ARCH_HAS_FAST_MULTIPLIER | ||
26 | select ARCH_MIGHT_HAVE_PC_PARPORT | 27 | select ARCH_MIGHT_HAVE_PC_PARPORT |
27 | select ARCH_MIGHT_HAVE_PC_SERIO | 28 | select ARCH_MIGHT_HAVE_PC_SERIO |
28 | select HAVE_AOUT if X86_32 | 29 | select HAVE_AOUT if X86_32 |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index f277184e2ac1..dca9842d8f91 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -1032,7 +1032,6 @@ struct boot_params *make_boot_params(struct efi_config *c) | |||
1032 | int i; | 1032 | int i; |
1033 | unsigned long ramdisk_addr; | 1033 | unsigned long ramdisk_addr; |
1034 | unsigned long ramdisk_size; | 1034 | unsigned long ramdisk_size; |
1035 | unsigned long initrd_addr_max; | ||
1036 | 1035 | ||
1037 | efi_early = c; | 1036 | efi_early = c; |
1038 | sys_table = (efi_system_table_t *)(unsigned long)efi_early->table; | 1037 | sys_table = (efi_system_table_t *)(unsigned long)efi_early->table; |
@@ -1095,15 +1094,20 @@ struct boot_params *make_boot_params(struct efi_config *c) | |||
1095 | 1094 | ||
1096 | memset(sdt, 0, sizeof(*sdt)); | 1095 | memset(sdt, 0, sizeof(*sdt)); |
1097 | 1096 | ||
1098 | if (hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) | ||
1099 | initrd_addr_max = -1UL; | ||
1100 | else | ||
1101 | initrd_addr_max = hdr->initrd_addr_max; | ||
1102 | |||
1103 | status = handle_cmdline_files(sys_table, image, | 1097 | status = handle_cmdline_files(sys_table, image, |
1104 | (char *)(unsigned long)hdr->cmd_line_ptr, | 1098 | (char *)(unsigned long)hdr->cmd_line_ptr, |
1105 | "initrd=", initrd_addr_max, | 1099 | "initrd=", hdr->initrd_addr_max, |
1106 | &ramdisk_addr, &ramdisk_size); | 1100 | &ramdisk_addr, &ramdisk_size); |
1101 | |||
1102 | if (status != EFI_SUCCESS && | ||
1103 | hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) { | ||
1104 | efi_printk(sys_table, "Trying to load files to higher address\n"); | ||
1105 | status = handle_cmdline_files(sys_table, image, | ||
1106 | (char *)(unsigned long)hdr->cmd_line_ptr, | ||
1107 | "initrd=", -1UL, | ||
1108 | &ramdisk_addr, &ramdisk_size); | ||
1109 | } | ||
1110 | |||
1107 | if (status != EFI_SUCCESS) | 1111 | if (status != EFI_SUCCESS) |
1108 | goto fail2; | 1112 | goto fail2; |
1109 | hdr->ramdisk_image = ramdisk_addr & 0xffffffff; | 1113 | hdr->ramdisk_image = ramdisk_addr & 0xffffffff; |
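
make_boot_params() no longer precomputes initrd_addr_max. It first asks handle_cmdline_files() to place the initrd below hdr->initrd_addr_max and only retries with no limit when that fails and the kernel advertises XLF_CAN_BE_LOADED_ABOVE_4G. The shape of that retry, sketched with a hypothetical load_initrd_below() standing in for handle_cmdline_files():

#define EFI_SUCCESS                     0
#define XLF_CAN_BE_LOADED_ABOVE_4G      (1 << 1)        /* x86 boot protocol flag */

typedef int efi_status_t;

/* Hypothetical stand-in for the EFI stub file loader. */
efi_status_t load_initrd_below(unsigned long addr_max,
                               unsigned long *addr, unsigned long *size);

static efi_status_t load_initrd(unsigned int xloadflags,
                                unsigned long initrd_addr_max,
                                unsigned long *addr, unsigned long *size)
{
        efi_status_t status;

        /* First honour the limit from the setup header. */
        status = load_initrd_below(initrd_addr_max, addr, size);

        /* Relax the limit only if the kernel can cope with a high initrd. */
        if (status != EFI_SUCCESS &&
            (xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G))
                status = load_initrd_below(-1UL, addr, size);

        return status;
}
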
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index cbed1407a5cd..d6b8aa4c986c 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S | |||
@@ -30,6 +30,33 @@ | |||
30 | #include <asm/boot.h> | 30 | #include <asm/boot.h> |
31 | #include <asm/asm-offsets.h> | 31 | #include <asm/asm-offsets.h> |
32 | 32 | ||
33 | /* | ||
34 | * Adjust our own GOT | ||
35 | * | ||
36 | * The relocation base must be in %ebx | ||
37 | * | ||
38 | * It is safe to call this macro more than once, because in some of the | ||
39 | * code paths multiple invocations are inevitable, e.g. via the efi* | ||
40 | * entry points. | ||
41 | * | ||
42 | * Relocation is only performed the first time. | ||
43 | */ | ||
44 | .macro FIXUP_GOT | ||
45 | cmpb $1, got_fixed(%ebx) | ||
46 | je 2f | ||
47 | |||
48 | leal _got(%ebx), %edx | ||
49 | leal _egot(%ebx), %ecx | ||
50 | 1: | ||
51 | cmpl %ecx, %edx | ||
52 | jae 2f | ||
53 | addl %ebx, (%edx) | ||
54 | addl $4, %edx | ||
55 | jmp 1b | ||
56 | 2: | ||
57 | movb $1, got_fixed(%ebx) | ||
58 | .endm | ||
59 | |||
33 | __HEAD | 60 | __HEAD |
34 | ENTRY(startup_32) | 61 | ENTRY(startup_32) |
35 | #ifdef CONFIG_EFI_STUB | 62 | #ifdef CONFIG_EFI_STUB |
@@ -56,6 +83,9 @@ ENTRY(efi_pe_entry) | |||
56 | add %esi, 88(%eax) | 83 | add %esi, 88(%eax) |
57 | pushl %eax | 84 | pushl %eax |
58 | 85 | ||
86 | movl %esi, %ebx | ||
87 | FIXUP_GOT | ||
88 | |||
59 | call make_boot_params | 89 | call make_boot_params |
60 | cmpl $0, %eax | 90 | cmpl $0, %eax |
61 | je fail | 91 | je fail |
@@ -81,6 +111,10 @@ ENTRY(efi32_stub_entry) | |||
81 | leal efi32_config(%esi), %eax | 111 | leal efi32_config(%esi), %eax |
82 | add %esi, 88(%eax) | 112 | add %esi, 88(%eax) |
83 | pushl %eax | 113 | pushl %eax |
114 | |||
115 | movl %esi, %ebx | ||
116 | FIXUP_GOT | ||
117 | |||
84 | 2: | 118 | 2: |
85 | call efi_main | 119 | call efi_main |
86 | cmpl $0, %eax | 120 | cmpl $0, %eax |
@@ -190,19 +224,7 @@ relocated: | |||
190 | shrl $2, %ecx | 224 | shrl $2, %ecx |
191 | rep stosl | 225 | rep stosl |
192 | 226 | ||
193 | /* | 227 | FIXUP_GOT |
194 | * Adjust our own GOT | ||
195 | */ | ||
196 | leal _got(%ebx), %edx | ||
197 | leal _egot(%ebx), %ecx | ||
198 | 1: | ||
199 | cmpl %ecx, %edx | ||
200 | jae 2f | ||
201 | addl %ebx, (%edx) | ||
202 | addl $4, %edx | ||
203 | jmp 1b | ||
204 | 2: | ||
205 | |||
206 | /* | 228 | /* |
207 | * Do the decompression, and jump to the new kernel.. | 229 | * Do the decompression, and jump to the new kernel.. |
208 | */ | 230 | */ |
@@ -225,8 +247,12 @@ relocated: | |||
225 | xorl %ebx, %ebx | 247 | xorl %ebx, %ebx |
226 | jmp *%eax | 248 | jmp *%eax |
227 | 249 | ||
228 | #ifdef CONFIG_EFI_STUB | ||
229 | .data | 250 | .data |
251 | /* Have we relocated the GOT? */ | ||
252 | got_fixed: | ||
253 | .byte 0 | ||
254 | |||
255 | #ifdef CONFIG_EFI_STUB | ||
230 | efi32_config: | 256 | efi32_config: |
231 | .fill 11,8,0 | 257 | .fill 11,8,0 |
232 | .long efi_call_phys | 258 | .long efi_call_phys |
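
This hunk (and the head_64.S counterpart below) folds the open-coded GOT adjustment into a FIXUP_GOT macro guarded by a got_fixed flag, since the EFI entry points can reach the relocation path in a way that would otherwise adjust the GOT twice. What the macro does, rendered as C purely for illustration:

#include <stdint.h>

static int got_fixed;   /* "have we relocated the GOT?" */

/* Walk the GOT between _got and _egot once, adding the relocation
 * base to every entry; later calls are no-ops. */
static void fixup_got(uintptr_t *got_start, uintptr_t *got_end,
                      uintptr_t base)
{
        uintptr_t *p;

        if (got_fixed)
                return;

        for (p = got_start; p < got_end; p++)
                *p += base;

        got_fixed = 1;
}
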
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 2884e0c3e8a5..50f69c7eaaf4 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -32,6 +32,33 @@ | |||
32 | #include <asm/processor-flags.h> | 32 | #include <asm/processor-flags.h> |
33 | #include <asm/asm-offsets.h> | 33 | #include <asm/asm-offsets.h> |
34 | 34 | ||
35 | /* | ||
36 | * Adjust our own GOT | ||
37 | * | ||
38 | * The relocation base must be in %rbx | ||
39 | * | ||
40 | * It is safe to call this macro more than once, because in some of the | ||
41 | * code paths multiple invocations are inevitable, e.g. via the efi* | ||
42 | * entry points. | ||
43 | * | ||
44 | * Relocation is only performed the first time. | ||
45 | */ | ||
46 | .macro FIXUP_GOT | ||
47 | cmpb $1, got_fixed(%rip) | ||
48 | je 2f | ||
49 | |||
50 | leaq _got(%rip), %rdx | ||
51 | leaq _egot(%rip), %rcx | ||
52 | 1: | ||
53 | cmpq %rcx, %rdx | ||
54 | jae 2f | ||
55 | addq %rbx, (%rdx) | ||
56 | addq $8, %rdx | ||
57 | jmp 1b | ||
58 | 2: | ||
59 | movb $1, got_fixed(%rip) | ||
60 | .endm | ||
61 | |||
35 | __HEAD | 62 | __HEAD |
36 | .code32 | 63 | .code32 |
37 | ENTRY(startup_32) | 64 | ENTRY(startup_32) |
@@ -252,10 +279,13 @@ ENTRY(efi_pe_entry) | |||
252 | subq $1b, %rbp | 279 | subq $1b, %rbp |
253 | 280 | ||
254 | /* | 281 | /* |
255 | * Relocate efi_config->call(). | 282 | * Relocate efi_config->call() and the GOT entries. |
256 | */ | 283 | */ |
257 | addq %rbp, efi64_config+88(%rip) | 284 | addq %rbp, efi64_config+88(%rip) |
258 | 285 | ||
286 | movq %rbp, %rbx | ||
287 | FIXUP_GOT | ||
288 | |||
259 | movq %rax, %rdi | 289 | movq %rax, %rdi |
260 | call make_boot_params | 290 | call make_boot_params |
261 | cmpq $0,%rax | 291 | cmpq $0,%rax |
@@ -271,10 +301,13 @@ handover_entry: | |||
271 | subq $1b, %rbp | 301 | subq $1b, %rbp |
272 | 302 | ||
273 | /* | 303 | /* |
274 | * Relocate efi_config->call(). | 304 | * Relocate efi_config->call() and the GOT entries. |
275 | */ | 305 | */ |
276 | movq efi_config(%rip), %rax | 306 | movq efi_config(%rip), %rax |
277 | addq %rbp, 88(%rax) | 307 | addq %rbp, 88(%rax) |
308 | |||
309 | movq %rbp, %rbx | ||
310 | FIXUP_GOT | ||
278 | 2: | 311 | 2: |
279 | movq efi_config(%rip), %rdi | 312 | movq efi_config(%rip), %rdi |
280 | call efi_main | 313 | call efi_main |
@@ -385,19 +418,8 @@ relocated: | |||
385 | shrq $3, %rcx | 418 | shrq $3, %rcx |
386 | rep stosq | 419 | rep stosq |
387 | 420 | ||
388 | /* | 421 | FIXUP_GOT |
389 | * Adjust our own GOT | 422 | |
390 | */ | ||
391 | leaq _got(%rip), %rdx | ||
392 | leaq _egot(%rip), %rcx | ||
393 | 1: | ||
394 | cmpq %rcx, %rdx | ||
395 | jae 2f | ||
396 | addq %rbx, (%rdx) | ||
397 | addq $8, %rdx | ||
398 | jmp 1b | ||
399 | 2: | ||
400 | |||
401 | /* | 423 | /* |
402 | * Do the decompression, and jump to the new kernel.. | 424 | * Do the decompression, and jump to the new kernel.. |
403 | */ | 425 | */ |
@@ -437,6 +459,10 @@ gdt: | |||
437 | .quad 0x0000000000000000 /* TS continued */ | 459 | .quad 0x0000000000000000 /* TS continued */ |
438 | gdt_end: | 460 | gdt_end: |
439 | 461 | ||
462 | /* Have we relocated the GOT? */ | ||
463 | got_fixed: | ||
464 | .byte 0 | ||
465 | |||
440 | #ifdef CONFIG_EFI_STUB | 466 | #ifdef CONFIG_EFI_STUB |
441 | efi_config: | 467 | efi_config: |
442 | .quad 0 | 468 | .quad 0 |
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index afcd35d331de..cfe3b954d5e4 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x) | |||
497 | 497 | ||
498 | #include <asm-generic/bitops/sched.h> | 498 | #include <asm-generic/bitops/sched.h> |
499 | 499 | ||
500 | #define ARCH_HAS_FAST_MULTIPLIER 1 | ||
501 | |||
502 | #include <asm/arch_hweight.h> | 500 | #include <asm/arch_hweight.h> |
503 | 501 | ||
504 | #include <asm-generic/bitops/const_hweight.h> | 502 | #include <asm-generic/bitops/const_hweight.h> |
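
Together with the Kconfig hunk above, this turns ARCH_HAS_FAST_MULTIPLIER from a bare #define in bitops.h into a Kconfig symbol that generic code can test as a CONFIG_ option. The option matters to the software hweight fallback, which can fold the per-byte partial counts with a single multiply; a standalone sketch of that trick (mirroring, not copying, the generic helper):

#include <stdint.h>

static unsigned int hweight32_mul(uint32_t w)
{
        w -= (w >> 1) & 0x55555555;                      /* 2-bit sums  */
        w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit sums  */
        w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* 8-bit sums  */
        return (w * 0x01010101) >> 24;                   /* fold with one multiply */
}
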
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 478c490f3654..1733ab49ac5e 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h | |||
@@ -239,6 +239,7 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; } | |||
239 | static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; } | 239 | static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; } |
240 | static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; } | 240 | static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; } |
241 | static inline void mp_unmap_irq(int irq) { } | 241 | static inline void mp_unmap_irq(int irq) { } |
242 | static inline bool mp_should_keep_irq(struct device *dev) { return 1; } | ||
242 | 243 | ||
243 | static inline int save_ioapic_entries(void) | 244 | static inline int save_ioapic_entries(void) |
244 | { | 245 | { |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 5be9063545d2..3874693c0e53 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512]; | |||
19 | extern pmd_t level2_kernel_pgt[512]; | 19 | extern pmd_t level2_kernel_pgt[512]; |
20 | extern pmd_t level2_fixmap_pgt[512]; | 20 | extern pmd_t level2_fixmap_pgt[512]; |
21 | extern pmd_t level2_ident_pgt[512]; | 21 | extern pmd_t level2_ident_pgt[512]; |
22 | extern pte_t level1_fixmap_pgt[512]; | ||
22 | extern pgd_t init_level4_pgt[]; | 23 | extern pgd_t init_level4_pgt[]; |
23 | 24 | ||
24 | #define swapper_pg_dir init_level4_pgt | 25 | #define swapper_pg_dir init_level4_pgt |
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index f304773285ae..f1314d0bcf0a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
@@ -338,8 +338,10 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op) | |||
338 | * a relative jump. | 338 | * a relative jump. |
339 | */ | 339 | */ |
340 | rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; | 340 | rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; |
341 | if (abs(rel) > 0x7fffffff) | 341 | if (abs(rel) > 0x7fffffff) { |
342 | __arch_remove_optimized_kprobe(op, 0); | ||
342 | return -ERANGE; | 343 | return -ERANGE; |
344 | } | ||
343 | 345 | ||
344 | buf = (u8 *)op->optinsn.insn; | 346 | buf = (u8 *)op->optinsn.insn; |
345 | 347 | ||
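
The kprobes hunk plugs a leak: when the detour buffer lands farther than a rel32 jump can reach from the probed address, the function used to return -ERANGE without releasing the instruction slot it had just allocated. The shape of the fix, sketched with hypothetical userspace stand-ins for the slot management:

#include <stdint.h>
#include <stdlib.h>

struct opt_probe {
        void *slot;             /* detour buffer allocated earlier */
};

static int prepare_probe(struct opt_probe *op, intptr_t slot_addr,
                         intptr_t probe_addr)
{
        int64_t rel = slot_addr - probe_addr + 5;       /* rel32 jmp is 5 bytes on x86 */

        if (rel > INT32_MAX || rel < INT32_MIN) {
                free(op->slot);         /* the cleanup the fix adds */
                op->slot = NULL;
                return -1;              /* out of rel32 range */
        }
        return 0;
}
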
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 167ffcac16ed..95a427e57887 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
@@ -48,7 +48,9 @@ enum address_markers_idx { | |||
48 | LOW_KERNEL_NR, | 48 | LOW_KERNEL_NR, |
49 | VMALLOC_START_NR, | 49 | VMALLOC_START_NR, |
50 | VMEMMAP_START_NR, | 50 | VMEMMAP_START_NR, |
51 | # ifdef CONFIG_X86_ESPFIX64 | ||
51 | ESPFIX_START_NR, | 52 | ESPFIX_START_NR, |
53 | # endif | ||
52 | HIGH_KERNEL_NR, | 54 | HIGH_KERNEL_NR, |
53 | MODULES_VADDR_NR, | 55 | MODULES_VADDR_NR, |
54 | MODULES_END_NR, | 56 | MODULES_END_NR, |
@@ -71,7 +73,9 @@ static struct addr_marker address_markers[] = { | |||
71 | { PAGE_OFFSET, "Low Kernel Mapping" }, | 73 | { PAGE_OFFSET, "Low Kernel Mapping" }, |
72 | { VMALLOC_START, "vmalloc() Area" }, | 74 | { VMALLOC_START, "vmalloc() Area" }, |
73 | { VMEMMAP_START, "Vmemmap" }, | 75 | { VMEMMAP_START, "Vmemmap" }, |
76 | # ifdef CONFIG_X86_ESPFIX64 | ||
74 | { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, | 77 | { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, |
78 | # endif | ||
75 | { __START_KERNEL_map, "High Kernel Mapping" }, | 79 | { __START_KERNEL_map, "High Kernel Mapping" }, |
76 | { MODULES_VADDR, "Modules" }, | 80 | { MODULES_VADDR, "Modules" }, |
77 | { MODULES_END, "End Modules" }, | 81 | { MODULES_END, "End Modules" }, |
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 25e7e1372bb2..919b91205cd4 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
32 | #include <asm/elf.h> | 32 | #include <asm/elf.h> |
33 | 33 | ||
34 | struct __read_mostly va_alignment va_align = { | 34 | struct va_alignment __read_mostly va_align = { |
35 | .flags = -1, | 35 | .flags = -1, |
36 | }; | 36 | }; |
37 | 37 | ||
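
The mmap.c change is a one-liner worth spelling out: __read_mostly expands to a section attribute, and between the struct keyword and the tag it attaches to the type, where it has no effect (GCC typically warns that the attribute does not apply to types); after the type it attaches to va_align itself and places it in the read-mostly data section. A standalone illustration with stand-in names:

#define __my_read_mostly __attribute__((__section__(".data..read_mostly")))

struct va_alignment_demo {
        int flags;
        unsigned long mask;
};

/* Wrong: the attribute binds to the struct type and is discarded.
 * struct __my_read_mostly va_alignment_demo bad = { .flags = -1 };
 */

/* Right: the attribute binds to the variable and moves it into the section. */
struct va_alignment_demo __my_read_mostly good = { .flags = -1 };
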
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index c61ea57d1ba1..9a2b7101ae8a 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
@@ -326,27 +326,6 @@ static void pci_fixup_video(struct pci_dev *pdev) | |||
326 | struct pci_bus *bus; | 326 | struct pci_bus *bus; |
327 | u16 config; | 327 | u16 config; |
328 | 328 | ||
329 | if (!vga_default_device()) { | ||
330 | resource_size_t start, end; | ||
331 | int i; | ||
332 | |||
333 | /* Does firmware framebuffer belong to us? */ | ||
334 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
335 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) | ||
336 | continue; | ||
337 | |||
338 | start = pci_resource_start(pdev, i); | ||
339 | end = pci_resource_end(pdev, i); | ||
340 | |||
341 | if (!start || !end) | ||
342 | continue; | ||
343 | |||
344 | if (screen_info.lfb_base >= start && | ||
345 | (screen_info.lfb_base + screen_info.lfb_size) < end) | ||
346 | vga_set_default_device(pdev); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | /* Is VGA routed to us? */ | 329 | /* Is VGA routed to us? */ |
351 | bus = pdev->bus; | 330 | bus = pdev->bus; |
352 | while (bus) { | 331 | while (bus) { |
@@ -371,8 +350,7 @@ static void pci_fixup_video(struct pci_dev *pdev) | |||
371 | pci_read_config_word(pdev, PCI_COMMAND, &config); | 350 | pci_read_config_word(pdev, PCI_COMMAND, &config); |
372 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { | 351 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { |
373 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; | 352 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; |
374 | dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n"); | 353 | dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n"); |
375 | vga_set_default_device(pdev); | ||
376 | } | 354 | } |
377 | } | 355 | } |
378 | } | 356 | } |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e8a1201c3293..16fb0099b7f2 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, | |||
1866 | * | 1866 | * |
1867 | * We can construct this by grafting the Xen provided pagetable into | 1867 | * We can construct this by grafting the Xen provided pagetable into |
1868 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into | 1868 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into |
1869 | * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This | 1869 | * level2_ident_pgt, and level2_kernel_pgt. This means that only the |
1870 | * means that only the kernel has a physical mapping to start with - | 1870 | * kernel has a physical mapping to start with - but that's enough to |
1871 | * but that's enough to get __va working. We need to fill in the rest | 1871 | * get __va working. We need to fill in the rest of the physical |
1872 | * of the physical mapping once some sort of allocator has been set | 1872 | * mapping once some sort of allocator has been set up. NOTE: for |
1873 | * up. | 1873 | * PVH, the page tables are native. |
1874 | * NOTE: for PVH, the page tables are native. | ||
1875 | */ | 1874 | */ |
1876 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | 1875 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) |
1877 | { | 1876 | { |
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1902 | /* L3_i[0] -> level2_ident_pgt */ | 1901 | /* L3_i[0] -> level2_ident_pgt */ |
1903 | convert_pfn_mfn(level3_ident_pgt); | 1902 | convert_pfn_mfn(level3_ident_pgt); |
1904 | /* L3_k[510] -> level2_kernel_pgt | 1903 | /* L3_k[510] -> level2_kernel_pgt |
1905 | * L3_i[511] -> level2_fixmap_pgt */ | 1904 | * L3_k[511] -> level2_fixmap_pgt */ |
1906 | convert_pfn_mfn(level3_kernel_pgt); | 1905 | convert_pfn_mfn(level3_kernel_pgt); |
1906 | |||
1907 | /* L3_k[511][506] -> level1_fixmap_pgt */ | ||
1908 | convert_pfn_mfn(level2_fixmap_pgt); | ||
1907 | } | 1909 | } |
1908 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ | 1910 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ |
1909 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); | 1911 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); |
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1913 | addr[1] = (unsigned long)l3; | 1915 | addr[1] = (unsigned long)l3; |
1914 | addr[2] = (unsigned long)l2; | 1916 | addr[2] = (unsigned long)l2; |
1915 | /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: | 1917 | /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: |
1916 | * Both L4[272][0] and L4[511][511] have entries that point to the same | 1918 | * Both L4[272][0] and L4[511][510] have entries that point to the same |
1917 | * L2 (PMD) tables. Meaning that if you modify it in __va space | 1919 | * L2 (PMD) tables. Meaning that if you modify it in __va space |
1918 | * it will be also modified in the __ka space! (But if you just | 1920 | * it will be also modified in the __ka space! (But if you just |
1919 | * modify the PMD table to point to other PTE's or none, then you | 1921 | * modify the PMD table to point to other PTE's or none, then you |
1920 | * are OK - which is what cleanup_highmap does) */ | 1922 | * are OK - which is what cleanup_highmap does) */ |
1921 | copy_page(level2_ident_pgt, l2); | 1923 | copy_page(level2_ident_pgt, l2); |
1922 | /* Graft it onto L4[511][511] */ | 1924 | /* Graft it onto L4[511][510] */ |
1923 | copy_page(level2_kernel_pgt, l2); | 1925 | copy_page(level2_kernel_pgt, l2); |
1924 | 1926 | ||
1925 | /* Get [511][510] and graft that in level2_fixmap_pgt */ | ||
1926 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); | ||
1927 | l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); | ||
1928 | copy_page(level2_fixmap_pgt, l2); | ||
1929 | /* Note that we don't do anything with level1_fixmap_pgt which | ||
1930 | * we don't need. */ | ||
1931 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 1927 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
1932 | /* Make pagetable pieces RO */ | 1928 | /* Make pagetable pieces RO */ |
1933 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); | 1929 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); |
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1937 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); | 1933 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); |
1938 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | 1934 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); |
1939 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); | 1935 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); |
1936 | set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); | ||
1940 | 1937 | ||
1941 | /* Pin down new L4 */ | 1938 | /* Pin down new L4 */ |
1942 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, | 1939 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, |