60 files changed, 752 insertions, 626 deletions
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index e540fd67f767..b443f1de0e5a 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -390,6 +390,7 @@ Protocol: 2.00+
   F  Special      (0xFF = undefined)
  10  Reserved
  11  Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de>
+ 12  OVMF UEFI virtualization stack
 
 Please contact <hpa@zytor.com> if you need a bootloader ID
 value assigned.
diff --git a/MAINTAINERS b/MAINTAINERS
index 5abc10efffa2..8bbd949f8c2e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7524,7 +7524,7 @@ S: Maintained
 F: drivers/media/tuners/tea5767.*
 
 TEAM DRIVER
-M: Jiri Pirko <jpirko@redhat.com>
+M: Jiri Pirko <jiri@resnulli.us>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/team/
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 4eb6d005ffaa..86dff32a0737 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -7,8 +7,14 @@
 
 #ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
 int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
 #endif
 
 #endif
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index b9f015e843d8..45eac87ed66a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
        unsigned int val;
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
        if (mode > 3 || mode == 1 || cpu > 3)
                return -EINVAL;
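Here and in the Highbank changes below, cpu_logical_map() can now hold a full MPIDR value rather than a small index, so callers that need the per-cluster core number extract affinity level 0 explicitly. A rough userspace sketch of what MPIDR_AFFINITY_LEVEL(mpidr, 0) evaluates to (the 8-bit-per-level field layout mirrors the kernel's <asm/cputype.h>; the sample MPIDR value is made up):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the kernel's MPIDR affinity helpers: 8-bit fields at bits 0/8/16. */
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        (((mpidr) >> ((level) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

int main(void)
{
        uint32_t mpidr = 0x00000102;    /* hypothetical: cluster 1, core 2 */

        /* The SCU only understands cores 0-3 within one cluster, so
         * scu_power_mode() wants affinity level 0, not the whole MPIDR. */
        printf("core within cluster: %u\n", (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 0));
        printf("cluster id:          %u\n", (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, 1));
        return 0;
}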
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 981dc1e1da51..e6c061282939 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,6 +28,7 @@
 
 #include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/arm_timer.h>
@@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-       cpu = cpu_logical_map(cpu);
+       cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
        writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h
index 70af9d13fcef..5995df7f2622 100644
--- a/arch/arm/mach-highbank/sysregs.h
+++ b/arch/arm/mach-highbank/sysregs.h
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
 
 static inline void highbank_set_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
        else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
 
 static inline void highbank_clear_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
        else
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a34f1e214116..6828ef6ce80e 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -341,10 +341,17 @@ static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 
 static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
 {
-       emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx);
-       emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx);
-       emit(ARM_LSL_I(r_dst, r_dst, 8), ctx);
-       emit(ARM_LSL_R(r_dst, r_dst, 8), ctx);
+       /* r_dst = (r_src << 8) | (r_src >> 8) */
+       emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
+       emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
+
+       /*
+        * we need to mask out the bits set in r_dst[23:16] due to
+        * the first shift instruction.
+        *
+        * note that 0x8ff is the encoded immediate 0x00ff0000.
+        */
+       emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
 }
 
 #else /* ARMv6+ */
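The old ARMv5 swap16 sequence used register-shift encodings where immediate shifts were intended and shifted in the wrong direction; the replacement computes (src << 8) | (src >> 8) and then clears bits 23:16, which the left shift pushed above the low half-word. A standalone C model of what the three emitted instructions compute, assuming the source value already fits in 16 bits (as it does for the JIT's half-word loads):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t swap16_like_jit(uint32_t src)
{
        uint32_t tmp = src << 8;                /* ARM_LSL_I(r1, r_src, 8)       */
        uint32_t dst = tmp | (src >> 8);        /* ARM_ORR_S(..., SRTYPE_LSR, 8) */

        dst &= ~0x00ff0000u;                    /* ARM_BIC_I: 0x8ff encodes 0x00ff0000 */
        return dst;
}

int main(void)
{
        assert(swap16_like_jit(0x1234) == 0x3412);
        assert(swap16_like_jit(0x00ff) == 0xff00);
        puts("swap16 ok");
        return 0;
}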
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index ae700f49e51d..b0768a657920 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
 #define start_thread(_regs, _pc, _usp)                  \
 do {                                                    \
        (_regs)->pc = (_pc);                             \
-       ((struct switch_stack *)(_regs))[-1].a6 = 0;     \
        setframeformat(_regs);                           \
        if (current->mm)                                 \
                (_regs)->d5 = current->mm->start_data;   \
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a5f4f5a1d24b..0aa98db8a80d 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
        nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
        do_div(nsecs, 125);
        S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+       /* Program the maximum value if we have an overflow (== year 2042) */
+       if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+               S390_lowcore.clock_comparator = -1ULL;
        set_clock_comparator(S390_lowcore.clock_comparator);
        return 0;
 }
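The added lines clamp the programmed clock comparator when the computed 64-bit value wraps around (an expiry past 2042 in TOD format), programming the maximum value instead. The same clamp-on-unsigned-overflow pattern in isolation, with arbitrary sample numbers:

#include <stdint.h>
#include <stdio.h>

/* If adding the delta wrapped the 64-bit value, fall back to the largest
 * possible comparator value (the s390 code uses -1ULL the same way). */
static uint64_t clamped_add(uint64_t base, uint64_t delta)
{
        uint64_t sum = base + delta;

        if (sum < base)         /* unsigned wrap-around detected */
                sum = ~0ULL;
        return sum;
}

int main(void)
{
        printf("%llx\n", (unsigned long long)clamped_add(0xfff0000000000000ULL,
                                                         0x0020000000000000ULL));
        return 0;
}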
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 875d008828b8..1bb7ad4aeff4 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Tilera-specific configuration"
 
 config NR_CPUS
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 2a9b293fece6..31672918064c 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+
+static inline void memset_io(volatile void *dst, int val, size_t len)
 {
        int x;
        BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                writel(*(u32 *)(src + x), dst + x);
 }
 
+#endif
+
 /*
  * The Tile architecture does not support IOPORT, even with PCI.
  * Unfortunately we can't yet simply not declare these methods,
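The memset_io()/memcpy_*io() helpers are now only compiled when the chip has MMIO or PCI is configured, and the destination is volatile-qualified. The following userspace sketch loosely mirrors the word-at-a-time store pattern and the 4-byte alignment requirement; it is illustrative only, not the kernel implementation:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void memset_io_sketch(volatile void *dst, int val, size_t len)
{
        volatile uint32_t *p = (volatile uint32_t *)dst;
        uint32_t word = (uint8_t)val;
        size_t x;

        assert(((uintptr_t)dst & 0x3) == 0 && (len & 0x3) == 0);
        word |= word << 8;
        word |= word << 16;     /* replicate the byte into all four lanes */
        for (x = 0; x < len / 4; x++)
                p[x] = word;    /* always full 32-bit stores */
}

int main(void)
{
        uint32_t buf[4];

        memset_io_sketch(buf, 0xab, sizeof(buf));
        assert(buf[0] == 0xabababab);
        return 0;
}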
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index b4e96fef2cf8..241c0bb60b12 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -18,32 +18,20 @@
 #include <arch/interrupts.h>
 #include <arch/chip.h>
 
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
-
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled.  The remainder are effectively "NMI" interrupts from
  * the point of view of the generic Linux code.  Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-
-#else
-
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
 #define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT)))
-#endif
+       (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
 
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+       (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
 #endif
 
 #ifndef __ASSEMBLY__
@@ -126,7 +114,7 @@
  * to know our current state.
  */
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1) \
 { \
-       movei  tmp0, -1; \
+       movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
        moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
 }; \
 { \
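The rewritten header builds the maskable-interrupt set directly from 1ULL shifts and, on chips with split 32-bit interrupt-mask SPRs, provides _LO/_HI halves that must recombine to the same 64-bit mask (the _LO half is all ones because both masked interrupts sit in the upper word). A standalone check of that property, using made-up bit numbers in place of the real <arch/interrupts.h> values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical interrupt numbers; the real ones come from <arch/interrupts.h>.
 * Both are >= 32, which is what makes the split form below work. */
#define INT_PERF_COUNT     33
#define INT_AUX_PERF_COUNT 46

#define LINUX_MASKABLE_INTERRUPTS \
        (~((1ULL << INT_PERF_COUNT) | (1ULL << INT_AUX_PERF_COUNT)))

/* The same mask, split across two 32-bit SPRs. */
#define LINUX_MASKABLE_INTERRUPTS_LO ((uint32_t)-1)
#define LINUX_MASKABLE_INTERRUPTS_HI \
        (~((1U << (INT_PERF_COUNT - 32)) | (1U << (INT_AUX_PERF_COUNT - 32))))

int main(void)
{
        uint64_t recombined = ((uint64_t)LINUX_MASKABLE_INTERRUPTS_HI << 32) |
                              LINUX_MASKABLE_INTERRUPTS_LO;

        assert(recombined == LINUX_MASKABLE_INTERRUPTS);
        puts("split halves match the 64-bit mask");
        return 0;
}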
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h
index 96b5710505b6..2efe3f68b2d6 100644
--- a/arch/tile/include/uapi/arch/interrupts_32.h
+++ b/arch/tile/include/uapi/arch/interrupts_32.h
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 /* Note: must handle breaking interrupts into high and low words manually. */
 #define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
 #ifndef __ASSEMBLER__
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
@@ -92,216 +94,216 @@ | |||
92 | 94 | ||
93 | #ifndef __ASSEMBLER__ | 95 | #ifndef __ASSEMBLER__ |
94 | #define QUEUED_INTERRUPTS ( \ | 96 | #define QUEUED_INTERRUPTS ( \ |
95 | INT_MASK(INT_MEM_ERROR) | \ | 97 | (1ULL << INT_MEM_ERROR) | \ |
96 | INT_MASK(INT_DMATLB_MISS) | \ | 98 | (1ULL << INT_DMATLB_MISS) | \ |
97 | INT_MASK(INT_DMATLB_ACCESS) | \ | 99 | (1ULL << INT_DMATLB_ACCESS) | \ |
98 | INT_MASK(INT_SNITLB_MISS) | \ | 100 | (1ULL << INT_SNITLB_MISS) | \ |
99 | INT_MASK(INT_SN_NOTIFY) | \ | 101 | (1ULL << INT_SN_NOTIFY) | \ |
100 | INT_MASK(INT_SN_FIREWALL) | \ | 102 | (1ULL << INT_SN_FIREWALL) | \ |
101 | INT_MASK(INT_IDN_FIREWALL) | \ | 103 | (1ULL << INT_IDN_FIREWALL) | \ |
102 | INT_MASK(INT_UDN_FIREWALL) | \ | 104 | (1ULL << INT_UDN_FIREWALL) | \ |
103 | INT_MASK(INT_TILE_TIMER) | \ | 105 | (1ULL << INT_TILE_TIMER) | \ |
104 | INT_MASK(INT_IDN_TIMER) | \ | 106 | (1ULL << INT_IDN_TIMER) | \ |
105 | INT_MASK(INT_UDN_TIMER) | \ | 107 | (1ULL << INT_UDN_TIMER) | \ |
106 | INT_MASK(INT_DMA_NOTIFY) | \ | 108 | (1ULL << INT_DMA_NOTIFY) | \ |
107 | INT_MASK(INT_IDN_CA) | \ | 109 | (1ULL << INT_IDN_CA) | \ |
108 | INT_MASK(INT_UDN_CA) | \ | 110 | (1ULL << INT_UDN_CA) | \ |
109 | INT_MASK(INT_IDN_AVAIL) | \ | 111 | (1ULL << INT_IDN_AVAIL) | \ |
110 | INT_MASK(INT_UDN_AVAIL) | \ | 112 | (1ULL << INT_UDN_AVAIL) | \ |
111 | INT_MASK(INT_PERF_COUNT) | \ | 113 | (1ULL << INT_PERF_COUNT) | \ |
112 | INT_MASK(INT_INTCTRL_3) | \ | 114 | (1ULL << INT_INTCTRL_3) | \ |
113 | INT_MASK(INT_INTCTRL_2) | \ | 115 | (1ULL << INT_INTCTRL_2) | \ |
114 | INT_MASK(INT_INTCTRL_1) | \ | 116 | (1ULL << INT_INTCTRL_1) | \ |
115 | INT_MASK(INT_INTCTRL_0) | \ | 117 | (1ULL << INT_INTCTRL_0) | \ |
116 | INT_MASK(INT_BOOT_ACCESS) | \ | 118 | (1ULL << INT_BOOT_ACCESS) | \ |
117 | INT_MASK(INT_WORLD_ACCESS) | \ | 119 | (1ULL << INT_WORLD_ACCESS) | \ |
118 | INT_MASK(INT_I_ASID) | \ | 120 | (1ULL << INT_I_ASID) | \ |
119 | INT_MASK(INT_D_ASID) | \ | 121 | (1ULL << INT_D_ASID) | \ |
120 | INT_MASK(INT_DMA_ASID) | \ | 122 | (1ULL << INT_DMA_ASID) | \ |
121 | INT_MASK(INT_SNI_ASID) | \ | 123 | (1ULL << INT_SNI_ASID) | \ |
122 | INT_MASK(INT_DMA_CPL) | \ | 124 | (1ULL << INT_DMA_CPL) | \ |
123 | INT_MASK(INT_SN_CPL) | \ | 125 | (1ULL << INT_SN_CPL) | \ |
124 | INT_MASK(INT_DOUBLE_FAULT) | \ | 126 | (1ULL << INT_DOUBLE_FAULT) | \ |
125 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 127 | (1ULL << INT_AUX_PERF_COUNT) | \ |
126 | 0) | 128 | 0) |
127 | #define NONQUEUED_INTERRUPTS ( \ | 129 | #define NONQUEUED_INTERRUPTS ( \ |
128 | INT_MASK(INT_ITLB_MISS) | \ | 130 | (1ULL << INT_ITLB_MISS) | \ |
129 | INT_MASK(INT_ILL) | \ | 131 | (1ULL << INT_ILL) | \ |
130 | INT_MASK(INT_GPV) | \ | 132 | (1ULL << INT_GPV) | \ |
131 | INT_MASK(INT_SN_ACCESS) | \ | 133 | (1ULL << INT_SN_ACCESS) | \ |
132 | INT_MASK(INT_IDN_ACCESS) | \ | 134 | (1ULL << INT_IDN_ACCESS) | \ |
133 | INT_MASK(INT_UDN_ACCESS) | \ | 135 | (1ULL << INT_UDN_ACCESS) | \ |
134 | INT_MASK(INT_IDN_REFILL) | \ | 136 | (1ULL << INT_IDN_REFILL) | \ |
135 | INT_MASK(INT_UDN_REFILL) | \ | 137 | (1ULL << INT_UDN_REFILL) | \ |
136 | INT_MASK(INT_IDN_COMPLETE) | \ | 138 | (1ULL << INT_IDN_COMPLETE) | \ |
137 | INT_MASK(INT_UDN_COMPLETE) | \ | 139 | (1ULL << INT_UDN_COMPLETE) | \ |
138 | INT_MASK(INT_SWINT_3) | \ | 140 | (1ULL << INT_SWINT_3) | \ |
139 | INT_MASK(INT_SWINT_2) | \ | 141 | (1ULL << INT_SWINT_2) | \ |
140 | INT_MASK(INT_SWINT_1) | \ | 142 | (1ULL << INT_SWINT_1) | \ |
141 | INT_MASK(INT_SWINT_0) | \ | 143 | (1ULL << INT_SWINT_0) | \ |
142 | INT_MASK(INT_UNALIGN_DATA) | \ | 144 | (1ULL << INT_UNALIGN_DATA) | \ |
143 | INT_MASK(INT_DTLB_MISS) | \ | 145 | (1ULL << INT_DTLB_MISS) | \ |
144 | INT_MASK(INT_DTLB_ACCESS) | \ | 146 | (1ULL << INT_DTLB_ACCESS) | \ |
145 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 147 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
146 | 0) | 148 | 0) |
147 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 149 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
148 | INT_MASK(INT_MEM_ERROR) | \ | 150 | (1ULL << INT_MEM_ERROR) | \ |
149 | INT_MASK(INT_DMATLB_MISS) | \ | 151 | (1ULL << INT_DMATLB_MISS) | \ |
150 | INT_MASK(INT_DMATLB_ACCESS) | \ | 152 | (1ULL << INT_DMATLB_ACCESS) | \ |
151 | INT_MASK(INT_SNITLB_MISS) | \ | 153 | (1ULL << INT_SNITLB_MISS) | \ |
152 | INT_MASK(INT_SN_NOTIFY) | \ | 154 | (1ULL << INT_SN_NOTIFY) | \ |
153 | INT_MASK(INT_SN_FIREWALL) | \ | 155 | (1ULL << INT_SN_FIREWALL) | \ |
154 | INT_MASK(INT_IDN_FIREWALL) | \ | 156 | (1ULL << INT_IDN_FIREWALL) | \ |
155 | INT_MASK(INT_UDN_FIREWALL) | \ | 157 | (1ULL << INT_UDN_FIREWALL) | \ |
156 | INT_MASK(INT_TILE_TIMER) | \ | 158 | (1ULL << INT_TILE_TIMER) | \ |
157 | INT_MASK(INT_IDN_TIMER) | \ | 159 | (1ULL << INT_IDN_TIMER) | \ |
158 | INT_MASK(INT_UDN_TIMER) | \ | 160 | (1ULL << INT_UDN_TIMER) | \ |
159 | INT_MASK(INT_DMA_NOTIFY) | \ | 161 | (1ULL << INT_DMA_NOTIFY) | \ |
160 | INT_MASK(INT_IDN_CA) | \ | 162 | (1ULL << INT_IDN_CA) | \ |
161 | INT_MASK(INT_UDN_CA) | \ | 163 | (1ULL << INT_UDN_CA) | \ |
162 | INT_MASK(INT_IDN_AVAIL) | \ | 164 | (1ULL << INT_IDN_AVAIL) | \ |
163 | INT_MASK(INT_UDN_AVAIL) | \ | 165 | (1ULL << INT_UDN_AVAIL) | \ |
164 | INT_MASK(INT_PERF_COUNT) | \ | 166 | (1ULL << INT_PERF_COUNT) | \ |
165 | INT_MASK(INT_INTCTRL_3) | \ | 167 | (1ULL << INT_INTCTRL_3) | \ |
166 | INT_MASK(INT_INTCTRL_2) | \ | 168 | (1ULL << INT_INTCTRL_2) | \ |
167 | INT_MASK(INT_INTCTRL_1) | \ | 169 | (1ULL << INT_INTCTRL_1) | \ |
168 | INT_MASK(INT_INTCTRL_0) | \ | 170 | (1ULL << INT_INTCTRL_0) | \ |
169 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 171 | (1ULL << INT_AUX_PERF_COUNT) | \ |
170 | 0) | 172 | 0) |
171 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 173 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
172 | INT_MASK(INT_ITLB_MISS) | \ | 174 | (1ULL << INT_ITLB_MISS) | \ |
173 | INT_MASK(INT_ILL) | \ | 175 | (1ULL << INT_ILL) | \ |
174 | INT_MASK(INT_GPV) | \ | 176 | (1ULL << INT_GPV) | \ |
175 | INT_MASK(INT_SN_ACCESS) | \ | 177 | (1ULL << INT_SN_ACCESS) | \ |
176 | INT_MASK(INT_IDN_ACCESS) | \ | 178 | (1ULL << INT_IDN_ACCESS) | \ |
177 | INT_MASK(INT_UDN_ACCESS) | \ | 179 | (1ULL << INT_UDN_ACCESS) | \ |
178 | INT_MASK(INT_IDN_REFILL) | \ | 180 | (1ULL << INT_IDN_REFILL) | \ |
179 | INT_MASK(INT_UDN_REFILL) | \ | 181 | (1ULL << INT_UDN_REFILL) | \ |
180 | INT_MASK(INT_IDN_COMPLETE) | \ | 182 | (1ULL << INT_IDN_COMPLETE) | \ |
181 | INT_MASK(INT_UDN_COMPLETE) | \ | 183 | (1ULL << INT_UDN_COMPLETE) | \ |
182 | INT_MASK(INT_SWINT_3) | \ | 184 | (1ULL << INT_SWINT_3) | \ |
183 | INT_MASK(INT_SWINT_2) | \ | 185 | (1ULL << INT_SWINT_2) | \ |
184 | INT_MASK(INT_SWINT_1) | \ | 186 | (1ULL << INT_SWINT_1) | \ |
185 | INT_MASK(INT_SWINT_0) | \ | 187 | (1ULL << INT_SWINT_0) | \ |
186 | INT_MASK(INT_UNALIGN_DATA) | \ | 188 | (1ULL << INT_UNALIGN_DATA) | \ |
187 | INT_MASK(INT_DTLB_MISS) | \ | 189 | (1ULL << INT_DTLB_MISS) | \ |
188 | INT_MASK(INT_DTLB_ACCESS) | \ | 190 | (1ULL << INT_DTLB_ACCESS) | \ |
189 | INT_MASK(INT_BOOT_ACCESS) | \ | 191 | (1ULL << INT_BOOT_ACCESS) | \ |
190 | INT_MASK(INT_WORLD_ACCESS) | \ | 192 | (1ULL << INT_WORLD_ACCESS) | \ |
191 | INT_MASK(INT_I_ASID) | \ | 193 | (1ULL << INT_I_ASID) | \ |
192 | INT_MASK(INT_D_ASID) | \ | 194 | (1ULL << INT_D_ASID) | \ |
193 | INT_MASK(INT_DMA_ASID) | \ | 195 | (1ULL << INT_DMA_ASID) | \ |
194 | INT_MASK(INT_SNI_ASID) | \ | 196 | (1ULL << INT_SNI_ASID) | \ |
195 | INT_MASK(INT_DMA_CPL) | \ | 197 | (1ULL << INT_DMA_CPL) | \ |
196 | INT_MASK(INT_SN_CPL) | \ | 198 | (1ULL << INT_SN_CPL) | \ |
197 | INT_MASK(INT_DOUBLE_FAULT) | \ | 199 | (1ULL << INT_DOUBLE_FAULT) | \ |
198 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 200 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
199 | 0) | 201 | 0) |
200 | #define MASKABLE_INTERRUPTS ( \ | 202 | #define MASKABLE_INTERRUPTS ( \ |
201 | INT_MASK(INT_MEM_ERROR) | \ | 203 | (1ULL << INT_MEM_ERROR) | \ |
202 | INT_MASK(INT_IDN_REFILL) | \ | 204 | (1ULL << INT_IDN_REFILL) | \ |
203 | INT_MASK(INT_UDN_REFILL) | \ | 205 | (1ULL << INT_UDN_REFILL) | \ |
204 | INT_MASK(INT_IDN_COMPLETE) | \ | 206 | (1ULL << INT_IDN_COMPLETE) | \ |
205 | INT_MASK(INT_UDN_COMPLETE) | \ | 207 | (1ULL << INT_UDN_COMPLETE) | \ |
206 | INT_MASK(INT_DMATLB_MISS) | \ | 208 | (1ULL << INT_DMATLB_MISS) | \ |
207 | INT_MASK(INT_DMATLB_ACCESS) | \ | 209 | (1ULL << INT_DMATLB_ACCESS) | \ |
208 | INT_MASK(INT_SNITLB_MISS) | \ | 210 | (1ULL << INT_SNITLB_MISS) | \ |
209 | INT_MASK(INT_SN_NOTIFY) | \ | 211 | (1ULL << INT_SN_NOTIFY) | \ |
210 | INT_MASK(INT_SN_FIREWALL) | \ | 212 | (1ULL << INT_SN_FIREWALL) | \ |
211 | INT_MASK(INT_IDN_FIREWALL) | \ | 213 | (1ULL << INT_IDN_FIREWALL) | \ |
212 | INT_MASK(INT_UDN_FIREWALL) | \ | 214 | (1ULL << INT_UDN_FIREWALL) | \ |
213 | INT_MASK(INT_TILE_TIMER) | \ | 215 | (1ULL << INT_TILE_TIMER) | \ |
214 | INT_MASK(INT_IDN_TIMER) | \ | 216 | (1ULL << INT_IDN_TIMER) | \ |
215 | INT_MASK(INT_UDN_TIMER) | \ | 217 | (1ULL << INT_UDN_TIMER) | \ |
216 | INT_MASK(INT_DMA_NOTIFY) | \ | 218 | (1ULL << INT_DMA_NOTIFY) | \ |
217 | INT_MASK(INT_IDN_CA) | \ | 219 | (1ULL << INT_IDN_CA) | \ |
218 | INT_MASK(INT_UDN_CA) | \ | 220 | (1ULL << INT_UDN_CA) | \ |
219 | INT_MASK(INT_IDN_AVAIL) | \ | 221 | (1ULL << INT_IDN_AVAIL) | \ |
220 | INT_MASK(INT_UDN_AVAIL) | \ | 222 | (1ULL << INT_UDN_AVAIL) | \ |
221 | INT_MASK(INT_PERF_COUNT) | \ | 223 | (1ULL << INT_PERF_COUNT) | \ |
222 | INT_MASK(INT_INTCTRL_3) | \ | 224 | (1ULL << INT_INTCTRL_3) | \ |
223 | INT_MASK(INT_INTCTRL_2) | \ | 225 | (1ULL << INT_INTCTRL_2) | \ |
224 | INT_MASK(INT_INTCTRL_1) | \ | 226 | (1ULL << INT_INTCTRL_1) | \ |
225 | INT_MASK(INT_INTCTRL_0) | \ | 227 | (1ULL << INT_INTCTRL_0) | \ |
226 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 228 | (1ULL << INT_AUX_PERF_COUNT) | \ |
227 | 0) | 229 | 0) |
228 | #define UNMASKABLE_INTERRUPTS ( \ | 230 | #define UNMASKABLE_INTERRUPTS ( \ |
229 | INT_MASK(INT_ITLB_MISS) | \ | 231 | (1ULL << INT_ITLB_MISS) | \ |
230 | INT_MASK(INT_ILL) | \ | 232 | (1ULL << INT_ILL) | \ |
231 | INT_MASK(INT_GPV) | \ | 233 | (1ULL << INT_GPV) | \ |
232 | INT_MASK(INT_SN_ACCESS) | \ | 234 | (1ULL << INT_SN_ACCESS) | \ |
233 | INT_MASK(INT_IDN_ACCESS) | \ | 235 | (1ULL << INT_IDN_ACCESS) | \ |
234 | INT_MASK(INT_UDN_ACCESS) | \ | 236 | (1ULL << INT_UDN_ACCESS) | \ |
235 | INT_MASK(INT_SWINT_3) | \ | 237 | (1ULL << INT_SWINT_3) | \ |
236 | INT_MASK(INT_SWINT_2) | \ | 238 | (1ULL << INT_SWINT_2) | \ |
237 | INT_MASK(INT_SWINT_1) | \ | 239 | (1ULL << INT_SWINT_1) | \ |
238 | INT_MASK(INT_SWINT_0) | \ | 240 | (1ULL << INT_SWINT_0) | \ |
239 | INT_MASK(INT_UNALIGN_DATA) | \ | 241 | (1ULL << INT_UNALIGN_DATA) | \ |
240 | INT_MASK(INT_DTLB_MISS) | \ | 242 | (1ULL << INT_DTLB_MISS) | \ |
241 | INT_MASK(INT_DTLB_ACCESS) | \ | 243 | (1ULL << INT_DTLB_ACCESS) | \ |
242 | INT_MASK(INT_BOOT_ACCESS) | \ | 244 | (1ULL << INT_BOOT_ACCESS) | \ |
243 | INT_MASK(INT_WORLD_ACCESS) | \ | 245 | (1ULL << INT_WORLD_ACCESS) | \ |
244 | INT_MASK(INT_I_ASID) | \ | 246 | (1ULL << INT_I_ASID) | \ |
245 | INT_MASK(INT_D_ASID) | \ | 247 | (1ULL << INT_D_ASID) | \ |
246 | INT_MASK(INT_DMA_ASID) | \ | 248 | (1ULL << INT_DMA_ASID) | \ |
247 | INT_MASK(INT_SNI_ASID) | \ | 249 | (1ULL << INT_SNI_ASID) | \ |
248 | INT_MASK(INT_DMA_CPL) | \ | 250 | (1ULL << INT_DMA_CPL) | \ |
249 | INT_MASK(INT_SN_CPL) | \ | 251 | (1ULL << INT_SN_CPL) | \ |
250 | INT_MASK(INT_DOUBLE_FAULT) | \ | 252 | (1ULL << INT_DOUBLE_FAULT) | \ |
251 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 253 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
252 | 0) | 254 | 0) |
253 | #define SYNC_INTERRUPTS ( \ | 255 | #define SYNC_INTERRUPTS ( \ |
254 | INT_MASK(INT_ITLB_MISS) | \ | 256 | (1ULL << INT_ITLB_MISS) | \ |
255 | INT_MASK(INT_ILL) | \ | 257 | (1ULL << INT_ILL) | \ |
256 | INT_MASK(INT_GPV) | \ | 258 | (1ULL << INT_GPV) | \ |
257 | INT_MASK(INT_SN_ACCESS) | \ | 259 | (1ULL << INT_SN_ACCESS) | \ |
258 | INT_MASK(INT_IDN_ACCESS) | \ | 260 | (1ULL << INT_IDN_ACCESS) | \ |
259 | INT_MASK(INT_UDN_ACCESS) | \ | 261 | (1ULL << INT_UDN_ACCESS) | \ |
260 | INT_MASK(INT_IDN_REFILL) | \ | 262 | (1ULL << INT_IDN_REFILL) | \ |
261 | INT_MASK(INT_UDN_REFILL) | \ | 263 | (1ULL << INT_UDN_REFILL) | \ |
262 | INT_MASK(INT_IDN_COMPLETE) | \ | 264 | (1ULL << INT_IDN_COMPLETE) | \ |
263 | INT_MASK(INT_UDN_COMPLETE) | \ | 265 | (1ULL << INT_UDN_COMPLETE) | \ |
264 | INT_MASK(INT_SWINT_3) | \ | 266 | (1ULL << INT_SWINT_3) | \ |
265 | INT_MASK(INT_SWINT_2) | \ | 267 | (1ULL << INT_SWINT_2) | \ |
266 | INT_MASK(INT_SWINT_1) | \ | 268 | (1ULL << INT_SWINT_1) | \ |
267 | INT_MASK(INT_SWINT_0) | \ | 269 | (1ULL << INT_SWINT_0) | \ |
268 | INT_MASK(INT_UNALIGN_DATA) | \ | 270 | (1ULL << INT_UNALIGN_DATA) | \ |
269 | INT_MASK(INT_DTLB_MISS) | \ | 271 | (1ULL << INT_DTLB_MISS) | \ |
270 | INT_MASK(INT_DTLB_ACCESS) | \ | 272 | (1ULL << INT_DTLB_ACCESS) | \ |
271 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 273 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
272 | 0) | 274 | 0) |
273 | #define NON_SYNC_INTERRUPTS ( \ | 275 | #define NON_SYNC_INTERRUPTS ( \ |
274 | INT_MASK(INT_MEM_ERROR) | \ | 276 | (1ULL << INT_MEM_ERROR) | \ |
275 | INT_MASK(INT_DMATLB_MISS) | \ | 277 | (1ULL << INT_DMATLB_MISS) | \ |
276 | INT_MASK(INT_DMATLB_ACCESS) | \ | 278 | (1ULL << INT_DMATLB_ACCESS) | \ |
277 | INT_MASK(INT_SNITLB_MISS) | \ | 279 | (1ULL << INT_SNITLB_MISS) | \ |
278 | INT_MASK(INT_SN_NOTIFY) | \ | 280 | (1ULL << INT_SN_NOTIFY) | \ |
279 | INT_MASK(INT_SN_FIREWALL) | \ | 281 | (1ULL << INT_SN_FIREWALL) | \ |
280 | INT_MASK(INT_IDN_FIREWALL) | \ | 282 | (1ULL << INT_IDN_FIREWALL) | \ |
281 | INT_MASK(INT_UDN_FIREWALL) | \ | 283 | (1ULL << INT_UDN_FIREWALL) | \ |
282 | INT_MASK(INT_TILE_TIMER) | \ | 284 | (1ULL << INT_TILE_TIMER) | \ |
283 | INT_MASK(INT_IDN_TIMER) | \ | 285 | (1ULL << INT_IDN_TIMER) | \ |
284 | INT_MASK(INT_UDN_TIMER) | \ | 286 | (1ULL << INT_UDN_TIMER) | \ |
285 | INT_MASK(INT_DMA_NOTIFY) | \ | 287 | (1ULL << INT_DMA_NOTIFY) | \ |
286 | INT_MASK(INT_IDN_CA) | \ | 288 | (1ULL << INT_IDN_CA) | \ |
287 | INT_MASK(INT_UDN_CA) | \ | 289 | (1ULL << INT_UDN_CA) | \ |
288 | INT_MASK(INT_IDN_AVAIL) | \ | 290 | (1ULL << INT_IDN_AVAIL) | \ |
289 | INT_MASK(INT_UDN_AVAIL) | \ | 291 | (1ULL << INT_UDN_AVAIL) | \ |
290 | INT_MASK(INT_PERF_COUNT) | \ | 292 | (1ULL << INT_PERF_COUNT) | \ |
291 | INT_MASK(INT_INTCTRL_3) | \ | 293 | (1ULL << INT_INTCTRL_3) | \ |
292 | INT_MASK(INT_INTCTRL_2) | \ | 294 | (1ULL << INT_INTCTRL_2) | \ |
293 | INT_MASK(INT_INTCTRL_1) | \ | 295 | (1ULL << INT_INTCTRL_1) | \ |
294 | INT_MASK(INT_INTCTRL_0) | \ | 296 | (1ULL << INT_INTCTRL_0) | \ |
295 | INT_MASK(INT_BOOT_ACCESS) | \ | 297 | (1ULL << INT_BOOT_ACCESS) | \ |
296 | INT_MASK(INT_WORLD_ACCESS) | \ | 298 | (1ULL << INT_WORLD_ACCESS) | \ |
297 | INT_MASK(INT_I_ASID) | \ | 299 | (1ULL << INT_I_ASID) | \ |
298 | INT_MASK(INT_D_ASID) | \ | 300 | (1ULL << INT_D_ASID) | \ |
299 | INT_MASK(INT_DMA_ASID) | \ | 301 | (1ULL << INT_DMA_ASID) | \ |
300 | INT_MASK(INT_SNI_ASID) | \ | 302 | (1ULL << INT_SNI_ASID) | \ |
301 | INT_MASK(INT_DMA_CPL) | \ | 303 | (1ULL << INT_DMA_CPL) | \ |
302 | INT_MASK(INT_SN_CPL) | \ | 304 | (1ULL << INT_SN_CPL) | \ |
303 | INT_MASK(INT_DOUBLE_FAULT) | \ | 305 | (1ULL << INT_DOUBLE_FAULT) | \ |
304 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 306 | (1ULL << INT_AUX_PERF_COUNT) | \ |
305 | 0) | 307 | 0) |
306 | #endif /* !__ASSEMBLER__ */ | 308 | #endif /* !__ASSEMBLER__ */ |
307 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 309 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
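In both uapi headers the generic INT_MASK()/INT_MASK_LO()/INT_MASK_HI() helpers are now exposed only to userspace (#ifndef __KERNEL__), and kernel code spells every mask as an explicit (1ULL << intno) shift instead, presumably to keep the short INT_MASK name from clashing with unrelated kernel macros. The explicit form also stays correct for interrupt numbers above 31, where a plain int shift would overflow; a small standalone illustration (the interrupt number is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int intno = 40;                 /* hypothetical interrupt number > 31 */

        /* uint64_t bad = 1 << intno;   undefined: shift happens in 32-bit int */
        uint64_t good = 1ULL << intno;  /* what the headers now spell out      */

        printf("bit %d -> %#llx\n", intno, (unsigned long long)good);
        return 0;
}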
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h
index 5bb58b2e4e6f..13c9f9182348 100644
--- a/arch/tile/include/uapi/arch/interrupts_64.h
+++ b/arch/tile/include/uapi/arch/interrupts_64.h
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 #ifdef __ASSEMBLER__
 /* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
 #else
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
@@ -85,192 +87,192 @@ | |||
85 | 87 | ||
86 | #ifndef __ASSEMBLER__ | 88 | #ifndef __ASSEMBLER__ |
87 | #define QUEUED_INTERRUPTS ( \ | 89 | #define QUEUED_INTERRUPTS ( \ |
88 | INT_MASK(INT_MEM_ERROR) | \ | 90 | (1ULL << INT_MEM_ERROR) | \ |
89 | INT_MASK(INT_IDN_COMPLETE) | \ | 91 | (1ULL << INT_IDN_COMPLETE) | \ |
90 | INT_MASK(INT_UDN_COMPLETE) | \ | 92 | (1ULL << INT_UDN_COMPLETE) | \ |
91 | INT_MASK(INT_IDN_FIREWALL) | \ | 93 | (1ULL << INT_IDN_FIREWALL) | \ |
92 | INT_MASK(INT_UDN_FIREWALL) | \ | 94 | (1ULL << INT_UDN_FIREWALL) | \ |
93 | INT_MASK(INT_TILE_TIMER) | \ | 95 | (1ULL << INT_TILE_TIMER) | \ |
94 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 96 | (1ULL << INT_AUX_TILE_TIMER) | \ |
95 | INT_MASK(INT_IDN_TIMER) | \ | 97 | (1ULL << INT_IDN_TIMER) | \ |
96 | INT_MASK(INT_UDN_TIMER) | \ | 98 | (1ULL << INT_UDN_TIMER) | \ |
97 | INT_MASK(INT_IDN_AVAIL) | \ | 99 | (1ULL << INT_IDN_AVAIL) | \ |
98 | INT_MASK(INT_UDN_AVAIL) | \ | 100 | (1ULL << INT_UDN_AVAIL) | \ |
99 | INT_MASK(INT_IPI_3) | \ | 101 | (1ULL << INT_IPI_3) | \ |
100 | INT_MASK(INT_IPI_2) | \ | 102 | (1ULL << INT_IPI_2) | \ |
101 | INT_MASK(INT_IPI_1) | \ | 103 | (1ULL << INT_IPI_1) | \ |
102 | INT_MASK(INT_IPI_0) | \ | 104 | (1ULL << INT_IPI_0) | \ |
103 | INT_MASK(INT_PERF_COUNT) | \ | 105 | (1ULL << INT_PERF_COUNT) | \ |
104 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 106 | (1ULL << INT_AUX_PERF_COUNT) | \ |
105 | INT_MASK(INT_INTCTRL_3) | \ | 107 | (1ULL << INT_INTCTRL_3) | \ |
106 | INT_MASK(INT_INTCTRL_2) | \ | 108 | (1ULL << INT_INTCTRL_2) | \ |
107 | INT_MASK(INT_INTCTRL_1) | \ | 109 | (1ULL << INT_INTCTRL_1) | \ |
108 | INT_MASK(INT_INTCTRL_0) | \ | 110 | (1ULL << INT_INTCTRL_0) | \ |
109 | INT_MASK(INT_BOOT_ACCESS) | \ | 111 | (1ULL << INT_BOOT_ACCESS) | \ |
110 | INT_MASK(INT_WORLD_ACCESS) | \ | 112 | (1ULL << INT_WORLD_ACCESS) | \ |
111 | INT_MASK(INT_I_ASID) | \ | 113 | (1ULL << INT_I_ASID) | \ |
112 | INT_MASK(INT_D_ASID) | \ | 114 | (1ULL << INT_D_ASID) | \ |
113 | INT_MASK(INT_DOUBLE_FAULT) | \ | 115 | (1ULL << INT_DOUBLE_FAULT) | \ |
114 | 0) | 116 | 0) |
115 | #define NONQUEUED_INTERRUPTS ( \ | 117 | #define NONQUEUED_INTERRUPTS ( \ |
116 | INT_MASK(INT_SINGLE_STEP_3) | \ | 118 | (1ULL << INT_SINGLE_STEP_3) | \ |
117 | INT_MASK(INT_SINGLE_STEP_2) | \ | 119 | (1ULL << INT_SINGLE_STEP_2) | \ |
118 | INT_MASK(INT_SINGLE_STEP_1) | \ | 120 | (1ULL << INT_SINGLE_STEP_1) | \ |
119 | INT_MASK(INT_SINGLE_STEP_0) | \ | 121 | (1ULL << INT_SINGLE_STEP_0) | \ |
120 | INT_MASK(INT_ITLB_MISS) | \ | 122 | (1ULL << INT_ITLB_MISS) | \ |
121 | INT_MASK(INT_ILL) | \ | 123 | (1ULL << INT_ILL) | \ |
122 | INT_MASK(INT_GPV) | \ | 124 | (1ULL << INT_GPV) | \ |
123 | INT_MASK(INT_IDN_ACCESS) | \ | 125 | (1ULL << INT_IDN_ACCESS) | \ |
124 | INT_MASK(INT_UDN_ACCESS) | \ | 126 | (1ULL << INT_UDN_ACCESS) | \ |
125 | INT_MASK(INT_SWINT_3) | \ | 127 | (1ULL << INT_SWINT_3) | \ |
126 | INT_MASK(INT_SWINT_2) | \ | 128 | (1ULL << INT_SWINT_2) | \ |
127 | INT_MASK(INT_SWINT_1) | \ | 129 | (1ULL << INT_SWINT_1) | \ |
128 | INT_MASK(INT_SWINT_0) | \ | 130 | (1ULL << INT_SWINT_0) | \ |
129 | INT_MASK(INT_ILL_TRANS) | \ | 131 | (1ULL << INT_ILL_TRANS) | \ |
130 | INT_MASK(INT_UNALIGN_DATA) | \ | 132 | (1ULL << INT_UNALIGN_DATA) | \ |
131 | INT_MASK(INT_DTLB_MISS) | \ | 133 | (1ULL << INT_DTLB_MISS) | \ |
132 | INT_MASK(INT_DTLB_ACCESS) | \ | 134 | (1ULL << INT_DTLB_ACCESS) | \ |
133 | 0) | 135 | 0) |
134 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 136 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
135 | INT_MASK(INT_MEM_ERROR) | \ | 137 | (1ULL << INT_MEM_ERROR) | \ |
136 | INT_MASK(INT_SINGLE_STEP_3) | \ | 138 | (1ULL << INT_SINGLE_STEP_3) | \ |
137 | INT_MASK(INT_SINGLE_STEP_2) | \ | 139 | (1ULL << INT_SINGLE_STEP_2) | \ |
138 | INT_MASK(INT_SINGLE_STEP_1) | \ | 140 | (1ULL << INT_SINGLE_STEP_1) | \ |
139 | INT_MASK(INT_SINGLE_STEP_0) | \ | 141 | (1ULL << INT_SINGLE_STEP_0) | \ |
140 | INT_MASK(INT_IDN_COMPLETE) | \ | 142 | (1ULL << INT_IDN_COMPLETE) | \ |
141 | INT_MASK(INT_UDN_COMPLETE) | \ | 143 | (1ULL << INT_UDN_COMPLETE) | \ |
142 | INT_MASK(INT_IDN_FIREWALL) | \ | 144 | (1ULL << INT_IDN_FIREWALL) | \ |
143 | INT_MASK(INT_UDN_FIREWALL) | \ | 145 | (1ULL << INT_UDN_FIREWALL) | \ |
144 | INT_MASK(INT_TILE_TIMER) | \ | 146 | (1ULL << INT_TILE_TIMER) | \ |
145 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 147 | (1ULL << INT_AUX_TILE_TIMER) | \ |
146 | INT_MASK(INT_IDN_TIMER) | \ | 148 | (1ULL << INT_IDN_TIMER) | \ |
147 | INT_MASK(INT_UDN_TIMER) | \ | 149 | (1ULL << INT_UDN_TIMER) | \ |
148 | INT_MASK(INT_IDN_AVAIL) | \ | 150 | (1ULL << INT_IDN_AVAIL) | \ |
149 | INT_MASK(INT_UDN_AVAIL) | \ | 151 | (1ULL << INT_UDN_AVAIL) | \ |
150 | INT_MASK(INT_IPI_3) | \ | 152 | (1ULL << INT_IPI_3) | \ |
151 | INT_MASK(INT_IPI_2) | \ | 153 | (1ULL << INT_IPI_2) | \ |
152 | INT_MASK(INT_IPI_1) | \ | 154 | (1ULL << INT_IPI_1) | \ |
153 | INT_MASK(INT_IPI_0) | \ | 155 | (1ULL << INT_IPI_0) | \ |
154 | INT_MASK(INT_PERF_COUNT) | \ | 156 | (1ULL << INT_PERF_COUNT) | \ |
155 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 157 | (1ULL << INT_AUX_PERF_COUNT) | \ |
156 | INT_MASK(INT_INTCTRL_3) | \ | 158 | (1ULL << INT_INTCTRL_3) | \ |
157 | INT_MASK(INT_INTCTRL_2) | \ | 159 | (1ULL << INT_INTCTRL_2) | \ |
158 | INT_MASK(INT_INTCTRL_1) | \ | 160 | (1ULL << INT_INTCTRL_1) | \ |
159 | INT_MASK(INT_INTCTRL_0) | \ | 161 | (1ULL << INT_INTCTRL_0) | \ |
160 | 0) | 162 | 0) |
161 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 163 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
162 | INT_MASK(INT_ITLB_MISS) | \ | 164 | (1ULL << INT_ITLB_MISS) | \ |
163 | INT_MASK(INT_ILL) | \ | 165 | (1ULL << INT_ILL) | \ |
164 | INT_MASK(INT_GPV) | \ | 166 | (1ULL << INT_GPV) | \ |
165 | INT_MASK(INT_IDN_ACCESS) | \ | 167 | (1ULL << INT_IDN_ACCESS) | \ |
166 | INT_MASK(INT_UDN_ACCESS) | \ | 168 | (1ULL << INT_UDN_ACCESS) | \ |
167 | INT_MASK(INT_SWINT_3) | \ | 169 | (1ULL << INT_SWINT_3) | \ |
168 | INT_MASK(INT_SWINT_2) | \ | 170 | (1ULL << INT_SWINT_2) | \ |
169 | INT_MASK(INT_SWINT_1) | \ | 171 | (1ULL << INT_SWINT_1) | \ |
170 | INT_MASK(INT_SWINT_0) | \ | 172 | (1ULL << INT_SWINT_0) | \ |
171 | INT_MASK(INT_ILL_TRANS) | \ | 173 | (1ULL << INT_ILL_TRANS) | \ |
172 | INT_MASK(INT_UNALIGN_DATA) | \ | 174 | (1ULL << INT_UNALIGN_DATA) | \ |
173 | INT_MASK(INT_DTLB_MISS) | \ | 175 | (1ULL << INT_DTLB_MISS) | \ |
174 | INT_MASK(INT_DTLB_ACCESS) | \ | 176 | (1ULL << INT_DTLB_ACCESS) | \ |
175 | INT_MASK(INT_BOOT_ACCESS) | \ | 177 | (1ULL << INT_BOOT_ACCESS) | \ |
176 | INT_MASK(INT_WORLD_ACCESS) | \ | 178 | (1ULL << INT_WORLD_ACCESS) | \ |
177 | INT_MASK(INT_I_ASID) | \ | 179 | (1ULL << INT_I_ASID) | \ |
178 | INT_MASK(INT_D_ASID) | \ | 180 | (1ULL << INT_D_ASID) | \ |
179 | INT_MASK(INT_DOUBLE_FAULT) | \ | 181 | (1ULL << INT_DOUBLE_FAULT) | \ |
180 | 0) | 182 | 0) |
181 | #define MASKABLE_INTERRUPTS ( \ | 183 | #define MASKABLE_INTERRUPTS ( \ |
182 | INT_MASK(INT_MEM_ERROR) | \ | 184 | (1ULL << INT_MEM_ERROR) | \ |
183 | INT_MASK(INT_SINGLE_STEP_3) | \ | 185 | (1ULL << INT_SINGLE_STEP_3) | \ |
184 | INT_MASK(INT_SINGLE_STEP_2) | \ | 186 | (1ULL << INT_SINGLE_STEP_2) | \ |
185 | INT_MASK(INT_SINGLE_STEP_1) | \ | 187 | (1ULL << INT_SINGLE_STEP_1) | \ |
186 | INT_MASK(INT_SINGLE_STEP_0) | \ | 188 | (1ULL << INT_SINGLE_STEP_0) | \ |
187 | INT_MASK(INT_IDN_COMPLETE) | \ | 189 | (1ULL << INT_IDN_COMPLETE) | \ |
188 | INT_MASK(INT_UDN_COMPLETE) | \ | 190 | (1ULL << INT_UDN_COMPLETE) | \ |
189 | INT_MASK(INT_IDN_FIREWALL) | \ | 191 | (1ULL << INT_IDN_FIREWALL) | \ |
190 | INT_MASK(INT_UDN_FIREWALL) | \ | 192 | (1ULL << INT_UDN_FIREWALL) | \ |
191 | INT_MASK(INT_TILE_TIMER) | \ | 193 | (1ULL << INT_TILE_TIMER) | \ |
192 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 194 | (1ULL << INT_AUX_TILE_TIMER) | \ |
193 | INT_MASK(INT_IDN_TIMER) | \ | 195 | (1ULL << INT_IDN_TIMER) | \ |
194 | INT_MASK(INT_UDN_TIMER) | \ | 196 | (1ULL << INT_UDN_TIMER) | \ |
195 | INT_MASK(INT_IDN_AVAIL) | \ | 197 | (1ULL << INT_IDN_AVAIL) | \ |
196 | INT_MASK(INT_UDN_AVAIL) | \ | 198 | (1ULL << INT_UDN_AVAIL) | \ |
197 | INT_MASK(INT_IPI_3) | \ | 199 | (1ULL << INT_IPI_3) | \ |
198 | INT_MASK(INT_IPI_2) | \ | 200 | (1ULL << INT_IPI_2) | \ |
199 | INT_MASK(INT_IPI_1) | \ | 201 | (1ULL << INT_IPI_1) | \ |
200 | INT_MASK(INT_IPI_0) | \ | 202 | (1ULL << INT_IPI_0) | \ |
201 | INT_MASK(INT_PERF_COUNT) | \ | 203 | (1ULL << INT_PERF_COUNT) | \ |
202 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 204 | (1ULL << INT_AUX_PERF_COUNT) | \ |
203 | INT_MASK(INT_INTCTRL_3) | \ | 205 | (1ULL << INT_INTCTRL_3) | \ |
204 | INT_MASK(INT_INTCTRL_2) | \ | 206 | (1ULL << INT_INTCTRL_2) | \ |
205 | INT_MASK(INT_INTCTRL_1) | \ | 207 | (1ULL << INT_INTCTRL_1) | \ |
206 | INT_MASK(INT_INTCTRL_0) | \ | 208 | (1ULL << INT_INTCTRL_0) | \ |
207 | 0) | 209 | 0) |
208 | #define UNMASKABLE_INTERRUPTS ( \ | 210 | #define UNMASKABLE_INTERRUPTS ( \ |
209 | INT_MASK(INT_ITLB_MISS) | \ | 211 | (1ULL << INT_ITLB_MISS) | \ |
210 | INT_MASK(INT_ILL) | \ | 212 | (1ULL << INT_ILL) | \ |
211 | INT_MASK(INT_GPV) | \ | 213 | (1ULL << INT_GPV) | \ |
212 | INT_MASK(INT_IDN_ACCESS) | \ | 214 | (1ULL << INT_IDN_ACCESS) | \ |
213 | INT_MASK(INT_UDN_ACCESS) | \ | 215 | (1ULL << INT_UDN_ACCESS) | \ |
214 | INT_MASK(INT_SWINT_3) | \ | 216 | (1ULL << INT_SWINT_3) | \ |
215 | INT_MASK(INT_SWINT_2) | \ | 217 | (1ULL << INT_SWINT_2) | \ |
216 | INT_MASK(INT_SWINT_1) | \ | 218 | (1ULL << INT_SWINT_1) | \ |
217 | INT_MASK(INT_SWINT_0) | \ | 219 | (1ULL << INT_SWINT_0) | \ |
218 | INT_MASK(INT_ILL_TRANS) | \ | 220 | (1ULL << INT_ILL_TRANS) | \ |
219 | INT_MASK(INT_UNALIGN_DATA) | \ | 221 | (1ULL << INT_UNALIGN_DATA) | \ |
220 | INT_MASK(INT_DTLB_MISS) | \ | 222 | (1ULL << INT_DTLB_MISS) | \ |
221 | INT_MASK(INT_DTLB_ACCESS) | \ | 223 | (1ULL << INT_DTLB_ACCESS) | \ |
222 | INT_MASK(INT_BOOT_ACCESS) | \ | 224 | (1ULL << INT_BOOT_ACCESS) | \ |
223 | INT_MASK(INT_WORLD_ACCESS) | \ | 225 | (1ULL << INT_WORLD_ACCESS) | \ |
224 | INT_MASK(INT_I_ASID) | \ | 226 | (1ULL << INT_I_ASID) | \ |
225 | INT_MASK(INT_D_ASID) | \ | 227 | (1ULL << INT_D_ASID) | \ |
226 | INT_MASK(INT_DOUBLE_FAULT) | \ | 228 | (1ULL << INT_DOUBLE_FAULT) | \ |
227 | 0) | 229 | 0) |
228 | #define SYNC_INTERRUPTS ( \ | 230 | #define SYNC_INTERRUPTS ( \ |
229 | INT_MASK(INT_SINGLE_STEP_3) | \ | 231 | (1ULL << INT_SINGLE_STEP_3) | \ |
230 | INT_MASK(INT_SINGLE_STEP_2) | \ | 232 | (1ULL << INT_SINGLE_STEP_2) | \ |
231 | INT_MASK(INT_SINGLE_STEP_1) | \ | 233 | (1ULL << INT_SINGLE_STEP_1) | \ |
232 | INT_MASK(INT_SINGLE_STEP_0) | \ | 234 | (1ULL << INT_SINGLE_STEP_0) | \ |
233 | INT_MASK(INT_IDN_COMPLETE) | \ | 235 | (1ULL << INT_IDN_COMPLETE) | \ |
234 | INT_MASK(INT_UDN_COMPLETE) | \ | 236 | (1ULL << INT_UDN_COMPLETE) | \ |
235 | INT_MASK(INT_ITLB_MISS) | \ | 237 | (1ULL << INT_ITLB_MISS) | \ |
236 | INT_MASK(INT_ILL) | \ | 238 | (1ULL << INT_ILL) | \ |
237 | INT_MASK(INT_GPV) | \ | 239 | (1ULL << INT_GPV) | \ |
238 | INT_MASK(INT_IDN_ACCESS) | \ | 240 | (1ULL << INT_IDN_ACCESS) | \ |
239 | INT_MASK(INT_UDN_ACCESS) | \ | 241 | (1ULL << INT_UDN_ACCESS) | \ |
240 | INT_MASK(INT_SWINT_3) | \ | 242 | (1ULL << INT_SWINT_3) | \ |
241 | INT_MASK(INT_SWINT_2) | \ | 243 | (1ULL << INT_SWINT_2) | \ |
242 | INT_MASK(INT_SWINT_1) | \ | 244 | (1ULL << INT_SWINT_1) | \ |
243 | INT_MASK(INT_SWINT_0) | \ | 245 | (1ULL << INT_SWINT_0) | \ |
244 | INT_MASK(INT_ILL_TRANS) | \ | 246 | (1ULL << INT_ILL_TRANS) | \ |
245 | INT_MASK(INT_UNALIGN_DATA) | \ | 247 | (1ULL << INT_UNALIGN_DATA) | \ |
246 | INT_MASK(INT_DTLB_MISS) | \ | 248 | (1ULL << INT_DTLB_MISS) | \ |
247 | INT_MASK(INT_DTLB_ACCESS) | \ | 249 | (1ULL << INT_DTLB_ACCESS) | \ |
248 | 0) | 250 | 0) |
249 | #define NON_SYNC_INTERRUPTS ( \ | 251 | #define NON_SYNC_INTERRUPTS ( \ |
250 | INT_MASK(INT_MEM_ERROR) | \ | 252 | (1ULL << INT_MEM_ERROR) | \ |
251 | INT_MASK(INT_IDN_FIREWALL) | \ | 253 | (1ULL << INT_IDN_FIREWALL) | \ |
252 | INT_MASK(INT_UDN_FIREWALL) | \ | 254 | (1ULL << INT_UDN_FIREWALL) | \ |
253 | INT_MASK(INT_TILE_TIMER) | \ | 255 | (1ULL << INT_TILE_TIMER) | \ |
254 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 256 | (1ULL << INT_AUX_TILE_TIMER) | \ |
255 | INT_MASK(INT_IDN_TIMER) | \ | 257 | (1ULL << INT_IDN_TIMER) | \ |
256 | INT_MASK(INT_UDN_TIMER) | \ | 258 | (1ULL << INT_UDN_TIMER) | \ |
257 | INT_MASK(INT_IDN_AVAIL) | \ | 259 | (1ULL << INT_IDN_AVAIL) | \ |
258 | INT_MASK(INT_UDN_AVAIL) | \ | 260 | (1ULL << INT_UDN_AVAIL) | \ |
259 | INT_MASK(INT_IPI_3) | \ | 261 | (1ULL << INT_IPI_3) | \ |
260 | INT_MASK(INT_IPI_2) | \ | 262 | (1ULL << INT_IPI_2) | \ |
261 | INT_MASK(INT_IPI_1) | \ | 263 | (1ULL << INT_IPI_1) | \ |
262 | INT_MASK(INT_IPI_0) | \ | 264 | (1ULL << INT_IPI_0) | \ |
263 | INT_MASK(INT_PERF_COUNT) | \ | 265 | (1ULL << INT_PERF_COUNT) | \ |
264 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 266 | (1ULL << INT_AUX_PERF_COUNT) | \ |
265 | INT_MASK(INT_INTCTRL_3) | \ | 267 | (1ULL << INT_INTCTRL_3) | \ |
266 | INT_MASK(INT_INTCTRL_2) | \ | 268 | (1ULL << INT_INTCTRL_2) | \ |
267 | INT_MASK(INT_INTCTRL_1) | \ | 269 | (1ULL << INT_INTCTRL_1) | \ |
268 | INT_MASK(INT_INTCTRL_0) | \ | 270 | (1ULL << INT_INTCTRL_0) | \ |
269 | INT_MASK(INT_BOOT_ACCESS) | \ | 271 | (1ULL << INT_BOOT_ACCESS) | \ |
270 | INT_MASK(INT_WORLD_ACCESS) | \ | 272 | (1ULL << INT_WORLD_ACCESS) | \ |
271 | INT_MASK(INT_I_ASID) | \ | 273 | (1ULL << INT_I_ASID) | \ |
272 | INT_MASK(INT_D_ASID) | \ | 274 | (1ULL << INT_D_ASID) | \ |
273 | INT_MASK(INT_DOUBLE_FAULT) | \ | 275 | (1ULL << INT_DOUBLE_FAULT) | \ |
274 | 0) | 276 | 0) |
275 | #endif /* !__ASSEMBLER__ */ | 277 | #endif /* !__ASSEMBLER__ */ |
276 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 278 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 54bc9a6678e8..4ea080902654 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1035,7 +1035,9 @@
        /* Ensure that the syscall number is within the legal range. */
        {
         moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
         blbs   r30, .Lcompat_syscall
+#endif
        }
        {
         cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
        j .Lresume_userspace /* jump into middle of interrupt_return */
        }
 
+#ifdef CONFIG_COMPAT
 .Lcompat_syscall:
        /*
         * Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
        { move r15, r4; addxi r4, r4, 0 }
        { move r16, r5; addxi r5, r5, 0 }
        j .Lload_syscall_pointer
+#endif
 
 .Linvalid_syscall:
        /* Report an invalid syscall back to the user program */
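With the added #ifdef, the branch to .Lcompat_syscall and the compat table lookup are only assembled when CONFIG_COMPAT is enabled, so a non-compat kernel carries neither the dead code nor the unresolved label. A toy C model of that dispatch shape; the names and tables are invented for illustration and are not the kernel's:

#include <stdio.h>

#define CONFIG_COMPAT 1                 /* flip to 0 to drop the compat path */

typedef long (*syscall_fn)(long);

static long native_getpid(long arg) { (void)arg; return 1234; }
static syscall_fn sys_call_table[] = { native_getpid };

#if CONFIG_COMPAT
static long compat_getpid(long arg) { (void)arg; return 1234; }
static syscall_fn compat_sys_call_table[] = { compat_getpid };
#endif

static long dispatch(unsigned nr, int is_compat, long arg)
{
        if (nr >= 1)
                return -1;              /* .Linvalid_syscall */
#if CONFIG_COMPAT
        if (is_compat)                  /* blbs r30, .Lcompat_syscall */
                return compat_sys_call_table[nr](arg);
#endif
        return sys_call_table[nr](arg);
}

int main(void)
{
        printf("%ld %ld\n", dispatch(0, 0, 0), dispatch(0, 1, 0));
        return 0;
}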
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 0e5661e7d00d..caf93ae11793 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
 int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
 {
-       struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+       struct pt_regs *childregs = task_pt_regs(p);
        unsigned long ksp;
        unsigned long *callee_regs;
 
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index baa3d905fee2..d1b5c913ae72 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -16,6 +16,7 @@
 #include <linux/reboot.h>
 #include <linux/smp.h>
 #include <linux/pm.h>
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
 
 /* No interesting distinction to be made here. */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
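Exporting pm_power_off lets a modular driver install its own power-off handler; <linux/export.h> is pulled in for the EXPORT_SYMBOL() macro. A minimal sketch of a hypothetical module doing so, shown for shape only against generic kernel APIs:

#include <linux/module.h>
#include <linux/pm.h>

static void my_board_power_off(void)
{
        /* poke board-specific registers here */
}

static int __init my_poweroff_init(void)
{
        pm_power_off = my_board_power_off;      /* possible because it is exported */
        return 0;
}

static void __exit my_poweroff_exit(void)
{
        if (pm_power_off == my_board_power_off)
                pm_power_off = NULL;
}

module_init(my_poweroff_init);
module_exit(my_poweroff_exit);
MODULE_LICENSE("GPL");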
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6a649a4462d3..d1e15f7b59c6 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -31,6 +31,7 @@
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
 #include <linux/start_kernel.h>
+#include <linux/screen_info.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
 /* Chip information */
 char chip_model[64] __write_once;
 
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index b2f44c28dda6..ed258b8ae320 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
                        p->pc, p->sp, p->ex1);
                p = NULL;
        }
-       if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+       if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
        return NULL;
 }
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
        save_stack_trace_tsk(NULL, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 #endif
 
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index db4fb89e12d8..8f8ad814b139 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -12,6 +12,7 @@
  * more details.
  */
 
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
        __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
 #endif
 }
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c
index fdc403614d12..75947edccb26 100644
--- a/arch/tile/lib/cpumask.c
+++ b/arch/tile/lib/cpumask.c
@@ -16,6 +16,7 @@
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/export.h>
 
 /*
  * Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
        } while (*bp != '\0' && *bp != '\n');
        return 0;
 }
+EXPORT_SYMBOL(bitmap_parselist_crop);
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index dd5f0a33fdaf..4385cb6fa00a 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_close);
 EXPORT_SYMBOL(hv_sysconf);
 EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
 
 /* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 5f7868dcd6d4..1ae911939a18 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
                __set_pte(ptep, pte_set_home(pteval, home));
        }
 }
+EXPORT_SYMBOL(homecache_change_page_home);
 
 struct page *homecache_alloc_pages(gfp_t gfp_mask,
                                   unsigned int order, int home)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index ecdfee60ee4a..f4076af1f4ed 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -3,6 +3,90 @@ | |||
3 | 3 | ||
4 | #include <uapi/asm/mce.h> | 4 | #include <uapi/asm/mce.h> |
5 | 5 | ||
6 | /* | ||
7 | * Machine Check support for x86 | ||
8 | */ | ||
9 | |||
10 | /* MCG_CAP register defines */ | ||
11 | #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ | ||
12 | #define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ | ||
13 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | ||
14 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | ||
15 | #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ | ||
16 | #define MCG_EXT_CNT_SHIFT 16 | ||
17 | #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) | ||
18 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | ||
19 | |||
20 | /* MCG_STATUS register defines */ | ||
21 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ | ||
22 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ | ||
23 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ | ||
24 | |||
25 | /* MCi_STATUS register defines */ | ||
26 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ | ||
27 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ | ||
28 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | ||
29 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | ||
30 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | ||
31 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | ||
32 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | ||
33 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | ||
34 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | ||
35 | #define MCACOD 0xffff /* MCA Error Code */ | ||
36 | |||
37 | /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ | ||
38 | #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ | ||
39 | #define MCACOD_SCRUBMSK 0xfff0 | ||
40 | #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ | ||
41 | #define MCACOD_DATA 0x0134 /* Data Load */ | ||
42 | #define MCACOD_INSTR 0x0150 /* Instruction Fetch */ | ||
43 | |||
44 | /* MCi_MISC register defines */ | ||
45 | #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) | ||
46 | #define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) | ||
47 | #define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ | ||
48 | #define MCI_MISC_ADDR_LINEAR 1 /* linear address */ | ||
49 | #define MCI_MISC_ADDR_PHYS 2 /* physical address */ | ||
50 | #define MCI_MISC_ADDR_MEM 3 /* memory address */ | ||
51 | #define MCI_MISC_ADDR_GENERIC 7 /* generic */ | ||
52 | |||
53 | /* CTL2 register defines */ | ||
54 | #define MCI_CTL2_CMCI_EN (1ULL << 30) | ||
55 | #define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL | ||
56 | |||
57 | #define MCJ_CTX_MASK 3 | ||
58 | #define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) | ||
59 | #define MCJ_CTX_RANDOM 0 /* inject context: random */ | ||
60 | #define MCJ_CTX_PROCESS 0x1 /* inject context: process */ | ||
61 | #define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ | ||
62 | #define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ | ||
63 | #define MCJ_EXCEPTION 0x8 /* raise as exception */ | ||
64 | #define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ | ||
65 | |||
66 | #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ | ||
67 | |||
68 | /* Software defined banks */ | ||
69 | #define MCE_EXTENDED_BANK 128 | ||
70 | #define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0) | ||
71 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) | ||
72 | |||
73 | #define MCE_LOG_LEN 32 | ||
74 | #define MCE_LOG_SIGNATURE "MACHINECHECK" | ||
75 | |||
76 | /* | ||
77 | * This structure contains all data related to the MCE log. Also | ||
78 | * carries a signature to make it easier to find from external | ||
79 | * debugging tools. Each entry is only valid when its finished flag | ||
80 | * is set. | ||
81 | */ | ||
82 | struct mce_log { | ||
83 | char signature[12]; /* "MACHINECHECK" */ | ||
84 | unsigned len; /* = MCE_LOG_LEN */ | ||
85 | unsigned next; | ||
86 | unsigned flags; | ||
87 | unsigned recordlen; /* length of struct mce */ | ||
88 | struct mce entry[MCE_LOG_LEN]; | ||
89 | }; | ||
6 | 90 | ||
7 | struct mca_config { | 91 | struct mca_config { |
8 | bool dont_log_ce; | 92 | bool dont_log_ce; |
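
Editor's note: the MCG_* and MCI_* constants this hunk moves into <asm/mce.h> are plain bit masks over the IA32_MCG_CAP, MCG_STATUS and MCi_MISC MSR values. Below is a minimal user-space sketch of how they decode a capability word; the macro copies mirror the definitions added above, and the sample MSR readings and the program itself are made up for illustration, this is not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Local copies of the <asm/mce.h> masks used below, values as in the hunk above. */
#define MCG_BANKCNT_MASK      0xff
#define MCG_SER_P             (1ULL << 24)
#define MCG_EXT_CNT_MASK      0xff0000
#define MCG_EXT_CNT_SHIFT     16
#define MCG_EXT_CNT(c)        (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCI_MISC_ADDR_LSB(m)  ((m) & 0x3f)
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)

int main(void)
{
	uint64_t mcg_cap  = 0x1000c16;	/* hypothetical IA32_MCG_CAP reading */
	uint64_t mci_misc = 0x8c;	/* hypothetical MCi_MISC reading */

	printf("banks:         %llu\n", (unsigned long long)(mcg_cap & MCG_BANKCNT_MASK));
	printf("extended regs: %llu\n", (unsigned long long)MCG_EXT_CNT(mcg_cap));
	printf("recovery/SER:  %s\n", (mcg_cap & MCG_SER_P) ? "yes" : "no");
	printf("addr lsb %llu, addr mode %llu\n",
	       (unsigned long long)MCI_MISC_ADDR_LSB(mci_misc),
	       (unsigned long long)MCI_MISC_ADDR_MODE(mci_misc));
	return 0;
}
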
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5199db2923d3..1c1a955e67c0 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
142 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; | 142 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; |
143 | } | 143 | } |
144 | 144 | ||
145 | static inline unsigned long pud_pfn(pud_t pud) | ||
146 | { | ||
147 | return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; | ||
148 | } | ||
149 | |||
145 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | 150 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
146 | 151 | ||
147 | static inline int pmd_large(pmd_t pte) | 152 | static inline int pmd_large(pmd_t pte) |
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 58c829871c31..a0eab85ce7b8 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h | |||
@@ -4,66 +4,6 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/ioctls.h> | 5 | #include <asm/ioctls.h> |
6 | 6 | ||
7 | /* | ||
8 | * Machine Check support for x86 | ||
9 | */ | ||
10 | |||
11 | /* MCG_CAP register defines */ | ||
12 | #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ | ||
13 | #define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ | ||
14 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | ||
15 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | ||
16 | #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ | ||
17 | #define MCG_EXT_CNT_SHIFT 16 | ||
18 | #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) | ||
19 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | ||
20 | |||
21 | /* MCG_STATUS register defines */ | ||
22 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ | ||
23 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ | ||
24 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ | ||
25 | |||
26 | /* MCi_STATUS register defines */ | ||
27 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ | ||
28 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ | ||
29 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | ||
30 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | ||
31 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | ||
32 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | ||
33 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | ||
34 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | ||
35 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | ||
36 | #define MCACOD 0xffff /* MCA Error Code */ | ||
37 | |||
38 | /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ | ||
39 | #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ | ||
40 | #define MCACOD_SCRUBMSK 0xfff0 | ||
41 | #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ | ||
42 | #define MCACOD_DATA 0x0134 /* Data Load */ | ||
43 | #define MCACOD_INSTR 0x0150 /* Instruction Fetch */ | ||
44 | |||
45 | /* MCi_MISC register defines */ | ||
46 | #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) | ||
47 | #define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) | ||
48 | #define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ | ||
49 | #define MCI_MISC_ADDR_LINEAR 1 /* linear address */ | ||
50 | #define MCI_MISC_ADDR_PHYS 2 /* physical address */ | ||
51 | #define MCI_MISC_ADDR_MEM 3 /* memory address */ | ||
52 | #define MCI_MISC_ADDR_GENERIC 7 /* generic */ | ||
53 | |||
54 | /* CTL2 register defines */ | ||
55 | #define MCI_CTL2_CMCI_EN (1ULL << 30) | ||
56 | #define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL | ||
57 | |||
58 | #define MCJ_CTX_MASK 3 | ||
59 | #define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) | ||
60 | #define MCJ_CTX_RANDOM 0 /* inject context: random */ | ||
61 | #define MCJ_CTX_PROCESS 0x1 /* inject context: process */ | ||
62 | #define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ | ||
63 | #define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ | ||
64 | #define MCJ_EXCEPTION 0x8 /* raise as exception */ | ||
65 | #define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ | ||
66 | |||
67 | /* Fields are zero when not available */ | 7 | /* Fields are zero when not available */ |
68 | struct mce { | 8 | struct mce { |
69 | __u64 status; | 9 | __u64 status; |
@@ -87,35 +27,8 @@ struct mce { | |||
87 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ | 27 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ |
88 | }; | 28 | }; |
89 | 29 | ||
90 | /* | ||
91 | * This structure contains all data related to the MCE log. Also | ||
92 | * carries a signature to make it easier to find from external | ||
93 | * debugging tools. Each entry is only valid when its finished flag | ||
94 | * is set. | ||
95 | */ | ||
96 | |||
97 | #define MCE_LOG_LEN 32 | ||
98 | |||
99 | struct mce_log { | ||
100 | char signature[12]; /* "MACHINECHECK" */ | ||
101 | unsigned len; /* = MCE_LOG_LEN */ | ||
102 | unsigned next; | ||
103 | unsigned flags; | ||
104 | unsigned recordlen; /* length of struct mce */ | ||
105 | struct mce entry[MCE_LOG_LEN]; | ||
106 | }; | ||
107 | |||
108 | #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ | ||
109 | |||
110 | #define MCE_LOG_SIGNATURE "MACHINECHECK" | ||
111 | |||
112 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) | 30 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) |
113 | #define MCE_GET_LOG_LEN _IOR('M', 2, int) | 31 | #define MCE_GET_LOG_LEN _IOR('M', 2, int) |
114 | #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) | 32 | #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) |
115 | 33 | ||
116 | /* Software defined banks */ | ||
117 | #define MCE_EXTENDED_BANK 128 | ||
118 | #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 | ||
119 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) | ||
120 | |||
121 | #endif /* _UAPI_ASM_X86_MCE_H */ | 34 | #endif /* _UAPI_ASM_X86_MCE_H */ |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index e03a1e180e81..562a76d433c8 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg) | |||
20 | } | 20 | } |
21 | early_param("x2apic_phys", set_x2apic_phys_mode); | 21 | early_param("x2apic_phys", set_x2apic_phys_mode); |
22 | 22 | ||
23 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 23 | static bool x2apic_fadt_phys(void) |
24 | { | 24 | { |
25 | if (x2apic_phys) | 25 | if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && |
26 | return x2apic_enabled(); | 26 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { |
27 | else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && | ||
28 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && | ||
29 | x2apic_enabled()) { | ||
30 | printk(KERN_DEBUG "System requires x2apic physical mode\n"); | 27 | printk(KERN_DEBUG "System requires x2apic physical mode\n"); |
31 | return 1; | 28 | return true; |
32 | } | 29 | } |
33 | else | 30 | return false; |
34 | return 0; | 31 | } |
32 | |||
33 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
34 | { | ||
35 | return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()); | ||
35 | } | 36 | } |
36 | 37 | ||
37 | static void | 38 | static void |
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void) | |||
82 | 83 | ||
83 | static int x2apic_phys_probe(void) | 84 | static int x2apic_phys_probe(void) |
84 | { | 85 | { |
85 | if (x2apic_mode && x2apic_phys) | 86 | if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) |
86 | return 1; | 87 | return 1; |
87 | 88 | ||
88 | return apic == &apic_x2apic_phys; | 89 | return apic == &apic_x2apic_phys; |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 027088f2f7dd..fb674fd3fc22 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
748 | return; | 748 | return; |
749 | } | 749 | } |
750 | #endif | 750 | #endif |
751 | /* Kernel addresses are always protection faults: */ | ||
752 | if (address >= TASK_SIZE) | ||
753 | error_code |= PF_PROT; | ||
751 | 754 | ||
752 | if (unlikely(show_unhandled_signals)) | 755 | if (likely(show_unhandled_signals)) |
753 | show_signal_msg(regs, error_code, address, tsk); | 756 | show_signal_msg(regs, error_code, address, tsk); |
754 | 757 | ||
755 | /* Kernel addresses are always protection faults: */ | ||
756 | tsk->thread.cr2 = address; | 758 | tsk->thread.cr2 = address; |
757 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | 759 | tsk->thread.error_code = error_code; |
758 | tsk->thread.trap_nr = X86_TRAP_PF; | 760 | tsk->thread.trap_nr = X86_TRAP_PF; |
759 | 761 | ||
760 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); | 762 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 2ead3c8a4c84..75c9a6a59697 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr) | |||
831 | if (pud_none(*pud)) | 831 | if (pud_none(*pud)) |
832 | return 0; | 832 | return 0; |
833 | 833 | ||
834 | if (pud_large(*pud)) | ||
835 | return pfn_valid(pud_pfn(*pud)); | ||
836 | |||
834 | pmd = pmd_offset(pud, addr); | 837 | pmd = pmd_offset(pud, addr); |
835 | if (pmd_none(*pmd)) | 838 | if (pmd_none(*pmd)) |
836 | return 0; | 839 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c index 6b0843c33877..e05c15777588 100644 --- a/drivers/gpu/drm/nouveau/core/core/falcon.c +++ b/drivers/gpu/drm/nouveau/core/core/falcon.c | |||
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
73 | nv_debug(falcon, "data limit: %d\n", falcon->data.limit); | 73 | nv_debug(falcon, "data limit: %d\n", falcon->data.limit); |
74 | 74 | ||
75 | /* wait for 'uc halted' to be signalled before continuing */ | 75 | /* wait for 'uc halted' to be signalled before continuing */ |
76 | if (falcon->secret) { | 76 | if (falcon->secret && falcon->version < 4) { |
77 | nv_wait(falcon, 0x008, 0x00000010, 0x00000010); | 77 | if (!falcon->version) |
78 | nv_wait(falcon, 0x008, 0x00000010, 0x00000010); | ||
79 | else | ||
80 | nv_wait(falcon, 0x180, 0x80000000, 0); | ||
78 | nv_wo32(falcon, 0x004, 0x00000010); | 81 | nv_wo32(falcon, 0x004, 0x00000010); |
79 | } | 82 | } |
80 | 83 | ||
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c index f74c30aa33a0..48f06378d3f9 100644 --- a/drivers/gpu/drm/nouveau/core/core/subdev.c +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c | |||
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent, | |||
99 | if (ret) | 99 | if (ret) |
100 | return ret; | 100 | return ret; |
101 | 101 | ||
102 | mutex_init(&subdev->mutex); | 102 | __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key); |
103 | subdev->name = subname; | 103 | subdev->name = subname; |
104 | 104 | ||
105 | if (parent) { | 105 | if (parent) { |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 5982935ee23a..106bb19fdd9a 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h | |||
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend); | |||
50 | 50 | ||
51 | extern struct nouveau_ofuncs nouveau_object_ofuncs; | 51 | extern struct nouveau_ofuncs nouveau_object_ofuncs; |
52 | 52 | ||
53 | /* Don't allocate dynamically, because lockdep needs lock_class_keys to be in | ||
54 | * ".data". */ | ||
53 | struct nouveau_oclass { | 55 | struct nouveau_oclass { |
54 | u32 handle; | 56 | u32 handle; |
55 | struct nouveau_ofuncs *ofuncs; | 57 | struct nouveau_ofuncs * const ofuncs; |
56 | struct nouveau_omthds *omthds; | 58 | struct nouveau_omthds * const omthds; |
59 | struct lock_class_key lock_class_key; | ||
57 | }; | 60 | }; |
58 | 61 | ||
59 | #define nv_oclass(o) nv_object(o)->oclass | 62 | #define nv_oclass(o) nv_object(o)->oclass |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c index d6d16007ec1a..d62045f454b2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c | |||
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb) | |||
86 | return ret; | 86 | return ret; |
87 | } | 87 | } |
88 | 88 | ||
89 | if (!nouveau_mm_initialised(&pfb->tags) && tags) { | 89 | if (!nouveau_mm_initialised(&pfb->tags)) { |
90 | ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1); | 90 | ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1); |
91 | if (ret) | 91 | if (ret) |
92 | return ret; | 92 | return ret; |
93 | } | 93 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 487cb8c6c204..eac236ed19b2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | |||
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb) | |||
99 | struct nouveau_bios *bios = nouveau_bios(device); | 99 | struct nouveau_bios *bios = nouveau_bios(device); |
100 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | 100 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ |
101 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | 101 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ |
102 | u32 size; | 102 | u32 size, tags = 0; |
103 | int ret; | 103 | int ret; |
104 | 104 | ||
105 | pfb->ram.size = nv_rd32(pfb, 0x10020c); | 105 | pfb->ram.size = nv_rd32(pfb, 0x10020c); |
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb) | |||
140 | return ret; | 140 | return ret; |
141 | 141 | ||
142 | pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; | 142 | pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; |
143 | tags = nv_rd32(pfb, 0x100320); | ||
143 | break; | 144 | break; |
144 | } | 145 | } |
145 | 146 | ||
146 | return nv_rd32(pfb, 0x100320); | 147 | return tags; |
147 | } | 148 | } |
148 | 149 | ||
149 | static int | 150 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 69d7b1d0b9d6..1699a9083a2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -28,6 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <core/engine.h> | 30 | #include <core/engine.h> |
31 | #include <linux/swiotlb.h> | ||
31 | 32 | ||
32 | #include <subdev/fb.h> | 33 | #include <subdev/fb.h> |
33 | #include <subdev/vm.h> | 34 | #include <subdev/vm.h> |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 8b090f1eb51d..5e7aef23825a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
245 | return 0; | 245 | return 0; |
246 | } | 246 | } |
247 | 247 | ||
248 | static struct lock_class_key drm_client_lock_class_key; | ||
249 | |||
248 | static int | 250 | static int |
249 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 251 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
250 | { | 252 | { |
@@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
256 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 258 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
257 | if (ret) | 259 | if (ret) |
258 | return ret; | 260 | return ret; |
261 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
259 | 262 | ||
260 | dev->dev_private = drm; | 263 | dev->dev_private = drm; |
261 | drm->dev = dev; | 264 | drm->dev = dev; |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7a445666e71f..ee4cff534f10 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2909 | return -EINVAL; | 2909 | return -EINVAL; |
2910 | } | 2910 | } |
2911 | if (tiled) { | 2911 | if (tiled) { |
2912 | dst_offset = ib[idx+1]; | 2912 | dst_offset = radeon_get_ib_value(p, idx+1); |
2913 | dst_offset <<= 8; | 2913 | dst_offset <<= 8; |
2914 | 2914 | ||
2915 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2915 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
2916 | p->idx += count + 7; | 2916 | p->idx += count + 7; |
2917 | } else { | 2917 | } else { |
2918 | dst_offset = ib[idx+1]; | 2918 | dst_offset = radeon_get_ib_value(p, idx+1); |
2919 | dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; | 2919 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
2920 | 2920 | ||
2921 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2921 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2922 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2922 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2954 | DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); | 2954 | DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); |
2955 | return -EINVAL; | 2955 | return -EINVAL; |
2956 | } | 2956 | } |
2957 | dst_offset = ib[idx+1]; | 2957 | dst_offset = radeon_get_ib_value(p, idx+1); |
2958 | dst_offset <<= 8; | 2958 | dst_offset <<= 8; |
2959 | dst2_offset = ib[idx+2]; | 2959 | dst2_offset = radeon_get_ib_value(p, idx+2); |
2960 | dst2_offset <<= 8; | 2960 | dst2_offset <<= 8; |
2961 | src_offset = ib[idx+8]; | 2961 | src_offset = radeon_get_ib_value(p, idx+8); |
2962 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 2962 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
2963 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 2963 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
2964 | dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", | 2964 | dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", |
2965 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 2965 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3014 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); | 3014 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); |
3015 | return -EINVAL; | 3015 | return -EINVAL; |
3016 | } | 3016 | } |
3017 | dst_offset = ib[idx+1]; | 3017 | dst_offset = radeon_get_ib_value(p, idx+1); |
3018 | dst_offset <<= 8; | 3018 | dst_offset <<= 8; |
3019 | dst2_offset = ib[idx+2]; | 3019 | dst2_offset = radeon_get_ib_value(p, idx+2); |
3020 | dst2_offset <<= 8; | 3020 | dst2_offset <<= 8; |
3021 | src_offset = ib[idx+8]; | 3021 | src_offset = radeon_get_ib_value(p, idx+8); |
3022 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 3022 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
3023 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3023 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3024 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", | 3024 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", |
3025 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3025 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3046 | /* detile bit */ | 3046 | /* detile bit */ |
3047 | if (idx_value & (1 << 31)) { | 3047 | if (idx_value & (1 << 31)) { |
3048 | /* tiled src, linear dst */ | 3048 | /* tiled src, linear dst */ |
3049 | src_offset = ib[idx+1]; | 3049 | src_offset = radeon_get_ib_value(p, idx+1); |
3050 | src_offset <<= 8; | 3050 | src_offset <<= 8; |
3051 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 3051 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
3052 | 3052 | ||
3053 | dst_offset = ib[idx+7]; | 3053 | dst_offset = radeon_get_ib_value(p, idx+7); |
3054 | dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3054 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3055 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 3055 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
3056 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 3056 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
3057 | } else { | 3057 | } else { |
3058 | /* linear src, tiled dst */ | 3058 | /* linear src, tiled dst */ |
3059 | src_offset = ib[idx+7]; | 3059 | src_offset = radeon_get_ib_value(p, idx+7); |
3060 | src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3060 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3061 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3061 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
3062 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3062 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
3063 | 3063 | ||
3064 | dst_offset = ib[idx+1]; | 3064 | dst_offset = radeon_get_ib_value(p, idx+1); |
3065 | dst_offset <<= 8; | 3065 | dst_offset <<= 8; |
3066 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3066 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
3067 | } | 3067 | } |
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3098 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); | 3098 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); |
3099 | return -EINVAL; | 3099 | return -EINVAL; |
3100 | } | 3100 | } |
3101 | dst_offset = ib[idx+1]; | 3101 | dst_offset = radeon_get_ib_value(p, idx+1); |
3102 | dst_offset <<= 8; | 3102 | dst_offset <<= 8; |
3103 | dst2_offset = ib[idx+2]; | 3103 | dst2_offset = radeon_get_ib_value(p, idx+2); |
3104 | dst2_offset <<= 8; | 3104 | dst2_offset <<= 8; |
3105 | src_offset = ib[idx+8]; | 3105 | src_offset = radeon_get_ib_value(p, idx+8); |
3106 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 3106 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
3107 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3107 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3108 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", | 3108 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", |
3109 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3109 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3135 | /* detile bit */ | 3135 | /* detile bit */ |
3136 | if (idx_value & (1 << 31)) { | 3136 | if (idx_value & (1 << 31)) { |
3137 | /* tiled src, linear dst */ | 3137 | /* tiled src, linear dst */ |
3138 | src_offset = ib[idx+1]; | 3138 | src_offset = radeon_get_ib_value(p, idx+1); |
3139 | src_offset <<= 8; | 3139 | src_offset <<= 8; |
3140 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 3140 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
3141 | 3141 | ||
3142 | dst_offset = ib[idx+7]; | 3142 | dst_offset = radeon_get_ib_value(p, idx+7); |
3143 | dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3143 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3144 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 3144 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
3145 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 3145 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
3146 | } else { | 3146 | } else { |
3147 | /* linear src, tiled dst */ | 3147 | /* linear src, tiled dst */ |
3148 | src_offset = ib[idx+7]; | 3148 | src_offset = radeon_get_ib_value(p, idx+7); |
3149 | src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3149 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3150 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3150 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
3151 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3151 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
3152 | 3152 | ||
3153 | dst_offset = ib[idx+1]; | 3153 | dst_offset = radeon_get_ib_value(p, idx+1); |
3154 | dst_offset <<= 8; | 3154 | dst_offset <<= 8; |
3155 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3155 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
3156 | } | 3156 | } |
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3176 | switch (misc) { | 3176 | switch (misc) { |
3177 | case 0: | 3177 | case 0: |
3178 | /* L2L, byte */ | 3178 | /* L2L, byte */ |
3179 | src_offset = ib[idx+2]; | 3179 | src_offset = radeon_get_ib_value(p, idx+2); |
3180 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3180 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
3181 | dst_offset = ib[idx+1]; | 3181 | dst_offset = radeon_get_ib_value(p, idx+1); |
3182 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 3182 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
3183 | if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { | 3183 | if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { |
3184 | dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", | 3184 | dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", |
3185 | src_offset + count, radeon_bo_size(src_reloc->robj)); | 3185 | src_offset + count, radeon_bo_size(src_reloc->robj)); |
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3216 | DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); | 3216 | DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); |
3217 | return -EINVAL; | 3217 | return -EINVAL; |
3218 | } | 3218 | } |
3219 | dst_offset = ib[idx+1]; | 3219 | dst_offset = radeon_get_ib_value(p, idx+1); |
3220 | dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3220 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
3221 | dst2_offset = ib[idx+2]; | 3221 | dst2_offset = radeon_get_ib_value(p, idx+2); |
3222 | dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32; | 3222 | dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32; |
3223 | src_offset = ib[idx+3]; | 3223 | src_offset = radeon_get_ib_value(p, idx+3); |
3224 | src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 3224 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
3225 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3225 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3226 | dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", | 3226 | dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", |
3227 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3227 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3251 | } | 3251 | } |
3252 | } else { | 3252 | } else { |
3253 | /* L2L, dw */ | 3253 | /* L2L, dw */ |
3254 | src_offset = ib[idx+2]; | 3254 | src_offset = radeon_get_ib_value(p, idx+2); |
3255 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3255 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
3256 | dst_offset = ib[idx+1]; | 3256 | dst_offset = radeon_get_ib_value(p, idx+1); |
3257 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 3257 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
3258 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3258 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3259 | dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", | 3259 | dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", |
3260 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3260 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3279 | DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); | 3279 | DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); |
3280 | return -EINVAL; | 3280 | return -EINVAL; |
3281 | } | 3281 | } |
3282 | dst_offset = ib[idx+1]; | 3282 | dst_offset = radeon_get_ib_value(p, idx+1); |
3283 | dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; | 3283 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; |
3284 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { | 3284 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { |
3285 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", | 3285 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", |
3286 | dst_offset, radeon_bo_size(dst_reloc->robj)); | 3286 | dst_offset, radeon_bo_size(dst_reloc->robj)); |
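
Editor's note: the evergreen_dma_cs_parse() changes above consistently replace direct ib[idx+N] reads with radeon_get_ib_value(p, idx+N). As I read the patch, the accessor returns the dword from the parser's copy of the submitted command stream, whereas ib[] is the buffer that later gets patched with relocation offsets, so validating buffer sizes against ib[] could observe data that is stale or already modified. The address assembly itself is unchanged: a 40-bit GPU offset built from a low dword plus the low byte of a second dword, or a 32-bit value shifted left by 8 for tiled surfaces. A standalone sketch of that assembly follows; ib_addr() and the dword values are invented for the example, they are not radeon code.

#include <stdio.h>
#include <stdint.h>

/* Illustrative helper (not a driver function): build a DMA address the way
 * the parser does, from a low dword plus the low byte of a high dword. */
static uint64_t ib_addr(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)(hi & 0xff) << 32);
}

int main(void)
{
	uint32_t ib1 = 0x80001000, ib2 = 0x00000003;	/* made-up IB dwords */

	printf("linear dst: 0x%llx\n", (unsigned long long)ib_addr(ib1, ib2));
	printf("tiled dst:  0x%llx\n", (unsigned long long)((uint64_t)ib1 << 8));
	return 0;
}
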
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 69ec24ab8d63..9b2512bf1a46 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2623 | return -EINVAL; | 2623 | return -EINVAL; |
2624 | } | 2624 | } |
2625 | if (tiled) { | 2625 | if (tiled) { |
2626 | dst_offset = ib[idx+1]; | 2626 | dst_offset = radeon_get_ib_value(p, idx+1); |
2627 | dst_offset <<= 8; | 2627 | dst_offset <<= 8; |
2628 | 2628 | ||
2629 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2629 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
2630 | p->idx += count + 5; | 2630 | p->idx += count + 5; |
2631 | } else { | 2631 | } else { |
2632 | dst_offset = ib[idx+1]; | 2632 | dst_offset = radeon_get_ib_value(p, idx+1); |
2633 | dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; | 2633 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
2634 | 2634 | ||
2635 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2635 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2636 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2636 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2658 | /* detile bit */ | 2658 | /* detile bit */ |
2659 | if (idx_value & (1 << 31)) { | 2659 | if (idx_value & (1 << 31)) { |
2660 | /* tiled src, linear dst */ | 2660 | /* tiled src, linear dst */ |
2661 | src_offset = ib[idx+1]; | 2661 | src_offset = radeon_get_ib_value(p, idx+1); |
2662 | src_offset <<= 8; | 2662 | src_offset <<= 8; |
2663 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 2663 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
2664 | 2664 | ||
2665 | dst_offset = ib[idx+5]; | 2665 | dst_offset = radeon_get_ib_value(p, idx+5); |
2666 | dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 2666 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
2667 | ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2667 | ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2668 | ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2668 | ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
2669 | } else { | 2669 | } else { |
2670 | /* linear src, tiled dst */ | 2670 | /* linear src, tiled dst */ |
2671 | src_offset = ib[idx+5]; | 2671 | src_offset = radeon_get_ib_value(p, idx+5); |
2672 | src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 2672 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
2673 | ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2673 | ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
2674 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2674 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
2675 | 2675 | ||
2676 | dst_offset = ib[idx+1]; | 2676 | dst_offset = radeon_get_ib_value(p, idx+1); |
2677 | dst_offset <<= 8; | 2677 | dst_offset <<= 8; |
2678 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2678 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
2679 | } | 2679 | } |
2680 | p->idx += 7; | 2680 | p->idx += 7; |
2681 | } else { | 2681 | } else { |
2682 | if (p->family >= CHIP_RV770) { | 2682 | if (p->family >= CHIP_RV770) { |
2683 | src_offset = ib[idx+2]; | 2683 | src_offset = radeon_get_ib_value(p, idx+2); |
2684 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 2684 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
2685 | dst_offset = ib[idx+1]; | 2685 | dst_offset = radeon_get_ib_value(p, idx+1); |
2686 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 2686 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
2687 | 2687 | ||
2688 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2688 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2689 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2689 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2691 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2691 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
2692 | p->idx += 5; | 2692 | p->idx += 5; |
2693 | } else { | 2693 | } else { |
2694 | src_offset = ib[idx+2]; | 2694 | src_offset = radeon_get_ib_value(p, idx+2); |
2695 | src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 2695 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
2696 | dst_offset = ib[idx+1]; | 2696 | dst_offset = radeon_get_ib_value(p, idx+1); |
2697 | dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16; | 2697 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; |
2698 | 2698 | ||
2699 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2699 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2700 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2700 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2724 | DRM_ERROR("bad DMA_PACKET_WRITE\n"); | 2724 | DRM_ERROR("bad DMA_PACKET_WRITE\n"); |
2725 | return -EINVAL; | 2725 | return -EINVAL; |
2726 | } | 2726 | } |
2727 | dst_offset = ib[idx+1]; | 2727 | dst_offset = radeon_get_ib_value(p, idx+1); |
2728 | dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; | 2728 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; |
2729 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { | 2729 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { |
2730 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", | 2730 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", |
2731 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); | 2731 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1d8ff2f850ba..93f760e27a92 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <drm/radeon_drm.h> | 38 | #include <drm/radeon_drm.h> |
39 | #include <linux/seq_file.h> | 39 | #include <linux/seq_file.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/swiotlb.h> | ||
41 | #include "radeon_reg.h" | 42 | #include "radeon_reg.h" |
42 | #include "radeon.h" | 43 | #include "radeon.h" |
43 | 44 | ||
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 358cd7ee905b..7cd74e29cbc8 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void) | |||
162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
163 | #define DELTA(x,y) ((y)-(x)) | 163 | #define DELTA(x,y) ((y)-(x)) |
164 | #define TIME_NAME "PCC" | 164 | #define TIME_NAME "PCC" |
165 | #elif defined(CONFIG_MN10300) | 165 | #elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) |
166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
167 | #define DELTA(x, y) ((x) - (y)) | 167 | #define DELTA(x, y) ((x) - (y)) |
168 | #define TIME_NAME "TSC" | 168 | #define TIME_NAME "TSC" |
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 571b5145c079..8f3331517433 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #include "atl1c.h" | 22 | #include "atl1c.h" |
23 | 23 | ||
24 | #define ATL1C_DRV_VERSION "1.0.1.0-NAPI" | 24 | #define ATL1C_DRV_VERSION "1.0.1.1-NAPI" |
25 | char atl1c_driver_name[] = "atl1c"; | 25 | char atl1c_driver_name[] = "atl1c"; |
26 | char atl1c_driver_version[] = ATL1C_DRV_VERSION; | 26 | char atl1c_driver_version[] = ATL1C_DRV_VERSION; |
27 | 27 | ||
@@ -1649,6 +1649,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) | |||
1649 | u16 num_alloc = 0; | 1649 | u16 num_alloc = 0; |
1650 | u16 rfd_next_to_use, next_next; | 1650 | u16 rfd_next_to_use, next_next; |
1651 | struct atl1c_rx_free_desc *rfd_desc; | 1651 | struct atl1c_rx_free_desc *rfd_desc; |
1652 | dma_addr_t mapping; | ||
1652 | 1653 | ||
1653 | next_next = rfd_next_to_use = rfd_ring->next_to_use; | 1654 | next_next = rfd_next_to_use = rfd_ring->next_to_use; |
1654 | if (++next_next == rfd_ring->count) | 1655 | if (++next_next == rfd_ring->count) |
@@ -1675,9 +1676,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) | |||
1675 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 1676 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); |
1676 | buffer_info->skb = skb; | 1677 | buffer_info->skb = skb; |
1677 | buffer_info->length = adapter->rx_buffer_len; | 1678 | buffer_info->length = adapter->rx_buffer_len; |
1678 | buffer_info->dma = pci_map_single(pdev, vir_addr, | 1679 | mapping = pci_map_single(pdev, vir_addr, |
1679 | buffer_info->length, | 1680 | buffer_info->length, |
1680 | PCI_DMA_FROMDEVICE); | 1681 | PCI_DMA_FROMDEVICE); |
1682 | if (unlikely(pci_dma_mapping_error(pdev, mapping))) { | ||
1683 | dev_kfree_skb(skb); | ||
1684 | buffer_info->skb = NULL; | ||
1685 | buffer_info->length = 0; | ||
1686 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); | ||
1687 | netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); | ||
1688 | break; | ||
1689 | } | ||
1690 | buffer_info->dma = mapping; | ||
1681 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, | 1691 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, |
1682 | ATL1C_PCIMAP_FROMDEVICE); | 1692 | ATL1C_PCIMAP_FROMDEVICE); |
1683 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 1693 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
@@ -2012,7 +2022,29 @@ check_sum: | |||
2012 | return 0; | 2022 | return 0; |
2013 | } | 2023 | } |
2014 | 2024 | ||
2015 | static void atl1c_tx_map(struct atl1c_adapter *adapter, | 2025 | static void atl1c_tx_rollback(struct atl1c_adapter *adpt, |
2026 | struct atl1c_tpd_desc *first_tpd, | ||
2027 | enum atl1c_trans_queue type) | ||
2028 | { | ||
2029 | struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type]; | ||
2030 | struct atl1c_buffer *buffer_info; | ||
2031 | struct atl1c_tpd_desc *tpd; | ||
2032 | u16 first_index, index; | ||
2033 | |||
2034 | first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; | ||
2035 | index = first_index; | ||
2036 | while (index != tpd_ring->next_to_use) { | ||
2037 | tpd = ATL1C_TPD_DESC(tpd_ring, index); | ||
2038 | buffer_info = &tpd_ring->buffer_info[index]; | ||
2039 | atl1c_clean_buffer(adpt->pdev, buffer_info, 0); | ||
2040 | memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); | ||
2041 | if (++index == tpd_ring->count) | ||
2042 | index = 0; | ||
2043 | } | ||
2044 | tpd_ring->next_to_use = first_index; | ||
2045 | } | ||
2046 | |||
2047 | static int atl1c_tx_map(struct atl1c_adapter *adapter, | ||
2016 | struct sk_buff *skb, struct atl1c_tpd_desc *tpd, | 2048 | struct sk_buff *skb, struct atl1c_tpd_desc *tpd, |
2017 | enum atl1c_trans_queue type) | 2049 | enum atl1c_trans_queue type) |
2018 | { | 2050 | { |
@@ -2037,7 +2069,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2037 | buffer_info->length = map_len; | 2069 | buffer_info->length = map_len; |
2038 | buffer_info->dma = pci_map_single(adapter->pdev, | 2070 | buffer_info->dma = pci_map_single(adapter->pdev, |
2039 | skb->data, hdr_len, PCI_DMA_TODEVICE); | 2071 | skb->data, hdr_len, PCI_DMA_TODEVICE); |
2040 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 2072 | if (unlikely(pci_dma_mapping_error(adapter->pdev, |
2073 | buffer_info->dma))) | ||
2074 | goto err_dma; | ||
2075 | |||
2041 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, | 2076 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, |
2042 | ATL1C_PCIMAP_TODEVICE); | 2077 | ATL1C_PCIMAP_TODEVICE); |
2043 | mapped_len += map_len; | 2078 | mapped_len += map_len; |
@@ -2059,6 +2094,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2059 | buffer_info->dma = | 2094 | buffer_info->dma = |
2060 | pci_map_single(adapter->pdev, skb->data + mapped_len, | 2095 | pci_map_single(adapter->pdev, skb->data + mapped_len, |
2061 | buffer_info->length, PCI_DMA_TODEVICE); | 2096 | buffer_info->length, PCI_DMA_TODEVICE); |
2097 | if (unlikely(pci_dma_mapping_error(adapter->pdev, | ||
2098 | buffer_info->dma))) | ||
2099 | goto err_dma; | ||
2100 | |||
2062 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 2101 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); |
2063 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, | 2102 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, |
2064 | ATL1C_PCIMAP_TODEVICE); | 2103 | ATL1C_PCIMAP_TODEVICE); |
@@ -2080,6 +2119,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2080 | frag, 0, | 2119 | frag, 0, |
2081 | buffer_info->length, | 2120 | buffer_info->length, |
2082 | DMA_TO_DEVICE); | 2121 | DMA_TO_DEVICE); |
2122 | if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) | ||
2123 | goto err_dma; | ||
2124 | |||
2083 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 2125 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); |
2084 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, | 2126 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, |
2085 | ATL1C_PCIMAP_TODEVICE); | 2127 | ATL1C_PCIMAP_TODEVICE); |
@@ -2092,6 +2134,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2092 | /* The last buffer info contain the skb address, | 2134 | /* The last buffer info contain the skb address, |
2093 | so it will be free after unmap */ | 2135 | so it will be free after unmap */ |
2094 | buffer_info->skb = skb; | 2136 | buffer_info->skb = skb; |
2137 | |||
2138 | return 0; | ||
2139 | |||
2140 | err_dma: | ||
2141 | buffer_info->dma = 0; | ||
2142 | buffer_info->length = 0; | ||
2143 | return -1; | ||
2095 | } | 2144 | } |
2096 | 2145 | ||
2097 | static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, | 2146 | static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, |
@@ -2154,10 +2203,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, | |||
2154 | if (skb_network_offset(skb) != ETH_HLEN) | 2203 | if (skb_network_offset(skb) != ETH_HLEN) |
2155 | tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ | 2204 | tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ |
2156 | 2205 | ||
2157 | atl1c_tx_map(adapter, skb, tpd, type); | 2206 | if (atl1c_tx_map(adapter, skb, tpd, type) < 0) { |
2158 | atl1c_tx_queue(adapter, skb, tpd, type); | 2207 | netif_info(adapter, tx_done, adapter->netdev, |
2208 | "tx-skb droppted due to dma error\n"); | ||
2209 | /* roll back tpd/buffer */ | ||
2210 | atl1c_tx_rollback(adapter, tpd, type); | ||
2211 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
2212 | dev_kfree_skb(skb); | ||
2213 | } else { | ||
2214 | atl1c_tx_queue(adapter, skb, tpd, type); | ||
2215 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
2216 | } | ||
2159 | 2217 | ||
2160 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
2161 | return NETDEV_TX_OK; | 2218 | return NETDEV_TX_OK; |
2162 | } | 2219 | } |
2163 | 2220 | ||
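
Editor's note: the atl1c changes above follow one pattern: every DMA mapping (pci_map_single(), skb_frag_dma_map()) is now checked with pci_dma_mapping_error()/dma_mapping_error(), and when a mapping fails mid-packet the driver unwinds the descriptors it already filled (atl1c_tx_rollback()) and drops the skb instead of handing the NIC a bad address. Below is a toy user-space sketch of that map-then-unwind shape; toy_map() and toy_unmap() are stand-ins invented for the example, not driver functions.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Fake DMA mapping; in the driver these are pci_map_single() paired with
 * pci_dma_mapping_error(). */
static bool toy_map(size_t i, unsigned long *handle)
{
	if (i == 3)
		return false;		/* simulate a mapping failure */
	*handle = 0x1000 + i;
	return true;
}

static void toy_unmap(unsigned long handle) { (void)handle; }

int main(void)
{
	unsigned long mapped[8];
	size_t n = 0;

	for (size_t i = 0; i < 8; i++) {
		if (!toy_map(i, &mapped[n])) {
			/* roll back everything mapped so far, as
			 * atl1c_tx_rollback() does for the TPD ring,
			 * then drop the packet */
			while (n--)
				toy_unmap(mapped[n]);
			printf("mapping failed at piece %zu, packet dropped\n", i);
			return 1;
		}
		n++;
	}
	printf("all pieces mapped\n");
	return 0;
}
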
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 352190b9ebe7..79039439bfdc 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget) | |||
693 | * get notified when new packets arrive. | 693 | * get notified when new packets arrive. |
694 | */ | 694 | */ |
695 | macb_writel(bp, IER, MACB_RX_INT_FLAGS); | 695 | macb_writel(bp, IER, MACB_RX_INT_FLAGS); |
696 | |||
697 | /* Packets received while interrupts were disabled */ | ||
698 | status = macb_readl(bp, RSR); | ||
699 | if (unlikely(status)) | ||
700 | napi_reschedule(napi); | ||
696 | } | 701 | } |
697 | 702 | ||
698 | /* TODO: Handle errors */ | 703 | /* TODO: Handle errors */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d185a50870b2..39c6c5524633 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -2254,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str) | |||
2254 | } else if (!strncmp(opt, "pause:", 6)) { | 2254 | } else if (!strncmp(opt, "pause:", 6)) { |
2255 | if (kstrtoint(opt + 6, 0, &pause)) | 2255 | if (kstrtoint(opt + 6, 0, &pause)) |
2256 | goto err; | 2256 | goto err; |
2257 | } else if (!strncmp(opt, "eee_timer:", 6)) { | 2257 | } else if (!strncmp(opt, "eee_timer:", 10)) { |
2258 | if (kstrtoint(opt + 10, 0, &eee_timer)) | 2258 | if (kstrtoint(opt + 10, 0, &eee_timer)) |
2259 | goto err; | 2259 | goto err; |
2260 | } | 2260 | } |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index d7115eca5efe..4a8c25a22294 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -576,9 +576,14 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | |||
576 | if ((intf->num_altsetting == 2) && | 576 | if ((intf->num_altsetting == 2) && |
577 | !usb_set_interface(dev->udev, | 577 | !usb_set_interface(dev->udev, |
578 | intf->cur_altsetting->desc.bInterfaceNumber, | 578 | intf->cur_altsetting->desc.bInterfaceNumber, |
579 | CDC_NCM_COMM_ALTSETTING_MBIM) && | 579 | CDC_NCM_COMM_ALTSETTING_MBIM)) { |
580 | cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | 580 | if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) |
581 | return -ENODEV; | 581 | return -ENODEV; |
582 | else | ||
583 | usb_set_interface(dev->udev, | ||
584 | intf->cur_altsetting->desc.bInterfaceNumber, | ||
585 | CDC_NCM_COMM_ALTSETTING_NCM); | ||
586 | } | ||
582 | #endif | 587 | #endif |
583 | 588 | ||
584 | /* NCM data altsetting is always 1 */ | 589 | /* NCM data altsetting is always 1 */ |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index fc1b8d78f4bc..d98414168485 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif) | |||
132 | static void xenvif_down(struct xenvif *vif) | 132 | static void xenvif_down(struct xenvif *vif) |
133 | { | 133 | { |
134 | disable_irq(vif->irq); | 134 | disable_irq(vif->irq); |
135 | del_timer_sync(&vif->credit_timeout); | ||
135 | xen_netbk_deschedule_xenvif(vif); | 136 | xen_netbk_deschedule_xenvif(vif); |
136 | xen_netbk_remove_xenvif(vif); | 137 | xen_netbk_remove_xenvif(vif); |
137 | } | 138 | } |
@@ -365,8 +366,6 @@ void xenvif_disconnect(struct xenvif *vif) | |||
365 | atomic_dec(&vif->refcnt); | 366 | atomic_dec(&vif->refcnt); |
366 | wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); | 367 | wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); |
367 | 368 | ||
368 | del_timer_sync(&vif->credit_timeout); | ||
369 | |||
370 | if (vif->irq) | 369 | if (vif->irq) |
371 | unbind_from_irqhandler(vif->irq, vif); | 370 | unbind_from_irqhandler(vif->irq, vif); |
372 | 371 | ||
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 2b9520c46e97..cd49ba949636 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -911,13 +911,13 @@ static int netbk_count_requests(struct xenvif *vif, | |||
911 | if (frags >= work_to_do) { | 911 | if (frags >= work_to_do) { |
912 | netdev_err(vif->dev, "Need more frags\n"); | 912 | netdev_err(vif->dev, "Need more frags\n"); |
913 | netbk_fatal_tx_err(vif); | 913 | netbk_fatal_tx_err(vif); |
914 | return -frags; | 914 | return -ENODATA; |
915 | } | 915 | } |
916 | 916 | ||
917 | if (unlikely(frags >= MAX_SKB_FRAGS)) { | 917 | if (unlikely(frags >= MAX_SKB_FRAGS)) { |
918 | netdev_err(vif->dev, "Too many frags\n"); | 918 | netdev_err(vif->dev, "Too many frags\n"); |
919 | netbk_fatal_tx_err(vif); | 919 | netbk_fatal_tx_err(vif); |
920 | return -frags; | 920 | return -E2BIG; |
921 | } | 921 | } |
922 | 922 | ||
923 | memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), | 923 | memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), |
@@ -925,7 +925,7 @@ static int netbk_count_requests(struct xenvif *vif, | |||
925 | if (txp->size > first->size) { | 925 | if (txp->size > first->size) { |
926 | netdev_err(vif->dev, "Frag is bigger than frame.\n"); | 926 | netdev_err(vif->dev, "Frag is bigger than frame.\n"); |
927 | netbk_fatal_tx_err(vif); | 927 | netbk_fatal_tx_err(vif); |
928 | return -frags; | 928 | return -EIO; |
929 | } | 929 | } |
930 | 930 | ||
931 | first->size -= txp->size; | 931 | first->size -= txp->size; |
@@ -935,7 +935,7 @@ static int netbk_count_requests(struct xenvif *vif, | |||
935 | netdev_err(vif->dev, "txp->offset: %x, size: %u\n", | 935 | netdev_err(vif->dev, "txp->offset: %x, size: %u\n", |
936 | txp->offset, txp->size); | 936 | txp->offset, txp->size); |
937 | netbk_fatal_tx_err(vif); | 937 | netbk_fatal_tx_err(vif); |
938 | return -frags; | 938 | return -EINVAL; |
939 | } | 939 | } |
940 | } while ((txp++)->flags & XEN_NETTXF_more_data); | 940 | } while ((txp++)->flags & XEN_NETTXF_more_data); |
941 | return frags; | 941 | return frags; |
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 10c1a3454e48..81c5077feff3 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c | |||
@@ -350,7 +350,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) | |||
350 | /* Enable the clockwatch on ST Variants */ | 350 | /* Enable the clockwatch on ST Variants */ |
351 | if (vendor->clockwatch) | 351 | if (vendor->clockwatch) |
352 | data |= RTC_CR_CWEN; | 352 | data |= RTC_CR_CWEN; |
353 | writel(data | RTC_CR_EN, ldata->base + RTC_CR); | 353 | else |
354 | data |= RTC_CR_EN; | ||
355 | writel(data, ldata->base + RTC_CR); | ||
354 | 356 | ||
355 | /* | 357 | /* |
356 | * On ST PL031 variants, the RTC reset value does not provide correct | 358 | * On ST PL031 variants, the RTC reset value does not provide correct |
diff --git a/include/net/sock.h b/include/net/sock.h index a340ab46b41c..a66caa223d18 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1041,7 +1041,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk) | |||
1041 | sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); | 1041 | sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); |
1042 | } | 1042 | } |
1043 | 1043 | ||
1044 | inline void sk_refcnt_debug_release(const struct sock *sk) | 1044 | static inline void sk_refcnt_debug_release(const struct sock *sk) |
1045 | { | 1045 | { |
1046 | if (atomic_read(&sk->sk_refcnt) != 1) | 1046 | if (atomic_read(&sk->sk_refcnt) != 1) |
1047 | printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", | 1047 | printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", |
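
Editor's note: the sock.h one-liner matters because the function body lives in a header. A header-defined helper needs internal linkage (static inline) so that every translation unit including <net/sock.h> gets its own copy and the linker never has to resolve one external sk_refcnt_debug_release symbol, which is where plain inline can break depending on gnu89 versus C99 inline semantics. A small user-space toy showing the same shape is below; the helper name and the check are illustrative, not the kernel's.

#include <stdio.h>

/* Header-style "static inline" helper: each .c file that included such a
 * header would get its own internal-linkage copy, so no duplicate or
 * unresolved symbol ever reaches the linker. */
static inline int refcnt_release_would_warn(int refcnt)
{
	return refcnt != 1;	/* mirrors the "destruction delayed" condition */
}

int main(void)
{
	printf("%d\n", refcnt_release_would_warn(2));	/* 1: would warn */
	printf("%d\n", refcnt_release_would_warn(1));	/* 0: clean release */
	return 0;
}
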
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 77cdba9df274..bb991dfe134f 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h | |||
@@ -28,25 +28,16 @@ | |||
28 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION | 28 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * Architectures where both 32- and 64-bit binaries can be executed | 31 | * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed |
32 | * on 64-bit kernels need this. This keeps the structure format | 32 | * back to the kernel via ioctl from userspace. On architectures where 32- and |
33 | * uniform, and makes sure the wait_queue_token isn't too big to be | 33 | * 64-bit userspace binaries can be executed it's important that the size of |
34 | * passed back down to the kernel. | 34 | * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we |
35 | * | 35 | * do not break the binary ABI interface by changing the structure size. |
36 | * This assumes that on these architectures: | ||
37 | * mode 32 bit 64 bit | ||
38 | * ------------------------- | ||
39 | * int 32 bit 32 bit | ||
40 | * long 32 bit 64 bit | ||
41 | * | ||
42 | * If so, 32-bit user-space code should be backwards compatible. | ||
43 | */ | 36 | */ |
44 | 37 | #if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */ | |
45 | #if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \ | ||
46 | || defined(__powerpc__) || defined(__s390__) | ||
47 | typedef unsigned int autofs_wqt_t; | ||
48 | #else | ||
49 | typedef unsigned long autofs_wqt_t; | 38 | typedef unsigned long autofs_wqt_t; |
39 | #else | ||
40 | typedef unsigned int autofs_wqt_t; | ||
50 | #endif | 41 | #endif |
51 | 42 | ||
52 | /* Packet types */ | 43 | /* Packet types */ |
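
Note on the auto_fs.h change above: the point of the type selection is that sizeof(autofs_wqt_t) must be identical for 32-bit and 64-bit userspace talking to the same kernel, so biarch platforms get a 32-bit type and only pure 64-bit platforms (ia64, alpha) keep unsigned long; inverting the arch list also means any future biarch architecture gets the safe default without being listed by hand. A standalone sketch of the size argument (the #if mirrors the hunk; the printf is illustrative):

    #include <stdio.h>

    #if defined(__ia64__) || defined(__alpha__)  /* pure 64-bit, no 32-bit userspace */
    typedef unsigned long autofs_wqt_t;
    #else                                        /* everything else, including biarch */
    typedef unsigned int autofs_wqt_t;
    #endif

    int main(void)
    {
        /* On x86-64 this prints 4 for both -m32 and -m64 builds, so the
         * structure passed back to the kernel via ioctl has one layout. */
        printf("sizeof(autofs_wqt_t) = %zu\n", sizeof(autofs_wqt_t));
        return 0;
    }
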
diff --git a/kernel/pid.c b/kernel/pid.c index de9af600006f..f2c6a6825098 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -331,7 +331,7 @@ out: | |||
331 | return pid; | 331 | return pid; |
332 | 332 | ||
333 | out_unlock: | 333 | out_unlock: |
334 | spin_unlock(&pidmap_lock); | 334 | spin_unlock_irq(&pidmap_lock); |
335 | out_free: | 335 | out_free: |
336 | while (++i <= ns->level) | 336 | while (++i <= ns->level) |
337 | free_pidmap(pid->numbers + i); | 337 | free_pidmap(pid->numbers + i); |
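
Note on the pid.c change above: an error path has to undo exactly what the success path set up; pidmap_lock is evidently taken with interrupts disabled (the matching spin_lock_irq sits outside the hunk, so the pairing is inferred), and a bare spin_unlock on the bail-out path would return to the caller with interrupts still off. A toy user-space model of the pairing rule, with a flag standing in for the CPU irq state:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_disabled;

    static void lock_irq(void)   { irqs_disabled = true;  }  /* models spin_lock_irq()   */
    static void unlock_irq(void) { irqs_disabled = false; }  /* models spin_unlock_irq() */

    static int alloc_something(bool fail)
    {
        lock_irq();
        if (fail) {
            unlock_irq();   /* the fix: a plain spin_unlock() analogue
                             * would leave irqs_disabled set here */
            return -1;
        }
        unlock_irq();
        return 0;
    }

    int main(void)
    {
        alloc_something(true);
        assert(!irqs_disabled);   /* fires if the error path forgets the irq part */
        puts("error path restores irq state");
        return 0;
    }
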
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09255ec8159c..fbb60b103e64 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | |||
3030 | if (memcg) { | 3030 | if (memcg) { |
3031 | s->memcg_params->memcg = memcg; | 3031 | s->memcg_params->memcg = memcg; |
3032 | s->memcg_params->root_cache = root_cache; | 3032 | s->memcg_params->root_cache = root_cache; |
3033 | } | 3033 | } else |
3034 | s->memcg_params->is_root_cache = true; | ||
3035 | |||
3034 | return 0; | 3036 | return 0; |
3035 | } | 3037 | } |
3036 | 3038 | ||
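
Note on the memcg_register_cache() change above: a cache registered with no memcg is the root cache, and the added else branch records that explicitly in memcg_params instead of leaving is_root_cache at whatever the allocation produced (how callers consume the flag is outside the hunk). A minimal standalone model of the branch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct memcg_params {
        void *memcg;
        void *root_cache;
        bool is_root_cache;
    };

    static void register_cache(struct memcg_params *p, void *memcg, void *root_cache)
    {
        if (memcg) {
            p->memcg = memcg;
            p->root_cache = root_cache;
        } else {
            p->is_root_cache = true;   /* the branch added by the hunk */
        }
    }

    int main(void)
    {
        struct memcg_params p = { 0 };

        register_cache(&p, NULL, NULL);
        printf("is_root_cache=%d\n", p.is_root_cache);
        return 0;
    }
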
diff --git a/mm/mlock.c b/mm/mlock.c index f0b9ce572fc7..c9bd528b01d2 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) | |||
517 | static int do_mlockall(int flags) | 517 | static int do_mlockall(int flags) |
518 | { | 518 | { |
519 | struct vm_area_struct * vma, * prev = NULL; | 519 | struct vm_area_struct * vma, * prev = NULL; |
520 | unsigned int def_flags = 0; | ||
521 | 520 | ||
522 | if (flags & MCL_FUTURE) | 521 | if (flags & MCL_FUTURE) |
523 | def_flags = VM_LOCKED; | 522 | current->mm->def_flags |= VM_LOCKED; |
524 | current->mm->def_flags = def_flags; | 523 | else |
524 | current->mm->def_flags &= ~VM_LOCKED; | ||
525 | if (flags == MCL_FUTURE) | 525 | if (flags == MCL_FUTURE) |
526 | goto out; | 526 | goto out; |
527 | 527 | ||
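
Note on the do_mlockall() change above: the old code rebuilt mm->def_flags from zero, so calling mlockall() wiped any other default VM flags the process already had; the fix touches only the VM_LOCKED bit, setting it for MCL_FUTURE and clearing it otherwise. A standalone sketch of the bit manipulation (flag values copied from common kernel/uapi definitions, used here only for illustration):

    #include <stdio.h>

    #define VM_LOCKED   0x00002000UL
    #define MCL_FUTURE  2

    static unsigned long update_def_flags(unsigned long def_flags, int flags)
    {
        if (flags & MCL_FUTURE)
            def_flags |= VM_LOCKED;
        else
            def_flags &= ~VM_LOCKED;   /* old code: def_flags = 0 or VM_LOCKED,
                                        * clobbering every other bit */
        return def_flags;
    }

    int main(void)
    {
        unsigned long unrelated = 0x100;   /* stands in for some other default flag */

        /* Prints 0x100: the unrelated flag survives mlockall() without MCL_FUTURE. */
        printf("%#lx\n", update_def_flags(unrelated, 0));
        return 0;
    }
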
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index df2022ff0c8a..9673d96b1ba7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page) | |||
773 | set_pageblock_migratetype(page, MIGRATE_CMA); | 773 | set_pageblock_migratetype(page, MIGRATE_CMA); |
774 | __free_pages(page, pageblock_order); | 774 | __free_pages(page, pageblock_order); |
775 | totalram_pages += pageblock_nr_pages; | 775 | totalram_pages += pageblock_nr_pages; |
776 | #ifdef CONFIG_HIGHMEM | ||
777 | if (PageHighMem(page)) | ||
778 | totalhigh_pages += pageblock_nr_pages; | ||
779 | #endif | ||
776 | } | 780 | } |
777 | #endif | 781 | #endif |
778 | 782 | ||
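
Note on the init_cma_reserved_pageblock() change above: pages handed back from a CMA reservation were added to totalram_pages but, when the block lives in highmem, not to totalhigh_pages, so the reported lowmem/highmem split drifted; the added CONFIG_HIGHMEM-only lines keep the two counters in step. A toy model of the accounting:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long totalram_pages, totalhigh_pages;

    static void free_cma_pageblock(unsigned long nr_pages, bool is_highmem)
    {
        totalram_pages += nr_pages;
        if (is_highmem)               /* the hunk's addition, under CONFIG_HIGHMEM */
            totalhigh_pages += nr_pages;
    }

    int main(void)
    {
        free_cma_pageblock(512, true);
        printf("ram=%lu high=%lu\n", totalram_pages, totalhigh_pages);
        return 0;
    }
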
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index ea0bd31d41c2..761a59002e34 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res, | |||
440 | /* this is an hash collision with the temporary selected node. Choose | 440 | /* this is an hash collision with the temporary selected node. Choose |
441 | * the one with the lowest address | 441 | * the one with the lowest address |
442 | */ | 442 | */ |
443 | if ((tmp_max == max) && | 443 | if ((tmp_max == max) && max_orig_node && |
444 | (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) | 444 | (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) |
445 | goto out; | 445 | goto out; |
446 | 446 | ||
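
Note on the batadv_is_orig_node_eligible() change above: on the first candidate there is no current maximum yet, so max_orig_node is NULL and the tie-break comparison dereferenced it; the added "max_orig_node &&" term relies on && short-circuiting to skip the dereference. A standalone model of the guarded tie-break (memcmp() stands in for batadv_compare_eth(), whose exact semantics are outside the hunk):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct orig_node { unsigned char orig[6]; };

    static bool skip_candidate(int tmp_max, int max,
                               const struct orig_node *candidate,
                               const struct orig_node *max_orig_node)
    {
        /* Without the NULL check, max_orig_node->orig faults on the
         * very first candidate, when no maximum has been chosen yet. */
        return tmp_max == max && max_orig_node &&
               memcmp(candidate->orig, max_orig_node->orig, 6) > 0;
    }

    int main(void)
    {
        struct orig_node cand = { { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 } };

        printf("%d\n", skip_candidate(5, 5, &cand, NULL));   /* 0, and no crash */
        return 0;
    }
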
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index b89a8c3186cd..54087e96d7b8 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -97,9 +97,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net) | |||
97 | if (table == NULL) | 97 | if (table == NULL) |
98 | goto err_alloc; | 98 | goto err_alloc; |
99 | 99 | ||
100 | table[0].data = &net->ipv6.frags.high_thresh; | 100 | table[0].data = &net->nf_frag.frags.timeout; |
101 | table[1].data = &net->ipv6.frags.low_thresh; | 101 | table[1].data = &net->nf_frag.frags.low_thresh; |
102 | table[2].data = &net->ipv6.frags.timeout; | 102 | table[2].data = &net->nf_frag.frags.high_thresh; |
103 | } | 103 | } |
104 | 104 | ||
105 | hdr = register_net_sysctl(net, "net/netfilter", table); | 105 | hdr = register_net_sysctl(net, "net/netfilter", table); |
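
Note on the nf_ct_frag6_sysctl_register() change above: the per-netns copies of the reassembly sysctls had their .data pointers aimed at the generic ipv6 fragment parameters instead of the conntrack-specific block in net->nf_frag.frags, so writes through these sysctls tuned the wrong state; the entry order (timeout, low_thresh, high_thresh) follows the template table, which sits outside the hunk. A toy model of why the .data wiring matters (the sysctl names are assumptions for illustration):

    #include <stdio.h>

    struct frag_params { int timeout, low_thresh, high_thresh; };
    struct ctl_entry   { const char *name; int *data; };

    int main(void)
    {
        struct frag_params ipv6_frags = { 0 }, nf_frag_frags = { 0 };
        struct ctl_entry table[] = {
            { "nf_conntrack_frag6_timeout",     &nf_frag_frags.timeout },
            { "nf_conntrack_frag6_low_thresh",  &nf_frag_frags.low_thresh },
            { "nf_conntrack_frag6_high_thresh", &nf_frag_frags.high_thresh },
        };

        *table[0].data = 30;   /* a "sysctl write" now lands on the right block */
        printf("nf_frag timeout=%d, ipv6 timeout=%d\n",
               nf_frag_frags.timeout, ipv6_frags.timeout);
        return 0;
    }
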
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 2f380f73c4c0..1343a4bb4031 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -236,7 +236,9 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, | |||
236 | /* We only allow helper re-assignment of the same sort since | 236 | /* We only allow helper re-assignment of the same sort since |
237 | * we cannot reallocate the helper extension area. | 237 | * we cannot reallocate the helper extension area. |
238 | */ | 238 | */ |
239 | if (help->helper != helper) { | 239 | struct nf_conntrack_helper *tmp = rcu_dereference(help->helper); |
240 | |||
241 | if (tmp && tmp->help != helper->help) { | ||
240 | RCU_INIT_POINTER(help->helper, NULL); | 242 | RCU_INIT_POINTER(help->helper, NULL); |
241 | goto out; | 243 | goto out; |
242 | } | 244 | } |
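
Note on the __nf_ct_try_assign_helper() change above: per the comment in the hunk, re-assignment is allowed for a helper of the same sort, so the test now fetches the currently attached helper with rcu_dereference() and compares the two help routines rather than the helper pointers themselves; a NULL current helper no longer forces a reset either. A minimal standalone model of the new test (the struct is reduced to the one field the comparison needs):

    #include <stdbool.h>
    #include <stdio.h>

    typedef int (*help_fn)(void);

    struct helper { help_fn help; };

    static bool must_detach(const struct helper *cur, const struct helper *candidate)
    {
        /* old test: cur != candidate (pointer identity) */
        return cur && cur->help != candidate->help;
    }

    static int ftp_help(void) { return 0; }

    int main(void)
    {
        struct helper a = { ftp_help }, b = { ftp_help };

        printf("%d\n", must_detach(&a, &b));   /* 0: same sort, keep the helper */
        return 0;
    }
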
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 2334cc5d2b16..79c2d507ac49 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1782,6 +1782,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1782 | if (nlh->nlmsg_flags & NLM_F_CREATE) { | 1782 | if (nlh->nlmsg_flags & NLM_F_CREATE) { |
1783 | enum ip_conntrack_events events; | 1783 | enum ip_conntrack_events events; |
1784 | 1784 | ||
1785 | if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) | ||
1786 | return -EINVAL; | ||
1787 | |||
1785 | ct = ctnetlink_create_conntrack(net, zone, cda, &otuple, | 1788 | ct = ctnetlink_create_conntrack(net, zone, cda, &otuple, |
1786 | &rtuple, u3); | 1789 | &rtuple, u3); |
1787 | if (IS_ERR(ct)) | 1790 | if (IS_ERR(ct)) |
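
Note on the ctnetlink_new_conntrack() change above: a create request has to carry both the original and the reply tuple; without the added check, a message with NLM_F_CREATE but a missing CTA_TUPLE_ORIG or CTA_TUPLE_REPLY would go on to build an entry from tuples that were never filled in (the tuple parsing sits above the hunk, so that part is inferred). A toy model of the added precondition:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct request { bool create, has_tuple_orig, has_tuple_reply; };

    static int new_conntrack(const struct request *req)
    {
        if (req->create) {
            if (!req->has_tuple_orig || !req->has_tuple_reply)
                return -EINVAL;    /* the added check */
            /* ... create the entry ... */
        }
        return 0;
    }

    int main(void)
    {
        struct request bad = { .create = true, .has_tuple_orig = true };

        printf("%d\n", new_conntrack(&bad));   /* -22 (-EINVAL) */
        return 0;
    }
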
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 7521d944c0fb..cf4852814e0c 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig | |||
@@ -3,8 +3,8 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig IP_SCTP | 5 | menuconfig IP_SCTP |
6 | tristate "The SCTP Protocol (EXPERIMENTAL)" | 6 | tristate "The SCTP Protocol" |
7 | depends on INET && EXPERIMENTAL | 7 | depends on INET |
8 | depends on IPV6 || IPV6=n | 8 | depends on IPV6 || IPV6=n |
9 | select CRYPTO | 9 | select CRYPTO |
10 | select CRYPTO_HMAC | 10 | select CRYPTO_HMAC |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f3f0f4dc31dd..391a245d5203 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
326 | */ | 326 | */ |
327 | rcu_read_lock(); | 327 | rcu_read_lock(); |
328 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { | 328 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { |
329 | if (!laddr->valid && laddr->state != SCTP_ADDR_SRC) | 329 | if (!laddr->valid) |
330 | continue; | 330 | continue; |
331 | if ((laddr->a.sa.sa_family == AF_INET6) && | 331 | if ((laddr->state == SCTP_ADDR_SRC) && |
332 | (laddr->a.sa.sa_family == AF_INET6) && | ||
332 | (scope <= sctp_scope(&laddr->a))) { | 333 | (scope <= sctp_scope(&laddr->a))) { |
333 | bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); | 334 | bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); |
334 | if (!baddr || (matchlen < bmatchlen)) { | 335 | if (!baddr || (matchlen < bmatchlen)) { |
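
Note on the sctp_v6_get_dst() change above: the old skip test "!laddr->valid && laddr->state != SCTP_ADDR_SRC" only skips an address when both conditions hold at once, so an address that is invalid but marked SRC, or valid but not a source address, still reached the match logic; the rewrite skips anything invalid and then requires SCTP_ADDR_SRC in the positive match. A standalone comparison of the two predicates (enum values invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    enum { SCTP_ADDR_NEW, SCTP_ADDR_SRC };   /* illustrative values only */

    struct laddr { bool valid; int state; };

    static bool usable_old(const struct laddr *l)
    {
        /* skip only when invalid AND not SRC, i.e. both at once */
        return !(!l->valid && l->state != SCTP_ADDR_SRC);
    }

    static bool usable_new(const struct laddr *l)
    {
        return l->valid && l->state == SCTP_ADDR_SRC;
    }

    int main(void)
    {
        struct laddr not_src = { .valid = true, .state = SCTP_ADDR_NEW };

        /* old=1 new=0: the old test let a non-source address be considered */
        printf("old=%d new=%d\n", usable_old(&not_src), usable_new(&not_src));
        return 0;
    }
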
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 54f89f90ac33..2655c9f4ecad 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -774,6 +774,7 @@ void tipc_bclink_init(void) | |||
774 | bcl->owner = &bclink->node; | 774 | bcl->owner = &bclink->node; |
775 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; | 775 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; |
776 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); | 776 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); |
777 | spin_lock_init(&bcbearer->bearer.lock); | ||
777 | bcl->b_ptr = &bcbearer->bearer; | 778 | bcl->b_ptr = &bcbearer->bearer; |
778 | bcl->state = WORKING_WORKING; | 779 | bcl->state = WORKING_WORKING; |
779 | strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); | 780 | strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); |