diff options
66 files changed, 831 insertions, 743 deletions
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt index e540fd67f767..b443f1de0e5a 100644 --- a/Documentation/x86/boot.txt +++ b/Documentation/x86/boot.txt | |||
@@ -390,6 +390,7 @@ Protocol: 2.00+ | |||
390 | F Special (0xFF = undefined) | 390 | F Special (0xFF = undefined) |
391 | 10 Reserved | 391 | 10 Reserved |
392 | 11 Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de> | 392 | 11 Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de> |
393 | 12 OVMF UEFI virtualization stack | ||
393 | 394 | ||
394 | Please contact <hpa@zytor.com> if you need a bootloader ID | 395 | Please contact <hpa@zytor.com> if you need a bootloader ID |
395 | value assigned. | 396 | value assigned. |
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 4eb6d005ffaa..86dff32a0737 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h | |||
@@ -7,8 +7,14 @@ | |||
7 | 7 | ||
8 | #ifndef __ASSEMBLER__ | 8 | #ifndef __ASSEMBLER__ |
9 | unsigned int scu_get_core_count(void __iomem *); | 9 | unsigned int scu_get_core_count(void __iomem *); |
10 | void scu_enable(void __iomem *); | ||
11 | int scu_power_mode(void __iomem *, unsigned int); | 10 | int scu_power_mode(void __iomem *, unsigned int); |
11 | |||
12 | #ifdef CONFIG_SMP | ||
13 | void scu_enable(void __iomem *scu_base); | ||
14 | #else | ||
15 | static inline void scu_enable(void __iomem *scu_base) {} | ||
16 | #endif | ||
17 | |||
12 | #endif | 18 | #endif |
13 | 19 | ||
14 | #endif | 20 | #endif |
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index b9f015e843d8..45eac87ed66a 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c | |||
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base) | |||
75 | int scu_power_mode(void __iomem *scu_base, unsigned int mode) | 75 | int scu_power_mode(void __iomem *scu_base, unsigned int mode) |
76 | { | 76 | { |
77 | unsigned int val; | 77 | unsigned int val; |
78 | int cpu = cpu_logical_map(smp_processor_id()); | 78 | int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); |
79 | 79 | ||
80 | if (mode > 3 || mode == 1 || cpu > 3) | 80 | if (mode > 3 || mode == 1 || cpu > 3) |
81 | return -EINVAL; | 81 | return -EINVAL; |
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 981dc1e1da51..e6c061282939 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <asm/arch_timer.h> | 29 | #include <asm/arch_timer.h> |
30 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
31 | #include <asm/cputype.h> | ||
31 | #include <asm/smp_plat.h> | 32 | #include <asm/smp_plat.h> |
32 | #include <asm/smp_twd.h> | 33 | #include <asm/smp_twd.h> |
33 | #include <asm/hardware/arm_timer.h> | 34 | #include <asm/hardware/arm_timer.h> |
@@ -59,7 +60,7 @@ static void __init highbank_scu_map_io(void) | |||
59 | 60 | ||
60 | void highbank_set_cpu_jump(int cpu, void *jump_addr) | 61 | void highbank_set_cpu_jump(int cpu, void *jump_addr) |
61 | { | 62 | { |
62 | cpu = cpu_logical_map(cpu); | 63 | cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0); |
63 | writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu)); | 64 | writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu)); |
64 | __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); | 65 | __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16); |
65 | outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), | 66 | outer_clean_range(HB_JUMP_TABLE_PHYS(cpu), |
diff --git a/arch/arm/mach-highbank/sysregs.h b/arch/arm/mach-highbank/sysregs.h index 70af9d13fcef..5995df7f2622 100644 --- a/arch/arm/mach-highbank/sysregs.h +++ b/arch/arm/mach-highbank/sysregs.h | |||
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base; | |||
37 | 37 | ||
38 | static inline void highbank_set_core_pwr(void) | 38 | static inline void highbank_set_core_pwr(void) |
39 | { | 39 | { |
40 | int cpu = cpu_logical_map(smp_processor_id()); | 40 | int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); |
41 | if (scu_base_addr) | 41 | if (scu_base_addr) |
42 | scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); | 42 | scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); |
43 | else | 43 | else |
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void) | |||
46 | 46 | ||
47 | static inline void highbank_clear_core_pwr(void) | 47 | static inline void highbank_clear_core_pwr(void) |
48 | { | 48 | { |
49 | int cpu = cpu_logical_map(smp_processor_id()); | 49 | int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0); |
50 | if (scu_base_addr) | 50 | if (scu_base_addr) |
51 | scu_power_mode(scu_base_addr, SCU_PM_NORMAL); | 51 | scu_power_mode(scu_base_addr, SCU_PM_NORMAL); |
52 | else | 52 | else |
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index ae700f49e51d..b0768a657920 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h | |||
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs); | |||
130 | #define start_thread(_regs, _pc, _usp) \ | 130 | #define start_thread(_regs, _pc, _usp) \ |
131 | do { \ | 131 | do { \ |
132 | (_regs)->pc = (_pc); \ | 132 | (_regs)->pc = (_pc); \ |
133 | ((struct switch_stack *)(_regs))[-1].a6 = 0; \ | ||
134 | setframeformat(_regs); \ | 133 | setframeformat(_regs); \ |
135 | if (current->mm) \ | 134 | if (current->mm) \ |
136 | (_regs)->d5 = current->mm->start_data; \ | 135 | (_regs)->d5 = current->mm->start_data; \ |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a5f4f5a1d24b..0aa98db8a80d 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires, | |||
120 | nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); | 120 | nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); |
121 | do_div(nsecs, 125); | 121 | do_div(nsecs, 125); |
122 | S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); | 122 | S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); |
123 | /* Program the maximum value if we have an overflow (== year 2042) */ | ||
124 | if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) | ||
125 | S390_lowcore.clock_comparator = -1ULL; | ||
123 | set_clock_comparator(S390_lowcore.clock_comparator); | 126 | set_clock_comparator(S390_lowcore.clock_comparator); |
124 | return 0; | 127 | return 0; |
125 | } | 128 | } |
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 875d008828b8..1bb7ad4aeff4 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG | |||
140 | 140 | ||
141 | source "init/Kconfig" | 141 | source "init/Kconfig" |
142 | 142 | ||
143 | source "kernel/Kconfig.freezer" | ||
144 | |||
143 | menu "Tilera-specific configuration" | 145 | menu "Tilera-specific configuration" |
144 | 146 | ||
145 | config NR_CPUS | 147 | config NR_CPUS |
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 2a9b293fece6..31672918064c 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h | |||
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr) | |||
250 | #define iowrite32 writel | 250 | #define iowrite32 writel |
251 | #define iowrite64 writeq | 251 | #define iowrite64 writeq |
252 | 252 | ||
253 | static inline void memset_io(void *dst, int val, size_t len) | 253 | #if CHIP_HAS_MMIO() || defined(CONFIG_PCI) |
254 | |||
255 | static inline void memset_io(volatile void *dst, int val, size_t len) | ||
254 | { | 256 | { |
255 | int x; | 257 | int x; |
256 | BUG_ON((unsigned long)dst & 0x3); | 258 | BUG_ON((unsigned long)dst & 0x3); |
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, | |||
277 | writel(*(u32 *)(src + x), dst + x); | 279 | writel(*(u32 *)(src + x), dst + x); |
278 | } | 280 | } |
279 | 281 | ||
282 | #endif | ||
283 | |||
280 | /* | 284 | /* |
281 | * The Tile architecture does not support IOPORT, even with PCI. | 285 | * The Tile architecture does not support IOPORT, even with PCI. |
282 | * Unfortunately we can't yet simply not declare these methods, | 286 | * Unfortunately we can't yet simply not declare these methods, |
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index b4e96fef2cf8..241c0bb60b12 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -18,32 +18,20 @@ | |||
18 | #include <arch/interrupts.h> | 18 | #include <arch/interrupts.h> |
19 | #include <arch/chip.h> | 19 | #include <arch/chip.h> |
20 | 20 | ||
21 | #if !defined(__tilegx__) && defined(__ASSEMBLY__) | ||
22 | |||
23 | /* | 21 | /* |
24 | * The set of interrupts we want to allow when interrupts are nominally | 22 | * The set of interrupts we want to allow when interrupts are nominally |
25 | * disabled. The remainder are effectively "NMI" interrupts from | 23 | * disabled. The remainder are effectively "NMI" interrupts from |
26 | * the point of view of the generic Linux code. Note that synchronous | 24 | * the point of view of the generic Linux code. Note that synchronous |
27 | * interrupts (aka "non-queued") are not blocked by the mask in any case. | 25 | * interrupts (aka "non-queued") are not blocked by the mask in any case. |
28 | */ | 26 | */ |
29 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
30 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
31 | (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT))) | ||
32 | #else | ||
33 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
34 | (~(INT_MASK_HI(INT_PERF_COUNT))) | ||
35 | #endif | ||
36 | |||
37 | #else | ||
38 | |||
39 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
40 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
41 | (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) | ||
42 | #else | ||
43 | #define LINUX_MASKABLE_INTERRUPTS \ | 27 | #define LINUX_MASKABLE_INTERRUPTS \ |
44 | (~(INT_MASK(INT_PERF_COUNT))) | 28 | (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT))) |
45 | #endif | ||
46 | 29 | ||
30 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
31 | /* The same macro, but for the two 32-bit SPRs separately. */ | ||
32 | #define LINUX_MASKABLE_INTERRUPTS_LO (-1) | ||
33 | #define LINUX_MASKABLE_INTERRUPTS_HI \ | ||
34 | (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32)))) | ||
47 | #endif | 35 | #endif |
48 | 36 | ||
49 | #ifndef __ASSEMBLY__ | 37 | #ifndef __ASSEMBLY__ |
@@ -126,7 +114,7 @@ | |||
126 | * to know our current state. | 114 | * to know our current state. |
127 | */ | 115 | */ |
128 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | 116 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); |
129 | #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) | 117 | #define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR) |
130 | 118 | ||
131 | /* Disable interrupts. */ | 119 | /* Disable interrupts. */ |
132 | #define arch_local_irq_disable() \ | 120 | #define arch_local_irq_disable() \ |
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
165 | 153 | ||
166 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ | 154 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ |
167 | #define arch_local_irq_mask(interrupt) \ | 155 | #define arch_local_irq_mask(interrupt) \ |
168 | (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) | 156 | (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt))) |
169 | 157 | ||
170 | /* Prevent the given interrupt from being enabled immediately. */ | 158 | /* Prevent the given interrupt from being enabled immediately. */ |
171 | #define arch_local_irq_mask_now(interrupt) do { \ | 159 | #define arch_local_irq_mask_now(interrupt) do { \ |
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
175 | 163 | ||
176 | /* Allow the given interrupt to be enabled next time we enable irqs. */ | 164 | /* Allow the given interrupt to be enabled next time we enable irqs. */ |
177 | #define arch_local_irq_unmask(interrupt) \ | 165 | #define arch_local_irq_unmask(interrupt) \ |
178 | (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) | 166 | (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt))) |
179 | 167 | ||
180 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ | 168 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ |
181 | #define arch_local_irq_unmask_now(interrupt) do { \ | 169 | #define arch_local_irq_unmask_now(interrupt) do { \ |
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | |||
250 | /* Disable interrupts. */ | 238 | /* Disable interrupts. */ |
251 | #define IRQ_DISABLE(tmp0, tmp1) \ | 239 | #define IRQ_DISABLE(tmp0, tmp1) \ |
252 | { \ | 240 | { \ |
253 | movei tmp0, -1; \ | 241 | movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \ |
254 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ | 242 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ |
255 | }; \ | 243 | }; \ |
256 | { \ | 244 | { \ |
diff --git a/arch/tile/include/uapi/arch/interrupts_32.h b/arch/tile/include/uapi/arch/interrupts_32.h index 96b5710505b6..2efe3f68b2d6 100644 --- a/arch/tile/include/uapi/arch/interrupts_32.h +++ b/arch/tile/include/uapi/arch/interrupts_32.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #ifndef __ARCH_INTERRUPTS_H__ | 15 | #ifndef __ARCH_INTERRUPTS_H__ |
16 | #define __ARCH_INTERRUPTS_H__ | 16 | #define __ARCH_INTERRUPTS_H__ |
17 | 17 | ||
18 | #ifndef __KERNEL__ | ||
18 | /** Mask for an interrupt. */ | 19 | /** Mask for an interrupt. */ |
19 | /* Note: must handle breaking interrupts into high and low words manually. */ | 20 | /* Note: must handle breaking interrupts into high and low words manually. */ |
20 | #define INT_MASK_LO(intno) (1 << (intno)) | 21 | #define INT_MASK_LO(intno) (1 << (intno)) |
@@ -23,6 +24,7 @@ | |||
23 | #ifndef __ASSEMBLER__ | 24 | #ifndef __ASSEMBLER__ |
24 | #define INT_MASK(intno) (1ULL << (intno)) | 25 | #define INT_MASK(intno) (1ULL << (intno)) |
25 | #endif | 26 | #endif |
27 | #endif | ||
26 | 28 | ||
27 | 29 | ||
28 | /** Where a given interrupt executes */ | 30 | /** Where a given interrupt executes */ |
@@ -92,216 +94,216 @@ | |||
92 | 94 | ||
93 | #ifndef __ASSEMBLER__ | 95 | #ifndef __ASSEMBLER__ |
94 | #define QUEUED_INTERRUPTS ( \ | 96 | #define QUEUED_INTERRUPTS ( \ |
95 | INT_MASK(INT_MEM_ERROR) | \ | 97 | (1ULL << INT_MEM_ERROR) | \ |
96 | INT_MASK(INT_DMATLB_MISS) | \ | 98 | (1ULL << INT_DMATLB_MISS) | \ |
97 | INT_MASK(INT_DMATLB_ACCESS) | \ | 99 | (1ULL << INT_DMATLB_ACCESS) | \ |
98 | INT_MASK(INT_SNITLB_MISS) | \ | 100 | (1ULL << INT_SNITLB_MISS) | \ |
99 | INT_MASK(INT_SN_NOTIFY) | \ | 101 | (1ULL << INT_SN_NOTIFY) | \ |
100 | INT_MASK(INT_SN_FIREWALL) | \ | 102 | (1ULL << INT_SN_FIREWALL) | \ |
101 | INT_MASK(INT_IDN_FIREWALL) | \ | 103 | (1ULL << INT_IDN_FIREWALL) | \ |
102 | INT_MASK(INT_UDN_FIREWALL) | \ | 104 | (1ULL << INT_UDN_FIREWALL) | \ |
103 | INT_MASK(INT_TILE_TIMER) | \ | 105 | (1ULL << INT_TILE_TIMER) | \ |
104 | INT_MASK(INT_IDN_TIMER) | \ | 106 | (1ULL << INT_IDN_TIMER) | \ |
105 | INT_MASK(INT_UDN_TIMER) | \ | 107 | (1ULL << INT_UDN_TIMER) | \ |
106 | INT_MASK(INT_DMA_NOTIFY) | \ | 108 | (1ULL << INT_DMA_NOTIFY) | \ |
107 | INT_MASK(INT_IDN_CA) | \ | 109 | (1ULL << INT_IDN_CA) | \ |
108 | INT_MASK(INT_UDN_CA) | \ | 110 | (1ULL << INT_UDN_CA) | \ |
109 | INT_MASK(INT_IDN_AVAIL) | \ | 111 | (1ULL << INT_IDN_AVAIL) | \ |
110 | INT_MASK(INT_UDN_AVAIL) | \ | 112 | (1ULL << INT_UDN_AVAIL) | \ |
111 | INT_MASK(INT_PERF_COUNT) | \ | 113 | (1ULL << INT_PERF_COUNT) | \ |
112 | INT_MASK(INT_INTCTRL_3) | \ | 114 | (1ULL << INT_INTCTRL_3) | \ |
113 | INT_MASK(INT_INTCTRL_2) | \ | 115 | (1ULL << INT_INTCTRL_2) | \ |
114 | INT_MASK(INT_INTCTRL_1) | \ | 116 | (1ULL << INT_INTCTRL_1) | \ |
115 | INT_MASK(INT_INTCTRL_0) | \ | 117 | (1ULL << INT_INTCTRL_0) | \ |
116 | INT_MASK(INT_BOOT_ACCESS) | \ | 118 | (1ULL << INT_BOOT_ACCESS) | \ |
117 | INT_MASK(INT_WORLD_ACCESS) | \ | 119 | (1ULL << INT_WORLD_ACCESS) | \ |
118 | INT_MASK(INT_I_ASID) | \ | 120 | (1ULL << INT_I_ASID) | \ |
119 | INT_MASK(INT_D_ASID) | \ | 121 | (1ULL << INT_D_ASID) | \ |
120 | INT_MASK(INT_DMA_ASID) | \ | 122 | (1ULL << INT_DMA_ASID) | \ |
121 | INT_MASK(INT_SNI_ASID) | \ | 123 | (1ULL << INT_SNI_ASID) | \ |
122 | INT_MASK(INT_DMA_CPL) | \ | 124 | (1ULL << INT_DMA_CPL) | \ |
123 | INT_MASK(INT_SN_CPL) | \ | 125 | (1ULL << INT_SN_CPL) | \ |
124 | INT_MASK(INT_DOUBLE_FAULT) | \ | 126 | (1ULL << INT_DOUBLE_FAULT) | \ |
125 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 127 | (1ULL << INT_AUX_PERF_COUNT) | \ |
126 | 0) | 128 | 0) |
127 | #define NONQUEUED_INTERRUPTS ( \ | 129 | #define NONQUEUED_INTERRUPTS ( \ |
128 | INT_MASK(INT_ITLB_MISS) | \ | 130 | (1ULL << INT_ITLB_MISS) | \ |
129 | INT_MASK(INT_ILL) | \ | 131 | (1ULL << INT_ILL) | \ |
130 | INT_MASK(INT_GPV) | \ | 132 | (1ULL << INT_GPV) | \ |
131 | INT_MASK(INT_SN_ACCESS) | \ | 133 | (1ULL << INT_SN_ACCESS) | \ |
132 | INT_MASK(INT_IDN_ACCESS) | \ | 134 | (1ULL << INT_IDN_ACCESS) | \ |
133 | INT_MASK(INT_UDN_ACCESS) | \ | 135 | (1ULL << INT_UDN_ACCESS) | \ |
134 | INT_MASK(INT_IDN_REFILL) | \ | 136 | (1ULL << INT_IDN_REFILL) | \ |
135 | INT_MASK(INT_UDN_REFILL) | \ | 137 | (1ULL << INT_UDN_REFILL) | \ |
136 | INT_MASK(INT_IDN_COMPLETE) | \ | 138 | (1ULL << INT_IDN_COMPLETE) | \ |
137 | INT_MASK(INT_UDN_COMPLETE) | \ | 139 | (1ULL << INT_UDN_COMPLETE) | \ |
138 | INT_MASK(INT_SWINT_3) | \ | 140 | (1ULL << INT_SWINT_3) | \ |
139 | INT_MASK(INT_SWINT_2) | \ | 141 | (1ULL << INT_SWINT_2) | \ |
140 | INT_MASK(INT_SWINT_1) | \ | 142 | (1ULL << INT_SWINT_1) | \ |
141 | INT_MASK(INT_SWINT_0) | \ | 143 | (1ULL << INT_SWINT_0) | \ |
142 | INT_MASK(INT_UNALIGN_DATA) | \ | 144 | (1ULL << INT_UNALIGN_DATA) | \ |
143 | INT_MASK(INT_DTLB_MISS) | \ | 145 | (1ULL << INT_DTLB_MISS) | \ |
144 | INT_MASK(INT_DTLB_ACCESS) | \ | 146 | (1ULL << INT_DTLB_ACCESS) | \ |
145 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 147 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
146 | 0) | 148 | 0) |
147 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 149 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
148 | INT_MASK(INT_MEM_ERROR) | \ | 150 | (1ULL << INT_MEM_ERROR) | \ |
149 | INT_MASK(INT_DMATLB_MISS) | \ | 151 | (1ULL << INT_DMATLB_MISS) | \ |
150 | INT_MASK(INT_DMATLB_ACCESS) | \ | 152 | (1ULL << INT_DMATLB_ACCESS) | \ |
151 | INT_MASK(INT_SNITLB_MISS) | \ | 153 | (1ULL << INT_SNITLB_MISS) | \ |
152 | INT_MASK(INT_SN_NOTIFY) | \ | 154 | (1ULL << INT_SN_NOTIFY) | \ |
153 | INT_MASK(INT_SN_FIREWALL) | \ | 155 | (1ULL << INT_SN_FIREWALL) | \ |
154 | INT_MASK(INT_IDN_FIREWALL) | \ | 156 | (1ULL << INT_IDN_FIREWALL) | \ |
155 | INT_MASK(INT_UDN_FIREWALL) | \ | 157 | (1ULL << INT_UDN_FIREWALL) | \ |
156 | INT_MASK(INT_TILE_TIMER) | \ | 158 | (1ULL << INT_TILE_TIMER) | \ |
157 | INT_MASK(INT_IDN_TIMER) | \ | 159 | (1ULL << INT_IDN_TIMER) | \ |
158 | INT_MASK(INT_UDN_TIMER) | \ | 160 | (1ULL << INT_UDN_TIMER) | \ |
159 | INT_MASK(INT_DMA_NOTIFY) | \ | 161 | (1ULL << INT_DMA_NOTIFY) | \ |
160 | INT_MASK(INT_IDN_CA) | \ | 162 | (1ULL << INT_IDN_CA) | \ |
161 | INT_MASK(INT_UDN_CA) | \ | 163 | (1ULL << INT_UDN_CA) | \ |
162 | INT_MASK(INT_IDN_AVAIL) | \ | 164 | (1ULL << INT_IDN_AVAIL) | \ |
163 | INT_MASK(INT_UDN_AVAIL) | \ | 165 | (1ULL << INT_UDN_AVAIL) | \ |
164 | INT_MASK(INT_PERF_COUNT) | \ | 166 | (1ULL << INT_PERF_COUNT) | \ |
165 | INT_MASK(INT_INTCTRL_3) | \ | 167 | (1ULL << INT_INTCTRL_3) | \ |
166 | INT_MASK(INT_INTCTRL_2) | \ | 168 | (1ULL << INT_INTCTRL_2) | \ |
167 | INT_MASK(INT_INTCTRL_1) | \ | 169 | (1ULL << INT_INTCTRL_1) | \ |
168 | INT_MASK(INT_INTCTRL_0) | \ | 170 | (1ULL << INT_INTCTRL_0) | \ |
169 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 171 | (1ULL << INT_AUX_PERF_COUNT) | \ |
170 | 0) | 172 | 0) |
171 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 173 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
172 | INT_MASK(INT_ITLB_MISS) | \ | 174 | (1ULL << INT_ITLB_MISS) | \ |
173 | INT_MASK(INT_ILL) | \ | 175 | (1ULL << INT_ILL) | \ |
174 | INT_MASK(INT_GPV) | \ | 176 | (1ULL << INT_GPV) | \ |
175 | INT_MASK(INT_SN_ACCESS) | \ | 177 | (1ULL << INT_SN_ACCESS) | \ |
176 | INT_MASK(INT_IDN_ACCESS) | \ | 178 | (1ULL << INT_IDN_ACCESS) | \ |
177 | INT_MASK(INT_UDN_ACCESS) | \ | 179 | (1ULL << INT_UDN_ACCESS) | \ |
178 | INT_MASK(INT_IDN_REFILL) | \ | 180 | (1ULL << INT_IDN_REFILL) | \ |
179 | INT_MASK(INT_UDN_REFILL) | \ | 181 | (1ULL << INT_UDN_REFILL) | \ |
180 | INT_MASK(INT_IDN_COMPLETE) | \ | 182 | (1ULL << INT_IDN_COMPLETE) | \ |
181 | INT_MASK(INT_UDN_COMPLETE) | \ | 183 | (1ULL << INT_UDN_COMPLETE) | \ |
182 | INT_MASK(INT_SWINT_3) | \ | 184 | (1ULL << INT_SWINT_3) | \ |
183 | INT_MASK(INT_SWINT_2) | \ | 185 | (1ULL << INT_SWINT_2) | \ |
184 | INT_MASK(INT_SWINT_1) | \ | 186 | (1ULL << INT_SWINT_1) | \ |
185 | INT_MASK(INT_SWINT_0) | \ | 187 | (1ULL << INT_SWINT_0) | \ |
186 | INT_MASK(INT_UNALIGN_DATA) | \ | 188 | (1ULL << INT_UNALIGN_DATA) | \ |
187 | INT_MASK(INT_DTLB_MISS) | \ | 189 | (1ULL << INT_DTLB_MISS) | \ |
188 | INT_MASK(INT_DTLB_ACCESS) | \ | 190 | (1ULL << INT_DTLB_ACCESS) | \ |
189 | INT_MASK(INT_BOOT_ACCESS) | \ | 191 | (1ULL << INT_BOOT_ACCESS) | \ |
190 | INT_MASK(INT_WORLD_ACCESS) | \ | 192 | (1ULL << INT_WORLD_ACCESS) | \ |
191 | INT_MASK(INT_I_ASID) | \ | 193 | (1ULL << INT_I_ASID) | \ |
192 | INT_MASK(INT_D_ASID) | \ | 194 | (1ULL << INT_D_ASID) | \ |
193 | INT_MASK(INT_DMA_ASID) | \ | 195 | (1ULL << INT_DMA_ASID) | \ |
194 | INT_MASK(INT_SNI_ASID) | \ | 196 | (1ULL << INT_SNI_ASID) | \ |
195 | INT_MASK(INT_DMA_CPL) | \ | 197 | (1ULL << INT_DMA_CPL) | \ |
196 | INT_MASK(INT_SN_CPL) | \ | 198 | (1ULL << INT_SN_CPL) | \ |
197 | INT_MASK(INT_DOUBLE_FAULT) | \ | 199 | (1ULL << INT_DOUBLE_FAULT) | \ |
198 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 200 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
199 | 0) | 201 | 0) |
200 | #define MASKABLE_INTERRUPTS ( \ | 202 | #define MASKABLE_INTERRUPTS ( \ |
201 | INT_MASK(INT_MEM_ERROR) | \ | 203 | (1ULL << INT_MEM_ERROR) | \ |
202 | INT_MASK(INT_IDN_REFILL) | \ | 204 | (1ULL << INT_IDN_REFILL) | \ |
203 | INT_MASK(INT_UDN_REFILL) | \ | 205 | (1ULL << INT_UDN_REFILL) | \ |
204 | INT_MASK(INT_IDN_COMPLETE) | \ | 206 | (1ULL << INT_IDN_COMPLETE) | \ |
205 | INT_MASK(INT_UDN_COMPLETE) | \ | 207 | (1ULL << INT_UDN_COMPLETE) | \ |
206 | INT_MASK(INT_DMATLB_MISS) | \ | 208 | (1ULL << INT_DMATLB_MISS) | \ |
207 | INT_MASK(INT_DMATLB_ACCESS) | \ | 209 | (1ULL << INT_DMATLB_ACCESS) | \ |
208 | INT_MASK(INT_SNITLB_MISS) | \ | 210 | (1ULL << INT_SNITLB_MISS) | \ |
209 | INT_MASK(INT_SN_NOTIFY) | \ | 211 | (1ULL << INT_SN_NOTIFY) | \ |
210 | INT_MASK(INT_SN_FIREWALL) | \ | 212 | (1ULL << INT_SN_FIREWALL) | \ |
211 | INT_MASK(INT_IDN_FIREWALL) | \ | 213 | (1ULL << INT_IDN_FIREWALL) | \ |
212 | INT_MASK(INT_UDN_FIREWALL) | \ | 214 | (1ULL << INT_UDN_FIREWALL) | \ |
213 | INT_MASK(INT_TILE_TIMER) | \ | 215 | (1ULL << INT_TILE_TIMER) | \ |
214 | INT_MASK(INT_IDN_TIMER) | \ | 216 | (1ULL << INT_IDN_TIMER) | \ |
215 | INT_MASK(INT_UDN_TIMER) | \ | 217 | (1ULL << INT_UDN_TIMER) | \ |
216 | INT_MASK(INT_DMA_NOTIFY) | \ | 218 | (1ULL << INT_DMA_NOTIFY) | \ |
217 | INT_MASK(INT_IDN_CA) | \ | 219 | (1ULL << INT_IDN_CA) | \ |
218 | INT_MASK(INT_UDN_CA) | \ | 220 | (1ULL << INT_UDN_CA) | \ |
219 | INT_MASK(INT_IDN_AVAIL) | \ | 221 | (1ULL << INT_IDN_AVAIL) | \ |
220 | INT_MASK(INT_UDN_AVAIL) | \ | 222 | (1ULL << INT_UDN_AVAIL) | \ |
221 | INT_MASK(INT_PERF_COUNT) | \ | 223 | (1ULL << INT_PERF_COUNT) | \ |
222 | INT_MASK(INT_INTCTRL_3) | \ | 224 | (1ULL << INT_INTCTRL_3) | \ |
223 | INT_MASK(INT_INTCTRL_2) | \ | 225 | (1ULL << INT_INTCTRL_2) | \ |
224 | INT_MASK(INT_INTCTRL_1) | \ | 226 | (1ULL << INT_INTCTRL_1) | \ |
225 | INT_MASK(INT_INTCTRL_0) | \ | 227 | (1ULL << INT_INTCTRL_0) | \ |
226 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 228 | (1ULL << INT_AUX_PERF_COUNT) | \ |
227 | 0) | 229 | 0) |
228 | #define UNMASKABLE_INTERRUPTS ( \ | 230 | #define UNMASKABLE_INTERRUPTS ( \ |
229 | INT_MASK(INT_ITLB_MISS) | \ | 231 | (1ULL << INT_ITLB_MISS) | \ |
230 | INT_MASK(INT_ILL) | \ | 232 | (1ULL << INT_ILL) | \ |
231 | INT_MASK(INT_GPV) | \ | 233 | (1ULL << INT_GPV) | \ |
232 | INT_MASK(INT_SN_ACCESS) | \ | 234 | (1ULL << INT_SN_ACCESS) | \ |
233 | INT_MASK(INT_IDN_ACCESS) | \ | 235 | (1ULL << INT_IDN_ACCESS) | \ |
234 | INT_MASK(INT_UDN_ACCESS) | \ | 236 | (1ULL << INT_UDN_ACCESS) | \ |
235 | INT_MASK(INT_SWINT_3) | \ | 237 | (1ULL << INT_SWINT_3) | \ |
236 | INT_MASK(INT_SWINT_2) | \ | 238 | (1ULL << INT_SWINT_2) | \ |
237 | INT_MASK(INT_SWINT_1) | \ | 239 | (1ULL << INT_SWINT_1) | \ |
238 | INT_MASK(INT_SWINT_0) | \ | 240 | (1ULL << INT_SWINT_0) | \ |
239 | INT_MASK(INT_UNALIGN_DATA) | \ | 241 | (1ULL << INT_UNALIGN_DATA) | \ |
240 | INT_MASK(INT_DTLB_MISS) | \ | 242 | (1ULL << INT_DTLB_MISS) | \ |
241 | INT_MASK(INT_DTLB_ACCESS) | \ | 243 | (1ULL << INT_DTLB_ACCESS) | \ |
242 | INT_MASK(INT_BOOT_ACCESS) | \ | 244 | (1ULL << INT_BOOT_ACCESS) | \ |
243 | INT_MASK(INT_WORLD_ACCESS) | \ | 245 | (1ULL << INT_WORLD_ACCESS) | \ |
244 | INT_MASK(INT_I_ASID) | \ | 246 | (1ULL << INT_I_ASID) | \ |
245 | INT_MASK(INT_D_ASID) | \ | 247 | (1ULL << INT_D_ASID) | \ |
246 | INT_MASK(INT_DMA_ASID) | \ | 248 | (1ULL << INT_DMA_ASID) | \ |
247 | INT_MASK(INT_SNI_ASID) | \ | 249 | (1ULL << INT_SNI_ASID) | \ |
248 | INT_MASK(INT_DMA_CPL) | \ | 250 | (1ULL << INT_DMA_CPL) | \ |
249 | INT_MASK(INT_SN_CPL) | \ | 251 | (1ULL << INT_SN_CPL) | \ |
250 | INT_MASK(INT_DOUBLE_FAULT) | \ | 252 | (1ULL << INT_DOUBLE_FAULT) | \ |
251 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 253 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
252 | 0) | 254 | 0) |
253 | #define SYNC_INTERRUPTS ( \ | 255 | #define SYNC_INTERRUPTS ( \ |
254 | INT_MASK(INT_ITLB_MISS) | \ | 256 | (1ULL << INT_ITLB_MISS) | \ |
255 | INT_MASK(INT_ILL) | \ | 257 | (1ULL << INT_ILL) | \ |
256 | INT_MASK(INT_GPV) | \ | 258 | (1ULL << INT_GPV) | \ |
257 | INT_MASK(INT_SN_ACCESS) | \ | 259 | (1ULL << INT_SN_ACCESS) | \ |
258 | INT_MASK(INT_IDN_ACCESS) | \ | 260 | (1ULL << INT_IDN_ACCESS) | \ |
259 | INT_MASK(INT_UDN_ACCESS) | \ | 261 | (1ULL << INT_UDN_ACCESS) | \ |
260 | INT_MASK(INT_IDN_REFILL) | \ | 262 | (1ULL << INT_IDN_REFILL) | \ |
261 | INT_MASK(INT_UDN_REFILL) | \ | 263 | (1ULL << INT_UDN_REFILL) | \ |
262 | INT_MASK(INT_IDN_COMPLETE) | \ | 264 | (1ULL << INT_IDN_COMPLETE) | \ |
263 | INT_MASK(INT_UDN_COMPLETE) | \ | 265 | (1ULL << INT_UDN_COMPLETE) | \ |
264 | INT_MASK(INT_SWINT_3) | \ | 266 | (1ULL << INT_SWINT_3) | \ |
265 | INT_MASK(INT_SWINT_2) | \ | 267 | (1ULL << INT_SWINT_2) | \ |
266 | INT_MASK(INT_SWINT_1) | \ | 268 | (1ULL << INT_SWINT_1) | \ |
267 | INT_MASK(INT_SWINT_0) | \ | 269 | (1ULL << INT_SWINT_0) | \ |
268 | INT_MASK(INT_UNALIGN_DATA) | \ | 270 | (1ULL << INT_UNALIGN_DATA) | \ |
269 | INT_MASK(INT_DTLB_MISS) | \ | 271 | (1ULL << INT_DTLB_MISS) | \ |
270 | INT_MASK(INT_DTLB_ACCESS) | \ | 272 | (1ULL << INT_DTLB_ACCESS) | \ |
271 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | 273 | (1ULL << INT_SN_STATIC_ACCESS) | \ |
272 | 0) | 274 | 0) |
273 | #define NON_SYNC_INTERRUPTS ( \ | 275 | #define NON_SYNC_INTERRUPTS ( \ |
274 | INT_MASK(INT_MEM_ERROR) | \ | 276 | (1ULL << INT_MEM_ERROR) | \ |
275 | INT_MASK(INT_DMATLB_MISS) | \ | 277 | (1ULL << INT_DMATLB_MISS) | \ |
276 | INT_MASK(INT_DMATLB_ACCESS) | \ | 278 | (1ULL << INT_DMATLB_ACCESS) | \ |
277 | INT_MASK(INT_SNITLB_MISS) | \ | 279 | (1ULL << INT_SNITLB_MISS) | \ |
278 | INT_MASK(INT_SN_NOTIFY) | \ | 280 | (1ULL << INT_SN_NOTIFY) | \ |
279 | INT_MASK(INT_SN_FIREWALL) | \ | 281 | (1ULL << INT_SN_FIREWALL) | \ |
280 | INT_MASK(INT_IDN_FIREWALL) | \ | 282 | (1ULL << INT_IDN_FIREWALL) | \ |
281 | INT_MASK(INT_UDN_FIREWALL) | \ | 283 | (1ULL << INT_UDN_FIREWALL) | \ |
282 | INT_MASK(INT_TILE_TIMER) | \ | 284 | (1ULL << INT_TILE_TIMER) | \ |
283 | INT_MASK(INT_IDN_TIMER) | \ | 285 | (1ULL << INT_IDN_TIMER) | \ |
284 | INT_MASK(INT_UDN_TIMER) | \ | 286 | (1ULL << INT_UDN_TIMER) | \ |
285 | INT_MASK(INT_DMA_NOTIFY) | \ | 287 | (1ULL << INT_DMA_NOTIFY) | \ |
286 | INT_MASK(INT_IDN_CA) | \ | 288 | (1ULL << INT_IDN_CA) | \ |
287 | INT_MASK(INT_UDN_CA) | \ | 289 | (1ULL << INT_UDN_CA) | \ |
288 | INT_MASK(INT_IDN_AVAIL) | \ | 290 | (1ULL << INT_IDN_AVAIL) | \ |
289 | INT_MASK(INT_UDN_AVAIL) | \ | 291 | (1ULL << INT_UDN_AVAIL) | \ |
290 | INT_MASK(INT_PERF_COUNT) | \ | 292 | (1ULL << INT_PERF_COUNT) | \ |
291 | INT_MASK(INT_INTCTRL_3) | \ | 293 | (1ULL << INT_INTCTRL_3) | \ |
292 | INT_MASK(INT_INTCTRL_2) | \ | 294 | (1ULL << INT_INTCTRL_2) | \ |
293 | INT_MASK(INT_INTCTRL_1) | \ | 295 | (1ULL << INT_INTCTRL_1) | \ |
294 | INT_MASK(INT_INTCTRL_0) | \ | 296 | (1ULL << INT_INTCTRL_0) | \ |
295 | INT_MASK(INT_BOOT_ACCESS) | \ | 297 | (1ULL << INT_BOOT_ACCESS) | \ |
296 | INT_MASK(INT_WORLD_ACCESS) | \ | 298 | (1ULL << INT_WORLD_ACCESS) | \ |
297 | INT_MASK(INT_I_ASID) | \ | 299 | (1ULL << INT_I_ASID) | \ |
298 | INT_MASK(INT_D_ASID) | \ | 300 | (1ULL << INT_D_ASID) | \ |
299 | INT_MASK(INT_DMA_ASID) | \ | 301 | (1ULL << INT_DMA_ASID) | \ |
300 | INT_MASK(INT_SNI_ASID) | \ | 302 | (1ULL << INT_SNI_ASID) | \ |
301 | INT_MASK(INT_DMA_CPL) | \ | 303 | (1ULL << INT_DMA_CPL) | \ |
302 | INT_MASK(INT_SN_CPL) | \ | 304 | (1ULL << INT_SN_CPL) | \ |
303 | INT_MASK(INT_DOUBLE_FAULT) | \ | 305 | (1ULL << INT_DOUBLE_FAULT) | \ |
304 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 306 | (1ULL << INT_AUX_PERF_COUNT) | \ |
305 | 0) | 307 | 0) |
306 | #endif /* !__ASSEMBLER__ */ | 308 | #endif /* !__ASSEMBLER__ */ |
307 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 309 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
diff --git a/arch/tile/include/uapi/arch/interrupts_64.h b/arch/tile/include/uapi/arch/interrupts_64.h index 5bb58b2e4e6f..13c9f9182348 100644 --- a/arch/tile/include/uapi/arch/interrupts_64.h +++ b/arch/tile/include/uapi/arch/interrupts_64.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #ifndef __ARCH_INTERRUPTS_H__ | 15 | #ifndef __ARCH_INTERRUPTS_H__ |
16 | #define __ARCH_INTERRUPTS_H__ | 16 | #define __ARCH_INTERRUPTS_H__ |
17 | 17 | ||
18 | #ifndef __KERNEL__ | ||
18 | /** Mask for an interrupt. */ | 19 | /** Mask for an interrupt. */ |
19 | #ifdef __ASSEMBLER__ | 20 | #ifdef __ASSEMBLER__ |
20 | /* Note: must handle breaking interrupts into high and low words manually. */ | 21 | /* Note: must handle breaking interrupts into high and low words manually. */ |
@@ -22,6 +23,7 @@ | |||
22 | #else | 23 | #else |
23 | #define INT_MASK(intno) (1ULL << (intno)) | 24 | #define INT_MASK(intno) (1ULL << (intno)) |
24 | #endif | 25 | #endif |
26 | #endif | ||
25 | 27 | ||
26 | 28 | ||
27 | /** Where a given interrupt executes */ | 29 | /** Where a given interrupt executes */ |
@@ -85,192 +87,192 @@ | |||
85 | 87 | ||
86 | #ifndef __ASSEMBLER__ | 88 | #ifndef __ASSEMBLER__ |
87 | #define QUEUED_INTERRUPTS ( \ | 89 | #define QUEUED_INTERRUPTS ( \ |
88 | INT_MASK(INT_MEM_ERROR) | \ | 90 | (1ULL << INT_MEM_ERROR) | \ |
89 | INT_MASK(INT_IDN_COMPLETE) | \ | 91 | (1ULL << INT_IDN_COMPLETE) | \ |
90 | INT_MASK(INT_UDN_COMPLETE) | \ | 92 | (1ULL << INT_UDN_COMPLETE) | \ |
91 | INT_MASK(INT_IDN_FIREWALL) | \ | 93 | (1ULL << INT_IDN_FIREWALL) | \ |
92 | INT_MASK(INT_UDN_FIREWALL) | \ | 94 | (1ULL << INT_UDN_FIREWALL) | \ |
93 | INT_MASK(INT_TILE_TIMER) | \ | 95 | (1ULL << INT_TILE_TIMER) | \ |
94 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 96 | (1ULL << INT_AUX_TILE_TIMER) | \ |
95 | INT_MASK(INT_IDN_TIMER) | \ | 97 | (1ULL << INT_IDN_TIMER) | \ |
96 | INT_MASK(INT_UDN_TIMER) | \ | 98 | (1ULL << INT_UDN_TIMER) | \ |
97 | INT_MASK(INT_IDN_AVAIL) | \ | 99 | (1ULL << INT_IDN_AVAIL) | \ |
98 | INT_MASK(INT_UDN_AVAIL) | \ | 100 | (1ULL << INT_UDN_AVAIL) | \ |
99 | INT_MASK(INT_IPI_3) | \ | 101 | (1ULL << INT_IPI_3) | \ |
100 | INT_MASK(INT_IPI_2) | \ | 102 | (1ULL << INT_IPI_2) | \ |
101 | INT_MASK(INT_IPI_1) | \ | 103 | (1ULL << INT_IPI_1) | \ |
102 | INT_MASK(INT_IPI_0) | \ | 104 | (1ULL << INT_IPI_0) | \ |
103 | INT_MASK(INT_PERF_COUNT) | \ | 105 | (1ULL << INT_PERF_COUNT) | \ |
104 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 106 | (1ULL << INT_AUX_PERF_COUNT) | \ |
105 | INT_MASK(INT_INTCTRL_3) | \ | 107 | (1ULL << INT_INTCTRL_3) | \ |
106 | INT_MASK(INT_INTCTRL_2) | \ | 108 | (1ULL << INT_INTCTRL_2) | \ |
107 | INT_MASK(INT_INTCTRL_1) | \ | 109 | (1ULL << INT_INTCTRL_1) | \ |
108 | INT_MASK(INT_INTCTRL_0) | \ | 110 | (1ULL << INT_INTCTRL_0) | \ |
109 | INT_MASK(INT_BOOT_ACCESS) | \ | 111 | (1ULL << INT_BOOT_ACCESS) | \ |
110 | INT_MASK(INT_WORLD_ACCESS) | \ | 112 | (1ULL << INT_WORLD_ACCESS) | \ |
111 | INT_MASK(INT_I_ASID) | \ | 113 | (1ULL << INT_I_ASID) | \ |
112 | INT_MASK(INT_D_ASID) | \ | 114 | (1ULL << INT_D_ASID) | \ |
113 | INT_MASK(INT_DOUBLE_FAULT) | \ | 115 | (1ULL << INT_DOUBLE_FAULT) | \ |
114 | 0) | 116 | 0) |
115 | #define NONQUEUED_INTERRUPTS ( \ | 117 | #define NONQUEUED_INTERRUPTS ( \ |
116 | INT_MASK(INT_SINGLE_STEP_3) | \ | 118 | (1ULL << INT_SINGLE_STEP_3) | \ |
117 | INT_MASK(INT_SINGLE_STEP_2) | \ | 119 | (1ULL << INT_SINGLE_STEP_2) | \ |
118 | INT_MASK(INT_SINGLE_STEP_1) | \ | 120 | (1ULL << INT_SINGLE_STEP_1) | \ |
119 | INT_MASK(INT_SINGLE_STEP_0) | \ | 121 | (1ULL << INT_SINGLE_STEP_0) | \ |
120 | INT_MASK(INT_ITLB_MISS) | \ | 122 | (1ULL << INT_ITLB_MISS) | \ |
121 | INT_MASK(INT_ILL) | \ | 123 | (1ULL << INT_ILL) | \ |
122 | INT_MASK(INT_GPV) | \ | 124 | (1ULL << INT_GPV) | \ |
123 | INT_MASK(INT_IDN_ACCESS) | \ | 125 | (1ULL << INT_IDN_ACCESS) | \ |
124 | INT_MASK(INT_UDN_ACCESS) | \ | 126 | (1ULL << INT_UDN_ACCESS) | \ |
125 | INT_MASK(INT_SWINT_3) | \ | 127 | (1ULL << INT_SWINT_3) | \ |
126 | INT_MASK(INT_SWINT_2) | \ | 128 | (1ULL << INT_SWINT_2) | \ |
127 | INT_MASK(INT_SWINT_1) | \ | 129 | (1ULL << INT_SWINT_1) | \ |
128 | INT_MASK(INT_SWINT_0) | \ | 130 | (1ULL << INT_SWINT_0) | \ |
129 | INT_MASK(INT_ILL_TRANS) | \ | 131 | (1ULL << INT_ILL_TRANS) | \ |
130 | INT_MASK(INT_UNALIGN_DATA) | \ | 132 | (1ULL << INT_UNALIGN_DATA) | \ |
131 | INT_MASK(INT_DTLB_MISS) | \ | 133 | (1ULL << INT_DTLB_MISS) | \ |
132 | INT_MASK(INT_DTLB_ACCESS) | \ | 134 | (1ULL << INT_DTLB_ACCESS) | \ |
133 | 0) | 135 | 0) |
134 | #define CRITICAL_MASKED_INTERRUPTS ( \ | 136 | #define CRITICAL_MASKED_INTERRUPTS ( \ |
135 | INT_MASK(INT_MEM_ERROR) | \ | 137 | (1ULL << INT_MEM_ERROR) | \ |
136 | INT_MASK(INT_SINGLE_STEP_3) | \ | 138 | (1ULL << INT_SINGLE_STEP_3) | \ |
137 | INT_MASK(INT_SINGLE_STEP_2) | \ | 139 | (1ULL << INT_SINGLE_STEP_2) | \ |
138 | INT_MASK(INT_SINGLE_STEP_1) | \ | 140 | (1ULL << INT_SINGLE_STEP_1) | \ |
139 | INT_MASK(INT_SINGLE_STEP_0) | \ | 141 | (1ULL << INT_SINGLE_STEP_0) | \ |
140 | INT_MASK(INT_IDN_COMPLETE) | \ | 142 | (1ULL << INT_IDN_COMPLETE) | \ |
141 | INT_MASK(INT_UDN_COMPLETE) | \ | 143 | (1ULL << INT_UDN_COMPLETE) | \ |
142 | INT_MASK(INT_IDN_FIREWALL) | \ | 144 | (1ULL << INT_IDN_FIREWALL) | \ |
143 | INT_MASK(INT_UDN_FIREWALL) | \ | 145 | (1ULL << INT_UDN_FIREWALL) | \ |
144 | INT_MASK(INT_TILE_TIMER) | \ | 146 | (1ULL << INT_TILE_TIMER) | \ |
145 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 147 | (1ULL << INT_AUX_TILE_TIMER) | \ |
146 | INT_MASK(INT_IDN_TIMER) | \ | 148 | (1ULL << INT_IDN_TIMER) | \ |
147 | INT_MASK(INT_UDN_TIMER) | \ | 149 | (1ULL << INT_UDN_TIMER) | \ |
148 | INT_MASK(INT_IDN_AVAIL) | \ | 150 | (1ULL << INT_IDN_AVAIL) | \ |
149 | INT_MASK(INT_UDN_AVAIL) | \ | 151 | (1ULL << INT_UDN_AVAIL) | \ |
150 | INT_MASK(INT_IPI_3) | \ | 152 | (1ULL << INT_IPI_3) | \ |
151 | INT_MASK(INT_IPI_2) | \ | 153 | (1ULL << INT_IPI_2) | \ |
152 | INT_MASK(INT_IPI_1) | \ | 154 | (1ULL << INT_IPI_1) | \ |
153 | INT_MASK(INT_IPI_0) | \ | 155 | (1ULL << INT_IPI_0) | \ |
154 | INT_MASK(INT_PERF_COUNT) | \ | 156 | (1ULL << INT_PERF_COUNT) | \ |
155 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 157 | (1ULL << INT_AUX_PERF_COUNT) | \ |
156 | INT_MASK(INT_INTCTRL_3) | \ | 158 | (1ULL << INT_INTCTRL_3) | \ |
157 | INT_MASK(INT_INTCTRL_2) | \ | 159 | (1ULL << INT_INTCTRL_2) | \ |
158 | INT_MASK(INT_INTCTRL_1) | \ | 160 | (1ULL << INT_INTCTRL_1) | \ |
159 | INT_MASK(INT_INTCTRL_0) | \ | 161 | (1ULL << INT_INTCTRL_0) | \ |
160 | 0) | 162 | 0) |
161 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | 163 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ |
162 | INT_MASK(INT_ITLB_MISS) | \ | 164 | (1ULL << INT_ITLB_MISS) | \ |
163 | INT_MASK(INT_ILL) | \ | 165 | (1ULL << INT_ILL) | \ |
164 | INT_MASK(INT_GPV) | \ | 166 | (1ULL << INT_GPV) | \ |
165 | INT_MASK(INT_IDN_ACCESS) | \ | 167 | (1ULL << INT_IDN_ACCESS) | \ |
166 | INT_MASK(INT_UDN_ACCESS) | \ | 168 | (1ULL << INT_UDN_ACCESS) | \ |
167 | INT_MASK(INT_SWINT_3) | \ | 169 | (1ULL << INT_SWINT_3) | \ |
168 | INT_MASK(INT_SWINT_2) | \ | 170 | (1ULL << INT_SWINT_2) | \ |
169 | INT_MASK(INT_SWINT_1) | \ | 171 | (1ULL << INT_SWINT_1) | \ |
170 | INT_MASK(INT_SWINT_0) | \ | 172 | (1ULL << INT_SWINT_0) | \ |
171 | INT_MASK(INT_ILL_TRANS) | \ | 173 | (1ULL << INT_ILL_TRANS) | \ |
172 | INT_MASK(INT_UNALIGN_DATA) | \ | 174 | (1ULL << INT_UNALIGN_DATA) | \ |
173 | INT_MASK(INT_DTLB_MISS) | \ | 175 | (1ULL << INT_DTLB_MISS) | \ |
174 | INT_MASK(INT_DTLB_ACCESS) | \ | 176 | (1ULL << INT_DTLB_ACCESS) | \ |
175 | INT_MASK(INT_BOOT_ACCESS) | \ | 177 | (1ULL << INT_BOOT_ACCESS) | \ |
176 | INT_MASK(INT_WORLD_ACCESS) | \ | 178 | (1ULL << INT_WORLD_ACCESS) | \ |
177 | INT_MASK(INT_I_ASID) | \ | 179 | (1ULL << INT_I_ASID) | \ |
178 | INT_MASK(INT_D_ASID) | \ | 180 | (1ULL << INT_D_ASID) | \ |
179 | INT_MASK(INT_DOUBLE_FAULT) | \ | 181 | (1ULL << INT_DOUBLE_FAULT) | \ |
180 | 0) | 182 | 0) |
181 | #define MASKABLE_INTERRUPTS ( \ | 183 | #define MASKABLE_INTERRUPTS ( \ |
182 | INT_MASK(INT_MEM_ERROR) | \ | 184 | (1ULL << INT_MEM_ERROR) | \ |
183 | INT_MASK(INT_SINGLE_STEP_3) | \ | 185 | (1ULL << INT_SINGLE_STEP_3) | \ |
184 | INT_MASK(INT_SINGLE_STEP_2) | \ | 186 | (1ULL << INT_SINGLE_STEP_2) | \ |
185 | INT_MASK(INT_SINGLE_STEP_1) | \ | 187 | (1ULL << INT_SINGLE_STEP_1) | \ |
186 | INT_MASK(INT_SINGLE_STEP_0) | \ | 188 | (1ULL << INT_SINGLE_STEP_0) | \ |
187 | INT_MASK(INT_IDN_COMPLETE) | \ | 189 | (1ULL << INT_IDN_COMPLETE) | \ |
188 | INT_MASK(INT_UDN_COMPLETE) | \ | 190 | (1ULL << INT_UDN_COMPLETE) | \ |
189 | INT_MASK(INT_IDN_FIREWALL) | \ | 191 | (1ULL << INT_IDN_FIREWALL) | \ |
190 | INT_MASK(INT_UDN_FIREWALL) | \ | 192 | (1ULL << INT_UDN_FIREWALL) | \ |
191 | INT_MASK(INT_TILE_TIMER) | \ | 193 | (1ULL << INT_TILE_TIMER) | \ |
192 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 194 | (1ULL << INT_AUX_TILE_TIMER) | \ |
193 | INT_MASK(INT_IDN_TIMER) | \ | 195 | (1ULL << INT_IDN_TIMER) | \ |
194 | INT_MASK(INT_UDN_TIMER) | \ | 196 | (1ULL << INT_UDN_TIMER) | \ |
195 | INT_MASK(INT_IDN_AVAIL) | \ | 197 | (1ULL << INT_IDN_AVAIL) | \ |
196 | INT_MASK(INT_UDN_AVAIL) | \ | 198 | (1ULL << INT_UDN_AVAIL) | \ |
197 | INT_MASK(INT_IPI_3) | \ | 199 | (1ULL << INT_IPI_3) | \ |
198 | INT_MASK(INT_IPI_2) | \ | 200 | (1ULL << INT_IPI_2) | \ |
199 | INT_MASK(INT_IPI_1) | \ | 201 | (1ULL << INT_IPI_1) | \ |
200 | INT_MASK(INT_IPI_0) | \ | 202 | (1ULL << INT_IPI_0) | \ |
201 | INT_MASK(INT_PERF_COUNT) | \ | 203 | (1ULL << INT_PERF_COUNT) | \ |
202 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 204 | (1ULL << INT_AUX_PERF_COUNT) | \ |
203 | INT_MASK(INT_INTCTRL_3) | \ | 205 | (1ULL << INT_INTCTRL_3) | \ |
204 | INT_MASK(INT_INTCTRL_2) | \ | 206 | (1ULL << INT_INTCTRL_2) | \ |
205 | INT_MASK(INT_INTCTRL_1) | \ | 207 | (1ULL << INT_INTCTRL_1) | \ |
206 | INT_MASK(INT_INTCTRL_0) | \ | 208 | (1ULL << INT_INTCTRL_0) | \ |
207 | 0) | 209 | 0) |
208 | #define UNMASKABLE_INTERRUPTS ( \ | 210 | #define UNMASKABLE_INTERRUPTS ( \ |
209 | INT_MASK(INT_ITLB_MISS) | \ | 211 | (1ULL << INT_ITLB_MISS) | \ |
210 | INT_MASK(INT_ILL) | \ | 212 | (1ULL << INT_ILL) | \ |
211 | INT_MASK(INT_GPV) | \ | 213 | (1ULL << INT_GPV) | \ |
212 | INT_MASK(INT_IDN_ACCESS) | \ | 214 | (1ULL << INT_IDN_ACCESS) | \ |
213 | INT_MASK(INT_UDN_ACCESS) | \ | 215 | (1ULL << INT_UDN_ACCESS) | \ |
214 | INT_MASK(INT_SWINT_3) | \ | 216 | (1ULL << INT_SWINT_3) | \ |
215 | INT_MASK(INT_SWINT_2) | \ | 217 | (1ULL << INT_SWINT_2) | \ |
216 | INT_MASK(INT_SWINT_1) | \ | 218 | (1ULL << INT_SWINT_1) | \ |
217 | INT_MASK(INT_SWINT_0) | \ | 219 | (1ULL << INT_SWINT_0) | \ |
218 | INT_MASK(INT_ILL_TRANS) | \ | 220 | (1ULL << INT_ILL_TRANS) | \ |
219 | INT_MASK(INT_UNALIGN_DATA) | \ | 221 | (1ULL << INT_UNALIGN_DATA) | \ |
220 | INT_MASK(INT_DTLB_MISS) | \ | 222 | (1ULL << INT_DTLB_MISS) | \ |
221 | INT_MASK(INT_DTLB_ACCESS) | \ | 223 | (1ULL << INT_DTLB_ACCESS) | \ |
222 | INT_MASK(INT_BOOT_ACCESS) | \ | 224 | (1ULL << INT_BOOT_ACCESS) | \ |
223 | INT_MASK(INT_WORLD_ACCESS) | \ | 225 | (1ULL << INT_WORLD_ACCESS) | \ |
224 | INT_MASK(INT_I_ASID) | \ | 226 | (1ULL << INT_I_ASID) | \ |
225 | INT_MASK(INT_D_ASID) | \ | 227 | (1ULL << INT_D_ASID) | \ |
226 | INT_MASK(INT_DOUBLE_FAULT) | \ | 228 | (1ULL << INT_DOUBLE_FAULT) | \ |
227 | 0) | 229 | 0) |
228 | #define SYNC_INTERRUPTS ( \ | 230 | #define SYNC_INTERRUPTS ( \ |
229 | INT_MASK(INT_SINGLE_STEP_3) | \ | 231 | (1ULL << INT_SINGLE_STEP_3) | \ |
230 | INT_MASK(INT_SINGLE_STEP_2) | \ | 232 | (1ULL << INT_SINGLE_STEP_2) | \ |
231 | INT_MASK(INT_SINGLE_STEP_1) | \ | 233 | (1ULL << INT_SINGLE_STEP_1) | \ |
232 | INT_MASK(INT_SINGLE_STEP_0) | \ | 234 | (1ULL << INT_SINGLE_STEP_0) | \ |
233 | INT_MASK(INT_IDN_COMPLETE) | \ | 235 | (1ULL << INT_IDN_COMPLETE) | \ |
234 | INT_MASK(INT_UDN_COMPLETE) | \ | 236 | (1ULL << INT_UDN_COMPLETE) | \ |
235 | INT_MASK(INT_ITLB_MISS) | \ | 237 | (1ULL << INT_ITLB_MISS) | \ |
236 | INT_MASK(INT_ILL) | \ | 238 | (1ULL << INT_ILL) | \ |
237 | INT_MASK(INT_GPV) | \ | 239 | (1ULL << INT_GPV) | \ |
238 | INT_MASK(INT_IDN_ACCESS) | \ | 240 | (1ULL << INT_IDN_ACCESS) | \ |
239 | INT_MASK(INT_UDN_ACCESS) | \ | 241 | (1ULL << INT_UDN_ACCESS) | \ |
240 | INT_MASK(INT_SWINT_3) | \ | 242 | (1ULL << INT_SWINT_3) | \ |
241 | INT_MASK(INT_SWINT_2) | \ | 243 | (1ULL << INT_SWINT_2) | \ |
242 | INT_MASK(INT_SWINT_1) | \ | 244 | (1ULL << INT_SWINT_1) | \ |
243 | INT_MASK(INT_SWINT_0) | \ | 245 | (1ULL << INT_SWINT_0) | \ |
244 | INT_MASK(INT_ILL_TRANS) | \ | 246 | (1ULL << INT_ILL_TRANS) | \ |
245 | INT_MASK(INT_UNALIGN_DATA) | \ | 247 | (1ULL << INT_UNALIGN_DATA) | \ |
246 | INT_MASK(INT_DTLB_MISS) | \ | 248 | (1ULL << INT_DTLB_MISS) | \ |
247 | INT_MASK(INT_DTLB_ACCESS) | \ | 249 | (1ULL << INT_DTLB_ACCESS) | \ |
248 | 0) | 250 | 0) |
249 | #define NON_SYNC_INTERRUPTS ( \ | 251 | #define NON_SYNC_INTERRUPTS ( \ |
250 | INT_MASK(INT_MEM_ERROR) | \ | 252 | (1ULL << INT_MEM_ERROR) | \ |
251 | INT_MASK(INT_IDN_FIREWALL) | \ | 253 | (1ULL << INT_IDN_FIREWALL) | \ |
252 | INT_MASK(INT_UDN_FIREWALL) | \ | 254 | (1ULL << INT_UDN_FIREWALL) | \ |
253 | INT_MASK(INT_TILE_TIMER) | \ | 255 | (1ULL << INT_TILE_TIMER) | \ |
254 | INT_MASK(INT_AUX_TILE_TIMER) | \ | 256 | (1ULL << INT_AUX_TILE_TIMER) | \ |
255 | INT_MASK(INT_IDN_TIMER) | \ | 257 | (1ULL << INT_IDN_TIMER) | \ |
256 | INT_MASK(INT_UDN_TIMER) | \ | 258 | (1ULL << INT_UDN_TIMER) | \ |
257 | INT_MASK(INT_IDN_AVAIL) | \ | 259 | (1ULL << INT_IDN_AVAIL) | \ |
258 | INT_MASK(INT_UDN_AVAIL) | \ | 260 | (1ULL << INT_UDN_AVAIL) | \ |
259 | INT_MASK(INT_IPI_3) | \ | 261 | (1ULL << INT_IPI_3) | \ |
260 | INT_MASK(INT_IPI_2) | \ | 262 | (1ULL << INT_IPI_2) | \ |
261 | INT_MASK(INT_IPI_1) | \ | 263 | (1ULL << INT_IPI_1) | \ |
262 | INT_MASK(INT_IPI_0) | \ | 264 | (1ULL << INT_IPI_0) | \ |
263 | INT_MASK(INT_PERF_COUNT) | \ | 265 | (1ULL << INT_PERF_COUNT) | \ |
264 | INT_MASK(INT_AUX_PERF_COUNT) | \ | 266 | (1ULL << INT_AUX_PERF_COUNT) | \ |
265 | INT_MASK(INT_INTCTRL_3) | \ | 267 | (1ULL << INT_INTCTRL_3) | \ |
266 | INT_MASK(INT_INTCTRL_2) | \ | 268 | (1ULL << INT_INTCTRL_2) | \ |
267 | INT_MASK(INT_INTCTRL_1) | \ | 269 | (1ULL << INT_INTCTRL_1) | \ |
268 | INT_MASK(INT_INTCTRL_0) | \ | 270 | (1ULL << INT_INTCTRL_0) | \ |
269 | INT_MASK(INT_BOOT_ACCESS) | \ | 271 | (1ULL << INT_BOOT_ACCESS) | \ |
270 | INT_MASK(INT_WORLD_ACCESS) | \ | 272 | (1ULL << INT_WORLD_ACCESS) | \ |
271 | INT_MASK(INT_I_ASID) | \ | 273 | (1ULL << INT_I_ASID) | \ |
272 | INT_MASK(INT_D_ASID) | \ | 274 | (1ULL << INT_D_ASID) | \ |
273 | INT_MASK(INT_DOUBLE_FAULT) | \ | 275 | (1ULL << INT_DOUBLE_FAULT) | \ |
274 | 0) | 276 | 0) |
275 | #endif /* !__ASSEMBLER__ */ | 277 | #endif /* !__ASSEMBLER__ */ |
276 | #endif /* !__ARCH_INTERRUPTS_H__ */ | 278 | #endif /* !__ARCH_INTERRUPTS_H__ */ |
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 54bc9a6678e8..4ea080902654 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S | |||
@@ -1035,7 +1035,9 @@ handle_syscall: | |||
1035 | /* Ensure that the syscall number is within the legal range. */ | 1035 | /* Ensure that the syscall number is within the legal range. */ |
1036 | { | 1036 | { |
1037 | moveli r20, hw2(sys_call_table) | 1037 | moveli r20, hw2(sys_call_table) |
1038 | #ifdef CONFIG_COMPAT | ||
1038 | blbs r30, .Lcompat_syscall | 1039 | blbs r30, .Lcompat_syscall |
1040 | #endif | ||
1039 | } | 1041 | } |
1040 | { | 1042 | { |
1041 | cmpltu r21, TREG_SYSCALL_NR_NAME, r21 | 1043 | cmpltu r21, TREG_SYSCALL_NR_NAME, r21 |
@@ -1093,6 +1095,7 @@ handle_syscall: | |||
1093 | j .Lresume_userspace /* jump into middle of interrupt_return */ | 1095 | j .Lresume_userspace /* jump into middle of interrupt_return */ |
1094 | } | 1096 | } |
1095 | 1097 | ||
1098 | #ifdef CONFIG_COMPAT | ||
1096 | .Lcompat_syscall: | 1099 | .Lcompat_syscall: |
1097 | /* | 1100 | /* |
1098 | * Load the base of the compat syscall table in r20, and | 1101 | * Load the base of the compat syscall table in r20, and |
@@ -1117,6 +1120,7 @@ handle_syscall: | |||
1117 | { move r15, r4; addxi r4, r4, 0 } | 1120 | { move r15, r4; addxi r4, r4, 0 } |
1118 | { move r16, r5; addxi r5, r5, 0 } | 1121 | { move r16, r5; addxi r5, r5, 0 } |
1119 | j .Lload_syscall_pointer | 1122 | j .Lload_syscall_pointer |
1123 | #endif | ||
1120 | 1124 | ||
1121 | .Linvalid_syscall: | 1125 | .Linvalid_syscall: |
1122 | /* Report an invalid syscall back to the user program */ | 1126 | /* Report an invalid syscall back to the user program */ |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0e5661e7d00d..caf93ae11793 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t); | |||
159 | int copy_thread(unsigned long clone_flags, unsigned long sp, | 159 | int copy_thread(unsigned long clone_flags, unsigned long sp, |
160 | unsigned long arg, struct task_struct *p) | 160 | unsigned long arg, struct task_struct *p) |
161 | { | 161 | { |
162 | struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs(); | 162 | struct pt_regs *childregs = task_pt_regs(p); |
163 | unsigned long ksp; | 163 | unsigned long ksp; |
164 | unsigned long *callee_regs; | 164 | unsigned long *callee_regs; |
165 | 165 | ||
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c index baa3d905fee2..d1b5c913ae72 100644 --- a/arch/tile/kernel/reboot.c +++ b/arch/tile/kernel/reboot.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/reboot.h> | 16 | #include <linux/reboot.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/pm.h> | 18 | #include <linux/pm.h> |
19 | #include <linux/export.h> | ||
19 | #include <asm/page.h> | 20 | #include <asm/page.h> |
20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
21 | #include <hv/hypervisor.h> | 22 | #include <hv/hypervisor.h> |
@@ -49,3 +50,4 @@ void machine_restart(char *cmd) | |||
49 | 50 | ||
50 | /* No interesting distinction to be made here. */ | 51 | /* No interesting distinction to be made here. */ |
51 | void (*pm_power_off)(void) = NULL; | 52 | void (*pm_power_off)(void) = NULL; |
53 | EXPORT_SYMBOL(pm_power_off); | ||
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6a649a4462d3..d1e15f7b59c6 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/timex.h> | 31 | #include <linux/timex.h> |
32 | #include <linux/hugetlb.h> | 32 | #include <linux/hugetlb.h> |
33 | #include <linux/start_kernel.h> | 33 | #include <linux/start_kernel.h> |
34 | #include <linux/screen_info.h> | ||
34 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
35 | #include <asm/sections.h> | 36 | #include <asm/sections.h> |
36 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; } | |||
49 | /* Chip information */ | 50 | /* Chip information */ |
50 | char chip_model[64] __write_once; | 51 | char chip_model[64] __write_once; |
51 | 52 | ||
53 | #ifdef CONFIG_VT | ||
54 | struct screen_info screen_info; | ||
55 | #endif | ||
56 | |||
52 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; | 57 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; |
53 | EXPORT_SYMBOL(node_data); | 58 | EXPORT_SYMBOL(node_data); |
54 | 59 | ||
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index b2f44c28dda6..ed258b8ae320 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c | |||
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) | |||
112 | p->pc, p->sp, p->ex1); | 112 | p->pc, p->sp, p->ex1); |
113 | p = NULL; | 113 | p = NULL; |
114 | } | 114 | } |
115 | if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) | 115 | if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0) |
116 | return p; | 116 | return p; |
117 | return NULL; | 117 | return NULL; |
118 | } | 118 | } |
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
484 | { | 484 | { |
485 | save_stack_trace_tsk(NULL, trace); | 485 | save_stack_trace_tsk(NULL, trace); |
486 | } | 486 | } |
487 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
487 | 488 | ||
488 | #endif | 489 | #endif |
489 | 490 | ||
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c index db4fb89e12d8..8f8ad814b139 100644 --- a/arch/tile/lib/cacheflush.c +++ b/arch/tile/lib/cacheflush.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * more details. | 12 | * more details. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/export.h> | ||
15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
16 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
17 | #include <arch/icache.h> | 18 | #include <arch/icache.h> |
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh) | |||
165 | __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); | 166 | __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); |
166 | #endif | 167 | #endif |
167 | } | 168 | } |
169 | EXPORT_SYMBOL_GPL(finv_buffer_remote); | ||
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c index fdc403614d12..75947edccb26 100644 --- a/arch/tile/lib/cpumask.c +++ b/arch/tile/lib/cpumask.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/export.h> | ||
19 | 20 | ||
20 | /* | 21 | /* |
21 | * Allow cropping out bits beyond the end of the array. | 22 | * Allow cropping out bits beyond the end of the array. |
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) | |||
50 | } while (*bp != '\0' && *bp != '\n'); | 51 | } while (*bp != '\0' && *bp != '\n'); |
51 | return 0; | 52 | return 0; |
52 | } | 53 | } |
54 | EXPORT_SYMBOL(bitmap_parselist_crop); | ||
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index dd5f0a33fdaf..4385cb6fa00a 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c | |||
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel); | |||
55 | EXPORT_SYMBOL(hv_dev_close); | 55 | EXPORT_SYMBOL(hv_dev_close); |
56 | EXPORT_SYMBOL(hv_sysconf); | 56 | EXPORT_SYMBOL(hv_sysconf); |
57 | EXPORT_SYMBOL(hv_confstr); | 57 | EXPORT_SYMBOL(hv_confstr); |
58 | EXPORT_SYMBOL(hv_get_rtc); | ||
59 | EXPORT_SYMBOL(hv_set_rtc); | ||
58 | 60 | ||
59 | /* libgcc.a */ | 61 | /* libgcc.a */ |
60 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); | 62 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); |
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 5f7868dcd6d4..1ae911939a18 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c | |||
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home) | |||
408 | __set_pte(ptep, pte_set_home(pteval, home)); | 408 | __set_pte(ptep, pte_set_home(pteval, home)); |
409 | } | 409 | } |
410 | } | 410 | } |
411 | EXPORT_SYMBOL(homecache_change_page_home); | ||
411 | 412 | ||
412 | struct page *homecache_alloc_pages(gfp_t gfp_mask, | 413 | struct page *homecache_alloc_pages(gfp_t gfp_mask, |
413 | unsigned int order, int home) | 414 | unsigned int order, int home) |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index ecdfee60ee4a..f4076af1f4ed 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -3,6 +3,90 @@ | |||
3 | 3 | ||
4 | #include <uapi/asm/mce.h> | 4 | #include <uapi/asm/mce.h> |
5 | 5 | ||
6 | /* | ||
7 | * Machine Check support for x86 | ||
8 | */ | ||
9 | |||
10 | /* MCG_CAP register defines */ | ||
11 | #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ | ||
12 | #define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ | ||
13 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | ||
14 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | ||
15 | #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ | ||
16 | #define MCG_EXT_CNT_SHIFT 16 | ||
17 | #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) | ||
18 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | ||
19 | |||
20 | /* MCG_STATUS register defines */ | ||
21 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ | ||
22 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ | ||
23 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ | ||
24 | |||
25 | /* MCi_STATUS register defines */ | ||
26 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ | ||
27 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ | ||
28 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | ||
29 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | ||
30 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | ||
31 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | ||
32 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | ||
33 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | ||
34 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | ||
35 | #define MCACOD 0xffff /* MCA Error Code */ | ||
36 | |||
37 | /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ | ||
38 | #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ | ||
39 | #define MCACOD_SCRUBMSK 0xfff0 | ||
40 | #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ | ||
41 | #define MCACOD_DATA 0x0134 /* Data Load */ | ||
42 | #define MCACOD_INSTR 0x0150 /* Instruction Fetch */ | ||
43 | |||
44 | /* MCi_MISC register defines */ | ||
45 | #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) | ||
46 | #define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) | ||
47 | #define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ | ||
48 | #define MCI_MISC_ADDR_LINEAR 1 /* linear address */ | ||
49 | #define MCI_MISC_ADDR_PHYS 2 /* physical address */ | ||
50 | #define MCI_MISC_ADDR_MEM 3 /* memory address */ | ||
51 | #define MCI_MISC_ADDR_GENERIC 7 /* generic */ | ||
52 | |||
53 | /* CTL2 register defines */ | ||
54 | #define MCI_CTL2_CMCI_EN (1ULL << 30) | ||
55 | #define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL | ||
56 | |||
57 | #define MCJ_CTX_MASK 3 | ||
58 | #define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) | ||
59 | #define MCJ_CTX_RANDOM 0 /* inject context: random */ | ||
60 | #define MCJ_CTX_PROCESS 0x1 /* inject context: process */ | ||
61 | #define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ | ||
62 | #define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ | ||
63 | #define MCJ_EXCEPTION 0x8 /* raise as exception */ | ||
64 | #define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ | ||
65 | |||
66 | #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ | ||
67 | |||
68 | /* Software defined banks */ | ||
69 | #define MCE_EXTENDED_BANK 128 | ||
70 | #define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0) | ||
71 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) | ||
72 | |||
73 | #define MCE_LOG_LEN 32 | ||
74 | #define MCE_LOG_SIGNATURE "MACHINECHECK" | ||
75 | |||
76 | /* | ||
77 | * This structure contains all data related to the MCE log. Also | ||
78 | * carries a signature to make it easier to find from external | ||
79 | * debugging tools. Each entry is only valid when its finished flag | ||
80 | * is set. | ||
81 | */ | ||
82 | struct mce_log { | ||
83 | char signature[12]; /* "MACHINECHECK" */ | ||
84 | unsigned len; /* = MCE_LOG_LEN */ | ||
85 | unsigned next; | ||
86 | unsigned flags; | ||
87 | unsigned recordlen; /* length of struct mce */ | ||
88 | struct mce entry[MCE_LOG_LEN]; | ||
89 | }; | ||
6 | 90 | ||
7 | struct mca_config { | 91 | struct mca_config { |
8 | bool dont_log_ce; | 92 | bool dont_log_ce; |
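Editor's note: the block above moves the kernel-internal MCE register definitions into asm/mce.h; the matching removal from the exported uapi/asm/mce.h appears further down, leaving userspace with only struct mce and the ioctls. As a quick illustration of how the MCG_CAP fields decode with these masks, here is a stand-alone user-space sketch; the sample register value is made up:

	#include <stdio.h>
	#include <stdint.h>

	#define MCG_BANKCNT_MASK  0xff
	#define MCG_EXT_CNT_MASK  0xff0000
	#define MCG_EXT_CNT_SHIFT 16
	#define MCG_EXT_CNT(c)    (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)

	int main(void)
	{
		uint64_t mcg_cap = 0x0000000000050c09ULL;	/* made-up MCG_CAP value */

		printf("banks:         %llu\n", (unsigned long long)(mcg_cap & MCG_BANKCNT_MASK));
		printf("extended regs: %llu\n", (unsigned long long)MCG_EXT_CNT(mcg_cap));
		return 0;
	}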
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5199db2923d3..1c1a955e67c0 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
142 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; | 142 | return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; |
143 | } | 143 | } |
144 | 144 | ||
145 | static inline unsigned long pud_pfn(pud_t pud) | ||
146 | { | ||
147 | return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; | ||
148 | } | ||
149 | |||
145 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | 150 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
146 | 151 | ||
147 | static inline int pmd_large(pmd_t pte) | 152 | static inline int pmd_large(pmd_t pte) |
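Editor's note: pud_pfn() mirrors the existing pte_pfn()/pmd_pfn() helpers one level up the page-table hierarchy, and it is what lets kern_addr_valid() in the init_64.c hunk below handle addresses mapped by 1 GiB pages. A user-space sketch of the extraction; PAGE_SHIFT and the mask below are simplified illustrative assumptions, not the kernel's exact definitions:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PTE_PFN_MASK	0x000ffffffffff000ULL	/* assume pfn bits live in 12..51 */

	static uint64_t pud_pfn(uint64_t pud_val)
	{
		return (pud_val & PTE_PFN_MASK) >> PAGE_SHIFT;
	}

	int main(void)
	{
		uint64_t pud = 0x0000000040000063ULL;	/* hypothetical 1 GiB mapping at 1 GiB */

		printf("pfn = 0x%llx\n", (unsigned long long)pud_pfn(pud));	/* 0x40000 */
		return 0;
	}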
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 58c829871c31..a0eab85ce7b8 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h | |||
@@ -4,66 +4,6 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <asm/ioctls.h> | 5 | #include <asm/ioctls.h> |
6 | 6 | ||
7 | /* | ||
8 | * Machine Check support for x86 | ||
9 | */ | ||
10 | |||
11 | /* MCG_CAP register defines */ | ||
12 | #define MCG_BANKCNT_MASK 0xff /* Number of Banks */ | ||
13 | #define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */ | ||
14 | #define MCG_EXT_P (1ULL<<9) /* Extended registers available */ | ||
15 | #define MCG_CMCI_P (1ULL<<10) /* CMCI supported */ | ||
16 | #define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */ | ||
17 | #define MCG_EXT_CNT_SHIFT 16 | ||
18 | #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) | ||
19 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ | ||
20 | |||
21 | /* MCG_STATUS register defines */ | ||
22 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ | ||
23 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ | ||
24 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ | ||
25 | |||
26 | /* MCi_STATUS register defines */ | ||
27 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ | ||
28 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ | ||
29 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ | ||
30 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ | ||
31 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ | ||
32 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ | ||
33 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ | ||
34 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ | ||
35 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ | ||
36 | #define MCACOD 0xffff /* MCA Error Code */ | ||
37 | |||
38 | /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ | ||
39 | #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ | ||
40 | #define MCACOD_SCRUBMSK 0xfff0 | ||
41 | #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ | ||
42 | #define MCACOD_DATA 0x0134 /* Data Load */ | ||
43 | #define MCACOD_INSTR 0x0150 /* Instruction Fetch */ | ||
44 | |||
45 | /* MCi_MISC register defines */ | ||
46 | #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) | ||
47 | #define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) | ||
48 | #define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */ | ||
49 | #define MCI_MISC_ADDR_LINEAR 1 /* linear address */ | ||
50 | #define MCI_MISC_ADDR_PHYS 2 /* physical address */ | ||
51 | #define MCI_MISC_ADDR_MEM 3 /* memory address */ | ||
52 | #define MCI_MISC_ADDR_GENERIC 7 /* generic */ | ||
53 | |||
54 | /* CTL2 register defines */ | ||
55 | #define MCI_CTL2_CMCI_EN (1ULL << 30) | ||
56 | #define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL | ||
57 | |||
58 | #define MCJ_CTX_MASK 3 | ||
59 | #define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK) | ||
60 | #define MCJ_CTX_RANDOM 0 /* inject context: random */ | ||
61 | #define MCJ_CTX_PROCESS 0x1 /* inject context: process */ | ||
62 | #define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */ | ||
63 | #define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */ | ||
64 | #define MCJ_EXCEPTION 0x8 /* raise as exception */ | ||
65 | #define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */ | ||
66 | |||
67 | /* Fields are zero when not available */ | 7 | /* Fields are zero when not available */ |
68 | struct mce { | 8 | struct mce { |
69 | __u64 status; | 9 | __u64 status; |
@@ -87,35 +27,8 @@ struct mce { | |||
87 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ | 27 | __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ |
88 | }; | 28 | }; |
89 | 29 | ||
90 | /* | ||
91 | * This structure contains all data related to the MCE log. Also | ||
92 | * carries a signature to make it easier to find from external | ||
93 | * debugging tools. Each entry is only valid when its finished flag | ||
94 | * is set. | ||
95 | */ | ||
96 | |||
97 | #define MCE_LOG_LEN 32 | ||
98 | |||
99 | struct mce_log { | ||
100 | char signature[12]; /* "MACHINECHECK" */ | ||
101 | unsigned len; /* = MCE_LOG_LEN */ | ||
102 | unsigned next; | ||
103 | unsigned flags; | ||
104 | unsigned recordlen; /* length of struct mce */ | ||
105 | struct mce entry[MCE_LOG_LEN]; | ||
106 | }; | ||
107 | |||
108 | #define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */ | ||
109 | |||
110 | #define MCE_LOG_SIGNATURE "MACHINECHECK" | ||
111 | |||
112 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) | 30 | #define MCE_GET_RECORD_LEN _IOR('M', 1, int) |
113 | #define MCE_GET_LOG_LEN _IOR('M', 2, int) | 31 | #define MCE_GET_LOG_LEN _IOR('M', 2, int) |
114 | #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) | 32 | #define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) |
115 | 33 | ||
116 | /* Software defined banks */ | ||
117 | #define MCE_EXTENDED_BANK 128 | ||
118 | #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 | ||
119 | #define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) | ||
120 | |||
121 | #endif /* _UAPI_ASM_X86_MCE_H */ | 34 | #endif /* _UAPI_ASM_X86_MCE_H */ |
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index e03a1e180e81..562a76d433c8 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c | |||
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg) | |||
20 | } | 20 | } |
21 | early_param("x2apic_phys", set_x2apic_phys_mode); | 21 | early_param("x2apic_phys", set_x2apic_phys_mode); |
22 | 22 | ||
23 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | 23 | static bool x2apic_fadt_phys(void) |
24 | { | 24 | { |
25 | if (x2apic_phys) | 25 | if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && |
26 | return x2apic_enabled(); | 26 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { |
27 | else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && | ||
28 | (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && | ||
29 | x2apic_enabled()) { | ||
30 | printk(KERN_DEBUG "System requires x2apic physical mode\n"); | 27 | printk(KERN_DEBUG "System requires x2apic physical mode\n"); |
31 | return 1; | 28 | return true; |
32 | } | 29 | } |
33 | else | 30 | return false; |
34 | return 0; | 31 | } |
32 | |||
33 | static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
34 | { | ||
35 | return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()); | ||
35 | } | 36 | } |
36 | 37 | ||
37 | static void | 38 | static void |
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void) | |||
82 | 83 | ||
83 | static int x2apic_phys_probe(void) | 84 | static int x2apic_phys_probe(void) |
84 | { | 85 | { |
85 | if (x2apic_mode && x2apic_phys) | 86 | if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) |
86 | return 1; | 87 | return 1; |
87 | 88 | ||
88 | return apic == &apic_x2apic_phys; | 89 | return apic == &apic_x2apic_phys; |
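Editor's note: pulling the FADT test into x2apic_fadt_phys() lets both the MADT OEM check and x2apic_phys_probe() honour a firmware request for physical destination mode (ACPI_FADT_APIC_PHYSICAL), not just the x2apic_phys= command-line override. A tiny user-space model of the simplified predicate; the three booleans stand in for x2apic_enabled(), the command-line flag and the FADT flag:

	#include <stdbool.h>
	#include <stdio.h>

	/* models: x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()) */
	static bool wants_x2apic_phys(bool enabled, bool cmdline_phys, bool fadt_phys)
	{
		return enabled && (cmdline_phys || fadt_phys);
	}

	int main(void)
	{
		/* x2apic on, no override, firmware sets ACPI_FADT_APIC_PHYSICAL */
		printf("%d\n", wants_x2apic_phys(true, false, true));	/* prints 1 */
		return 0;
	}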
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 027088f2f7dd..fb674fd3fc22 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | |||
748 | return; | 748 | return; |
749 | } | 749 | } |
750 | #endif | 750 | #endif |
751 | /* Kernel addresses are always protection faults: */ | ||
752 | if (address >= TASK_SIZE) | ||
753 | error_code |= PF_PROT; | ||
751 | 754 | ||
752 | if (unlikely(show_unhandled_signals)) | 755 | if (likely(show_unhandled_signals)) |
753 | show_signal_msg(regs, error_code, address, tsk); | 756 | show_signal_msg(regs, error_code, address, tsk); |
754 | 757 | ||
755 | /* Kernel addresses are always protection faults: */ | ||
756 | tsk->thread.cr2 = address; | 758 | tsk->thread.cr2 = address; |
757 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | 759 | tsk->thread.error_code = error_code; |
758 | tsk->thread.trap_nr = X86_TRAP_PF; | 760 | tsk->thread.trap_nr = X86_TRAP_PF; |
759 | 761 | ||
760 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); | 762 | force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); |
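Editor's note: two things change in this fault.c hunk. The PF_PROT classification of kernel addresses is now applied to error_code before show_signal_msg() runs, so the printed value matches what is saved in tsk->thread.error_code; the old code OR'ed the boolean only into the saved copy, after the message. The show_unhandled_signals branch hint also flips from unlikely() to likely(). PF_PROT happens to be bit 0, which is why the old expression error_code | (address >= TASK_SIZE) produced the same saved value. A stand-alone sketch of the fixup; TASK_SIZE here is an illustrative 64-bit split, not the kernel's macro:

	#include <stdio.h>
	#include <stdint.h>

	#define PF_PROT		(1 << 0)			/* x86 "protection fault" bit */
	#define TASK_SIZE	0x00007ffffffff000ULL		/* illustrative user/kernel split */

	int main(void)
	{
		uint64_t error_code = 0;			/* e.g. a not-present read */
		uint64_t address = 0xffff880000000000ULL;	/* a kernel address */

		if (address >= TASK_SIZE)	/* kernel addresses are always protection faults */
			error_code |= PF_PROT;

		printf("error_code = %#llx\n", (unsigned long long)error_code);	/* 0x1 */
		return 0;
	}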
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 2ead3c8a4c84..75c9a6a59697 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr) | |||
831 | if (pud_none(*pud)) | 831 | if (pud_none(*pud)) |
832 | return 0; | 832 | return 0; |
833 | 833 | ||
834 | if (pud_large(*pud)) | ||
835 | return pfn_valid(pud_pfn(*pud)); | ||
836 | |||
834 | pmd = pmd_offset(pud, addr); | 837 | pmd = pmd_offset(pud, addr); |
835 | if (pmd_none(*pmd)) | 838 | if (pmd_none(*pmd)) |
836 | return 0; | 839 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c index 6b0843c33877..e05c15777588 100644 --- a/drivers/gpu/drm/nouveau/core/core/falcon.c +++ b/drivers/gpu/drm/nouveau/core/core/falcon.c | |||
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object) | |||
73 | nv_debug(falcon, "data limit: %d\n", falcon->data.limit); | 73 | nv_debug(falcon, "data limit: %d\n", falcon->data.limit); |
74 | 74 | ||
75 | /* wait for 'uc halted' to be signalled before continuing */ | 75 | /* wait for 'uc halted' to be signalled before continuing */ |
76 | if (falcon->secret) { | 76 | if (falcon->secret && falcon->version < 4) { |
77 | nv_wait(falcon, 0x008, 0x00000010, 0x00000010); | 77 | if (!falcon->version) |
78 | nv_wait(falcon, 0x008, 0x00000010, 0x00000010); | ||
79 | else | ||
80 | nv_wait(falcon, 0x180, 0x80000000, 0); | ||
78 | nv_wo32(falcon, 0x004, 0x00000010); | 81 | nv_wo32(falcon, 0x004, 0x00000010); |
79 | } | 82 | } |
80 | 83 | ||
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c index f74c30aa33a0..48f06378d3f9 100644 --- a/drivers/gpu/drm/nouveau/core/core/subdev.c +++ b/drivers/gpu/drm/nouveau/core/core/subdev.c | |||
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent, | |||
99 | if (ret) | 99 | if (ret) |
100 | return ret; | 100 | return ret; |
101 | 101 | ||
102 | mutex_init(&subdev->mutex); | 102 | __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key); |
103 | subdev->name = subname; | 103 | subdev->name = subname; |
104 | 104 | ||
105 | if (parent) { | 105 | if (parent) { |
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 5982935ee23a..106bb19fdd9a 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h | |||
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend); | |||
50 | 50 | ||
51 | extern struct nouveau_ofuncs nouveau_object_ofuncs; | 51 | extern struct nouveau_ofuncs nouveau_object_ofuncs; |
52 | 52 | ||
53 | /* Don't allocate dynamically, because lockdep needs lock_class_keys to be in | ||
54 | * ".data". */ | ||
53 | struct nouveau_oclass { | 55 | struct nouveau_oclass { |
54 | u32 handle; | 56 | u32 handle; |
55 | struct nouveau_ofuncs *ofuncs; | 57 | struct nouveau_ofuncs * const ofuncs; |
56 | struct nouveau_omthds *omthds; | 58 | struct nouveau_omthds * const omthds; |
59 | struct lock_class_key lock_class_key; | ||
57 | }; | 60 | }; |
58 | 61 | ||
59 | #define nv_oclass(o) nv_object(o)->oclass | 62 | #define nv_oclass(o) nv_object(o)->oclass |
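Editor's note: the new comment is the key to this group of nouveau changes. Lockdep identifies lock classes by the address of a struct lock_class_key, and those keys must live in static storage (".data"), never in dynamically allocated objects. Embedding the key in the statically defined nouveau_oclass and initialising the mutex with __mutex_init() (subdev.c above), or keeping a file-scope key and calling lockdep_set_class() (nouveau_drm.c below), both satisfy that. A hedged, kernel-context sketch of the second pattern; the names are placeholders:

	#include <linux/mutex.h>
	#include <linux/slab.h>

	static struct lock_class_key demo_lock_key;	/* static => lives in .data */

	static struct mutex *demo_alloc_mutex(void)
	{
		struct mutex *m = kzalloc(sizeof(*m), GFP_KERNEL);

		if (m) {
			mutex_init(m);
			/* give the dynamically allocated mutex its own lockdep class */
			lockdep_set_class(m, &demo_lock_key);
		}
		return m;
	}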
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c index d6d16007ec1a..d62045f454b2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c | |||
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb) | |||
86 | return ret; | 86 | return ret; |
87 | } | 87 | } |
88 | 88 | ||
89 | if (!nouveau_mm_initialised(&pfb->tags) && tags) { | 89 | if (!nouveau_mm_initialised(&pfb->tags)) { |
90 | ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1); | 90 | ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1); |
91 | if (ret) | 91 | if (ret) |
92 | return ret; | 92 | return ret; |
93 | } | 93 | } |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 487cb8c6c204..eac236ed19b2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | |||
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb) | |||
99 | struct nouveau_bios *bios = nouveau_bios(device); | 99 | struct nouveau_bios *bios = nouveau_bios(device); |
100 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ | 100 | const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ |
101 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ | 101 | const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ |
102 | u32 size; | 102 | u32 size, tags = 0; |
103 | int ret; | 103 | int ret; |
104 | 104 | ||
105 | pfb->ram.size = nv_rd32(pfb, 0x10020c); | 105 | pfb->ram.size = nv_rd32(pfb, 0x10020c); |
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb) | |||
140 | return ret; | 140 | return ret; |
141 | 141 | ||
142 | pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; | 142 | pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1; |
143 | tags = nv_rd32(pfb, 0x100320); | ||
143 | break; | 144 | break; |
144 | } | 145 | } |
145 | 146 | ||
146 | return nv_rd32(pfb, 0x100320); | 147 | return tags; |
147 | } | 148 | } |
148 | 149 | ||
149 | static int | 150 | static int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 69d7b1d0b9d6..1699a9083a2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -28,6 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <core/engine.h> | 30 | #include <core/engine.h> |
31 | #include <linux/swiotlb.h> | ||
31 | 32 | ||
32 | #include <subdev/fb.h> | 33 | #include <subdev/fb.h> |
33 | #include <subdev/vm.h> | 34 | #include <subdev/vm.h> |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 8b090f1eb51d..5e7aef23825a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -245,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
245 | return 0; | 245 | return 0; |
246 | } | 246 | } |
247 | 247 | ||
248 | static struct lock_class_key drm_client_lock_class_key; | ||
249 | |||
248 | static int | 250 | static int |
249 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 251 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
250 | { | 252 | { |
@@ -256,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
256 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 258 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
257 | if (ret) | 259 | if (ret) |
258 | return ret; | 260 | return ret; |
261 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
259 | 262 | ||
260 | dev->dev_private = drm; | 263 | dev->dev_private = drm; |
261 | drm->dev = dev; | 264 | drm->dev = dev; |
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7a445666e71f..ee4cff534f10 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2909 | return -EINVAL; | 2909 | return -EINVAL; |
2910 | } | 2910 | } |
2911 | if (tiled) { | 2911 | if (tiled) { |
2912 | dst_offset = ib[idx+1]; | 2912 | dst_offset = radeon_get_ib_value(p, idx+1); |
2913 | dst_offset <<= 8; | 2913 | dst_offset <<= 8; |
2914 | 2914 | ||
2915 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2915 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
2916 | p->idx += count + 7; | 2916 | p->idx += count + 7; |
2917 | } else { | 2917 | } else { |
2918 | dst_offset = ib[idx+1]; | 2918 | dst_offset = radeon_get_ib_value(p, idx+1); |
2919 | dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; | 2919 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
2920 | 2920 | ||
2921 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2921 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2922 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2922 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
2954 | DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); | 2954 | DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); |
2955 | return -EINVAL; | 2955 | return -EINVAL; |
2956 | } | 2956 | } |
2957 | dst_offset = ib[idx+1]; | 2957 | dst_offset = radeon_get_ib_value(p, idx+1); |
2958 | dst_offset <<= 8; | 2958 | dst_offset <<= 8; |
2959 | dst2_offset = ib[idx+2]; | 2959 | dst2_offset = radeon_get_ib_value(p, idx+2); |
2960 | dst2_offset <<= 8; | 2960 | dst2_offset <<= 8; |
2961 | src_offset = ib[idx+8]; | 2961 | src_offset = radeon_get_ib_value(p, idx+8); |
2962 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 2962 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
2963 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 2963 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
2964 | dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", | 2964 | dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", |
2965 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 2965 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3014 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); | 3014 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); |
3015 | return -EINVAL; | 3015 | return -EINVAL; |
3016 | } | 3016 | } |
3017 | dst_offset = ib[idx+1]; | 3017 | dst_offset = radeon_get_ib_value(p, idx+1); |
3018 | dst_offset <<= 8; | 3018 | dst_offset <<= 8; |
3019 | dst2_offset = ib[idx+2]; | 3019 | dst2_offset = radeon_get_ib_value(p, idx+2); |
3020 | dst2_offset <<= 8; | 3020 | dst2_offset <<= 8; |
3021 | src_offset = ib[idx+8]; | 3021 | src_offset = radeon_get_ib_value(p, idx+8); |
3022 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 3022 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
3023 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3023 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3024 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", | 3024 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", |
3025 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3025 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3046 | /* detile bit */ | 3046 | /* detile bit */ |
3047 | if (idx_value & (1 << 31)) { | 3047 | if (idx_value & (1 << 31)) { |
3048 | /* tiled src, linear dst */ | 3048 | /* tiled src, linear dst */ |
3049 | src_offset = ib[idx+1]; | 3049 | src_offset = radeon_get_ib_value(p, idx+1); |
3050 | src_offset <<= 8; | 3050 | src_offset <<= 8; |
3051 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 3051 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
3052 | 3052 | ||
3053 | dst_offset = ib[idx+7]; | 3053 | dst_offset = radeon_get_ib_value(p, idx+7); |
3054 | dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3054 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3055 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 3055 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
3056 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 3056 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
3057 | } else { | 3057 | } else { |
3058 | /* linear src, tiled dst */ | 3058 | /* linear src, tiled dst */ |
3059 | src_offset = ib[idx+7]; | 3059 | src_offset = radeon_get_ib_value(p, idx+7); |
3060 | src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3060 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3061 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3061 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
3062 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3062 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
3063 | 3063 | ||
3064 | dst_offset = ib[idx+1]; | 3064 | dst_offset = radeon_get_ib_value(p, idx+1); |
3065 | dst_offset <<= 8; | 3065 | dst_offset <<= 8; |
3066 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3066 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
3067 | } | 3067 | } |
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3098 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); | 3098 | DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); |
3099 | return -EINVAL; | 3099 | return -EINVAL; |
3100 | } | 3100 | } |
3101 | dst_offset = ib[idx+1]; | 3101 | dst_offset = radeon_get_ib_value(p, idx+1); |
3102 | dst_offset <<= 8; | 3102 | dst_offset <<= 8; |
3103 | dst2_offset = ib[idx+2]; | 3103 | dst2_offset = radeon_get_ib_value(p, idx+2); |
3104 | dst2_offset <<= 8; | 3104 | dst2_offset <<= 8; |
3105 | src_offset = ib[idx+8]; | 3105 | src_offset = radeon_get_ib_value(p, idx+8); |
3106 | src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; | 3106 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; |
3107 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3107 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3108 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", | 3108 | dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", |
3109 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3109 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3135 | /* detile bit */ | 3135 | /* detile bit */ |
3136 | if (idx_value & (1 << 31)) { | 3136 | if (idx_value & (1 << 31)) { |
3137 | /* tiled src, linear dst */ | 3137 | /* tiled src, linear dst */ |
3138 | src_offset = ib[idx+1]; | 3138 | src_offset = radeon_get_ib_value(p, idx+1); |
3139 | src_offset <<= 8; | 3139 | src_offset <<= 8; |
3140 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 3140 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
3141 | 3141 | ||
3142 | dst_offset = ib[idx+7]; | 3142 | dst_offset = radeon_get_ib_value(p, idx+7); |
3143 | dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3143 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3144 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 3144 | ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
3145 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 3145 | ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
3146 | } else { | 3146 | } else { |
3147 | /* linear src, tiled dst */ | 3147 | /* linear src, tiled dst */ |
3148 | src_offset = ib[idx+7]; | 3148 | src_offset = radeon_get_ib_value(p, idx+7); |
3149 | src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; | 3149 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; |
3150 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 3150 | ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
3151 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 3151 | ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
3152 | 3152 | ||
3153 | dst_offset = ib[idx+1]; | 3153 | dst_offset = radeon_get_ib_value(p, idx+1); |
3154 | dst_offset <<= 8; | 3154 | dst_offset <<= 8; |
3155 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 3155 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
3156 | } | 3156 | } |
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3176 | switch (misc) { | 3176 | switch (misc) { |
3177 | case 0: | 3177 | case 0: |
3178 | /* L2L, byte */ | 3178 | /* L2L, byte */ |
3179 | src_offset = ib[idx+2]; | 3179 | src_offset = radeon_get_ib_value(p, idx+2); |
3180 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3180 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
3181 | dst_offset = ib[idx+1]; | 3181 | dst_offset = radeon_get_ib_value(p, idx+1); |
3182 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 3182 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
3183 | if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { | 3183 | if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { |
3184 | dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", | 3184 | dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", |
3185 | src_offset + count, radeon_bo_size(src_reloc->robj)); | 3185 | src_offset + count, radeon_bo_size(src_reloc->robj)); |
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3216 | DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); | 3216 | DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); |
3217 | return -EINVAL; | 3217 | return -EINVAL; |
3218 | } | 3218 | } |
3219 | dst_offset = ib[idx+1]; | 3219 | dst_offset = radeon_get_ib_value(p, idx+1); |
3220 | dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3220 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
3221 | dst2_offset = ib[idx+2]; | 3221 | dst2_offset = radeon_get_ib_value(p, idx+2); |
3222 | dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32; | 3222 | dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32; |
3223 | src_offset = ib[idx+3]; | 3223 | src_offset = radeon_get_ib_value(p, idx+3); |
3224 | src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 3224 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
3225 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3225 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3226 | dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", | 3226 | dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", |
3227 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3227 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3251 | } | 3251 | } |
3252 | } else { | 3252 | } else { |
3253 | /* L2L, dw */ | 3253 | /* L2L, dw */ |
3254 | src_offset = ib[idx+2]; | 3254 | src_offset = radeon_get_ib_value(p, idx+2); |
3255 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 3255 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
3256 | dst_offset = ib[idx+1]; | 3256 | dst_offset = radeon_get_ib_value(p, idx+1); |
3257 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 3257 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
3258 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { | 3258 | if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { |
3259 | dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", | 3259 | dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", |
3260 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); | 3260 | src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); |
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) | |||
3279 | DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); | 3279 | DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); |
3280 | return -EINVAL; | 3280 | return -EINVAL; |
3281 | } | 3281 | } |
3282 | dst_offset = ib[idx+1]; | 3282 | dst_offset = radeon_get_ib_value(p, idx+1); |
3283 | dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; | 3283 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; |
3284 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { | 3284 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { |
3285 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", | 3285 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", |
3286 | dst_offset, radeon_bo_size(dst_reloc->robj)); | 3286 | dst_offset, radeon_bo_size(dst_reloc->robj)); |
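Editor's note: throughout the evergreen (and, below, r600) DMA command-stream parsers, packet dwords are now read through radeon_get_ib_value() instead of indexing the local ib[] copy directly, while the relocation fixups still write to ib[]. The offsets themselves are assembled from a low dword plus the low byte of a second dword, i.e. 40-bit GPU addresses. A stand-alone sketch of that assembly with made-up dword values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t dw_lo = 0x12345678;	/* low 32 bits of the offset */
		uint32_t dw_hi = 0x000000ab;	/* only bits 7:0 carry address bits 39:32 */
		uint64_t offset;

		offset = dw_lo;
		offset |= (uint64_t)(dw_hi & 0xff) << 32;

		printf("offset = 0x%llx\n", (unsigned long long)offset);	/* 0xab12345678 */
		return 0;
	}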
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 69ec24ab8d63..9b2512bf1a46 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2623 | return -EINVAL; | 2623 | return -EINVAL; |
2624 | } | 2624 | } |
2625 | if (tiled) { | 2625 | if (tiled) { |
2626 | dst_offset = ib[idx+1]; | 2626 | dst_offset = radeon_get_ib_value(p, idx+1); |
2627 | dst_offset <<= 8; | 2627 | dst_offset <<= 8; |
2628 | 2628 | ||
2629 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2629 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
2630 | p->idx += count + 5; | 2630 | p->idx += count + 5; |
2631 | } else { | 2631 | } else { |
2632 | dst_offset = ib[idx+1]; | 2632 | dst_offset = radeon_get_ib_value(p, idx+1); |
2633 | dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; | 2633 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; |
2634 | 2634 | ||
2635 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2635 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2636 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2636 | ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2658 | /* detile bit */ | 2658 | /* detile bit */ |
2659 | if (idx_value & (1 << 31)) { | 2659 | if (idx_value & (1 << 31)) { |
2660 | /* tiled src, linear dst */ | 2660 | /* tiled src, linear dst */ |
2661 | src_offset = ib[idx+1]; | 2661 | src_offset = radeon_get_ib_value(p, idx+1); |
2662 | src_offset <<= 8; | 2662 | src_offset <<= 8; |
2663 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); | 2663 | ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); |
2664 | 2664 | ||
2665 | dst_offset = ib[idx+5]; | 2665 | dst_offset = radeon_get_ib_value(p, idx+5); |
2666 | dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 2666 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
2667 | ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2667 | ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2668 | ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; | 2668 | ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; |
2669 | } else { | 2669 | } else { |
2670 | /* linear src, tiled dst */ | 2670 | /* linear src, tiled dst */ |
2671 | src_offset = ib[idx+5]; | 2671 | src_offset = radeon_get_ib_value(p, idx+5); |
2672 | src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; | 2672 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; |
2673 | ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2673 | ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
2674 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2674 | ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
2675 | 2675 | ||
2676 | dst_offset = ib[idx+1]; | 2676 | dst_offset = radeon_get_ib_value(p, idx+1); |
2677 | dst_offset <<= 8; | 2677 | dst_offset <<= 8; |
2678 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); | 2678 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); |
2679 | } | 2679 | } |
2680 | p->idx += 7; | 2680 | p->idx += 7; |
2681 | } else { | 2681 | } else { |
2682 | if (p->family >= CHIP_RV770) { | 2682 | if (p->family >= CHIP_RV770) { |
2683 | src_offset = ib[idx+2]; | 2683 | src_offset = radeon_get_ib_value(p, idx+2); |
2684 | src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; | 2684 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; |
2685 | dst_offset = ib[idx+1]; | 2685 | dst_offset = radeon_get_ib_value(p, idx+1); |
2686 | dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 2686 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
2687 | 2687 | ||
2688 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2688 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2689 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2689 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2691 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; | 2691 | ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; |
2692 | p->idx += 5; | 2692 | p->idx += 5; |
2693 | } else { | 2693 | } else { |
2694 | src_offset = ib[idx+2]; | 2694 | src_offset = radeon_get_ib_value(p, idx+2); |
2695 | src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; | 2695 | src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; |
2696 | dst_offset = ib[idx+1]; | 2696 | dst_offset = radeon_get_ib_value(p, idx+1); |
2697 | dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16; | 2697 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; |
2698 | 2698 | ||
2699 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); | 2699 | ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); |
2700 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); | 2700 | ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); |
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) | |||
2724 | DRM_ERROR("bad DMA_PACKET_WRITE\n"); | 2724 | DRM_ERROR("bad DMA_PACKET_WRITE\n"); |
2725 | return -EINVAL; | 2725 | return -EINVAL; |
2726 | } | 2726 | } |
2727 | dst_offset = ib[idx+1]; | 2727 | dst_offset = radeon_get_ib_value(p, idx+1); |
2728 | dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; | 2728 | dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; |
2729 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { | 2729 | if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { |
2730 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", | 2730 | dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", |
2731 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); | 2731 | dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1d8ff2f850ba..93f760e27a92 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <drm/radeon_drm.h> | 38 | #include <drm/radeon_drm.h> |
39 | #include <linux/seq_file.h> | 39 | #include <linux/seq_file.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/swiotlb.h> | ||
41 | #include "radeon_reg.h" | 42 | #include "radeon_reg.h" |
42 | #include "radeon.h" | 43 | #include "radeon.h" |
43 | 44 | ||
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 358cd7ee905b..7cd74e29cbc8 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void) | |||
162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
163 | #define DELTA(x,y) ((y)-(x)) | 163 | #define DELTA(x,y) ((y)-(x)) |
164 | #define TIME_NAME "PCC" | 164 | #define TIME_NAME "PCC" |
165 | #elif defined(CONFIG_MN10300) | 165 | #elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) |
166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
167 | #define DELTA(x, y) ((x) - (y)) | 167 | #define DELTA(x, y) ((x) - (y)) |
168 | #define TIME_NAME "TSC" | 168 | #define TIME_NAME "TSC" |
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 56d3f697e0c7..0035c01660b6 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #include "atl1c.h" | 22 | #include "atl1c.h" |
23 | 23 | ||
24 | #define ATL1C_DRV_VERSION "1.0.1.0-NAPI" | 24 | #define ATL1C_DRV_VERSION "1.0.1.1-NAPI" |
25 | char atl1c_driver_name[] = "atl1c"; | 25 | char atl1c_driver_name[] = "atl1c"; |
26 | char atl1c_driver_version[] = ATL1C_DRV_VERSION; | 26 | char atl1c_driver_version[] = ATL1C_DRV_VERSION; |
27 | 27 | ||
@@ -1652,6 +1652,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) | |||
1652 | u16 num_alloc = 0; | 1652 | u16 num_alloc = 0; |
1653 | u16 rfd_next_to_use, next_next; | 1653 | u16 rfd_next_to_use, next_next; |
1654 | struct atl1c_rx_free_desc *rfd_desc; | 1654 | struct atl1c_rx_free_desc *rfd_desc; |
1655 | dma_addr_t mapping; | ||
1655 | 1656 | ||
1656 | next_next = rfd_next_to_use = rfd_ring->next_to_use; | 1657 | next_next = rfd_next_to_use = rfd_ring->next_to_use; |
1657 | if (++next_next == rfd_ring->count) | 1658 | if (++next_next == rfd_ring->count) |
@@ -1678,9 +1679,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) | |||
1678 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 1679 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); |
1679 | buffer_info->skb = skb; | 1680 | buffer_info->skb = skb; |
1680 | buffer_info->length = adapter->rx_buffer_len; | 1681 | buffer_info->length = adapter->rx_buffer_len; |
1681 | buffer_info->dma = pci_map_single(pdev, vir_addr, | 1682 | mapping = pci_map_single(pdev, vir_addr, |
1682 | buffer_info->length, | 1683 | buffer_info->length, |
1683 | PCI_DMA_FROMDEVICE); | 1684 | PCI_DMA_FROMDEVICE); |
1685 | if (unlikely(pci_dma_mapping_error(pdev, mapping))) { | ||
1686 | dev_kfree_skb(skb); | ||
1687 | buffer_info->skb = NULL; | ||
1688 | buffer_info->length = 0; | ||
1689 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); | ||
1690 | netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); | ||
1691 | break; | ||
1692 | } | ||
1693 | buffer_info->dma = mapping; | ||
1684 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, | 1694 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, |
1685 | ATL1C_PCIMAP_FROMDEVICE); | 1695 | ATL1C_PCIMAP_FROMDEVICE); |
1686 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | 1696 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
@@ -2015,7 +2025,29 @@ check_sum: | |||
2015 | return 0; | 2025 | return 0; |
2016 | } | 2026 | } |
2017 | 2027 | ||
2018 | static void atl1c_tx_map(struct atl1c_adapter *adapter, | 2028 | static void atl1c_tx_rollback(struct atl1c_adapter *adpt, |
2029 | struct atl1c_tpd_desc *first_tpd, | ||
2030 | enum atl1c_trans_queue type) | ||
2031 | { | ||
2032 | struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type]; | ||
2033 | struct atl1c_buffer *buffer_info; | ||
2034 | struct atl1c_tpd_desc *tpd; | ||
2035 | u16 first_index, index; | ||
2036 | |||
2037 | first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; | ||
2038 | index = first_index; | ||
2039 | while (index != tpd_ring->next_to_use) { | ||
2040 | tpd = ATL1C_TPD_DESC(tpd_ring, index); | ||
2041 | buffer_info = &tpd_ring->buffer_info[index]; | ||
2042 | atl1c_clean_buffer(adpt->pdev, buffer_info, 0); | ||
2043 | memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); | ||
2044 | if (++index == tpd_ring->count) | ||
2045 | index = 0; | ||
2046 | } | ||
2047 | tpd_ring->next_to_use = first_index; | ||
2048 | } | ||
2049 | |||
2050 | static int atl1c_tx_map(struct atl1c_adapter *adapter, | ||
2019 | struct sk_buff *skb, struct atl1c_tpd_desc *tpd, | 2051 | struct sk_buff *skb, struct atl1c_tpd_desc *tpd, |
2020 | enum atl1c_trans_queue type) | 2052 | enum atl1c_trans_queue type) |
2021 | { | 2053 | { |
@@ -2040,7 +2072,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2040 | buffer_info->length = map_len; | 2072 | buffer_info->length = map_len; |
2041 | buffer_info->dma = pci_map_single(adapter->pdev, | 2073 | buffer_info->dma = pci_map_single(adapter->pdev, |
2042 | skb->data, hdr_len, PCI_DMA_TODEVICE); | 2074 | skb->data, hdr_len, PCI_DMA_TODEVICE); |
2043 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 2075 | if (unlikely(pci_dma_mapping_error(adapter->pdev, |
2076 | buffer_info->dma))) | ||
2077 | goto err_dma; | ||
2078 | |||
2044 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, | 2079 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, |
2045 | ATL1C_PCIMAP_TODEVICE); | 2080 | ATL1C_PCIMAP_TODEVICE); |
2046 | mapped_len += map_len; | 2081 | mapped_len += map_len; |
@@ -2062,6 +2097,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2062 | buffer_info->dma = | 2097 | buffer_info->dma = |
2063 | pci_map_single(adapter->pdev, skb->data + mapped_len, | 2098 | pci_map_single(adapter->pdev, skb->data + mapped_len, |
2064 | buffer_info->length, PCI_DMA_TODEVICE); | 2099 | buffer_info->length, PCI_DMA_TODEVICE); |
2100 | if (unlikely(pci_dma_mapping_error(adapter->pdev, | ||
2101 | buffer_info->dma))) | ||
2102 | goto err_dma; | ||
2103 | |||
2065 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 2104 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); |
2066 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, | 2105 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, |
2067 | ATL1C_PCIMAP_TODEVICE); | 2106 | ATL1C_PCIMAP_TODEVICE); |
@@ -2083,6 +2122,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2083 | frag, 0, | 2122 | frag, 0, |
2084 | buffer_info->length, | 2123 | buffer_info->length, |
2085 | DMA_TO_DEVICE); | 2124 | DMA_TO_DEVICE); |
2125 | if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) | ||
2126 | goto err_dma; | ||
2127 | |||
2086 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); | 2128 | ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); |
2087 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, | 2129 | ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, |
2088 | ATL1C_PCIMAP_TODEVICE); | 2130 | ATL1C_PCIMAP_TODEVICE); |
@@ -2095,6 +2137,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter, | |||
2095 | /* The last buffer info contain the skb address, | 2137 | /* The last buffer info contain the skb address, |
2096 | so it will be free after unmap */ | 2138 | so it will be free after unmap */ |
2097 | buffer_info->skb = skb; | 2139 | buffer_info->skb = skb; |
2140 | |||
2141 | return 0; | ||
2142 | |||
2143 | err_dma: | ||
2144 | buffer_info->dma = 0; | ||
2145 | buffer_info->length = 0; | ||
2146 | return -1; | ||
2098 | } | 2147 | } |
2099 | 2148 | ||
2100 | static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, | 2149 | static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, |
@@ -2157,10 +2206,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, | |||
2157 | if (skb_network_offset(skb) != ETH_HLEN) | 2206 | if (skb_network_offset(skb) != ETH_HLEN) |
2158 | tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ | 2207 | tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ |
2159 | 2208 | ||
2160 | atl1c_tx_map(adapter, skb, tpd, type); | 2209 | if (atl1c_tx_map(adapter, skb, tpd, type) < 0) { |
2161 | atl1c_tx_queue(adapter, skb, tpd, type); | 2210 | netif_info(adapter, tx_done, adapter->netdev, |
2211 | "tx-skb droppted due to dma error\n"); | ||
2212 | /* roll back tpd/buffer */ | ||
2213 | atl1c_tx_rollback(adapter, tpd, type); | ||
2214 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
2215 | dev_kfree_skb(skb); | ||
2216 | } else { | ||
2217 | atl1c_tx_queue(adapter, skb, tpd, type); | ||
2218 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
2219 | } | ||
2162 | 2220 | ||
2163 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
2164 | return NETDEV_TX_OK; | 2221 | return NETDEV_TX_OK; |
2165 | } | 2222 | } |
2166 | 2223 | ||
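Editor's note: the atl1c change adds what was previously missing entirely: every DMA mapping is now checked with pci_dma_mapping_error()/dma_mapping_error(). On failure the RX path frees the skb and marks the buffer free again, and the TX path unwinds the descriptors it already filled via the new atl1c_tx_rollback() before dropping the frame. The core check-and-bail step, as a hedged kernel-context sketch (the caller is assumed to roll back earlier mappings on error):

	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int demo_map_tx_buf(struct pci_dev *pdev, struct sk_buff *skb,
				   unsigned int len, dma_addr_t *dma)
	{
		*dma = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
		if (unlikely(pci_dma_mapping_error(pdev, *dma)))
			return -EIO;	/* caller rolls back and drops the skb */
		return 0;		/* only commit a good mapping */
	}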
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f771ddfba646..a5edac8df67b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -504,13 +504,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
504 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, | 504 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, |
505 | tpa_info->parsing_flags, len_on_bd); | 505 | tpa_info->parsing_flags, len_on_bd); |
506 | 506 | ||
507 | /* set for GRO */ | 507 | skb_shinfo(skb)->gso_type = |
508 | if (fp->mode == TPA_MODE_GRO) | 508 | (GET_FLAG(tpa_info->parsing_flags, |
509 | skb_shinfo(skb)->gso_type = | 509 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == |
510 | (GET_FLAG(tpa_info->parsing_flags, | 510 | PRS_FLAG_OVERETH_IPV6) ? |
511 | PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == | 511 | SKB_GSO_TCPV6 : SKB_GSO_TCPV4; |
512 | PRS_FLAG_OVERETH_IPV6) ? | ||
513 | SKB_GSO_TCPV6 : SKB_GSO_TCPV4; | ||
514 | } | 512 | } |
515 | 513 | ||
516 | 514 | ||
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index a9b0830fb39d..b9d4bb9530e5 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget) | |||
693 | * get notified when new packets arrive. | 693 | * get notified when new packets arrive. |
694 | */ | 694 | */ |
695 | macb_writel(bp, IER, MACB_RX_INT_FLAGS); | 695 | macb_writel(bp, IER, MACB_RX_INT_FLAGS); |
696 | |||
697 | /* Packets received while interrupts were disabled */ | ||
698 | status = macb_readl(bp, RSR); | ||
699 | if (unlikely(status)) | ||
700 | napi_reschedule(napi); | ||
696 | } | 701 | } |
697 | 702 | ||
698 | /* TODO: Handle errors */ | 703 | /* TODO: Handle errors */ |
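Editor's note: this closes the classic NAPI race. Frames that arrive after the poll loop decides it is done, but before RX interrupts are re-enabled, would otherwise sit in the ring until the next unrelated interrupt; re-reading the receive status (RSR) after the IER write and rescheduling NAPI if anything is pending covers that window. The generic shape of the pattern, as a hedged fragment; rx_pending() and enable_rx_irq() are placeholders for the driver's own register accessors:

	if (work_done < budget) {
		napi_complete(napi);
		enable_rx_irq(priv);			/* e.g. macb's IER write above */

		/* frames may have landed while RX interrupts were still masked */
		if (rx_pending(priv))
			napi_reschedule(napi);
	}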
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 20a5af6d87d0..b3e3294cfe53 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1401,6 +1401,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, | |||
1401 | /* set gso_size to avoid messing up TCP MSS */ | 1401 | /* set gso_size to avoid messing up TCP MSS */ |
1402 | skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), | 1402 | skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), |
1403 | IXGBE_CB(skb)->append_cnt); | 1403 | IXGBE_CB(skb)->append_cnt); |
1404 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | ||
1404 | } | 1405 | } |
1405 | 1406 | ||
1406 | static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, | 1407 | static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 6f82812d0fab..09aa310b6194 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -986,8 +986,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, | |||
986 | th->seq = htonl(seq_number); | 986 | th->seq = htonl(seq_number); |
987 | length = skb->len; | 987 | length = skb->len; |
988 | 988 | ||
989 | if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) | 989 | if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) { |
990 | skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1); | 990 | skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1); |
991 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
992 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | ||
993 | else | ||
994 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | ||
995 | } | ||
991 | 996 | ||
992 | if (vid != 0xffff) | 997 | if (vid != 0xffff) |
993 | __vlan_hwaccel_put_tag(skb, vid); | 998 | __vlan_hwaccel_put_tag(skb, vid); |
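Editor's note: same theme as the bnx2x and ixgbe hunks above. When a driver hands the stack an LRO/RSC-aggregated skb with a non-zero gso_size, it must also set gso_type, since the segmentation path relies on it to know how to resegment the packet (for example when it is forwarded). qlcnic picks the type from the packet's protocol; a hedged kernel-context fragment of that selection, where "mss" stands in for the MSS reported by the adapter (qlcnic_get_lro_sts_mss() above):

	/* aggregated super-frame: advertise both the size and the type of the
	 * segments it was built from, so GSO can split it again */
	skb_shinfo(skb)->gso_size = mss;
	skb_shinfo(skb)->gso_type = (skb->protocol == htons(ETH_P_IPV6)) ?
				    SKB_GSO_TCPV6 : SKB_GSO_TCPV4;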
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 11702324a071..998974f78742 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -450,7 +450,6 @@ enum rtl8168_registers { | |||
450 | #define PWM_EN (1 << 22) | 450 | #define PWM_EN (1 << 22) |
451 | #define RXDV_GATED_EN (1 << 19) | 451 | #define RXDV_GATED_EN (1 << 19) |
452 | #define EARLY_TALLY_EN (1 << 16) | 452 | #define EARLY_TALLY_EN (1 << 16) |
453 | #define FORCE_CLK (1 << 15) /* force clock request */ | ||
454 | }; | 453 | }; |
455 | 454 | ||
456 | enum rtl_register_content { | 455 | enum rtl_register_content { |
@@ -514,7 +513,6 @@ enum rtl_register_content { | |||
514 | PMEnable = (1 << 0), /* Power Management Enable */ | 513 | PMEnable = (1 << 0), /* Power Management Enable */ |
515 | 514 | ||
516 | /* Config2 register p. 25 */ | 515 | /* Config2 register p. 25 */ |
517 | ClkReqEn = (1 << 7), /* Clock Request Enable */ | ||
518 | MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ | 516 | MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ |
519 | PCI_Clock_66MHz = 0x01, | 517 | PCI_Clock_66MHz = 0x01, |
520 | PCI_Clock_33MHz = 0x00, | 518 | PCI_Clock_33MHz = 0x00, |
@@ -535,7 +533,6 @@ enum rtl_register_content { | |||
535 | Spi_en = (1 << 3), | 533 | Spi_en = (1 << 3), |
536 | LanWake = (1 << 1), /* LanWake enable/disable */ | 534 | LanWake = (1 << 1), /* LanWake enable/disable */ |
537 | PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ | 535 | PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ |
538 | ASPM_en = (1 << 0), /* ASPM enable */ | ||
539 | 536 | ||
540 | /* TBICSR p.28 */ | 537 | /* TBICSR p.28 */ |
541 | TBIReset = 0x80000000, | 538 | TBIReset = 0x80000000, |
@@ -684,7 +681,6 @@ enum features { | |||
684 | RTL_FEATURE_WOL = (1 << 0), | 681 | RTL_FEATURE_WOL = (1 << 0), |
685 | RTL_FEATURE_MSI = (1 << 1), | 682 | RTL_FEATURE_MSI = (1 << 1), |
686 | RTL_FEATURE_GMII = (1 << 2), | 683 | RTL_FEATURE_GMII = (1 << 2), |
687 | RTL_FEATURE_FW_LOADED = (1 << 3), | ||
688 | }; | 684 | }; |
689 | 685 | ||
690 | struct rtl8169_counters { | 686 | struct rtl8169_counters { |
@@ -2389,10 +2385,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp) | |||
2389 | struct rtl_fw *rtl_fw = tp->rtl_fw; | 2385 | struct rtl_fw *rtl_fw = tp->rtl_fw; |
2390 | 2386 | ||
2391 | /* TODO: release firmware once rtl_phy_write_fw signals failures. */ | 2387 | /* TODO: release firmware once rtl_phy_write_fw signals failures. */ |
2392 | if (!IS_ERR_OR_NULL(rtl_fw)) { | 2388 | if (!IS_ERR_OR_NULL(rtl_fw)) |
2393 | rtl_phy_write_fw(tp, rtl_fw); | 2389 | rtl_phy_write_fw(tp, rtl_fw); |
2394 | tp->features |= RTL_FEATURE_FW_LOADED; | ||
2395 | } | ||
2396 | } | 2390 | } |
2397 | 2391 | ||
2398 | static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) | 2392 | static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) |
@@ -2403,31 +2397,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) | |||
2403 | rtl_apply_firmware(tp); | 2397 | rtl_apply_firmware(tp); |
2404 | } | 2398 | } |
2405 | 2399 | ||
2406 | static void r810x_aldps_disable(struct rtl8169_private *tp) | ||
2407 | { | ||
2408 | rtl_writephy(tp, 0x1f, 0x0000); | ||
2409 | rtl_writephy(tp, 0x18, 0x0310); | ||
2410 | msleep(100); | ||
2411 | } | ||
2412 | |||
2413 | static void r810x_aldps_enable(struct rtl8169_private *tp) | ||
2414 | { | ||
2415 | if (!(tp->features & RTL_FEATURE_FW_LOADED)) | ||
2416 | return; | ||
2417 | |||
2418 | rtl_writephy(tp, 0x1f, 0x0000); | ||
2419 | rtl_writephy(tp, 0x18, 0x8310); | ||
2420 | } | ||
2421 | |||
2422 | static void r8168_aldps_enable_1(struct rtl8169_private *tp) | ||
2423 | { | ||
2424 | if (!(tp->features & RTL_FEATURE_FW_LOADED)) | ||
2425 | return; | ||
2426 | |||
2427 | rtl_writephy(tp, 0x1f, 0x0000); | ||
2428 | rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000); | ||
2429 | } | ||
2430 | |||
2431 | static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) | 2400 | static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) |
2432 | { | 2401 | { |
2433 | static const struct phy_reg phy_reg_init[] = { | 2402 | static const struct phy_reg phy_reg_init[] = { |
@@ -3218,8 +3187,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) | |||
3218 | rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); | 3187 | rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); |
3219 | rtl_writephy(tp, 0x1f, 0x0000); | 3188 | rtl_writephy(tp, 0x1f, 0x0000); |
3220 | 3189 | ||
3221 | r8168_aldps_enable_1(tp); | ||
3222 | |||
3223 | /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ | 3190 | /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ |
3224 | rtl_rar_exgmac_set(tp, tp->dev->dev_addr); | 3191 | rtl_rar_exgmac_set(tp, tp->dev->dev_addr); |
3225 | } | 3192 | } |
@@ -3294,8 +3261,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) | |||
3294 | rtl_writephy(tp, 0x05, 0x8b85); | 3261 | rtl_writephy(tp, 0x05, 0x8b85); |
3295 | rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); | 3262 | rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); |
3296 | rtl_writephy(tp, 0x1f, 0x0000); | 3263 | rtl_writephy(tp, 0x1f, 0x0000); |
3297 | |||
3298 | r8168_aldps_enable_1(tp); | ||
3299 | } | 3264 | } |
3300 | 3265 | ||
3301 | static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) | 3266 | static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) |
@@ -3303,8 +3268,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) | |||
3303 | rtl_apply_firmware(tp); | 3268 | rtl_apply_firmware(tp); |
3304 | 3269 | ||
3305 | rtl8168f_hw_phy_config(tp); | 3270 | rtl8168f_hw_phy_config(tp); |
3306 | |||
3307 | r8168_aldps_enable_1(tp); | ||
3308 | } | 3271 | } |
3309 | 3272 | ||
3310 | static void rtl8411_hw_phy_config(struct rtl8169_private *tp) | 3273 | static void rtl8411_hw_phy_config(struct rtl8169_private *tp) |
@@ -3402,8 +3365,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp) | |||
3402 | rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); | 3365 | rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); |
3403 | rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); | 3366 | rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); |
3404 | rtl_writephy(tp, 0x1f, 0x0000); | 3367 | rtl_writephy(tp, 0x1f, 0x0000); |
3405 | |||
3406 | r8168_aldps_enable_1(tp); | ||
3407 | } | 3368 | } |
3408 | 3369 | ||
3409 | static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) | 3370 | static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) |
@@ -3489,19 +3450,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) | |||
3489 | }; | 3450 | }; |
3490 | 3451 | ||
3491 | /* Disable ALDPS before ram code */ | 3452 | /* Disable ALDPS before ram code */ |
3492 | r810x_aldps_disable(tp); | 3453 | rtl_writephy(tp, 0x1f, 0x0000); |
3454 | rtl_writephy(tp, 0x18, 0x0310); | ||
3455 | msleep(100); | ||
3493 | 3456 | ||
3494 | rtl_apply_firmware(tp); | 3457 | rtl_apply_firmware(tp); |
3495 | 3458 | ||
3496 | rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | 3459 | rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); |
3497 | |||
3498 | r810x_aldps_enable(tp); | ||
3499 | } | 3460 | } |
3500 | 3461 | ||
3501 | static void rtl8402_hw_phy_config(struct rtl8169_private *tp) | 3462 | static void rtl8402_hw_phy_config(struct rtl8169_private *tp) |
3502 | { | 3463 | { |
3503 | /* Disable ALDPS before setting firmware */ | 3464 | /* Disable ALDPS before setting firmware */ |
3504 | r810x_aldps_disable(tp); | 3465 | rtl_writephy(tp, 0x1f, 0x0000); |
3466 | rtl_writephy(tp, 0x18, 0x0310); | ||
3467 | msleep(20); | ||
3505 | 3468 | ||
3506 | rtl_apply_firmware(tp); | 3469 | rtl_apply_firmware(tp); |
3507 | 3470 | ||
@@ -3511,8 +3474,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp) | |||
3511 | rtl_writephy(tp, 0x10, 0x401f); | 3474 | rtl_writephy(tp, 0x10, 0x401f); |
3512 | rtl_writephy(tp, 0x19, 0x7030); | 3475 | rtl_writephy(tp, 0x19, 0x7030); |
3513 | rtl_writephy(tp, 0x1f, 0x0000); | 3476 | rtl_writephy(tp, 0x1f, 0x0000); |
3514 | |||
3515 | r810x_aldps_enable(tp); | ||
3516 | } | 3477 | } |
3517 | 3478 | ||
3518 | static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) | 3479 | static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) |
@@ -3525,7 +3486,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) | |||
3525 | }; | 3486 | }; |
3526 | 3487 | ||
3527 | /* Disable ALDPS before ram code */ | 3488 | /* Disable ALDPS before ram code */ |
3528 | r810x_aldps_disable(tp); | 3489 | rtl_writephy(tp, 0x1f, 0x0000); |
3490 | rtl_writephy(tp, 0x18, 0x0310); | ||
3491 | msleep(100); | ||
3529 | 3492 | ||
3530 | rtl_apply_firmware(tp); | 3493 | rtl_apply_firmware(tp); |
3531 | 3494 | ||
@@ -3533,8 +3496,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) | |||
3533 | rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); | 3496 | rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); |
3534 | 3497 | ||
3535 | rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); | 3498 | rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); |
3536 | |||
3537 | r810x_aldps_enable(tp); | ||
3538 | } | 3499 | } |
3539 | 3500 | ||
3540 | static void rtl_hw_phy_config(struct net_device *dev) | 3501 | static void rtl_hw_phy_config(struct net_device *dev) |
@@ -5051,6 +5012,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) | |||
5051 | 5012 | ||
5052 | RTL_W8(MaxTxPacketSize, EarlySize); | 5013 | RTL_W8(MaxTxPacketSize, EarlySize); |
5053 | 5014 | ||
5015 | rtl_disable_clock_request(pdev); | ||
5016 | |||
5054 | RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); | 5017 | RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); |
5055 | RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); | 5018 | RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); |
5056 | 5019 | ||
@@ -5059,8 +5022,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) | |||
5059 | 5022 | ||
5060 | RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); | 5023 | RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); |
5061 | RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); | 5024 | RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); |
5062 | RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en); | 5025 | RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); |
5063 | RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); | ||
5064 | } | 5026 | } |
5065 | 5027 | ||
5066 | static void rtl_hw_start_8168f(struct rtl8169_private *tp) | 5028 | static void rtl_hw_start_8168f(struct rtl8169_private *tp) |
@@ -5085,12 +5047,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) | |||
5085 | 5047 | ||
5086 | RTL_W8(MaxTxPacketSize, EarlySize); | 5048 | RTL_W8(MaxTxPacketSize, EarlySize); |
5087 | 5049 | ||
5050 | rtl_disable_clock_request(pdev); | ||
5051 | |||
5088 | RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); | 5052 | RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); |
5089 | RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); | 5053 | RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); |
5090 | RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); | 5054 | RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); |
5091 | RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK); | 5055 | RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); |
5092 | RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en); | 5056 | RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); |
5093 | RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); | ||
5094 | } | 5057 | } |
5095 | 5058 | ||
5096 | static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) | 5059 | static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) |
@@ -5147,10 +5110,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) | |||
5147 | rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); | 5110 | rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); |
5148 | 5111 | ||
5149 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | 5112 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); |
5150 | RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN); | 5113 | RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); |
5151 | RTL_W8(MaxTxPacketSize, EarlySize); | 5114 | RTL_W8(MaxTxPacketSize, EarlySize); |
5152 | RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); | ||
5153 | RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); | ||
5154 | 5115 | ||
5155 | rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); | 5116 | rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); |
5156 | rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); | 5117 | rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); |
@@ -5366,9 +5327,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) | |||
5366 | 5327 | ||
5367 | RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); | 5328 | RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); |
5368 | RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); | 5329 | RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); |
5369 | RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); | ||
5370 | RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); | ||
5371 | RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK); | ||
5372 | 5330 | ||
5373 | rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); | 5331 | rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); |
5374 | } | 5332 | } |
@@ -5394,9 +5352,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) | |||
5394 | 5352 | ||
5395 | RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); | 5353 | RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); |
5396 | RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); | 5354 | RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); |
5397 | RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); | ||
5398 | RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); | ||
5399 | RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK); | ||
5400 | 5355 | ||
5401 | rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); | 5356 | rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); |
5402 | 5357 | ||
@@ -5418,10 +5373,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) | |||
5418 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ | 5373 | /* Force LAN exit from ASPM if Rx/Tx are not idle */ |
5419 | RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); | 5374 | RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); |
5420 | 5375 | ||
5421 | RTL_W32(MISC, | 5376 | RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); |
5422 | (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN); | ||
5423 | RTL_W8(Config5, RTL_R8(Config5) | ASPM_en); | ||
5424 | RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn); | ||
5425 | RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); | 5377 | RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); |
5426 | RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); | 5378 | RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); |
5427 | } | 5379 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f07c0612abf6..b75f4b286895 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -69,7 +69,7 @@ | |||
69 | 69 | ||
70 | #undef STMMAC_XMIT_DEBUG | 70 | #undef STMMAC_XMIT_DEBUG |
71 | /*#define STMMAC_XMIT_DEBUG*/ | 71 | /*#define STMMAC_XMIT_DEBUG*/ |
72 | #ifdef STMMAC_TX_DEBUG | 72 | #ifdef STMMAC_XMIT_DEBUG |
73 | #define TX_DBG(fmt, args...) printk(fmt, ## args) | 73 | #define TX_DBG(fmt, args...) printk(fmt, ## args) |
74 | #else | 74 | #else |
75 | #define TX_DBG(fmt, args...) do { } while (0) | 75 | #define TX_DBG(fmt, args...) do { } while (0) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 0376a5e6b2bf..0b9829fe3eea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
@@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
188 | goto bus_register_fail; | 188 | goto bus_register_fail; |
189 | } | 189 | } |
190 | 190 | ||
191 | priv->mii = new_bus; | ||
192 | |||
193 | found = 0; | 191 | found = 0; |
194 | for (addr = 0; addr < PHY_MAX_ADDR; addr++) { | 192 | for (addr = 0; addr < PHY_MAX_ADDR; addr++) { |
195 | struct phy_device *phydev = new_bus->phy_map[addr]; | 193 | struct phy_device *phydev = new_bus->phy_map[addr]; |
@@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
237 | } | 235 | } |
238 | } | 236 | } |
239 | 237 | ||
240 | if (!found) | 238 | if (!found) { |
241 | pr_warning("%s: No PHY found\n", ndev->name); | 239 | pr_warning("%s: No PHY found\n", ndev->name); |
240 | mdiobus_unregister(new_bus); | ||
241 | mdiobus_free(new_bus); | ||
242 | return -ENODEV; | ||
243 | } | ||
244 | |||
245 | priv->mii = new_bus; | ||
242 | 246 | ||
243 | return 0; | 247 | return 0; |
244 | 248 | ||
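The stmmac_mdio change above reorders registration so that priv->mii is only assigned once a PHY has actually been found, and the freshly registered bus is torn down on failure. A minimal standalone C sketch of that publish-only-on-success pattern, using hypothetical types and helpers rather than the real mdiobus API:

    #include <stdio.h>
    #include <stdlib.h>

    struct mii_bus { int phys_found; };          /* stand-in for the real struct */
    struct priv    { struct mii_bus *mii; };

    static int probe_phys(struct mii_bus *bus)
    {
            bus->phys_found = 0;                 /* pretend nothing answered */
            return bus->phys_found;
    }

    static int mdio_register(struct priv *priv)
    {
            struct mii_bus *new_bus = calloc(1, sizeof(*new_bus));

            if (!new_bus)
                    return -1;                   /* would be -ENOMEM */

            if (!probe_phys(new_bus)) {
                    fprintf(stderr, "No PHY found\n");
                    free(new_bus);               /* mirrors mdiobus_unregister() + free */
                    return -2;                   /* would be -ENODEV */
            }

            priv->mii = new_bus;                 /* publish only once probing succeeded */
            return 0;
    }

    int main(void)
    {
            struct priv p = { 0 };

            printf("mdio_register() = %d, priv->mii = %p\n",
                   mdio_register(&p), (void *)p.mii);
            free(p.mii);                         /* no-op when registration failed */
            return 0;
    }

With this ordering a caller can never observe a bus pointer that has no usable PHY behind it.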
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index c8e05e27f38c..19d903598b0d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -411,6 +411,7 @@ static const struct usb_device_id products[] = { | |||
411 | }, | 411 | }, |
412 | 412 | ||
413 | /* 3. Combined interface devices matching on interface number */ | 413 | /* 3. Combined interface devices matching on interface number */ |
414 | {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ | ||
414 | {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ | 415 | {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ |
415 | {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, | 416 | {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, |
416 | {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, | 417 | {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, |
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 83564d36e801..a00a03ea4ec9 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c | |||
@@ -318,20 +318,20 @@ struct mwl8k_sta { | |||
318 | #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv)) | 318 | #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv)) |
319 | 319 | ||
320 | static const struct ieee80211_channel mwl8k_channels_24[] = { | 320 | static const struct ieee80211_channel mwl8k_channels_24[] = { |
321 | { .center_freq = 2412, .hw_value = 1, }, | 321 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, }, |
322 | { .center_freq = 2417, .hw_value = 2, }, | 322 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, }, |
323 | { .center_freq = 2422, .hw_value = 3, }, | 323 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, }, |
324 | { .center_freq = 2427, .hw_value = 4, }, | 324 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, }, |
325 | { .center_freq = 2432, .hw_value = 5, }, | 325 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, }, |
326 | { .center_freq = 2437, .hw_value = 6, }, | 326 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, }, |
327 | { .center_freq = 2442, .hw_value = 7, }, | 327 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, }, |
328 | { .center_freq = 2447, .hw_value = 8, }, | 328 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, }, |
329 | { .center_freq = 2452, .hw_value = 9, }, | 329 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, }, |
330 | { .center_freq = 2457, .hw_value = 10, }, | 330 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, }, |
331 | { .center_freq = 2462, .hw_value = 11, }, | 331 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, }, |
332 | { .center_freq = 2467, .hw_value = 12, }, | 332 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, }, |
333 | { .center_freq = 2472, .hw_value = 13, }, | 333 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, }, |
334 | { .center_freq = 2484, .hw_value = 14, }, | 334 | { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, }, |
335 | }; | 335 | }; |
336 | 336 | ||
337 | static const struct ieee80211_rate mwl8k_rates_24[] = { | 337 | static const struct ieee80211_rate mwl8k_rates_24[] = { |
@@ -352,10 +352,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = { | |||
352 | }; | 352 | }; |
353 | 353 | ||
354 | static const struct ieee80211_channel mwl8k_channels_50[] = { | 354 | static const struct ieee80211_channel mwl8k_channels_50[] = { |
355 | { .center_freq = 5180, .hw_value = 36, }, | 355 | { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, }, |
356 | { .center_freq = 5200, .hw_value = 40, }, | 356 | { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, }, |
357 | { .center_freq = 5220, .hw_value = 44, }, | 357 | { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, }, |
358 | { .center_freq = 5240, .hw_value = 48, }, | 358 | { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, }, |
359 | }; | 359 | }; |
360 | 360 | ||
361 | static const struct ieee80211_rate mwl8k_rates_50[] = { | 361 | static const struct ieee80211_rate mwl8k_rates_50[] = { |
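The mwl8k hunks add an explicit .band to every channel entry. In C, members omitted from a designated initializer are zero-initialized, so .band previously defaulted to the first enum value (the 2.4 GHz band), which is only correct for one of the two tables. A small standalone demo of that behaviour (enum and struct names here are hypothetical):

    #include <stdio.h>

    enum band { BAND_2GHZ, BAND_5GHZ };

    struct channel {
            enum band band;
            int center_freq;
            int hw_value;
    };

    int main(void)
    {
            /* .band omitted: zero-initialized, i.e. silently BAND_2GHZ */
            struct channel chan_implicit = { .center_freq = 5180, .hw_value = 36 };
            /* .band spelled out, as the patch now does for every entry */
            struct channel chan_explicit = { .band = BAND_5GHZ,
                                             .center_freq = 5180, .hw_value = 36 };

            printf("implicit band = %d, explicit band = %d\n",
                   chan_implicit.band, chan_explicit.band);
            return 0;
    }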
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 10c1a3454e48..81c5077feff3 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c | |||
@@ -350,7 +350,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) | |||
350 | /* Enable the clockwatch on ST Variants */ | 350 | /* Enable the clockwatch on ST Variants */ |
351 | if (vendor->clockwatch) | 351 | if (vendor->clockwatch) |
352 | data |= RTC_CR_CWEN; | 352 | data |= RTC_CR_CWEN; |
353 | writel(data | RTC_CR_EN, ldata->base + RTC_CR); | 353 | else |
354 | data |= RTC_CR_EN; | ||
355 | writel(data, ldata->base + RTC_CR); | ||
354 | 356 | ||
355 | /* | 357 | /* |
356 | * On ST PL031 variants, the RTC reset value does not provide correct | 358 | * On ST PL031 variants, the RTC reset value does not provide correct |
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index 77cdba9df274..bb991dfe134f 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h | |||
@@ -28,25 +28,16 @@ | |||
28 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION | 28 | #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * Architectures where both 32- and 64-bit binaries can be executed | 31 | * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed |
32 | * on 64-bit kernels need this. This keeps the structure format | 32 | * back to the kernel via ioctl from userspace. On architectures where 32- and |
33 | * uniform, and makes sure the wait_queue_token isn't too big to be | 33 | * 64-bit userspace binaries can be executed it's important that the size of |
34 | * passed back down to the kernel. | 34 | * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we |
35 | * | 35 | * do not break the binary ABI interface by changing the structure size. |
36 | * This assumes that on these architectures: | ||
37 | * mode 32 bit 64 bit | ||
38 | * ------------------------- | ||
39 | * int 32 bit 32 bit | ||
40 | * long 32 bit 64 bit | ||
41 | * | ||
42 | * If so, 32-bit user-space code should be backwards compatible. | ||
43 | */ | 36 | */ |
44 | 37 | #if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */ | |
45 | #if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \ | ||
46 | || defined(__powerpc__) || defined(__s390__) | ||
47 | typedef unsigned int autofs_wqt_t; | ||
48 | #else | ||
49 | typedef unsigned long autofs_wqt_t; | 38 | typedef unsigned long autofs_wqt_t; |
39 | #else | ||
40 | typedef unsigned int autofs_wqt_t; | ||
50 | #endif | 41 | #endif |
51 | 42 | ||
52 | /* Packet types */ | 43 | /* Packet types */ |
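The rewritten auto_fs.h comment hinges on unsigned int having the same size for 32- and 64-bit userspace, while unsigned long differs on LP64 kernels running compat binaries, which would change the autofs packet layout. A trivial standalone check (build it with -m32 and -m64 to see the difference):

    #include <stdio.h>

    int main(void)
    {
            /* unsigned long is 8 bytes for a 64-bit build but 4 for a 32-bit
             * one; unsigned int is 4 bytes in both, keeping the ABI stable. */
            printf("sizeof(unsigned int)  = %zu\n", sizeof(unsigned int));
            printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
            return 0;
    }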
diff --git a/kernel/pid.c b/kernel/pid.c index de9af600006f..f2c6a6825098 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -331,7 +331,7 @@ out: | |||
331 | return pid; | 331 | return pid; |
332 | 332 | ||
333 | out_unlock: | 333 | out_unlock: |
334 | spin_unlock(&pidmap_lock); | 334 | spin_unlock_irq(&pidmap_lock); |
335 | out_free: | 335 | out_free: |
336 | while (++i <= ns->level) | 336 | while (++i <= ns->level) |
337 | free_pidmap(pid->numbers + i); | 337 | free_pidmap(pid->numbers + i); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09255ec8159c..fbb60b103e64 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | |||
3030 | if (memcg) { | 3030 | if (memcg) { |
3031 | s->memcg_params->memcg = memcg; | 3031 | s->memcg_params->memcg = memcg; |
3032 | s->memcg_params->root_cache = root_cache; | 3032 | s->memcg_params->root_cache = root_cache; |
3033 | } | 3033 | } else |
3034 | s->memcg_params->is_root_cache = true; | ||
3035 | |||
3034 | return 0; | 3036 | return 0; |
3035 | } | 3037 | } |
3036 | 3038 | ||
diff --git a/mm/mlock.c b/mm/mlock.c index f0b9ce572fc7..c9bd528b01d2 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) | |||
517 | static int do_mlockall(int flags) | 517 | static int do_mlockall(int flags) |
518 | { | 518 | { |
519 | struct vm_area_struct * vma, * prev = NULL; | 519 | struct vm_area_struct * vma, * prev = NULL; |
520 | unsigned int def_flags = 0; | ||
521 | 520 | ||
522 | if (flags & MCL_FUTURE) | 521 | if (flags & MCL_FUTURE) |
523 | def_flags = VM_LOCKED; | 522 | current->mm->def_flags |= VM_LOCKED; |
524 | current->mm->def_flags = def_flags; | 523 | else |
524 | current->mm->def_flags &= ~VM_LOCKED; | ||
525 | if (flags == MCL_FUTURE) | 525 | if (flags == MCL_FUTURE) |
526 | goto out; | 526 | goto out; |
527 | 527 | ||
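The do_mlockall() fix switches from overwriting mm->def_flags to toggling only VM_LOCKED, so unrelated default-flag bits survive the call. A tiny standalone sketch of that read-modify-write pattern (the flag values below are stand-ins, not the kernel's):

    #include <stdio.h>

    #define FLAG_LOCKED   0x2000u        /* stand-in for VM_LOCKED */
    #define FLAG_OTHER    0x0100u        /* stand-in for an unrelated def_flags bit */

    int main(void)
    {
            unsigned int def_flags = FLAG_OTHER;   /* pre-existing state */

            def_flags |= FLAG_LOCKED;              /* MCL_FUTURE requested */
            printf("set:   0x%04x\n", def_flags);

            def_flags &= ~FLAG_LOCKED;             /* MCL_FUTURE not requested */
            printf("clear: 0x%04x (other bit preserved: %s)\n",
                   def_flags, (def_flags & FLAG_OTHER) ? "yes" : "no");
            return 0;
    }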
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index df2022ff0c8a..9673d96b1ba7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page) | |||
773 | set_pageblock_migratetype(page, MIGRATE_CMA); | 773 | set_pageblock_migratetype(page, MIGRATE_CMA); |
774 | __free_pages(page, pageblock_order); | 774 | __free_pages(page, pageblock_order); |
775 | totalram_pages += pageblock_nr_pages; | 775 | totalram_pages += pageblock_nr_pages; |
776 | #ifdef CONFIG_HIGHMEM | ||
777 | if (PageHighMem(page)) | ||
778 | totalhigh_pages += pageblock_nr_pages; | ||
779 | #endif | ||
776 | } | 780 | } |
777 | #endif | 781 | #endif |
778 | 782 | ||
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 183f97a86bb2..553921511e4e 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res, | |||
440 | /* this is an hash collision with the temporary selected node. Choose | 440 | /* this is an hash collision with the temporary selected node. Choose |
441 | * the one with the lowest address | 441 | * the one with the lowest address |
442 | */ | 442 | */ |
443 | if ((tmp_max == max) && | 443 | if ((tmp_max == max) && max_orig_node && |
444 | (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) | 444 | (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) |
445 | goto out; | 445 | goto out; |
446 | 446 | ||
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 7f884e3fb955..8660ea3be705 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/etherdevice.h> | 16 | #include <linux/etherdevice.h> |
17 | #include <linux/llc.h> | 17 | #include <linux/llc.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/pkt_sched.h> | ||
19 | #include <net/net_namespace.h> | 20 | #include <net/net_namespace.h> |
20 | #include <net/llc.h> | 21 | #include <net/llc.h> |
21 | #include <net/llc_pdu.h> | 22 | #include <net/llc_pdu.h> |
@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p, | |||
40 | 41 | ||
41 | skb->dev = p->dev; | 42 | skb->dev = p->dev; |
42 | skb->protocol = htons(ETH_P_802_2); | 43 | skb->protocol = htons(ETH_P_802_2); |
44 | skb->priority = TC_PRIO_CONTROL; | ||
43 | 45 | ||
44 | skb_reserve(skb, LLC_RESERVE); | 46 | skb_reserve(skb, LLC_RESERVE); |
45 | memcpy(__skb_put(skb, length), data, length); | 47 | memcpy(__skb_put(skb, length), data, length); |
diff --git a/net/core/datagram.c b/net/core/datagram.c index 0337e2b76862..368f9c3f9dc6 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, | |||
187 | skb_queue_walk(queue, skb) { | 187 | skb_queue_walk(queue, skb) { |
188 | *peeked = skb->peeked; | 188 | *peeked = skb->peeked; |
189 | if (flags & MSG_PEEK) { | 189 | if (flags & MSG_PEEK) { |
190 | if (*off >= skb->len) { | 190 | if (*off >= skb->len && skb->len) { |
191 | *off -= skb->len; | 191 | *off -= skb->len; |
192 | continue; | 192 | continue; |
193 | } | 193 | } |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 9547a273b9e9..ded146b217f1 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -928,24 +928,25 @@ static void parp_redo(struct sk_buff *skb) | |||
928 | static int arp_rcv(struct sk_buff *skb, struct net_device *dev, | 928 | static int arp_rcv(struct sk_buff *skb, struct net_device *dev, |
929 | struct packet_type *pt, struct net_device *orig_dev) | 929 | struct packet_type *pt, struct net_device *orig_dev) |
930 | { | 930 | { |
931 | struct arphdr *arp; | 931 | const struct arphdr *arp; |
932 | |||
933 | if (dev->flags & IFF_NOARP || | ||
934 | skb->pkt_type == PACKET_OTHERHOST || | ||
935 | skb->pkt_type == PACKET_LOOPBACK) | ||
936 | goto freeskb; | ||
937 | |||
938 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
939 | if (!skb) | ||
940 | goto out_of_mem; | ||
932 | 941 | ||
933 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ | 942 | /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ |
934 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) | 943 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) |
935 | goto freeskb; | 944 | goto freeskb; |
936 | 945 | ||
937 | arp = arp_hdr(skb); | 946 | arp = arp_hdr(skb); |
938 | if (arp->ar_hln != dev->addr_len || | 947 | if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4) |
939 | dev->flags & IFF_NOARP || | ||
940 | skb->pkt_type == PACKET_OTHERHOST || | ||
941 | skb->pkt_type == PACKET_LOOPBACK || | ||
942 | arp->ar_pln != 4) | ||
943 | goto freeskb; | 948 | goto freeskb; |
944 | 949 | ||
945 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
946 | if (skb == NULL) | ||
947 | goto out_of_mem; | ||
948 | |||
949 | memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); | 950 | memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); |
950 | 951 | ||
951 | return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); | 952 | return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); |
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 7302b0b7b642..83acc1405a18 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/skbuff.h> | 10 | #include <linux/skbuff.h> |
11 | #include <linux/ipv6.h> | 11 | #include <linux/ipv6.h> |
12 | #include <net/ipv6.h> | ||
12 | #include <linux/netfilter.h> | 13 | #include <linux/netfilter.h> |
13 | #include <linux/netfilter_ipv6.h> | 14 | #include <linux/netfilter_ipv6.h> |
14 | #include <linux/netfilter_ipv6/ip6t_NPT.h> | 15 | #include <linux/netfilter_ipv6/ip6t_NPT.h> |
@@ -18,11 +19,20 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par) | |||
18 | { | 19 | { |
19 | struct ip6t_npt_tginfo *npt = par->targinfo; | 20 | struct ip6t_npt_tginfo *npt = par->targinfo; |
20 | __wsum src_sum = 0, dst_sum = 0; | 21 | __wsum src_sum = 0, dst_sum = 0; |
22 | struct in6_addr pfx; | ||
21 | unsigned int i; | 23 | unsigned int i; |
22 | 24 | ||
23 | if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64) | 25 | if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64) |
24 | return -EINVAL; | 26 | return -EINVAL; |
25 | 27 | ||
28 | /* Ensure that LSB of prefix is zero */ | ||
29 | ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len); | ||
30 | if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6)) | ||
31 | return -EINVAL; | ||
32 | ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len); | ||
33 | if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6)) | ||
34 | return -EINVAL; | ||
35 | |||
26 | for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) { | 36 | for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) { |
27 | src_sum = csum_add(src_sum, | 37 | src_sum = csum_add(src_sum, |
28 | (__force __wsum)npt->src_pfx.in6.s6_addr16[i]); | 38 | (__force __wsum)npt->src_pfx.in6.s6_addr16[i]); |
@@ -30,7 +40,7 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par) | |||
30 | (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]); | 40 | (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]); |
31 | } | 41 | } |
32 | 42 | ||
33 | npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum); | 43 | npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum)); |
34 | return 0; | 44 | return 0; |
35 | } | 45 | } |
36 | 46 | ||
@@ -51,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt, | |||
51 | 61 | ||
52 | idx = i / 32; | 62 | idx = i / 32; |
53 | addr->s6_addr32[idx] &= mask; | 63 | addr->s6_addr32[idx] &= mask; |
54 | addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx]; | 64 | addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx]; |
55 | } | 65 | } |
56 | 66 | ||
57 | if (pfx_len <= 48) | 67 | if (pfx_len <= 48) |
@@ -66,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt, | |||
66 | return false; | 76 | return false; |
67 | } | 77 | } |
68 | 78 | ||
69 | sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx], | 79 | sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]), |
70 | npt->adjustment); | 80 | csum_unfold(npt->adjustment))); |
71 | if (sum == CSUM_MANGLED_0) | 81 | if (sum == CSUM_MANGLED_0) |
72 | sum = 0; | 82 | sum = 0; |
73 | *(__force __sum16 *)&addr->s6_addr16[idx] = sum; | 83 | *(__force __sum16 *)&addr->s6_addr16[idx] = sum; |
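The ip6t_NPT fix keeps the prefix-translation adjustment in properly folded 16-bit one's-complement form (csum_fold/csum_unfold) instead of a raw 32-bit cast. A standalone sketch of the underlying RFC 1624-style incremental update when a single 16-bit address word is rewritten (values are made up; this is not the kernel's csum API):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator into 16 bits with end-around carry and
     * complement it -- the same idea as the kernel's csum_fold(). */
    static uint16_t csum_fold32(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            uint16_t old_word = 0x2001, new_word = 0x3001;  /* hypothetical rewrite */
            uint16_t old_csum = 0xabcd;                     /* valid for old_word */

            /* RFC 1624 incremental update: HC' = ~(~HC + ~m + m') */
            uint32_t acc = (uint16_t)~old_csum;
            acc += (uint16_t)~old_word;
            acc += new_word;

            printf("checksum 0x%04x -> 0x%04x\n", old_csum, csum_fold32(acc));
            return 0;
    }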
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 516fbc96feff..0479c64aa83c 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2004,7 +2004,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, | |||
2004 | { | 2004 | { |
2005 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 2005 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2006 | 2006 | ||
2007 | memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate)); | 2007 | memcpy(sdata->vif.bss_conf.mcast_rate, rate, |
2008 | sizeof(int) * IEEE80211_NUM_BANDS); | ||
2008 | 2009 | ||
2009 | return 0; | 2010 | return 0; |
2010 | } | 2011 | } |
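The mcast_rate fix works around a classic C pitfall: an array-typed function parameter decays to a pointer, so sizeof(rate) yields the pointer size rather than the size of the whole array. A short standalone demonstration (names are hypothetical):

    #include <stdio.h>
    #include <string.h>

    #define NUM_BANDS 3

    /* 'rate' decays to 'int *', so sizeof(rate) is pointer-sized in here. */
    static void copy_rates(int *dst, int rate[NUM_BANDS])
    {
            printf("sizeof(rate) in callee = %zu (a pointer)\n", sizeof(rate));

            /* wrong: memcpy(dst, rate, sizeof(rate)) copies only 4 or 8 bytes */
            memcpy(dst, rate, sizeof(int) * NUM_BANDS);   /* copies the full array */
    }

    int main(void)
    {
            int src[NUM_BANDS] = { 10, 20, 60 };
            int dst[NUM_BANDS] = { 0 };

            copy_rates(dst, src);
            printf("dst = { %d, %d, %d }, sizeof(src) in caller = %zu\n",
                   dst[0], dst[1], dst[2], sizeof(src));
            return 0;
    }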
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a3552929a21d..5107248af7fb 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -3400,6 +3400,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
3400 | 3400 | ||
3401 | ret = 0; | 3401 | ret = 0; |
3402 | 3402 | ||
3403 | out: | ||
3403 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, | 3404 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, |
3404 | IEEE80211_CHAN_DISABLED)) { | 3405 | IEEE80211_CHAN_DISABLED)) { |
3405 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { | 3406 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { |
@@ -3408,14 +3409,13 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
3408 | goto out; | 3409 | goto out; |
3409 | } | 3410 | } |
3410 | 3411 | ||
3411 | ret = chandef_downgrade(chandef); | 3412 | ret |= chandef_downgrade(chandef); |
3412 | } | 3413 | } |
3413 | 3414 | ||
3414 | if (chandef->width != vht_chandef.width) | 3415 | if (chandef->width != vht_chandef.width) |
3415 | sdata_info(sdata, | 3416 | sdata_info(sdata, |
3416 | "local regulatory prevented using AP HT/VHT configuration, downgraded\n"); | 3417 | "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); |
3417 | 3418 | ||
3418 | out: | ||
3419 | WARN_ON_ONCE(!cfg80211_chandef_valid(chandef)); | 3419 | WARN_ON_ONCE(!cfg80211_chandef_valid(chandef)); |
3420 | return ret; | 3420 | return ret; |
3421 | } | 3421 | } |
@@ -3529,8 +3529,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, | |||
3529 | */ | 3529 | */ |
3530 | ret = ieee80211_vif_use_channel(sdata, &chandef, | 3530 | ret = ieee80211_vif_use_channel(sdata, &chandef, |
3531 | IEEE80211_CHANCTX_SHARED); | 3531 | IEEE80211_CHANCTX_SHARED); |
3532 | while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) | 3532 | while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) { |
3533 | ifmgd->flags |= chandef_downgrade(&chandef); | 3533 | ifmgd->flags |= chandef_downgrade(&chandef); |
3534 | ret = ieee80211_vif_use_channel(sdata, &chandef, | ||
3535 | IEEE80211_CHANCTX_SHARED); | ||
3536 | } | ||
3534 | return ret; | 3537 | return ret; |
3535 | } | 3538 | } |
3536 | 3539 | ||
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 746048b13ef3..ae8ec6f27688 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c | |||
@@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, | |||
61 | return 1; | 61 | return 1; |
62 | } | 62 | } |
63 | 63 | ||
64 | static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph, | ||
65 | unsigned int sctphoff) | ||
66 | { | ||
67 | __u32 crc32; | ||
68 | struct sk_buff *iter; | ||
69 | |||
70 | crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff); | ||
71 | skb_walk_frags(skb, iter) | ||
72 | crc32 = sctp_update_cksum((u8 *) iter->data, | ||
73 | skb_headlen(iter), crc32); | ||
74 | sctph->checksum = sctp_end_cksum(crc32); | ||
75 | |||
76 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
77 | } | ||
78 | |||
64 | static int | 79 | static int |
65 | sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, | 80 | sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, |
66 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) | 81 | struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) |
67 | { | 82 | { |
68 | sctp_sctphdr_t *sctph; | 83 | sctp_sctphdr_t *sctph; |
69 | unsigned int sctphoff = iph->len; | 84 | unsigned int sctphoff = iph->len; |
70 | struct sk_buff *iter; | ||
71 | __be32 crc32; | ||
72 | 85 | ||
73 | #ifdef CONFIG_IP_VS_IPV6 | 86 | #ifdef CONFIG_IP_VS_IPV6 |
74 | if (cp->af == AF_INET6 && iph->fragoffs) | 87 | if (cp->af == AF_INET6 && iph->fragoffs) |
@@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
92 | sctph = (void *) skb_network_header(skb) + sctphoff; | 105 | sctph = (void *) skb_network_header(skb) + sctphoff; |
93 | sctph->source = cp->vport; | 106 | sctph->source = cp->vport; |
94 | 107 | ||
95 | /* Calculate the checksum */ | 108 | sctp_nat_csum(skb, sctph, sctphoff); |
96 | crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff); | ||
97 | skb_walk_frags(skb, iter) | ||
98 | crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter), | ||
99 | crc32); | ||
100 | crc32 = sctp_end_cksum(crc32); | ||
101 | sctph->checksum = crc32; | ||
102 | 109 | ||
103 | return 1; | 110 | return 1; |
104 | } | 111 | } |
@@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
109 | { | 116 | { |
110 | sctp_sctphdr_t *sctph; | 117 | sctp_sctphdr_t *sctph; |
111 | unsigned int sctphoff = iph->len; | 118 | unsigned int sctphoff = iph->len; |
112 | struct sk_buff *iter; | ||
113 | __be32 crc32; | ||
114 | 119 | ||
115 | #ifdef CONFIG_IP_VS_IPV6 | 120 | #ifdef CONFIG_IP_VS_IPV6 |
116 | if (cp->af == AF_INET6 && iph->fragoffs) | 121 | if (cp->af == AF_INET6 && iph->fragoffs) |
@@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
134 | sctph = (void *) skb_network_header(skb) + sctphoff; | 139 | sctph = (void *) skb_network_header(skb) + sctphoff; |
135 | sctph->dest = cp->dport; | 140 | sctph->dest = cp->dport; |
136 | 141 | ||
137 | /* Calculate the checksum */ | 142 | sctp_nat_csum(skb, sctph, sctphoff); |
138 | crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff); | ||
139 | skb_walk_frags(skb, iter) | ||
140 | crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter), | ||
141 | crc32); | ||
142 | crc32 = sctp_end_cksum(crc32); | ||
143 | sctph->checksum = crc32; | ||
144 | 143 | ||
145 | return 1; | 144 | return 1; |
146 | } | 145 | } |
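sctp_nat_csum() folds the checksum logic that was duplicated in the SNAT and DNAT handlers: a CRC32c started over the linear header area and then updated across each fragment before the final step. A standalone sketch of that incremental CRC32c flow (plain bitwise CRC32c; byte-order handling is omitted and the buffers are hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bitwise CRC32c (Castagnoli, reflected polynomial 0x82F63B78). */
    static uint32_t crc32c_update(uint32_t crc, const uint8_t *buf, size_t len)
    {
            while (len--) {
                    crc ^= *buf++;
                    for (int k = 0; k < 8; k++)
                            crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
            }
            return crc;
    }

    int main(void)
    {
            /* two "fragments", checksummed incrementally the way the helper
             * handles the linear head and then walks every skb fragment */
            const char head[] = "sctp header + first chunk";
            const char frag[] = " ... remaining chunk data";

            uint32_t crc = ~0u;                          /* start */
            crc = crc32c_update(crc, (const uint8_t *)head, strlen(head));
            crc = crc32c_update(crc, (const uint8_t *)frag, strlen(frag));
            crc = ~crc;                                  /* end: final complement */

            printf("crc32c = 0x%08x\n", crc);
            return 0;
    }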
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index effa10c9e4e3..44fd10c539ac 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) | |||
1795 | GFP_KERNEL); | 1795 | GFP_KERNEL); |
1796 | if (!tinfo->buf) | 1796 | if (!tinfo->buf) |
1797 | goto outtinfo; | 1797 | goto outtinfo; |
1798 | } else { | ||
1799 | tinfo->buf = NULL; | ||
1798 | } | 1800 | } |
1799 | tinfo->id = id; | 1801 | tinfo->id = id; |
1800 | 1802 | ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 51561eafcb72..79e8ed4ac7ce 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1135,9 +1135,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, | |||
1135 | memset(&opt, 0, sizeof(opt)); | 1135 | memset(&opt, 0, sizeof(opt)); |
1136 | 1136 | ||
1137 | opt.rate.rate = cl->rate.rate_bps >> 3; | 1137 | opt.rate.rate = cl->rate.rate_bps >> 3; |
1138 | opt.buffer = cl->buffer; | 1138 | opt.buffer = PSCHED_NS2TICKS(cl->buffer); |
1139 | opt.ceil.rate = cl->ceil.rate_bps >> 3; | 1139 | opt.ceil.rate = cl->ceil.rate_bps >> 3; |
1140 | opt.cbuffer = cl->cbuffer; | 1140 | opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); |
1141 | opt.quantum = cl->quantum; | 1141 | opt.quantum = cl->quantum; |
1142 | opt.prio = cl->prio; | 1142 | opt.prio = cl->prio; |
1143 | opt.level = cl->level; | 1143 | opt.level = cl->level; |
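The htb_dump_class() fix converts the internally stored nanosecond buffer values back to packet-scheduler ticks before handing them to userspace. Assuming the usual PSCHED_SHIFT of 6 (one tick = 64 ns) in kernels of this vintage, a minimal conversion sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define PSCHED_SHIFT        6                         /* assumption, see above */
    #define PSCHED_NS2TICKS(x)  ((x) >> PSCHED_SHIFT)
    #define PSCHED_TICKS2NS(x)  ((uint64_t)(x) << PSCHED_SHIFT)

    int main(void)
    {
            uint64_t buffer_ns = 125000;                  /* hypothetical internal value */
            uint64_t ticks = PSCHED_NS2TICKS(buffer_ns);

            printf("%llu ns -> %llu ticks -> %llu ns\n",
                   (unsigned long long)buffer_ns,
                   (unsigned long long)ticks,
                   (unsigned long long)PSCHED_TICKS2NS(ticks));
            return 0;
    }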
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 7521d944c0fb..cf4852814e0c 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig | |||
@@ -3,8 +3,8 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig IP_SCTP | 5 | menuconfig IP_SCTP |
6 | tristate "The SCTP Protocol (EXPERIMENTAL)" | 6 | tristate "The SCTP Protocol" |
7 | depends on INET && EXPERIMENTAL | 7 | depends on INET |
8 | depends on IPV6 || IPV6=n | 8 | depends on IPV6 || IPV6=n |
9 | select CRYPTO | 9 | select CRYPTO |
10 | select CRYPTO_HMAC | 10 | select CRYPTO_HMAC |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f3f0f4dc31dd..391a245d5203 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
326 | */ | 326 | */ |
327 | rcu_read_lock(); | 327 | rcu_read_lock(); |
328 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { | 328 | list_for_each_entry_rcu(laddr, &bp->address_list, list) { |
329 | if (!laddr->valid && laddr->state != SCTP_ADDR_SRC) | 329 | if (!laddr->valid) |
330 | continue; | 330 | continue; |
331 | if ((laddr->a.sa.sa_family == AF_INET6) && | 331 | if ((laddr->state == SCTP_ADDR_SRC) && |
332 | (laddr->a.sa.sa_family == AF_INET6) && | ||
332 | (scope <= sctp_scope(&laddr->a))) { | 333 | (scope <= sctp_scope(&laddr->a))) { |
333 | bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); | 334 | bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); |
334 | if (!baddr || (matchlen < bmatchlen)) { | 335 | if (!baddr || (matchlen < bmatchlen)) { |