127 files changed, 882 insertions, 990 deletions
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 1404674c0a02..6fea79efb4cb 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -453,7 +453,7 @@ TP_STATUS_COPY : This flag indicates that the frame (and associated
 enabled previously with setsockopt() and
 the PACKET_COPY_THRESH option.
 
-The number of frames than can be buffered to
+The number of frames that can be buffered to
 be read with recvfrom is limited like a normal socket.
 See the SO_RCVBUF option in the socket (7) man page.
 
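For reference, a minimal userspace sketch (not part of the patch above) of the SO_RCVBUF limit the corrected sentence refers to; the socket descriptor and the 1 MiB request size are illustrative:

#include <stdio.h>
#include <sys/socket.h>

/* Query and try to enlarge the receive buffer that bounds how many
 * frames can sit waiting to be read with recvfrom().
 */
static void bump_rcvbuf(int sock)
{
	int bytes = 1 << 20;	/* illustrative 1 MiB request */
	int cur = 0;
	socklen_t len = sizeof(cur);

	if (getsockopt(sock, SOL_SOCKET, SO_RCVBUF, &cur, &len) == 0)
		printf("SO_RCVBUF before: %d\n", cur);

	/* The kernel doubles the requested value and clamps it to
	 * net.core.rmem_max; see socket(7).
	 */
	if (setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &bytes, sizeof(bytes)) < 0)
		perror("setsockopt(SO_RCVBUF)");

	len = sizeof(cur);
	if (getsockopt(sock, SOL_SOCKET, SO_RCVBUF, &cur, &len) == 0)
		printf("SO_RCVBUF after: %d\n", cur);
}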
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 661d3c316a17..048c92b487f6 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -21,26 +21,38 @@ has such a feature).
 
 SO_TIMESTAMPING:
 
-Instructs the socket layer which kind of information is wanted. The
-parameter is an integer with some of the following bits set. Setting
-other bits is an error and doesn't change the current state.
-
-SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamp in hardware
-SOF_TIMESTAMPING_TX_SOFTWARE: if SOF_TIMESTAMPING_TX_HARDWARE is off or
-                              fails, then do it in software
-SOF_TIMESTAMPING_RX_HARDWARE: return the original, unmodified time stamp
-                              as generated by the hardware
-SOF_TIMESTAMPING_RX_SOFTWARE: if SOF_TIMESTAMPING_RX_HARDWARE is off or
-                              fails, then do it in software
-SOF_TIMESTAMPING_RAW_HARDWARE: return original raw hardware time stamp
-SOF_TIMESTAMPING_SYS_HARDWARE: return hardware time stamp transformed to
-                               the system time base
-SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
-                           software
-
-SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
-SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
-following control message:
+Instructs the socket layer which kind of information should be collected
+and/or reported. The parameter is an integer with some of the following
+bits set. Setting other bits is an error and doesn't change the current
+state.
+
+Four of the bits are requests to the stack to try to generate
+timestamps. Any combination of them is valid.
+
+SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamps in hardware
+SOF_TIMESTAMPING_TX_SOFTWARE: try to obtain send time stamps in software
+SOF_TIMESTAMPING_RX_HARDWARE: try to obtain receive time stamps in hardware
+SOF_TIMESTAMPING_RX_SOFTWARE: try to obtain receive time stamps in software
+
+The other three bits control which timestamps will be reported in a
+generated control message. If none of these bits are set or if none of
+the set bits correspond to data that is available, then the control
+message will not be generated:
+
+SOF_TIMESTAMPING_SOFTWARE: report systime if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
+
+It is worth noting that timestamps may be collected for reasons other
+than being requested by a particular socket with
+SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that
+can generate hardware receive timestamps ignore
+SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag
+in case future drivers pay attention.
+
+If timestamps are reported, they will appear in a control message with
+cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like
+this:
 
 struct scm_timestamping {
 	struct timespec systime;
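For reference, a minimal userspace sketch (not part of the patch above) showing the request/report split described in the new text: the SOF_* flags come from <linux/net_tstamp.h>, SO_TIMESTAMPING is 37 on x86, and the three-timespec payload struct is declared locally here purely for illustration, mirroring the layout documented above:

#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* request/report bits */

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* asm-generic value; older libcs may not expose it */
#endif

/* Payload layout as documented above: systime, hwtimetrans, hwtimeraw. */
struct sw_hw_timestamps {
	struct timespec systime;
	struct timespec hwtimetrans;
	struct timespec hwtimeraw;
};

static int enable_rx_timestamps(int sock)
{
	/* Ask the stack to generate software receive timestamps and to
	 * report them (SOF_TIMESTAMPING_SOFTWARE selects the systime field).
	 */
	int flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

static void read_timestamp_cmsgs(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SO_TIMESTAMPING) {
			struct sw_hw_timestamps ts;

			memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
			/* ts.systime now holds the software receive stamp. */
		}
	}
}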
diff --git a/MAINTAINERS b/MAINTAINERS
index b7befe758429..b3fdb0f004ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1738,6 +1738,7 @@ F: include/uapi/linux/bfs_fs.h
 BLACKFIN ARCHITECTURE
 M: Steven Miao <realmz6@gmail.com>
 L: adi-buildroot-devel@lists.sourceforge.net
+T: git git://git.code.sf.net/p/adi-linux/code
 W: http://blackfin.uclinux.org
 S: Supported
 F: arch/blackfin/
@@ -6002,6 +6003,8 @@ F: include/linux/netdevice.h
 F: include/uapi/linux/in.h
 F: include/uapi/linux/net.h
 F: include/uapi/linux/netdevice.h
+F: tools/net/
+F: tools/testing/selftests/net/
 
 NETWORKING [IPv4/IPv6]
 M: "David S. Miller" <davem@davemloft.net>
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 184066ceb1f6..053c17b36559 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 * definition, which doesn't have the same semantics. We don't want to
 * use -fno-builtin, so just hide the name ffs.
 */
-#define ffs kernel_ffs
+#define ffs(x) kernel_ffs(x)
 
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf83a735..20e8a9b21d75 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 /* attempt to allocate a granule's worth of cached memory pages */
 
 page = alloc_pages_exact_node(nid,
-GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 IA64_GRANULE_SHIFT-PAGE_SHIFT);
 if (!page) {
 mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5ec1e47a0d77..e865d748179b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
 
 area->nid = nid;
 area->order = order;
-area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+area->pages = alloc_pages_exact_node(area->nid,
+GFP_KERNEL|__GFP_THISNODE,
 area->order);
 
 if (!area->pages) {
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca5602c..f3aaf231b4e5 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -341,10 +341,6 @@ config X86_USE_3DNOW
 def_bool y
 depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
 
-config X86_OOSTORE
-	def_bool y
-	depends on (MWINCHIP3D || MWINCHIPC6) && MTRR
-
 #
 # P6_NOPs are a relatively minor optimization that require a family >=
 # 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 04a48903b2eb..69bbb4845020 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,11 +85,7 @@
 #else
 # define smp_rmb() barrier()
 #endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
+#define smp_wmb() barrier()
 #define smp_read_barrier_depends() read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
@@ -100,7 +96,7 @@
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif /* SMP */
 
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
 * For either of these options x86 doesn't have a strong TSO memory
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34f69cb9350a..91d9c69a629e 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
 
 static inline void flush_write_buffers(void)
 {
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
 asm volatile("lock; addl $0,0(%%esp)": : :"memory");
 #endif
 }
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index bf156ded74b5..0f62f5482d91 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,10 +26,9 @@
 # define LOCK_PTR_REG "D"
 #endif
 
-#if defined(CONFIG_X86_32) && \
-	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
+#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
 /*
- * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
+ * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
 # define UNLOCK_LOCK_PREFIX LOCK_PREFIX
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8779edab684e..d8fba5c15fbd 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -8,236 +8,6 @@
 
 #include "cpu.h"
 
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 power2(u32 x)
-{
-	u32 s = 1;
-
-	while (s <= x)
-		s <<= 1;
-
-	return s >>= 1;
-}
-
-
-/*
- * Set up an actual MCR
- */
-static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
-	u32 lo, hi;
-
-	hi = base & ~0xFFF;
-	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
-	lo &= ~0xFFF;		/* Remove the ctrl value bits */
-	lo |= key;		/* Attribute we wish to set */
-	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
-	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
-}
-
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 ramtop(void)
-{
-	u32 clip = 0xFFFFFFFFUL;
-	u32 top = 0;
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		unsigned long start, end;
-
-		if (e820.map[i].addr > 0xFFFFFFFFUL)
-			continue;
-		/*
-		 * Don't MCR over reserved space. Ignore the ISA hole
-		 * we frob around that catastrophe already
-		 */
-		if (e820.map[i].type == E820_RESERVED) {
-			if (e820.map[i].addr >= 0x100000UL &&
-			    e820.map[i].addr < clip)
-				clip = e820.map[i].addr;
-			continue;
-		}
-		start = e820.map[i].addr;
-		end = e820.map[i].addr + e820.map[i].size;
-		if (start >= end)
-			continue;
-		if (end > top)
-			top = end;
-	}
-	/*
-	 * Everything below 'top' should be RAM except for the ISA hole.
-	 * Because of the limited MCR's we want to map NV/ACPI into our
-	 * MCR range for gunk in RAM
-	 *
-	 * Clip might cause us to MCR insufficient RAM but that is an
-	 * acceptable failure mode and should only bite obscure boxes with
-	 * a VESA hole at 15Mb
-	 *
-	 * The second case Clip sometimes kicks in is when the EBDA is marked
-	 * as reserved. Again we fail safe with reasonable results
-	 */
-	if (top > clip)
-		top = clip;
-
-	return top;
-}
-
-/*
- * Compute a set of MCR's to give maximum coverage
- */
-static int centaur_mcr_compute(int nr, int key)
-{
-	u32 mem = ramtop();
-	u32 root = power2(mem);
-	u32 base = root;
-	u32 top = root;
-	u32 floor = 0;
-	int ct = 0;
-
-	while (ct < nr) {
-		u32 fspace = 0;
-		u32 high;
-		u32 low;
-
-		/*
-		 * Find the largest block we will fill going upwards
-		 */
-		high = power2(mem-top);
-
-		/*
-		 * Find the largest block we will fill going downwards
-		 */
-		low = base/2;
-
-		/*
-		 * Don't fill below 1Mb going downwards as there
-		 * is an ISA hole in the way.
-		 */
-		if (base <= 1024*1024)
-			low = 0;
-
-		/*
-		 * See how much space we could cover by filling below
-		 * the ISA hole
-		 */
-
-		if (floor == 0)
-			fspace = 512*1024;
-		else if (floor == 512*1024)
-			fspace = 128*1024;
-
-		/* And forget ROM space */
-
-		/*
-		 * Now install the largest coverage we get
-		 */
-		if (fspace > high && fspace > low) {
-			centaur_mcr_insert(ct, floor, fspace, key);
-			floor += fspace;
-		} else if (high > low) {
-			centaur_mcr_insert(ct, top, high, key);
-			top += high;
-		} else if (low > 0) {
-			base -= low;
-			centaur_mcr_insert(ct, base, low, key);
-		} else
-			break;
-		ct++;
-	}
-	/*
-	 * We loaded ct values. We now need to set the mask. The caller
-	 * must do this bit.
-	 */
-	return ct;
-}
-
-static void centaur_create_optimal_mcr(void)
-{
-	int used;
-	int i;
-
-	/*
-	 * Allocate up to 6 mcrs to mark as much of ram as possible
-	 * as write combining and weak write ordered.
-	 *
-	 * To experiment with: Linux never uses stack operations for
-	 * mmio spaces so we could globally enable stack operation wc
-	 *
-	 * Load the registers with type 31 - full write combining, all
-	 * writes weakly ordered.
-	 */
-	used = centaur_mcr_compute(6, 31);
-
-	/*
-	 * Wipe unused MCRs
-	 */
-	for (i = used; i < 8; i++)
-		wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void winchip2_create_optimal_mcr(void)
-{
-	u32 lo, hi;
-	int used;
-	int i;
-
-	/*
-	 * Allocate up to 6 mcrs to mark as much of ram as possible
-	 * as write combining, weak store ordered.
-	 *
-	 * Load the registers with type 25
-	 *	8	-	weak write ordering
-	 *	16	-	weak read ordering
-	 *	1	-	write combining
-	 */
-	used = centaur_mcr_compute(6, 25);
-
-	/*
-	 * Mark the registers we are using.
-	 */
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	for (i = 0; i < used; i++)
-		lo |= 1<<(9+i);
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-
-	/*
-	 * Wipe unused MCRs
-	 */
-
-	for (i = used; i < 8; i++)
-		wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- * Handle the MCR key on the Winchip 2.
- */
-static void winchip2_unprotect_mcr(void)
-{
-	u32 lo, hi;
-	u32 key;
-
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo &= ~0x1C0;	/* blank bits 8-6 */
-	key = (lo>>17) & 7;
-	lo |= key<<6;	/* replace with unlock key */
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void winchip2_protect_mcr(void)
-{
-	u32 lo, hi;
-
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo &= ~0x1C0;	/* blank bits 8-6 */
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
 #define ACE_PRESENT (1 << 6)
 #define ACE_ENABLED (1 << 7)
 #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
@@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c)
 fcr_clr = DPDC;
 printk(KERN_NOTICE "Disabling bugged TSC.\n");
 clear_cpu_cap(c, X86_FEATURE_TSC);
-#ifdef CONFIG_X86_OOSTORE
-			centaur_create_optimal_mcr();
-			/*
-			 * Enable:
-			 *	write combining on non-stack, non-string
-			 *	write combining on string, all types
-			 *	weak write ordering
-			 *
-			 * The C6 original lacks weak read order
-			 *
-			 * Note 0x120 is write only on Winchip 1
-			 */
-			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
 break;
 case 8:
 switch (c->x86_mask) {
@@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c)
 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
 E2MMX|EAMD3D;
 fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
-		winchip2_unprotect_mcr();
-		winchip2_create_optimal_mcr();
-		rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		/*
-		 * Enable:
-		 *	write combining on non-stack, non-string
-		 *	write combining on string, all types
-		 *	weak write ordering
-		 */
-		lo |= 31;
-		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		winchip2_protect_mcr();
-#endif
 break;
 case 9:
 name = "3";
 fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
 E2MMX|EAMD3D;
 fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
-		winchip2_unprotect_mcr();
-		winchip2_create_optimal_mcr();
-		rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		/*
-		 * Enable:
-		 *	write combining on non-stack, non-string
-		 *	write combining on string, all types
-		 *	weak write ordering
-		 */
-		lo |= 31;
-		wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-		winchip2_protect_mcr();
-#endif
 break;
 default:
 name = "??";
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e81df8fce027..2de1bc09a8d4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 /* instruction emulation calls kvm_set_cr8() */
 r = cr_interception(svm);
-if (irqchip_in_kernel(svm->vcpu.kvm)) {
-	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+if (irqchip_in_kernel(svm->vcpu.kvm))
 return r;
-}
 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
 return r;
 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 return;
 
+clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
 if (irr == -1)
 return;
 
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1b2152..01495755701b 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
 push %r9; \
 push SKBDATA; \
 /* rsi already has offset */ \
-mov $SIZE,%ecx; /* size */ \
+mov $SIZE,%edx; /* size */ \
 call bpf_internal_load_pointer_neg_helper; \
 test %rax,%rax; \
 pop SKBDATA; \
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7d01b8c56c00..cc04e67bfd05 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -40,11 +40,7 @@
 #define smp_rmb() barrier()
 #endif /* CONFIG_X86_PPRO_FENCE */
 
-#ifdef CONFIG_X86_OOSTORE
-#define smp_wmb() wmb()
-#else /* CONFIG_X86_OOSTORE */
 #define smp_wmb() barrier()
-#endif /* CONFIG_X86_OOSTORE */
 
 #define smp_read_barrier_depends() read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 65d3f1b5966c..8cb2522d592a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4225,8 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
 /* devices that don't properly handle queued TRIM commands */
 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
-{ "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
-{ "Crucial_CT???M500SSD3", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+{ "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
 
 /*
 * Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e22be8458d92..bbb17841a9e5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
 {
 if (enable)
 WREG32(CP_MEC_CNTL, 0);
-else
+else {
 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+}
 udelay(50);
 }
 
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 1ecb3f1070e3..94626ea90fa5 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
 WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
 WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
 }
+rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
 }
 
 /**
@@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
 u32 me_cntl, reg_offset;
 int i;
 
+if (enable == false) {
+cik_sdma_gfx_stop(rdev);
+cik_sdma_rlc_stop(rdev);
+}
+
 for (i = 0; i < 2; i++) {
 if (i == 0)
 reg_offset = SDMA0_REGISTER_OFFSET;
@@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
 if (!rdev->sdma_fw)
 return -EINVAL;
 
-/* stop the gfx rings and rlc compute queues */
-cik_sdma_gfx_stop(rdev);
-cik_sdma_rlc_stop(rdev);
-
 /* halt the MEs */
 cik_sdma_enable(rdev, false);
 
@@ -492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
 */
 void cik_sdma_fini(struct radeon_device *rdev)
 {
-/* stop the gfx rings and rlc compute queues */
-cik_sdma_gfx_stop(rdev);
-cik_sdma_rlc_stop(rdev);
 /* halt the MEs */
 cik_sdma_enable(rdev, false);
 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 2aecd6dc2610..66ed3ea71440 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
 /**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 "Error during ACPI methods call\n");
 }
 
-if (radeon_runtime_pm != 0) {
+if ((radeon_runtime_pm == 1) ||
+((radeon_runtime_pm == -1) && radeon_is_px())) {
 pm_runtime_use_autosuspend(dev->dev);
 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a06651309388..214b7992a3aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 moved:
 if (bo->evicted) {
-ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
-if (ret)
-pr_err("Can not flush read caches\n");
+if (bdev->driver->invalidate_caches) {
+ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+if (ret)
+pr_err("Can not flush read caches\n");
+}
 bo->evicted = false;
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 801231c9ae48..0ce48e5a9cb4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 vma->vm_private_data = bo;
 
 /*
- * PFNMAP is faster than MIXEDMAP due to reduced page
- * administration. So use MIXEDMAP only if private VMA, where
- * we need to support COW.
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
 */
-vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+vma->vm_flags |= VM_MIXEDMAP;
 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 return 0;
 out_unref:
@@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
 vma->vm_ops = &ttm_bo_vm_ops;
 vma->vm_private_data = ttm_bo_reference(bo);
-vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+vma->vm_flags |= VM_MIXEDMAP;
 vma->vm_flags |= VM_IO | VM_DONTEXPAND;
 return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 82468d902915..e7af580ab977 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 if (unlikely(ret != 0))
 goto out_unlock;
 
+/*
+ * A gb-aware client referencing a shared surface will
+ * expect a backup buffer to be present.
+ */
+if (dev_priv->has_mob && req->shareable) {
+uint32_t backup_handle;
+
+ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+res->backup_size,
+true,
+&backup_handle,
+&res->backup);
+if (unlikely(ret != 0)) {
+vmw_resource_unreference(&res);
+goto out_unlock;
+}
+}
+
 tmp = vmw_resource_reference(&srf->res);
 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
 req->shareable, VMW_RES_SURFACE,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f5ed03164d86..de17c5593d97 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -387,7 +387,7 @@ config I2C_CBUS_GPIO
 
 config I2C_CPM
 tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
-depends on (CPM1 || CPM2) && OF_I2C
+depends on CPM1 || CPM2
 help
 This supports the use of the I2C interface on Freescale
 processors with CPM1 or CPM2.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1af70145fab9..074b9c8e4cf0 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
 int r;
 struct dm_io_region o_region, c_region;
 struct cache *cache = mg->cache;
+sector_t cblock = from_cblock(mg->cblock);
 
 o_region.bdev = cache->origin_dev->bdev;
 o_region.count = cache->sectors_per_block;
 
 c_region.bdev = cache->cache_dev->bdev;
-c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+c_region.sector = cblock * cache->sectors_per_block;
 c_region.count = cache->sectors_per_block;
 
 if (mg->writeback || mg->demote) {
@@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 bool discarded_block;
 struct dm_bio_prison_cell *cell;
 struct policy_result lookup_result;
-struct per_bio_data *pb;
+struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 
-if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
 /*
 * This can only occur if the io goes to a partial block at
 * the end of the origin device. We don't cache these.
 * Just remap to the origin and carry on.
 */
-remap_to_origin_clear_discard(cache, bio, block);
+remap_to_origin(cache, bio);
 return DM_MAPIO_REMAPPED;
 }
 
-pb = init_per_bio_data(bio, pb_data_size);
-
 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
 defer_bio(cache, bio);
 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b9e2000969f0..95c894482fdd 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
 
 nid = cpu_to_node(cpu);
 page = alloc_pages_exact_node(nid,
-GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 pg_order);
 if (page == NULL) {
 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index a2c47476804d..e8f133e926aa 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -730,7 +730,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 client_info->ntt = 0;
 }
 
-if (!vlan_get_tag(skb, &client_info->vlan_id))
+if (vlan_get_tag(skb, &client_info->vlan_id))
 client_info->vlan_id = 0;
 
 if (!client_info->assigned) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index c37878432717..298c26509095 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -121,6 +121,7 @@ static struct bond_opt_value bond_resend_igmp_tbl[] = {
 static struct bond_opt_value bond_lp_interval_tbl[] = {
 { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
 { "maxval", INT_MAX, BOND_VALFLAG_MAX},
+{ NULL, -1, 0},
 };
 
 static struct bond_option bond_opts[] = {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index cda25ac45b47..6c9e1c9bdeb8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2507,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
 
 bp->fw_wr_seq++;
 msg_data |= bp->fw_wr_seq;
+bp->fw_last_msg = msg_data;
 
 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
 
@@ -4000,8 +4001,23 @@ bnx2_setup_wol(struct bnx2 *bp)
 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
 }
 
-if (!(bp->flags & BNX2_FLAG_NO_WOL))
-bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
+if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
+u32 val;
+
+wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
+if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
+bnx2_fw_sync(bp, wol_msg, 1, 0);
+return;
+}
+/* Tell firmware not to power down the PHY yet, otherwise
+ * the chip will take a long time to respond to MMIO reads.
+ */
+val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
+val | BNX2_PORT_FEATURE_ASF_ENABLED);
+bnx2_fw_sync(bp, wol_msg, 1, 0);
+bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
+}
 
 }
 
@@ -4033,9 +4049,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
 
 if (bp->wol)
 pci_set_power_state(bp->pdev, PCI_D3hot);
-} else {
-pci_set_power_state(bp->pdev, PCI_D3hot);
+break;
+
+}
+if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+u32 val;
+
+/* Tell firmware not to power down the PHY yet,
+ * otherwise the other port may not respond to
+ * MMIO reads.
+ */
+val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+val &= ~BNX2_CONDITION_PM_STATE_MASK;
+val |= BNX2_CONDITION_PM_STATE_UNPREP;
+bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
 }
+pci_set_power_state(bp->pdev, PCI_D3hot);
 
 /* No more memory access after this point until
 * device is brought back to D0.
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f1cf2c44e7ed..e341bc366fa5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6900,6 +6900,7 @@ struct bnx2 {
 
 u16 fw_wr_seq;
 u16 fw_drv_pulse_wr_seq;
+u32 fw_last_msg;
 
 int rx_max_ring;
 int rx_ring_size;
@@ -7406,6 +7407,10 @@ struct bnx2_rv2p_fw_file {
 #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
 #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
 #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
+#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
+#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
+#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
 
 #define BNX2_BC_STATE_DEBUG_CMD 0x1dc
 #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 1803c3959044..354ae9792bad 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1704,7 +1704,7 @@ bfa_flash_sem_get(void __iomem *bar)
 while (!bfa_raw_sem_get(bar)) {
 if (--n <= 0)
 return BFA_STATUS_BADFLASH;
-udelay(10000);
+mdelay(10);
 }
 return BFA_STATUS_OK;
 }
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3190d38e16fb..d0c38e01e99f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp)
 "Unable to allocate sk_buff\n");
 break;
 }
-bp->rx_skbuff[entry] = skb;
 
 /* now fill corresponding descriptor entry */
 paddr = dma_map_single(&bp->pdev->dev, skb->data,
 bp->rx_buffer_size, DMA_FROM_DEVICE);
+if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+dev_kfree_skb(skb);
+break;
+}
+
+bp->rx_skbuff[entry] = skb;
 
 if (entry == RX_RING_SIZE - 1)
 paddr |= MACB_BIT(RX_WRAP);
@@ -725,7 +730,7 @@ static int gem_rx(struct macb *bp, int budget)
 skb_put(skb, len);
 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
 dma_unmap_single(&bp->pdev->dev, addr,
-len, DMA_FROM_DEVICE);
+bp->rx_buffer_size, DMA_FROM_DEVICE);
 
 skb->protocol = eth_type_trans(skb, bp->dev);
 skb_checksum_none_assert(skb);
@@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 entry = macb_tx_ring_wrap(bp->tx_head);
-bp->tx_head++;
 netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
 mapping = dma_map_single(&bp->pdev->dev, skb->data,
 len, DMA_TO_DEVICE);
+if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+kfree_skb(skb);
+goto unlock;
+}
 
+bp->tx_head++;
 tx_skb = &bp->tx_skb[entry];
 tx_skb->skb = skb;
 tx_skb->mapping = mapping;
@@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
 netif_stop_queue(dev);
 
+unlock:
 spin_unlock_irqrestore(&bp->lock, flags);
 
 return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 479a7cba45c0..03a351300013 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex)
 /* Clear any outstanding interrupt. */
 writel(0xffc00000, fep->hwp + FEC_IEVENT);
 
-/* Setup multicast filter. */
-set_multicast_list(ndev);
-#ifndef CONFIG_M5272
-writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
-writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
 /* Set maximum receive buffer size. */
 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
@@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex)
 
 writel(rcntl, fep->hwp + FEC_R_CNTRL);
 
+/* Setup multicast filter. */
+set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
 /* enable ENET endian swap */
 ecntl |= (1 << 8);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4be971590461..1fc8334fc181 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -522,10 +522,21 @@ retry:
 return rc;
 }
 
+static u64 ibmveth_encode_mac_addr(u8 *mac)
+{
+int i;
+u64 encoded = 0;
+
+for (i = 0; i < ETH_ALEN; i++)
+encoded = (encoded << 8) | mac[i];
+
+return encoded;
+}
+
 static int ibmveth_open(struct net_device *netdev)
 {
 struct ibmveth_adapter *adapter = netdev_priv(netdev);
-u64 mac_address = 0;
+u64 mac_address;
 int rxq_entries = 1;
 unsigned long lpar_rc;
 int rc;
@@ -579,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
 adapter->rx_queue.num_slots = rxq_entries;
 adapter->rx_queue.toggle = 1;
 
-memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
-mac_address = mac_address >> 16;
+mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
 
 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
 adapter->rx_queue.queue_len;
@@ -1183,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 /* add the addresses to the filter table */
 netdev_for_each_mc_addr(ha, netdev) {
 /* add the multicast address to the filter table */
-unsigned long mcast_addr = 0;
-memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
+u64 mcast_addr;
+mcast_addr = ibmveth_encode_mac_addr(ha->addr);
 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 IbmVethMcastAddFilter,
 mcast_addr);
@@ -1372,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 
-adapter->mac_addr = 0;
-memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
-
 netdev->irq = dev->irq;
 netdev->netdev_ops = &ibmveth_netdev_ops;
 netdev->ethtool_ops = &netdev_ethtool_ops;
@@ -1383,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 netdev->features |= netdev->hw_features;
 
-memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 451ba7949e15..1f37499d4398 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -138,7 +138,6 @@ struct ibmveth_adapter {
 struct napi_struct napi;
 struct net_device_stats stats;
 unsigned int mcastFilterSize;
-unsigned long mac_addr;
 void * buffer_list_addr;
 void * filter_list_addr;
 dma_addr_t buffer_list_dma;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fad45316200a..84a96f70dfb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -742,6 +742,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
 err = mlx4_en_uc_steer_add(priv, new_mac,
 &qpn,
 &entry->reg_id);
+if (err)
+return err;
+if (priv->tunnel_reg_id) {
+mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+priv->tunnel_reg_id = 0;
+}
+err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
+&priv->tunnel_reg_id);
 return err;
 }
 }
@@ -1792,6 +1800,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 mc_list[5] = priv->port;
 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
 mc_list, MLX4_PROT_ETH, mclist->reg_id);
+if (mclist->tunnel_reg_id)
+mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
 }
 mlx4_en_clear_list(dev);
 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 91b69ff4b4a2..7e2995ecea6f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -129,13 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
| 129 | [0] = "RSS support", | 129 | [0] = "RSS support", |
| 130 | [1] = "RSS Toeplitz Hash Function support", | 130 | [1] = "RSS Toeplitz Hash Function support", |
| 131 | [2] = "RSS XOR Hash Function support", | 131 | [2] = "RSS XOR Hash Function support", |
| 132 | [3] = "Device manage flow steering support", | 132 | [3] = "Device managed flow steering support", |
| 133 | [4] = "Automatic MAC reassignment support", | 133 | [4] = "Automatic MAC reassignment support", |
| 134 | [5] = "Time stamping support", | 134 | [5] = "Time stamping support", |
| 135 | [6] = "VST (control vlan insertion/stripping) support", | 135 | [6] = "VST (control vlan insertion/stripping) support", |
| 136 | [7] = "FSM (MAC anti-spoofing) support", | 136 | [7] = "FSM (MAC anti-spoofing) support", |
| 137 | [8] = "Dynamic QP updates support", | 137 | [8] = "Dynamic QP updates support", |
| 138 | [9] = "TCP/IP offloads/flow-steering for VXLAN support" | 138 | [9] = "Device managed flow steering IPoIB support", |
| 139 | [10] = "TCP/IP offloads/flow-steering for VXLAN support" | ||
| 139 | }; | 140 | }; |
| 140 | int i; | 141 | int i; |
| 141 | 142 | ||
| @@ -859,7 +860,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
| 859 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); | 860 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); |
| 860 | 861 | ||
| 861 | /* For guests, disable vxlan tunneling */ | 862 | /* For guests, disable vxlan tunneling */ |
| 862 | MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); | 863 | MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); |
| 863 | field &= 0xf7; | 864 | field &= 0xf7; |
| 864 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); | 865 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); |
| 865 | 866 | ||
| @@ -869,7 +870,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
| 869 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); | 870 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); |
| 870 | 871 | ||
| 871 | /* For guests, disable mw type 2 */ | 872 | /* For guests, disable mw type 2 */ |
| 872 | MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 873 | MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
| 873 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; | 874 | bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; |
| 874 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); | 875 | MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); |
| 875 | 876 | ||
| @@ -883,7 +884,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, | |||
| 883 | } | 884 | } |
| 884 | 885 | ||
| 885 | /* turn off ipoib managed steering for guests */ | 886 | /* turn off ipoib managed steering for guests */ |
| 886 | MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); | 887 | MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); |
| 887 | field &= ~0x80; | 888 | field &= ~0x80; |
| 888 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); | 889 | MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); |
| 889 | 890 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d711158b0d4b..936c15364739 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -150,6 +150,8 @@ struct mlx4_port_config { | |||
| 150 | struct pci_dev *pdev; | 150 | struct pci_dev *pdev; |
| 151 | }; | 151 | }; |
| 152 | 152 | ||
| 153 | static atomic_t pf_loading = ATOMIC_INIT(0); | ||
| 154 | |||
| 153 | int mlx4_check_port_params(struct mlx4_dev *dev, | 155 | int mlx4_check_port_params(struct mlx4_dev *dev, |
| 154 | enum mlx4_port_type *port_type) | 156 | enum mlx4_port_type *port_type) |
| 155 | { | 157 | { |
| @@ -749,7 +751,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev) | |||
| 749 | has_eth_port = true; | 751 | has_eth_port = true; |
| 750 | } | 752 | } |
| 751 | 753 | ||
| 752 | if (has_ib_port) | 754 | if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) |
| 753 | request_module_nowait(IB_DRV_NAME); | 755 | request_module_nowait(IB_DRV_NAME); |
| 754 | if (has_eth_port) | 756 | if (has_eth_port) |
| 755 | request_module_nowait(EN_DRV_NAME); | 757 | request_module_nowait(EN_DRV_NAME); |
| @@ -1407,6 +1409,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev) | |||
| 1407 | u32 slave_read; | 1409 | u32 slave_read; |
| 1408 | u32 cmd_channel_ver; | 1410 | u32 cmd_channel_ver; |
| 1409 | 1411 | ||
| 1412 | if (atomic_read(&pf_loading)) { | ||
| 1413 | mlx4_warn(dev, "PF is not ready. Deferring probe\n"); | ||
| 1414 | return -EPROBE_DEFER; | ||
| 1415 | } | ||
| 1416 | |||
| 1410 | mutex_lock(&priv->cmd.slave_cmd_mutex); | 1417 | mutex_lock(&priv->cmd.slave_cmd_mutex); |
| 1411 | priv->cmd.max_cmds = 1; | 1418 | priv->cmd.max_cmds = 1; |
| 1412 | mlx4_warn(dev, "Sending reset\n"); | 1419 | mlx4_warn(dev, "Sending reset\n"); |
| @@ -2319,7 +2326,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) | |||
| 2319 | 2326 | ||
| 2320 | if (num_vfs) { | 2327 | if (num_vfs) { |
| 2321 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); | 2328 | mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); |
| 2329 | |||
| 2330 | atomic_inc(&pf_loading); | ||
| 2322 | err = pci_enable_sriov(pdev, num_vfs); | 2331 | err = pci_enable_sriov(pdev, num_vfs); |
| 2332 | atomic_dec(&pf_loading); | ||
| 2333 | |||
| 2323 | if (err) { | 2334 | if (err) { |
| 2324 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", | 2335 | mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", |
| 2325 | err); | 2336 | err); |
| @@ -2684,6 +2695,7 @@ static struct pci_driver mlx4_driver = { | |||
| 2684 | .name = DRV_NAME, | 2695 | .name = DRV_NAME, |
| 2685 | .id_table = mlx4_pci_table, | 2696 | .id_table = mlx4_pci_table, |
| 2686 | .probe = mlx4_init_one, | 2697 | .probe = mlx4_init_one, |
| 2698 | .shutdown = mlx4_remove_one, | ||
| 2687 | .remove = mlx4_remove_one, | 2699 | .remove = mlx4_remove_one, |
| 2688 | .err_handler = &mlx4_err_handler, | 2700 | .err_handler = &mlx4_err_handler, |
| 2689 | }; | 2701 | }; |
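Note on the mlx4 main.c hunks above: the pf_loading counter closes a window where a VF probed on the same host would time out because the PF was still inside pci_enable_sriov(). Below is roughly the same idea as a self-contained C11 sketch, not the driver code itself; the only kernel constant borrowed is EPROBE_DEFER's numeric value.

#include <stdatomic.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* same numeric value as the kernel's EPROBE_DEFER */

/* Counter the PF holds while SR-IOV enablement is running. */
static atomic_int pf_loading;

/* VF probe path: bail out early and ask to be retried later
 * instead of timing out on a command channel that is not ready yet. */
static int init_slave(void)
{
	if (atomic_load(&pf_loading)) {
		fprintf(stderr, "PF is not ready, deferring probe\n");
		return -EPROBE_DEFER;
	}
	/* ... talk to the PF over the command channel ... */
	return 0;
}

/* PF probe path: mark the window during which VFs must defer. */
static void init_pf(void)
{
	atomic_fetch_add(&pf_loading, 1);
	/* ... pci_enable_sriov(pdev, num_vfs) runs here ... */
	atomic_fetch_sub(&pf_loading, 1);
}

int main(void)
{
	atomic_fetch_add(&pf_loading, 1);	/* simulate the PF mid-probe */
	printf("probe -> %d\n", init_slave());	/* -517: caller retries later */
	atomic_fetch_sub(&pf_loading, 1);
	printf("probe -> %d\n", init_slave());	/* 0 */
	init_pf();
	return 0;
}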
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e9779653cd4c..3ff7bc3e7a23 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -209,7 +209,7 @@ static const struct { | |||
| 209 | [RTL_GIGA_MAC_VER_16] = | 209 | [RTL_GIGA_MAC_VER_16] = |
| 210 | _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), | 210 | _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), |
| 211 | [RTL_GIGA_MAC_VER_17] = | 211 | [RTL_GIGA_MAC_VER_17] = |
| 212 | _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), | 212 | _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), |
| 213 | [RTL_GIGA_MAC_VER_18] = | 213 | [RTL_GIGA_MAC_VER_18] = |
| 214 | _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), | 214 | _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), |
| 215 | [RTL_GIGA_MAC_VER_19] = | 215 | [RTL_GIGA_MAC_VER_19] = |
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 72d282bf33a5..c553f6b5a913 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c | |||
| @@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) | |||
| 151 | sizeof(struct dma_desc))); | 151 | sizeof(struct dma_desc))); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | const struct stmmac_chain_mode_ops chain_mode_ops = { | 154 | const struct stmmac_mode_ops chain_mode_ops = { |
| 155 | .init = stmmac_init_dma_chain, | 155 | .init = stmmac_init_dma_chain, |
| 156 | .is_jumbo_frm = stmmac_is_jumbo_frm, | 156 | .is_jumbo_frm = stmmac_is_jumbo_frm, |
| 157 | .jumbo_frm = stmmac_jumbo_frm, | 157 | .jumbo_frm = stmmac_jumbo_frm, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7834a3993946..74610f3aca9e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
| @@ -419,20 +419,13 @@ struct mii_regs { | |||
| 419 | unsigned int data; /* MII Data */ | 419 | unsigned int data; /* MII Data */ |
| 420 | }; | 420 | }; |
| 421 | 421 | ||
| 422 | struct stmmac_ring_mode_ops { | 422 | struct stmmac_mode_ops { |
| 423 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | ||
| 424 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | ||
| 425 | void (*refill_desc3) (void *priv, struct dma_desc *p); | ||
| 426 | void (*init_desc3) (struct dma_desc *p); | ||
| 427 | void (*clean_desc3) (void *priv, struct dma_desc *p); | ||
| 428 | int (*set_16kib_bfsize) (int mtu); | ||
| 429 | }; | ||
| 430 | |||
| 431 | struct stmmac_chain_mode_ops { | ||
| 432 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, | 423 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, |
| 433 | unsigned int extend_desc); | 424 | unsigned int extend_desc); |
| 434 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | 425 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); |
| 435 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | 426 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); |
| 427 | int (*set_16kib_bfsize)(int mtu); | ||
| 428 | void (*init_desc3)(struct dma_desc *p); | ||
| 436 | void (*refill_desc3) (void *priv, struct dma_desc *p); | 429 | void (*refill_desc3) (void *priv, struct dma_desc *p); |
| 437 | void (*clean_desc3) (void *priv, struct dma_desc *p); | 430 | void (*clean_desc3) (void *priv, struct dma_desc *p); |
| 438 | }; | 431 | }; |
| @@ -441,8 +434,7 @@ struct mac_device_info { | |||
| 441 | const struct stmmac_ops *mac; | 434 | const struct stmmac_ops *mac; |
| 442 | const struct stmmac_desc_ops *desc; | 435 | const struct stmmac_desc_ops *desc; |
| 443 | const struct stmmac_dma_ops *dma; | 436 | const struct stmmac_dma_ops *dma; |
| 444 | const struct stmmac_ring_mode_ops *ring; | 437 | const struct stmmac_mode_ops *mode; |
| 445 | const struct stmmac_chain_mode_ops *chain; | ||
| 446 | const struct stmmac_hwtimestamp *ptp; | 438 | const struct stmmac_hwtimestamp *ptp; |
| 447 | struct mii_regs mii; /* MII register Addresses */ | 439 | struct mii_regs mii; /* MII register Addresses */ |
| 448 | struct mac_link link; | 440 | struct mac_link link; |
| @@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, | |||
| 460 | void stmmac_set_mac(void __iomem *ioaddr, bool enable); | 452 | void stmmac_set_mac(void __iomem *ioaddr, bool enable); |
| 461 | 453 | ||
| 462 | void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); | 454 | void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); |
| 463 | extern const struct stmmac_ring_mode_ops ring_mode_ops; | 455 | extern const struct stmmac_mode_ops ring_mode_ops; |
| 464 | extern const struct stmmac_chain_mode_ops chain_mode_ops; | 456 | extern const struct stmmac_mode_ops chain_mode_ops; |
| 465 | 457 | ||
| 466 | #endif /* __COMMON_H__ */ | 458 | #endif /* __COMMON_H__ */ |
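Note on the common.h hunk above: after the consolidation there is a single hw->mode ops table, so callers no longer branch on ring vs. chain mode themselves; optional hooks such as set_16kib_bfsize are simply left NULL and tested before use, as the stmmac_main.c hunks further below do. The following is a toy, compilable sketch of that pattern with names only loosely modelled on the driver.

#include <stdio.h>

/* Single ops table covering both descriptor layouts, in the spirit of the
 * new struct stmmac_mode_ops: callers go through mode->... and do not care
 * which implementation was selected at init time. */
struct mode_ops {
	const char *name;
	void (*refill_desc3)(void *priv, void *desc);
	int (*set_16kib_bfsize)(int mtu);	/* optional: only ring mode provides it */
};

static void ring_refill_desc3(void *priv, void *desc)
{
	printf("ring: fill DES3 as the second buffer\n");
}

static int ring_set_16kib_bfsize(int mtu)
{
	return mtu > 8192 ? 16384 : 0;
}

static void chain_refill_desc3(void *priv, void *desc)
{
	printf("chain: fix up the next-descriptor pointer\n");
}

static const struct mode_ops ring_mode_ops = {
	.name = "ring",
	.refill_desc3 = ring_refill_desc3,
	.set_16kib_bfsize = ring_set_16kib_bfsize,
};

static const struct mode_ops chain_mode_ops = {
	.name = "chain",
	.refill_desc3 = chain_refill_desc3,
	/* .set_16kib_bfsize left NULL: callers must test before calling */
};

int main(void)
{
	int chain_mode = 0;	/* module parameter in the real driver */
	const struct mode_ops *mode = chain_mode ? &chain_mode_ops : &ring_mode_ops;

	printf("%s mode enabled\n", mode->name);
	mode->refill_desc3(NULL, NULL);
	if (mode->set_16kib_bfsize)	/* mirrors the NULL test in init_dma_desc_rings() */
		printf("bfsize = %d\n", mode->set_16kib_bfsize(9000));
	return 0;
}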
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a96c7c2f5f3f..650a4be6bce5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
| @@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) | |||
| 100 | { | 100 | { |
| 101 | struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; | 101 | struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; |
| 102 | 102 | ||
| 103 | if (unlikely(priv->plat->has_gmac)) | 103 | /* Fill DES3 in case of RING mode */ |
| 104 | /* Fill DES3 in case of RING mode */ | 104 | if (priv->dma_buf_sz >= BUF_SIZE_8KiB) |
| 105 | if (priv->dma_buf_sz >= BUF_SIZE_8KiB) | 105 | p->des3 = p->des2 + BUF_SIZE_8KiB; |
| 106 | p->des3 = p->des2 + BUF_SIZE_8KiB; | ||
| 107 | } | 106 | } |
| 108 | 107 | ||
| 109 | /* In ring mode we need to fill the desc3 because it is used as buffer */ | 108 | /* In ring mode we need to fill the desc3 because it is used as buffer */ |
| @@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu) | |||
| 126 | return ret; | 125 | return ret; |
| 127 | } | 126 | } |
| 128 | 127 | ||
| 129 | const struct stmmac_ring_mode_ops ring_mode_ops = { | 128 | const struct stmmac_mode_ops ring_mode_ops = { |
| 130 | .is_jumbo_frm = stmmac_is_jumbo_frm, | 129 | .is_jumbo_frm = stmmac_is_jumbo_frm, |
| 131 | .jumbo_frm = stmmac_jumbo_frm, | 130 | .jumbo_frm = stmmac_jumbo_frm, |
| 132 | .refill_desc3 = stmmac_refill_desc3, | 131 | .refill_desc3 = stmmac_refill_desc3, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 078ad0ec8593..8543e1cfd55e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -92,8 +92,8 @@ static int tc = TC_DEFAULT; | |||
| 92 | module_param(tc, int, S_IRUGO | S_IWUSR); | 92 | module_param(tc, int, S_IRUGO | S_IWUSR); |
| 93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); | 93 | MODULE_PARM_DESC(tc, "DMA threshold control value"); |
| 94 | 94 | ||
| 95 | #define DMA_BUFFER_SIZE BUF_SIZE_4KiB | 95 | #define DEFAULT_BUFSIZE 1536 |
| 96 | static int buf_sz = DMA_BUFFER_SIZE; | 96 | static int buf_sz = DEFAULT_BUFSIZE; |
| 97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); | 97 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); |
| 98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); | 98 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); |
| 99 | 99 | ||
| @@ -136,8 +136,8 @@ static void stmmac_verify_args(void) | |||
| 136 | dma_rxsize = DMA_RX_SIZE; | 136 | dma_rxsize = DMA_RX_SIZE; |
| 137 | if (unlikely(dma_txsize < 0)) | 137 | if (unlikely(dma_txsize < 0)) |
| 138 | dma_txsize = DMA_TX_SIZE; | 138 | dma_txsize = DMA_TX_SIZE; |
| 139 | if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) | 139 | if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) |
| 140 | buf_sz = DMA_BUFFER_SIZE; | 140 | buf_sz = DEFAULT_BUFSIZE; |
| 141 | if (unlikely(flow_ctrl > 1)) | 141 | if (unlikely(flow_ctrl > 1)) |
| 142 | flow_ctrl = FLOW_AUTO; | 142 | flow_ctrl = FLOW_AUTO; |
| 143 | else if (likely(flow_ctrl < 0)) | 143 | else if (likely(flow_ctrl < 0)) |
| @@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
| 286 | 286 | ||
| 287 | /* MAC core supports the EEE feature. */ | 287 | /* MAC core supports the EEE feature. */ |
| 288 | if (priv->dma_cap.eee) { | 288 | if (priv->dma_cap.eee) { |
| 289 | int tx_lpi_timer = priv->tx_lpi_timer; | ||
| 290 | |||
| 289 | /* Check if the PHY supports EEE */ | 291 | /* Check if the PHY supports EEE */ |
| 290 | if (phy_init_eee(priv->phydev, 1)) | 292 | if (phy_init_eee(priv->phydev, 1)) { |
| 293 | /* To manage at run-time if the EEE cannot be supported | ||
| 294 | * anymore (for example because the lp caps have been | ||
| 295 | * changed). | ||
| 296 | * In that case the driver disables its own timers. | ||
| 297 | */ | ||
| 298 | if (priv->eee_active) { | ||
| 299 | pr_debug("stmmac: disable EEE\n"); | ||
| 300 | del_timer_sync(&priv->eee_ctrl_timer); | ||
| 301 | priv->hw->mac->set_eee_timer(priv->ioaddr, 0, | ||
| 302 | tx_lpi_timer); | ||
| 303 | } | ||
| 304 | priv->eee_active = 0; | ||
| 291 | goto out; | 305 | goto out; |
| 292 | 306 | } | |
| 307 | /* Activate the EEE and start timers */ | ||
| 293 | if (!priv->eee_active) { | 308 | if (!priv->eee_active) { |
| 294 | priv->eee_active = 1; | 309 | priv->eee_active = 1; |
| 295 | init_timer(&priv->eee_ctrl_timer); | 310 | init_timer(&priv->eee_ctrl_timer); |
| @@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
| 300 | 315 | ||
| 301 | priv->hw->mac->set_eee_timer(priv->ioaddr, | 316 | priv->hw->mac->set_eee_timer(priv->ioaddr, |
| 302 | STMMAC_DEFAULT_LIT_LS, | 317 | STMMAC_DEFAULT_LIT_LS, |
| 303 | priv->tx_lpi_timer); | 318 | tx_lpi_timer); |
| 304 | } else | 319 | } else |
| 305 | /* Set HW EEE according to the speed */ | 320 | /* Set HW EEE according to the speed */ |
| 306 | priv->hw->mac->set_eee_pls(priv->ioaddr, | 321 | priv->hw->mac->set_eee_pls(priv->ioaddr, |
| 307 | priv->phydev->link); | 322 | priv->phydev->link); |
| 308 | 323 | ||
| 309 | pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); | 324 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); |
| 310 | 325 | ||
| 311 | ret = true; | 326 | ret = true; |
| 312 | } | 327 | } |
| @@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize) | |||
| 886 | ret = BUF_SIZE_8KiB; | 901 | ret = BUF_SIZE_8KiB; |
| 887 | else if (mtu >= BUF_SIZE_2KiB) | 902 | else if (mtu >= BUF_SIZE_2KiB) |
| 888 | ret = BUF_SIZE_4KiB; | 903 | ret = BUF_SIZE_4KiB; |
| 889 | else if (mtu >= DMA_BUFFER_SIZE) | 904 | else if (mtu > DEFAULT_BUFSIZE) |
| 890 | ret = BUF_SIZE_2KiB; | 905 | ret = BUF_SIZE_2KiB; |
| 891 | else | 906 | else |
| 892 | ret = DMA_BUFFER_SIZE; | 907 | ret = DEFAULT_BUFSIZE; |
| 893 | 908 | ||
| 894 | return ret; | 909 | return ret; |
| 895 | } | 910 | } |
| @@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
| 951 | 966 | ||
| 952 | p->des2 = priv->rx_skbuff_dma[i]; | 967 | p->des2 = priv->rx_skbuff_dma[i]; |
| 953 | 968 | ||
| 954 | if ((priv->mode == STMMAC_RING_MODE) && | 969 | if ((priv->hw->mode->init_desc3) && |
| 955 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) | 970 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) |
| 956 | priv->hw->ring->init_desc3(p); | 971 | priv->hw->mode->init_desc3(p); |
| 957 | 972 | ||
| 958 | return 0; | 973 | return 0; |
| 959 | } | 974 | } |
| @@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
| 984 | unsigned int bfsize = 0; | 999 | unsigned int bfsize = 0; |
| 985 | int ret = -ENOMEM; | 1000 | int ret = -ENOMEM; |
| 986 | 1001 | ||
| 987 | /* Set the max buffer size according to the DESC mode | 1002 | if (priv->hw->mode->set_16kib_bfsize) |
| 988 | * and the MTU. Note that RING mode allows 16KiB bsize. | 1003 | bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); |
| 989 | */ | ||
| 990 | if (priv->mode == STMMAC_RING_MODE) | ||
| 991 | bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); | ||
| 992 | 1004 | ||
| 993 | if (bfsize < BUF_SIZE_16KiB) | 1005 | if (bfsize < BUF_SIZE_16KiB) |
| 994 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); | 1006 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
| @@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
| 1029 | /* Setup the chained descriptor addresses */ | 1041 | /* Setup the chained descriptor addresses */ |
| 1030 | if (priv->mode == STMMAC_CHAIN_MODE) { | 1042 | if (priv->mode == STMMAC_CHAIN_MODE) { |
| 1031 | if (priv->extend_desc) { | 1043 | if (priv->extend_desc) { |
| 1032 | priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, | 1044 | priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, |
| 1033 | rxsize, 1); | 1045 | rxsize, 1); |
| 1034 | priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, | 1046 | priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, |
| 1035 | txsize, 1); | 1047 | txsize, 1); |
| 1036 | } else { | 1048 | } else { |
| 1037 | priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, | 1049 | priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, |
| 1038 | rxsize, 0); | 1050 | rxsize, 0); |
| 1039 | priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, | 1051 | priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, |
| 1040 | txsize, 0); | 1052 | txsize, 0); |
| 1041 | } | 1053 | } |
| 1042 | } | 1054 | } |
| 1043 | 1055 | ||
| @@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) | |||
| 1288 | DMA_TO_DEVICE); | 1300 | DMA_TO_DEVICE); |
| 1289 | priv->tx_skbuff_dma[entry] = 0; | 1301 | priv->tx_skbuff_dma[entry] = 0; |
| 1290 | } | 1302 | } |
| 1291 | priv->hw->ring->clean_desc3(priv, p); | 1303 | priv->hw->mode->clean_desc3(priv, p); |
| 1292 | 1304 | ||
| 1293 | if (likely(skb != NULL)) { | 1305 | if (likely(skb != NULL)) { |
| 1294 | dev_kfree_skb(skb); | 1306 | dev_kfree_skb(skb); |
| @@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1844 | int nfrags = skb_shinfo(skb)->nr_frags; | 1856 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 1845 | struct dma_desc *desc, *first; | 1857 | struct dma_desc *desc, *first; |
| 1846 | unsigned int nopaged_len = skb_headlen(skb); | 1858 | unsigned int nopaged_len = skb_headlen(skb); |
| 1859 | unsigned int enh_desc = priv->plat->enh_desc; | ||
| 1847 | 1860 | ||
| 1848 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { | 1861 | if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { |
| 1849 | if (!netif_queue_stopped(dev)) { | 1862 | if (!netif_queue_stopped(dev)) { |
| @@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1871 | first = desc; | 1884 | first = desc; |
| 1872 | 1885 | ||
| 1873 | /* To program the descriptors according to the size of the frame */ | 1886 | /* To program the descriptors according to the size of the frame */ |
| 1874 | if (priv->mode == STMMAC_RING_MODE) { | 1887 | if (enh_desc) |
| 1875 | is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, | 1888 | is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); |
| 1876 | priv->plat->enh_desc); | 1889 | |
| 1877 | if (unlikely(is_jumbo)) | ||
| 1878 | entry = priv->hw->ring->jumbo_frm(priv, skb, | ||
| 1879 | csum_insertion); | ||
| 1880 | } else { | ||
| 1881 | is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len, | ||
| 1882 | priv->plat->enh_desc); | ||
| 1883 | if (unlikely(is_jumbo)) | ||
| 1884 | entry = priv->hw->chain->jumbo_frm(priv, skb, | ||
| 1885 | csum_insertion); | ||
| 1886 | } | ||
| 1887 | if (likely(!is_jumbo)) { | 1890 | if (likely(!is_jumbo)) { |
| 1888 | desc->des2 = dma_map_single(priv->device, skb->data, | 1891 | desc->des2 = dma_map_single(priv->device, skb->data, |
| 1889 | nopaged_len, DMA_TO_DEVICE); | 1892 | nopaged_len, DMA_TO_DEVICE); |
| 1890 | priv->tx_skbuff_dma[entry] = desc->des2; | 1893 | priv->tx_skbuff_dma[entry] = desc->des2; |
| 1891 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, | 1894 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, |
| 1892 | csum_insertion, priv->mode); | 1895 | csum_insertion, priv->mode); |
| 1893 | } else | 1896 | } else { |
| 1894 | desc = first; | 1897 | desc = first; |
| 1898 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); | ||
| 1899 | } | ||
| 1895 | 1900 | ||
| 1896 | for (i = 0; i < nfrags; i++) { | 1901 | for (i = 0; i < nfrags; i++) { |
| 1897 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1902 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| @@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) | |||
| 2029 | 2034 | ||
| 2030 | p->des2 = priv->rx_skbuff_dma[entry]; | 2035 | p->des2 = priv->rx_skbuff_dma[entry]; |
| 2031 | 2036 | ||
| 2032 | priv->hw->ring->refill_desc3(priv, p); | 2037 | priv->hw->mode->refill_desc3(priv, p); |
| 2033 | 2038 | ||
| 2034 | if (netif_msg_rx_status(priv)) | 2039 | if (netif_msg_rx_status(priv)) |
| 2035 | pr_debug("\trefill entry #%d\n", entry); | 2040 | pr_debug("\trefill entry #%d\n", entry); |
| @@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
| 2633 | 2638 | ||
| 2634 | /* To use the chained or ring mode */ | 2639 | /* To use the chained or ring mode */ |
| 2635 | if (chain_mode) { | 2640 | if (chain_mode) { |
| 2636 | priv->hw->chain = &chain_mode_ops; | 2641 | priv->hw->mode = &chain_mode_ops; |
| 2637 | pr_info(" Chain mode enabled\n"); | 2642 | pr_info(" Chain mode enabled\n"); |
| 2638 | priv->mode = STMMAC_CHAIN_MODE; | 2643 | priv->mode = STMMAC_CHAIN_MODE; |
| 2639 | } else { | 2644 | } else { |
| 2640 | priv->hw->ring = &ring_mode_ops; | 2645 | priv->hw->mode = &ring_mode_ops; |
| 2641 | pr_info(" Ring mode enabled\n"); | 2646 | pr_info(" Ring mode enabled\n"); |
| 2642 | priv->mode = STMMAC_RING_MODE; | 2647 | priv->mode = STMMAC_RING_MODE; |
| 2643 | } | 2648 | } |
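Note on the stmmac_set_bfsize() hunk above: it carries the memory saving of this change. With the default buffer cut from 4 KiB to 1536 bytes and the last comparison switched from >= to >, a standard 1500-byte MTU now stays on the small buffer instead of always getting a 4 KiB one. The standalone mirror below reproduces that mapping; the first branch is not visible in the hunk and is assumed to select 8 KiB for MTUs of 4 KiB and above, with 16 KiB handled separately via set_16kib_bfsize().

#include <stdio.h>

#define DEFAULT_BUFSIZE	1536
#define BUF_SIZE_2KiB	2048
#define BUF_SIZE_4KiB	4096
#define BUF_SIZE_8KiB	8192

/* MTU -> DMA buffer size mapping after the patch. */
static int set_bfsize(int mtu)
{
	if (mtu >= BUF_SIZE_4KiB)	/* assumed top branch, not shown in the hunk */
		return BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		return BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		return BUF_SIZE_2KiB;
	else
		return DEFAULT_BUFSIZE;
}

int main(void)
{
	printf("%d\n", set_bfsize(1500));	/* 1536 now, 4096 before the patch */
	printf("%d\n", set_bfsize(1600));	/* 2048 */
	return 0;
}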
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index c61bc72b8e90..8fb32a80f1c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -36,7 +36,7 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
| 36 | #ifdef CONFIG_DWMAC_STI | 36 | #ifdef CONFIG_DWMAC_STI |
| 37 | { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, | 37 | { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, |
| 38 | { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, | 38 | { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, |
| 39 | { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, | 39 | { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, |
| 40 | #endif | 40 | #endif |
| 41 | /* SoC specific glue layers should come before generic bindings */ | 41 | /* SoC specific glue layers should come before generic bindings */ |
| 42 | { .compatible = "st,spear600-gmac"}, | 42 | { .compatible = "st,spear600-gmac"}, |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7141a1937360..d6fce9750b95 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -442,6 +442,8 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 442 | if (!net) | 442 | if (!net) |
| 443 | return -ENOMEM; | 443 | return -ENOMEM; |
| 444 | 444 | ||
| 445 | netif_carrier_off(net); | ||
| 446 | |||
| 445 | net_device_ctx = netdev_priv(net); | 447 | net_device_ctx = netdev_priv(net); |
| 446 | net_device_ctx->device_ctx = dev; | 448 | net_device_ctx->device_ctx = dev; |
| 447 | hv_set_drvdata(dev, net); | 449 | hv_set_drvdata(dev, net); |
| @@ -473,6 +475,8 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 473 | pr_err("Unable to register netdev.\n"); | 475 | pr_err("Unable to register netdev.\n"); |
| 474 | rndis_filter_device_remove(dev); | 476 | rndis_filter_device_remove(dev); |
| 475 | free_netdev(net); | 477 | free_netdev(net); |
| 478 | } else { | ||
| 479 | schedule_delayed_work(&net_device_ctx->dwork, 0); | ||
| 476 | } | 480 | } |
| 477 | 481 | ||
| 478 | return ret; | 482 | return ret; |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 1084e5de3ceb..b54fd257652b 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
| @@ -243,6 +243,22 @@ static int rndis_filter_send_request(struct rndis_device *dev, | |||
| 243 | return ret; | 243 | return ret; |
| 244 | } | 244 | } |
| 245 | 245 | ||
| 246 | static void rndis_set_link_state(struct rndis_device *rdev, | ||
| 247 | struct rndis_request *request) | ||
| 248 | { | ||
| 249 | u32 link_status; | ||
| 250 | struct rndis_query_complete *query_complete; | ||
| 251 | |||
| 252 | query_complete = &request->response_msg.msg.query_complete; | ||
| 253 | |||
| 254 | if (query_complete->status == RNDIS_STATUS_SUCCESS && | ||
| 255 | query_complete->info_buflen == sizeof(u32)) { | ||
| 256 | memcpy(&link_status, (void *)((unsigned long)query_complete + | ||
| 257 | query_complete->info_buf_offset), sizeof(u32)); | ||
| 258 | rdev->link_state = link_status != 0; | ||
| 259 | } | ||
| 260 | } | ||
| 261 | |||
| 246 | static void rndis_filter_receive_response(struct rndis_device *dev, | 262 | static void rndis_filter_receive_response(struct rndis_device *dev, |
| 247 | struct rndis_message *resp) | 263 | struct rndis_message *resp) |
| 248 | { | 264 | { |
| @@ -272,6 +288,10 @@ static void rndis_filter_receive_response(struct rndis_device *dev, | |||
| 272 | sizeof(struct rndis_message) + RNDIS_EXT_LEN) { | 288 | sizeof(struct rndis_message) + RNDIS_EXT_LEN) { |
| 273 | memcpy(&request->response_msg, resp, | 289 | memcpy(&request->response_msg, resp, |
| 274 | resp->msg_len); | 290 | resp->msg_len); |
| 291 | if (request->request_msg.ndis_msg_type == | ||
| 292 | RNDIS_MSG_QUERY && request->request_msg.msg. | ||
| 293 | query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) | ||
| 294 | rndis_set_link_state(dev, request); | ||
| 275 | } else { | 295 | } else { |
| 276 | netdev_err(ndev, | 296 | netdev_err(ndev, |
| 277 | "rndis response buffer overflow " | 297 | "rndis response buffer overflow " |
| @@ -620,7 +640,6 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev) | |||
| 620 | ret = rndis_filter_query_device(dev, | 640 | ret = rndis_filter_query_device(dev, |
| 621 | RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, | 641 | RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, |
| 622 | &link_status, &size); | 642 | &link_status, &size); |
| 623 | dev->link_state = (link_status != 0) ? true : false; | ||
| 624 | 643 | ||
| 625 | return ret; | 644 | return ret; |
| 626 | } | 645 | } |
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index ab31544bc254..a30258aad139 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c | |||
| @@ -546,12 +546,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) | |||
| 546 | int rc; | 546 | int rc; |
| 547 | unsigned long flags; | 547 | unsigned long flags; |
| 548 | 548 | ||
| 549 | spin_lock(&lp->lock); | 549 | spin_lock_irqsave(&lp->lock, flags); |
| 550 | if (lp->irq_busy) { | 550 | if (lp->irq_busy) { |
| 551 | spin_unlock(&lp->lock); | 551 | spin_unlock_irqrestore(&lp->lock, flags); |
| 552 | return -EBUSY; | 552 | return -EBUSY; |
| 553 | } | 553 | } |
| 554 | spin_unlock(&lp->lock); | 554 | spin_unlock_irqrestore(&lp->lock, flags); |
| 555 | 555 | ||
| 556 | might_sleep(); | 556 | might_sleep(); |
| 557 | 557 | ||
| @@ -725,10 +725,11 @@ static void at86rf230_irqwork_level(struct work_struct *work) | |||
| 725 | static irqreturn_t at86rf230_isr(int irq, void *data) | 725 | static irqreturn_t at86rf230_isr(int irq, void *data) |
| 726 | { | 726 | { |
| 727 | struct at86rf230_local *lp = data; | 727 | struct at86rf230_local *lp = data; |
| 728 | unsigned long flags; | ||
| 728 | 729 | ||
| 729 | spin_lock(&lp->lock); | 730 | spin_lock_irqsave(&lp->lock, flags); |
| 730 | lp->irq_busy = 1; | 731 | lp->irq_busy = 1; |
| 731 | spin_unlock(&lp->lock); | 732 | spin_unlock_irqrestore(&lp->lock, flags); |
| 732 | 733 | ||
| 733 | schedule_work(&lp->irqwork); | 734 | schedule_work(&lp->irqwork); |
| 734 | 735 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 19c9eca0ef26..76d96b9ebcdb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -164,9 +164,9 @@ static const struct phy_setting settings[] = { | |||
| 164 | * of that setting. Returns the index of the last setting if | 164 | * of that setting. Returns the index of the last setting if |
| 165 | * none of the others match. | 165 | * none of the others match. |
| 166 | */ | 166 | */ |
| 167 | static inline int phy_find_setting(int speed, int duplex) | 167 | static inline unsigned int phy_find_setting(int speed, int duplex) |
| 168 | { | 168 | { |
| 169 | int idx = 0; | 169 | unsigned int idx = 0; |
| 170 | 170 | ||
| 171 | while (idx < ARRAY_SIZE(settings) && | 171 | while (idx < ARRAY_SIZE(settings) && |
| 172 | (settings[idx].speed != speed || settings[idx].duplex != duplex)) | 172 | (settings[idx].speed != speed || settings[idx].duplex != duplex)) |
| @@ -185,7 +185,7 @@ static inline int phy_find_setting(int speed, int duplex) | |||
| 185 | * the mask in features. Returns the index of the last setting | 185 | * the mask in features. Returns the index of the last setting |
| 186 | * if nothing else matches. | 186 | * if nothing else matches. |
| 187 | */ | 187 | */ |
| 188 | static inline int phy_find_valid(int idx, u32 features) | 188 | static inline unsigned int phy_find_valid(unsigned int idx, u32 features) |
| 189 | { | 189 | { |
| 190 | while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) | 190 | while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) |
| 191 | idx++; | 191 | idx++; |
| @@ -204,7 +204,7 @@ static inline int phy_find_valid(int idx, u32 features) | |||
| 204 | static void phy_sanitize_settings(struct phy_device *phydev) | 204 | static void phy_sanitize_settings(struct phy_device *phydev) |
| 205 | { | 205 | { |
| 206 | u32 features = phydev->supported; | 206 | u32 features = phydev->supported; |
| 207 | int idx; | 207 | unsigned int idx; |
| 208 | 208 | ||
| 209 | /* Sanitize settings based on PHY capabilities */ | 209 | /* Sanitize settings based on PHY capabilities */ |
| 210 | if ((features & SUPPORTED_Autoneg) == 0) | 210 | if ((features & SUPPORTED_Autoneg) == 0) |
| @@ -954,7 +954,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
| 954 | (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { | 954 | (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { |
| 955 | int eee_lp, eee_cap, eee_adv; | 955 | int eee_lp, eee_cap, eee_adv; |
| 956 | u32 lp, cap, adv; | 956 | u32 lp, cap, adv; |
| 957 | int idx, status; | 957 | int status; |
| 958 | unsigned int idx; | ||
| 958 | 959 | ||
| 959 | /* Read phy status to properly get the right settings */ | 960 | /* Read phy status to properly get the right settings */ |
| 960 | status = phy_read_status(phydev); | 961 | status = phy_read_status(phydev); |
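Note on the phy.c hunks above: the settings-table index becomes unsigned int so it matches the unsigned result of ARRAY_SIZE() and can be clamped to the last (slowest) entry without ever going negative. The self-contained illustration below shows that lookup-with-fallback pattern; the table values are invented and much shorter than the kernel's.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct setting { int speed; int duplex; };

static const struct setting settings[] = {
	{ 1000, 1 },
	{ 100,  1 },
	{ 100,  0 },
	{ 10,   1 },
	{ 10,   0 },	/* slowest setting doubles as the fallback */
};

/* Same shape as phy_find_setting(): walk the table and fall back to the
 * last entry when nothing matches.  The unsigned index matches the
 * unsigned type of ARRAY_SIZE() and cannot underflow. */
static unsigned int find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < ARRAY_SIZE(settings) ? idx : ARRAY_SIZE(settings) - 1;
}

int main(void)
{
	printf("%u\n", find_setting(100, 1));	/* 1 */
	printf("%u\n", find_setting(2500, 1));	/* no match, falls back to 4 */
	return 0;
}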
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 433f0a00c683..e2797f1e1b31 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
| @@ -11,7 +11,7 @@ obj-$(CONFIG_USB_HSO) += hso.o | |||
| 11 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o | 11 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o |
| 12 | asix-y := asix_devices.o asix_common.o ax88172a.o | 12 | asix-y := asix_devices.o asix_common.o ax88172a.o |
| 13 | obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o | 13 | obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o |
| 14 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o | 14 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o |
| 15 | obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o | 15 | obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o |
| 16 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o | 16 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o |
| 17 | obj-$(CONFIG_USB_NET_SR9700) += sr9700.o | 17 | obj-$(CONFIG_USB_NET_SR9700) += sr9700.o |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 42e176912c8e..bd363b27e854 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
| @@ -652,6 +652,13 @@ static const struct usb_device_id products[] = { | |||
| 652 | .driver_info = 0, | 652 | .driver_info = 0, |
| 653 | }, | 653 | }, |
| 654 | 654 | ||
| 655 | /* Samsung USB Ethernet Adapters */ | ||
| 656 | { | ||
| 657 | USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM, | ||
| 658 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 659 | .driver_info = 0, | ||
| 660 | }, | ||
| 661 | |||
| 655 | /* WHITELIST!!! | 662 | /* WHITELIST!!! |
| 656 | * | 663 | * |
| 657 | * CDC Ether uses two interfaces, not necessarily consecutive. | 664 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d89dbe395ad2..adb12f349a61 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -449,9 +449,6 @@ enum rtl8152_flags { | |||
| 449 | #define MCU_TYPE_PLA 0x0100 | 449 | #define MCU_TYPE_PLA 0x0100 |
| 450 | #define MCU_TYPE_USB 0x0000 | 450 | #define MCU_TYPE_USB 0x0000 |
| 451 | 451 | ||
| 452 | #define REALTEK_USB_DEVICE(vend, prod) \ | ||
| 453 | USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC) | ||
| 454 | |||
| 455 | struct rx_desc { | 452 | struct rx_desc { |
| 456 | __le32 opts1; | 453 | __le32 opts1; |
| 457 | #define RX_LEN_MASK 0x7fff | 454 | #define RX_LEN_MASK 0x7fff |
| @@ -2739,6 +2736,12 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
| 2739 | struct net_device *netdev; | 2736 | struct net_device *netdev; |
| 2740 | int ret; | 2737 | int ret; |
| 2741 | 2738 | ||
| 2739 | if (udev->actconfig->desc.bConfigurationValue != 1) { | ||
| 2740 | usb_driver_set_configuration(udev, 1); | ||
| 2741 | return -ENODEV; | ||
| 2742 | } | ||
| 2743 | |||
| 2744 | usb_reset_device(udev); | ||
| 2742 | netdev = alloc_etherdev(sizeof(struct r8152)); | 2745 | netdev = alloc_etherdev(sizeof(struct r8152)); |
| 2743 | if (!netdev) { | 2746 | if (!netdev) { |
| 2744 | dev_err(&intf->dev, "Out of memory\n"); | 2747 | dev_err(&intf->dev, "Out of memory\n"); |
| @@ -2819,9 +2822,9 @@ static void rtl8152_disconnect(struct usb_interface *intf) | |||
| 2819 | 2822 | ||
| 2820 | /* table of devices that work with this driver */ | 2823 | /* table of devices that work with this driver */ |
| 2821 | static struct usb_device_id rtl8152_table[] = { | 2824 | static struct usb_device_id rtl8152_table[] = { |
| 2822 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, | 2825 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, |
| 2823 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, | 2826 | {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, |
| 2824 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, | 2827 | {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, |
| 2825 | {} | 2828 | {} |
| 2826 | }; | 2829 | }; |
| 2827 | 2830 | ||
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c deleted file mode 100644 index f0a8791b7636..000000000000 --- a/drivers/net/usb/r815x.c +++ /dev/null | |||
| @@ -1,248 +0,0 @@ | |||
| 1 | #include <linux/module.h> | ||
| 2 | #include <linux/netdevice.h> | ||
| 3 | #include <linux/mii.h> | ||
| 4 | #include <linux/usb.h> | ||
| 5 | #include <linux/usb/cdc.h> | ||
| 6 | #include <linux/usb/usbnet.h> | ||
| 7 | |||
| 8 | #define RTL815x_REQT_READ 0xc0 | ||
| 9 | #define RTL815x_REQT_WRITE 0x40 | ||
| 10 | #define RTL815x_REQ_GET_REGS 0x05 | ||
| 11 | #define RTL815x_REQ_SET_REGS 0x05 | ||
| 12 | |||
| 13 | #define MCU_TYPE_PLA 0x0100 | ||
| 14 | #define OCP_BASE 0xe86c | ||
| 15 | #define BASE_MII 0xa400 | ||
| 16 | |||
| 17 | #define BYTE_EN_DWORD 0xff | ||
| 18 | #define BYTE_EN_WORD 0x33 | ||
| 19 | #define BYTE_EN_BYTE 0x11 | ||
| 20 | |||
| 21 | #define R815x_PHY_ID 32 | ||
| 22 | #define REALTEK_VENDOR_ID 0x0bda | ||
| 23 | |||
| 24 | |||
| 25 | static int pla_read_word(struct usb_device *udev, u16 index) | ||
| 26 | { | ||
| 27 | int ret; | ||
| 28 | u8 shift = index & 2; | ||
| 29 | __le32 *tmp; | ||
| 30 | |||
| 31 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
| 32 | if (!tmp) | ||
| 33 | return -ENOMEM; | ||
| 34 | |||
| 35 | index &= ~3; | ||
| 36 | |||
| 37 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
| 38 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | ||
| 39 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); | ||
| 40 | if (ret < 0) | ||
| 41 | goto out2; | ||
| 42 | |||
| 43 | ret = __le32_to_cpu(*tmp); | ||
| 44 | ret >>= (shift * 8); | ||
| 45 | ret &= 0xffff; | ||
| 46 | |||
| 47 | out2: | ||
| 48 | kfree(tmp); | ||
| 49 | return ret; | ||
| 50 | } | ||
| 51 | |||
| 52 | static int pla_write_word(struct usb_device *udev, u16 index, u32 data) | ||
| 53 | { | ||
| 54 | __le32 *tmp; | ||
| 55 | u32 mask = 0xffff; | ||
| 56 | u16 byen = BYTE_EN_WORD; | ||
| 57 | u8 shift = index & 2; | ||
| 58 | int ret; | ||
| 59 | |||
| 60 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | ||
| 61 | if (!tmp) | ||
| 62 | return -ENOMEM; | ||
| 63 | |||
| 64 | data &= mask; | ||
| 65 | |||
| 66 | if (shift) { | ||
| 67 | byen <<= shift; | ||
| 68 | mask <<= (shift * 8); | ||
| 69 | data <<= (shift * 8); | ||
| 70 | index &= ~3; | ||
| 71 | } | ||
| 72 | |||
| 73 | ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
| 74 | RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, | ||
| 75 | index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); | ||
| 76 | if (ret < 0) | ||
| 77 | goto out3; | ||
| 78 | |||
| 79 | data |= __le32_to_cpu(*tmp) & ~mask; | ||
| 80 | *tmp = __cpu_to_le32(data); | ||
| 81 | |||
| 82 | ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
| 83 | RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, | ||
| 84 | index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp), | ||
| 85 | 500); | ||
| 86 | |||
| 87 | out3: | ||
| 88 | kfree(tmp); | ||
| 89 | return ret; | ||
| 90 | } | ||
| 91 | |||
| 92 | static int ocp_reg_read(struct usbnet *dev, u16 addr) | ||
| 93 | { | ||
| 94 | u16 ocp_base, ocp_index; | ||
| 95 | int ret; | ||
| 96 | |||
| 97 | ocp_base = addr & 0xf000; | ||
| 98 | ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); | ||
| 99 | if (ret < 0) | ||
| 100 | goto out; | ||
| 101 | |||
| 102 | ocp_index = (addr & 0x0fff) | 0xb000; | ||
| 103 | ret = pla_read_word(dev->udev, ocp_index); | ||
| 104 | |||
| 105 | out: | ||
| 106 | return ret; | ||
| 107 | } | ||
| 108 | |||
| 109 | static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data) | ||
| 110 | { | ||
| 111 | u16 ocp_base, ocp_index; | ||
| 112 | int ret; | ||
| 113 | |||
| 114 | ocp_base = addr & 0xf000; | ||
| 115 | ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); | ||
| 116 | if (ret < 0) | ||
| 117 | goto out1; | ||
| 118 | |||
| 119 | ocp_index = (addr & 0x0fff) | 0xb000; | ||
| 120 | ret = pla_write_word(dev->udev, ocp_index, data); | ||
| 121 | |||
| 122 | out1: | ||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) | ||
| 127 | { | ||
| 128 | struct usbnet *dev = netdev_priv(netdev); | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | if (phy_id != R815x_PHY_ID) | ||
| 132 | return -EINVAL; | ||
| 133 | |||
| 134 | if (usb_autopm_get_interface(dev->intf) < 0) | ||
| 135 | return -ENODEV; | ||
| 136 | |||
| 137 | ret = ocp_reg_read(dev, BASE_MII + reg * 2); | ||
| 138 | |||
| 139 | usb_autopm_put_interface(dev->intf); | ||
| 140 | return ret; | ||
| 141 | } | ||
| 142 | |||
| 143 | static | ||
| 144 | void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) | ||
| 145 | { | ||
| 146 | struct usbnet *dev = netdev_priv(netdev); | ||
| 147 | |||
| 148 | if (phy_id != R815x_PHY_ID) | ||
| 149 | return; | ||
| 150 | |||
| 151 | if (usb_autopm_get_interface(dev->intf) < 0) | ||
| 152 | return; | ||
| 153 | |||
| 154 | ocp_reg_write(dev, BASE_MII + reg * 2, val); | ||
| 155 | |||
| 156 | usb_autopm_put_interface(dev->intf); | ||
| 157 | } | ||
| 158 | |||
| 159 | static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) | ||
| 160 | { | ||
| 161 | int status; | ||
| 162 | |||
| 163 | status = usbnet_cdc_bind(dev, intf); | ||
| 164 | if (status < 0) | ||
| 165 | return status; | ||
| 166 | |||
| 167 | dev->mii.dev = dev->net; | ||
| 168 | dev->mii.mdio_read = r815x_mdio_read; | ||
| 169 | dev->mii.mdio_write = r815x_mdio_write; | ||
| 170 | dev->mii.phy_id_mask = 0x3f; | ||
| 171 | dev->mii.reg_num_mask = 0x1f; | ||
| 172 | dev->mii.phy_id = R815x_PHY_ID; | ||
| 173 | dev->mii.supports_gmii = 1; | ||
| 174 | |||
| 175 | return status; | ||
| 176 | } | ||
| 177 | |||
| 178 | static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) | ||
| 179 | { | ||
| 180 | int status; | ||
| 181 | |||
| 182 | status = usbnet_cdc_bind(dev, intf); | ||
| 183 | if (status < 0) | ||
| 184 | return status; | ||
| 185 | |||
| 186 | dev->mii.dev = dev->net; | ||
| 187 | dev->mii.mdio_read = r815x_mdio_read; | ||
| 188 | dev->mii.mdio_write = r815x_mdio_write; | ||
| 189 | dev->mii.phy_id_mask = 0x3f; | ||
| 190 | dev->mii.reg_num_mask = 0x1f; | ||
| 191 | dev->mii.phy_id = R815x_PHY_ID; | ||
| 192 | dev->mii.supports_gmii = 0; | ||
| 193 | |||
| 194 | return status; | ||
| 195 | } | ||
| 196 | |||
| 197 | static const struct driver_info r8152_info = { | ||
| 198 | .description = "RTL8152 ECM Device", | ||
| 199 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, | ||
| 200 | .bind = r8152_bind, | ||
| 201 | .unbind = usbnet_cdc_unbind, | ||
| 202 | .status = usbnet_cdc_status, | ||
| 203 | .manage_power = usbnet_manage_power, | ||
| 204 | }; | ||
| 205 | |||
| 206 | static const struct driver_info r8153_info = { | ||
| 207 | .description = "RTL8153 ECM Device", | ||
| 208 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, | ||
| 209 | .bind = r8153_bind, | ||
| 210 | .unbind = usbnet_cdc_unbind, | ||
| 211 | .status = usbnet_cdc_status, | ||
| 212 | .manage_power = usbnet_manage_power, | ||
| 213 | }; | ||
| 214 | |||
| 215 | static const struct usb_device_id products[] = { | ||
| 216 | { | ||
| 217 | USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM, | ||
| 218 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 219 | .driver_info = (unsigned long) &r8152_info, | ||
| 220 | }, | ||
| 221 | |||
| 222 | { | ||
| 223 | USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM, | ||
| 224 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 225 | .driver_info = (unsigned long) &r8153_info, | ||
| 226 | }, | ||
| 227 | |||
| 228 | { }, /* END */ | ||
| 229 | }; | ||
| 230 | MODULE_DEVICE_TABLE(usb, products); | ||
| 231 | |||
| 232 | static struct usb_driver r815x_driver = { | ||
| 233 | .name = "r815x", | ||
| 234 | .id_table = products, | ||
| 235 | .probe = usbnet_probe, | ||
| 236 | .disconnect = usbnet_disconnect, | ||
| 237 | .suspend = usbnet_suspend, | ||
| 238 | .resume = usbnet_resume, | ||
| 239 | .reset_resume = usbnet_resume, | ||
| 240 | .supports_autosuspend = 1, | ||
| 241 | .disable_hub_initiated_lpm = 1, | ||
| 242 | }; | ||
| 243 | |||
| 244 | module_usb_driver(r815x_driver); | ||
| 245 | |||
| 246 | MODULE_AUTHOR("Hayes Wang"); | ||
| 247 | MODULE_DESCRIPTION("Realtek USB ECM device"); | ||
| 248 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 3be786faaaec..0fa3b44f7342 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -1762,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev) | |||
| 1762 | { | 1762 | { |
| 1763 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1763 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
| 1764 | 1764 | ||
| 1765 | if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) | 1765 | switch (adapter->intr.type) { |
| 1766 | vmxnet3_disable_all_intrs(adapter); | 1766 | #ifdef CONFIG_PCI_MSI |
| 1767 | 1767 | case VMXNET3_IT_MSIX: { | |
| 1768 | vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); | 1768 | int i; |
| 1769 | vmxnet3_enable_all_intrs(adapter); | 1769 | for (i = 0; i < adapter->num_rx_queues; i++) |
| 1770 | vmxnet3_msix_rx(0, &adapter->rx_queue[i]); | ||
| 1771 | break; | ||
| 1772 | } | ||
| 1773 | #endif | ||
| 1774 | case VMXNET3_IT_MSI: | ||
| 1775 | default: | ||
| 1776 | vmxnet3_intr(0, adapter->netdev); | ||
| 1777 | break; | ||
| 1778 | } | ||
| 1770 | 1779 | ||
| 1771 | } | 1780 | } |
| 1772 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 1781 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 76cde6ce6551..18a895a949d4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c | |||
| @@ -872,8 +872,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
| 872 | 872 | ||
| 873 | lockdep_assert_held(&mvm->mutex); | 873 | lockdep_assert_held(&mvm->mutex); |
| 874 | 874 | ||
| 875 | /* Rssi update while not associated ?! */ | 875 | /* |
| 876 | if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) | 876 | * Rssi update while not associated - can happen since the statistics |
| 877 | * are handled asynchronously | ||
| 878 | */ | ||
| 879 | if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) | ||
| 877 | return; | 880 | return; |
| 878 | 881 | ||
| 879 | /* No BT - reports should be disabled */ | 882 | /* No BT - reports should be disabled */ |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f47bcbe2945a..3872ead75488 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
| @@ -359,13 +359,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { | |||
| 359 | /* 7265 Series */ | 359 | /* 7265 Series */ |
| 360 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 360 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
| 361 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, | 361 | {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, |
| 362 | {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)}, | ||
| 363 | {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, | 362 | {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, |
| 364 | {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)}, | ||
| 365 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, | 363 | {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, |
| 366 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, | 364 | {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, |
| 367 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, | 365 | {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, |
| 368 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, | 366 | {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, |
| 367 | {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, | ||
| 369 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, | 368 | {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, |
| 370 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, |
| 371 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, | 370 | {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, |
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c index 5e0eec4d71c7..5d9a8084665d 100644 --- a/drivers/net/wireless/mwifiex/11ac.c +++ b/drivers/net/wireless/mwifiex/11ac.c | |||
| @@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, | |||
| 189 | vht_cap->header.len = | 189 | vht_cap->header.len = |
| 190 | cpu_to_le16(sizeof(struct ieee80211_vht_cap)); | 190 | cpu_to_le16(sizeof(struct ieee80211_vht_cap)); |
| 191 | memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), | 191 | memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), |
| 192 | (u8 *)bss_desc->bcn_vht_cap + | 192 | (u8 *)bss_desc->bcn_vht_cap, |
| 193 | sizeof(struct ieee_types_header), | ||
| 194 | le16_to_cpu(vht_cap->header.len)); | 193 | le16_to_cpu(vht_cap->header.len)); |
| 195 | 194 | ||
| 196 | mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); | 195 | mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); |
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 6261f8c53d44..7db1a89fdd95 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c | |||
| @@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, | |||
| 308 | ht_cap->header.len = | 308 | ht_cap->header.len = |
| 309 | cpu_to_le16(sizeof(struct ieee80211_ht_cap)); | 309 | cpu_to_le16(sizeof(struct ieee80211_ht_cap)); |
| 310 | memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), | 310 | memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), |
| 311 | (u8 *) bss_desc->bcn_ht_cap + | 311 | (u8 *)bss_desc->bcn_ht_cap, |
| 312 | sizeof(struct ieee_types_header), | ||
| 313 | le16_to_cpu(ht_cap->header.len)); | 312 | le16_to_cpu(ht_cap->header.len)); |
| 314 | 313 | ||
| 315 | mwifiex_fill_cap_info(priv, radio_type, ht_cap); | 314 | mwifiex_fill_cap_info(priv, radio_type, ht_cap); |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 0a8a26e10f01..668547c2de84 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
| @@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) | |||
| 2101 | curr_bss->ht_info_offset); | 2101 | curr_bss->ht_info_offset); |
| 2102 | 2102 | ||
| 2103 | if (curr_bss->bcn_vht_cap) | 2103 | if (curr_bss->bcn_vht_cap) |
| 2104 | curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf + | 2104 | curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf + |
| 2105 | curr_bss->vht_cap_offset); | 2105 | curr_bss->vht_cap_offset); |
| 2106 | 2106 | ||
| 2107 | if (curr_bss->bcn_vht_oper) | 2107 | if (curr_bss->bcn_vht_oper) |
| 2108 | curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf + | 2108 | curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf + |
| 2109 | curr_bss->vht_info_offset); | 2109 | curr_bss->vht_info_offset); |
| 2110 | 2110 | ||
| 2111 | if (curr_bss->bcn_bss_co_2040) | 2111 | if (curr_bss->bcn_bss_co_2040) |
| 2112 | curr_bss->bcn_bss_co_2040 = | 2112 | curr_bss->bcn_bss_co_2040 = |
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c index 123c4bb50e0a..cde0eaf99714 100644 --- a/drivers/net/wireless/ti/wl1251/rx.c +++ b/drivers/net/wireless/ti/wl1251/rx.c | |||
| @@ -180,7 +180,7 @@ static void wl1251_rx_body(struct wl1251 *wl, | |||
| 180 | wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); | 180 | wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length); |
| 181 | 181 | ||
| 182 | /* The actual length doesn't include the target's alignment */ | 182 | /* The actual length doesn't include the target's alignment */ |
| 183 | skb->len = desc->length - PLCP_HEADER_LENGTH; | 183 | skb_trim(skb, desc->length - PLCP_HEADER_LENGTH); |
| 184 | 184 | ||
| 185 | fc = (u16 *)skb->data; | 185 | fc = (u16 *)skb->data; |
| 186 | 186 | ||
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7669d49a67e2..301cc037fda8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -132,8 +132,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 132 | /* If the skb is GSO then we'll also need an extra slot for the | 132 | /* If the skb is GSO then we'll also need an extra slot for the |
| 133 | * metadata. | 133 | * metadata. |
| 134 | */ | 134 | */ |
| 135 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | 135 | if (skb_is_gso(skb)) |
| 136 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | ||
| 137 | min_slots_needed++; | 136 | min_slots_needed++; |
| 138 | 137 | ||
| 139 | /* If the skb can't possibly fit in the remaining slots | 138 | /* If the skb can't possibly fit in the remaining slots |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e5284bca2d90..438d0c09b7e6 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
| 240 | struct gnttab_copy *copy_gop; | 240 | struct gnttab_copy *copy_gop; |
| 241 | struct xenvif_rx_meta *meta; | 241 | struct xenvif_rx_meta *meta; |
| 242 | unsigned long bytes; | 242 | unsigned long bytes; |
| 243 | int gso_type; | 243 | int gso_type = XEN_NETIF_GSO_TYPE_NONE; |
| 244 | 244 | ||
| 245 | /* Data must not cross a page boundary. */ | 245 | /* Data must not cross a page boundary. */ |
| 246 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); | 246 | BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); |
| @@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, | |||
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | /* Leave a gap for the GSO descriptor. */ | 301 | /* Leave a gap for the GSO descriptor. */ |
| 302 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | 302 | if (skb_is_gso(skb)) { |
| 303 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | 303 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
| 304 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 304 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; |
| 305 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | 305 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
| 306 | else | 306 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; |
| 307 | gso_type = XEN_NETIF_GSO_TYPE_NONE; | 307 | } |
| 308 | 308 | ||
| 309 | if (*head && ((1 << gso_type) & vif->gso_mask)) | 309 | if (*head && ((1 << gso_type) & vif->gso_mask)) |
| 310 | vif->rx.req_cons++; | 310 | vif->rx.req_cons++; |
| @@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 338 | int head = 1; | 338 | int head = 1; |
| 339 | int old_meta_prod; | 339 | int old_meta_prod; |
| 340 | int gso_type; | 340 | int gso_type; |
| 341 | int gso_size; | ||
| 342 | 341 | ||
| 343 | old_meta_prod = npo->meta_prod; | 342 | old_meta_prod = npo->meta_prod; |
| 344 | 343 | ||
| 345 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { | 344 | gso_type = XEN_NETIF_GSO_TYPE_NONE; |
| 346 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; | 345 | if (skb_is_gso(skb)) { |
| 347 | gso_size = skb_shinfo(skb)->gso_size; | 346 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
| 348 | } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { | 347 | gso_type = XEN_NETIF_GSO_TYPE_TCPV4; |
| 349 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; | 348 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
| 350 | gso_size = skb_shinfo(skb)->gso_size; | 349 | gso_type = XEN_NETIF_GSO_TYPE_TCPV6; |
| 351 | } else { | ||
| 352 | gso_type = XEN_NETIF_GSO_TYPE_NONE; | ||
| 353 | gso_size = 0; | ||
| 354 | } | 350 | } |
| 355 | 351 | ||
| 356 | /* Set up a GSO prefix descriptor, if necessary */ | 352 | /* Set up a GSO prefix descriptor, if necessary */ |
| @@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 358 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); | 354 | req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); |
| 359 | meta = npo->meta + npo->meta_prod++; | 355 | meta = npo->meta + npo->meta_prod++; |
| 360 | meta->gso_type = gso_type; | 356 | meta->gso_type = gso_type; |
| 361 | meta->gso_size = gso_size; | 357 | meta->gso_size = skb_shinfo(skb)->gso_size; |
| 362 | meta->size = 0; | 358 | meta->size = 0; |
| 363 | meta->id = req->id; | 359 | meta->id = req->id; |
| 364 | } | 360 | } |
| @@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 368 | 364 | ||
| 369 | if ((1 << gso_type) & vif->gso_mask) { | 365 | if ((1 << gso_type) & vif->gso_mask) { |
| 370 | meta->gso_type = gso_type; | 366 | meta->gso_type = gso_type; |
| 371 | meta->gso_size = gso_size; | 367 | meta->gso_size = skb_shinfo(skb)->gso_size; |
| 372 | } else { | 368 | } else { |
| 373 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; | 369 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; |
| 374 | meta->gso_size = 0; | 370 | meta->gso_size = 0; |
| @@ -500,8 +496,9 @@ static void xenvif_rx_action(struct xenvif *vif) | |||
| 500 | size = skb_frag_size(&skb_shinfo(skb)->frags[i]); | 496 | size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 501 | max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); | 497 | max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); |
| 502 | } | 498 | } |
| 503 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || | 499 | if (skb_is_gso(skb) && |
| 504 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 500 | (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || |
| 501 | skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) | ||
| 505 | max_slots_needed++; | 502 | max_slots_needed++; |
| 506 | 503 | ||
| 507 | /* If the skb may not fit then bail out now */ | 504 | /* If the skb may not fit then bail out now */ |
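The three xen-netback hunks above converge on one pattern: initialise the Xen GSO type to NONE and only consult the skb GSO flags when skb_is_gso() says the skb is actually GSO, reading gso_size straight from skb_shinfo() where it is consumed. A hedged sketch of that pattern as a standalone helper (example_xen_gso_type is hypothetical, not part of the patch):

#include <linux/skbuff.h>
#include <xen/interface/io/netif.h>

/* Hypothetical helper showing the consolidated mapping used above. */
static int example_xen_gso_type(const struct sk_buff *skb)
{
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return gso_type;
}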
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 00660cc502c5..38901665c770 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
| @@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, | |||
| 162 | 162 | ||
| 163 | avail = *r; | 163 | avail = *r; |
| 164 | pci_clip_resource_to_region(bus, &avail, region); | 164 | pci_clip_resource_to_region(bus, &avail, region); |
| 165 | if (!resource_size(&avail)) | ||
| 166 | continue; | ||
| 167 | 165 | ||
| 168 | /* | 166 | /* |
| 169 | * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to | 167 | * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6b05f6134b68..fdbc294821e6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -1192,6 +1192,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) | |||
| 1192 | return err; | 1192 | return err; |
| 1193 | pci_fixup_device(pci_fixup_enable, dev); | 1193 | pci_fixup_device(pci_fixup_enable, dev); |
| 1194 | 1194 | ||
| 1195 | if (dev->msi_enabled || dev->msix_enabled) | ||
| 1196 | return 0; | ||
| 1197 | |||
| 1195 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 1198 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
| 1196 | if (pin) { | 1199 | if (pin) { |
| 1197 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | 1200 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index cf32f0393369..c0f3718b77a8 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
| @@ -513,7 +513,7 @@ struct cifs_mnt_data { | |||
| 513 | static inline unsigned int | 513 | static inline unsigned int |
| 514 | get_rfc1002_length(void *buf) | 514 | get_rfc1002_length(void *buf) |
| 515 | { | 515 | { |
| 516 | return be32_to_cpu(*((__be32 *)buf)); | 516 | return be32_to_cpu(*((__be32 *)buf)) & 0xffffff; |
| 517 | } | 517 | } |
| 518 | 518 | ||
| 519 | static inline void | 519 | static inline void |
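The masking above reflects the on-the-wire layout: the first byte of the 4-byte transport header in front of each SMB packet is not part of the length (it carries the RFC 1002 message type), so only the low three bytes may be treated as a length. A small sketch of the inverse operation, writing such a header (example_put_rfc1002_header is hypothetical and only illustrates the layout):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical sketch of the header layout: type byte in the top 8 bits,
 * 24-bit big-endian length in the low bits.  Reading the word back as a
 * be32 therefore needs the 0xffffff mask that get_rfc1002_length() applies. */
static void example_put_rfc1002_header(void *buf, u8 type, u32 len)
{
	*(__be32 *)buf = cpu_to_be32(((u32)type << 24) | (len & 0xffffff));
}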
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 53c15074bb36..834fce759d80 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -2579,31 +2579,19 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov, | |||
| 2579 | struct cifsInodeInfo *cinode = CIFS_I(inode); | 2579 | struct cifsInodeInfo *cinode = CIFS_I(inode); |
| 2580 | struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; | 2580 | struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; |
| 2581 | ssize_t rc = -EACCES; | 2581 | ssize_t rc = -EACCES; |
| 2582 | loff_t lock_pos = pos; | ||
| 2582 | 2583 | ||
| 2583 | BUG_ON(iocb->ki_pos != pos); | 2584 | if (file->f_flags & O_APPEND) |
| 2584 | 2585 | lock_pos = i_size_read(inode); | |
| 2585 | /* | 2586 | /* |
| 2586 | * We need to hold the sem to be sure nobody modifies lock list | 2587 | * We need to hold the sem to be sure nobody modifies lock list |
| 2587 | * with a brlock that prevents writing. | 2588 | * with a brlock that prevents writing. |
| 2588 | */ | 2589 | */ |
| 2589 | down_read(&cinode->lock_sem); | 2590 | down_read(&cinode->lock_sem); |
| 2590 | if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs), | 2591 | if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs), |
| 2591 | server->vals->exclusive_lock_type, NULL, | 2592 | server->vals->exclusive_lock_type, NULL, |
| 2592 | CIFS_WRITE_OP)) { | 2593 | CIFS_WRITE_OP)) |
| 2593 | mutex_lock(&inode->i_mutex); | 2594 | rc = generic_file_aio_write(iocb, iov, nr_segs, pos); |
| 2594 | rc = __generic_file_aio_write(iocb, iov, nr_segs, | ||
| 2595 | &iocb->ki_pos); | ||
| 2596 | mutex_unlock(&inode->i_mutex); | ||
| 2597 | } | ||
| 2598 | |||
| 2599 | if (rc > 0) { | ||
| 2600 | ssize_t err; | ||
| 2601 | |||
| 2602 | err = generic_write_sync(file, iocb->ki_pos - rc, rc); | ||
| 2603 | if (err < 0) | ||
| 2604 | rc = err; | ||
| 2605 | } | ||
| 2606 | |||
| 2607 | up_read(&cinode->lock_sem); | 2595 | up_read(&cinode->lock_sem); |
| 2608 | return rc; | 2596 | return rc; |
| 2609 | } | 2597 | } |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b37570952846..18cd5650a5fc 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
| @@ -270,6 +270,26 @@ cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx, | |||
| 270 | iov->iov_len = rqst->rq_pagesz; | 270 | iov->iov_len = rqst->rq_pagesz; |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | static unsigned long | ||
| 274 | rqst_len(struct smb_rqst *rqst) | ||
| 275 | { | ||
| 276 | unsigned int i; | ||
| 277 | struct kvec *iov = rqst->rq_iov; | ||
| 278 | unsigned long buflen = 0; | ||
| 279 | |||
| 280 | /* total up iov array first */ | ||
| 281 | for (i = 0; i < rqst->rq_nvec; i++) | ||
| 282 | buflen += iov[i].iov_len; | ||
| 283 | |||
| 284 | /* add in the page array if there is one */ | ||
| 285 | if (rqst->rq_npages) { | ||
| 286 | buflen += rqst->rq_pagesz * (rqst->rq_npages - 1); | ||
| 287 | buflen += rqst->rq_tailsz; | ||
| 288 | } | ||
| 289 | |||
| 290 | return buflen; | ||
| 291 | } | ||
| 292 | |||
| 273 | static int | 293 | static int |
| 274 | smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | 294 | smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) |
| 275 | { | 295 | { |
| @@ -277,6 +297,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
| 277 | struct kvec *iov = rqst->rq_iov; | 297 | struct kvec *iov = rqst->rq_iov; |
| 278 | int n_vec = rqst->rq_nvec; | 298 | int n_vec = rqst->rq_nvec; |
| 279 | unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); | 299 | unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); |
| 300 | unsigned long send_length; | ||
| 280 | unsigned int i; | 301 | unsigned int i; |
| 281 | size_t total_len = 0, sent; | 302 | size_t total_len = 0, sent; |
| 282 | struct socket *ssocket = server->ssocket; | 303 | struct socket *ssocket = server->ssocket; |
| @@ -285,6 +306,14 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) | |||
| 285 | if (ssocket == NULL) | 306 | if (ssocket == NULL) |
| 286 | return -ENOTSOCK; | 307 | return -ENOTSOCK; |
| 287 | 308 | ||
| 309 | /* sanity check send length */ | ||
| 310 | send_length = rqst_len(rqst); | ||
| 311 | if (send_length != smb_buf_length + 4) { | ||
| 312 | WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n", | ||
| 313 | send_length, smb_buf_length); | ||
| 314 | return -EIO; | ||
| 315 | } | ||
| 316 | |||
| 288 | cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); | 317 | cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); |
| 289 | dump_smb(iov[0].iov_base, iov[0].iov_len); | 318 | dump_smb(iov[0].iov_base, iov[0].iov_len); |
| 290 | 319 | ||
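To make the new sanity check concrete with made-up numbers: a request built from a 68-byte header kvec and a 24-byte data kvec plus three pages (4096-byte rq_pagesz, 512-byte rq_tailsz) gives rqst_len() = 68 + 24 + 2*4096 + 512 = 8796 bytes, and smb_send_rqst() now refuses to transmit unless that total equals the RFC 1002 length from iov[0] plus the 4-byte transport header; a mismatch fails early with -EIO instead of putting a malformed frame on the socket.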
diff --git a/fs/file.c b/fs/file.c --- a/fs/file.c +++ b/fs/file.c | |||
| @@ -683,35 +683,65 @@ EXPORT_SYMBOL(fget_raw); | |||
| 683 | * The fput_needed flag returned by fget_light should be passed to the | 683 | * The fput_needed flag returned by fget_light should be passed to the |
| 684 | * corresponding fput_light. | 684 | * corresponding fput_light. |
| 685 | */ | 685 | */ |
| 686 | struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed) | 686 | static unsigned long __fget_light(unsigned int fd, fmode_t mask) |
| 687 | { | 687 | { |
| 688 | struct files_struct *files = current->files; | 688 | struct files_struct *files = current->files; |
| 689 | struct file *file; | 689 | struct file *file; |
| 690 | 690 | ||
| 691 | *fput_needed = 0; | ||
| 692 | if (atomic_read(&files->count) == 1) { | 691 | if (atomic_read(&files->count) == 1) { |
| 693 | file = __fcheck_files(files, fd); | 692 | file = __fcheck_files(files, fd); |
| 694 | if (file && (file->f_mode & mask)) | 693 | if (!file || unlikely(file->f_mode & mask)) |
| 695 | file = NULL; | 694 | return 0; |
| 695 | return (unsigned long)file; | ||
| 696 | } else { | 696 | } else { |
| 697 | file = __fget(fd, mask); | 697 | file = __fget(fd, mask); |
| 698 | if (file) | 698 | if (!file) |
| 699 | *fput_needed = 1; | 699 | return 0; |
| 700 | return FDPUT_FPUT | (unsigned long)file; | ||
| 700 | } | 701 | } |
| 701 | |||
| 702 | return file; | ||
| 703 | } | 702 | } |
| 704 | struct file *fget_light(unsigned int fd, int *fput_needed) | 703 | unsigned long __fdget(unsigned int fd) |
| 705 | { | 704 | { |
| 706 | return __fget_light(fd, FMODE_PATH, fput_needed); | 705 | return __fget_light(fd, FMODE_PATH); |
| 707 | } | 706 | } |
| 708 | EXPORT_SYMBOL(fget_light); | 707 | EXPORT_SYMBOL(__fdget); |
| 709 | 708 | ||
| 710 | struct file *fget_raw_light(unsigned int fd, int *fput_needed) | 709 | unsigned long __fdget_raw(unsigned int fd) |
| 711 | { | 710 | { |
| 712 | return __fget_light(fd, 0, fput_needed); | 711 | return __fget_light(fd, 0); |
| 712 | } | ||
| 713 | |||
| 714 | unsigned long __fdget_pos(unsigned int fd) | ||
| 715 | { | ||
| 716 | struct files_struct *files = current->files; | ||
| 717 | struct file *file; | ||
| 718 | unsigned long v; | ||
| 719 | |||
| 720 | if (atomic_read(&files->count) == 1) { | ||
| 721 | file = __fcheck_files(files, fd); | ||
| 722 | v = 0; | ||
| 723 | } else { | ||
| 724 | file = __fget(fd, 0); | ||
| 725 | v = FDPUT_FPUT; | ||
| 726 | } | ||
| 727 | if (!file) | ||
| 728 | return 0; | ||
| 729 | |||
| 730 | if (file->f_mode & FMODE_ATOMIC_POS) { | ||
| 731 | if (file_count(file) > 1) { | ||
| 732 | v |= FDPUT_POS_UNLOCK; | ||
| 733 | mutex_lock(&file->f_pos_lock); | ||
| 734 | } | ||
| 735 | } | ||
| 736 | return v | (unsigned long)file; | ||
| 713 | } | 737 | } |
| 714 | 738 | ||
| 739 | /* | ||
| 740 | * We only lock f_pos if we have threads or if the file might be | ||
| 741 | * shared with another process. In both cases we'll have an elevated | ||
| 742 | * file count (done either by fdget() or by fork()). | ||
| 743 | */ | ||
| 744 | |||
| 715 | void set_close_on_exec(unsigned int fd, int flag) | 745 | void set_close_on_exec(unsigned int fd, int flag) |
| 716 | { | 746 | { |
| 717 | struct files_struct *files = current->files; | 747 | struct files_struct *files = current->files; |
diff --git a/fs/file_table.c b/fs/file_table.c index 5fff9030be34..5b24008ea4f6 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
| @@ -135,6 +135,7 @@ struct file *get_empty_filp(void) | |||
| 135 | atomic_long_set(&f->f_count, 1); | 135 | atomic_long_set(&f->f_count, 1); |
| 136 | rwlock_init(&f->f_owner.lock); | 136 | rwlock_init(&f->f_owner.lock); |
| 137 | spin_lock_init(&f->f_lock); | 137 | spin_lock_init(&f->f_lock); |
| 138 | mutex_init(&f->f_pos_lock); | ||
| 138 | eventpoll_init_file(f); | 139 | eventpoll_init_file(f); |
| 139 | /* f->f_version: 0 */ | 140 | /* f->f_version: 0 */ |
| 140 | return f; | 141 | return f; |
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index 968ce411db53..32602c667b4a 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c | |||
| @@ -103,6 +103,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, | |||
| 103 | folder = &entry->folder; | 103 | folder = &entry->folder; |
| 104 | memset(folder, 0, sizeof(*folder)); | 104 | memset(folder, 0, sizeof(*folder)); |
| 105 | folder->type = cpu_to_be16(HFSPLUS_FOLDER); | 105 | folder->type = cpu_to_be16(HFSPLUS_FOLDER); |
| 106 | if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) | ||
| 107 | folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT); | ||
| 106 | folder->id = cpu_to_be32(inode->i_ino); | 108 | folder->id = cpu_to_be32(inode->i_ino); |
| 107 | HFSPLUS_I(inode)->create_date = | 109 | HFSPLUS_I(inode)->create_date = |
| 108 | folder->create_date = | 110 | folder->create_date = |
| @@ -203,6 +205,36 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid, | |||
| 203 | return hfs_brec_find(fd, hfs_find_rec_by_key); | 205 | return hfs_brec_find(fd, hfs_find_rec_by_key); |
| 204 | } | 206 | } |
| 205 | 207 | ||
| 208 | static void hfsplus_subfolders_inc(struct inode *dir) | ||
| 209 | { | ||
| 210 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); | ||
| 211 | |||
| 212 | if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { | ||
| 213 | /* | ||
| 214 | * Increment subfolder count. Note, the value is only meaningful | ||
| 215 | * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set. | ||
| 216 | */ | ||
| 217 | HFSPLUS_I(dir)->subfolders++; | ||
| 218 | } | ||
| 219 | } | ||
| 220 | |||
| 221 | static void hfsplus_subfolders_dec(struct inode *dir) | ||
| 222 | { | ||
| 223 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); | ||
| 224 | |||
| 225 | if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { | ||
| 226 | /* | ||
| 227 | * Decrement subfolder count. Note, the value is only meaningful | ||
| 228 | * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set. | ||
| 229 | * | ||
| 230 | * Check for zero. Some subfolders may have been created | ||
| 231 | * by an implementation ignorant of this counter. | ||
| 232 | */ | ||
| 233 | if (HFSPLUS_I(dir)->subfolders) | ||
| 234 | HFSPLUS_I(dir)->subfolders--; | ||
| 235 | } | ||
| 236 | } | ||
| 237 | |||
| 206 | int hfsplus_create_cat(u32 cnid, struct inode *dir, | 238 | int hfsplus_create_cat(u32 cnid, struct inode *dir, |
| 207 | struct qstr *str, struct inode *inode) | 239 | struct qstr *str, struct inode *inode) |
| 208 | { | 240 | { |
| @@ -247,6 +279,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, | |||
| 247 | goto err1; | 279 | goto err1; |
| 248 | 280 | ||
| 249 | dir->i_size++; | 281 | dir->i_size++; |
| 282 | if (S_ISDIR(inode->i_mode)) | ||
| 283 | hfsplus_subfolders_inc(dir); | ||
| 250 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; | 284 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; |
| 251 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); | 285 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); |
| 252 | 286 | ||
| @@ -336,6 +370,8 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) | |||
| 336 | goto out; | 370 | goto out; |
| 337 | 371 | ||
| 338 | dir->i_size--; | 372 | dir->i_size--; |
| 373 | if (type == HFSPLUS_FOLDER) | ||
| 374 | hfsplus_subfolders_dec(dir); | ||
| 339 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; | 375 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; |
| 340 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); | 376 | hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); |
| 341 | 377 | ||
| @@ -380,6 +416,7 @@ int hfsplus_rename_cat(u32 cnid, | |||
| 380 | 416 | ||
| 381 | hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, | 417 | hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, |
| 382 | src_fd.entrylength); | 418 | src_fd.entrylength); |
| 419 | type = be16_to_cpu(entry.type); | ||
| 383 | 420 | ||
| 384 | /* create new dir entry with the data from the old entry */ | 421 | /* create new dir entry with the data from the old entry */ |
| 385 | hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); | 422 | hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); |
| @@ -394,6 +431,8 @@ int hfsplus_rename_cat(u32 cnid, | |||
| 394 | if (err) | 431 | if (err) |
| 395 | goto out; | 432 | goto out; |
| 396 | dst_dir->i_size++; | 433 | dst_dir->i_size++; |
| 434 | if (type == HFSPLUS_FOLDER) | ||
| 435 | hfsplus_subfolders_inc(dst_dir); | ||
| 397 | dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; | 436 | dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; |
| 398 | 437 | ||
| 399 | /* finally remove the old entry */ | 438 | /* finally remove the old entry */ |
| @@ -405,6 +444,8 @@ int hfsplus_rename_cat(u32 cnid, | |||
| 405 | if (err) | 444 | if (err) |
| 406 | goto out; | 445 | goto out; |
| 407 | src_dir->i_size--; | 446 | src_dir->i_size--; |
| 447 | if (type == HFSPLUS_FOLDER) | ||
| 448 | hfsplus_subfolders_dec(src_dir); | ||
| 408 | src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; | 449 | src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; |
| 409 | 450 | ||
| 410 | /* remove old thread entry */ | 451 | /* remove old thread entry */ |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 08846425b67f..62d571eb69ba 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
| @@ -242,6 +242,7 @@ struct hfsplus_inode_info { | |||
| 242 | */ | 242 | */ |
| 243 | sector_t fs_blocks; | 243 | sector_t fs_blocks; |
| 244 | u8 userflags; /* BSD user file flags */ | 244 | u8 userflags; /* BSD user file flags */ |
| 245 | u32 subfolders; /* Subfolder count (HFSX only) */ | ||
| 245 | struct list_head open_dir_list; | 246 | struct list_head open_dir_list; |
| 246 | loff_t phys_size; | 247 | loff_t phys_size; |
| 247 | 248 | ||
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h index 8ffb3a8ffe75..5a126828d85e 100644 --- a/fs/hfsplus/hfsplus_raw.h +++ b/fs/hfsplus/hfsplus_raw.h | |||
| @@ -261,7 +261,7 @@ struct hfsplus_cat_folder { | |||
| 261 | struct DInfo user_info; | 261 | struct DInfo user_info; |
| 262 | struct DXInfo finder_info; | 262 | struct DXInfo finder_info; |
| 263 | __be32 text_encoding; | 263 | __be32 text_encoding; |
| 264 | u32 reserved; | 264 | __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */ |
| 265 | } __packed; | 265 | } __packed; |
| 266 | 266 | ||
| 267 | /* HFS file info (stolen from hfs.h) */ | 267 | /* HFS file info (stolen from hfs.h) */ |
| @@ -301,11 +301,13 @@ struct hfsplus_cat_file { | |||
| 301 | struct hfsplus_fork_raw rsrc_fork; | 301 | struct hfsplus_fork_raw rsrc_fork; |
| 302 | } __packed; | 302 | } __packed; |
| 303 | 303 | ||
| 304 | /* File attribute bits */ | 304 | /* File and folder flag bits */ |
| 305 | #define HFSPLUS_FILE_LOCKED 0x0001 | 305 | #define HFSPLUS_FILE_LOCKED 0x0001 |
| 306 | #define HFSPLUS_FILE_THREAD_EXISTS 0x0002 | 306 | #define HFSPLUS_FILE_THREAD_EXISTS 0x0002 |
| 307 | #define HFSPLUS_XATTR_EXISTS 0x0004 | 307 | #define HFSPLUS_XATTR_EXISTS 0x0004 |
| 308 | #define HFSPLUS_ACL_EXISTS 0x0008 | 308 | #define HFSPLUS_ACL_EXISTS 0x0008 |
| 309 | #define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count | ||
| 310 | * (HFSX only) */ | ||
| 309 | 311 | ||
| 310 | /* HFS+ catalog thread (part of a cat_entry) */ | 312 | /* HFS+ catalog thread (part of a cat_entry) */ |
| 311 | struct hfsplus_cat_thread { | 313 | struct hfsplus_cat_thread { |
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index fa929f325f87..a4f45bd88a63 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
| @@ -375,6 +375,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) | |||
| 375 | hip->extent_state = 0; | 375 | hip->extent_state = 0; |
| 376 | hip->flags = 0; | 376 | hip->flags = 0; |
| 377 | hip->userflags = 0; | 377 | hip->userflags = 0; |
| 378 | hip->subfolders = 0; | ||
| 378 | memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); | 379 | memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); |
| 379 | memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); | 380 | memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); |
| 380 | hip->alloc_blocks = 0; | 381 | hip->alloc_blocks = 0; |
| @@ -494,6 +495,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) | |||
| 494 | inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); | 495 | inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); |
| 495 | HFSPLUS_I(inode)->create_date = folder->create_date; | 496 | HFSPLUS_I(inode)->create_date = folder->create_date; |
| 496 | HFSPLUS_I(inode)->fs_blocks = 0; | 497 | HFSPLUS_I(inode)->fs_blocks = 0; |
| 498 | if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) { | ||
| 499 | HFSPLUS_I(inode)->subfolders = | ||
| 500 | be32_to_cpu(folder->subfolders); | ||
| 501 | } | ||
| 497 | inode->i_op = &hfsplus_dir_inode_operations; | 502 | inode->i_op = &hfsplus_dir_inode_operations; |
| 498 | inode->i_fop = &hfsplus_dir_operations; | 503 | inode->i_fop = &hfsplus_dir_operations; |
| 499 | } else if (type == HFSPLUS_FILE) { | 504 | } else if (type == HFSPLUS_FILE) { |
| @@ -566,6 +571,10 @@ int hfsplus_cat_write_inode(struct inode *inode) | |||
| 566 | folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); | 571 | folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); |
| 567 | folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); | 572 | folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); |
| 568 | folder->valence = cpu_to_be32(inode->i_size - 2); | 573 | folder->valence = cpu_to_be32(inode->i_size - 2); |
| 574 | if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) { | ||
| 575 | folder->subfolders = | ||
| 576 | cpu_to_be32(HFSPLUS_I(inode)->subfolders); | ||
| 577 | } | ||
| 569 | hfs_bnode_write(fd.bnode, &entry, fd.entryoffset, | 578 | hfs_bnode_write(fd.bnode, &entry, fd.entryoffset, |
| 570 | sizeof(struct hfsplus_cat_folder)); | 579 | sizeof(struct hfsplus_cat_folder)); |
| 571 | } else if (HFSPLUS_IS_RSRC(inode)) { | 580 | } else if (HFSPLUS_IS_RSRC(inode)) { |
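Both hfsplus hunks above test the on-disk flags field with folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT). A brief, illustrative sketch of that idiom (the example_* names are not from the patch): converting the constant rather than the field keeps the comparison endian-correct and lets the byte swap happen at compile time.

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_HAS_FOLDER_COUNT	0x0010	/* mirrors HFSPLUS_HAS_FOLDER_COUNT */

/* Test a flag in a big-endian on-disk field without byte-swapping the field. */
static inline bool example_folder_has_count(__be16 flags)
{
	return (flags & cpu_to_be16(EXAMPLE_HAS_FOLDER_COUNT)) != 0;
}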
diff --git a/fs/namei.c b/fs/namei.c index 385f7817bfcc..2f730ef9b4b3 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -1884,7 +1884,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, | |||
| 1884 | 1884 | ||
| 1885 | nd->path = f.file->f_path; | 1885 | nd->path = f.file->f_path; |
| 1886 | if (flags & LOOKUP_RCU) { | 1886 | if (flags & LOOKUP_RCU) { |
| 1887 | if (f.need_put) | 1887 | if (f.flags & FDPUT_FPUT) |
| 1888 | *fp = f.file; | 1888 | *fp = f.file; |
| 1889 | nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); | 1889 | nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); |
| 1890 | rcu_read_lock(); | 1890 | rcu_read_lock(); |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 8450262bcf2a..51632c40e896 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
| @@ -2393,8 +2393,8 @@ out_dio: | |||
| 2393 | 2393 | ||
| 2394 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || | 2394 | if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || |
| 2395 | ((file->f_flags & O_DIRECT) && !direct_io)) { | 2395 | ((file->f_flags & O_DIRECT) && !direct_io)) { |
| 2396 | ret = filemap_fdatawrite_range(file->f_mapping, pos, | 2396 | ret = filemap_fdatawrite_range(file->f_mapping, *ppos, |
| 2397 | pos + count - 1); | 2397 | *ppos + count - 1); |
| 2398 | if (ret < 0) | 2398 | if (ret < 0) |
| 2399 | written = ret; | 2399 | written = ret; |
| 2400 | 2400 | ||
| @@ -2407,8 +2407,8 @@ out_dio: | |||
| 2407 | } | 2407 | } |
| 2408 | 2408 | ||
| 2409 | if (!ret) | 2409 | if (!ret) |
| 2410 | ret = filemap_fdatawait_range(file->f_mapping, pos, | 2410 | ret = filemap_fdatawait_range(file->f_mapping, *ppos, |
| 2411 | pos + count - 1); | 2411 | *ppos + count - 1); |
| 2412 | } | 2412 | } |
| 2413 | 2413 | ||
| 2414 | /* | 2414 | /* |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
| @@ -705,6 +705,10 @@ static int do_dentry_open(struct file *f, | |||
| 705 | return 0; | 705 | return 0; |
| 706 | } | 706 | } |
| 707 | 707 | ||
| 708 | /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ | ||
| 709 | if (S_ISREG(inode->i_mode)) | ||
| 710 | f->f_mode |= FMODE_ATOMIC_POS; | ||
| 711 | |||
| 708 | f->f_op = fops_get(inode->i_fop); | 712 | f->f_op = fops_get(inode->i_fop); |
| 709 | if (unlikely(WARN_ON(!f->f_op))) { | 713 | if (unlikely(WARN_ON(!f->f_op))) { |
| 710 | error = -ENODEV; | 714 | error = -ENODEV; |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 51507065263b..b9760628e1fd 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -1824,6 +1824,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) | |||
| 1824 | if (rc) | 1824 | if (rc) |
| 1825 | goto out_mmput; | 1825 | goto out_mmput; |
| 1826 | 1826 | ||
| 1827 | rc = -ENOENT; | ||
| 1827 | down_read(&mm->mmap_sem); | 1828 | down_read(&mm->mmap_sem); |
| 1828 | vma = find_exact_vma(mm, vm_start, vm_end); | 1829 | vma = find_exact_vma(mm, vm_start, vm_end); |
| 1829 | if (vma && vma->vm_file) { | 1830 | if (vma && vma->vm_file) { |
diff --git a/fs/read_write.c b/fs/read_write.c index edc5746a902a..54e19b9392dc 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -264,10 +264,22 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence) | |||
| 264 | } | 264 | } |
| 265 | EXPORT_SYMBOL(vfs_llseek); | 265 | EXPORT_SYMBOL(vfs_llseek); |
| 266 | 266 | ||
| 267 | static inline struct fd fdget_pos(int fd) | ||
| 268 | { | ||
| 269 | return __to_fd(__fdget_pos(fd)); | ||
| 270 | } | ||
| 271 | |||
| 272 | static inline void fdput_pos(struct fd f) | ||
| 273 | { | ||
| 274 | if (f.flags & FDPUT_POS_UNLOCK) | ||
| 275 | mutex_unlock(&f.file->f_pos_lock); | ||
| 276 | fdput(f); | ||
| 277 | } | ||
| 278 | |||
| 267 | SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) | 279 | SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) |
| 268 | { | 280 | { |
| 269 | off_t retval; | 281 | off_t retval; |
| 270 | struct fd f = fdget(fd); | 282 | struct fd f = fdget_pos(fd); |
| 271 | if (!f.file) | 283 | if (!f.file) |
| 272 | return -EBADF; | 284 | return -EBADF; |
| 273 | 285 | ||
| @@ -278,7 +290,7 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) | |||
| 278 | if (res != (loff_t)retval) | 290 | if (res != (loff_t)retval) |
| 279 | retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */ | 291 | retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */ |
| 280 | } | 292 | } |
| 281 | fdput(f); | 293 | fdput_pos(f); |
| 282 | return retval; | 294 | return retval; |
| 283 | } | 295 | } |
| 284 | 296 | ||
| @@ -498,7 +510,7 @@ static inline void file_pos_write(struct file *file, loff_t pos) | |||
| 498 | 510 | ||
| 499 | SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) | 511 | SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) |
| 500 | { | 512 | { |
| 501 | struct fd f = fdget(fd); | 513 | struct fd f = fdget_pos(fd); |
| 502 | ssize_t ret = -EBADF; | 514 | ssize_t ret = -EBADF; |
| 503 | 515 | ||
| 504 | if (f.file) { | 516 | if (f.file) { |
| @@ -506,7 +518,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) | |||
| 506 | ret = vfs_read(f.file, buf, count, &pos); | 518 | ret = vfs_read(f.file, buf, count, &pos); |
| 507 | if (ret >= 0) | 519 | if (ret >= 0) |
| 508 | file_pos_write(f.file, pos); | 520 | file_pos_write(f.file, pos); |
| 509 | fdput(f); | 521 | fdput_pos(f); |
| 510 | } | 522 | } |
| 511 | return ret; | 523 | return ret; |
| 512 | } | 524 | } |
| @@ -514,7 +526,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) | |||
| 514 | SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, | 526 | SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, |
| 515 | size_t, count) | 527 | size_t, count) |
| 516 | { | 528 | { |
| 517 | struct fd f = fdget(fd); | 529 | struct fd f = fdget_pos(fd); |
| 518 | ssize_t ret = -EBADF; | 530 | ssize_t ret = -EBADF; |
| 519 | 531 | ||
| 520 | if (f.file) { | 532 | if (f.file) { |
| @@ -522,7 +534,7 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, | |||
| 522 | ret = vfs_write(f.file, buf, count, &pos); | 534 | ret = vfs_write(f.file, buf, count, &pos); |
| 523 | if (ret >= 0) | 535 | if (ret >= 0) |
| 524 | file_pos_write(f.file, pos); | 536 | file_pos_write(f.file, pos); |
| 525 | fdput(f); | 537 | fdput_pos(f); |
| 526 | } | 538 | } |
| 527 | 539 | ||
| 528 | return ret; | 540 | return ret; |
| @@ -797,7 +809,7 @@ EXPORT_SYMBOL(vfs_writev); | |||
| 797 | SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, | 809 | SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, |
| 798 | unsigned long, vlen) | 810 | unsigned long, vlen) |
| 799 | { | 811 | { |
| 800 | struct fd f = fdget(fd); | 812 | struct fd f = fdget_pos(fd); |
| 801 | ssize_t ret = -EBADF; | 813 | ssize_t ret = -EBADF; |
| 802 | 814 | ||
| 803 | if (f.file) { | 815 | if (f.file) { |
| @@ -805,7 +817,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, | |||
| 805 | ret = vfs_readv(f.file, vec, vlen, &pos); | 817 | ret = vfs_readv(f.file, vec, vlen, &pos); |
| 806 | if (ret >= 0) | 818 | if (ret >= 0) |
| 807 | file_pos_write(f.file, pos); | 819 | file_pos_write(f.file, pos); |
| 808 | fdput(f); | 820 | fdput_pos(f); |
| 809 | } | 821 | } |
| 810 | 822 | ||
| 811 | if (ret > 0) | 823 | if (ret > 0) |
| @@ -817,7 +829,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, | |||
| 817 | SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, | 829 | SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, |
| 818 | unsigned long, vlen) | 830 | unsigned long, vlen) |
| 819 | { | 831 | { |
| 820 | struct fd f = fdget(fd); | 832 | struct fd f = fdget_pos(fd); |
| 821 | ssize_t ret = -EBADF; | 833 | ssize_t ret = -EBADF; |
| 822 | 834 | ||
| 823 | if (f.file) { | 835 | if (f.file) { |
| @@ -825,7 +837,7 @@ SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, | |||
| 825 | ret = vfs_writev(f.file, vec, vlen, &pos); | 837 | ret = vfs_writev(f.file, vec, vlen, &pos); |
| 826 | if (ret >= 0) | 838 | if (ret >= 0) |
| 827 | file_pos_write(f.file, pos); | 839 | file_pos_write(f.file, pos); |
| 828 | fdput(f); | 840 | fdput_pos(f); |
| 829 | } | 841 | } |
| 830 | 842 | ||
| 831 | if (ret > 0) | 843 | if (ret > 0) |
| @@ -968,7 +980,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd, | |||
| 968 | const struct compat_iovec __user *,vec, | 980 | const struct compat_iovec __user *,vec, |
| 969 | compat_ulong_t, vlen) | 981 | compat_ulong_t, vlen) |
| 970 | { | 982 | { |
| 971 | struct fd f = fdget(fd); | 983 | struct fd f = fdget_pos(fd); |
| 972 | ssize_t ret; | 984 | ssize_t ret; |
| 973 | loff_t pos; | 985 | loff_t pos; |
| 974 | 986 | ||
| @@ -978,7 +990,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd, | |||
| 978 | ret = compat_readv(f.file, vec, vlen, &pos); | 990 | ret = compat_readv(f.file, vec, vlen, &pos); |
| 979 | if (ret >= 0) | 991 | if (ret >= 0) |
| 980 | f.file->f_pos = pos; | 992 | f.file->f_pos = pos; |
| 981 | fdput(f); | 993 | fdput_pos(f); |
| 982 | return ret; | 994 | return ret; |
| 983 | } | 995 | } |
| 984 | 996 | ||
| @@ -1035,7 +1047,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd, | |||
| 1035 | const struct compat_iovec __user *, vec, | 1047 | const struct compat_iovec __user *, vec, |
| 1036 | compat_ulong_t, vlen) | 1048 | compat_ulong_t, vlen) |
| 1037 | { | 1049 | { |
| 1038 | struct fd f = fdget(fd); | 1050 | struct fd f = fdget_pos(fd); |
| 1039 | ssize_t ret; | 1051 | ssize_t ret; |
| 1040 | loff_t pos; | 1052 | loff_t pos; |
| 1041 | 1053 | ||
| @@ -1045,7 +1057,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd, | |||
| 1045 | ret = compat_writev(f.file, vec, vlen, &pos); | 1057 | ret = compat_writev(f.file, vec, vlen, &pos); |
| 1046 | if (ret >= 0) | 1058 | if (ret >= 0) |
| 1047 | f.file->f_pos = pos; | 1059 | f.file->f_pos = pos; |
| 1048 | fdput(f); | 1060 | fdput_pos(f); |
| 1049 | return ret; | 1061 | return ret; |
| 1050 | } | 1062 | } |
| 1051 | 1063 | ||
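The fdget_pos()/fdput_pos() wrappers above only take f_pos_lock when the descriptor may really be shared. A short userspace illustration of the situation they protect (an assumption for illustration, not from the patch): two threads writing through the same open file description both advance the same file offset, and POSIX.1-2008 XSI 2.9.7 requires those updates not to race.

#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>

static int fd;	/* one open file description shared by both threads */

static void *writer(void *msg)
{
	for (int i = 0; i < 1000; i++)
		if (write(fd, msg, 4) != 4)	/* both threads move the same offset */
			break;
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	fd = open("out.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;
	pthread_create(&a, NULL, writer, (void *)"aaa\n");
	pthread_create(&b, NULL, writer, (void *)"bbb\n");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	close(fd);
	return 0;
}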
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index be85127bfed3..f27000f55a83 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
| @@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add | |||
| 171 | return 0; | 171 | return 0; |
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | ||
| 175 | { | ||
| 176 | return -ENXIO; | ||
| 177 | } | ||
| 178 | |||
| 174 | static inline int kvm_vgic_init(struct kvm *kvm) | 179 | static inline int kvm_vgic_init(struct kvm *kvm) |
| 175 | { | 180 | { |
| 176 | return 0; | 181 | return 0; |
diff --git a/include/linux/audit.h b/include/linux/audit.h index aa865a9a4c4f..ec1464df4c60 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
| @@ -43,6 +43,7 @@ struct mq_attr; | |||
| 43 | struct mqstat; | 43 | struct mqstat; |
| 44 | struct audit_watch; | 44 | struct audit_watch; |
| 45 | struct audit_tree; | 45 | struct audit_tree; |
| 46 | struct sk_buff; | ||
| 46 | 47 | ||
| 47 | struct audit_krule { | 48 | struct audit_krule { |
| 48 | int vers_ops; | 49 | int vers_ops; |
| @@ -463,7 +464,7 @@ extern int audit_filter_user(int type); | |||
| 463 | extern int audit_filter_type(int type); | 464 | extern int audit_filter_type(int type); |
| 464 | extern int audit_rule_change(int type, __u32 portid, int seq, | 465 | extern int audit_rule_change(int type, __u32 portid, int seq, |
| 465 | void *data, size_t datasz); | 466 | void *data, size_t datasz); |
| 466 | extern int audit_list_rules_send(__u32 portid, int seq); | 467 | extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); |
| 467 | 468 | ||
| 468 | extern u32 audit_enabled; | 469 | extern u32 audit_enabled; |
| 469 | #else /* CONFIG_AUDIT */ | 470 | #else /* CONFIG_AUDIT */ |
diff --git a/include/linux/file.h b/include/linux/file.h index cbacf4faf447..4d69123377a2 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
| @@ -28,33 +28,36 @@ static inline void fput_light(struct file *file, int fput_needed) | |||
| 28 | 28 | ||
| 29 | struct fd { | 29 | struct fd { |
| 30 | struct file *file; | 30 | struct file *file; |
| 31 | int need_put; | 31 | unsigned int flags; |
| 32 | }; | 32 | }; |
| 33 | #define FDPUT_FPUT 1 | ||
| 34 | #define FDPUT_POS_UNLOCK 2 | ||
| 33 | 35 | ||
| 34 | static inline void fdput(struct fd fd) | 36 | static inline void fdput(struct fd fd) |
| 35 | { | 37 | { |
| 36 | if (fd.need_put) | 38 | if (fd.flags & FDPUT_FPUT) |
| 37 | fput(fd.file); | 39 | fput(fd.file); |
| 38 | } | 40 | } |
| 39 | 41 | ||
| 40 | extern struct file *fget(unsigned int fd); | 42 | extern struct file *fget(unsigned int fd); |
| 41 | extern struct file *fget_light(unsigned int fd, int *fput_needed); | 43 | extern struct file *fget_raw(unsigned int fd); |
| 44 | extern unsigned long __fdget(unsigned int fd); | ||
| 45 | extern unsigned long __fdget_raw(unsigned int fd); | ||
| 46 | extern unsigned long __fdget_pos(unsigned int fd); | ||
| 42 | 47 | ||
| 43 | static inline struct fd fdget(unsigned int fd) | 48 | static inline struct fd __to_fd(unsigned long v) |
| 44 | { | 49 | { |
| 45 | int b; | 50 | return (struct fd){(struct file *)(v & ~3),v & 3}; |
| 46 | struct file *f = fget_light(fd, &b); | ||
| 47 | return (struct fd){f,b}; | ||
| 48 | } | 51 | } |
| 49 | 52 | ||
| 50 | extern struct file *fget_raw(unsigned int fd); | 53 | static inline struct fd fdget(unsigned int fd) |
| 51 | extern struct file *fget_raw_light(unsigned int fd, int *fput_needed); | 54 | { |
| 55 | return __to_fd(__fdget(fd)); | ||
| 56 | } | ||
| 52 | 57 | ||
| 53 | static inline struct fd fdget_raw(unsigned int fd) | 58 | static inline struct fd fdget_raw(unsigned int fd) |
| 54 | { | 59 | { |
| 55 | int b; | 60 | return __to_fd(__fdget_raw(fd)); |
| 56 | struct file *f = fget_raw_light(fd, &b); | ||
| 57 | return (struct fd){f,b}; | ||
| 58 | } | 61 | } |
| 59 | 62 | ||
| 60 | extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); | 63 | extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); |
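The struct fd change works because __fdget() and friends return the struct file pointer and the FDPUT_* flags packed into a single unsigned long: struct file is forced to at least 4-byte alignment (see the fs.h hunk below), so the two low pointer bits are always zero and can carry FDPUT_FPUT and FDPUT_POS_UNLOCK. A small illustrative sketch of the round trip (example_pack is hypothetical; __to_fd above is the real unpacking helper):

#include <linux/file.h>

/* Hypothetical inverse of __to_fd(): pack a file pointer plus flag bits. */
static inline unsigned long example_pack(struct file *file, unsigned int flags)
{
	return (unsigned long)file | (flags & 3);
}

/* Round trip: v = example_pack(f, FDPUT_FPUT); then __to_fd(v).file == f and
 * __to_fd(v).flags == FDPUT_FPUT, because the low two pointer bits are zero. */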
diff --git a/include/linux/fs.h b/include/linux/fs.h index 60829565e552..23b2a35d712e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -123,6 +123,9 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
| 123 | /* File is opened with O_PATH; almost nothing can be done with it */ | 123 | /* File is opened with O_PATH; almost nothing can be done with it */ |
| 124 | #define FMODE_PATH ((__force fmode_t)0x4000) | 124 | #define FMODE_PATH ((__force fmode_t)0x4000) |
| 125 | 125 | ||
| 126 | /* File needs atomic accesses to f_pos */ | ||
| 127 | #define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) | ||
| 128 | |||
| 126 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 129 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
| 127 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) | 130 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) |
| 128 | 131 | ||
| @@ -780,13 +783,14 @@ struct file { | |||
| 780 | const struct file_operations *f_op; | 783 | const struct file_operations *f_op; |
| 781 | 784 | ||
| 782 | /* | 785 | /* |
| 783 | * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR. | 786 | * Protects f_ep_links, f_flags. |
| 784 | * Must not be taken from IRQ context. | 787 | * Must not be taken from IRQ context. |
| 785 | */ | 788 | */ |
| 786 | spinlock_t f_lock; | 789 | spinlock_t f_lock; |
| 787 | atomic_long_t f_count; | 790 | atomic_long_t f_count; |
| 788 | unsigned int f_flags; | 791 | unsigned int f_flags; |
| 789 | fmode_t f_mode; | 792 | fmode_t f_mode; |
| 793 | struct mutex f_pos_lock; | ||
| 790 | loff_t f_pos; | 794 | loff_t f_pos; |
| 791 | struct fown_struct f_owner; | 795 | struct fown_struct f_owner; |
| 792 | const struct cred *f_cred; | 796 | const struct cred *f_cred; |
| @@ -808,7 +812,7 @@ struct file { | |||
| 808 | #ifdef CONFIG_DEBUG_WRITECOUNT | 812 | #ifdef CONFIG_DEBUG_WRITECOUNT |
| 809 | unsigned long f_mnt_write_state; | 813 | unsigned long f_mnt_write_state; |
| 810 | #endif | 814 | #endif |
| 811 | }; | 815 | } __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ |
| 812 | 816 | ||
| 813 | struct file_handle { | 817 | struct file_handle { |
| 814 | __u32 handle_bytes; | 818 | __u32 handle_bytes; |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0437439bc047..39b81dc7d01a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -123,6 +123,10 @@ struct vm_area_struct; | |||
| 123 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ | 123 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ |
| 124 | __GFP_NO_KSWAPD) | 124 | __GFP_NO_KSWAPD) |
| 125 | 125 | ||
| 126 | /* | ||
| 127 | * GFP_THISNODE does not perform any reclaim, you most likely want to | ||
| 128 | * use __GFP_THISNODE to allocate from a given node without fallback! | ||
| 129 | */ | ||
| 126 | #ifdef CONFIG_NUMA | 130 | #ifdef CONFIG_NUMA |
| 127 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) | 131 | #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) |
| 128 | #else | 132 | #else |
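The comment added above is the crux of the GFP_THISNODE fixes elsewhere in this series (see kernel/profile.c below): GFP_THISNODE bundles __GFP_THISNODE with __GFP_NORETRY and __GFP_NOWARN, so it fails fast without reclaim, whereas a caller that merely wants node-local memory with normal reclaim behaviour should combine __GFP_THISNODE with its usual flags. A hedged sketch of the intended usage (alloc_on_node is a made-up wrapper):

#include <linux/gfp.h>

/* Made-up wrapper: allocate one page on a specific node while keeping the
 * normal GFP_KERNEL reclaim behaviour, by using __GFP_THISNODE alone. */
static struct page *alloc_on_node(int nid)
{
	return alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
}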
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5f2052c83154..9b61b9bf81ac 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone) | |||
| 590 | 590 | ||
| 591 | /* | 591 | /* |
| 592 | * The NUMA zonelists are doubled because we need zonelists that restrict the | 592 | * The NUMA zonelists are doubled because we need zonelists that restrict the |
| 593 | * allocations to a single node for GFP_THISNODE. | 593 | * allocations to a single node for __GFP_THISNODE. |
| 594 | * | 594 | * |
| 595 | * [0] : Zonelist with fallback | 595 | * [0] : Zonelist with fallback |
| 596 | * [1] : No fallback (GFP_THISNODE) | 596 | * [1] : No fallback (__GFP_THISNODE) |
| 597 | */ | 597 | */ |
| 598 | #define MAX_ZONELISTS 2 | 598 | #define MAX_ZONELISTS 2 |
| 599 | 599 | ||
diff --git a/include/linux/slab.h b/include/linux/slab.h index 9260abdd67df..b5b2df60299e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | |||
| 410 | * | 410 | * |
| 411 | * %GFP_NOWAIT - Allocation will not sleep. | 411 | * %GFP_NOWAIT - Allocation will not sleep. |
| 412 | * | 412 | * |
| 413 | * %GFP_THISNODE - Allocate node-local memory only. | 413 | * %__GFP_THISNODE - Allocate node-local memory only. |
| 414 | * | 414 | * |
| 415 | * %GFP_DMA - Allocation suitable for DMA. | 415 | * %GFP_DMA - Allocation suitable for DMA. |
| 416 | * Should only be used for kmalloc() caches. Otherwise, use a | 416 | * Should only be used for kmalloc() caches. Otherwise, use a |
diff --git a/include/net/sock.h b/include/net/sock.h index 5c3f7c3624aa..b9586a137cad 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -1488,6 +1488,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) | |||
| 1488 | */ | 1488 | */ |
| 1489 | #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) | 1489 | #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) |
| 1490 | 1490 | ||
| 1491 | static inline void sock_release_ownership(struct sock *sk) | ||
| 1492 | { | ||
| 1493 | sk->sk_lock.owned = 0; | ||
| 1494 | } | ||
| 1495 | |||
| 1491 | /* | 1496 | /* |
| 1492 | * Macro so as to not evaluate some arguments when | 1497 | * Macro so as to not evaluate some arguments when |
| 1493 | * lockdep is not enabled. | 1498 | * lockdep is not enabled. |
| @@ -2186,7 +2191,6 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | |||
| 2186 | { | 2191 | { |
| 2187 | #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ | 2192 | #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ |
| 2188 | (1UL << SOCK_RCVTSTAMP) | \ | 2193 | (1UL << SOCK_RCVTSTAMP) | \ |
| 2189 | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ | ||
| 2190 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ | 2194 | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ |
| 2191 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ | 2195 | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ |
| 2192 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) | 2196 | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) |
diff --git a/kernel/audit.c b/kernel/audit.c index 34c5a2310fbf..3392d3e0254a 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
| @@ -182,7 +182,7 @@ struct audit_buffer { | |||
| 182 | 182 | ||
| 183 | struct audit_reply { | 183 | struct audit_reply { |
| 184 | __u32 portid; | 184 | __u32 portid; |
| 185 | pid_t pid; | 185 | struct net *net; |
| 186 | struct sk_buff *skb; | 186 | struct sk_buff *skb; |
| 187 | }; | 187 | }; |
| 188 | 188 | ||
| @@ -500,7 +500,7 @@ int audit_send_list(void *_dest) | |||
| 500 | { | 500 | { |
| 501 | struct audit_netlink_list *dest = _dest; | 501 | struct audit_netlink_list *dest = _dest; |
| 502 | struct sk_buff *skb; | 502 | struct sk_buff *skb; |
| 503 | struct net *net = get_net_ns_by_pid(dest->pid); | 503 | struct net *net = dest->net; |
| 504 | struct audit_net *aunet = net_generic(net, audit_net_id); | 504 | struct audit_net *aunet = net_generic(net, audit_net_id); |
| 505 | 505 | ||
| 506 | /* wait for parent to finish and send an ACK */ | 506 | /* wait for parent to finish and send an ACK */ |
| @@ -510,6 +510,7 @@ int audit_send_list(void *_dest) | |||
| 510 | while ((skb = __skb_dequeue(&dest->q)) != NULL) | 510 | while ((skb = __skb_dequeue(&dest->q)) != NULL) |
| 511 | netlink_unicast(aunet->nlsk, skb, dest->portid, 0); | 511 | netlink_unicast(aunet->nlsk, skb, dest->portid, 0); |
| 512 | 512 | ||
| 513 | put_net(net); | ||
| 513 | kfree(dest); | 514 | kfree(dest); |
| 514 | 515 | ||
| 515 | return 0; | 516 | return 0; |
| @@ -543,7 +544,7 @@ out_kfree_skb: | |||
| 543 | static int audit_send_reply_thread(void *arg) | 544 | static int audit_send_reply_thread(void *arg) |
| 544 | { | 545 | { |
| 545 | struct audit_reply *reply = (struct audit_reply *)arg; | 546 | struct audit_reply *reply = (struct audit_reply *)arg; |
| 546 | struct net *net = get_net_ns_by_pid(reply->pid); | 547 | struct net *net = reply->net; |
| 547 | struct audit_net *aunet = net_generic(net, audit_net_id); | 548 | struct audit_net *aunet = net_generic(net, audit_net_id); |
| 548 | 549 | ||
| 549 | mutex_lock(&audit_cmd_mutex); | 550 | mutex_lock(&audit_cmd_mutex); |
| @@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg) | |||
| 552 | /* Ignore failure. It'll only happen if the sender goes away, | 553 | /* Ignore failure. It'll only happen if the sender goes away, |
| 553 | because our timeout is set to infinite. */ | 554 | because our timeout is set to infinite. */ |
| 554 | netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); | 555 | netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); |
| 556 | put_net(net); | ||
| 555 | kfree(reply); | 557 | kfree(reply); |
| 556 | return 0; | 558 | return 0; |
| 557 | } | 559 | } |
| 558 | /** | 560 | /** |
| 559 | * audit_send_reply - send an audit reply message via netlink | 561 | * audit_send_reply - send an audit reply message via netlink |
| 560 | * @portid: netlink port to which to send reply | 562 | * @request_skb: skb of request we are replying to (used to target the reply) |
| 561 | * @seq: sequence number | 563 | * @seq: sequence number |
| 562 | * @type: audit message type | 564 | * @type: audit message type |
| 563 | * @done: done (last) flag | 565 | * @done: done (last) flag |
| @@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg) | |||
| 568 | * Allocates an skb, builds the netlink message, and sends it to the port id. | 570 | * Allocates an skb, builds the netlink message, and sends it to the port id. |
| 569 | * No failure notifications. | 571 | * No failure notifications. |
| 570 | */ | 572 | */ |
| 571 | static void audit_send_reply(__u32 portid, int seq, int type, int done, | 573 | static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, |
| 572 | int multi, const void *payload, int size) | 574 | int multi, const void *payload, int size) |
| 573 | { | 575 | { |
| 576 | u32 portid = NETLINK_CB(request_skb).portid; | ||
| 577 | struct net *net = sock_net(NETLINK_CB(request_skb).sk); | ||
| 574 | struct sk_buff *skb; | 578 | struct sk_buff *skb; |
| 575 | struct task_struct *tsk; | 579 | struct task_struct *tsk; |
| 576 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), | 580 | struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), |
| @@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done, | |||
| 583 | if (!skb) | 587 | if (!skb) |
| 584 | goto out; | 588 | goto out; |
| 585 | 589 | ||
| 590 | reply->net = get_net(net); | ||
| 586 | reply->portid = portid; | 591 | reply->portid = portid; |
| 587 | reply->pid = task_pid_vnr(current); | ||
| 588 | reply->skb = skb; | 592 | reply->skb = skb; |
| 589 | 593 | ||
| 590 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); | 594 | tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); |
| @@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb) | |||
| 673 | 677 | ||
| 674 | seq = nlmsg_hdr(skb)->nlmsg_seq; | 678 | seq = nlmsg_hdr(skb)->nlmsg_seq; |
| 675 | 679 | ||
| 676 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, | 680 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af)); |
| 677 | &af, sizeof(af)); | ||
| 678 | 681 | ||
| 679 | return 0; | 682 | return 0; |
| 680 | } | 683 | } |
| @@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 794 | s.backlog = skb_queue_len(&audit_skb_queue); | 797 | s.backlog = skb_queue_len(&audit_skb_queue); |
| 795 | s.version = AUDIT_VERSION_LATEST; | 798 | s.version = AUDIT_VERSION_LATEST; |
| 796 | s.backlog_wait_time = audit_backlog_wait_time; | 799 | s.backlog_wait_time = audit_backlog_wait_time; |
| 797 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, | 800 | audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); |
| 798 | &s, sizeof(s)); | ||
| 799 | break; | 801 | break; |
| 800 | } | 802 | } |
| 801 | case AUDIT_SET: { | 803 | case AUDIT_SET: { |
| @@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 905 | seq, data, nlmsg_len(nlh)); | 907 | seq, data, nlmsg_len(nlh)); |
| 906 | break; | 908 | break; |
| 907 | case AUDIT_LIST_RULES: | 909 | case AUDIT_LIST_RULES: |
| 908 | err = audit_list_rules_send(NETLINK_CB(skb).portid, seq); | 910 | err = audit_list_rules_send(skb, seq); |
| 909 | break; | 911 | break; |
| 910 | case AUDIT_TRIM: | 912 | case AUDIT_TRIM: |
| 911 | audit_trim_trees(); | 913 | audit_trim_trees(); |
| @@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 970 | memcpy(sig_data->ctx, ctx, len); | 972 | memcpy(sig_data->ctx, ctx, len); |
| 971 | security_release_secctx(ctx, len); | 973 | security_release_secctx(ctx, len); |
| 972 | } | 974 | } |
| 973 | audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO, | 975 | audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0, |
| 974 | 0, 0, sig_data, sizeof(*sig_data) + len); | 976 | sig_data, sizeof(*sig_data) + len); |
| 975 | kfree(sig_data); | 977 | kfree(sig_data); |
| 976 | break; | 978 | break; |
| 977 | case AUDIT_TTY_GET: { | 979 | case AUDIT_TTY_GET: { |
| @@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 983 | s.log_passwd = tsk->signal->audit_tty_log_passwd; | 985 | s.log_passwd = tsk->signal->audit_tty_log_passwd; |
| 984 | spin_unlock(&tsk->sighand->siglock); | 986 | spin_unlock(&tsk->sighand->siglock); |
| 985 | 987 | ||
| 986 | audit_send_reply(NETLINK_CB(skb).portid, seq, | 988 | audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); |
| 987 | AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); | ||
| 988 | break; | 989 | break; |
| 989 | } | 990 | } |
| 990 | case AUDIT_TTY_SET: { | 991 | case AUDIT_TTY_SET: { |
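The audit changes above all follow the same recipe for choosing the network namespace of a reply: derive it from the socket the request arrived on via sock_net(NETLINK_CB(request_skb).sk), and pin it with get_net()/put_net() because the reply is actually sent from a kernel thread after the handler returns. A compact sketch of that recipe with hypothetical example_* names:

#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>

struct example_reply {
	struct net *net;	/* pinned namespace of the requester */
	struct sk_buff *skb;	/* the reply to send */
};

/* Capture the requester's namespace while the request skb is still live. */
static void example_prepare_reply(struct sk_buff *request_skb,
				  struct example_reply *reply,
				  struct sk_buff *reply_skb)
{
	reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
	reply->skb = reply_skb;
}

/* Later, possibly from another thread: use the namespace, then drop it. */
static void example_finish_reply(struct example_reply *reply)
{
	/* netlink_unicast() toward reply->net's audit socket would go here */
	put_net(reply->net);
}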
diff --git a/kernel/audit.h b/kernel/audit.h index 57cc64d67718..8df132214606 100644 --- a/kernel/audit.h +++ b/kernel/audit.h | |||
| @@ -247,7 +247,7 @@ extern void audit_panic(const char *message); | |||
| 247 | 247 | ||
| 248 | struct audit_netlink_list { | 248 | struct audit_netlink_list { |
| 249 | __u32 portid; | 249 | __u32 portid; |
| 250 | pid_t pid; | 250 | struct net *net; |
| 251 | struct sk_buff_head q; | 251 | struct sk_buff_head q; |
| 252 | }; | 252 | }; |
| 253 | 253 | ||
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 14a78cca384e..92062fd6cc8c 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c | |||
| @@ -29,6 +29,8 @@ | |||
| 29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
| 30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
| 31 | #include <linux/security.h> | 31 | #include <linux/security.h> |
| 32 | #include <net/net_namespace.h> | ||
| 33 | #include <net/sock.h> | ||
| 32 | #include "audit.h" | 34 | #include "audit.h" |
| 33 | 35 | ||
| 34 | /* | 36 | /* |
| @@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data, | |||
| 1065 | 1067 | ||
| 1066 | /** | 1068 | /** |
| 1067 | * audit_list_rules_send - list the audit rules | 1069 | * audit_list_rules_send - list the audit rules |
| 1068 | * @portid: target portid for netlink audit messages | 1070 | * @request_skb: skb of request we are replying to (used to target the reply) |
| 1069 | * @seq: netlink audit message sequence (serial) number | 1071 | * @seq: netlink audit message sequence (serial) number |
| 1070 | */ | 1072 | */ |
| 1071 | int audit_list_rules_send(__u32 portid, int seq) | 1073 | int audit_list_rules_send(struct sk_buff *request_skb, int seq) |
| 1072 | { | 1074 | { |
| 1075 | u32 portid = NETLINK_CB(request_skb).portid; | ||
| 1076 | struct net *net = sock_net(NETLINK_CB(request_skb).sk); | ||
| 1073 | struct task_struct *tsk; | 1077 | struct task_struct *tsk; |
| 1074 | struct audit_netlink_list *dest; | 1078 | struct audit_netlink_list *dest; |
| 1075 | int err = 0; | 1079 | int err = 0; |
| @@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq) | |||
| 1083 | dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); | 1087 | dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); |
| 1084 | if (!dest) | 1088 | if (!dest) |
| 1085 | return -ENOMEM; | 1089 | return -ENOMEM; |
| 1090 | dest->net = get_net(net); | ||
| 1086 | dest->portid = portid; | 1091 | dest->portid = portid; |
| 1087 | dest->pid = task_pid_vnr(current); | ||
| 1088 | skb_queue_head_init(&dest->q); | 1092 | skb_queue_head_init(&dest->q); |
| 1089 | 1093 | ||
| 1090 | mutex_lock(&audit_filter_mutex); | 1094 | mutex_lock(&audit_filter_mutex); |
diff --git a/kernel/profile.c b/kernel/profile.c index 6631e1ef55ab..ebdd9c1a86b4 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -549,14 +549,14 @@ static int create_hash_tables(void) | |||
| 549 | struct page *page; | 549 | struct page *page; |
| 550 | 550 | ||
| 551 | page = alloc_pages_exact_node(node, | 551 | page = alloc_pages_exact_node(node, |
| 552 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 552 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
| 553 | 0); | 553 | 0); |
| 554 | if (!page) | 554 | if (!page) |
| 555 | goto out_cleanup; | 555 | goto out_cleanup; |
| 556 | per_cpu(cpu_profile_hits, cpu)[1] | 556 | per_cpu(cpu_profile_hits, cpu)[1] |
| 557 | = (struct profile_hit *)page_address(page); | 557 | = (struct profile_hit *)page_address(page); |
| 558 | page = alloc_pages_exact_node(node, | 558 | page = alloc_pages_exact_node(node, |
| 559 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 559 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
| 560 | 0); | 560 | 0); |
| 561 | if (!page) | 561 | if (!page) |
| 562 | goto out_cleanup; | 562 | goto out_cleanup; |
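For context on the GFP_THISNODE fixes above and in mm/migrate.c below: __GFP_THISNODE is the single "do not fall back to other nodes" flag, while GFP_THISNODE is a composite meant for opportunistic allocations, roughly (paraphrased from the gfp.h of this era, so treat as illustrative):

    #ifdef CONFIG_NUMA
    #define GFP_THISNODE    (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
    #else
    #define GFP_THISNODE    ((__force gfp_t)0)
    #endif

Passing GFP_KERNEL | GFP_THISNODE therefore quietly disabled retries (and on !NUMA builds imposed no node constraint at all), which is not what these callers wanted; GFP_KERNEL | __GFP_THISNODE keeps normal reclaim behaviour while still pinning the allocation to the requested node.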
diff --git a/mm/Kconfig b/mm/Kconfig index 2d9f1504d75e..2888024e0b0a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -575,5 +575,5 @@ config PGTABLE_MAPPING | |||
| 575 | then you should select this. This causes zsmalloc to use page table | 575 | then you should select this. This causes zsmalloc to use page table |
| 576 | mapping rather than copying for object mapping. | 576 | mapping rather than copying for object mapping. |
| 577 | 577 | ||
| 578 | You can check speed with zsmalloc benchmark[1]. | 578 | You can check speed with zsmalloc benchmark: |
| 579 | [1] https://github.com/spartacus06/zsmalloc | 579 | https://github.com/spartacus06/zsmapbench |
diff --git a/mm/compaction.c b/mm/compaction.c index b48c5259ea33..918577595ea8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
| 251 | { | 251 | { |
| 252 | int nr_scanned = 0, total_isolated = 0; | 252 | int nr_scanned = 0, total_isolated = 0; |
| 253 | struct page *cursor, *valid_page = NULL; | 253 | struct page *cursor, *valid_page = NULL; |
| 254 | unsigned long nr_strict_required = end_pfn - blockpfn; | ||
| 255 | unsigned long flags; | 254 | unsigned long flags; |
| 256 | bool locked = false; | 255 | bool locked = false; |
| 257 | 256 | ||
| @@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
| 264 | 263 | ||
| 265 | nr_scanned++; | 264 | nr_scanned++; |
| 266 | if (!pfn_valid_within(blockpfn)) | 265 | if (!pfn_valid_within(blockpfn)) |
| 267 | continue; | 266 | goto isolate_fail; |
| 267 | |||
| 268 | if (!valid_page) | 268 | if (!valid_page) |
| 269 | valid_page = page; | 269 | valid_page = page; |
| 270 | if (!PageBuddy(page)) | 270 | if (!PageBuddy(page)) |
| 271 | continue; | 271 | goto isolate_fail; |
| 272 | 272 | ||
| 273 | /* | 273 | /* |
| 274 | * The zone lock must be held to isolate freepages. | 274 | * The zone lock must be held to isolate freepages. |
| @@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
| 289 | 289 | ||
| 290 | /* Recheck this is a buddy page under lock */ | 290 | /* Recheck this is a buddy page under lock */ |
| 291 | if (!PageBuddy(page)) | 291 | if (!PageBuddy(page)) |
| 292 | continue; | 292 | goto isolate_fail; |
| 293 | 293 | ||
| 294 | /* Found a free page, break it into order-0 pages */ | 294 | /* Found a free page, break it into order-0 pages */ |
| 295 | isolated = split_free_page(page); | 295 | isolated = split_free_page(page); |
| 296 | if (!isolated && strict) | ||
| 297 | break; | ||
| 298 | total_isolated += isolated; | 296 | total_isolated += isolated; |
| 299 | for (i = 0; i < isolated; i++) { | 297 | for (i = 0; i < isolated; i++) { |
| 300 | list_add(&page->lru, freelist); | 298 | list_add(&page->lru, freelist); |
| @@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
| 305 | if (isolated) { | 303 | if (isolated) { |
| 306 | blockpfn += isolated - 1; | 304 | blockpfn += isolated - 1; |
| 307 | cursor += isolated - 1; | 305 | cursor += isolated - 1; |
| 306 | continue; | ||
| 308 | } | 307 | } |
| 308 | |||
| 309 | isolate_fail: | ||
| 310 | if (strict) | ||
| 311 | break; | ||
| 312 | else | ||
| 313 | continue; | ||
| 314 | |||
| 309 | } | 315 | } |
| 310 | 316 | ||
| 311 | trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); | 317 | trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); |
| @@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
| 315 | * pages requested were isolated. If there were any failures, 0 is | 321 | * pages requested were isolated. If there were any failures, 0 is |
| 316 | * returned and CMA will fail. | 322 | * returned and CMA will fail. |
| 317 | */ | 323 | */ |
| 318 | if (strict && nr_strict_required > total_isolated) | 324 | if (strict && blockpfn < end_pfn) |
| 319 | total_isolated = 0; | 325 | total_isolated = 0; |
| 320 | 326 | ||
| 321 | if (locked) | 327 | if (locked) |
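The reshuffled loop above is easier to follow as a skeleton: every failure path now funnels through isolate_fail, which aborts immediately in strict (CMA) mode, and completeness is judged by whether the scan reached end_pfn rather than by counting pages. A paraphrase, with looks_isolatable() standing in for the pfn_valid_within()/PageBuddy() checks:

    for (; blockpfn < end_pfn; blockpfn++, cursor++) {
        if (!looks_isolatable(page))
            goto isolate_fail;              /* was: continue */
        isolated = split_free_page(page);   /* may return 0 */
        total_isolated += isolated;
        /* ... move the split pages to freelist, skip past them ... */
        if (isolated)
            continue;
    isolate_fail:
        if (strict)
            break;                          /* CMA needs the whole range or nothing */
    }
    if (strict && blockpfn < end_pfn)
        total_isolated = 0;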
diff --git a/mm/migrate.c b/mm/migrate.c index 482a33d89134..b494fdb9a636 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -1158,7 +1158,7 @@ static struct page *new_page_node(struct page *p, unsigned long private, | |||
| 1158 | pm->node); | 1158 | pm->node); |
| 1159 | else | 1159 | else |
| 1160 | return alloc_pages_exact_node(pm->node, | 1160 | return alloc_pages_exact_node(pm->node, |
| 1161 | GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); | 1161 | GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); |
| 1162 | } | 1162 | } |
| 1163 | 1163 | ||
| 1164 | /* | 1164 | /* |
| @@ -1544,9 +1544,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page, | |||
| 1544 | struct page *newpage; | 1544 | struct page *newpage; |
| 1545 | 1545 | ||
| 1546 | newpage = alloc_pages_exact_node(nid, | 1546 | newpage = alloc_pages_exact_node(nid, |
| 1547 | (GFP_HIGHUSER_MOVABLE | GFP_THISNODE | | 1547 | (GFP_HIGHUSER_MOVABLE | |
| 1548 | __GFP_NOMEMALLOC | __GFP_NORETRY | | 1548 | __GFP_THISNODE | __GFP_NOMEMALLOC | |
| 1549 | __GFP_NOWARN) & | 1549 | __GFP_NORETRY | __GFP_NOWARN) & |
| 1550 | ~GFP_IOFS, 0); | 1550 | ~GFP_IOFS, 0); |
| 1551 | 1551 | ||
| 1552 | return newpage; | 1552 | return newpage; |
| @@ -1747,7 +1747,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, | |||
| 1747 | goto out_dropref; | 1747 | goto out_dropref; |
| 1748 | 1748 | ||
| 1749 | new_page = alloc_pages_node(node, | 1749 | new_page = alloc_pages_node(node, |
| 1750 | (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER); | 1750 | (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT, |
| 1751 | HPAGE_PMD_ORDER); | ||
| 1751 | if (!new_page) | 1752 | if (!new_page) |
| 1752 | goto out_fail; | 1753 | goto out_fail; |
| 1753 | 1754 | ||
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index de51c48c4393..4b65aa492fb6 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
| @@ -538,6 +538,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev | |||
| 538 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); | 538 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); |
| 539 | struct net_device *real_dev = vlan->real_dev; | 539 | struct net_device *real_dev = vlan->real_dev; |
| 540 | 540 | ||
| 541 | if (saddr == NULL) | ||
| 542 | saddr = dev->dev_addr; | ||
| 543 | |||
| 541 | return dev_hard_header(skb, real_dev, type, daddr, saddr, len); | 544 | return dev_hard_header(skb, real_dev, type, daddr, saddr, len); |
| 542 | } | 545 | } |
| 543 | 546 | ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index ef66365b7354..93067ecdb9a2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
| @@ -1127,9 +1127,10 @@ static void br_multicast_query_received(struct net_bridge *br, | |||
| 1127 | struct net_bridge_port *port, | 1127 | struct net_bridge_port *port, |
| 1128 | struct bridge_mcast_querier *querier, | 1128 | struct bridge_mcast_querier *querier, |
| 1129 | int saddr, | 1129 | int saddr, |
| 1130 | bool is_general_query, | ||
| 1130 | unsigned long max_delay) | 1131 | unsigned long max_delay) |
| 1131 | { | 1132 | { |
| 1132 | if (saddr) | 1133 | if (saddr && is_general_query) |
| 1133 | br_multicast_update_querier_timer(br, querier, max_delay); | 1134 | br_multicast_update_querier_timer(br, querier, max_delay); |
| 1134 | else if (timer_pending(&querier->timer)) | 1135 | else if (timer_pending(&querier->timer)) |
| 1135 | return; | 1136 | return; |
| @@ -1181,8 +1182,16 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
| 1181 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; | 1182 | IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; |
| 1182 | } | 1183 | } |
| 1183 | 1184 | ||
| 1185 | /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer | ||
| 1186 | * all-systems destination address (224.0.0.1) for general queries | ||
| 1187 | */ | ||
| 1188 | if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) { | ||
| 1189 | err = -EINVAL; | ||
| 1190 | goto out; | ||
| 1191 | } | ||
| 1192 | |||
| 1184 | br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, | 1193 | br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, |
| 1185 | max_delay); | 1194 | !group, max_delay); |
| 1186 | 1195 | ||
| 1187 | if (!group) | 1196 | if (!group) |
| 1188 | goto out; | 1197 | goto out; |
| @@ -1228,6 +1237,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
| 1228 | unsigned long max_delay; | 1237 | unsigned long max_delay; |
| 1229 | unsigned long now = jiffies; | 1238 | unsigned long now = jiffies; |
| 1230 | const struct in6_addr *group = NULL; | 1239 | const struct in6_addr *group = NULL; |
| 1240 | bool is_general_query; | ||
| 1231 | int err = 0; | 1241 | int err = 0; |
| 1232 | 1242 | ||
| 1233 | spin_lock(&br->multicast_lock); | 1243 | spin_lock(&br->multicast_lock); |
| @@ -1235,6 +1245,12 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
| 1235 | (port && port->state == BR_STATE_DISABLED)) | 1245 | (port && port->state == BR_STATE_DISABLED)) |
| 1236 | goto out; | 1246 | goto out; |
| 1237 | 1247 | ||
| 1248 | /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */ | ||
| 1249 | if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) { | ||
| 1250 | err = -EINVAL; | ||
| 1251 | goto out; | ||
| 1252 | } | ||
| 1253 | |||
| 1238 | if (skb->len == sizeof(*mld)) { | 1254 | if (skb->len == sizeof(*mld)) { |
| 1239 | if (!pskb_may_pull(skb, sizeof(*mld))) { | 1255 | if (!pskb_may_pull(skb, sizeof(*mld))) { |
| 1240 | err = -EINVAL; | 1256 | err = -EINVAL; |
| @@ -1256,8 +1272,19 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
| 1256 | max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); | 1272 | max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); |
| 1257 | } | 1273 | } |
| 1258 | 1274 | ||
| 1275 | is_general_query = group && ipv6_addr_any(group); | ||
| 1276 | |||
| 1277 | /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer | ||
| 1278 | * all-nodes destination address (ff02::1) for general queries | ||
| 1279 | */ | ||
| 1280 | if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) { | ||
| 1281 | err = -EINVAL; | ||
| 1282 | goto out; | ||
| 1283 | } | ||
| 1284 | |||
| 1259 | br_multicast_query_received(br, port, &br->ip6_querier, | 1285 | br_multicast_query_received(br, port, &br->ip6_querier, |
| 1260 | !ipv6_addr_any(&ip6h->saddr), max_delay); | 1286 | !ipv6_addr_any(&ip6h->saddr), |
| 1287 | is_general_query, max_delay); | ||
| 1261 | 1288 | ||
| 1262 | if (!group) | 1289 | if (!group) |
| 1263 | goto out; | 1290 | goto out; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5d6236d9fdce..869c7afe3b07 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -2838,81 +2838,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum); | |||
| 2838 | 2838 | ||
| 2839 | /** | 2839 | /** |
| 2840 | * skb_segment - Perform protocol segmentation on skb. | 2840 | * skb_segment - Perform protocol segmentation on skb. |
| 2841 | * @skb: buffer to segment | 2841 | * @head_skb: buffer to segment |
| 2842 | * @features: features for the output path (see dev->features) | 2842 | * @features: features for the output path (see dev->features) |
| 2843 | * | 2843 | * |
| 2844 | * This function performs segmentation on the given skb. It returns | 2844 | * This function performs segmentation on the given skb. It returns |
| 2845 | * a pointer to the first in a list of new skbs for the segments. | 2845 | * a pointer to the first in a list of new skbs for the segments. |
| 2846 | * In case of error it returns ERR_PTR(err). | 2846 | * In case of error it returns ERR_PTR(err). |
| 2847 | */ | 2847 | */ |
| 2848 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | 2848 | struct sk_buff *skb_segment(struct sk_buff *head_skb, |
| 2849 | netdev_features_t features) | ||
| 2849 | { | 2850 | { |
| 2850 | struct sk_buff *segs = NULL; | 2851 | struct sk_buff *segs = NULL; |
| 2851 | struct sk_buff *tail = NULL; | 2852 | struct sk_buff *tail = NULL; |
| 2852 | struct sk_buff *fskb = skb_shinfo(skb)->frag_list; | 2853 | struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; |
| 2853 | skb_frag_t *skb_frag = skb_shinfo(skb)->frags; | 2854 | skb_frag_t *frag = skb_shinfo(head_skb)->frags; |
| 2854 | unsigned int mss = skb_shinfo(skb)->gso_size; | 2855 | unsigned int mss = skb_shinfo(head_skb)->gso_size; |
| 2855 | unsigned int doffset = skb->data - skb_mac_header(skb); | 2856 | unsigned int doffset = head_skb->data - skb_mac_header(head_skb); |
| 2857 | struct sk_buff *frag_skb = head_skb; | ||
| 2856 | unsigned int offset = doffset; | 2858 | unsigned int offset = doffset; |
| 2857 | unsigned int tnl_hlen = skb_tnl_header_len(skb); | 2859 | unsigned int tnl_hlen = skb_tnl_header_len(head_skb); |
| 2858 | unsigned int headroom; | 2860 | unsigned int headroom; |
| 2859 | unsigned int len; | 2861 | unsigned int len; |
| 2860 | __be16 proto; | 2862 | __be16 proto; |
| 2861 | bool csum; | 2863 | bool csum; |
| 2862 | int sg = !!(features & NETIF_F_SG); | 2864 | int sg = !!(features & NETIF_F_SG); |
| 2863 | int nfrags = skb_shinfo(skb)->nr_frags; | 2865 | int nfrags = skb_shinfo(head_skb)->nr_frags; |
| 2864 | int err = -ENOMEM; | 2866 | int err = -ENOMEM; |
| 2865 | int i = 0; | 2867 | int i = 0; |
| 2866 | int pos; | 2868 | int pos; |
| 2867 | 2869 | ||
| 2868 | proto = skb_network_protocol(skb); | 2870 | proto = skb_network_protocol(head_skb); |
| 2869 | if (unlikely(!proto)) | 2871 | if (unlikely(!proto)) |
| 2870 | return ERR_PTR(-EINVAL); | 2872 | return ERR_PTR(-EINVAL); |
| 2871 | 2873 | ||
| 2872 | csum = !!can_checksum_protocol(features, proto); | 2874 | csum = !!can_checksum_protocol(features, proto); |
| 2873 | __skb_push(skb, doffset); | 2875 | __skb_push(head_skb, doffset); |
| 2874 | headroom = skb_headroom(skb); | 2876 | headroom = skb_headroom(head_skb); |
| 2875 | pos = skb_headlen(skb); | 2877 | pos = skb_headlen(head_skb); |
| 2876 | 2878 | ||
| 2877 | do { | 2879 | do { |
| 2878 | struct sk_buff *nskb; | 2880 | struct sk_buff *nskb; |
| 2879 | skb_frag_t *frag; | 2881 | skb_frag_t *nskb_frag; |
| 2880 | int hsize; | 2882 | int hsize; |
| 2881 | int size; | 2883 | int size; |
| 2882 | 2884 | ||
| 2883 | len = skb->len - offset; | 2885 | len = head_skb->len - offset; |
| 2884 | if (len > mss) | 2886 | if (len > mss) |
| 2885 | len = mss; | 2887 | len = mss; |
| 2886 | 2888 | ||
| 2887 | hsize = skb_headlen(skb) - offset; | 2889 | hsize = skb_headlen(head_skb) - offset; |
| 2888 | if (hsize < 0) | 2890 | if (hsize < 0) |
| 2889 | hsize = 0; | 2891 | hsize = 0; |
| 2890 | if (hsize > len || !sg) | 2892 | if (hsize > len || !sg) |
| 2891 | hsize = len; | 2893 | hsize = len; |
| 2892 | 2894 | ||
| 2893 | if (!hsize && i >= nfrags && skb_headlen(fskb) && | 2895 | if (!hsize && i >= nfrags && skb_headlen(list_skb) && |
| 2894 | (skb_headlen(fskb) == len || sg)) { | 2896 | (skb_headlen(list_skb) == len || sg)) { |
| 2895 | BUG_ON(skb_headlen(fskb) > len); | 2897 | BUG_ON(skb_headlen(list_skb) > len); |
| 2896 | 2898 | ||
| 2897 | i = 0; | 2899 | i = 0; |
| 2898 | nfrags = skb_shinfo(fskb)->nr_frags; | 2900 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 2899 | skb_frag = skb_shinfo(fskb)->frags; | 2901 | frag = skb_shinfo(list_skb)->frags; |
| 2900 | pos += skb_headlen(fskb); | 2902 | frag_skb = list_skb; |
| 2903 | pos += skb_headlen(list_skb); | ||
| 2901 | 2904 | ||
| 2902 | while (pos < offset + len) { | 2905 | while (pos < offset + len) { |
| 2903 | BUG_ON(i >= nfrags); | 2906 | BUG_ON(i >= nfrags); |
| 2904 | 2907 | ||
| 2905 | size = skb_frag_size(skb_frag); | 2908 | size = skb_frag_size(frag); |
| 2906 | if (pos + size > offset + len) | 2909 | if (pos + size > offset + len) |
| 2907 | break; | 2910 | break; |
| 2908 | 2911 | ||
| 2909 | i++; | 2912 | i++; |
| 2910 | pos += size; | 2913 | pos += size; |
| 2911 | skb_frag++; | 2914 | frag++; |
| 2912 | } | 2915 | } |
| 2913 | 2916 | ||
| 2914 | nskb = skb_clone(fskb, GFP_ATOMIC); | 2917 | nskb = skb_clone(list_skb, GFP_ATOMIC); |
| 2915 | fskb = fskb->next; | 2918 | list_skb = list_skb->next; |
| 2916 | 2919 | ||
| 2917 | if (unlikely(!nskb)) | 2920 | if (unlikely(!nskb)) |
| 2918 | goto err; | 2921 | goto err; |
| @@ -2933,7 +2936,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
| 2933 | __skb_push(nskb, doffset); | 2936 | __skb_push(nskb, doffset); |
| 2934 | } else { | 2937 | } else { |
| 2935 | nskb = __alloc_skb(hsize + doffset + headroom, | 2938 | nskb = __alloc_skb(hsize + doffset + headroom, |
| 2936 | GFP_ATOMIC, skb_alloc_rx_flag(skb), | 2939 | GFP_ATOMIC, skb_alloc_rx_flag(head_skb), |
| 2937 | NUMA_NO_NODE); | 2940 | NUMA_NO_NODE); |
| 2938 | 2941 | ||
| 2939 | if (unlikely(!nskb)) | 2942 | if (unlikely(!nskb)) |
| @@ -2949,12 +2952,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
| 2949 | segs = nskb; | 2952 | segs = nskb; |
| 2950 | tail = nskb; | 2953 | tail = nskb; |
| 2951 | 2954 | ||
| 2952 | __copy_skb_header(nskb, skb); | 2955 | __copy_skb_header(nskb, head_skb); |
| 2953 | nskb->mac_len = skb->mac_len; | 2956 | nskb->mac_len = head_skb->mac_len; |
| 2954 | 2957 | ||
| 2955 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); | 2958 | skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); |
| 2956 | 2959 | ||
| 2957 | skb_copy_from_linear_data_offset(skb, -tnl_hlen, | 2960 | skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, |
| 2958 | nskb->data - tnl_hlen, | 2961 | nskb->data - tnl_hlen, |
| 2959 | doffset + tnl_hlen); | 2962 | doffset + tnl_hlen); |
| 2960 | 2963 | ||
| @@ -2963,30 +2966,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
| 2963 | 2966 | ||
| 2964 | if (!sg) { | 2967 | if (!sg) { |
| 2965 | nskb->ip_summed = CHECKSUM_NONE; | 2968 | nskb->ip_summed = CHECKSUM_NONE; |
| 2966 | nskb->csum = skb_copy_and_csum_bits(skb, offset, | 2969 | nskb->csum = skb_copy_and_csum_bits(head_skb, offset, |
| 2967 | skb_put(nskb, len), | 2970 | skb_put(nskb, len), |
| 2968 | len, 0); | 2971 | len, 0); |
| 2969 | continue; | 2972 | continue; |
| 2970 | } | 2973 | } |
| 2971 | 2974 | ||
| 2972 | frag = skb_shinfo(nskb)->frags; | 2975 | nskb_frag = skb_shinfo(nskb)->frags; |
| 2973 | 2976 | ||
| 2974 | skb_copy_from_linear_data_offset(skb, offset, | 2977 | skb_copy_from_linear_data_offset(head_skb, offset, |
| 2975 | skb_put(nskb, hsize), hsize); | 2978 | skb_put(nskb, hsize), hsize); |
| 2976 | 2979 | ||
| 2977 | skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; | 2980 | skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & |
| 2981 | SKBTX_SHARED_FRAG; | ||
| 2978 | 2982 | ||
| 2979 | while (pos < offset + len) { | 2983 | while (pos < offset + len) { |
| 2980 | if (i >= nfrags) { | 2984 | if (i >= nfrags) { |
| 2981 | BUG_ON(skb_headlen(fskb)); | 2985 | BUG_ON(skb_headlen(list_skb)); |
| 2982 | 2986 | ||
| 2983 | i = 0; | 2987 | i = 0; |
| 2984 | nfrags = skb_shinfo(fskb)->nr_frags; | 2988 | nfrags = skb_shinfo(list_skb)->nr_frags; |
| 2985 | skb_frag = skb_shinfo(fskb)->frags; | 2989 | frag = skb_shinfo(list_skb)->frags; |
| 2990 | frag_skb = list_skb; | ||
| 2986 | 2991 | ||
| 2987 | BUG_ON(!nfrags); | 2992 | BUG_ON(!nfrags); |
| 2988 | 2993 | ||
| 2989 | fskb = fskb->next; | 2994 | list_skb = list_skb->next; |
| 2990 | } | 2995 | } |
| 2991 | 2996 | ||
| 2992 | if (unlikely(skb_shinfo(nskb)->nr_frags >= | 2997 | if (unlikely(skb_shinfo(nskb)->nr_frags >= |
| @@ -2997,27 +3002,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) | |||
| 2997 | goto err; | 3002 | goto err; |
| 2998 | } | 3003 | } |
| 2999 | 3004 | ||
| 3000 | *frag = *skb_frag; | 3005 | if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) |
| 3001 | __skb_frag_ref(frag); | 3006 | goto err; |
| 3002 | size = skb_frag_size(frag); | 3007 | |
| 3008 | *nskb_frag = *frag; | ||
| 3009 | __skb_frag_ref(nskb_frag); | ||
| 3010 | size = skb_frag_size(nskb_frag); | ||
| 3003 | 3011 | ||
| 3004 | if (pos < offset) { | 3012 | if (pos < offset) { |
| 3005 | frag->page_offset += offset - pos; | 3013 | nskb_frag->page_offset += offset - pos; |
| 3006 | skb_frag_size_sub(frag, offset - pos); | 3014 | skb_frag_size_sub(nskb_frag, offset - pos); |
| 3007 | } | 3015 | } |
| 3008 | 3016 | ||
| 3009 | skb_shinfo(nskb)->nr_frags++; | 3017 | skb_shinfo(nskb)->nr_frags++; |
| 3010 | 3018 | ||
| 3011 | if (pos + size <= offset + len) { | 3019 | if (pos + size <= offset + len) { |
| 3012 | i++; | 3020 | i++; |
| 3013 | skb_frag++; | 3021 | frag++; |
| 3014 | pos += size; | 3022 | pos += size; |
| 3015 | } else { | 3023 | } else { |
| 3016 | skb_frag_size_sub(frag, pos + size - (offset + len)); | 3024 | skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); |
| 3017 | goto skip_fraglist; | 3025 | goto skip_fraglist; |
| 3018 | } | 3026 | } |
| 3019 | 3027 | ||
| 3020 | frag++; | 3028 | nskb_frag++; |
| 3021 | } | 3029 | } |
| 3022 | 3030 | ||
| 3023 | skip_fraglist: | 3031 | skip_fraglist: |
| @@ -3031,7 +3039,7 @@ perform_csum_check: | |||
| 3031 | nskb->len - doffset, 0); | 3039 | nskb->len - doffset, 0); |
| 3032 | nskb->ip_summed = CHECKSUM_NONE; | 3040 | nskb->ip_summed = CHECKSUM_NONE; |
| 3033 | } | 3041 | } |
| 3034 | } while ((offset += len) < skb->len); | 3042 | } while ((offset += len) < head_skb->len); |
| 3035 | 3043 | ||
| 3036 | return segs; | 3044 | return segs; |
| 3037 | 3045 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 5b6a9431b017..c0fc6bdad1e3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -2357,10 +2357,13 @@ void release_sock(struct sock *sk) | |||
| 2357 | if (sk->sk_backlog.tail) | 2357 | if (sk->sk_backlog.tail) |
| 2358 | __release_sock(sk); | 2358 | __release_sock(sk); |
| 2359 | 2359 | ||
| 2360 | /* Warning : release_cb() might need to release sk ownership, | ||
| 2361 | * ie call sock_release_ownership(sk) before us. | ||
| 2362 | */ | ||
| 2360 | if (sk->sk_prot->release_cb) | 2363 | if (sk->sk_prot->release_cb) |
| 2361 | sk->sk_prot->release_cb(sk); | 2364 | sk->sk_prot->release_cb(sk); |
| 2362 | 2365 | ||
| 2363 | sk->sk_lock.owned = 0; | 2366 | sock_release_ownership(sk); |
| 2364 | if (waitqueue_active(&sk->sk_lock.wq)) | 2367 | if (waitqueue_active(&sk->sk_lock.wq)) |
| 2365 | wake_up(&sk->sk_lock.wq); | 2368 | wake_up(&sk->sk_lock.wq); |
| 2366 | spin_unlock_bh(&sk->sk_lock.slock); | 2369 | spin_unlock_bh(&sk->sk_lock.slock); |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index bb075fc9a14f..3b01959bf4bb 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
| @@ -208,7 +208,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force) | |||
| 208 | } | 208 | } |
| 209 | 209 | ||
| 210 | work = frag_mem_limit(nf) - nf->low_thresh; | 210 | work = frag_mem_limit(nf) - nf->low_thresh; |
| 211 | while (work > 0) { | 211 | while (work > 0 || force) { |
| 212 | spin_lock(&nf->lru_lock); | 212 | spin_lock(&nf->lru_lock); |
| 213 | 213 | ||
| 214 | if (list_empty(&nf->lru_list)) { | 214 | if (list_empty(&nf->lru_list)) { |
| @@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, | |||
| 278 | 278 | ||
| 279 | atomic_inc(&qp->refcnt); | 279 | atomic_inc(&qp->refcnt); |
| 280 | hlist_add_head(&qp->list, &hb->chain); | 280 | hlist_add_head(&qp->list, &hb->chain); |
| 281 | inet_frag_lru_add(nf, qp); | ||
| 281 | spin_unlock(&hb->chain_lock); | 282 | spin_unlock(&hb->chain_lock); |
| 282 | read_unlock(&f->lock); | 283 | read_unlock(&f->lock); |
| 283 | inet_frag_lru_add(nf, qp); | 284 | |
| 284 | return qp; | 285 | return qp; |
| 285 | } | 286 | } |
| 286 | 287 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f0eb4e337ec8..17a11e65e57f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -767,6 +767,17 @@ void tcp_release_cb(struct sock *sk) | |||
| 767 | if (flags & (1UL << TCP_TSQ_DEFERRED)) | 767 | if (flags & (1UL << TCP_TSQ_DEFERRED)) |
| 768 | tcp_tsq_handler(sk); | 768 | tcp_tsq_handler(sk); |
| 769 | 769 | ||
| 770 | /* Here begins the tricky part : | ||
| 771 | * We are called from release_sock() with : | ||
| 772 | * 1) BH disabled | ||
| 773 | * 2) sk_lock.slock spinlock held | ||
| 774 | * 3) socket owned by us (sk->sk_lock.owned == 1) | ||
| 775 | * | ||
| 776 | * But following code is meant to be called from BH handlers, | ||
| 777 | * so we should keep BH disabled, but early release socket ownership | ||
| 778 | */ | ||
| 779 | sock_release_ownership(sk); | ||
| 780 | |||
| 770 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { | 781 | if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { |
| 771 | tcp_write_timer_handler(sk); | 782 | tcp_write_timer_handler(sk); |
| 772 | __sock_put(sk); | 783 | __sock_put(sk); |
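The net/core/sock.c and net/ipv4/tcp_output.c hunks above belong together: tcp_release_cb() runs code that expects BH-handler conditions, so it now gives up socket ownership itself before invoking the deferred handlers, and release_sock() must use the same helper instead of clearing the flag by hand. The helper is tiny; as added alongside this change it is essentially (quoted from memory, shown for context only):

    static inline void sock_release_ownership(struct sock *sk)
    {
        sk->sk_lock.owned = 0;
    }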
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index fdbfeca36d63..344e972426df 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -1103,8 +1103,11 @@ retry: | |||
| 1103 | * Lifetime is greater than REGEN_ADVANCE time units. In particular, | 1103 | * Lifetime is greater than REGEN_ADVANCE time units. In particular, |
| 1104 | * an implementation must not create a temporary address with a zero | 1104 | * an implementation must not create a temporary address with a zero |
| 1105 | * Preferred Lifetime. | 1105 | * Preferred Lifetime. |
| 1106 | * Use age calculation as in addrconf_verify to avoid unnecessary | ||
| 1107 | * temporary addresses being generated. | ||
| 1106 | */ | 1108 | */ |
| 1107 | if (tmp_prefered_lft <= regen_advance) { | 1109 | age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; |
| 1110 | if (tmp_prefered_lft <= regen_advance + age) { | ||
| 1108 | in6_ifa_put(ifp); | 1111 | in6_ifa_put(ifp); |
| 1109 | in6_dev_put(idev); | 1112 | in6_dev_put(idev); |
| 1110 | ret = -1; | 1113 | ret = -1; |
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c index cf77f3abfd06..447a7fbd1bb6 100644 --- a/net/ipv6/exthdrs_offload.c +++ b/net/ipv6/exthdrs_offload.c | |||
| @@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void) | |||
| 25 | int ret; | 25 | int ret; |
| 26 | 26 | ||
| 27 | ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); | 27 | ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); |
| 28 | if (!ret) | 28 | if (ret) |
| 29 | goto out; | 29 | goto out; |
| 30 | 30 | ||
| 31 | ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); | 31 | ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); |
| 32 | if (!ret) | 32 | if (ret) |
| 33 | goto out_rt; | 33 | goto out_rt; |
| 34 | 34 | ||
| 35 | out: | 35 | out: |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 11dac21e6586..fba54a407bb2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1513,7 +1513,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
| 1513 | if (!table) | 1513 | if (!table) |
| 1514 | goto out; | 1514 | goto out; |
| 1515 | 1515 | ||
| 1516 | rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table); | 1516 | rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); |
| 1517 | 1517 | ||
| 1518 | if (!rt) { | 1518 | if (!rt) { |
| 1519 | err = -ENOMEM; | 1519 | err = -ENOMEM; |
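The one-liner above is easier to read spelled out. RTF_ADDRCONF marks routes learned from router advertisements, which a remote host can trigger at will, so only locally configured routes keep the DST_NOCOUNT exemption from dst accounting (this is a reading of the change, not text from the patch):

    int flags = (cfg->fc_flags & RTF_ADDRCONF) ? 0            /* RA-learned: account it */
                                               : DST_NOCOUNT; /* locally added: exempt */
    rt = ip6_dst_alloc(net, NULL, flags, table);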
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 735d0f60c83a..85d9d94c0a3c 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -112,7 +112,6 @@ struct l2tp_net { | |||
| 112 | spinlock_t l2tp_session_hlist_lock; | 112 | spinlock_t l2tp_session_hlist_lock; |
| 113 | }; | 113 | }; |
| 114 | 114 | ||
| 115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
| 116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | 115 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
| 117 | 116 | ||
| 118 | static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) | 117 | static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) |
| @@ -1863,7 +1862,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete); | |||
| 1863 | /* We come here whenever a session's send_seq, cookie_len or | 1862 | /* We come here whenever a session's send_seq, cookie_len or |
| 1864 | * l2specific_len parameters are set. | 1863 | * l2specific_len parameters are set. |
| 1865 | */ | 1864 | */ |
| 1866 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version) | 1865 | void l2tp_session_set_header_len(struct l2tp_session *session, int version) |
| 1867 | { | 1866 | { |
| 1868 | if (version == L2TP_HDR_VER_2) { | 1867 | if (version == L2TP_HDR_VER_2) { |
| 1869 | session->hdr_len = 6; | 1868 | session->hdr_len = 6; |
| @@ -1876,6 +1875,7 @@ static void l2tp_session_set_header_len(struct l2tp_session *session, int versio | |||
| 1876 | } | 1875 | } |
| 1877 | 1876 | ||
| 1878 | } | 1877 | } |
| 1878 | EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); | ||
| 1879 | 1879 | ||
| 1880 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) | 1880 | struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) |
| 1881 | { | 1881 | { |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 1f01ba3435bc..3f93ccd6ba97 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
| @@ -263,6 +263,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
| 263 | int length, int (*payload_hook)(struct sk_buff *skb)); | 263 | int length, int (*payload_hook)(struct sk_buff *skb)); |
| 264 | int l2tp_session_queue_purge(struct l2tp_session *session); | 264 | int l2tp_session_queue_purge(struct l2tp_session *session); |
| 265 | int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | 265 | int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); |
| 266 | void l2tp_session_set_header_len(struct l2tp_session *session, int version); | ||
| 266 | 267 | ||
| 267 | int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, | 268 | int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, |
| 268 | int hdr_len); | 269 | int hdr_len); |
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 4cfd722e9153..bd7387adea9e 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
| @@ -578,8 +578,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf | |||
| 578 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) | 578 | if (info->attrs[L2TP_ATTR_RECV_SEQ]) |
| 579 | session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); | 579 | session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); |
| 580 | 580 | ||
| 581 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) | 581 | if (info->attrs[L2TP_ATTR_SEND_SEQ]) { |
| 582 | session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); | 582 | session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); |
| 583 | l2tp_session_set_header_len(session, session->tunnel->version); | ||
| 584 | } | ||
| 583 | 585 | ||
| 584 | if (info->attrs[L2TP_ATTR_LNS_MODE]) | 586 | if (info->attrs[L2TP_ATTR_LNS_MODE]) |
| 585 | session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); | 587 | session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index be5fadf34739..5990919356a5 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
| @@ -254,12 +254,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int | |||
| 254 | po = pppox_sk(sk); | 254 | po = pppox_sk(sk); |
| 255 | ppp_input(&po->chan, skb); | 255 | ppp_input(&po->chan, skb); |
| 256 | } else { | 256 | } else { |
| 257 | l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", | 257 | l2tp_dbg(session, PPPOL2TP_MSG_DATA, |
| 258 | session->name); | 258 | "%s: recv %d byte data frame, passing to L2TP socket\n", |
| 259 | session->name, data_len); | ||
| 259 | 260 | ||
| 260 | /* Not bound. Nothing we can do, so discard. */ | 261 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
| 261 | atomic_long_inc(&session->stats.rx_errors); | 262 | atomic_long_inc(&session->stats.rx_errors); |
| 262 | kfree_skb(skb); | 263 | kfree_skb(skb); |
| 264 | } | ||
| 263 | } | 265 | } |
| 264 | 266 | ||
| 265 | return; | 267 | return; |
| @@ -1312,6 +1314,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, | |||
| 1312 | po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : | 1314 | po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : |
| 1313 | PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; | 1315 | PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; |
| 1314 | } | 1316 | } |
| 1317 | l2tp_session_set_header_len(session, session->tunnel->version); | ||
| 1315 | l2tp_info(session, PPPOL2TP_MSG_CONTROL, | 1318 | l2tp_info(session, PPPOL2TP_MSG_CONTROL, |
| 1316 | "%s: set send_seq=%d\n", | 1319 | "%s: set send_seq=%d\n", |
| 1317 | session->name, session->send_seq); | 1320 | session->name, session->send_seq); |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index f43613a97dd6..0c1ecfdf9a12 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
| @@ -100,6 +100,12 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, | |||
| 100 | } | 100 | } |
| 101 | max_bw = max(max_bw, width); | 101 | max_bw = max(max_bw, width); |
| 102 | } | 102 | } |
| 103 | |||
| 104 | /* use the configured bandwidth in case of monitor interface */ | ||
| 105 | sdata = rcu_dereference(local->monitor_sdata); | ||
| 106 | if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf) | ||
| 107 | max_bw = max(max_bw, conf->def.width); | ||
| 108 | |||
| 103 | rcu_read_unlock(); | 109 | rcu_read_unlock(); |
| 104 | 110 | ||
| 105 | return max_bw; | 111 | return max_bw; |
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c index 2802f9d9279d..ad8b377b4b9f 100644 --- a/net/mac80211/mesh_ps.c +++ b/net/mac80211/mesh_ps.c | |||
| @@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta) | |||
| 36 | sdata->vif.addr); | 36 | sdata->vif.addr); |
| 37 | nullfunc->frame_control = fc; | 37 | nullfunc->frame_control = fc; |
| 38 | nullfunc->duration_id = 0; | 38 | nullfunc->duration_id = 0; |
| 39 | nullfunc->seq_ctrl = 0; | ||
| 39 | /* no address resolution for this frame -> set addr 1 immediately */ | 40 | /* no address resolution for this frame -> set addr 1 immediately */ |
| 40 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); | 41 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); |
| 41 | memset(skb_put(skb, 2), 0, 2); /* append QoS control field */ | 42 | memset(skb_put(skb, 2), 0, 2); /* append QoS control field */ |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a023b432143b..137a192e64bc 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
| @@ -1206,6 +1206,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, | |||
| 1206 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); | 1206 | memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); |
| 1207 | memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); | 1207 | memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); |
| 1208 | memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); | 1208 | memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); |
| 1209 | nullfunc->seq_ctrl = 0; | ||
| 1209 | 1210 | ||
| 1210 | skb->priority = tid; | 1211 | skb->priority = tid; |
| 1211 | skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); | 1212 | skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 1313145e3b86..a07d55e75698 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -273,11 +273,12 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | |||
| 273 | 273 | ||
| 274 | void qdisc_list_add(struct Qdisc *q) | 274 | void qdisc_list_add(struct Qdisc *q) |
| 275 | { | 275 | { |
| 276 | struct Qdisc *root = qdisc_dev(q)->qdisc; | 276 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { |
| 277 | struct Qdisc *root = qdisc_dev(q)->qdisc; | ||
| 277 | 278 | ||
| 278 | WARN_ON_ONCE(root == &noop_qdisc); | 279 | WARN_ON_ONCE(root == &noop_qdisc); |
| 279 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) | ||
| 280 | list_add_tail(&q->list, &root->list); | 280 | list_add_tail(&q->list, &root->list); |
| 281 | } | ||
| 281 | } | 282 | } |
| 282 | EXPORT_SYMBOL(qdisc_list_add); | 283 | EXPORT_SYMBOL(qdisc_list_add); |
| 283 | 284 | ||
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 08ef7a42c0e4..21e251766eb1 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c | |||
| @@ -601,6 +601,7 @@ static int fq_resize(struct Qdisc *sch, u32 log) | |||
| 601 | { | 601 | { |
| 602 | struct fq_sched_data *q = qdisc_priv(sch); | 602 | struct fq_sched_data *q = qdisc_priv(sch); |
| 603 | struct rb_root *array; | 603 | struct rb_root *array; |
| 604 | void *old_fq_root; | ||
| 604 | u32 idx; | 605 | u32 idx; |
| 605 | 606 | ||
| 606 | if (q->fq_root && log == q->fq_trees_log) | 607 | if (q->fq_root && log == q->fq_trees_log) |
| @@ -615,13 +616,19 @@ static int fq_resize(struct Qdisc *sch, u32 log) | |||
| 615 | for (idx = 0; idx < (1U << log); idx++) | 616 | for (idx = 0; idx < (1U << log); idx++) |
| 616 | array[idx] = RB_ROOT; | 617 | array[idx] = RB_ROOT; |
| 617 | 618 | ||
| 618 | if (q->fq_root) { | 619 | sch_tree_lock(sch); |
| 619 | fq_rehash(q, q->fq_root, q->fq_trees_log, array, log); | 620 | |
| 620 | fq_free(q->fq_root); | 621 | old_fq_root = q->fq_root; |
| 621 | } | 622 | if (old_fq_root) |
| 623 | fq_rehash(q, old_fq_root, q->fq_trees_log, array, log); | ||
| 624 | |||
| 622 | q->fq_root = array; | 625 | q->fq_root = array; |
| 623 | q->fq_trees_log = log; | 626 | q->fq_trees_log = log; |
| 624 | 627 | ||
| 628 | sch_tree_unlock(sch); | ||
| 629 | |||
| 630 | fq_free(old_fq_root); | ||
| 631 | |||
| 625 | return 0; | 632 | return 0; |
| 626 | } | 633 | } |
| 627 | 634 | ||
| @@ -697,9 +704,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) | |||
| 697 | q->flow_refill_delay = usecs_to_jiffies(usecs_delay); | 704 | q->flow_refill_delay = usecs_to_jiffies(usecs_delay); |
| 698 | } | 705 | } |
| 699 | 706 | ||
| 700 | if (!err) | 707 | if (!err) { |
| 708 | sch_tree_unlock(sch); | ||
| 701 | err = fq_resize(sch, fq_log); | 709 | err = fq_resize(sch, fq_log); |
| 702 | 710 | sch_tree_lock(sch); | |
| 711 | } | ||
| 703 | while (sch->q.qlen > sch->limit) { | 712 | while (sch->q.qlen > sch->limit) { |
| 704 | struct sk_buff *skb = fq_dequeue(sch); | 713 | struct sk_buff *skb = fq_dequeue(sch); |
| 705 | 714 | ||
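The sch_fq changes above implement a common pattern: the new hash array comes from a sleeping allocation, so it must be obtained outside the qdisc tree lock, while the rehash and pointer swap happen under the lock and the old array is freed only after the lock is dropped. As a sketch (alloc_array()/rehash() are placeholders for fq_alloc_node()/fq_rehash()):

    new = alloc_array(log);          /* may sleep - no sch_tree_lock() held */
    sch_tree_lock(sch);
    old = q->fq_root;
    if (old)
        rehash(q, old, new, log);
    q->fq_root = new;
    q->fq_trees_log = log;
    sch_tree_unlock(sch);
    fq_free(old);                    /* safe: no reader can still see 'old' */

This is also why fq_change() now releases the tree lock around fq_resize(): the resize takes the lock itself and must not be entered with it already held.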
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 632090b961c3..3a1767ef3201 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -1421,8 +1421,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk) | |||
| 1421 | BUG_ON(!list_empty(&chunk->list)); | 1421 | BUG_ON(!list_empty(&chunk->list)); |
| 1422 | list_del_init(&chunk->transmitted_list); | 1422 | list_del_init(&chunk->transmitted_list); |
| 1423 | 1423 | ||
| 1424 | /* Free the chunk skb data and the SCTP_chunk stub itself. */ | 1424 | consume_skb(chunk->skb); |
| 1425 | dev_kfree_skb(chunk->skb); | 1425 | consume_skb(chunk->auth_chunk); |
| 1426 | 1426 | ||
| 1427 | SCTP_DBG_OBJCNT_DEC(chunk); | 1427 | SCTP_DBG_OBJCNT_DEC(chunk); |
| 1428 | kmem_cache_free(sctp_chunk_cachep, chunk); | 1428 | kmem_cache_free(sctp_chunk_cachep, chunk); |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ae65b6b5973a..01e002430c85 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -760,7 +760,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, | |||
| 760 | 760 | ||
| 761 | /* Make sure that we and the peer are AUTH capable */ | 761 | /* Make sure that we and the peer are AUTH capable */ |
| 762 | if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { | 762 | if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { |
| 763 | kfree_skb(chunk->auth_chunk); | ||
| 764 | sctp_association_free(new_asoc); | 763 | sctp_association_free(new_asoc); |
| 765 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); | 764 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); |
| 766 | } | 765 | } |
| @@ -775,10 +774,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, | |||
| 775 | auth.transport = chunk->transport; | 774 | auth.transport = chunk->transport; |
| 776 | 775 | ||
| 777 | ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); | 776 | ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); |
| 778 | |||
| 779 | /* We can now safely free the auth_chunk clone */ | ||
| 780 | kfree_skb(chunk->auth_chunk); | ||
| 781 | |||
| 782 | if (ret != SCTP_IERROR_NO_ERROR) { | 777 | if (ret != SCTP_IERROR_NO_ERROR) { |
| 783 | sctp_association_free(new_asoc); | 778 | sctp_association_free(new_asoc); |
| 784 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); | 779 | return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); |
diff --git a/net/socket.c b/net/socket.c index 879933aaed4c..a19ae1968d37 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -450,16 +450,17 @@ EXPORT_SYMBOL(sockfd_lookup); | |||
| 450 | 450 | ||
| 451 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) | 451 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) |
| 452 | { | 452 | { |
| 453 | struct file *file; | 453 | struct fd f = fdget(fd); |
| 454 | struct socket *sock; | 454 | struct socket *sock; |
| 455 | 455 | ||
| 456 | *err = -EBADF; | 456 | *err = -EBADF; |
| 457 | file = fget_light(fd, fput_needed); | 457 | if (f.file) { |
| 458 | if (file) { | 458 | sock = sock_from_file(f.file, err); |
| 459 | sock = sock_from_file(file, err); | 459 | if (likely(sock)) { |
| 460 | if (sock) | 460 | *fput_needed = f.flags; |
| 461 | return sock; | 461 | return sock; |
| 462 | fput_light(file, *fput_needed); | 462 | } |
| 463 | fdput(f); | ||
| 463 | } | 464 | } |
| 464 | return NULL; | 465 | return NULL; |
| 465 | } | 466 | } |
| @@ -1985,6 +1986,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
| 1985 | { | 1986 | { |
| 1986 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) | 1987 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) |
| 1987 | return -EFAULT; | 1988 | return -EFAULT; |
| 1989 | |||
| 1990 | if (kmsg->msg_namelen < 0) | ||
| 1991 | return -EINVAL; | ||
| 1992 | |||
| 1988 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 1993 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
| 1989 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); | 1994 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
| 1990 | return 0; | 1995 | return 0; |
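Two independent fixes above. sockfd_lookup_light() is converted to the struct fd API, whose usual shape is:

    struct fd f = fdget(fd);
    if (!f.file)
        return -EBADF;
    /* ... use f.file ... */
    fdput(f);        /* drops the reference only if fdget() actually took one */

And copy_msghdr_from_user() now rejects a negative msg_namelen outright; msg_namelen is a plain int copied from userspace, and a negative value would otherwise only be clamped by the signed/unsigned comparison against sizeof(struct sockaddr_storage), so failing the call with -EINVAL is the safer behaviour.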
diff --git a/net/tipc/config.c b/net/tipc/config.c index e74eef2e7490..e6d721692ae0 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
| @@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr, | |||
| 376 | struct tipc_cfg_msg_hdr *req_hdr; | 376 | struct tipc_cfg_msg_hdr *req_hdr; |
| 377 | struct tipc_cfg_msg_hdr *rep_hdr; | 377 | struct tipc_cfg_msg_hdr *rep_hdr; |
| 378 | struct sk_buff *rep_buf; | 378 | struct sk_buff *rep_buf; |
| 379 | int ret; | ||
| 380 | 379 | ||
| 381 | /* Validate configuration message header (ignore invalid message) */ | 380 | /* Validate configuration message header (ignore invalid message) */ |
| 382 | req_hdr = (struct tipc_cfg_msg_hdr *)buf; | 381 | req_hdr = (struct tipc_cfg_msg_hdr *)buf; |
| @@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr, | |||
| 398 | memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr)); | 397 | memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr)); |
| 399 | rep_hdr->tcm_len = htonl(rep_buf->len); | 398 | rep_hdr->tcm_len = htonl(rep_buf->len); |
| 400 | rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST); | 399 | rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST); |
| 401 | 400 | tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data, | |
| 402 | ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data, | 401 | rep_buf->len); |
| 403 | rep_buf->len); | ||
| 404 | if (ret < 0) | ||
| 405 | pr_err("Sending cfg reply message failed, no memory\n"); | ||
| 406 | |||
| 407 | kfree_skb(rep_buf); | 402 | kfree_skb(rep_buf); |
| 408 | } | 403 | } |
| 409 | } | 404 | } |
diff --git a/net/tipc/handler.c b/net/tipc/handler.c index e4bc8a296744..1fabf160501f 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c | |||
| @@ -58,7 +58,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument) | |||
| 58 | 58 | ||
| 59 | spin_lock_bh(&qitem_lock); | 59 | spin_lock_bh(&qitem_lock); |
| 60 | if (!handler_enabled) { | 60 | if (!handler_enabled) { |
| 61 | pr_err("Signal request ignored by handler\n"); | ||
| 62 | spin_unlock_bh(&qitem_lock); | 61 | spin_unlock_bh(&qitem_lock); |
| 63 | return -ENOPROTOOPT; | 62 | return -ENOPROTOOPT; |
| 64 | } | 63 | } |
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 48302be175ce..042e8e3cabc0 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c | |||
| @@ -941,17 +941,48 @@ int tipc_nametbl_init(void) | |||
| 941 | return 0; | 941 | return 0; |
| 942 | } | 942 | } |
| 943 | 943 | ||
| 944 | /** | ||
| 945 | * tipc_purge_publications - remove all publications for a given type | ||
| 946 | * | ||
| 947 | * tipc_nametbl_lock must be held when calling this function | ||
| 948 | */ | ||
| 949 | static void tipc_purge_publications(struct name_seq *seq) | ||
| 950 | { | ||
| 951 | struct publication *publ, *safe; | ||
| 952 | struct sub_seq *sseq; | ||
| 953 | struct name_info *info; | ||
| 954 | |||
| 955 | if (!seq->sseqs) { | ||
| 956 | nameseq_delete_empty(seq); | ||
| 957 | return; | ||
| 958 | } | ||
| 959 | sseq = seq->sseqs; | ||
| 960 | info = sseq->info; | ||
| 961 | list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { | ||
| 962 | tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, | ||
| 963 | publ->ref, publ->key); | ||
| 964 | } | ||
| 965 | } | ||
| 966 | |||
| 944 | void tipc_nametbl_stop(void) | 967 | void tipc_nametbl_stop(void) |
| 945 | { | 968 | { |
| 946 | u32 i; | 969 | u32 i; |
| 970 | struct name_seq *seq; | ||
| 971 | struct hlist_head *seq_head; | ||
| 972 | struct hlist_node *safe; | ||
| 947 | 973 | ||
| 948 | /* Verify name table is empty, then release it */ | 974 | /* Verify name table is empty and purge any lingering |
| 975 | * publications, then release the name table | ||
| 976 | */ | ||
| 949 | write_lock_bh(&tipc_nametbl_lock); | 977 | write_lock_bh(&tipc_nametbl_lock); |
| 950 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { | 978 | for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { |
| 951 | if (hlist_empty(&table.types[i])) | 979 | if (hlist_empty(&table.types[i])) |
| 952 | continue; | 980 | continue; |
| 953 | pr_err("nametbl_stop(): orphaned hash chain detected\n"); | 981 | seq_head = &table.types[i]; |
| 954 | break; | 982 | hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { |
| 983 | tipc_purge_publications(seq); | ||
| 984 | } | ||
| 985 | continue; | ||
| 955 | } | 986 | } |
| 956 | kfree(table.types); | 987 | kfree(table.types); |
| 957 | table.types = NULL; | 988 | table.types = NULL; |
diff --git a/net/tipc/server.c b/net/tipc/server.c index 373979789a73..646a930eefbf 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
| @@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con); | |||
| 87 | static void tipc_conn_kref_release(struct kref *kref) | 87 | static void tipc_conn_kref_release(struct kref *kref) |
| 88 | { | 88 | { |
| 89 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); | 89 | struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); |
| 90 | struct tipc_server *s = con->server; | ||
| 91 | 90 | ||
| 92 | if (con->sock) { | 91 | if (con->sock) { |
| 93 | tipc_sock_release_local(con->sock); | 92 | tipc_sock_release_local(con->sock); |
| @@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref) | |||
| 95 | } | 94 | } |
| 96 | 95 | ||
| 97 | tipc_clean_outqueues(con); | 96 | tipc_clean_outqueues(con); |
| 98 | |||
| 99 | if (con->conid) | ||
| 100 | s->tipc_conn_shutdown(con->conid, con->usr_data); | ||
| 101 | |||
| 102 | kfree(con); | 97 | kfree(con); |
| 103 | } | 98 | } |
| 104 | 99 | ||
| @@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con) | |||
| 181 | struct tipc_server *s = con->server; | 176 | struct tipc_server *s = con->server; |
| 182 | 177 | ||
| 183 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { | 178 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { |
| 179 | if (con->conid) | ||
| 180 | s->tipc_conn_shutdown(con->conid, con->usr_data); | ||
| 181 | |||
| 184 | spin_lock_bh(&s->idr_lock); | 182 | spin_lock_bh(&s->idr_lock); |
| 185 | idr_remove(&s->conn_idr, con->conid); | 183 | idr_remove(&s->conn_idr, con->conid); |
| 186 | s->idr_in_use--; | 184 | s->idr_in_use--; |
| @@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, | |||
| 429 | list_add_tail(&e->list, &con->outqueue); | 427 | list_add_tail(&e->list, &con->outqueue); |
| 430 | spin_unlock_bh(&con->outqueue_lock); | 428 | spin_unlock_bh(&con->outqueue_lock); |
| 431 | 429 | ||
| 432 | if (test_bit(CF_CONNECTED, &con->flags)) | 430 | if (test_bit(CF_CONNECTED, &con->flags)) { |
| 433 | if (!queue_work(s->send_wq, &con->swork)) | 431 | if (!queue_work(s->send_wq, &con->swork)) |
| 434 | conn_put(con); | 432 | conn_put(con); |
| 435 | 433 | } else { | |
| 434 | conn_put(con); | ||
| 435 | } | ||
| 436 | return 0; | 436 | return 0; |
| 437 | } | 437 | } |
| 438 | 438 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a4cf274455aa..0ed0eaa62f29 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -997,7 +997,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo) | |||
| 997 | 997 | ||
| 998 | for (;;) { | 998 | for (;;) { |
| 999 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 999 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
| 1000 | if (skb_queue_empty(&sk->sk_receive_queue)) { | 1000 | if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { |
| 1001 | if (sock->state == SS_DISCONNECTING) { | 1001 | if (sock->state == SS_DISCONNECTING) { |
| 1002 | err = -ENOTCONN; | 1002 | err = -ENOTCONN; |
| 1003 | break; | 1003 | break; |
| @@ -1623,7 +1623,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) | |||
| 1623 | for (;;) { | 1623 | for (;;) { |
| 1624 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, | 1624 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, |
| 1625 | TASK_INTERRUPTIBLE); | 1625 | TASK_INTERRUPTIBLE); |
| 1626 | if (skb_queue_empty(&sk->sk_receive_queue)) { | 1626 | if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { |
| 1627 | release_sock(sk); | 1627 | release_sock(sk); |
| 1628 | timeo = schedule_timeout(timeo); | 1628 | timeo = schedule_timeout(timeo); |
| 1629 | lock_sock(sk); | 1629 | lock_sock(sk); |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 7cb0bd5b1176..11c9ae00837d 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
| @@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, | |||
| 96 | { | 96 | { |
| 97 | struct tipc_subscriber *subscriber = sub->subscriber; | 97 | struct tipc_subscriber *subscriber = sub->subscriber; |
| 98 | struct kvec msg_sect; | 98 | struct kvec msg_sect; |
| 99 | int ret; | ||
| 100 | 99 | ||
| 101 | msg_sect.iov_base = (void *)&sub->evt; | 100 | msg_sect.iov_base = (void *)&sub->evt; |
| 102 | msg_sect.iov_len = sizeof(struct tipc_event); | 101 | msg_sect.iov_len = sizeof(struct tipc_event); |
| 103 | |||
| 104 | sub->evt.event = htohl(event, sub->swap); | 102 | sub->evt.event = htohl(event, sub->swap); |
| 105 | sub->evt.found_lower = htohl(found_lower, sub->swap); | 103 | sub->evt.found_lower = htohl(found_lower, sub->swap); |
| 106 | sub->evt.found_upper = htohl(found_upper, sub->swap); | 104 | sub->evt.found_upper = htohl(found_upper, sub->swap); |
| 107 | sub->evt.port.ref = htohl(port_ref, sub->swap); | 105 | sub->evt.port.ref = htohl(port_ref, sub->swap); |
| 108 | sub->evt.port.node = htohl(node, sub->swap); | 106 | sub->evt.port.node = htohl(node, sub->swap); |
| 109 | ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, | 107 | tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base, |
| 110 | msg_sect.iov_base, msg_sect.iov_len); | 108 | msg_sect.iov_len); |
| 111 | if (ret < 0) | ||
| 112 | pr_err("Sending subscription event failed, no memory\n"); | ||
| 113 | } | 109 | } |
| 114 | 110 | ||
| 115 | /** | 111 | /** |
| @@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub) | |||
| 153 | /* The spin lock per subscriber is used to protect its members */ | 149 | /* The spin lock per subscriber is used to protect its members */ |
| 154 | spin_lock_bh(&subscriber->lock); | 150 | spin_lock_bh(&subscriber->lock); |
| 155 | 151 | ||
| 156 | /* Validate if the connection related to the subscriber is | ||
| 157 | * closed (in case subscriber is terminating) | ||
| 158 | */ | ||
| 159 | if (subscriber->conid == 0) { | ||
| 160 | spin_unlock_bh(&subscriber->lock); | ||
| 161 | return; | ||
| 162 | } | ||
| 163 | |||
| 164 | /* Validate timeout (in case subscription is being cancelled) */ | 152 | /* Validate timeout (in case subscription is being cancelled) */ |
| 165 | if (sub->timeout == TIPC_WAIT_FOREVER) { | 153 | if (sub->timeout == TIPC_WAIT_FOREVER) { |
| 166 | spin_unlock_bh(&subscriber->lock); | 154 | spin_unlock_bh(&subscriber->lock); |
| @@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber) | |||
| 215 | 203 | ||
| 216 | spin_lock_bh(&subscriber->lock); | 204 | spin_lock_bh(&subscriber->lock); |
| 217 | 205 | ||
| 218 | /* Invalidate subscriber reference */ | ||
| 219 | subscriber->conid = 0; | ||
| 220 | |||
| 221 | /* Destroy any existing subscriptions for subscriber */ | 206 | /* Destroy any existing subscriptions for subscriber */ |
| 222 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, | 207 | list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, |
| 223 | subscription_list) { | 208 | subscription_list) { |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 29fc8bee9702..ce6ec6c2f4de 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -163,9 +163,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) | |||
| 163 | 163 | ||
| 164 | static inline unsigned int unix_hash_fold(__wsum n) | 164 | static inline unsigned int unix_hash_fold(__wsum n) |
| 165 | { | 165 | { |
| 166 | unsigned int hash = (__force unsigned int)n; | 166 | unsigned int hash = (__force unsigned int)csum_fold(n); |
| 167 | 167 | ||
| 168 | hash ^= hash>>16; | ||
| 169 | hash ^= hash>>8; | 168 | hash ^= hash>>8; |
| 170 | return hash&(UNIX_HASH_SIZE-1); | 169 | return hash&(UNIX_HASH_SIZE-1); |
| 171 | } | 170 | } |
diff --git a/net/wireless/core.c b/net/wireless/core.c index 010892b81a06..a3bf18d11609 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -788,8 +788,6 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, | |||
| 788 | default: | 788 | default: |
| 789 | break; | 789 | break; |
| 790 | } | 790 | } |
| 791 | |||
| 792 | wdev->beacon_interval = 0; | ||
| 793 | } | 791 | } |
| 794 | 792 | ||
| 795 | static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | 793 | static int cfg80211_netdev_notifier_call(struct notifier_block *nb, |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 276e84b8a8e5..10085de886fe 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
| @@ -330,7 +330,8 @@ static void write_src(void) | |||
| 330 | printf("\tPTR\t_text + %#llx\n", | 330 | printf("\tPTR\t_text + %#llx\n", |
| 331 | table[i].addr - _text); | 331 | table[i].addr - _text); |
| 332 | else | 332 | else |
| 333 | printf("\tPTR\t%#llx\n", table[i].addr); | 333 | printf("\tPTR\t_text - %#llx\n", |
| 334 | _text - table[i].addr); | ||
| 334 | } else { | 335 | } else { |
| 335 | printf("\tPTR\t%#llx\n", table[i].addr); | 336 | printf("\tPTR\t%#llx\n", table[i].addr); |
| 336 | } | 337 | } |
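The kallsyms hunk changes write_src() so that a symbol sitting below _text is emitted as "_text - offset" instead of as an absolute value, keeping every entry in the table expressed relative to _text. A rough stand-alone sketch of that branch logic; the base_relative flag and function name are only for this illustration, not kallsyms' actual interface:

    #include <stdio.h>

    typedef unsigned long long u64;

    /* Print a symbol address relative to _text whenever possible, using
     * subtraction in whichever direction avoids unsigned underflow. */
    static void emit_ptr(u64 addr, u64 _text, int base_relative)
    {
            if (base_relative) {
                    if (addr >= _text)
                            printf("\tPTR\t_text + %#llx\n", addr - _text);
                    else
                            printf("\tPTR\t_text - %#llx\n", _text - addr);
            } else {
                    printf("\tPTR\t%#llx\n", addr);
            }
    }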
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 850296a1e0ff..8d0a84436674 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -3616,6 +3616,19 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec, | |||
| 3616 | } | 3616 | } |
| 3617 | } | 3617 | } |
| 3618 | 3618 | ||
| 3619 | static void alc_no_shutup(struct hda_codec *codec) | ||
| 3620 | { | ||
| 3621 | } | ||
| 3622 | |||
| 3623 | static void alc_fixup_no_shutup(struct hda_codec *codec, | ||
| 3624 | const struct hda_fixup *fix, int action) | ||
| 3625 | { | ||
| 3626 | if (action == HDA_FIXUP_ACT_PRE_PROBE) { | ||
| 3627 | struct alc_spec *spec = codec->spec; | ||
| 3628 | spec->shutup = alc_no_shutup; | ||
| 3629 | } | ||
| 3630 | } | ||
| 3631 | |||
| 3619 | static void alc_fixup_headset_mode_alc668(struct hda_codec *codec, | 3632 | static void alc_fixup_headset_mode_alc668(struct hda_codec *codec, |
| 3620 | const struct hda_fixup *fix, int action) | 3633 | const struct hda_fixup *fix, int action) |
| 3621 | { | 3634 | { |
| @@ -3844,6 +3857,7 @@ enum { | |||
| 3844 | ALC269_FIXUP_HP_GPIO_LED, | 3857 | ALC269_FIXUP_HP_GPIO_LED, |
| 3845 | ALC269_FIXUP_INV_DMIC, | 3858 | ALC269_FIXUP_INV_DMIC, |
| 3846 | ALC269_FIXUP_LENOVO_DOCK, | 3859 | ALC269_FIXUP_LENOVO_DOCK, |
| 3860 | ALC269_FIXUP_NO_SHUTUP, | ||
| 3847 | ALC286_FIXUP_SONY_MIC_NO_PRESENCE, | 3861 | ALC286_FIXUP_SONY_MIC_NO_PRESENCE, |
| 3848 | ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, | 3862 | ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT, |
| 3849 | ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, | 3863 | ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, |
| @@ -4020,6 +4034,10 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 4020 | .type = HDA_FIXUP_FUNC, | 4034 | .type = HDA_FIXUP_FUNC, |
| 4021 | .v.func = alc_fixup_inv_dmic_0x12, | 4035 | .v.func = alc_fixup_inv_dmic_0x12, |
| 4022 | }, | 4036 | }, |
| 4037 | [ALC269_FIXUP_NO_SHUTUP] = { | ||
| 4038 | .type = HDA_FIXUP_FUNC, | ||
| 4039 | .v.func = alc_fixup_no_shutup, | ||
| 4040 | }, | ||
| 4023 | [ALC269_FIXUP_LENOVO_DOCK] = { | 4041 | [ALC269_FIXUP_LENOVO_DOCK] = { |
| 4024 | .type = HDA_FIXUP_PINS, | 4042 | .type = HDA_FIXUP_PINS, |
| 4025 | .v.pins = (const struct hda_pintbl[]) { | 4043 | .v.pins = (const struct hda_pintbl[]) { |
| @@ -4405,6 +4423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 4405 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4423 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
| 4406 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4424 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
| 4407 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4425 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
| 4426 | SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), | ||
| 4408 | SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4427 | SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
| 4409 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), | 4428 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), |
| 4410 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 4429 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
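The patch_realtek hunks follow a common quirk pattern: a subsystem ID (here Lenovo 0x17aa:0x3978, the IdeaPad Y410P) selects a fixup which, at PRE_PROBE time, replaces the codec's shutup hook with an empty function. Stripped of the HDA specifics, the shape of the pattern is roughly the following; the struct and names are invented for illustration:

    /* Sketch of "override a driver hook with a no-op via a fixup". */
    struct codec {
            void (*shutup)(struct codec *codec);   /* hook run on suspend/free */
    };

    static void noop_shutup(struct codec *codec)
    {
            /* intentionally empty: skip the pin shutdown on this model */
    }

    static void fixup_no_shutup(struct codec *codec, int pre_probe)
    {
            if (pre_probe)
                    codec->shutup = noop_shutup;   /* install the no-op before probe */
    }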
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c index 75d0ad5d2dcb..647a72cda005 100644 --- a/sound/soc/codecs/88pm860x-codec.c +++ b/sound/soc/codecs/88pm860x-codec.c | |||
| @@ -1328,6 +1328,9 @@ static int pm860x_probe(struct snd_soc_codec *codec) | |||
| 1328 | pm860x->codec = codec; | 1328 | pm860x->codec = codec; |
| 1329 | 1329 | ||
| 1330 | codec->control_data = pm860x->regmap; | 1330 | codec->control_data = pm860x->regmap; |
| 1331 | ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); | ||
| 1332 | if (ret) | ||
| 1333 | return ret; | ||
| 1331 | 1334 | ||
| 1332 | for (i = 0; i < 4; i++) { | 1335 | for (i = 0; i < 4; i++) { |
| 1333 | ret = request_threaded_irq(pm860x->irq[i], NULL, | 1336 | ret = request_threaded_irq(pm860x->irq[i], NULL, |
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index 52e7cb08434b..fa2b8e07f420 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c | |||
| @@ -210,7 +210,7 @@ out: | |||
| 210 | static int si476x_codec_probe(struct snd_soc_codec *codec) | 210 | static int si476x_codec_probe(struct snd_soc_codec *codec) |
| 211 | { | 211 | { |
| 212 | codec->control_data = dev_get_regmap(codec->dev->parent, NULL); | 212 | codec->control_data = dev_get_regmap(codec->dev->parent, NULL); |
| 213 | return 0; | 213 | return snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); |
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | static struct snd_soc_dai_ops si476x_dai_ops = { | 216 | static struct snd_soc_dai_ops si476x_dai_ops = { |
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c index 3fde9e402710..d163e18d85d4 100644 --- a/sound/soc/omap/n810.c +++ b/sound/soc/omap/n810.c | |||
| @@ -305,7 +305,9 @@ static int __init n810_soc_init(void) | |||
| 305 | int err; | 305 | int err; |
| 306 | struct device *dev; | 306 | struct device *dev; |
| 307 | 307 | ||
| 308 | if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax())) | 308 | if (!of_have_populated_dt() || |
| 309 | (!of_machine_is_compatible("nokia,n810") && | ||
| 310 | !of_machine_is_compatible("nokia,n810-wimax"))) | ||
| 309 | return -ENODEV; | 311 | return -ENODEV; |
| 310 | 312 | ||
| 311 | n810_snd_device = platform_device_alloc("soc-audio", -1); | 313 | n810_snd_device = platform_device_alloc("soc-audio", -1); |
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 47e1ce771e65..28522bd03b8e 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c | |||
| @@ -1989,6 +1989,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card) | |||
| 1989 | 1989 | ||
| 1990 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list); | 1990 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list); |
| 1991 | if (paths < 0) { | 1991 | if (paths < 0) { |
| 1992 | dpcm_path_put(&list); | ||
| 1992 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", | 1993 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", |
| 1993 | fe->dai_link->name, "playback"); | 1994 | fe->dai_link->name, "playback"); |
| 1994 | mutex_unlock(&card->mutex); | 1995 | mutex_unlock(&card->mutex); |
| @@ -2018,6 +2019,7 @@ capture: | |||
| 2018 | 2019 | ||
| 2019 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list); | 2020 | paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list); |
| 2020 | if (paths < 0) { | 2021 | if (paths < 0) { |
| 2022 | dpcm_path_put(&list); | ||
| 2021 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", | 2023 | dev_warn(fe->dev, "ASoC: %s no valid %s path\n", |
| 2022 | fe->dai_link->name, "capture"); | 2024 | fe->dai_link->name, "capture"); |
| 2023 | mutex_unlock(&card->mutex); | 2025 | mutex_unlock(&card->mutex); |
| @@ -2082,6 +2084,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream) | |||
| 2082 | fe->dpcm[stream].runtime = fe_substream->runtime; | 2084 | fe->dpcm[stream].runtime = fe_substream->runtime; |
| 2083 | 2085 | ||
| 2084 | if (dpcm_path_get(fe, stream, &list) <= 0) { | 2086 | if (dpcm_path_get(fe, stream, &list) <= 0) { |
| 2087 | dpcm_path_put(&list); | ||
| 2085 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", | 2088 | dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", |
| 2086 | fe->dai_link->name, stream ? "capture" : "playback"); | 2089 | fe->dai_link->name, stream ? "capture" : "playback"); |
| 2087 | } | 2090 | } |
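All three soc-pcm hunks add dpcm_path_put() on the failure branches after dpcm_path_get(): the getter can leave an allocated widget list behind even when it reports no valid path, so the list must be released before the early return. The general shape of that fix, with hypothetical helper names standing in for the ASoC ones:

    #include <errno.h>
    #include <stdlib.h>

    struct path_list { void *widgets; };

    /* Hypothetical getter: allocates into *list even when it finds no path. */
    static int path_get(struct path_list *list)
    {
            list->widgets = malloc(64);          /* scratch list allocated up front */
            return list->widgets ? 0 : -ENOMEM;  /* zero paths found in this sketch */
    }

    static void path_put(struct path_list *list)
    {
            free(list->widgets);
            list->widgets = NULL;
    }

    static int open_stream(void)
    {
            struct path_list list = { 0 };
            int paths = path_get(&list);

            if (paths <= 0) {
                    path_put(&list);   /* the fix: free the list on the error path too */
                    return paths ? paths : -EINVAL;
            }

            /* ... wire up the discovered paths ... */
            path_put(&list);
            return 0;
    }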
diff --git a/tools/net/Makefile b/tools/net/Makefile index 004cd74734b6..ee577ea03ba5 100644 --- a/tools/net/Makefile +++ b/tools/net/Makefile | |||
| @@ -12,7 +12,7 @@ YACC = bison | |||
| 12 | 12 | ||
| 13 | all : bpf_jit_disasm bpf_dbg bpf_asm | 13 | all : bpf_jit_disasm bpf_dbg bpf_asm |
| 14 | 14 | ||
| 15 | bpf_jit_disasm : CFLAGS = -Wall -O2 | 15 | bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm' |
| 16 | bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl | 16 | bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl |
| 17 | bpf_jit_disasm : bpf_jit_disasm.o | 17 | bpf_jit_disasm : bpf_jit_disasm.o |
| 18 | 18 | ||
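The tools/net Makefile hunk adds -DPACKAGE because recent binutils ship a bfd.h that refuses to compile unless the including project looks like an autoconf build; defining PACKAGE (or PACKAGE_VERSION) on the command line satisfies that guard. The check near the top of bfd.h is approximately the following (exact wording varies between binutils releases):

    /* Approximate guard from newer binutils bfd.h. */
    #if !defined(PACKAGE) && !defined(PACKAGE_VERSION)
    #error config.h must be included before this header
    #endif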
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c index d66418237d21..aa290c0de6f5 100644 --- a/tools/testing/selftests/ipc/msgque.c +++ b/tools/testing/selftests/ipc/msgque.c | |||
| @@ -201,6 +201,7 @@ int main(int argc, char **argv) | |||
| 201 | 201 | ||
| 202 | msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666); | 202 | msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666); |
| 203 | if (msgque.msq_id == -1) { | 203 | if (msgque.msq_id == -1) { |
| 204 | err = -errno; | ||
| 204 | printf("Can't create queue\n"); | 205 | printf("Can't create queue\n"); |
| 205 | goto err_out; | 206 | goto err_out; |
| 206 | } | 207 | } |
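The msgque selftest hunk saves errno into err immediately after msgget() fails; the printf() that follows may overwrite errno, so without the capture the err_out path would report a stale or zero error code. The same pattern in isolation, as a small compilable sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/msg.h>

    int create_queue(key_t key)
    {
            int err = 0;
            int id = msgget(key, IPC_CREAT | IPC_EXCL | 0666);

            if (id == -1) {
                    err = -errno;                   /* capture errno before it is clobbered */
                    printf("Can't create queue\n"); /* printf() may change errno */
                    goto err_out;
            }
            return id;

    err_out:
            return err;
    }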
