-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  arch/Kconfig | 6
-rw-r--r--  arch/arm/include/asm/tlb.h | 7
-rw-r--r--  arch/arm/kernel/perf_event.c | 7
-rw-r--r--  arch/arm64/include/asm/tlb.h | 7
-rw-r--r--  arch/hexagon/Kconfig | 1
-rw-r--r--  arch/ia64/include/asm/tlb.h | 9
-rw-r--r--  arch/microblaze/Kconfig | 2
-rw-r--r--  arch/openrisc/Kconfig | 1
-rw-r--r--  arch/s390/include/asm/tlb.h | 8
-rw-r--r--  arch/score/Kconfig | 2
-rw-r--r--  arch/sh/include/asm/tlb.h | 6
-rw-r--r--  arch/um/include/asm/tlb.h | 6
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 48
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 3
-rw-r--r--  arch/x86/include/asm/pgtable.h | 30
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 17
-rw-r--r--  arch/x86/include/asm/spinlock.h | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 4
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 2
-rw-r--r--  arch/x86/mm/mmap.c | 2
-rw-r--r--  drivers/block/aoe/aoecmd.c | 17
-rw-r--r--  drivers/net/bonding/bond_main.c | 8
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 43
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 66
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 107
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 58
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 111
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 4
-rw-r--r--  drivers/net/macvlan.c | 4
-rw-r--r--  drivers/net/macvtap.c | 12
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wireless/cw1200/sta.c | 7
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 16
-rw-r--r--  drivers/net/wireless/iwlegacy/common.c | 1
-rw-r--r--  drivers/rtc/rtc-stmp3xxx.c | 35
-rw-r--r--  drivers/usb/class/usbtmc.c | 8
-rw-r--r--  drivers/usb/core/quirks.c | 6
-rw-r--r--  drivers/usb/host/ehci-sched.c | 13
-rw-r--r--  drivers/usb/misc/adutux.c | 2
-rw-r--r--  drivers/usb/serial/keyspan.c | 2
-rw-r--r--  drivers/usb/serial/mos7720.c | 21
-rw-r--r--  drivers/usb/serial/mos7840.c | 2
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 9
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 20
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c | 9
-rw-r--r--  fs/cifs/cifsencrypt.c | 14
-rw-r--r--  fs/cifs/cifsfs.c | 11
-rw-r--r--  fs/cifs/cifsglob.h | 4
-rw-r--r--  fs/cifs/cifsproto.h | 4
-rw-r--r--  fs/cifs/connect.c | 7
-rw-r--r--  fs/cifs/file.c | 1
-rw-r--r--  fs/cifs/link.c | 84
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cifs/sess.c | 6
-rw-r--r--  fs/cifs/smb1ops.c | 1
-rw-r--r--  fs/cifs/smb2transport.c | 9
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/ext4/ioctl.c | 6
-rw-r--r--  fs/ext4/super.c | 19
-rw-r--r--  fs/hugetlbfs/inode.c | 10
-rw-r--r--  fs/ocfs2/aops.c | 2
-rw-r--r--  fs/ocfs2/dir.c | 4
-rw-r--r--  fs/ocfs2/file.c | 6
-rw-r--r--  fs/ocfs2/journal.h | 2
-rw-r--r--  fs/ocfs2/move_extents.c | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 53
-rw-r--r--  fs/ocfs2/refcounttree.h | 6
-rw-r--r--  fs/proc/task_mmu.c | 31
-rw-r--r--  include/asm-generic/pgtable.h | 30
-rw-r--r--  include/asm-generic/tlb.h | 2
-rw-r--r--  include/linux/mlx5/device.h | 22
-rw-r--r--  include/linux/mlx5/driver.h | 7
-rw-r--r--  include/linux/sched.h | 7
-rw-r--r--  include/linux/spinlock.h | 14
-rw-r--r--  include/linux/swapops.h | 2
-rw-r--r--  include/linux/syscalls.h | 5
-rw-r--r--  include/net/busy_poll.h | 7
-rw-r--r--  include/net/ip_tunnels.h | 14
-rw-r--r--  include/net/sch_generic.h | 9
-rw-r--r--  include/uapi/linux/pkt_sched.h | 10
-rw-r--r--  include/uapi/linux/snmp.h | 2
-rw-r--r--  kernel/fork.c | 6
-rw-r--r--  kernel/mutex.c | 4
-rw-r--r--  kernel/power/qos.c | 20
-rw-r--r--  kernel/sched/core.c | 96
-rw-r--r--  kernel/sched/cpupri.c | 4
-rw-r--r--  kernel/sched/fair.c | 10
-rw-r--r--  mm/fremap.c | 11
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/memcontrol.c | 4
-rw-r--r--  mm/memory.c | 49
-rw-r--r--  mm/mmap.c | 4
-rw-r--r--  mm/rmap.c | 14
-rw-r--r--  mm/swapfile.c | 19
-rw-r--r--  net/8021q/vlan_core.c | 7
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 13
-rw-r--r--  net/batman-adv/gateway_client.h | 3
-rw-r--r--  net/batman-adv/soft-interface.c | 9
-rw-r--r--  net/batman-adv/unicast.c | 13
-rw-r--r--  net/bridge/br_multicast.c | 2
-rw-r--r--  net/bridge/br_sysfs_br.c | 2
-rw-r--r--  net/core/flow_dissector.c | 1
-rw-r--r--  net/core/neighbour.c | 10
-rw-r--r--  net/core/rtnetlink.c | 4
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 5
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 4
-rw-r--r--  net/ipv4/proc.c | 2
-rw-r--r--  net/ipv4/tcp_cubic.c | 12
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 16
-rw-r--r--  net/mac80211/mlme.c | 54
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 12
-rw-r--r--  net/netfilter/nfnetlink_log.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 5
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 28
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c | 10
-rw-r--r--  net/netlink/genetlink.c | 7
-rw-r--r--  net/openvswitch/actions.c | 1
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/openvswitch/flow.c | 2
-rw-r--r--  net/sched/sch_api.c | 41
-rw-r--r--  net/sched/sch_generic.c | 8
-rw-r--r--  net/sched/sch_htb.c | 13
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/transport.c | 4
-rw-r--r--  net/tipc/bearer.c | 9
-rw-r--r--  net/vmw_vsock/af_vsock.c | 2
-rw-r--r--  net/wireless/core.c | 1
-rw-r--r--  net/wireless/nl80211.c | 6
-rw-r--r--  sound/pci/hda/hda_generic.c | 6
-rw-r--r--  sound/pci/hda/patch_realtek.c | 11
-rw-r--r--  sound/soc/codecs/cs42l52.c | 5
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 18
-rw-r--r--  sound/soc/soc-dapm.c | 7
-rw-r--r--  sound/soc/tegra/tegra30_i2s.c | 2
-rw-r--r--  sound/usb/6fire/midi.c | 16
-rw-r--r--  sound/usb/6fire/midi.h | 6
-rw-r--r--  sound/usb/6fire/pcm.c | 41
-rw-r--r--  sound/usb/6fire/pcm.h | 2
-rw-r--r--  sound/usb/mixer.c | 1
-rw-r--r--  sound/usb/quirks.c | 6
167 files changed, 1387 insertions, 676 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 7cacc88dc79c..b8045c7f78c9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5581,9 +5581,9 @@ S: Maintained
 F:	drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:	Andrew Gallatin <gallatin@myri.com>
+M:	Hyong-Youb Kim <hykim@myri.com>
 L:	netdev@vger.kernel.org
-W:	http://www.myri.com/scs/download-Myri10GE.html
+W:	https://www.myricom.com/support/downloads/myri10ge.html
 S:	Supported
 F:	drivers/net/ethernet/myricom/myri10ge/
 
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
 	help
 	  Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+	bool
+	help
+	  Architecture has tls passed as the 3rd argument of clone(2),
+	  not the 5th one.
+
 config ODD_RT_SIGACTION
 	bool
 	help
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..21f77906602c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -ENOENT;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
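The fix above validates a userspace-supplied index (config arrives via perf_event_open()) before it is used to dereference the event table. A minimal standalone sketch of the same check-then-index pattern, with an illustrative table that is not the driver's:

    #include <errno.h>
    #include <stddef.h>

    /* Illustrative event table; -1 marks an unsupported entry. */
    static const int event_map[8] = { 3, -1, 7, -1, 9, 2, -1, 5 };

    static int map_event(size_t config)
    {
            /* Reject out-of-range values before indexing the table. */
            if (config >= sizeof(event_map) / sizeof(event_map[0]))
                    return -ENOENT;

            return event_map[config] == -1 ? -ENOENT : event_map[config];
    }
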
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055..77d442ab28c8 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
 endmenu
 
 source "init/Kconfig"
+source "kernel/Kconfig.freezer"
 source "drivers/Kconfig"
 source "fs/Kconfig"
 
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
  * {
  *	  for each vma that needs a shootdown do {
  *	    tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
+	unsigned long		start, end;
 	unsigned long		start_addr;
 	unsigned long		end_addr;
 	struct page		**pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
-	tlb->fullmm = full_mm_flush;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->start_addr = ~0UL;
 }
 
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff4..4fab52294d98 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@ config MICROBLAZE
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IDLE_POLL_SETUP
 	select MODULES_USE_ELF_RELA
-	select CLONE_BACKWARDS
+	select CLONE_BACKWARDS3
 
 config SWAP
 	def_bool n
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1c59ac..d60bf98fa5cf 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -55,6 +55,7 @@ config GENERIC_CSUM
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684..23a64d25f2b1 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
 	struct mm_struct *mm;
 	struct mmu_table_batch *batch;
 	unsigned int fullmm;
+	unsigned long start, end;
 };
 
 struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
-				  unsigned int full_mm_flush)
+				  unsigned long start,
+				  unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 	tlb->batch = NULL;
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc9020..5fc237581caf 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 config MMU
 	def_bool y
 
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689..362192ed12fe 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 
 	init_tlb_gather(tlb);
 }
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1..29b0301c18aa 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->start = start;
+	tlb->end = end;
+	tlb->fullmm = !(start | (end+1));
 
 	init_tlb_gather(tlb);
 }
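Every tlb_gather_mmu() variant above derives the full-mm flag from the range itself: a whole-address-space teardown is passed as start == 0, end == ~0UL, so `!(start | (end+1))` is true for exactly that pair. A standalone sketch of the encoding (function name illustrative, not from the patch):

    #include <assert.h>

    static int is_full_mm_flush(unsigned long start, unsigned long end)
    {
            /* end + 1 wraps to 0 only when end == ~0UL, so the OR is zero
             * only for the (0, ~0UL) "flush everything" range. */
            return !(start | (end + 1));
    }

    int main(void)
    {
            assert(is_full_mm_flush(0UL, ~0UL));            /* full mm */
            assert(!is_full_mm_flush(0x1000UL, 0x2000UL));  /* partial */
            assert(!is_full_mm_flush(0UL, 0x2000UL));
            return 0;
    }
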
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf1602..3bf2dd0cf61f 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS	28
+#define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2		(_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3		(_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4		(_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1		(PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2		(PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3		(PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte)	\
+	((((pte).pte_low >> (PTE_FILE_SHIFT1))	\
+	  & ((1U << PTE_FILE_BITS1) - 1)))	\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT2))	\
+	    & ((1U << PTE_FILE_BITS2) - 1))	\
+	   << (PTE_FILE_BITS1))	\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT3))	\
+	    & ((1U << PTE_FILE_BITS3) - 1))	\
+	   << (PTE_FILE_BITS1 + PTE_FILE_BITS2))	\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT4)))	\
+	   << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off)	\
+	((pte_t) { .pte_low =	\
+	 ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1)	\
+	 + ((((off) >> PTE_FILE_BITS1)	\
+	     & ((1U << PTE_FILE_BITS2) - 1))	\
+	    << PTE_FILE_SHIFT2)	\
+	 + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2))	\
+	     & ((1U << PTE_FILE_BITS3) - 1))	\
+	    << PTE_FILE_SHIFT3)	\
+	 + ((((off) >>	\
+	      (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)))	\
+	    << PTE_FILE_SHIFT4)	\
+	 + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
  */
 #define PTE_FILE_MAX_BITS	29
 #define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 		<< PTE_FILE_SHIFT3) \
 	 + _PAGE_FILE })
 
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
 #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
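The pte_to_pgoff()/pgoff_to_pte() pairs above split a file offset across whichever pte bits are not claimed by the present/file/protnone (and, with soft-dirty, hidden) flags. It is a lossless pack/unpack across non-contiguous bit fields; a self-contained sketch with arbitrary illustrative field positions, not the kernel's layout:

    #include <assert.h>
    #include <stdint.h>

    /* Offset bits 0-2 live at word bits 1-3, bits 3-5 at word bits 5-7,
     * and the rest from bit 9 upward; bits 0, 4 and 8 stay free for
     * flags, mirroring how the macros above skip reserved pte bits. */
    static uint32_t pack(uint32_t off)
    {
            return ((off & 7u) << 1)
                 | (((off >> 3) & 7u) << 5)
                 | ((off >> 6) << 9);
    }

    static uint32_t unpack(uint32_t word)
    {
            return ((word >> 1) & 7u)
                 | (((word >> 5) & 7u) << 3)
                 | ((word >> 9) << 6);
    }

    int main(void)
    {
            for (uint32_t off = 0; off < (1u << 20); off += 4099)
                    assert(unpack(pack(off)) == off);   /* round-trips */
            return 0;
    }
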
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc3..81bb91b49a88 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking 11 bit is taken from
+ * the low part of pte as well.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
 #define pgoff_to_pte(off) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058..1c00631164c2 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
 /*
  * Mask out unsupported bits in a present pgprot. Non-present pgprots
  * can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae48..f4843e031131 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -61,12 +61,27 @@
  * they do not conflict with each other.
  */
 
+#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_HIDDEN
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
 #else
 #define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
 #endif
 
+/*
+ * Tracking soft dirty bit when a page goes to a swap is tricky.
+ * We need a bit which can be stored in pte _and_ not conflict
+ * with swap entry format. On x86 bits 6 and 7 are *not* involved
+ * into swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
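Borrowing _PAGE_PSE only works because, as the new comment notes, the swap-entry encoding never touches pte bits 6 and 7. A hedged compile-time check of that invariant, using illustrative masks rather than the kernel's real ones:

    #include <stdint.h>

    #define BIT(n)              (UINT64_C(1) << (n))
    #define SWP_SOFT_DIRTY_BIT  BIT(7)                        /* borrowed bit */
    #define SWP_FIELDS_MASK     (~(BIT(0) | BIT(6) | BIT(7))) /* swap type+offset */

    /* Fails to compile if the borrowed bit ever aliases the swap fields. */
    _Static_assert((SWP_SOFT_DIRTY_BIT & SWP_FIELDS_MASK) == 0,
                   "soft-dirty swap bit must not alias swap type/offset");
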
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..e3ddd7db723f 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
 #endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210b45bc..a45d8d4ace10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void)
 	case 70:
 	case 71:
 	case 63:
+	case 69:
 		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791dbde95..1fb6c72717bd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
 	INTEL_UNCORE_EVENT_DESC(clockticks,		"event=0x14"),
 	INTEL_UNCORE_EVENT_DESC(txl_flits_active,	"event=0x00,umask=0x06"),
-	INTEL_UNCORE_EVENT_DESC(drs_data,		"event=0x02,umask=0x08"),
-	INTEL_UNCORE_EVENT_DESC(ncb_data,		"event=0x03,umask=0x04"),
+	INTEL_UNCORE_EVENT_DESC(drs_data,		"event=0x102,umask=0x08"),
+	INTEL_UNCORE_EVENT_DESC(ncb_data,		"event=0x103,umask=0x04"),
 	{ /* end: all zeroes */ },
 };
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb81..48f8375e4c6b 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 			*begin = new_begin;
 		}
 	} else {
-		*begin = TASK_UNMAPPED_BASE;
+		*begin = mmap_legacy_base();
 		*end = TASK_SIZE;
 	}
 }
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5bfe26..f63778cb2363 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+unsigned long mmap_legacy_base(void)
 {
 	if (mmap_is_ia32())
 		return TASK_UNMAPPED_BASE;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944a002d..4d45dba7fb8f 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
 	int i;
 
 	bio_for_each_segment(bv, bio, i) {
-		page = bv->bv_page;
 		/* Non-zero page count for non-head members of
-		 * compound pages is no longer allowed by the kernel,
-		 * but this has never been seen here.
+		 * compound pages is no longer allowed by the kernel.
 		 */
-		if (unlikely(PageCompound(page)))
-			if (compound_trans_head(page) != page) {
-				pr_crit("page tail used for block I/O\n");
-				BUG();
-			}
+		page = compound_trans_head(bv->bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -924,10 +918,13 @@ static void
 bio_pagedec(struct bio *bio)
 {
 	struct bio_vec *bv;
+	struct page *page;
 	int i;
 
-	bio_for_each_segment(bv, bio, i)
-		atomic_dec(&bv->bv_page->_count);
+	bio_for_each_segment(bv, bio, i) {
+		page = compound_trans_head(bv->bv_page);
+		atomic_dec(&page->_count);
+	}
 }
 
 static void
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07f257d44a1e..e48cb339c0c6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n)
  * The bonding ndo_neigh_setup is called at init time beofre any
  * slave exists. So we must declare proxy setup function which will
  * be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
  */
 static int bond_neigh_setup(struct net_device *dev,
 			    struct neigh_parms *parms)
 {
-	parms->neigh_setup = bond_neigh_init;
+	/* modify only our neigh_parms */
+	if (parms->dev == dev)
+		parms->neigh_setup = bond_neigh_init;
 
 	return 0;
 }
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8ee201..925ab8ec9329 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 		if ((mc->ptr + rec_len) > mc->end)
 			goto decode_failed;
 
-		memcpy(cf->data, mc->ptr, rec_len);
+		memcpy(cf->data, mc->ptr, cf->can_dlc);
 		mc->ptr += rec_len;
 	}
 
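The one-liner above bounds the copy by cf->can_dlc (the frame's validated data length, at most 8 for classic CAN) instead of the record length parsed from the USB stream, so a malformed record can no longer overrun the fixed 8-byte data buffer. The same clamp-before-copy idea in a standalone sketch (names illustrative):

    #include <string.h>

    #define CAN_MAX_DLEN 8

    static void copy_can_data(unsigned char dst[CAN_MAX_DLEN],
                              const unsigned char *src, unsigned char dlc)
    {
            /* Bound the copy by the validated length, never by the raw
             * on-wire record length. */
            memcpy(dst, src, dlc <= CAN_MAX_DLEN ? dlc : CAN_MAX_DLEN);
    }
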
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f1b121ee5525..55d79cb53a79 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 	unsigned int work_done;
 
-	for (work_done = 0; work_done <= budget; work_done++) {
+	for (work_done = 0; work_done < budget; work_done++) {
 		unsigned int *last_rx_bd = &priv->last_rx_bd;
 		struct net_device_stats *stats = &priv->stats;
 		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d80e34b8285f..ce9b387b5a19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1502,6 +1502,7 @@ struct bnx2x {
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
 #define IS_VF_FLAG			(1 << 22)
 #define INTERRUPTS_ENABLED_FLAG		(1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
 
 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
 
@@ -1830,6 +1831,8 @@ struct bnx2x {
 
 	int fp_array_size;
 	u32 dump_preset_idx;
+	bool stats_started;
+	struct semaphore stats_sema;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed {
 	BNX2X_PCI_LINK_SPEED_5000 = 5000,
 	BNX2X_PCI_LINK_SPEED_8000 = 8000
 };
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df47e0e8..f9122f2d6b65 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 		bnx2x_pfc_set_pfc(bp);
 
 		bnx2x_dcbx_update_ets_params(bp);
+
+		/* ets may affect cmng configuration: reinit it in hw */
+		bnx2x_set_local_cmng(bp);
+
 		bnx2x_dcbx_resume_hw_tx(bp);
 
 		return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52ae2ad..32767f6aa33f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1300,6 +1300,9 @@ struct drv_func_mb {
 
 	#define DRV_MSG_CODE_EEE_RESULTS_ACK		0xda000000
 
+	#define DRV_MSG_CODE_RMMOD			0xdb000000
+	#define REQ_BC_VER_4_RMMOD_CMD			0x0007080f
+
 	#define DRV_MSG_CODE_SET_MF_BW			0xe0000000
 	#define REQ_BC_VER_4_SET_MF_BW			0x00060202
 	#define DRV_MSG_CODE_SET_MF_BW_ACK		0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {
 
 	#define FW_MSG_CODE_EEE_RESULS_ACK		0xda100000
 
+	#define FW_MSG_CODE_RMMOD_ACK			0xdb100000
+
 	#define FW_MSG_CODE_SET_MF_BW_SENT		0xe0000000
 	#define FW_MSG_CODE_SET_MF_BW_DONE		0xe1000000
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e06186c305d8..955d6cfd9cb7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
 	input.port_rate = bp->link_vars.line_speed;
 
-	if (cmng_type == CMNG_FNS_MINMAX) {
+	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
 		int vn;
 
 		/* read mf conf from shmem */
@@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
 	}
 }
 
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+	if (cmng_fns != CMNG_FNS_NONE) {
+		bnx2x_cmng_fns_init(bp, false, cmng_fns);
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+	} else {
+		/* rate shaping and fairness are disabled */
+		DP(NETIF_MSG_IFUP,
+		   "single function mode without fairness\n");
+	}
+}
+
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 	}
 
-	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
-		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
-		if (cmng_fns != CMNG_FNS_NONE) {
-			bnx2x_cmng_fns_init(bp, false, cmng_fns);
-			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
-		} else
-			/* rate shaping and fairness are disabled */
-			DP(NETIF_MSG_IFUP,
-			   "single function mode without fairness\n");
-	}
+	if (bp->link_vars.link_up && bp->link_vars.line_speed)
+		bnx2x_set_local_cmng(bp);
 
 	__bnx2x_link_report(bp);
 
@@ -10362,6 +10368,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
 
 	bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
 			BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+	bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+			BC_SUPPORTS_RMMOD_CMD : 0;
+
 	boot_mode = SHMEM_RD(bp,
 			dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
 			PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11524,6 +11534,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	spin_lock_init(&bp->stats_lock);
+	sema_init(&bp->stats_sema, 1);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12817,13 +12828,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 	bnx2x_dcbnl_update_applist(bp, true);
 #endif
 
+	if (IS_PF(bp) &&
+	    !BP_NOMCP(bp) &&
+	    (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
 	/* Close the interface - either directly or implicitly */
 	if (remove_netdev) {
 		unregister_netdev(dev);
 	} else {
 		rtnl_lock();
-		if (netif_running(dev))
-			bnx2x_close(dev);
+		dev_close(dev);
 		rtnl_unlock();
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861efb5051..44104fb27947 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -3463,7 +3463,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 alloc_mem_err:
 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
 		       sizeof(struct bnx2x_vf_mbx_msg));
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
 		       sizeof(union pf_vf_bulletin));
 	return -ENOMEM;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366abd02bd..d63d1327b051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
  * Statistics service functions
  */
 
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
 	struct dmae_command *dmae;
 	u32 opcode;
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 	*stats_comp = 0;
 }
 
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
 {
 	/* vfs travel through here as part of the statistics FSM, but no action
 	 * is required
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp)
 
 	bnx2x_hw_stats_post(bp);
 	bnx2x_storm_stats_post(bp);
+
+	bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
 	bnx2x_stats_comp(bp);
-	bnx2x_stats_pmf_update(bp);
-	bnx2x_stats_start(bp);
+	__bnx2x_stats_pmf_update(bp);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	__bnx2x_stats_pmf_update(bp);
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
 	 */
 	if (IS_VF(bp))
 		return;
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
 	bnx2x_stats_comp(bp);
-	bnx2x_stats_start(bp);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
 	/* Make sure we use the value of the counter
 	 * used for sending the last stats ramrod.
 	 */
-	spin_lock_bh(&bp->stats_lock);
 	cur_stats_counter = bp->stats_counter - 1;
-	spin_unlock_bh(&bp->stats_lock);
 
 	/* are storm stats valid? */
 	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-	if (bnx2x_edebug_stats_stopped(bp))
+	/* we run update from timer context, so give up
+	 * if somebody is in the middle of transition
+	 */
+	if (down_trylock(&bp->stats_sema))
 		return;
 
+	if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+		goto out;
+
 	if (IS_PF(bp)) {
 		if (*stats_comp != DMAE_COMP_VAL)
-			return;
+			goto out;
 
 		if (bp->port.pmf)
 			bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 				BNX2X_ERR("storm stats were not updated for 3 times\n");
 				bnx2x_panic();
 			}
-			return;
+			goto out;
 		}
 	} else {
 		/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	/* vf is done */
 	if (IS_VF(bp))
-		return;
+		goto out;
 
 	if (netif_msg_timer(bp)) {
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	bnx2x_hw_stats_post(bp);
 	bnx2x_storm_stats_post(bp);
+
+out:
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 {
 	int update = 0;
 
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+
+	bp->stats_started = false;
+
 	bnx2x_stats_comp(bp);
 
 	if (bp->port.pmf)
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 		bnx2x_hw_stats_post(bp);
 		bnx2x_stats_comp(bp);
 	}
+
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@ static const struct {
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
 	enum bnx2x_stats_state state;
+	void (*action)(struct bnx2x *bp);
 	if (unlikely(bp->panic))
 		return;
 
 	spin_lock_bh(&bp->stats_lock);
 	state = bp->stats_state;
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+	action = bnx2x_stats_stm[state][event].action;
 	spin_unlock_bh(&bp->stats_lock);
 
-	bnx2x_stats_stm[state][event].action(bp);
+	action(bp);
 
 	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
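The bnx2x_stats_handle() rework above snapshots the FSM's action pointer together with the state transition inside the spinlock, then invokes it after unlocking, since the handlers may now sleep on stats_sema and must not run under a BH-disabled spinlock. A minimal sketch of that snapshot-then-call pattern outside the driver (pthread-based, names illustrative):

    #include <pthread.h>

    struct fsm {
            pthread_mutex_t lock;
            int state;
            void (*action[2])(void);   /* one handler per state */
    };

    static void fsm_handle(struct fsm *f, int next_state)
    {
            void (*action)(void);

            /* Read the state-dependent handler under the lock... */
            pthread_mutex_lock(&f->lock);
            action = f->action[f->state];
            f->state = next_state;
            pthread_mutex_unlock(&f->lock);

            /* ...and run it (it may block) with the lock dropped. */
            action();
    }
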
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ddebc7a5dda0..0da2214ef1b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
 done:
 	if (state == pci_channel_io_perm_failure) {
-		tg3_napi_enable(tp);
-		dev_close(netdev);
+		if (netdev) {
+			tg3_napi_enable(tp);
+			dev_close(netdev);
+		}
 		err = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_disable_device(pdev);
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	rtnl_lock();
 
 	if (pci_enable_device(pdev)) {
-		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
 		goto done;
 	}
 
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
 
-	if (!netif_running(netdev)) {
+	if (!netdev || !netif_running(netdev)) {
 		rc = PCI_ERS_RESULT_RECOVERED;
 		goto done;
 	}
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	rc = PCI_ERS_RESULT_RECOVERED;
 
 done:
-	if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
 		tg3_napi_enable(tp);
 		dev_close(netdev);
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 687ec4a8bb48..9c89dc8fe105 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
-			__free_pages(q->pg_chunk.page, order);
-			q->pg_chunk.page = NULL;
-			return -EIO;
-		}
 		q->pg_chunk.mapping = mapping;
 	}
 	sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
 	return flits_to_desc(flits);
 }
 
-
-/*	map_skb - map a packet main body and its page fragments
- *	@pdev: the PCI device
- *	@skb: the packet
- *	@addr: placeholder to save the mapped addresses
- *
- *	map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
-		   dma_addr_t *addr)
-{
-	const skb_frag_t *fp, *end;
-	const struct skb_shared_info *si;
-
-	*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-			       PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, *addr))
-		goto out_err;
-
-	si = skb_shinfo(skb);
-	end = &si->frags[si->nr_frags];
-
-	for (fp = si->frags; fp < end; fp++) {
-		*++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
-					   DMA_TO_DEVICE);
-		if (pci_dma_mapping_error(pdev, *addr))
-			goto unwind;
-	}
-	return 0;
-
-unwind:
-	while (fp-- > si->frags)
-		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
-			       DMA_TO_DEVICE);
-
-	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
-	return -ENOMEM;
-}
-
 /**
- *	write_sgl - populate a scatter/gather list for a packet
+ *	make_sgl - populate a scatter/gather list for a packet
  *	@skb: the packet
  *	@sgp: the SGL to populate
  *	@start: start address of skb main body data to include in the SGL
  *	@len: length of skb main body data to include in the SGL
- *	@addr: the list of the mapped addresses
+ *	@pdev: the PCI device
  *
- *	Copies the scatter/gather list for the buffers that make up a packet
+ *	Generates a scatter/gather list for the buffers that make up a packet
  *	and returns the SGL size in 8-byte words. The caller must size the SGL
  *	appropriately.
  */
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
 				     struct sg_ent *sgp, unsigned char *start,
-				     unsigned int len, const dma_addr_t *addr)
+				     unsigned int len, struct pci_dev *pdev)
 {
-	unsigned int i, j = 0, k = 0, nfrags;
+	dma_addr_t mapping;
+	unsigned int i, j = 0, nfrags;
 
 	if (len) {
+		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
 		sgp->len[0] = cpu_to_be32(len);
-		sgp->addr[j++] = cpu_to_be64(addr[k++]);
+		sgp->addr[0] = cpu_to_be64(mapping);
+		j = 1;
 	}
 
 	nfrags = skb_shinfo(skb)->nr_frags;
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+					   DMA_TO_DEVICE);
 		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-		sgp->addr[j] = cpu_to_be64(addr[k++]);
+		sgp->addr[j] = cpu_to_be64(mapping);
 		j ^= 1;
 		if (j == 0)
 			++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 			    const struct port_info *pi,
 			    unsigned int pidx, unsigned int gen,
 			    struct sge_txq *q, unsigned int ndesc,
-			    unsigned int compl, const dma_addr_t *addr)
+			    unsigned int compl)
 {
 	unsigned int flits, sgl_flits, cntrl, tso_info;
 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	}
 
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
 
 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *txq;
 	struct sge_qset *qs;
 	struct sge_txq *q;
-	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 
 	/*
 	 * The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
 	q->in_use += ndesc;
 	if (unlikely(credits - ndesc < q->stop_thres)) {
 		t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!skb_shared(skb)))
 		skb_orphan(skb);
 
-	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
 	check_ring_tx_db(adap, q);
 	return NETDEV_TX_OK;
 }
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
  */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 			  struct sge_txq *q, unsigned int pidx,
-			  unsigned int gen, unsigned int ndesc,
-			  const dma_addr_t *addr)
+			  unsigned int gen, unsigned int ndesc)
 {
 	unsigned int sgl_flits, flits;
 	struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 
 	flits = skb_transport_offset(skb) / 8;
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
-			      skb_tail_pointer(skb) -
-			      skb_transport_header(skb), addr);
+	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+			     skb->tail - skb->transport_header,
+			     adap->pdev);
 	if (need_skb_unmap()) {
 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
 		skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 		goto again;
 	}
 
-	if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
-		spin_unlock(&q->lock);
-		return NET_XMIT_SUCCESS;
-	}
-
 	gen = q->gen;
 	q->in_use += ndesc;
 	pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	}
 	spin_unlock(&q->lock);
 
-	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
 	check_ring_tx_db(adap, q);
 	return NET_XMIT_SUCCESS;
 }
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
 	const struct port_info *pi = netdev_priv(qs->netdev);
 	struct adapter *adap = pi->adapter;
-	unsigned int written = 0;
 
 	spin_lock(&q->lock);
 again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 			break;
 		}
 
-		if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
-			break;
-
 		gen = q->gen;
 		q->in_use += ndesc;
 		pidx = q->pidx;
 		q->pidx += ndesc;
-		written += ndesc;
 		if (q->pidx >= q->size) {
 			q->pidx -= q->size;
 			q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 		__skb_unlink(skb, &q->sendq);
 		spin_unlock(&q->lock);
 
-		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
-			      (dma_addr_t *)skb->head);
+		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
 		spin_lock(&q->lock);
 	}
 	spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
 	wmb();
-	if (likely(written))
-		t3_write_reg(adap, A_SG_KDOORBELL,
-			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
 /**
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e6e0a117ee2..8ec5d74ad44d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
 
 		adapter->max_event_queues = le16_to_cpu(desc->eq_count);
 		adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+		/* Clear flags that driver is not interested in */
+		adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
 	}
 err:
 	mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5228d88c5a02..1b3b9e886412 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -563,6 +563,12 @@ enum be_if_flags {
 	BE_IF_FLAGS_MULTICAST = 0x1000
 };
 
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+			 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+			 BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+			 BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+			 BE_IF_FLAGS_UNTAGGED)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e1..ef94a591f9e5 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 			  struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	u64 map;
+	dma_addr_t map;
 
 	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
 			     PCI_DMA_FROMDEVICE);
 
-	rd->dma_lo = map;
-	rd->dma_hi = map >> 32;
+	if (pci_dma_mapping_error(skge->hw->pdev, map))
+		return -1;
+
+	rd->dma_lo = lower_32_bits(map);
+	rd->dma_hi = upper_32_bits(map);
 	e->skb = skb;
 	rd->csum1_start = ETH_HLEN;
 	rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
+	return 0;
 }
 
 /* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+		if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(skb);
+			return -EIO;
+		}
 	} while ((e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
 
 	BUG_ON(skge->dma & 7);
 
-	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+	if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
 		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
 		err = -EINVAL;
 		goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	u64 map;
+	dma_addr_t map;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(hw->pdev, map))
+		goto mapping_error;
+
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);
 
-	td->dma_lo = map;
-	td->dma_hi = map >> 32;
+	td->dma_lo = lower_32_bits(map);
+	td->dma_hi = upper_32_bits(map);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
 					       skb_frag_size(frag), DMA_TO_DEVICE);
+			if (dma_mapping_error(&hw->pdev->dev, map))
+				goto mapping_unwind;
 
 			e = e->next;
 			e->skb = skb;
 			tf = e->desc;
 			BUG_ON(tf->control & BMU_OWN);
 
-			tf->dma_lo = map;
-			tf->dma_hi = (u64) map >> 32;
+			tf->dma_lo = lower_32_bits(map);
+			tf->dma_hi = upper_32_bits(map);
 			dma_unmap_addr_set(e, mapaddr, map);
 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	}
 
 	return NETDEV_TX_OK;
+
+mapping_unwind:
+	e = skge->tx_ring.to_use;
+	pci_unmap_single(hw->pdev,
+			 dma_unmap_addr(e, mapaddr),
+			 dma_unmap_len(e, maplen),
+			 PCI_DMA_TODEVICE);
+	while (i-- > 0) {
+		e = e->next;
+		pci_unmap_page(hw->pdev,
+			       dma_unmap_addr(e, mapaddr),
+			       dma_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
+	}
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    dma_unmap_addr(e, mapaddr),
-					    len, PCI_DMA_FROMDEVICE);
+					    dma_unmap_len(e, maplen),
+					    PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(e->skb, skb->data, len);
 		pci_dma_sync_single_for_device(skge->hw->pdev,
 					       dma_unmap_addr(e, mapaddr),
-					       len, PCI_DMA_FROMDEVICE);
+					       dma_unmap_len(e, maplen),
+					       PCI_DMA_FROMDEVICE);
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;
 
+		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(nskb);
+			goto resubmit;
+		}
+
 		pci_unmap_single(skge->hw->pdev,
 				 dma_unmap_addr(e, mapaddr),
 				 dma_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
-		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}
 
 	skb_put(skb, len);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c571de85d0f9..5472cbd34028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-	CMD_IF_REV = 4,
+	CMD_IF_REV = 5,
 };
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb8..443cc4d7b024 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
 		{
 			u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-			s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+			s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
 			mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
 			mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f5..f012658b6a92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
 	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
 	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
 	caps->log_max_mcg = out->hca_cap.log_max_mcg;
-	caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+	caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
 	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
 	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
 	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 748f10a155c4..3e6670c4a7cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -55,33 +55,9 @@ enum {
 };
 
 static DEFINE_SPINLOCK(health_lock);
-
 static LIST_HEAD(health_list);
 static struct work_struct health_work;
 
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
-	spin_lock_irq(&health_lock);
-	if (reg_handler) {
-		spin_unlock_irq(&health_lock);
-		return -EEXIST;
-	}
-	reg_handler = handler;
-	spin_unlock_irq(&health_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
-	spin_lock_irq(&health_lock);
-	reg_handler = NULL;
-	spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
 static void health_care(struct work_struct *work)
 {
 	struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
 		priv = container_of(health, struct mlx5_priv, health);
 		dev = container_of(priv, struct mlx5_core_dev, priv);
 		mlx5_core_warn(dev, "handling bad device here\n");
+		/* nothing yet */
 		spin_lock_irq(&health_lock);
-		if (reg_handler)
-			reg_handler(dev->pdev, health->health,
-				    sizeof(health->health));
-
 		list_del_init(&health->list);
 		spin_unlock_irq(&health_lock);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4a3e137931a3..3a2408d44820 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
 	MLX5_PAGES_TAKE = 2
 };
 
+enum {
+	MLX5_BOOT_PAGES = 1,
+	MLX5_INIT_PAGES = 2,
+	MLX5_POST_INIT_PAGES = 3
+};
+
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
 	u32 func_id;
-	s16 npages;
+	s32 npages;
 	struct work_struct work;
 };
 
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	__be16			num_boot_pages;
+	__be16			rsvd;
 	__be16			func_id;
-	__be16			init_pages;
-	__be16			num_pages;
+	__be32			num_pages;
 };
 
 struct mlx5_manage_pages_inbox {
 	struct mlx5_inbox_hdr	hdr;
-	__be16			rsvd0;
+	__be16			rsvd;
 	__be16			func_id;
-	__be16			rsvd1;
-	__be16			num_entries;
-	u8			rsvd2[16];
+	__be32			num_entries;
 	__be64			pas[0];
 };
 
 struct mlx5_manage_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			num_entries;
-	u8			rsvd1[20];
+	__be32			num_entries;
+	u8			rsvd[4];
 	__be64			pas[0];
 };
 
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-				s16 *pages, s16 *init_pages, u16 *boot_pages)
+				s32 *npages, int boot)
 {
 	struct mlx5_query_pages_inbox	in;
 	struct mlx5_query_pages_outbox	out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 	if (err)
 		return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	if (out.hdr.status)
 		return mlx5_cmd_status_to_err(&out.hdr);
 
-	if (pages)
-		*pages = be16_to_cpu(out.num_pages);
-
-	if (init_pages)
-		*init_pages = be16_to_cpu(out.init_pages);
-
-	if (boot_pages)
-		*boot_pages = be16_to_cpu(out.num_boot_pages);
-
+	*npages = be32_to_cpu(out.num_pages);
 	*func_id = be16_to_cpu(out.func_id);
 
 	return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
 	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
 	in->func_id = cpu_to_be16(func_id);
-	in->num_entries = cpu_to_be16(npages);
+	in->num_entries = cpu_to_be32(npages);
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 	mlx5_core_dbg(dev, "err %d\n", err);
 	if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
 	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
 	in.func_id = cpu_to_be16(func_id);
-	in.num_entries = cpu_to_be16(npages);
+	in.num_entries = cpu_to_be32(npages);
 	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
 	if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 		goto out_free;
 	}
 
-	num_claimed = be16_to_cpu(out->num_entries);
+	num_claimed = be32_to_cpu(out->num_entries);
 	if (nclaimed)
 		*nclaimed = num_claimed;
 
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
 }
 
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s16 npages)
+				 s32 npages)
 {
 	struct mlx5_pages_req *req;
 
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-	u16 uninitialized_var(boot_pages);
-	s16 uninitialized_var(init_pages);
 	u16 uninitialized_var(func_id);
+	s32 uninitialized_var(npages);
 	int err;
 
-	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
-				   &boot_pages);
+	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
 	if (err)
 		return err;
 
+	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+		      npages, boot ? "boot" : "init", func_id);
 
-	mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
-		      init_pages, boot_pages, func_id);
-	return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+	return give_pages(dev, func_id, npages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 92da9980a0a0..9d4bb7f83904 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 	u8 val;
 	int ret, max_sds_rings = adapter->max_sds_rings;
 
+	if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+		netdev_info(netdev, "Device is resetting\n");
+		return -EBUSY;
+	}
+
 	if (qlcnic_get_diag_lock(adapter)) {
 		netdev_info(netdev, "Device in diagnostics mode\n");
 		return -EBUSY;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 9f4b8d5f0865..345d987aede4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 		return -EIO;
 	}
 
-	qlcnic_set_drv_version(adapter);
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
 	qlcnic_83xx_idc_attach_driver(adapter);
 
 	return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ee013fcc3322..bc05d016c859 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_out_disable_mbx_intr;
 
-	qlcnic_set_drv_version(adapter);
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
 
 	pci_set_drvdata(pdev, adapter);
 
@@ -3085,7 +3086,8 @@ done:
 	adapter->fw_fail_cnt = 0;
 	adapter->flags &= ~QLCNIC_FW_HANG;
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	qlcnic_set_drv_version(adapter);
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
 
 	if (!qlcnic_clr_drv_state(adapter))
 		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 10ed82b3baca..660c3f5b2237 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
 
 	if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
 		err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
-		if (!err) {
-			dev_info(&adapter->pdev->dev,
+		if (err) {
+			netdev_err(adapter->netdev,
 				 "Failed to get current beacon state\n");
 		} else {
 			if (h_beacon_state == QLCNIC_BEACON_DISABLE)
 				ahw->beacon_state = 0;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 6f35f8404d68..d2e591955bdd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -524,6 +524,7 @@ rx_status_loop:
 					  PCI_DMA_FROMDEVICE);
 		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
 			dev->stats.rx_dropped++;
+			kfree_skb(new_skb);
 			goto rx_next;
 		}
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index c9d942a5c335..1ef9d8a555aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	struct stmmac_priv *priv = (struct stmmac_priv *)p;
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int entry = priv->cur_tx % txsize;
-	struct dma_desc *desc = priv->dma_tx + entry;
+	struct dma_desc *desc;
 	unsigned int nopaged_len = skb_headlen(skb);
 	unsigned int bmax, len;
 
+	if (priv->extend_desc)
+		desc = (struct dma_desc *)(priv->dma_etx + entry);
+	else
+		desc = priv->dma_tx + entry;
+
 	if (priv->plat->enh_desc)
 		bmax = BUF_SIZE_8KiB;
 	else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 						STMMAC_RING_MODE);
 		wmb();
 		entry = (++priv->cur_tx) % txsize;
-		desc = priv->dma_tx + entry;
+
+		if (priv->extend_desc)
+			desc = (struct dma_desc *)(priv->dma_etx + entry);
+		else
+			desc = priv->dma_tx + entry;
 
 		desc->des2 = dma_map_single(priv->device, skb->data + bmax,
 					    len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f2ccb36e8685..0a9bb9d30c3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 
 	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
 				 GFP_KERNEL);
-	if (unlikely(skb == NULL)) {
+	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
-		return 1;
+		return -ENOMEM;
 	}
 	skb_reserve(skb, NET_IP_ALIGN);
 	priv->rx_skbuff[i] = skb;
 	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						priv->dma_buf_sz,
 						DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+		pr_err("%s: DMA mapping error\n", __func__);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
 
 	p->des2 = priv->rx_skbuff_dma[i];
 
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	return 0;
 }
 
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+	if (priv->rx_skbuff[i]) {
+		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+				 priv->dma_buf_sz, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(priv->rx_skbuff[i]);
+	}
+	priv->rx_skbuff[i] = NULL;
+}
+
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int rxsize = priv->dma_rx_size;
 	unsigned int bfsize = 0;
+	int ret = -ENOMEM;
 
 	/* Set the max buffer size according to the DESC mode
 	 * and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
 						   dma_extended_desc),
 						   &priv->dma_rx_phy,
 						   GFP_KERNEL);
+		if (!priv->dma_erx)
+			goto err_dma;
+
 		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
 						   sizeof(struct
 						   dma_extended_desc),
 						   &priv->dma_tx_phy,
 						   GFP_KERNEL);
-		if ((!priv->dma_erx) || (!priv->dma_etx))
-			return;
+		if (!priv->dma_etx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_extended_desc),
+					  priv->dma_erx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	} else {
 		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_rx_phy,
 						  GFP_KERNEL);
+		if (!priv->dma_rx)
+			goto err_dma;
+
 		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_tx_phy,
 						  GFP_KERNEL);
-		if ((!priv->dma_rx) || (!priv->dma_tx))
-			return;
+		if (!priv->dma_tx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
+					  priv->dma_rx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	}
 
 	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->rx_skbuff_dma)
+		goto err_rx_skbuff_dma;
+
 	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->rx_skbuff)
+		goto err_rx_skbuff;
+
 	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->tx_skbuff_dma)
+		goto err_tx_skbuff_dma;
+
 	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->tx_skbuff)
+		goto err_tx_skbuff;
+
 	if (netif_msg_probe(priv)) {
 		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
 			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_rx + i;
 
-		if (stmmac_init_rx_buffers(priv, p, i))
-			break;
+		ret = stmmac_init_rx_buffers(priv, p, i);
+		if (ret)
+			goto err_init_rx_buffers;
 
 		if (netif_msg_probe(priv))
 			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
 
 	if (netif_msg_hw(priv))
 		stmmac_display_rings(priv);
+
+	return 0;
+err_init_rx_buffers:
+	while (--i >= 0)
+		stmmac_free_rx_buffers(priv, i);
+	kfree(priv->tx_skbuff);
+err_tx_skbuff:
+	kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+	kfree(priv->rx_skbuff);
+err_rx_skbuff:
+	kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+	if (priv->extend_desc) {
+		dma_free_coherent(priv->device, priv->dma_tx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_etx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device, priv->dma_rx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_erx, priv->dma_rx_phy);
+	} else {
+		dma_free_coherent(priv->device,
+				  priv->dma_tx_size * sizeof(struct dma_desc),
+				  priv->dma_tx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device,
+				  priv->dma_rx_size * sizeof(struct dma_desc),
+				  priv->dma_rx, priv->dma_rx_phy);
+	}
+err_dma:
+	return ret;
 }
 
 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
-		if (priv->rx_skbuff[i]) {
-			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
-					 priv->dma_buf_sz, DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-		}
-		priv->rx_skbuff[i] = NULL;
-	}
+	for (i = 0; i < priv->dma_rx_size; i++)
+		stmmac_free_rx_buffers(priv, i);
 }
 
 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
 	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
 	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
-	init_dma_desc_rings(dev);
+
+	ret = init_dma_desc_rings(dev);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		goto dma_desc_error;
+	}
 
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
-		pr_err("%s: DMA initialization failed\n", __func__);
+		pr_err("%s: DMA engine initialization failed\n", __func__);
 		goto init_error;
 	}
 
@@ -1672,6 +1744,7 @@ wolirq_error:
 
 init_error:
 	free_dma_desc_resources(priv);
+dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 phy_error:
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1d6dc41f755d..d01cacf8a7c2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 	}
-	netif_rx(skb);
+	netif_receive_skb(skb);
 
 	stats->rx_bytes += pkt_len;
 	stats->rx_packets++;
@@ -2884,6 +2884,7 @@ out:
 	return ret;
 
 err_iounmap:
+	netif_napi_del(&vptr->napi);
 	iounmap(regs);
 err_free_dev:
 	free_netdev(netdev);
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev)
 	struct velocity_info *vptr = netdev_priv(netdev);
 
 	unregister_netdev(netdev);
+	netif_napi_del(&vptr->napi);
 	iounmap(vptr->mac_regs);
 	free_netdev(netdev);
 	velocity_nics--;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d0f9c2fd1d4f..16b43bf544b7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
 			return -EADDRNOTAVAIL;
 	}
 
+	if (data && data[IFLA_MACVLAN_FLAGS] &&
+	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+		return -EINVAL;
+
 	if (data && data[IFLA_MACVLAN_MODE]) {
 		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
 		case MACVLAN_MODE_PRIVATE:
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a98fb0ed6aef..b51db2abfe44 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -818,10 +818,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 	}
-	if (vlan)
+	if (vlan) {
+		local_bh_disable();
 		macvlan_start_xmit(skb, vlan->dev);
-	else
+		local_bh_enable();
+	} else {
 		kfree_skb(skb);
+	}
 	rcu_read_unlock();
 
 	return total_len;
@@ -912,8 +915,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 done:
 	rcu_read_lock();
 	vlan = rcu_dereference(q->vlan);
-	if (vlan)
+	if (vlan) {
+		preempt_disable();
 		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+		preempt_enable();
+	}
 	rcu_read_unlock();
 
 	return ret ? ret : copied;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db690a372260..71af122edf2d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	u32 rxhash;
 
 	if (!(tun->flags & TUN_NO_PI)) {
-		if ((len -= sizeof(pi)) > total_len)
+		if (len < sizeof(pi))
 			return -EINVAL;
+		len -= sizeof(pi);
 
 		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
 			return -EFAULT;
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	}
 
 	if (tun->flags & TUN_VNET_HDR) {
-		if ((len -= tun->vnet_hdr_sz) > total_len)
+		if (len < tun->vnet_hdr_sz)
 			return -EINVAL;
+		len -= tun->vnet_hdr_sz;
 
 		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
 			return -EFAULT;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f4c6db419ddb..767f7af3bd40 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev)
 		return -ENOTCONN;
 
 	if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
-	    ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+	    vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
 		vxlan_sock_hold(vs);
 		dev_hold(dev);
 		queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 
-	flush_workqueue(vxlan_wq);
-
 	spin_lock(&vn->sock_lock);
 	hlist_del_rcu(&vxlan->hlist);
 	spin_unlock(&vn->sock_lock);
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 7365674366f4..010b252be584 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
 	if (!priv->join_status)
 		goto done;
 
-	if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
-		wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
-			  priv->join_status);
-		BUG_ON(1);
-	}
+	if (priv->join_status == CW1200_JOIN_STATUS_AP)
+		goto done;
 
 	cancel_work_sync(&priv->update_filtering_work);
 	cancel_work_sync(&priv->set_beacon_wakeup_period_work);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b9b2bb51e605..f2ed62e37340 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il)
 			 * is killed. Hence update the killswitch state here. The
 			 * rfkill handler will care about restarting if needed.
 			 */
-			if (!test_bit(S_ALIVE, &il->status)) {
-				if (hw_rf_kill)
-					set_bit(S_RFKILL, &il->status);
-				else
-					clear_bit(S_RFKILL, &il->status);
+			if (hw_rf_kill) {
+				set_bit(S_RFKILL, &il->status);
+			} else {
+				clear_bit(S_RFKILL, &il->status);
 				wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+				il_force_reset(il, true);
 			}
 
 			handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il)
 
 	il->active_rate = RATES_MASK;
 
+	il_power_update_mode(il, true);
+	D_INFO("Updated power mode\n");
+
 	if (il_is_associated(il)) {
 		struct il_rxon_cmd *active_rxon =
 		    (struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il)
 	D_INFO("ALIVE processing complete.\n");
 	wake_up(&il->wait_command_queue);
 
-	il_power_update_mode(il, true);
-	D_INFO("Updated power mode\n");
-
 	return;
 
 restart:
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 3195aad440dd..b03e22ef5462 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
 
 	return 0;
 }
+EXPORT_SYMBOL(il_force_reset);
 
 int
 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 767fee2ab340..26019531db15 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
 #include <linux/of_device.h>
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
 }
 #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
 
-static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
+static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
 {
+	int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */
 	/*
-	 * The datasheet doesn't say which way round the
-	 * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
-	 * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
+	 * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
+	 * states:
+	 * | The order in which registers are updated is
+	 * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
+	 * | (This list is in bitfield order, from LSB to MSB, as they would
+	 * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
+	 * | register. For example, the Seconds register corresponds to
+	 * | STALE_REGS or NEW_REGS containing 0x80.)
 	 */
-	while (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
-			(0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))
-		cpu_relax();
+	do {
+		if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+				(0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
+			return 0;
+		udelay(1);
+	} while (--timeout > 0);
+	return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+			(0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
 }
 
 /* Time read/write */
 static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
 {
+	int ret;
 	struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
 
-	stmp3xxx_wait_time(rtc_data);
+	ret = stmp3xxx_wait_time(rtc_data);
+	if (ret)
+		return ret;
+
 	rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
 	return 0;
 }
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
 	struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
 
 	writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
-	stmp3xxx_wait_time(rtc_data);
-	return 0;
+	return stmp3xxx_wait_time(rtc_data);
 }
 
 /* interrupt(s) handler */
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 609dbc2f7151..83b4ef4dfcf8 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1119,11 +1119,11 @@ static int usbtmc_probe(struct usb_interface *intf,
 	/* Determine if it is a Rigol or not */
 	data->rigol_quirk = 0;
 	dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
-		data->usb_dev->descriptor.idVendor,
-		data->usb_dev->descriptor.idProduct);
+		le16_to_cpu(data->usb_dev->descriptor.idVendor),
+		le16_to_cpu(data->usb_dev->descriptor.idProduct));
 	for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
-		if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) &&
-		    (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) {
+		if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
+		    (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
 			dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
 			data->rigol_quirk = 1;
 			break;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a63598895077..5b44cd47da5b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
+	/* CarrolTouch 4000U */
+	{ USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
+
+	/* CarrolTouch 4500U */
+	{ USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Samsung Android phone modem - ID conflict with SPH-I500 */
 	{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f80d0330d548..8e3c878f38cf 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1391,21 +1391,20 @@ iso_stream_schedule (
 
 	/* Behind the scheduling threshold? */
 	if (unlikely(start < next)) {
+		unsigned now2 = (now - base) & (mod - 1);
 
 		/* USB_ISO_ASAP: Round up to the first available slot */
 		if (urb->transfer_flags & URB_ISO_ASAP)
 			start += (next - start + period - 1) & -period;
 
 		/*
-		 * Not ASAP: Use the next slot in the stream. If
-		 * the entire URB falls before the threshold, fail.
+		 * Not ASAP: Use the next slot in the stream,
+		 * no matter what.
 		 */
-		else if (start + span - period < next) {
-			ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
+		else if (start + span - period < now2) {
+			ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
 				 urb, start + base,
-				 span - period, next + base);
-			status = -EXDEV;
-			goto fail;
+				 span - period, now2 + base);
 		}
 	}
 
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index eb3c8c142fa9..eeb27208c0d1 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -830,7 +830,7 @@ static int adu_probe(struct usb_interface *interface,
 
 	/* let the user know what node this device is now attached to */
 	dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
-		 udev->descriptor.idProduct, dev->serial_number,
+		 le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
 		 (dev->minor - ADU_MINOR_BASE));
 exit:
 	dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5a979729f8ec..58c17fdc85eb 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2303,7 +2303,7 @@ static int keyspan_startup(struct usb_serial *serial)
 	if (d_details == NULL) {
 		dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
 			__func__, le16_to_cpu(serial->dev->descriptor.idProduct));
-		return 1;
+		return -ENODEV;
 	}
 
 	/* Setup private data for serial driver */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 51da424327b0..b01300164fc0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -90,6 +90,7 @@ struct urbtracker {
 	struct list_head	urblist_entry;
 	struct kref		ref_count;
 	struct urb		*urb;
+	struct usb_ctrlrequest	*setup;
 };
 
 enum mos7715_pp_modes {
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref)
 	struct mos7715_parport *mos_parport = urbtrack->mos_parport;
 
 	usb_free_urb(urbtrack->urb);
+	kfree(urbtrack->setup);
 	kfree(urbtrack);
 	kref_put(&mos_parport->ref_count, destroy_mos_parport);
 }
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
 	struct urbtracker *urbtrack;
 	int ret_val;
 	unsigned long flags;
-	struct usb_ctrlrequest setup;
 	struct usb_serial *serial = mos_parport->serial;
 	struct usb_device *usbdev = serial->dev;
 
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
 		kfree(urbtrack);
 		return -ENOMEM;
 	}
-	setup.bRequestType = (__u8)0x40;
-	setup.bRequest = (__u8)0x0e;
-	setup.wValue = get_reg_value(reg, dummy);
-	setup.wIndex = get_reg_index(reg);
-	setup.wLength = 0;
+	urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
+	if (!urbtrack->setup) {
+		usb_free_urb(urbtrack->urb);
+		kfree(urbtrack);
+		return -ENOMEM;
+	}
+	urbtrack->setup->bRequestType = (__u8)0x40;
+	urbtrack->setup->bRequest = (__u8)0x0e;
+	urbtrack->setup->wValue = get_reg_value(reg, dummy);
+	urbtrack->setup->wIndex = get_reg_index(reg);
+	urbtrack->setup->wLength = 0;
 	usb_fill_control_urb(urbtrack->urb, usbdev,
 			     usb_sndctrlpipe(usbdev, 0),
-			     (unsigned char *)&setup,
+			     (unsigned char *)urbtrack->setup,
 			     NULL, 0, async_complete, urbtrack);
 	kref_init(&urbtrack->ref_count);
 	INIT_LIST_HEAD(&urbtrack->urblist_entry);
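
[Annotation, not part of the patch] The write_parport_reg_nonblock() change above is a lifetime fix: the setup packet of an asynchronous control URB must outlive the submitting function, since the URB completes later, and it must be DMA-able besides, so it cannot live on the stack. A condensed sketch of the pattern with illustrative names (a sketch, not the driver's exact code):

#include <linux/slab.h>
#include <linux/usb.h>

struct async_req {
	struct urb *urb;
	struct usb_ctrlrequest *setup;	/* kmalloc'd, freed after completion */
};

static void async_req_complete(struct urb *urb)
{
	struct async_req *req = urb->context;

	kfree(req->setup);		/* safe: the hardware is done with it */
	usb_free_urb(req->urb);
	kfree(req);
}

static int submit_async_vendor_request(struct usb_device *udev, u16 value,
				       u16 index)
{
	struct async_req *req;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->urb = usb_alloc_urb(0, GFP_KERNEL);
	req->setup = kmalloc(sizeof(*req->setup), GFP_KERNEL);
	if (!req->urb || !req->setup) {
		ret = -ENOMEM;
		goto err_free;
	}
	req->setup->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR;
	req->setup->bRequest = 0x0e;	/* device-specific, as in mos7720 */
	req->setup->wValue = cpu_to_le16(value);
	req->setup->wIndex = cpu_to_le16(index);
	req->setup->wLength = 0;
	usb_fill_control_urb(req->urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)req->setup, NULL, 0,
			     async_req_complete, req);
	ret = usb_submit_urb(req->urb, GFP_KERNEL);
	if (!ret)
		return 0;
err_free:
	kfree(req->setup);
	usb_free_urb(req->urb);
	kfree(req);
	return ret;
}
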
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index d953d674f222..3bac4693c038 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2193,7 +2193,7 @@ static int mos7810_check(struct usb_serial *serial)
 static int mos7840_probe(struct usb_serial *serial,
 				const struct usb_device_id *id)
 {
-	u16 product = serial->dev->descriptor.idProduct;
+	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
 	u8 *buf;
 	int device_type;
 
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 375b5a400b6f..5c9f9b1d7736 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev)
 	char buf[32];
 
 	/* try ID specific firmware first, then try generic firmware */
-	sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
-	    dev->descriptor.idProduct);
+	sprintf(buf, "ti_usb-v%04x-p%04x.fw",
+			le16_to_cpu(dev->descriptor.idVendor),
+			le16_to_cpu(dev->descriptor.idProduct));
 	status = request_firmware(&fw_p, buf, &dev->dev);
 
 	if (status != 0) {
 		buf[0] = '\0';
-		if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
-			switch (dev->descriptor.idProduct) {
+		if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
+			switch (le16_to_cpu(dev->descriptor.idProduct)) {
 			case MTS_CDMA_PRODUCT_ID:
 				strcpy(buf, "mts_cdma.fw");
 				break;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 8257d30c4072..85365784040b 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb)
 			tty_flip_buffer_push(&port->port);
 		} else
 			dev_dbg(dev, "%s: empty read urb received\n", __func__);
-
+	}
 	/* Resubmit urb so we continue receiving */
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err) {
 		if (err != -EPERM) {
-			dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err);
-			/* busy also in error unless we are killed */
-			usb_mark_last_busy(port->serial->dev);
-		}
-	} else {
+			dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
+				__func__, err);
+			/* busy also in error unless we are killed */
 			usb_mark_last_busy(port->serial->dev);
 		}
+	} else {
+		usb_mark_last_busy(port->serial->dev);
 	}
 }
 
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 16968c899493..d3493ca0525d 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1226,6 +1226,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
 	}
 	spin_lock_irqsave(&xfer->lock, flags);
 	rpipe = xfer->ep->hcpriv;
+	if (rpipe == NULL) {
+		pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s",
+			__func__, wa_xfer_id(xfer),
+			"Probably already aborted.\n" );
+		goto out_unlock;
+	}
 	/* Check the delayed list -> if there, release and complete */
 	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
 	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1644,8 +1650,7 @@ static void wa_xfer_result_cb(struct urb *urb)
 		break;
 	}
 	usb_status = xfer_result->bTransferStatus & 0x3f;
-	if (usb_status == WA_XFER_STATUS_ABORTED
-		   || usb_status == WA_XFER_STATUS_NOT_FOUND)
+	if (usb_status == WA_XFER_STATUS_NOT_FOUND)
 		/* taken care of already */
 		break;
 	xfer_id = xfer_result->dwTransferID;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45e57cc38200..fc6f4f3a1a9d 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
 	server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
 	if (IS_ERR(server->secmech.md5)) {
 		cifs_dbg(VFS, "could not allocate crypto md5\n");
-		return PTR_ERR(server->secmech.md5);
+		rc = PTR_ERR(server->secmech.md5);
+		server->secmech.md5 = NULL;
+		return rc;
 	}
 
 	size = sizeof(struct shash_desc) +
 			crypto_shash_descsize(server->secmech.md5);
 	server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
 	if (!server->secmech.sdescmd5) {
-		rc = -ENOMEM;
 		crypto_free_shash(server->secmech.md5);
 		server->secmech.md5 = NULL;
-		return rc;
+		return -ENOMEM;
 	}
 	server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
 	server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
 		if (blobptr + attrsize > blobend)
 			break;
 		if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
-			if (!attrsize)
+			if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
 				break;
 			if (!ses->domainName) {
 				ses->domainName =
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
 
 static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
 {
+	int rc;
 	unsigned int size;
 
596 /* check if already allocated */ 598 /* check if already allocated */
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
600 server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); 602 server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
601 if (IS_ERR(server->secmech.hmacmd5)) { 603 if (IS_ERR(server->secmech.hmacmd5)) {
602 cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); 604 cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
603 return PTR_ERR(server->secmech.hmacmd5); 605 rc = PTR_ERR(server->secmech.hmacmd5);
606 server->secmech.hmacmd5 = NULL;
607 return rc;
604 } 608 }
605 609
606 size = sizeof(struct shash_desc) + 610 size = sizeof(struct shash_desc) +
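
[Annotation, not part of the patch] The cifsencrypt.c hunks above, and the smb2transport.c ones later in this diff, share one shape: crypto_alloc_shash() returns an ERR_PTR() on failure, and if that encoded error is left in the secmech field, a later teardown path that only checks for non-NULL will hand garbage to crypto_free_shash(). The convention reduced to a sketch, with an illustrative wrapper name:

#include <linux/err.h>
#include <crypto/hash.h>

static int alloc_md5_tfm(struct crypto_shash **tfm)
{
	int rc;

	*tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(*tfm)) {
		rc = PTR_ERR(*tfm);
		*tfm = NULL;	/* cleanup paths may now test for NULL safely */
		return rc;
	}
	return 0;
}
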
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bdd547dbf6f..85ea98d139fc 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb)
 		goto out_no_root;
 	}
 
+	if (cifs_sb_master_tcon(cifs_sb)->nocase)
+		sb->s_d_op = &cifs_ci_dentry_ops;
+	else
+		sb->s_d_op = &cifs_dentry_ops;
+
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root) {
 		rc = -ENOMEM;
 		goto out_no_root;
 	}
 
-	/* do that *after* d_make_root() - we want NULL ->d_op for root here */
-	if (cifs_sb_master_tcon(cifs_sb)->nocase)
-		sb->s_d_op = &cifs_ci_dentry_ops;
-	else
-		sb->s_d_op = &cifs_dentry_ops;
-
 #ifdef CONFIG_CIFS_NFSD_EXPORT
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 		cifs_dbg(FYI, "export ops supported\n");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fdc37041057..52ca861ed35e 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -44,6 +44,7 @@
 #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
 #define MAX_SERVER_SIZE 15
 #define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
 #define MAX_USERNAME_SIZE 256	/* reasonable maximum for current servers */
 #define MAX_PASSWORD_SIZE 512	/* max for windows seems to be 256 wide chars */
 
@@ -369,6 +370,9 @@ struct smb_version_operations {
 	void (*generate_signingkey)(struct TCP_Server_Info *server);
 	int (*calc_signature)(struct smb_rqst *rqst,
 				   struct TCP_Server_Info *server);
+	int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
+			unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+			unsigned int xid);
 };
 
 struct smb_version_values {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f7e584d047e2..b29a012bed33 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work);
 struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
 						work_func_t complete);
 void cifs_writedata_release(struct kref *refcount);
-
+int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+			unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+			unsigned int xid);
 #endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fa68813396b5..d67c550c4980 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 			if (string == NULL)
 				goto out_nomem;
 
-			if (strnlen(string, 256) == 256) {
+			if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+					== CIFS_MAX_DOMAINNAME_LEN) {
 				printk(KERN_WARNING "CIFS: domain name too"
 						    " long\n");
 				goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
 
 #ifdef CONFIG_KEYS
 
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
 
 /* Populate username and pw fields from keyring if possible */
 static int
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1e57f36ea1b2..7e36ae34e947 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
 				     oflags, &oplock, &cfile->fid.netfid, xid);
 		if (rc == 0) {
 			cifs_dbg(FYI, "posix reopen succeeded\n");
+			oparms.reconnect = true;
 			goto reopen_success;
 		}
 		/*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b83c3f5646bd..562044f700e5 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
 }
 
 int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
-		   const unsigned char *path,
-		   struct cifs_sb_info *cifs_sb, unsigned int xid)
+open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+			unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+			unsigned int xid)
 {
 	int rc;
 	int oplock = 0;
 	__u16 netfid = 0;
 	struct tcon_link *tlink;
-	struct cifs_tcon *pTcon;
+	struct cifs_tcon *ptcon;
 	struct cifs_io_parms io_parms;
-	u8 *buf;
-	char *pbuf;
-	unsigned int bytes_read = 0;
 	int buf_type = CIFS_NO_BUFFER;
-	unsigned int link_len = 0;
 	FILE_ALL_INFO file_info;
 
-	if (!CIFSCouldBeMFSymlink(fattr))
-		/* it's not a symlink */
-		return 0;
-
 	tlink = cifs_sb_tlink(cifs_sb);
 	if (IS_ERR(tlink))
 		return PTR_ERR(tlink);
-	pTcon = tlink_tcon(tlink);
+	ptcon = tlink_tcon(tlink);
 
-	rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+	rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
 			 CREATE_NOT_DIR, &netfid, &oplock, &file_info,
 			 cifs_sb->local_nls,
 			 cifs_sb->mnt_cifs_flags &
 				CIFS_MOUNT_MAP_SPECIAL_CHR);
-	if (rc != 0)
-		goto out;
+	if (rc != 0) {
+		cifs_put_tlink(tlink);
+		return rc;
+	}
 
 	if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
-		CIFSSMBClose(xid, pTcon, netfid);
+		CIFSSMBClose(xid, ptcon, netfid);
+		cifs_put_tlink(tlink);
 		/* it's not a symlink */
-		goto out;
+		return rc;
 	}
 
-	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
-	if (!buf) {
-		rc = -ENOMEM;
-		goto out;
-	}
-	pbuf = buf;
 	io_parms.netfid = netfid;
 	io_parms.pid = current->tgid;
-	io_parms.tcon = pTcon;
+	io_parms.tcon = ptcon;
 	io_parms.offset = 0;
 	io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
 
-	rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
-	CIFSSMBClose(xid, pTcon, netfid);
-	if (rc != 0) {
-		kfree(buf);
+	rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+	CIFSSMBClose(xid, ptcon, netfid);
+	cifs_put_tlink(tlink);
+	return rc;
+}
+
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+		   const unsigned char *path,
+		   struct cifs_sb_info *cifs_sb, unsigned int xid)
+{
+	int rc = 0;
+	u8 *buf = NULL;
+	unsigned int link_len = 0;
+	unsigned int bytes_read = 0;
+	struct cifs_tcon *ptcon;
+
+	if (!CIFSCouldBeMFSymlink(fattr))
+		/* it's not a symlink */
+		return 0;
+
+	buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+	if (!buf) {
+		rc = -ENOMEM;
 		goto out;
 	}
 
+	ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+	if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+		rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+						&bytes_read, cifs_sb, xid);
+	else
+		goto out;
+
+	if (rc != 0)
+		goto out;
+
+	if (bytes_read == 0) /* not a symlink */
+		goto out;
+
 	rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
-	kfree(buf);
 	if (rc == -EINVAL) {
 		/* it's not a symlink */
 		rc = 0;
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
 		fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
 		fattr->cf_dtype = DT_LNK;
 out:
-	cifs_put_tlink(tlink);
+	kfree(buf);
 	return rc;
 }
 
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ab8778469394..69d2c826a23b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
 		return;
 	}
 
+	/*
+	 * If we know that the inode will need to be revalidated immediately,
+	 * then don't create a new dentry for it. We'll end up doing an on
+	 * the wire call either way and this spares us an invalidation.
+	 */
+	if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+		return;
+
 	dentry = d_alloc(parent, name);
 	if (!dentry)
 		return;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 79358e341fd2..08dd37bb23aa 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
 		bytes_ret = 0;
 	} else
 		bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
-					    256, nls_cp);
+					    CIFS_MAX_DOMAINNAME_LEN, nls_cp);
 	bcc_ptr += 2 * bytes_ret;
 	bcc_ptr += 2;	/* account for null terminator */
 
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
 
 	/* copy domain */
 	if (ses->domainName != NULL) {
-		strncpy(bcc_ptr, ses->domainName, 256);
-		bcc_ptr += strnlen(ses->domainName, 256);
+		strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+		bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
 	} /* else we will send a null domain name
 	     so the server will default to its own domain */
 	*bcc_ptr = 0;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 6457690731a2..60943978aec3 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = {
 	.mand_lock = cifs_mand_lock,
 	.mand_unlock_range = cifs_unlock_range,
 	.push_mand_locks = cifs_push_mandatory_locks,
+	.query_mf_symlink = open_query_close_cifs_symlink,
 };
 
 struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 301b191270b9..4f2300d020c7 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -42,6 +42,7 @@
 static int
 smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
 {
+	int rc;
 	unsigned int size;
 
 	if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
 	server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
 	if (IS_ERR(server->secmech.hmacsha256)) {
 		cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
-		return PTR_ERR(server->secmech.hmacsha256);
+		rc = PTR_ERR(server->secmech.hmacsha256);
+		server->secmech.hmacsha256 = NULL;
+		return rc;
 	}
 
 	size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
 		server->secmech.sdeschmacsha256 = NULL;
 		crypto_free_shash(server->secmech.hmacsha256);
 		server->secmech.hmacsha256 = NULL;
-		return PTR_ERR(server->secmech.cmacaes);
+		rc = PTR_ERR(server->secmech.cmacaes);
+		server->secmech.cmacaes = NULL;
+		return rc;
 	}
 
 	size = sizeof(struct shash_desc) +
diff --git a/fs/exec.c b/fs/exec.c
index 9c73def87642..fd774c7cb483 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		return -ENOMEM;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, old_start, old_end);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		free_pgd_range(&tlb, old_start, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
-	tlb_finish_mmu(&tlb, new_end, old_end);
+	tlb_finish_mmu(&tlb, old_start, old_end);
 
 	/*
 	 * Shrink the vma to just the new range.  Always succeeds.
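
[Annotation, not part of the patch] These exec.c hunks follow the interface change visible in the include/asm-generic/tlb.h hunk further down: tlb_gather_mmu() now takes the virtual-address range being torn down rather than a full-mm flag, which lets the architecture flush only that range. A hypothetical caller, sketched to show the new calling convention (the floor/ceiling arguments here are illustrative; shift_arg_pages() above computes its own):

#include <linux/mm.h>
#include <asm/tlb.h>

static void teardown_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* was: tlb_gather_mmu(&tlb, mm, 0) */
	free_pgd_range(&tlb, start, end, FIRST_USER_ADDRESS,
		       USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);	/* flushes [start, end) only */
}
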
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 9491ac0590f7..c0427e2f6648 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
 	memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
 	memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
 	memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
-	memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
-	memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
+	ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
+	ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
+	ext4_es_lru_del(inode1);
+	ext4_es_lru_del(inode2);
 
 	isize = i_size_read(inode1);
 	i_size_write(inode1, i_size_read(inode2));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 36b141e420b7..b59373b625e9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1359,7 +1359,7 @@ static const struct mount_opts {
 	{Opt_delalloc, EXT4_MOUNT_DELALLOC,
 	 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
 	{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
-	 MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+	 MOPT_EXT4_ONLY | MOPT_CLEAR},
 	{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
 	 MOPT_EXT4_ONLY | MOPT_SET},
 	{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	if (test_opt(sb, DIOREAD_NOLOCK)) {
 		ext4_msg(sb, KERN_ERR, "can't mount with "
-			 "both data=journal and delalloc");
+			 "both data=journal and dioread_nolock");
 		goto failed_mount;
 	}
 	if (test_opt(sb, DELALLOC))
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 		goto restore_opts;
 	}
 
+	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+		if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+			ext4_msg(sb, KERN_ERR, "can't mount with "
+				 "both data=journal and delalloc");
+			err = -EINVAL;
+			goto restore_opts;
+		}
+		if (test_opt(sb, DIOREAD_NOLOCK)) {
+			ext4_msg(sb, KERN_ERR, "can't mount with "
+				 "both data=journal and dioread_nolock");
+			err = -EINVAL;
+			goto restore_opts;
+		}
+	}
+
 	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
 		ext4_abort(sb, "Abort forced by user");
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a3f868ae3fd4..34423978b170 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
 	return inode;
 }
 
+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
 static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 					struct inode *dir,
 					umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 		struct hugetlbfs_inode_info *info;
 		inode->i_ino = get_next_ino();
 		inode_init_owner(inode, dir, mode);
+		lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+				&hugetlbfs_i_mmap_mutex_key);
 		inode->i_mapping->a_ops = &hugetlbfs_aops;
 		inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 79736a28d84f..2abf97b2a592 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1757,7 +1757,7 @@ try_again:
 		goto out;
 	} else if (ret == 1) {
 		clusters_need = wc->w_clen;
-		ret = ocfs2_refcount_cow(inode, filp, di_bh,
+		ret = ocfs2_refcount_cow(inode, di_bh,
 					 wc->w_cpos, wc->w_clen, UINT_MAX);
 		if (ret) {
 			mlog_errno(ret);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index eb760d8acd50..30544ce8e9f7 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode)
 {
 	int ret;
 	struct ocfs2_empty_dir_priv priv = {
-		.ctx.actor = ocfs2_empty_dir_filldir
+		.ctx.actor = ocfs2_empty_dir_filldir,
 	};
 
-	memset(&priv, 0, sizeof(priv));
-
 	if (ocfs2_dir_indexed(inode)) {
 		ret = ocfs2_empty_dir_dx(inode, &priv);
 		if (ret)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41000f223ca4..3261d71319ee 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
 	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
 		goto out;
 
-	return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
+	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
 
 out:
 	return status;
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
 	zero_clusters = last_cpos - zero_cpos;
 
 	if (needs_cow) {
-		rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
+		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
 					zero_clusters, UINT_MAX);
 		if (rc) {
 			mlog_errno(rc);
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
 
 	*meta_level = 1;
 
-	ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
+	ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
 	if (ret)
 		mlog_errno(ret);
 out:
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 96f9ac237e86..0a992737dcaf 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb,
 		extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
 
 	return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
-	       ocfs2_quota_trans_credits(sb) + bits_wanted;
+	       ocfs2_quota_trans_credits(sb);
 }
 
 static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index f1fc172175b6..452068b45749 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
 	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
 	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
 
-	ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
 					       p_cpos, new_p_cpos, len);
 	if (ret) {
 		mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 9f6b96a09615..a70d604593b6 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -49,7 +49,6 @@
 
 struct ocfs2_cow_context {
 	struct inode *inode;
-	struct file *file;
 	u32 cow_start;
 	u32 cow_len;
 	struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
 			    u32 *num_clusters,
 			    unsigned int *extent_flags);
 	int (*cow_duplicate_clusters)(handle_t *handle,
-				      struct file *file,
+				      struct inode *inode,
 				      u32 cpos, u32 old_cluster,
 				      u32 new_cluster, u32 new_len);
 };
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
 }
 
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-				     struct file *file,
+				     struct inode *inode,
 				     u32 cpos, u32 old_cluster,
 				     u32 new_cluster, u32 new_len)
 {
 	int ret = 0, partial;
-	struct inode *inode = file_inode(file);
-	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
-	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+	struct super_block *sb = inode->i_sb;
 	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
 	struct page *page;
 	pgoff_t page_index;
@@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
 			BUG_ON(PageDirty(page));
 
-		if (PageReadahead(page)) {
-			page_cache_async_readahead(mapping,
-						   &file->f_ra, file,
-						   page, page_index,
-						   readahead_pages);
-		}
-
 		if (!PageUptodate(page)) {
 			ret = block_read_full_page(page, ocfs2_get_block);
 			if (ret) {
@@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 			}
 		}
 
-		ocfs2_map_and_dirty_page(inode, handle, from, to,
+		ocfs2_map_and_dirty_page(inode,
+					 handle, from, to,
 					 page, 0, &new_block);
 		mark_page_accessed(page);
 unlock:
@@ -3020,12 +3011,11 @@ unlock:
 }
 
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-				    struct file *file,
+				    struct inode *inode,
 				    u32 cpos, u32 old_cluster,
 				    u32 new_cluster, u32 new_len)
 {
 	int ret = 0;
-	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
 	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
 	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
 
 	/*If the old clusters is unwritten, no need to duplicate. */
 	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
-		ret = context->cow_duplicate_clusters(handle, context->file,
+		ret = context->cow_duplicate_clusters(handle, context->inode,
 						      cpos, old, new, len);
 		if (ret) {
 			mlog_errno(ret);
@@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
 	return ret;
 }
 
-static void ocfs2_readahead_for_cow(struct inode *inode,
-				    struct file *file,
-				    u32 start, u32 len)
-{
-	struct address_space *mapping;
-	pgoff_t index;
-	unsigned long num_pages;
-	int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-
-	if (!file)
-		return;
-
-	mapping = file->f_mapping;
-	num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
-	if (!num_pages)
-		num_pages = 1;
-
-	index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
-	page_cache_sync_readahead(mapping, &file->f_ra, file,
-				  index, num_pages);
-}
-
 /*
  * Starting at cpos, try to CoW write_len clusters.  Don't CoW
  * past max_cpos.  This will stop when it runs into a hole or an
  * unrefcounted extent.
  */
 static int ocfs2_refcount_cow_hunk(struct inode *inode,
-				   struct file *file,
 				   struct buffer_head *di_bh,
 				   u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
 
 	BUG_ON(cow_len == 0);
 
-	ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
-
 	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
 	if (!context) {
 		ret = -ENOMEM;
@@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
 	context->ref_root_bh = ref_root_bh;
 	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
 	context->get_clusters = ocfs2_di_get_clusters;
-	context->file = file;
 
 	ocfs2_init_dinode_extent_tree(&context->data_et,
 				      INODE_CACHE(inode), di_bh);
@@ -3537,7 +3501,6 @@ out:
  * clusters between cpos and cpos+write_len are safe to modify.
  */
 int ocfs2_refcount_cow(struct inode *inode,
-		       struct file *file,
 		       struct buffer_head *di_bh,
 		       u32 cpos, u32 write_len, u32 max_cpos)
 {
@@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode,
 		num_clusters = write_len;
 
 		if (ext_flags & OCFS2_EXT_REFCOUNTED) {
-			ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
+			ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
 						      num_clusters, max_cpos);
 			if (ret) {
 				mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index 7754608c83a4..6422bbcdb525 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
 			  int *credits,
 			  int *ref_blocks);
 int ocfs2_refcount_cow(struct inode *inode,
-		       struct file *filep, struct buffer_head *di_bh,
+		       struct buffer_head *di_bh,
 		       u32 cpos, u32 write_len, u32 max_cpos);
 
 typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
 			     u32 cpos, u32 write_len,
 			     struct ocfs2_post_refcount *post);
 int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-				     struct file *file,
+				     struct inode *inode,
 				     u32 cpos, u32 old_cluster,
 				     u32 new_cluster, u32 new_len);
 int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-				    struct file *file,
+				    struct inode *inode,
 				    u32 cpos, u32 old_cluster,
 				    u32 new_cluster, u32 new_len);
 int ocfs2_cow_sync_writeback(struct super_block *sb,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6174f0..107d026f5d6e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 	 * of how soft-dirty works.
 	 */
 	pte_t ptent = *pte;
-	ptent = pte_wrprotect(ptent);
-	ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+	if (pte_present(ptent)) {
+		ptent = pte_wrprotect(ptent);
+		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+	} else if (is_swap_pte(ptent)) {
+		ptent = pte_swp_clear_soft_dirty(ptent);
+	} else if (pte_file(ptent)) {
+		ptent = pte_file_clear_soft_dirty(ptent);
+	}
+
 	set_pte_at(vma->vm_mm, addr, pte, ptent);
 #endif
 }
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
-		if (!pte_present(ptent))
-			continue;
 
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty(vma, addr, pte);
 			continue;
 		}
 
+		if (!pte_present(ptent))
+			continue;
+
 		page = vm_normal_page(vma, addr, ptent);
 		if (!page)
 			continue;
@@ -859,7 +868,7 @@ typedef struct {
 } pagemap_entry_t;
 
 struct pagemapread {
-	int pos, len;
+	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
 	pagemap_entry_t *buffer;
 	bool v2;
 };
@@ -867,7 +876,7 @@ struct pagemapread {
 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 #define PAGEMAP_WALK_MASK	(PMD_MASK)
 
-#define PM_ENTRY_BYTES      sizeof(u64)
+#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
 #define PM_STATUS_BITS      3
 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 		flags = PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
 	} else if (is_swap_pte(pte)) {
-		swp_entry_t entry = pte_to_swp_entry(pte);
-
+		swp_entry_t entry;
+		if (pte_swp_soft_dirty(pte))
+			flags2 |= __PM_SOFT_DIRTY;
+		entry = pte_to_swp_entry(pte);
 		frame = swp_type(entry) |
 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 		flags = PM_SWAP;
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 		goto out_task;
 
 	pm.v2 = soft_dirty_cleared;
-	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
 	ret = -ENOMEM;
 	if (!pm.buffer)
 		goto out_task;
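
[Annotation, not part of the patch] The two pagemap hunks above fix mixed units: pm.len previously held a byte count (PM_ENTRY_BYTES times the number of entries) while other code treats it as an entry count, and PM_ENTRY_BYTES is simultaneously retyped to sizeof(pagemap_entry_t). Keeping the field in entries and multiplying by the entry size only at allocation removes the ambiguity. A user-space miniature of the same discipline (illustrative, compiles standalone):

#include <stdint.h>
#include <stdlib.h>

typedef struct { uint64_t pme; } pagemap_entry_t;

int main(void)
{
	size_t len = 512;	/* entries, never bytes */
	pagemap_entry_t *buffer = malloc(len * sizeof(*buffer));

	if (!buffer)
		return 1;
	buffer[len - 1].pme = 0;	/* in bounds because len counts entries */
	free(buffer);
	return 0;
}
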
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade1b567..0807ddf97b05 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 {
 	return pmd;
 }
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return 0;
+}
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c339a41..5672d7ea1fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@ struct mmu_gather {
 
 #define HAVE_GENERIC_MMU_GATHER
 
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
 							unsigned long end);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e9e852..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
 	__be16	max_desc_sz_rq;
 	u8	rsvd21[2];
 	__be16	max_desc_sz_sq_dc;
-	u8	rsvd22[4];
-	__be16	max_qp_mcg;
-	u8	rsvd23;
+	__be32	max_qp_mcg;
+	u8	rsvd22[3];
 	u8	log_max_mcg;
-	u8	rsvd24;
+	u8	rsvd23;
 	u8	log_max_pd;
-	u8	rsvd25;
+	u8	rsvd24;
 	u8	log_max_xrcd;
-	u8	rsvd26[42];
+	u8	rsvd25[42];
 	__be16	log_uar_page_sz;
-	u8	rsvd27[28];
+	u8	rsvd26[28];
 	u8	log_msx_atomic_size_qp;
-	u8	rsvd28[2];
+	u8	rsvd27[2];
 	u8	log_msx_atomic_size_dc;
-	u8	rsvd29[76];
+	u8	rsvd28[76];
 };
 
 
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
 	u8		rsvd0[2];
 	__be16		func_id;
-	u8		rsvd1[2];
-	__be16		num_pages;
-	__be32		rsvd2[5];
+	__be32		num_pages;
+	__be32		rsvd1[5];
 };
 
 union ev_data {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2aa258b0ced1..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
 	u32	reserved_lkey;
 	u8	local_ca_ack_delay;
 	u8	log_max_mcg;
-	u16	max_qp_mcg;
+	u32	max_qp_mcg;
 	int	min_page_sz;
 };
 
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s16 npages);
+				 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
 const char *mlx5_command_str(int command);
 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d722490da030..e9995eb5985c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -314,6 +314,7 @@ struct nsproxy;
 struct user_namespace;
 
 #ifdef CONFIG_MMU
+extern unsigned long mmap_legacy_base(void);
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
@@ -1532,6 +1533,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
  * Test if a process is not yet dead (at most zombie state)
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
  */
 static inline int pid_alive(struct task_struct *p)
 {
@@ -1543,6 +1546,8 @@ static inline int pid_alive(struct task_struct *p)
  * @tsk: Task structure to be checked.
  *
  * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
  */
 static inline int is_global_init(struct task_struct *tsk)
 {
@@ -1894,6 +1899,8 @@ extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
  * @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949a..75f34949d9ab 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
117#endif /*arch_spin_is_contended*/ 117#endif /*arch_spin_is_contended*/
118#endif 118#endif
119 119
120/* The lock does not imply full memory barrier. */ 120/*
121#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK 121 * Despite its name it doesn't necessarily have to be a full barrier.
122static inline void smp_mb__after_lock(void) { smp_mb(); } 122 * It should only guarantee that a STORE before the critical section
123 * cannot be reordered with a LOAD inside this section.
124 * spin_lock() is a one-way barrier: this LOAD cannot escape out
125 * of the region. So the default implementation simply ensures that
126 * a STORE cannot move into the critical section; smp_wmb() should
127 * serialize it with another STORE done by spin_lock().
128 */
129#ifndef smp_mb__before_spinlock
130#define smp_mb__before_spinlock() smp_wmb()
123#endif 131#endif
124 132
125/** 133/**
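The contract the replacement comment spells out pairs with the try_to_wake_up() and __schedule() hunks further down: a STORE published before taking the lock must be visible to the p->state check done under it. As a user-space analogue (a minimal sketch: C11 atomics and a pthread mutex stand in for the kernel primitives, and the seq_cst fence is deliberately stronger than the write barrier the kernel needs), the waker side looks roughly like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* User-space analogue of the waker/waiter pairing described above.
 * The fence stands in for smp_mb__before_spinlock(): the CONDITION
 * store must not be reordered past the state check under the lock. */
static atomic_int condition;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int task_state = 1;                     /* 1 = sleeping, 0 = runnable */

static void waker(void)
{
    atomic_store(&condition, 1);               /* CONDITION = 1 */
    atomic_thread_fence(memory_order_seq_cst); /* ~smp_mb__before_spinlock() */
    pthread_mutex_lock(&lock);                 /* ~raw_spin_lock(&p->pi_lock) */
    if (task_state == 1)                       /* the p->state check */
        task_state = 0;                        /* wake the waiter */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    waker();
    printf("task_state=%d condition=%d\n", task_state, atomic_load(&condition));
    return 0;
}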
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a415..8d4fa82bfb91 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
67 swp_entry_t arch_entry; 67 swp_entry_t arch_entry;
68 68
69 BUG_ON(pte_file(pte)); 69 BUG_ON(pte_file(pte));
70 if (pte_swp_soft_dirty(pte))
71 pte = pte_swp_clear_soft_dirty(pte);
70 arch_entry = __pte_to_swp_entry(pte); 72 arch_entry = __pte_to_swp_entry(pte);
71 return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); 73 return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
72} 74}
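The effect of the two added lines is easiest to see at the bit level: the soft-dirty marker is not part of the swap type/offset encoding, so it must be stripped before decoding. A minimal sketch, with an invented bit position and shift (the real x86 encoding differs), and the pte reduced to a plain unsigned long:

#include <stdio.h>

#define PTE_SWP_SOFT_DIRTY (1UL << 10)   /* invented bit, overlaps the field */

static unsigned long pte_to_swp(unsigned long pte)
{
    if (pte & PTE_SWP_SOFT_DIRTY)        /* pte_swp_soft_dirty() */
        pte &= ~PTE_SWP_SOFT_DIRTY;      /* pte_swp_clear_soft_dirty() */
    return pte >> 8;                     /* __pte_to_swp_entry(): made-up shift */
}

int main(void)
{
    unsigned long clean = 0x2a00UL;
    unsigned long dirty = clean | PTE_SWP_SOFT_DIRTY;
    /* Without the clearing step, dirty would decode to 0x2e, not 0x2a:
     * the flag would leak into the extracted swap entry. */
    printf("clean -> %#lx, dirty -> %#lx\n", pte_to_swp(clean), pte_to_swp(dirty));
    return 0;
}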
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d700a293..84662ecc7b51 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
802asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, 802asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
803 int __user *); 803 int __user *);
804#else 804#else
805#ifdef CONFIG_CLONE_BACKWARDS3
806asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
807 int __user *, int);
808#else
805asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, 809asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
806 int __user *, int); 810 int __user *, int);
807#endif 811#endif
812#endif
808 813
809asmlinkage long sys_execve(const char __user *filename, 814asmlinkage long sys_execve(const char __user *filename,
810 const char __user *const __user *argv, 815 const char __user *const __user *argv,
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index f18b91966d3d..8a358a2c97e6 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
122 if (rc > 0) 122 if (rc > 0)
123 /* local bh are disabled so it is ok to use _BH */ 123 /* local bh are disabled so it is ok to use _BH */
124 NET_ADD_STATS_BH(sock_net(sk), 124 NET_ADD_STATS_BH(sock_net(sk),
125 LINUX_MIB_LOWLATENCYRXPACKETS, rc); 125 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
126 126
127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && 127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
128 !need_resched() && !busy_loop_timeout(end_time)); 128 !need_resched() && !busy_loop_timeout(end_time));
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
162 return false; 162 return false;
163} 163}
164 164
165static inline bool sk_busy_poll(struct sock *sk, int nonblock)
166{
167 return false;
168}
169
170static inline void skb_mark_napi_id(struct sk_buff *skb, 165static inline void skb_mark_napi_id(struct sk_buff *skb,
171 struct napi_struct *napi) 166 struct napi_struct *napi)
172{ 167{
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 781b3cf86a2f..a354db5b7662 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
145 return INET_ECN_encapsulate(tos, inner); 145 return INET_ECN_encapsulate(tos, inner);
146} 146}
147 147
148static inline void tunnel_ip_select_ident(struct sk_buff *skb,
149 const struct iphdr *old_iph,
150 struct dst_entry *dst)
151{
152 struct iphdr *iph = ip_hdr(skb);
153
154 /* Use inner packet iph-id if possible. */
155 if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
156 iph->id = old_iph->id;
157 else
158 __ip_select_ident(iph, dst,
159 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
160}
161
162int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); 148int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
163int iptunnel_xmit(struct net *net, struct rtable *rt, 149int iptunnel_xmit(struct net *net, struct rtable *rt,
164 struct sk_buff *skb, 150 struct sk_buff *skb,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6eab63363e59..e5ae0c50fa9c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -683,13 +683,19 @@ struct psched_ratecfg {
683 u64 rate_bytes_ps; /* bytes per second */ 683 u64 rate_bytes_ps; /* bytes per second */
684 u32 mult; 684 u32 mult;
685 u16 overhead; 685 u16 overhead;
686 u8 linklayer;
686 u8 shift; 687 u8 shift;
687}; 688};
688 689
689static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, 690static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
690 unsigned int len) 691 unsigned int len)
691{ 692{
692 return ((u64)(len + r->overhead) * r->mult) >> r->shift; 693 len += r->overhead;
694
695 if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
696 return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;
697
698 return ((u64)len * r->mult) >> r->shift;
693} 699}
694 700
695extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); 701extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
700 memset(res, 0, sizeof(*res)); 706 memset(res, 0, sizeof(*res));
701 res->rate = r->rate_bytes_ps; 707 res->rate = r->rate_bytes_ps;
702 res->overhead = r->overhead; 708 res->overhead = r->overhead;
709 res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
703} 710}
704 711
705#endif 712#endif
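For TC_LINKLAYER_ATM the payload travels in 53-byte cells carrying 48 bytes each, so transmit time must be computed from the rounded-up cell count rather than the raw frame length. A stand-alone rendering of the psched_l2t_ns() arithmetic (function name and the mult/shift values are invented for illustration; the pair is the scheduler's fixed-point encoding of time-per-byte):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint64_t l2t_ns(uint32_t len, uint16_t overhead, int atm_linklayer,
                       uint32_t mult, uint8_t shift)
{
    len += overhead;
    if (atm_linklayer)
        /* every started 48-byte payload chunk occupies one 53-byte cell */
        len = DIV_ROUND_UP(len, 48) * 53;
    return ((uint64_t)len * mult) >> shift;
}

int main(void)
{
    /* A 1500-byte frame needs 32 cells, i.e. 1696 bytes of wire time on ATM */
    printf("ethernet: %llu ns, atm: %llu ns\n",
           (unsigned long long)l2t_ns(1500, 0, 0, 8, 0),
           (unsigned long long)l2t_ns(1500, 0, 1, 8, 0));
    return 0;
}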
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0c7d8c..09d62b9228ff 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@ struct tc_estimator {
73#define TC_H_ROOT (0xFFFFFFFFU) 73#define TC_H_ROOT (0xFFFFFFFFU)
74#define TC_H_INGRESS (0xFFFFFFF1U) 74#define TC_H_INGRESS (0xFFFFFFF1U)
75 75
76/* Need to correspond to iproute2 tc/tc_core.h "enum link_layer" */
77enum tc_link_layer {
78 TC_LINKLAYER_UNAWARE, /* indicates an old, linklayer-unaware iproute2 util */
79 TC_LINKLAYER_ETHERNET,
80 TC_LINKLAYER_ATM,
81};
82#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
83
76struct tc_ratespec { 84struct tc_ratespec {
77 unsigned char cell_log; 85 unsigned char cell_log;
78 unsigned char __reserved; 86 __u8 linklayer; /* lower 4 bits */
79 unsigned short overhead; 87 unsigned short overhead;
80 short cell_align; 88 short cell_align;
81 unsigned short mpu; 89 unsigned short mpu;
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index af0a674cc677..a1356d3b54df 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -253,7 +253,7 @@ enum
253 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ 253 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
254 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ 254 LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
255 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ 255 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
256 LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ 256 LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
257 __LINUX_MIB_MAX 257 __LINUX_MIB_MAX
258}; 258};
259 259
diff --git a/kernel/fork.c b/kernel/fork.c
index 403d2bb8a968..e23bb19e2a3e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
1679 int __user *, parent_tidptr, 1679 int __user *, parent_tidptr,
1680 int __user *, child_tidptr, 1680 int __user *, child_tidptr,
1681 int, tls_val) 1681 int, tls_val)
1682#elif defined(CONFIG_CLONE_BACKWARDS3)
1683SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
1684 int, stack_size,
1685 int __user *, parent_tidptr,
1686 int __user *, child_tidptr,
1687 int, tls_val)
1682#else 1688#else
1683SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 1689SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1684 int __user *, parent_tidptr, 1690 int __user *, parent_tidptr,
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86eb..a52ee7bb830d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
686 might_sleep(); 686 might_sleep();
687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
688 0, &ctx->dep_map, _RET_IP_, ctx); 688 0, &ctx->dep_map, _RET_IP_, ctx);
689 if (!ret && ctx->acquired > 0) 689 if (!ret && ctx->acquired > 1)
690 return ww_mutex_deadlock_injection(lock, ctx); 690 return ww_mutex_deadlock_injection(lock, ctx);
691 691
692 return ret; 692 return ret;
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
703 0, &ctx->dep_map, _RET_IP_, ctx); 703 0, &ctx->dep_map, _RET_IP_, ctx);
704 704
705 if (!ret && ctx->acquired > 0) 705 if (!ret && ctx->acquired > 1)
706 return ww_mutex_deadlock_injection(lock, ctx); 706 return ww_mutex_deadlock_injection(lock, ctx);
707 707
708 return ret; 708 return ret;
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9c..a394297f8b2f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
296} 296}
297EXPORT_SYMBOL_GPL(pm_qos_request_active); 297EXPORT_SYMBOL_GPL(pm_qos_request_active);
298 298
299static void __pm_qos_update_request(struct pm_qos_request *req,
300 s32 new_value)
301{
302 trace_pm_qos_update_request(req->pm_qos_class, new_value);
303
304 if (new_value != req->node.prio)
305 pm_qos_update_target(
306 pm_qos_array[req->pm_qos_class]->constraints,
307 &req->node, PM_QOS_UPDATE_REQ, new_value);
308}
309
299/** 310/**
300 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout 311 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
301 * @work: work struct for the delayed work (timeout) 312 * @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
308 struct pm_qos_request, 319 struct pm_qos_request,
309 work); 320 work);
310 321
311 pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); 322 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
312} 323}
313 324
314/** 325/**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
364 } 375 }
365 376
366 cancel_delayed_work_sync(&req->work); 377 cancel_delayed_work_sync(&req->work);
367 378 __pm_qos_update_request(req, new_value);
368 trace_pm_qos_update_request(req->pm_qos_class, new_value);
369 if (new_value != req->node.prio)
370 pm_qos_update_target(
371 pm_qos_array[req->pm_qos_class]->constraints,
372 &req->node, PM_QOS_UPDATE_REQ, new_value);
373} 379}
374EXPORT_SYMBOL_GPL(pm_qos_update_request); 380EXPORT_SYMBOL_GPL(pm_qos_update_request);
375 381
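The point of the factored-out helper is that pm_qos_work_fn() runs from the work item itself, so it must not go through pm_qos_update_request(): its cancel_delayed_work_sync() would wait on the very work currently executing. The shape of the refactor, reduced to plain C with the workqueue machinery elided (a sketch of the structure, not the kernel code):

#include <stdio.h>

static int target_value;

static void __update_request(int new_value)
{
    if (new_value != target_value)
        target_value = new_value;   /* ~pm_qos_update_target() */
}

static void update_request(int new_value)
{
    /* cancel_delayed_work_sync(&req->work) belongs here: safe only
     * when we are not called from the work handler itself. */
    __update_request(new_value);
}

static void work_fn(void)           /* the timeout handler */
{
    __update_request(-1);           /* ~PM_QOS_DEFAULT_VALUE */
}

int main(void)
{
    update_request(100);
    work_fn();
    printf("target=%d\n", target_value);
    return 0;
}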
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb7bfeb..05c39f030314 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
933/** 933/**
934 * task_curr - is this task currently executing on a CPU? 934 * task_curr - is this task currently executing on a CPU?
935 * @p: the task in question. 935 * @p: the task in question.
936 *
937 * Return: 1 if the task is currently executing. 0 otherwise.
936 */ 938 */
937inline int task_curr(const struct task_struct *p) 939inline int task_curr(const struct task_struct *p)
938{ 940{
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
1482 * the simpler "current->state = TASK_RUNNING" to mark yourself 1484 * the simpler "current->state = TASK_RUNNING" to mark yourself
1483 * runnable without the overhead of this. 1485 * runnable without the overhead of this.
1484 * 1486 *
1485 * Returns %true if @p was woken up, %false if it was already running 1487 * Return: %true if @p was woken up, %false if it was already running,
1486 * or @state didn't match @p's state. 1488 * or @state didn't match @p's state.
1487 */ 1489 */
1488static int 1490static int
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1491 unsigned long flags; 1493 unsigned long flags;
1492 int cpu, success = 0; 1494 int cpu, success = 0;
1493 1495
1494 smp_wmb(); 1496 /*
1497 * If we are going to wake up a thread waiting for CONDITION, we
1498 * need to ensure that CONDITION=1 done by the caller cannot be
1499 * reordered with the p->state check below. This pairs with the mb()
1500 * in set_current_state() that the waiting thread does.
1501 */
1502 smp_mb__before_spinlock();
1495 raw_spin_lock_irqsave(&p->pi_lock, flags); 1503 raw_spin_lock_irqsave(&p->pi_lock, flags);
1496 if (!(p->state & state)) 1504 if (!(p->state & state))
1497 goto out; 1505 goto out;
@@ -1577,8 +1585,9 @@ out:
1577 * @p: The process to be woken up. 1585 * @p: The process to be woken up.
1578 * 1586 *
1579 * Attempt to wake up the nominated process and move it to the set of runnable 1587 * Attempt to wake up the nominated process and move it to the set of runnable
1580 * processes. Returns 1 if the process was woken up, 0 if it was already 1588 * processes.
1581 * running. 1589 *
1590 * Return: 1 if the process was woken up, 0 if it was already running.
1582 * 1591 *
1583 * It may be assumed that this function implies a write memory barrier before 1592 * It may be assumed that this function implies a write memory barrier before
1584 * changing the task state if and only if any tasks are woken up. 1593 * changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2200,8 @@ void scheduler_tick(void)
2191 * This makes sure that uptime, CFS vruntime, load 2200 * This makes sure that uptime, CFS vruntime, load
2192 * balancing, etc... continue to move forward, even 2201 * balancing, etc... continue to move forward, even
2193 * with a very low granularity. 2202 * with a very low granularity.
2203 *
2204 * Return: Maximum deferment in nanoseconds.
2194 */ 2205 */
2195u64 scheduler_tick_max_deferment(void) 2206u64 scheduler_tick_max_deferment(void)
2196{ 2207{
@@ -2394,6 +2405,12 @@ need_resched:
2394 if (sched_feat(HRTICK)) 2405 if (sched_feat(HRTICK))
2395 hrtick_clear(rq); 2406 hrtick_clear(rq);
2396 2407
2408 /*
2409 * Make sure that signal_pending_state()->signal_pending() below
2410 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
2411 * done by the caller to avoid the race with signal_wake_up().
2412 */
2413 smp_mb__before_spinlock();
2397 raw_spin_lock_irq(&rq->lock); 2414 raw_spin_lock_irq(&rq->lock);
2398 2415
2399 switch_count = &prev->nivcsw; 2416 switch_count = &prev->nivcsw;
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion);
2796 * specified timeout to expire. The timeout is in jiffies. It is not 2813 * specified timeout to expire. The timeout is in jiffies. It is not
2797 * interruptible. 2814 * interruptible.
2798 * 2815 *
2799 * The return value is 0 if timed out, and positive (at least 1, or number of 2816 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2800 * jiffies left till timeout) if completed. 2817 * till timeout) if completed.
2801 */ 2818 */
2802unsigned long __sched 2819unsigned long __sched
2803wait_for_completion_timeout(struct completion *x, unsigned long timeout) 2820wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
2829 * specified timeout to expire. The timeout is in jiffies. It is not 2846 * specified timeout to expire. The timeout is in jiffies. It is not
2830 * interruptible. The caller is accounted as waiting for IO. 2847 * interruptible. The caller is accounted as waiting for IO.
2831 * 2848 *
2832 * The return value is 0 if timed out, and positive (at least 1, or number of 2849 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
2833 * jiffies left till timeout) if completed. 2850 * till timeout) if completed.
2834 */ 2851 */
2835unsigned long __sched 2852unsigned long __sched
2836wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) 2853wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
2846 * This waits for completion of a specific task to be signaled. It is 2863 * This waits for completion of a specific task to be signaled. It is
2847 * interruptible. 2864 * interruptible.
2848 * 2865 *
2849 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2866 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2850 */ 2867 */
2851int __sched wait_for_completion_interruptible(struct completion *x) 2868int __sched wait_for_completion_interruptible(struct completion *x)
2852{ 2869{
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
2865 * This waits for either a completion of a specific task to be signaled or for a 2882 * This waits for either a completion of a specific task to be signaled or for a
2866 * specified timeout to expire. It is interruptible. The timeout is in jiffies. 2883 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
2867 * 2884 *
2868 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2885 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2869 * positive (at least 1, or number of jiffies left till timeout) if completed. 2886 * or number of jiffies left till timeout) if completed.
2870 */ 2887 */
2871long __sched 2888long __sched
2872wait_for_completion_interruptible_timeout(struct completion *x, 2889wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
2883 * This waits to be signaled for completion of a specific task. It can be 2900 * This waits to be signaled for completion of a specific task. It can be
2884 * interrupted by a kill signal. 2901 * interrupted by a kill signal.
2885 * 2902 *
2886 * The return value is -ERESTARTSYS if interrupted, 0 if completed. 2903 * Return: -ERESTARTSYS if interrupted, 0 if completed.
2887 */ 2904 */
2888int __sched wait_for_completion_killable(struct completion *x) 2905int __sched wait_for_completion_killable(struct completion *x)
2889{ 2906{
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
2903 * signaled or for a specified timeout to expire. It can be 2920 * signaled or for a specified timeout to expire. It can be
2904 * interrupted by a kill signal. The timeout is in jiffies. 2921 * interrupted by a kill signal. The timeout is in jiffies.
2905 * 2922 *
2906 * The return value is -ERESTARTSYS if interrupted, 0 if timed out, 2923 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
2907 * positive (at least 1, or number of jiffies left till timeout) if completed. 2924 * or number of jiffies left till timeout) if completed.
2908 */ 2925 */
2909long __sched 2926long __sched
2910wait_for_completion_killable_timeout(struct completion *x, 2927wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
2918 * try_wait_for_completion - try to decrement a completion without blocking 2935 * try_wait_for_completion - try to decrement a completion without blocking
2919 * @x: completion structure 2936 * @x: completion structure
2920 * 2937 *
2921 * Returns: 0 if a decrement cannot be done without blocking 2938 * Return: 0 if a decrement cannot be done without blocking
2922 * 1 if a decrement succeeded. 2939 * 1 if a decrement succeeded.
2923 * 2940 *
2924 * If a completion is being used as a counting completion, 2941 * If a completion is being used as a counting completion,
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
2945 * completion_done - Test to see if a completion has any waiters 2962 * completion_done - Test to see if a completion has any waiters
2946 * @x: completion structure 2963 * @x: completion structure
2947 * 2964 *
2948 * Returns: 0 if there are waiters (wait_for_completion() in progress) 2965 * Return: 0 if there are waiters (wait_for_completion() in progress)
2949 * 1 if there are no waiters. 2966 * 1 if there are no waiters.
2950 * 2967 *
2951 */ 2968 */
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment)
3182 * task_prio - return the priority value of a given task. 3199 * task_prio - return the priority value of a given task.
3183 * @p: the task in question. 3200 * @p: the task in question.
3184 * 3201 *
3185 * This is the priority value as seen by users in /proc. 3202 * Return: The priority value as seen by users in /proc.
3186 * RT tasks are offset by -200. Normal tasks are centered 3203 * RT tasks are offset by -200. Normal tasks are centered
3187 * around 0, value goes from -16 to +15. 3204 * around 0, value goes from -16 to +15.
3188 */ 3205 */
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p)
3194/** 3211/**
3195 * task_nice - return the nice value of a given task. 3212 * task_nice - return the nice value of a given task.
3196 * @p: the task in question. 3213 * @p: the task in question.
3214 *
3215 * Return: The nice value [ -20 ... 0 ... 19 ].
3197 */ 3216 */
3198int task_nice(const struct task_struct *p) 3217int task_nice(const struct task_struct *p)
3199{ 3218{
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice);
3204/** 3223/**
3205 * idle_cpu - is a given cpu idle currently? 3224 * idle_cpu - is a given cpu idle currently?
3206 * @cpu: the processor in question. 3225 * @cpu: the processor in question.
3226 *
3227 * Return: 1 if the CPU is currently idle. 0 otherwise.
3207 */ 3228 */
3208int idle_cpu(int cpu) 3229int idle_cpu(int cpu)
3209{ 3230{
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu)
3226/** 3247/**
3227 * idle_task - return the idle task for a given cpu. 3248 * idle_task - return the idle task for a given cpu.
3228 * @cpu: the processor in question. 3249 * @cpu: the processor in question.
3250 *
3251 * Return: The idle task for the cpu @cpu.
3229 */ 3252 */
3230struct task_struct *idle_task(int cpu) 3253struct task_struct *idle_task(int cpu)
3231{ 3254{
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu)
3235/** 3258/**
3236 * find_process_by_pid - find a process with a matching PID value. 3259 * find_process_by_pid - find a process with a matching PID value.
3237 * @pid: the pid in question. 3260 * @pid: the pid in question.
3261 *
3262 * Return: The task of @pid, if found. %NULL otherwise.
3238 */ 3263 */
3239static struct task_struct *find_process_by_pid(pid_t pid) 3264static struct task_struct *find_process_by_pid(pid_t pid)
3240{ 3265{
@@ -3432,6 +3457,8 @@ recheck:
3432 * @policy: new policy. 3457 * @policy: new policy.
3433 * @param: structure containing the new RT priority. 3458 * @param: structure containing the new RT priority.
3434 * 3459 *
3460 * Return: 0 on success. An error code otherwise.
3461 *
3435 * NOTE that the task may be already dead. 3462 * NOTE that the task may be already dead.
3436 */ 3463 */
3437int sched_setscheduler(struct task_struct *p, int policy, 3464int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
3451 * current context has permission. For example, this is needed in 3478 * current context has permission. For example, this is needed in
3452 * stop_machine(): we create temporary high priority worker threads, 3479 * stop_machine(): we create temporary high priority worker threads,
3453 * but our caller might not have that capability. 3480 * but our caller might not have that capability.
3481 *
3482 * Return: 0 on success. An error code otherwise.
3454 */ 3483 */
3455int sched_setscheduler_nocheck(struct task_struct *p, int policy, 3484int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3456 const struct sched_param *param) 3485 const struct sched_param *param)
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3485 * @pid: the pid in question. 3514 * @pid: the pid in question.
3486 * @policy: new policy. 3515 * @policy: new policy.
3487 * @param: structure containing the new RT priority. 3516 * @param: structure containing the new RT priority.
3517 *
3518 * Return: 0 on success. An error code otherwise.
3488 */ 3519 */
3489SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, 3520SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3490 struct sched_param __user *, param) 3521 struct sched_param __user *, param)
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
3500 * sys_sched_setparam - set/change the RT priority of a thread 3531 * sys_sched_setparam - set/change the RT priority of a thread
3501 * @pid: the pid in question. 3532 * @pid: the pid in question.
3502 * @param: structure containing the new RT priority. 3533 * @param: structure containing the new RT priority.
3534 *
3535 * Return: 0 on success. An error code otherwise.
3503 */ 3536 */
3504SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) 3537SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3505{ 3538{
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3509/** 3542/**
3510 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 3543 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3511 * @pid: the pid in question. 3544 * @pid: the pid in question.
3545 *
3546 * Return: On success, the policy of the thread. Otherwise, a negative error
3547 * code.
3512 */ 3548 */
3513SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) 3549SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3514{ 3550{
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3535 * sys_sched_getparam - get the RT priority of a thread 3571 * sys_sched_getparam - get the RT priority of a thread
3536 * @pid: the pid in question. 3572 * @pid: the pid in question.
3537 * @param: structure containing the RT priority. 3573 * @param: structure containing the RT priority.
3574 *
3575 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
3576 * code.
3538 */ 3577 */
3539SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) 3578SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3540{ 3579{
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3659 * @pid: pid of the process 3698 * @pid: pid of the process
3660 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3699 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3661 * @user_mask_ptr: user-space pointer to the new cpu mask 3700 * @user_mask_ptr: user-space pointer to the new cpu mask
3701 *
3702 * Return: 0 on success. An error code otherwise.
3662 */ 3703 */
3663SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, 3704SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
3664 unsigned long __user *, user_mask_ptr) 3705 unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3751,8 @@ out_unlock:
3710 * @pid: pid of the process 3751 * @pid: pid of the process
3711 * @len: length in bytes of the bitmask pointed to by user_mask_ptr 3752 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3712 * @user_mask_ptr: user-space pointer to hold the current cpu mask 3753 * @user_mask_ptr: user-space pointer to hold the current cpu mask
3754 *
3755 * Return: 0 on success. An error code otherwise.
3713 */ 3756 */
3714SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, 3757SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3715 unsigned long __user *, user_mask_ptr) 3758 unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3744 * 3787 *
3745 * This function yields the current CPU to other tasks. If there are no 3788 * This function yields the current CPU to other tasks. If there are no
3746 * other threads running on this CPU then this function will return. 3789 * other threads running on this CPU then this function will return.
3790 *
3791 * Return: 0.
3747 */ 3792 */
3748SYSCALL_DEFINE0(sched_yield) 3793SYSCALL_DEFINE0(sched_yield)
3749{ 3794{
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield);
3869 * It's the caller's job to ensure that the target task struct 3914 * It's the caller's job to ensure that the target task struct
3870 * can't go away on us before we can do any checks. 3915 * can't go away on us before we can do any checks.
3871 * 3916 *
3872 * Returns: 3917 * Return:
3873 * true (>0) if we indeed boosted the target task. 3918 * true (>0) if we indeed boosted the target task.
3874 * false (0) if we failed to boost the target. 3919 * false (0) if we failed to boost the target.
3875 * -ESRCH if there's no task to yield to. 3920 * -ESRCH if there's no task to yield to.
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout)
3972 * sys_sched_get_priority_max - return maximum RT priority. 4017 * sys_sched_get_priority_max - return maximum RT priority.
3973 * @policy: scheduling class. 4018 * @policy: scheduling class.
3974 * 4019 *
3975 * this syscall returns the maximum rt_priority that can be used 4020 * Return: On success, this syscall returns the maximum
3976 * by a given scheduling class. 4021 * rt_priority that can be used by a given scheduling class.
4022 * On failure, a negative error code is returned.
3977 */ 4023 */
3978SYSCALL_DEFINE1(sched_get_priority_max, int, policy) 4024SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3979{ 4025{
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3997 * sys_sched_get_priority_min - return minimum RT priority. 4043 * sys_sched_get_priority_min - return minimum RT priority.
3998 * @policy: scheduling class. 4044 * @policy: scheduling class.
3999 * 4045 *
4000 * this syscall returns the minimum rt_priority that can be used 4046 * Return: On success, this syscall returns the minimum
4001 * by a given scheduling class. 4047 * rt_priority that can be used by a given scheduling class.
4048 * On failure, a negative error code is returned.
4002 */ 4049 */
4003SYSCALL_DEFINE1(sched_get_priority_min, int, policy) 4050SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4004{ 4051{
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4024 * 4071 *
4025 * this syscall writes the default timeslice value of a given process 4072 * this syscall writes the default timeslice value of a given process
4026 * into the user-space timespec buffer. A value of '0' means infinity. 4073 * into the user-space timespec buffer. A value of '0' means infinity.
4074 *
4075 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
4076 * an error code.
4027 */ 4077 */
4028SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 4078SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4029 struct timespec __user *, interval) 4079 struct timespec __user *, interval)
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void)
6632 * @cpu: the processor in question. 6682 * @cpu: the processor in question.
6633 * 6683 *
6634 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 6684 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6685 *
6686 * Return: The current task for @cpu.
6635 */ 6687 */
6636struct task_struct *curr_task(int cpu) 6688struct task_struct *curr_task(int cpu)
6637{ 6689{
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46f..8b836b376d91 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
62 * any discrepancies created by racing against the uncertainty of the current 62 * any discrepancies created by racing against the uncertainty of the current
63 * priority configuration. 63 * priority configuration.
64 * 64 *
65 * Returns: (int)bool - CPUs were found 65 * Return: (int)bool - CPUs were found
66 */ 66 */
67int cpupri_find(struct cpupri *cp, struct task_struct *p, 67int cpupri_find(struct cpupri *cp, struct task_struct *p,
68 struct cpumask *lowest_mask) 68 struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
203 * cpupri_init - initialize the cpupri structure 203 * cpupri_init - initialize the cpupri structure
204 * @cp: The cpupri context 204 * @cp: The cpupri context
205 * 205 *
206 * Returns: -ENOMEM if memory fails. 206 * Return: -ENOMEM on memory allocation failure.
207 */ 207 */
208int cpupri_init(struct cpupri *cp) 208int cpupri_init(struct cpupri *cp)
209{ 209{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9565645e3202..68f1609ca149 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2032 */ 2032 */
2033 update_entity_load_avg(curr, 1); 2033 update_entity_load_avg(curr, 1);
2034 update_cfs_rq_blocked_load(cfs_rq, 1); 2034 update_cfs_rq_blocked_load(cfs_rq, 1);
2035 update_cfs_shares(cfs_rq);
2035 2036
2036#ifdef CONFIG_SCHED_HRTICK 2037#ifdef CONFIG_SCHED_HRTICK
2037 /* 2038 /*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
4280 * get_sd_load_idx - Obtain the load index for a given sched domain. 4281 * get_sd_load_idx - Obtain the load index for a given sched domain.
4281 * @sd: The sched_domain whose load_idx is to be obtained. 4282 * @sd: The sched_domain whose load_idx is to be obtained.
4282 * @idle: The idle status of the CPU whose sd load_idx is obtained. 4283 * @idle: The idle status of the CPU whose sd load_idx is obtained.
4284 *
4285 * Return: The load index.
4283 */ 4286 */
4284static inline int get_sd_load_idx(struct sched_domain *sd, 4287static inline int get_sd_load_idx(struct sched_domain *sd,
4285 enum cpu_idle_type idle) 4288 enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
4574 * 4577 *
4575 * Determine if @sg is a busier group than the previously selected 4578 * Determine if @sg is a busier group than the previously selected
4576 * busiest group. 4579 * busiest group.
4580 *
4581 * Return: %true if @sg is a busier group than the previously selected
4582 * busiest group. %false otherwise.
4577 */ 4583 */
4578static bool update_sd_pick_busiest(struct lb_env *env, 4584static bool update_sd_pick_busiest(struct lb_env *env,
4579 struct sd_lb_stats *sds, 4585 struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
4691 * assuming lower CPU number will be equivalent to lower a SMT thread 4697 * assuming lower CPU number will be equivalent to lower a SMT thread
4692 * number. 4698 * number.
4693 * 4699 *
4694 * Returns 1 when packing is required and a task should be moved to 4700 * Return: 1 when packing is required and a task should be moved to
4695 * this CPU. The amount of the imbalance is returned in *imbalance. 4701 * this CPU. The amount of the imbalance is returned in *imbalance.
4696 * 4702 *
4697 * @env: The load balancing environment. 4703 * @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4869 * @balance: Pointer to a variable indicating if this_cpu 4875 * @balance: Pointer to a variable indicating if this_cpu
4870 * is the appropriate cpu to perform load balancing at this_level. 4876 * is the appropriate cpu to perform load balancing at this_level.
4871 * 4877 *
4872 * Returns: - the busiest group if imbalance exists. 4878 * Return: - The busiest group if imbalance exists.
4873 * - If no imbalance and user has opted for power-savings balance, 4879 * - If no imbalance and user has opted for power-savings balance,
4874 * return the least loaded group whose CPUs can be 4880 * return the least loaded group whose CPUs can be
4875 * put to idle by rebalancing its tasks onto our group. 4881 * put to idle by rebalancing its tasks onto our group.
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61e..5bff08147768 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
57 unsigned long addr, unsigned long pgoff, pgprot_t prot) 57 unsigned long addr, unsigned long pgoff, pgprot_t prot)
58{ 58{
59 int err = -ENOMEM; 59 int err = -ENOMEM;
60 pte_t *pte; 60 pte_t *pte, ptfile;
61 spinlock_t *ptl; 61 spinlock_t *ptl;
62 62
63 pte = get_locked_pte(mm, addr, &ptl); 63 pte = get_locked_pte(mm, addr, &ptl);
64 if (!pte) 64 if (!pte)
65 goto out; 65 goto out;
66 66
67 if (!pte_none(*pte)) 67 ptfile = pgoff_to_pte(pgoff);
68
69 if (!pte_none(*pte)) {
70 if (pte_present(*pte) && pte_soft_dirty(*pte))
71 ptfile = pte_file_mksoft_dirty(ptfile);
68 zap_pte(mm, vma, addr, pte); 72 zap_pte(mm, vma, addr, pte);
73 }
69 74
70 set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); 75 set_pte_at(mm, addr, pte, ptfile);
71 /* 76 /*
72 * We don't need to run update_mmu_cache() here because the "file pte" 77 * We don't need to run update_mmu_cache() here because the "file pte"
73 * being installed by install_file_pte() is not a real pte - it's a 78 * being installed by install_file_pte() is not a real pte - it's a
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2490 2490
2491 mm = vma->vm_mm; 2491 mm = vma->vm_mm;
2492 2492
2493 tlb_gather_mmu(&tlb, mm, 0); 2493 tlb_gather_mmu(&tlb, mm, start, end);
2494 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 2494 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2495 tlb_finish_mmu(&tlb, start, end); 2495 tlb_finish_mmu(&tlb, start, end);
2496} 2496}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c290a1cf3862..c5792a5d87ce 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
3195 if (!s->memcg_params) 3195 if (!s->memcg_params)
3196 return -ENOMEM; 3196 return -ENOMEM;
3197 3197
3198 INIT_WORK(&s->memcg_params->destroy,
3199 kmem_cache_destroy_work_func);
3200 if (memcg) { 3198 if (memcg) {
3201 s->memcg_params->memcg = memcg; 3199 s->memcg_params->memcg = memcg;
3202 s->memcg_params->root_cache = root_cache; 3200 s->memcg_params->root_cache = root_cache;
3201 INIT_WORK(&s->memcg_params->destroy,
3202 kmem_cache_destroy_work_func);
3203 } else 3203 } else
3204 s->memcg_params->is_root_cache = true; 3204 s->memcg_params->is_root_cache = true;
3205 3205
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734fc..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
209 * tear-down from @mm. The @fullmm argument is used when @mm is without 209 * tear-down from @mm. The @fullmm argument is used when @mm is without
210 * users and we're going to destroy the full address space (exit/execve). 210 * users and we're going to destroy the full address space (exit/execve).
211 */ 211 */
212void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) 212void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
213{ 213{
214 tlb->mm = mm; 214 tlb->mm = mm;
215 215
216 tlb->fullmm = fullmm; 216 /* Is it from 0 to ~0? */
217 tlb->fullmm = !(start | (end+1));
217 tlb->need_flush_all = 0; 218 tlb->need_flush_all = 0;
218 tlb->start = -1UL; 219 tlb->start = start;
219 tlb->end = 0; 220 tlb->end = end;
220 tlb->need_flush = 0; 221 tlb->need_flush = 0;
221 tlb->local.next = NULL; 222 tlb->local.next = NULL;
222 tlb->local.nr = 0; 223 tlb->local.nr = 0;
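With the range now passed in, a full-address-space teardown is encoded in the arguments themselves: callers pass (0, -1) and the gather infers fullmm from that pair. A small check of the predicate (helper name invented for the demo):

#include <stdio.h>

/* !(start | (end + 1)) is nonzero exactly for (0, ~0UL):
 * end + 1 wraps to 0 when end == -1UL, which is well defined
 * for unsigned arithmetic. */
static int is_fullmm(unsigned long start, unsigned long end)
{
    return !(start | (end + 1));
}

int main(void)
{
    printf("(0, -1UL)        -> %d\n", is_fullmm(0, -1UL));        /* 1 */
    printf("(0x1000, 0x2000) -> %d\n", is_fullmm(0x1000, 0x2000)); /* 0 */
    printf("(0, 0x2000)      -> %d\n", is_fullmm(0, 0x2000));      /* 0 */
    return 0;
}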
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
256{ 257{
257 struct mmu_gather_batch *batch, *next; 258 struct mmu_gather_batch *batch, *next;
258 259
259 tlb->start = start;
260 tlb->end = end;
261 tlb_flush_mmu(tlb); 260 tlb_flush_mmu(tlb);
262 261
263 /* keep the page table cache within bounds */ 262 /* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
1099 spinlock_t *ptl; 1098 spinlock_t *ptl;
1100 pte_t *start_pte; 1099 pte_t *start_pte;
1101 pte_t *pte; 1100 pte_t *pte;
1102 unsigned long range_start = addr;
1103 1101
1104again: 1102again:
1105 init_rss_vec(rss); 1103 init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
1141 continue; 1139 continue;
1142 if (unlikely(details) && details->nonlinear_vma 1140 if (unlikely(details) && details->nonlinear_vma
1143 && linear_page_index(details->nonlinear_vma, 1141 && linear_page_index(details->nonlinear_vma,
1144 addr) != page->index) 1142 addr) != page->index) {
1145 set_pte_at(mm, addr, pte, 1143 pte_t ptfile = pgoff_to_pte(page->index);
1146 pgoff_to_pte(page->index)); 1144 if (pte_soft_dirty(ptent))
1145 ptfile = pte_file_mksoft_dirty(ptfile);
1146 set_pte_at(mm, addr, pte, ptfile);
1147 }
1147 if (PageAnon(page)) 1148 if (PageAnon(page))
1148 rss[MM_ANONPAGES]--; 1149 rss[MM_ANONPAGES]--;
1149 else { 1150 else {
@@ -1202,17 +1203,25 @@ again:
1202 * and page-free while holding it. 1203 * and page-free while holding it.
1203 */ 1204 */
1204 if (force_flush) { 1205 if (force_flush) {
1206 unsigned long old_end;
1207
1205 force_flush = 0; 1208 force_flush = 0;
1206 1209
1207#ifdef HAVE_GENERIC_MMU_GATHER 1210 /*
1208 tlb->start = range_start; 1211 * Flush the TLB just for the previous segment,
1212 * then update the range to be the remaining
1213 * TLB range.
1214 */
1215 old_end = tlb->end;
1209 tlb->end = addr; 1216 tlb->end = addr;
1210#endif 1217
1211 tlb_flush_mmu(tlb); 1218 tlb_flush_mmu(tlb);
1212 if (addr != end) { 1219
1213 range_start = addr; 1220 tlb->start = addr;
1221 tlb->end = old_end;
1222
1223 if (addr != end)
1214 goto again; 1224 goto again;
1215 }
1216 } 1225 }
1217 1226
1218 return addr; 1227 return addr;
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1397 unsigned long end = start + size; 1406 unsigned long end = start + size;
1398 1407
1399 lru_add_drain(); 1408 lru_add_drain();
1400 tlb_gather_mmu(&tlb, mm, 0); 1409 tlb_gather_mmu(&tlb, mm, start, end);
1401 update_hiwater_rss(mm); 1410 update_hiwater_rss(mm);
1402 mmu_notifier_invalidate_range_start(mm, start, end); 1411 mmu_notifier_invalidate_range_start(mm, start, end);
1403 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) 1412 for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
1423 unsigned long end = address + size; 1432 unsigned long end = address + size;
1424 1433
1425 lru_add_drain(); 1434 lru_add_drain();
1426 tlb_gather_mmu(&tlb, mm, 0); 1435 tlb_gather_mmu(&tlb, mm, address, end);
1427 update_hiwater_rss(mm); 1436 update_hiwater_rss(mm);
1428 mmu_notifier_invalidate_range_start(mm, address, end); 1437 mmu_notifier_invalidate_range_start(mm, address, end);
1429 unmap_single_vma(&tlb, vma, address, end, details); 1438 unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
3115 exclusive = 1; 3124 exclusive = 1;
3116 } 3125 }
3117 flush_icache_page(vma, page); 3126 flush_icache_page(vma, page);
3127 if (pte_swp_soft_dirty(orig_pte))
3128 pte = pte_mksoft_dirty(pte);
3118 set_pte_at(mm, address, page_table, pte); 3129 set_pte_at(mm, address, page_table, pte);
3119 if (page == swapcache) 3130 if (page == swapcache)
3120 do_page_add_anon_rmap(page, vma, address, exclusive); 3131 do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3408 entry = mk_pte(page, vma->vm_page_prot); 3419 entry = mk_pte(page, vma->vm_page_prot);
3409 if (flags & FAULT_FLAG_WRITE) 3420 if (flags & FAULT_FLAG_WRITE)
3410 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3421 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3422 else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
3423 entry = pte_mksoft_dirty(entry);
3411 if (anon) { 3424 if (anon) {
3412 inc_mm_counter_fast(mm, MM_ANONPAGES); 3425 inc_mm_counter_fast(mm, MM_ANONPAGES);
3413 page_add_new_anon_rmap(page, vma, address); 3426 page_add_new_anon_rmap(page, vma, address);
diff --git a/mm/mmap.c b/mm/mmap.c
index 1edbaa3136c3..f9c97d10b873 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
2336 struct mmu_gather tlb; 2336 struct mmu_gather tlb;
2337 2337
2338 lru_add_drain(); 2338 lru_add_drain();
2339 tlb_gather_mmu(&tlb, mm, 0); 2339 tlb_gather_mmu(&tlb, mm, start, end);
2340 update_hiwater_rss(mm); 2340 update_hiwater_rss(mm);
2341 unmap_vmas(&tlb, vma, start, end); 2341 unmap_vmas(&tlb, vma, start, end);
2342 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 2342 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
2709 2709
2710 lru_add_drain(); 2710 lru_add_drain();
2711 flush_cache_mm(mm); 2711 flush_cache_mm(mm);
2712 tlb_gather_mmu(&tlb, mm, 1); 2712 tlb_gather_mmu(&tlb, mm, 0, -1);
2713 /* update_hiwater_rss(mm) here? but nobody should be looking */ 2713 /* update_hiwater_rss(mm) here? but nobody should be looking */
2714 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2714 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2715 unmap_vmas(&tlb, vma, 0, -1); 2715 unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71a..b2e29acd7e3d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1236 swp_entry_to_pte(make_hwpoison_entry(page))); 1236 swp_entry_to_pte(make_hwpoison_entry(page)));
1237 } else if (PageAnon(page)) { 1237 } else if (PageAnon(page)) {
1238 swp_entry_t entry = { .val = page_private(page) }; 1238 swp_entry_t entry = { .val = page_private(page) };
1239 pte_t swp_pte;
1239 1240
1240 if (PageSwapCache(page)) { 1241 if (PageSwapCache(page)) {
1241 /* 1242 /*
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1264 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); 1265 BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
1265 entry = make_migration_entry(page, pte_write(pteval)); 1266 entry = make_migration_entry(page, pte_write(pteval));
1266 } 1267 }
1267 set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); 1268 swp_pte = swp_entry_to_pte(entry);
1269 if (pte_soft_dirty(pteval))
1270 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1271 set_pte_at(mm, address, pte, swp_pte);
1268 BUG_ON(pte_file(*pte)); 1272 BUG_ON(pte_file(*pte));
1269 } else if (IS_ENABLED(CONFIG_MIGRATION) && 1273 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1270 (TTU_ACTION(flags) == TTU_MIGRATION)) { 1274 (TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1401 pteval = ptep_clear_flush(vma, address, pte); 1405 pteval = ptep_clear_flush(vma, address, pte);
1402 1406
1403 /* If nonlinear, store the file page offset in the pte. */ 1407 /* If nonlinear, store the file page offset in the pte. */
1404 if (page->index != linear_page_index(vma, address)) 1408 if (page->index != linear_page_index(vma, address)) {
1405 set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); 1409 pte_t ptfile = pgoff_to_pte(page->index);
1410 if (pte_soft_dirty(pteval))
1411 pte_file_mksoft_dirty(ptfile);
1412 set_pte_at(mm, address, pte, ptfile);
1413 }
1406 1414
1407 /* Move the dirty bit to the physical page now the pte is gone. */ 1415 /* Move the dirty bit to the physical page now the pte is gone. */
1408 if (pte_dirty(pteval)) 1416 if (pte_dirty(pteval))
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67e..6cf2e60983b7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
866} 866}
867#endif /* CONFIG_HIBERNATION */ 867#endif /* CONFIG_HIBERNATION */
868 868
869static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
870{
871#ifdef CONFIG_MEM_SOFT_DIRTY
872 /*
873 * When the pte keeps the soft dirty bit, the pte generated
874 * from the swap entry does not have it; it is still the same
875 * pte from a logical point of view.
876 */
877 pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
878 return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
879#else
880 return pte_same(pte, swp_pte);
881#endif
882}
883
869/* 884/*
870 * No need to decide whether this PTE shares the swap entry with others, 885 * No need to decide whether this PTE shares the swap entry with others,
871 * just let do_wp_page work it out if a write is requested later - to 886 * just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
892 } 907 }
893 908
894 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 909 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
895 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { 910 if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
896 mem_cgroup_cancel_charge_swapin(memcg); 911 mem_cgroup_cancel_charge_swapin(memcg);
897 ret = 0; 912 ret = 0;
898 goto out; 913 goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
947 * swapoff spends a _lot_ of time in this loop! 962 * swapoff spends a _lot_ of time in this loop!
948 * Test inline before going to call unuse_pte. 963 * Test inline before going to call unuse_pte.
949 */ 964 */
950 if (unlikely(pte_same(*pte, swp_pte))) { 965 if (unlikely(maybe_same_pte(*pte, swp_pte))) {
951 pte_unmap(pte); 966 pte_unmap(pte);
952 ret = unuse_pte(vma, pmd, addr, entry, page); 967 ret = unuse_pte(vma, pmd, addr, entry, page);
953 if (ret) 968 if (ret)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f20..6ee48aac776f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
91 91
92struct net_device *vlan_dev_real_dev(const struct net_device *dev) 92struct net_device *vlan_dev_real_dev(const struct net_device *dev)
93{ 93{
94 return vlan_dev_priv(dev)->real_dev; 94 struct net_device *ret = vlan_dev_priv(dev)->real_dev;
95
96 while (is_vlan_dev(ret))
97 ret = vlan_dev_priv(ret)->real_dev;
98
99 return ret;
95} 100}
96EXPORT_SYMBOL(vlan_dev_real_dev); 101EXPORT_SYMBOL(vlan_dev_real_dev);
97 102
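The added loop makes vlan_dev_real_dev() resolve stacked (Q-in-Q) VLAN devices to the underlying physical device in one call rather than returning the next VLAN down. A toy model of the walk, with invented types standing in for the kernel's:

#include <stdio.h>
#include <stddef.h>

struct net_device {
    const char *name;
    struct net_device *real_dev;   /* NULL when not a VLAN device */
};

static struct net_device *vlan_dev_real_dev(struct net_device *dev)
{
    while (dev->real_dev)          /* stands in for is_vlan_dev() */
        dev = dev->real_dev;
    return dev;
}

int main(void)
{
    struct net_device eth0   = { "eth0",       NULL };
    struct net_device vlan10 = { "eth0.10",    &eth0 };
    struct net_device vlan20 = { "eth0.10.20", &vlan10 };
    printf("%s -> %s\n", vlan20.name, vlan_dev_real_dev(&vlan20)->name);
    return 0;
}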
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1c..264de88db320 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
1529 * in these cases, the skb is further handled by this function and 1529 * in these cases, the skb is further handled by this function and
1530 * returns 1, otherwise it returns 0 and the caller shall further 1530 * returns 1, otherwise it returns 0 and the caller shall further
1531 * process the skb. 1531 * process the skb.
1532 *
1533 * This call might reallocate skb data.
1532 */ 1534 */
1533int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, 1535int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1534 unsigned short vid) 1536 unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4b..7614af31daff 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@ out:
508 return 0; 508 return 0;
509} 509}
510 510
511/* this call might reallocate skb data */
511static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) 512static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
512{ 513{
513 int ret = false; 514 int ret = false;
@@ -568,6 +569,7 @@ out:
568 return ret; 569 return ret;
569} 570}
570 571
572/* this call might reallocate skb data */
571bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) 573bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
572{ 574{
573 struct ethhdr *ethhdr; 575 struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
619 621
620 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) 622 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
621 return false; 623 return false;
624
625 /* skb->data might have been reallocated by pskb_may_pull() */
626 ethhdr = (struct ethhdr *)skb->data;
627 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
628 ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
629
622 udphdr = (struct udphdr *)(skb->data + *header_len); 630 udphdr = (struct udphdr *)(skb->data + *header_len);
623 *header_len += sizeof(*udphdr); 631 *header_len += sizeof(*udphdr);
624 632
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
634 return true; 642 return true;
635} 643}
636 644
645/* this call might reallocate skb data */
637bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 646bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
638 struct sk_buff *skb, struct ethhdr *ethhdr) 647 struct sk_buff *skb)
639{ 648{
640 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; 649 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
641 struct batadv_orig_node *orig_dst_node = NULL; 650 struct batadv_orig_node *orig_dst_node = NULL;
642 struct batadv_gw_node *curr_gw = NULL; 651 struct batadv_gw_node *curr_gw = NULL;
652 struct ethhdr *ethhdr;
643 bool ret, out_of_range = false; 653 bool ret, out_of_range = false;
644 unsigned int header_len = 0; 654 unsigned int header_len = 0;
645 uint8_t curr_tq_avg; 655 uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
648 if (!ret) 658 if (!ret)
649 goto out; 659 goto out;
650 660
661 ethhdr = (struct ethhdr *)skb->data;
651 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, 662 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
652 ethhdr->h_dest); 663 ethhdr->h_dest);
653 if (!orig_dst_node) 664 if (!orig_dst_node)
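All the batman-adv hunks in this series fix one bug class: pskb_may_pull() may reallocate skb->data, leaving earlier header pointers such as ethhdr dangling, so they have to be re-derived from the new base afterwards. The same hazard in miniature, using realloc() in place of pskb_may_pull():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *data = malloc(32);
    if (!data)
        return 1;
    strcpy(data, "ethhdr|udphdr|payload");

    char *hdr = data;                     /* cached header pointer */

    char *grown = realloc(data, 1 << 20); /* may move the block */
    if (!grown) {
        free(data);
        return 1;
    }
    data = grown;
    /* 'hdr' may now dangle; re-fetch it from the new base,
     * exactly as the hunks above re-fetch ethhdr from skb->data. */
    hdr = data;
    printf("%s\n", hdr);
    free(data);
    return 0;
}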
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a6..1037d75da51f 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
34void batadv_gw_node_purge(struct batadv_priv *bat_priv); 34void batadv_gw_node_purge(struct batadv_priv *bat_priv);
35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); 35int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); 36bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 37bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
38 struct sk_buff *skb, struct ethhdr *ethhdr);
39 38
40#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ 39#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742d..0f04e1c302b4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
 	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
+	/* skb->data might have been reallocated by batadv_bla_tx() */
+	ethhdr = (struct ethhdr *)skb->data;
+
 	/* Register the client MAC in the transtable */
 	if (!is_multicast_ether_addr(ethhdr->h_source))
 		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
 		default:
 			break;
 		}
+
+		/* reminder: ethhdr might have become unusable from here on
+		 * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+		 */
 	}
 
 	/* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
 	/* unicast packet */
 	} else {
 		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
-			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+			ret = batadv_gw_out_of_range(bat_priv, skb);
 			if (ret)
 				goto dropped;
 		}
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd636..688a0419756b 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
 				       struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
  * @orig_node: the destination node
  * @packet_subtype: the batman 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
 				      struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
 	struct batadv_neigh_node *neigh_node;
 	int data_len = skb->len;
 	int ret = NET_RX_DROP;
-	unsigned int dev_mtu;
+	unsigned int dev_mtu, header_len;
 
 	/* get routing information */
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -429,10 +433,12 @@ find_router:
 	switch (packet_type) {
 	case BATADV_UNICAST:
 		batadv_unicast_prepare_skb(skb, orig_node);
+		header_len = sizeof(struct batadv_unicast_packet);
 		break;
 	case BATADV_UNICAST_4ADDR:
 		batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
 						 packet_subtype);
+		header_len = sizeof(struct batadv_unicast_4addr_packet);
 		break;
 	default:
 		/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +447,7 @@ find_router:
 		goto out;
 	}
 
+	ethhdr = (struct ethhdr *)(skb->data + header_len);
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
 	/* inform the destination node that we are still missing a correct route
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 61c5e819380e..08e576ada0b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
 		if (max_delay)
 			group = &mld->mld_mca;
-	} else if (skb->len >= sizeof(*mld2q)) {
+	} else {
 		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
 			err = -EINVAL;
 			goto out;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b6087..3b9637fb7939 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
 /*
- *	Sysfs attributes of bridge ports
+ *	Sysfs attributes of bridge
  *	Linux ethernet bridge
  *
  *	Authors:
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068efc1c..b84a1b155bc1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
 		nhoff += sizeof(struct ipv6hdr);
 		break;
 	}
+	case __constant_htons(ETH_P_8021AD):
 	case __constant_htons(ETH_P_8021Q): {
 		const struct vlan_hdr *vlan;
 		struct vlan_hdr _vlan;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9232c68941ab..60533db8b72d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
 		atomic_set(&p->refcnt, 1);
 		p->reachable_time =
 				neigh_rand_reach_time(p->base_reachable_time);
+		dev_hold(dev);
+		p->dev = dev;
+		write_pnet(&p->net, hold_net(net));
+		p->sysctl_table = NULL;
 
 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+			release_net(net);
+			dev_put(dev);
 			kfree(p);
 			return NULL;
 		}
 
-		dev_hold(dev);
-		p->dev = dev;
-		write_pnet(&p->net, hold_net(net));
-		p->sysctl_table = NULL;
 		write_lock_bh(&tbl->lock);
 		p->next		= tbl->parms.next;
 		tbl->parms.next = p;
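
The reordering matters because ops->ndo_neigh_setup() may inspect the parms object: it must see p->dev and p->net already set, and a failing callback must release exactly the references that were taken. A user-space sketch of that acquire-early, roll-back-on-failure ordering (all names hypothetical, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

static int dev_refs, net_refs;

static void dev_hold(void)    { dev_refs++; }
static void dev_put(void)     { dev_refs--; }
static void hold_net(void)    { net_refs++; }
static void release_net(void) { net_refs--; }

/* Stand-in for ops->ndo_neigh_setup(); sees the fully set up object. */
static int setup_cb(const int *obj)
{
	return *obj != 42 ? -1 : 0;
}

static int *parms_alloc(int value)
{
	int *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	*p = value;
	/* take all references *before* the callback can observe the object */
	dev_hold();
	hold_net();
	if (setup_cb(p)) {
		/* failure: release exactly what was taken, then free */
		release_net();
		dev_put();
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	int *ok = parms_alloc(42), *bad = parms_alloc(7);

	printf("ok=%p bad=%p dev_refs=%d net_refs=%d\n",
	       (void *)ok, (void *)bad, dev_refs, net_refs);
	free(ok);	/* real code would also drop ok's references here */
	return 0;
}
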
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de740834d1f..ca198c1d1d30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
 	/* If aging addresses are supported device will need to
 	 * implement its own handler for this.
 	 */
-	if (ndm->ndm_state & NUD_PERMANENT) {
+	if (!(ndm->ndm_state & NUD_PERMANENT)) {
 		pr_info("%s: FDB only supports static addresses\n", dev->name);
 		return -EINVAL;
 	}
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 	struct nlattr *extfilt;
 	u32 filter_mask = 0;
 
-	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
 				  IFLA_EXT_MASK);
 	if (extfilt)
 		filter_mask = nla_get_u32(extfilt);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80a..109ee89f123e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 	}
 
 	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + (net_adj - 2);
+		 net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 108a1e9c9eac..3df6d3edb2a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
 		if (!c)
 			continue;
 
-		if (IS_LEAF(c)) {
-			prefetch(rcu_dereference_rtnl(p->child[idx]));
+		if (IS_LEAF(c))
 			return (struct leaf *) c;
-		}
 
 		/* Rescan start scanning in new node */
 		p = (struct tnode *) c;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1f6eab66f7ce..8d6939eeb492 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 	if (daddr)
 		memcpy(&iph->daddr, daddr, 4);
 	if (iph->daddr)
-		return t->hlen;
+		return t->hlen + sizeof(*iph);
 
 	return -(t->hlen + sizeof(*iph));
 }
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08977df..850525b34899 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
 	iph->daddr	=	dst;
 	iph->saddr	=	src;
 	iph->ttl	=	ttl;
-	tunnel_ip_select_ident(skb,
-			       (const struct iphdr *)skb_inner_network_header(skb),
-			       &rt->dst);
+	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
 	err = ip_local_out(skb);
 	if (unlikely(net_xmit_eval(err)))
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6577a1149a47..463bd1273346 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
 	SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
 	SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
-	SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+	SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f441cb2..b6ae92a51f58 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
  */
 static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
-	u64 offs;
-	u32 delta, t, bic_target, max_cnt;
+	u32 delta, bic_target, max_cnt;
+	u64 offs, t;
 
 	ca->ack_cnt++;	/* count the number of ACKs */
 
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	 * if the cwnd < 1 million packets !!!
 	 */
 
+	t = (s32)(tcp_time_stamp - ca->epoch_start);
+	t += msecs_to_jiffies(ca->delay_min >> 3);
 	/* change the unit from HZ to bictcp_HZ */
-	t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
-	      - ca->epoch_start) << BICTCP_HZ) / HZ;
+	t <<= BICTCP_HZ;
+	do_div(t, HZ);
 
 	if (t < ca->bic_K)		/* t - K */
 		offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 		return;
 
 	/* Discard delay samples right after fast recovery */
-	if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+	if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
 		return;
 
 	delay = (rtt_us << 3) / USEC_PER_MSEC;
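
The old expression shifted a 32-bit jiffies delta left by BICTCP_HZ before dividing, which wraps once the connection epoch lies far enough in the past (and whenever epoch_start was never set, which the added epoch_start check also guards against). Widening to u64 first avoids the overflow; the kernel then divides with do_div() because plain 64-bit division is not available on all 32-bit targets. A small user-space illustration of the wrap (constants chosen for illustration only):

#include <stdio.h>
#include <stdint.h>

#define HZ        1000
#define BICTCP_HZ 10	/* cubic's internal clock: 2^10 units per second */

int main(void)
{
	/* two days' worth of jiffies since epoch_start */
	uint32_t jiffies_delta = 2 * 24 * 60 * 60 * HZ;

	/* old style: the shift happens in 32 bits and wraps silently */
	uint32_t t32 = (jiffies_delta << BICTCP_HZ) / HZ;

	/* fixed style: widen first, then shift, then divide */
	uint64_t t64 = jiffies_delta;
	t64 <<= BICTCP_HZ;
	t64 /= HZ;

	printf("32-bit: %u  64-bit: %llu\n", t32, (unsigned long long)t64);
	return 0;
}
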
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72243a4..aeac0dc3635d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
 		net_adj = 0;
 
 	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + (net_adj - 2);
+		 net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bff3d821c7eb..c4ff5bbb45c4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
 
 		if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
 #ifdef CONFIG_IPV6_SUBTREES
-			if (fn->subtree)
-				fn = fib6_lookup_1(fn->subtree, args + 1);
+			if (fn->subtree) {
+				struct fib6_node *sfn;
+				sfn = fib6_lookup_1(fn->subtree,
+						    args + 1);
+				if (!sfn)
+					goto backtrack;
+				fn = sfn;
+			}
 #endif
-			if (!fn || fn->fn_flags & RTN_RTINFO)
+			if (fn->fn_flags & RTN_RTINFO)
 				return fn;
 		}
 	}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
 	if (fn->fn_flags & RTN_ROOT)
 		break;
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae31968d42d3..cc9e02d79b55 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
 #include "led.h"
 
 #define IEEE80211_AUTH_TIMEOUT		(HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_AUTH_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_AUTH_MAX_TRIES	3
 #define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
 #define IEEE80211_ASSOC_TIMEOUT	(HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_ASSOC_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_ASSOC_MAX_TRIES	3
 
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_channel *channel,
 			     const struct ieee80211_ht_operation *ht_oper,
 			     const struct ieee80211_vht_operation *vht_oper,
-			     struct cfg80211_chan_def *chandef, bool verbose)
+			     struct cfg80211_chan_def *chandef, bool tracking)
 {
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct cfg80211_chan_def vht_chandef;
 	u32 ht_cfreq, ret;
 
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
 						  channel->band);
 	/* check that channel matches the right operating channel */
-	if (channel->center_freq != ht_cfreq) {
+	if (!tracking && channel->center_freq != ht_cfreq) {
 		/*
 		 * It's possible that some APs are confused here;
 		 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 		 * since we look at probe response/beacon data here
 		 * it should be OK.
 		 */
-		if (verbose)
-			sdata_info(sdata,
-				   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
-				   channel->center_freq, ht_cfreq,
-				   ht_oper->primary_chan, channel->band);
+		sdata_info(sdata,
+			   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+			   channel->center_freq, ht_cfreq,
+			   ht_oper->primary_chan, channel->band);
 		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
 		goto out;
 	}
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 						channel->band);
 		break;
 	default:
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
 				   vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	}
 
 	if (!cfg80211_chandef_valid(&vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT information is invalid, disable VHT\n");
 		ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 	}
 
 	if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
 			sdata_info(sdata,
 				   "AP VHT information doesn't match HT, disable VHT\n");
 		ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
 	if (ret & IEEE80211_STA_DISABLE_VHT)
 		vht_chandef = *chandef;
 
+	/*
+	 * Ignore the DISABLED flag when we're already connected and only
+	 * tracking the APs beacon for bandwidth changes - otherwise we
+	 * might get disconnected here if we connect to an AP, update our
+	 * regulatory information based on the AP's country IE and the
+	 * information we have is wrong/outdated and disables the channel
+	 * that we're actually using for the connection to the AP.
+	 */
 	while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-					IEEE80211_CHAN_DISABLED)) {
+					tracking ? 0 :
+						   IEEE80211_CHAN_DISABLED)) {
 		if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
 			ret = IEEE80211_STA_DISABLE_HT |
 			      IEEE80211_STA_DISABLE_VHT;
-			goto out;
+			break;
 		}
 
 		ret |= chandef_downgrade(chandef);
 	}
 
-	if (chandef->width != vht_chandef.width && verbose)
+	if (chandef->width != vht_chandef.width && !tracking)
 		sdata_info(sdata,
 			   "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
 
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
 	/* calculate new channel (type) based on HT/VHT operation IEs */
 	flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
-					     vht_oper, &chandef, false);
+					     vht_oper, &chandef, true);
 
 	/*
 	 * Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 
 	if (tx_flags == 0) {
 		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-		ifmgd->auth_data->timeout_started = true;
+		auth_data->timeout_started = true;
 		run_again(sdata, auth_data->timeout);
 	} else {
-		auth_data->timeout_started = false;
+		auth_data->timeout =
+			round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+		auth_data->timeout_started = true;
+		run_again(sdata, auth_data->timeout);
 	}
 
 	return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
 		assoc_data->timeout_started = true;
 		run_again(sdata, assoc_data->timeout);
 	} else {
-		assoc_data->timeout_started = false;
+		assoc_data->timeout =
+			round_jiffies_up(jiffies +
+					 IEEE80211_ASSOC_TIMEOUT_LONG);
+		assoc_data->timeout_started = true;
+		run_again(sdata, assoc_data->timeout);
 	}
 
 	return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 	ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
 						     cbss->channel,
 						     ht_oper, vht_oper,
						     &chandef, true);
-						     &chandef, true);
+						     &chandef, false);
 
 	sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
 				      local->rx_chains);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376eea5f..2f8010707d01 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
 	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
 	__u32 seq, ack, sack, end, win, swin;
 	s16 receiver_offset;
-	bool res;
+	bool res, in_recv_win;
 
 	/*
 	 * Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
 		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
 		 receiver->td_scale);
 
+	/* Is the ending sequence in the receive window (if available)? */
+	in_recv_win = !receiver->td_maxwin ||
+		      after(end, sender->td_end - receiver->td_maxwin - 1);
+
 	pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
 		 before(seq, sender->td_maxend + 1),
-		 after(end, sender->td_end - receiver->td_maxwin - 1),
+		 (in_recv_win ? 1 : 0),
 		 before(sack, receiver->td_end + 1),
 		 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
 
 	if (before(seq, sender->td_maxend + 1) &&
-	    after(end, sender->td_end - receiver->td_maxwin - 1) &&
+	    in_recv_win &&
 	    before(sack, receiver->td_end + 1) &&
 	    after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
 		/*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
 			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
 			"nf_ct_tcp: %s ",
 			before(seq, sender->td_maxend + 1) ?
-			after(end, sender->td_end - receiver->td_maxwin - 1) ?
+			in_recv_win ?
 			before(sack, receiver->td_end + 1) ?
 			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
 			: "ACK is under the lower bound (possible overly delayed ACK)"
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e9792e317..d92cc317bf8b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = htons(inst->group_num);
 
+	memset(&pmsg, 0, sizeof(pmsg));
 	pmsg.hw_protocol	= skb->protocol;
 	pmsg.hook		= hooknum;
 
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
 	if (indev && skb->dev &&
 	    skb->mac_header != skb->network_header) {
 		struct nfulnl_msg_packet_hw phw;
-		int len = dev_parse_header(skb, phw.hw_addr);
+		int len;
+
+		memset(&phw, 0, sizeof(phw));
+		len = dev_parse_header(skb, phw.hw_addr);
 		if (len > 0) {
 			phw.hw_addrlen = htons(len);
 			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea145ab3e..8a703c3dd318 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	if (indev && entskb->dev &&
 	    entskb->mac_header != entskb->network_header) {
 		struct nfqnl_msg_packet_hw phw;
-		int len = dev_parse_header(entskb, phw.hw_addr);
+		int len;
+
+		memset(&phw, 0, sizeof(phw));
+		len = dev_parse_header(entskb, phw.hw_addr);
 		if (len) {
 			phw.hw_addrlen = htons(len);
 			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
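
Both nfnetlink fixes close the same class of infoleak: a struct assembled on the stack is copied to user space in full by nla_put(), so padding and any unwritten tail bytes must be zeroed before the copy. A user-space sketch of why the memset matters (packet_hw is an illustrative stand-in, not the real nfulnl layout):

#include <stdio.h>
#include <string.h>

struct packet_hw {
	unsigned short hw_addrlen;
	unsigned char  hw_addr[8];	/* only the first hw_addrlen bytes are set */
};

int main(void)
{
	struct packet_hw phw;

	/* Without this memset, bytes of hw_addr past hw_addrlen (plus any
	 * struct padding) would hold stale stack data, and copying
	 * sizeof(phw) bytes to another process would leak them verbatim. */
	memset(&phw, 0, sizeof(phw));
	phw.hw_addrlen = 6;
	memcpy(phw.hw_addr, "\x00\x11\x22\x33\x44\x55", 6);

	for (size_t i = 0; i < sizeof(phw.hw_addr); i++)
		printf("%02x ", phw.hw_addr[i]);
	printf("\n");
	return 0;
}
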
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71646f0..6113cc7efffc 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 {
 	const struct xt_tcpmss_info *info = par->targinfo;
 	struct tcphdr *tcph;
-	unsigned int tcplen, i;
+	int len, tcp_hdrlen;
+	unsigned int i;
 	__be16 oldval;
 	u16 newmss;
 	u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	if (!skb_make_writable(skb, skb->len))
 		return -1;
 
-	tcplen = skb->len - tcphoff;
+	len = skb->len - tcphoff;
+	if (len < (int)sizeof(struct tcphdr))
+		return -1;
+
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	tcp_hdrlen = tcph->doff * 4;
 
-	/* Header cannot be larger than the packet */
-	if (tcplen < tcph->doff*4)
+	if (len < tcp_hdrlen)
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 		newmss = info->mss;
 
 	opt = (u_int8_t *)tcph;
-	for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
-		if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
-		    opt[i+1] == TCPOLEN_MSS) {
+	for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+		if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
 			u_int16_t oldmss;
 
 			oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	}
 
 	/* There is data after the header so the option can't be added
-	   without moving it, and doing so may make the SYN packet
-	   itself too large. Accept the packet unmodified instead. */
-	if (tcplen > tcph->doff*4)
+	 * without moving it, and doing so may make the SYN packet
+	 * itself too large. Accept the packet unmodified instead.
+	 */
+	if (len > tcp_hdrlen)
 		return 0;
 
 	/*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 		newmss = min(newmss, (u16)1220);
 
 	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
-	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+				 htons(len), htons(len + TCPOLEN_MSS), 1);
 	opt[0] = TCPOPT_MSS;
 	opt[1] = TCPOLEN_MSS;
 	opt[2] = (newmss & 0xff00) >> 8;
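
The rewritten loop makes the bound explicit: the option kind/length pair opt[i], opt[i+1] and the two MSS payload bytes are only read when i + TCPOLEN_MSS still fits inside the TCP header, instead of spreading that check across two conditions derived from tcph->doff. A self-contained sketch of the same walk (simplified; not the kernel's code):

#include <stdio.h>

#define TCPOPT_EOL  0
#define TCPOPT_NOP  1
#define TCPOPT_MSS  2
#define TCPOLEN_MSS 4

static unsigned int optlen(const unsigned char *opt, unsigned int i)
{
	/* EOL and NOP are one byte; everything else carries a length byte */
	if (opt[i] <= TCPOPT_NOP || opt[i + 1] == 0)
		return 1;
	return opt[i + 1];
}

/* Returns the MSS value, or -1 if no well-formed MSS option is present. */
static int find_mss(const unsigned char *opt, int tcp_hdrlen)
{
	unsigned int i;

	/* the guard guarantees opt[i..i+3] are in bounds before being read,
	 * mirroring the 'i <= tcp_hdrlen - TCPOLEN_MSS' bound in the fix;
	 * options start after the 20-byte fixed TCP header */
	for (i = 20; i + TCPOLEN_MSS <= (unsigned int)tcp_hdrlen;
	     i += optlen(opt, i))
		if (opt[i] == TCPOPT_MSS && opt[i + 1] == TCPOLEN_MSS)
			return (opt[i + 2] << 8) | opt[i + 3];
	return -1;
}

int main(void)
{
	unsigned char hdr[24] = { [20] = TCPOPT_MSS, TCPOLEN_MSS, 0x05, 0xb4 };

	printf("mss=%d\n", find_mss(hdr, sizeof(hdr)));	/* prints 1460 */
	return 0;
}
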
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710f..625fa1d636a0 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 	struct tcphdr *tcph;
 	u_int16_t n, o;
 	u_int8_t *opt;
-	int len;
+	int len, tcp_hdrlen;
 
 	/* This is a fragment, no TCP header is available */
 	if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 		return NF_DROP;
 
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
-	if (tcph->doff * 4 > len)
+	tcp_hdrlen = tcph->doff * 4;
+
+	if (len < tcp_hdrlen)
 		return NF_DROP;
 
 	opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 	 * Walk through all TCP options - if we find some option to remove,
 	 * set all octets to %TCPOPT_NOP and adjust checksum.
 	 */
-	for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+	for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
 		optl = optlen(opt, i);
 
-		if (i + optl > tcp_hdrlen(skb))
+		if (i + optl > tcp_hdrlen)
 			break;
 
 		if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 512718adb0d5..f85f8a2ad6cf 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net *net = sock_net(skb->sk);
 	int chains_to_skip = cb->args[0];
 	int fams_to_skip = cb->args[1];
+	bool need_locking = chains_to_skip || fams_to_skip;
+
+	if (need_locking)
+		genl_lock();
 
 	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
 		n = 0;
@@ -810,6 +814,9 @@ errout:
 	cb->args[0] = i;
 	cb->args[1] = n;
 
+	if (need_locking)
+		genl_unlock();
+
 	return skb->len;
 }
 
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f399f1cf..ab101f715447 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
 {
 	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
 
+	OVS_CB(skb)->tun_key = NULL;
 	return do_execute_actions(dp, skb, acts->actions,
 				  acts->actions_len, false);
 }
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d84c40..f2ed7600084e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
 	return 0;
 
-	rtnl_unlock();
-	return 0;
-
 exit_free:
 	kfree_skb(reply);
 exit_unlock:
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b121e1b..1aa84dc58777 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 	struct flex_array *buckets;
 	int i, err;
 
-	buckets = flex_array_alloc(sizeof(struct hlist_head *),
+	buckets = flex_array_alloc(sizeof(struct hlist_head),
 				   n_buckets, GFP_KERNEL);
 	if (!buckets)
 		return NULL;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f6..51b968d3febb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
 	return q;
 }
 
+/* The linklayer setting were not transferred from iproute2, in older
+ * versions, and the rate tables lookup systems have been dropped in
+ * the kernel. To keep backward compatible with older iproute2 tc
+ * utils, we detect the linklayer setting by detecting if the rate
+ * table were modified.
+ *
+ * For linklayer ATM table entries, the rate table will be aligned to
+ * 48 bytes, thus some table entries will contain the same value. The
+ * mpu (min packet unit) is also encoded into the old rate table, thus
+ * starting from the mpu, we find low and high table entries for
+ * mapping this cell. If these entries contain the same value, when
+ * the rate tables have been modified for linklayer ATM.
+ *
+ * This is done by rounding mpu to the nearest 48 bytes cell/entry,
+ * and then roundup to the next cell, calc the table entry one below,
+ * and compare.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+	int low       = roundup(r->mpu, 48);
+	int high      = roundup(low+1, 48);
+	int cell_low  = low >> r->cell_log;
+	int cell_high = (high >> r->cell_log) - 1;
+
+	/* rtab is too inaccurate at rates > 100Mbit/s */
+	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+		pr_debug("TC linklayer: Giving up ATM detection\n");
+		return TC_LINKLAYER_ETHERNET;
+	}
+
+	if ((cell_high > cell_low) && (cell_high < 256)
+	    && (rtab[cell_low] == rtab[cell_high])) {
+		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+			 cell_low, cell_high, rtab[cell_high]);
+		return TC_LINKLAYER_ATM;
+	}
+	return TC_LINKLAYER_ETHERNET;
+}
+
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
 		rtab->rate = *r;
 		rtab->refcnt = 1;
 		memcpy(rtab->data, nla_data(tab), 1024);
+		if (r->linklayer == TC_LINKLAYER_UNAWARE)
+			r->linklayer = __detect_linklayer(r, rtab->data);
 		rtab->next = qdisc_rtab_list;
 		qdisc_rtab_list = rtab;
 	}
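
The detector works purely on arithmetic over the user-supplied rate table: round the mpu up to one 48-byte ATM cell, probe the first byte of the next cell, and compare the two rtab slots; identical transmit times for different packet sizes are the signature of ATM cell alignment. A compilable sketch of just the index math (the cell_log value is chosen for illustration):

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int mpu = 0, cell_log = 3;	/* 2^3 = 8 bytes per rtab slot */

	/* the two probe points the detector compares: the mpu rounded up to
	 * one 48-byte ATM cell, and the first byte of the next cell */
	int low       = roundup(mpu, 48);	/* 0  */
	int high      = roundup(low + 1, 48);	/* 48 */
	int cell_low  = low >> cell_log;	/* rtab slot 0 */
	int cell_high = (high >> cell_log) - 1;	/* rtab slot 5 */

	printf("compare rtab[%d] against rtab[%d]\n", cell_low, cell_high);
	/* If both slots hold the same transmit time, every size from 1 to 48
	 * bytes costs the same - i.e. the table was built for ATM. */
	return 0;
}
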
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76e..48be3d5c0d92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
 
 unsigned long dev_trans_start(struct net_device *dev)
 {
-	unsigned long val, res = dev->trans_start;
+	unsigned long val, res;
 	unsigned int i;
 
+	if (is_vlan_dev(dev))
+		dev = vlan_dev_real_dev(dev);
+	res = dev->trans_start;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		val = netdev_get_tx_queue(dev, i)->trans_start;
 		if (val && time_after(val, res))
 			res = val;
 	}
 	dev->trans_start = res;
+
 	return res;
 }
 EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
 	memset(r, 0, sizeof(*r));
 	r->overhead = conf->overhead;
 	r->rate_bytes_ps = conf->rate;
+	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
 	r->mult = 1;
 	/*
 	 * The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 45e751527dfc..c2178b15ca6e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 	struct nlattr *opt = tca[TCA_OPTIONS];
+	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
 	struct nlattr *tb[TCA_HTB_MAX + 1];
 	struct tc_htb_opt *hopt;
 
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	if (!hopt->rate.rate || !hopt->ceil.rate)
 		goto failure;
 
+	/* Keeping backward compatible with rate_table based iproute2 tc */
+	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+		rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+		if (rtab)
+			qdisc_put_rtab(rtab);
+	}
+	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+		ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+		if (ctab)
+			qdisc_put_rtab(ctab);
+	}
+
 	if (!cl) {		/* new class */
 		struct Qdisc *new_q;
 		int prio;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index bce5b79662a6..ab67efc64b24 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 	else
 		spc_state = SCTP_ADDR_AVAILABLE;
 	/* Don't inform ULP about transition from PF to
-	 * active state and set cwnd to 1, see SCTP
+	 * active state and set cwnd to 1 MTU, see SCTP
 	 * Quick failover draft section 5.1, point 5
 	 */
 	if (transport->state == SCTP_PF) {
 		ulp_notify = false;
-		transport->cwnd = 1;
+		transport->cwnd = asoc->pathmtu;
 	}
 	transport->state = SCTP_ACTIVE;
 	break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bdbbc3fd7c14..8fdd16046d66 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 		return;
 	}
 
-	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
 	sctp_packet_free(&transport->packet);
 
 	if (transport->asoc)
 		sctp_association_put(transport->asoc);
+
+	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
 }
 
 /* Start T3_rtx timer if it is not already running and update the heartbeat
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7ba2f0..609c30c80816 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
 {
 	struct tipc_link *l_ptr;
 	struct tipc_link *temp_l_ptr;
+	struct tipc_link_req *temp_req;
 
 	pr_info("Disabling bearer <%s>\n", b_ptr->name);
 	spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
 		tipc_link_delete(l_ptr);
 	}
-	if (b_ptr->link_req)
-		tipc_disc_delete(b_ptr->link_req);
+	temp_req = b_ptr->link_req;
+	b_ptr->link_req = NULL;
 	spin_unlock_bh(&b_ptr->lock);
+
+	if (temp_req)
+		tipc_disc_delete(temp_req);
+
 	memset(b_ptr, 0, sizeof(struct tipc_bearer));
 }
 
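
The tipc fix is the classic detach-under-lock pattern: tipc_disc_delete() can block, so it must not run under b_ptr->lock; the pointer is snapshotted and cleared while locked, and the object destroyed only after unlocking. A user-space sketch with a pthread mutex standing in for the spinlock (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bearer {
	pthread_mutex_t lock;
	int *link_req;		/* stand-in for the discovery request object */
};

/* Stand-in for tipc_disc_delete(): may sleep, so it must not run with
 * b->lock held. */
static void disc_delete(int *req)
{
	free(req);
}

static void bearer_disable(struct bearer *b)
{
	int *temp_req;

	pthread_mutex_lock(&b->lock);
	temp_req = b->link_req;		/* detach under the lock... */
	b->link_req = NULL;
	pthread_mutex_unlock(&b->lock);

	if (temp_req)			/* ...destroy outside of it */
		disc_delete(temp_req);
}

int main(void)
{
	struct bearer b = { PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(int)) };

	bearer_disable(&b);
	printf("link_req=%p\n", (void *)b.link_req);	/* (nil) */
	return 0;
}
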
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 593071dabd1c..4d9334683f84 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
 	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
 		struct vsock_sock *vsk;
 		list_for_each_entry(vsk, &vsock_connected_table[i],
-				    connected_table);
+				    connected_table)
 			fn(sk_vsock(vsk));
 	}
 
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 4f9f216665e9..a8c29fa4f1b3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
 		cfg80211_leave_mesh(rdev, dev);
 		break;
 	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
 		cfg80211_stop_ap(rdev, dev);
 		break;
 	default:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 25d217d90807..3fcba69817e5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
 			goto out_unlock;
 		}
 		*rdev = wiphy_to_dev((*wdev)->wiphy);
-		cb->args[0] = (*rdev)->wiphy_idx;
+		/* 0 is the first index - add 1 to parse only once */
+		cb->args[0] = (*rdev)->wiphy_idx + 1;
 		cb->args[1] = (*wdev)->identifier;
 	} else {
-		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
+		/* subtract the 1 again here */
+		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
 		struct wireless_dev *tmp;
 
 		if (!wiphy) {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8e77cbbad871..e3c7ba8d7582 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
 }
 
 #define nid_has_mute(codec, nid, dir) \
-	check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
+	check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
 #define nid_has_volume(codec, nid, dir) \
 	check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
 
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
 		if (enable)
 			val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
 	}
-	if (caps & AC_AMPCAP_MUTE) {
+	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
 		if (!enable)
 			val |= HDA_AMP_MUTE;
 	}
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
 {
 	unsigned int mask = 0xff;
 
-	if (caps & AC_AMPCAP_MUTE) {
+	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
 		if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
 			mask &= ~0x80;
 	}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8bd226149868..f303cd898515 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1031,6 +1031,7 @@ enum {
 	ALC880_FIXUP_GPIO2,
 	ALC880_FIXUP_MEDION_RIM,
 	ALC880_FIXUP_LG,
+	ALC880_FIXUP_LG_LW25,
 	ALC880_FIXUP_W810,
 	ALC880_FIXUP_EAPD_COEF,
 	ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = {
 			{ }
 		}
 	},
+	[ALC880_FIXUP_LG_LW25] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1a, 0x0181344f }, /* line-in */
+			{ 0x1b, 0x0321403f }, /* headphone */
+			{ }
+		}
+	},
 	[ALC880_FIXUP_W810] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
 	SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
 	SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+	SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
 	SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
 
 	/* Below is the copied entries from alc880_quirks.c.
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+	SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728718c5..be2ba1b6fe4a 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
 static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
 
+static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
+
 static const unsigned int limiter_tlv[] = {
 	TLV_DB_RANGE_HEAD(2),
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 	SOC_ENUM("Beep Pitch", beep_pitch_enum),
 	SOC_ENUM("Beep on Time", beep_ontime_enum),
 	SOC_ENUM("Beep off Time", beep_offtime_enum),
-	SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
+	SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
+			  0, 0x07, 0x1f, beep_tlv),
 	SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
 	SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
 	SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 6c8a9e7bee25..760e8bfeacaa 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
 static int power_vag_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
+	const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
 		snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
 		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
-			SGTL5000_VAG_POWERUP, 0);
-		msleep(400);
+		/*
+		 * Don't clear VAG_POWERUP, when both DAC and ADC are
+		 * operational to prevent inadvertently starving the
+		 * other one of them.
+		 */
+		if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
+				mask) != mask) {
+			snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+				SGTL5000_VAG_POWERUP, 0);
+			msleep(400);
+		}
 		break;
 	default:
 		break;
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
 	SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
 	SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
 		       SGTL5000_CHIP_ANA_ADC_CTRL,
-		       8, 2, 0, capture_6db_attenuate),
+		       8, 1, 0, capture_6db_attenuate),
 	SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
 
 	SOC_DOUBLE_TLV("Headphone Playback Volume",
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index bd16010441cc..4375c9f2b791 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
 		return -EINVAL;
 	}
 
-	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-				list_sink);
-	if (!path) {
+	if (list_empty(&w->sources)) {
 		dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
 		return -EINVAL;
 	}
 
+	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
+				list_sink);
+
 	ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
 	if (ret < 0)
 		return ret;
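
The soc-dapm hunk corrects a common misuse of list_first_entry(): the macro is pointer arithmetic on the head node and never yields NULL, so the removed "if (!path)" test could never fire and an empty source list went undetected. Emptiness must be checked with list_empty() before taking the first entry. A compact userspace rendition of the kernel macros showing the non-NULL result on an empty list:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct path { int id; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head }; /* empty list */
	struct path *p = list_first_entry(&head, struct path, list);

	/* p is never NULL; it aliases 'head', not a real entry */
	printf("p == NULL? %s\n", p ? "no" : "yes");
	return 0;
}
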
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d04146cad61f..47565fd04505 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
 		reg = TEGRA30_I2S_CIF_RX_CTRL;
 	} else {
 		val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
-		reg = TEGRA30_I2S_CIF_RX_CTRL;
+		reg = TEGRA30_I2S_CIF_TX_CTRL;
 	}
 
 	regmap_write(i2s->regmap, reg, val);
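
The tegra30_i2s hunk fixes a copy/paste slip: the TX branch selected the RX CIF control register, so playback programmed the wrong direction. One way to keep such branches from drifting is to make the register choice a single expression; a sketch with illustrative offsets (the real TEGRA30_I2S_CIF_* values differ):

#include <stdio.h>

enum { TEGRA30_I2S_CIF_RX_CTRL = 0x10, TEGRA30_I2S_CIF_TX_CTRL = 0x14 };

static unsigned int cif_ctrl_reg(int is_playback)
{
	/* one selection point, so there is no duplicated assignment to get wrong */
	return is_playback ? TEGRA30_I2S_CIF_TX_CTRL : TEGRA30_I2S_CIF_RX_CTRL;
}

int main(void)
{
	printf("playback reg = %#x\n", cif_ctrl_reg(1));
	printf("capture reg  = %#x\n", cif_ctrl_reg(0));
	return 0;
}
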
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 26722423330d..f3dd7266c391 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
19#include "chip.h" 19#include "chip.h"
20#include "comm.h" 20#include "comm.h"
21 21
22enum {
23 MIDI_BUFSIZE = 64
24};
25
22static void usb6fire_midi_out_handler(struct urb *urb) 26static void usb6fire_midi_out_handler(struct urb *urb)
23{ 27{
24 struct midi_runtime *rt = urb->context; 28 struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
 	if (!rt)
 		return -ENOMEM;
 
+	rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
+	if (!rt->out_buffer) {
+		kfree(rt);
+		return -ENOMEM;
+	}
+
 	rt->chip = chip;
 	rt->in_received = usb6fire_midi_in_received;
 	rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
 
 	ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
 	if (ret < 0) {
+		kfree(rt->out_buffer);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
 		return ret;
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
 
 void usb6fire_midi_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->midi);
+	struct midi_runtime *rt = chip->midi;
+
+	kfree(rt->out_buffer);
+	kfree(rt);
 	chip->midi = NULL;
 }
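
Across the midi.c hunks, out_buffer moves from an array embedded in struct midi_runtime to a separate kzalloc() allocation, giving the USB core a buffer it can safely DMA into, and every exit path, including the new error branch and usb6fire_midi_destroy(), now frees it. A generic userspace sketch of that construct/unwind pairing, with hypothetical make_runtime()/free_runtime() names:

#include <stdlib.h>

#define BUFSIZE 64

struct runtime {
	unsigned char *out_buffer; /* was: unsigned char out_buffer[BUFSIZE] */
};

static struct runtime *make_runtime(void)
{
	struct runtime *rt = calloc(1, sizeof(*rt));

	if (!rt)
		return NULL;
	rt->out_buffer = calloc(1, BUFSIZE);
	if (!rt->out_buffer) {
		free(rt); /* unwind the partial construction */
		return NULL;
	}
	rt->out_buffer[0] = 0x80; /* 'send midi' command, as in the diff */
	return rt;
}

static void free_runtime(struct runtime *rt)
{
	if (!rt)
		return;
	free(rt->out_buffer);
	free(rt);
}

int main(void)
{
	free_runtime(make_runtime());
	return 0;
}
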
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006e5430..84851b9f5559 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
 
 #include "common.h"
 
-enum {
-	MIDI_BUFSIZE = 64
-};
-
 struct midi_runtime {
 	struct sfire_chip *chip;
 	struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@ struct midi_runtime {
 	struct snd_rawmidi_substream *out;
 	struct urb out_urb;
 	u8 out_serial; /* serial number of out packet */
-	u8 out_buffer[MIDI_BUFSIZE];
+	u8 *out_buffer;
 	int buffer_offset;
 
 	void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 3d2551cc10f2..b5eb97fdc842 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
 	urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
 }
 
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->out_urbs[i].buffer)
+			return -ENOMEM;
+		rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+		if (!rt->in_urbs[i].buffer)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+	int i;
+
+	for (i = 0; i < PCM_N_URBS; i++) {
+		kfree(rt->out_urbs[i].buffer);
+		kfree(rt->in_urbs[i].buffer);
+	}
+}
+
 int usb6fire_pcm_init(struct sfire_chip *chip)
 {
 	int i;
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 	if (!rt)
 		return -ENOMEM;
 
+	ret = usb6fire_pcm_buffers_init(rt);
+	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
+		kfree(rt);
+		return ret;
+	}
+
 	rt->chip = chip;
 	rt->stream_state = STREAM_DISABLED;
 	rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 
 	ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
 	if (ret < 0) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
 		return ret;
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
 
 	if (ret) {
+		usb6fire_pcm_buffers_destroy(rt);
 		kfree(rt);
 		snd_printk(KERN_ERR PREFIX
 			"error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
 
 void usb6fire_pcm_destroy(struct sfire_chip *chip)
 {
-	kfree(chip->pcm);
+	struct pcm_runtime *rt = chip->pcm;
+
+	usb6fire_pcm_buffers_destroy(rt);
+	kfree(rt);
 	chip->pcm = NULL;
 }
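
pcm.c applies the same treatment to the URB buffers, but through paired helpers: usb6fire_pcm_buffers_init() stops at the first failed allocation and the caller immediately runs usb6fire_pcm_buffers_destroy(), which is safe on a half-built runtime because kzalloc() left the untouched slots NULL and kfree(NULL) is a no-op. The same contract in a userspace sketch, where free(NULL) is likewise defined to do nothing:

#include <stdio.h>
#include <stdlib.h>

#define N_URBS 4      /* stands in for PCM_N_URBS */
#define BUF_SZ 4096   /* stands in for packets * max packet size */

struct urb_buf { unsigned char *buffer; };

static int buffers_init(struct urb_buf *bufs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i].buffer = calloc(1, BUF_SZ);
		if (!bufs[i].buffer)
			return -1; /* caller cleans up via buffers_destroy() */
	}
	return 0;
}

static void buffers_destroy(struct urb_buf *bufs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		free(bufs[i].buffer); /* free(NULL) is harmless */
}

int main(void)
{
	struct urb_buf bufs[N_URBS] = { { 0 } };

	if (buffers_init(bufs, N_URBS) != 0)
		fprintf(stderr, "allocation failed\n");
	buffers_destroy(bufs, N_URBS); /* safe even after a partial init */
	return 0;
}
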
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133ee3fe..f5779d6182c6 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@ struct pcm_urb {
 	struct urb instance;
 	struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
 	/* END DO NOT SEPARATE */
-	u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+	u8 *buffer;
 
 	struct pcm_urb *peer;
 };
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d5438083fd6a..95558ef4a7a0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
 	case USB_ID(0x046d, 0x0991):
 	/* Most audio usb devices lie about volume resolution.
 	 * Most Logitech webcams have res = 384.
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1bc45e71f1fe..0df9ede99dfd 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip,
319 if (altsd->bNumEndpoints < 1) 319 if (altsd->bNumEndpoints < 1)
320 return -ENODEV; 320 return -ENODEV;
321 epd = get_endpoint(alts, 0); 321 epd = get_endpoint(alts, 0);
322 if (!usb_endpoint_xfer_bulk(epd) || 322 if (!usb_endpoint_xfer_bulk(epd) &&
323 !usb_endpoint_xfer_int(epd)) 323 !usb_endpoint_xfer_int(epd))
324 return -ENODEV; 324 return -ENODEV;
325 325
326 switch (USB_ID_VENDOR(chip->usb_id)) { 326 switch (USB_ID_VENDOR(chip->usb_id)) {
327 case 0x0499: /* Yamaha */ 327 case 0x0499: /* Yamaha */
328 err = create_yamaha_midi_quirk(chip, iface, driver, alts); 328 err = create_yamaha_midi_quirk(chip, iface, driver, alts);
329 if (err < 0 && err != -ENODEV) 329 if (err != -ENODEV)
330 return err; 330 return err;
331 break; 331 break;
332 case 0x0582: /* Roland */ 332 case 0x0582: /* Roland */
333 err = create_roland_midi_quirk(chip, iface, driver, alts); 333 err = create_roland_midi_quirk(chip, iface, driver, alts);
334 if (err < 0 && err != -ENODEV) 334 if (err != -ENODEV)
335 return err; 335 return err;
336 break; 336 break;
337 } 337 }
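
The endpoint test at the top of the quirks.c hunk is a De Morgan fix: the intent is to accept an endpoint that is bulk or interrupt, so the rejection condition is "neither bulk nor interrupt", which needs &&. With ||, every endpoint was rejected, since no endpoint is both types at once. The two later changes make the vendor-specific quirk result propagate on success as well: with err != -ENODEV, both 0 and real errors return immediately, and only -ENODEV falls through to the generic MIDI quirk. A truth-table sketch of the endpoint check with stand-in predicates:

#include <stdbool.h>
#include <stdio.h>

static bool rejected_old(bool is_bulk, bool is_int)
{
	return !is_bulk || !is_int; /* true for every real endpoint */
}

static bool rejected_new(bool is_bulk, bool is_int)
{
	return !is_bulk && !is_int; /* true only for other transfer types */
}

int main(void)
{
	printf("bulk endpoint: old=%d new=%d\n",
	       rejected_old(true, false), rejected_new(true, false));
	printf("int endpoint:  old=%d new=%d\n",
	       rejected_old(false, true), rejected_new(false, true));
	printf("iso endpoint:  old=%d new=%d\n",
	       rejected_old(false, false), rejected_new(false, false));
	return 0;
}
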