author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-05-29 07:28:14 -0400
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-05-29 07:28:14 -0400
commit    f70977fbd6359efb17bb93adac37b9f226d910a7 (patch)
tree      d36c3ace7ae638ec447b7cce39617d950d121265
parent    2807bd18cc60ec471917b5158e98e4d7b7e030fb (diff)
parent    e6f8a4d60b905eae1a20cbb0c72c67b26b2f02fd (diff)
Merge back earlier ACPI thermal material.
-rw-r--r--  Documentation/DocBook/drm.tmpl | 12
-rw-r--r--  Documentation/DocBook/media/Makefile | 2
-rw-r--r--  Documentation/devicetree/bindings/net/mdio-gpio.txt | 2
-rw-r--r--  Documentation/email-clients.txt | 15
-rw-r--r--  Documentation/filesystems/proc.txt | 5
-rw-r--r--  Documentation/hwmon/sysfs-interface | 14
-rw-r--r--  Documentation/java.txt | 8
-rw-r--r--  Documentation/networking/filter.txt | 2
-rw-r--r--  Documentation/networking/packet_mmap.txt | 2
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 6
-rw-r--r--  arch/sparc/kernel/sysfs.c | 2
-rw-r--r--  arch/sparc/lib/NG2memcpy.S | 1
-rw-r--r--  arch/sparc/mm/fault_64.c | 16
-rw-r--r--  arch/sparc/mm/tsb.c | 14
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 2
-rw-r--r--  drivers/acpi/bus.c | 28
-rw-r--r--  drivers/acpi/thermal.c | 11
-rw-r--r--  drivers/dma/dmaengine.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 130
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 4
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 15
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 67
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 8
-rw-r--r--  drivers/net/bonding/bond_alb.c | 54
-rw-r--r--  drivers/net/bonding/bond_main.c | 134
-rw-r--r--  drivers/net/bonding/bond_options.c | 1
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/can/c_can/Kconfig | 7
-rw-r--r--  drivers/net/can/c_can/c_can.c | 36
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r--  drivers/net/ethernet/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 110
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 181
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 47
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 108
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 133
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/jme.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 54
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 57
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r--  drivers/net/macvlan.c | 18
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 16
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/xen-netback/common.h | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 30
-rw-r--r--  drivers/net/xen-netback/netback.c | 102
-rw-r--r--  drivers/ptp/Kconfig | 3
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 3
-rw-r--r--  fs/afs/cmservice.c | 19
-rw-r--r--  fs/afs/internal.h | 2
-rw-r--r--  fs/afs/rxrpc.c | 86
-rw-r--r--  fs/nfsd/nfs4acl.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 15
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 8
-rw-r--r--  include/acpi/acpi_bus.h | 2
-rw-r--r--  include/linux/dmaengine.h | 1
-rw-r--r--  include/linux/if_macvlan.h | 1
-rw-r--r--  include/linux/if_vlan.h | 15
-rw-r--r--  include/linux/mlx4/qp.h | 11
-rw-r--r--  include/linux/net.h | 15
-rw-r--r--  include/linux/netdevice.h | 34
-rw-r--r--  include/linux/of_mdio.h | 7
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/rtnetlink.h | 5
-rw-r--r--  include/linux/sched.h | 9
-rw-r--r--  include/net/cfg80211.h | 12
-rw-r--r--  include/net/ip6_route.h | 1
-rw-r--r--  include/net/netns/ipv4.h | 9
-rw-r--r--  include/uapi/linux/nl80211.h | 4
-rw-r--r--  kernel/events/core.c | 174
-rw-r--r--  kernel/sched/core.c | 15
-rw-r--r--  kernel/sched/cpudeadline.c | 4
-rw-r--r--  kernel/sched/cpupri.c | 3
-rw-r--r--  kernel/sched/cputime.c | 32
-rw-r--r--  kernel/sched/deadline.c | 5
-rw-r--r--  kernel/sched/fair.c | 16
-rw-r--r--  mm/filemap.c | 6
-rw-r--r--  mm/madvise.c | 2
-rw-r--r--  mm/memcontrol.c | 27
-rw-r--r--  mm/memory-failure.c | 17
-rw-r--r--  net/8021q/vlan.c | 1
-rw-r--r--  net/8021q/vlan_dev.c | 52
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 2
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 3
-rw-r--r--  net/batman-adv/fragmentation.c | 11
-rw-r--r--  net/batman-adv/gateway_client.c | 11
-rw-r--r--  net/batman-adv/hard-interface.c | 2
-rw-r--r--  net/batman-adv/originator.c | 62
-rw-r--r--  net/bridge/br_netfilter.c | 4
-rw-r--r--  net/core/dev.c | 102
-rw-r--r--  net/core/neighbour.c | 4
-rw-r--r--  net/core/net_namespace.c | 2
-rw-r--r--  net/core/rtnetlink.c | 33
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/core/utils.c | 8
-rw-r--r--  net/dsa/dsa.c | 3
-rw-r--r--  net/ipv4/af_inet.c | 36
-rw-r--r--  net/ipv4/fib_semantics.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 8
-rw-r--r--  net/ipv4/ip_forward.c | 54
-rw-r--r--  net/ipv4/ip_fragment.c | 5
-rw-r--r--  net/ipv4/ip_output.c | 51
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv4/ip_vti.c | 5
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 5
-rw-r--r--  net/ipv4/ping.c | 6
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 42
-rw-r--r--  net/ipv4/xfrm4_output.c | 32
-rw-r--r--  net/ipv4/xfrm4_protocol.c | 19
-rw-r--r--  net/ipv6/ip6_offload.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 8
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/ip6_vti.c | 8
-rw-r--r--  net/ipv6/ndisc.c | 7
-rw-r--r--  net/ipv6/netfilter.c | 6
-rw-r--r--  net/ipv6/route.c | 24
-rw-r--r--  net/ipv6/tcpv6_offload.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 22
-rw-r--r--  net/ipv6/xfrm6_protocol.c | 11
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 1
-rw-r--r--  net/mac80211/mlme.c | 20
-rw-r--r--  net/mac80211/offchannel.c | 27
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/sta_info.c | 3
-rw-r--r--  net/mac80211/status.c | 5
-rw-r--r--  net/mac80211/trace.h | 4
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/mac80211/vht.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r--  net/netfilter/nf_tables_core.c | 49
-rw-r--r--  net/netfilter/nfnetlink.c | 8
-rw-r--r--  net/rxrpc/ar-key.c | 2
-rw-r--r--  net/sched/cls_tcindex.c | 30
-rw-r--r--  net/wireless/scan.c | 12
-rw-r--r--  net/wireless/sme.c | 2
-rw-r--r--  tools/Makefile | 6
-rw-r--r--  tools/lib/lockdep/Makefile | 5
186 files changed, 2968 insertions, 1268 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 677a02553ec0..ba60d93c1855 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -79,7 +79,7 @@
 <partintro>
 <para>
 This first part of the DRM Developer's Guide documents core DRM code,
-helper libraries for writting drivers and generic userspace interfaces
+helper libraries for writing drivers and generic userspace interfaces
 exposed by DRM drivers.
 </para>
 </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
 providing a solution to every graphics memory-related problems, GEM
 identified common code between drivers and created a support library to
 share it. GEM has simpler initialization and execution requirements than
-TTM, but has no video RAM management capabitilies and is thus limited to
+TTM, but has no video RAM management capabilities and is thus limited to
 UMA devices.
 </para>
 <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
 vice versa. Drivers must use the kernel dma-buf buffer sharing framework
 to manage the PRIME file descriptors. Similar to the mode setting
 API PRIME is agnostic to the underlying buffer object manager, as
-long as handles are 32bit unsinged integers.
+long as handles are 32bit unsigned integers.
 </para>
 <para>
 While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
 first create properties and then create and associate individual instances
 of those properties to objects. A property can be instantiated multiple
 times and associated with different objects. Values are stored in property
-instances, and all other property information are stored in the propery
+instances, and all other property information are stored in the property
 and shared between all instances of the property.
 </para>
 <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
 <sect1>
 <title>Legacy Support Code</title>
 <para>
-The section very brievely covers some of the old legacy support code which
+The section very briefly covers some of the old legacy support code which
 is only used by old DRM drivers which have done a so-called shadow-attach
 to the underlying device instead of registering as a real driver. This
-also includes some of the old generic buffer mangement and command
+also includes some of the old generic buffer management and command
 submission code. Do not use any of this in new and modern drivers.
 </para>
 
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
index f9fd615427fb..1d27f0a1abd1 100644
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
 #
 
 install_media_images = \
-	$(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+	$(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
 
 $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
 	$(Q)base64 -d $< >$@
diff --git a/Documentation/devicetree/bindings/net/mdio-gpio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt
index c79bab025369..8dbcf8295c6c 100644
--- a/Documentation/devicetree/bindings/net/mdio-gpio.txt
+++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-	mdio-gpio0 = <&mdio0>;
+	mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index e9f5daccbd02..4e30ebaa9e5b 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension. Download the file from:
-  https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file.  This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 8b9cd8eb3f91..264bcde0c51c 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1245,8 +1245,9 @@ second). The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts serviced since boot time, for each
 of the possible system interrupts. The first column is the total of all
-interrupts serviced; each subsequent column is the total for that particular
-interrupt.
+interrupts serviced including unnumbered architecture specific interrupts;
+each subsequent column is the total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
 
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 79f8257dd790..2cc95ad46604 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
 		from the max value.
 		RW
 
+temp[1-*]_min_hyst
+		Temperature hysteresis value for min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the min value.
+		RW
+
 temp[1-*]_input Temperature input value.
 		Unit: millidegree Celsius
 		RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit	Temperature critical min value, typically lower than
 		Unit: millidegree Celsius
 		RW
 
+temp[1-*]_lcrit_hyst
+		Temperature hysteresis value for critical min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the critical min value.
+		RW
+
 temp[1-*]_offset
 		Temperature offset which is added to the temperature reading
 		by the chip.
diff --git a/Documentation/java.txt b/Documentation/java.txt
index e6a723281547..418020584ccc 100644
--- a/Documentation/java.txt
+++ b/Documentation/java.txt
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
 		break;
 	case CP_CLASS:
 	case CP_STRING:
+	case CP_METHODTYPE:
 		seekerr = fseek(classfile, 2, SEEK_CUR);
 		break;
+	case CP_METHODHANDLE:
+		seekerr = fseek(classfile, 3, SEEK_CUR);
+		break;
 	case CP_INTEGER:
 	case CP_FLOAT:
 	case CP_FIELDREF:
 	case CP_METHODREF:
 	case CP_INTERFACEMETHODREF:
 	case CP_NAMEANDTYPE:
+	case CP_INVOKEDYNAMIC:
 		seekerr = fseek(classfile, 4, SEEK_CUR);
 		break;
 	case CP_LONG:
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 81f940f4e884..e3ba753cb714 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
   mark                                  skb->mark
   queue                                 skb->queue_mapping
   hatype                                skb->dev->type
-  rxhash                                skb->rxhash
+  rxhash                                skb->hash
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 6fea79efb4cb..38112d512f47 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
 
 Currently implemented fanout policies are:
 
-  - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+  - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
   - PACKET_FANOUT_LB: schedule to socket by round-robin
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
diff --git a/MAINTAINERS b/MAINTAINERS
index 6846c7c622e3..cc4511177949 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -537,7 +537,7 @@ L:	linux-alpha@vger.kernel.org
 F:	arch/alpha/
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:	Vince Bridgers <vbridgers2013@gmail.com
+M:	Vince Bridgers <vbridgers2013@gmail.com>
 L:	netdev@vger.kernel.org
 L:	nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:	Maintained
@@ -6514,10 +6514,10 @@ T:	git git://openrisc.net/~jonas/linux
 F:	arch/openrisc/
 
 OPENVSWITCH
-M:	Jesse Gross <jesse@nicira.com>
+M:	Pravin Shelar <pshelar@nicira.com>
 L:	dev@openvswitch.org
 W:	http://openvswitch.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:	Maintained
 F:	net/openvswitch/
 
diff --git a/Makefile b/Makefile
index 9d993787afe0..cf3412d78ff1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index f1432da7b4c0..c5fa7a697fba 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -432,7 +432,7 @@
 	ENTRY_SAME(sched_setattr)
 	ENTRY_SAME(sched_getattr)	/* 335 */
 	ENTRY_COMP(utimes)
-	ENTRY_COMP(renameat2)
+	ENTRY_SAME(renameat2)
 
 	/* Nothing yet */
 
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 452d3ebd9d0f..e9f8fa9337fe 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
 		return NULL;
 	memset(header, 0, sz);
 	header->pages = sz / PAGE_SIZE;
-	hole = sz - (bpfsize + sizeof(*header));
+	hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 	/* Insert random number of illegal instructions before BPF code
 	 * and make sure the first instruction starts at an even address.
 	 */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index fde5abaac0cc..1a49ffdf9da9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -24,7 +24,8 @@
 
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
  * The vmalloc area spans 0x100000000 to 0x200000000.
  * Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
  * 0x400000000.
  */
 #define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
-#define TSBMAP_BASE		_AC(0x0000000008000000,UL)
+#define TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
+#define TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
 #define MODULES_VADDR		_AC(0x0000000010000000,UL)
 #define MODULES_LEN		_AC(0x00000000e0000000,UL)
 #define MODULES_END		_AC(0x00000000f0000000,UL)
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index a364000ca1aa..7f41d40b7e6e 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
 					size_t count)
 {
 	unsigned long val, err;
-	int ret = sscanf(buf, "%ld", &val);
+	int ret = sscanf(buf, "%lu", &val);
 
 	if (ret != 1)
 		return -EINVAL;
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 2c20ad63ddbf..30eee6e8a81b 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -236,6 +236,7 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	 */
 	VISEntryHalf
 
+	membar		#Sync
 	alignaddr	%o1, %g0, %g0
 
 	add		%o1, (64 - 1), %o4
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index a8ff0d1a3b69..4ced3fc66130 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
 	show_regs(regs);
 }
 
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
-							 unsigned long addr)
-{
-	static int times;
-
-	if (times++ < 10)
-		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
-		       "reports 64-bit fault address [%lx]\n",
-		       current->comm, current->pid, addr);
-	show_regs(regs);
-}
-
 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
@@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			goto intr_or_no_mm;
 		}
 	}
-	if (unlikely((address >> 32) != 0)) {
-		bogus_32bit_fault_address(regs, address);
+	if (unlikely((address >> 32) != 0))
 		goto intr_or_no_mm;
-	}
 	}
 
 	if (regs->tstate & TSTATE_PRIV) {
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f5d506fdddad..fe19b81acc09 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 	mm->context.tsb_block[tsb_idx].tsb_nentries =
 		tsb_bytes / sizeof(struct tsb);
 
-	base = TSBMAP_BASE;
+	switch (tsb_idx) {
+	case MM_TSB_BASE:
+		base = TSBMAP_8K_BASE;
+		break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	case MM_TSB_HUGE:
+		base = TSBMAP_4M_BASE;
+		break;
+#endif
+	default:
+		BUG();
+	}
+
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
 	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index aa333d966886..adb02aa62af5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
-	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 	EVENT_CONSTRAINT_END
 };
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index dc017735bb91..6d5663a599a7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
 	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
 
 	header->pages = sz / PAGE_SIZE;
-	hole = sz - (proglen + sizeof(*header));
+	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 
 	/* insert a random number of int3 instructions before BPF code */
 	*image_ptr = &header->image[prandom_u32() % hole];
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index cf925c4f36b7..8445d570f60a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -132,6 +132,21 @@ void acpi_bus_private_data_handler(acpi_handle handle,
 }
 EXPORT_SYMBOL(acpi_bus_private_data_handler);
 
+int acpi_bus_attach_private_data(acpi_handle handle, void *data)
+{
+	acpi_status status;
+
+	status = acpi_attach_data(handle,
+				  acpi_bus_private_data_handler, data);
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_debug(handle, "Error attaching device data\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_attach_private_data);
+
 int acpi_bus_get_private_data(acpi_handle handle, void **data)
 {
 	acpi_status status;
@@ -140,15 +155,20 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
 		return -EINVAL;
 
 	status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
-	if (ACPI_FAILURE(status) || !*data) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
-				  handle));
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_debug(handle, "No context for object\n");
 		return -ENODEV;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL(acpi_bus_get_private_data);
+EXPORT_SYMBOL_GPL(acpi_bus_get_private_data);
+
+void acpi_bus_detach_private_data(acpi_handle handle)
+{
+	acpi_detach_data(handle, acpi_bus_private_data_handler);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
 
 void acpi_bus_no_hotplug(acpi_handle handle)
 {
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 25bbc55dca89..112817e963e0 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -925,13 +925,10 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
 	if (result)
 		return result;
 
-	status = acpi_attach_data(tz->device->handle,
-				  acpi_bus_private_data_handler,
-				  tz->thermal_zone);
-	if (ACPI_FAILURE(status)) {
-		pr_err(PREFIX "Error attaching device data\n");
+	status = acpi_bus_attach_private_data(tz->device->handle,
+					      tz->thermal_zone);
+	if (ACPI_FAILURE(status))
 		return -ENODEV;
-	}
 
 	tz->tz_enabled = 1;
 
@@ -946,7 +943,7 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
 	sysfs_remove_link(&tz->thermal_zone->device.kobj, "device");
 	thermal_zone_device_unregister(tz->thermal_zone);
 	tz->thermal_zone = NULL;
-	acpi_detach_data(tz->device->handle, acpi_bus_private_data_handler);
+	acpi_bus_detach_private_data(tz->device->handle);
 }
 
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
 			dma_unmap_page(dev, unmap->addr[i], unmap->len,
 				       DMA_BIDIRECTIONAL);
 	}
+	cnt = unmap->map_cnt;
 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
 }
 
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 	memset(unmap, 0, sizeof(*unmap));
 	kref_init(&unmap->kref);
 	unmap->dev = dev;
+	unmap->map_cnt = nr;
 
 	return unmap;
 }
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-	u32 activation;
-
 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = readl_relaxed(XOR_ACTIVATION(chan));
-	activation |= 0x1;
-	writel_relaxed(activation, XOR_ACTIVATION(chan));
+
+	/* writel ensures all descriptors are flushed before activation */
+	writel(BIT(0), XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 7762665ad8fd..876de9ac3793 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
 	}
 
 	if (outp == 8)
-		return false;
+		return conf;
 
 	data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
 	if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 43fec17ea540..bbf117be572f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
 	case 0x00: return 2;
 	case 0x19: return 1;
 	case 0x1c: return 0;
+	case 0x1e: return 2;
 	default:
 		break;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 68528619834a..8149e7cf4303 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1642,6 +1642,7 @@ struct radeon_vce {
 	unsigned		fb_version;
 	atomic_t		handles[RADEON_MAX_VCE_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_VCE_HANDLES];
+	unsigned		img_size[RADEON_MAX_VCE_HANDLES];
 	struct delayed_work	idle_work;
 };
 
@@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence);
 void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
 void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
 int radeon_vce_cs_parse(struct radeon_cs_parser *p);
 bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
 			       struct radeon_ring *ring,
@@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+			     (rdev->family == CHIP_MULLINS))
 
 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
 			      (rdev->ddev->pdev->device == 0x6850) || \
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b3633d9a5317..9ab30976287d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 		}
 	}
 
+	if (!found) {
+		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+			dhandle = ACPI_HANDLE(&pdev->dev);
+			if (!dhandle)
+				continue;
+
+			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+			if (!ACPI_FAILURE(status)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
 	if (!found)
 		return false;
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 408b6ac53f0b..f00dbbf4d806 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 
 	/* avoid high jitter with small fractional dividers */
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
-		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
+		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
 		if (fb_div < fb_div_min) {
 			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
 			fb_div *= tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0cc47f12d995..eaaedba04675 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return r;
 		}
 
-		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-		if (r) {
-			radeon_vm_fini(rdev, &fpriv->vm);
-			kfree(fpriv);
-			return r;
-		}
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (r) {
+				radeon_vm_fini(rdev, &fpriv->vm);
+				kfree(fpriv);
+				return r;
+			}
 
 			/* map the ib pool buffer read only into
 			 * virtual address space */
 			bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
 						 rdev->ring_tmp_bo.bo);
 			r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
 						  RADEON_VM_PAGE_READABLE |
 						  RADEON_VM_PAGE_SNOOPED);
 
 			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 			if (r) {
 				radeon_vm_fini(rdev, &fpriv->vm);
 				kfree(fpriv);
 				return r;
+			}
 		}
-
 		file_priv->driver_priv = fpriv;
 	}
 
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 		struct radeon_bo_va *bo_va;
 		int r;
 
-		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-		if (!r) {
-			bo_va = radeon_vm_bo_find(&fpriv->vm,
-						  rdev->ring_tmp_bo.bo);
-			if (bo_va)
-				radeon_vm_bo_rmv(rdev, bo_va);
-			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		if (rdev->accel_working) {
+			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+			if (!r) {
+				bo_va = radeon_vm_bo_find(&fpriv->vm,
+							  rdev->ring_tmp_bo.bo);
+				if (bo_va)
+					radeon_vm_bo_rmv(rdev, bo_va);
+				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+			}
 		}
 
 		radeon_vm_fini(rdev, &fpriv->vm);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 19bec0dbfa38..4faa4d6f9bb4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		 * into account. We don't want to disallow buffer moves
 		 * completely.
 		 */
-		if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+		if ((lobj->alt_domain & current_domain) != 0 &&
 		    (domain & current_domain) == 0 && /* will be moved */
 		    bytes_moved > bytes_moved_threshold) {
 			/* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
 	rdev = rbo->rdev;
-	if (bo->mem.mem_type == TTM_PL_VRAM) {
-		size = bo->mem.num_pages << PAGE_SHIFT;
-		offset = bo->mem.start << PAGE_SHIFT;
-		if ((offset + size) > rdev->mc.visible_vram_size) {
-			/* hurrah the memory is not visible ! */
-			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-			r = ttm_bo_validate(bo, &rbo->placement, false, false);
-			if (unlikely(r != 0))
-				return r;
-			offset = bo->mem.start << PAGE_SHIFT;
-			/* this should not happen */
-			if ((offset + size) > rdev->mc.visible_vram_size)
-				return -EINVAL;
-		}
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->mem.num_pages << PAGE_SHIFT;
+	offset = bo->mem.start << PAGE_SHIFT;
+	if ((offset + size) <= rdev->mc.visible_vram_size)
+		return 0;
+
+	/* hurrah the memory is not visible ! */
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	r = ttm_bo_validate(bo, &rbo->placement, false, false);
+	if (unlikely(r == -ENOMEM)) {
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		return ttm_bo_validate(bo, &rbo->placement, false, false);
+	} else if (unlikely(r != 0)) {
+		return r;
 	}
+
+	offset = bo->mem.start << PAGE_SHIFT;
+	/* this should never happen */
+	if ((offset + size) > rdev->mc.visible_vram_size)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f30b8426eee2..53d6e1bb48dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set profile when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	mutex_lock(&rdev->pm.mutex);
 	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set method when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+		count = -EINVAL;
+		goto fail;
+	}
+
 	/* we don't support the legacy modes with dpm */
 	if (rdev->pm.pm_method == PM_METHOD_DPM) {
 		count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return snprintf(buf, PAGE_SIZE, "off\n");
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
 			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* Can't set dpm state when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	mutex_lock(&rdev->pm.mutex);
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
 	struct radeon_device *rdev = ddev->dev_private;
 	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return snprintf(buf, PAGE_SIZE, "off\n");
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
 	enum radeon_dpm_forced_level level;
 	int ret = 0;
 
+	/* Can't force performance level when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	mutex_lock(&rdev->pm.mutex);
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
 				      char *buf)
 {
 	struct radeon_device *rdev = dev_get_drvdata(dev);
+	struct drm_device *ddev = rdev->ddev;
 	int temp;
 
+	/* Can't get temperature when the card is off */
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
 	if (rdev->asic->pm.get_temperature)
 		temp = radeon_get_temperature(rdev);
 	else
@@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct drm_device *ddev = rdev->ddev;
 
-	if (rdev->pm.dpm_enabled) {
+	if ((rdev->flags & RADEON_IS_PX) &&
+	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+		seq_printf(m, "PX asic powered off\n");
+	} else if (rdev->pm.dpm_enabled) {
 		mutex_lock(&rdev->pm.mutex);
 		if (rdev->asic->dpm.debugfs_print_current_performance_level)
 			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f73324c81491..3971d968af6c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
443 * @p: parser context 443 * @p: parser context
444 * @lo: address of lower dword 444 * @lo: address of lower dword
445 * @hi: address of higher dword 445 * @hi: address of higher dword
446 * @size: size of checker for relocation buffer
446 * 447 *
447 * Patch relocation inside command stream with real buffer address 448 * Patch relocation inside command stream with real buffer address
448 */ 449 */
449int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) 450int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
451 unsigned size)
450{ 452{
451 struct radeon_cs_chunk *relocs_chunk; 453 struct radeon_cs_chunk *relocs_chunk;
452 uint64_t offset; 454 struct radeon_cs_reloc *reloc;
455 uint64_t start, end, offset;
453 unsigned idx; 456 unsigned idx;
454 457
455 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 458 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -462,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
462 return -EINVAL; 465 return -EINVAL;
463 } 466 }
464 467
465 offset += p->relocs_ptr[(idx / 4)]->gpu_offset; 468 reloc = p->relocs_ptr[(idx / 4)];
469 start = reloc->gpu_offset;
470 end = start + radeon_bo_size(reloc->robj);
471 start += offset;
466 472
467 p->ib.ptr[lo] = offset & 0xFFFFFFFF; 473 p->ib.ptr[lo] = start & 0xFFFFFFFF;
468 p->ib.ptr[hi] = offset >> 32; 474 p->ib.ptr[hi] = start >> 32;
475
476 if (end <= start) {
477 DRM_ERROR("invalid reloc offset %llX!\n", offset);
478 return -EINVAL;
479 }
480 if ((end - start) < size) {
481 DRM_ERROR("buffer to small (%d / %d)!\n",
482 (unsigned)(end - start), size);
483 return -EINVAL;
484 }
469 485
470 return 0; 486 return 0;
471} 487}
472 488
473/** 489/**
490 * radeon_vce_validate_handle - validate stream handle
491 *
492 * @p: parser context
493 * @handle: handle to validate
494 *
495 * Validates the handle and return the found session index or -EINVAL
496 * we we don't have another free session index.
497 */
498int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
499{
500 unsigned i;
501
502 /* validate the handle */
503 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
504 if (atomic_read(&p->rdev->vce.handles[i]) == handle)
505 return i;
506 }
507
508 /* handle not found try to alloc a new one */
509 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
510 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
511 p->rdev->vce.filp[i] = p->filp;
512 p->rdev->vce.img_size[i] = 0;
513 return i;
514 }
515 }
516
517 DRM_ERROR("No more free VCE handles!\n");
518 return -EINVAL;
519}
520
521/**
474 * radeon_vce_cs_parse - parse and validate the command stream 522 * radeon_vce_cs_parse - parse and validate the command stream
475 * 523 *
476 * @p: parser context 524 * @p: parser context
@@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
478 */ 526 */
479int radeon_vce_cs_parse(struct radeon_cs_parser *p) 527int radeon_vce_cs_parse(struct radeon_cs_parser *p)
480{ 528{
481 uint32_t handle = 0; 529 int session_idx = -1;
482 bool destroy = false; 530 bool destroyed = false;
531 uint32_t tmp, handle = 0;
532 uint32_t *size = &tmp;
483 int i, r; 533 int i, r;
484 534
485 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { 535 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
491 return -EINVAL; 541 return -EINVAL;
492 } 542 }
493 543
544 if (destroyed) {
545 DRM_ERROR("No other command allowed after destroy!\n");
546 return -EINVAL;
547 }
548
494 switch (cmd) { 549 switch (cmd) {
495 case 0x00000001: // session 550 case 0x00000001: // session
496 handle = radeon_get_ib_value(p, p->idx + 2); 551 handle = radeon_get_ib_value(p, p->idx + 2);
552 session_idx = radeon_vce_validate_handle(p, handle);
553 if (session_idx < 0)
554 return session_idx;
555 size = &p->rdev->vce.img_size[session_idx];
497 break; 556 break;
498 557
499 case 0x00000002: // task info 558 case 0x00000002: // task info
559 break;
560
500 case 0x01000001: // create 561 case 0x01000001: // create
562 *size = radeon_get_ib_value(p, p->idx + 8) *
563 radeon_get_ib_value(p, p->idx + 10) *
564 8 * 3 / 2;
565 break;
566
501 case 0x04000001: // config extension 567 case 0x04000001: // config extension
502 case 0x04000002: // pic control 568 case 0x04000002: // pic control
503 case 0x04000005: // rate control 569 case 0x04000005: // rate control
@@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
506 break; 572 break;
507 573
508 case 0x03000001: // encode 574 case 0x03000001: // encode
509 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); 575 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
576 *size);
510 if (r) 577 if (r)
511 return r; 578 return r;
512 579
513 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); 580 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
581 *size / 3);
514 if (r) 582 if (r)
515 return r; 583 return r;
516 break; 584 break;
517 585
518 case 0x02000001: // destroy 586 case 0x02000001: // destroy
519 destroy = true; 587 destroyed = true;
520 break; 588 break;
521 589
522 case 0x05000001: // context buffer 590 case 0x05000001: // context buffer
591 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
592 *size * 2);
593 if (r)
594 return r;
595 break;
596
523 case 0x05000004: // video bitstream buffer 597 case 0x05000004: // video bitstream buffer
598 tmp = radeon_get_ib_value(p, p->idx + 4);
599 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
600 tmp);
601 if (r)
602 return r;
603 break;
604
524 case 0x05000005: // feedback buffer 605 case 0x05000005: // feedback buffer
525 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); 606 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
607 4096);
526 if (r) 608 if (r)
527 return r; 609 return r;
528 break; 610 break;
@@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
532 return -EINVAL; 614 return -EINVAL;
533 } 615 }
534 616
617 if (session_idx == -1) {
618 DRM_ERROR("no session command at start of IB\n");
619 return -EINVAL;
620 }
621
535 p->idx += len / 4; 622 p->idx += len / 4;
536 } 623 }
537 624
538 if (destroy) { 625 if (destroyed) {
539 /* IB contains a destroy msg, free the handle */ 626 /* IB contains a destroy msg, free the handle */
540 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) 627 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
541 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); 628 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
542
543 return 0;
544 }
545
546 /* create or encode, validate the handle */
547 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
548 if (atomic_read(&p->rdev->vce.handles[i]) == handle)
549 return 0;
550 } 629 }
551 630
552 /* handle not found try to alloc a new one */ 631 return 0;
553 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
554 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
555 p->rdev->vce.filp[i] = p->filp;
556 return 0;
557 }
558 }
559
560 DRM_ERROR("No more free VCE handles!\n");
561 return -EINVAL;
562} 632}
563 633
564/** 634/**
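
The new radeon_vce_validate_handle() centralizes the lookup-then-allocate logic that the old tail of radeon_vce_cs_parse() open-coded: scan for an existing handle first, then claim a free slot with an atomic compare-and-exchange so two racing parsers cannot both take the same slot. A minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's atomic_t (table size and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];

/* Return the slot index for @handle, claiming a free (zero) slot if the
 * handle is new; -1 when the table is full. */
static int validate_handle(uint32_t handle)
{
	/* pass 1: is the handle already known? */
	for (int i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return i;

	/* pass 2: atomically claim the first free slot */
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected,
						   handle))
			return i;
	}
	return -1; /* no free slot left */
}

int main(void)
{
	printf("first lookup:  %d\n", validate_handle(0x42)); /* claims slot 0 */
	printf("second lookup: %d\n", validate_handle(0x42)); /* finds slot 0 */
	return 0;
}
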
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2aae6ce49d32..d9ab99f47612 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
595 ndw = 64; 595 ndw = 64;
596 596
597 /* assume the worst case */ 597 /* assume the worst case */
598 ndw += vm->max_pde_used * 12; 598 ndw += vm->max_pde_used * 16;
599 599
600 /* update too big for an IB */ 600 /* update too big for an IB */
601 if (ndw > 0xfffff) 601 if (ndw > 0xfffff)
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 683532f84931..7321283602ce 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -107,8 +107,8 @@
107#define SPLL_CHG_STATUS (1 << 1) 107#define SPLL_CHG_STATUS (1 << 1)
108#define SPLL_CNTL_MODE 0x618 108#define SPLL_CNTL_MODE 0x618
109#define SPLL_SW_DIR_CONTROL (1 << 0) 109#define SPLL_SW_DIR_CONTROL (1 << 0)
110# define SPLL_REFCLK_SEL(x) ((x) << 8) 110# define SPLL_REFCLK_SEL(x) ((x) << 26)
111# define SPLL_REFCLK_SEL_MASK 0xFF00 111# define SPLL_REFCLK_SEL_MASK (3 << 26)
112 112
113#define CG_SPLL_SPREAD_SPECTRUM 0x620 113#define CG_SPLL_SPREAD_SPECTRUM 0x620
114#define SSEN (1 << 0) 114#define SSEN (1 << 0)
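
The sid.h correction turns SPLL_REFCLK_SEL into a two-bit field at bit 26 with a matching mask, so the macro pair once again describes a single register field consistently. A hedged sketch of how such shift/mask pairs are used for a read-modify-write on a cached register value (userspace stand-in; only the field constants come from the corrected header):

#include <stdint.h>
#include <stdio.h>

#define SPLL_REFCLK_SEL(x)	((uint32_t)(x) << 26)
#define SPLL_REFCLK_SEL_MASK	(3u << 26)

/* clear the old field, then insert the new value, masked to the field */
static uint32_t set_refclk_sel(uint32_t reg, unsigned int sel)
{
	reg &= ~SPLL_REFCLK_SEL_MASK;
	reg |= SPLL_REFCLK_SEL(sel) & SPLL_REFCLK_SEL_MASK;
	return reg;
}

int main(void)
{
	/* 0xffffffff with the field forced to 1 -> 0xf7ffffff */
	printf("0x%08x\n", set_refclk_sel(0xffffffffu, 1));
	return 0;
}
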
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
1053 1053
1054config SENSORS_NTC_THERMISTOR 1054config SENSORS_NTC_THERMISTOR
1055 tristate "NTC thermistor support" 1055 tristate "NTC thermistor support"
1056 depends on (!OF && !IIO) || (OF && IIO) 1056 depends on !OF || IIO=n || IIO
1057 help 1057 help
1058 This driver supports NTC thermistors sensor reading and its 1058 This driver supports NTC thermistors sensor reading and its
1059 interpretation. The driver can also monitor the temperature and 1059 interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
44 unsigned int ohm; 44 unsigned int ohm;
45}; 45};
46 46
47/* Order matters, ntc_match references the entries by index */
47static const struct platform_device_id ntc_thermistor_id[] = { 48static const struct platform_device_id ntc_thermistor_id[] = {
48 { "ncp15wb473", TYPE_NCPXXWB473 }, 49 { "ncp15wb473", TYPE_NCPXXWB473 },
49 { "ncp18wb473", TYPE_NCPXXWB473 }, 50 { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
141 char name[PLATFORM_NAME_SIZE]; 142 char name[PLATFORM_NAME_SIZE];
142}; 143};
143 144
144#ifdef CONFIG_OF 145#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
145static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) 146static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
146{ 147{
147 struct iio_channel *channel = pdata->chan; 148 struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
163 164
164static const struct of_device_id ntc_match[] = { 165static const struct of_device_id ntc_match[] = {
165 { .compatible = "ntc,ncp15wb473", 166 { .compatible = "ntc,ncp15wb473",
166 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 167 .data = &ntc_thermistor_id[0] },
167 { .compatible = "ntc,ncp18wb473", 168 { .compatible = "ntc,ncp18wb473",
168 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 169 .data = &ntc_thermistor_id[1] },
169 { .compatible = "ntc,ncp21wb473", 170 { .compatible = "ntc,ncp21wb473",
170 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 171 .data = &ntc_thermistor_id[2] },
171 { .compatible = "ntc,ncp03wb473", 172 { .compatible = "ntc,ncp03wb473",
172 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 173 .data = &ntc_thermistor_id[3] },
173 { .compatible = "ntc,ncp15wl333", 174 { .compatible = "ntc,ncp15wl333",
174 .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, 175 .data = &ntc_thermistor_id[4] },
175 { }, 176 { },
176}; 177};
177MODULE_DEVICE_TABLE(of, ntc_match); 178MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
223 return NULL; 224 return NULL;
224} 225}
225 226
227#define ntc_match NULL
228
226static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) 229static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
227{ } 230{ }
228#endif 231#endif
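
The ntc_thermistor fix works because the of_device_id .data pointers must index ntc_thermistor_id[] by array position, while TYPE_NCPXXWB473 and friends are driver type codes that happen not to match those positions; that coupling is what the new "Order matters" comment pins down. A small self-contained sketch of the bug class, with invented names:

#include <stdio.h>

/* driver-internal type codes: deliberately NOT table positions */
enum { TYPE_NCPXXWB473 = 10, TYPE_NCPXXWL333 = 11 };

struct dev_id { const char *name; int type; };

/* order matters: the match table below references entries by index */
static const struct dev_id ids[] = {
	{ "ncp15wb473", TYPE_NCPXXWB473 },	/* index 0 */
	{ "ncp15wl333", TYPE_NCPXXWL333 },	/* index 1 */
};

struct of_match { const char *compatible; const struct dev_id *data; };

static const struct of_match match[] = {
	/* &ids[TYPE_NCPXXWB473] would point 10 entries past the table */
	{ "ntc,ncp15wb473", &ids[0] },
	{ "ntc,ncp15wl333", &ids[1] },
};

int main(void)
{
	printf("%s -> %s\n", match[0].compatible, match[0].data->name);
	return 0;
}
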
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
48 48
49#include <linux/mlx4/driver.h> 49#include <linux/mlx4/driver.h>
50#include <linux/mlx4/cmd.h> 50#include <linux/mlx4/cmd.h>
51#include <linux/mlx4/qp.h>
51 52
52#include "mlx4_ib.h" 53#include "mlx4_ib.h"
53#include "user.h" 54#include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1614} 1615}
1615#endif 1616#endif
1616 1617
1618#define MLX4_IB_INVALID_MAC ((u64)-1)
1619static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1620 struct net_device *dev,
1621 int port)
1622{
1623 u64 new_smac = 0;
1624 u64 release_mac = MLX4_IB_INVALID_MAC;
1625 struct mlx4_ib_qp *qp;
1626
1627 read_lock(&dev_base_lock);
1628 new_smac = mlx4_mac_to_u64(dev->dev_addr);
1629 read_unlock(&dev_base_lock);
1630
1631 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1632 qp = ibdev->qp1_proxy[port - 1];
1633 if (qp) {
1634 int new_smac_index;
1635 u64 old_smac = qp->pri.smac;
1636 struct mlx4_update_qp_params update_params;
1637
1638 if (new_smac == old_smac)
1639 goto unlock;
1640
1641 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1642
1643 if (new_smac_index < 0)
1644 goto unlock;
1645
1646 update_params.smac_index = new_smac_index;
1647 if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
1648 &update_params)) {
1649 release_mac = new_smac;
1650 goto unlock;
1651 }
1652
1653 qp->pri.smac = new_smac;
1654 qp->pri.smac_index = new_smac_index;
1655
1656 release_mac = old_smac;
1657 }
1658
1659unlock:
1660 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1661 if (release_mac != MLX4_IB_INVALID_MAC)
1662 mlx4_unregister_mac(ibdev->dev, port, release_mac);
1663}
1664
1617static void mlx4_ib_get_dev_addr(struct net_device *dev, 1665static void mlx4_ib_get_dev_addr(struct net_device *dev,
1618 struct mlx4_ib_dev *ibdev, u8 port) 1666 struct mlx4_ib_dev *ibdev, u8 port)
1619{ 1667{
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1689 return 0; 1737 return 0;
1690} 1738}
1691 1739
1692static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) 1740static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1741 struct net_device *dev,
1742 unsigned long event)
1743
1693{ 1744{
1694 struct mlx4_ib_iboe *iboe; 1745 struct mlx4_ib_iboe *iboe;
1746 int update_qps_port = -1;
1695 int port; 1747 int port;
1696 1748
1697 iboe = &ibdev->iboe; 1749 iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1719 } 1771 }
1720 curr_master = iboe->masters[port - 1]; 1772 curr_master = iboe->masters[port - 1];
1721 1773
1774 if (dev == iboe->netdevs[port - 1] &&
1775 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
1776 event == NETDEV_UP || event == NETDEV_CHANGE))
1777 update_qps_port = port;
1778
1722 if (curr_netdev) { 1779 if (curr_netdev) {
1723 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? 1780 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1724 IB_PORT_ACTIVE : IB_PORT_DOWN; 1781 IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1752 } 1809 }
1753 1810
1754 spin_unlock(&iboe->lock); 1811 spin_unlock(&iboe->lock);
1812
1813 if (update_qps_port > 0)
1814 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
1755} 1815}
1756 1816
1757static int mlx4_ib_netdev_event(struct notifier_block *this, 1817static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
1764 return NOTIFY_DONE; 1824 return NOTIFY_DONE;
1765 1825
1766 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); 1826 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1767 mlx4_ib_scan_netdevs(ibdev); 1827 mlx4_ib_scan_netdevs(ibdev, dev, event);
1768 1828
1769 return NOTIFY_DONE; 1829 return NOTIFY_DONE;
1770} 1830}
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2043 goto err_map; 2103 goto err_map;
2044 2104
2045 for (i = 0; i < ibdev->num_ports; ++i) { 2105 for (i = 0; i < ibdev->num_ports; ++i) {
2106 mutex_init(&ibdev->qp1_proxy_lock[i]);
2046 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == 2107 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2047 IB_LINK_LAYER_ETHERNET) { 2108 IB_LINK_LAYER_ETHERNET) {
2048 err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); 2109 err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2126 for (i = 1 ; i <= ibdev->num_ports ; ++i) 2187 for (i = 1 ; i <= ibdev->num_ports ; ++i)
2127 reset_gid_table(ibdev, i); 2188 reset_gid_table(ibdev, i);
2128 rtnl_lock(); 2189 rtnl_lock();
2129 mlx4_ib_scan_netdevs(ibdev); 2190 mlx4_ib_scan_netdevs(ibdev, NULL, 0);
2130 rtnl_unlock(); 2191 rtnl_unlock();
2131 mlx4_ib_init_gid_table(ibdev); 2192 mlx4_ib_init_gid_table(ibdev);
2132 } 2193 }
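
mlx4_ib_update_qps() registers the new source MAC before touching the proxy QP and then unregisters exactly one MAC on the way out: the new one if the firmware update failed, the old one if it succeeded, and none if nothing changed. A userspace sketch of that "release the loser" pattern with stubbed register/update primitives (all names here are illustrative, not the driver API):

#include <stdint.h>
#include <stdio.h>

#define INVALID_MAC ((uint64_t)-1)

static uint64_t current_smac = 0x001122334455ull;

static int register_mac(uint64_t mac)   { (void)mac; return 1; /* index */ }
static void unregister_mac(uint64_t mac)
{
	printf("unregister %012llx\n", (unsigned long long)mac);
}
static int update_qp_smac(int idx)      { (void)idx; return 0; /* 0 = ok */ }

static void update_smac(uint64_t new_smac)
{
	uint64_t release_mac = INVALID_MAC;
	int idx;

	if (new_smac == current_smac)
		goto out;

	idx = register_mac(new_smac);
	if (idx < 0)
		goto out;

	if (update_qp_smac(idx)) {
		release_mac = new_smac;		/* update failed: drop the new MAC */
		goto out;
	}

	release_mac = current_smac;		/* update ok: drop the old MAC */
	current_smac = new_smac;
out:
	if (release_mac != INVALID_MAC)
		unregister_mac(release_mac);
}

int main(void)
{
	update_smac(0x665544332211ull);		/* unregisters the old MAC */
	return 0;
}
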
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
522 int steer_qpn_count; 522 int steer_qpn_count;
523 int steer_qpn_base; 523 int steer_qpn_base;
524 int steering_support; 524 int steering_support;
525 struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
526 /* lock when destroying qp1_proxy and getting netdev events */
527 struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
525}; 528};
526 529
527struct ib_event_work { 530struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..dc57482ae7af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
1132 if (is_qp0(dev, mqp)) 1132 if (is_qp0(dev, mqp))
1133 mlx4_CLOSE_PORT(dev->dev, mqp->port); 1133 mlx4_CLOSE_PORT(dev->dev, mqp->port);
1134 1134
1135 if (dev->qp1_proxy[mqp->port - 1] == mqp) {
1136 mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
1137 dev->qp1_proxy[mqp->port - 1] = NULL;
1138 mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
1139 }
1140
1135 pd = get_pd(mqp); 1141 pd = get_pd(mqp);
1136 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); 1142 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
1137 1143
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1646 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); 1652 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1647 if (err) 1653 if (err)
1648 return -EINVAL; 1654 return -EINVAL;
1655 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1656 dev->qp1_proxy[qp->port - 1] = qp;
1649 } 1657 }
1650 } 1658 }
1651 } 1659 }
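
On the destroy side, mlx4_ib_destroy_qp() now unpublishes the qp1_proxy pointer under qp1_proxy_lock before the QP is torn down, so the netdev-event path can never dereference a QP that is mid-destruction. A minimal pthreads sketch of that unpublish-then-free discipline (hypothetical names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct qp { int id; };

static pthread_mutex_t qp1_proxy_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qp *qp1_proxy;		/* shared with the event path */

/* event path: only touches the QP while holding the lock */
static void update_qps(void)
{
	pthread_mutex_lock(&qp1_proxy_lock);
	if (qp1_proxy)
		printf("updating qp %d\n", qp1_proxy->id);
	pthread_mutex_unlock(&qp1_proxy_lock);
}

/* destroy path: unpublish under the lock, then free */
static void destroy_qp(struct qp *qp)
{
	pthread_mutex_lock(&qp1_proxy_lock);
	if (qp1_proxy == qp)
		qp1_proxy = NULL;
	pthread_mutex_unlock(&qp1_proxy_lock);
	free(qp);			/* no reader can reach it now */
}

int main(void)
{
	struct qp *qp = malloc(sizeof(*qp));
	qp->id = 1;
	qp1_proxy = qp;
	update_qps();
	destroy_qp(qp);
	update_qps();			/* sees NULL, does nothing */
	return 0;
}
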
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..93580a47cc54 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
82} 82}
83 83
84/* Forward declaration */ 84/* Forward declaration */
85static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); 85static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
86 bool strict_match);
86static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); 87static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
87static void rlb_src_unlink(struct bonding *bond, u32 index); 88static void rlb_src_unlink(struct bonding *bond, u32 index);
88static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, 89static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
459 460
460 bond->alb_info.rlb_promisc_timeout_counter = 0; 461 bond->alb_info.rlb_promisc_timeout_counter = 0;
461 462
462 alb_send_learning_packets(bond->curr_active_slave, addr); 463 alb_send_learning_packets(bond->curr_active_slave, addr, true);
463} 464}
464 465
465/* slave being removed should not be active at this point 466/* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
995/*********************** tlb/rlb shared functions *********************/ 996/*********************** tlb/rlb shared functions *********************/
996 997
997static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], 998static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
998 u16 vid) 999 __be16 vlan_proto, u16 vid)
999{ 1000{
1000 struct learning_pkt pkt; 1001 struct learning_pkt pkt;
1001 struct sk_buff *skb; 1002 struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1021 skb->dev = slave->dev; 1022 skb->dev = slave->dev;
1022 1023
1023 if (vid) { 1024 if (vid) {
1024 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); 1025 skb = vlan_put_tag(skb, vlan_proto, vid);
1025 if (!skb) { 1026 if (!skb) {
1026 pr_err("%s: Error: failed to insert VLAN tag\n", 1027 pr_err("%s: Error: failed to insert VLAN tag\n",
1027 slave->bond->dev->name); 1028 slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1032 dev_queue_xmit(skb); 1033 dev_queue_xmit(skb);
1033} 1034}
1034 1035
1035 1036static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
1036static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) 1037 bool strict_match)
1037{ 1038{
1038 struct bonding *bond = bond_get_bond_by_slave(slave); 1039 struct bonding *bond = bond_get_bond_by_slave(slave);
1039 struct net_device *upper; 1040 struct net_device *upper;
1040 struct list_head *iter; 1041 struct list_head *iter;
1041 1042
1042 /* send untagged */ 1043 /* send untagged */
1043 alb_send_lp_vid(slave, mac_addr, 0); 1044 alb_send_lp_vid(slave, mac_addr, 0, 0);
1044 1045
1045 /* loop through vlans and send one packet for each */ 1046 /* loop through vlans and send one packet for each */
1046 rcu_read_lock(); 1047 rcu_read_lock();
1047 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { 1048 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
1048 if (upper->priv_flags & IFF_802_1Q_VLAN) 1049 if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
1049 alb_send_lp_vid(slave, mac_addr, 1050 if (strict_match &&
1050 vlan_dev_vlan_id(upper)); 1051 ether_addr_equal_64bits(mac_addr,
1052 upper->dev_addr)) {
1053 alb_send_lp_vid(slave, mac_addr,
1054 vlan_dev_vlan_proto(upper),
1055 vlan_dev_vlan_id(upper));
1056 } else if (!strict_match) {
1057 alb_send_lp_vid(slave, upper->dev_addr,
1058 vlan_dev_vlan_proto(upper),
1059 vlan_dev_vlan_id(upper));
1060 }
1061 }
1051 } 1062 }
1052 rcu_read_unlock(); 1063 rcu_read_unlock();
1053} 1064}
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1107 1118
1108 /* fasten the change in the switch */ 1119 /* fasten the change in the switch */
1109 if (SLAVE_IS_OK(slave1)) { 1120 if (SLAVE_IS_OK(slave1)) {
1110 alb_send_learning_packets(slave1, slave1->dev->dev_addr); 1121 alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
1111 if (bond->alb_info.rlb_enabled) { 1122 if (bond->alb_info.rlb_enabled) {
1112 /* inform the clients that the mac address 1123 /* inform the clients that the mac address
1113 * has changed 1124 * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1119 } 1130 }
1120 1131
1121 if (SLAVE_IS_OK(slave2)) { 1132 if (SLAVE_IS_OK(slave2)) {
1122 alb_send_learning_packets(slave2, slave2->dev->dev_addr); 1133 alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
1123 if (bond->alb_info.rlb_enabled) { 1134 if (bond->alb_info.rlb_enabled) {
1124 /* inform the clients that the mac address 1135 /* inform the clients that the mac address
1125 * has changed 1136 * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
1490 1501
1491 /* send learning packets */ 1502 /* send learning packets */
1492 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { 1503 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
1504 bool strict_match;
1505
1493 /* change of curr_active_slave involves swapping of mac addresses. 1506 /* change of curr_active_slave involves swapping of mac addresses.
1494 * in order to avoid this swapping from happening while 1507 * in order to avoid this swapping from happening while
1495 * sending the learning packets, the curr_slave_lock must be held for 1508 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
1497 */ 1510 */
1498 read_lock(&bond->curr_slave_lock); 1511 read_lock(&bond->curr_slave_lock);
1499 1512
1500 bond_for_each_slave_rcu(bond, slave, iter) 1513 bond_for_each_slave_rcu(bond, slave, iter) {
1501 alb_send_learning_packets(slave, slave->dev->dev_addr); 1514 /* If updating current_active, use all currently
 1515 * used mac addresses (!strict_match). Otherwise, only
 1516 * use the mac of the slave device.
1517 */
1518 strict_match = (slave != bond->curr_active_slave);
1519 alb_send_learning_packets(slave, slave->dev->dev_addr,
1520 strict_match);
1521 }
1502 1522
1503 read_unlock(&bond->curr_slave_lock); 1523 read_unlock(&bond->curr_slave_lock);
1504 1524
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1721 } else { 1741 } else {
1722 /* set the new_slave to the bond mac address */ 1742 /* set the new_slave to the bond mac address */
1723 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); 1743 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
1724 alb_send_learning_packets(new_slave, bond->dev->dev_addr); 1744 alb_send_learning_packets(new_slave, bond->dev->dev_addr,
1745 false);
1725 } 1746 }
1726 1747
1727 write_lock_bh(&bond->curr_slave_lock); 1748 write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1764 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); 1785 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
1765 1786
1766 read_lock(&bond->lock); 1787 read_lock(&bond->lock);
1767 alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); 1788 alb_send_learning_packets(bond->curr_active_slave,
1789 bond_dev->dev_addr, false);
1768 if (bond->alb_info.rlb_enabled) { 1790 if (bond->alb_info.rlb_enabled) {
1769 /* inform clients mac address has changed */ 1791 /* inform clients mac address has changed */
1770 rlb_req_update_slave_clients(bond, bond->curr_active_slave); 1792 rlb_req_update_slave_clients(bond, bond->curr_active_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..d3a67896d435 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2126 */ 2126 */
2127static void bond_arp_send(struct net_device *slave_dev, int arp_op, 2127static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2128 __be32 dest_ip, __be32 src_ip, 2128 __be32 dest_ip, __be32 src_ip,
2129 struct bond_vlan_tag *inner, 2129 struct bond_vlan_tag *tags)
2130 struct bond_vlan_tag *outer)
2131{ 2130{
2132 struct sk_buff *skb; 2131 struct sk_buff *skb;
2132 int i;
2133 2133
2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", 2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
2135 arp_op, slave_dev->name, &dest_ip, &src_ip); 2135 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2141 net_err_ratelimited("ARP packet allocation failed\n"); 2141 net_err_ratelimited("ARP packet allocation failed\n");
2142 return; 2142 return;
2143 } 2143 }
2144 if (outer->vlan_id) {
2145 if (inner->vlan_id) {
2146 pr_debug("inner tag: proto %X vid %X\n",
2147 ntohs(inner->vlan_proto), inner->vlan_id);
2148 skb = __vlan_put_tag(skb, inner->vlan_proto,
2149 inner->vlan_id);
2150 if (!skb) {
2151 net_err_ratelimited("failed to insert inner VLAN tag\n");
2152 return;
2153 }
2154 }
2155 2144
2156 pr_debug("outer reg: proto %X vid %X\n", 2145 /* Go through all the tags backwards and add them to the packet */
2157 ntohs(outer->vlan_proto), outer->vlan_id); 2146 for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
2158 skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); 2147 if (!tags[i].vlan_id)
2148 continue;
2149
2150 pr_debug("inner tag: proto %X vid %X\n",
2151 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
2152 skb = __vlan_put_tag(skb, tags[i].vlan_proto,
2153 tags[i].vlan_id);
2154 if (!skb) {
2155 net_err_ratelimited("failed to insert inner VLAN tag\n");
2156 return;
2157 }
2158 }
2159 /* Set the outer tag */
2160 if (tags[0].vlan_id) {
2161 pr_debug("outer tag: proto %X vid %X\n",
2162 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
2163 skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
2159 if (!skb) { 2164 if (!skb) {
2160 net_err_ratelimited("failed to insert outer VLAN tag\n"); 2165 net_err_ratelimited("failed to insert outer VLAN tag\n");
2161 return; 2166 return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2164 arp_xmit(skb); 2169 arp_xmit(skb);
2165} 2170}
2166 2171
2172/* Validate the device path between the @start_dev and the @end_dev.
2173 * The path is valid if the @end_dev is reachable through device
2174 * stacking.
2175 * When the path is validated, collect any vlan information in the
2176 * path.
2177 */
2178static bool bond_verify_device_path(struct net_device *start_dev,
2179 struct net_device *end_dev,
2180 struct bond_vlan_tag *tags)
2181{
2182 struct net_device *upper;
2183 struct list_head *iter;
2184 int idx;
2185
2186 if (start_dev == end_dev)
2187 return true;
2188
2189 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2190 if (bond_verify_device_path(upper, end_dev, tags)) {
2191 if (is_vlan_dev(upper)) {
2192 idx = vlan_get_encap_level(upper);
2193 if (idx >= BOND_MAX_VLAN_ENCAP)
2194 return false;
2195
2196 tags[idx].vlan_proto =
2197 vlan_dev_vlan_proto(upper);
2198 tags[idx].vlan_id = vlan_dev_vlan_id(upper);
2199 }
2200 return true;
2201 }
2202 }
2203
2204 return false;
2205}
2167 2206
2168static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2207static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2169{ 2208{
2170 struct net_device *upper, *vlan_upper;
2171 struct list_head *iter, *vlan_iter;
2172 struct rtable *rt; 2209 struct rtable *rt;
2173 struct bond_vlan_tag inner, outer; 2210 struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
2174 __be32 *targets = bond->params.arp_targets, addr; 2211 __be32 *targets = bond->params.arp_targets, addr;
2175 int i; 2212 int i;
2213 bool ret;
2176 2214
2177 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2215 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2178 pr_debug("basa: target %pI4\n", &targets[i]); 2216 pr_debug("basa: target %pI4\n", &targets[i]);
2179 inner.vlan_proto = 0; 2217 memset(tags, 0, sizeof(tags));
2180 inner.vlan_id = 0;
2181 outer.vlan_proto = 0;
2182 outer.vlan_id = 0;
2183 2218
2184 /* Find out through which dev should the packet go */ 2219 /* Find out through which dev should the packet go */
2185 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2220 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2192 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", 2227 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2193 bond->dev->name, 2228 bond->dev->name,
2194 &targets[i]); 2229 &targets[i]);
2195 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); 2230 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2231 0, tags);
2196 continue; 2232 continue;
2197 } 2233 }
2198 2234
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2201 goto found; 2237 goto found;
2202 2238
2203 rcu_read_lock(); 2239 rcu_read_lock();
2204 /* first we search only for vlan devices. for every vlan 2240 ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
2205 * found we verify its upper dev list, searching for the
2206 * rt->dst.dev. If found we save the tag of the vlan and
2207 * proceed to send the packet.
2208 */
2209 netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
2210 vlan_iter) {
2211 if (!is_vlan_dev(vlan_upper))
2212 continue;
2213
2214 if (vlan_upper == rt->dst.dev) {
2215 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2216 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2217 rcu_read_unlock();
2218 goto found;
2219 }
2220 netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
2221 iter) {
2222 if (upper == rt->dst.dev) {
2223 /* If the upper dev is a vlan dev too,
2224 * set the vlan tag to inner tag.
2225 */
2226 if (is_vlan_dev(upper)) {
2227 inner.vlan_proto = vlan_dev_vlan_proto(upper);
2228 inner.vlan_id = vlan_dev_vlan_id(upper);
2229 }
2230 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2231 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2232 rcu_read_unlock();
2233 goto found;
2234 }
2235 }
2236 }
2237
2238 /* if the device we're looking for is not on top of any of
2239 * our upper vlans, then just search for any dev that
2240 * matches, and in case it's a vlan - save the id
2241 */
2242 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
2243 if (upper == rt->dst.dev) {
2244 rcu_read_unlock();
2245 goto found;
2246 }
2247 }
2248 rcu_read_unlock(); 2241 rcu_read_unlock();
2249 2242
2243 if (ret)
2244 goto found;
2245
2250 /* Not our device - skip */ 2246 /* Not our device - skip */
2251 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2247 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2252 bond->dev->name, &targets[i], 2248 bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
2259 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); 2255 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2260 ip_rt_put(rt); 2256 ip_rt_put(rt);
2261 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2257 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2262 addr, &inner, &outer); 2258 addr, tags);
2263 } 2259 }
2264} 2260}
2265 2261
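
bond_verify_device_path() replaces the two hand-rolled VLAN searches with one recursion over the upper-device graph: it first proves rt->dst.dev is reachable from the bond, then records each VLAN's protocol and id at its encapsulation level while unwinding, which is why bond_arp_send() can push the tags back on in reverse index order. A toy sketch of the recursion over a simplified device graph (structures invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MAX_VLAN_ENCAP 2

struct tag { unsigned short proto, id; };

struct dev {
	const char *name;
	bool is_vlan;
	int encap_level;		/* 0 = outermost vlan */
	unsigned short vlan_proto, vlan_id;
	struct dev *upper[4];		/* simplified upper-device list */
	int n_upper;
};

/* walk uppers from @start to @end; on success fill tags[] by encap level */
static bool verify_path(struct dev *start, struct dev *end, struct tag *tags)
{
	if (start == end)
		return true;

	for (int i = 0; i < start->n_upper; i++) {
		struct dev *upper = start->upper[i];

		if (verify_path(upper, end, tags)) {
			if (upper->is_vlan) {
				int idx = upper->encap_level;

				if (idx >= MAX_VLAN_ENCAP)
					return false;
				tags[idx].proto = upper->vlan_proto;
				tags[idx].id = upper->vlan_id;
			}
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct dev vlan = { .name = "bond0.100", .is_vlan = true,
			    .encap_level = 0, .vlan_proto = 0x8100,
			    .vlan_id = 100 };
	struct dev bond = { .name = "bond0", .upper = { &vlan }, .n_upper = 1 };
	struct tag tags[MAX_VLAN_ENCAP] = { { 0 } };

	if (verify_path(&bond, &vlan, tags))
		printf("path ok, outer tag vid %u\n", tags[0].id);
	return 0;
}
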
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..832070298446 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
125static const struct bond_opt_value bond_intmax_tbl[] = { 125static const struct bond_opt_value bond_intmax_tbl[] = {
126 { "off", 0, BOND_VALFLAG_DEFAULT}, 126 { "off", 0, BOND_VALFLAG_DEFAULT},
127 { "maxval", INT_MAX, BOND_VALFLAG_MAX}, 127 { "maxval", INT_MAX, BOND_VALFLAG_MAX},
128 { NULL, -1, 0}
128}; 129};
129 130
130static const struct bond_opt_value bond_lacp_rate_tbl[] = { 131static const struct bond_opt_value bond_lacp_rate_tbl[] = {
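
The one-line bond_options fix matters because these option tables are walked until an entry with a NULL string; without the { NULL, -1, 0} sentinel, a lookup on bond_intmax_tbl reads past the end of the array. The sentinel-terminated table idiom in isolation, as a self-contained sketch:

#include <stdio.h>
#include <string.h>

struct opt_value { const char *string; long long value; int flags; };

static const struct opt_value intmax_tbl[] = {
	{ "off",	0,		0 },
	{ "maxval",	2147483647LL,	0 },
	{ NULL,		-1,		0 },	/* sentinel stops the walk */
};

/* iterate until the NULL sentinel; a missing sentinel walks off the table */
static const struct opt_value *opt_lookup(const struct opt_value *tbl,
					  const char *s)
{
	for (; tbl->string; tbl++)
		if (!strcmp(tbl->string, s))
			return tbl;
	return NULL;
}

int main(void)
{
	const struct opt_value *v = opt_lookup(intmax_tbl, "maxval");

	printf("%lld\n", v ? v->value : -1LL);
	return 0;
}
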
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..00bea320e3b5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
36 36
37#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" 37#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
38 38
39#define BOND_MAX_VLAN_ENCAP 2
39#define BOND_MAX_ARP_TARGETS 16 40#define BOND_MAX_ARP_TARGETS 16
40 41
41#define BOND_DEFAULT_MIIMON 100 42#define BOND_DEFAULT_MIIMON 100
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 8ab7103d4f44..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
14 SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) 14 SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
15 boards like am335x, dm814x, dm813x and dm811x. 15 boards like am335x, dm814x, dm813x and dm811x.
16 16
17config CAN_C_CAN_STRICT_FRAME_ORDERING
18 bool "Force a strict RX CAN frame order (may cause frame loss)"
19 ---help---
20 The RX split buffer prevents packet reordering but can cause packet
21 loss. Only enable this option when you accept to lose CAN frames
22 in favour of getting the received CAN frames in the correct order.
23
24config CAN_C_CAN_PCI 17config CAN_C_CAN_PCI
25 tristate "Generic PCI Bus based C_CAN/D_CAN driver" 18 tristate "Generic PCI Bus based C_CAN/D_CAN driver"
26 depends on PCI 19 depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a2ca820b5373..95e04e2002da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend)
732static inline void c_can_rx_object_get(struct net_device *dev, 732static inline void c_can_rx_object_get(struct net_device *dev,
733 struct c_can_priv *priv, u32 obj) 733 struct c_can_priv *priv, u32 obj)
734{ 734{
735#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
736 if (obj < C_CAN_MSG_RX_LOW_LAST)
737 c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
738 else
739#endif
740 c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); 735 c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
741} 736}
742 737
743static inline void c_can_rx_finalize(struct net_device *dev, 738static inline void c_can_rx_finalize(struct net_device *dev,
744 struct c_can_priv *priv, u32 obj) 739 struct c_can_priv *priv, u32 obj)
745{ 740{
746#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
747 if (obj < C_CAN_MSG_RX_LOW_LAST)
748 priv->rxmasked |= BIT(obj - 1);
749 else if (obj == C_CAN_MSG_RX_LOW_LAST) {
750 priv->rxmasked = 0;
751 /* activate all lower message objects */
752 c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
753 }
754#endif
755 if (priv->type != BOSCH_D_CAN) 741 if (priv->type != BOSCH_D_CAN)
756 c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); 742 c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
757} 743}
@@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
799{ 785{
800 u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); 786 u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
801 787
802#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
803 pend &= ~priv->rxmasked;
804#endif
805 return pend; 788 return pend;
806} 789}
807 790
@@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
 814 * has arrived. To work around this issue, we keep two groups of message 797 * has arrived. To work around this issue, we keep two groups of message
815 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. 798 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
816 * 799 *
817 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
818 *
819 * To ensure in-order frame reception we use the following
820 * approach while re-activating a message object to receive further
821 * frames:
822 * - if the current message object number is lower than
823 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
824 * the INTPND bit.
825 * - if the current message object number is equal to
826 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
827 * receive message objects.
828 * - if the current message object number is greater than
829 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
830 * only this message object.
831 *
832 * This can cause packet loss!
833 *
834 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
835 *
836 * We clear the newdat bit right away. 800 * We clear the newdat bit right away.
837 * 801 *
838 * This can result in packet reordering when the readout is slow. 802 * This can result in packet reordering when the readout is slow.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
551{ 551{
552 struct sja1000_priv *priv; 552 struct sja1000_priv *priv;
553 struct peak_pci_chan *chan; 553 struct peak_pci_chan *chan;
554 struct net_device *dev; 554 struct net_device *dev, *prev_dev;
555 void __iomem *cfg_base, *reg_base; 555 void __iomem *cfg_base, *reg_base;
556 u16 sub_sys_id, icr; 556 u16 sub_sys_id, icr;
557 int i, err, channels; 557 int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
688 writew(0x0, cfg_base + PITA_ICR + 2); 688 writew(0x0, cfg_base + PITA_ICR + 2);
689 689
690 chan = NULL; 690 chan = NULL;
691 for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { 691 for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
692 unregister_sja1000dev(dev);
693 free_sja1000dev(dev);
694 priv = netdev_priv(dev); 692 priv = netdev_priv(dev);
695 chan = priv->priv; 693 chan = priv->priv;
694 prev_dev = chan->prev_dev;
695
696 unregister_sja1000dev(dev);
697 free_sja1000dev(dev);
696 } 698 }
697 699
698 /* free any PCIeC resources too */ 700 /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
726 728
727 /* Loop over all registered devices */ 729 /* Loop over all registered devices */
728 while (1) { 730 while (1) {
731 struct net_device *prev_dev = chan->prev_dev;
732
729 dev_info(&pdev->dev, "removing device %s\n", dev->name); 733 dev_info(&pdev->dev, "removing device %s\n", dev->name);
730 unregister_sja1000dev(dev); 734 unregister_sja1000dev(dev);
731 free_sja1000dev(dev); 735 free_sja1000dev(dev);
732 dev = chan->prev_dev; 736 dev = prev_dev;
733 737
734 if (!dev) { 738 if (!dev) {
735 /* do that only for first channel */ 739 /* do that only for first channel */
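
Both peak_pci loops now capture chan->prev_dev before calling unregister_sja1000dev()/free_sja1000dev(), because freeing the device releases the netdev_priv area that chan points into; reading the link afterwards was a use-after-free. The idiom in isolation, as a userspace sketch over a prev-linked chain:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev; int id; };

/* free a prev-linked chain: capture the link BEFORE freeing the node */
static void teardown(struct node *n)
{
	while (n) {
		struct node *prev = n->prev;	/* save first ... */

		printf("freeing node %d\n", n->id);
		free(n);			/* ... then free */
		n = prev;			/* never touch freed memory */
	}
}

int main(void)
{
	struct node *a = malloc(sizeof(*a));
	struct node *b = malloc(sizeof(*b));

	a->prev = NULL; a->id = 0;
	b->prev = a;    b->id = 1;
	teardown(b);
	return 0;
}
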
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..d7401017a3f1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
35source "drivers/net/ethernet/chelsio/Kconfig" 35source "drivers/net/ethernet/chelsio/Kconfig"
36source "drivers/net/ethernet/cirrus/Kconfig" 36source "drivers/net/ethernet/cirrus/Kconfig"
37source "drivers/net/ethernet/cisco/Kconfig" 37source "drivers/net/ethernet/cisco/Kconfig"
38
39config CX_ECAT
40 tristate "Beckhoff CX5020 EtherCAT master support"
41 depends on PCI
42 ---help---
43 Driver for EtherCAT master module located on CCAT FPGA
 44 that can be found on Beckhoff CX5020, and possibly other
 45 Beckhoff CX series industrial PCs.
46
47 To compile this driver as a module, choose M here. The module
48 will be called ec_bhf.
49
38source "drivers/net/ethernet/davicom/Kconfig" 50source "drivers/net/ethernet/davicom/Kconfig"
39 51
40config DNET 52config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ 21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ 22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ 23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
24obj-$(CONFIG_CX_ECAT) += ec_bhf.o
24obj-$(CONFIG_DM9000) += davicom/ 25obj-$(CONFIG_DM9000) += davicom/
25obj-$(CONFIG_DNET) += dnet.o 26obj-$(CONFIG_DNET) += dnet.o
26obj-$(CONFIG_NET_VENDOR_DEC) += dec/ 27obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o 5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ 6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
7altera_msgdma.o altera_sgdma.o altera_utils.o 7altera_msgdma.o altera_sgdma.o altera_utils.o
8ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 4d1f2fdd5c32..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
37void msgdma_reset(struct altera_tse_private *priv) 37void msgdma_reset(struct altera_tse_private *priv)
38{ 38{
39 int counter; 39 int counter;
40 struct msgdma_csr *txcsr =
41 (struct msgdma_csr *)priv->tx_dma_csr;
42 struct msgdma_csr *rxcsr =
43 (struct msgdma_csr *)priv->rx_dma_csr;
44 40
45 /* Reset Rx mSGDMA */ 41 /* Reset Rx mSGDMA */
46 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 42 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
47 iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); 43 msgdma_csroffs(status));
44 csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
45 msgdma_csroffs(control));
48 46
49 counter = 0; 47 counter = 0;
50 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 48 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
51 if (tse_bit_is_clear(&rxcsr->status, 49 if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
52 MSGDMA_CSR_STAT_RESETTING)) 50 MSGDMA_CSR_STAT_RESETTING))
53 break; 51 break;
54 udelay(1); 52 udelay(1);
@@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
59 "TSE Rx mSGDMA resetting bit never cleared!\n"); 57 "TSE Rx mSGDMA resetting bit never cleared!\n");
60 58
61 /* clear all status bits */ 59 /* clear all status bits */
62 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 60 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
63 61
64 /* Reset Tx mSGDMA */ 62 /* Reset Tx mSGDMA */
65 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 63 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
66 iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); 64 msgdma_csroffs(status));
65
66 csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
67 msgdma_csroffs(control));
67 68
68 counter = 0; 69 counter = 0;
69 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 70 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
70 if (tse_bit_is_clear(&txcsr->status, 71 if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
71 MSGDMA_CSR_STAT_RESETTING)) 72 MSGDMA_CSR_STAT_RESETTING))
72 break; 73 break;
73 udelay(1); 74 udelay(1);
@@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
78 "TSE Tx mSGDMA resetting bit never cleared!\n"); 79 "TSE Tx mSGDMA resetting bit never cleared!\n");
79 80
80 /* clear all status bits */ 81 /* clear all status bits */
81 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 82 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
82} 83}
83 84
84void msgdma_disable_rxirq(struct altera_tse_private *priv) 85void msgdma_disable_rxirq(struct altera_tse_private *priv)
85{ 86{
86 struct msgdma_csr *csr = priv->rx_dma_csr; 87 tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
87 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 88 MSGDMA_CSR_CTL_GLOBAL_INTR);
88} 89}
89 90
90void msgdma_enable_rxirq(struct altera_tse_private *priv) 91void msgdma_enable_rxirq(struct altera_tse_private *priv)
91{ 92{
92 struct msgdma_csr *csr = priv->rx_dma_csr; 93 tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
93 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 94 MSGDMA_CSR_CTL_GLOBAL_INTR);
94} 95}
95 96
96void msgdma_disable_txirq(struct altera_tse_private *priv) 97void msgdma_disable_txirq(struct altera_tse_private *priv)
97{ 98{
98 struct msgdma_csr *csr = priv->tx_dma_csr; 99 tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
99 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 100 MSGDMA_CSR_CTL_GLOBAL_INTR);
100} 101}
101 102
102void msgdma_enable_txirq(struct altera_tse_private *priv) 103void msgdma_enable_txirq(struct altera_tse_private *priv)
103{ 104{
104 struct msgdma_csr *csr = priv->tx_dma_csr; 105 tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
105 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 106 MSGDMA_CSR_CTL_GLOBAL_INTR);
106} 107}
107 108
108void msgdma_clear_rxirq(struct altera_tse_private *priv) 109void msgdma_clear_rxirq(struct altera_tse_private *priv)
109{ 110{
110 struct msgdma_csr *csr = priv->rx_dma_csr; 111 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
111 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
112} 112}
113 113
114void msgdma_clear_txirq(struct altera_tse_private *priv) 114void msgdma_clear_txirq(struct altera_tse_private *priv)
115{ 115{
116 struct msgdma_csr *csr = priv->tx_dma_csr; 116 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
117 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
118} 117}
119 118
120/* return 0 to indicate transmit is pending */ 119/* return 0 to indicate transmit is pending */
121int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 120int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
122{ 121{
123 struct msgdma_extended_desc *desc = priv->tx_dma_desc; 122 csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
124 123 msgdma_descroffs(read_addr_lo));
125 iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); 124 csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
126 iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); 125 msgdma_descroffs(read_addr_hi));
127 iowrite32(0, &desc->write_addr_lo); 126 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
128 iowrite32(0, &desc->write_addr_hi); 127 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
129 iowrite32(buffer->len, &desc->len); 128 csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
130 iowrite32(0, &desc->burst_seq_num); 129 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
131 iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); 130 csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
132 iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); 131 msgdma_descroffs(stride));
132 csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
133 msgdma_descroffs(control));
133 return 0; 134 return 0;
134} 135}
135 136
@@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
138 u32 ready = 0; 139 u32 ready = 0;
139 u32 inuse; 140 u32 inuse;
140 u32 status; 141 u32 status;
141 struct msgdma_csr *txcsr =
142 (struct msgdma_csr *)priv->tx_dma_csr;
143 142
144 /* Get number of sent descriptors */ 143 /* Get number of sent descriptors */
145 inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; 144 inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
145 & 0xffff;
146 146
147 if (inuse) { /* Tx FIFO is not empty */ 147 if (inuse) { /* Tx FIFO is not empty */
148 ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 ready = priv->tx_prod - priv->tx_cons - inuse - 1;
149 } else { 149 } else {
150 /* Check for buffered last packet */ 150 /* Check for buffered last packet */
151 status = ioread32(&txcsr->status); 151 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
152 if (status & MSGDMA_CSR_STAT_BUSY) 152 if (status & MSGDMA_CSR_STAT_BUSY)
153 ready = priv->tx_prod - priv->tx_cons - 1; 153 ready = priv->tx_prod - priv->tx_cons - 1;
154 else 154 else
@@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
162void msgdma_add_rx_desc(struct altera_tse_private *priv, 162void msgdma_add_rx_desc(struct altera_tse_private *priv,
163 struct tse_buffer *rxbuffer) 163 struct tse_buffer *rxbuffer)
164{ 164{
165 struct msgdma_extended_desc *desc = priv->rx_dma_desc;
166 u32 len = priv->rx_dma_buf_sz; 165 u32 len = priv->rx_dma_buf_sz;
167 dma_addr_t dma_addr = rxbuffer->dma_addr; 166 dma_addr_t dma_addr = rxbuffer->dma_addr;
168 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP 167 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
172 | MSGDMA_DESC_CTL_TR_ERR_IRQ 171 | MSGDMA_DESC_CTL_TR_ERR_IRQ
173 | MSGDMA_DESC_CTL_GO); 172 | MSGDMA_DESC_CTL_GO);
174 173
175 iowrite32(0, &desc->read_addr_lo); 174 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
176 iowrite32(0, &desc->read_addr_hi); 175 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
177 iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); 176 csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
178 iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); 177 msgdma_descroffs(write_addr_lo));
179 iowrite32(len, &desc->len); 178 csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
180 iowrite32(0, &desc->burst_seq_num); 179 msgdma_descroffs(write_addr_hi));
181 iowrite32(0x00010001, &desc->stride); 180 csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
182 iowrite32(control, &desc->control); 181 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
182 csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
183 csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
183} 184}
184 185
185/* status is returned on upper 16 bits, 186/* status is returned on upper 16 bits,
@@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
190 u32 rxstatus = 0; 191 u32 rxstatus = 0;
191 u32 pktlength; 192 u32 pktlength;
192 u32 pktstatus; 193 u32 pktstatus;
193 struct msgdma_csr *rxcsr = 194
194 (struct msgdma_csr *)priv->rx_dma_csr; 195 if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
195 struct msgdma_response *rxresp = 196 & 0xffff) {
196 (struct msgdma_response *)priv->rx_dma_resp; 197 pktlength = csrrd32(priv->rx_dma_resp,
197 198 msgdma_respoffs(bytes_transferred));
198 if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { 199 pktstatus = csrrd32(priv->rx_dma_resp,
199 pktlength = ioread32(&rxresp->bytes_transferred); 200 msgdma_respoffs(status));
200 pktstatus = ioread32(&rxresp->status);
201 rxstatus = pktstatus; 201 rxstatus = pktstatus;
202 rxstatus = rxstatus << 16; 202 rxstatus = rxstatus << 16;
203 rxstatus |= (pktlength & 0xffff); 203 rxstatus |= (pktlength & 0xffff);
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
17#ifndef __ALTERA_MSGDMAHW_H__ 17#ifndef __ALTERA_MSGDMAHW_H__
18#define __ALTERA_MSGDMAHW_H__ 18#define __ALTERA_MSGDMAHW_H__
19 19
20/* mSGDMA standard descriptor format
21 */
22struct msgdma_desc {
23 u32 read_addr; /* data buffer source address */
24 u32 write_addr; /* data buffer destination address */
25 u32 len; /* the number of bytes to transfer per descriptor */
26 u32 control; /* characteristics of the transfer */
27};
28
29/* mSGDMA extended descriptor format 20/* mSGDMA extended descriptor format
30 */ 21 */
31struct msgdma_extended_desc { 22struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
159 u32 status; 150 u32 status;
160}; 151};
161 152
153#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
154#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
155#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
156
162/* mSGDMA response register bit definitions 157/* mSGDMA response register bit definitions
163 */ 158 */
164#define MSGDMA_RESP_EARLY_TERM BIT(8) 159#define MSGDMA_RESP_EARLY_TERM BIT(8)
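
The altera_tse conversion stops dereferencing struct pointers cast from __iomem and instead pairs a base pointer with offsetof()-derived offsets (msgdma_csroffs() and friends) routed through csrrd32()/csrwr32() wrappers, so sparse can check the address space and any endian handling lives in one place. A userspace approximation of the accessor style, with plain memory standing in for the MMIO region (the real wrappers sit on ioread32/iowrite32):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msgdma_csr_layout {
	uint32_t status;
	uint32_t control;
	uint32_t rw_fill_level;
};

#define csroffs(field)	(offsetof(struct msgdma_csr_layout, field))

/* stand-ins for the driver's csrwr32/csrrd32: base + offset, no deref */
static void csrwr32(uint32_t val, void *base, size_t offs)
{
	memcpy((char *)base + offs, &val, sizeof(val));
}

static uint32_t csrrd32(void *base, size_t offs)
{
	uint32_t val;

	memcpy(&val, (char *)base + offs, sizeof(val));
	return val;
}

int main(void)
{
	unsigned char regs[sizeof(struct msgdma_csr_layout)] = { 0 };

	csrwr32(0x1, regs, csroffs(control));
	printf("control = 0x%x\n", csrrd32(regs, csroffs(control)));
	return 0;
}
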
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 9ce8630692b6..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,8 +20,8 @@
20#include "altera_sgdmahw.h" 20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h" 21#include "altera_sgdma.h"
22 22
23static void sgdma_setup_descrip(struct sgdma_descrip *desc, 23static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
24 struct sgdma_descrip *ndesc, 24 struct sgdma_descrip __iomem *ndesc,
25 dma_addr_t ndesc_phys, 25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr, 26 dma_addr_t raddr,
27 dma_addr_t waddr, 27 dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
31 int wfixed); 31 int wfixed);
32 32
33static int sgdma_async_write(struct altera_tse_private *priv, 33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc); 34 struct sgdma_descrip __iomem *desc);
35 35
36static int sgdma_async_read(struct altera_tse_private *priv); 36static int sgdma_async_read(struct altera_tse_private *priv);
37 37
38static dma_addr_t 38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv, 39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc); 40 struct sgdma_descrip __iomem *desc);
41 41
42static dma_addr_t 42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv, 43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc); 44 struct sgdma_descrip __iomem *desc);
45 45
46static int sgdma_txbusy(struct altera_tse_private *priv); 46static int sgdma_txbusy(struct altera_tse_private *priv);
47 47
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
79 priv->rxdescphys = (dma_addr_t) 0; 79 priv->rxdescphys = (dma_addr_t) 0;
80 priv->txdescphys = (dma_addr_t) 0; 80 priv->txdescphys = (dma_addr_t) 0;
81 81
82 priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, 82 priv->rxdescphys = dma_map_single(priv->device,
83 (void __force *)priv->rx_dma_desc,
83 priv->rxdescmem, DMA_BIDIRECTIONAL); 84 priv->rxdescmem, DMA_BIDIRECTIONAL);
84 85
85 if (dma_mapping_error(priv->device, priv->rxdescphys)) { 86 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
88 return -EINVAL; 89 return -EINVAL;
89 } 90 }
90 91
91 priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, 92 priv->txdescphys = dma_map_single(priv->device,
93 (void __force *)priv->tx_dma_desc,
92 priv->txdescmem, DMA_TO_DEVICE); 94 priv->txdescmem, DMA_TO_DEVICE);
93 95
94 if (dma_mapping_error(priv->device, priv->txdescphys)) { 96 if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
98 } 100 }
99 101
100 /* Initialize descriptor memory to all 0's, sync memory to cache */ 102 /* Initialize descriptor memory to all 0's, sync memory to cache */
101 memset(priv->tx_dma_desc, 0, priv->txdescmem); 103 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
102 memset(priv->rx_dma_desc, 0, priv->rxdescmem); 104 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
103 105
104 dma_sync_single_for_device(priv->device, priv->txdescphys, 106 dma_sync_single_for_device(priv->device, priv->txdescphys,
105 priv->txdescmem, DMA_TO_DEVICE); 107 priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
126 */ 128 */
127void sgdma_reset(struct altera_tse_private *priv) 129void sgdma_reset(struct altera_tse_private *priv)
128{ 130{
129 u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
130 u32 txdescriplen = priv->txdescmem;
131 u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
132 u32 rxdescriplen = priv->rxdescmem;
133 struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
134 struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
135
136 /* Initialize descriptor memory to 0 */ 131 /* Initialize descriptor memory to 0 */
137 memset(ptxdescripmem, 0, txdescriplen); 132 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
138 memset(prxdescripmem, 0, rxdescriplen); 133 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
139 134
140 iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); 135 csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
141 iowrite32(0, &ptxsgdma->control); 136 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
142 137
143 iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); 138 csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
144 iowrite32(0, &prxsgdma->control); 139 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
145} 140}
146 141
147/* For SGDMA, interrupts remain enabled after initially enabling, 142/* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
167 162
168void sgdma_clear_rxirq(struct altera_tse_private *priv) 163void sgdma_clear_rxirq(struct altera_tse_private *priv)
169{ 164{
170 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 165 tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
171 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 166 SGDMA_CTRLREG_CLRINT);
172} 167}
173 168
174void sgdma_clear_txirq(struct altera_tse_private *priv) 169void sgdma_clear_txirq(struct altera_tse_private *priv)
175{ 170{
176 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; 171 tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
177 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 172 SGDMA_CTRLREG_CLRINT);
178} 173}
179 174
180/* transmits buffer through SGDMA. Returns number of buffers 175/* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
184 */ 179 */
185int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 180int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
186{ 181{
187 int pktstx = 0; 182 struct sgdma_descrip __iomem *descbase =
188 struct sgdma_descrip *descbase = 183 (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
189 (struct sgdma_descrip *)priv->tx_dma_desc;
190 184
191 struct sgdma_descrip *cdesc = &descbase[0]; 185 struct sgdma_descrip __iomem *cdesc = &descbase[0];
192 struct sgdma_descrip *ndesc = &descbase[1]; 186 struct sgdma_descrip __iomem *ndesc = &descbase[1];
193 187
194 /* wait 'til the tx sgdma is ready for the next transmit request */ 188 /* wait 'til the tx sgdma is ready for the next transmit request */
195 if (sgdma_txbusy(priv)) 189 if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
205 0, /* read fixed */ 199 0, /* read fixed */
206 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ 200 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
207 201
208 pktstx = sgdma_async_write(priv, cdesc); 202 sgdma_async_write(priv, cdesc);
209 203
210 /* enqueue the request to the pending transmit queue */ 204 /* enqueue the request to the pending transmit queue */
211 queue_tx(priv, buffer); 205 queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
219u32 sgdma_tx_completions(struct altera_tse_private *priv) 213u32 sgdma_tx_completions(struct altera_tse_private *priv)
220{ 214{
221 u32 ready = 0; 215 u32 ready = 0;
222 struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
223 216
224 if (!sgdma_txbusy(priv) && 217 if (!sgdma_txbusy(priv) &&
225 ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && 218 ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
219 & SGDMA_CONTROL_HW_OWNED) == 0) &&
226 (dequeue_tx(priv))) { 220 (dequeue_tx(priv))) {
227 ready = 1; 221 ready = 1;
228 } 222 }
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
246 */ 240 */
247u32 sgdma_rx_status(struct altera_tse_private *priv) 241u32 sgdma_rx_status(struct altera_tse_private *priv)
248{ 242{
249 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 243 struct sgdma_descrip __iomem *base =
250 struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; 244 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
251 struct sgdma_descrip *desc = NULL; 245 struct sgdma_descrip __iomem *desc = NULL;
252 int pktsrx;
253 unsigned int rxstatus = 0;
254 unsigned int pktlength = 0;
255 unsigned int pktstatus = 0;
256 struct tse_buffer *rxbuffer = NULL; 246 struct tse_buffer *rxbuffer = NULL;
247 unsigned int rxstatus = 0;
257 248
258 u32 sts = ioread32(&csr->status); 249 u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
259 250
260 desc = &base[0]; 251 desc = &base[0];
261 if (sts & SGDMA_STSREG_EOP) { 252 if (sts & SGDMA_STSREG_EOP) {
253 unsigned int pktlength = 0;
254 unsigned int pktstatus = 0;
262 dma_sync_single_for_cpu(priv->device, 255 dma_sync_single_for_cpu(priv->device,
263 priv->rxdescphys, 256 priv->rxdescphys,
264 priv->sgdmadesclen, 257 priv->sgdmadesclen,
265 DMA_FROM_DEVICE); 258 DMA_FROM_DEVICE);
266 259
267 pktlength = desc->bytes_xferred; 260 pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
268 pktstatus = desc->status & 0x3f; 261 pktstatus = csrrd8(desc, sgdma_descroffs(status));
269 rxstatus = pktstatus; 262 rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
270 rxstatus = rxstatus << 16; 263 rxstatus = rxstatus << 16;
271 rxstatus |= (pktlength & 0xffff); 264 rxstatus |= (pktlength & 0xffff);
272 265
273 if (rxstatus) { 266 if (rxstatus) {
274 desc->status = 0; 267 csrwr8(0, desc, sgdma_descroffs(status));
275 268
276 rxbuffer = dequeue_rx(priv); 269 rxbuffer = dequeue_rx(priv);
277 if (rxbuffer == NULL) 270 if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
279 "sgdma rx and rx queue empty!\n"); 272 "sgdma rx and rx queue empty!\n");
280 273
281 /* Clear control */ 274 /* Clear control */
282 iowrite32(0, &csr->control); 275 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
283 /* clear status */ 276 /* clear status */
284 iowrite32(0xf, &csr->status); 277 csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
285 278
286 /* kick the rx sgdma after reaping this descriptor */ 279 /* kick the rx sgdma after reaping this descriptor */
287 pktsrx = sgdma_async_read(priv); 280 sgdma_async_read(priv);
288 281
289 } else { 282 } else {
290 /* If the SGDMA indicated an end of packet on recv, 283 /* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
298 */ 291 */
299 netdev_err(priv->dev, 292 netdev_err(priv->dev,
300 "SGDMA RX Error Info: %x, %x, %x\n", 293 "SGDMA RX Error Info: %x, %x, %x\n",
301 sts, desc->status, rxstatus); 294 sts, csrrd8(desc, sgdma_descroffs(status)),
295 rxstatus);
302 } 296 }
303 } else if (sts == 0) { 297 } else if (sts == 0) {
304 pktsrx = sgdma_async_read(priv); 298 sgdma_async_read(priv);
305 } 299 }
306 300
307 return rxstatus; 301 return rxstatus;
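sgdma_rx_status() hands the caller the status byte and the byte count packed into a single u32. A hedged sketch of the unpacking a consumer would do (the helper name is illustrative; the driver's rx path does this inline):

static void sgdma_unpack_rxstatus(u32 rxstatus, u16 *status, u16 *length)
{
	*status = rxstatus >> 16;	/* descriptor status bits */
	*length = rxstatus & 0xffff;	/* received byte count */
}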
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
309 303
310 304
311/* Private functions */ 305/* Private functions */
312static void sgdma_setup_descrip(struct sgdma_descrip *desc, 306static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
313 struct sgdma_descrip *ndesc, 307 struct sgdma_descrip __iomem *ndesc,
314 dma_addr_t ndesc_phys, 308 dma_addr_t ndesc_phys,
315 dma_addr_t raddr, 309 dma_addr_t raddr,
316 dma_addr_t waddr, 310 dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
320 int wfixed) 314 int wfixed)
321{ 315{
322 /* Clear the next descriptor as not owned by hardware */ 316 /* Clear the next descriptor as not owned by hardware */
323 u32 ctrl = ndesc->control; 317
318 u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
324 ctrl &= ~SGDMA_CONTROL_HW_OWNED; 319 ctrl &= ~SGDMA_CONTROL_HW_OWNED;
325 ndesc->control = ctrl; 320 csrwr8(ctrl, ndesc, sgdma_descroffs(control));
326 321
327 ctrl = 0;
328 ctrl = SGDMA_CONTROL_HW_OWNED; 322 ctrl = SGDMA_CONTROL_HW_OWNED;
329 ctrl |= generate_eop; 323 ctrl |= generate_eop;
330 ctrl |= rfixed; 324 ctrl |= rfixed;
331 ctrl |= wfixed; 325 ctrl |= wfixed;
332 326
333 /* Channel is implicitly zero, initialized to 0 by default */ 327 /* Channel is implicitly zero, initialized to 0 by default */
334 328 csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
335 desc->raddr = raddr; 329 csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
336 desc->waddr = waddr; 330
337 desc->next = lower_32_bits(ndesc_phys); 331 csrwr32(0, desc, sgdma_descroffs(pad1));
338 desc->control = ctrl; 332 csrwr32(0, desc, sgdma_descroffs(pad2));
339 desc->status = 0; 333 csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
340 desc->rburst = 0; 334
341 desc->wburst = 0; 335 csrwr8(ctrl, desc, sgdma_descroffs(control));
342 desc->bytes = length; 336 csrwr8(0, desc, sgdma_descroffs(status));
343 desc->bytes_xferred = 0; 337 csrwr8(0, desc, sgdma_descroffs(wburst));
338 csrwr8(0, desc, sgdma_descroffs(rburst));
339 csrwr16(length, desc, sgdma_descroffs(bytes));
340 csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
344} 341}
345 342
346/* If hardware is busy, don't restart async read. 343/* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
351 */ 348 */
352static int sgdma_async_read(struct altera_tse_private *priv) 349static int sgdma_async_read(struct altera_tse_private *priv)
353{ 350{
354 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 351 struct sgdma_descrip __iomem *descbase =
355 struct sgdma_descrip *descbase = 352 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
356 (struct sgdma_descrip *)priv->rx_dma_desc;
357 353
358 struct sgdma_descrip *cdesc = &descbase[0]; 354 struct sgdma_descrip __iomem *cdesc = &descbase[0];
359 struct sgdma_descrip *ndesc = &descbase[1]; 355 struct sgdma_descrip __iomem *ndesc = &descbase[1];
360 356
361 struct tse_buffer *rxbuffer = NULL; 357 struct tse_buffer *rxbuffer = NULL;
362 358
@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
382 priv->sgdmadesclen, 378 priv->sgdmadesclen,
383 DMA_TO_DEVICE); 379 DMA_TO_DEVICE);
384 380
385 iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), 381 csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
386 &csr->next_descrip); 382 priv->rx_dma_csr,
383 sgdma_csroffs(next_descrip));
387 384
388 iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), 385 csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
389 &csr->control); 386 priv->rx_dma_csr,
387 sgdma_csroffs(control));
390 388
391 return 1; 389 return 1;
392 } 390 }
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
395} 393}
396 394
397static int sgdma_async_write(struct altera_tse_private *priv, 395static int sgdma_async_write(struct altera_tse_private *priv,
398 struct sgdma_descrip *desc) 396 struct sgdma_descrip __iomem *desc)
399{ 397{
400 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
401
402 if (sgdma_txbusy(priv)) 398 if (sgdma_txbusy(priv))
403 return 0; 399 return 0;
404 400
405 /* clear control and status */ 401 /* clear control and status */
406 iowrite32(0, &csr->control); 402 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
407 iowrite32(0x1f, &csr->status); 403 csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
408 404
409 dma_sync_single_for_device(priv->device, priv->txdescphys, 405 dma_sync_single_for_device(priv->device, priv->txdescphys,
410 priv->sgdmadesclen, DMA_TO_DEVICE); 406 priv->sgdmadesclen, DMA_TO_DEVICE);
411 407
412 iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), 408 csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
413 &csr->next_descrip); 409 priv->tx_dma_csr,
410 sgdma_csroffs(next_descrip));
414 411
415 iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), 412 csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
416 &csr->control); 413 priv->tx_dma_csr,
414 sgdma_csroffs(control));
417 415
418 return 1; 416 return 1;
419} 417}
420 418
421static dma_addr_t 419static dma_addr_t
422sgdma_txphysaddr(struct altera_tse_private *priv, 420sgdma_txphysaddr(struct altera_tse_private *priv,
423 struct sgdma_descrip *desc) 421 struct sgdma_descrip __iomem *desc)
424{ 422{
425 dma_addr_t paddr = priv->txdescmem_busaddr; 423 dma_addr_t paddr = priv->txdescmem_busaddr;
426 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; 424 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
429 427
430static dma_addr_t 428static dma_addr_t
431sgdma_rxphysaddr(struct altera_tse_private *priv, 429sgdma_rxphysaddr(struct altera_tse_private *priv,
432 struct sgdma_descrip *desc) 430 struct sgdma_descrip __iomem *desc)
433{ 431{
434 dma_addr_t paddr = priv->rxdescmem_busaddr; 432 dma_addr_t paddr = priv->rxdescmem_busaddr;
435 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; 433 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
518 */ 516 */
519static int sgdma_rxbusy(struct altera_tse_private *priv) 517static int sgdma_rxbusy(struct altera_tse_private *priv)
520{ 518{
521 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 519 return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
522 return ioread32(&csr->status) & SGDMA_STSREG_BUSY; 520 & SGDMA_STSREG_BUSY;
523} 521}
524 522
525/* waits for the tx sgdma to finish its current operation, returns 0 523
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
528static int sgdma_txbusy(struct altera_tse_private *priv) 526static int sgdma_txbusy(struct altera_tse_private *priv)
529{ 527{
530 int delay = 0; 528 int delay = 0;
531 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
532 529
533 /* if DMA is busy, wait for current transaction to finish */ 530 /* if DMA is busy, wait for current transaction to finish */
534 while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) 531 while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
532 & SGDMA_STSREG_BUSY) && (delay++ < 100))
535 udelay(1); 533 udelay(1);
536 534
537 if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) { 535 if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
536 & SGDMA_STSREG_BUSY) {
538 netdev_err(priv->dev, "timeout waiting for tx dma\n"); 537 netdev_err(priv->dev, "timeout waiting for tx dma\n");
539 return 1; 538 return 1;
540 } 539 }
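Both async paths above share the same start sequence: clear the channel's control and status, sync the descriptor table to the device, point next_descrip at the first descriptor's bus address, then set the START bit. A minimal sketch of that kick, factored over the patch's accessors (the helper name is illustrative, not part of the patch):

static void sgdma_kick(void __iomem *csr, dma_addr_t desc_phys, u32 ctrlreg)
{
	csrwr32(lower_32_bits(desc_phys), csr, sgdma_csroffs(next_descrip));
	csrwr32(ctrlreg | SGDMA_CTRLREG_START, csr, sgdma_csroffs(control));
}

Called as, e.g., sgdma_kick(priv->rx_dma_csr, sgdma_rxphysaddr(priv, cdesc), priv->rxctrlreg).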
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
19 19
20/* SGDMA descriptor structure */ 20/* SGDMA descriptor structure */
21struct sgdma_descrip { 21struct sgdma_descrip {
22 unsigned int raddr; /* address of data to be read */ 22 u32 raddr; /* address of data to be read */
23 unsigned int pad1; 23 u32 pad1;
24 unsigned int waddr; 24 u32 waddr;
25 unsigned int pad2; 25 u32 pad2;
26 unsigned int next; 26 u32 next;
27 unsigned int pad3; 27 u32 pad3;
28 unsigned short bytes; 28 u16 bytes;
29 unsigned char rburst; 29 u8 rburst;
30 unsigned char wburst; 30 u8 wburst;
31 unsigned short bytes_xferred; /* 16 bits, bytes xferred */ 31 u16 bytes_xferred; /* 16 bits, bytes xferred */
32 32
33 /* bit 0: error 33 /* bit 0: error
34 * bit 1: length error 34 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
39 * bit 6: reserved 39 * bit 6: reserved
40 * bit 7: status eop for recv case 40 * bit 7: status eop for recv case
41 */ 41 */
42 unsigned char status; 42 u8 status;
43 43
44 /* bit 0: eop 44 /* bit 0: eop
45 * bit 1: read_fixed 45 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
47 * bits 3,4,5,6: Channel (always 0) 47 * bits 3,4,5,6: Channel (always 0)
48 * bit 7: hardware owned 48 * bit 7: hardware owned
49 */ 49 */
50 unsigned char control; 50 u8 control;
51} __packed; 51} __packed;
52 52
53 53
@@ -101,6 +101,8 @@ struct sgdma_csr {
101 u32 pad3[3]; 101 u32 pad3[3];
102}; 102};
103 103
104#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
105#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
104 106
105#define SGDMA_STSREG_ERR BIT(0) /* Error */ 107#define SGDMA_STSREG_ERR BIT(0) /* Error */
106#define SGDMA_STSREG_EOP BIT(1) /* EOP */ 108#define SGDMA_STSREG_EOP BIT(1) /* EOP */
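With the descriptor now built from fixed-width __packed fields, sgdma_descroffs() resolves to stable byte offsets: raddr at 0x00, next at 0x10, bytes at 0x18, status at 0x1e, control at 0x1f. A compile-time sketch that pins the layout down (the check function is illustrative, not part of the patch):

static inline void sgdma_check_layout(void)
{
	BUILD_BUG_ON(sgdma_descroffs(raddr)   != 0x00);
	BUILD_BUG_ON(sgdma_descroffs(next)    != 0x10);
	BUILD_BUG_ON(sgdma_descroffs(bytes)   != 0x18);
	BUILD_BUG_ON(sgdma_descroffs(status)  != 0x1e);
	BUILD_BUG_ON(sgdma_descroffs(control) != 0x1f);
}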
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 465c4aabebbd..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -357,6 +357,8 @@ struct altera_tse_mac {
357 u32 reserved5[42]; 357 u32 reserved5[42];
358}; 358};
359 359
360#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
361
360/* Transmit and Receive Command Registers Bit Definitions 362/* Transmit and Receive Command Registers Bit Definitions
361 */ 363 */
362#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) 364#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
487 */ 489 */
488void altera_tse_set_ethtool_ops(struct net_device *); 490void altera_tse_set_ethtool_ops(struct net_device *);
489 491
492static inline
493u32 csrrd32(void __iomem *mac, size_t offs)
494{
495 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
496 return readl(paddr);
497}
498
499static inline
500u16 csrrd16(void __iomem *mac, size_t offs)
501{
502 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
503 return readw(paddr);
504}
505
506static inline
507u8 csrrd8(void __iomem *mac, size_t offs)
508{
509 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
510 return readb(paddr);
511}
512
513static inline
514void csrwr32(u32 val, void __iomem *mac, size_t offs)
515{
516 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
517
518 writel(val, paddr);
519}
520
521static inline
522void csrwr16(u16 val, void __iomem *mac, size_t offs)
523{
524 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
525
526 writew(val, paddr);
527}
528
529static inline
530void csrwr8(u8 val, void __iomem *mac, size_t offs)
531{
532 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
533
534 writeb(val, paddr);
535}
536
490#endif /* __ALTERA_TSE_H__ */ 537#endif /* __ALTERA_TSE_H__ */
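The accessors keep the base pointer as void __iomem * and take the register as a plain byte offset computed by tse_csroffs(), which is what makes callers sparse-clean. A hedged usage sketch that toggles the MAC's transmit enable (the wrapper name is illustrative; the constants are the driver's own):

static void tse_tx_enable(struct altera_tse_private *priv, bool on)
{
	u32 cfg = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	if (on)
		cfg |= MAC_CMDCFG_TX_ENA;
	else
		cfg &= ~MAC_CMDCFG_TX_ENA;

	csrwr32(cfg, priv->mac_dev, tse_csroffs(command_config));
}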
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 76133caffa78..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
96 u64 *buf) 96 u64 *buf)
97{ 97{
98 struct altera_tse_private *priv = netdev_priv(dev); 98 struct altera_tse_private *priv = netdev_priv(dev);
99 struct altera_tse_mac *mac = priv->mac_dev;
100 u64 ext; 99 u64 ext;
101 100
102 buf[0] = ioread32(&mac->frames_transmitted_ok); 101 buf[0] = csrrd32(priv->mac_dev,
103 buf[1] = ioread32(&mac->frames_received_ok); 102 tse_csroffs(frames_transmitted_ok));
104 buf[2] = ioread32(&mac->frames_check_sequence_errors); 103 buf[1] = csrrd32(priv->mac_dev,
105 buf[3] = ioread32(&mac->alignment_errors); 104 tse_csroffs(frames_received_ok));
105 buf[2] = csrrd32(priv->mac_dev,
106 tse_csroffs(frames_check_sequence_errors));
107 buf[3] = csrrd32(priv->mac_dev,
108 tse_csroffs(alignment_errors));
106 109
107 /* Extended aOctetsTransmittedOK counter */ 110 /* Extended aOctetsTransmittedOK counter */
108 ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; 111 ext = (u64) csrrd32(priv->mac_dev,
109 ext |= ioread32(&mac->octets_transmitted_ok); 112 tse_csroffs(msb_octets_transmitted_ok)) << 32;
113
114 ext |= csrrd32(priv->mac_dev,
115 tse_csroffs(octets_transmitted_ok));
110 buf[4] = ext; 116 buf[4] = ext;
111 117
112 /* Extended aOctetsReceivedOK counter */ 118 /* Extended aOctetsReceivedOK counter */
113 ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; 119 ext = (u64) csrrd32(priv->mac_dev,
114 ext |= ioread32(&mac->octets_received_ok); 120 tse_csroffs(msb_octets_received_ok)) << 32;
121
122 ext |= csrrd32(priv->mac_dev,
123 tse_csroffs(octets_received_ok));
115 buf[5] = ext; 124 buf[5] = ext;
116 125
117 buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); 126 buf[6] = csrrd32(priv->mac_dev,
118 buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); 127 tse_csroffs(tx_pause_mac_ctrl_frames));
119 buf[8] = ioread32(&mac->if_in_errors); 128 buf[7] = csrrd32(priv->mac_dev,
120 buf[9] = ioread32(&mac->if_out_errors); 129 tse_csroffs(rx_pause_mac_ctrl_frames));
121 buf[10] = ioread32(&mac->if_in_ucast_pkts); 130 buf[8] = csrrd32(priv->mac_dev,
122 buf[11] = ioread32(&mac->if_in_multicast_pkts); 131 tse_csroffs(if_in_errors));
123 buf[12] = ioread32(&mac->if_in_broadcast_pkts); 132 buf[9] = csrrd32(priv->mac_dev,
124 buf[13] = ioread32(&mac->if_out_discards); 133 tse_csroffs(if_out_errors));
125 buf[14] = ioread32(&mac->if_out_ucast_pkts); 134 buf[10] = csrrd32(priv->mac_dev,
126 buf[15] = ioread32(&mac->if_out_multicast_pkts); 135 tse_csroffs(if_in_ucast_pkts));
127 buf[16] = ioread32(&mac->if_out_broadcast_pkts); 136 buf[11] = csrrd32(priv->mac_dev,
128 buf[17] = ioread32(&mac->ether_stats_drop_events); 137 tse_csroffs(if_in_multicast_pkts));
138 buf[12] = csrrd32(priv->mac_dev,
139 tse_csroffs(if_in_broadcast_pkts));
140 buf[13] = csrrd32(priv->mac_dev,
141 tse_csroffs(if_out_discards));
142 buf[14] = csrrd32(priv->mac_dev,
143 tse_csroffs(if_out_ucast_pkts));
144 buf[15] = csrrd32(priv->mac_dev,
145 tse_csroffs(if_out_multicast_pkts));
146 buf[16] = csrrd32(priv->mac_dev,
147 tse_csroffs(if_out_broadcast_pkts));
148 buf[17] = csrrd32(priv->mac_dev,
149 tse_csroffs(ether_stats_drop_events));
129 150
130 /* Extended etherStatsOctets counter */ 151 /* Extended etherStatsOctets counter */
131 ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; 152 ext = (u64) csrrd32(priv->mac_dev,
132 ext |= ioread32(&mac->ether_stats_octets); 153 tse_csroffs(msb_ether_stats_octets)) << 32;
154 ext |= csrrd32(priv->mac_dev,
155 tse_csroffs(ether_stats_octets));
133 buf[18] = ext; 156 buf[18] = ext;
134 157
135 buf[19] = ioread32(&mac->ether_stats_pkts); 158 buf[19] = csrrd32(priv->mac_dev,
136 buf[20] = ioread32(&mac->ether_stats_undersize_pkts); 159 tse_csroffs(ether_stats_pkts));
137 buf[21] = ioread32(&mac->ether_stats_oversize_pkts); 160 buf[20] = csrrd32(priv->mac_dev,
138 buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); 161 tse_csroffs(ether_stats_undersize_pkts));
139 buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); 162 buf[21] = csrrd32(priv->mac_dev,
140 buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); 163 tse_csroffs(ether_stats_oversize_pkts));
141 buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); 164 buf[22] = csrrd32(priv->mac_dev,
142 buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); 165 tse_csroffs(ether_stats_pkts_64_octets));
143 buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); 166 buf[23] = csrrd32(priv->mac_dev,
144 buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets); 167 tse_csroffs(ether_stats_pkts_65to127_octets));
145 buf[29] = ioread32(&mac->ether_stats_jabbers); 168 buf[24] = csrrd32(priv->mac_dev,
146 buf[30] = ioread32(&mac->ether_stats_fragments); 169 tse_csroffs(ether_stats_pkts_128to255_octets));
170 buf[25] = csrrd32(priv->mac_dev,
171 tse_csroffs(ether_stats_pkts_256to511_octets));
172 buf[26] = csrrd32(priv->mac_dev,
173 tse_csroffs(ether_stats_pkts_512to1023_octets));
174 buf[27] = csrrd32(priv->mac_dev,
175 tse_csroffs(ether_stats_pkts_1024to1518_octets));
176 buf[28] = csrrd32(priv->mac_dev,
177 tse_csroffs(ether_stats_pkts_1519tox_octets));
178 buf[29] = csrrd32(priv->mac_dev,
179 tse_csroffs(ether_stats_jabbers));
180 buf[30] = csrrd32(priv->mac_dev,
181 tse_csroffs(ether_stats_fragments));
147} 182}
148 183
149static int tse_sset_count(struct net_device *dev, int sset) 184static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
178{ 213{
179 int i; 214 int i;
180 struct altera_tse_private *priv = netdev_priv(dev); 215 struct altera_tse_private *priv = netdev_priv(dev);
181 u32 *tse_mac_regs = (u32 *)priv->mac_dev;
182 u32 *buf = regbuf; 216 u32 *buf = regbuf;
183 217
184 /* Set version to a known value, so ethtool knows 218 /* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
196 regs->version = 1; 230 regs->version = 1;
197 231
198 for (i = 0; i < TSE_NUM_REGS; i++) 232 for (i = 0; i < TSE_NUM_REGS; i++)
199 buf[i] = ioread32(&tse_mac_regs[i]); 233 buf[i] = csrrd32(priv->mac_dev, i * 4);
200} 234}
201 235
202static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 236static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
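Each extended counter above is assembled from an msb/lsb pair of 32-bit registers. A sketch of a helper that could fold those repeated reads (the name is hypothetical, not something the patch adds):

static u64 tse_csrrd64(void __iomem *mac, size_t msb_offs, size_t lsb_offs)
{
	u64 val = (u64)csrrd32(mac, msb_offs) << 32;

	return val | csrrd32(mac, lsb_offs);
}

so that, e.g., buf[4] becomes tse_csrrd64(priv->mac_dev, tse_csroffs(msb_octets_transmitted_ok), tse_csroffs(octets_transmitted_ok)).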
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index e44a4aeb9701..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
100 */ 100 */
101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
102{ 102{
103 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 103 struct net_device *ndev = bus->priv;
104 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 104 struct altera_tse_private *priv = netdev_priv(ndev);
105 u32 data;
106 105
107 /* set MDIO address */ 106 /* set MDIO address */
108 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 107 csrwr32((mii_id & 0x1f), priv->mac_dev,
108 tse_csroffs(mdio_phy0_addr));
109 109
110 /* get the data */ 110 /* get the data */
111 data = ioread32(&mdio_regs[regnum]) & 0xffff; 111 return csrrd32(priv->mac_dev,
112 return data; 112 tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
113} 113}
114 114
115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
116 u16 value) 116 u16 value)
117{ 117{
118 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 118 struct net_device *ndev = bus->priv;
119 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 119 struct altera_tse_private *priv = netdev_priv(ndev);
120 120
121 /* set MDIO address */ 121 /* set MDIO address */
122 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 122 csrwr32((mii_id & 0x1f), priv->mac_dev,
123 tse_csroffs(mdio_phy0_addr));
123 124
124 /* write the data */ 125 /* write the data */
125 iowrite32((u32) value, &mdio_regs[regnum]); 126 csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
126 return 0; 127 return 0;
127} 128}
128 129
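The PHY's MDIO registers appear as 32 consecutive u32 CSRs starting at mdio_phy0, so register regnum sits regnum * 4 bytes into that window. A sketch, assuming <linux/mii.h> is included for MII_PHYSID1:

	/* read the PHY ID high word (MII register 2) through the window */
	u32 id1 = csrrd32(priv->mac_dev,
			  tse_csroffs(mdio_phy0) + MII_PHYSID1 * 4) & 0xffff;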
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
168 for (i = 0; i < PHY_MAX_ADDR; i++) 169 for (i = 0; i < PHY_MAX_ADDR; i++)
169 mdio->irq[i] = PHY_POLL; 170 mdio->irq[i] = PHY_POLL;
170 171
171 mdio->priv = priv->mac_dev; 172 mdio->priv = dev;
172 mdio->parent = priv->device; 173 mdio->parent = priv->device;
173 174
174 ret = of_mdiobus_register(mdio, mdio_node); 175 ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
563 unsigned int nopaged_len = skb_headlen(skb); 564 unsigned int nopaged_len = skb_headlen(skb);
564 enum netdev_tx ret = NETDEV_TX_OK; 565 enum netdev_tx ret = NETDEV_TX_OK;
565 dma_addr_t dma_addr; 566 dma_addr_t dma_addr;
566 int txcomplete = 0;
567 567
568 spin_lock_bh(&priv->tx_lock); 568 spin_lock_bh(&priv->tx_lock);
569 569
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
599 dma_sync_single_for_device(priv->device, buffer->dma_addr, 599 dma_sync_single_for_device(priv->device, buffer->dma_addr,
600 buffer->len, DMA_TO_DEVICE); 600 buffer->len, DMA_TO_DEVICE);
601 601
602 txcomplete = priv->dmaops->tx_buffer(priv, buffer); 602 priv->dmaops->tx_buffer(priv, buffer);
603 603
604 skb_tx_timestamp(skb); 604 skb_tx_timestamp(skb);
605 605
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
698 struct altera_tse_private *priv = netdev_priv(dev); 698 struct altera_tse_private *priv = netdev_priv(dev);
699 struct phy_device *phydev = NULL; 699 struct phy_device *phydev = NULL;
700 char phy_id_fmt[MII_BUS_ID_SIZE + 3]; 700 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
701 int ret;
702 701
703 if (priv->phy_addr != POLL_PHY) { 702 if (priv->phy_addr != POLL_PHY) {
704 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, 703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
712 netdev_err(dev, "Could not attach to PHY\n"); 711 netdev_err(dev, "Could not attach to PHY\n");
713 712
714 } else { 713 } else {
714 int ret;
715 phydev = phy_find_first(priv->mdio); 715 phydev = phy_find_first(priv->mdio);
716 if (phydev == NULL) { 716 if (phydev == NULL) {
717 netdev_err(dev, "No PHY found\n"); 717 netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
791 791
792static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) 792static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
793{ 793{
794 struct altera_tse_mac *mac = priv->mac_dev;
795 u32 msb; 794 u32 msb;
796 u32 lsb; 795 u32 lsb;
797 796
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
799 lsb = ((addr[5] << 8) | addr[4]) & 0xffff; 798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
800 799
801 /* Set primary MAC address */ 800 /* Set primary MAC address */
802 iowrite32(msb, &mac->mac_addr_0); 801 csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
803 iowrite32(lsb, &mac->mac_addr_1); 802 csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
804} 803}
805 804
806/* MAC software reset. 805/* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
811 */ 810 */
812static int reset_mac(struct altera_tse_private *priv) 811static int reset_mac(struct altera_tse_private *priv)
813{ 812{
814 void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
815 int counter; 813 int counter;
816 u32 dat; 814 u32 dat;
817 815
818 dat = ioread32(cmd_cfg_reg); 816 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
819 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 817 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
820 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; 818 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
821 iowrite32(dat, cmd_cfg_reg); 819 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
822 820
823 counter = 0; 821 counter = 0;
824 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 822 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
825 if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) 823 if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
824 MAC_CMDCFG_SW_RESET))
826 break; 825 break;
827 udelay(1); 826 udelay(1);
828 } 827 }
829 828
830 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 829 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
831 dat = ioread32(cmd_cfg_reg); 830 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
832 dat &= ~MAC_CMDCFG_SW_RESET; 831 dat &= ~MAC_CMDCFG_SW_RESET;
833 iowrite32(dat, cmd_cfg_reg); 832 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
834 return -1; 833 return -1;
835 } 834 }
836 return 0; 835 return 0;
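reset_mac() polls until the hardware clears MAC_CMDCFG_SW_RESET, bounded by ALTERA_TSE_SW_RESET_WATCHDOG_CNTR. A hedged sketch of the same wait expressed as a generic helper over the new bit API (the helper is illustrative, not part of the patch):

static int tse_wait_bit_clear(void __iomem *ioaddr, size_t offs,
			      u32 mask, int max_us)
{
	int us;

	for (us = 0; us < max_us; us++) {
		if (tse_bit_is_clear(ioaddr, offs, mask))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}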
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
840*/ 839*/
841static int init_mac(struct altera_tse_private *priv) 840static int init_mac(struct altera_tse_private *priv)
842{ 841{
843 struct altera_tse_mac *mac = priv->mac_dev;
844 unsigned int cmd = 0; 842 unsigned int cmd = 0;
845 u32 frm_length; 843 u32 frm_length;
846 844
847 /* Setup Rx FIFO */ 845 /* Setup Rx FIFO */
848 iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, 846 csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
849 &mac->rx_section_empty); 847 priv->mac_dev, tse_csroffs(rx_section_empty));
850 iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); 848
851 iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); 849 csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
852 iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); 850 tse_csroffs(rx_section_full));
851
852 csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
853 tse_csroffs(rx_almost_empty));
854
855 csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
856 tse_csroffs(rx_almost_full));
853 857
854 /* Setup Tx FIFO */ 858 /* Setup Tx FIFO */
855 iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, 859 csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
856 &mac->tx_section_empty); 860 priv->mac_dev, tse_csroffs(tx_section_empty));
857 iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); 861
858 iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); 862 csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
859 iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); 863 tse_csroffs(tx_section_full));
864
865 csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
866 tse_csroffs(tx_almost_empty));
867
868 csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
869 tse_csroffs(tx_almost_full));
860 870
861 /* MAC Address Configuration */ 871 /* MAC Address Configuration */
862 tse_update_mac_addr(priv, priv->dev->dev_addr); 872 tse_update_mac_addr(priv, priv->dev->dev_addr);
863 873
864 /* MAC Function Configuration */ 874 /* MAC Function Configuration */
865 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; 875 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
866 iowrite32(frm_length, &mac->frm_length); 876 csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
867 iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); 877
878 csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
879 tse_csroffs(tx_ipg_length));
868 880
869 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit 881 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
870 * start address 882 * start address
871 */ 883 */
872 tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); 884 tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
873 tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | 885 ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
874 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); 886
887 tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
888 ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
889 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
875 890
876 /* Set the MAC options */ 891 /* Set the MAC options */
877 cmd = ioread32(&mac->command_config); 892 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
878 cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ 893 cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */
879 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ 894 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
880 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames 895 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
889 cmd &= ~MAC_CMDCFG_ETH_SPEED; 904 cmd &= ~MAC_CMDCFG_ETH_SPEED;
890 cmd &= ~MAC_CMDCFG_ENA_10; 905 cmd &= ~MAC_CMDCFG_ENA_10;
891 906
892 iowrite32(cmd, &mac->command_config); 907 csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
893 908
894 iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta); 909 csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
910 tse_csroffs(pause_quanta));
895 911
896 if (netif_msg_hw(priv)) 912 if (netif_msg_hw(priv))
897 dev_dbg(priv->device, 913 dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
904 */ 920 */
905static void tse_set_mac(struct altera_tse_private *priv, bool enable) 921static void tse_set_mac(struct altera_tse_private *priv, bool enable)
906{ 922{
907 struct altera_tse_mac *mac = priv->mac_dev; 923 u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
908 u32 value = ioread32(&mac->command_config);
909 924
910 if (enable) 925 if (enable)
911 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; 926 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
912 else 927 else
913 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 928 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
914 929
915 iowrite32(value, &mac->command_config); 930 csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
916} 931}
917 932
918/* Change the MTU 933/* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
942static void altera_tse_set_mcfilter(struct net_device *dev) 957static void altera_tse_set_mcfilter(struct net_device *dev)
943{ 958{
944 struct altera_tse_private *priv = netdev_priv(dev); 959 struct altera_tse_private *priv = netdev_priv(dev);
945 struct altera_tse_mac *mac = priv->mac_dev;
946 int i; 960 int i;
947 struct netdev_hw_addr *ha; 961 struct netdev_hw_addr *ha;
948 962
949 /* clear the hash filter */ 963 /* clear the hash filter */
950 for (i = 0; i < 64; i++) 964 for (i = 0; i < 64; i++)
951 iowrite32(0, &(mac->hash_table[i])); 965 csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
952 966
953 netdev_for_each_mc_addr(ha, dev) { 967 netdev_for_each_mc_addr(ha, dev) {
954 unsigned int hash = 0; 968 unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
964 978
965 hash = (hash << 1) | xor_bit; 979 hash = (hash << 1) | xor_bit;
966 } 980 }
967 iowrite32(1, &(mac->hash_table[hash])); 981 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
968 } 982 }
969} 983}
970 984
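The filter reduces each multicast MAC to a 6-bit index: every address octet contributes its parity as one bit, selecting one of the 64 hash_table words. A standalone sketch mirroring the loop above (octet ordering as in the driver; treat the details as illustrative):

static unsigned int tse_mc_hash(const unsigned char *addr)
{
	unsigned int hash = 0;
	int octet;

	for (octet = 5; octet >= 0; octet--) {
		unsigned char parity = 0;
		int bit;

		for (bit = 0; bit < 8; bit++)
			parity ^= (addr[octet] >> bit) & 0x1;

		hash = (hash << 1) | parity;
	}
	return hash;	/* 0..63 */
}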
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
972static void altera_tse_set_mcfilterall(struct net_device *dev) 986static void altera_tse_set_mcfilterall(struct net_device *dev)
973{ 987{
974 struct altera_tse_private *priv = netdev_priv(dev); 988 struct altera_tse_private *priv = netdev_priv(dev);
975 struct altera_tse_mac *mac = priv->mac_dev;
976 int i; 989 int i;
977 990
978 /* set the hash filter */ 991 /* set the hash filter */
979 for (i = 0; i < 64; i++) 992 for (i = 0; i < 64; i++)
980 iowrite32(1, &(mac->hash_table[i])); 993 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
981} 994}
982 995
983/* Set or clear the multicast filter for this adaptor 996/* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
985static void tse_set_rx_mode_hashfilter(struct net_device *dev) 998static void tse_set_rx_mode_hashfilter(struct net_device *dev)
986{ 999{
987 struct altera_tse_private *priv = netdev_priv(dev); 1000 struct altera_tse_private *priv = netdev_priv(dev);
988 struct altera_tse_mac *mac = priv->mac_dev;
989 1001
990 spin_lock(&priv->mac_cfg_lock); 1002 spin_lock(&priv->mac_cfg_lock);
991 1003
992 if (dev->flags & IFF_PROMISC) 1004 if (dev->flags & IFF_PROMISC)
993 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1005 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1006 MAC_CMDCFG_PROMIS_EN);
994 1007
995 if (dev->flags & IFF_ALLMULTI) 1008 if (dev->flags & IFF_ALLMULTI)
996 altera_tse_set_mcfilterall(dev); 1009 altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
1005static void tse_set_rx_mode(struct net_device *dev) 1018static void tse_set_rx_mode(struct net_device *dev)
1006{ 1019{
1007 struct altera_tse_private *priv = netdev_priv(dev); 1020 struct altera_tse_private *priv = netdev_priv(dev);
1008 struct altera_tse_mac *mac = priv->mac_dev;
1009 1021
1010 spin_lock(&priv->mac_cfg_lock); 1022 spin_lock(&priv->mac_cfg_lock);
1011 1023
1012 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || 1024 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1013 !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) 1025 !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1014 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1026 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1027 MAC_CMDCFG_PROMIS_EN);
1015 else 1028 else
1016 tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1029 tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1030 MAC_CMDCFG_PROMIS_EN);
1017 1031
1018 spin_unlock(&priv->mac_cfg_lock); 1032 spin_unlock(&priv->mac_cfg_lock);
1019} 1033}
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
1362 of_property_read_bool(pdev->dev.of_node, 1376 of_property_read_bool(pdev->dev.of_node,
1363 "altr,has-hash-multicast-filter"); 1377 "altr,has-hash-multicast-filter");
1364 1378
1379 /* Force the hash filter off for now, until the
1380 * multicast filter receive issue is debugged
1381 */
1382 priv->hash_filter = 0;
1383
1365 /* get supplemental address settings for this instance */ 1384 /* get supplemental address settings for this instance */
1366 priv->added_unicast = 1385 priv->added_unicast =
1367 of_property_read_bool(pdev->dev.of_node, 1386 of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
1493 return 0; 1512 return 0;
1494} 1513}
1495 1514
1496struct altera_dmaops altera_dtype_sgdma = { 1515static const struct altera_dmaops altera_dtype_sgdma = {
1497 .altera_dtype = ALTERA_DTYPE_SGDMA, 1516 .altera_dtype = ALTERA_DTYPE_SGDMA,
1498 .dmamask = 32, 1517 .dmamask = 32,
1499 .reset_dma = sgdma_reset, 1518 .reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
1512 .start_rxdma = sgdma_start_rxdma, 1531 .start_rxdma = sgdma_start_rxdma,
1513}; 1532};
1514 1533
1515struct altera_dmaops altera_dtype_msgdma = { 1534static const struct altera_dmaops altera_dtype_msgdma = {
1516 .altera_dtype = ALTERA_DTYPE_MSGDMA, 1535 .altera_dtype = ALTERA_DTYPE_MSGDMA,
1517 .dmamask = 64, 1536 .dmamask = 64,
1518 .reset_dma = msgdma_reset, 1537 .reset_dma = msgdma_reset,
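Constifying the dmaops tables is safe because they are pure method tables bound once at probe time; the core code only ever calls through priv->dmaops and never names sgdma_* or msgdma_* directly. A sketch of that dispatch, using only fields visible in this patch:

static void tse_restart_dma(struct altera_tse_private *priv)
{
	priv->dmaops->reset_dma(priv);		/* sgdma_reset or msgdma_reset */
	priv->dmaops->start_rxdma(priv);	/* sgdma_ or msgdma_start_rxdma */
}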
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
17#include "altera_tse.h" 17#include "altera_tse.h"
18#include "altera_utils.h" 18#include "altera_utils.h"
19 19
20void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) 20void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
21{ 21{
22 u32 value = ioread32(ioaddr); 22 u32 value = csrrd32(ioaddr, offs);
23 value |= bit_mask; 23 value |= bit_mask;
24 iowrite32(value, ioaddr); 24 csrwr32(value, ioaddr, offs);
25} 25}
26 26
27void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) 27void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
28{ 28{
29 u32 value = ioread32(ioaddr); 29 u32 value = csrrd32(ioaddr, offs);
30 value &= ~bit_mask; 30 value &= ~bit_mask;
31 iowrite32(value, ioaddr); 31 csrwr32(value, ioaddr, offs);
32} 32}
33 33
34int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) 34int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
35{ 35{
36 u32 value = ioread32(ioaddr); 36 u32 value = csrrd32(ioaddr, offs);
37 return (value & bit_mask) ? 1 : 0; 37 return (value & bit_mask) ? 1 : 0;
38} 38}
39 39
40int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) 40int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
41{ 41{
42 u32 value = ioread32(ioaddr); 42 u32 value = csrrd32(ioaddr, offs);
43 return (value & bit_mask) ? 0 : 1; 43 return (value & bit_mask) ? 0 : 1;
44} 44}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
19#ifndef __ALTERA_UTILS_H__ 19#ifndef __ALTERA_UTILS_H__
20#define __ALTERA_UTILS_H__ 20#define __ALTERA_UTILS_H__
21 21
22void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); 22void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
23void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); 23void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
24int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); 24int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
25int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); 25int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
26 26
27#endif /* __ALTERA_UTILS_H__*/ 27#endif /* __ALTERA_UTILS_H__*/
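These helpers are plain read-modify-write sequences with no internal locking, so callers serialize access themselves, as tse_set_rx_mode() does under mac_cfg_lock. A usage sketch (the wrapper name is illustrative):

static void tse_set_promisc(struct altera_tse_private *priv)
{
	spin_lock(&priv->mac_cfg_lock);
	tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
		    MAC_CMDCFG_PROMIS_EN);
	spin_unlock(&priv->mac_cfg_lock);
}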
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b260913db236..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10053#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10053#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10054#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) 10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) 10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
10057{ 10057{
10058 u8 major, minor, version; 10058 u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10352 /* Reset should be performed after BRB is emptied */ 10352 /* Reset should be performed after BRB is emptied */
10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10354 u32 timer_count = 1000; 10354 u32 timer_count = 1000;
10355 bool need_write = true;
10355 10356
10356 /* Close the MAC Rx to prevent BRB from filling up */ 10357 /* Close the MAC Rx to prevent BRB from filling up */
10357 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10358 bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10398 * cleaning methods - might be redundant but harmless. 10399 * cleaning methods - might be redundant but harmless.
10399 */ 10400 */
10400 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { 10401 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
10401 bnx2x_prev_unload_undi_mf(bp); 10402 if (need_write) {
10403 bnx2x_prev_unload_undi_mf(bp);
10404 need_write = false;
10405 }
10402 } else if (prev_undi) { 10406 } else if (prev_undi) {
10403 /* If UNDI resides in memory, 10407 /* If UNDI resides in memory,
10404 * manually increment it 10408 * manually increment it
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 81cc2d9831c2..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2695,7 +2695,7 @@ out:
2695 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2695 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2696 } 2696 }
2697 2697
2698 return 0; 2698 return rc;
2699} 2699}
2700 2700
2701int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2701int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0c067e8564dd..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
747out: 747out:
748 bnx2x_vfpf_finalize(bp, &req->first_tlv); 748 bnx2x_vfpf_finalize(bp, &req->first_tlv);
749 749
750 return 0; 750 return rc;
751} 751}
752 752
753/* request pf to config rss table for vf queues*/ 753/* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
1 /*
2 * drivers/net/ethernet/beckhoff/ec_bhf.c
3 *
4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
18 * These can be found on Beckhoff CX50xx industrial PCs.
19 */
20
21#if 0
22#define DEBUG
23#endif
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/skbuff.h>
34#include <linux/hrtimer.h>
35#include <linux/interrupt.h>
36#include <linux/stat.h>
37
38#define TIMER_INTERVAL_NSEC 20000
39
40#define INFO_BLOCK_SIZE 0x10
41#define INFO_BLOCK_TYPE 0x0
42#define INFO_BLOCK_REV 0x2
43#define INFO_BLOCK_BLK_CNT 0x4
44#define INFO_BLOCK_TX_CHAN 0x4
45#define INFO_BLOCK_RX_CHAN 0x5
46#define INFO_BLOCK_OFFSET 0x8
47
48#define EC_MII_OFFSET 0x4
49#define EC_FIFO_OFFSET 0x8
50#define EC_MAC_OFFSET 0xc
51
52#define MAC_FRAME_ERR_CNT 0x0
53#define MAC_RX_ERR_CNT 0x1
54#define MAC_CRC_ERR_CNT 0x2
55#define MAC_LNK_LST_ERR_CNT 0x3
56#define MAC_TX_FRAME_CNT 0x10
57#define MAC_RX_FRAME_CNT 0x14
58#define MAC_TX_FIFO_LVL 0x20
59#define MAC_DROPPED_FRMS 0x28
60#define MAC_CONNECTED_CCAT_FLAG 0x78
61
62#define MII_MAC_ADDR 0x8
63#define MII_MAC_FILT_FLAG 0xe
64#define MII_LINK_STATUS 0xf
65
66#define FIFO_TX_REG 0x0
67#define FIFO_TX_RESET 0x8
68#define FIFO_RX_REG 0x10
69#define FIFO_RX_ADDR_VALID (1u << 31)
70#define FIFO_RX_RESET 0x18
71
72#define DMA_CHAN_OFFSET 0x1000
73#define DMA_CHAN_SIZE 0x8
74
75#define DMA_WINDOW_SIZE_MASK 0xfffffffc
76
77static struct pci_device_id ids[] = {
78 { PCI_DEVICE(0x15ec, 0x5000), },
79 { 0, }
80};
81MODULE_DEVICE_TABLE(pci, ids);
82
83struct rx_header {
84#define RXHDR_NEXT_ADDR_MASK 0xffffffu
85#define RXHDR_NEXT_VALID (1u << 31)
86 __le32 next;
87#define RXHDR_NEXT_RECV_FLAG 0x1
88 __le32 recv;
89#define RXHDR_LEN_MASK 0xfffu
90 __le16 len;
91 __le16 port;
92 __le32 reserved;
93 u8 timestamp[8];
94} __packed;
95
96#define PKT_PAYLOAD_SIZE 0x7e8
97struct rx_desc {
98 struct rx_header header;
99 u8 data[PKT_PAYLOAD_SIZE];
100} __packed;
101
102struct tx_header {
103 __le16 len;
104#define TX_HDR_PORT_0 0x1
105#define TX_HDR_PORT_1 0x2
106 u8 port;
107 u8 ts_enable;
108#define TX_HDR_SENT 0x1
109 __le32 sent;
110 u8 timestamp[8];
111} __packed;
112
113struct tx_desc {
114 struct tx_header header;
115 u8 data[PKT_PAYLOAD_SIZE];
116} __packed;
117
118#define FIFO_SIZE 64
119
120static long polling_frequency = TIMER_INTERVAL_NSEC;
121
122struct bhf_dma {
123 u8 *buf;
124 size_t len;
125 dma_addr_t buf_phys;
126
127 u8 *alloc;
128 size_t alloc_len;
129 dma_addr_t alloc_phys;
130};
131
132struct ec_bhf_priv {
133 struct net_device *net_dev;
134
135 struct pci_dev *dev;
136
137 void * __iomem io;
138 void * __iomem dma_io;
139
140 struct hrtimer hrtimer;
141
142 int tx_dma_chan;
143 int rx_dma_chan;
144 void * __iomem ec_io;
145 void * __iomem fifo_io;
146 void * __iomem mii_io;
147 void * __iomem mac_io;
148
149 struct bhf_dma rx_buf;
150 struct rx_desc *rx_descs;
151 int rx_dnext;
152 int rx_dcount;
153
154 struct bhf_dma tx_buf;
155 struct tx_desc *tx_descs;
156 int tx_dcount;
157 int tx_dnext;
158
159 u64 stat_rx_bytes;
160 u64 stat_tx_bytes;
161};
162
163#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
164
165#define ETHERCAT_MASTER_ID 0x14
166
167static void ec_bhf_print_status(struct ec_bhf_priv *priv)
168{
169 struct device *dev = PRIV_TO_DEV(priv);
170
171 dev_dbg(dev, "Frame error counter: %d\n",
172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
173 dev_dbg(dev, "RX error counter: %d\n",
174 ioread8(priv->mac_io + MAC_RX_ERR_CNT));
175 dev_dbg(dev, "CRC error counter: %d\n",
176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
177 dev_dbg(dev, "TX frame counter: %d\n",
178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
179 dev_dbg(dev, "RX frame counter: %d\n",
180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
181 dev_dbg(dev, "TX fifo level: %d\n",
182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
183 dev_dbg(dev, "Dropped frames: %d\n",
184 ioread8(priv->mac_io + MAC_DROPPED_FRMS));
185 dev_dbg(dev, "Connected with CCAT slot: %d\n",
186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
187 dev_dbg(dev, "Link status: %d\n",
188 ioread8(priv->mii_io + MII_LINK_STATUS));
189}
190
191static void ec_bhf_reset(struct ec_bhf_priv *priv)
192{
193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
194 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
195 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
196 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
197 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
198 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
199 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
200
201 iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
202 iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
203
204 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
205}
206
207static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
208{
209 u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
210 u32 addr = (u8 *)desc - priv->tx_buf.buf;
211
212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
213
214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
215}
216
217static int ec_bhf_desc_sent(struct tx_desc *desc)
218{
219 return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
220}
221
222static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
223{
224 if (unlikely(netif_queue_stopped(priv->net_dev))) {
225 /* Make sure that we perceive changes to tx_dnext. */
226 smp_rmb();
227
228 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
229 netif_wake_queue(priv->net_dev);
230 }
231}
232
233static int ec_bhf_pkt_received(struct rx_desc *desc)
234{
235 return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
236}
237
238static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
239{
240 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
241 priv->fifo_io + FIFO_RX_REG);
242}
243
244static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
245{
246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
247 struct device *dev = PRIV_TO_DEV(priv);
248
249 while (ec_bhf_pkt_received(desc)) {
250 int pkt_size = (le16_to_cpu(desc->header.len) &
251 RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
252 u8 *data = desc->data;
253 struct sk_buff *skb;
254
255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
257
258 if (skb) {
259 memcpy(skb_put(skb, pkt_size), data, pkt_size);
260 skb->protocol = eth_type_trans(skb, priv->net_dev);
261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
262
263 priv->stat_rx_bytes += pkt_size;
264
265 netif_rx(skb);
266 } else {
267 dev_err_ratelimited(dev,
268 "Couldn't allocate a skb_buff for a packet of size %u\n",
269 pkt_size);
270 }
271
272 desc->header.recv = 0;
273
274 ec_bhf_add_rx_desc(priv, desc);
275
276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
277 desc = &priv->rx_descs[priv->rx_dnext];
278 }
279
280}
281
282static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
283{
284 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
285 hrtimer);
286 ec_bhf_process_rx(priv);
287 ec_bhf_process_tx(priv);
288
289 if (!netif_running(priv->net_dev))
290 return HRTIMER_NORESTART;
291
292 hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
293 return HRTIMER_RESTART;
294}
295
296static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
297{
298 struct device *dev = PRIV_TO_DEV(priv);
299 unsigned block_count, i;
300 void * __iomem ec_info;
301
302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
304 dev_dbg(dev, "Revision of function: %x\n",
305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
306
307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
308 dev_dbg(dev, "Number of function blocks: %x\n", block_count);
309
310 for (i = 0; i < block_count; i++) {
311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
312 INFO_BLOCK_TYPE);
313 if (type == ETHERCAT_MASTER_ID)
314 break;
315 }
316 if (i == block_count) {
317 dev_err(dev, "EtherCAT master with DMA block not found\n");
318 return -ENODEV;
319 }
320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
321
322 ec_info = priv->io + i * INFO_BLOCK_SIZE;
323 dev_dbg(dev, "EtherCAT master revision: %d\n",
324 ioread16(ec_info + INFO_BLOCK_REV));
325
326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
328 priv->tx_dma_chan);
329
330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
332 priv->rx_dma_chan);
333
334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
338
339 dev_dbg(dev,
340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
342
343 return 0;
344}
345
346static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
347 struct net_device *net_dev)
348{
349 struct ec_bhf_priv *priv = netdev_priv(net_dev);
350 struct tx_desc *desc;
351 unsigned len;
352
353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
354
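	/* TX is copy-based: the frame is checksummed and copied whole
	 * into the next free descriptor of the coherent DMA buffer, so
	 * no per-skb DMA mapping is needed.
	 */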
355 desc = &priv->tx_descs[priv->tx_dnext];
356
357 skb_copy_and_csum_dev(skb, desc->data);
358 len = skb->len;
359
360 memset(&desc->header, 0, sizeof(desc->header));
361 desc->header.len = cpu_to_le16(len);
362 desc->header.port = TX_HDR_PORT_0;
363
364 ec_bhf_send_packet(priv, desc);
365
366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
367
368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
369 /* Make sure that updates to tx_dnext are perceived
370 * by the timer routine.
371 */
372 smp_wmb();
373
374 netif_stop_queue(net_dev);
375
376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
377 ec_bhf_print_status(priv);
378 }
379
380 priv->stat_tx_bytes += len;
381
382 dev_kfree_skb(skb);
383
384 return NETDEV_TX_OK;
385}
386
387static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
388 struct bhf_dma *buf,
389 int channel,
390 int size)
391{
392 int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
393 struct device *dev = PRIV_TO_DEV(priv);
394 u32 mask;
395
396 iowrite32(0xffffffff, priv->dma_io + offset);
397
398 mask = ioread32(priv->dma_io + offset);
399 mask &= DMA_WINDOW_SIZE_MASK;
400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
401
402 /* We want a chunk of memory that is:
403 * - aligned to the mask we just read back
404 * - at most 2^mask bytes in size
405 * To guarantee both, we allocate a buffer of
406 * 2 * 2^mask bytes and align within it.
407 */
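	/* Illustrative example: if the mask reads back as 0xfffff000,
	 * the window is ~mask + 1 = 4 KiB, and allocating 2 * 4 KiB
	 * guarantees a properly aligned 4 KiB window fits inside.
	 */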
408 buf->len = min_t(int, ~mask + 1, size);
409 buf->alloc_len = 2 * buf->len;
410
411 dev_dbg(dev, "Allocating %d bytes for channel %d",
412 (int)buf->alloc_len, channel);
413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
414 GFP_KERNEL);
415 if (!buf->alloc) {
416 dev_err(dev, "Failed to allocate buffer\n");
417 return -ENOMEM;
418 }
419
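	/* Carve the aligned window out of the double-sized allocation
	 * and keep the matching CPU address alongside the bus address.
	 */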
420 buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
421 buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
422
423 iowrite32(0, priv->dma_io + offset + 4);
424 iowrite32(buf->buf_phys, priv->dma_io + offset);
425 dev_dbg(dev, "Buffer: %x and read from dev: %x",
426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
427
428 return 0;
429}
430
431static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
432{
433 int i = 0;
434
435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
437 priv->tx_dnext = 0;
438
439 for (i = 0; i < priv->tx_dcount; i++)
440 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
441}
442
443static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
444{
445 int i;
446
447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
449 priv->rx_dnext = 0;
450
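	/* Descriptors are chained by their byte offsets within the DMA
	 * buffer; the last one points back to offset 0, closing the
	 * ring.
	 */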
451 for (i = 0; i < priv->rx_dcount; i++) {
452 struct rx_desc *desc = &priv->rx_descs[i];
453 u32 next;
454
455 if (i != priv->rx_dcount - 1)
456 next = (u8 *)(desc + 1) - priv->rx_buf.buf;
457 else
458 next = 0;
459 next |= RXHDR_NEXT_VALID;
460 desc->header.next = cpu_to_le32(next);
461 desc->header.recv = 0;
462 ec_bhf_add_rx_desc(priv, desc);
463 }
464}
465
466static int ec_bhf_open(struct net_device *net_dev)
467{
468 struct ec_bhf_priv *priv = netdev_priv(net_dev);
469 struct device *dev = PRIV_TO_DEV(priv);
470 int err = 0;
471
472 dev_info(dev, "Opening device\n");
473
474 ec_bhf_reset(priv);
475
476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
477 FIFO_SIZE * sizeof(struct rx_desc));
478 if (err) {
479 dev_err(dev, "Failed to allocate rx buffer\n");
480 goto out;
481 }
482 ec_bhf_setup_rx_descs(priv);
483
484 dev_info(dev, "RX buffer allocated, address: %x\n",
485 (unsigned)priv->rx_buf.buf_phys);
486
487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
488 FIFO_SIZE * sizeof(struct tx_desc));
489 if (err) {
490 dev_err(dev, "Failed to allocate tx buffer\n");
491 goto error_rx_free;
492 }
493 dev_dbg(dev, "TX buffer allocated, addres: %x\n",
494 (unsigned)priv->tx_buf.buf_phys);
495
496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
497
498 ec_bhf_setup_tx_descs(priv);
499
500 netif_start_queue(net_dev);
501
502 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
503 priv->hrtimer.function = ec_bhf_timer_fun;
504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
505 HRTIMER_MODE_REL);
506
507 dev_info(PRIV_TO_DEV(priv), "Device open\n");
508
509 ec_bhf_print_status(priv);
510
511 return 0;
512
513error_rx_free:
514 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
515 priv->rx_buf.alloc_phys);
516out:
517 return err;
518}
519
520static int ec_bhf_stop(struct net_device *net_dev)
521{
522 struct ec_bhf_priv *priv = netdev_priv(net_dev);
523 struct device *dev = PRIV_TO_DEV(priv);
524
525 hrtimer_cancel(&priv->hrtimer);
526
527 ec_bhf_reset(priv);
528
529 netif_tx_disable(net_dev);
530
531 dma_free_coherent(dev, priv->tx_buf.alloc_len,
532 priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
533 dma_free_coherent(dev, priv->rx_buf.alloc_len,
534 priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
535
536 return 0;
537}
538
539static struct rtnl_link_stats64 *
540ec_bhf_get_stats(struct net_device *net_dev,
541 struct rtnl_link_stats64 *stats)
542{
543 struct ec_bhf_priv *priv = netdev_priv(net_dev);
544
545 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
546 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
547 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
548 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
549 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
550 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
551
552 stats->tx_bytes = priv->stat_tx_bytes;
553 stats->rx_bytes = priv->stat_rx_bytes;
554
555 return stats;
556}
557
558static const struct net_device_ops ec_bhf_netdev_ops = {
559 .ndo_start_xmit = ec_bhf_start_xmit,
560 .ndo_open = ec_bhf_open,
561 .ndo_stop = ec_bhf_stop,
562 .ndo_get_stats64 = ec_bhf_get_stats,
563 .ndo_change_mtu = eth_change_mtu,
564 .ndo_validate_addr = eth_validate_addr,
565 .ndo_set_mac_address = eth_mac_addr
566};
567
568static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
569{
570 struct net_device *net_dev;
571 struct ec_bhf_priv *priv;
572 void __iomem *dma_io;
573 void __iomem *io;
574 int err = 0;
575
576 err = pci_enable_device(dev);
577 if (err)
578 return err;
579
580 pci_set_master(dev);
581
582 err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
583 if (err) {
584 dev_err(&dev->dev,
585 "Required dma mask not supported, failed to initialize device\n");
586 err = -EIO;
587 goto err_disable_dev;
588 }
589
590 err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
591 if (err) {
592 dev_err(&dev->dev,
593 "Required dma mask not supported, failed to initialize device\n");
594 goto err_disable_dev;
595 }
596
597 err = pci_request_regions(dev, "ec_bhf");
598 if (err) {
599 dev_err(&dev->dev, "Failed to request pci memory regions\n");
600 goto err_disable_dev;
601 }
602
603 io = pci_iomap(dev, 0, 0);
604 if (!io) {
605 dev_err(&dev->dev, "Failed to map pci card memory bar 0");
606 err = -EIO;
607 goto err_release_regions;
608 }
609
610 dma_io = pci_iomap(dev, 2, 0);
611 if (!dma_io) {
612 dev_err(&dev->dev, "Failed to map pci card memory bar 2\n");
613 err = -EIO;
614 goto err_unmap;
615 }
616
617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
618 if (!net_dev) {
619 err = -ENOMEM;
620 goto err_unmap_dma_io;
621 }
622
623 pci_set_drvdata(dev, net_dev);
624 SET_NETDEV_DEV(net_dev, &dev->dev);
625
626 net_dev->features = 0;
627 net_dev->flags |= IFF_NOARP;
628
629 net_dev->netdev_ops = &ec_bhf_netdev_ops;
630
631 priv = netdev_priv(net_dev);
632 priv->net_dev = net_dev;
633 priv->io = io;
634 priv->dma_io = dma_io;
635 priv->dev = dev;
636
637 err = ec_bhf_setup_offsets(priv);
638 if (err < 0)
639 goto err_free_net_dev;
640
641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
642
643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
644 net_dev->dev_addr);
645
646 err = register_netdev(net_dev);
647 if (err < 0)
648 goto err_free_net_dev;
649
650 return 0;
651
652err_free_net_dev:
653 free_netdev(net_dev);
654err_unmap_dma_io:
655 pci_iounmap(dev, dma_io);
656err_unmap:
657 pci_iounmap(dev, io);
658err_release_regions:
659 pci_release_regions(dev);
660err_disable_dev:
661 pci_clear_master(dev);
662 pci_disable_device(dev);
663
664 return err;
665}
666
667static void ec_bhf_remove(struct pci_dev *dev)
668{
669 struct net_device *net_dev = pci_get_drvdata(dev);
670 struct ec_bhf_priv *priv = netdev_priv(net_dev);
671
672 unregister_netdev(net_dev);
673 free_netdev(net_dev);
674
675 pci_iounmap(dev, priv->dma_io);
676 pci_iounmap(dev, priv->io);
677 pci_release_regions(dev);
678 pci_clear_master(dev);
679 pci_disable_device(dev);
680}
681
682static struct pci_driver pci_driver = {
683 .name = "ec_bhf",
684 .id_table = ids,
685 .probe = ec_bhf_probe,
686 .remove = ec_bhf_remove,
687};
688
689static int __init ec_bhf_init(void)
690{
691 return pci_register_driver(&pci_driver);
692}
693
694static void __exit ec_bhf_exit(void)
695{
696 pci_unregister_driver(&pci_driver);
697}
698
699module_init(ec_bhf_init);
700module_exit(ec_bhf_exit);
701
702module_param(polling_frequency, long, S_IRUGO);
703MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
704
705MODULE_LICENSE("GPL");
706MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
 	if (status)
 		goto err;
 
+	/* On some BE3 FW versions, after a HW reset,
+	 * interrupts will remain disabled for each function.
+	 * So, explicitly enable interrupts
+	 */
+	be_intr_set(adapter, true);
+
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
 	return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
 		struct txdesc *txdesc,
 		struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
 				len,
 				PCI_DMA_TODEVICE);
 
+	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+		return -EINVAL;
+
 	pci_dma_sync_single_for_device(pdev,
 				dmaaddr,
 				len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
 	txbi->mapping = dmaaddr;
 	txbi->len = len;
+	return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	int mask = jme->tx_ring_mask;
+	int j;
+
+	for (j = 0 ; j < count ; j++) {
+		ctxbi = txbi + ((startidx + j + 2) & (mask));
+		pci_unmap_page(jme->pdev,
+				ctxbi->mapping,
+				ctxbi->len,
+				PCI_DMA_TODEVICE);
+
+		ctxbi->mapping = 0;
+		ctxbi->len = 0;
+	}
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
 	struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	int mask = jme->tx_ring_mask;
 	const struct skb_frag_struct *frag;
 	u32 len;
+	int ret = 0;
 
 	for (i = 0 ; i < nr_frags ; ++i) {
 		frag = &skb_shinfo(skb)->frags[i];
 		ctxdesc = txdesc + ((idx + i + 2) & (mask));
 		ctxbi = txbi + ((idx + i + 2) & (mask));
 
-		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
 				skb_frag_page(frag),
 				frag->page_offset, skb_frag_size(frag), hidma);
+		if (ret) {
+			jme_drop_tx_map(jme, idx, i);
+			goto out;
+		}
+
 	}
 
 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
 	ctxdesc = txdesc + ((idx + 1) & (mask));
 	ctxbi = txbi + ((idx + 1) & (mask));
-	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
 			offset_in_page(skb->data), len, hidma);
+	if (ret)
+		jme_drop_tx_map(jme, idx, i);
+
+out:
+	return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	struct txdesc *txdesc;
 	struct jme_buffer_info *txbi;
 	u8 flags;
+	int ret = 0;
 
 	txdesc = (struct txdesc *)txring->desc + idx;
 	txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
 		jme_tx_csum(jme, skb, &flags);
 	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-	jme_map_tx_skb(jme, skb, idx);
+	ret = jme_map_tx_skb(jme, skb, idx);
+	if (ret)
+		return ret;
+
 	txdesc->desc1.flags = flags;
 	/*
 	 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-	jme_fill_tx_desc(jme, skb, idx);
+	if (jme_fill_tx_desc(jme, skb, idx))
+		return NETDEV_TX_OK;
 
 	jwrite32(jme, JME_TXCS, jme->reg_txcs |
 				TXCS_SELECT_QUEUE0 |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
 	},
 	{
 		.opcode = MLX4_CMD_UPDATE_QP,
-		.has_inbox = false,
+		.has_inbox = true,
 		.has_outbox = false,
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_CMD_EPERM_wrapper
+		.wrapper = mlx4_UPDATE_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..212cea440f90 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_cmd_mailbox *outbox,
 			   struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
 			 struct mlx4_vhcr *vhcr,
 			 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+		   enum mlx4_update_qp_attr attr,
+		   struct mlx4_update_qp_params *params)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_update_qp_context *cmd;
+	u64 pri_addr_path_mask = 0;
+	int err = 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+		return -EINVAL;
+
+	if (attr & MLX4_UPDATE_QP_SMAC) {
+		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+	}
+
+	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1c3fdd4a1f7d..8f1254a79832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd_info)
+{
+	int err;
+	u32 qpn = vhcr->in_modifier & 0xffffff;
+	struct res_qp *rqp;
+	u64 mac;
+	unsigned port;
+	u64 pri_addr_path_mask;
+	struct mlx4_update_qp_context *cmd;
+	int smac_index;
+
+	cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+		return -EPERM;
+
+	/* Just change the smac for the QP */
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
+	if (err) {
+		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+		return err;
+	}
+
+	port = (rqp->sched_queue >> 6 & 1) + 1;
+	smac_index = cmd->qp_context.pri_path.grh_mylmc;
+	err = mac_find_smac_ix_in_slave(dev, slave, port,
+					smac_index, &mac);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+			 qpn, smac_index);
+		goto err_mac;
+	}
+
+	err = mlx4_cmd(dev, inbox->dma,
+		       vhcr->in_modifier, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+		goto err_mac;
+	}
+
+err_mac:
+	put_res(dev, slave, qpn, RES_QP);
+	return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
 	       tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-					     struct net_device *netdev)
-{
-	int err;
-
-	netdev->num_tx_queues = adapter->drv_tx_rings;
-	netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-	err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-	if (err)
-		netdev_err(netdev, "failed to set %d Tx queues\n",
-			   adapter->drv_tx_rings);
-
-	return err;
-}
-
 struct qlcnic_nic_template {
 	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
 	int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bc914859e38..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
 	ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+				      u8 tx_queues, u8 rx_queues)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (tx_queues) {
+		err = netif_set_real_num_tx_queues(netdev, tx_queues);
+		if (err) {
+			netdev_err(netdev, "failed to set %d Tx queues\n",
+				   tx_queues);
+			return err;
+		}
+	}
+
+	if (rx_queues) {
+		err = netif_set_real_num_rx_queues(netdev, rx_queues);
+		if (err)
+			netdev_err(netdev, "failed to set %d Rx queues\n",
+				   rx_queues);
+	}
+
+	return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 		    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->irq = adapter->msix_entries[0].vector;
 
-	err = qlcnic_set_real_num_queues(adapter, netdev);
+	err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+					 adapter->drv_sds_rings);
 	if (err)
 		return err;
 
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
 			    tx_ring->tx_stats.xmit_called,
 			    tx_ring->tx_stats.xmit_on,
 			    tx_ring->tx_stats.xmit_off);
+
+		if (tx_ring->crb_intr_mask)
+			netdev_info(netdev, "crb_intr_mask=%d\n",
+				    readl(tx_ring->crb_intr_mask));
+
 		netdev_info(netdev,
-			    "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-			    readl(tx_ring->crb_intr_mask),
+			    "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
 			    readl(tx_ring->crb_cmd_producer),
 			    tx_ring->producer, tx_ring->sw_consumer,
 			    le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	u8 tx_rings, rx_rings;
 	int err;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;
 
+	tx_rings = adapter->drv_tss_rings;
+	rx_rings = adapter->drv_rss_rings;
+
 	netif_device_detach(netdev);
+
+	err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+	if (err)
+		goto done;
+
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
 
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 		return err;
 	}
 
-	netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+	/* Check if we need to update real_num_{tx|rx}_queues because
+	 * qlcnic_setup_intr() may change Tx/Rx rings size
+	 */
+	if ((tx_rings != adapter->drv_tx_rings) ||
+	    (rx_rings != adapter->drv_sds_rings)) {
+		err = qlcnic_set_real_num_queues(adapter,
+						 adapter->drv_tx_rings,
+						 adapter->drv_sds_rings);
+		if (err)
+			goto done;
+	}
 
 	if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_initialize_nic(adapter, 1);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 	efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-	/* Disable legacy interrupt */
-	if (efx->legacy_irq)
-		free_irq(efx->legacy_irq, efx);
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/* Disable MSI/MSI-X interrupts */
+		efx_for_each_channel(channel, efx)
+			free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+	} else {
+		/* Disable legacy interrupt */
+		free_irq(efx->legacy_irq, efx);
+	}
 }
 
 /* Register dump */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
 		if (ret) {
 			pr_err("%s: Cannot attach to PHY (error: %d)\n",
 			       __func__, ret);
-			goto phy_error;
+			return ret;
 		}
 	}
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
-phy_error:
-	clk_disable_unprepare(priv->stmmac_clk);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
 	int i;
 
 	for (i = 0; i < N_TX_RINGS; i++)
-		spin_lock(&cp->tx_lock[i]);
+		spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 	phyid = be32_to_cpup(parp+1);
 	mdio = of_find_device_by_node(mdio_node);
-
-	if (strncmp(mdio->name, "gpio", 4) == 0) {
-		/* GPIO bitbang MDIO driver attached */
-		struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-			 PHY_ID_FMT, bus->id, phyid);
-	} else {
-		/* davinci MDIO driver attached */
-		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-			 PHY_ID_FMT, mdio->name, phyid);
+	of_node_put(mdio_node);
+	if (!mdio) {
+		pr_err("Missing mdio platform device\n");
+		return -EINVAL;
 	}
+	snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+		 PHY_ID_FMT, mdio->name, phyid);
 
 	mac_addr = of_get_mac_address(slave_node);
 	if (mac_addr)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b0e2865a6810..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	}
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+	return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
 					  struct netdev_queue *txq,
 					  void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-	lockdep_set_class(&dev->addr_list_lock,
-			  &macvlan_netdev_addr_lock_key);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macvlan_netdev_addr_lock_key,
+				       macvlan_get_nest_level(dev));
 	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
 	.ndo_fdb_add		= macvlan_fdb_add,
 	.ndo_fdb_del		= macvlan_fdb_del,
 	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
+	.ndo_get_lock_subclass  = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	vlan->dev      = dev;
 	vlan->port     = port;
 	vlan->set_features = MACVLAN_FEATURES;
+	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
 	vlan->mode     = MACVLAN_MODE_VEPA;
 	if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 9c4defdec67b..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
 	if (pdev->dev.of_node) {
 		pdata = mdio_gpio_of_get_data(pdev);
 		bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+		if (bus_id < 0) {
+			dev_warn(&pdev->dev, "failed to get alias id\n");
+			bus_id = 0;
+		}
 	} else {
 		pdata = dev_get_platdata(&pdev->dev);
 		bus_id = pdev->id;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a972056b2249..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
-	int needs_aneg = 0, do_suspend = 0;
+	bool needs_aneg = false, do_suspend = false, do_resume = false;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
 	case PHY_PENDING:
 		break;
 	case PHY_UP:
-		needs_aneg = 1;
+		needs_aneg = true;
 
 		phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->adjust_link(phydev->attached_dev);
 
 		} else if (0 == phydev->link_timeout--)
-			needs_aneg = 1;
+			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
 		err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
 			netif_carrier_on(phydev->attached_dev);
 		} else {
 			if (0 == phydev->link_timeout--)
-				needs_aneg = 1;
+				needs_aneg = true;
 		}
 
 		phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->link = 0;
 			netif_carrier_off(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
-			do_suspend = 1;
+			do_suspend = true;
 		}
 		break;
 	case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
 			}
 			phydev->adjust_link(phydev->attached_dev);
 		}
+		do_resume = true;
 		break;
 	}
 
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
 	if (needs_aneg)
 		err = phy_start_aneg(phydev);
-
-	if (do_suspend)
+	else if (do_suspend)
 		phy_suspend(phydev);
+	else if (do_resume)
+		phy_resume(phydev);
 
 	if (err < 0)
 		phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	err = phy_init_hw(phydev);
 	if (err)
 		phy_detach(phydev);
-
-	phy_resume(phydev);
+	else
+		phy_resume(phydev);
 
 	return err;
 }
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
 	cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+	switch (proto) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+		return true;
+	}
+	return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 	struct cdc_ncm_ctx *ctx = info->ctx;
 	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
 	u16 tci = 0;
+	bool is_ip;
 	u8 *c;
 
 	if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 	if (skb->len <= ETH_HLEN)
 		goto error;
 
+	/* Some applications using e.g. packet sockets will
+	 * bypass the VLAN acceleration and create tagged
+	 * ethernet frames directly.  We primarily look for
+	 * the accelerated out-of-band tag, but fall back if
+	 * required
+	 */
+	skb_reset_mac_header(skb);
+	if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+	    __vlan_get_tag(skb, &tci) == 0) {
+		is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+		skb_pull(skb, VLAN_ETH_HLEN);
+	} else {
+		is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+		skb_pull(skb, ETH_HLEN);
+	}
+
 	/* mapping VLANs to MBIM sessions:
 	 *   no tag     => IPS session <0>
 	 *   1 - 255    => IPS session <vlanid>
 	 *   256 - 511  => DSS session <vlanid - 256>
 	 *   512 - 4095 => unsupported, drop
 	 */
-	vlan_get_tag(skb, &tci);
-
 	switch (tci & 0x0f00) {
 	case 0x0000: /* VLAN ID 0 - 255 */
-		/* verify that datagram is IPv4 or IPv6 */
-		skb_reset_mac_header(skb);
-		switch (eth_hdr(skb)->h_proto) {
-		case htons(ETH_P_IP):
-		case htons(ETH_P_IPV6):
-			break;
-		default:
+		if (!is_ip)
 			goto error;
-		}
 		c = (u8 *)&sign;
 		c[3] = tci;
 		break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 			  "unsupported tci=0x%04x\n", tci);
 		goto error;
 	}
-	skb_pull(skb, ETH_HLEN);
 	}
 
 	spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 		return;
 
 	/* need to send the NA on the VLAN dev, if any */
-	if (tci)
+	rcu_read_lock();
+	if (tci) {
 		netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
 					      tci);
-	else
+		if (!netdev) {
+			rcu_read_unlock();
+			return;
+		}
+	} else {
 		netdev = dev->net;
-	if (!netdev)
-		return;
+	}
+	dev_hold(netdev);
+	rcu_read_unlock();
 
 	in6_dev = in6_dev_get(netdev);
 	if (!in6_dev)
-		return;
+		goto out;
 	is_router = !!in6_dev->cnf.forwarding;
 	in6_dev_put(in6_dev);
 
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 				 true /* solicited */,
 				 false /* override */,
 				 true /* inc_opt */);
+out:
+	dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
 	if ((vif->type == NL80211_IFTYPE_AP ||
 	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
-	    bss_conf->enable_beacon)
+	    bss_conf->enable_beacon) {
 		priv->reconfig_beacon = true;
+		priv->rearm_ani = true;
+	}
 
 	if (bss_conf->assoc) {
 		priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
 	ath9k_htc_ps_wakeup(priv);
 
+	ath9k_htc_stop_ani(priv);
 	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
 	if (!err) {
 		/* only set 2G bandwidth using bw_cap command */
 		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
 		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
 					       sizeof(band_bwcap));
 	} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index fa858d548d13..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
 	if (IWL_MVM_BT_COEX_CORUNNING) {
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-						    BT_VALID_CORUN_LUT_40);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+						     BT_VALID_CORUN_LUT_40);
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
 	}
 
 	if (IWL_MVM_BT_COEX_MPLUT) {
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 	}
 
 	if (mvm->cfg->bt_shared_single_ant)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *	this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *	bits 0-19: beacon interal in usecs (suspend before executing)
+ *	bits 0-19: beacon interal in TUs (suspend before executing)
  *	bits 20-23: reserved
  *	bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th: quiet channel num of packets threshold
  * @good_CRC_th: passive to active promotion threshold
  * @rx_chain: RXON rx chain.
- * @max_out_time: max uSec to be out of assoceated channel
- * @suspend_time: pause scan this long when returning to service channel
+ * @max_out_time: max TUs to be out of assoceated channel
+ * @suspend_time: pause scan this TUs when returning to service channel
  * @flags: RXON flags
  * @filter_flags: RXONfilter
  * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f0cebf12c7b8..b41dc84e9431 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
 		return;
 
-	ieee80211_iterate_active_interfaces(
+	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
 	mutex_lock(&mvm->mutex);
 
+	if (!iwl_mvm_is_idle(mvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (mvm->scan_status) {
 	case IWL_MVM_SCAN_OS:
 		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 	return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 9f52c5b3f0ec..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		return;
 	}
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
 	/* Disable last tx check if we are debugging with fixed rate */
 	if (lq_sta->dbg_fixed_rate) {
 		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_bound);
-	/*
-	 * Under low latency traffic passive scan is fragmented meaning
-	 * that dwell on a particular channel will be fragmented. Each fragment
-	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
-	 * channel will be delayed by the same period - 105ms. So suspend_time
-	 * parameter describing both fragments and channels skipping periods is
-	 * set to 105ms. This value is chosen so that overall passive scan
-	 * duration will not be too long. Max_out_time in this case is set to
-	 * 70ms, so for active scanning operating channel will be left for 70ms
-	 * while for passive still for 20ms (fragment dwell).
-	 */
-	if (global_bound) {
-		if (!iwl_mvm_low_latency(mvm)) {
-			params->suspend_time = ieee80211_tu_to_usec(100);
-			params->max_out_time = ieee80211_tu_to_usec(600);
-		} else {
-			params->suspend_time = ieee80211_tu_to_usec(105);
-			/* P2P doesn't support fragmented passive scan, so
-			 * configure max_out_time to be at least longest dwell
-			 * time for passive scan.
-			 */
-			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-				params->max_out_time = ieee80211_tu_to_usec(70);
-				params->passive_fragmented = true;
-			} else {
-				u32 passive_dwell;
-
-				/*
-				 * Use band G so that passive channel dwell time
-				 * will be assigned with maximum value.
-				 */
-				band = IEEE80211_BAND_2GHZ;
-				passive_dwell = iwl_mvm_get_passive_dwell(band);
-				params->max_out_time =
-					ieee80211_tu_to_usec(passive_dwell);
-			}
-		}
-	}
 
+	if (!global_bound)
+		goto not_bound;
+
+	params->suspend_time = 100;
+	params->max_out_time = 600;
+
+	if (iwl_mvm_low_latency(mvm)) {
+		params->suspend_time = 250;
+		params->max_out_time = 250;
+	}
+
+not_bound:
+
 	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].passive = 20;
-		else
-			params->dwell[band].passive =
-				iwl_mvm_get_passive_dwell(band);
+		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
 		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
 								      n_ssids);
 	}
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
 	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
 	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
 	int head = 0;
-	int tail = band_2ghz + band_5ghz;
+	int tail = band_2ghz + band_5ghz - 1;
 	u32 ssid_bitmap;
 	int cmd_len;
 	int ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
 	return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	bool *idle = _data;
+
+	if (!vif->bss_conf.idle)
+		*idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+	bool idle = true;
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_idle_iter, &idle);
+
+	return idle;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+	trans->dev = &pdev->dev;
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
 	err = pci_enable_msi(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}
 
-	trans->dev = &pdev->dev;
-	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_disable_msi;
 	}
 
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
 
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_free_ict;
 	}
 
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
 	return trans;
 
 out_free_ict:
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
226 grant_ref_t rx_ring_ref); 226 grant_ref_t rx_ring_ref);
227 227
228/* Check for SKBs from frontend and schedule backend processing */ 228/* Check for SKBs from frontend and schedule backend processing */
229void xenvif_check_rx_xenvif(struct xenvif *vif); 229void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
230 230
231/* Prevent the device from generating any further traffic. */ 231/* Prevent the device from generating any further traffic. */
232void xenvif_carrier_off(struct xenvif *vif); 232void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
75 work_done = xenvif_tx_action(vif, budget); 75 work_done = xenvif_tx_action(vif, budget);
76 76
77 if (work_done < budget) { 77 if (work_done < budget) {
78 int more_to_do = 0; 78 napi_complete(napi);
79 unsigned long flags; 79 xenvif_napi_schedule_or_enable_events(vif);
80
81 /* It is necessary to disable IRQ before calling
82 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
83 * lose event from the frontend.
84 *
85 * Consider:
86 * RING_HAS_UNCONSUMED_REQUESTS
87 * <frontend generates event to trigger napi_schedule>
88 * __napi_complete
89 *
90 * This handler is still in scheduled state so the
91 * event has no effect at all. After __napi_complete
92 * this handler is descheduled and cannot get
93 * scheduled again. We lose event in this case and the ring
94 * will be completely stalled.
95 */
96
97 local_irq_save(flags);
98
99 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
100 if (!more_to_do)
101 __napi_complete(napi);
102
103 local_irq_restore(flags);
104 } 80 }
105 81
106 return work_done; 82 return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
194 enable_irq(vif->tx_irq); 170 enable_irq(vif->tx_irq);
195 if (vif->tx_irq != vif->rx_irq) 171 if (vif->tx_irq != vif->rx_irq)
196 enable_irq(vif->rx_irq); 172 enable_irq(vif->rx_irq);
197 xenvif_check_rx_xenvif(vif); 173 xenvif_napi_schedule_or_enable_events(vif);
198} 174}
199 175
200static void xenvif_down(struct xenvif *vif) 176static void xenvif_down(struct xenvif *vif)
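
The rewritten xenvif_poll() above moves the lost-event race described in the deleted comment into the renamed helper. A hedged sketch of the pattern such a helper centralizes (the real body lives in netback.c, below): RING_FINAL_CHECK_FOR_REQUESTS() re-enables frontend event delivery and then re-checks the ring, so a request that arrived between the last check and napi_complete() is noticed rather than lost.

static void napi_schedule_or_enable_events_sketch(struct xenvif *vif)
{
	int more_to_do;

	/* re-enable frontend events, then look at the ring once more */
	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}
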
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
104 104
105/* Find the containing VIF's structure from a pointer in pending_tx_info array 105/* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */ 106 */
107static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) 107static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
108{ 108{
109 u16 pending_idx = ubuf->desc; 109 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp = 110 struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
323} 323}
324 324
325/* 325/*
326 * Find the grant ref for a given frag in a chain of struct ubuf_info's
327 * skb: the skb itself
328 * i: the frag's number
329 * ubuf: a pointer to an element in the chain. It should not be NULL
330 *
331 * Returns a pointer to the element in the chain where the page was found.
332 * If not found, returns NULL.
333 * See the definition of callback_struct in common.h for more details about
334 * the chain.
335 */
336static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
337 const int i,
338 const struct ubuf_info *ubuf)
339{
340 struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
341
342 do {
343 u16 pending_idx = ubuf->desc;
344
345 if (skb_shinfo(skb)->frags[i].page.p ==
346 foreign_vif->mmap_pages[pending_idx])
347 break;
348 ubuf = (struct ubuf_info *) ubuf->ctx;
349 } while (ubuf);
350
351 return ubuf;
352}
353
354/*
326 * Prepare an SKB to be transmitted to the frontend. 355 * Prepare an SKB to be transmitted to the frontend.
327 * 356 *
328 * This function is responsible for allocating grant operations, meta 357 * This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
346 int head = 1; 375 int head = 1;
347 int old_meta_prod; 376 int old_meta_prod;
348 int gso_type; 377 int gso_type;
349 struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; 378 const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
350 grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; 379 const struct ubuf_info *const head_ubuf = ubuf;
351 struct xenvif *foreign_vif = NULL;
352 380
353 old_meta_prod = npo->meta_prod; 381 old_meta_prod = npo->meta_prod;
354 382
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
386 npo->copy_off = 0; 414 npo->copy_off = 0;
387 npo->copy_gref = req->gref; 415 npo->copy_gref = req->gref;
388 416
389 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
390 (ubuf->callback == &xenvif_zerocopy_callback)) {
391 int i = 0;
392 foreign_vif = ubuf_to_vif(ubuf);
393
394 do {
395 u16 pending_idx = ubuf->desc;
396 foreign_grefs[i++] =
397 foreign_vif->pending_tx_info[pending_idx].req.gref;
398 ubuf = (struct ubuf_info *) ubuf->ctx;
399 } while (ubuf);
400 }
401
402 data = skb->data; 417 data = skb->data;
403 while (data < skb_tail_pointer(skb)) { 418 while (data < skb_tail_pointer(skb)) {
404 unsigned int offset = offset_in_page(data); 419 unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
415 } 430 }
416 431
417 for (i = 0; i < nr_frags; i++) { 432 for (i = 0; i < nr_frags; i++) {
433 /* This variable also signals whether foreign_gref has a real
434 * value or not.
435 */
436 struct xenvif *foreign_vif = NULL;
437 grant_ref_t foreign_gref;
438
439 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
440 (ubuf->callback == &xenvif_zerocopy_callback)) {
441 const struct ubuf_info *const startpoint = ubuf;
442
443 /* Ideally ubuf points to the chain element which
444 * belongs to this frag; or, if frags were removed from
445 * the beginning, to an element shortly before it.
446 */
447 ubuf = xenvif_find_gref(skb, i, ubuf);
448
449 /* Try again from the beginning of the list, if we
450 * haven't tried from there. This only makes sense in
451 * the unlikely event of reordering the original frags.
452 * For injected local pages it's an unnecessary second
453 * run.
454 */
455 if (unlikely(!ubuf) && startpoint != head_ubuf)
456 ubuf = xenvif_find_gref(skb, i, head_ubuf);
457
458 if (likely(ubuf)) {
459 u16 pending_idx = ubuf->desc;
460
461 foreign_vif = ubuf_to_vif(ubuf);
462 foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
463 /* Just a safety measure. If this was the last
464 * element on the list, the for loop will
465 * iterate again if a local page was added to
466 * the end; using head_ubuf here avoids a
467 * second search of the chain. The original
468 * frags may also have changed order, but that's less likely.
469 * Either way, ubuf shouldn't be NULL.
470 */
471 ubuf = ubuf->ctx ?
472 (struct ubuf_info *) ubuf->ctx :
473 head_ubuf;
474 } else
475 /* This frag was a local page, added to the
476 * array after the skb left netback.
477 */
478 ubuf = head_ubuf;
479 }
418 xenvif_gop_frag_copy(vif, skb, npo, 480 xenvif_gop_frag_copy(vif, skb, npo,
419 skb_frag_page(&skb_shinfo(skb)->frags[i]), 481 skb_frag_page(&skb_shinfo(skb)->frags[i]),
420 skb_frag_size(&skb_shinfo(skb)->frags[i]), 482 skb_frag_size(&skb_shinfo(skb)->frags[i]),
421 skb_shinfo(skb)->frags[i].page_offset, 483 skb_shinfo(skb)->frags[i].page_offset,
422 &head, 484 &head,
423 foreign_vif, 485 foreign_vif,
424 foreign_grefs[i]); 486 foreign_vif ? foreign_gref : UINT_MAX);
425 } 487 }
426 488
427 return npo->meta_prod - old_meta_prod; 489 return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
654 notify_remote_via_irq(vif->rx_irq); 716 notify_remote_via_irq(vif->rx_irq);
655} 717}
656 718
657void xenvif_check_rx_xenvif(struct xenvif *vif) 719void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
658{ 720{
659 int more_to_do; 721 int more_to_do;
660 722
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
688{ 750{
689 struct xenvif *vif = (struct xenvif *)data; 751 struct xenvif *vif = (struct xenvif *)data;
690 tx_add_credit(vif); 752 tx_add_credit(vif);
691 xenvif_check_rx_xenvif(vif); 753 xenvif_napi_schedule_or_enable_events(vif);
692} 754}
693 755
694static void xenvif_tx_err(struct xenvif *vif, 756static void xenvif_tx_err(struct xenvif *vif,
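
The per-frag lookup added above is easiest to read as two searches over one NULL-terminated chain linked through ->ctx: resume from the current position first, and retry from the head only if that failed and would cover new ground. A toy restatement calling the xenvif_find_gref() from this patch; find_resumable is an illustrative name:

static const struct ubuf_info *find_resumable(const struct sk_buff *skb,
					      int i,
					      const struct ubuf_info *pos,
					      const struct ubuf_info *head)
{
	const struct ubuf_info *found = xenvif_find_gref(skb, i, pos);

	/* retry from the head only if we did not already start there */
	if (!found && pos != head)
		found = xenvif_find_gref(skb, i, head);

	return found;	/* NULL: the frag is a locally injected page */
}
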
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
6 6
7config PTP_1588_CLOCK 7config PTP_1588_CLOCK
8 tristate "PTP clock support" 8 tristate "PTP clock support"
9 depends on NET
9 select PPS 10 select PPS
10 select NET_PTP_CLASSIFY 11 select NET_PTP_CLASSIFY
11 help 12 help
@@ -74,7 +75,7 @@ config DP83640_PHY
74config PTP_1588_CLOCK_PCH 75config PTP_1588_CLOCK_PCH
75 tristate "Intel PCH EG20T as PTP clock" 76 tristate "Intel PCH EG20T as PTP clock"
76 depends on X86 || COMPILE_TEST 77 depends on X86 || COMPILE_TEST
77 depends on HAS_IOMEM 78 depends on HAS_IOMEM && NET
78 select PTP_1588_CLOCK 79 select PTP_1588_CLOCK
79 help 80 help
80 This driver adds support for using the PCH EG20T as a PTP 81 This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
1621 list_del(&rphy->list); 1621 list_del(&rphy->list);
1622 mutex_unlock(&sas_host->lock); 1622 mutex_unlock(&sas_host->lock);
1623 1623
1624 sas_bsg_remove(shost, rphy);
1625
1626 transport_destroy_device(dev); 1624 transport_destroy_device(dev);
1627 1625
1628 put_device(dev); 1626 put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
1681 } 1679 }
1682 1680
1683 sas_rphy_unlink(rphy); 1681 sas_rphy_unlink(rphy);
1682 sas_bsg_remove(NULL, rphy);
1684 transport_remove_device(dev); 1683 transport_remove_device(dev);
1685 device_del(dev); 1684 device_del(dev);
1686} 1685}
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 1c8c6cc6de30..4b0eff6da674 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call)
130{ 130{
131 _enter(""); 131 _enter("");
132 132
133 /* Break the callbacks here so that we do it after the final ACK is
134 * received. The step number here must match the final number in
135 * afs_deliver_cb_callback().
136 */
137 if (call->unmarshall == 6) {
138 ASSERT(call->server && call->count && call->request);
139 afs_break_callbacks(call->server, call->count, call->request);
140 }
141
133 afs_put_server(call->server); 142 afs_put_server(call->server);
134 call->server = NULL; 143 call->server = NULL;
135 kfree(call->buffer); 144 kfree(call->buffer);
@@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
272 _debug("trailer"); 281 _debug("trailer");
273 if (skb->len != 0) 282 if (skb->len != 0)
274 return -EBADMSG; 283 return -EBADMSG;
284
285 /* Record that the message was unmarshalled successfully so
286 * that the call destructor can know to do the callback breaking
287 * work, even if the final ACK isn't received.
288 *
289 * If the step number changes, then afs_cm_destructor() must be
290 * updated also.
291 */
292 call->unmarshall++;
293 case 6:
275 break; 294 break;
276 } 295 }
277 296
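
The coordination above turns the unmarshalling counter into a small state machine whose final value means "everything was delivered", which the destructor can test after the fact. A self-contained sketch of the idea; parse_state, msg, deliver, destructor and finalize are illustrative names (the afs code uses a bare integer that ends at 6):

enum parse_state { PARSE_HDR, PARSE_BODY, PARSE_DONE };

struct msg {
	enum parse_state state;
};

static void finalize(struct msg *m)
{
	(void)m;	/* the callback-breaking work is elided */
}

static void deliver(struct msg *m)
{
	switch (m->state) {
	case PARSE_HDR:
		/* consume the header */
		m->state++;
		/* fall through */
	case PARSE_BODY:
		/* consume the body, then record completion */
		m->state++;
		/* fall through */
	case PARSE_DONE:
		break;
	}
}

static void destructor(struct msg *m)
{
	/* runs later, possibly without a final ACK */
	if (m->state == PARSE_DONE)
		finalize(m);
}
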
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index be75b500005d..590b55f46d61 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -75,7 +75,7 @@ struct afs_call {
75 const struct afs_call_type *type; /* type of call */ 75 const struct afs_call_type *type; /* type of call */
76 const struct afs_wait_mode *wait_mode; /* completion wait mode */ 76 const struct afs_wait_mode *wait_mode; /* completion wait mode */
77 wait_queue_head_t waitq; /* processes awaiting completion */ 77 wait_queue_head_t waitq; /* processes awaiting completion */
78 work_func_t async_workfn; 78 void (*async_workfn)(struct afs_call *call); /* asynchronous work function */
79 struct work_struct async_work; /* asynchronous work processor */ 79 struct work_struct async_work; /* asynchronous work processor */
80 struct work_struct work; /* actual work processor */ 80 struct work_struct work; /* actual work processor */
81 struct sk_buff_head rx_queue; /* received packets */ 81 struct sk_buff_head rx_queue; /* received packets */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ef943df73b8c..03a3beb17004 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *);
25static int afs_wait_for_call_to_complete(struct afs_call *); 25static int afs_wait_for_call_to_complete(struct afs_call *);
26static void afs_wake_up_async_call(struct afs_call *); 26static void afs_wake_up_async_call(struct afs_call *);
27static int afs_dont_wait_for_call_to_complete(struct afs_call *); 27static int afs_dont_wait_for_call_to_complete(struct afs_call *);
28static void afs_process_async_call(struct work_struct *); 28static void afs_process_async_call(struct afs_call *);
29static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *); 29static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
30static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool); 30static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
31 31
@@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *);
58static struct sk_buff_head afs_incoming_calls; 58static struct sk_buff_head afs_incoming_calls;
59static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call); 59static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
60 60
61static void afs_async_workfn(struct work_struct *work)
62{
63 struct afs_call *call = container_of(work, struct afs_call, async_work);
64
65 call->async_workfn(call);
66}
67
61/* 68/*
62 * open an RxRPC socket and bind it to be a server for callback notifications 69 * open an RxRPC socket and bind it to be a server for callback notifications
63 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT 70 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
@@ -184,6 +191,28 @@ static void afs_free_call(struct afs_call *call)
184} 191}
185 192
186/* 193/*
194 * End a call but do not free it
195 */
196static void afs_end_call_nofree(struct afs_call *call)
197{
198 if (call->rxcall) {
199 rxrpc_kernel_end_call(call->rxcall);
200 call->rxcall = NULL;
201 }
202 if (call->type->destructor)
203 call->type->destructor(call);
204}
205
206/*
207 * End a call and free it
208 */
209static void afs_end_call(struct afs_call *call)
210{
211 afs_end_call_nofree(call);
212 afs_free_call(call);
213}
214
215/*
187 * allocate a call with flat request and reply buffers 216 * allocate a call with flat request and reply buffers
188 */ 217 */
189struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, 218struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
@@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
326 atomic_read(&afs_outstanding_calls)); 355 atomic_read(&afs_outstanding_calls));
327 356
328 call->wait_mode = wait_mode; 357 call->wait_mode = wait_mode;
329 INIT_WORK(&call->async_work, afs_process_async_call); 358 call->async_workfn = afs_process_async_call;
359 INIT_WORK(&call->async_work, afs_async_workfn);
330 360
331 memset(&srx, 0, sizeof(srx)); 361 memset(&srx, 0, sizeof(srx));
332 srx.srx_family = AF_RXRPC; 362 srx.srx_family = AF_RXRPC;
@@ -383,11 +413,8 @@ error_do_abort:
383 rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT); 413 rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
384 while ((skb = skb_dequeue(&call->rx_queue))) 414 while ((skb = skb_dequeue(&call->rx_queue)))
385 afs_free_skb(skb); 415 afs_free_skb(skb);
386 rxrpc_kernel_end_call(rxcall);
387 call->rxcall = NULL;
388error_kill_call: 416error_kill_call:
389 call->type->destructor(call); 417 afs_end_call(call);
390 afs_free_call(call);
391 _leave(" = %d", ret); 418 _leave(" = %d", ret);
392 return ret; 419 return ret;
393} 420}
@@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call)
509 if (call->state >= AFS_CALL_COMPLETE) { 536 if (call->state >= AFS_CALL_COMPLETE) {
510 while ((skb = skb_dequeue(&call->rx_queue))) 537 while ((skb = skb_dequeue(&call->rx_queue)))
511 afs_free_skb(skb); 538 afs_free_skb(skb);
512 if (call->incoming) { 539 if (call->incoming)
513 rxrpc_kernel_end_call(call->rxcall); 540 afs_end_call(call);
514 call->rxcall = NULL;
515 call->type->destructor(call);
516 afs_free_call(call);
517 }
518 } 541 }
519 542
520 _leave(""); 543 _leave("");
@@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
564 } 587 }
565 588
566 _debug("call complete"); 589 _debug("call complete");
567 rxrpc_kernel_end_call(call->rxcall); 590 afs_end_call(call);
568 call->rxcall = NULL;
569 call->type->destructor(call);
570 afs_free_call(call);
571 _leave(" = %d", ret); 591 _leave(" = %d", ret);
572 return ret; 592 return ret;
573} 593}
@@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
603/* 623/*
604 * delete an asynchronous call 624 * delete an asynchronous call
605 */ 625 */
606static void afs_delete_async_call(struct work_struct *work) 626static void afs_delete_async_call(struct afs_call *call)
607{ 627{
608 struct afs_call *call =
609 container_of(work, struct afs_call, async_work);
610
611 _enter(""); 628 _enter("");
612 629
613 afs_free_call(call); 630 afs_free_call(call);
@@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work)
620 * - on a multiple-thread workqueue this work item may try to run on several 637 * - on a multiple-thread workqueue this work item may try to run on several
621 * CPUs at the same time 638 * CPUs at the same time
622 */ 639 */
623static void afs_process_async_call(struct work_struct *work) 640static void afs_process_async_call(struct afs_call *call)
624{ 641{
625 struct afs_call *call =
626 container_of(work, struct afs_call, async_work);
627
628 _enter(""); 642 _enter("");
629 643
630 if (!skb_queue_empty(&call->rx_queue)) 644 if (!skb_queue_empty(&call->rx_queue))
@@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work)
637 call->reply = NULL; 651 call->reply = NULL;
638 652
639 /* kill the call */ 653 /* kill the call */
640 rxrpc_kernel_end_call(call->rxcall); 654 afs_end_call_nofree(call);
641 call->rxcall = NULL;
642 if (call->type->destructor)
643 call->type->destructor(call);
644 655
645 /* we can't just delete the call because the work item may be 656 /* we can't just delete the call because the work item may be
646 * queued */ 657 * queued */
@@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
663 call->reply_size += len; 674 call->reply_size += len;
664} 675}
665 676
666static void afs_async_workfn(struct work_struct *work)
667{
668 struct afs_call *call = container_of(work, struct afs_call, async_work);
669
670 call->async_workfn(work);
671}
672
673/* 677/*
674 * accept the backlog of incoming calls 678 * accept the backlog of incoming calls
675 */ 679 */
@@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call)
790 _debug("oom"); 794 _debug("oom");
791 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); 795 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
792 default: 796 default:
793 rxrpc_kernel_end_call(call->rxcall); 797 afs_end_call(call);
794 call->rxcall = NULL;
795 call->type->destructor(call);
796 afs_free_call(call);
797 _leave(" [error]"); 798 _leave(" [error]");
798 return; 799 return;
799 } 800 }
@@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
823 call->state = AFS_CALL_AWAIT_ACK; 824 call->state = AFS_CALL_AWAIT_ACK;
824 n = rxrpc_kernel_send_data(call->rxcall, &msg, len); 825 n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
825 if (n >= 0) { 826 if (n >= 0) {
827 /* Success */
826 _leave(" [replied]"); 828 _leave(" [replied]");
827 return; 829 return;
828 } 830 }
831
829 if (n == -ENOMEM) { 832 if (n == -ENOMEM) {
830 _debug("oom"); 833 _debug("oom");
831 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); 834 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
832 } 835 }
833 rxrpc_kernel_end_call(call->rxcall); 836 afs_end_call(call);
834 call->rxcall = NULL;
835 call->type->destructor(call);
836 afs_free_call(call);
837 _leave(" [error]"); 837 _leave(" [error]");
838} 838}
839 839
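
The rxrpc.c rework above is the common container_of trampoline: the work item always points at one generic function, which recovers the typed object and dispatches through a per-object handler, so handlers such as afs_process_async_call() now take an afs_call directly. A minimal kernel-style sketch; my_call and friends are illustrative names:

#include <linux/workqueue.h>

struct my_call {
	void (*handler)(struct my_call *call);
	struct work_struct work;
};

static void my_call_workfn(struct work_struct *work)
{
	struct my_call *call = container_of(work, struct my_call, work);

	call->handler(call);	/* typed dispatch, one container_of total */
}

static void my_call_init(struct my_call *call,
			 void (*handler)(struct my_call *call))
{
	call->handler = handler;
	INIT_WORK(&call->work, my_call_workfn);
}

Moving the trampoline to the top of the file, as this patch does, also avoids needing a forward declaration before its first user.
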
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index b6f46013dddf..f66c66b9f182 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -590,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
590 add_to_mask(state, &state->groups->aces[i].perms); 590 add_to_mask(state, &state->groups->aces[i].perms);
591 } 591 }
592 592
593 if (!state->users->n && !state->groups->n) { 593 if (state->users->n || state->groups->n) {
594 pace++; 594 pace++;
595 pace->e_tag = ACL_MASK; 595 pace->e_tag = ACL_MASK;
596 low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags); 596 low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 32b699bebb9c..9a77a5a21557 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3717,9 +3717,16 @@ out:
3717static __be32 3717static __be32
3718nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) 3718nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3719{ 3719{
3720 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) 3720 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
3721
3722 if (check_for_locks(stp->st_file, lo))
3721 return nfserr_locks_held; 3723 return nfserr_locks_held;
3722 release_lock_stateid(stp); 3724 /*
3725 * Currently there's a 1-1 lock stateid<->lockowner
3726 * correspondence, and we have to delete the lockowner when we
3727 * delete the lock stateid:
3728 */
3729 unhash_lockowner(lo);
3723 return nfs_ok; 3730 return nfs_ok;
3724} 3731}
3725 3732
@@ -4159,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
4159 4166
4160 if (!same_owner_str(&lo->lo_owner, owner, clid)) 4167 if (!same_owner_str(&lo->lo_owner, owner, clid))
4161 return false; 4168 return false;
4169 if (list_empty(&lo->lo_owner.so_stateids)) {
4170 WARN_ON_ONCE(1);
4171 return false;
4172 }
4162 lst = list_first_entry(&lo->lo_owner.so_stateids, 4173 lst = list_first_entry(&lo->lo_owner.so_stateids,
4163 struct nfs4_ol_stateid, st_perstateowner); 4174 struct nfs4_ol_stateid, st_perstateowner);
4164 return lst->st_file->fi_inode == inode; 4175 return lst->st_file->fi_inode == inode;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index af3f7aa73e13..ee1f88419cb0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -472,11 +472,15 @@ bail:
472 472
473void dlm_destroy_master_caches(void) 473void dlm_destroy_master_caches(void)
474{ 474{
475 if (dlm_lockname_cache) 475 if (dlm_lockname_cache) {
476 kmem_cache_destroy(dlm_lockname_cache); 476 kmem_cache_destroy(dlm_lockname_cache);
477 dlm_lockname_cache = NULL;
478 }
477 479
478 if (dlm_lockres_cache) 480 if (dlm_lockres_cache) {
479 kmem_cache_destroy(dlm_lockres_cache); 481 kmem_cache_destroy(dlm_lockres_cache);
482 dlm_lockres_cache = NULL;
483 }
480} 484}
481 485
482static void dlm_lockres_release(struct kref *kref) 486static void dlm_lockres_release(struct kref *kref)
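
The dlm fix above is the destroy-and-NULL idiom: clearing the pointer right after kmem_cache_destroy() makes the teardown helper idempotent, so an error path that runs it a second time sees NULL instead of a stale pointer. Sketch with an illustrative cache name:

#include <linux/slab.h>

static struct kmem_cache *example_cache;

static void example_destroy_caches(void)
{
	if (example_cache) {
		kmem_cache_destroy(example_cache);
		example_cache = NULL;	/* safe to call this helper again */
	}
}
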
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 84a2e29a2314..d2006ca31dba 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -406,6 +406,8 @@ extern struct kobject *acpi_kobj;
406extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int); 406extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
407void acpi_bus_private_data_handler(acpi_handle, void *); 407void acpi_bus_private_data_handler(acpi_handle, void *);
408int acpi_bus_get_private_data(acpi_handle, void **); 408int acpi_bus_get_private_data(acpi_handle, void **);
409int acpi_bus_attach_private_data(acpi_handle, void *);
410void acpi_bus_detach_private_data(acpi_handle);
409void acpi_bus_no_hotplug(acpi_handle handle); 411void acpi_bus_no_hotplug(acpi_handle handle);
410extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32); 412extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
411extern int register_acpi_notifier(struct notifier_block *); 413extern int register_acpi_notifier(struct notifier_block *);
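
A hedged usage sketch for the two declarations just added: a driver attaching per-handle context at bind time and dropping it at unbind. Only the two acpi_bus_*_private_data() names come from the patch; my_ctx, my_bind and my_unbind are illustrative, and whether the context itself must be freed by the caller afterwards depends on the implementation:

struct my_ctx {
	int state;
};

static int my_bind(acpi_handle handle, struct my_ctx *ctx)
{
	return acpi_bus_attach_private_data(handle, ctx);
}

static void my_unbind(acpi_handle handle)
{
	acpi_bus_detach_private_data(handle);
}
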
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8300fb87b84a..72cb0ddb9678 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
429typedef void (*dma_async_tx_callback)(void *dma_async_param); 429typedef void (*dma_async_tx_callback)(void *dma_async_param);
430 430
431struct dmaengine_unmap_data { 431struct dmaengine_unmap_data {
432 u8 map_cnt;
432 u8 to_cnt; 433 u8 to_cnt;
433 u8 from_cnt; 434 u8 from_cnt;
434 u8 bidi_cnt; 435 u8 bidi_cnt;
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 7c8b20b120ea..a9a53b12397b 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -56,6 +56,7 @@ struct macvlan_dev {
56 int numqueues; 56 int numqueues;
57 netdev_features_t tap_features; 57 netdev_features_t tap_features;
58 int minor; 58 int minor;
59 int nest_level;
59}; 60};
60 61
61static inline void macvlan_count_rx(const struct macvlan_dev *vlan, 62static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 13bbbde00e68..b2acc4a1b13c 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
73/* found in socket.c */ 73/* found in socket.c */
74extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); 74extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
75 75
76static inline int is_vlan_dev(struct net_device *dev) 76static inline bool is_vlan_dev(struct net_device *dev)
77{ 77{
78 return dev->priv_flags & IFF_802_1Q_VLAN; 78 return dev->priv_flags & IFF_802_1Q_VLAN;
79} 79}
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
159#ifdef CONFIG_NET_POLL_CONTROLLER 159#ifdef CONFIG_NET_POLL_CONTROLLER
160 struct netpoll *netpoll; 160 struct netpoll *netpoll;
161#endif 161#endif
162 unsigned int nest_level;
162}; 163};
163 164
164static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) 165static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
197 const struct net_device *by_dev); 198 const struct net_device *by_dev);
198 199
199extern bool vlan_uses_dev(const struct net_device *dev); 200extern bool vlan_uses_dev(const struct net_device *dev);
201
202static inline int vlan_get_encap_level(struct net_device *dev)
203{
204 BUG_ON(!is_vlan_dev(dev));
205 return vlan_dev_priv(dev)->nest_level;
206}
200#else 207#else
201static inline struct net_device * 208static inline struct net_device *
202__vlan_find_dev_deep(struct net_device *real_dev, 209__vlan_find_dev_deep(struct net_device *real_dev,
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
263{ 270{
264 return false; 271 return false;
265} 272}
273static inline int vlan_get_encap_level(struct net_device *dev)
274{
275 BUG();
276 return 0;
277}
266#endif 278#endif
267 279
268static inline bool vlan_hw_offload_capable(netdev_features_t features, 280static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
483 */ 495 */
484 skb->protocol = htons(ETH_P_802_2); 496 skb->protocol = htons(ETH_P_802_2);
485} 497}
498
486#endif /* !(_LINUX_IF_VLAN_H_) */ 499#endif /* !(_LINUX_IF_VLAN_H_) */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b66e7610d4ee..7040dc98ff8b 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
421 __be32 byte_count; 421 __be32 byte_count;
422}; 422};
423 423
424enum mlx4_update_qp_attr {
425 MLX4_UPDATE_QP_SMAC = 1 << 0,
426};
427
428struct mlx4_update_qp_params {
429 u8 smac_index;
430};
431
432int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
433 enum mlx4_update_qp_attr attr,
434 struct mlx4_update_qp_params *params);
424int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 435int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
425 enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, 436 enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
426 struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, 437 struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
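
The new mlx4_update_qp() takes an attribute mask saying which fields of mlx4_update_qp_params are valid, so the structure can grow without breaking callers. A hedged caller sketch built only from the declarations above; set_qp_smac is an illustrative name:

static int set_qp_smac(struct mlx4_dev *dev, struct mlx4_qp *qp,
		       u8 smac_index)
{
	struct mlx4_update_qp_params params = {
		.smac_index = smac_index,
	};

	/* only the fields named in the mask are consumed */
	return mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
}
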
diff --git a/include/linux/net.h b/include/linux/net.h
index 94734a6259a4..17d83393afcc 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -248,24 +248,17 @@ do { \
248bool __net_get_random_once(void *buf, int nbytes, bool *done, 248bool __net_get_random_once(void *buf, int nbytes, bool *done,
249 struct static_key *done_key); 249 struct static_key *done_key);
250 250
251#ifdef HAVE_JUMP_LABEL
252#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
253 { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
254#else /* !HAVE_JUMP_LABEL */
255#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
256#endif /* HAVE_JUMP_LABEL */
257
258#define net_get_random_once(buf, nbytes) \ 251#define net_get_random_once(buf, nbytes) \
259 ({ \ 252 ({ \
260 bool ___ret = false; \ 253 bool ___ret = false; \
261 static bool ___done = false; \ 254 static bool ___done = false; \
262 static struct static_key ___done_key = \ 255 static struct static_key ___once_key = \
263 ___NET_RANDOM_STATIC_KEY_INIT; \ 256 STATIC_KEY_INIT_TRUE; \
264 if (!static_key_true(&___done_key)) \ 257 if (static_key_true(&___once_key)) \
265 ___ret = __net_get_random_once(buf, \ 258 ___ret = __net_get_random_once(buf, \
266 nbytes, \ 259 nbytes, \
267 &___done, \ 260 &___done, \
268 &___done_key); \ 261 &___once_key); \
269 ___ret; \ 262 ___ret; \
270 }) 263 })
271 264
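
The simplification above relies on a static key that starts true (STATIC_KEY_INIT_TRUE) and is flipped off after the first initialization, replacing the open-coded HAVE_JUMP_LABEL initializer. Typical call-site usage, with example_hashrnd and example_hash as assumed names:

static u32 example_hashrnd;

static u32 example_hash(u32 val)
{
	/* first caller seeds the secret; later calls boil down to a
	 * static-key branch that is usually patched to a no-op
	 */
	net_get_random_once(&example_hashrnd, sizeof(example_hashrnd));

	return val ^ example_hashrnd;	/* stand-in for a real hash */
}
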
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7ed3a3aa6604..b42d07b0390b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1144,6 +1144,7 @@ struct net_device_ops {
1144 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, 1144 netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
1145 struct net_device *dev, 1145 struct net_device *dev,
1146 void *priv); 1146 void *priv);
1147 int (*ndo_get_lock_subclass)(struct net_device *dev);
1147}; 1148};
1148 1149
1149/** 1150/**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
2950 2951
2951static inline void netif_addr_lock_nested(struct net_device *dev) 2952static inline void netif_addr_lock_nested(struct net_device *dev)
2952{ 2953{
2953 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); 2954 int subclass = SINGLE_DEPTH_NESTING;
2955
2956 if (dev->netdev_ops->ndo_get_lock_subclass)
2957 subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
2958
2959 spin_lock_nested(&dev->addr_list_lock, subclass);
2954} 2960}
2955 2961
2956static inline void netif_addr_lock_bh(struct net_device *dev) 2962static inline void netif_addr_lock_bh(struct net_device *dev)
@@ -3050,10 +3056,19 @@ extern int weight_p;
3050extern int bpf_jit_enable; 3056extern int bpf_jit_enable;
3051 3057
3052bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); 3058bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
3059struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
3060 struct list_head **iter);
3053struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, 3061struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
3054 struct list_head **iter); 3062 struct list_head **iter);
3055 3063
3056/* iterate through upper list, must be called under RCU read lock */ 3064/* iterate through upper list, must be called under RCU read lock */
3065#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
3066 for (iter = &(dev)->adj_list.upper, \
3067 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
3068 updev; \
3069 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
3070
3071/* iterate through upper list, must be called under RCU read lock */
3057#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ 3072#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
3058 for (iter = &(dev)->all_adj_list.upper, \ 3073 for (iter = &(dev)->all_adj_list.upper, \
3059 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ 3074 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
@@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3077 priv; \ 3092 priv; \
3078 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 3093 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
3079 3094
3095void *netdev_lower_get_next(struct net_device *dev,
3096 struct list_head **iter);
3097#define netdev_for_each_lower_dev(dev, ldev, iter) \
3098 for (iter = &(dev)->adj_list.lower, \
3099 ldev = netdev_lower_get_next(dev, &(iter)); \
3100 ldev; \
3101 ldev = netdev_lower_get_next(dev, &(iter)))
3102
3080void *netdev_adjacent_get_private(struct list_head *adj_list); 3103void *netdev_adjacent_get_private(struct list_head *adj_list);
3081void *netdev_lower_get_first_private_rcu(struct net_device *dev); 3104void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3082struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 3105struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
3092void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 3115void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3093void *netdev_lower_dev_get_private(struct net_device *dev, 3116void *netdev_lower_dev_get_private(struct net_device *dev,
3094 struct net_device *lower_dev); 3117 struct net_device *lower_dev);
3118int dev_get_nest_level(struct net_device *dev,
3119 bool (*type_check)(struct net_device *dev));
3095int skb_checksum_help(struct sk_buff *skb); 3120int skb_checksum_help(struct sk_buff *skb);
3096struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3121struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3097 netdev_features_t features, bool tx_path); 3122 netdev_features_t features, bool tx_path);
@@ -3180,12 +3205,7 @@ void netdev_change_features(struct net_device *dev);
3180void netif_stacked_transfer_operstate(const struct net_device *rootdev, 3205void netif_stacked_transfer_operstate(const struct net_device *rootdev,
3181 struct net_device *dev); 3206 struct net_device *dev);
3182 3207
3183netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 3208netdev_features_t netif_skb_features(struct sk_buff *skb);
3184 const struct net_device *dev);
3185static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
3186{
3187 return netif_skb_dev_features(skb, skb->dev);
3188}
3189 3209
3190static inline bool net_gso_ok(netdev_features_t features, int gso_type) 3210static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3191{ 3211{
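
ndo_get_lock_subclass lets a stacked device report a per-depth lockdep subclass for addr_list_lock instead of the fixed SINGLE_DEPTH_NESTING. A hedged sketch of a driver wiring it up; example_priv() and its nest_level field are hypothetical, with the depth presumably computed via the new dev_get_nest_level() when devices are stacked:

static int example_get_lock_subclass(struct net_device *dev)
{
	return example_priv(dev)->nest_level;	/* hypothetical accessor */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_get_lock_subclass	= example_get_lock_subclass,
};
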
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 6fe8464ed767..881a7c3571f4 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -31,7 +31,12 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
31#else /* CONFIG_OF */ 31#else /* CONFIG_OF */
32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
33{ 33{
34 return -ENOSYS; 34 /*
35 * Fall back to the non-DT function to register a bus.
36 * This way, we don't have to keep compat bits around in drivers.
37 */
38
39 return mdiobus_register(mdio);
35} 40}
36 41
37static inline struct phy_device *of_phy_find_device(struct device_node *phy_np) 42static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3356abcfff18..3ef6ea12806a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -402,6 +402,8 @@ struct perf_event {
402 402
403 struct ring_buffer *rb; 403 struct ring_buffer *rb;
404 struct list_head rb_entry; 404 struct list_head rb_entry;
405 unsigned long rcu_batches;
406 int rcu_pending;
405 407
406 /* poll related */ 408 /* poll related */
407 wait_queue_head_t waitq; 409 wait_queue_head_t waitq;
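
The two fields above support the conditional-grace-period idiom used later in the kernel/events/core.c hunks: snapshot the RCU state when unpublishing an object and wait at reuse time only if a full grace period has not already elapsed. A toy sketch; slot and its operations are illustrative names:

#include <linux/rcupdate.h>

struct slot {
	void __rcu *obj;
	unsigned long rcu_batches;
	int rcu_pending;
};

static void slot_unpublish(struct slot *s)
{
	rcu_assign_pointer(s->obj, NULL);
	s->rcu_batches = get_state_synchronize_rcu();
	s->rcu_pending = 1;
}

static void slot_reuse(struct slot *s, void *new_obj)
{
	if (s->rcu_pending) {
		/* returns immediately if a grace period already passed */
		cond_synchronize_rcu(s->rcu_batches);
		s->rcu_pending = 0;
	}
	rcu_assign_pointer(s->obj, new_obj);
}
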
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 8e3e66ac0a52..953937ea5233 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/mutex.h> 5#include <linux/mutex.h>
6#include <linux/netdevice.h> 6#include <linux/netdevice.h>
7#include <linux/wait.h>
7#include <uapi/linux/rtnetlink.h> 8#include <uapi/linux/rtnetlink.h>
8 9
9extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); 10extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
22extern void rtnl_unlock(void); 23extern void rtnl_unlock(void);
23extern int rtnl_trylock(void); 24extern int rtnl_trylock(void);
24extern int rtnl_is_locked(void); 25extern int rtnl_is_locked(void);
26
27extern wait_queue_head_t netdev_unregistering_wq;
28extern struct mutex net_mutex;
29
25#ifdef CONFIG_PROVE_LOCKING 30#ifdef CONFIG_PROVE_LOCKING
26extern int lockdep_rtnl_is_held(void); 31extern int lockdep_rtnl_is_held(void);
27#else 32#else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25f54c79f757..221b2bde3723 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
220#define TASK_PARKED 512 220#define TASK_PARKED 512
221#define TASK_STATE_MAX 1024 221#define TASK_STATE_MAX 1024
222 222
223#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" 223#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
224 224
225extern char ___assert_task_state[1 - 2*!!( 225extern char ___assert_task_state[1 - 2*!!(
226 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; 226 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
1153 * 1153 *
1154 * @dl_boosted tells if we are boosted due to DI. If so we are 1154 * @dl_boosted tells if we are boosted due to DI. If so we are
1155 * outside bandwidth enforcement mechanism (but only until we 1155 * outside bandwidth enforcement mechanism (but only until we
1156 * exit the critical section). 1156 * exit the critical section);
1157 *
1158 * @dl_yielded tells if task gave up the cpu before consuming
1159 * all its available runtime during the last job.
1157 */ 1160 */
1158 int dl_throttled, dl_new, dl_boosted; 1161 int dl_throttled, dl_new, dl_boosted, dl_yielded;
1159 1162
1160 /* 1163 /*
1161 * Bandwidth enforcement timer. Each -deadline task has its 1164 * Bandwidth enforcement timer. Each -deadline task has its
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f3539a15c411..f856e5a746fa 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3669,6 +3669,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
3669void cfg80211_sched_scan_stopped(struct wiphy *wiphy); 3669void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
3670 3670
3671/** 3671/**
3672 * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
3673 *
3674 * @wiphy: the wiphy on which the scheduled scan stopped
3675 *
3676 * The driver can call this function to inform cfg80211 that the
3677 * scheduled scan had to be stopped, for whatever reason. The driver
3678 * is then called back via the sched_scan_stop operation when done.
3679 * This function should be called with rtnl locked.
3680 */
3681void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
3682
3683/**
3672 * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame 3684 * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
3673 * 3685 *
3674 * @wiphy: the wiphy reporting the BSS 3686 * @wiphy: the wiphy reporting the BSS
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 6c4f5eac98e7..216cecce65e9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
127void rt6_ifdown(struct net *net, struct net_device *dev); 127void rt6_ifdown(struct net *net, struct net_device *dev);
128void rt6_mtu_change(struct net_device *dev, unsigned int mtu); 128void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
129void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); 129void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
130void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
130 131
131 132
132/* 133/*
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 80f500a29498..b2704fd0ec80 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -20,6 +20,11 @@ struct local_ports {
20 int range[2]; 20 int range[2];
21}; 21};
22 22
23struct ping_group_range {
24 seqlock_t lock;
25 kgid_t range[2];
26};
27
23struct netns_ipv4 { 28struct netns_ipv4 {
24#ifdef CONFIG_SYSCTL 29#ifdef CONFIG_SYSCTL
25 struct ctl_table_header *forw_hdr; 30 struct ctl_table_header *forw_hdr;
@@ -66,13 +71,13 @@ struct netns_ipv4 {
66 int sysctl_icmp_ratemask; 71 int sysctl_icmp_ratemask;
67 int sysctl_icmp_errors_use_inbound_ifaddr; 72 int sysctl_icmp_errors_use_inbound_ifaddr;
68 73
69 struct local_ports sysctl_local_ports; 74 struct local_ports ip_local_ports;
70 75
71 int sysctl_tcp_ecn; 76 int sysctl_tcp_ecn;
72 int sysctl_ip_no_pmtu_disc; 77 int sysctl_ip_no_pmtu_disc;
73 int sysctl_ip_fwd_use_pmtu; 78 int sysctl_ip_fwd_use_pmtu;
74 79
75 kgid_t sysctl_ping_group_range[2]; 80 struct ping_group_range ping_group_range;
76 81
77 atomic_t dev_addr_genid; 82 atomic_t dev_addr_genid;
78 83
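
Wrapping the gid pair in a seqlock (above) lets readers sample both bounds consistently without blocking sysctl writers. A hedged sketch of the matching read side, assuming the standard seqlock API; read_ping_group_range is an illustrative name:

#include <linux/seqlock.h>

static void read_ping_group_range(struct ping_group_range *pgr,
				  kgid_t *low, kgid_t *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&pgr->lock);
		*low = pgr->range[0];
		*high = pgr->range[1];
	} while (read_seqretry(&pgr->lock, seq));
}
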
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 1ba9d626aa83..194c1eab04d8 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3856,6 +3856,8 @@ enum nl80211_ap_sme_features {
3856 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested 3856 * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
3857 * to work properly to support receiving regulatory hints from 3857 * to work properly to support receiving regulatory hints from
3858 * cellular base stations. 3858 * cellular base stations.
3859 * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
3860 * here to reserve the value for API/ABI compatibility)
3859 * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of 3861 * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
3860 * equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station 3862 * equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
3861 * mode 3863 * mode
@@ -3897,7 +3899,7 @@ enum nl80211_feature_flags {
3897 NL80211_FEATURE_HT_IBSS = 1 << 1, 3899 NL80211_FEATURE_HT_IBSS = 1 << 1,
3898 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, 3900 NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
3899 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3, 3901 NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
3900 /* bit 4 is reserved - don't use */ 3902 NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4,
3901 NL80211_FEATURE_SAE = 1 << 5, 3903 NL80211_FEATURE_SAE = 1 << 5,
3902 NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6, 3904 NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6,
3903 NL80211_FEATURE_SCAN_FLUSH = 1 << 7, 3905 NL80211_FEATURE_SCAN_FLUSH = 1 << 7,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f83a71a3e46d..440eefc67397 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
1443 cpuctx->exclusive = 0; 1443 cpuctx->exclusive = 0;
1444} 1444}
1445 1445
1446struct remove_event {
1447 struct perf_event *event;
1448 bool detach_group;
1449};
1450
1446/* 1451/*
1447 * Cross CPU call to remove a performance event 1452 * Cross CPU call to remove a performance event
1448 * 1453 *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
1451 */ 1456 */
1452static int __perf_remove_from_context(void *info) 1457static int __perf_remove_from_context(void *info)
1453{ 1458{
1454 struct perf_event *event = info; 1459 struct remove_event *re = info;
1460 struct perf_event *event = re->event;
1455 struct perf_event_context *ctx = event->ctx; 1461 struct perf_event_context *ctx = event->ctx;
1456 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 1462 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1457 1463
1458 raw_spin_lock(&ctx->lock); 1464 raw_spin_lock(&ctx->lock);
1459 event_sched_out(event, cpuctx, ctx); 1465 event_sched_out(event, cpuctx, ctx);
1466 if (re->detach_group)
1467 perf_group_detach(event);
1460 list_del_event(event, ctx); 1468 list_del_event(event, ctx);
1461 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { 1469 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1462 ctx->is_active = 0; 1470 ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
1481 * When called from perf_event_exit_task, it's OK because the 1489 * When called from perf_event_exit_task, it's OK because the
1482 * context has been detached from its task. 1490 * context has been detached from its task.
1483 */ 1491 */
1484static void perf_remove_from_context(struct perf_event *event) 1492static void perf_remove_from_context(struct perf_event *event, bool detach_group)
1485{ 1493{
1486 struct perf_event_context *ctx = event->ctx; 1494 struct perf_event_context *ctx = event->ctx;
1487 struct task_struct *task = ctx->task; 1495 struct task_struct *task = ctx->task;
1496 struct remove_event re = {
1497 .event = event,
1498 .detach_group = detach_group,
1499 };
1488 1500
1489 lockdep_assert_held(&ctx->mutex); 1501 lockdep_assert_held(&ctx->mutex);
1490 1502
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
1493 * Per cpu events are removed via an smp call and 1505 * Per cpu events are removed via an smp call and
1494 * the removal is always successful. 1506 * the removal is always successful.
1495 */ 1507 */
1496 cpu_function_call(event->cpu, __perf_remove_from_context, event); 1508 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
1497 return; 1509 return;
1498 } 1510 }
1499 1511
1500retry: 1512retry:
1501 if (!task_function_call(task, __perf_remove_from_context, event)) 1513 if (!task_function_call(task, __perf_remove_from_context, &re))
1502 return; 1514 return;
1503 1515
1504 raw_spin_lock_irq(&ctx->lock); 1516 raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
1515 * Since the task isn't running, it's safe to remove the event; our 1527 * Since the task isn't running, it's safe to remove the event; our
1516 * holding the ctx->lock ensures the task won't get scheduled in. 1528 * holding the ctx->lock ensures the task won't get scheduled in.
1517 */ 1529 */
1530 if (detach_group)
1531 perf_group_detach(event);
1518 list_del_event(event, ctx); 1532 list_del_event(event, ctx);
1519 raw_spin_unlock_irq(&ctx->lock); 1533 raw_spin_unlock_irq(&ctx->lock);
1520} 1534}
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
3178} 3192}
3179 3193
3180static void ring_buffer_put(struct ring_buffer *rb); 3194static void ring_buffer_put(struct ring_buffer *rb);
3181static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); 3195static void ring_buffer_attach(struct perf_event *event,
3196 struct ring_buffer *rb);
3182 3197
3183static void unaccount_event_cpu(struct perf_event *event, int cpu) 3198static void unaccount_event_cpu(struct perf_event *event, int cpu)
3184{ 3199{
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
3238 unaccount_event(event); 3253 unaccount_event(event);
3239 3254
3240 if (event->rb) { 3255 if (event->rb) {
3241 struct ring_buffer *rb;
3242
3243 /* 3256 /*
3244 * Can happen when we close an event with re-directed output. 3257 * Can happen when we close an event with re-directed output.
3245 * 3258 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
3247 * over us; possibly making our ring_buffer_put() the last. 3260 * over us; possibly making our ring_buffer_put() the last.
3248 */ 3261 */
3249 mutex_lock(&event->mmap_mutex); 3262 mutex_lock(&event->mmap_mutex);
3250 rb = event->rb; 3263 ring_buffer_attach(event, NULL);
3251 if (rb) {
3252 rcu_assign_pointer(event->rb, NULL);
3253 ring_buffer_detach(event, rb);
3254 ring_buffer_put(rb); /* could be last */
3255 }
3256 mutex_unlock(&event->mmap_mutex); 3264 mutex_unlock(&event->mmap_mutex);
3257 } 3265 }
3258 3266
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
3281 * to trigger the AB-BA case. 3289 * to trigger the AB-BA case.
3282 */ 3290 */
3283 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); 3291 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
3284 raw_spin_lock_irq(&ctx->lock); 3292 perf_remove_from_context(event, true);
3285 perf_group_detach(event);
3286 raw_spin_unlock_irq(&ctx->lock);
3287 perf_remove_from_context(event);
3288 mutex_unlock(&ctx->mutex); 3293 mutex_unlock(&ctx->mutex);
3289 3294
3290 free_event(event); 3295 free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
 			       struct ring_buffer *rb)
 {
+	struct ring_buffer *old_rb = NULL;
 	unsigned long flags;
 
-	if (!list_empty(&event->rb_entry))
-		return;
+	if (event->rb) {
+		/*
+		 * Should be impossible, we set this when removing
+		 * event->rb_entry and wait/clear when adding event->rb_entry.
+		 */
+		WARN_ON_ONCE(event->rcu_pending);
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	if (list_empty(&event->rb_entry))
-		list_add(&event->rb_entry, &rb->event_list);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+		old_rb = event->rb;
+		event->rcu_batches = get_state_synchronize_rcu();
+		event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-	unsigned long flags;
+		spin_lock_irqsave(&old_rb->event_lock, flags);
+		list_del_rcu(&event->rb_entry);
+		spin_unlock_irqrestore(&old_rb->event_lock, flags);
+	}
 
-	if (list_empty(&event->rb_entry))
-		return;
+	if (event->rcu_pending && rb) {
+		cond_synchronize_rcu(event->rcu_batches);
+		event->rcu_pending = 0;
+	}
+
+	if (rb) {
+		spin_lock_irqsave(&rb->event_lock, flags);
+		list_add_rcu(&event->rb_entry, &rb->event_list);
+		spin_unlock_irqrestore(&rb->event_lock, flags);
+	}
+
+	rcu_assign_pointer(event->rb, rb);
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	list_del_init(&event->rb_entry);
-	wake_up_all(&event->waitq);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
+	if (old_rb) {
+		ring_buffer_put(old_rb);
+		/*
+		 * Since we detached before setting the new rb, so that we
+		 * could attach the new rb, we could have missed a wakeup.
+		 * Provide it now.
+		 */
+		wake_up_all(&event->waitq);
+	}
 }
3865 3889
3866static void ring_buffer_wakeup(struct perf_event *event) 3890static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
3929{ 3953{
3930 struct perf_event *event = vma->vm_file->private_data; 3954 struct perf_event *event = vma->vm_file->private_data;
3931 3955
3932 struct ring_buffer *rb = event->rb; 3956 struct ring_buffer *rb = ring_buffer_get(event);
3933 struct user_struct *mmap_user = rb->mmap_user; 3957 struct user_struct *mmap_user = rb->mmap_user;
3934 int mmap_locked = rb->mmap_locked; 3958 int mmap_locked = rb->mmap_locked;
3935 unsigned long size = perf_data_size(rb); 3959 unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
3937 atomic_dec(&rb->mmap_count); 3961 atomic_dec(&rb->mmap_count);
3938 3962
3939 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) 3963 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3940 return; 3964 goto out_put;
3941 3965
3942 /* Detach current event from the buffer. */ 3966 ring_buffer_attach(event, NULL);
3943 rcu_assign_pointer(event->rb, NULL);
3944 ring_buffer_detach(event, rb);
3945 mutex_unlock(&event->mmap_mutex); 3967 mutex_unlock(&event->mmap_mutex);
3946 3968
3947 /* If there's still other mmap()s of this buffer, we're done. */ 3969 /* If there's still other mmap()s of this buffer, we're done. */
3948 if (atomic_read(&rb->mmap_count)) { 3970 if (atomic_read(&rb->mmap_count))
3949 ring_buffer_put(rb); /* can't be last */ 3971 goto out_put;
3950 return;
3951 }
3952 3972
3953 /* 3973 /*
3954 * No other mmap()s, detach from all other events that might redirect 3974 * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
3978 * still restart the iteration to make sure we're not now 3998 * still restart the iteration to make sure we're not now
3979 * iterating the wrong list. 3999 * iterating the wrong list.
3980 */ 4000 */
3981 if (event->rb == rb) { 4001 if (event->rb == rb)
3982 rcu_assign_pointer(event->rb, NULL); 4002 ring_buffer_attach(event, NULL);
3983 ring_buffer_detach(event, rb); 4003
3984 ring_buffer_put(rb); /* can't be last, we still have one */
3985 }
3986 mutex_unlock(&event->mmap_mutex); 4004 mutex_unlock(&event->mmap_mutex);
3987 put_event(event); 4005 put_event(event);
3988 4006
@@ -4007,6 +4025,7 @@ again:
4007 vma->vm_mm->pinned_vm -= mmap_locked; 4025 vma->vm_mm->pinned_vm -= mmap_locked;
4008 free_uid(mmap_user); 4026 free_uid(mmap_user);
4009 4027
4028out_put:
4010 ring_buffer_put(rb); /* could be last */ 4029 ring_buffer_put(rb); /* could be last */
4011} 4030}
4012 4031
@@ -4124,7 +4143,6 @@ again:
4124 vma->vm_mm->pinned_vm += extra; 4143 vma->vm_mm->pinned_vm += extra;
4125 4144
4126 ring_buffer_attach(event, rb); 4145 ring_buffer_attach(event, rb);
4127 rcu_assign_pointer(event->rb, rb);
4128 4146
4129 perf_event_init_userpage(event); 4147 perf_event_init_userpage(event);
4130 perf_event_update_userpage(event); 4148 perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
5408 5426
5409 /* Recursion avoidance in each contexts */ 5427 /* Recursion avoidance in each contexts */
5410 int recursion[PERF_NR_CONTEXTS]; 5428 int recursion[PERF_NR_CONTEXTS];
5429
5430 /* Keeps track of cpu being initialized/exited */
5431 bool online;
5411}; 5432};
5412 5433
5413static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); 5434static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
5654 hwc->state = !(flags & PERF_EF_START); 5675 hwc->state = !(flags & PERF_EF_START);
5655 5676
5656 head = find_swevent_head(swhash, event); 5677 head = find_swevent_head(swhash, event);
5657 if (WARN_ON_ONCE(!head)) 5678 if (!head) {
5679 /*
5680 * We can race with cpu hotplug code. Do not
5681 * WARN if the cpu just got unplugged.
5682 */
5683 WARN_ON_ONCE(swhash->online);
5658 return -EINVAL; 5684 return -EINVAL;
5685 }
5659 5686
5660 hlist_add_head_rcu(&event->hlist_entry, head); 5687 hlist_add_head_rcu(&event->hlist_entry, head);
5661 5688
@@ -6914,7 +6941,7 @@ err_size:
6914static int 6941static int
6915perf_event_set_output(struct perf_event *event, struct perf_event *output_event) 6942perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6916{ 6943{
6917 struct ring_buffer *rb = NULL, *old_rb = NULL; 6944 struct ring_buffer *rb = NULL;
6918 int ret = -EINVAL; 6945 int ret = -EINVAL;
6919 6946
6920 if (!output_event) 6947 if (!output_event)
@@ -6942,8 +6969,6 @@ set:
6942 if (atomic_read(&event->mmap_count)) 6969 if (atomic_read(&event->mmap_count))
6943 goto unlock; 6970 goto unlock;
6944 6971
6945 old_rb = event->rb;
6946
6947 if (output_event) { 6972 if (output_event) {
6948 /* get the rb we want to redirect to */ 6973 /* get the rb we want to redirect to */
6949 rb = ring_buffer_get(output_event); 6974 rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
6951 goto unlock; 6976 goto unlock;
6952 } 6977 }
6953 6978
6954 if (old_rb) 6979 ring_buffer_attach(event, rb);
6955 ring_buffer_detach(event, old_rb);
6956
6957 if (rb)
6958 ring_buffer_attach(event, rb);
6959
6960 rcu_assign_pointer(event->rb, rb);
6961
6962 if (old_rb) {
6963 ring_buffer_put(old_rb);
6964 /*
6965 * Since we detached before setting the new rb, so that we
6966 * could attach the new rb, we could have missed a wakeup.
6967 * Provide it now.
6968 */
6969 wake_up_all(&event->waitq);
6970 }
6971 6980
6972 ret = 0; 6981 ret = 0;
6973unlock: 6982unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
7018 if (attr.freq) { 7027 if (attr.freq) {
7019 if (attr.sample_freq > sysctl_perf_event_sample_rate) 7028 if (attr.sample_freq > sysctl_perf_event_sample_rate)
7020 return -EINVAL; 7029 return -EINVAL;
7030 } else {
7031 if (attr.sample_period & (1ULL << 63))
7032 return -EINVAL;
7021 } 7033 }
7022 7034
7023 /* 7035 /*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
7165 struct perf_event_context *gctx = group_leader->ctx; 7177 struct perf_event_context *gctx = group_leader->ctx;
7166 7178
7167 mutex_lock(&gctx->mutex); 7179 mutex_lock(&gctx->mutex);
7168 perf_remove_from_context(group_leader); 7180 perf_remove_from_context(group_leader, false);
7169 7181
7170 /* 7182 /*
7171 * Removing from the context ends up with disabled 7183 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
7175 perf_event__state_init(group_leader); 7187 perf_event__state_init(group_leader);
7176 list_for_each_entry(sibling, &group_leader->sibling_list, 7188 list_for_each_entry(sibling, &group_leader->sibling_list,
7177 group_entry) { 7189 group_entry) {
7178 perf_remove_from_context(sibling); 7190 perf_remove_from_context(sibling, false);
7179 perf_event__state_init(sibling); 7191 perf_event__state_init(sibling);
7180 put_ctx(gctx); 7192 put_ctx(gctx);
7181 } 7193 }
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7305 mutex_lock(&src_ctx->mutex); 7317 mutex_lock(&src_ctx->mutex);
7306 list_for_each_entry_safe(event, tmp, &src_ctx->event_list, 7318 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7307 event_entry) { 7319 event_entry) {
7308 perf_remove_from_context(event); 7320 perf_remove_from_context(event, false);
7309 unaccount_event_cpu(event, src_cpu); 7321 unaccount_event_cpu(event, src_cpu);
7310 put_ctx(src_ctx); 7322 put_ctx(src_ctx);
7311 list_add(&event->migrate_entry, &events); 7323 list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
7367 struct perf_event_context *child_ctx, 7379 struct perf_event_context *child_ctx,
7368 struct task_struct *child) 7380 struct task_struct *child)
7369{ 7381{
7370 if (child_event->parent) { 7382 perf_remove_from_context(child_event, !!child_event->parent);
7371 raw_spin_lock_irq(&child_ctx->lock);
7372 perf_group_detach(child_event);
7373 raw_spin_unlock_irq(&child_ctx->lock);
7374 }
7375
7376 perf_remove_from_context(child_event);
7377 7383
7378 /* 7384 /*
7379 * It can happen that the parent exits first, and has events 7385 * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
7724 * swapped under us. 7730 * swapped under us.
7725 */ 7731 */
7726 parent_ctx = perf_pin_task_context(parent, ctxn); 7732 parent_ctx = perf_pin_task_context(parent, ctxn);
7733 if (!parent_ctx)
7734 return 0;
7727 7735
7728 /* 7736 /*
7729 * No need to check if parent_ctx != NULL here; since we saw 7737 * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
7835 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 7843 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7836 7844
7837 mutex_lock(&swhash->hlist_mutex); 7845 mutex_lock(&swhash->hlist_mutex);
7846 swhash->online = true;
7838 if (swhash->hlist_refcount > 0) { 7847 if (swhash->hlist_refcount > 0) {
7839 struct swevent_hlist *hlist; 7848 struct swevent_hlist *hlist;
7840 7849
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
7857 7866
7858static void __perf_event_exit_context(void *__info) 7867static void __perf_event_exit_context(void *__info)
7859{ 7868{
7869 struct remove_event re = { .detach_group = false };
7860 struct perf_event_context *ctx = __info; 7870 struct perf_event_context *ctx = __info;
7861 struct perf_event *event;
7862 7871
7863 perf_pmu_rotate_stop(ctx->pmu); 7872 perf_pmu_rotate_stop(ctx->pmu);
7864 7873
7865 rcu_read_lock(); 7874 rcu_read_lock();
7866 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) 7875 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
7867 __perf_remove_from_context(event); 7876 __perf_remove_from_context(&re);
7868 rcu_read_unlock(); 7877 rcu_read_unlock();
7869} 7878}
7870 7879
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
7892 perf_event_exit_cpu_context(cpu); 7901 perf_event_exit_cpu_context(cpu);
7893 7902
7894 mutex_lock(&swhash->hlist_mutex); 7903 mutex_lock(&swhash->hlist_mutex);
7904 swhash->online = false;
7895 swevent_hlist_release(swhash); 7905 swevent_hlist_release(swhash);
7896 mutex_unlock(&swhash->hlist_mutex); 7906 mutex_unlock(&swhash->hlist_mutex);
7897} 7907}
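
The kernel/events/core.c hunks above fold the old detach/attach pair into a single ring_buffer_attach() that also accepts rb == NULL, so perf_mmap_close() and perf_event_set_output() stop touching event->rb directly and the missed-wakeup handling lives in one place. Below is a minimal userspace sketch of that shape; the names mirror the kernel's, but this is a standalone model with the RCU and locking details elided, not the kernel implementation.

/* Hypothetical model: one helper attaches, replaces, or detaches
 * (rb == NULL) the buffer, and issues the wakeup a waiter could have
 * missed while no buffer was published.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring_buffer { int refcount; };

struct event {
	struct ring_buffer *rb;
	int wakeups;		/* stands in for wake_up_all(&event->waitq) */
};

static struct ring_buffer *rb_get(struct ring_buffer *rb)
{
	if (rb)
		rb->refcount++;
	return rb;
}

static void rb_put(struct ring_buffer *rb)
{
	if (rb && --rb->refcount == 0)
		free(rb);
}

static void ring_buffer_attach(struct event *ev, struct ring_buffer *rb)
{
	struct ring_buffer *old_rb = ev->rb;

	ev->rb = rb_get(rb);	/* rcu_assign_pointer() upstream */
	if (old_rb) {
		rb_put(old_rb);
		ev->wakeups++;	/* cover a wakeup missed mid-swap */
	}
}

int main(void)
{
	struct event ev = { 0, 0 };
	struct ring_buffer *a = calloc(1, sizeof(*a));
	struct ring_buffer *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->refcount = b->refcount = 1;
	ring_buffer_attach(&ev, a);	/* attach */
	ring_buffer_attach(&ev, b);	/* replace: drops a, wakes waiters */
	ring_buffer_attach(&ev, NULL);	/* detach, as perf_mmap_close() now does */
	rb_put(a);
	rb_put(b);
	printf("wakeups issued: %d\n", ev.wakeups);
	return 0;
}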
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d9d8ece46a15..204d3d281809 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
2592 if (likely(prev->sched_class == class && 2592 if (likely(prev->sched_class == class &&
2593 rq->nr_running == rq->cfs.h_nr_running)) { 2593 rq->nr_running == rq->cfs.h_nr_running)) {
2594 p = fair_sched_class.pick_next_task(rq, prev); 2594 p = fair_sched_class.pick_next_task(rq, prev);
2595 if (likely(p && p != RETRY_TASK)) 2595 if (unlikely(p == RETRY_TASK))
2596 return p; 2596 goto again;
2597
2598 /* assumes fair_sched_class->next == idle_sched_class */
2599 if (unlikely(!p))
2600 p = idle_sched_class.pick_next_task(rq, prev);
2601
2602 return p;
2597 } 2603 }
2598 2604
2599again: 2605again:
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3124 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); 3130 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3125 dl_se->dl_throttled = 0; 3131 dl_se->dl_throttled = 0;
3126 dl_se->dl_new = 1; 3132 dl_se->dl_new = 1;
3133 dl_se->dl_yielded = 0;
3127} 3134}
3128 3135
3129static void __setscheduler_params(struct task_struct *p, 3136static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3639 * sys_sched_setattr - same as above, but with extended sched_attr 3646 * sys_sched_setattr - same as above, but with extended sched_attr
3640 * @pid: the pid in question. 3647 * @pid: the pid in question.
3641 * @uattr: structure containing the extended parameters. 3648 * @uattr: structure containing the extended parameters.
3649 * @flags: for future extension.
3642 */ 3650 */
3643SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, 3651SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
3644 unsigned int, flags) 3652 unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
3783 * @pid: the pid in question. 3791 * @pid: the pid in question.
3784 * @uattr: structure containing the extended parameters. 3792 * @uattr: structure containing the extended parameters.
3785 * @size: sizeof(attr) for fwd/bwd comp. 3793 * @size: sizeof(attr) for fwd/bwd comp.
3794 * @flags: for future extension.
3786 */ 3795 */
3787SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 3796SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
3788 unsigned int, size, unsigned int, flags) 3797 unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
6017 , 6026 ,
6018 .last_balance = jiffies, 6027 .last_balance = jiffies,
6019 .balance_interval = sd_weight, 6028 .balance_interval = sd_weight,
6029 .max_newidle_lb_cost = 0,
6030 .next_decay_max_lb_cost = jiffies,
6020 }; 6031 };
6021 SD_INIT_NAME(sd, NUMA); 6032 SD_INIT_NAME(sd, NUMA);
6022 sd->private = &tl->data; 6033 sd->private = &tl->data;
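
The pick_next_task() hunk keeps the all-fair fast path but now distinguishes RETRY_TASK (retry the full class walk) from NULL (fall straight to the idle class, relying on fair_sched_class->next == idle_sched_class). A rough standalone model of that control flow follows; the class stubs and their arguments are invented for illustration.

#include <stdio.h>

struct task { const char *name; };

static struct task retry_marker;
#define RETRY_TASK (&retry_marker)

/* Hypothetical fair-class pick: a negative count models "state changed
 * under us", forcing the caller to retry the slow path.
 */
static struct task *fair_pick(int nr_fair)
{
	static struct task fair_task = { "fair" };

	if (nr_fair < 0)
		return RETRY_TASK;
	return nr_fair ? &fair_task : NULL;
}

static struct task *idle_pick(void)
{
	static struct task idle_task = { "idle" };
	return &idle_task;
}

static struct task *pick_next_task(int nr_running, int nr_fair)
{
	struct task *p;

	if (nr_running == nr_fair) {	/* only fair tasks: fast path */
		p = fair_pick(nr_fair);
		if (p == RETRY_TASK)
			goto again;
		if (!p)			/* assumes idle is the next class */
			p = idle_pick();
		return p;
	}
again:
	/* slow path: walk every scheduling class (elided in this model) */
	return idle_pick();
}

int main(void)
{
	printf("%s\n", pick_next_task(1, 1)->name);	/* fair */
	printf("%s\n", pick_next_task(0, 0)->name);	/* idle via fast path */
	return 0;
}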
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5b9bb42b2d47..ab001b5d5048 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
210 */ 210 */
211void cpudl_cleanup(struct cpudl *cp) 211void cpudl_cleanup(struct cpudl *cp)
212{ 212{
213 /* 213 free_cpumask_var(cp->free_cpus);
214 * nothing to do for the moment
215 */
216} 214}
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8b836b376d91..3031bac8aa3e 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
70 int idx = 0; 70 int idx = 0;
71 int task_pri = convert_prio(p->prio); 71 int task_pri = convert_prio(p->prio);
72 72
73 if (task_pri >= MAX_RT_PRIO) 73 BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
74 return 0;
75 74
76 for (idx = 0; idx < task_pri; idx++) { 75 for (idx = 0; idx < task_pri; idx++) {
77 struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; 76 struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a95097cb4591..72fdf06ef865 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -332,50 +332,50 @@ out:
332 * softirq as those do not count in task exec_runtime any more. 332 * softirq as those do not count in task exec_runtime any more.
333 */ 333 */
334static void irqtime_account_process_tick(struct task_struct *p, int user_tick, 334static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
335 struct rq *rq) 335 struct rq *rq, int ticks)
336{ 336{
337 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); 337 cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
338 u64 cputime = (__force u64) cputime_one_jiffy;
338 u64 *cpustat = kcpustat_this_cpu->cpustat; 339 u64 *cpustat = kcpustat_this_cpu->cpustat;
339 340
340 if (steal_account_process_tick()) 341 if (steal_account_process_tick())
341 return; 342 return;
342 343
344 cputime *= ticks;
345 scaled *= ticks;
346
343 if (irqtime_account_hi_update()) { 347 if (irqtime_account_hi_update()) {
344 cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; 348 cpustat[CPUTIME_IRQ] += cputime;
345 } else if (irqtime_account_si_update()) { 349 } else if (irqtime_account_si_update()) {
346 cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; 350 cpustat[CPUTIME_SOFTIRQ] += cputime;
347 } else if (this_cpu_ksoftirqd() == p) { 351 } else if (this_cpu_ksoftirqd() == p) {
348 /* 352 /*
349 * ksoftirqd time do not get accounted in cpu_softirq_time. 353 * ksoftirqd time do not get accounted in cpu_softirq_time.
350 * So, we have to handle it separately here. 354 * So, we have to handle it separately here.
351 * Also, p->stime needs to be updated for ksoftirqd. 355 * Also, p->stime needs to be updated for ksoftirqd.
352 */ 356 */
353 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, 357 __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
354 CPUTIME_SOFTIRQ);
355 } else if (user_tick) { 358 } else if (user_tick) {
356 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); 359 account_user_time(p, cputime, scaled);
357 } else if (p == rq->idle) { 360 } else if (p == rq->idle) {
358 account_idle_time(cputime_one_jiffy); 361 account_idle_time(cputime);
359 } else if (p->flags & PF_VCPU) { /* System time or guest time */ 362 } else if (p->flags & PF_VCPU) { /* System time or guest time */
360 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); 363 account_guest_time(p, cputime, scaled);
361 } else { 364 } else {
362 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, 365 __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
363 CPUTIME_SYSTEM);
364 } 366 }
365} 367}
366 368
367static void irqtime_account_idle_ticks(int ticks) 369static void irqtime_account_idle_ticks(int ticks)
368{ 370{
369 int i;
370 struct rq *rq = this_rq(); 371 struct rq *rq = this_rq();
371 372
372 for (i = 0; i < ticks; i++) 373 irqtime_account_process_tick(current, 0, rq, ticks);
373 irqtime_account_process_tick(current, 0, rq);
374} 374}
375#else /* CONFIG_IRQ_TIME_ACCOUNTING */ 375#else /* CONFIG_IRQ_TIME_ACCOUNTING */
376static inline void irqtime_account_idle_ticks(int ticks) {} 376static inline void irqtime_account_idle_ticks(int ticks) {}
377static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick, 377static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
378 struct rq *rq) {} 378 struct rq *rq, int nr_ticks) {}
379#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ 379#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
380 380
381/* 381/*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
464 return; 464 return;
465 465
466 if (sched_clock_irqtime) { 466 if (sched_clock_irqtime) {
467 irqtime_account_process_tick(p, user_tick, rq); 467 irqtime_account_process_tick(p, user_tick, rq, 1);
468 return; 468 return;
469 } 469 }
470 470
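
The cputime.c change threads a tick count through irqtime_account_process_tick() so a batch of idle ticks is accounted with one multiplication instead of one call per jiffy. A toy model of the batching, assuming a 10 ms jiffy and a reduced set of buckets:

#include <stdio.h>

typedef unsigned long long u64;

enum { CPUTIME_USER, CPUTIME_SYSTEM, CPUTIME_IDLE, CPUTIME_NR };

static u64 cpustat[CPUTIME_NR];
static const u64 cputime_one_jiffy = 10000000ULL;	/* 10 ms in ns, assumed */

/* Scale one jiffy by 'ticks' up front, then classify once, instead of
 * calling a single-tick version in a loop as the old
 * irqtime_account_idle_ticks() did.
 */
static void account_process_tick(int user_tick, int idle, int ticks)
{
	u64 cputime = cputime_one_jiffy * ticks;

	if (idle)
		cpustat[CPUTIME_IDLE] += cputime;
	else if (user_tick)
		cpustat[CPUTIME_USER] += cputime;
	else
		cpustat[CPUTIME_SYSTEM] += cputime;
}

int main(void)
{
	account_process_tick(0, 1, 4);	/* four idle ticks, one call */
	account_process_tick(1, 0, 1);
	printf("idle=%llu user=%llu\n",
	       cpustat[CPUTIME_IDLE], cpustat[CPUTIME_USER]);
	return 0;
}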
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b08095786cb8..800e99b99075 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
528 sched_clock_tick(); 528 sched_clock_tick();
529 update_rq_clock(rq); 529 update_rq_clock(rq);
530 dl_se->dl_throttled = 0; 530 dl_se->dl_throttled = 0;
531 dl_se->dl_yielded = 0;
531 if (p->on_rq) { 532 if (p->on_rq) {
532 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 533 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
533 if (task_has_dl_policy(rq->curr)) 534 if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
893 * We make the task go to sleep until its current deadline by 894 * We make the task go to sleep until its current deadline by
894 * forcing its runtime to zero. This way, update_curr_dl() stops 895 * forcing its runtime to zero. This way, update_curr_dl() stops
895 * it and the bandwidth timer will wake it up and will give it 896 * it and the bandwidth timer will wake it up and will give it
896 * new scheduling parameters (thanks to dl_new=1). 897 * new scheduling parameters (thanks to dl_yielded=1).
897 */ 898 */
898 if (p->dl.runtime > 0) { 899 if (p->dl.runtime > 0) {
899 rq->curr->dl.dl_new = 1; 900 rq->curr->dl.dl_yielded = 1;
900 p->dl.runtime = 0; 901 p->dl.runtime = 0;
901 } 902 }
902 update_curr_dl(rq); 903 update_curr_dl(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7570dd969c28..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
6653 int this_cpu = this_rq->cpu; 6653 int this_cpu = this_rq->cpu;
6654 6654
6655 idle_enter_fair(this_rq); 6655 idle_enter_fair(this_rq);
6656
6656 /* 6657 /*
6657 * We must set idle_stamp _before_ calling idle_balance(), such that we 6658 * We must set idle_stamp _before_ calling idle_balance(), such that we
6658 * measure the duration of idle_balance() as idle time. 6659 * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
6705 6706
6706 raw_spin_lock(&this_rq->lock); 6707 raw_spin_lock(&this_rq->lock);
6707 6708
6709 if (curr_cost > this_rq->max_idle_balance_cost)
6710 this_rq->max_idle_balance_cost = curr_cost;
6711
6708 /* 6712 /*
6709 * While browsing the domains, we released the rq lock. 6713 * While browsing the domains, we released the rq lock, a task could
6710 * A task could have be enqueued in the meantime 6714 * have been enqueued in the meantime. Since we're not going idle,
6715 * pretend we pulled a task.
6711 */ 6716 */
6712 if (this_rq->cfs.h_nr_running && !pulled_task) { 6717 if (this_rq->cfs.h_nr_running && !pulled_task)
6713 pulled_task = 1; 6718 pulled_task = 1;
6714 goto out;
6715 }
6716 6719
6717 if (pulled_task || time_after(jiffies, this_rq->next_balance)) { 6720 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6718 /* 6721 /*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
6722 this_rq->next_balance = next_balance; 6725 this_rq->next_balance = next_balance;
6723 } 6726 }
6724 6727
6725 if (curr_cost > this_rq->max_idle_balance_cost)
6726 this_rq->max_idle_balance_cost = curr_cost;
6727
6728out: 6728out:
6729 /* Is there a task of a high priority class? */ 6729 /* Is there a task of a high priority class? */
6730 if (this_rq->nr_running != this_rq->cfs.h_nr_running && 6730 if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
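
The idle_balance() hunk moves the max_idle_balance_cost update ahead of the early "work arrived while we balanced" path, so the statistic is recorded even when the function bails out. A compact model of the reordered tail, with the surrounding locking elided:

#include <stdio.h>

static unsigned long max_idle_balance_cost;

/* Record the measured cost unconditionally, then handle the case where
 * a task was enqueued while the lock was dropped, so an early exit can
 * no longer skip the statistic.
 */
static int idle_balance(unsigned long curr_cost, int queued_meanwhile,
			int pulled_task)
{
	if (curr_cost > max_idle_balance_cost)
		max_idle_balance_cost = curr_cost;

	/* Not going idle after all: pretend we pulled a task. */
	if (queued_meanwhile && !pulled_task)
		pulled_task = 1;

	return pulled_task;
}

int main(void)
{
	printf("pulled=%d\n", idle_balance(123, 1, 0));
	printf("max cost=%lu\n", max_idle_balance_cost);
	return 0;
}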
diff --git a/mm/filemap.c b/mm/filemap.c
index 000a220e2a41..088358c8006b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
257{ 257{
258 int ret = 0; 258 int ret = 0;
259 /* Check for outstanding write errors */ 259 /* Check for outstanding write errors */
260 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) 260 if (test_bit(AS_ENOSPC, &mapping->flags) &&
261 test_and_clear_bit(AS_ENOSPC, &mapping->flags))
261 ret = -ENOSPC; 262 ret = -ENOSPC;
262 if (test_and_clear_bit(AS_EIO, &mapping->flags)) 263 if (test_bit(AS_EIO, &mapping->flags) &&
264 test_and_clear_bit(AS_EIO, &mapping->flags))
263 ret = -EIO; 265 ret = -EIO;
264 return ret; 266 return ret;
265} 267}
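
The filemap_check_errors() change is a common SMP micro-optimization: test_and_clear_bit() is a locked read-modify-write that dirties the cacheline even when the bit is already clear, so it is now guarded by a plain test_bit(). A standalone C11 sketch of the same pattern; the error codes are hard-coded here since this is not kernel code.

#include <stdatomic.h>
#include <stdio.h>

#define AS_EIO		(1u << 0)
#define AS_ENOSPC	(1u << 1)

/* Plain read first: the flags word is usually clean, and skipping the
 * atomic RMW avoids bouncing the cacheline between CPUs.
 */
static int test_and_clear_flag(atomic_uint *flags, unsigned int bit)
{
	if (!(atomic_load_explicit(flags, memory_order_relaxed) & bit))
		return 0;
	return (atomic_fetch_and(flags, ~bit) & bit) != 0;
}

static int check_errors(atomic_uint *flags)
{
	int ret = 0;

	if (test_and_clear_flag(flags, AS_ENOSPC))
		ret = -28;	/* -ENOSPC */
	if (test_and_clear_flag(flags, AS_EIO))
		ret = -5;	/* -EIO */
	return ret;
}

int main(void)
{
	atomic_uint flags = AS_EIO;

	printf("first:  %d\n", check_errors(&flags));	/* -5 */
	printf("second: %d\n", check_errors(&flags));	/* 0, no RMW done */
	return 0;
}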
diff --git a/mm/madvise.c b/mm/madvise.c
index 539eeb96b323..a402f8fdc68e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
195 for (; start < end; start += PAGE_SIZE) { 195 for (; start < end; start += PAGE_SIZE) {
196 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 196 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
197 197
198 page = find_get_page(mapping, index); 198 page = find_get_entry(mapping, index);
199 if (!radix_tree_exceptional_entry(page)) { 199 if (!radix_tree_exceptional_entry(page)) {
200 if (page) 200 if (page)
201 page_cache_release(page); 201 page_cache_release(page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c47dffdcb246..5177c6d4a2dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1077 1077
1078 rcu_read_lock(); 1078 rcu_read_lock();
1079 do { 1079 do {
1080 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1080 /*
1081 if (unlikely(!memcg)) 1081 * Page cache insertions can happen withou an
1082 * actual mm context, e.g. during disk probing
1083 * on boot, loopback IO, acct() writes etc.
1084 */
1085 if (unlikely(!mm))
1082 memcg = root_mem_cgroup; 1086 memcg = root_mem_cgroup;
1087 else {
1088 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1089 if (unlikely(!memcg))
1090 memcg = root_mem_cgroup;
1091 }
1083 } while (!css_tryget(&memcg->css)); 1092 } while (!css_tryget(&memcg->css));
1084 rcu_read_unlock(); 1093 rcu_read_unlock();
1085 return memcg; 1094 return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
3958 return 0; 3967 return 0;
3959 } 3968 }
3960 3969
3961 /* 3970 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3962 * Page cache insertions can happen without an actual mm 3971 if (!memcg)
3963 * context, e.g. during disk probing on boot. 3972 return -ENOMEM;
3964 */
3965 if (unlikely(!mm))
3966 memcg = root_mem_cgroup;
3967 else {
3968 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3969 if (!memcg)
3970 return -ENOMEM;
3971 }
3972 __mem_cgroup_commit_charge(memcg, page, 1, type, false); 3973 __mem_cgroup_commit_charge(memcg, page, 1, type, false);
3973 return 0; 3974 return 0;
3974} 3975}
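
The memcontrol.c hunks centralize the "no mm context" fallback (boot-time disk probing, loopback IO, acct() writes) inside get_mem_cgroup_from_mm(), so mem_cgroup_charge_file() can drop its duplicate root-cgroup check. A simplified model of pushing the default into the lookup helper; the field names are invented and the RCU and css refcounting are elided.

#include <stdio.h>
#include <stddef.h>

struct mem_cgroup { const char *name; };
struct mm_struct { struct mem_cgroup *owner_memcg; };

static struct mem_cgroup root_mem_cgroup = { "root" };

/* The lookup owns the fallback: callers can charge against whatever
 * this returns without re-checking for a missing mm.
 */
static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (!mm)			/* e.g. disk probe on boot */
		return &root_mem_cgroup;

	memcg = mm->owner_memcg;
	return memcg ? memcg : &root_mem_cgroup;
}

int main(void)
{
	struct mem_cgroup web = { "web" };
	struct mm_struct mm = { &web };

	printf("%s\n", get_mem_cgroup_from_mm(&mm)->name);	/* web */
	printf("%s\n", get_mem_cgroup_from_mm(NULL)->name);	/* root */
	return 0;
}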
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 35ef28acf137..9ccef39a9de2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1081 return 0; 1081 return 0;
1082 } else if (PageHuge(hpage)) { 1082 } else if (PageHuge(hpage)) {
1083 /* 1083 /*
1084 * Check "just unpoisoned", "filter hit", and 1084 * Check "filter hit" and "race with other subpage."
1085 * "race with other subpage."
1086 */ 1085 */
1087 lock_page(hpage); 1086 lock_page(hpage);
1088 if (!PageHWPoison(hpage) 1087 if (PageHWPoison(hpage)) {
1089 || (hwpoison_filter(p) && TestClearPageHWPoison(p)) 1088 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1090 || (p != hpage && TestSetPageHWPoison(hpage))) { 1089 || (p != hpage && TestSetPageHWPoison(hpage))) {
1091 atomic_long_sub(nr_pages, &num_poisoned_pages); 1090 atomic_long_sub(nr_pages, &num_poisoned_pages);
1092 return 0; 1091 unlock_page(hpage);
1092 return 0;
1093 }
1093 } 1094 }
1094 set_page_hwpoison_huge_page(hpage); 1095 set_page_hwpoison_huge_page(hpage);
1095 res = dequeue_hwpoisoned_huge_page(hpage); 1096 res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1152 */ 1153 */
1153 if (!PageHWPoison(p)) { 1154 if (!PageHWPoison(p)) {
1154 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1155 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
1156 atomic_long_sub(nr_pages, &num_poisoned_pages);
1157 put_page(hpage);
1155 res = 0; 1158 res = 0;
1156 goto out; 1159 goto out;
1157 } 1160 }
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 175273f38cb1..44ebd5c2cd4a 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
169 if (err < 0) 169 if (err < 0)
170 goto out_uninit_mvrp; 170 goto out_uninit_mvrp;
171 171
172 vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
172 err = register_netdevice(dev); 173 err = register_netdevice(dev);
173 if (err < 0) 174 if (err < 0)
174 goto out_uninit_mvrp; 175 goto out_uninit_mvrp;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 733ec283ed1b..019efb79708f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
493 } 493 }
494} 494}
495 495
496static int vlan_calculate_locking_subclass(struct net_device *real_dev)
497{
498 int subclass = 0;
499
500 while (is_vlan_dev(real_dev)) {
501 subclass++;
502 real_dev = vlan_dev_priv(real_dev)->real_dev;
503 }
504
505 return subclass;
506}
507
508static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
509{
510 int err = 0, subclass;
511
512 subclass = vlan_calculate_locking_subclass(to);
513
514 spin_lock_nested(&to->addr_list_lock, subclass);
515 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
516 if (!err)
517 __dev_set_rx_mode(to);
518 spin_unlock(&to->addr_list_lock);
519}
520
521static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
522{
523 int err = 0, subclass;
524
525 subclass = vlan_calculate_locking_subclass(to);
526
527 spin_lock_nested(&to->addr_list_lock, subclass);
528 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
529 if (!err)
530 __dev_set_rx_mode(to);
531 spin_unlock(&to->addr_list_lock);
532}
533
534static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) 496static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
535{ 497{
536 vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); 498 dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
537 vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); 499 dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
538} 500}
539 501
540/* 502/*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
562 netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); 524 netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
563} 525}
564 526
527static int vlan_dev_get_lock_subclass(struct net_device *dev)
528{
529 return vlan_dev_priv(dev)->nest_level;
530}
531
565static const struct header_ops vlan_header_ops = { 532static const struct header_ops vlan_header_ops = {
566 .create = vlan_dev_hard_header, 533 .create = vlan_dev_hard_header,
567 .rebuild = vlan_dev_rebuild_header, 534 .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
597static int vlan_dev_init(struct net_device *dev) 564static int vlan_dev_init(struct net_device *dev)
598{ 565{
599 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; 566 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
600 int subclass = 0;
601 567
602 netif_carrier_off(dev); 568 netif_carrier_off(dev);
603 569
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
646 612
647 SET_NETDEV_DEVTYPE(dev, &vlan_type); 613 SET_NETDEV_DEVTYPE(dev, &vlan_type);
648 614
649 subclass = vlan_calculate_locking_subclass(dev); 615 vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
650 vlan_dev_set_lockdep_class(dev, subclass);
651 616
652 vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); 617 vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
653 if (!vlan_dev_priv(dev)->vlan_pcpu_stats) 618 if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
819 .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, 784 .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
820#endif 785#endif
821 .ndo_fix_features = vlan_dev_fix_features, 786 .ndo_fix_features = vlan_dev_fix_features,
787 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
822}; 788};
823 789
824void vlan_setup(struct net_device *dev) 790void vlan_setup(struct net_device *dev)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index b3bd4ec3fd94..f04224c32005 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1545,6 +1545,8 @@ out_neigh:
1545 if ((orig_neigh_node) && (!is_single_hop_neigh)) 1545 if ((orig_neigh_node) && (!is_single_hop_neigh))
1546 batadv_orig_node_free_ref(orig_neigh_node); 1546 batadv_orig_node_free_ref(orig_neigh_node);
1547out: 1547out:
1548 if (router_ifinfo)
1549 batadv_neigh_ifinfo_free_ref(router_ifinfo);
1548 if (router) 1550 if (router)
1549 batadv_neigh_node_free_ref(router); 1551 batadv_neigh_node_free_ref(router);
1550 if (router_router) 1552 if (router_router)
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index b25fd64d727b..aa5d4946d0d7 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
940 * additional DAT answer may trigger kernel warnings about 940 * additional DAT answer may trigger kernel warnings about
941 * a packet coming from the wrong port. 941 * a packet coming from the wrong port.
942 */ 942 */
943 if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, 943 if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
944 BATADV_NO_FLAGS)) {
945 ret = true; 944 ret = true;
946 goto out; 945 goto out;
947 } 946 }
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index bcc4bea632fa..f14e54a05691 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
418 struct batadv_neigh_node *neigh_node) 418 struct batadv_neigh_node *neigh_node)
419{ 419{
420 struct batadv_priv *bat_priv; 420 struct batadv_priv *bat_priv;
421 struct batadv_hard_iface *primary_if; 421 struct batadv_hard_iface *primary_if = NULL;
422 struct batadv_frag_packet frag_header; 422 struct batadv_frag_packet frag_header;
423 struct sk_buff *skb_fragment; 423 struct sk_buff *skb_fragment;
424 unsigned mtu = neigh_node->if_incoming->net_dev->mtu; 424 unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
425 unsigned header_size = sizeof(frag_header); 425 unsigned header_size = sizeof(frag_header);
426 unsigned max_fragment_size, max_packet_size; 426 unsigned max_fragment_size, max_packet_size;
427 bool ret = false;
427 428
428 /* To avoid merge and refragmentation at next-hops we never send 429 /* To avoid merge and refragmentation at next-hops we never send
429 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE 430 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
483 skb->len + ETH_HLEN); 484 skb->len + ETH_HLEN);
484 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 485 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
485 486
486 return true; 487 ret = true;
488
487out_err: 489out_err:
488 return false; 490 if (primary_if)
491 batadv_hardif_free_ref(primary_if);
492
493 return ret;
489} 494}
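
The batadv_frag_send_packet() rework is the standard C single-exit idiom: initialize the reference to NULL and the result to failure up front, funnel every error through one label, and release the reference there, so no early return can leak primary_if. A generic sketch of the idiom under those assumptions, with an invented resource type:

#include <stdio.h>
#include <stdlib.h>

struct resource { int refcount; };

static struct resource *resource_get(void)
{
	struct resource *r = calloc(1, sizeof(*r));

	if (r)
		r->refcount = 1;
	return r;
}

static void resource_put(struct resource *r)
{
	if (r && --r->refcount == 0)
		free(r);
}

/* 'res' starts NULL and 'ret' starts failure, so the out label is safe
 * to reach from any point, as in the reworked batadv_frag_send_packet().
 */
static int do_send(int payload_ok)
{
	struct resource *res = NULL;
	int ret = 0;

	if (!payload_ok)
		goto out;

	res = resource_get();
	if (!res)
		goto out;

	/* ... transmit using res ... */
	ret = 1;
out:
	resource_put(res);
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_send(1), do_send(0));
	return 0;
}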
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index c835e137423b..90cff585b37d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -42,8 +42,10 @@
42 42
43static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) 43static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
44{ 44{
45 if (atomic_dec_and_test(&gw_node->refcount)) 45 if (atomic_dec_and_test(&gw_node->refcount)) {
46 batadv_orig_node_free_ref(gw_node->orig_node);
46 kfree_rcu(gw_node, rcu); 47 kfree_rcu(gw_node, rcu);
48 }
47} 49}
48 50
49static struct batadv_gw_node * 51static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
406 if (gateway->bandwidth_down == 0) 408 if (gateway->bandwidth_down == 0)
407 return; 409 return;
408 410
411 if (!atomic_inc_not_zero(&orig_node->refcount))
412 return;
413
409 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); 414 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
410 if (!gw_node) 415 if (!gw_node) {
416 batadv_orig_node_free_ref(orig_node);
411 return; 417 return;
418 }
412 419
413 INIT_HLIST_NODE(&gw_node->list); 420 INIT_HLIST_NODE(&gw_node->list);
414 gw_node->orig_node = orig_node; 421 gw_node->orig_node = orig_node;
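
The gateway_client.c hunks pair the pointer stored in gw_node with a counted reference: take it (atomic_inc_not_zero upstream) before the kzalloc() that will store it, give it back if the allocation fails, and drop it when the gw_node itself is freed. A small model of keeping the count balanced across a fallible allocation; the helpers are invented stand-ins, not the batman-adv API.

#include <stdio.h>
#include <stdlib.h>

struct orig_node { int refcount; };

struct gw_node { struct orig_node *orig; };

static int orig_get(struct orig_node *o)
{
	if (o->refcount == 0)		/* atomic_inc_not_zero() upstream */
		return 0;
	o->refcount++;
	return 1;
}

static void orig_put(struct orig_node *o)
{
	if (--o->refcount == 0)
		free(o);
}

/* Take the reference first; every later exit path must balance it. */
static struct gw_node *gw_node_add(struct orig_node *orig)
{
	struct gw_node *gw;

	if (!orig_get(orig))
		return NULL;

	gw = calloc(1, sizeof(*gw));
	if (!gw) {
		orig_put(orig);		/* allocation failed: give it back */
		return NULL;
	}
	gw->orig = orig;
	return gw;
}

static void gw_node_free(struct gw_node *gw)
{
	orig_put(gw->orig);		/* the reference taken in gw_node_add() */
	free(gw);
}

int main(void)
{
	struct orig_node *orig = calloc(1, sizeof(*orig));
	struct gw_node *gw;

	if (!orig)
		return 1;
	orig->refcount = 1;
	gw = gw_node_add(orig);
	if (gw)
		gw_node_free(gw);
	orig_put(orig);			/* last reference frees orig */
	return 0;
}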
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b851cc580853..fbda6b54baff 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
83 return true; 83 return true;
84 84
85 /* no more parents..stop recursion */ 85 /* no more parents..stop recursion */
86 if (net_dev->iflink == net_dev->ifindex) 86 if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
87 return false; 87 return false;
88 88
89 /* recurse over the parent device */ 89 /* recurse over the parent device */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ffd9dfbd9b0e..6a484514cd3e 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
501static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu) 501static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
502{ 502{
503 struct batadv_orig_ifinfo *orig_ifinfo; 503 struct batadv_orig_ifinfo *orig_ifinfo;
504 struct batadv_neigh_node *router;
504 505
505 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu); 506 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
506 507
507 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT) 508 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
508 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing); 509 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
509 510
511 /* this is the last reference to this object */
512 router = rcu_dereference_protected(orig_ifinfo->router, true);
513 if (router)
514 batadv_neigh_node_free_ref_now(router);
510 kfree(orig_ifinfo); 515 kfree(orig_ifinfo);
511} 516}
512 517
@@ -702,6 +707,47 @@ free_orig_node:
702} 707}
703 708
704/** 709/**
710 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
711 * @bat_priv: the bat priv with all the soft interface information
712 * @neigh: orig node which is to be checked
713 */
714static void
715batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
716 struct batadv_neigh_node *neigh)
717{
718 struct batadv_neigh_ifinfo *neigh_ifinfo;
719 struct batadv_hard_iface *if_outgoing;
720 struct hlist_node *node_tmp;
721
722 spin_lock_bh(&neigh->ifinfo_lock);
723
724 /* for all ifinfo objects for this neighbor */
725 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
726 &neigh->ifinfo_list, list) {
727 if_outgoing = neigh_ifinfo->if_outgoing;
728
729 /* always keep the default interface */
730 if (if_outgoing == BATADV_IF_DEFAULT)
731 continue;
732
733 /* don't purge if the interface is not (going) down */
734 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
735 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
736 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
737 continue;
738
739 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
740 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
741 neigh->addr, if_outgoing->net_dev->name);
742
743 hlist_del_rcu(&neigh_ifinfo->list);
744 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
745 }
746
747 spin_unlock_bh(&neigh->ifinfo_lock);
748}
749
750/**
705 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator 751 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
706 * @bat_priv: the bat priv with all the soft interface information 752 * @bat_priv: the bat priv with all the soft interface information
707 * @orig_node: orig node which is to be checked 753 * @orig_node: orig node which is to be checked
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
800 846
801 hlist_del_rcu(&neigh_node->list); 847 hlist_del_rcu(&neigh_node->list);
802 batadv_neigh_node_free_ref(neigh_node); 848 batadv_neigh_node_free_ref(neigh_node);
849 } else {
850 /* only necessary if not the whole neighbor is to be
851 * deleted, but some interface has been removed.
852 */
853 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
803 } 854 }
804 } 855 }
805 856
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
857{ 908{
858 struct batadv_neigh_node *best_neigh_node; 909 struct batadv_neigh_node *best_neigh_node;
859 struct batadv_hard_iface *hard_iface; 910 struct batadv_hard_iface *hard_iface;
860 bool changed; 911 bool changed_ifinfo, changed_neigh;
861 912
862 if (batadv_has_timed_out(orig_node->last_seen, 913 if (batadv_has_timed_out(orig_node->last_seen,
863 2 * BATADV_PURGE_TIMEOUT)) { 914 2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
867 jiffies_to_msecs(orig_node->last_seen)); 918 jiffies_to_msecs(orig_node->last_seen));
868 return true; 919 return true;
869 } 920 }
870 changed = batadv_purge_orig_ifinfo(bat_priv, orig_node); 921 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
871 changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node); 922 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
872 923
873 if (!changed) 924 if (!changed_ifinfo && !changed_neigh)
874 return false; 925 return false;
875 926
876 /* first for NULL ... */ 927 /* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1028 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface); 1079 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1029 1080
1030out: 1081out:
1031 batadv_hardif_free_ref(hard_iface); 1082 if (hard_iface)
1083 batadv_hardif_free_ref(hard_iface);
1032 return 0; 1084 return 0;
1033} 1085}
1034 1086
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 80e1b0f60a30..2acf7fa1fec6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
859 return NF_STOLEN; 859 return NF_STOLEN;
860} 860}
861 861
862#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4) 862#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
863static int br_nf_dev_queue_xmit(struct sk_buff *skb) 863static int br_nf_dev_queue_xmit(struct sk_buff *skb)
864{ 864{
865 int ret; 865 int ret;
866 866
867 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && 867 if (skb->protocol == htons(ETH_P_IP) &&
868 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && 868 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
869 !skb_is_gso(skb)) { 869 !skb_is_gso(skb)) {
870 if (br_parse_ip_options(skb)) 870 if (br_parse_ip_options(skb))
diff --git a/net/core/dev.c b/net/core/dev.c
index d2c8a06b3a98..9abc503b19b7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
2418 * 2. No high memory really exists on this machine. 2418 * 2. No high memory really exists on this machine.
2419 */ 2419 */
2420 2420
2421static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) 2421static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2422{ 2422{
2423#ifdef CONFIG_HIGHMEM 2423#ifdef CONFIG_HIGHMEM
2424 int i; 2424 int i;
@@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2493} 2493}
2494 2494
2495static netdev_features_t harmonize_features(struct sk_buff *skb, 2495static netdev_features_t harmonize_features(struct sk_buff *skb,
2496 const struct net_device *dev, 2496 netdev_features_t features)
2497 netdev_features_t features)
2498{ 2497{
2499 int tmp; 2498 int tmp;
2500 2499
2501 if (skb->ip_summed != CHECKSUM_NONE && 2500 if (skb->ip_summed != CHECKSUM_NONE &&
2502 !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { 2501 !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
2503 features &= ~NETIF_F_ALL_CSUM; 2502 features &= ~NETIF_F_ALL_CSUM;
2504 } else if (illegal_highdma(dev, skb)) { 2503 } else if (illegal_highdma(skb->dev, skb)) {
2505 features &= ~NETIF_F_SG; 2504 features &= ~NETIF_F_SG;
2506 } 2505 }
2507 2506
2508 return features; 2507 return features;
2509} 2508}
2510 2509
2511netdev_features_t netif_skb_dev_features(struct sk_buff *skb, 2510netdev_features_t netif_skb_features(struct sk_buff *skb)
2512 const struct net_device *dev)
2513{ 2511{
2514 __be16 protocol = skb->protocol; 2512 __be16 protocol = skb->protocol;
2515 netdev_features_t features = dev->features; 2513 netdev_features_t features = skb->dev->features;
2516 2514
2517 if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) 2515 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2518 features &= ~NETIF_F_GSO_MASK; 2516 features &= ~NETIF_F_GSO_MASK;
2519 2517
2520 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2518 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2521 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2519 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2522 protocol = veh->h_vlan_encapsulated_proto; 2520 protocol = veh->h_vlan_encapsulated_proto;
2523 } else if (!vlan_tx_tag_present(skb)) { 2521 } else if (!vlan_tx_tag_present(skb)) {
2524 return harmonize_features(skb, dev, features); 2522 return harmonize_features(skb, features);
2525 } 2523 }
2526 2524
2527 features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | 2525 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2528 NETIF_F_HW_VLAN_STAG_TX); 2526 NETIF_F_HW_VLAN_STAG_TX);
2529 2527
2530 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) 2528 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
2532 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | 2530 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2533 NETIF_F_HW_VLAN_STAG_TX; 2531 NETIF_F_HW_VLAN_STAG_TX;
2534 2532
2535 return harmonize_features(skb, dev, features); 2533 return harmonize_features(skb, features);
2536} 2534}
2537EXPORT_SYMBOL(netif_skb_dev_features); 2535EXPORT_SYMBOL(netif_skb_features);
2538 2536
2539int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2537int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2540 struct netdev_queue *txq) 2538 struct netdev_queue *txq)
@@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
3953 } 3951 }
3954 NAPI_GRO_CB(skb)->count = 1; 3952 NAPI_GRO_CB(skb)->count = 1;
3955 NAPI_GRO_CB(skb)->age = jiffies; 3953 NAPI_GRO_CB(skb)->age = jiffies;
3954 NAPI_GRO_CB(skb)->last = skb;
3956 skb_shinfo(skb)->gso_size = skb_gro_len(skb); 3955 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3957 skb->next = napi->gro_list; 3956 skb->next = napi->gro_list;
3958 napi->gro_list = skb; 3957 napi->gro_list = skb;
@@ -4543,6 +4542,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
4543EXPORT_SYMBOL(netdev_adjacent_get_private); 4542EXPORT_SYMBOL(netdev_adjacent_get_private);
4544 4543
4545/** 4544/**
4545 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4546 * @dev: device
4547 * @iter: list_head ** of the current position
4548 *
4549 * Gets the next device from the dev's upper list, starting from iter
4550 * position. The caller must hold RCU read lock.
4551 */
4552struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4553 struct list_head **iter)
4554{
4555 struct netdev_adjacent *upper;
4556
4557 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4558
4559 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4560
4561 if (&upper->list == &dev->adj_list.upper)
4562 return NULL;
4563
4564 *iter = &upper->list;
4565
4566 return upper->dev;
4567}
4568EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4569
4570/**
4546 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list 4571 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4547 * @dev: device 4572 * @dev: device
4548 * @iter: list_head ** of the current position 4573 * @iter: list_head ** of the current position
@@ -4624,6 +4649,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4624EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); 4649EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4625 4650
4626/** 4651/**
4652 * netdev_lower_get_next - Get the next device from the lower neighbour
4653 * list
4654 * @dev: device
4655 * @iter: list_head ** of the current position
4656 *
4657 * Gets the next device from the dev's lower neighbour
4658 * list, starting from iter position. The caller must hold RTNL lock or
4659 * its own locking that guarantees that the neighbour lower
4660 * list will remain unchanged.
4661 */
4662void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4663{
4664 struct netdev_adjacent *lower;
4665
4666 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4667
4668 if (&lower->list == &dev->adj_list.lower)
4669 return NULL;
4670
4671 *iter = &lower->list;
4672
4673 return lower->dev;
4674}
4675EXPORT_SYMBOL(netdev_lower_get_next);
4676
4677/**
4627 * netdev_lower_get_first_private_rcu - Get the first ->private from the 4678 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4628 * lower neighbour list, RCU 4679 * lower neighbour list, RCU
4629 * variant 4680 * variant
@@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
5073} 5124}
5074EXPORT_SYMBOL(netdev_lower_dev_get_private); 5125EXPORT_SYMBOL(netdev_lower_dev_get_private);
5075 5126
5127
5128int dev_get_nest_level(struct net_device *dev,
5129 bool (*type_check)(struct net_device *dev))
5130{
5131 struct net_device *lower = NULL;
5132 struct list_head *iter;
5133 int max_nest = -1;
5134 int nest;
5135
5136 ASSERT_RTNL();
5137
5138 netdev_for_each_lower_dev(dev, lower, iter) {
5139 nest = dev_get_nest_level(lower, type_check);
5140 if (max_nest < nest)
5141 max_nest = nest;
5142 }
5143
5144 if (type_check(dev))
5145 max_nest++;
5146
5147 return max_nest;
5148}
5149EXPORT_SYMBOL(dev_get_nest_level);
5150
5076static void dev_change_rx_flags(struct net_device *dev, int flags) 5151static void dev_change_rx_flags(struct net_device *dev, int flags)
5077{ 5152{
5078 const struct net_device_ops *ops = dev->netdev_ops; 5153 const struct net_device_ops *ops = dev->netdev_ops;
@@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev)
5238 if (ops->ndo_set_rx_mode) 5313 if (ops->ndo_set_rx_mode)
5239 ops->ndo_set_rx_mode(dev); 5314 ops->ndo_set_rx_mode(dev);
5240} 5315}
5241EXPORT_SYMBOL(__dev_set_rx_mode);
5242 5316
5243void dev_set_rx_mode(struct net_device *dev) 5317void dev_set_rx_mode(struct net_device *dev)
5244{ 5318{
@@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net)
5543 5617
5544/* Delayed registration/unregisteration */ 5618/* Delayed registration/unregisteration */
5545static LIST_HEAD(net_todo_list); 5619static LIST_HEAD(net_todo_list);
5546static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); 5620DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5547 5621
5548static void net_set_todo(struct net_device *dev) 5622static void net_set_todo(struct net_device *dev)
5549{ 5623{
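
dev_get_nest_level() above computes a lockdep subclass as the depth of the deepest chain of same-type devices below a device; register_vlan_dev() then caches that value plus one as the new device's nest_level, and vlan_dev_get_lock_subclass() hands it to lockdep. A self-contained model of the recursion on a toy device tree; the structures are invented for illustration and carry none of the RTNL or adjacency-list machinery.

#include <stdio.h>

#define MAX_LOWER 4

struct net_device {
	int is_vlan;
	int nr_lower;
	struct net_device *lower[MAX_LOWER];
};

/* Depth of the deepest chain of vlan devices at or below dev: plain
 * ethernet yields -1, a vlan on ethernet 0, vlan-over-vlan 1;
 * register_vlan_dev() stores this value + 1 as nest_level.
 */
static int dev_get_nest_level(struct net_device *dev)
{
	int max_nest = -1, i;

	for (i = 0; i < dev->nr_lower; i++) {
		int nest = dev_get_nest_level(dev->lower[i]);

		if (nest > max_nest)
			max_nest = nest;
	}
	if (dev->is_vlan)
		max_nest++;
	return max_nest;
}

int main(void)
{
	struct net_device eth = { 0, 0, { 0 } };
	struct net_device vlan1 = { 1, 1, { &eth } };
	struct net_device vlan2 = { 1, 1, { &vlan1 } };	/* vlan over vlan */

	printf("nest(eth)   = %d\n", dev_get_nest_level(&eth));	/* -1 */
	printf("nest(vlan2) = %d\n", dev_get_nest_level(&vlan2));	/* 1 */
	return 0;
}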
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8f8a96ef9f3f..32d872eec7f5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
1248 neigh->updated = jiffies; 1248 neigh->updated = jiffies;
1249 if (!(neigh->nud_state & NUD_FAILED)) 1249 if (!(neigh->nud_state & NUD_FAILED))
1250 return; 1250 return;
1251 neigh->nud_state = NUD_PROBE; 1251 neigh->nud_state = NUD_INCOMPLETE;
1252 atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES)); 1252 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1253 neigh_add_timer(neigh, 1253 neigh_add_timer(neigh,
1254 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME)); 1254 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1255} 1255}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 81d3a9a08453..7c8ffd974961 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -24,7 +24,7 @@
24 24
25static LIST_HEAD(pernet_list); 25static LIST_HEAD(pernet_list);
26static struct list_head *first_device = &pernet_list; 26static struct list_head *first_device = &pernet_list;
27static DEFINE_MUTEX(net_mutex); 27DEFINE_MUTEX(net_mutex);
28 28
29LIST_HEAD(net_namespace_list); 29LIST_HEAD(net_namespace_list);
30EXPORT_SYMBOL_GPL(net_namespace_list); 30EXPORT_SYMBOL_GPL(net_namespace_list);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9837bebf93ce..2d8d8fcfa060 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
353} 353}
354EXPORT_SYMBOL_GPL(__rtnl_link_unregister); 354EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
355 355
356/* Return with the rtnl_lock held when there are no network
357 * devices unregistering in any network namespace.
358 */
359static void rtnl_lock_unregistering_all(void)
360{
361 struct net *net;
362 bool unregistering;
363 DEFINE_WAIT(wait);
364
365 for (;;) {
366 prepare_to_wait(&netdev_unregistering_wq, &wait,
367 TASK_UNINTERRUPTIBLE);
368 unregistering = false;
369 rtnl_lock();
370 for_each_net(net) {
371 if (net->dev_unreg_count > 0) {
372 unregistering = true;
373 break;
374 }
375 }
376 if (!unregistering)
377 break;
378 __rtnl_unlock();
379 schedule();
380 }
381 finish_wait(&netdev_unregistering_wq, &wait);
382}
383
356/** 384/**
357 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. 385 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
358 * @ops: struct rtnl_link_ops * to unregister 386 * @ops: struct rtnl_link_ops * to unregister
359 */ 387 */
360void rtnl_link_unregister(struct rtnl_link_ops *ops) 388void rtnl_link_unregister(struct rtnl_link_ops *ops)
361{ 389{
362 rtnl_lock(); 390 /* Close the race with cleanup_net() */
391 mutex_lock(&net_mutex);
392 rtnl_lock_unregistering_all();
363 __rtnl_link_unregister(ops); 393 __rtnl_link_unregister(ops);
364 rtnl_unlock(); 394 rtnl_unlock();
395 mutex_unlock(&net_mutex);
365} 396}
366EXPORT_SYMBOL_GPL(rtnl_link_unregister); 397EXPORT_SYMBOL_GPL(rtnl_link_unregister);
367 398
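
rtnl_lock_unregistering_all() above is the kernel's open-coded sleep-until-condition loop: prepare_to_wait(), take the lock, test every namespace, and either return with the lock held or drop it and schedule(). The kernel hand-rolls it because it must release rtnl while asleep; the natural userspace analog is a condition-variable wait. A sketch with hypothetical names follows (build with -lpthread).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t unregistering_wq = PTHREAD_COND_INITIALIZER;
static int dev_unreg_count;

/* Return with 'rtnl' held once no devices are unregistering, the shape
 * of rtnl_lock_unregistering_all().
 */
static void lock_unregistering_all(void)
{
	pthread_mutex_lock(&rtnl);
	while (dev_unreg_count > 0)
		pthread_cond_wait(&unregistering_wq, &rtnl);
}

static void *unregister_done(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&rtnl);
	dev_unreg_count--;
	pthread_cond_broadcast(&unregistering_wq);
	pthread_mutex_unlock(&rtnl);
	return NULL;
}

int main(void)
{
	pthread_t t;

	dev_unreg_count = 1;
	pthread_create(&t, NULL, unregister_done, NULL);
	lock_unregistering_all();	/* blocks until the count hits zero */
	printf("all quiet: %d\n", dev_unreg_count);
	pthread_mutex_unlock(&rtnl);
	pthread_join(t, NULL);
	return 0;
}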
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1b62343f5837..8383b2bddeb9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3076 if (unlikely(p->len + len >= 65536)) 3076 if (unlikely(p->len + len >= 65536))
3077 return -E2BIG; 3077 return -E2BIG;
3078 3078
3079 lp = NAPI_GRO_CB(p)->last ?: p; 3079 lp = NAPI_GRO_CB(p)->last;
3080 pinfo = skb_shinfo(lp); 3080 pinfo = skb_shinfo(lp);
3081 3081
3082 if (headlen <= offset) { 3082 if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
3192 3192
3193 __skb_pull(skb, offset); 3193 __skb_pull(skb, offset);
3194 3194
3195 if (!NAPI_GRO_CB(p)->last) 3195 if (NAPI_GRO_CB(p)->last == p)
3196 skb_shinfo(p)->frag_list = skb; 3196 skb_shinfo(p)->frag_list = skb;
3197 else 3197 else
3198 NAPI_GRO_CB(p)->last->next = skb; 3198 NAPI_GRO_CB(p)->last->next = skb;
diff --git a/net/core/utils.c b/net/core/utils.c
index 2f737bf90b3f..eed34338736c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
348{ 348{
349 struct __net_random_once_work *work = 349 struct __net_random_once_work *work =
350 container_of(w, struct __net_random_once_work, work); 350 container_of(w, struct __net_random_once_work, work);
351 if (!static_key_enabled(work->key)) 351 BUG_ON(!static_key_enabled(work->key));
352 static_key_slow_inc(work->key); 352 static_key_slow_dec(work->key);
353 kfree(work); 353 kfree(work);
354} 354}
355 355
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
367} 367}
368 368
369bool __net_get_random_once(void *buf, int nbytes, bool *done, 369bool __net_get_random_once(void *buf, int nbytes, bool *done,
370 struct static_key *done_key) 370 struct static_key *once_key)
371{ 371{
372 static DEFINE_SPINLOCK(lock); 372 static DEFINE_SPINLOCK(lock);
373 unsigned long flags; 373 unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
382 *done = true; 382 *done = true;
383 spin_unlock_irqrestore(&lock, flags); 383 spin_unlock_irqrestore(&lock, flags);
384 384
385 __net_random_once_disable_jump(done_key); 385 __net_random_once_disable_jump(once_key);
386 386
387 return true; 387 return true;
388} 388}
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0eb5d5e76dfb..5db37cef50a9 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
406 goto out_free; 406 goto out_free;
407 } 407 }
408 408
409 chip_index = 0; 409 chip_index = -1;
410 for_each_available_child_of_node(np, child) { 410 for_each_available_child_of_node(np, child) {
411 chip_index++;
411 cd = &pd->chip[chip_index]; 412 cd = &pd->chip[chip_index];
412 413
413 cd->mii_bus = &mdio_bus->dev; 414 cd->mii_bus = &mdio_bus->dev;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8c54870db792..6d6dd345bc4d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1650,6 +1650,39 @@ static int __init init_ipv4_mibs(void)
1650 return register_pernet_subsys(&ipv4_mib_ops); 1650 return register_pernet_subsys(&ipv4_mib_ops);
1651} 1651}
1652 1652
1653static __net_init int inet_init_net(struct net *net)
1654{
1655 /*
1656 * Set defaults for local port range
1657 */
1658 seqlock_init(&net->ipv4.ip_local_ports.lock);
1659 net->ipv4.ip_local_ports.range[0] = 32768;
1660 net->ipv4.ip_local_ports.range[1] = 61000;
1661
1662 seqlock_init(&net->ipv4.ping_group_range.lock);
1663 /*
1664 * Sane defaults - nobody may create ping sockets.
1665 * Boot scripts should set this to distro-specific group.
1666 */
1667 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1668 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1669 return 0;
1670}
1671
1672static __net_exit void inet_exit_net(struct net *net)
1673{
1674}
1675
1676static __net_initdata struct pernet_operations af_inet_ops = {
1677 .init = inet_init_net,
1678 .exit = inet_exit_net,
1679};
1680
1681static int __init init_inet_pernet_ops(void)
1682{
1683 return register_pernet_subsys(&af_inet_ops);
1684}
1685
1653static int ipv4_proc_init(void); 1686static int ipv4_proc_init(void);
1654 1687
1655/* 1688/*
@@ -1794,6 +1827,9 @@ static int __init inet_init(void)
1794 if (ip_mr_init()) 1827 if (ip_mr_init())
1795 pr_crit("%s: Cannot init ipv4 mroute\n", __func__); 1828 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1796#endif 1829#endif
1830
1831 if (init_inet_pernet_ops())
1832 pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
1797 /* 1833 /*
1798 * Initialise per-cpu ipv4 mibs 1834 * Initialise per-cpu ipv4 mibs
1799 */ 1835 */
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 8a043f03c88e..b10cd43a4722 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
821 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 821 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
822 if (fi == NULL) 822 if (fi == NULL)
823 goto failure; 823 goto failure;
824 fib_info_cnt++;
824 if (cfg->fc_mx) { 825 if (cfg->fc_mx) {
825 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 826 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
826 if (!fi->fib_metrics) 827 if (!fi->fib_metrics)
827 goto failure; 828 goto failure;
828 } else 829 } else
829 fi->fib_metrics = (u32 *) dst_default_metrics; 830 fi->fib_metrics = (u32 *) dst_default_metrics;
830 fib_info_cnt++;
831 831
832 fi->fib_net = hold_net(net); 832 fi->fib_net = hold_net(net);
833 fi->fib_protocol = cfg->fc_protocol; 833 fi->fib_protocol = cfg->fc_protocol;
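
Moving fib_info_cnt++ up next to the kzalloc() keeps the counter paired with the shared failure path: free_fib_info() decrements it, so with the old placement a failed metrics allocation would reach that path before the increment had run. A self-contained sketch of the invariant (the obj_* names are hypothetical stand-ins for fib_create_info()/free_fib_info()):

#include <stdio.h>
#include <stdlib.h>

static int obj_cnt;     /* counts live objects, like fib_info_cnt */

struct obj { int *metrics; };

static void obj_free(struct obj *o)
{
        if (!o)
                return;
        obj_cnt--;                      /* mirrors free_fib_info() */
        free(o->metrics);
        free(o);
}

static struct obj *obj_create(int want_metrics)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return NULL;
        obj_cnt++;      /* paired with obj_free() from this point on */

        if (want_metrics) {
                o->metrics = calloc(16, sizeof(int));
                if (!o->metrics)
                        goto failure;   /* counter stays balanced */
        }
        return o;

failure:
        obj_free(o);
        return NULL;
}

int main(void)
{
        struct obj *o = obj_create(1);

        obj_free(o);
        printf("live objects: %d\n", obj_cnt);  /* prints 0 */
        return 0;
}
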
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 0d1e2cb877ec..a56b8e6e866a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
37 unsigned int seq; 37 unsigned int seq;
38 38
39 do { 39 do {
40 seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); 40 seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
41 41
42 *low = net->ipv4.sysctl_local_ports.range[0]; 42 *low = net->ipv4.ip_local_ports.range[0];
43 *high = net->ipv4.sysctl_local_ports.range[1]; 43 *high = net->ipv4.ip_local_ports.range[1];
44 } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); 44 } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
45} 45}
46EXPORT_SYMBOL(inet_get_local_port_range); 46EXPORT_SYMBOL(inet_get_local_port_range);
47 47
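
The rename to ip_local_ports above does not change the protocol readers follow: sample the sequence counter, copy the range, and retry if a writer got in between. A minimal single-writer model of that read_seqbegin()/read_seqretry() loop in C11 atomics (this shows the retry protocol only, not the kernel's seqlock implementation):

#include <stdatomic.h>
#include <stdio.h>

struct seq_range {
        atomic_uint seq;        /* odd while a write is in flight */
        int lo, hi;
};

static unsigned int read_begin(struct seq_range *r)
{
        unsigned int s;

        while ((s = atomic_load_explicit(&r->seq, memory_order_acquire)) & 1)
                ;               /* writer active, spin */
        return s;
}

static int read_retry(struct seq_range *r, unsigned int s)
{
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&r->seq, memory_order_relaxed) != s;
}

static void write_range(struct seq_range *r, int lo, int hi)
{
        atomic_fetch_add_explicit(&r->seq, 1, memory_order_relaxed); /* odd */
        atomic_thread_fence(memory_order_release);
        r->lo = lo;
        r->hi = hi;
        atomic_fetch_add_explicit(&r->seq, 1, memory_order_release); /* even */
}

int main(void)
{
        struct seq_range r = { .lo = 32768, .hi = 61000 };
        unsigned int s;
        int lo, hi;

        write_range(&r, 32768, 60999);
        do {
                s = read_begin(&r);
                lo = r.lo;      /* torn reads are discarded by the retry */
                hi = r.hi;
        } while (read_retry(&r, s));
        printf("%d-%d\n", lo, hi);
        return 0;
}
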
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index be8abe73bb9f..6f111e48e11c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -42,12 +42,12 @@
42static bool ip_may_fragment(const struct sk_buff *skb) 42static bool ip_may_fragment(const struct sk_buff *skb)
43{ 43{
44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || 44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
45 !skb->local_df; 45 skb->local_df;
46} 46}
47 47
48static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) 48static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
49{ 49{
50 if (skb->len <= mtu || skb->local_df) 50 if (skb->len <= mtu)
51 return false; 51 return false;
52 52
53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
56 return true; 56 return true;
57} 57}
58 58
59static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
60{
61 unsigned int mtu;
62
63 if (skb->local_df || !skb_is_gso(skb))
64 return false;
65
66 mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
67
68 /* if seglen > mtu, do software segmentation for IP fragmentation on
69 * output. DF bit cannot be set since ip_forward would have sent
70 * icmp error.
71 */
72 return skb_gso_network_seglen(skb) > mtu;
73}
74
75/* called if GSO skb needs to be fragmented on forward */
76static int ip_forward_finish_gso(struct sk_buff *skb)
77{
78 struct dst_entry *dst = skb_dst(skb);
79 netdev_features_t features;
80 struct sk_buff *segs;
81 int ret = 0;
82
83 features = netif_skb_dev_features(skb, dst->dev);
84 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
85 if (IS_ERR(segs)) {
86 kfree_skb(skb);
87 return -ENOMEM;
88 }
89
90 consume_skb(skb);
91
92 do {
93 struct sk_buff *nskb = segs->next;
94 int err;
95
96 segs->next = NULL;
97 err = dst_output(segs);
98
99 if (err && ret == 0)
100 ret = err;
101 segs = nskb;
102 } while (segs);
103
104 return ret;
105}
106 59
107static int ip_forward_finish(struct sk_buff *skb) 60static int ip_forward_finish(struct sk_buff *skb)
108{ 61{
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
114 if (unlikely(opt->optlen)) 67 if (unlikely(opt->optlen))
115 ip_forward_options(skb); 68 ip_forward_options(skb);
116 69
117 if (ip_gso_exceeds_dst_mtu(skb))
118 return ip_forward_finish_gso(skb);
119
120 return dst_output(skb); 70 return dst_output(skb);
121} 71}
122 72
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index c10a3ce5cbff..ed32313e307c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
232 * "Fragment Reassembly Timeout" message, per RFC792. 232 * "Fragment Reassembly Timeout" message, per RFC792.
233 */ 233 */
234 if (qp->user == IP_DEFRAG_AF_PACKET || 234 if (qp->user == IP_DEFRAG_AF_PACKET ||
235 (qp->user == IP_DEFRAG_CONNTRACK_IN && 235 ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
236 skb_rtable(head)->rt_type != RTN_LOCAL)) 236 (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
237 (skb_rtable(head)->rt_type != RTN_LOCAL)))
237 goto out_rcu_unlock; 238 goto out_rcu_unlock;
238 239
239 240
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1cbeba5edff9..a52f50187b54 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
211 return -EINVAL; 211 return -EINVAL;
212} 212}
213 213
214static int ip_finish_output_gso(struct sk_buff *skb)
215{
216 netdev_features_t features;
217 struct sk_buff *segs;
218 int ret = 0;
219
220 /* common case: locally created skb or seglen is <= mtu */
221 if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
222 skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
223 return ip_finish_output2(skb);
224
 	225	/* Slowpath - GSO segment length exceeds the dst MTU.
226 *
227 * This can happen in two cases:
228 * 1) TCP GRO packet, DF bit not set
229 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
230 * from host network stack.
231 */
232 features = netif_skb_features(skb);
233 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
234 if (IS_ERR(segs)) {
235 kfree_skb(skb);
236 return -ENOMEM;
237 }
238
239 consume_skb(skb);
240
241 do {
242 struct sk_buff *nskb = segs->next;
243 int err;
244
245 segs->next = NULL;
246 err = ip_fragment(segs, ip_finish_output2);
247
248 if (err && ret == 0)
249 ret = err;
250 segs = nskb;
251 } while (segs);
252
253 return ret;
254}
255
214static int ip_finish_output(struct sk_buff *skb) 256static int ip_finish_output(struct sk_buff *skb)
215{ 257{
216#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 258#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
220 return dst_output(skb); 262 return dst_output(skb);
221 } 263 }
222#endif 264#endif
223 if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb)) 265 if (skb_is_gso(skb))
266 return ip_finish_output_gso(skb);
267
268 if (skb->len > ip_skb_dst_mtu(skb))
224 return ip_fragment(skb, ip_finish_output2); 269 return ip_fragment(skb, ip_finish_output2);
225 else 270
226 return ip_finish_output2(skb); 271 return ip_finish_output2(skb);
227} 272}
228 273
229int ip_mc_output(struct sock *sk, struct sk_buff *skb) 274int ip_mc_output(struct sock *sk, struct sk_buff *skb)
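
ip_finish_output_gso() above re-adds, on the output path, the walk that the ip_forward.c hunk deleted: segment the oversized GSO skb in software, emit each segment, and report the first error while still pushing out the rest. The list-walking shape, with userspace stand-ins rather than the sk_buff API:

#include <stdio.h>
#include <stdlib.h>

struct seg {
        struct seg *next;
        int len;
};

/* stand-in for ip_fragment()/ip_finish_output2(); pretends oversized fails */
static int emit(struct seg *s)
{
        int err = s->len > 1500 ? -1 : 0;

        printf("emit segment len=%d\n", s->len);
        free(s);
        return err;
}

static int output_segments(struct seg *segs)
{
        int ret = 0;

        do {
                struct seg *nseg = segs->next;
                int err;

                segs->next = NULL;      /* detach, as the hunk does */
                err = emit(segs);
                if (err && ret == 0)
                        ret = err;      /* remember the first failure */
                segs = nseg;
        } while (segs);

        return ret;
}

static struct seg *make_seg(int len, struct seg *next)
{
        struct seg *s = malloc(sizeof(*s));

        if (!s)
                exit(1);
        s->len = len;
        s->next = next;
        return s;
}

int main(void)
{
        struct seg *chain = make_seg(1500, make_seg(1600, make_seg(700, NULL)));

        printf("ret=%d\n", output_segments(chain));
        return 0;
}
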
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index b3f859731c60..2acc2337d38b 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
540 unsigned int max_headroom; /* The extra header space needed */ 540 unsigned int max_headroom; /* The extra header space needed */
541 __be32 dst; 541 __be32 dst;
542 int err; 542 int err;
543 bool connected = true; 543 bool connected;
544 544
545 inner_iph = (const struct iphdr *)skb_inner_network_header(skb); 545 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
546 connected = (tunnel->parms.iph.daddr != 0);
546 547
547 dst = tnl_params->daddr; 548 dst = tnl_params->daddr;
548 if (dst == 0) { 549 if (dst == 0) {
@@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
882 */ 883 */
883 if (!IS_ERR(itn->fb_tunnel_dev)) { 884 if (!IS_ERR(itn->fb_tunnel_dev)) {
884 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; 885 itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
886 itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
885 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev)); 887 ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
886 } 888 }
887 rtnl_unlock(); 889 rtnl_unlock();
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index afcee51b90ed..13ef00f1e17b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
239static int vti4_err(struct sk_buff *skb, u32 info) 239static int vti4_err(struct sk_buff *skb, u32 info)
240{ 240{
241 __be32 spi; 241 __be32 spi;
242 __u32 mark;
242 struct xfrm_state *x; 243 struct xfrm_state *x;
243 struct ip_tunnel *tunnel; 244 struct ip_tunnel *tunnel;
244 struct ip_esp_hdr *esph; 245 struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
254 if (!tunnel) 255 if (!tunnel)
255 return -1; 256 return -1;
256 257
258 mark = be32_to_cpu(tunnel->parms.o_key);
259
257 switch (protocol) { 260 switch (protocol) {
258 case IPPROTO_ESP: 261 case IPPROTO_ESP:
259 esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); 262 esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
281 return 0; 284 return 0;
282 } 285 }
283 286
284 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 287 x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
285 spi, protocol, AF_INET); 288 spi, protocol, AF_INET);
286 if (!x) 289 if (!x)
287 return 0; 290 return 0;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 12e13bd82b5b..f40f321b41fc 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -22,7 +22,6 @@
22#endif 22#endif
23#include <net/netfilter/nf_conntrack_zones.h> 23#include <net/netfilter/nf_conntrack_zones.h>
24 24
25/* Returns new sk_buff, or NULL */
26static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) 25static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
27{ 26{
28 int err; 27 int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
33 err = ip_defrag(skb, user); 32 err = ip_defrag(skb, user);
34 local_bh_enable(); 33 local_bh_enable();
35 34
36 if (!err) 35 if (!err) {
37 ip_send_check(ip_hdr(skb)); 36 ip_send_check(ip_hdr(skb));
37 skb->local_df = 1;
38 }
38 39
39 return err; 40 return err;
40} 41}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8210964a9f19..044a0ddf6a79 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -236,15 +236,15 @@ exit:
236static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, 236static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
237 kgid_t *high) 237 kgid_t *high)
238{ 238{
239 kgid_t *data = net->ipv4.sysctl_ping_group_range; 239 kgid_t *data = net->ipv4.ping_group_range.range;
240 unsigned int seq; 240 unsigned int seq;
241 241
242 do { 242 do {
243 seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); 243 seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
244 244
245 *low = data[0]; 245 *low = data[0];
246 *high = data[1]; 246 *high = data[1];
247 } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); 247 } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
248} 248}
249 249
250 250
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index db1e0da871f4..5e676be3daeb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb,
1519 struct in_device *out_dev; 1519 struct in_device *out_dev;
1520 unsigned int flags = 0; 1520 unsigned int flags = 0;
1521 bool do_cache; 1521 bool do_cache;
1522 u32 itag; 1522 u32 itag = 0;
1523 1523
1524 /* get a working reference to the output device */ 1524 /* get a working reference to the output device */
1525 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); 1525 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 44eba052b43d..5cde8f263d40 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
45/* Update system visible IP port range */ 45/* Update system visible IP port range */
46static void set_local_port_range(struct net *net, int range[2]) 46static void set_local_port_range(struct net *net, int range[2])
47{ 47{
48 write_seqlock(&net->ipv4.sysctl_local_ports.lock); 48 write_seqlock(&net->ipv4.ip_local_ports.lock);
49 net->ipv4.sysctl_local_ports.range[0] = range[0]; 49 net->ipv4.ip_local_ports.range[0] = range[0];
50 net->ipv4.sysctl_local_ports.range[1] = range[1]; 50 net->ipv4.ip_local_ports.range[1] = range[1];
51 write_sequnlock(&net->ipv4.sysctl_local_ports.lock); 51 write_sequnlock(&net->ipv4.ip_local_ports.lock);
52} 52}
53 53
54/* Validate changes from /proc interface. */ 54/* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
57 size_t *lenp, loff_t *ppos) 57 size_t *lenp, loff_t *ppos)
58{ 58{
59 struct net *net = 59 struct net *net =
60 container_of(table->data, struct net, ipv4.sysctl_local_ports.range); 60 container_of(table->data, struct net, ipv4.ip_local_ports.range);
61 int ret; 61 int ret;
62 int range[2]; 62 int range[2];
63 struct ctl_table tmp = { 63 struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
87{ 87{
88 kgid_t *data = table->data; 88 kgid_t *data = table->data;
89 struct net *net = 89 struct net *net =
90 container_of(table->data, struct net, ipv4.sysctl_ping_group_range); 90 container_of(table->data, struct net, ipv4.ping_group_range.range);
91 unsigned int seq; 91 unsigned int seq;
92 do { 92 do {
93 seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); 93 seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
94 94
95 *low = data[0]; 95 *low = data[0];
96 *high = data[1]; 96 *high = data[1];
97 } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); 97 } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
98} 98}
99 99
100/* Update system visible IP port range */ 100/* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
102{ 102{
103 kgid_t *data = table->data; 103 kgid_t *data = table->data;
104 struct net *net = 104 struct net *net =
105 container_of(table->data, struct net, ipv4.sysctl_ping_group_range); 105 container_of(table->data, struct net, ipv4.ping_group_range.range);
106 write_seqlock(&net->ipv4.sysctl_local_ports.lock); 106 write_seqlock(&net->ipv4.ip_local_ports.lock);
107 data[0] = low; 107 data[0] = low;
108 data[1] = high; 108 data[1] = high;
109 write_sequnlock(&net->ipv4.sysctl_local_ports.lock); 109 write_sequnlock(&net->ipv4.ip_local_ports.lock);
110} 110}
111 111
112/* Validate changes from /proc interface. */ 112/* Validate changes from /proc interface. */
@@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = {
805 }, 805 },
806 { 806 {
807 .procname = "ping_group_range", 807 .procname = "ping_group_range",
808 .data = &init_net.ipv4.sysctl_ping_group_range, 808 .data = &init_net.ipv4.ping_group_range.range,
809 .maxlen = sizeof(gid_t)*2, 809 .maxlen = sizeof(gid_t)*2,
810 .mode = 0644, 810 .mode = 0644,
811 .proc_handler = ipv4_ping_group_range, 811 .proc_handler = ipv4_ping_group_range,
@@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = {
819 }, 819 },
820 { 820 {
821 .procname = "ip_local_port_range", 821 .procname = "ip_local_port_range",
822 .maxlen = sizeof(init_net.ipv4.sysctl_local_ports.range), 822 .maxlen = sizeof(init_net.ipv4.ip_local_ports.range),
823 .data = &init_net.ipv4.sysctl_local_ports.range, 823 .data = &init_net.ipv4.ip_local_ports.range,
824 .mode = 0644, 824 .mode = 0644,
825 .proc_handler = ipv4_local_port_range, 825 .proc_handler = ipv4_local_port_range,
826 }, 826 },
@@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
858 table[i].data += (void *)net - (void *)&init_net; 858 table[i].data += (void *)net - (void *)&init_net;
859 } 859 }
860 860
861 /*
862 * Sane defaults - nobody may create ping sockets.
863 * Boot scripts should set this to distro-specific group.
864 */
865 net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
866 net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
867
868 /*
869 * Set defaults for local port range
870 */
871 seqlock_init(&net->ipv4.sysctl_local_ports.lock);
872 net->ipv4.sysctl_local_ports.range[0] = 32768;
873 net->ipv4.sysctl_local_ports.range[1] = 61000;
874
875 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); 861 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
876 if (net->ipv4.ipv4_hdr == NULL) 862 if (net->ipv4.ipv4_hdr == NULL)
877 goto err_reg; 863 goto err_reg;
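
The sysctl handlers above recover the owning struct net from a pointer to one of its members with container_of(), which is what lets .data point at init_net while each namespace's cloned table is fixed up by the offset arithmetic earlier in ipv4_sysctl_init_net(). A freestanding version of the macro and a toy use (struct names hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct netns_demo {
        int id;
        int port_range[2];
};

int main(void)
{
        struct netns_demo ns = { .id = 7, .port_range = { 32768, 61000 } };
        int *data = ns.port_range;      /* what a ctl_table would carry */
        struct netns_demo *back =
                container_of(data, struct netns_demo, port_range);

        printf("recovered id=%d\n", back->id);  /* prints 7 */
        return 0;
}
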
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 40e701f2e1e0..186a8ecf92fa 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
62 if (err) 62 if (err)
63 return err; 63 return err;
64 64
65 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 65 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
66 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
67
68 skb->protocol = htons(ETH_P_IP);
69 66
70 return x->outer_mode->output2(x, skb); 67 return x->outer_mode->output2(x, skb);
71} 68}
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
73 70
74int xfrm4_output_finish(struct sk_buff *skb) 71int xfrm4_output_finish(struct sk_buff *skb)
75{ 72{
73 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
74 skb->protocol = htons(ETH_P_IP);
75
76#ifdef CONFIG_NETFILTER
77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
78#endif
79
80 return xfrm_output(skb);
81}
82
83static int __xfrm4_output(struct sk_buff *skb)
84{
85 struct xfrm_state *x = skb_dst(skb)->xfrm;
86
76#ifdef CONFIG_NETFILTER 87#ifdef CONFIG_NETFILTER
77 if (!skb_dst(skb)->xfrm) { 88 if (!x) {
78 IPCB(skb)->flags |= IPSKB_REROUTED; 89 IPCB(skb)->flags |= IPSKB_REROUTED;
79 return dst_output(skb); 90 return dst_output(skb);
80 } 91 }
81
82 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
83#endif 92#endif
84 93
85 skb->protocol = htons(ETH_P_IP); 94 return x->outer_mode->afinfo->output_finish(skb);
86 return xfrm_output(skb);
87} 95}
88 96
89int xfrm4_output(struct sock *sk, struct sk_buff *skb) 97int xfrm4_output(struct sock *sk, struct sk_buff *skb)
90{ 98{
91 struct dst_entry *dst = skb_dst(skb);
92 struct xfrm_state *x = dst->xfrm;
93
94 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, 99 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
95 NULL, dst->dev, 100 NULL, skb_dst(skb)->dev, __xfrm4_output,
96 x->outer_mode->afinfo->output_finish,
97 !(IPCB(skb)->flags & IPSKB_REROUTED)); 101 !(IPCB(skb)->flags & IPSKB_REROUTED));
98} 102}
99 103
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index 7f7b243e8139..a2ce0101eaac 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
50{ 50{
51 int ret; 51 int ret;
52 struct xfrm4_protocol *handler; 52 struct xfrm4_protocol *handler;
53 struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
53 54
54 for_each_protocol_rcu(*proto_handlers(protocol), handler) 55 if (!head)
56 return 0;
57
58 for_each_protocol_rcu(*head, handler)
55 if ((ret = handler->cb_handler(skb, err)) <= 0) 59 if ((ret = handler->cb_handler(skb, err)) <= 0)
56 return ret; 60 return ret;
57 61
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
64{ 68{
65 int ret; 69 int ret;
66 struct xfrm4_protocol *handler; 70 struct xfrm4_protocol *handler;
71 struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
67 72
68 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; 73 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
69 XFRM_SPI_SKB_CB(skb)->family = AF_INET; 74 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
70 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); 75 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
71 76
72 for_each_protocol_rcu(*proto_handlers(nexthdr), handler) 77 if (!head)
78 goto out;
79
80 for_each_protocol_rcu(*head, handler)
73 if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL) 81 if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
74 return ret; 82 return ret;
75 83
84out:
76 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 85 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
77 86
78 kfree_skb(skb); 87 kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
208 int ret = -EEXIST; 217 int ret = -EEXIST;
209 int priority = handler->priority; 218 int priority = handler->priority;
210 219
220 if (!proto_handlers(protocol) || !netproto(protocol))
221 return -EINVAL;
222
211 mutex_lock(&xfrm4_protocol_mutex); 223 mutex_lock(&xfrm4_protocol_mutex);
212 224
213 if (!rcu_dereference_protected(*proto_handlers(protocol), 225 if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
250 struct xfrm4_protocol *t; 262 struct xfrm4_protocol *t;
251 int ret = -ENOENT; 263 int ret = -ENOENT;
252 264
265 if (!proto_handlers(protocol) || !netproto(protocol))
266 return -EINVAL;
267
253 mutex_lock(&xfrm4_protocol_mutex); 268 mutex_lock(&xfrm4_protocol_mutex);
254 269
255 for (pprev = proto_handlers(protocol); 270 for (pprev = proto_handlers(protocol);
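
The xfrm4_protocol.c hunks guard every user of proto_handlers() against a NULL head, since only ESP, AH and IPCOMP have handler lists; register and deregister now reject anything else up front. A compact model of that lookup-then-check discipline (the protocol numbers are real, the rest is hypothetical):

#include <stdio.h>

#define IPPROTO_ESP     50
#define IPPROTO_AH      51
#define IPPROTO_COMP    108

typedef int (*handler_t)(int err);

static handler_t esp_handler, ah_handler, comp_handler;

static handler_t *proto_handlers(int protocol)
{
        switch (protocol) {
        case IPPROTO_ESP:       return &esp_handler;
        case IPPROTO_AH:        return &ah_handler;
        case IPPROTO_COMP:      return &comp_handler;
        }
        return NULL;            /* unsupported protocol */
}

static int register_handler(int protocol, handler_t h)
{
        handler_t *head = proto_handlers(protocol);

        if (!head)              /* reject up front, as the hunk does */
                return -22;     /* -EINVAL */
        *head = h;
        return 0;
}

static int demo_cb(int err) { return err; }

int main(void)
{
        printf("esp: %d\n", register_handler(IPPROTO_ESP, demo_cb)); /* 0 */
        printf("tcp: %d\n", register_handler(6, demo_cb));      /* -22 */
        return 0;
}
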
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 59f95affceb0..b2f091566f88 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
196 unsigned int off; 196 unsigned int off;
197 u16 flush = 1; 197 u16 flush = 1;
198 int proto; 198 int proto;
199 __wsum csum;
200 199
201 off = skb_gro_offset(skb); 200 off = skb_gro_offset(skb);
202 hlen = off + sizeof(*iph); 201 hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
264 263
265 NAPI_GRO_CB(skb)->flush |= flush; 264 NAPI_GRO_CB(skb)->flush |= flush;
266 265
267 csum = skb->csum; 266 skb_gro_postpull_rcsum(skb, iph, nlen);
268 skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
269 267
270 pp = ops->callbacks.gro_receive(head, skb); 268 pp = ops->callbacks.gro_receive(head, skb);
271 269
272 skb->csum = csum;
273
274out_unlock: 270out_unlock:
275 rcu_read_unlock(); 271 rcu_read_unlock();
276 272
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 40e7581374f7..fbf11562b54c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
344 344
345static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) 345static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
346{ 346{
347 if (skb->len <= mtu || skb->local_df) 347 if (skb->len <= mtu)
348 return false; 348 return false;
349 349
350 /* ipv6 conntrack defrag sets max_frag_size + local_df */
350 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) 351 if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
351 return true; 352 return true;
352 353
354 if (skb->local_df)
355 return false;
356
353 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) 357 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
354 return false; 358 return false;
355 359
@@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1225 unsigned int maxnonfragsize, headersize; 1229 unsigned int maxnonfragsize, headersize;
1226 1230
1227 headersize = sizeof(struct ipv6hdr) + 1231 headersize = sizeof(struct ipv6hdr) +
1228 (opt ? opt->tot_len : 0) + 1232 (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1229 (dst_allfrag(&rt->dst) ? 1233 (dst_allfrag(&rt->dst) ?
1230 sizeof(struct frag_hdr) : 0) + 1234 sizeof(struct frag_hdr) : 0) +
1231 rt->rt6i_nfheader_len; 1235 rt->rt6i_nfheader_len;
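
The ip6_pkt_too_big() hunk is mostly about check order: the conntrack-defrag hint (frag_max_size) must be tested before the local_df escape, because ipv6 defrag sets both. A plain-C restatement of that ordering with hypothetical fields (the GSO branch of the real function is elided):

#include <stdbool.h>
#include <stdio.h>

struct pkt {
        unsigned int len;
        unsigned int frag_max_size;     /* largest original fragment, 0 if none */
        bool local_df;                  /* "ignore the path MTU" flag */
};

static bool pkt_too_big(const struct pkt *p, unsigned int mtu)
{
        if (p->len <= mtu)
                return false;
        if (p->frag_max_size && p->frag_max_size > mtu)
                return true;    /* tested before the local_df escape */
        if (p->local_df)
                return false;
        return true;
}

int main(void)
{
        struct pkt p = { .len = 2000, .frag_max_size = 1500, .local_df = true };

        printf("%d\n", pkt_too_big(&p, 1280));  /* 1, despite local_df */
        return 0;
}
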
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b05b609f69d1..f6a66bb4114d 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
1557{ 1557{
1558 u8 proto; 1558 u8 proto;
1559 1559
1560 if (!data) 1560 if (!data || !data[IFLA_IPTUN_PROTO])
1561 return 0; 1561 return 0;
1562 1562
1563 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); 1563 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b7c0f827140b..6cc9f9371cc5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
511 u8 type, u8 code, int offset, __be32 info) 511 u8 type, u8 code, int offset, __be32 info)
512{ 512{
513 __be32 spi; 513 __be32 spi;
514 __u32 mark;
514 struct xfrm_state *x; 515 struct xfrm_state *x;
515 struct ip6_tnl *t; 516 struct ip6_tnl *t;
516 struct ip_esp_hdr *esph; 517 struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
524 if (!t) 525 if (!t)
525 return -1; 526 return -1;
526 527
528 mark = be32_to_cpu(t->parms.o_key);
529
527 switch (protocol) { 530 switch (protocol) {
528 case IPPROTO_ESP: 531 case IPPROTO_ESP:
529 esph = (struct ip_esp_hdr *)(skb->data + offset); 532 esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
545 type != NDISC_REDIRECT) 548 type != NDISC_REDIRECT)
546 return 0; 549 return 0;
547 550
548 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, 551 x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
549 spi, protocol, AF_INET6); 552 spi, protocol, AF_INET6);
550 if (!x) 553 if (!x)
551 return 0; 554 return 0;
@@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void)
1097 1100
1098 err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP); 1101 err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
1099 if (err < 0) { 1102 if (err < 0) {
1100 unregister_pernet_device(&vti6_net_ops);
1101 pr_err("%s: can't register vti6 protocol\n", __func__); 1103 pr_err("%s: can't register vti6 protocol\n", __func__);
1102 1104
1103 goto out; 1105 goto out;
@@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void)
1106 err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH); 1108 err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
1107 if (err < 0) { 1109 if (err < 0) {
1108 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); 1110 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
1109 unregister_pernet_device(&vti6_net_ops);
1110 pr_err("%s: can't register vti6 protocol\n", __func__); 1111 pr_err("%s: can't register vti6 protocol\n", __func__);
1111 1112
1112 goto out; 1113 goto out;
@@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void)
1116 if (err < 0) { 1117 if (err < 0) {
1117 xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); 1118 xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
1118 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); 1119 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
1119 unregister_pernet_device(&vti6_net_ops);
1120 pr_err("%s: can't register vti6 protocol\n", __func__); 1120 pr_err("%s: can't register vti6 protocol\n", __func__);
1121 1121
1122 goto out; 1122 goto out;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 09a22f4f36c9..ca8d4ea48a5d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -851,7 +851,7 @@ out:
851static void ndisc_recv_na(struct sk_buff *skb) 851static void ndisc_recv_na(struct sk_buff *skb)
852{ 852{
853 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 853 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
854 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 854 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
855 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 855 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
856 u8 *lladdr = NULL; 856 u8 *lladdr = NULL;
857 u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) + 857 u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
944 /* 944 /*
945 * Change: router to host 945 * Change: router to host
946 */ 946 */
947 struct rt6_info *rt; 947 rt6_clean_tohost(dev_net(dev), saddr);
948 rt = rt6_get_dflt_router(saddr, dev);
949 if (rt)
950 ip6_del_rt(rt);
951 } 948 }
952 949
953out: 950out:
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 95f3f1da0d7f..d38e6a8d8b9f 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
30 .daddr = iph->daddr, 30 .daddr = iph->daddr,
31 .saddr = iph->saddr, 31 .saddr = iph->saddr,
32 }; 32 };
33 int err;
33 34
34 dst = ip6_route_output(net, skb->sk, &fl6); 35 dst = ip6_route_output(net, skb->sk, &fl6);
35 if (dst->error) { 36 err = dst->error;
37 if (err) {
36 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 38 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
37 LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); 39 LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
38 dst_release(dst); 40 dst_release(dst);
39 return dst->error; 41 return err;
40 } 42 }
41 43
42 /* Drop old route. */ 44 /* Drop old route. */
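
The ip6_route_me_harder() fix reads dst->error into a local while the reference is still held, instead of dereferencing the pointer again after dst_release() may have freed it. The same discipline on a self-contained refcounted toy object (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct dst { int refcnt; int error; };

static void dst_release(struct dst *d)
{
        if (--d->refcnt == 0)
                free(d);
}

static int route_check(struct dst *d)
{
        int err = d->error;     /* snapshot while the reference is held */

        if (err) {
                dst_release(d);
                return err;     /* NOT d->error: d may be gone */
        }
        dst_release(d);
        return 0;
}

int main(void)
{
        struct dst *d = malloc(sizeof(*d));

        if (!d)
                return 1;
        d->refcnt = 1;
        d->error = -113;        /* -EHOSTUNREACH */
        printf("err=%d\n", route_check(d));
        return 0;
}
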
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 004fffb6c221..6ebdb7b6744c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2234 fib6_clean_all(net, fib6_remove_prefsrc, &adni); 2234 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2235} 2235}
2236 2236
2237#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2238#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2239
	2240/* Remove routers and update dst entries when a gateway turns into a host. */
2241static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2242{
2243 struct in6_addr *gateway = (struct in6_addr *)arg;
2244
2245 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2246 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2247 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2248 return -1;
2249 }
2250 return 0;
2251}
2252
2253void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2254{
2255 fib6_clean_all(net, fib6_clean_tohost, gateway);
2256}
2257
2237struct arg_dev_net { 2258struct arg_dev_net {
2238 struct net_device *dev; 2259 struct net_device *dev;
2239 struct net *net; 2260 struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2709 if (tb[RTA_OIF]) 2730 if (tb[RTA_OIF])
2710 oif = nla_get_u32(tb[RTA_OIF]); 2731 oif = nla_get_u32(tb[RTA_OIF]);
2711 2732
2733 if (tb[RTA_MARK])
2734 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
2735
2712 if (iif) { 2736 if (iif) {
2713 struct net_device *dev; 2737 struct net_device *dev;
2714 int flags = 0; 2738 int flags = 0;
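
fib6_clean_tohost() above selects routes whose flags contain one of two full masks, using the (flags & MASK) == MASK idiom so that partial matches are skipped. A standalone illustration (the flag values here are made up, not the real RTF_* constants):

#include <stdio.h>

#define RTF_ADDRCONF    0x1
#define RTF_DEFAULT     0x2
#define RTF_GATEWAY     0x4
#define RTF_CACHE       0x8

#define RTF_RA_ROUTER           (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
#define RTF_CACHE_GATEWAY       (RTF_GATEWAY | RTF_CACHE)

static int matches_tohost(unsigned int flags)
{
        return ((flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
               ((flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY);
}

int main(void)
{
        printf("%d\n", matches_tohost(RTF_GATEWAY));              /* 0 */
        printf("%d\n", matches_tohost(RTF_RA_ROUTER));            /* 1 */
        printf("%d\n", matches_tohost(RTF_GATEWAY | RTF_CACHE));  /* 1 */
        return 0;
}
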
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 0d78132ff18a..8517d3cd1aed 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
42 if (NAPI_GRO_CB(skb)->flush) 42 if (NAPI_GRO_CB(skb)->flush)
43 goto skip_csum; 43 goto skip_csum;
44 44
45 wsum = skb->csum; 45 wsum = NAPI_GRO_CB(skb)->csum;
46 46
47 switch (skb->ip_summed) { 47 switch (skb->ip_summed) {
48 case CHECKSUM_NONE: 48 case CHECKSUM_NONE:
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 19ef329bdbf8..b930d080c66f 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
114 if (err) 114 if (err)
115 return err; 115 return err;
116 116
117 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
118#ifdef CONFIG_NETFILTER
119 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
120#endif
121
122 skb->protocol = htons(ETH_P_IPV6);
123 skb->local_df = 1; 117 skb->local_df = 1;
124 118
125 return x->outer_mode->output2(x, skb); 119 return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
128 122
129int xfrm6_output_finish(struct sk_buff *skb) 123int xfrm6_output_finish(struct sk_buff *skb)
130{ 124{
125 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
126 skb->protocol = htons(ETH_P_IPV6);
127
131#ifdef CONFIG_NETFILTER 128#ifdef CONFIG_NETFILTER
132 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; 129 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
133#endif 130#endif
134 131
135 skb->protocol = htons(ETH_P_IPV6);
136 return xfrm_output(skb); 132 return xfrm_output(skb);
137} 133}
138 134
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
142 struct xfrm_state *x = dst->xfrm; 138 struct xfrm_state *x = dst->xfrm;
143 int mtu; 139 int mtu;
144 140
141#ifdef CONFIG_NETFILTER
142 if (!x) {
143 IP6CB(skb)->flags |= IP6SKB_REROUTED;
144 return dst_output(skb);
145 }
146#endif
147
145 if (skb->protocol == htons(ETH_P_IPV6)) 148 if (skb->protocol == htons(ETH_P_IPV6))
146 mtu = ip6_skb_dst_mtu(skb); 149 mtu = ip6_skb_dst_mtu(skb);
147 else 150 else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
165 168
166int xfrm6_output(struct sock *sk, struct sk_buff *skb) 169int xfrm6_output(struct sock *sk, struct sk_buff *skb)
167{ 170{
168 return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, 171 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
169 skb_dst(skb)->dev, __xfrm6_output); 172 NULL, skb_dst(skb)->dev, __xfrm6_output,
173 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
170} 174}
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
index 6ab989c486f7..54d13f8dbbae 100644
--- a/net/ipv6/xfrm6_protocol.c
+++ b/net/ipv6/xfrm6_protocol.c
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
50{ 50{
51 int ret; 51 int ret;
52 struct xfrm6_protocol *handler; 52 struct xfrm6_protocol *handler;
53 struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
54
55 if (!head)
56 return 0;
53 57
54 for_each_protocol_rcu(*proto_handlers(protocol), handler) 58 for_each_protocol_rcu(*proto_handlers(protocol), handler)
55 if ((ret = handler->cb_handler(skb, err)) <= 0) 59 if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
184 struct xfrm6_protocol __rcu **pprev; 188 struct xfrm6_protocol __rcu **pprev;
185 struct xfrm6_protocol *t; 189 struct xfrm6_protocol *t;
186 bool add_netproto = false; 190 bool add_netproto = false;
187
188 int ret = -EEXIST; 191 int ret = -EEXIST;
189 int priority = handler->priority; 192 int priority = handler->priority;
190 193
194 if (!proto_handlers(protocol) || !netproto(protocol))
195 return -EINVAL;
196
191 mutex_lock(&xfrm6_protocol_mutex); 197 mutex_lock(&xfrm6_protocol_mutex);
192 198
193 if (!rcu_dereference_protected(*proto_handlers(protocol), 199 if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
230 struct xfrm6_protocol *t; 236 struct xfrm6_protocol *t;
231 int ret = -ENOENT; 237 int ret = -ENOENT;
232 238
239 if (!proto_handlers(protocol) || !netproto(protocol))
240 return -EINVAL;
241
233 mutex_lock(&xfrm6_protocol_mutex); 242 mutex_lock(&xfrm6_protocol_mutex);
234 243
235 for (pprev = proto_handlers(protocol); 244 for (pprev = proto_handlers(protocol);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 01e77b0ae075..8c9d7302c846 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
1830 spin_lock_irqsave(&list->lock, flags); 1830 spin_lock_irqsave(&list->lock, flags);
1831 1831
1832 while (list_skb != (struct sk_buff *)list) { 1832 while (list_skb != (struct sk_buff *)list) {
1833 if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { 1833 if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
1834 this = list_skb; 1834 this = list_skb;
1835 break; 1835 break;
1836 } 1836 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 222c28b75315..f169b6ee94ee 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
317 317
318 bool started, abort, hw_begun, notified; 318 bool started, abort, hw_begun, notified;
319 bool to_be_freed; 319 bool to_be_freed;
320 bool on_channel;
320 321
321 unsigned long hw_start_time; 322 unsigned long hw_start_time;
322 323
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index dee50aefd6e8..27600a9808ba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
3598 3598
3599 sdata_lock(sdata); 3599 sdata_lock(sdata);
3600 3600
3601 if (ifmgd->auth_data) { 3601 if (ifmgd->auth_data || ifmgd->assoc_data) {
3602 const u8 *bssid = ifmgd->auth_data ?
3603 ifmgd->auth_data->bss->bssid :
3604 ifmgd->assoc_data->bss->bssid;
3605
3602 /* 3606 /*
3603 * If we are trying to authenticate while suspending, cfg80211 3607 * If we are trying to authenticate / associate while suspending,
3604 * won't know and won't actually abort those attempts, thus we 3608 * cfg80211 won't know and won't actually abort those attempts,
3605 * need to do that ourselves. 3609 * thus we need to do that ourselves.
3606 */ 3610 */
3607 ieee80211_send_deauth_disassoc(sdata, 3611 ieee80211_send_deauth_disassoc(sdata, bssid,
3608 ifmgd->auth_data->bss->bssid,
3609 IEEE80211_STYPE_DEAUTH, 3612 IEEE80211_STYPE_DEAUTH,
3610 WLAN_REASON_DEAUTH_LEAVING, 3613 WLAN_REASON_DEAUTH_LEAVING,
3611 false, frame_buf); 3614 false, frame_buf);
3612 ieee80211_destroy_auth_data(sdata, false); 3615 if (ifmgd->assoc_data)
3616 ieee80211_destroy_assoc_data(sdata, false);
3617 if (ifmgd->auth_data)
3618 ieee80211_destroy_auth_data(sdata, false);
3613 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, 3619 cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
3614 IEEE80211_DEAUTH_FRAME_LEN); 3620 IEEE80211_DEAUTH_FRAME_LEN);
3615 } 3621 }
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6fb38558a5e6..7a17decd27f9 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
333 container_of(work, struct ieee80211_roc_work, work.work); 333 container_of(work, struct ieee80211_roc_work, work.work);
334 struct ieee80211_sub_if_data *sdata = roc->sdata; 334 struct ieee80211_sub_if_data *sdata = roc->sdata;
335 struct ieee80211_local *local = sdata->local; 335 struct ieee80211_local *local = sdata->local;
336 bool started; 336 bool started, on_channel;
337 337
338 mutex_lock(&local->mtx); 338 mutex_lock(&local->mtx);
339 339
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
354 if (!roc->started) { 354 if (!roc->started) {
355 struct ieee80211_roc_work *dep; 355 struct ieee80211_roc_work *dep;
356 356
357 /* start this ROC */ 357 WARN_ON(local->use_chanctx);
358 ieee80211_offchannel_stop_vifs(local); 358
359 /* If actually operating on the desired channel (with at least
360 * 20 MHz channel width) don't stop all the operations but still
361 * treat it as though the ROC operation started properly, so
362 * other ROC operations won't interfere with this one.
363 */
364 roc->on_channel = roc->chan == local->_oper_chandef.chan &&
365 local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
366 local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
359 367
360 /* switch channel etc */ 368 /* start this ROC */
361 ieee80211_recalc_idle(local); 369 ieee80211_recalc_idle(local);
362 370
363 local->tmp_channel = roc->chan; 371 if (!roc->on_channel) {
364 ieee80211_hw_config(local, 0); 372 ieee80211_offchannel_stop_vifs(local);
373
374 local->tmp_channel = roc->chan;
375 ieee80211_hw_config(local, 0);
376 }
365 377
366 /* tell userspace or send frame */ 378 /* tell userspace or send frame */
367 ieee80211_handle_roc_started(roc); 379 ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
380 finish: 392 finish:
381 list_del(&roc->list); 393 list_del(&roc->list);
382 started = roc->started; 394 started = roc->started;
395 on_channel = roc->on_channel;
383 ieee80211_roc_notify_destroy(roc, !roc->abort); 396 ieee80211_roc_notify_destroy(roc, !roc->abort);
384 397
385 if (started) { 398 if (started && !on_channel) {
386 ieee80211_flush_queues(local, NULL); 399 ieee80211_flush_queues(local, NULL);
387 400
388 local->tmp_channel = NULL; 401 local->tmp_channel = NULL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 216c45b949e5..2b608b2b70ec 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1231 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && 1231 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1232 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { 1232 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1233 sta->last_rx = jiffies; 1233 sta->last_rx = jiffies;
1234 if (ieee80211_is_data(hdr->frame_control)) { 1234 if (ieee80211_is_data(hdr->frame_control) &&
1235 !is_multicast_ether_addr(hdr->addr1)) {
1235 sta->last_rx_rate_idx = status->rate_idx; 1236 sta->last_rx_rate_idx = status->rate_idx;
1236 sta->last_rx_rate_flag = status->flag; 1237 sta->last_rx_rate_flag = status->flag;
1237 sta->last_rx_rate_vht_flag = status->vht_flag; 1238 sta->last_rx_rate_vht_flag = status->vht_flag;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 137a192e64bc..847d92f6bef6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
1148 atomic_dec(&ps->num_sta_ps); 1148 atomic_dec(&ps->num_sta_ps);
1149 1149
1150 /* This station just woke up and isn't aware of our SMPS state */ 1150 /* This station just woke up and isn't aware of our SMPS state */
1151 if (!ieee80211_smps_is_restrictive(sta->known_smps_mode, 1151 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
1152 !ieee80211_smps_is_restrictive(sta->known_smps_mode,
1152 sdata->smps_mode) && 1153 sdata->smps_mode) &&
1153 sta->known_smps_mode != sdata->bss->req_smps && 1154 sta->known_smps_mode != sdata->bss->req_smps &&
1154 sta_info_tx_streams(sta) != 1) { 1155 sta_info_tx_streams(sta) != 1) {
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 00ba90b02ab2..60cb7a665976 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
314 !is_multicast_ether_addr(hdr->addr1)) 314 !is_multicast_ether_addr(hdr->addr1))
315 txflags |= IEEE80211_RADIOTAP_F_TX_FAIL; 315 txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
316 316
317 if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || 317 if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
318 (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
319 txflags |= IEEE80211_RADIOTAP_F_TX_CTS; 318 txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
320 else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) 319 if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
321 txflags |= IEEE80211_RADIOTAP_F_TX_RTS; 320 txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
322 321
323 put_unaligned_le16(txflags, pos); 322 put_unaligned_le16(txflags, pos);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index a0b0aea76525..cec5b60487a4 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -21,10 +21,10 @@
21 21
22#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \ 22#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
23 __field(bool, p2p) \ 23 __field(bool, p2p) \
24 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 24 __string(vif_name, sdata->name)
25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ 25#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
26 __entry->p2p = sdata->vif.p2p; \ 26 __entry->p2p = sdata->vif.p2p; \
27 __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name) 27 __assign_str(vif_name, sdata->name)
28#define VIF_PR_FMT " vif:%s(%d%s)" 28#define VIF_PR_FMT " vif:%s(%d%s)"
29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" 29#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
30 30
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 275c94f995f7..3c365837e910 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1780 mutex_unlock(&local->mtx); 1780 mutex_unlock(&local->mtx);
1781 1781
1782 if (sched_scan_stopped) 1782 if (sched_scan_stopped)
1783 cfg80211_sched_scan_stopped(local->hw.wiphy); 1783 cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
1784 1784
1785 /* 1785 /*
1786 * If this is for hw restart things are still running. 1786 * If this is for hw restart things are still running.
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index e9e36a256165..9265adfdabfc 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
129 if (!vht_cap_ie || !sband->vht_cap.vht_supported) 129 if (!vht_cap_ie || !sband->vht_cap.vht_supported)
130 return; 130 return;
131 131
132 /* A VHT STA must support 40 MHz */ 132 /*
133 if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 133 * A VHT STA must support 40 MHz, but if we verify that here
134 return; 134 * then we break a few things - some APs (e.g. Netgear R6300v2
135 * and others based on the BCM4360 chipset) will unset this
136 * capability bit when operating in 20 MHz.
137 */
135 138
136 vht_cap->vht_supported = true; 139 vht_cap->vht_supported = true;
137 140
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ccc46fa5edbc..58579634427d 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1336#ifdef CONFIG_NF_NAT_NEEDED 1336#ifdef CONFIG_NF_NAT_NEEDED
1337 int ret; 1337 int ret;
1338 1338
1339 if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1340 return 0;
1341
1339 ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST, 1342 ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1340 cda[CTA_NAT_DST]); 1343 cda[CTA_NAT_DST]);
1341 if (ret < 0) 1344 if (ret < 0)
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 804105391b9a..345acfb1720b 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -66,20 +66,6 @@ struct nft_jumpstack {
66 int rulenum; 66 int rulenum;
67}; 67};
68 68
69static inline void
70nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
71 struct nft_jumpstack *jumpstack, unsigned int stackptr)
72{
73 struct nft_stats __percpu *stats;
74 const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
75
76 rcu_read_lock_bh();
77 stats = rcu_dereference(nft_base_chain(chain)->stats);
78 __this_cpu_inc(stats->pkts);
79 __this_cpu_add(stats->bytes, pkt->skb->len);
80 rcu_read_unlock_bh();
81}
82
83enum nft_trace { 69enum nft_trace {
84 NFT_TRACE_RULE, 70 NFT_TRACE_RULE,
85 NFT_TRACE_RETURN, 71 NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
117unsigned int 103unsigned int
118nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops) 104nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
119{ 105{
120 const struct nft_chain *chain = ops->priv; 106 const struct nft_chain *chain = ops->priv, *basechain = chain;
121 const struct nft_rule *rule; 107 const struct nft_rule *rule;
122 const struct nft_expr *expr, *last; 108 const struct nft_expr *expr, *last;
123 struct nft_data data[NFT_REG_MAX + 1]; 109 struct nft_data data[NFT_REG_MAX + 1];
124 unsigned int stackptr = 0; 110 unsigned int stackptr = 0;
125 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; 111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
126 int rulenum = 0; 112 struct nft_stats __percpu *stats;
113 int rulenum;
127 /* 114 /*
128 * Cache cursor to avoid problems in case that the cursor is updated 115 * Cache cursor to avoid problems in case that the cursor is updated
129 * while traversing the ruleset. 116 * while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
131 unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor); 118 unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
132 119
133do_chain: 120do_chain:
121 rulenum = 0;
134 rule = list_entry(&chain->rules, struct nft_rule, list); 122 rule = list_entry(&chain->rules, struct nft_rule, list);
135next_rule: 123next_rule:
136 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; 124 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
156 switch (data[NFT_REG_VERDICT].verdict) { 144 switch (data[NFT_REG_VERDICT].verdict) {
157 case NFT_BREAK: 145 case NFT_BREAK:
158 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; 146 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
159 /* fall through */ 147 continue;
160 case NFT_CONTINUE: 148 case NFT_CONTINUE:
149 if (unlikely(pkt->skb->nf_trace))
150 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
161 continue; 151 continue;
162 } 152 }
163 break; 153 break;
@@ -183,37 +173,44 @@ next_rule:
183 jumpstack[stackptr].rule = rule; 173 jumpstack[stackptr].rule = rule;
184 jumpstack[stackptr].rulenum = rulenum; 174 jumpstack[stackptr].rulenum = rulenum;
185 stackptr++; 175 stackptr++;
186 /* fall through */ 176 chain = data[NFT_REG_VERDICT].chain;
177 goto do_chain;
187 case NFT_GOTO: 178 case NFT_GOTO:
179 if (unlikely(pkt->skb->nf_trace))
180 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
181
188 chain = data[NFT_REG_VERDICT].chain; 182 chain = data[NFT_REG_VERDICT].chain;
189 goto do_chain; 183 goto do_chain;
190 case NFT_RETURN: 184 case NFT_RETURN:
191 if (unlikely(pkt->skb->nf_trace)) 185 if (unlikely(pkt->skb->nf_trace))
192 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN); 186 nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
193 187 break;
194 /* fall through */
195 case NFT_CONTINUE: 188 case NFT_CONTINUE:
189 if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
190 nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
196 break; 191 break;
197 default: 192 default:
198 WARN_ON(1); 193 WARN_ON(1);
199 } 194 }
200 195
201 if (stackptr > 0) { 196 if (stackptr > 0) {
202 if (unlikely(pkt->skb->nf_trace))
203 nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
204
205 stackptr--; 197 stackptr--;
206 chain = jumpstack[stackptr].chain; 198 chain = jumpstack[stackptr].chain;
207 rule = jumpstack[stackptr].rule; 199 rule = jumpstack[stackptr].rule;
208 rulenum = jumpstack[stackptr].rulenum; 200 rulenum = jumpstack[stackptr].rulenum;
209 goto next_rule; 201 goto next_rule;
210 } 202 }
211 nft_chain_stats(chain, pkt, jumpstack, stackptr);
212 203
213 if (unlikely(pkt->skb->nf_trace)) 204 if (unlikely(pkt->skb->nf_trace))
214 nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY); 205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
206
207 rcu_read_lock_bh();
208 stats = rcu_dereference(nft_base_chain(basechain)->stats);
209 __this_cpu_inc(stats->pkts);
210 __this_cpu_add(stats->bytes, pkt->skb->len);
211 rcu_read_unlock_bh();
215 212
216 return nft_base_chain(chain)->policy; 213 return nft_base_chain(basechain)->policy;
217} 214}
218EXPORT_SYMBOL_GPL(nft_do_chain); 215EXPORT_SYMBOL_GPL(nft_do_chain);
219 216
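
The reworked nft_do_chain() above walks rules per chain, pushes the return point on a small stack for NFT_JUMP, restarts rulenum at 0 for every chain entered, and keeps the base chain in a separate variable for the final policy and stats step. A sketch of that control-flow shape (verdicts and structures are simplified stand-ins, not the nftables ABI):

#include <stdio.h>

enum verdict { V_CONTINUE, V_JUMP, V_RETURN, V_ACCEPT };

struct rule { enum verdict v; int target; };
struct chain { const char *name; const struct rule *rules; int nrules; };

#define JUMP_STACK_SIZE 16

static int do_chain(const struct chain *chains, int entry)
{
        struct { int chain, rulenum; } stack[JUMP_STACK_SIZE];
        int stackptr = 0, cur = entry, rulenum;

do_chain:
        rulenum = 0;            /* reset per chain, as in the fix */
next_rule:
        for (; rulenum < chains[cur].nrules; rulenum++) {
                const struct rule *r = &chains[cur].rules[rulenum];

                switch (r->v) {
                case V_CONTINUE:
                        continue;
                case V_JUMP:    /* remember where to resume */
                        stack[stackptr].chain = cur;
                        stack[stackptr].rulenum = rulenum + 1;
                        stackptr++;
                        cur = r->target;
                        goto do_chain;
                case V_RETURN:
                        goto pop;
                case V_ACCEPT:
                        printf("accept in %s rule %d\n",
                               chains[cur].name, rulenum);
                        return 0;
                }
        }
pop:
        if (stackptr > 0) {
                stackptr--;
                cur = stack[stackptr].chain;
                rulenum = stack[stackptr].rulenum;
                goto next_rule;
        }
        printf("fell through to base policy\n");
        return 1;
}

int main(void)
{
        const struct rule base_rules[] = { { V_JUMP, 1 }, { V_ACCEPT, 0 } };
        const struct rule sub_rules[]  = { { V_CONTINUE, 0 }, { V_RETURN, 0 } };
        const struct chain chains[] = {
                { "base", base_rules, 2 },
                { "sub",  sub_rules,  2 },
        };

        return do_chain(chains, 0);
}
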
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e009087620e3..23ef77c60fff 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -256,15 +256,15 @@ replay:
256#endif 256#endif
257 { 257 {
258 nfnl_unlock(subsys_id); 258 nfnl_unlock(subsys_id);
259 kfree_skb(nskb); 259 netlink_ack(skb, nlh, -EOPNOTSUPP);
260 return netlink_ack(skb, nlh, -EOPNOTSUPP); 260 return kfree_skb(nskb);
261 } 261 }
262 } 262 }
263 263
264 if (!ss->commit || !ss->abort) { 264 if (!ss->commit || !ss->abort) {
265 nfnl_unlock(subsys_id); 265 nfnl_unlock(subsys_id);
266 kfree_skb(nskb); 266 netlink_ack(skb, nlh, -EOPNOTSUPP);
267 return netlink_ack(skb, nlh, -EOPNOTSUPP); 267 return kfree_skb(skb);
268 } 268 }
269 269
270 while (skb->len >= nlmsg_total_size(0)) { 270 while (skb->len >= nlmsg_total_size(0)) {
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 7633a752c65e..0ad080790a32 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
99 _debug("tktlen: %x", tktlen); 99 _debug("tktlen: %x", tktlen);
100 if (tktlen > AFSTOKEN_RK_TIX_MAX) 100 if (tktlen > AFSTOKEN_RK_TIX_MAX)
101 return -EKEYREJECTED; 101 return -EKEYREJECTED;
102 if (8 * 4 + tktlen != toklen) 102 if (toklen < 8 * 4 + tktlen)
103 return -EKEYREJECTED; 103 return -EKEYREJECTED;
104 104
105 plen = sizeof(*token) + sizeof(*token->kad) + tktlen; 105 plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index eed8404443d8..f435a88d899a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
188 [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, 188 [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
189}; 189};
190 190
191static void tcindex_filter_result_init(struct tcindex_filter_result *r)
192{
193 memset(r, 0, sizeof(*r));
194 tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
195}
196
191static int 197static int
192tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, 198tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
193 u32 handle, struct tcindex_data *p, 199 u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
207 return err; 213 return err;
208 214
209 memcpy(&cp, p, sizeof(cp)); 215 memcpy(&cp, p, sizeof(cp));
210 memset(&new_filter_result, 0, sizeof(new_filter_result)); 216 tcindex_filter_result_init(&new_filter_result);
211 tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
212 217
218 tcindex_filter_result_init(&cr);
213 if (old_r) 219 if (old_r)
214 memcpy(&cr, r, sizeof(cr)); 220 cr.res = r->res;
215 else {
216 memset(&cr, 0, sizeof(cr));
217 tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
218 }
219 221
220 if (tb[TCA_TCINDEX_HASH]) 222 if (tb[TCA_TCINDEX_HASH])
221 cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); 223 cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
267 err = -ENOMEM; 269 err = -ENOMEM;
268 if (!cp.perfect && !cp.h) { 270 if (!cp.perfect && !cp.h) {
269 if (valid_perfect_hash(&cp)) { 271 if (valid_perfect_hash(&cp)) {
272 int i;
273
270 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); 274 cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
271 if (!cp.perfect) 275 if (!cp.perfect)
272 goto errout; 276 goto errout;
277 for (i = 0; i < cp.hash; i++)
278 tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
279 TCA_TCINDEX_POLICE);
273 balloc = 1; 280 balloc = 1;
274 } else { 281 } else {
275 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); 282 cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
295 tcf_bind_filter(tp, &cr.res, base); 302 tcf_bind_filter(tp, &cr.res, base);
296 } 303 }
297 304
298 tcf_exts_change(tp, &cr.exts, &e); 305 if (old_r)
306 tcf_exts_change(tp, &r->exts, &e);
307 else
308 tcf_exts_change(tp, &cr.exts, &e);
299 309
300 tcf_tree_lock(tp); 310 tcf_tree_lock(tp);
301 if (old_r && old_r != r) 311 if (old_r && old_r != r)
302 memset(old_r, 0, sizeof(*old_r)); 312 tcindex_filter_result_init(old_r);
303 313
304 memcpy(p, &cp, sizeof(cp)); 314 memcpy(p, &cp, sizeof(cp));
305 memcpy(r, &cr, sizeof(cr)); 315 r->res = cr.res;
306 316
307 if (r == &new_filter_result) { 317 if (r == &new_filter_result) {
308 struct tcindex_filter **fp; 318 struct tcindex_filter **fp;
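
cls_tcindex.c factors the repeated "zero the result, then init its exts" sequence into tcindex_filter_result_init() and reuses it for fresh results, the scratch copy, and recycled old entries, which is what keeps the exts state consistent across all of them. The generic shape of that refactor (struct contents hypothetical):

#include <stdio.h>
#include <string.h>

struct filter_result {
        int classid;
        int actions[4];
};

/* one place that knows what a blank result looks like */
static void filter_result_init(struct filter_result *r)
{
        memset(r, 0, sizeof(*r));
        r->actions[0] = -1;     /* stand-in for tcf_exts_init() */
}

int main(void)
{
        struct filter_result fresh, recycled = { .classid = 42 };

        filter_result_init(&fresh);     /* new entry */
        filter_result_init(&recycled);  /* wipe an old entry for reuse */
        printf("%d %d\n", fresh.actions[0], recycled.classid);  /* -1 0 */
        return 0;
}
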
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7d09a712cb1f..88f108edfb58 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	ASSERT_RTNL();
+
 	trace_cfg80211_sched_scan_stopped(wiphy);
 
-	rtnl_lock();
 	__cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+	rtnl_lock();
+	cfg80211_sched_scan_stopped_rtnl(wiphy);
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
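
Splitting out cfg80211_sched_scan_stopped_rtnl() lets callers that already hold the RTNL report a stopped scheduled scan without re-taking rtnl_lock() and deadlocking; the original entry point survives as a thin lock-taking wrapper. Here is a userspace sketch of that locked/unlocked split, with a pthread mutex standing in for the RTNL and all names illustrative.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of cfg80211_sched_scan_stopped_rtnl(): caller holds big_lock.
 * The trylock assert is only a loose stand-in for ASSERT_RTNL() -- it
 * checks the mutex is held by someone, assuming a non-recursive mutex. */
static void scan_stopped_locked(void)
{
	assert(pthread_mutex_trylock(&big_lock) != 0);
	printf("sched scan stopped\n");
}

/* Analogue of cfg80211_sched_scan_stopped(): takes the lock itself. */
static void scan_stopped(void)
{
	pthread_mutex_lock(&big_lock);
	scan_stopped_locked();
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	scan_stopped();
	return 0;
}
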
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index acdcb4a81817..3546a77033de 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
 					NULL, 0, NULL, 0,
 					WLAN_STATUS_UNSPECIFIED_FAILURE,
 					false, NULL);
-			cfg80211_sme_free(wdev);
 		}
 		wdev_unlock(wdev);
 	}
@@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 			cfg80211_unhold_bss(bss_from_pub(bss));
 			cfg80211_put_bss(wdev->wiphy, bss);
 		}
+		cfg80211_sme_free(wdev);
 		return;
 	}
 
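
The two sme.c hunks move cfg80211_sme_free() out of the connect worker and into the failure branch of __cfg80211_connect_result(), the one point every failed connection attempt passes through, so the SME state is released for every failure origin instead of only those raised from the work queue. A small sketch of that centralize-the-cleanup pattern follows; the names are hypothetical, not the cfg80211 API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct conn_state {
	char *params;
};

static void conn_free(struct conn_state **conn)
{
	if (*conn) {
		free((*conn)->params);
		free(*conn);
		*conn = NULL;		/* guard against double free */
	}
}

/* Analogue of __cfg80211_connect_result(): the single convergence
 * point where the outcome of an attempt is reported. */
static void connect_result(struct conn_state **conn, bool success)
{
	if (!success) {
		printf("connect failed\n");
		conn_free(conn);	/* freed here on every failure path */
		return;
	}
	printf("connected\n");
}

int main(void)
{
	struct conn_state *conn = calloc(1, sizeof(*conn));

	if (!conn)
		return 1;
	connect_result(&conn, false);	/* conn is freed and NULLed */
	return 0;
}
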
diff --git a/tools/Makefile b/tools/Makefile
index bcae806b0c39..9a617adc6675 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -44,6 +44,9 @@ cpupower: FORCE
 cgroup firewire hv guest usb virtio vm net: FORCE
 	$(call descend,$@)
 
+liblockdep: FORCE
+	$(call descend,lib/lockdep)
+
 libapikfs: FORCE
 	$(call descend,lib/api)
 
@@ -91,6 +94,9 @@ cpupower_clean:
 cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
 	$(call descend,$(@:_clean=),clean)
 
+liblockdep_clean:
+	$(call descend,lib/lockdep,clean)
+
 libapikfs_clean:
 	$(call descend,lib/api,clean)
 
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index cb09d3ff8f58..bba2f5253b6e 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -1,8 +1,7 @@
 # file format version
 FILE_VERSION = 1
 
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
 
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
 install: install_lib
 
 clean:
-	$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+	$(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
 	$(RM) tags TAGS
 
 endif # skip-makefile