-rw-r--r-- arch/sparc/Kconfig | 23
-rw-r--r-- arch/sparc/include/asm/hypervisor.h | 343
-rw-r--r-- arch/sparc/include/asm/iommu_64.h | 28
-rw-r--r-- arch/sparc/kernel/hvapi.c | 1
-rw-r--r-- arch/sparc/kernel/iommu.c | 8
-rw-r--r-- arch/sparc/kernel/iommu_common.h | 1
-rw-r--r-- arch/sparc/kernel/pci_sun4v.c | 418
-rw-r--r-- arch/sparc/kernel/pci_sun4v.h | 21
-rw-r--r-- arch/sparc/kernel/pci_sun4v_asm.S | 68
-rw-r--r-- arch/sparc/kernel/signal_32.c | 4
-rw-r--r-- arch/sparc/mm/init_64.c | 71
-rw-r--r-- arch/tile/kernel/time.c | 4
-rw-r--r-- arch/x86/boot/compressed/Makefile | 5
-rw-r--r-- arch/x86/boot/cpu.c | 6
-rw-r--r-- arch/x86/events/amd/core.c | 8
-rw-r--r-- arch/x86/events/core.c | 10
-rw-r--r-- arch/x86/events/intel/ds.c | 35
-rw-r--r-- arch/x86/events/intel/uncore.c | 8
-rw-r--r-- arch/x86/events/intel/uncore_snb.c | 12
-rw-r--r-- arch/x86/events/perf_event.h | 2
-rw-r--r-- arch/x86/kernel/dumpstack.c | 2
-rw-r--r-- arch/x86/kernel/fpu/core.c | 16
-rw-r--r-- arch/x86/kernel/head_32.S | 9
-rw-r--r-- arch/x86/kernel/sysfb_simplefb.c | 39
-rw-r--r-- arch/x86/kernel/unwind_guess.c | 8
-rw-r--r-- arch/x86/mm/extable.c | 7
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/Makefile | 2
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_wdt.c) | 34
-rw-r--r-- crypto/algif_hash.c | 2
-rw-r--r-- crypto/scatterwalk.c | 4
-rw-r--r-- drivers/clk/berlin/bg2.c | 2
-rw-r--r-- drivers/clk/berlin/bg2q.c | 2
-rw-r--r-- drivers/clk/clk-efm32gg.c | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 12
-rw-r--r-- drivers/clk/sunxi/clk-sunxi.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 9
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 12
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 14
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 64
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 9
-rw-r--r-- drivers/hid/hid-cp2112.c | 115
-rw-r--r-- drivers/hid/hid-lg.c | 14
-rw-r--r-- drivers/hid/hid-magicmouse.c | 12
-rw-r--r-- drivers/hid/hid-rmi.c | 10
-rw-r--r-- drivers/hid/hid-sensor-hub.c | 1
-rw-r--r-- drivers/media/tuners/tuner-xc2028.c | 37
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 1
-rw-r--r-- drivers/mmc/host/sdhci-of-esdhc.c | 14
-rw-r--r-- drivers/mmc/host/sdhci.h | 1
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 16
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 6
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 64
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 37
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 105
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 153
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 118
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 24
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 13
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 24
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/descs.h | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 95
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 28
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 43
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 97
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 72
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 5
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.h | 2
-rw-r--r-- drivers/net/ethernet/sun/sunqe.c | 11
-rw-r--r-- drivers/net/ethernet/sun/sunqe.h | 4
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 95
-rw-r--r-- drivers/net/phy/fixed_phy.c | 2
-rw-r--r-- drivers/net/phy/vitesse.c | 34
-rw-r--r-- drivers/net/virtio_net.c | 5
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r-- drivers/of/of_mdio.c | 6
-rw-r--r-- drivers/phy/phy-twl4030-usb.c | 4
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 15
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 21
-rw-r--r-- drivers/thermal/intel_powerclamp.c | 9
-rw-r--r-- drivers/usb/chipidea/core.c | 1
-rw-r--r-- drivers/usb/chipidea/udc.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 8
-rw-r--r-- drivers/usb/musb/musb_core.c | 147
-rw-r--r-- drivers/usb/musb/musb_core.h | 13
-rw-r--r-- drivers/usb/musb/musb_dsps.c | 58
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 39
-rw-r--r-- drivers/usb/musb/omap2430.c | 10
-rw-r--r-- drivers/usb/musb/tusb6010.c | 6
-rw-r--r-- drivers/usb/serial/cp210x.c | 1
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 6
-rw-r--r-- drivers/usb/storage/transport.c | 7
-rw-r--r-- fs/nfs/callback.c | 2
-rw-r--r-- fs/nfs/nfs4_fs.h | 7
-rw-r--r-- fs/nfs/nfs4proc.c | 38
-rw-r--r-- fs/nfs/nfs4state.c | 1
-rw-r--r-- include/linux/bpf_verifier.h | 5
-rw-r--r-- include/linux/sched.h | 2
-rw-r--r-- include/net/gro_cells.h | 3
-rw-r--r-- include/net/ip_fib.h | 1
-rw-r--r-- include/net/net_namespace.h | 2
-rw-r--r-- init/do_mounts_rd.c | 2
-rw-r--r-- kernel/bpf/verifier.c | 70
-rw-r--r-- kernel/events/core.c | 13
-rw-r--r-- kernel/exit.c | 1
-rw-r--r-- kernel/locking/lockdep_internals.h | 20
-rw-r--r-- kernel/sched/auto_group.c | 36
-rw-r--r-- lib/Kconfig.debug | 3
-rw-r--r-- net/batman-adv/hard-interface.c | 1
-rw-r--r-- net/batman-adv/tp_meter.c | 1
-rw-r--r-- net/core/net_namespace.c | 2
-rw-r--r-- net/core/rtnetlink.c | 22
-rw-r--r-- net/ipv4/fib_frontend.c | 20
-rw-r--r-- net/ipv4/fib_trie.c | 69
-rw-r--r-- net/ipv4/igmp.c | 50
-rw-r--r-- net/ipv4/tcp_cong.c | 4
-rw-r--r-- net/ipv4/udp.c | 6
-rw-r--r-- net/ipv6/ip6_tunnel.c | 13
-rw-r--r-- net/ipv6/udp.c | 6
-rw-r--r-- net/l2tp/l2tp_eth.c | 2
-rw-r--r-- net/l2tp/l2tp_ip.c | 5
-rw-r--r-- net/l2tp/l2tp_ip6.c | 5
-rw-r--r-- net/mac80211/sta_info.c | 2
-rw-r--r-- net/mac80211/tx.c | 14
-rw-r--r-- net/mac80211/vht.c | 16
-rw-r--r-- net/sched/cls_api.c | 5
-rw-r--r-- net/tipc/socket.c | 48
-rw-r--r-- net/unix/af_unix.c | 17
-rw-r--r-- net/wireless/core.h | 1
-rw-r--r-- net/wireless/scan.c | 69
-rw-r--r-- net/wireless/util.c | 3
-rw-r--r-- security/apparmor/domain.c | 6
148 files changed, 2692 insertions(+), 934 deletions(-)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b23c76b42d6e..165ecdd24d22 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select HAVE_ARCH_HARDENED_USERCOPY
+	select PROVE_LOCKING_SMALL if PROVE_LOCKING
 
 config SPARC32
 	def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 
+config ARCH_ATU
+	bool
+	default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+	bool
+	default y if ARCH_ATU
+
 config IOMMU_HELPER
 	bool
 	default y if SPARC64
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y if SPARC64
 
+config FORCE_MAX_ZONEORDER
+	int "Maximum zone order"
+	default "13"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages.  This option selects the largest power of two that the kernel
+	  keeps in the memory allocator.  If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one.  For example,
+	  a value of 13 means that the largest free memory block is 2^12 pages.
+
 source "mm/Kconfig"
 
 if SPARC64
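[Editorial note] A hedged illustration of what the new FORCE_MAX_ZONEORDER default permits; nothing below is part of the patch. MAX_ORDER follows the Kconfig value, and sparc64 uses 8K base pages, so order 12 is the largest single buddy allocation (2^12 pages * 8K = 32M), which is exactly the size of the ATU IOTSB allocated later in this series.

    /* Sketch only: the largest contiguous allocation once
     * MAX_ORDER (= CONFIG_FORCE_MAX_ZONEORDER) is 13.
     */
    #include <linux/gfp.h>

    static void *alloc_largest_block(void)
    {
            /* order 12 = 2^12 pages; with 8K pages this is 32M */
            return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            MAX_ORDER - 1);
    }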
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 666d5ba230d2..73cb8978df58 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
  */
 #define HV_FAST_PCI_MSG_SETVALID	0xd3
 
+/* PCI IOMMU v2 definitions and services
+ *
+ * While the PCI IO definitions above are still valid, IOMMU v2 adds new
+ * PCI IO definitions and services.
+ *
+ *	CTE		Clump Table Entry. First level table entry in the ATU.
+ *
+ *	pci_device_list
+ *			A 32-bit aligned list of pci_devices.
+ *
+ *	pci_device_listp
+ *			real address of a pci_device_list. 32-bit aligned.
+ *
+ *	iotte		IOMMU translation table entry.
+ *
+ *	iotte_attributes
+ *			IO Attributes for IOMMU v2 mappings. In addition to
+ *			read and write, IOMMU v2 supports relaxed ordering.
+ *
+ *	io_page_list	A 64-bit aligned list of real addresses. Each real
+ *			address in an io_page_list must be properly aligned
+ *			to the pagesize of the given IOTSB.
+ *
+ *	io_page_list_p	Real address of an io_page_list, 64-bit aligned.
+ *
+ *	IOTSB		IO Translation Storage Buffer. An aligned table of
+ *			IOTTEs. Each IOTSB has a pagesize, table size, and
+ *			virtual address associated with it that must match
+ *			a pagesize and table size supported by the underlying
+ *			hardware implementation. The alignment requirements
+ *			for an IOTSB depend on the pagesize used for that IOTSB.
+ *			Each IOTTE in an IOTSB maps one pagesize-sized page.
+ *			The size of the IOTSB dictates how large of a virtual
+ *			address space the IOTSB is capable of mapping.
+ *
+ *	iotsb_handle	An opaque identifier for an IOTSB. A devhandle plus
+ *			iotsb_handle represents a binding of an IOTSB to a
+ *			PCI root complex.
+ *
+ *	iotsb_index	Zero-based IOTTE number within an IOTSB.
+ */
+
+/* The index_count argument consists of two fields:
+ * bits 63:48 #iottes and bits 47:0 iotsb_index
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+	(((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
+
+/* pci_iotsb_conf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_CONF
+ * ARG0:	devhandle
+ * ARG1:	r_addr
+ * ARG2:	size
+ * ARG3:	pagesize
+ * ARG4:	iova
+ * RET0:	status
+ * RET1:	iotsb_handle
+ * ERRORS:	EINVAL		Invalid devhandle, size, iova, or pagesize
+ *		EBADALIGN	r_addr is not properly aligned
+ *		ENORADDR	r_addr is not a valid real address
+ *		ETOOMANY	No further IOTSBs may be configured
+ *		EBUSY		Duplicate devhandle, r_addr, iova combination
+ *
+ * Create an IOTSB suitable for the PCI root complex identified by devhandle,
+ * for the DMA virtual address defined by the argument iova.
+ *
+ * r_addr is the properly aligned base address of the IOTSB and size is the
+ * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
+ * being configured. If it contains any values other than zeros then the
+ * behavior is undefined.
+ *
+ * pagesize is the size of each page in the IOTSB. Note that the combination of
+ * size (table size) and pagesize must be valid.
+ *
+ * iova is the DMA virtual address this IOTSB will map.
+ *
+ * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
+ * Once configured, privileged access to the IOTSB memory is prohibited and
+ * creates undefined behavior. The only permitted access is indirect via these
+ * services.
+ */
+#define HV_FAST_PCI_IOTSB_CONF		0x190
+
+/* pci_iotsb_info()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_INFO
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * RET0:	status
+ * RET1:	r_addr
+ * RET2:	size
+ * RET3:	pagesize
+ * RET4:	iova
+ * RET5:	#bound
+ * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
+ *
+ * This service returns configuration information about an IOTSB previously
+ * created with pci_iotsb_conf.
+ *
+ * iotsb_handle value 0 may be used with this service to inquire about the
+ * legacy IOTSB that may or may not exist. If the service succeeds, the return
+ * values describe the legacy IOTSB and I/O virtual addresses mapped by that
+ * table. However, the table base address r_addr may contain the value -1 which
+ * indicates a memory range that cannot be accessed or be reclaimed.
+ *
+ * The return value #bound contains the number of PCI devices that iotsb_handle
+ * is currently bound to.
+ */
+#define HV_FAST_PCI_IOTSB_INFO		0x191
+
+/* pci_iotsb_unconf()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_UNCONF
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle or iotsb_handle
+ *		EBUSY	The IOTSB is bound and may not be unconfigured
+ *
+ * This service unconfigures the IOTSB identified by the devhandle and
+ * iotsb_handle arguments, previously created with pci_iotsb_conf.
+ * The IOTSB must not be currently bound to any device or the service will
+ * fail.
+ *
+ * If the call succeeds, iotsb_handle is no longer valid.
+ */
+#define HV_FAST_PCI_IOTSB_UNCONF	0x192
+
+/* pci_iotsb_bind()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_BIND
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	pci_device
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
+ *		EBUSY	A PCI function is already bound to an IOTSB at the same
+ *			address range as specified by devhandle, iotsb_handle.
+ *
+ * This service binds the PCI function specified by the argument pci_device to
+ * the IOTSB specified by the arguments devhandle and iotsb_handle.
+ *
+ * The PCI device function is bound to the specified IOTSB with the IOVA range
+ * specified when the IOTSB was configured via pci_iotsb_conf. If the function
+ * is already bound then it is unbound first.
+ */
+#define HV_FAST_PCI_IOTSB_BIND		0x193
+
+/* pci_iotsb_unbind()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_UNBIND
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	pci_device
+ * RET0:	status
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or pci_device
+ *		ENOMAP	The PCI function was not bound to the specified IOTSB
+ *
+ * This service unbinds the PCI device specified by the argument pci_device
+ * from the IOTSB identified by the arguments devhandle and iotsb_handle.
+ *
+ * If the PCI device is not bound to the specified IOTSB then this service will
+ * fail with status ENOMAP.
+ */
+#define HV_FAST_PCI_IOTSB_UNBIND	0x194
+
+/* pci_iotsb_get_binding()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_GET_BINDING
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iova
+ * RET0:	status
+ * RET1:	iotsb_handle
+ * ERRORS:	EINVAL	Invalid devhandle, pci_device, or iova
+ *		ENOMAP	The PCI function is not bound to an IOTSB at iova
+ *
+ * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
+ * and DMA virtual address, iova.
+ *
+ * iova must be the base address of a DMA virtual address range as defined by
+ * the iommu-address-ranges property in the root complex device node defined
+ * by the argument devhandle.
+ */
+#define HV_FAST_PCI_IOTSB_GET_BINDING	0x195
+
+/* pci_iotsb_map()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_MAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	index_count
+ * ARG3:	iotte_attributes
+ * ARG4:	io_page_list_p
+ * RET0:	status
+ * RET1:	#mapped
+ * ERRORS:	EINVAL		Invalid devhandle, iotsb_handle, #iottes,
+ *				iotsb_index or iotte_attributes
+ *		EBADALIGN	Improperly aligned io_page_list_p or I/O page
+ *				address in the I/O page list.
+ *		ENORADDR	Invalid io_page_list_p or I/O page address in
+ *				the I/O page list.
+ *
+ * This service creates and flushes mappings in the IOTSB defined by the
+ * arguments devhandle and iotsb_handle.
+ *
+ * The index_count argument consists of two fields. Bits 63:48 contain #iottes
+ * and bits 47:0 contain iotsb_index.
+ *
+ * The first mapping is created in the IOTSB index specified by iotsb_index.
+ * Subsequent mappings are created at iotsb_index+1 and so on.
+ *
+ * The attributes of each mapping are defined by the argument iotte_attributes.
+ *
+ * The io_page_list_p specifies the real address of the 64-bit-aligned list of
+ * #iottes I/O page addresses. Each page address must be a properly aligned
+ * real address of a page to be mapped in the IOTSB. The first entry in the I/O
+ * page list contains the real address of the first page, the 2nd entry for the
+ * 2nd page, and so on.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The return value #mapped is the actual number of mappings created, which may
+ * be less than or equal to the argument #iottes. If the function returns
+ * successfully with a #mapped value less than the requested #iottes then the
+ * caller should continue to invoke the service with updated iotsb_index,
+ * #iottes, and io_page_list_p arguments until all pages are mapped.
+ *
+ * This service must not be used to demap a mapping. In other words, all
+ * mappings must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP		0x196
+
+/* pci_iotsb_map_one()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_MAP_ONE
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	iotte_attributes
+ * ARG4:	r_addr
+ * RET0:	status
+ * ERRORS:	EINVAL		Invalid devhandle, iotsb_handle, iotsb_index
+ *				or iotte_attributes
+ *		EBADALIGN	Improperly aligned r_addr
+ *		ENORADDR	Invalid r_addr
+ *
+ * This service creates and flushes a single mapping in the IOTSB defined by the
+ * arguments devhandle and iotsb_handle.
+ *
+ * The mapping for the page at r_addr is created at the IOTSB index specified by
+ * iotsb_index with the attributes iotte_attributes.
+ *
+ * This service must not be used to demap a mapping. In other words, the mapping
+ * must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP_ONE	0x197
+
+/* pci_iotsb_demap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_DEMAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	#iottes
+ * RET0:	status
+ * RET1:	#unmapped
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index or #iottes
+ *
+ * This service unmaps and flushes up to #iottes mappings starting at index
+ * iotsb_index from the IOTSB defined by the arguments devhandle and
+ * iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
+ * than or equal to the requested number of IOTTEs, #iottes.
+ *
+ * If #unmapped is less than #iottes, the caller should continue to invoke this
+ * service with updated iotsb_index and #iottes arguments until all pages are
+ * demapped.
+ */
+#define HV_FAST_PCI_IOTSB_DEMAP		0x198
+
+/* pci_iotsb_getmap()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_GETMAP
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * RET0:	status
+ * RET1:	r_addr
+ * RET2:	iotte_attributes
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, or iotsb_index
+ *		ENOMAP	No mapping was found
+ *
+ * This service returns the mapping specified by index iotsb_index from the
+ * IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * Upon success, the real address of the mapping shall be returned in
+ * r_addr and the IOTTE mapping attributes shall be returned in
+ * iotte_attributes.
+ *
+ * The return value iotte_attributes may not include optional features used in
+ * the call to create the mapping.
+ */
+#define HV_FAST_PCI_IOTSB_GETMAP	0x199
+
+/* pci_iotsb_sync_mappings()
+ * TRAP:	HV_FAST_TRAP
+ * FUNCTION:	HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
+ * ARG0:	devhandle
+ * ARG1:	iotsb_handle
+ * ARG2:	iotsb_index
+ * ARG3:	#iottes
+ * RET0:	status
+ * RET1:	#synced
+ * ERRORS:	EINVAL	Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
+ *
+ * This service synchronizes #iottes mappings starting at index iotsb_index in
+ * the IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs synchronized is returned in #synced, which may
+ * be less than or equal to the requested number, #iottes.
+ *
+ * If, upon a successful return, #synced is less than #iottes, the caller
+ * should continue to invoke this service with updated iotsb_index and #iottes
+ * arguments until all pages are synchronized.
+ */
+#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS	0x19a
+
 /* Logical Domain Channel services.  */
 
 #define LDC_CHANNEL_DOWN		0
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
 #define HV_GRP_SDIO			0x0108
 #define HV_GRP_SDIO_ERR			0x0109
 #define HV_GRP_REBOOT_DATA		0x0110
+#define HV_GRP_ATU			0x0111
 #define HV_GRP_M7_PERF			0x0114
 #define HV_GRP_NIAG_PERF		0x0200
 #define HV_GRP_FIRE_PERF		0x0201
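[Editorial note] The partial-completion contract spelled out above for pci_iotsb_map() and pci_iotsb_demap() is easiest to see as a retry loop. A minimal sketch using the pci_sun4v_iotsb_map() wrapper this patch adds later; the helper name is illustrative, error reporting is elided, and it assumes the hypervisor makes forward progress on each call:

    static long iotsb_map_all(unsigned long devhandle, unsigned long iotsb_num,
                              unsigned long index, unsigned long niottes,
                              unsigned long attrs, unsigned long pglist_pa)
    {
            long mapped;

            while (niottes) {
                    /* bits 63:48 = #iottes, bits 47:0 = iotsb_index */
                    unsigned long ic = HV_PCI_IOTSB_INDEX_COUNT(niottes, index);

                    if (pci_sun4v_iotsb_map(devhandle, iotsb_num, ic,
                                            attrs, pglist_pa, &mapped) != HV_EOK)
                            return -1;
                    index += mapped;        /* resume where the HV stopped */
                    niottes -= mapped;
                    pglist_pa += mapped * sizeof(u64);
            }
            return 0;
    }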
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index cd0d69fa7592..f24f356f2503 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -24,8 +24,36 @@ struct iommu_arena {
 	unsigned int	limit;
 };
 
+#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
+
+/* Data structures for SPARC ATU architecture */
+struct atu_iotsb {
+	void	*table;		/* IOTSB table base virtual addr */
+	u64	ra;		/* IOTSB table real addr */
+	u64	dvma_size;	/* ranges[3].size or OS selected 32G size */
+	u64	dvma_base;	/* ranges[3].base */
+	u64	table_size;	/* IOTSB table size */
+	u64	page_size;	/* IO PAGE size for IOTSB */
+	u32	iotsb_num;	/* tsbnum is same as iotsb_handle */
+};
+
+struct atu_ranges {
+	u64	base;
+	u64	size;
+};
+
+struct atu {
+	struct	atu_ranges	*ranges;
+	struct	atu_iotsb	*iotsb;
+	struct	iommu_map_table	tbl;
+	u64			base;
+	u64			size;
+	u64			dma_addr_mask;
+};
+
 struct iommu {
 	struct iommu_map_table tbl;
+	struct atu *atu;
 	spinlock_t lock;
 	u32 dma_addr_mask;
 	iopte_t *page_table;
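[Editorial note] For a sense of scale, a back-of-envelope check on ATU_64_SPACE_SIZE (my arithmetic, not part of the patch):

    /* One 8-byte IOTTE per 8K IO page over the fixed 32G DVMA window:
     *
     *      num_iotte  = 0x800000000 / 8192 = 4M entries
     *      table_size = 4M * sizeof(u64)   = 32M
     *
     * i.e. an order-12 page allocation, which is what the Kconfig hunk
     * raising FORCE_MAX_ZONEORDER to 13 makes room for.
     */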
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 662500fa555f..267731234ce8 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
 	{ .group = HV_GRP_SDIO,				},
 	{ .group = HV_GRP_SDIO_ERR,			},
 	{ .group = HV_GRP_REBOOT_DATA,			},
+	{ .group = HV_GRP_ATU,		.flags = FLAG_PRE_API },
 	{ .group = HV_GRP_NIAG_PERF,	.flags = FLAG_PRE_API },
 	{ .group = HV_GRP_FIRE_PERF,			},
 	{ .group = HV_GRP_N2_CPU,			},
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 5c615abff030..852a3291db96 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
 	struct iommu *iommu = dev->archdata.iommu;
 	u64 dma_addr_mask = iommu->dma_addr_mask;
 
-	if (device_mask >= (1UL << 32UL))
-		return 0;
+	if (device_mask > DMA_BIT_MASK(32)) {
+		if (iommu->atu)
+			dma_addr_mask = iommu->atu->dma_addr_mask;
+		else
+			return 0;
+	}
 
 	if ((device_mask & dma_addr_mask) == dma_addr_mask)
 		return 1;
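[Editorial note] For context, a hedged sketch of the driver-side negotiation that now lands in dma_supported() above; the function name is hypothetical and error handling is trimmed. With an ATU present, the 64-bit request succeeds instead of being capped at 32 bits:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int example_probe_dma(struct pci_dev *pdev)
    {
            /* Try 64-bit DMA first; fall back to 32-bit if refused. */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                    return dma_set_mask_and_coherent(&pdev->dev,
                                                     DMA_BIT_MASK(32));
            return 0;
    }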
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index b40cec252905..828493329f68 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -13,7 +13,6 @@
 #include <linux/scatterlist.h>
 #include <linux/device.h>
 #include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index db57d8acdc01..06981cc716b6 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = {
 	{ .major = 1, .minor = 1 },
 };
 
+static unsigned long vatu_major = 1;
+static unsigned long vatu_minor = 1;
+
 #define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
 
 struct iommu_batch {
@@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
 }
 
 /* Interrupts must be disabled. */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
 	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+	u64 *pglist = p->pglist;
+	u64 index_count;
 	unsigned long devhandle = pbm->devhandle;
 	unsigned long prot = p->prot;
 	unsigned long entry = p->entry;
-	u64 *pglist = p->pglist;
 	unsigned long npages = p->npages;
+	unsigned long iotsb_num;
+	unsigned long ret;
+	long num;
 
 	/* VPCI maj=1, min=[0,1] only supports read and write */
 	if (vpci_major < 2)
 		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
 	while (npages != 0) {
-		long num;
-
-		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-					  npages, prot, __pa(pglist));
-		if (unlikely(num < 0)) {
-			if (printk_ratelimit())
-				printk("iommu_batch_flush: IOMMU map of "
-				       "[%08lx:%08llx:%lx:%lx:%lx] failed with "
-				       "status %ld\n",
-				       devhandle, HV_PCI_TSBID(0, entry),
-				       npages, prot, __pa(pglist), num);
-			return -1;
+		if (mask <= DMA_BIT_MASK(32)) {
+			num = pci_sun4v_iommu_map(devhandle,
+						  HV_PCI_TSBID(0, entry),
+						  npages,
+						  prot,
+						  __pa(pglist));
+			if (unlikely(num < 0)) {
+				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+						   __func__,
+						   devhandle,
+						   HV_PCI_TSBID(0, entry),
+						   npages, prot, __pa(pglist),
+						   num);
+				return -1;
+			}
+		} else {
+			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
+			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+			ret = pci_sun4v_iotsb_map(devhandle,
+						  iotsb_num,
+						  index_count,
+						  prot,
+						  __pa(pglist),
+						  &num);
+			if (unlikely(ret != HV_EOK)) {
+				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+						   __func__,
+						   devhandle, iotsb_num,
+						   index_count, prot,
+						   __pa(pglist), ret);
+				return -1;
+			}
 		}
-
 		entry += num;
 		npages -= num;
 		pglist += num;
@@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
 	return 0;
 }
 
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
 	if (p->entry + p->npages == entry)
 		return;
 	if (p->entry != ~0UL)
-		iommu_batch_flush(p);
+		iommu_batch_flush(p, mask);
 	p->entry = entry;
 }
 
 /* Interrupts must be disabled. */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
@@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
 
 	p->pglist[p->npages++] = phys_page;
 	if (p->npages == PGLIST_NENTS)
-		return iommu_batch_flush(p);
+		return iommu_batch_flush(p, mask);
 
 	return 0;
 }
 
 /* Interrupts must be disabled. */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
 {
 	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
 
 	BUG_ON(p->npages >= PGLIST_NENTS);
 
-	return iommu_batch_flush(p);
+	return iommu_batch_flush(p, mask);
 }
 
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t *dma_addrp, gfp_t gfp,
 				   unsigned long attrs)
 {
+	u64 mask;
 	unsigned long flags, order, first_page, npages, n;
 	unsigned long prot = 0;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	struct page *page;
 	void *ret;
 	long entry;
@@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
+
+	mask = dev->coherent_dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto range_alloc_fail;
 
-	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	first_page = __pa(first_page);
 
@@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 			  entry);
 
 	for (n = 0; n < npages; n++) {
-		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
 
-	if (unlikely(iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end(mask) < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
+	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
 	return NULL;
 }
 
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
-			       unsigned long npages)
+unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+				unsigned long iotsb_num,
+				struct pci_bus *bus_dev)
+{
+	struct pci_dev *pdev;
+	unsigned long err;
+	unsigned int bus;
+	unsigned int device;
+	unsigned int fun;
+
+	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
+		if (pdev->subordinate) {
+			/* No need to bind pci bridge */
+			dma_4v_iotsb_bind(devhandle, iotsb_num,
+					  pdev->subordinate);
+		} else {
+			bus = bus_dev->number;
+			device = PCI_SLOT(pdev->devfn);
+			fun = PCI_FUNC(pdev->devfn);
+			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
+						   HV_PCI_DEVICE_BUILD(bus,
+								       device,
+								       fun));
+
+			/* If bind fails for one device it is going to fail
+			 * for the rest of the devices because we are sharing
+			 * the IOTSB. So in case of failure simply return
+			 * with an error.
+			 */
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+			       dma_addr_t dvma, unsigned long iotsb_num,
+			       unsigned long entry, unsigned long npages)
 {
-	u32 devhandle = *(u32 *)demap_arg;
 	unsigned long num, flags;
+	unsigned long ret;
 
 	local_irq_save(flags);
 	do {
-		num = pci_sun4v_iommu_demap(devhandle,
-					    HV_PCI_TSBID(0, entry),
-					    npages);
-
+		if (dvma <= DMA_BIT_MASK(32)) {
+			num = pci_sun4v_iommu_demap(devhandle,
+						    HV_PCI_TSBID(0, entry),
+						    npages);
+		} else {
+			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+						    entry, npages, &num);
+			if (unlikely(ret != HV_EOK)) {
+				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+						   ret);
+			}
+		}
 		entry += num;
 		npages -= num;
 	} while (npages != 0);
@@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	unsigned long order, npages, entry;
+	unsigned long iotsb_num;
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
-	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+
+	if (dvma <= DMA_BIT_MASK(32)) {
+		tbl = &iommu->tbl;
+		iotsb_num = 0; /* we don't care for legacy iommu */
+	} else {
+		tbl = &atu->tbl;
+		iotsb_num = atu->iotsb->iotsb_num;
+	}
+	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 			     unsigned long attrs)
 {
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
+	u64 mask;
 	unsigned long flags, npages, oaddr;
 	unsigned long i, base_paddr;
-	u32 bus_addr, ret;
 	unsigned long prot;
+	dma_addr_t bus_addr, ret;
 	long entry;
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
 
 	if (unlikely(direction == DMA_NONE))
 		goto bad;
@@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
-	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+	mask = *dev->dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
+
+	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto bad;
 
-	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	iommu_batch_start(dev, prot, entry);
 
 	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
-		long err = iommu_batch_add(base_paddr);
+		long err = iommu_batch_add(base_paddr, mask);
 		if (unlikely(err < 0L))
 			goto iommu_map_fail;
 	}
-	if (unlikely(iommu_batch_end() < 0L))
+	if (unlikely(iommu_batch_end(mask) < 0L))
 		goto iommu_map_fail;
 
 	local_irq_restore(flags);
@@ -310,7 +414,7 @@ bad:
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
 
@@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
 	unsigned long npages;
+	unsigned long iotsb_num;
 	long entry;
 	u32 devhandle;
 
@@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
-	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
-	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+
+	if (bus_addr <= DMA_BIT_MASK(32)) {
+		iotsb_num = 0; /* we don't care for legacy iommu */
+		tbl = &iommu->tbl;
+	} else {
+		iotsb_num = atu->iotsb->iotsb_num;
+		tbl = &atu->tbl;
+	}
+	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct iommu *iommu;
+	struct atu *atu;
+	struct iommu_map_table *tbl;
+	u64 mask;
 	unsigned long base_shift;
 	long err;
 
 	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
+	atu = iommu->atu;
+
 	if (nelems == 0 || !iommu)
 		return 0;
 
@@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
+
+	mask = *dev->dma_mask;
+	if (mask <= DMA_BIT_MASK(32))
+		tbl = &iommu->tbl;
+	else
+		tbl = &atu->tbl;
+
+	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+		entry = iommu_tbl_range_alloc(dev, tbl, npages,
 					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == IOMMU_ERROR_CODE)) {
-			if (printk_ratelimit())
-				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
-				       " npages %lx\n", iommu, paddr, npages);
+			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+					   tbl, paddr, npages);
 			goto iommu_map_failed;
 		}
 
-		iommu_batch_new_entry(entry);
+		iommu_batch_new_entry(entry, mask);
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
+		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
 		/* Insert into HW table */
 		paddr &= IO_PAGE_MASK;
 		while (npages--) {
-			err = iommu_batch_add(paddr);
+			err = iommu_batch_add(paddr, mask);
 			if (unlikely(err < 0L))
 				goto iommu_map_failed;
 			paddr += IO_PAGE_SIZE;
@@ -452,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		dma_next = dma_addr + slen;
 	}
 
-	err = iommu_batch_end();
+	err = iommu_batch_end(mask);
 
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
@@ -475,7 +603,7 @@ iommu_map_failed:
 			vaddr = s->dma_address & IO_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
-			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+			iommu_tbl_range_free(tbl, vaddr, npages,
 					     IOMMU_ERROR_CODE);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
@@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
 	struct iommu *iommu;
+	struct atu *atu;
 	unsigned long flags, entry;
+	unsigned long iotsb_num;
 	u32 devhandle;
 
 	BUG_ON(direction == DMA_NONE);
 
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
+	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
 	local_irq_save(flags);
@@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		dma_addr_t dma_handle = sg->dma_address;
 		unsigned int len = sg->dma_length;
 		unsigned long npages;
-		struct iommu_map_table *tbl = &iommu->tbl;
+		struct iommu_map_table *tbl;
 		unsigned long shift = IO_PAGE_SHIFT;
 
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+
+		if (dma_handle <= DMA_BIT_MASK(32)) {
+			iotsb_num = 0; /* we don't care for legacy iommu */
+			tbl = &iommu->tbl;
+		} else {
+			iotsb_num = atu->iotsb->iotsb_num;
+			tbl = &atu->tbl;
+		}
 		entry = ((dma_handle - tbl->table_map_base) >> shift);
-		dma_4v_iommu_demap(&devhandle, entry, npages);
-		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+				   entry, npages);
+		iommu_tbl_range_free(tbl, dma_handle, npages,
 				     IOMMU_ERROR_CODE);
 		sg = sg_next(sg);
 	}
@@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 	return cnt;
 }
 
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+	struct atu *atu = pbm->iommu->atu;
+	struct atu_iotsb *iotsb;
+	void *table;
+	u64 table_size;
+	u64 iotsb_num;
+	unsigned long order;
+	unsigned long err;
+
+	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+	if (!iotsb) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+	atu->iotsb = iotsb;
+
+	/* calculate size of IOTSB */
+	table_size = (atu->size / IO_PAGE_SIZE) * 8;
+	order = get_order(table_size);
+	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!table) {
+		err = -ENOMEM;
+		goto table_failed;
+	}
+	iotsb->table = table;
+	iotsb->ra = __pa(table);
+	iotsb->dvma_size = atu->size;
+	iotsb->dvma_base = atu->base;
+	iotsb->table_size = table_size;
+	iotsb->page_size = IO_PAGE_SIZE;
+
+	/* configure and register IOTSB with HV */
+	err = pci_sun4v_iotsb_conf(pbm->devhandle,
+				   iotsb->ra,
+				   iotsb->table_size,
+				   iotsb->page_size,
+				   iotsb->dvma_base,
+				   &iotsb_num);
+	if (err) {
+		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+		goto iotsb_conf_failed;
+	}
+	iotsb->iotsb_num = iotsb_num;
+
+	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+	if (err) {
+		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+		goto iotsb_conf_failed;
+	}
+
+	return 0;
+
+iotsb_conf_failed:
+	free_pages((unsigned long)table, order);
+table_failed:
+	kfree(iotsb);
+out_err:
+	return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+	struct atu *atu = pbm->iommu->atu;
+	unsigned long err;
+	const u64 *ranges;
+	u64 map_size, num_iotte;
+	u64 dma_mask;
+	const u32 *page_size;
+	int len;
+
+	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+				 &len);
+	if (!ranges) {
+		pr_err(PFX "No iommu-address-ranges\n");
+		return -EINVAL;
+	}
+
+	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+				    NULL);
+	if (!page_size) {
+		pr_err(PFX "No iommu-pagesizes\n");
+		return -EINVAL;
+	}
+
+	/* There are 4 iommu-address-ranges supported. Each range is a pair
+	 * of {base, size}. The ranges[0] and ranges[1] are 32-bit address
+	 * space while ranges[2] and ranges[3] are 64-bit space. We want to
+	 * use the 64-bit address ranges to support 64-bit addressing. Because
+	 * 'size' is the same for ranges[2] and ranges[3] we can select either
+	 * of them for mapping. However, since that size is too large for the
+	 * OS to allocate an IOTSB for, we use a fixed 32G size
+	 * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
+	 * devices to share.
+	 */
+	atu->ranges = (struct atu_ranges *)ranges;
+	atu->base = atu->ranges[3].base;
+	atu->size = ATU_64_SPACE_SIZE;
+
+	/* Create IOTSB */
+	err = pci_sun4v_atu_alloc_iotsb(pbm);
+	if (err) {
+		pr_err(PFX "Error creating ATU IOTSB\n");
+		return err;
+	}
+
+	/* Create ATU iommu map.
+	 * One bit represents one iotte in IOTSB table.
+	 */
+	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
+	num_iotte = atu->size / IO_PAGE_SIZE;
+	map_size = num_iotte / 8;
+	atu->tbl.table_map_base = atu->base;
+	atu->dma_addr_mask = dma_mask;
+	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+	if (!atu->tbl.map)
+		return -ENOMEM;
+
+	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+			    NULL, false /* no large_pool */,
+			    0 /* default npools */,
+			    false /* want span boundary checking */);
+
+	return 0;
+}
+
 static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
 	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
 
 	pci_sun4v_scan_bus(pbm, &op->dev);
 
+	/* If atu_init fails it's not a complete failure; we can still
+	 * continue using the legacy iommu.
+	 */
+	if (pbm->iommu->atu) {
+		err = pci_sun4v_atu_init(pbm);
+		if (err) {
+			kfree(pbm->iommu->atu);
+			pbm->iommu->atu = NULL;
+			pr_err(PFX "ATU init failed, err=%d\n", err);
+		}
+	}
+
 	pbm->next = pci_pbm_root;
 	pci_pbm_root = pbm;
 
@@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op)
 	struct pci_pbm_info *pbm;
 	struct device_node *dp;
 	struct iommu *iommu;
+	struct atu *atu;
 	u32 devhandle;
 	int i, err = -ENODEV;
+	static bool hv_atu = true;
 
 	dp = op->dev.of_node;
 
@@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op)
 		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
 			vpci_major, vpci_minor);
 
+		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+		if (err) {
+			/* don't return an error if we fail to register the
+			 * ATU group, but ATU hcalls won't be available.
+			 */
+			hv_atu = false;
+			pr_err(PFX "Could not register hvapi ATU err=%d\n",
+			       err);
+		} else {
+			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+				vatu_major, vatu_minor);
+		}
+
 		dma_ops = &sun4v_dma_ops;
 	}
 
@@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op)
 	}
 
 	pbm->iommu = iommu;
+	iommu->atu = NULL;
+	if (hv_atu) {
+		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+		if (!atu)
+			pr_err(PFX "Could not allocate atu\n");
+		else
+			iommu->atu = atu;
+	}
 
 	err = pci_sun4v_pbm_init(pbm, op, devhandle);
 	if (err)
@@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op)
 	return 0;
 
 out_free_iommu:
+	kfree(iommu->atu);
 	kfree(pbm->iommu);
 
 out_free_controller:
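[Editorial note] The same mask test selects between the legacy IOMMU table and the ATU table in the alloc, map_page and map_sg paths above, and the unmap paths apply the same split to the DMA address itself, since an address above 4G can only have come from the ATU window. The patch open-codes the rule each time; it amounts to this hypothetical helper:

    /* Hypothetical helper, not in the patch.  dma_supported() has already
     * rejected >32-bit masks on systems without an ATU, so a large mask
     * here implies iommu->atu is valid.
     */
    static struct iommu_map_table *sun4v_dma_tbl(struct iommu *iommu, u64 mask)
    {
            if (mask <= DMA_BIT_MASK(32))
                    return &iommu->tbl;
            return &iommu->atu->tbl;
    }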
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h
index 5642212390b2..22603a4e48bf 100644
--- a/arch/sparc/kernel/pci_sun4v.h
+++ b/arch/sparc/kernel/pci_sun4v.h
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
 				     unsigned long msinum,
 				     unsigned long valid);
 
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+				   unsigned long ra,
+				   unsigned long table_size,
+				   unsigned long page_size,
+				   unsigned long dvma_base,
+				   u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+				   unsigned long iotsb_num,
+				   unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+				  unsigned long iotsb_num,
+				  unsigned long iotsb_index_iottes,
+				  unsigned long io_attributes,
+				  unsigned long io_page_list_pa,
+				  long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+				    unsigned long iotsb_num,
+				    unsigned long iotsb_index,
+				    unsigned long iottes,
+				    unsigned long *demapped);
 #endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S
index e606d46c6815..578f09657916 100644
--- a/arch/sparc/kernel/pci_sun4v_asm.S
+++ b/arch/sparc/kernel/pci_sun4v_asm.S
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
360 mov %o0, %o0 360 mov %o0, %o0
361ENDPROC(pci_sun4v_msg_setvalid) 361ENDPROC(pci_sun4v_msg_setvalid)
362 362
363 /*
364 * %o0: devhandle
365 * %o1: r_addr
366 * %o2: size
367 * %o3: pagesize
368 * %o4: virt
369 * %o5: &iotsb_num/&iotsb_handle
370 *
371 * returns %o0: status
372 * %o1: iotsb_num/iotsb_handle
373 */
374ENTRY(pci_sun4v_iotsb_conf)
375 mov %o5, %g1
376 mov HV_FAST_PCI_IOTSB_CONF, %o5
377 ta HV_FAST_TRAP
378 retl
379 stx %o1, [%g1]
380ENDPROC(pci_sun4v_iotsb_conf)
381
382 /*
383 * %o0: devhandle
384 * %o1: iotsb_num/iotsb_handle
385 * %o2: pci_device
386 *
387 * returns %o0: status
388 */
389ENTRY(pci_sun4v_iotsb_bind)
390 mov HV_FAST_PCI_IOTSB_BIND, %o5
391 ta HV_FAST_TRAP
392 retl
393 nop
394ENDPROC(pci_sun4v_iotsb_bind)
395
396 /*
397 * %o0: devhandle
398 * %o1: iotsb_num/iotsb_handle
399 * %o2: index_count
400 * %o3: iotte_attributes
401 * %o4: io_page_list_p
402 * %o5: &mapped
403 *
404 * returns %o0: status
405 * %o1: #mapped
406 */
407ENTRY(pci_sun4v_iotsb_map)
408 mov %o5, %g1
409 mov HV_FAST_PCI_IOTSB_MAP, %o5
410 ta HV_FAST_TRAP
411 retl
412 stx %o1, [%g1]
413ENDPROC(pci_sun4v_iotsb_map)
414
415 /*
416 * %o0: devhandle
417 * %o1: iotsb_num/iotsb_handle
418 * %o2: iotsb_index
419 * %o3: #iottes
420 * %o4: &demapped
421 *
422 * returns %o0: status
423 * %o1: #demapped
424 */
425ENTRY(pci_sun4v_iotsb_demap)
426 mov HV_FAST_PCI_IOTSB_DEMAP, %o5
427 ta HV_FAST_TRAP
428 retl
429 stx %o1, [%o4]
430ENDPROC(pci_sun4v_iotsb_demap)
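
Each stub follows the sun4v fast-trap ABI: arguments arrive in %o0-%o5, the function number is loaded into %o5 just before `ta HV_FAST_TRAP`, status comes back in %o0 and a value in %o1; wrappers with an out-parameter first save that pointer in %g1 so the `stx %o1, [%g1]` in the retl delay slot can store the result. A toy C model of that status-plus-value convention (the trap itself is faked and the function number below is invented):

#include <stdio.h>

struct hv_ret {
        unsigned long status;   /* %o0 after the trap */
        unsigned long o1;       /* iotsb_num, #mapped, #demapped, ... */
};

static struct hv_ret hv_fast_trap(unsigned long func, unsigned long arg0)
{
        struct hv_ret r = { 0, 42 };    /* pretend the call succeeded */

        (void)func;
        (void)arg0;
        return r;
}

int main(void)
{
        struct hv_ret r = hv_fast_trap(0x190UL /* invented func number */,
                                       0x100);
        unsigned long iotsb_num = 0;

        if (r.status == 0)
                iotsb_num = r.o1;       /* what `stx %o1, [%g1]` stores */
        printf("status=%lu iotsb_num=%lu\n", r.status, iotsb_num);
        return 0;
}
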
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index c3c12efe0bc0..9c0c8fd0b292 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
89 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; 89 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
90 90
91 /* 1. Make sure we are not getting garbage from the user */ 91 /* 1. Make sure we are not getting garbage from the user */
92 if (!invalid_frame_pointer(sf, sizeof(*sf))) 92 if (invalid_frame_pointer(sf, sizeof(*sf)))
93 goto segv_and_exit; 93 goto segv_and_exit;
94 94
95 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) 95 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
150 150
151 synchronize_user_stack(); 151 synchronize_user_stack();
152 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; 152 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
153 if (!invalid_frame_pointer(sf, sizeof(*sf))) 153 if (invalid_frame_pointer(sf, sizeof(*sf)))
154 goto segv; 154 goto segv;
155 155
156 if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) 156 if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
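
The fix above is a plain inverted-predicate bug: invalid_frame_pointer() returns true for a bad frame, so the extra `!` made the kernel deliver SIGSEGV on every *valid* sigreturn frame. A toy reproduction of the corrected form (the alignment rule below is invented for the demo):

#include <stdbool.h>
#include <stdio.h>

/* A predicate named in the negative must be used without a leading '!'. */
static bool invalid_frame_pointer(unsigned long fp)
{
        return (fp & 7) != 0;           /* misaligned == invalid */
}

int main(void)
{
        unsigned long fp = 0x1000;      /* aligned, i.e. a valid frame */

        if (invalid_frame_pointer(fp))  /* fixed form: no extra '!' */
                puts("segv_and_exit");
        else
                puts("frame accepted");
        return 0;
}
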
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 439784b7b7ac..37aa537b3ad8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -802,8 +802,10 @@ struct mdesc_mblock {
802}; 802};
803static struct mdesc_mblock *mblocks; 803static struct mdesc_mblock *mblocks;
804static int num_mblocks; 804static int num_mblocks;
805static int find_numa_node_for_addr(unsigned long pa,
806 struct node_mem_mask *pnode_mask);
805 807
806static unsigned long ra_to_pa(unsigned long addr) 808static unsigned long __init ra_to_pa(unsigned long addr)
807{ 809{
808 int i; 810 int i;
809 811
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
819 return addr; 821 return addr;
820} 822}
821 823
822static int find_node(unsigned long addr) 824static int __init find_node(unsigned long addr)
823{ 825{
826 static bool search_mdesc = true;
827 static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
828 static int last_index;
824 int i; 829 int i;
825 830
826 addr = ra_to_pa(addr); 831 addr = ra_to_pa(addr);
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
830 if ((addr & p->mask) == p->val) 835 if ((addr & p->mask) == p->val)
831 return i; 836 return i;
832 } 837 }
833 /* The following condition has been observed on LDOM guests.*/ 838 /* The following condition has been observed on LDOM guests because
834 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" 839 * node_masks only contains the best latency mask and value.
835 " rule. Some physical memory will be owned by node 0."); 840 * LDOM guest's mdesc can contain a single latency group to
 836 	return 0;	 841	 * cover multiple address ranges. Print a warning message only if the
	 842	 * address cannot be found in node_masks or the mdesc.
843 */
844 if ((search_mdesc) &&
845 ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
846 /* find the available node in the mdesc */
847 last_index = find_numa_node_for_addr(addr, &last_mem_mask);
848 numadbg("find_node: latency group for address 0x%lx is %d\n",
849 addr, last_index);
850 if ((last_index < 0) || (last_index >= num_node_masks)) {
851 /* WARN_ONCE() and use default group 0 */
852 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
853 search_mdesc = false;
854 last_index = 0;
855 }
856 }
857
858 return last_index;
837} 859}
838 860
839static u64 memblock_nid_range(u64 start, u64 end, int *nid) 861static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
840{ 862{
841 *nid = find_node(start); 863 *nid = find_node(start);
842 start += PAGE_SIZE; 864 start += PAGE_SIZE;
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
1160 return numa_latency[from][to]; 1182 return numa_latency[from][to];
1161} 1183}
1162 1184
1185static int find_numa_node_for_addr(unsigned long pa,
1186 struct node_mem_mask *pnode_mask)
1187{
1188 struct mdesc_handle *md = mdesc_grab();
1189 u64 node, arc;
1190 int i = 0;
1191
1192 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1193 if (node == MDESC_NODE_NULL)
1194 goto out;
1195
1196 mdesc_for_each_node_by_name(md, node, "group") {
1197 mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
1198 u64 target = mdesc_arc_target(md, arc);
1199 struct mdesc_mlgroup *m = find_mlgroup(target);
1200
1201 if (!m)
1202 continue;
1203 if ((pa & m->mask) == m->match) {
1204 if (pnode_mask) {
1205 pnode_mask->mask = m->mask;
1206 pnode_mask->val = m->match;
1207 }
1208 mdesc_release(md);
1209 return i;
1210 }
1211 }
1212 i++;
1213 }
1214
1215out:
1216 mdesc_release(md);
1217 return -1;
1218}
1219
1163static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) 1220static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1164{ 1221{
1165 int i; 1222 int i;
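
Both the node_masks[] scan and the new last_mem_mask cache use the same membership test: an address belongs to a latency group when (addr & mask) == val. A worked example with invented mask values:

#include <stdio.h>

struct node_mem_mask {
        unsigned long mask;
        unsigned long val;
};

int main(void)
{
        struct node_mem_mask m = { 0x780000000UL, 0x100000000UL };
        unsigned long addr = 0x100234567UL;

        if ((addr & m.mask) == m.val)
                printf("0x%lx matches this latency group\n", addr);
        else
                printf("0x%lx needs an mdesc lookup\n", addr);
        return 0;
}
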
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 178989e6d3e3..ea960d660917 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
218 */ 218 */
219unsigned long long sched_clock(void) 219unsigned long long sched_clock(void)
220{ 220{
221 return clocksource_cyc2ns(get_cycles(), 221 return mult_frac(get_cycles(),
222 sched_clock_mult, SCHED_CLOCK_SHIFT); 222 sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
223} 223}
224 224
225int setup_profiling_timer(unsigned int multiplier) 225int setup_profiling_timer(unsigned int multiplier)
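
The point of the change: clocksource_cyc2ns() computes (cyc * mult) >> shift, whose intermediate product wraps 64 bits once the cycle counter grows large, while mult_frac(x, n, d) splits x by d first so the partial products stay small. A standalone comparison (the macro body mirrors the one in include/linux/kernel.h):

#include <stdint.h>
#include <stdio.h>

#define mult_frac(x, n, d)                              \
({                                                      \
        typeof(x) q = (x) / (d);                        \
        typeof(x) r = (x) % (d);                        \
        (q * (n)) + ((r * (n)) / (d));                  \
})

int main(void)
{
        uint64_t cycles = 0x123456789abcdefULL; /* large cycle count */
        uint64_t mult = 10000, shift = 10;

        /* naive form: cycles * mult wraps; mult_frac stays exact */
        printf("naive    : %llu\n",
               (unsigned long long)((cycles * mult) >> shift));
        printf("mult_frac: %llu\n",
               (unsigned long long)mult_frac(cycles, mult, 1ULL << shift));
        return 0;
}
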
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 536ccfcc01c6..34d9e15857c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -40,8 +40,8 @@ GCOV_PROFILE := n
40UBSAN_SANITIZE :=n 40UBSAN_SANITIZE :=n
41 41
42LDFLAGS := -m elf_$(UTS_MACHINE) 42LDFLAGS := -m elf_$(UTS_MACHINE)
43ifeq ($(CONFIG_RELOCATABLE),y) 43# Compressed kernel should be built as PIE since it may be loaded at any
44# If kernel is relocatable, build compressed kernel as PIE. 44# address by the bootloader.
45ifeq ($(CONFIG_X86_32),y) 45ifeq ($(CONFIG_X86_32),y)
46LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) 46LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
47else 47else
@@ -51,7 +51,6 @@ else
51LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ 51LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
52 && echo "-z noreloc-overflow -pie --no-dynamic-linker") 52 && echo "-z noreloc-overflow -pie --no-dynamic-linker")
53endif 53endif
54endif
55LDFLAGS_vmlinux := -T 54LDFLAGS_vmlinux := -T
56 55
57hostprogs-y := mkpiggy 56hostprogs-y := mkpiggy
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 26240dde081e..4224ede43b4e 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -87,6 +87,12 @@ int validate_cpu(void)
87 return -1; 87 return -1;
88 } 88 }
89 89
90 if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
91 !has_eflag(X86_EFLAGS_ID)) {
92 printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
93 return -1;
94 }
95
90 if (err_flags) { 96 if (err_flags) {
91 puts("This kernel requires the following features " 97 puts("This kernel requires the following features "
92 "not present on the CPU:\n"); 98 "not present on the CPU:\n");
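
The new check relies on the classic CPUID-presence probe: bit 21 of EFLAGS (the ID flag) can be toggled only on CPUs that implement the CPUID instruction. A self-contained version of the test in the style of the boot code's has_eflag() (x86 + GNU C only):

#include <stdbool.h>
#include <stdio.h>

#define X86_EFLAGS_ID (1UL << 21)

static bool has_eflag(unsigned long mask)
{
        unsigned long f0, f1;

        asm volatile("pushf\n\t"
                     "pushf\n\t"
                     "pop %0\n\t"
                     "mov %0,%1\n\t"
                     "xor %2,%1\n\t"
                     "push %1\n\t"
                     "popf\n\t"
                     "pushf\n\t"
                     "pop %1\n\t"
                     "popf"
                     : "=&r" (f0), "=&r" (f1)
                     : "ri" (mask));
        return !!((f0 ^ f1) & mask);    /* bit stuck => no CPUID */
}

int main(void)
{
        printf("CPUID %savailable\n",
               has_eflag(X86_EFLAGS_ID) ? "" : "un");
        return 0;
}
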
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index f5f4b3fbbbc2..afb222b63cae 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
662 pr_cont("Fam15h "); 662 pr_cont("Fam15h ");
663 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; 663 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
664 break; 664 break;
665 665 case 0x17:
666 pr_cont("Fam17h ");
667 /*
668 * In family 17h, there are no event constraints in the PMC hardware.
669 * We fallback to using default amd_get_event_constraints.
670 */
671 break;
666 default: 672 default:
667 pr_err("core perfctr but no constraints; unknown hardware!\n"); 673 pr_err("core perfctr but no constraints; unknown hardware!\n");
668 return -ENODEV; 674 return -ENODEV;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d31735f37ed7..9d4bf3ab049e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
2352 frame.next_frame = 0; 2352 frame.next_frame = 0;
2353 frame.return_address = 0; 2353 frame.return_address = 0;
2354 2354
2355 if (!access_ok(VERIFY_READ, fp, 8)) 2355 if (!valid_user_frame(fp, sizeof(frame)))
2356 break; 2356 break;
2357 2357
2358 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); 2358 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
2362 if (bytes != 0) 2362 if (bytes != 0)
2363 break; 2363 break;
2364 2364
2365 if (!valid_user_frame(fp, sizeof(frame)))
2366 break;
2367
2368 perf_callchain_store(entry, cs_base + frame.return_address); 2365 perf_callchain_store(entry, cs_base + frame.return_address);
2369 fp = compat_ptr(ss_base + frame.next_frame); 2366 fp = compat_ptr(ss_base + frame.next_frame);
2370 } 2367 }
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
2413 frame.next_frame = NULL; 2410 frame.next_frame = NULL;
2414 frame.return_address = 0; 2411 frame.return_address = 0;
2415 2412
2416 if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2)) 2413 if (!valid_user_frame(fp, sizeof(frame)))
2417 break; 2414 break;
2418 2415
2419 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); 2416 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
2423 if (bytes != 0) 2420 if (bytes != 0)
2424 break; 2421 break;
2425 2422
2426 if (!valid_user_frame(fp, sizeof(frame)))
2427 break;
2428
2429 perf_callchain_store(entry, frame.return_address); 2423 perf_callchain_store(entry, frame.return_address);
2430 fp = (void __user *)frame.next_frame; 2424 fp = (void __user *)frame.next_frame;
2431 } 2425 }
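
Both hunks make the same ordering change: the stricter valid_user_frame() test now runs before __copy_from_user_nmi(), replacing the looser access_ok() check, so nothing is copied through a pointer that fails validation. A skeleton of the fixed loop order, with hypothetical helpers standing in for the perf internals:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct frame {
        unsigned long next_frame;
        unsigned long return_address;
};

static bool valid_user_frame(const void *fp, unsigned long size)
{
        return fp != NULL && size != 0 &&
               ((unsigned long)fp & (sizeof(long) - 1)) == 0;
}

static unsigned long copy_frame(struct frame *dst, const void *fp)
{
        memcpy(dst, fp, sizeof(*dst));  /* models __copy_from_user_nmi() */
        return 0;                       /* 0 bytes left uncopied */
}

int main(void)
{
        struct frame stack[2] = {
                { (unsigned long)&stack[1], 0xdeadbeef },
                { 0, 0 },
        };
        const void *fp = &stack[0];

        while (fp) {
                struct frame f;

                if (!valid_user_frame(fp, sizeof(f)))   /* check first... */
                        break;
                if (copy_frame(&f, fp) != 0)            /* ...then copy */
                        break;
                printf("return_address=0x%lx\n", f.return_address);
                fp = (const void *)f.next_frame;
        }
        return 0;
}
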
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0319311dbdbb..be202390bbd3 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
1108 } 1108 }
1109 1109
1110 /* 1110 /*
1111 * We use the interrupt regs as a base because the PEBS record 1111 * We use the interrupt regs as a base because the PEBS record does not
1112 * does not contain a full regs set, specifically it seems to 1112 * contain a full regs set, specifically it seems to lack segment
1113 * lack segment descriptors, which get used by things like 1113 * descriptors, which get used by things like user_mode().
1114 * user_mode().
1115 * 1114 *
1116 * In the simple case fix up only the IP and BP,SP regs, for 1115 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
1117 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. 1116 *
1118 * A possible PERF_SAMPLE_REGS will have to transfer all regs. 1117 * We must however always use BP,SP from iregs for the unwinder to stay
1118 * sane; the record BP,SP can point into thin air when the record is
1119 * from a previous PMI context or an (I)RET happend between the record
1120 * and PMI.
1119 */ 1121 */
1120 *regs = *iregs; 1122 *regs = *iregs;
1121 regs->flags = pebs->flags; 1123 regs->flags = pebs->flags;
1122 set_linear_ip(regs, pebs->ip); 1124 set_linear_ip(regs, pebs->ip);
1123 regs->bp = pebs->bp;
1124 regs->sp = pebs->sp;
1125 1125
1126 if (sample_type & PERF_SAMPLE_REGS_INTR) { 1126 if (sample_type & PERF_SAMPLE_REGS_INTR) {
1127 regs->ax = pebs->ax; 1127 regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
1130 regs->dx = pebs->dx; 1130 regs->dx = pebs->dx;
1131 regs->si = pebs->si; 1131 regs->si = pebs->si;
1132 regs->di = pebs->di; 1132 regs->di = pebs->di;
1133 regs->bp = pebs->bp;
1134 regs->sp = pebs->sp;
1135 1133
1136 regs->flags = pebs->flags; 1134 /*
1135 * Per the above; only set BP,SP if we don't need callchains.
1136 *
1137 * XXX: does this make sense?
1138 */
1139 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
1140 regs->bp = pebs->bp;
1141 regs->sp = pebs->sp;
1142 }
1143
1144 /*
1145 * Preserve PERF_EFLAGS_VM from set_linear_ip().
1146 */
1147 regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
1137#ifndef CONFIG_X86_32 1148#ifndef CONFIG_X86_32
1138 regs->r8 = pebs->r8; 1149 regs->r8 = pebs->r8;
1139 regs->r9 = pebs->r9; 1150 regs->r9 = pebs->r9;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index efca2685d876..dbaaf7dc8373 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
319 */ 319 */
320static int uncore_pmu_event_init(struct perf_event *event); 320static int uncore_pmu_event_init(struct perf_event *event);
321 321
322static bool is_uncore_event(struct perf_event *event) 322static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
323{ 323{
324 return event->pmu->event_init == uncore_pmu_event_init; 324 return &box->pmu->pmu == event->pmu;
325} 325}
326 326
327static int 327static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
340 340
341 n = box->n_events; 341 n = box->n_events;
342 342
343 if (is_uncore_event(leader)) { 343 if (is_box_event(box, leader)) {
344 box->event_list[n] = leader; 344 box->event_list[n] = leader;
345 n++; 345 n++;
346 } 346 }
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
349 return n; 349 return n;
350 350
351 list_for_each_entry(event, &leader->sibling_list, group_entry) { 351 list_for_each_entry(event, &leader->sibling_list, group_entry) {
352 if (!is_uncore_event(event) || 352 if (!is_box_event(box, event) ||
353 event->state <= PERF_EVENT_STATE_OFF) 353 event->state <= PERF_EVENT_STATE_OFF)
354 continue; 354 continue;
355 355
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 81195cca7eae..a3dcc12bef4a 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -490,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
490 490
491 snb_uncore_imc_event_start(event, 0); 491 snb_uncore_imc_event_start(event, 0);
492 492
493 box->n_events++;
494
495 return 0; 493 return 0;
496} 494}
497 495
498static void snb_uncore_imc_event_del(struct perf_event *event, int flags) 496static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
499{ 497{
500 struct intel_uncore_box *box = uncore_event_to_box(event);
501 int i;
502
503 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); 498 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
504
505 for (i = 0; i < box->n_events; i++) {
506 if (event == box->event_list[i]) {
507 --box->n_events;
508 break;
509 }
510 }
511} 499}
512 500
513int snb_pci2phy_map_init(int devid) 501int snb_pci2phy_map_init(int devid)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 5874d8de1f8d..a77ee026643d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
113 * Per register state. 113 * Per register state.
114 */ 114 */
115struct er_account { 115struct er_account {
116 raw_spinlock_t lock; /* per-core: protect structure */ 116 raw_spinlock_t lock; /* per-core: protect structure */
117 u64 config; /* extra MSR config */ 117 u64 config; /* extra MSR config */
118 u64 reg; /* extra MSR number */ 118 u64 reg; /* extra MSR number */
119 atomic_t ref; /* reference count */ 119 atomic_t ref; /* reference count */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9b7cf5c28f5f..85f854b98a9d 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
112 for (; stack < stack_info.end; stack++) { 112 for (; stack < stack_info.end; stack++) {
113 unsigned long real_addr; 113 unsigned long real_addr;
114 int reliable = 0; 114 int reliable = 0;
115 unsigned long addr = *stack; 115 unsigned long addr = READ_ONCE_NOCHECK(*stack);
116 unsigned long *ret_addr_p = 116 unsigned long *ret_addr_p =
117 unwind_get_return_address_ptr(&state); 117 unwind_get_return_address_ptr(&state);
118 118
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 47004010ad5d..ebb4e95fbd74 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu)
521{ 521{
522 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ 522 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
523 523
524 if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) { 524 fpu__drop(fpu);
525 /* FPU state will be reallocated lazily at the first use. */ 525
526 fpu__drop(fpu); 526 /*
527 } else { 527 * Make sure fpstate is cleared and initialized.
528 if (!fpu->fpstate_active) { 528 */
529 fpu__activate_curr(fpu); 529 if (static_cpu_has(X86_FEATURE_FPU)) {
530 user_fpu_begin(); 530 fpu__activate_curr(fpu);
531 } 531 user_fpu_begin();
532 copy_init_fpstate_to_fpregs(); 532 copy_init_fpstate_to_fpregs();
533 } 533 }
534} 534}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6b2f0264af3..2dabea46f039 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS
665initial_pg_pmd: 665initial_pg_pmd:
666 .fill 1024*KPMDS,4,0 666 .fill 1024*KPMDS,4,0
667#else 667#else
668ENTRY(initial_page_table) 668.globl initial_page_table
669initial_page_table:
669 .fill 1024,4,0 670 .fill 1024,4,0
670#endif 671#endif
671initial_pg_fixmap: 672initial_pg_fixmap:
672 .fill 1024,4,0 673 .fill 1024,4,0
673ENTRY(empty_zero_page) 674.globl empty_zero_page
675empty_zero_page:
674 .fill 4096,1,0 676 .fill 4096,1,0
675ENTRY(swapper_pg_dir) 677.globl swapper_pg_dir
678swapper_pg_dir:
676 .fill 1024,4,0 679 .fill 1024,4,0
677EXPORT_SYMBOL(empty_zero_page) 680EXPORT_SYMBOL(empty_zero_page)
678 681
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 764a29f84de7..85195d447a92 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
66{ 66{
67 struct platform_device *pd; 67 struct platform_device *pd;
68 struct resource res; 68 struct resource res;
69 unsigned long len; 69 u64 base, size;
70 u32 length;
70 71
71 /* don't use lfb_size as it may contain the whole VMEM instead of only 72 /*
72 * the part that is occupied by the framebuffer */ 73 * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
73 len = mode->height * mode->stride; 74 * upper half of the base address. Assemble the address, then make sure
74 len = PAGE_ALIGN(len); 75 * it is valid and we can actually access it.
75 if (len > (u64)si->lfb_size << 16) { 76 */
77 base = si->lfb_base;
78 if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
79 base |= (u64)si->ext_lfb_base << 32;
80 if (!base || (u64)(resource_size_t)base != base) {
81 printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
82 return -EINVAL;
83 }
84
85 /*
86 * Don't use lfb_size as IORESOURCE size, since it may contain the
87 * entire VMEM, and thus require huge mappings. Use just the part we
88 * need, that is, the part where the framebuffer is located. But verify
89 * that it does not exceed the advertised VMEM.
90 * Note that in case of VBE, the lfb_size is shifted by 16 bits for
91 * historical reasons.
92 */
93 size = si->lfb_size;
94 if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
95 size <<= 16;
96 length = mode->height * mode->stride;
97 length = PAGE_ALIGN(length);
98 if (length > size) {
76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); 99 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
77 return -EINVAL; 100 return -EINVAL;
78 } 101 }
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
81 memset(&res, 0, sizeof(res)); 104 memset(&res, 0, sizeof(res));
82 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 105 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
83 res.name = simplefb_resname; 106 res.name = simplefb_resname;
84 res.start = si->lfb_base; 107 res.start = base;
85 res.end = si->lfb_base + len - 1; 108 res.end = res.start + length - 1;
86 if (res.end <= res.start) 109 if (res.end <= res.start)
87 return -EINVAL; 110 return -EINVAL;
88 111
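
A worked example of the address/size assembly done above: the upper half of the base lives in ext_lfb_base when the 64BIT_BASE capability bit is set, and VBE stores lfb_size shifted right by 16. All values below, including the capability bit, are illustrative:

#include <stdint.h>
#include <stdio.h>

#define VIDEO_CAPABILITY_64BIT_BASE (1 << 1)    /* assumed value */

int main(void)
{
        uint32_t lfb_base = 0x90000000, ext_lfb_base = 0x1;
        uint32_t capabilities = VIDEO_CAPABILITY_64BIT_BASE;
        uint32_t lfb_size = 0x300;              /* VBE: size >> 16 */
        uint64_t base = lfb_base, size = lfb_size;

        if (capabilities & VIDEO_CAPABILITY_64BIT_BASE)
                base |= (uint64_t)ext_lfb_base << 32;
        size <<= 16;                            /* undo the VBE quirk */

        printf("base=0x%llx size=0x%llx (%llu MiB)\n",
               (unsigned long long)base, (unsigned long long)size,
               (unsigned long long)(size >> 20));
        return 0;
}
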
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 2d721e533cf4..b80e8bf43cc6 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -7,11 +7,13 @@
7 7
8unsigned long unwind_get_return_address(struct unwind_state *state) 8unsigned long unwind_get_return_address(struct unwind_state *state)
9{ 9{
10 unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
11
10 if (unwind_done(state)) 12 if (unwind_done(state))
11 return 0; 13 return 0;
12 14
13 return ftrace_graph_ret_addr(state->task, &state->graph_idx, 15 return ftrace_graph_ret_addr(state->task, &state->graph_idx,
14 *state->sp, state->sp); 16 addr, state->sp);
15} 17}
16EXPORT_SYMBOL_GPL(unwind_get_return_address); 18EXPORT_SYMBOL_GPL(unwind_get_return_address);
17 19
@@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state)
23 return false; 25 return false;
24 26
25 do { 27 do {
28 unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
29
26 for (state->sp++; state->sp < info->end; state->sp++) 30 for (state->sp++; state->sp < info->end; state->sp++)
27 if (__kernel_text_address(*state->sp)) 31 if (__kernel_text_address(addr))
28 return true; 32 return true;
29 33
30 state->sp = info->next_sp; 34 state->sp = info->next_sp;
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 79ae939970d3..fcd06f7526de 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
135 if (early_recursion_flag > 2) 135 if (early_recursion_flag > 2)
136 goto halt_loop; 136 goto halt_loop;
137 137
138 if (regs->cs != __KERNEL_CS) 138 /*
139 * Old CPUs leave the high bits of CS on the stack
140 * undefined. I'm not sure which CPUs do this, but at least
141 * the 486 DX works this way.
142 */
143 if ((regs->cs & 0xFFFF) != __KERNEL_CS)
139 goto fail; 144 goto fail;
140 145
141 /* 146 /*
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 429d08be7848..dd6cfa4ad3ac 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
28obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o 28obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
29# MISC Devices 29# MISC Devices
30obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o 30obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
31obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o 31obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index de734134bc8d..3f1f1c77d090 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * platform_wdt.c: Watchdog platform library file 2 * Intel Merrifield watchdog platform device library file
3 * 3 *
4 * (C) Copyright 2014 Intel Corporation 4 * (C) Copyright 2014 Intel Corporation
5 * Author: David Cohen <david.a.cohen@linux.intel.com> 5 * Author: David Cohen <david.a.cohen@linux.intel.com>
@@ -14,7 +14,9 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/platform_data/intel-mid_wdt.h> 16#include <linux/platform_data/intel-mid_wdt.h>
17
17#include <asm/intel-mid.h> 18#include <asm/intel-mid.h>
19#include <asm/intel_scu_ipc.h>
18#include <asm/io_apic.h> 20#include <asm/io_apic.h>
19 21
20#define TANGIER_EXT_TIMER0_MSI 15 22#define TANGIER_EXT_TIMER0_MSI 15
@@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
50 .probe = tangier_probe, 52 .probe = tangier_probe,
51}; 53};
52 54
53static int __init register_mid_wdt(void) 55static int wdt_scu_status_change(struct notifier_block *nb,
56 unsigned long code, void *data)
54{ 57{
55 if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) { 58 if (code == SCU_DOWN) {
56 wdt_dev.dev.platform_data = &tangier_pdata; 59 platform_device_unregister(&wdt_dev);
57 return platform_device_register(&wdt_dev); 60 return 0;
58 } 61 }
59 62
60 return -ENODEV; 63 return platform_device_register(&wdt_dev);
61} 64}
62 65
66static struct notifier_block wdt_scu_notifier = {
67 .notifier_call = wdt_scu_status_change,
68};
69
70static int __init register_mid_wdt(void)
71{
72 if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
73 return -ENODEV;
74
75 wdt_dev.dev.platform_data = &tangier_pdata;
76
77 /*
 78	 * We need to be sure that the SCU IPC is ready before the watchdog device
79 * can be registered:
80 */
81 intel_scu_notifier_add(&wdt_scu_notifier);
82
83 return 0;
84}
63rootfs_initcall(register_mid_wdt); 85rootfs_initcall(register_mid_wdt);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 05e21b464433..d19b09cdf284 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -214,7 +214,7 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
214 214
215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); 215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
216 216
217 if (!result) { 217 if (!result && !ctx->more) {
218 err = af_alg_wait_for_completion( 218 err = af_alg_wait_for_completion(
219 crypto_ahash_init(&ctx->req), 219 crypto_ahash_init(&ctx->req),
220 &ctx->completion); 220 &ctx->completion);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 52ce17a3dd63..c16c94f88733 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
68 68
69 sg = scatterwalk_ffwd(tmp, sg, start); 69 sg = scatterwalk_ffwd(tmp, sg, start);
70 70
71 if (sg_page(sg) == virt_to_page(buf) &&
72 sg->offset == offset_in_page(buf))
73 return;
74
75 scatterwalk_start(&walk, sg); 71 scatterwalk_start(&walk, sg);
76 scatterwalk_copychunks(buf, &walk, nbytes, out); 72 scatterwalk_copychunks(buf, &walk, nbytes, out);
77 scatterwalk_done(&walk, out, 0); 73 scatterwalk_done(&walk, out, 0);
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index edf3b96b3b73..1d99292e2039 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
685 } 685 }
686 686
687 /* register clk-provider */ 687 /* register clk-provider */
688 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 688 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
689 689
690 return; 690 return;
691 691
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 0718e831475f..3b784b593afd 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
382 } 382 }
383 383
384 /* register clk-provider */ 384 /* register clk-provider */
385 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 385 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
386 386
387 return; 387 return;
388 388
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 8802a2dd56ac..f674778fb3ac 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
82 hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0", 82 hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
83 "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); 83 "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
84 84
85 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 85 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
86} 86}
87CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); 87CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 79596463e0d9..4a82a49cff5e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
191static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu", 191static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
192 0x050, 0, 3, axi_div_table, 0); 192 0x050, 0, 3, axi_div_table, 0);
193 193
194#define SUN6I_A31_AHB1_REG 0x054
195
194static const char * const ahb1_parents[] = { "osc32k", "osc24M", 196static const char * const ahb1_parents[] = { "osc32k", "osc24M",
195 "axi", "pll-periph" }; 197 "axi", "pll-periph" };
196 198
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
1230 val &= BIT(16); 1232 val &= BIT(16);
1231 writel(val, reg + SUN6I_A31_PLL_MIPI_REG); 1233 writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
1232 1234
1235 /* Force AHB1 to PLL6 / 3 */
1236 val = readl(reg + SUN6I_A31_AHB1_REG);
1237 /* set PLL6 pre-div = 3 */
1238 val &= ~GENMASK(7, 6);
1239 val |= 0x2 << 6;
1240 /* select PLL6 / pre-div */
1241 val &= ~GENMASK(13, 12);
1242 val |= 0x3 << 12;
1243 writel(val, reg + SUN6I_A31_AHB1_REG);
1244
1233 sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); 1245 sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
1234 1246
1235 ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, 1247 ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
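
The fixup is a read-modify-write on two bit fields: clear bits 7:6 and write 2 (per the comment, register value 2 selects a pre-divider of 3), then clear bits 13:12 and write 3 to pick PLL6 / pre-div as the AHB1 source. The same arithmetic on a plain variable (GENMASK() here is a 32-bit stand-in for the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

int main(void)
{
        uint32_t val = 0xffffffff;      /* pretend readl() returned this */

        val &= ~GENMASK(7, 6);          /* clear PLL6 pre-divider field */
        val |= 0x2 << 6;                /* register value 2 => divide by 3 */
        val &= ~GENMASK(13, 12);        /* clear AHB1 clock source field */
        val |= 0x3 << 12;               /* 0x3 => PLL6 / pre-div */

        printf("val=0x%08x\n", val);    /* would be written back by writel() */
        return 0;
}
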
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 838b22aa8b67..f2c9274b8bd5 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
373 else 373 else
374 calcp = 3; 374 calcp = 3;
375 375
376 calcm = (req->parent_rate >> calcp) - 1; 376 calcm = (div >> calcp) - 1;
377 377
378 req->rate = (req->parent_rate >> calcp) / (calcm + 1); 378 req->rate = (req->parent_rate >> calcp) / (calcm + 1);
379 req->m = calcm; 379 req->m = calcm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index dae35a96a694..02ca5dd978f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -34,6 +34,7 @@ struct amdgpu_atpx {
34 34
35static struct amdgpu_atpx_priv { 35static struct amdgpu_atpx_priv {
36 bool atpx_detected; 36 bool atpx_detected;
37 bool bridge_pm_usable;
37 /* handle for device - and atpx */ 38 /* handle for device - and atpx */
38 acpi_handle dhandle; 39 acpi_handle dhandle;
39 acpi_handle other_handle; 40 acpi_handle other_handle;
@@ -205,7 +206,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
205 atpx->is_hybrid = false; 206 atpx->is_hybrid = false;
206 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { 207 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
207 printk("ATPX Hybrid Graphics\n"); 208 printk("ATPX Hybrid Graphics\n");
208 atpx->functions.power_cntl = false; 209 /*
210 * Disable legacy PM methods only when pcie port PM is usable,
211 * otherwise the device might fail to power off or power on.
212 */
213 atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
209 atpx->is_hybrid = true; 214 atpx->is_hybrid = true;
210 } 215 }
211 216
@@ -480,6 +485,7 @@ static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id,
480 */ 485 */
481static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev) 486static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
482{ 487{
488 struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
483 acpi_handle dhandle, atpx_handle; 489 acpi_handle dhandle, atpx_handle;
484 acpi_status status; 490 acpi_status status;
485 491
@@ -494,6 +500,7 @@ static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
494 } 500 }
495 amdgpu_atpx_priv.dhandle = dhandle; 501 amdgpu_atpx_priv.dhandle = dhandle;
496 amdgpu_atpx_priv.atpx.handle = atpx_handle; 502 amdgpu_atpx_priv.atpx.handle = atpx_handle;
503 amdgpu_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
497 return true; 504 return true;
498} 505}
499 506
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 13f2b705ea49..08cd0bd3ebe5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2984,19 +2984,19 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
2984 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) 2984 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
2985 data->highest_mclk = memory_clock; 2985 data->highest_mclk = memory_clock;
2986 2986
2987 performance_level = &(ps->performance_levels
2988 [ps->performance_level_count++]);
2989
2990 PP_ASSERT_WITH_CODE( 2987 PP_ASSERT_WITH_CODE(
2991 (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), 2988 (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2992 "Performance levels exceeds SMC limit!", 2989 "Performance levels exceeds SMC limit!",
2993 return -EINVAL); 2990 return -EINVAL);
2994 2991
2995 PP_ASSERT_WITH_CODE( 2992 PP_ASSERT_WITH_CODE(
2996 (ps->performance_level_count <= 2993 (ps->performance_level_count <
2997 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 2994 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2998 "Performance levels exceeds Driver limit!", 2995 "Performance levels exceeds Driver limit, Skip!",
2999 return -EINVAL); 2996 return 0);
2997
2998 performance_level = &(ps->performance_levels
2999 [ps->performance_level_count++]);
3000 3000
3001 /* Performance levels are arranged from low to high. */ 3001 /* Performance levels are arranged from low to high. */
3002 performance_level->memory_clock = memory_clock; 3002 performance_level->memory_clock = memory_clock;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 48019ae22ddb..28341b32067f 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -150,15 +150,14 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
150 clk_prepare_enable(hdlcd->clk); 150 clk_prepare_enable(hdlcd->clk);
151 hdlcd_crtc_mode_set_nofb(crtc); 151 hdlcd_crtc_mode_set_nofb(crtc);
152 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); 152 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
153 drm_crtc_vblank_on(crtc);
153} 154}
154 155
155static void hdlcd_crtc_disable(struct drm_crtc *crtc) 156static void hdlcd_crtc_disable(struct drm_crtc *crtc)
156{ 157{
157 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); 158 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
158 159
159 if (!crtc->state->active) 160 drm_crtc_vblank_off(crtc);
160 return;
161
162 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); 161 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
163 clk_disable_unprepare(hdlcd->clk); 162 clk_disable_unprepare(hdlcd->clk);
164} 163}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e8fb6ef947ee..38eaa63afb31 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1907,6 +1907,8 @@ err_disable_pm_runtime:
1907err_hdmiphy: 1907err_hdmiphy:
1908 if (hdata->hdmiphy_port) 1908 if (hdata->hdmiphy_port)
1909 put_device(&hdata->hdmiphy_port->dev); 1909 put_device(&hdata->hdmiphy_port->dev);
1910 if (hdata->regs_hdmiphy)
1911 iounmap(hdata->regs_hdmiphy);
1910err_ddc: 1912err_ddc:
1911 put_device(&hdata->ddc_adpt->dev); 1913 put_device(&hdata->ddc_adpt->dev);
1912 1914
@@ -1929,6 +1931,9 @@ static int hdmi_remove(struct platform_device *pdev)
1929 if (hdata->hdmiphy_port) 1931 if (hdata->hdmiphy_port)
1930 put_device(&hdata->hdmiphy_port->dev); 1932 put_device(&hdata->hdmiphy_port->dev);
1931 1933
1934 if (hdata->regs_hdmiphy)
1935 iounmap(hdata->regs_hdmiphy);
1936
1932 put_device(&hdata->ddc_adpt->dev); 1937 put_device(&hdata->ddc_adpt->dev);
1933 1938
1934 return 0; 1939 return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index f75c5b5a536c..c70310206ac5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -251,13 +251,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
251 if (irq < 0) 251 if (irq < 0)
252 return irq; 252 return irq;
253 253
254 ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
255 IRQF_TRIGGER_NONE, dev_name(dev), priv);
256 if (ret < 0) {
257 dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
258 return ret;
259 }
260
261 comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); 254 comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
262 if (comp_id < 0) { 255 if (comp_id < 0) {
263 dev_err(dev, "Failed to identify by alias: %d\n", comp_id); 256 dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
@@ -273,6 +266,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
273 266
274 platform_set_drvdata(pdev, priv); 267 platform_set_drvdata(pdev, priv);
275 268
269 ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
270 IRQF_TRIGGER_NONE, dev_name(dev), priv);
271 if (ret < 0) {
272 dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
273 return ret;
274 }
275
276 ret = component_add(dev, &mtk_disp_ovl_component_ops); 276 ret = component_add(dev, &mtk_disp_ovl_component_ops);
277 if (ret) 277 if (ret)
278 dev_err(dev, "Failed to add component: %d\n", ret); 278 dev_err(dev, "Failed to add component: %d\n", ret);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index df33b3ca6ffd..48cc01fd20c7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -123,7 +123,7 @@ static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
123 unsigned int bpc) 123 unsigned int bpc)
124{ 124{
125 writel(w << 16 | h, comp->regs + DISP_OD_SIZE); 125 writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
126 writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE); 126 writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
127 mtk_dither_set(comp, bpc, DISP_OD_CFG); 127 mtk_dither_set(comp, bpc, DISP_OD_CFG);
128} 128}
129 129
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 28b2044ed9f2..eaa5a2240c0c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -86,7 +86,7 @@
86 86
87#define DSI_PHY_TIMECON0 0x110 87#define DSI_PHY_TIMECON0 0x110
88#define LPX (0xff << 0) 88#define LPX (0xff << 0)
89#define HS_PRPR (0xff << 8) 89#define HS_PREP (0xff << 8)
90#define HS_ZERO (0xff << 16) 90#define HS_ZERO (0xff << 16)
91#define HS_TRAIL (0xff << 24) 91#define HS_TRAIL (0xff << 24)
92 92
@@ -102,10 +102,16 @@
102#define CLK_TRAIL (0xff << 24) 102#define CLK_TRAIL (0xff << 24)
103 103
104#define DSI_PHY_TIMECON3 0x11c 104#define DSI_PHY_TIMECON3 0x11c
105#define CLK_HS_PRPR (0xff << 0) 105#define CLK_HS_PREP (0xff << 0)
106#define CLK_HS_POST (0xff << 8) 106#define CLK_HS_POST (0xff << 8)
107#define CLK_HS_EXIT (0xff << 16) 107#define CLK_HS_EXIT (0xff << 16)
108 108
109#define T_LPX 5
110#define T_HS_PREP 6
111#define T_HS_TRAIL 8
112#define T_HS_EXIT 7
113#define T_HS_ZERO 10
114
109#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) 115#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
110 116
111struct phy; 117struct phy;
@@ -161,20 +167,18 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
161static void dsi_phy_timconfig(struct mtk_dsi *dsi) 167static void dsi_phy_timconfig(struct mtk_dsi *dsi)
162{ 168{
163 u32 timcon0, timcon1, timcon2, timcon3; 169 u32 timcon0, timcon1, timcon2, timcon3;
164 unsigned int ui, cycle_time; 170 u32 ui, cycle_time;
165 unsigned int lpx;
166 171
167 ui = 1000 / dsi->data_rate + 0x01; 172 ui = 1000 / dsi->data_rate + 0x01;
168 cycle_time = 8000 / dsi->data_rate + 0x01; 173 cycle_time = 8000 / dsi->data_rate + 0x01;
169 lpx = 5;
170 174
171 timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx; 175 timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
172 timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 | 176 timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
173 (4 * lpx); 177 T_HS_EXIT << 24;
174 timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | 178 timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
175 (NS_TO_CYCLE(0x150, cycle_time) << 16); 179 (NS_TO_CYCLE(0x150, cycle_time) << 16);
176 timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 | 180 timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
177 NS_TO_CYCLE(0x40, cycle_time); 181 NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
178 182
179 writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); 183 writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
180 writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); 184 writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -202,19 +206,47 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
202{ 206{
203 struct device *dev = dsi->dev; 207 struct device *dev = dsi->dev;
204 int ret; 208 int ret;
209 u64 pixel_clock, total_bits;
210 u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
205 211
206 if (++dsi->refcount != 1) 212 if (++dsi->refcount != 1)
207 return 0; 213 return 0;
208 214
215 switch (dsi->format) {
216 case MIPI_DSI_FMT_RGB565:
217 bit_per_pixel = 16;
218 break;
219 case MIPI_DSI_FMT_RGB666_PACKED:
220 bit_per_pixel = 18;
221 break;
222 case MIPI_DSI_FMT_RGB666:
223 case MIPI_DSI_FMT_RGB888:
224 default:
225 bit_per_pixel = 24;
226 break;
227 }
228
209 /** 229 /**
210 * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio; 230 * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
211 * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000. 231 * htotal_time = htotal * byte_per_pixel / num_lanes
212 * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi. 232 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
213 * we set mipi_ratio is 1.05. 233 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
234 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
214 */ 235 */
215 dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10); 236 pixel_clock = dsi->vm.pixelclock * 1000;
237 htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
238 dsi->vm.hsync_len;
239 htotal_bits = htotal * bit_per_pixel;
240
241 overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
242 T_HS_EXIT;
243 overhead_bits = overhead_cycles * dsi->lanes * 8;
244 total_bits = htotal_bits + overhead_bits;
245
246 dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
247 htotal * dsi->lanes);
216 248
217 ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000); 249 ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
218 if (ret < 0) { 250 if (ret < 0) {
219 dev_err(dev, "Failed to set data rate: %d\n", ret); 251 dev_err(dev, "Failed to set data rate: %d\n", ret);
220 goto err_refcount; 252 goto err_refcount;
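
The replacement data-rate computation folds mipi_ratio into pure integer math: data_rate = pixel_clock * (htotal_bits + overhead_bits) / (htotal * lanes), rounded up. A standalone recomputation with invented RGB888 panel timings (the T_* cycle counts are the ones defined earlier in this file):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t pixelclock_khz = 148500;               /* vm.pixelclock */
        uint64_t pixel_clock = pixelclock_khz * 1000;   /* Hz */
        uint32_t htotal = 1920 + 148 + 88 + 44;         /* act+hbp+hfp+hsa */
        uint32_t bit_per_pixel = 24, lanes = 4;

        /* T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + T_HS_EXIT */
        uint32_t overhead_cycles = 5 + 6 + 10 + 8 + 7;
        uint64_t htotal_bits = (uint64_t)htotal * bit_per_pixel;
        uint64_t overhead_bits = (uint64_t)overhead_cycles * lanes * 8;
        uint64_t total_bits = htotal_bits + overhead_bits;

        uint64_t data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
                                              (uint64_t)htotal * lanes);

        printf("data_rate = %llu Hz per lane\n",
               (unsigned long long)data_rate);
        return 0;
}
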
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2fdcd04bc93f..4129b12521a6 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -34,6 +34,7 @@ struct radeon_atpx {
34 34
35static struct radeon_atpx_priv { 35static struct radeon_atpx_priv {
36 bool atpx_detected; 36 bool atpx_detected;
37 bool bridge_pm_usable;
37 /* handle for device - and atpx */ 38 /* handle for device - and atpx */
38 acpi_handle dhandle; 39 acpi_handle dhandle;
39 struct radeon_atpx atpx; 40 struct radeon_atpx atpx;
@@ -203,7 +204,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
203 atpx->is_hybrid = false; 204 atpx->is_hybrid = false;
204 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { 205 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
205 printk("ATPX Hybrid Graphics\n"); 206 printk("ATPX Hybrid Graphics\n");
206 atpx->functions.power_cntl = false; 207 /*
208 * Disable legacy PM methods only when pcie port PM is usable,
209 * otherwise the device might fail to power off or power on.
210 */
211 atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
207 atpx->is_hybrid = true; 212 atpx->is_hybrid = true;
208 } 213 }
209 214
@@ -474,6 +479,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
474 */ 479 */
475static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 480static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
476{ 481{
482 struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
477 acpi_handle dhandle, atpx_handle; 483 acpi_handle dhandle, atpx_handle;
478 acpi_status status; 484 acpi_status status;
479 485
@@ -487,6 +493,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
487 493
488 radeon_atpx_priv.dhandle = dhandle; 494 radeon_atpx_priv.dhandle = dhandle;
489 radeon_atpx_priv.atpx.handle = atpx_handle; 495 radeon_atpx_priv.atpx.handle = atpx_handle;
496 radeon_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
490 return true; 497 return true;
491} 498}
492 499
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 086d8a507157..60d30203a5fa 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -32,6 +32,11 @@
32#include <linux/usb/ch9.h> 32#include <linux/usb/ch9.h>
33#include "hid-ids.h" 33#include "hid-ids.h"
34 34
35#define CP2112_REPORT_MAX_LENGTH 64
36#define CP2112_GPIO_CONFIG_LENGTH 5
37#define CP2112_GPIO_GET_LENGTH 2
38#define CP2112_GPIO_SET_LENGTH 3
39
35enum { 40enum {
36 CP2112_GPIO_CONFIG = 0x02, 41 CP2112_GPIO_CONFIG = 0x02,
37 CP2112_GPIO_GET = 0x03, 42 CP2112_GPIO_GET = 0x03,
@@ -161,6 +166,8 @@ struct cp2112_device {
161 atomic_t read_avail; 166 atomic_t read_avail;
162 atomic_t xfer_avail; 167 atomic_t xfer_avail;
163 struct gpio_chip gc; 168 struct gpio_chip gc;
169 u8 *in_out_buffer;
170 spinlock_t lock;
164}; 171};
165 172
166static int gpio_push_pull = 0xFF; 173static int gpio_push_pull = 0xFF;
@@ -171,62 +178,86 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
171{ 178{
172 struct cp2112_device *dev = gpiochip_get_data(chip); 179 struct cp2112_device *dev = gpiochip_get_data(chip);
173 struct hid_device *hdev = dev->hdev; 180 struct hid_device *hdev = dev->hdev;
174 u8 buf[5]; 181 u8 *buf = dev->in_out_buffer;
182 unsigned long flags;
175 int ret; 183 int ret;
176 184
185 spin_lock_irqsave(&dev->lock, flags);
186
177 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 187 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
178 sizeof(buf), HID_FEATURE_REPORT, 188 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
179 HID_REQ_GET_REPORT); 189 HID_REQ_GET_REPORT);
180 if (ret != sizeof(buf)) { 190 if (ret != CP2112_GPIO_CONFIG_LENGTH) {
181 hid_err(hdev, "error requesting GPIO config: %d\n", ret); 191 hid_err(hdev, "error requesting GPIO config: %d\n", ret);
182 return ret; 192 goto exit;
183 } 193 }
184 194
185 buf[1] &= ~(1 << offset); 195 buf[1] &= ~(1 << offset);
186 buf[2] = gpio_push_pull; 196 buf[2] = gpio_push_pull;
187 197
188 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), 198 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
189 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 199 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
200 HID_REQ_SET_REPORT);
190 if (ret < 0) { 201 if (ret < 0) {
191 hid_err(hdev, "error setting GPIO config: %d\n", ret); 202 hid_err(hdev, "error setting GPIO config: %d\n", ret);
192 return ret; 203 goto exit;
193 } 204 }
194 205
195 return 0; 206 ret = 0;
207
208exit:
209 spin_unlock_irqrestore(&dev->lock, flags);
210 return ret <= 0 ? ret : -EIO;
196} 211}
197 212
198static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 213static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
199{ 214{
200 struct cp2112_device *dev = gpiochip_get_data(chip); 215 struct cp2112_device *dev = gpiochip_get_data(chip);
201 struct hid_device *hdev = dev->hdev; 216 struct hid_device *hdev = dev->hdev;
202 u8 buf[3]; 217 u8 *buf = dev->in_out_buffer;
218 unsigned long flags;
203 int ret; 219 int ret;
204 220
221 spin_lock_irqsave(&dev->lock, flags);
222
205 buf[0] = CP2112_GPIO_SET; 223 buf[0] = CP2112_GPIO_SET;
206 buf[1] = value ? 0xff : 0; 224 buf[1] = value ? 0xff : 0;
207 buf[2] = 1 << offset; 225 buf[2] = 1 << offset;
208 226
209 ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf), 227 ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
210 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 228 CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
229 HID_REQ_SET_REPORT);
211 if (ret < 0) 230 if (ret < 0)
212 hid_err(hdev, "error setting GPIO values: %d\n", ret); 231 hid_err(hdev, "error setting GPIO values: %d\n", ret);
232
233 spin_unlock_irqrestore(&dev->lock, flags);
213} 234}
214 235
215static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset) 236static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
216{ 237{
217 struct cp2112_device *dev = gpiochip_get_data(chip); 238 struct cp2112_device *dev = gpiochip_get_data(chip);
218 struct hid_device *hdev = dev->hdev; 239 struct hid_device *hdev = dev->hdev;
219 u8 buf[2]; 240 u8 *buf = dev->in_out_buffer;
241 unsigned long flags;
220 int ret; 242 int ret;
221 243
222 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf), 244 spin_lock_irqsave(&dev->lock, flags);
223 HID_FEATURE_REPORT, HID_REQ_GET_REPORT); 245
224 if (ret != sizeof(buf)) { 246 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
247 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
248 HID_REQ_GET_REPORT);
249 if (ret != CP2112_GPIO_GET_LENGTH) {
225 hid_err(hdev, "error requesting GPIO values: %d\n", ret); 250 hid_err(hdev, "error requesting GPIO values: %d\n", ret);
226 return ret; 251 ret = ret < 0 ? ret : -EIO;
252 goto exit;
227 } 253 }
228 254
229 return (buf[1] >> offset) & 1; 255 ret = (buf[1] >> offset) & 1;
256
257exit:
258 spin_unlock_irqrestore(&dev->lock, flags);
259
260 return ret;
230} 261}
231 262
232static int cp2112_gpio_direction_output(struct gpio_chip *chip, 263static int cp2112_gpio_direction_output(struct gpio_chip *chip,
@@ -234,27 +265,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
234{ 265{
235 struct cp2112_device *dev = gpiochip_get_data(chip); 266 struct cp2112_device *dev = gpiochip_get_data(chip);
236 struct hid_device *hdev = dev->hdev; 267 struct hid_device *hdev = dev->hdev;
237 u8 buf[5]; 268 u8 *buf = dev->in_out_buffer;
269 unsigned long flags;
238 int ret; 270 int ret;
239 271
272 spin_lock_irqsave(&dev->lock, flags);
273
240 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 274 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
241 sizeof(buf), HID_FEATURE_REPORT, 275 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
242 HID_REQ_GET_REPORT); 276 HID_REQ_GET_REPORT);
243 if (ret != sizeof(buf)) { 277 if (ret != CP2112_GPIO_CONFIG_LENGTH) {
244 hid_err(hdev, "error requesting GPIO config: %d\n", ret); 278 hid_err(hdev, "error requesting GPIO config: %d\n", ret);
245 return ret; 279 goto fail;
246 } 280 }
247 281
248 buf[1] |= 1 << offset; 282 buf[1] |= 1 << offset;
249 buf[2] = gpio_push_pull; 283 buf[2] = gpio_push_pull;
250 284
251 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), 285 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
252 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 286 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
287 HID_REQ_SET_REPORT);
253 if (ret < 0) { 288 if (ret < 0) {
254 hid_err(hdev, "error setting GPIO config: %d\n", ret); 289 hid_err(hdev, "error setting GPIO config: %d\n", ret);
255 return ret; 290 goto fail;
256 } 291 }
257 292
293 spin_unlock_irqrestore(&dev->lock, flags);
294
258 /* 295 /*
259 * Set gpio value when output direction is already set, 296 * Set gpio value when output direction is already set,
260 * as specified in AN495, Rev. 0.2, cpt. 4.4 297 * as specified in AN495, Rev. 0.2, cpt. 4.4
@@ -262,6 +299,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
262 cp2112_gpio_set(chip, offset, value); 299 cp2112_gpio_set(chip, offset, value);
263 300
264 return 0; 301 return 0;
302
303fail:
304 spin_unlock_irqrestore(&dev->lock, flags);
305 return ret < 0 ? ret : -EIO;
265} 306}
266 307
267static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, 308static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1007,6 +1048,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1007 struct cp2112_smbus_config_report config; 1048 struct cp2112_smbus_config_report config;
1008 int ret; 1049 int ret;
1009 1050
1051 dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
1052 if (!dev)
1053 return -ENOMEM;
1054
1055 dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
1056 GFP_KERNEL);
1057 if (!dev->in_out_buffer)
1058 return -ENOMEM;
1059
1060 spin_lock_init(&dev->lock);
1061
1010 ret = hid_parse(hdev); 1062 ret = hid_parse(hdev);
1011 if (ret) { 1063 if (ret) {
1012 hid_err(hdev, "parse failed\n"); 1064 hid_err(hdev, "parse failed\n");
@@ -1063,12 +1115,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1063 goto err_power_normal; 1115 goto err_power_normal;
1064 } 1116 }
1065 1117
1066 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1067 if (!dev) {
1068 ret = -ENOMEM;
1069 goto err_power_normal;
1070 }
1071
1072 hid_set_drvdata(hdev, (void *)dev); 1118 hid_set_drvdata(hdev, (void *)dev);
1073 dev->hdev = hdev; 1119 dev->hdev = hdev;
1074 dev->adap.owner = THIS_MODULE; 1120 dev->adap.owner = THIS_MODULE;
@@ -1087,7 +1133,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1087 1133
1088 if (ret) { 1134 if (ret) {
1089 hid_err(hdev, "error registering i2c adapter\n"); 1135 hid_err(hdev, "error registering i2c adapter\n");
1090 goto err_free_dev; 1136 goto err_power_normal;
1091 } 1137 }
1092 1138
1093 hid_dbg(hdev, "adapter registered\n"); 1139 hid_dbg(hdev, "adapter registered\n");
@@ -1123,8 +1169,6 @@ err_gpiochip_remove:
1123 gpiochip_remove(&dev->gc); 1169 gpiochip_remove(&dev->gc);
1124err_free_i2c: 1170err_free_i2c:
1125 i2c_del_adapter(&dev->adap); 1171 i2c_del_adapter(&dev->adap);
1126err_free_dev:
1127 kfree(dev);
1128err_power_normal: 1172err_power_normal:
1129 hid_hw_power(hdev, PM_HINT_NORMAL); 1173 hid_hw_power(hdev, PM_HINT_NORMAL);
1130err_hid_close: 1174err_hid_close:
@@ -1149,7 +1193,6 @@ static void cp2112_remove(struct hid_device *hdev)
1149 */ 1193 */
1150 hid_hw_close(hdev); 1194 hid_hw_close(hdev);
1151 hid_hw_stop(hdev); 1195 hid_hw_stop(hdev);
1152 kfree(dev);
1153} 1196}
1154 1197
1155static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, 1198static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
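Note on the cp2112 changes above: the driver context and the transfer buffer move from a late kzalloc()/kfree() pair to device-managed allocations made before hid_parse(), which is what lets the err_free_dev label and the kfree() in cp2112_remove() disappear. A minimal sketch of the pattern (my_dev and the 64-byte size are illustrative, not the driver's real layout):

        struct my_dev *dev;

        /* freed automatically on driver unbind: no error-path kfree(),
         * no kfree() in remove() */
        dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->buf = devm_kzalloc(&hdev->dev, 64, GFP_KERNEL);
        if (!dev->buf)
                return -ENOMEM;

        spin_lock_init(&dev->lock);
        hid_set_drvdata(hdev, dev);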
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 76f644deb0a7..c5c5fbe9d605 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
756 756
757 /* Setup wireless link with Logitech Wii wheel */ 757 /* Setup wireless link with Logitech Wii wheel */
758 if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { 758 if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
759 unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 759 const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
760 u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
760 761
761 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), 762 if (!buf) {
762 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 763 ret = -ENOMEM;
764 goto err_free;
765 }
763 766
767 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
768 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
764 if (ret >= 0) { 769 if (ret >= 0) {
765 /* insert a little delay of 10 jiffies ~ 40ms */ 770 /* insert a little delay of 10 jiffies ~ 40ms */
766 wait_queue_head_t wait; 771 wait_queue_head_t wait;
@@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
772 buf[1] = 0xB2; 777 buf[1] = 0xB2;
773 get_random_bytes(&buf[2], 2); 778 get_random_bytes(&buf[2], 2);
774 779
775 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), 780 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
776 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 781 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
777 } 782 }
783 kfree(buf);
778 } 784 }
779 785
780 if (drv_data->quirks & LG_FF) 786 if (drv_data->quirks & LG_FF)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index d6fa496d0ca2..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev,
493static int magicmouse_probe(struct hid_device *hdev, 493static int magicmouse_probe(struct hid_device *hdev,
494 const struct hid_device_id *id) 494 const struct hid_device_id *id)
495{ 495{
496 __u8 feature[] = { 0xd7, 0x01 }; 496 const u8 feature[] = { 0xd7, 0x01 };
497 u8 *buf;
497 struct magicmouse_sc *msc; 498 struct magicmouse_sc *msc;
498 struct hid_report *report; 499 struct hid_report *report;
499 int ret; 500 int ret;
@@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev,
544 } 545 }
545 report->size = 6; 546 report->size = 6;
546 547
548 buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
549 if (!buf) {
550 ret = -ENOMEM;
551 goto err_stop_hw;
552 }
553
547 /* 554 /*
 548 * Some devices respond with 'invalid report id' when feature 555 * Some devices respond with 'invalid report id' when feature
549 * report switching it into multitouch mode is sent to it. 556 * report switching it into multitouch mode is sent to it.
@@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev,
552 * but there seems to be no other way of switching the mode. 559 * but there seems to be no other way of switching the mode.
553 * Thus the super-ugly hacky success check below. 560 * Thus the super-ugly hacky success check below.
554 */ 561 */
555 ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature), 562 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
556 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 563 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
564 kfree(buf);
557 if (ret != -EIO && ret != sizeof(feature)) { 565 if (ret != -EIO && ret != sizeof(feature)) {
558 hid_err(hdev, "unable to request touch data (%d)\n", ret); 566 hid_err(hdev, "unable to request touch data (%d)\n", ret);
559 goto err_stop_hw; 567 goto err_stop_hw;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9cd2ca34a6be..be89bcbf6a71 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page)
188static int rmi_set_mode(struct hid_device *hdev, u8 mode) 188static int rmi_set_mode(struct hid_device *hdev, u8 mode)
189{ 189{
190 int ret; 190 int ret;
191 u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; 191 const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
192 u8 *buf;
192 193
193 ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf, 194 buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL);
195 if (!buf)
196 return -ENOMEM;
197
198 ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf,
194 sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 199 sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
200 kfree(buf);
195 if (ret < 0) { 201 if (ret < 0) {
196 dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, 202 dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
197 ret); 203 ret);
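The hid-lg, hid-magicmouse and hid-rmi hunks are all instances of the same fix: the report handed to hid_hw_raw_request() moves from the stack to a kmemdup()'d heap copy, because the underlying transport may DMA from the buffer and stack memory is not guaranteed to be DMA-able. The idiom, sketched with illustrative report bytes:

        static const u8 report[] = { 0xd7, 0x01 };      /* illustrative */
        u8 *buf;
        int ret;

        buf = kmemdup(report, sizeof(report), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* heap copy is DMA-capable; the length still comes from the
         * original array since kmemdup() carries no size of its own */
        ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(report),
                                 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
        kfree(buf);
        return ret < 0 ? ret : 0;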
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index c5c3d6111729..60875625cbdf 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
212 __s32 value; 212 __s32 value;
213 int ret = 0; 213 int ret = 0;
214 214
215 memset(buffer, 0, buffer_size);
215 mutex_lock(&data->mutex); 216 mutex_lock(&data->mutex);
216 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); 217 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
217 if (!report || (field_index >= report->maxfield)) { 218 if (!report || (field_index >= report->maxfield)) {
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 317ef63ee789..8d96a22647b3 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
281 int i; 281 int i;
282 tuner_dbg("%s called\n", __func__); 282 tuner_dbg("%s called\n", __func__);
283 283
284 /* free allocated f/w string */
285 if (priv->fname != firmware_name)
286 kfree(priv->fname);
287 priv->fname = NULL;
288
289 priv->state = XC2028_NO_FIRMWARE;
290 memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
291
284 if (!priv->firm) 292 if (!priv->firm)
285 return; 293 return;
286 294
@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
291 299
292 priv->firm = NULL; 300 priv->firm = NULL;
293 priv->firm_size = 0; 301 priv->firm_size = 0;
294 priv->state = XC2028_NO_FIRMWARE;
295
296 memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
297} 302}
298 303
299static int load_all_firmwares(struct dvb_frontend *fe, 304static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@ read_not_reliable:
884 return 0; 889 return 0;
885 890
886fail: 891fail:
887 priv->state = XC2028_NO_FIRMWARE; 892 free_firmware(priv);
888 893
889 memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
890 if (retry_count < 8) { 894 if (retry_count < 8) {
891 msleep(50); 895 msleep(50);
892 retry_count++; 896 retry_count++;
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
1332 mutex_lock(&xc2028_list_mutex); 1336 mutex_lock(&xc2028_list_mutex);
1333 1337
1334 /* only perform final cleanup if this is the last instance */ 1338 /* only perform final cleanup if this is the last instance */
1335 if (hybrid_tuner_report_instance_count(priv) == 1) { 1339 if (hybrid_tuner_report_instance_count(priv) == 1)
1336 free_firmware(priv); 1340 free_firmware(priv);
1337 kfree(priv->ctrl.fname);
1338 priv->ctrl.fname = NULL;
1339 }
1340 1341
1341 if (priv) 1342 if (priv)
1342 hybrid_tuner_release_state(priv); 1343 hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1399 1400
1400 /* 1401 /*
1401 * Copy the config data. 1402 * Copy the config data.
1402 * For the firmware name, keep a local copy of the string,
1403 * in order to avoid troubles during device release.
1404 */ 1403 */
1405 kfree(priv->ctrl.fname);
1406 priv->ctrl.fname = NULL;
1407 memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); 1404 memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
1408 if (p->fname) {
1409 priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
1410 if (priv->ctrl.fname == NULL) {
1411 rc = -ENOMEM;
1412 goto unlock;
1413 }
1414 }
1415 1405
1416 /* 1406 /*
1417 * If firmware name changed, frees firmware. As free_firmware will 1407 * If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1426 1416
1427 if (priv->state == XC2028_NO_FIRMWARE) { 1417 if (priv->state == XC2028_NO_FIRMWARE) {
1428 if (!firmware_name[0]) 1418 if (!firmware_name[0])
1429 priv->fname = priv->ctrl.fname; 1419 priv->fname = kstrdup(p->fname, GFP_KERNEL);
1430 else 1420 else
1431 priv->fname = firmware_name; 1421 priv->fname = firmware_name;
1432 1422
1423 if (!priv->fname) {
1424 rc = -ENOMEM;
1425 goto unlock;
1426 }
1427
1433 rc = request_firmware_nowait(THIS_MODULE, 1, 1428 rc = request_firmware_nowait(THIS_MODULE, 1,
1434 priv->fname, 1429 priv->fname,
1435 priv->i2c_props.adap->dev.parent, 1430 priv->i2c_props.adap->dev.parent,
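The xc2028 rework gives priv->fname a single owner: the string is either borrowed from the firmware_name module parameter or kstrdup()'d, and it is released in exactly one place, free_firmware(), which also resets the firmware state. A sketch of the alias-guard idiom (p->fname as the caller-supplied name, per the code above):

        /* setup: borrow the module parameter, or take an owned copy */
        if (firmware_name[0])
                priv->fname = firmware_name;    /* borrowed, never freed */
        else
                priv->fname = kstrdup(p->fname, GFP_KERNEL);    /* owned */
        if (!priv->fname)
                return -ENOMEM;

        /* teardown, in free_firmware(): free only what we own */
        if (priv->fname != firmware_name)
                kfree(priv->fname);
        priv->fname = NULL;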
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 50a674be6655..df478ae72e23 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1058,6 +1058,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1058 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1058 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1059 1059
1060 if (host->dma_ops->start(host, sg_len)) { 1060 if (host->dma_ops->start(host, sg_len)) {
1061 host->dma_ops->stop(host);
1061 /* We can't do DMA, try PIO for this one */ 1062 /* We can't do DMA, try PIO for this one */
1062 dev_dbg(host->dev, 1063 dev_dbg(host->dev,
1063 "%s: fall back to PIO mode for current transfer\n", 1064 "%s: fall back to PIO mode for current transfer\n",
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fb71c866eacc..1bb11e4a9fe5 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
66 return ret; 66 return ret;
67 } 67 }
68 } 68 }
69 /*
70 * The DAT[3:0] line signal levels and the CMD line signal level are
71 * not compatible with standard SDHC register. The line signal levels
72 * DAT[7:0] are at bits 31:24 and the command line signal level is at
73 * bit 23. All other bits are the same as in the standard SDHC
74 * register.
75 */
76 if (spec_reg == SDHCI_PRESENT_STATE) {
77 ret = value & 0x000fffff;
78 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
79 ret |= (value << 1) & SDHCI_CMD_LVL;
80 return ret;
81 }
82
69 ret = value; 83 ret = value;
70 return ret; 84 return ret;
71} 85}
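The new SDHCI_PRESENT_STATE branch only shuffles bits: eSDHC places DAT[7:0] at bits 31:24 and CMD at bit 23, while the standard layout expects DAT[3:0] at bits 23:20 (SDHCI_DATA_LVL_MASK) and CMD at bit 24 (SDHCI_CMD_LVL). A standalone C check of the remap on a sample value with all lines high:

        #include <stdint.h>
        #include <stdio.h>

        #define SDHCI_DATA_LVL_MASK     0x00F00000      /* DAT[3:0], bits 23:20 */
        #define SDHCI_CMD_LVL           0x01000000      /* CMD, bit 24 */

        int main(void)
        {
                uint32_t value = 0xFF800000;    /* eSDHC: DAT[7:0]=0xFF, CMD=1 */
                uint32_t ret;

                ret  = value & 0x000fffff;                 /* low bits unchanged */
                ret |= (value >> 4) & SDHCI_DATA_LVL_MASK; /* 31:24 down to 27:20, masked */
                ret |= (value << 1) & SDHCI_CMD_LVL;       /* bit 23 up to bit 24 */

                printf("0x%08x\n", ret);        /* prints 0x01f00000 */
                return 0;
        }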
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 766df17fb7eb..2570455b219a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -73,6 +73,7 @@
73#define SDHCI_DATA_LVL_MASK 0x00F00000 73#define SDHCI_DATA_LVL_MASK 0x00F00000
74#define SDHCI_DATA_LVL_SHIFT 20 74#define SDHCI_DATA_LVL_SHIFT 20
75#define SDHCI_DATA_0_LVL_MASK 0x00100000 75#define SDHCI_DATA_0_LVL_MASK 0x00100000
76#define SDHCI_CMD_LVL 0x01000000
76 77
77#define SDHCI_HOST_CONTROL 0x28 78#define SDHCI_HOST_CONTROL 0x28
78#define SDHCI_CTRL_LED 0x01 79#define SDHCI_CTRL_LED 0x01
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 7717b19dc806..947adda3397d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
962 962
963 vl->members |= BIT(port) | BIT(cpu_port); 963 vl->members |= BIT(port) | BIT(cpu_port);
964 if (untagged) 964 if (untagged)
965 vl->untag |= BIT(port) | BIT(cpu_port); 965 vl->untag |= BIT(port);
966 else 966 else
967 vl->untag &= ~(BIT(port) | BIT(cpu_port)); 967 vl->untag &= ~BIT(port);
968 vl->untag &= ~BIT(cpu_port);
968 969
969 b53_set_vlan_entry(dev, vid, vl); 970 b53_set_vlan_entry(dev, vid, vl);
970 b53_fast_age_vlan(dev, vid); 971 b53_fast_age_vlan(dev, vid);
@@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
973 if (pvid) { 974 if (pvid) {
974 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 975 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
975 vlan->vid_end); 976 vlan->vid_end);
976 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
977 vlan->vid_end);
978 b53_fast_age_vlan(dev, vid); 977 b53_fast_age_vlan(dev, vid);
979 } 978 }
980} 979}
@@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
984{ 983{
985 struct b53_device *dev = ds->priv; 984 struct b53_device *dev = ds->priv;
986 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 985 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
987 unsigned int cpu_port = dev->cpu_port;
988 struct b53_vlan *vl; 986 struct b53_vlan *vl;
989 u16 vid; 987 u16 vid;
990 u16 pvid; 988 u16 pvid;
@@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
997 b53_get_vlan_entry(dev, vid, vl); 995 b53_get_vlan_entry(dev, vid, vl);
998 996
999 vl->members &= ~BIT(port); 997 vl->members &= ~BIT(port);
1000 if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
1001 vl->members = 0;
1002 998
1003 if (pvid == vid) { 999 if (pvid == vid) {
1004 if (is5325(dev) || is5365(dev)) 1000 if (is5325(dev) || is5365(dev))
@@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
1007 pvid = 0; 1003 pvid = 0;
1008 } 1004 }
1009 1005
1010 if (untagged) { 1006 if (untagged)
1011 vl->untag &= ~(BIT(port)); 1007 vl->untag &= ~(BIT(port));
1012 if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
1013 vl->untag = 0;
1014 }
1015 1008
1016 b53_set_vlan_entry(dev, vid, vl); 1009 b53_set_vlan_entry(dev, vid, vl);
1017 b53_fast_age_vlan(dev, vid); 1010 b53_fast_age_vlan(dev, vid);
1018 } 1011 }
1019 1012
1020 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1013 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1021 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
1022 b53_fast_age_vlan(dev, pvid); 1014 b53_fast_age_vlan(dev, pvid);
1023 1015
1024 return 0; 1016 return 0;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b0da9693f28a..be865b4dada2 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
460 if (ndev->flags & IFF_ALLMULTI) { 460 if (ndev->flags & IFF_ALLMULTI) {
461 arc_reg_set(priv, R_LAFL, ~0); 461 arc_reg_set(priv, R_LAFL, ~0);
462 arc_reg_set(priv, R_LAFH, ~0); 462 arc_reg_set(priv, R_LAFH, ~0);
463 } else { 463 } else if (ndev->flags & IFF_MULTICAST) {
464 struct netdev_hw_addr *ha; 464 struct netdev_hw_addr *ha;
465 unsigned int filter[2] = { 0, 0 }; 465 unsigned int filter[2] = { 0, 0 };
466 int bit; 466 int bit;
@@ -472,6 +472,9 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
472 472
473 arc_reg_set(priv, R_LAFL, filter[0]); 473 arc_reg_set(priv, R_LAFL, filter[0]);
474 arc_reg_set(priv, R_LAFH, filter[1]); 474 arc_reg_set(priv, R_LAFH, filter[1]);
475 } else {
476 arc_reg_set(priv, R_LAFL, 0);
477 arc_reg_set(priv, R_LAFH, 0);
475 } 478 }
476 } 479 }
477} 480}
@@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
764 ndev->netdev_ops = &arc_emac_netdev_ops; 767 ndev->netdev_ops = &arc_emac_netdev_ops;
765 ndev->ethtool_ops = &arc_emac_ethtool_ops; 768 ndev->ethtool_ops = &arc_emac_ethtool_ops;
766 ndev->watchdog_timeo = TX_TIMEOUT; 769 ndev->watchdog_timeo = TX_TIMEOUT;
767 /* FIXME :: no multicast support yet */
768 ndev->flags &= ~IFF_MULTICAST;
769 770
770 priv = netdev_priv(ndev); 771 priv = netdev_priv(ndev);
771 priv->dev = dev; 772 priv->dev = dev;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c6909660e097..e18635b2a002 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4934,6 +4934,10 @@ static void bnxt_del_napi(struct bnxt *bp)
4934 napi_hash_del(&bnapi->napi); 4934 napi_hash_del(&bnapi->napi);
4935 netif_napi_del(&bnapi->napi); 4935 netif_napi_del(&bnapi->napi);
4936 } 4936 }
 4937 /* We called napi_hash_del() before netif_napi_del(), so we need
4938 * to respect an RCU grace period before freeing napi structures.
4939 */
4940 synchronize_net();
4937} 4941}
4938 4942
4939static void bnxt_init_napi(struct bnxt *bp) 4943static void bnxt_init_napi(struct bnxt *bp)
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index b32444a3ed79..533653bd7aec 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2673 lp->skb_length = skb->len; 2673 lp->skb_length = skb->len;
2674 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 2674 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
2675 DMA_TO_DEVICE); 2675 DMA_TO_DEVICE);
2676 if (dma_mapping_error(NULL, lp->skb_physaddr)) {
2677 dev_kfree_skb_any(skb);
2678 dev->stats.tx_dropped++;
2679 netdev_err(dev, "%s: DMA mapping error\n", __func__);
2680 return NETDEV_TX_OK;
2681 }
2676 2682
2677 /* Set address of the data in the Transmit Address register */ 2683 /* Set address of the data in the Transmit Address register */
2678 macb_writel(lp, TAR, lp->skb_physaddr); 2684 macb_writel(lp, TAR, lp->skb_physaddr);
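The macb hunk adds the canonical guard for TX DMA mapping: dma_map_single() can fail, the result must be checked with dma_mapping_error() before it reaches the hardware, and on failure the skb is consumed and NETDEV_TX_OK returned so the core does not requeue it. Sketched (dev standing for the mapping struct device, which this legacy path passes as NULL):

        dma_addr_t mapping;

        mapping = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, mapping)) {
                dev_kfree_skb_any(skb);         /* drop; do not requeue */
                ndev->stats.tx_dropped++;
                return NETDEV_TX_OK;            /* skb was consumed */
        }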
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 30426109711c..86bd93ce2ea3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -47,7 +47,7 @@
47 47
48/* Min/Max packet size */ 48/* Min/Max packet size */
49#define NIC_HW_MIN_FRS 64 49#define NIC_HW_MIN_FRS 64
50#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ 50#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
51 51
52/* Max pkinds */ 52/* Max pkinds */
53#define NIC_MAX_PKIND 16 53#define NIC_MAX_PKIND 16
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
178 178
179struct nicvf_hw_stats { 179struct nicvf_hw_stats {
180 u64 rx_bytes; 180 u64 rx_bytes;
181 u64 rx_frames;
181 u64 rx_ucast_frames; 182 u64 rx_ucast_frames;
182 u64 rx_bcast_frames; 183 u64 rx_bcast_frames;
183 u64 rx_mcast_frames; 184 u64 rx_mcast_frames;
184 u64 rx_fcs_errors; 185 u64 rx_drops;
185 u64 rx_l2_errors;
186 u64 rx_drop_red; 186 u64 rx_drop_red;
187 u64 rx_drop_red_bytes; 187 u64 rx_drop_red_bytes;
188 u64 rx_drop_overrun; 188 u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
191 u64 rx_drop_mcast; 191 u64 rx_drop_mcast;
192 u64 rx_drop_l3_bcast; 192 u64 rx_drop_l3_bcast;
193 u64 rx_drop_l3_mcast; 193 u64 rx_drop_l3_mcast;
194 u64 rx_fcs_errors;
195 u64 rx_l2_errors;
196
197 u64 tx_bytes;
198 u64 tx_frames;
199 u64 tx_ucast_frames;
200 u64 tx_bcast_frames;
201 u64 tx_mcast_frames;
202 u64 tx_drops;
203};
204
205struct nicvf_drv_stats {
206 /* CQE Rx errs */
194 u64 rx_bgx_truncated_pkts; 207 u64 rx_bgx_truncated_pkts;
195 u64 rx_jabber_errs; 208 u64 rx_jabber_errs;
196 u64 rx_fcs_errs; 209 u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
216 u64 rx_l4_pclp; 229 u64 rx_l4_pclp;
217 u64 rx_truncated_pkts; 230 u64 rx_truncated_pkts;
218 231
219 u64 tx_bytes_ok; 232 /* CQE Tx errs */
220 u64 tx_ucast_frames_ok; 233 u64 tx_desc_fault;
221 u64 tx_bcast_frames_ok; 234 u64 tx_hdr_cons_err;
222 u64 tx_mcast_frames_ok; 235 u64 tx_subdesc_err;
223 u64 tx_drops; 236 u64 tx_max_size_exceeded;
224}; 237 u64 tx_imm_size_oflow;
225 238 u64 tx_data_seq_err;
226struct nicvf_drv_stats { 239 u64 tx_mem_seq_err;
227 /* Rx */ 240 u64 tx_lock_viol;
228 u64 rx_frames_ok; 241 u64 tx_data_fault;
229 u64 rx_frames_64; 242 u64 tx_tstmp_conflict;
230 u64 rx_frames_127; 243 u64 tx_tstmp_timeout;
231 u64 rx_frames_255; 244 u64 tx_mem_fault;
232 u64 rx_frames_511; 245 u64 tx_csum_overlap;
233 u64 rx_frames_1023; 246 u64 tx_csum_overflow;
234 u64 rx_frames_1518; 247
235 u64 rx_frames_jumbo; 248 /* driver debug stats */
236 u64 rx_drops;
237
238 u64 rcv_buffer_alloc_failures; 249 u64 rcv_buffer_alloc_failures;
239
240 /* Tx */
241 u64 tx_frames_ok;
242 u64 tx_drops;
243 u64 tx_tso; 250 u64 tx_tso;
244 u64 tx_timeout; 251 u64 tx_timeout;
245 u64 txq_stop; 252 u64 txq_stop;
246 u64 txq_wake; 253 u64 txq_wake;
254
255 struct u64_stats_sync syncp;
247}; 256};
248 257
249struct nicvf { 258struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
282 291
283 u8 node; 292 u8 node;
284 u8 cpi_alg; 293 u8 cpi_alg;
285 u16 mtu;
286 bool link_up; 294 bool link_up;
287 u8 duplex; 295 u8 duplex;
288 u32 speed; 296 u32 speed;
@@ -298,7 +306,7 @@ struct nicvf {
298 306
299 /* Stats */ 307 /* Stats */
300 struct nicvf_hw_stats hw_stats; 308 struct nicvf_hw_stats hw_stats;
301 struct nicvf_drv_stats drv_stats; 309 struct nicvf_drv_stats __percpu *drv_stats;
302 struct bgx_stats bgx_stats; 310 struct bgx_stats bgx_stats;
303 311
304 /* MSI-X */ 312 /* MSI-X */
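The nic.h reshuffle serves one change: drv_stats becomes a per-cpu pointer, so hot-path counters are bumped locklessly with this_cpu_inc() and readers sum over all possible CPUs. The lifecycle, sketched as fragments (netdev_alloc_pcpu_stats() requires the u64_stats_sync member added above):

        /* probe */
        nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
        if (!nic->drv_stats)
                return -ENOMEM;

        /* hot path: no lock, no shared cache line between CPUs */
        this_cpu_inc(nic->drv_stats->tx_tso);

        /* slow path (ethtool / ndo_get_stats64): aggregate */
        u64 total = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                total += per_cpu_ptr(nic->drv_stats, cpu)->tx_tso;

        /* remove */
        free_percpu(nic->drv_stats);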
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 2bbf4cbf08b2..6677b96e1f3f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -11,6 +11,7 @@
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include <linux/of.h> 13#include <linux/of.h>
14#include <linux/if_vlan.h>
14 15
15#include "nic_reg.h" 16#include "nic_reg.h"
16#include "nic.h" 17#include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
260/* Update hardware min/max frame size */ 261/* Update hardware min/max frame size */
261static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) 262static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
262{ 263{
263 if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { 264 int bgx, lmac, lmac_cnt;
264 dev_err(&nic->pdev->dev, 265 u64 lmac_credits;
265 "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", 266
266 vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); 267 if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
267 return 1; 268 return 1;
268 }
269 new_frs += ETH_HLEN;
270 if (new_frs <= nic->pkind.maxlen)
271 return 0;
272 269
273 nic->pkind.maxlen = new_frs; 270 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
274 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); 271 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
272 lmac += bgx * MAX_LMAC_PER_BGX;
273
274 new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
275
276 /* Update corresponding LMAC credits */
277 lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
278 lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
279 lmac_credits &= ~(0xFFFFFULL << 12);
280 lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
281 nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
282
283 /* Enforce MTU in HW
284 * This config is supported only from 88xx pass 2.0 onwards.
285 */
286 if (!pass1_silicon(nic->pdev))
287 nic_reg_write(nic,
288 NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
275 return 0; 289 return 0;
276} 290}
277 291
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
464 478
465 /* PKIND configuration */ 479 /* PKIND configuration */
466 nic->pkind.minlen = 0; 480 nic->pkind.minlen = 0;
467 nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; 481 nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
468 nic->pkind.lenerr_en = 1; 482 nic->pkind.lenerr_en = 1;
469 nic->pkind.rx_hdr = 0; 483 nic->pkind.rx_hdr = 0;
470 nic->pkind.hdr_sl = 0; 484 nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
837 nic_reg_write(nic, reg_addr, 0); 851 nic_reg_write(nic, reg_addr, 0);
838 } 852 }
839 } 853 }
854
840 return 0; 855 return 0;
841} 856}
842 857
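nic_update_hw_frs() now recomputes the LMAC credit field rather than only growing pkind.maxlen: the 48 KB BGX TX FIFO is divided among the interface's LMACs, one maximum-sized frame is held back, and the remainder is written in 16-byte units at bits 31:12 of NIC_PF_LMAC_0_7_CREDIT. A standalone check of the arithmetic for the new 9190-byte max MTU and 4 LMACs (the extra +4 simply follows the code; the patch does not say what it covers):

        #include <stdint.h>
        #include <stdio.h>

        #define VLAN_ETH_HLEN   18
        #define ETH_FCS_LEN     4

        int main(void)
        {
                int mtu = 9190, lmac_cnt = 4;
                int new_frs = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;    /* 9216 */
                uint64_t field =
                        (uint64_t)((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12;

                /* prints: new_frs=9216 credits=192 reg bits=0xc0000 */
                printf("new_frs=%d credits=%llu reg bits=0x%llx\n", new_frs,
                       (unsigned long long)(field >> 12),
                       (unsigned long long)field);
                return 0;
        }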
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index edf779f5a227..80d46337cf29 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -106,6 +106,7 @@
106#define NIC_PF_MPI_0_2047_CFG (0x210000) 106#define NIC_PF_MPI_0_2047_CFG (0x210000)
107#define NIC_PF_RSSI_0_4097_RQ (0x220000) 107#define NIC_PF_RSSI_0_4097_RQ (0x220000)
108#define NIC_PF_LMAC_0_7_CFG (0x240000) 108#define NIC_PF_LMAC_0_7_CFG (0x240000)
109#define NIC_PF_LMAC_0_7_CFG2 (0x240100)
109#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) 110#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
110#define NIC_PF_LMAC_0_7_CREDIT (0x244000) 111#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
111#define NIC_PF_CHAN_0_255_TX_CFG (0x400000) 112#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index ad4fddb55421..432bf6be57cb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -36,11 +36,11 @@ struct nicvf_stat {
36 36
37static const struct nicvf_stat nicvf_hw_stats[] = { 37static const struct nicvf_stat nicvf_hw_stats[] = {
38 NICVF_HW_STAT(rx_bytes), 38 NICVF_HW_STAT(rx_bytes),
39 NICVF_HW_STAT(rx_frames),
39 NICVF_HW_STAT(rx_ucast_frames), 40 NICVF_HW_STAT(rx_ucast_frames),
40 NICVF_HW_STAT(rx_bcast_frames), 41 NICVF_HW_STAT(rx_bcast_frames),
41 NICVF_HW_STAT(rx_mcast_frames), 42 NICVF_HW_STAT(rx_mcast_frames),
42 NICVF_HW_STAT(rx_fcs_errors), 43 NICVF_HW_STAT(rx_drops),
43 NICVF_HW_STAT(rx_l2_errors),
44 NICVF_HW_STAT(rx_drop_red), 44 NICVF_HW_STAT(rx_drop_red),
45 NICVF_HW_STAT(rx_drop_red_bytes), 45 NICVF_HW_STAT(rx_drop_red_bytes),
46 NICVF_HW_STAT(rx_drop_overrun), 46 NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
49 NICVF_HW_STAT(rx_drop_mcast), 49 NICVF_HW_STAT(rx_drop_mcast),
50 NICVF_HW_STAT(rx_drop_l3_bcast), 50 NICVF_HW_STAT(rx_drop_l3_bcast),
51 NICVF_HW_STAT(rx_drop_l3_mcast), 51 NICVF_HW_STAT(rx_drop_l3_mcast),
52 NICVF_HW_STAT(rx_bgx_truncated_pkts), 52 NICVF_HW_STAT(rx_fcs_errors),
53 NICVF_HW_STAT(rx_jabber_errs), 53 NICVF_HW_STAT(rx_l2_errors),
54 NICVF_HW_STAT(rx_fcs_errs), 54 NICVF_HW_STAT(tx_bytes),
55 NICVF_HW_STAT(rx_bgx_errs), 55 NICVF_HW_STAT(tx_frames),
56 NICVF_HW_STAT(rx_prel2_errs), 56 NICVF_HW_STAT(tx_ucast_frames),
57 NICVF_HW_STAT(rx_l2_hdr_malformed), 57 NICVF_HW_STAT(tx_bcast_frames),
58 NICVF_HW_STAT(rx_oversize), 58 NICVF_HW_STAT(tx_mcast_frames),
59 NICVF_HW_STAT(rx_undersize), 59 NICVF_HW_STAT(tx_drops),
60 NICVF_HW_STAT(rx_l2_len_mismatch),
61 NICVF_HW_STAT(rx_l2_pclp),
62 NICVF_HW_STAT(rx_ip_ver_errs),
63 NICVF_HW_STAT(rx_ip_csum_errs),
64 NICVF_HW_STAT(rx_ip_hdr_malformed),
65 NICVF_HW_STAT(rx_ip_payload_malformed),
66 NICVF_HW_STAT(rx_ip_ttl_errs),
67 NICVF_HW_STAT(rx_l3_pclp),
68 NICVF_HW_STAT(rx_l4_malformed),
69 NICVF_HW_STAT(rx_l4_csum_errs),
70 NICVF_HW_STAT(rx_udp_len_errs),
71 NICVF_HW_STAT(rx_l4_port_errs),
72 NICVF_HW_STAT(rx_tcp_flag_errs),
73 NICVF_HW_STAT(rx_tcp_offset_errs),
74 NICVF_HW_STAT(rx_l4_pclp),
75 NICVF_HW_STAT(rx_truncated_pkts),
76 NICVF_HW_STAT(tx_bytes_ok),
77 NICVF_HW_STAT(tx_ucast_frames_ok),
78 NICVF_HW_STAT(tx_bcast_frames_ok),
79 NICVF_HW_STAT(tx_mcast_frames_ok),
80}; 60};
81 61
82static const struct nicvf_stat nicvf_drv_stats[] = { 62static const struct nicvf_stat nicvf_drv_stats[] = {
83 NICVF_DRV_STAT(rx_frames_ok), 63 NICVF_DRV_STAT(rx_bgx_truncated_pkts),
84 NICVF_DRV_STAT(rx_frames_64), 64 NICVF_DRV_STAT(rx_jabber_errs),
85 NICVF_DRV_STAT(rx_frames_127), 65 NICVF_DRV_STAT(rx_fcs_errs),
86 NICVF_DRV_STAT(rx_frames_255), 66 NICVF_DRV_STAT(rx_bgx_errs),
87 NICVF_DRV_STAT(rx_frames_511), 67 NICVF_DRV_STAT(rx_prel2_errs),
88 NICVF_DRV_STAT(rx_frames_1023), 68 NICVF_DRV_STAT(rx_l2_hdr_malformed),
89 NICVF_DRV_STAT(rx_frames_1518), 69 NICVF_DRV_STAT(rx_oversize),
90 NICVF_DRV_STAT(rx_frames_jumbo), 70 NICVF_DRV_STAT(rx_undersize),
91 NICVF_DRV_STAT(rx_drops), 71 NICVF_DRV_STAT(rx_l2_len_mismatch),
72 NICVF_DRV_STAT(rx_l2_pclp),
73 NICVF_DRV_STAT(rx_ip_ver_errs),
74 NICVF_DRV_STAT(rx_ip_csum_errs),
75 NICVF_DRV_STAT(rx_ip_hdr_malformed),
76 NICVF_DRV_STAT(rx_ip_payload_malformed),
77 NICVF_DRV_STAT(rx_ip_ttl_errs),
78 NICVF_DRV_STAT(rx_l3_pclp),
79 NICVF_DRV_STAT(rx_l4_malformed),
80 NICVF_DRV_STAT(rx_l4_csum_errs),
81 NICVF_DRV_STAT(rx_udp_len_errs),
82 NICVF_DRV_STAT(rx_l4_port_errs),
83 NICVF_DRV_STAT(rx_tcp_flag_errs),
84 NICVF_DRV_STAT(rx_tcp_offset_errs),
85 NICVF_DRV_STAT(rx_l4_pclp),
86 NICVF_DRV_STAT(rx_truncated_pkts),
87
88 NICVF_DRV_STAT(tx_desc_fault),
89 NICVF_DRV_STAT(tx_hdr_cons_err),
90 NICVF_DRV_STAT(tx_subdesc_err),
91 NICVF_DRV_STAT(tx_max_size_exceeded),
92 NICVF_DRV_STAT(tx_imm_size_oflow),
93 NICVF_DRV_STAT(tx_data_seq_err),
94 NICVF_DRV_STAT(tx_mem_seq_err),
95 NICVF_DRV_STAT(tx_lock_viol),
96 NICVF_DRV_STAT(tx_data_fault),
97 NICVF_DRV_STAT(tx_tstmp_conflict),
98 NICVF_DRV_STAT(tx_tstmp_timeout),
99 NICVF_DRV_STAT(tx_mem_fault),
100 NICVF_DRV_STAT(tx_csum_overlap),
101 NICVF_DRV_STAT(tx_csum_overflow),
102
92 NICVF_DRV_STAT(rcv_buffer_alloc_failures), 103 NICVF_DRV_STAT(rcv_buffer_alloc_failures),
93 NICVF_DRV_STAT(tx_frames_ok),
94 NICVF_DRV_STAT(tx_tso), 104 NICVF_DRV_STAT(tx_tso),
95 NICVF_DRV_STAT(tx_drops),
96 NICVF_DRV_STAT(tx_timeout), 105 NICVF_DRV_STAT(tx_timeout),
97 NICVF_DRV_STAT(txq_stop), 106 NICVF_DRV_STAT(txq_stop),
98 NICVF_DRV_STAT(txq_wake), 107 NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
278 struct ethtool_stats *stats, u64 *data) 287 struct ethtool_stats *stats, u64 *data)
279{ 288{
280 struct nicvf *nic = netdev_priv(netdev); 289 struct nicvf *nic = netdev_priv(netdev);
281 int stat; 290 int stat, tmp_stats;
282 int sqs; 291 int sqs, cpu;
283 292
284 nicvf_update_stats(nic); 293 nicvf_update_stats(nic);
285 294
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
289 for (stat = 0; stat < nicvf_n_hw_stats; stat++) 298 for (stat = 0; stat < nicvf_n_hw_stats; stat++)
290 *(data++) = ((u64 *)&nic->hw_stats) 299 *(data++) = ((u64 *)&nic->hw_stats)
291 [nicvf_hw_stats[stat].index]; 300 [nicvf_hw_stats[stat].index];
292 for (stat = 0; stat < nicvf_n_drv_stats; stat++) 301 for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
293 *(data++) = ((u64 *)&nic->drv_stats) 302 tmp_stats = 0;
294 [nicvf_drv_stats[stat].index]; 303 for_each_possible_cpu(cpu)
304 tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
305 [nicvf_drv_stats[stat].index];
306 *(data++) = tmp_stats;
307 }
295 308
296 nicvf_get_qset_stats(nic, stats, &data); 309 nicvf_get_qset_stats(nic, stats, &data);
297 310
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 45a13f718863..8a37012c9c89 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
69 return qidx; 69 return qidx;
70} 70}
71 71
72static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
73 struct sk_buff *skb)
74{
75 if (skb->len <= 64)
76 nic->drv_stats.rx_frames_64++;
77 else if (skb->len <= 127)
78 nic->drv_stats.rx_frames_127++;
79 else if (skb->len <= 255)
80 nic->drv_stats.rx_frames_255++;
81 else if (skb->len <= 511)
82 nic->drv_stats.rx_frames_511++;
83 else if (skb->len <= 1023)
84 nic->drv_stats.rx_frames_1023++;
85 else if (skb->len <= 1518)
86 nic->drv_stats.rx_frames_1518++;
87 else
88 nic->drv_stats.rx_frames_jumbo++;
89}
90
91/* The Cavium ThunderX network controller can *only* be found in SoCs 72/* The Cavium ThunderX network controller can *only* be found in SoCs
92 * containing the ThunderX ARM64 CPU implementation. All accesses to the device 73 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
93 * registers on this platform are implicitly strongly ordered with respect 74 * registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
492static int nicvf_init_resources(struct nicvf *nic) 473static int nicvf_init_resources(struct nicvf *nic)
493{ 474{
494 int err; 475 int err;
495 union nic_mbx mbx = {};
496
497 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
498 476
499 /* Enable Qset */ 477 /* Enable Qset */
500 nicvf_qset_config(nic, true); 478 nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
507 return err; 485 return err;
508 } 486 }
509 487
510 /* Send VF config done msg to PF */
511 nicvf_write_to_mbx(nic, &mbx);
512
513 return 0; 488 return 0;
514} 489}
515 490
516static void nicvf_snd_pkt_handler(struct net_device *netdev, 491static void nicvf_snd_pkt_handler(struct net_device *netdev,
517 struct cmp_queue *cq,
518 struct cqe_send_t *cqe_tx, 492 struct cqe_send_t *cqe_tx,
519 int cqe_type, int budget, 493 int cqe_type, int budget,
520 unsigned int *tx_pkts, unsigned int *tx_bytes) 494 unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
536 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, 510 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
537 cqe_tx->sqe_ptr, hdr->subdesc_cnt); 511 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
538 512
539 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); 513 nicvf_check_cqe_tx_errs(nic, cqe_tx);
540 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; 514 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
541 if (skb) { 515 if (skb) {
542 /* Check for dummy descriptor used for HW TSO offload on 88xx */ 516 /* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
630 return; 604 return;
631 } 605 }
632 606
633 nicvf_set_rx_frame_cnt(nic, skb);
634
635 nicvf_set_rxhash(netdev, cqe_rx, skb); 607 nicvf_set_rxhash(netdev, cqe_rx, skb);
636 608
637 skb_record_rx_queue(skb, rq_idx); 609 skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ loop:
703 work_done++; 675 work_done++;
704 break; 676 break;
705 case CQE_TYPE_SEND: 677 case CQE_TYPE_SEND:
706 nicvf_snd_pkt_handler(netdev, cq, 678 nicvf_snd_pkt_handler(netdev,
707 (void *)cq_desc, CQE_TYPE_SEND, 679 (void *)cq_desc, CQE_TYPE_SEND,
708 budget, &tx_pkts, &tx_bytes); 680 budget, &tx_pkts, &tx_bytes);
709 tx_done++; 681 tx_done++;
@@ -740,7 +712,7 @@ done:
740 nic = nic->pnicvf; 712 nic = nic->pnicvf;
741 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { 713 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
742 netif_tx_start_queue(txq); 714 netif_tx_start_queue(txq);
743 nic->drv_stats.txq_wake++; 715 this_cpu_inc(nic->drv_stats->txq_wake);
744 if (netif_msg_tx_err(nic)) 716 if (netif_msg_tx_err(nic))
745 netdev_warn(netdev, 717 netdev_warn(netdev,
746 "%s: Transmit queue wakeup SQ%d\n", 718 "%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
1084 1056
1085 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { 1057 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
1086 netif_tx_stop_queue(txq); 1058 netif_tx_stop_queue(txq);
1087 nic->drv_stats.txq_stop++; 1059 this_cpu_inc(nic->drv_stats->txq_stop);
1088 if (netif_msg_tx_err(nic)) 1060 if (netif_msg_tx_err(nic))
1089 netdev_warn(netdev, 1061 netdev_warn(netdev,
1090 "%s: Transmit ring full, stopping SQ%d\n", 1062 "%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
1189 return 0; 1161 return 0;
1190} 1162}
1191 1163
1164static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1165{
1166 union nic_mbx mbx = {};
1167
1168 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1169 mbx.frs.max_frs = mtu;
1170 mbx.frs.vf_id = nic->vf_id;
1171
1172 return nicvf_send_msg_to_pf(nic, &mbx);
1173}
1174
1192int nicvf_open(struct net_device *netdev) 1175int nicvf_open(struct net_device *netdev)
1193{ 1176{
1194 int err, qidx; 1177 int cpu, err, qidx;
1195 struct nicvf *nic = netdev_priv(netdev); 1178 struct nicvf *nic = netdev_priv(netdev);
1196 struct queue_set *qs = nic->qs; 1179 struct queue_set *qs = nic->qs;
1197 struct nicvf_cq_poll *cq_poll = NULL; 1180 struct nicvf_cq_poll *cq_poll = NULL;
1198 1181 union nic_mbx mbx = {};
1199 nic->mtu = netdev->mtu;
1200 1182
1201 netif_carrier_off(netdev); 1183 netif_carrier_off(netdev);
1202 1184
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
1248 if (nic->sqs_mode) 1230 if (nic->sqs_mode)
1249 nicvf_get_primary_vf_struct(nic); 1231 nicvf_get_primary_vf_struct(nic);
1250 1232
1251 /* Configure receive side scaling */ 1233 /* Configure receive side scaling and MTU */
1252 if (!nic->sqs_mode) 1234 if (!nic->sqs_mode) {
1253 nicvf_rss_init(nic); 1235 nicvf_rss_init(nic);
1236 if (nicvf_update_hw_max_frs(nic, netdev->mtu))
1237 goto cleanup;
1238
1239 /* Clear percpu stats */
1240 for_each_possible_cpu(cpu)
1241 memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
1242 sizeof(struct nicvf_drv_stats));
1243 }
1254 1244
1255 err = nicvf_register_interrupts(nic); 1245 err = nicvf_register_interrupts(nic);
1256 if (err) 1246 if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
1276 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1266 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1277 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1267 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1278 1268
1279 nic->drv_stats.txq_stop = 0; 1269 /* Send VF config done msg to PF */
1280 nic->drv_stats.txq_wake = 0; 1270 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
1271 nicvf_write_to_mbx(nic, &mbx);
1281 1272
1282 return 0; 1273 return 0;
1283cleanup: 1274cleanup:
@@ -1297,17 +1288,6 @@ napi_del:
1297 return err; 1288 return err;
1298} 1289}
1299 1290
1300static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1301{
1302 union nic_mbx mbx = {};
1303
1304 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1305 mbx.frs.max_frs = mtu;
1306 mbx.frs.vf_id = nic->vf_id;
1307
1308 return nicvf_send_msg_to_pf(nic, &mbx);
1309}
1310
1311static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) 1291static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1312{ 1292{
1313 struct nicvf *nic = netdev_priv(netdev); 1293 struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1318 if (new_mtu < NIC_HW_MIN_FRS) 1298 if (new_mtu < NIC_HW_MIN_FRS)
1319 return -EINVAL; 1299 return -EINVAL;
1320 1300
1301 netdev->mtu = new_mtu;
1302
1303 if (!netif_running(netdev))
1304 return 0;
1305
1321 if (nicvf_update_hw_max_frs(nic, new_mtu)) 1306 if (nicvf_update_hw_max_frs(nic, new_mtu))
1322 return -EINVAL; 1307 return -EINVAL;
1323 netdev->mtu = new_mtu;
1324 nic->mtu = new_mtu;
1325 1308
1326 return 0; 1309 return 0;
1327} 1310}
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
1379 1362
1380void nicvf_update_stats(struct nicvf *nic) 1363void nicvf_update_stats(struct nicvf *nic)
1381{ 1364{
1382 int qidx; 1365 int qidx, cpu;
1366 u64 tmp_stats = 0;
1383 struct nicvf_hw_stats *stats = &nic->hw_stats; 1367 struct nicvf_hw_stats *stats = &nic->hw_stats;
1384 struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1368 struct nicvf_drv_stats *drv_stats;
1385 struct queue_set *qs = nic->qs; 1369 struct queue_set *qs = nic->qs;
1386 1370
1387#define GET_RX_STATS(reg) \ 1371#define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
1404 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); 1388 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1405 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); 1389 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1406 1390
1407 stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); 1391 stats->tx_bytes = GET_TX_STATS(TX_OCTS);
1408 stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); 1392 stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
1409 stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); 1393 stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
1410 stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); 1394 stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
1411 stats->tx_drops = GET_TX_STATS(TX_DROP); 1395 stats->tx_drops = GET_TX_STATS(TX_DROP);
1412 1396
1413 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1397 /* On T88 pass 2.0, the dummy SQE added for TSO notification
 1414 stats->tx_bcast_frames_ok + 1398 * via CQE has 'dont_send' set. Hence HW drops the pkt
 1415 stats->tx_mcast_frames_ok; 1399 * pointed to by the dummy SQE and results in tx_drops counter being
1416 drv_stats->rx_frames_ok = stats->rx_ucast_frames + 1400 * incremented. Subtracting it from tx_tso counter will give
1417 stats->rx_bcast_frames + 1401 * exact tx_drops counter.
1418 stats->rx_mcast_frames; 1402 */
1419 drv_stats->rx_drops = stats->rx_drop_red + 1403 if (nic->t88 && nic->hw_tso) {
1420 stats->rx_drop_overrun; 1404 for_each_possible_cpu(cpu) {
1421 drv_stats->tx_drops = stats->tx_drops; 1405 drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
1406 tmp_stats += drv_stats->tx_tso;
1407 }
1408 stats->tx_drops = tmp_stats - stats->tx_drops;
1409 }
1410 stats->tx_frames = stats->tx_ucast_frames +
1411 stats->tx_bcast_frames +
1412 stats->tx_mcast_frames;
1413 stats->rx_frames = stats->rx_ucast_frames +
1414 stats->rx_bcast_frames +
1415 stats->rx_mcast_frames;
1416 stats->rx_drops = stats->rx_drop_red +
1417 stats->rx_drop_overrun;
1422 1418
1423 /* Update RQ and SQ stats */ 1419 /* Update RQ and SQ stats */
1424 for (qidx = 0; qidx < qs->rq_cnt; qidx++) 1420 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
1432{ 1428{
1433 struct nicvf *nic = netdev_priv(netdev); 1429 struct nicvf *nic = netdev_priv(netdev);
1434 struct nicvf_hw_stats *hw_stats = &nic->hw_stats; 1430 struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
1435 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1436 1431
1437 nicvf_update_stats(nic); 1432 nicvf_update_stats(nic);
1438 1433
1439 stats->rx_bytes = hw_stats->rx_bytes; 1434 stats->rx_bytes = hw_stats->rx_bytes;
1440 stats->rx_packets = drv_stats->rx_frames_ok; 1435 stats->rx_packets = hw_stats->rx_frames;
1441 stats->rx_dropped = drv_stats->rx_drops; 1436 stats->rx_dropped = hw_stats->rx_drops;
1442 stats->multicast = hw_stats->rx_mcast_frames; 1437 stats->multicast = hw_stats->rx_mcast_frames;
1443 1438
1444 stats->tx_bytes = hw_stats->tx_bytes_ok; 1439 stats->tx_bytes = hw_stats->tx_bytes;
1445 stats->tx_packets = drv_stats->tx_frames_ok; 1440 stats->tx_packets = hw_stats->tx_frames;
1446 stats->tx_dropped = drv_stats->tx_drops; 1441 stats->tx_dropped = hw_stats->tx_drops;
1447 1442
1448 return stats; 1443 return stats;
1449} 1444}
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
1456 netdev_warn(dev, "%s: Transmit timed out, resetting\n", 1451 netdev_warn(dev, "%s: Transmit timed out, resetting\n",
1457 dev->name); 1452 dev->name);
1458 1453
1459 nic->drv_stats.tx_timeout++; 1454 this_cpu_inc(nic->drv_stats->tx_timeout);
1460 schedule_work(&nic->reset_task); 1455 schedule_work(&nic->reset_task);
1461} 1456}
1462 1457
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1590 goto err_free_netdev; 1585 goto err_free_netdev;
1591 } 1586 }
1592 1587
1588 nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
1589 if (!nic->drv_stats) {
1590 err = -ENOMEM;
1591 goto err_free_netdev;
1592 }
1593
1593 err = nicvf_set_qset_resources(nic); 1594 err = nicvf_set_qset_resources(nic);
1594 if (err) 1595 if (err)
1595 goto err_free_netdev; 1596 goto err_free_netdev;
@@ -1648,6 +1649,8 @@ err_unregister_interrupts:
1648 nicvf_unregister_interrupts(nic); 1649 nicvf_unregister_interrupts(nic);
1649err_free_netdev: 1650err_free_netdev:
1650 pci_set_drvdata(pdev, NULL); 1651 pci_set_drvdata(pdev, NULL);
1652 if (nic->drv_stats)
1653 free_percpu(nic->drv_stats);
1651 free_netdev(netdev); 1654 free_netdev(netdev);
1652err_release_regions: 1655err_release_regions:
1653 pci_release_regions(pdev); 1656 pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
1675 unregister_netdev(pnetdev); 1678 unregister_netdev(pnetdev);
1676 nicvf_unregister_interrupts(nic); 1679 nicvf_unregister_interrupts(nic);
1677 pci_set_drvdata(pdev, NULL); 1680 pci_set_drvdata(pdev, NULL);
1681 if (nic->drv_stats)
1682 free_percpu(nic->drv_stats);
1678 free_netdev(netdev); 1683 free_netdev(netdev);
1679 pci_release_regions(pdev); 1684 pci_release_regions(pdev);
1680 pci_disable_device(pdev); 1685 pci_disable_device(pdev);
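Two ordering changes in nicvf_main.c are worth calling out: the CFG_DONE mailbox message now goes to the PF only at the end of nicvf_open(), after the MTU and per-cpu stats are set up, and nicvf_change_mtu() records the new MTU unconditionally but touches hardware only while the interface is running, leaving open() to program it otherwise. The change_mtu shape, sketched (update_hw standing in for the SET_MAX_FRS mailbox call):

        static int sketch_change_mtu(struct net_device *netdev, int new_mtu)
        {
                netdev->mtu = new_mtu;          /* remember even while down */

                if (!netif_running(netdev))
                        return 0;               /* open() programs the HW later */

                if (update_hw(netdev, new_mtu)) /* e.g. mailbox msg to the PF */
                        return -EINVAL;

                return 0;
        }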
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a4fc50155881..747ef0882976 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
105 order); 105 order);
106 if (!nic->rb_page) { 106 if (!nic->rb_page) {
107 nic->drv_stats.rcv_buffer_alloc_failures++; 107 this_cpu_inc(nic->pnicvf->drv_stats->
108 rcv_buffer_alloc_failures);
108 return -ENOMEM; 109 return -ENOMEM;
109 } 110 }
110 nic->rb_page_offset = 0; 111 nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ refill:
270 rbdr_idx, new_rb); 271 rbdr_idx, new_rb);
271next_rbdr: 272next_rbdr:
 272 /* Re-enable RBDR interrupts only if buffer allocation is successful */ 273 /* Re-enable RBDR interrupts only if buffer allocation is successful */
273 if (!nic->rb_alloc_fail && rbdr->enable) 274 if (!nic->rb_alloc_fail && rbdr->enable &&
275 netif_running(nic->pnicvf->netdev))
274 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 276 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
275 277
276 if (rbdr_idx) 278 if (rbdr_idx)
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
361 363
362static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) 364static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
363{ 365{
366 struct sk_buff *skb;
367
364 if (!sq) 368 if (!sq)
365 return; 369 return;
366 if (!sq->dmem.base) 370 if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
371 sq->dmem.q_len * TSO_HEADER_SIZE, 375 sq->dmem.q_len * TSO_HEADER_SIZE,
372 sq->tso_hdrs, sq->tso_hdrs_phys); 376 sq->tso_hdrs, sq->tso_hdrs_phys);
373 377
378 /* Free pending skbs in the queue */
379 smp_rmb();
380 while (sq->head != sq->tail) {
381 skb = (struct sk_buff *)sq->skbuff[sq->head];
382 if (skb)
383 dev_kfree_skb_any(skb);
384 sq->head++;
385 sq->head &= (sq->dmem.q_len - 1);
386 }
374 kfree(sq->skbuff); 387 kfree(sq->skbuff);
375 nicvf_free_q_desc_mem(nic, &sq->dmem); 388 nicvf_free_q_desc_mem(nic, &sq->dmem);
376} 389}
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
483{ 496{
484 union nic_mbx mbx = {}; 497 union nic_mbx mbx = {};
485 498
486 /* Reset all RXQ's stats */ 499 /* Reset all RQ/SQ and VF stats */
487 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; 500 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
501 mbx.reset_stat.rx_stat_mask = 0x3FFF;
502 mbx.reset_stat.tx_stat_mask = 0x1F;
488 mbx.reset_stat.rq_stat_mask = 0xFFFF; 503 mbx.reset_stat.rq_stat_mask = 0xFFFF;
504 mbx.reset_stat.sq_stat_mask = 0xFFFF;
489 nicvf_send_msg_to_pf(nic, &mbx); 505 nicvf_send_msg_to_pf(nic, &mbx);
490} 506}
491 507
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
538 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); 554 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
539 nicvf_send_msg_to_pf(nic, &mbx); 555 nicvf_send_msg_to_pf(nic, &mbx);
540 556
541 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); 557 if (!nic->sqs_mode && (qidx == 0)) {
542 if (!nic->sqs_mode) 558 /* Enable checking L3/L4 length and TCP/UDP checksums */
559 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
560 (BIT(24) | BIT(23) | BIT(21)));
543 nicvf_config_vlan_stripping(nic, nic->netdev->features); 561 nicvf_config_vlan_stripping(nic, nic->netdev->features);
562 }
544 563
545 /* Enable Receive queue */ 564 /* Enable Receive queue */
546 memset(&rq_cfg, 0, sizeof(struct rq_cfg)); 565 memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
1029 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; 1048 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
1030 /* For non-tunneled pkts, point this to L2 ethertype */ 1049 /* For non-tunneled pkts, point this to L2 ethertype */
1031 hdr->inner_l3_offset = skb_network_offset(skb) - 2; 1050 hdr->inner_l3_offset = skb_network_offset(skb) - 2;
1032 nic->drv_stats.tx_tso++; 1051 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1033 } 1052 }
1034} 1053}
1035 1054
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1161 1180
1162 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); 1181 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
1163 1182
1164 nic->drv_stats.tx_tso++; 1183 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1165 return 1; 1184 return 1;
1166} 1185}
1167 1186
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1422/* Check for errors in the receive cmp.queue entry */ 1441/* Check for errors in the receive cmp.queue entry */
1423int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) 1442int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1424{ 1443{
1425 struct nicvf_hw_stats *stats = &nic->hw_stats;
1426
1427 if (!cqe_rx->err_level && !cqe_rx->err_opcode) 1444 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1428 return 0; 1445 return 0;
1429 1446
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1435 1452
1436 switch (cqe_rx->err_opcode) { 1453 switch (cqe_rx->err_opcode) {
1437 case CQ_RX_ERROP_RE_PARTIAL: 1454 case CQ_RX_ERROP_RE_PARTIAL:
1438 stats->rx_bgx_truncated_pkts++; 1455 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
1439 break; 1456 break;
1440 case CQ_RX_ERROP_RE_JABBER: 1457 case CQ_RX_ERROP_RE_JABBER:
1441 stats->rx_jabber_errs++; 1458 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
1442 break; 1459 break;
1443 case CQ_RX_ERROP_RE_FCS: 1460 case CQ_RX_ERROP_RE_FCS:
1444 stats->rx_fcs_errs++; 1461 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
1445 break; 1462 break;
1446 case CQ_RX_ERROP_RE_RX_CTL: 1463 case CQ_RX_ERROP_RE_RX_CTL:
1447 stats->rx_bgx_errs++; 1464 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
1448 break; 1465 break;
1449 case CQ_RX_ERROP_PREL2_ERR: 1466 case CQ_RX_ERROP_PREL2_ERR:
1450 stats->rx_prel2_errs++; 1467 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
1451 break; 1468 break;
1452 case CQ_RX_ERROP_L2_MAL: 1469 case CQ_RX_ERROP_L2_MAL:
1453 stats->rx_l2_hdr_malformed++; 1470 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
1454 break; 1471 break;
1455 case CQ_RX_ERROP_L2_OVERSIZE: 1472 case CQ_RX_ERROP_L2_OVERSIZE:
1456 stats->rx_oversize++; 1473 this_cpu_inc(nic->drv_stats->rx_oversize);
1457 break; 1474 break;
1458 case CQ_RX_ERROP_L2_UNDERSIZE: 1475 case CQ_RX_ERROP_L2_UNDERSIZE:
1459 stats->rx_undersize++; 1476 this_cpu_inc(nic->drv_stats->rx_undersize);
1460 break; 1477 break;
1461 case CQ_RX_ERROP_L2_LENMISM: 1478 case CQ_RX_ERROP_L2_LENMISM:
1462 stats->rx_l2_len_mismatch++; 1479 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
1463 break; 1480 break;
1464 case CQ_RX_ERROP_L2_PCLP: 1481 case CQ_RX_ERROP_L2_PCLP:
1465 stats->rx_l2_pclp++; 1482 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
1466 break; 1483 break;
1467 case CQ_RX_ERROP_IP_NOT: 1484 case CQ_RX_ERROP_IP_NOT:
1468 stats->rx_ip_ver_errs++; 1485 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
1469 break; 1486 break;
1470 case CQ_RX_ERROP_IP_CSUM_ERR: 1487 case CQ_RX_ERROP_IP_CSUM_ERR:
1471 stats->rx_ip_csum_errs++; 1488 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
1472 break; 1489 break;
1473 case CQ_RX_ERROP_IP_MAL: 1490 case CQ_RX_ERROP_IP_MAL:
1474 stats->rx_ip_hdr_malformed++; 1491 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
1475 break; 1492 break;
1476 case CQ_RX_ERROP_IP_MALD: 1493 case CQ_RX_ERROP_IP_MALD:
1477 stats->rx_ip_payload_malformed++; 1494 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
1478 break; 1495 break;
1479 case CQ_RX_ERROP_IP_HOP: 1496 case CQ_RX_ERROP_IP_HOP:
1480 stats->rx_ip_ttl_errs++; 1497 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
1481 break; 1498 break;
1482 case CQ_RX_ERROP_L3_PCLP: 1499 case CQ_RX_ERROP_L3_PCLP:
1483 stats->rx_l3_pclp++; 1500 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
1484 break; 1501 break;
1485 case CQ_RX_ERROP_L4_MAL: 1502 case CQ_RX_ERROP_L4_MAL:
1486 stats->rx_l4_malformed++; 1503 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
1487 break; 1504 break;
1488 case CQ_RX_ERROP_L4_CHK: 1505 case CQ_RX_ERROP_L4_CHK:
1489 stats->rx_l4_csum_errs++; 1506 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
1490 break; 1507 break;
1491 case CQ_RX_ERROP_UDP_LEN: 1508 case CQ_RX_ERROP_UDP_LEN:
1492 stats->rx_udp_len_errs++; 1509 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
1493 break; 1510 break;
1494 case CQ_RX_ERROP_L4_PORT: 1511 case CQ_RX_ERROP_L4_PORT:
1495 stats->rx_l4_port_errs++; 1512 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
1496 break; 1513 break;
1497 case CQ_RX_ERROP_TCP_FLAG: 1514 case CQ_RX_ERROP_TCP_FLAG:
1498 stats->rx_tcp_flag_errs++; 1515 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
1499 break; 1516 break;
1500 case CQ_RX_ERROP_TCP_OFFSET: 1517 case CQ_RX_ERROP_TCP_OFFSET:
1501 stats->rx_tcp_offset_errs++; 1518 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
1502 break; 1519 break;
1503 case CQ_RX_ERROP_L4_PCLP: 1520 case CQ_RX_ERROP_L4_PCLP:
1504 stats->rx_l4_pclp++; 1521 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
1505 break; 1522 break;
1506 case CQ_RX_ERROP_RBDR_TRUNC: 1523 case CQ_RX_ERROP_RBDR_TRUNC:
1507 stats->rx_truncated_pkts++; 1524 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
1508 break; 1525 break;
1509 } 1526 }
1510 1527
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 }
 
 /* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
 {
-	struct cmp_queue_stats *stats = &cq->stats;
-
 	switch (cqe_tx->send_status) {
 	case CQ_TX_ERROP_GOOD:
-		stats->tx.good++;
 		return 0;
 	case CQ_TX_ERROP_DESC_FAULT:
-		stats->tx.desc_fault++;
+		this_cpu_inc(nic->drv_stats->tx_desc_fault);
 		break;
 	case CQ_TX_ERROP_HDR_CONS_ERR:
-		stats->tx.hdr_cons_err++;
+		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
 		break;
 	case CQ_TX_ERROP_SUBDC_ERR:
-		stats->tx.subdesc_err++;
+		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+		break;
+	case CQ_TX_ERROP_MAX_SIZE_VIOL:
+		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
 		break;
 	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
-		stats->tx.imm_size_oflow++;
+		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
 		break;
 	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
-		stats->tx.data_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
 		break;
 	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
-		stats->tx.mem_seq_err++;
+		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
 		break;
 	case CQ_TX_ERROP_LOCK_VIOL:
-		stats->tx.lock_viol++;
+		this_cpu_inc(nic->drv_stats->tx_lock_viol);
 		break;
 	case CQ_TX_ERROP_DATA_FAULT:
-		stats->tx.data_fault++;
+		this_cpu_inc(nic->drv_stats->tx_data_fault);
 		break;
 	case CQ_TX_ERROP_TSTMP_CONFLICT:
-		stats->tx.tstmp_conflict++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
 		break;
 	case CQ_TX_ERROP_TSTMP_TIMEOUT:
-		stats->tx.tstmp_timeout++;
+		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
 		break;
 	case CQ_TX_ERROP_MEM_FAULT:
-		stats->tx.mem_fault++;
+		this_cpu_inc(nic->drv_stats->tx_mem_fault);
 		break;
 	case CQ_TX_ERROP_CK_OVERLAP:
-		stats->tx.csum_overlap++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
 		break;
 	case CQ_TX_ERROP_CK_OFLOW:
-		stats->tx.csum_overflow++;
+		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
 		break;
 	}
 
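The hunks above switch the queue error counters from per-queue fields to per-CPU driver stats bumped through this_cpu_inc(), so CQE processing on different CPUs no longer contends on a shared counter cache line; the matching struct removal follows in nicvf_queues.h below, and a reader sums the per-CPU copies on demand. A minimal userspace sketch of that pattern (the counter name and fixed CPU count are illustrative stand-ins for the kernel's percpu machinery):

	#include <stdio.h>

	#define NR_CPUS 4

	/* One private copy of the counter per CPU, as with this_cpu_inc(). */
	static unsigned long tx_desc_fault[NR_CPUS];

	/* Writer side: each CPU bumps only its own slot, lock-free. */
	static void inc_tx_desc_fault(int cpu)
	{
		tx_desc_fault[cpu]++;
	}

	/* Reader side: fold the per-CPU copies into one total on demand. */
	static unsigned long read_tx_desc_fault(void)
	{
		unsigned long sum = 0;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			sum += tx_desc_fault[cpu];
		return sum;
	}

	int main(void)
	{
		inc_tx_desc_fault(0);
		inc_tx_desc_fault(2);
		printf("tx_desc_fault = %lu\n", read_tx_desc_fault());
		return 0;
	}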
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 869f3386028b..2e3c940c1093 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
 	CQ_TX_ERROP_DESC_FAULT = 0x10,
 	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
 	CQ_TX_ERROP_SUBDC_ERR = 0x12,
+	CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
 	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
 	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
 	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
 	CQ_TX_ERROP_ENUM_LAST = 0x8a,
 };
 
-struct cmp_queue_stats {
-	struct tx_stats {
-		u64 good;
-		u64 desc_fault;
-		u64 hdr_cons_err;
-		u64 subdesc_err;
-		u64 imm_size_oflow;
-		u64 data_seq_err;
-		u64 mem_seq_err;
-		u64 lock_viol;
-		u64 data_fault;
-		u64 tstmp_conflict;
-		u64 tstmp_timeout;
-		u64 mem_fault;
-		u64 csum_overlap;
-		u64 csum_overflow;
-	} tx;
-} ____cacheline_aligned_in_smp;
-
 enum RQ_SQ_STATS {
 	RQ_SQ_STATS_OCTS,
 	RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
 	spinlock_t lock;	/* lock to serialize processing CQEs */
 	void *desc;
 	struct q_desc_mem dmem;
-	struct cmp_queue_stats stats;
 	int irq;
 } ____cacheline_aligned_in_smp;
 
@@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
 #endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8bbaedbb7b94..050e21fbb147 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
 	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
-		bgx->bgx_id =
-		(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+		bgx->bgx_id = (pci_resource_start(pdev,
+			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
 		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
 		bgx->max_lmac = MAX_LMAC_PER_BGX;
 		bgx_vnic[bgx->bgx_id] = bgx;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index d59c71e4a000..01cc7c859131 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -28,6 +28,8 @@
 #define MAX_DMAC_PER_LMAC			8
 #define MAX_FRAME_SIZE				9216
 
+#define BGX_ID_MASK				0x3
+
 #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
 
 /* Registers */
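The probe fix above is why BGX_ID_MASK exists: the BGX index lives in bits 25:24 of the config BAR address, and the old `& 1` kept only the low bit, so a third BGX (id 2) would alias to id 0. A small sketch of the decode (the BAR value below is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define BGX_ID_MASK 0x3

	/* Decode the BGX index from bits 25:24 of the config BAR address. */
	static unsigned int bgx_id_from_bar(uint64_t bar_start)
	{
		return (bar_start >> 24) & BGX_ID_MASK;
	}

	int main(void)
	{
		uint64_t bar = 0x87e0e2000000ULL;	/* hypothetical BGX2 BAR */

		printf("old mask: %llu, new mask: %u\n",
		       (unsigned long long)((bar >> 24) & 1),	/* loses bit 25 */
		       bgx_id_from_bar(bar));
		return 0;
	}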
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e74fd6085df..e19a0ca8e5dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 		   rq->cntxt_id, fl_id, 0xffff);
 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
 			  rq->desc, rq->phys_addr);
-	napi_hash_del(&rq->napi);
 	netif_napi_del(&rq->napi);
 	rq->netdev = NULL;
 	rq->cntxt_id = rq->abs_id = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cece8a08edca..93aa2939142a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2813,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
 		if (eqo->q.created) {
 			be_eq_clean(eqo);
 			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
-			napi_hash_del(&eqo->napi);
 			netif_napi_del(&eqo->napi);
 			free_cpumask_var(eqo->affinity_mask);
 		}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f05ea56dcff2..941c8e2c944e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
 
 static void sky2_shutdown(struct pci_dev *pdev)
 {
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int port;
+
+	for (port = 0; port < hw->ports; port++) {
+		struct net_device *ndev = hw->dev[port];
+
+		rtnl_lock();
+		if (netif_running(ndev)) {
+			dev_close(ndev);
+			netif_device_detach(ndev);
+		}
+		rtnl_unlock();
+	}
 	sky2_suspend(&pdev->dev);
 	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 	pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 3818c5e06eba..4b78168a5f3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -107,7 +107,7 @@ config DWMAC_STI
 config DWMAC_STM32
 	tristate "STM32 DWMAC support"
 	default ARCH_STM32
-	depends on OF && HAS_IOMEM
+	depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
 	select MFD_SYSCON
 	---help---
 	  Support for ethernet controller on STM32 SOCs.
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 2920e2ee3864..489ef146201e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -63,8 +63,8 @@
 #define TSE_PCS_SGMII_LINK_TIMER_0		0x0D40
 #define TSE_PCS_SGMII_LINK_TIMER_1		0x0003
 #define TSE_PCS_SW_RESET_TIMEOUT		100
-#define TSE_PCS_USE_SGMII_AN_MASK		BIT(2)
-#define TSE_PCS_USE_SGMII_ENA			BIT(1)
+#define TSE_PCS_USE_SGMII_AN_MASK		BIT(1)
+#define TSE_PCS_USE_SGMII_ENA			BIT(0)
 
 #define SGMII_ADAPTER_CTRL_REG			0x00
 #define SGMII_ADAPTER_DISABLE			0x0001
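The two PCS masks above each shift down by one position; per this fix the SGMII enable is bit 0 and the auto-negotiation select is bit 1 of the register the driver programs. A sketch of the resulting read-modify-write (the target register is modeled by a plain variable, since its offset sits outside this hunk):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)				(1U << (n))
	#define TSE_PCS_USE_SGMII_AN_MASK	BIT(1)
	#define TSE_PCS_USE_SGMII_ENA		BIT(0)

	int main(void)
	{
		uint16_t if_mode = 0;	/* stand-in for the PCS mode register */

		/* Enable SGMII mode with auto-negotiation, as the driver does. */
		if_mode |= TSE_PCS_USE_SGMII_ENA | TSE_PCS_USE_SGMII_AN_MASK;
		printf("if_mode = 0x%04x\n", (unsigned)if_mode);	/* 0x0003 */
		return 0;
	}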
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index d3292c4a6eda..6d2de4e01f6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -120,14 +120,17 @@ struct stmmac_extra_stats {
 	unsigned long ip_csum_bypassed;
 	unsigned long ipv4_pkt_rcvd;
 	unsigned long ipv6_pkt_rcvd;
-	unsigned long rx_msg_type_ext_no_ptp;
-	unsigned long rx_msg_type_sync;
-	unsigned long rx_msg_type_follow_up;
-	unsigned long rx_msg_type_delay_req;
-	unsigned long rx_msg_type_delay_resp;
-	unsigned long rx_msg_type_pdelay_req;
-	unsigned long rx_msg_type_pdelay_resp;
-	unsigned long rx_msg_type_pdelay_follow_up;
+	unsigned long no_ptp_rx_msg_type_ext;
+	unsigned long ptp_rx_msg_type_sync;
+	unsigned long ptp_rx_msg_type_follow_up;
+	unsigned long ptp_rx_msg_type_delay_req;
+	unsigned long ptp_rx_msg_type_delay_resp;
+	unsigned long ptp_rx_msg_type_pdelay_req;
+	unsigned long ptp_rx_msg_type_pdelay_resp;
+	unsigned long ptp_rx_msg_type_pdelay_follow_up;
+	unsigned long ptp_rx_msg_type_announce;
+	unsigned long ptp_rx_msg_type_management;
+	unsigned long ptp_rx_msg_pkt_reserved_type;
 	unsigned long ptp_frame_type;
 	unsigned long ptp_ver;
 	unsigned long timestamp_dropped;
@@ -482,11 +485,12 @@ struct stmmac_ops {
 /* PTP and HW Timer helpers */
 struct stmmac_hwtimestamp {
 	void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
-	u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate);
+	u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+					   int gmac4);
 	int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
 	int (*config_addend) (void __iomem *ioaddr, u32 addend);
 	int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
-			       int add_sub);
+			       int add_sub, int gmac4);
 	u64(*get_systime) (void __iomem *ioaddr);
 };
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 2e4c171a2b41..e3c86d422109 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -155,14 +155,18 @@
 #define ERDES4_L3_L4_FILT_NO_MATCH_MASK	GENMASK(27, 26)
 
 /* Extended RDES4 message type definitions */
-#define RDES_EXT_NO_PTP			0
-#define RDES_EXT_SYNC			1
-#define RDES_EXT_FOLLOW_UP		2
-#define RDES_EXT_DELAY_REQ		3
-#define RDES_EXT_DELAY_RESP		4
-#define RDES_EXT_PDELAY_REQ		5
-#define RDES_EXT_PDELAY_RESP		6
-#define RDES_EXT_PDELAY_FOLLOW_UP	7
+#define RDES_EXT_NO_PTP			0x0
+#define RDES_EXT_SYNC			0x1
+#define RDES_EXT_FOLLOW_UP		0x2
+#define RDES_EXT_DELAY_REQ		0x3
+#define RDES_EXT_DELAY_RESP		0x4
+#define RDES_EXT_PDELAY_REQ		0x5
+#define RDES_EXT_PDELAY_RESP		0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP	0x7
+#define RDES_PTP_ANNOUNCE		0x8
+#define RDES_PTP_MANAGEMENT		0x9
+#define RDES_PTP_SIGNALING		0xa
+#define RDES_PTP_PKT_RESERVED_TYPE	0xf
 
 /* Basic descriptor structure for normal and alternate descriptors */
 struct dma_desc {
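The new 0x8–0xf codes cover the remaining PTP message classes (Announce, Management, Signaling, plus a reserved encoding), so the drivers below can count them individually instead of lumping everything unrecognized into the old catch-all counter. A lookup sketch over these codes, assuming the message-type field has already been extracted from the receive descriptor:

	#include <stdio.h>

	#define RDES_EXT_NO_PTP			0x0
	#define RDES_EXT_SYNC			0x1
	#define RDES_EXT_FOLLOW_UP		0x2
	#define RDES_EXT_DELAY_REQ		0x3
	#define RDES_EXT_DELAY_RESP		0x4
	#define RDES_EXT_PDELAY_REQ		0x5
	#define RDES_EXT_PDELAY_RESP		0x6
	#define RDES_EXT_PDELAY_FOLLOW_UP	0x7
	#define RDES_PTP_ANNOUNCE		0x8
	#define RDES_PTP_MANAGEMENT		0x9
	#define RDES_PTP_SIGNALING		0xa
	#define RDES_PTP_PKT_RESERVED_TYPE	0xf

	/* Map a descriptor message-type code to a printable name. */
	static const char *ptp_msg_name(unsigned int message_type)
	{
		switch (message_type) {
		case RDES_EXT_NO_PTP:		return "no PTP";
		case RDES_EXT_SYNC:		return "Sync";
		case RDES_EXT_FOLLOW_UP:	return "Follow_Up";
		case RDES_EXT_DELAY_REQ:	return "Delay_Req";
		case RDES_EXT_DELAY_RESP:	return "Delay_Resp";
		case RDES_EXT_PDELAY_REQ:	return "Pdelay_Req";
		case RDES_EXT_PDELAY_RESP:	return "Pdelay_Resp";
		case RDES_EXT_PDELAY_FOLLOW_UP:	return "Pdelay_Follow_Up";
		case RDES_PTP_ANNOUNCE:		return "Announce";
		case RDES_PTP_MANAGEMENT:	return "Management";
		case RDES_PTP_SIGNALING:	return "Signaling";
		case RDES_PTP_PKT_RESERVED_TYPE: return "reserved";
		default:			return "unknown";
		}
	}

	int main(void)
	{
		printf("0x8 -> %s\n", ptp_msg_name(RDES_PTP_ANNOUNCE));
		return 0;
	}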
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a1b17cd7886b..a601f8d43b75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
 		x->ipv4_pkt_rcvd++;
 	if (rdes1 & RDES1_IPV6_HEADER)
 		x->ipv6_pkt_rcvd++;
-	if (message_type == RDES_EXT_SYNC)
-		x->rx_msg_type_sync++;
+
+	if (message_type == RDES_EXT_NO_PTP)
+		x->no_ptp_rx_msg_type_ext++;
+	else if (message_type == RDES_EXT_SYNC)
+		x->ptp_rx_msg_type_sync++;
 	else if (message_type == RDES_EXT_FOLLOW_UP)
-		x->rx_msg_type_follow_up++;
+		x->ptp_rx_msg_type_follow_up++;
 	else if (message_type == RDES_EXT_DELAY_REQ)
-		x->rx_msg_type_delay_req++;
+		x->ptp_rx_msg_type_delay_req++;
 	else if (message_type == RDES_EXT_DELAY_RESP)
-		x->rx_msg_type_delay_resp++;
+		x->ptp_rx_msg_type_delay_resp++;
 	else if (message_type == RDES_EXT_PDELAY_REQ)
-		x->rx_msg_type_pdelay_req++;
+		x->ptp_rx_msg_type_pdelay_req++;
 	else if (message_type == RDES_EXT_PDELAY_RESP)
-		x->rx_msg_type_pdelay_resp++;
+		x->ptp_rx_msg_type_pdelay_resp++;
 	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
-		x->rx_msg_type_pdelay_follow_up++;
-	else
-		x->rx_msg_type_ext_no_ptp++;
+		x->ptp_rx_msg_type_pdelay_follow_up++;
+	else if (message_type == RDES_PTP_ANNOUNCE)
+		x->ptp_rx_msg_type_announce++;
+	else if (message_type == RDES_PTP_MANAGEMENT)
+		x->ptp_rx_msg_type_management++;
+	else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+		x->ptp_rx_msg_pkt_reserved_type++;
 
 	if (rdes1 & RDES1_PTP_PACKET_TYPE)
 		x->ptp_frame_type++;
@@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
 
 static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
 {
-	return (p->des3 & TDES3_TIMESTAMP_STATUS)
-		>> TDES3_TIMESTAMP_STATUS_SHIFT;
+	/* Context type from W/B descriptor must be zero */
+	if (p->des3 & TDES3_CONTEXT_TYPE)
+		return -EINVAL;
+
+	/* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
+	if (p->des3 & TDES3_TIMESTAMP_STATUS)
+		return 0;
+
+	return 1;
 }
 
-/* NOTE: For RX CTX bit has to be checked before
- * HAVE a specific function for TX and another one for RX
- */
-static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
 	u64 ns;
@@ -223,12 +234,54 @@ static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
 	return ns;
 }
 
-static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_rx_check_timestamp(void *desc)
+{
+	struct dma_desc *p = (struct dma_desc *)desc;
+	u32 own, ctxt;
+	int ret = 1;
+
+	own = p->des3 & RDES3_OWN;
+	ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+	if (likely(!own && ctxt)) {
+		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+			/* Corrupted value */
+			ret = -EINVAL;
+		else
+			/* A valid Timestamp is ready to be read */
+			ret = 0;
+	}
+
+	/* Timestamp not ready */
+	return ret;
+}
+
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
+	int ret = -EINVAL;
+
+	/* Get the status from normal w/b descriptor */
+	if (likely(p->des3 & TDES3_RS1V)) {
+		if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
+			int i = 0;
+
+			/* Check if timestamp is OK from context descriptor */
+			do {
+				ret = dwmac4_rx_check_timestamp(desc);
+				if (ret < 0)
+					goto exit;
+				i++;
 
-	return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
-		>> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+			} while ((ret == 1) || (i < 10));
+
+			if (i == 10)
+				ret = -EBUSY;
+		}
+	}
+exit:
+	return ret;
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -373,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
 	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
 	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
 	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
-	.get_timestamp = dwmac4_wrback_get_timestamp,
-	.get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+	.get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
+	.get_timestamp = dwmac4_get_timestamp,
 	.set_tx_ic = dwmac4_rd_set_tx_ic,
 	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
 	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
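Note the return convention the reworked dwmac4 status helpers adopt: 0 means a timestamp is latched and valid, 1 means not ready, and a negative errno flags a context-type or corrupted descriptor; that is why the stmmac_main.c callers further below test if (!get_..._timestamp_status(...)). A stub sketch of a caller consuming that contract (bit positions are illustrative stand-ins, not the real TDES3 layout):

	#include <errno.h>
	#include <stdio.h>

	#define BIT(n)		(1U << (n))
	#define CTXT_TYPE	BIT(30)	/* illustrative position */
	#define TSTAMP_STATUS	BIT(17)	/* illustrative position */

	/* 0 = valid timestamp latched, 1 = not ready, <0 = error. */
	static int get_tx_timestamp_status(unsigned int des3)
	{
		if (des3 & CTXT_TYPE)
			return -EINVAL;	/* write-back entry is a context desc */
		if (des3 & TSTAMP_STATUS)
			return 0;	/* des0/des1 hold a valid timestamp */
		return 1;		/* timestamp not (yet) available */
	}

	int main(void)
	{
		unsigned int des3 = TSTAMP_STATUS;	/* hypothetical W/B word */

		if (!get_tx_timestamp_status(des3))
			printf("timestamp valid, read des0/des1\n");
		return 0;
	}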
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 0902a2edeaa9..9736c505211a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -59,10 +59,13 @@
 #define TDES3_CTXT_TCMSSV		BIT(26)
 
 /* TDES3 Common */
+#define TDES3_RS1V			BIT(26)
+#define TDES3_RS1V_SHIFT		26
 #define TDES3_LAST_DESCRIPTOR		BIT(28)
 #define TDES3_LAST_DESCRIPTOR_SHIFT	28
 #define TDES3_FIRST_DESCRIPTOR		BIT(29)
 #define TDES3_CONTEXT_TYPE		BIT(30)
+#define TDES3_CONTEXT_TYPE_SHIFT	30
 
 /* TDS3 use for both format (read and write back) */
 #define TDES3_OWN			BIT(31)
@@ -117,6 +120,7 @@
 #define RDES3_LAST_DESCRIPTOR		BIT(28)
 #define RDES3_FIRST_DESCRIPTOR		BIT(29)
 #define RDES3_CONTEXT_DESCRIPTOR	BIT(30)
+#define RDES3_CONTEXT_DESCRIPTOR_SHIFT	30
 
 /* RDES3 (read format) */
 #define RDES3_BUFFER1_VALID_ADDR	BIT(24)
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 38f19c99cf59..e75549327c34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
 		x->ipv4_pkt_rcvd++;
 	if (rdes4 & ERDES4_IPV6_PKT_RCVD)
 		x->ipv6_pkt_rcvd++;
-	if (message_type == RDES_EXT_SYNC)
-		x->rx_msg_type_sync++;
+
+	if (message_type == RDES_EXT_NO_PTP)
+		x->no_ptp_rx_msg_type_ext++;
+	else if (message_type == RDES_EXT_SYNC)
+		x->ptp_rx_msg_type_sync++;
 	else if (message_type == RDES_EXT_FOLLOW_UP)
-		x->rx_msg_type_follow_up++;
+		x->ptp_rx_msg_type_follow_up++;
 	else if (message_type == RDES_EXT_DELAY_REQ)
-		x->rx_msg_type_delay_req++;
+		x->ptp_rx_msg_type_delay_req++;
 	else if (message_type == RDES_EXT_DELAY_RESP)
-		x->rx_msg_type_delay_resp++;
+		x->ptp_rx_msg_type_delay_resp++;
 	else if (message_type == RDES_EXT_PDELAY_REQ)
-		x->rx_msg_type_pdelay_req++;
+		x->ptp_rx_msg_type_pdelay_req++;
 	else if (message_type == RDES_EXT_PDELAY_RESP)
-		x->rx_msg_type_pdelay_resp++;
+		x->ptp_rx_msg_type_pdelay_resp++;
 	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
-		x->rx_msg_type_pdelay_follow_up++;
-	else
-		x->rx_msg_type_ext_no_ptp++;
+		x->ptp_rx_msg_type_pdelay_follow_up++;
+	else if (message_type == RDES_PTP_ANNOUNCE)
+		x->ptp_rx_msg_type_announce++;
+	else if (message_type == RDES_PTP_MANAGEMENT)
+		x->ptp_rx_msg_type_management++;
+	else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+		x->ptp_rx_msg_pkt_reserved_type++;
+
 	if (rdes4 & ERDES4_PTP_FRAME_TYPE)
 		x->ptp_frame_type++;
 	if (rdes4 & ERDES4_PTP_VER)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b15fc55f1b96..4d2a759b8465 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -129,6 +129,7 @@ struct stmmac_priv {
 	int irq_wake;
 	spinlock_t ptp_lock;
 	void __iomem *mmcaddr;
+	void __iomem *ptpaddr;
 	u32 rx_tail_addr;
 	u32 tx_tail_addr;
 	u32 mss;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1e06173fc9d7..c5d0142adda2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
 	STMMAC_STAT(ip_csum_bypassed),
 	STMMAC_STAT(ipv4_pkt_rcvd),
 	STMMAC_STAT(ipv6_pkt_rcvd),
-	STMMAC_STAT(rx_msg_type_ext_no_ptp),
-	STMMAC_STAT(rx_msg_type_sync),
-	STMMAC_STAT(rx_msg_type_follow_up),
-	STMMAC_STAT(rx_msg_type_delay_req),
-	STMMAC_STAT(rx_msg_type_delay_resp),
-	STMMAC_STAT(rx_msg_type_pdelay_req),
-	STMMAC_STAT(rx_msg_type_pdelay_resp),
-	STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+	STMMAC_STAT(no_ptp_rx_msg_type_ext),
+	STMMAC_STAT(ptp_rx_msg_type_sync),
+	STMMAC_STAT(ptp_rx_msg_type_follow_up),
+	STMMAC_STAT(ptp_rx_msg_type_delay_req),
+	STMMAC_STAT(ptp_rx_msg_type_delay_resp),
+	STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
+	STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
+	STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
+	STMMAC_STAT(ptp_rx_msg_type_announce),
+	STMMAC_STAT(ptp_rx_msg_type_management),
+	STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
 	STMMAC_STAT(ptp_frame_type),
 	STMMAC_STAT(ptp_ver),
 	STMMAC_STAT(timestamp_dropped),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index a77f68918010..10d6059b2f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
 }
 
 static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
-					      u32 ptp_clock)
+					      u32 ptp_clock, int gmac4)
 {
 	u32 value = readl(ioaddr + PTP_TCR);
 	unsigned long data;
 
-	/* Convert the ptp_clock to nano second
-	 * formula = (2/ptp_clock) * 1000000000
-	 * where, ptp_clock = 50MHz.
+	/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
+	 * formula = (1/ptp_clock) * 1000000000
+	 * where ptp_clock is 50MHz if fine method is used to update system
 	 */
-	data = (2000000000ULL / ptp_clock);
+	if (value & PTP_TCR_TSCFUPDT)
+		data = (1000000000ULL / 50000000);
+	else
+		data = (1000000000ULL / ptp_clock);
 
 	/* 0.465ns accuracy */
 	if (!(value & PTP_TCR_TSCTRLSSR))
 		data = (data * 1000) / 465;
 
+	data &= PTP_SSIR_SSINC_MASK;
+
+	if (gmac4)
+		data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+
 	writel(data, ioaddr + PTP_SSIR);
 
 	return data;
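Worked numbers for the sub-second increment above: with fine update (PTP_TCR_TSCFUPDT) the reference is the fixed 50 MHz PTP clock, so the period is 1000000000 / 50000000 = 20 ns; with digital rollover disabled the register counts in ~0.465 ns units, so the value scales by 1000/465 to 43. A sketch of the computation using only the constants visible in this change:

	#include <stdio.h>

	#define PTP_SSIR_SSINC_MASK		0xff
	#define GMAC4_PTP_SSIR_SSINC_SHIFT	16

	/* Compute the SSIR value as in the updated helper. */
	static unsigned long sub_second_increment(unsigned long ptp_clock,
						  int fine_update,
						  int digital_rollover,
						  int gmac4)
	{
		unsigned long data;

		/* period in ns; fine update always runs from 50 MHz */
		if (fine_update)
			data = 1000000000UL / 50000000;	/* 20 ns */
		else
			data = 1000000000UL / ptp_clock;

		/* binary rollover counts in ~0.465 ns units */
		if (!digital_rollover)
			data = (data * 1000) / 465;	/* 20 ns -> 43 units */

		data &= PTP_SSIR_SSINC_MASK;
		if (gmac4)
			data <<= GMAC4_PTP_SSIR_SSINC_SHIFT; /* field at 23:16 */
		return data;
	}

	int main(void)
	{
		printf("fine+digital: %lu\n", sub_second_increment(50000000, 1, 1, 0));
		printf("fine+binary:  %lu\n", sub_second_increment(50000000, 1, 0, 0));
		return 0;
	}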
@@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
 }
 
 static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
-				 int add_sub)
+				 int add_sub, int gmac4)
 {
 	u32 value;
 	int limit;
 
+	if (add_sub) {
+		/* If the new sec value needs to be subtracted with
+		 * the system time, then MAC_STSUR reg should be
+		 * programmed with (2^32 – <new_sec_value>)
+		 */
+		if (gmac4)
+			sec = (100000000ULL - sec);
+
+		value = readl(ioaddr + PTP_TCR);
+		if (value & PTP_TCR_TSCTRLSSR)
+			nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
+		else
+			nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
+	}
+
 	writel(sec, ioaddr + PTP_STSUR);
-	writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
-	       ioaddr + PTP_STNSUR);
+	value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+	writel(value, ioaddr + PTP_STNSUR);
+
 	/* issue command to initialize the system time value */
 	value = readl(ioaddr + PTP_TCR);
 	value |= PTP_TCR_TSUPDT;
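For a subtraction the update registers take complements rather than signed values: nsec is programmed as rollover minus nsec (10^9 in digital mode, 2^31 in binary mode) alongside the ADDSUB bit, matching the comment in the hunk. A worked sketch under the constants this change introduces:

	#include <stdio.h>

	#define PTP_DIGITAL_ROLLOVER_MODE	0x3B9ACA00UL	/* 10^9 */
	#define PTP_BINARY_ROLLOVER_MODE	0x80000000UL	/* 2^31 */
	#define PTP_STNSUR_ADDSUB_SHIFT		31

	/* Build the STNSUR value for subtracting 'nsec' nanoseconds. */
	static unsigned long stnsur_for_subtract(unsigned long nsec, int digital)
	{
		unsigned long rollover = digital ? PTP_DIGITAL_ROLLOVER_MODE
						 : PTP_BINARY_ROLLOVER_MODE;

		/* complement of the delta, plus the ADDSUB (subtract) flag */
		return (1UL << PTP_STNSUR_ADDSUB_SHIFT) | (rollover - nsec);
	}

	int main(void)
	{
		/* subtract 300 ns in digital-rollover mode: 10^9 - 300 */
		printf("STNSUR = 0x%08lx\n", stnsur_for_subtract(300, 1));
		return 0;
	}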
@@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr)
 {
 	u64 ns;
 
+	/* Get the TSSS value */
 	ns = readl(ioaddr + PTP_STNSR);
-	/* convert sec time value to nanosecond */
+	/* Get the TSS and convert sec time value to nanosecond */
 	ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
 
 	return ns;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e2c94ec4edd0..1f9ec02fa7f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -340,18 +340,17 @@ out:
 
 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
  * @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
  * @skb : the socket buffer
  * Description :
  * This function will read timestamp from the descriptor & pass it to stack.
  * and also perform some sanity checks.
  */
 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
-				   unsigned int entry, struct sk_buff *skb)
+				   struct dma_desc *p, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps shhwtstamp;
 	u64 ns;
-	void *desc = NULL;
 
 	if (!priv->hwts_tx_en)
 		return;
@@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 		return;
 
-	if (priv->adv_ts)
-		desc = (priv->dma_etx + entry);
-	else
-		desc = (priv->dma_tx + entry);
-
 	/* check tx tstamp status */
-	if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
-		return;
+	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+		/* get the valid tstamp */
+		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
-	/* get the valid tstamp */
-	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+		shhwtstamp.hwtstamp = ns_to_ktime(ns);
 
-	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
-	shhwtstamp.hwtstamp = ns_to_ktime(ns);
-	/* pass tstamp to stack */
-	skb_tstamp_tx(skb, &shhwtstamp);
+		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+		/* pass tstamp to stack */
+		skb_tstamp_tx(skb, &shhwtstamp);
+	}
 
 	return;
 }
 
 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
  * @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
  * @skb : the socket buffer
  * Description :
  * This function will read received packet's timestamp from the descriptor
  * and pass it to stack. It also perform some sanity checks.
  */
-static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
-				   unsigned int entry, struct sk_buff *skb)
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+				   struct dma_desc *np, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
 	u64 ns;
-	void *desc = NULL;
 
 	if (!priv->hwts_rx_en)
 		return;
 
-	if (priv->adv_ts)
-		desc = (priv->dma_erx + entry);
-	else
-		desc = (priv->dma_rx + entry);
-
-	/* exit if rx tstamp is not valid */
-	if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
-		return;
+	/* Check if timestamp is available */
+	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+		/* For GMAC4, the valid timestamp is from CTX next desc. */
+		if (priv->plat->has_gmac4)
+			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
+		else
+			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 
-	/* get valid tstamp */
-	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
-	shhwtstamp = skb_hwtstamps(skb);
-	memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
-	shhwtstamp->hwtstamp = ns_to_ktime(ns);
+		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+		shhwtstamp = skb_hwtstamps(skb);
+		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+		shhwtstamp->hwtstamp = ns_to_ktime(ns);
+	} else {
+		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+	}
 }
 
 /**
@@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 
 	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
-		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
 	else {
 		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
 			 tstamp_all | ptp_v2 | ptp_over_ethernet |
 			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
 			 ts_master_en | snap_type_sel);
-		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
 
 		/* program Sub Second Increment reg */
 		sec_inc = priv->hw->ptp->config_sub_second_increment(
-			priv->ioaddr, priv->clk_ptp_rate);
+			priv->ptpaddr, priv->clk_ptp_rate,
+			priv->plat->has_gmac4);
 		temp = div_u64(1000000000ULL, sec_inc);
 
 		/* calculate default added value:
@@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 		 */
 		temp = (u64)(temp << 32);
 		priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
-		priv->hw->ptp->config_addend(priv->ioaddr,
+		priv->hw->ptp->config_addend(priv->ptpaddr,
 					     priv->default_addend);
 
 		/* initialize system time */
 		ktime_get_real_ts64(&now);
 
 		/* lower 32 bits of tv_sec are safe until y2106 */
-		priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
+		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
 					    now.tv_nsec);
 	}
 
@@ -1340,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 			priv->dev->stats.tx_packets++;
 			priv->xstats.tx_pkt_n++;
 		}
-		stmmac_get_tx_hwtstamp(priv, entry, skb);
+		stmmac_get_tx_hwtstamp(priv, p, skb);
 	}
 
 	if (likely(priv->tx_skbuff_dma[entry].buf)) {
@@ -1486,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-	if (priv->synopsys_id >= DWMAC_CORE_4_00)
+	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
 		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
-	else
+	} else {
+		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
 		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+	}
 
 	dwmac_mmc_intr_all_mask(priv->mmcaddr);
 
@@ -2484,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
 
-		pr_debug("%s: descriptor ring:\n", __func__);
+		pr_info(">>>>>> %s: descriptor ring:\n", __func__);
 		if (priv->extend_desc)
 			rx_head = (void *)priv->dma_erx;
 		else
@@ -2495,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	while (count < limit) {
 		int status;
 		struct dma_desc *p;
+		struct dma_desc *np;
 
 		if (priv->extend_desc)
 			p = (struct dma_desc *)(priv->dma_erx + entry);
@@ -2514,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 		next_entry = priv->cur_rx;
 
 		if (priv->extend_desc)
-			prefetch(priv->dma_erx + next_entry);
+			np = (struct dma_desc *)(priv->dma_erx + next_entry);
 		else
-			prefetch(priv->dma_rx + next_entry);
+			np = priv->dma_rx + next_entry;
+
+		prefetch(np);
 
 		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
 			priv->hw->desc->rx_extended_status(&priv->dev->stats,
@@ -2568,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 				frame_len -= ETH_FCS_LEN;
 
 			if (netif_msg_rx_status(priv)) {
-				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+				pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
 					 p, entry, des);
 				if (frame_len > ETH_FRAME_LEN)
 					pr_debug("\tframe size %d, COE: %d\n",
@@ -2625,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 						   DMA_FROM_DEVICE);
 			}
 
-			stmmac_get_rx_hwtstamp(priv, entry, skb);
-
 			if (netif_msg_pktdata(priv)) {
 				pr_debug("frame received (%dbytes)", frame_len);
 				print_pkt(skb->data, frame_len);
 			}
 
+			stmmac_get_rx_hwtstamp(priv, p, np, skb);
+
 			stmmac_rx_vlan(priv->dev, skb);
 
 			skb->protocol = eth_type_trans(skb, priv->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 1477471f8d44..3eb281d1db08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	priv->hw->ptp->config_addend(priv->ioaddr, addend);
+	priv->hw->ptp->config_addend(priv->ptpaddr, addend);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+	priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
+				      priv->plat->has_gmac4);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	ns = priv->hw->ptp->get_systime(priv->ioaddr);
+	ns = priv->hw->ptp->get_systime(priv->ptpaddr);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
@@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
 
 	spin_lock_irqsave(&priv->ptp_lock, flags);
 
-	priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+	priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
 
 	spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4535df37c227..c06938c47af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -22,51 +22,53 @@
   Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
 ******************************************************************************/
 
 #ifndef	__STMMAC_PTP_H__
 #define	__STMMAC_PTP_H__
 
-/* IEEE 1588 PTP register offsets */
-#define PTP_TCR		0x0700	/* Timestamp Control Reg */
-#define PTP_SSIR	0x0704	/* Sub-Second Increment Reg */
-#define PTP_STSR	0x0708	/* System Time – Seconds Regr */
-#define PTP_STNSR	0x070C	/* System Time – Nanoseconds Reg */
-#define PTP_STSUR	0x0710	/* System Time – Seconds Update Reg */
-#define PTP_STNSUR	0x0714	/* System Time – Nanoseconds Update Reg */
-#define PTP_TAR		0x0718	/* Timestamp Addend Reg */
-#define PTP_TTSR	0x071C	/* Target Time Seconds Reg */
-#define PTP_TTNSR	0x0720	/* Target Time Nanoseconds Reg */
-#define PTP_STHWSR	0x0724	/* System Time - Higher Word Seconds Reg */
-#define PTP_TSR		0x0728	/* Timestamp Status */
+#define	PTP_GMAC4_OFFSET	0xb00
+#define	PTP_GMAC3_X_OFFSET	0x700
 
-#define PTP_STNSUR_ADDSUB_SHIFT 31
+/* IEEE 1588 PTP register offsets */
+#define	PTP_TCR		0x00	/* Timestamp Control Reg */
+#define	PTP_SSIR	0x04	/* Sub-Second Increment Reg */
+#define	PTP_STSR	0x08	/* System Time – Seconds Regr */
+#define	PTP_STNSR	0x0c	/* System Time – Nanoseconds Reg */
+#define	PTP_STSUR	0x10	/* System Time – Seconds Update Reg */
+#define	PTP_STNSUR	0x14	/* System Time – Nanoseconds Update Reg */
+#define	PTP_TAR		0x18	/* Timestamp Addend Reg */
 
-/* PTP TCR defines */
-#define	PTP_TCR_TSENA		0x00000001 /* Timestamp Enable */
-#define	PTP_TCR_TSCFUPDT	0x00000002 /* Timestamp Fine/Coarse Update */
-#define	PTP_TCR_TSINIT		0x00000004 /* Timestamp Initialize */
-#define	PTP_TCR_TSUPDT		0x00000008 /* Timestamp Update */
-/* Timestamp Interrupt Trigger Enable */
-#define	PTP_TCR_TSTRIG		0x00000010
-#define	PTP_TCR_TSADDREG	0x00000020 /* Addend Reg Update */
-#define	PTP_TCR_TSENALL		0x00000100 /* Enable Timestamp for All Frames */
-/* Timestamp Digital or Binary Rollover Control */
-#define	PTP_TCR_TSCTRLSSR	0x00000200
+#define	PTP_STNSUR_ADDSUB_SHIFT		31
+#define	PTP_DIGITAL_ROLLOVER_MODE	0x3B9ACA00	/* 10e9-1 ns */
+#define	PTP_BINARY_ROLLOVER_MODE	0x80000000	/* ~0.466 ns */
 
+/* PTP Timestamp control register defines */
+#define	PTP_TCR_TSENA		BIT(0)	/* Timestamp Enable */
+#define	PTP_TCR_TSCFUPDT	BIT(1)	/* Timestamp Fine/Coarse Update */
+#define	PTP_TCR_TSINIT		BIT(2)	/* Timestamp Initialize */
+#define	PTP_TCR_TSUPDT		BIT(3)	/* Timestamp Update */
+#define	PTP_TCR_TSTRIG		BIT(4)	/* Timestamp Interrupt Trigger Enable */
+#define	PTP_TCR_TSADDREG	BIT(5)	/* Addend Reg Update */
+#define	PTP_TCR_TSENALL		BIT(8)	/* Enable Timestamp for All Frames */
+#define	PTP_TCR_TSCTRLSSR	BIT(9)	/* Digital or Binary Rollover Control */
 /* Enable PTP packet Processing for Version 2 Format */
-#define	PTP_TCR_TSVER2ENA	0x00000400
+#define	PTP_TCR_TSVER2ENA	BIT(10)
 /* Enable Processing of PTP over Ethernet Frames */
-#define	PTP_TCR_TSIPENA		0x00000800
+#define	PTP_TCR_TSIPENA		BIT(11)
 /* Enable Processing of PTP Frames Sent over IPv6-UDP */
-#define	PTP_TCR_TSIPV6ENA	0x00001000
+#define	PTP_TCR_TSIPV6ENA	BIT(12)
 /* Enable Processing of PTP Frames Sent over IPv4-UDP */
-#define	PTP_TCR_TSIPV4ENA	0x00002000
+#define	PTP_TCR_TSIPV4ENA	BIT(13)
 /* Enable Timestamp Snapshot for Event Messages */
-#define	PTP_TCR_TSEVNTENA	0x00004000
+#define	PTP_TCR_TSEVNTENA	BIT(14)
 /* Enable Snapshot for Messages Relevant to Master */
-#define	PTP_TCR_TSMSTRENA	0x00008000
+#define	PTP_TCR_TSMSTRENA	BIT(15)
 /* Select PTP packets for Taking Snapshots */
-#define	PTP_TCR_SNAPTYPSEL_1	0x00010000
+#define	PTP_TCR_SNAPTYPSEL_1	GENMASK(17, 16)
 /* Enable MAC address for PTP Frame Filtering */
-#define	PTP_TCR_TSENMACADDR	0x00040000
+#define	PTP_TCR_TSENMACADDR	BIT(18)
+
+/* SSIR defines */
+#define	PTP_SSIR_SSINC_MASK		0xff
+#define	GMAC4_PTP_SSIR_SSINC_SHIFT	16
 
 #endif /* __STMMAC_PTP_H__ */
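Most of the TCR rewrite above is pure notation: BIT(n) expands to the old hex literal (BIT(10) == 0x00000400, BIT(18) == 0x00040000). The one value that actually changes is PTP_TCR_SNAPTYPSEL_1, which GENMASK(17, 16) widens from the old single bit 0x00010000 to the full two-bit field 0x00030000. A self-checking sketch with userspace stand-ins for the kernel macros:

	#include <assert.h>
	#include <stdio.h>

	/* Userspace stand-ins for the kernel's BIT()/GENMASK() macros. */
	#define BIT(n)		(1UL << (n))
	#define GENMASK(h, l)	(((~0UL) << (l)) & \
				 (~0UL >> (8 * sizeof(long) - 1 - (h))))

	int main(void)
	{
		assert(BIT(0)  == 0x00000001UL);	/* PTP_TCR_TSENA */
		assert(BIT(10) == 0x00000400UL);	/* PTP_TCR_TSVER2ENA */
		assert(BIT(18) == 0x00040000UL);	/* PTP_TCR_TSENMACADDR */
		/* SNAPTYPSEL is the one real change: full 2-bit field */
		assert(GENMASK(17, 16) == 0x00030000UL);
		printf("all masks check out\n");
		return 0;
	}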
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index aa4f9d2d8fa9..02f452730d52 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
 	void __iomem *gregs = bp->gregs;
 	void __iomem *cregs = bp->creg;
 	void __iomem *bregs = bp->bregs;
+	__u32 bblk_dvma = (__u32)bp->bblock_dvma;
 	unsigned char *e = &bp->dev->dev_addr[0];
 
 	/* Latch current counters into statistics. */
@@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
 		    bregs + BMAC_XIFCFG);
 
 	/* Tell the QEC where the ring descriptors are. */
-	sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+	sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
 		    cregs + CREG_RXDS);
-	sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+	sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
 		    cregs + CREG_TXDS);
 
 	/* Setup the FIFO pointers into QEC local memory. */
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index 06dd21707353..532fc56830cf 100644
--- a/drivers/net/ethernet/sun/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -291,7 +291,7 @@ struct bigmac {
 	void __iomem	*bregs;	/* BigMAC Registers */
 	void __iomem	*tregs;	/* BigMAC Transceiver */
 	struct bmac_init_block	*bmac_block;	/* RX and TX descriptors */
-	__u32			bblock_dvma;	/* RX and TX descriptors */
+	dma_addr_t		bblock_dvma;	/* RX and TX descriptors */
 
 	spinlock_t		lock;
 
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 9b825780b3be..9582948145c1 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep)
 {
 	struct qe_init_block *qb = qep->qe_block;
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 qbufs_dvma = qep->buffers_dvma;
+	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
 	int i;
 
 	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
 	void __iomem *mregs = qep->mregs;
 	void __iomem *gregs = qecp->gregs;
 	unsigned char *e = &qep->dev->dev_addr[0];
+	__u32 qblk_dvma = (__u32)qep->qblock_dvma;
 	u32 tmp;
 	int i;
 
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq)
 		return -EAGAIN;
 
 	/* Setup initial rx/tx init block pointers. */
-	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
-	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+	sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+	sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
 
 	/* Enable/mask the various irq's. */
 	sbus_writel(0, cregs + CREG_RIMASK);
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
 	struct net_device *dev = qep->dev;
 	struct qe_rxd *this;
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 qbufs_dvma = qep->buffers_dvma;
+	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
 	int elem = qep->rx_new;
 	u32 flags;
 
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct sunqe *qep = netdev_priv(dev);
 	struct sunqe_buffers *qbufs = qep->buffers;
-	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+	__u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
 	unsigned char *txbuf;
 	int len, entry;
 
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 581781b6b2fa..ae190b77431b 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -334,12 +334,12 @@ struct sunqe {
 	void __iomem	*qcregs;	/* QEC per-channel Registers   */
 	void __iomem	*mregs;		/* Per-channel MACE Registers  */
 	struct qe_init_block	*qe_block;	/* RX and TX descriptors       */
-	__u32			qblock_dvma;	/* RX and TX descriptors       */
+	dma_addr_t		qblock_dvma;	/* RX and TX descriptors       */
 	spinlock_t		lock;		/* Protects txfull state       */
 	int			rx_new, rx_old;	/* RX ring extents	       */
 	int			tx_new, tx_old;	/* TX ring extents	       */
 	struct sunqe_buffers	*buffers;	/* CPU visible address.        */
-	__u32			buffers_dvma;	/* DVMA visible address.       */
+	dma_addr_t		buffers_dvma;	/* DVMA visible address.       */
 	struct sunqec		*parent;
 	u8			mconfig;	/* Base MACE mconfig value     */
 	struct platform_device	*op;		/* QE's OF device struct       */
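Widening these fields to dma_addr_t matches what the DMA API actually returns (which may be 64-bit), while the explicit (__u32) casts added at the use sites keep the value written into the 32-bit QEC/BigMAC descriptor registers; the working assumption, stated here rather than guaranteed by the hunk, is that SBUS DVMA handles on these machines fit in 32 bits, so the truncation is lossless. A sketch of the pattern:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t dma_addr_t;	/* handle may be wider than the register */

	/* Stand-in for sbus_writel(): the device register is 32 bits wide. */
	static void writel32(uint32_t val, const char *reg)
	{
		printf("%s <= 0x%08x\n", reg, val);
	}

	int main(void)
	{
		dma_addr_t qblock_dvma = 0xfff54000ULL;	/* hypothetical handle */
		uint32_t qblk_dvma = (uint32_t)qblock_dvma;	/* low 32 bits */

		writel32(qblk_dvma + 0x00, "CREG_RXDS");
		writel32(qblk_dvma + 0x40, "CREG_TXDS");
		return 0;
	}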
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c6cff3d2ff05..58947aae31c7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		 * to the PHY is the Ethernet MAC DT node.
 		 */
 		ret = of_phy_register_fixed_link(slave_node);
-		if (ret)
+		if (ret) {
+			if (ret != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
 			return ret;
+		}
 		slave_data->phy_node = of_node_get(slave_node);
 	} else if (parp) {
 		u32 phyid;
@@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		}
 		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
 			 PHY_ID_FMT, mdio->name, phyid);
+		put_device(&mdio->dev);
 	} else {
 		dev_err(&pdev->dev,
 			"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
@@ -2440,6 +2444,46 @@ no_phy_slave:
 	return 0;
 }
 
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct cpsw_platform_data *data = &cpsw->data;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *slave_node;
+	int i = 0;
+
+	for_each_available_child_of_node(node, slave_node) {
+		struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+		if (strcmp(slave_node->name, "slave"))
+			continue;
+
+		if (of_phy_is_fixed_link(slave_node)) {
+			struct phy_device *phydev;
+
+			phydev = of_phy_find_device(slave_node);
+			if (phydev) {
+				fixed_phy_unregister(phydev);
+				/* Put references taken by
+				 * of_phy_find_device() and
+				 * of_phy_register_fixed_link().
+				 */
+				phy_device_free(phydev);
+				phy_device_free(phydev);
+			}
+		}
+
+		of_node_put(slave_data->phy_node);
+
+		i++;
+		if (i == data->slaves)
+			break;
+	}
+
+	of_platform_depopulate(&pdev->dev);
+}
+
 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
 {
 	struct cpsw_common *cpsw = priv->cpsw;
@@ -2547,6 +2591,9 @@ static int cpsw_probe(struct platform_device *pdev)
 	int irq;
 
 	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+	if (!cpsw)
+		return -ENOMEM;
+
 	cpsw->dev = &pdev->dev;
 
 	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
@@ -2584,11 +2631,19 @@ static int cpsw_probe(struct platform_device *pdev)
2584 /* Select default pin state */ 2631 /* Select default pin state */
2585 pinctrl_pm_select_default_state(&pdev->dev); 2632 pinctrl_pm_select_default_state(&pdev->dev);
2586 2633
2587 if (cpsw_probe_dt(&cpsw->data, pdev)) { 2634 /* Need to enable clocks with runtime PM api to access module
2588 dev_err(&pdev->dev, "cpsw: platform data missing\n"); 2635 * registers
2589 ret = -ENODEV; 2636 */
2637 ret = pm_runtime_get_sync(&pdev->dev);
2638 if (ret < 0) {
2639 pm_runtime_put_noidle(&pdev->dev);
2590 goto clean_runtime_disable_ret; 2640 goto clean_runtime_disable_ret;
2591 } 2641 }
2642
2643 ret = cpsw_probe_dt(&cpsw->data, pdev);
2644 if (ret)
2645 goto clean_dt_ret;
2646
2592 data = &cpsw->data; 2647 data = &cpsw->data;
2593 cpsw->rx_ch_num = 1; 2648 cpsw->rx_ch_num = 1;
2594 cpsw->tx_ch_num = 1; 2649 cpsw->tx_ch_num = 1;
@@ -2608,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev)
2608 GFP_KERNEL); 2663 GFP_KERNEL);
2609 if (!cpsw->slaves) { 2664 if (!cpsw->slaves) {
2610 ret = -ENOMEM; 2665 ret = -ENOMEM;
2611 goto clean_runtime_disable_ret; 2666 goto clean_dt_ret;
2612 } 2667 }
2613 for (i = 0; i < data->slaves; i++) 2668 for (i = 0; i < data->slaves; i++)
2614 cpsw->slaves[i].slave_num = i; 2669 cpsw->slaves[i].slave_num = i;
@@ -2620,7 +2675,7 @@ static int cpsw_probe(struct platform_device *pdev)
2620 if (IS_ERR(clk)) { 2675 if (IS_ERR(clk)) {
2621 dev_err(priv->dev, "fck is not found\n"); 2676 dev_err(priv->dev, "fck is not found\n");
2622 ret = -ENODEV; 2677 ret = -ENODEV;
2623 goto clean_runtime_disable_ret; 2678 goto clean_dt_ret;
2624 } 2679 }
2625 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; 2680 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2626 2681
@@ -2628,26 +2683,17 @@ static int cpsw_probe(struct platform_device *pdev)
2628 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); 2683 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
2629 if (IS_ERR(ss_regs)) { 2684 if (IS_ERR(ss_regs)) {
2630 ret = PTR_ERR(ss_regs); 2685 ret = PTR_ERR(ss_regs);
2631 goto clean_runtime_disable_ret; 2686 goto clean_dt_ret;
2632 } 2687 }
2633 cpsw->regs = ss_regs; 2688 cpsw->regs = ss_regs;
2634 2689
2635 /* Need to enable clocks with runtime PM api to access module
2636 * registers
2637 */
2638 ret = pm_runtime_get_sync(&pdev->dev);
2639 if (ret < 0) {
2640 pm_runtime_put_noidle(&pdev->dev);
2641 goto clean_runtime_disable_ret;
2642 }
2643 cpsw->version = readl(&cpsw->regs->id_ver); 2690 cpsw->version = readl(&cpsw->regs->id_ver);
2644 pm_runtime_put_sync(&pdev->dev);
2645 2691
2646 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2692 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2647 cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); 2693 cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
2648 if (IS_ERR(cpsw->wr_regs)) { 2694 if (IS_ERR(cpsw->wr_regs)) {
2649 ret = PTR_ERR(cpsw->wr_regs); 2695 ret = PTR_ERR(cpsw->wr_regs);
2650 goto clean_runtime_disable_ret; 2696 goto clean_dt_ret;
2651 } 2697 }
2652 2698
2653 memset(&dma_params, 0, sizeof(dma_params)); 2699 memset(&dma_params, 0, sizeof(dma_params));
@@ -2684,7 +2730,7 @@ static int cpsw_probe(struct platform_device *pdev)
2684 default: 2730 default:
2685 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); 2731 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
2686 ret = -ENODEV; 2732 ret = -ENODEV;
2687 goto clean_runtime_disable_ret; 2733 goto clean_dt_ret;
2688 } 2734 }
2689 for (i = 0; i < cpsw->data.slaves; i++) { 2735 for (i = 0; i < cpsw->data.slaves; i++) {
2690 struct cpsw_slave *slave = &cpsw->slaves[i]; 2736 struct cpsw_slave *slave = &cpsw->slaves[i];
@@ -2713,7 +2759,7 @@ static int cpsw_probe(struct platform_device *pdev)
2713 if (!cpsw->dma) { 2759 if (!cpsw->dma) {
2714 dev_err(priv->dev, "error initializing dma\n"); 2760 dev_err(priv->dev, "error initializing dma\n");
2715 ret = -ENOMEM; 2761 ret = -ENOMEM;
2716 goto clean_runtime_disable_ret; 2762 goto clean_dt_ret;
2717 } 2763 }
2718 2764
2719 cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); 2765 cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
@@ -2811,16 +2857,23 @@ static int cpsw_probe(struct platform_device *pdev)
2811 ret = cpsw_probe_dual_emac(priv); 2857 ret = cpsw_probe_dual_emac(priv);
2812 if (ret) { 2858 if (ret) {
2813 cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); 2859 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
2814 goto clean_ale_ret; 2860 goto clean_unregister_netdev_ret;
2815 } 2861 }
2816 } 2862 }
2817 2863
2864 pm_runtime_put(&pdev->dev);
2865
2818 return 0; 2866 return 0;
2819 2867
2868clean_unregister_netdev_ret:
2869 unregister_netdev(ndev);
2820clean_ale_ret: 2870clean_ale_ret:
2821 cpsw_ale_destroy(cpsw->ale); 2871 cpsw_ale_destroy(cpsw->ale);
2822clean_dma_ret: 2872clean_dma_ret:
2823 cpdma_ctlr_destroy(cpsw->dma); 2873 cpdma_ctlr_destroy(cpsw->dma);
2874clean_dt_ret:
2875 cpsw_remove_dt(pdev);
2876 pm_runtime_put_sync(&pdev->dev);
2824clean_runtime_disable_ret: 2877clean_runtime_disable_ret:
2825 pm_runtime_disable(&pdev->dev); 2878 pm_runtime_disable(&pdev->dev);
2826clean_ndev_ret: 2879clean_ndev_ret:
@@ -2846,7 +2899,7 @@ static int cpsw_remove(struct platform_device *pdev)
2846 2899
2847 cpsw_ale_destroy(cpsw->ale); 2900 cpsw_ale_destroy(cpsw->ale);
2848 cpdma_ctlr_destroy(cpsw->dma); 2901 cpdma_ctlr_destroy(cpsw->dma);
2849 of_platform_depopulate(&pdev->dev); 2902 cpsw_remove_dt(pdev);
2850 pm_runtime_put_sync(&pdev->dev); 2903 pm_runtime_put_sync(&pdev->dev);
2851 pm_runtime_disable(&pdev->dev); 2904 pm_runtime_disable(&pdev->dev);
2852 if (cpsw->data.dual_emac) 2905 if (cpsw->data.dual_emac)
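
The reordering above turns the probe error path into the usual kernel unwind ladder: labels fall through in reverse order of setup, so a failure after cpsw_probe_dt() exits via clean_dt_ret, which now also drops the runtime PM reference taken at the top of probe. A minimal sketch of the idiom, with hypothetical foo_* helpers standing in for the driver-specific steps:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* foo_parse_dt(), foo_setup_hw() and foo_remove_dt() are hypothetical. */
static int foo_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_enable(&pdev->dev);

        /* Hold a runtime PM reference while registers are touched. */
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(&pdev->dev);
                goto err_disable;
        }

        ret = foo_parse_dt(pdev);
        if (ret)
                goto err_put;

        ret = foo_setup_hw(pdev);
        if (ret)
                goto err_remove_dt;

        pm_runtime_put(&pdev->dev);
        return 0;

err_remove_dt:
        foo_remove_dt(pdev);            /* mirrors cpsw_remove_dt() */
err_put:                                /* labels fall through, undoing */
        pm_runtime_put_sync(&pdev->dev);/* the setup in reverse order */
err_disable:
        pm_runtime_disable(&pdev->dev);
        return ret;
}
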
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c649c101bbab..eb5167210681 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register);
279void fixed_phy_unregister(struct phy_device *phy) 279void fixed_phy_unregister(struct phy_device *phy)
280{ 280{
281 phy_device_remove(phy); 281 phy_device_remove(phy);
282 282 of_node_put(phy->mdio.dev.of_node);
283 fixed_phy_del(phy->mdio.addr); 283 fixed_phy_del(phy->mdio.addr);
284} 284}
285EXPORT_SYMBOL_GPL(fixed_phy_unregister); 285EXPORT_SYMBOL_GPL(fixed_phy_unregister);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2e37eb337d48..24b4a09468dd 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -62,6 +62,10 @@
62/* Vitesse Extended Page Access Register */ 62/* Vitesse Extended Page Access Register */
63#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f 63#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
64 64
65/* Vitesse VSC8601 Extended PHY Control Register 1 */
66#define MII_VSC8601_EPHY_CTL 0x17
67#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
68
65#define PHY_ID_VSC8234 0x000fc620 69#define PHY_ID_VSC8234 0x000fc620
66#define PHY_ID_VSC8244 0x000fc6c0 70#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 71#define PHY_ID_VSC8514 0x00070670
@@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
111 return err; 115 return err;
112} 116}
113 117
118/* This adds a skew for both TX and RX clocks, so the skew should only be
119 * applied to "rgmii-id" interfaces. It may not work as expected
120 * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
121static int vsc8601_add_skew(struct phy_device *phydev)
122{
123 int ret;
124
125 ret = phy_read(phydev, MII_VSC8601_EPHY_CTL);
126 if (ret < 0)
127 return ret;
128
129 ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW;
130 return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret);
131}
132
133static int vsc8601_config_init(struct phy_device *phydev)
134{
135 int ret = 0;
136
137 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
138 ret = vsc8601_add_skew(phydev);
139
140 if (ret < 0)
141 return ret;
142
143 return genphy_config_init(phydev);
144}
145
114static int vsc824x_ack_interrupt(struct phy_device *phydev) 146static int vsc824x_ack_interrupt(struct phy_device *phydev)
115{ 147{
116 int err = 0; 148 int err = 0;
@@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = {
275 .phy_id_mask = 0x000ffff0, 307 .phy_id_mask = 0x000ffff0,
276 .features = PHY_GBIT_FEATURES, 308 .features = PHY_GBIT_FEATURES,
277 .flags = PHY_HAS_INTERRUPT, 309 .flags = PHY_HAS_INTERRUPT,
278 .config_init = &genphy_config_init, 310 .config_init = &vsc8601_config_init,
279 .config_aneg = &genphy_config_aneg, 311 .config_aneg = &genphy_config_aneg,
280 .read_status = &genphy_read_status, 312 .read_status = &genphy_read_status,
281 .ack_interrupt = &vsc824x_ack_interrupt, 313 .ack_interrupt = &vsc824x_ack_interrupt,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fd8b1e62301f..7276d5a95bd0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
1497 netif_napi_del(&vi->rq[i].napi); 1497 netif_napi_del(&vi->rq[i].napi);
1498 } 1498 }
1499 1499
1500 /* We called napi_hash_del() before netif_napi_del(),
 1501 * so we must respect an RCU grace period before freeing vi->rq
1502 */
1503 synchronize_net();
1504
1500 kfree(vi->rq); 1505 kfree(vi->rq);
1501 kfree(vi->sq); 1506 kfree(vi->sq);
1502} 1507}
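
synchronize_net() is a thin wrapper around synchronize_rcu(): napi_hash_del() only unlinks the NAPI instances from an RCU-protected hash, so readers inside rcu_read_lock() sections may still hold pointers into vi->rq until a grace period has elapsed. A self-contained sketch of the unlink/wait/free ordering (struct foo and the global pointer are invented for illustration):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int val;
};

static struct foo __rcu *gp;            /* illustrative RCU-protected pointer */

static int foo_read(void)
{
        struct foo *f;
        int val = -1;

        rcu_read_lock();
        f = rcu_dereference(gp);        /* may race with foo_free() */
        if (f)
                val = f->val;
        rcu_read_unlock();
        return val;
}

static void foo_free(void)
{
        struct foo *f = rcu_dereference_protected(gp, 1);

        RCU_INIT_POINTER(gp, NULL);     /* unlink, like napi_hash_del() */
        synchronize_rcu();              /* wait out readers already inside */
        kfree(f);                       /* now no reader can still see it */
}
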
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 431f13b4faf6..d3bad5779376 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
826 data->bcn_delta = do_div(delta, bcn_int); 826 data->bcn_delta = do_div(delta, bcn_int);
827 } else { 827 } else {
828 data->tsf_offset -= delta; 828 data->tsf_offset -= delta;
829 data->bcn_delta = -do_div(delta, bcn_int); 829 data->bcn_delta = -(s64)do_div(delta, bcn_int);
830 } 830 }
831} 831}
832 832
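
do_div() returns the remainder as a 32-bit unsigned value, and C negates unsigned operands in unsigned arithmetic, so -do_div(...) wraps to a huge positive number before being widened into the s64 bcn_delta; casting to s64 first makes the negation signed. The same rule, distilled into a compilable userspace program:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rem = 100;             /* stands in for do_div()'s remainder */
        int64_t bad = -rem;             /* negation wraps in 32-bit unsigned */
        int64_t good = -(int64_t)rem;   /* widen to signed, then negate */

        printf("bad  = %lld\n", (long long)bad);        /* 4294967196 */
        printf("good = %lld\n", (long long)good);       /* -100 */
        return 0;
}
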
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index b470f7e3521d..5a3145a02547 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np)
292 mdiodev = to_mdio_device(d); 292 mdiodev = to_mdio_device(d);
293 if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) 293 if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
294 return to_phy_device(d); 294 return to_phy_device(d);
295 put_device(d);
295 } 296 }
296 297
297 return NULL; 298 return NULL;
@@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np)
456 status.link = 1; 457 status.link = 1;
457 status.duplex = of_property_read_bool(fixed_link_node, 458 status.duplex = of_property_read_bool(fixed_link_node,
458 "full-duplex"); 459 "full-duplex");
459 if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) 460 if (of_property_read_u32(fixed_link_node, "speed",
461 &status.speed)) {
462 of_node_put(fixed_link_node);
460 return -EINVAL; 463 return -EINVAL;
464 }
461 status.pause = of_property_read_bool(fixed_link_node, "pause"); 465 status.pause = of_property_read_bool(fixed_link_node, "pause");
462 status.asym_pause = of_property_read_bool(fixed_link_node, 466 status.asym_pause = of_property_read_bool(fixed_link_node,
463 "asym-pause"); 467 "asym-pause");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 87e6334eab93..547ca7b3f098 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -459,8 +459,6 @@ static int twl4030_phy_power_off(struct phy *phy)
459 struct twl4030_usb *twl = phy_get_drvdata(phy); 459 struct twl4030_usb *twl = phy_get_drvdata(phy);
460 460
461 dev_dbg(twl->dev, "%s\n", __func__); 461 dev_dbg(twl->dev, "%s\n", __func__);
462 pm_runtime_mark_last_busy(twl->dev);
463 pm_runtime_put_autosuspend(twl->dev);
464 462
465 return 0; 463 return 0;
466} 464}
@@ -472,6 +470,8 @@ static int twl4030_phy_power_on(struct phy *phy)
472 dev_dbg(twl->dev, "%s\n", __func__); 470 dev_dbg(twl->dev, "%s\n", __func__);
473 pm_runtime_get_sync(twl->dev); 471 pm_runtime_get_sync(twl->dev);
474 schedule_delayed_work(&twl->id_workaround_work, HZ); 472 schedule_delayed_work(&twl->id_workaround_work, HZ);
473 pm_runtime_mark_last_busy(twl->dev);
474 pm_runtime_put_autosuspend(twl->dev);
475 475
476 return 0; 476 return 0;
477} 477}
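
Moving the put into the power-on path makes each handler self-balanced: power-on resumes the PHY with pm_runtime_get_sync() and immediately hands its reference to the autosuspend machinery, so the device stays up for the autosuspend delay instead of relying on power-off to drop a reference taken elsewhere. A sketch of the self-balanced idiom, with a hypothetical handler:

#include <linux/pm_runtime.h>

static int foo_power_on(struct device *dev)     /* hypothetical handler */
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume the device now */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* rebalance on failure */
                return ret;
        }

        /* ... program the hardware ... */

        /* Drop our reference; autosuspend keeps the device active for
         * the configured delay (or while others hold references).
         */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);

        return 0;
}
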
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8aa769a2d919..91b70bc46e7f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4010,7 +4010,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4010 SAM_STAT_CHECK_CONDITION; 4010 SAM_STAT_CHECK_CONDITION;
4011} 4011}
4012 4012
4013 4013static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
4014{
4015 return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
4016}
4014 4017
4015/** 4018/**
4016 * scsih_qcmd - main scsi request entry point 4019 * scsih_qcmd - main scsi request entry point
@@ -4038,6 +4041,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4038 if (ioc->logging_level & MPT_DEBUG_SCSI) 4041 if (ioc->logging_level & MPT_DEBUG_SCSI)
4039 scsi_print_command(scmd); 4042 scsi_print_command(scmd);
4040 4043
4044 /*
 4048 4045 * Block the device for any subsequent commands until this
 4049 4046 * command is done.
4047 */
4048 if (ata_12_16_cmd(scmd))
4049 scsi_internal_device_block(scmd->device);
4050
4041 sas_device_priv_data = scmd->device->hostdata; 4051 sas_device_priv_data = scmd->device->hostdata;
4042 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 4052 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4043 scmd->result = DID_NO_CONNECT << 16; 4053 scmd->result = DID_NO_CONNECT << 16;
@@ -4613,6 +4623,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4613 if (scmd == NULL) 4623 if (scmd == NULL)
4614 return 1; 4624 return 1;
4615 4625
4626 if (ata_12_16_cmd(scmd))
4627 scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
4628
4616 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4629 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4617 4630
4618 if (mpi_reply == NULL) { 4631 if (mpi_reply == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 567fa080e261..56d6142852a5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1456,15 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1456 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1456 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1457 sp = req->outstanding_cmds[cnt]; 1457 sp = req->outstanding_cmds[cnt];
1458 if (sp) { 1458 if (sp) {
 1459 /* Get a reference to the sp and drop the lock. 1459 /* Don't abort commands while the adapter is in
 1460 * The reference ensures this sp->done() call 1460 * EEH recovery, since it isn't accessible/responding.
1461 * - and not the call in qla2xxx_eh_abort() -
1462 * ends the SCSI command (with result 'res').
1463 */ 1461 */
1464 sp_get(sp); 1462 if (!ha->flags.eeh_busy) {
1465 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1463 /* Get a reference to the sp and drop the lock.
1466 qla2xxx_eh_abort(GET_CMD_SP(sp)); 1464 * The reference ensures this sp->done() call
1467 spin_lock_irqsave(&ha->hardware_lock, flags); 1465 * - and not the call in qla2xxx_eh_abort() -
1466 * ends the SCSI command (with result 'res').
1467 */
1468 sp_get(sp);
1469 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1470 qla2xxx_eh_abort(GET_CMD_SP(sp));
1471 spin_lock_irqsave(&ha->hardware_lock, flags);
1472 }
1468 req->outstanding_cmds[cnt] = NULL; 1473 req->outstanding_cmds[cnt] = NULL;
1469 sp->done(vha, sp, res); 1474 sp->done(vha, sp, res);
1470 } 1475 }
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 7a223074df3d..afada655f861 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
669 .set_cur_state = powerclamp_set_cur_state, 669 .set_cur_state = powerclamp_set_cur_state,
670}; 670};
671 671
672static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
673 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
674 {}
675};
676MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
677
672static int __init powerclamp_probe(void) 678static int __init powerclamp_probe(void)
673{ 679{
674 if (!boot_cpu_has(X86_FEATURE_MWAIT)) { 680
681 if (!x86_match_cpu(intel_powerclamp_ids)) {
675 pr_err("CPU does not support MWAIT"); 682 pr_err("CPU does not support MWAIT");
676 return -ENODEV; 683 return -ENODEV;
677 } 684 }
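
Besides replacing the open-coded boot_cpu_has() check, the x86_cpu_id table plus MODULE_DEVICE_TABLE(x86cpu, ...) generates a modalias, so udev can autoload the module on any CPU that advertises MWAIT rather than requiring it to be loaded by hand. A hypothetical module using the same idiom:

#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeatures.h>

static const struct x86_cpu_id __initconst foo_ids[] = {
        { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
        {}
};
MODULE_DEVICE_TABLE(x86cpu, foo_ids);

static int __init foo_init(void)
{
        if (!x86_match_cpu(foo_ids))
                return -ENODEV;         /* no MWAIT: refuse to load */
        return 0;
}
module_init(foo_init);
MODULE_LICENSE("GPL");
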
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 69426e644d17..3dbb4a21ab44 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
914 if (!ci) 914 if (!ci)
915 return -ENOMEM; 915 return -ENOMEM;
916 916
917 spin_lock_init(&ci->lock);
917 ci->dev = dev; 918 ci->dev = dev;
918 ci->platdata = dev_get_platdata(dev); 919 ci->platdata = dev_get_platdata(dev);
919 ci->imx28_write_fix = !!(ci->platdata->flags & 920 ci->imx28_write_fix = !!(ci->platdata->flags &
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 661f43fe0f9e..c9e80ad48fdc 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci)
1889 struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; 1889 struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
1890 int retval = 0; 1890 int retval = 0;
1891 1891
1892 spin_lock_init(&ci->lock);
1893
1894 ci->gadget.ops = &usb_gadget_ops; 1892 ci->gadget.ops = &usb_gadget_ops;
1895 ci->gadget.speed = USB_SPEED_UNKNOWN; 1893 ci->gadget.speed = USB_SPEED_UNKNOWN;
1896 ci->gadget.max_speed = USB_SPEED_HIGH; 1894 ci->gadget.max_speed = USB_SPEED_HIGH;
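
Hoisting spin_lock_init() into ci_hdrc_probe() initializes the lock as soon as the structure exists, before interrupts or role-switching paths can contend on it; leaving it in udc_start() initialized it late, and only when the gadget role was actually started. The general rule, sketched with a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {                            /* hypothetical driver state */
        spinlock_t lock;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo *f;

        f = devm_kzalloc(&pdev->dev, sizeof(*f), GFP_KERNEL);
        if (!f)
                return -ENOMEM;

        /* Init locks before anything (IRQs, children) can reach them. */
        spin_lock_init(&f->lock);

        /* ... only now request IRQs and register child devices ... */
        platform_set_drvdata(pdev, f);
        return 0;
}
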
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e40d47d47d82..17989b72cdae 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f,
3225 3225
3226 switch (creq->bRequestType & USB_RECIP_MASK) { 3226 switch (creq->bRequestType & USB_RECIP_MASK) {
3227 case USB_RECIP_INTERFACE: 3227 case USB_RECIP_INTERFACE:
3228 return ffs_func_revmap_intf(func, 3228 return (ffs_func_revmap_intf(func,
3229 le16_to_cpu(creq->wIndex) >= 0); 3229 le16_to_cpu(creq->wIndex)) >= 0);
3230 case USB_RECIP_ENDPOINT: 3230 case USB_RECIP_ENDPOINT:
3231 return ffs_func_revmap_ep(func, 3231 return (ffs_func_revmap_ep(func,
3232 le16_to_cpu(creq->wIndex) >= 0); 3232 le16_to_cpu(creq->wIndex)) >= 0);
3233 default: 3233 default:
3234 return (bool) (func->ffs->user_flags & 3234 return (bool) (func->ffs->user_flags &
3235 FUNCTIONFS_ALL_CTRL_RECIP); 3235 FUNCTIONFS_ALL_CTRL_RECIP);
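
The f_fs bug is purely one of parenthesis placement: the ">= 0" comparison sat inside the argument list, so the revmap helpers were called with the constant truth value of wIndex >= 0, and their own (possibly negative) return value was used directly as the boolean result. A userspace distillation:

#include <stdio.h>

/* Stand-in for ffs_func_revmap_intf(): negative means "no match". */
static int revmap(int idx)
{
        return idx == 3 ? 0 : -1;
}

int main(void)
{
        int w = 7;

        /* Buggy: comparison inside the call; revmap() receives (w >= 0). */
        int buggy = revmap(w >= 0);

        /* Fixed: compare the function's result against 0. */
        int fixed = revmap(w) >= 0;

        printf("buggy=%d fixed=%d\n", buggy, fixed);    /* buggy=-1 fixed=0 */
        return 0;
}
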
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index e01116e4c067..c3e172e15ec3 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -986,7 +986,7 @@ b_host:
986 } 986 }
987#endif 987#endif
988 988
989 schedule_work(&musb->irq_work); 989 schedule_delayed_work(&musb->irq_work, 0);
990 990
991 return handled; 991 return handled;
992} 992}
@@ -1855,14 +1855,23 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1855 MUSB_DEVCTL_HR; 1855 MUSB_DEVCTL_HR;
1856 switch (devctl & ~s) { 1856 switch (devctl & ~s) {
1857 case MUSB_QUIRK_B_INVALID_VBUS_91: 1857 case MUSB_QUIRK_B_INVALID_VBUS_91:
1858 if (!musb->session && !musb->quirk_invalid_vbus) { 1858 if (musb->quirk_retries--) {
1859 musb->quirk_invalid_vbus = true;
1860 musb_dbg(musb, 1859 musb_dbg(musb,
1861 "First invalid vbus, assume no session"); 1860 "Poll devctl on invalid vbus, assume no session");
1861 schedule_delayed_work(&musb->irq_work,
1862 msecs_to_jiffies(1000));
1863
1862 return; 1864 return;
1863 } 1865 }
1864 break;
1865 case MUSB_QUIRK_A_DISCONNECT_19: 1866 case MUSB_QUIRK_A_DISCONNECT_19:
1867 if (musb->quirk_retries--) {
1868 musb_dbg(musb,
1869 "Poll devctl on possible host mode disconnect");
1870 schedule_delayed_work(&musb->irq_work,
1871 msecs_to_jiffies(1000));
1872
1873 return;
1874 }
1866 if (!musb->session) 1875 if (!musb->session)
1867 break; 1876 break;
1868 musb_dbg(musb, "Allow PM on possible host mode disconnect"); 1877 musb_dbg(musb, "Allow PM on possible host mode disconnect");
@@ -1886,9 +1895,9 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1886 if (error < 0) 1895 if (error < 0)
1887 dev_err(musb->controller, "Could not enable: %i\n", 1896 dev_err(musb->controller, "Could not enable: %i\n",
1888 error); 1897 error);
1898 musb->quirk_retries = 3;
1889 } else { 1899 } else {
1890 musb_dbg(musb, "Allow PM with no session: %02x", devctl); 1900 musb_dbg(musb, "Allow PM with no session: %02x", devctl);
1891 musb->quirk_invalid_vbus = false;
1892 pm_runtime_mark_last_busy(musb->controller); 1901 pm_runtime_mark_last_busy(musb->controller);
1893 pm_runtime_put_autosuspend(musb->controller); 1902 pm_runtime_put_autosuspend(musb->controller);
1894 } 1903 }
@@ -1899,7 +1908,7 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1899/* Only used to provide driver mode change events */ 1908/* Only used to provide driver mode change events */
1900static void musb_irq_work(struct work_struct *data) 1909static void musb_irq_work(struct work_struct *data)
1901{ 1910{
1902 struct musb *musb = container_of(data, struct musb, irq_work); 1911 struct musb *musb = container_of(data, struct musb, irq_work.work);
1903 1912
1904 musb_pm_runtime_check_session(musb); 1913 musb_pm_runtime_check_session(musb);
1905 1914
@@ -1969,6 +1978,7 @@ static struct musb *allocate_instance(struct device *dev,
1969 INIT_LIST_HEAD(&musb->control); 1978 INIT_LIST_HEAD(&musb->control);
1970 INIT_LIST_HEAD(&musb->in_bulk); 1979 INIT_LIST_HEAD(&musb->in_bulk);
1971 INIT_LIST_HEAD(&musb->out_bulk); 1980 INIT_LIST_HEAD(&musb->out_bulk);
1981 INIT_LIST_HEAD(&musb->pending_list);
1972 1982
1973 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1983 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1974 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; 1984 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -2018,6 +2028,84 @@ static void musb_free(struct musb *musb)
2018 musb_host_free(musb); 2028 musb_host_free(musb);
2019} 2029}
2020 2030
2031struct musb_pending_work {
2032 int (*callback)(struct musb *musb, void *data);
2033 void *data;
2034 struct list_head node;
2035};
2036
2037/*
2038 * Called from musb_runtime_resume(), musb_resume(), and
2039 * musb_queue_resume_work(). Callers must take musb->lock.
2040 */
2041static int musb_run_resume_work(struct musb *musb)
2042{
2043 struct musb_pending_work *w, *_w;
2044 unsigned long flags;
2045 int error = 0;
2046
2047 spin_lock_irqsave(&musb->list_lock, flags);
2048 list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
2049 if (w->callback) {
2050 error = w->callback(musb, w->data);
2051 if (error < 0) {
2052 dev_err(musb->controller,
2053 "resume callback %p failed: %i\n",
2054 w->callback, error);
2055 }
2056 }
2057 list_del(&w->node);
2058 devm_kfree(musb->controller, w);
2059 }
2060 spin_unlock_irqrestore(&musb->list_lock, flags);
2061
2062 return error;
2063}
2064
2065/*
 2066 * Run the callback now if the device is active, otherwise queue it to
 2067 * run on resume. Caller must hold musb->lock and an RPM reference.
 2068 *
 2069 * Note that we cowardly refuse to queue work once musb PM runtime
 2070 * resume has finished calling musb_run_resume_work(), and return
 2071 * -EINPROGRESS instead.
2072 */
2073int musb_queue_resume_work(struct musb *musb,
2074 int (*callback)(struct musb *musb, void *data),
2075 void *data)
2076{
2077 struct musb_pending_work *w;
2078 unsigned long flags;
2079 int error;
2080
2081 if (WARN_ON(!callback))
2082 return -EINVAL;
2083
2084 if (pm_runtime_active(musb->controller))
2085 return callback(musb, data);
2086
2087 w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
2088 if (!w)
2089 return -ENOMEM;
2090
2091 w->callback = callback;
2092 w->data = data;
2093 spin_lock_irqsave(&musb->list_lock, flags);
2094 if (musb->is_runtime_suspended) {
2095 list_add_tail(&w->node, &musb->pending_list);
2096 error = 0;
2097 } else {
2098 dev_err(musb->controller, "could not add resume work %p\n",
2099 callback);
2100 devm_kfree(musb->controller, w);
2101 error = -EINPROGRESS;
2102 }
2103 spin_unlock_irqrestore(&musb->list_lock, flags);
2104
2105 return error;
2106}
2107EXPORT_SYMBOL_GPL(musb_queue_resume_work);
2108
2021static void musb_deassert_reset(struct work_struct *work) 2109static void musb_deassert_reset(struct work_struct *work)
2022{ 2110{
2023 struct musb *musb; 2111 struct musb *musb;
@@ -2065,6 +2153,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2065 } 2153 }
2066 2154
2067 spin_lock_init(&musb->lock); 2155 spin_lock_init(&musb->lock);
2156 spin_lock_init(&musb->list_lock);
2068 musb->board_set_power = plat->set_power; 2157 musb->board_set_power = plat->set_power;
2069 musb->min_power = plat->min_power; 2158 musb->min_power = plat->min_power;
2070 musb->ops = plat->platform_ops; 2159 musb->ops = plat->platform_ops;
@@ -2208,7 +2297,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2208 musb_generic_disable(musb); 2297 musb_generic_disable(musb);
2209 2298
2210 /* Init IRQ workqueue before request_irq */ 2299 /* Init IRQ workqueue before request_irq */
2211 INIT_WORK(&musb->irq_work, musb_irq_work); 2300 INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
2212 INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); 2301 INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
2213 INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); 2302 INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
2214 2303
@@ -2291,6 +2380,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2291 if (status) 2380 if (status)
2292 goto fail5; 2381 goto fail5;
2293 2382
2383 musb->is_initialized = 1;
2294 pm_runtime_mark_last_busy(musb->controller); 2384 pm_runtime_mark_last_busy(musb->controller);
2295 pm_runtime_put_autosuspend(musb->controller); 2385 pm_runtime_put_autosuspend(musb->controller);
2296 2386
@@ -2304,7 +2394,7 @@ fail4:
2304 musb_host_cleanup(musb); 2394 musb_host_cleanup(musb);
2305 2395
2306fail3: 2396fail3:
2307 cancel_work_sync(&musb->irq_work); 2397 cancel_delayed_work_sync(&musb->irq_work);
2308 cancel_delayed_work_sync(&musb->finish_resume_work); 2398 cancel_delayed_work_sync(&musb->finish_resume_work);
2309 cancel_delayed_work_sync(&musb->deassert_reset_work); 2399 cancel_delayed_work_sync(&musb->deassert_reset_work);
2310 if (musb->dma_controller) 2400 if (musb->dma_controller)
@@ -2371,7 +2461,7 @@ static int musb_remove(struct platform_device *pdev)
2371 */ 2461 */
2372 musb_exit_debugfs(musb); 2462 musb_exit_debugfs(musb);
2373 2463
2374 cancel_work_sync(&musb->irq_work); 2464 cancel_delayed_work_sync(&musb->irq_work);
2375 cancel_delayed_work_sync(&musb->finish_resume_work); 2465 cancel_delayed_work_sync(&musb->finish_resume_work);
2376 cancel_delayed_work_sync(&musb->deassert_reset_work); 2466 cancel_delayed_work_sync(&musb->deassert_reset_work);
2377 pm_runtime_get_sync(musb->controller); 2467 pm_runtime_get_sync(musb->controller);
@@ -2557,6 +2647,7 @@ static int musb_suspend(struct device *dev)
2557 2647
2558 musb_platform_disable(musb); 2648 musb_platform_disable(musb);
2559 musb_generic_disable(musb); 2649 musb_generic_disable(musb);
2650 WARN_ON(!list_empty(&musb->pending_list));
2560 2651
2561 spin_lock_irqsave(&musb->lock, flags); 2652 spin_lock_irqsave(&musb->lock, flags);
2562 2653
@@ -2578,9 +2669,11 @@ static int musb_suspend(struct device *dev)
2578 2669
2579static int musb_resume(struct device *dev) 2670static int musb_resume(struct device *dev)
2580{ 2671{
2581 struct musb *musb = dev_to_musb(dev); 2672 struct musb *musb = dev_to_musb(dev);
2582 u8 devctl; 2673 unsigned long flags;
2583 u8 mask; 2674 int error;
2675 u8 devctl;
2676 u8 mask;
2584 2677
2585 /* 2678 /*
2586 * For static cmos like DaVinci, register values were preserved 2679 * For static cmos like DaVinci, register values were preserved
@@ -2614,6 +2707,13 @@ static int musb_resume(struct device *dev)
2614 2707
2615 musb_start(musb); 2708 musb_start(musb);
2616 2709
2710 spin_lock_irqsave(&musb->lock, flags);
2711 error = musb_run_resume_work(musb);
2712 if (error)
2713 dev_err(musb->controller, "resume work failed with %i\n",
2714 error);
2715 spin_unlock_irqrestore(&musb->lock, flags);
2716
2617 return 0; 2717 return 0;
2618} 2718}
2619 2719
@@ -2622,14 +2722,16 @@ static int musb_runtime_suspend(struct device *dev)
2622 struct musb *musb = dev_to_musb(dev); 2722 struct musb *musb = dev_to_musb(dev);
2623 2723
2624 musb_save_context(musb); 2724 musb_save_context(musb);
2725 musb->is_runtime_suspended = 1;
2625 2726
2626 return 0; 2727 return 0;
2627} 2728}
2628 2729
2629static int musb_runtime_resume(struct device *dev) 2730static int musb_runtime_resume(struct device *dev)
2630{ 2731{
2631 struct musb *musb = dev_to_musb(dev); 2732 struct musb *musb = dev_to_musb(dev);
2632 static int first = 1; 2733 unsigned long flags;
2734 int error;
2633 2735
2634 /* 2736 /*
2635 * When pm_runtime_get_sync called for the first time in driver 2737 * When pm_runtime_get_sync called for the first time in driver
@@ -2640,9 +2742,10 @@ static int musb_runtime_resume(struct device *dev)
2640 * Also context restore without save does not make 2742 * Also context restore without save does not make
2641 * any sense 2743 * any sense
2642 */ 2744 */
2643 if (!first) 2745 if (!musb->is_initialized)
2644 musb_restore_context(musb); 2746 return 0;
2645 first = 0; 2747
2748 musb_restore_context(musb);
2646 2749
2647 if (musb->need_finish_resume) { 2750 if (musb->need_finish_resume) {
2648 musb->need_finish_resume = 0; 2751 musb->need_finish_resume = 0;
@@ -2650,6 +2753,14 @@ static int musb_runtime_resume(struct device *dev)
2650 msecs_to_jiffies(USB_RESUME_TIMEOUT)); 2753 msecs_to_jiffies(USB_RESUME_TIMEOUT));
2651 } 2754 }
2652 2755
2756 spin_lock_irqsave(&musb->lock, flags);
2757 error = musb_run_resume_work(musb);
2758 if (error)
2759 dev_err(musb->controller, "resume work failed with %i\n",
2760 error);
2761 musb->is_runtime_suspended = 0;
2762 spin_unlock_irqrestore(&musb->lock, flags);
2763
2653 return 0; 2764 return 0;
2654} 2765}
2655 2766
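
musb_queue_resume_work() gives code that may run while the controller is runtime-suspended a safe way to defer register access: if the device is already active the callback runs immediately, otherwise it is parked on pending_list and replayed by musb_run_resume_work() from the resume handlers. A caller-side sketch (the foo_* names and struct foo_request are hypothetical):

/* Runs either directly or from musb_run_resume_work() on resume; either
 * way the original caller held musb->lock and a runtime PM reference.
 */
static int foo_restart(struct musb *musb, void *data)
{
        struct foo_request *req = data;

        foo_start_io(musb, req);        /* hypothetical register work */
        return 0;
}

static int foo_kick(struct musb *musb, struct foo_request *req)
{
        int err;

        /* Context: musb->lock held, pm_runtime_get() already done. */
        err = musb_queue_resume_work(musb, foo_restart, req);
        if (err < 0)
                dev_err(musb->controller, "%s resume work: %i\n",
                        __func__, err);
        return err;
}
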
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 2cb88a498f8a..91817d77d59c 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -303,13 +303,14 @@ struct musb_context_registers {
303struct musb { 303struct musb {
304 /* device lock */ 304 /* device lock */
305 spinlock_t lock; 305 spinlock_t lock;
306 spinlock_t list_lock; /* resume work list lock */
306 307
307 struct musb_io io; 308 struct musb_io io;
308 const struct musb_platform_ops *ops; 309 const struct musb_platform_ops *ops;
309 struct musb_context_registers context; 310 struct musb_context_registers context;
310 311
311 irqreturn_t (*isr)(int, void *); 312 irqreturn_t (*isr)(int, void *);
312 struct work_struct irq_work; 313 struct delayed_work irq_work;
313 struct delayed_work deassert_reset_work; 314 struct delayed_work deassert_reset_work;
314 struct delayed_work finish_resume_work; 315 struct delayed_work finish_resume_work;
315 struct delayed_work gadget_work; 316 struct delayed_work gadget_work;
@@ -337,6 +338,7 @@ struct musb {
337 struct list_head control; /* of musb_qh */ 338 struct list_head control; /* of musb_qh */
338 struct list_head in_bulk; /* of musb_qh */ 339 struct list_head in_bulk; /* of musb_qh */
339 struct list_head out_bulk; /* of musb_qh */ 340 struct list_head out_bulk; /* of musb_qh */
341 struct list_head pending_list; /* pending work list */
340 342
341 struct timer_list otg_timer; 343 struct timer_list otg_timer;
342 struct notifier_block nb; 344 struct notifier_block nb;
@@ -379,12 +381,15 @@ struct musb {
379 381
380 int port_mode; /* MUSB_PORT_MODE_* */ 382 int port_mode; /* MUSB_PORT_MODE_* */
381 bool session; 383 bool session;
382 bool quirk_invalid_vbus; 384 unsigned long quirk_retries;
383 bool is_host; 385 bool is_host;
384 386
385 int a_wait_bcon; /* VBUS timeout in msecs */ 387 int a_wait_bcon; /* VBUS timeout in msecs */
386 unsigned long idle_timeout; /* Next timeout in jiffies */ 388 unsigned long idle_timeout; /* Next timeout in jiffies */
387 389
390 unsigned is_initialized:1;
391 unsigned is_runtime_suspended:1;
392
388 /* active means connected and not suspended */ 393 /* active means connected and not suspended */
389 unsigned is_active:1; 394 unsigned is_active:1;
390 395
@@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *);
540 545
541extern void musb_hnp_stop(struct musb *musb); 546extern void musb_hnp_stop(struct musb *musb);
542 547
548int musb_queue_resume_work(struct musb *musb,
549 int (*callback)(struct musb *musb, void *data),
550 void *data);
551
543static inline void musb_platform_set_vbus(struct musb *musb, int is_on) 552static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
544{ 553{
545 if (musb->ops->set_vbus) 554 if (musb->ops->set_vbus)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 0f17d2140db6..feae1561b9ab 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb)
185 musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); 185 musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
186 musb_writel(reg_base, wrp->epintr_clear, 186 musb_writel(reg_base, wrp->epintr_clear,
187 wrp->txep_bitmap | wrp->rxep_bitmap); 187 wrp->txep_bitmap | wrp->rxep_bitmap);
188 del_timer_sync(&glue->timer);
188 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 189 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
189} 190}
190 191
191static void otg_timer(unsigned long _musb) 192/* Caller must take musb->lock */
193static int dsps_check_status(struct musb *musb, void *unused)
192{ 194{
193 struct musb *musb = (void *)_musb;
194 void __iomem *mregs = musb->mregs; 195 void __iomem *mregs = musb->mregs;
195 struct device *dev = musb->controller; 196 struct device *dev = musb->controller;
196 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 197 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
197 const struct dsps_musb_wrapper *wrp = glue->wrp; 198 const struct dsps_musb_wrapper *wrp = glue->wrp;
198 u8 devctl; 199 u8 devctl;
199 unsigned long flags;
200 int skip_session = 0; 200 int skip_session = 0;
201 int err;
202
203 err = pm_runtime_get_sync(dev);
204 if (err < 0)
205 dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
206 201
207 /* 202 /*
 208 * We poll because DSPS IPs won't expose several OTG-critical
@@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb)
212 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, 207 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
213 usb_otg_state_string(musb->xceiv->otg->state)); 208 usb_otg_state_string(musb->xceiv->otg->state));
214 209
215 spin_lock_irqsave(&musb->lock, flags);
216 switch (musb->xceiv->otg->state) { 210 switch (musb->xceiv->otg->state) {
217 case OTG_STATE_A_WAIT_VRISE: 211 case OTG_STATE_A_WAIT_VRISE:
218 mod_timer(&glue->timer, jiffies + 212 mod_timer(&glue->timer, jiffies +
@@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb)
245 default: 239 default:
246 break; 240 break;
247 } 241 }
248 spin_unlock_irqrestore(&musb->lock, flags);
249 242
243 return 0;
244}
245
246static void otg_timer(unsigned long _musb)
247{
248 struct musb *musb = (void *)_musb;
249 struct device *dev = musb->controller;
250 unsigned long flags;
251 int err;
252
253 err = pm_runtime_get(dev);
254 if ((err != -EINPROGRESS) && err < 0) {
255 dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
256 pm_runtime_put_noidle(dev);
257
258 return;
259 }
260
261 spin_lock_irqsave(&musb->lock, flags);
262 err = musb_queue_resume_work(musb, dsps_check_status, NULL);
263 if (err < 0)
264 dev_err(dev, "%s resume work: %i\n", __func__, err);
265 spin_unlock_irqrestore(&musb->lock, flags);
250 pm_runtime_mark_last_busy(dev); 266 pm_runtime_mark_last_busy(dev);
251 pm_runtime_put_autosuspend(dev); 267 pm_runtime_put_autosuspend(dev);
252} 268}
@@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev)
767 783
768 platform_set_drvdata(pdev, glue); 784 platform_set_drvdata(pdev, glue);
769 pm_runtime_enable(&pdev->dev); 785 pm_runtime_enable(&pdev->dev);
770 pm_runtime_use_autosuspend(&pdev->dev);
771 pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
772
773 ret = pm_runtime_get_sync(&pdev->dev);
774 if (ret < 0) {
775 dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
776 goto err2;
777 }
778
779 ret = dsps_create_musb_pdev(glue, pdev); 786 ret = dsps_create_musb_pdev(glue, pdev);
780 if (ret) 787 if (ret)
781 goto err3; 788 goto err;
782
783 pm_runtime_mark_last_busy(&pdev->dev);
784 pm_runtime_put_autosuspend(&pdev->dev);
785 789
786 return 0; 790 return 0;
787 791
788err3: 792err:
789 pm_runtime_put_sync(&pdev->dev);
790err2:
791 pm_runtime_dont_use_autosuspend(&pdev->dev);
792 pm_runtime_disable(&pdev->dev); 793 pm_runtime_disable(&pdev->dev);
793 return ret; 794 return ret;
794} 795}
@@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev)
799 800
800 platform_device_unregister(glue->musb); 801 platform_device_unregister(glue->musb);
801 802
802 /* disable usbss clocks */
803 pm_runtime_dont_use_autosuspend(&pdev->dev);
804 pm_runtime_put_sync(&pdev->dev);
805 pm_runtime_disable(&pdev->dev); 803 pm_runtime_disable(&pdev->dev);
806 804
807 return 0; 805 return 0;
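
otg_timer() fires in timer context, where pm_runtime_get_sync() is off limits because resuming the hardware may sleep; pm_runtime_get() merely kicks off an asynchronous resume, and -EINPROGRESS only means such a resume is already under way, which is why it is filtered out before the return value is treated as an error. The atomic-context idiom in isolation (foo_timer is hypothetical):

#include <linux/pm_runtime.h>

static void foo_timer(unsigned long data)       /* timer: atomic context */
{
        struct device *dev = (struct device *)data;
        int err;

        err = pm_runtime_get(dev);              /* async resume, never sleeps */
        if (err != -EINPROGRESS && err < 0) {
                pm_runtime_put_noidle(dev);     /* rebalance the usage count */
                return;
        }

        /* ... queue the real work to run once the device is resumed ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}
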
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 4042ea017985..a55173c9e564 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
1114 musb_ep->dma ? "dma, " : "", 1114 musb_ep->dma ? "dma, " : "",
1115 musb_ep->packet_sz); 1115 musb_ep->packet_sz);
1116 1116
1117 schedule_work(&musb->irq_work); 1117 schedule_delayed_work(&musb->irq_work, 0);
1118 1118
1119fail: 1119fail:
1120 spin_unlock_irqrestore(&musb->lock, flags); 1120 spin_unlock_irqrestore(&musb->lock, flags);
@@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
1158 musb_ep->desc = NULL; 1158 musb_ep->desc = NULL;
1159 musb_ep->end_point.desc = NULL; 1159 musb_ep->end_point.desc = NULL;
1160 1160
1161 schedule_work(&musb->irq_work); 1161 schedule_delayed_work(&musb->irq_work, 0);
1162 1162
1163 spin_unlock_irqrestore(&(musb->lock), flags); 1163 spin_unlock_irqrestore(&(musb->lock), flags);
1164 1164
@@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req)
1222 rxstate(musb, req); 1222 rxstate(musb, req);
1223} 1223}
1224 1224
1225static int musb_ep_restart_resume_work(struct musb *musb, void *data)
1226{
1227 struct musb_request *req = data;
1228
1229 musb_ep_restart(musb, req);
1230
1231 return 0;
1232}
1233
1225static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, 1234static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1226 gfp_t gfp_flags) 1235 gfp_t gfp_flags)
1227{ 1236{
1228 struct musb_ep *musb_ep; 1237 struct musb_ep *musb_ep;
1229 struct musb_request *request; 1238 struct musb_request *request;
1230 struct musb *musb; 1239 struct musb *musb;
1231 int status = 0; 1240 int status;
1232 unsigned long lockflags; 1241 unsigned long lockflags;
1233 1242
1234 if (!ep || !req) 1243 if (!ep || !req)
@@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1245 if (request->ep != musb_ep) 1254 if (request->ep != musb_ep)
1246 return -EINVAL; 1255 return -EINVAL;
1247 1256
1257 status = pm_runtime_get(musb->controller);
1258 if ((status != -EINPROGRESS) && status < 0) {
1259 dev_err(musb->controller,
1260 "pm runtime get failed in %s\n",
1261 __func__);
1262 pm_runtime_put_noidle(musb->controller);
1263
1264 return status;
1265 }
1266 status = 0;
1267
1248 trace_musb_req_enq(request); 1268 trace_musb_req_enq(request);
1249 1269
1250 /* request is mine now... */ 1270 /* request is mine now... */
@@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1255 1275
1256 map_dma_buffer(request, musb, musb_ep); 1276 map_dma_buffer(request, musb, musb_ep);
1257 1277
1258 pm_runtime_get_sync(musb->controller);
1259 spin_lock_irqsave(&musb->lock, lockflags); 1278 spin_lock_irqsave(&musb->lock, lockflags);
1260 1279
1261 /* don't queue if the ep is down */ 1280 /* don't queue if the ep is down */
@@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1271 list_add_tail(&request->list, &musb_ep->req_list); 1290 list_add_tail(&request->list, &musb_ep->req_list);
1272 1291
 1273 /* if this is the head of the queue, start i/o ... */ 1292 /* if this is the head of the queue, start i/o ... */
1274 if (!musb_ep->busy && &request->list == musb_ep->req_list.next) 1293 if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
1275 musb_ep_restart(musb, request); 1294 status = musb_queue_resume_work(musb,
1295 musb_ep_restart_resume_work,
1296 request);
1297 if (status < 0)
1298 dev_err(musb->controller, "%s resume work: %i\n",
1299 __func__, status);
1300 }
1276 1301
1277unlock: 1302unlock:
1278 spin_unlock_irqrestore(&musb->lock, lockflags); 1303 spin_unlock_irqrestore(&musb->lock, lockflags);
@@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
1969 */ 1994 */
1970 1995
1971 /* Force check of devctl register for PM runtime */ 1996 /* Force check of devctl register for PM runtime */
1972 schedule_work(&musb->irq_work); 1997 schedule_delayed_work(&musb->irq_work, 0);
1973 1998
1974 pm_runtime_mark_last_busy(musb->controller); 1999 pm_runtime_mark_last_busy(musb->controller);
1975 pm_runtime_put_autosuspend(musb->controller); 2000 pm_runtime_put_autosuspend(musb->controller);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index cc1225485509..e8be8e39ab8f 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev)
513 } 513 }
514 514
515 pm_runtime_enable(glue->dev); 515 pm_runtime_enable(glue->dev);
516 pm_runtime_use_autosuspend(glue->dev);
517 pm_runtime_set_autosuspend_delay(glue->dev, 100);
518 516
519 ret = platform_device_add(musb); 517 ret = platform_device_add(musb);
520 if (ret) { 518 if (ret) {
521 dev_err(&pdev->dev, "failed to register musb device\n"); 519 dev_err(&pdev->dev, "failed to register musb device\n");
522 goto err2; 520 goto err3;
523 } 521 }
524 522
525 return 0; 523 return 0;
526 524
525err3:
526 pm_runtime_disable(glue->dev);
527
527err2: 528err2:
528 platform_device_put(musb); 529 platform_device_put(musb);
529 530
@@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev)
535{ 536{
536 struct omap2430_glue *glue = platform_get_drvdata(pdev); 537 struct omap2430_glue *glue = platform_get_drvdata(pdev);
537 538
538 pm_runtime_get_sync(glue->dev);
539 platform_device_unregister(glue->musb); 539 platform_device_unregister(glue->musb);
540 pm_runtime_put_sync(glue->dev);
541 pm_runtime_dont_use_autosuspend(glue->dev);
542 pm_runtime_disable(glue->dev); 540 pm_runtime_disable(glue->dev);
543 541
544 return 0; 542 return 0;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index df7c9f46be54..e85cc8e4e7a9 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
724 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", 724 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
725 usb_otg_state_string(musb->xceiv->otg->state), otg_stat); 725 usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
726 idle_timeout = jiffies + (1 * HZ); 726 idle_timeout = jiffies + (1 * HZ);
727 schedule_work(&musb->irq_work); 727 schedule_delayed_work(&musb->irq_work, 0);
728 728
729 } else /* A-dev state machine */ { 729 } else /* A-dev state machine */ {
730 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", 730 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
@@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
814 break; 814 break;
815 } 815 }
816 } 816 }
817 schedule_work(&musb->irq_work); 817 schedule_delayed_work(&musb->irq_work, 0);
818 818
819 return idle_timeout; 819 return idle_timeout;
820} 820}
@@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
864 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); 864 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
865 if (reg & ~TUSB_PRCM_WNORCS) { 865 if (reg & ~TUSB_PRCM_WNORCS) {
866 musb->is_active = 1; 866 musb->is_active = 1;
867 schedule_work(&musb->irq_work); 867 schedule_delayed_work(&musb->irq_work, 0);
868 } 868 }
869 dev_dbg(musb->controller, "wake %sactive %02x\n", 869 dev_dbg(musb->controller, "wake %sactive %02x\n",
870 musb->is_active ? "" : "in", reg); 870 musb->is_active ? "" : "in", reg);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f61477bed3a8..243ac5ebe46a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
131 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 131 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
132 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 132 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
133 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 133 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
134 { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
134 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ 135 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
135 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 136 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
136 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ 137 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ff7f38d7800..6e9fc8bcc285 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
1012 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, 1012 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
1013 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, 1013 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
1014 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, 1014 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
1015 { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
1016 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1015 { } /* Terminating entry */ 1017 { } /* Terminating entry */
1016}; 1018};
1017 1019
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21011c0a4c64..48ee04c94a75 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -596,6 +596,12 @@
596#define STK541_PID 0x2109 /* Zigbee Controller */ 596#define STK541_PID 0x2109 /* Zigbee Controller */
597 597
598/* 598/*
599 * Texas Instruments
600 */
601#define TI_VID 0x0451
602#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
603
604/*
599 * Blackfin gnICE JTAG 605 * Blackfin gnICE JTAG
600 * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice 606 * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
601 */ 607 */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index ffd086733421..1a59f335b063 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
954 954
955 /* COMMAND STAGE */ 955 /* COMMAND STAGE */
956 /* let's send the command via the control pipe */ 956 /* let's send the command via the control pipe */
957 /*
 958 * The command is sometimes (e.g. after scsi_eh_prep_cmnd) on the stack,
 959 * and the stack may be vmalloc'ed. So no DMA for us. Make a copy.
960 */
961 memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
957 result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 962 result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
958 US_CBI_ADSC, 963 US_CBI_ADSC,
959 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 964 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
960 us->ifnum, srb->cmnd, srb->cmd_len); 965 us->ifnum, us->iobuf, srb->cmd_len);
961 966
962 /* check the return code for the command */ 967 /* check the return code for the command */
963 usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", 968 usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
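
usb_stor_ctrl_transfer() ultimately hands the buffer to the USB core for DMA mapping, and stack memory, which is vmapped when CONFIG_VMAP_STACK is enabled (the configuration that exposed this bug), cannot be DMA-mapped; hence the copy into the preallocated us->iobuf. The general bounce-buffer idiom, as a hypothetical wrapper:

#include <linux/slab.h>
#include <linux/usb.h>

static int foo_send_cmd(struct usb_device *udev, unsigned int pipe,
                        const void *cmd, int len)       /* hypothetical */
{
        void *buf;
        int ret;

        /* Never pass stack memory to the USB core: bounce via the heap. */
        buf = kmemdup(cmd, len, GFP_NOIO);
        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, pipe, 0 /* bRequest */,
                              USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                              0, 0, buf, len, 5000);
        kfree(buf);
        return ret;
}
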
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 532d8e242d4d..484bebc20bca 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
197 } 197 }
198 198
199 ret = -EPROTONOSUPPORT; 199 ret = -EPROTONOSUPPORT;
200 if (minorversion == 0) 200 if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
201 ret = nfs4_callback_up_net(serv, net); 201 ret = nfs4_callback_up_net(serv, net);
202 else if (xprt->ops->bc_up) 202 else if (xprt->ops->bc_up)
203 ret = xprt->ops->bc_up(serv, net); 203 ret = xprt->ops->bc_up(serv, net);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9b3a82abab07..1452177c822d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
542 return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0; 542 return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
543} 543}
544 544
545static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state,
546 const nfs4_stateid *stateid)
547{
548 return test_bit(NFS_OPEN_STATE, &state->flags) &&
549 nfs4_stateid_match_other(&state->open_stateid, stateid);
550}
551
545#else 552#else
546 553
547#define nfs4_close_state(a, b) do { } while (0) 554#define nfs4_close_state(a, b) do { } while (0)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7897826d7c51..241da19b7da4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1451} 1451}
1452 1452
1453static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1453static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1454 nfs4_stateid *arg_stateid,
1455 nfs4_stateid *stateid, fmode_t fmode) 1454 nfs4_stateid *stateid, fmode_t fmode)
1456{ 1455{
1457 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1456 clear_bit(NFS_O_RDWR_STATE, &state->flags);
@@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1469 } 1468 }
1470 if (stateid == NULL) 1469 if (stateid == NULL)
1471 return; 1470 return;
1472 /* Handle races with OPEN */ 1471 /* Handle OPEN+OPEN_DOWNGRADE races */
1473 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1472 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1474 (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1473 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1475 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1476 nfs_resync_open_stateid_locked(state); 1474 nfs_resync_open_stateid_locked(state);
1477 return; 1475 return;
1478 } 1476 }
@@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state,
1486 nfs4_stateid *stateid, fmode_t fmode) 1484 nfs4_stateid *stateid, fmode_t fmode)
1487{ 1485{
1488 write_seqlock(&state->seqlock); 1486 write_seqlock(&state->seqlock);
 1489 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1487 /* Ignore if the CLOSE argument doesn't match the current stateid */
1488 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1489 nfs_clear_open_stateid_locked(state, stateid, fmode);
1490 write_sequnlock(&state->seqlock); 1490 write_sequnlock(&state->seqlock);
1491 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1491 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1492 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1492 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
@@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2564static int nfs41_check_expired_locks(struct nfs4_state *state) 2564static int nfs41_check_expired_locks(struct nfs4_state *state)
2565{ 2565{
2566 int status, ret = NFS_OK; 2566 int status, ret = NFS_OK;
2567 struct nfs4_lock_state *lsp; 2567 struct nfs4_lock_state *lsp, *prev = NULL;
2568 struct nfs_server *server = NFS_SERVER(state->inode); 2568 struct nfs_server *server = NFS_SERVER(state->inode);
2569 2569
2570 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2570 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2571 goto out; 2571 goto out;
2572
2573 spin_lock(&state->state_lock);
2572 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 2574 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2573 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 2575 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2574 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 2576 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
2575 2577
2578 atomic_inc(&lsp->ls_count);
2579 spin_unlock(&state->state_lock);
2580
2581 nfs4_put_lock_state(prev);
2582 prev = lsp;
2583
2576 status = nfs41_test_and_free_expired_stateid(server, 2584 status = nfs41_test_and_free_expired_stateid(server,
2577 &lsp->ls_stateid, 2585 &lsp->ls_stateid,
2578 cred); 2586 cred);
@@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
2585 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2593 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2586 } else if (status != NFS_OK) { 2594 } else if (status != NFS_OK) {
2587 ret = status; 2595 ret = status;
2588 break; 2596 nfs4_put_lock_state(prev);
2597 goto out;
2589 } 2598 }
2599 spin_lock(&state->state_lock);
2590 } 2600 }
2591 }; 2601 }
2602 spin_unlock(&state->state_lock);
2603 nfs4_put_lock_state(prev);
2592out: 2604out:
2593 return ret; 2605 return ret;
2594} 2606}
@@ -3122,7 +3134,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
3122 } else if (is_rdwr) 3134 } else if (is_rdwr)
3123 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3135 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3124 3136
3125 if (!nfs4_valid_open_stateid(state)) 3137 if (!nfs4_valid_open_stateid(state) ||
3138 test_bit(NFS_OPEN_STATE, &state->flags) == 0)
3126 call_close = 0; 3139 call_close = 0;
3127 spin_unlock(&state->owner->so_lock); 3140 spin_unlock(&state->owner->so_lock);
3128 3141
@@ -5569,6 +5582,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5569 switch (task->tk_status) { 5582 switch (task->tk_status) {
5570 case 0: 5583 case 0:
5571 renew_lease(data->res.server, data->timestamp); 5584 renew_lease(data->res.server, data->timestamp);
5585 break;
5572 case -NFS4ERR_ADMIN_REVOKED: 5586 case -NFS4ERR_ADMIN_REVOKED:
5573 case -NFS4ERR_DELEG_REVOKED: 5587 case -NFS4ERR_DELEG_REVOKED:
5574 case -NFS4ERR_EXPIRED: 5588 case -NFS4ERR_EXPIRED:
@@ -5579,8 +5593,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5579 case -NFS4ERR_OLD_STATEID: 5593 case -NFS4ERR_OLD_STATEID:
5580 case -NFS4ERR_STALE_STATEID: 5594 case -NFS4ERR_STALE_STATEID:
5581 task->tk_status = 0; 5595 task->tk_status = 0;
5582 if (data->roc)
5583 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5584 break; 5596 break;
5585 default: 5597 default:
5586 if (nfs4_async_handle_error(task, data->res.server, 5598 if (nfs4_async_handle_error(task, data->res.server,
@@ -5590,6 +5602,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5590 } 5602 }
5591 } 5603 }
5592 data->rpc_status = task->tk_status; 5604 data->rpc_status = task->tk_status;
5605 if (data->roc && data->rpc_status == 0)
5606 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5593} 5607}
5594 5608
5595static void nfs4_delegreturn_release(void *calldata) 5609static void nfs4_delegreturn_release(void *calldata)
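
Two fixes are folded into the delegreturn hunks above: the success case previously fell through into the revoked/expired error handling because it lacked a break, and the pNFS return-on-close barrier is now set once, after the switch, for any successful DELEGRETURN rather than only on the stateid-error paths. A compilable toy showing the class of bug the added break removes, where a missing break silently runs the next case's code:

#include <stdio.h>

static void handle(int status)
{
    switch (status) {
    case 0:
        printf("status 0: renew lease\n");
        break;          /* without this, the recovery code below runs too */
    case -1:
    case -2:
        printf("status %d: recover state\n", status);
        break;
    default:
        printf("status %d: generic error path\n", status);
    }
}

int main(void)
{
    handle(0);
    handle(-2);
    handle(-99);
    return 0;
}
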
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5f4281ec5f72..0959c9661662 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1547,6 +1547,7 @@ restart:
1547 ssleep(1); 1547 ssleep(1);
1548 case -NFS4ERR_ADMIN_REVOKED: 1548 case -NFS4ERR_ADMIN_REVOKED:
1549 case -NFS4ERR_STALE_STATEID: 1549 case -NFS4ERR_STALE_STATEID:
1550 case -NFS4ERR_OLD_STATEID:
1550 case -NFS4ERR_BAD_STATEID: 1551 case -NFS4ERR_BAD_STATEID:
1551 case -NFS4ERR_RECLAIM_BAD: 1552 case -NFS4ERR_RECLAIM_BAD:
1552 case -NFS4ERR_RECLAIM_CONFLICT: 1553 case -NFS4ERR_RECLAIM_CONFLICT:
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7035b997aaa5..6aaf425cebc3 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -14,7 +14,7 @@
14 * are obviously wrong for any sort of memory access. 14 * are obviously wrong for any sort of memory access.
15 */ 15 */
16#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) 16#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
17#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) 17#define BPF_REGISTER_MIN_RANGE -1
18 18
19struct bpf_reg_state { 19struct bpf_reg_state {
20 enum bpf_reg_type type; 20 enum bpf_reg_type type;
@@ -22,7 +22,8 @@ struct bpf_reg_state {
22 * Used to determine if any memory access using this register will 22 * Used to determine if any memory access using this register will
23 * result in a bad access. 23 * result in a bad access.
24 */ 24 */
25 u64 min_value, max_value; 25 s64 min_value;
26 u64 max_value;
26 union { 27 union {
27 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ 28 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
28 s64 imm; 29 s64 imm;
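
min_value switches from u64 to s64 so a genuinely negative floor is representable without casts: with an unsigned field, a stored -5 compares as a huge positive number and every plain min_value < 0 test is dead code. A few runnable lines demonstrating the pitfall:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t umin = (uint64_t)-5;   /* what a u64 field stores for -5 */
    int64_t  smin = -5;

    /* always 0: an unsigned value can never compare below zero */
    printf("umin < 0  -> %d (umin = %llu)\n",
           umin < 0, (unsigned long long)umin);
    /* the signed field keeps the sign and compares correctly */
    printf("smin < 0  -> %d (smin = %lld)\n",
           smin < 0, (long long)smin);
    return 0;
}
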
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 348f51b0ec92..e9c009dc3a4a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2567,6 +2567,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p);
2567extern void sched_autogroup_detach(struct task_struct *p); 2567extern void sched_autogroup_detach(struct task_struct *p);
2568extern void sched_autogroup_fork(struct signal_struct *sig); 2568extern void sched_autogroup_fork(struct signal_struct *sig);
2569extern void sched_autogroup_exit(struct signal_struct *sig); 2569extern void sched_autogroup_exit(struct signal_struct *sig);
2570extern void sched_autogroup_exit_task(struct task_struct *p);
2570#ifdef CONFIG_PROC_FS 2571#ifdef CONFIG_PROC_FS
2571extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 2572extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2572extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); 2573extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
@@ -2576,6 +2577,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2576static inline void sched_autogroup_detach(struct task_struct *p) { } 2577static inline void sched_autogroup_detach(struct task_struct *p) { }
2577static inline void sched_autogroup_fork(struct signal_struct *sig) { } 2578static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2578static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2579static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2580static inline void sched_autogroup_exit_task(struct task_struct *p) { }
2579#endif 2581#endif
2580 2582
2581extern int yield_to(struct task_struct *p, bool preempt); 2583extern int yield_to(struct task_struct *p, bool preempt);
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index d15214d673b2..2a1abbf8da74 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
68 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); 68 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
69 69
70 __skb_queue_head_init(&cell->napi_skbs); 70 __skb_queue_head_init(&cell->napi_skbs);
71
72 set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
73
71 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); 74 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
72 napi_enable(&cell->napi); 75 napi_enable(&cell->napi);
73 } 76 }
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index b9314b48e39f..f390c3bb05c5 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -243,6 +243,7 @@ int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
243 struct netlink_callback *cb); 243 struct netlink_callback *cb);
244int fib_table_flush(struct net *net, struct fib_table *table); 244int fib_table_flush(struct net *net, struct fib_table *table);
245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
246void fib_table_flush_external(struct fib_table *table);
246void fib_free_table(struct fib_table *tb); 247void fib_free_table(struct fib_table *tb);
247 248
248#ifndef CONFIG_IP_MULTIPLE_TABLES 249#ifndef CONFIG_IP_MULTIPLE_TABLES
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fc4f757107df..0940598c002f 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -170,7 +170,7 @@ static inline struct net *copy_net_ns(unsigned long flags,
170extern struct list_head net_namespace_list; 170extern struct list_head net_namespace_list;
171 171
172struct net *get_net_ns_by_pid(pid_t pid); 172struct net *get_net_ns_by_pid(pid_t pid);
173struct net *get_net_ns_by_fd(int pid); 173struct net *get_net_ns_by_fd(int fd);
174 174
175#ifdef CONFIG_SYSCTL 175#ifdef CONFIG_SYSCTL
176void ipx_register_sysctl(void); 176void ipx_register_sysctl(void);
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 8a09b32e07d6..dd4104c9aa12 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -272,7 +272,7 @@ int __init rd_load_image(char *from)
272 sys_write(out_fd, buf, BLOCK_SIZE); 272 sys_write(out_fd, buf, BLOCK_SIZE);
273#if !defined(CONFIG_S390) 273#if !defined(CONFIG_S390)
274 if (!(i % 16)) { 274 if (!(i % 16)) {
275 printk("%c\b", rotator[rotate & 0x3]); 275 pr_cont("%c\b", rotator[rotate & 0x3]);
276 rotate++; 276 rotate++;
277 } 277 }
278#endif 278#endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 99a7e5b388f2..6a936159c6e0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state)
216 reg->map_ptr->key_size, 216 reg->map_ptr->key_size,
217 reg->map_ptr->value_size); 217 reg->map_ptr->value_size);
218 if (reg->min_value != BPF_REGISTER_MIN_RANGE) 218 if (reg->min_value != BPF_REGISTER_MIN_RANGE)
219 verbose(",min_value=%llu", 219 verbose(",min_value=%lld",
220 (unsigned long long)reg->min_value); 220 (long long)reg->min_value);
221 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 221 if (reg->max_value != BPF_REGISTER_MAX_RANGE)
222 verbose(",max_value=%llu", 222 verbose(",max_value=%llu",
223 (unsigned long long)reg->max_value); 223 (unsigned long long)reg->max_value);
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
758 * index'es we need to make sure that whatever we use 758 * index'es we need to make sure that whatever we use
759 * will have a set floor within our range. 759 * will have a set floor within our range.
760 */ 760 */
761 if ((s64)reg->min_value < 0) { 761 if (reg->min_value < 0) {
762 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 762 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
763 regno); 763 regno);
764 return -EACCES; 764 return -EACCES;
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
1468{ 1468{
1469 if (reg->max_value > BPF_REGISTER_MAX_RANGE) 1469 if (reg->max_value > BPF_REGISTER_MAX_RANGE)
1470 reg->max_value = BPF_REGISTER_MAX_RANGE; 1470 reg->max_value = BPF_REGISTER_MAX_RANGE;
1471 if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) 1471 if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
1472 reg->min_value > BPF_REGISTER_MAX_RANGE)
1472 reg->min_value = BPF_REGISTER_MIN_RANGE; 1473 reg->min_value = BPF_REGISTER_MIN_RANGE;
1473} 1474}
1474 1475
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1476 struct bpf_insn *insn) 1477 struct bpf_insn *insn)
1477{ 1478{
1478 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1479 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1479 u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; 1480 s64 min_val = BPF_REGISTER_MIN_RANGE;
1481 u64 max_val = BPF_REGISTER_MAX_RANGE;
1480 bool min_set = false, max_set = false; 1482 bool min_set = false, max_set = false;
1481 u8 opcode = BPF_OP(insn->code); 1483 u8 opcode = BPF_OP(insn->code);
1482 1484
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1512 return; 1514 return;
1513 } 1515 }
1514 1516
1517 /* If one of our values was at the end of our ranges then we can't just
1518 * do our normal operations to the register, we need to set the values
1519 * to the min/max since they are undefined.
1520 */
1521 if (min_val == BPF_REGISTER_MIN_RANGE)
1522 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1523 if (max_val == BPF_REGISTER_MAX_RANGE)
1524 dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1525
1515 switch (opcode) { 1526 switch (opcode) {
1516 case BPF_ADD: 1527 case BPF_ADD:
1517 dst_reg->min_value += min_val; 1528 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1518 dst_reg->max_value += max_val; 1529 dst_reg->min_value += min_val;
1530 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1531 dst_reg->max_value += max_val;
1519 break; 1532 break;
1520 case BPF_SUB: 1533 case BPF_SUB:
1521 dst_reg->min_value -= min_val; 1534 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1522 dst_reg->max_value -= max_val; 1535 dst_reg->min_value -= min_val;
1536 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1537 dst_reg->max_value -= max_val;
1523 break; 1538 break;
1524 case BPF_MUL: 1539 case BPF_MUL:
1525 dst_reg->min_value *= min_val; 1540 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1526 dst_reg->max_value *= max_val; 1541 dst_reg->min_value *= min_val;
1542 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1543 dst_reg->max_value *= max_val;
1527 break; 1544 break;
1528 case BPF_AND: 1545 case BPF_AND:
1529 /* & is special since it could end up with 0 bits set. */ 1546 /* Disallow AND'ing of negative numbers, ain't nobody got time
1530 dst_reg->min_value &= min_val; 1547 * for that. Otherwise the minimum is 0 and the max is the max
1548 * value we could AND against.
1549 */
1550 if (min_val < 0)
1551 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1552 else
1553 dst_reg->min_value = 0;
1531 dst_reg->max_value = max_val; 1554 dst_reg->max_value = max_val;
1532 break; 1555 break;
1533 case BPF_LSH: 1556 case BPF_LSH:
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1537 */ 1560 */
1538 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1561 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
1539 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1562 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1540 else 1563 else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1541 dst_reg->min_value <<= min_val; 1564 dst_reg->min_value <<= min_val;
1542 1565
1543 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1566 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
1544 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1567 dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1545 else 1568 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1546 dst_reg->max_value <<= max_val; 1569 dst_reg->max_value <<= max_val;
1547 break; 1570 break;
1548 case BPF_RSH: 1571 case BPF_RSH:
1549 dst_reg->min_value >>= min_val; 1572 /* RSH by a negative number is undefined, and the BPF_RSH is an
1550 dst_reg->max_value >>= max_val; 1573 * unsigned shift, so make the appropriate casts.
1551 break;
1552 case BPF_MOD:
1553 /* % is special since it is an unsigned modulus, so the floor
1554 * will always be 0.
1555 */ 1574 */
1556 dst_reg->min_value = 0; 1575 if (min_val < 0 || dst_reg->min_value < 0)
1557 dst_reg->max_value = max_val - 1; 1576 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1577 else
1578 dst_reg->min_value =
1579 (u64)(dst_reg->min_value) >> min_val;
1580 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1581 dst_reg->max_value >>= max_val;
1558 break; 1582 break;
1559 default: 1583 default:
1560 reset_reg_range_values(regs, insn->dst_reg); 1584 reset_reg_range_values(regs, insn->dst_reg);
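
The rewritten arithmetic treats BPF_REGISTER_MIN_RANGE and BPF_REGISTER_MAX_RANGE as "unknown" sentinels: once a bound has saturated, adds, subs, muls and shifts leave it saturated instead of wrapping past the sentinel and manufacturing a bogus tight bound. A self-contained model of that clamp-and-skip discipline, assuming the same -1 and 1<<30 sentinels as the header change above:

#include <stdio.h>
#include <stdint.h>

#define MAX_RANGE (1024 * 1024 * 1024)
#define MIN_RANGE -1

struct range { int64_t min; uint64_t max; };

static void clamp(struct range *r)
{
    if (r->max > MAX_RANGE)
        r->max = MAX_RANGE;
    /* a negative or absurdly large minimum is simply "unknown" */
    if (r->min < MIN_RANGE || r->min > MAX_RANGE)
        r->min = MIN_RANGE;
}

static void add_const(struct range *r, int64_t lo, uint64_t hi)
{
    if (r->min != MIN_RANGE)       /* no math on a saturated bound */
        r->min += lo;
    if (r->max != MAX_RANGE)
        r->max += hi;
    clamp(r);
}

int main(void)
{
    struct range r = { .min = 4, .max = 100 };

    add_const(&r, 2, 2);
    printf("known:     min=%lld max=%llu\n",
           (long long)r.min, (unsigned long long)r.max);

    r.min = MIN_RANGE;             /* bound already saturated */
    add_const(&r, 1000, 1000);
    printf("saturated: min=%lld max=%llu\n",
           (long long)r.min, (unsigned long long)r.max);
    return 0;
}
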
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e292132efac..6ee1febdf6ff 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event,
902 * this will always be called from the right CPU. 902 * this will always be called from the right CPU.
903 */ 903 */
904 cpuctx = __get_cpu_context(ctx); 904 cpuctx = __get_cpu_context(ctx);
905
906 /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
907 if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
908 /*
909 * We are removing the last cpu event in this context.
910 * If that event is not active in this cpu, cpuctx->cgrp
911 * should've been cleared by perf_cgroup_switch.
912 */
913 WARN_ON_ONCE(!add && cpuctx->cgrp);
914 return;
915 }
905 cpuctx->cgrp = add ? event->cgrp : NULL; 916 cpuctx->cgrp = add ? event->cgrp : NULL;
906} 917}
907 918
@@ -8018,6 +8029,7 @@ restart:
8018 * if <size> is not specified, the range is treated as a single address. 8029 * if <size> is not specified, the range is treated as a single address.
8019 */ 8030 */
8020enum { 8031enum {
8032 IF_ACT_NONE = -1,
8021 IF_ACT_FILTER, 8033 IF_ACT_FILTER,
8022 IF_ACT_START, 8034 IF_ACT_START,
8023 IF_ACT_STOP, 8035 IF_ACT_STOP,
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = {
8041 { IF_SRC_KERNEL, "%u/%u" }, 8053 { IF_SRC_KERNEL, "%u/%u" },
8042 { IF_SRC_FILEADDR, "%u@%s" }, 8054 { IF_SRC_FILEADDR, "%u@%s" },
8043 { IF_SRC_KERNELADDR, "%u" }, 8055 { IF_SRC_KERNELADDR, "%u" },
8056 { IF_ACT_NONE, NULL },
8044}; 8057};
8045 8058
8046/* 8059/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 9d68c45ebbe3..3076f3089919 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code)
836 */ 836 */
837 perf_event_exit_task(tsk); 837 perf_event_exit_task(tsk);
838 838
839 sched_autogroup_exit_task(tsk);
839 cgroup_exit(tsk); 840 cgroup_exit(tsk);
840 841
841 /* 842 /*
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 51c4b24b6328..c2b88490d857 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -46,6 +46,14 @@ enum {
46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) 46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
47 47
48/* 48/*
49 * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
50 * .data and .bss to fit in required 32MB limit for the kernel. With
51 * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
52 * So, reduce the static allocations for lockdeps related structures so that
53 * everything fits in current required size limit.
54 */
55#ifdef CONFIG_PROVE_LOCKING_SMALL
56/*
49 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
50 * we track. 58 * we track.
51 * 59 *
@@ -54,18 +62,24 @@ enum {
54 * table (if it's not there yet), and we check it for lock order 62 * table (if it's not there yet), and we check it for lock order
55 * conflicts and deadlocks. 63 * conflicts and deadlocks.
56 */ 64 */
65#define MAX_LOCKDEP_ENTRIES 16384UL
66#define MAX_LOCKDEP_CHAINS_BITS 15
67#define MAX_STACK_TRACE_ENTRIES 262144UL
68#else
57#define MAX_LOCKDEP_ENTRIES 32768UL 69#define MAX_LOCKDEP_ENTRIES 32768UL
58 70
59#define MAX_LOCKDEP_CHAINS_BITS 16 71#define MAX_LOCKDEP_CHAINS_BITS 16
60#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
61
62#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
63 72
64/* 73/*
65 * Stack-trace: tightly packed array of stack backtrace 74 * Stack-trace: tightly packed array of stack backtrace
66 * addresses. Protected by the hash_lock. 75 * addresses. Protected by the hash_lock.
67 */ 76 */
68#define MAX_STACK_TRACE_ENTRIES 524288UL 77#define MAX_STACK_TRACE_ENTRIES 524288UL
78#endif
79
80#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
81
82#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
69 83
70extern struct list_head all_lock_classes; 84extern struct list_head all_lock_classes;
71extern struct lock_chain lock_chains[]; 85extern struct lock_chain lock_chains[];
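
Because MAX_LOCKDEP_CHAINS and MAX_LOCKDEP_CHAIN_HLOCKS are derived from MAX_LOCKDEP_CHAINS_BITS, only the bits constant plus the entry and stack-trace counts need to differ per configuration; the derived macros now sit once, after the #ifdef/#else. A quick check of what each configuration yields (build with -DPROVE_LOCKING_SMALL to see the reduced sparc sizing):

#include <stdio.h>

#ifdef PROVE_LOCKING_SMALL
#define MAX_LOCKDEP_ENTRIES      16384UL
#define MAX_LOCKDEP_CHAINS_BITS  15
#define MAX_STACK_TRACE_ENTRIES  262144UL
#else
#define MAX_LOCKDEP_ENTRIES      32768UL
#define MAX_LOCKDEP_CHAINS_BITS  16
#define MAX_STACK_TRACE_ENTRIES  524288UL
#endif

/* shared derivations, exactly as in the hunk above */
#define MAX_LOCKDEP_CHAINS       (1UL << MAX_LOCKDEP_CHAINS_BITS)
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS * 5)

int main(void)
{
    printf("entries      = %lu\n", MAX_LOCKDEP_ENTRIES);
    printf("chains       = %lu\n", MAX_LOCKDEP_CHAINS);
    printf("chain hlocks = %lu\n", MAX_LOCKDEP_CHAIN_HLOCKS);
    printf("stack traces = %lu\n", MAX_STACK_TRACE_ENTRIES);
    return 0;
}
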
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index a5d966cb8891..f1c8fd566246 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
111{ 111{
112 if (tg != &root_task_group) 112 if (tg != &root_task_group)
113 return false; 113 return false;
114
115 /* 114 /*
116 * We can only assume the task group can't go away on us if 115 * If we race with autogroup_move_group() the caller can use the old
117 * autogroup_move_group() can see us on ->thread_group list. 116 * value of signal->autogroup but in this case sched_move_task() will
117 * be called again before autogroup_kref_put().
118 *
119 * However, there is no way sched_autogroup_exit_task() could tell us
120 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
118 */ 121 */
119 if (p->flags & PF_EXITING) 122 if (p->flags & PF_EXITING)
120 return false; 123 return false;
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
122 return true; 125 return true;
123} 126}
124 127
128void sched_autogroup_exit_task(struct task_struct *p)
129{
130 /*
131 * We are going to call exit_notify() and autogroup_move_group() can't
132 * see this thread after that: we can no longer use signal->autogroup.
133 * See the PF_EXITING check in task_wants_autogroup().
134 */
135 sched_move_task(p);
136}
137
125static void 138static void
126autogroup_move_group(struct task_struct *p, struct autogroup *ag) 139autogroup_move_group(struct task_struct *p, struct autogroup *ag)
127{ 140{
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
138 } 151 }
139 152
140 p->signal->autogroup = autogroup_kref_get(ag); 153 p->signal->autogroup = autogroup_kref_get(ag);
141 154 /*
142 if (!READ_ONCE(sysctl_sched_autogroup_enabled)) 155 * We can't avoid sched_move_task() after we changed signal->autogroup,
143 goto out; 156 * this process can already run with task_group() == prev->tg or we can
144 157 * race with cgroup code which can read autogroup = prev under rq->lock.
158 * In the latter case for_each_thread() can not miss a migrating thread,
159 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
160 * can't be removed from thread list, we hold ->siglock.
161 *
162 * If an exiting thread was already removed from thread list we rely on
163 * sched_autogroup_exit_task().
164 */
145 for_each_thread(p, t) 165 for_each_thread(p, t)
146 sched_move_task(t); 166 sched_move_task(t);
147out: 167
148 unlock_task_sighand(p, &flags); 168 unlock_task_sighand(p, &flags);
149 autogroup_kref_put(prev); 169 autogroup_kref_put(prev);
150} 170}
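
The ordering that makes this safe: do_exit() calls sched_autogroup_exit_task() while the task is still on the thread list, and task_wants_autogroup() refuses PF_EXITING tasks, so a concurrent autogroup_move_group() either sees the thread and moves it under ->siglock, or the exiting thread has already moved itself back to the root group. A compressed userspace model of "exiting members opt out before leaving; movers walk the remaining members under one lock" (all names and structures here are stand-ins, not kernel API):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

#define NTHREADS 4

struct member {
    bool exiting;     /* models PF_EXITING */
    int group;        /* models task_group(); 0 = root group */
};

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static struct member members[NTHREADS];

/* models sched_move_task(): PF_EXITING members keep the root group */
static void move_one(struct member *m, int group)
{
    m->group = m->exiting ? 0 : group;
}

/* models autogroup_move_group(): walk every member under the lock */
static void move_group(int group)
{
    pthread_mutex_lock(&siglock);
    for (int i = 0; i < NTHREADS; i++)
        move_one(&members[i], group);
    pthread_mutex_unlock(&siglock);
}

/* models sched_autogroup_exit_task(): drop out before leaving the list */
static void exit_task(struct member *m)
{
    pthread_mutex_lock(&siglock);
    m->exiting = true;
    move_one(m, 0);
    pthread_mutex_unlock(&siglock);
}

int main(void)
{
    exit_task(&members[1]);
    move_group(7);
    for (int i = 0; i < NTHREADS; i++)
        printf("member %d -> group %d\n", i, members[i].group);
    return 0;
}
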
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b01e547d4d04..a6c8db1d62f6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1085,6 +1085,9 @@ config PROVE_LOCKING
1085 1085
1086 For more details, see Documentation/locking/lockdep-design.txt. 1086 For more details, see Documentation/locking/lockdep-design.txt.
1087 1087
1088config PROVE_LOCKING_SMALL
1089 bool
1090
1088config LOCKDEP 1091config LOCKDEP
1089 bool 1092 bool
1090 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1093 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index e034afbd1bb0..08ce36147c4c 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -652,6 +652,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
652 batadv_softif_destroy_sysfs(hard_iface->soft_iface); 652 batadv_softif_destroy_sysfs(hard_iface->soft_iface);
653 } 653 }
654 654
655 hard_iface->soft_iface = NULL;
655 batadv_hardif_put(hard_iface); 656 batadv_hardif_put(hard_iface);
656 657
657out: 658out:
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 2333777f919d..8af1611b8ab2 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
837 primary_if = batadv_primary_if_get_selected(bat_priv); 837 primary_if = batadv_primary_if_get_selected(bat_priv);
838 if (unlikely(!primary_if)) { 838 if (unlikely(!primary_if)) {
839 err = BATADV_TP_REASON_DST_UNREACHABLE; 839 err = BATADV_TP_REASON_DST_UNREACHABLE;
840 tp_vars->reason = err;
840 goto out; 841 goto out;
841 } 842 }
842 843
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index f61c0e02a413..7001da910c6b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -219,6 +219,8 @@ int peernet2id_alloc(struct net *net, struct net *peer)
219 bool alloc; 219 bool alloc;
220 int id; 220 int id;
221 221
222 if (atomic_read(&net->count) == 0)
223 return NETNSA_NSID_NOT_ASSIGNED;
222 spin_lock_irqsave(&net->nsid_lock, flags); 224 spin_lock_irqsave(&net->nsid_lock, flags);
223 alloc = atomic_read(&peer->count) == 0 ? false : true; 225 alloc = atomic_read(&peer->count) == 0 ? false : true;
224 id = __peernet2id_alloc(net, peer, &alloc); 226 id = __peernet2id_alloc(net, peer, &alloc);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index db313ec7af32..a99917b5de33 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -840,18 +840,20 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
840 if (dev->dev.parent && dev_is_pci(dev->dev.parent) && 840 if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
841 (ext_filter_mask & RTEXT_FILTER_VF)) { 841 (ext_filter_mask & RTEXT_FILTER_VF)) {
842 int num_vfs = dev_num_vf(dev->dev.parent); 842 int num_vfs = dev_num_vf(dev->dev.parent);
843 size_t size = nla_total_size(sizeof(struct nlattr)); 843 size_t size = nla_total_size(0);
844 size += nla_total_size(num_vfs * sizeof(struct nlattr));
845 size += num_vfs * 844 size += num_vfs *
846 (nla_total_size(sizeof(struct ifla_vf_mac)) + 845 (nla_total_size(0) +
847 nla_total_size(MAX_VLAN_LIST_LEN * 846 nla_total_size(sizeof(struct ifla_vf_mac)) +
848 sizeof(struct nlattr)) + 847 nla_total_size(sizeof(struct ifla_vf_vlan)) +
848 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
849 nla_total_size(MAX_VLAN_LIST_LEN * 849 nla_total_size(MAX_VLAN_LIST_LEN *
850 sizeof(struct ifla_vf_vlan_info)) + 850 sizeof(struct ifla_vf_vlan_info)) +
851 nla_total_size(sizeof(struct ifla_vf_spoofchk)) + 851 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
852 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
852 nla_total_size(sizeof(struct ifla_vf_rate)) + 853 nla_total_size(sizeof(struct ifla_vf_rate)) +
853 nla_total_size(sizeof(struct ifla_vf_link_state)) + 854 nla_total_size(sizeof(struct ifla_vf_link_state)) +
854 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + 855 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
856 nla_total_size(0) + /* nest IFLA_VF_STATS */
855 /* IFLA_VF_STATS_RX_PACKETS */ 857 /* IFLA_VF_STATS_RX_PACKETS */
856 nla_total_size_64bit(sizeof(__u64)) + 858 nla_total_size_64bit(sizeof(__u64)) +
857 /* IFLA_VF_STATS_TX_PACKETS */ 859 /* IFLA_VF_STATS_TX_PACKETS */
@@ -899,7 +901,8 @@ static size_t rtnl_port_size(const struct net_device *dev,
899 901
900static size_t rtnl_xdp_size(const struct net_device *dev) 902static size_t rtnl_xdp_size(const struct net_device *dev)
901{ 903{
902 size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */ 904 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
905 nla_total_size(1); /* XDP_ATTACHED */
903 906
904 if (!dev->netdev_ops->ndo_xdp) 907 if (!dev->netdev_ops->ndo_xdp)
905 return 0; 908 return 0;
@@ -1606,7 +1609,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1606 head = &net->dev_index_head[h]; 1609 head = &net->dev_index_head[h];
1607 hlist_for_each_entry(dev, head, index_hlist) { 1610 hlist_for_each_entry(dev, head, index_hlist) {
1608 if (link_dump_filtered(dev, master_idx, kind_ops)) 1611 if (link_dump_filtered(dev, master_idx, kind_ops))
1609 continue; 1612 goto cont;
1610 if (idx < s_idx) 1613 if (idx < s_idx)
1611 goto cont; 1614 goto cont;
1612 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1615 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -2849,7 +2852,10 @@ nla_put_failure:
2849 2852
2850static inline size_t rtnl_fdb_nlmsg_size(void) 2853static inline size_t rtnl_fdb_nlmsg_size(void)
2851{ 2854{
2852 return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); 2855 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
2856 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
2857 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
2858 0;
2853} 2859}
2854 2860
2855static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 2861static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
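
Every netlink attribute costs NLA_HDRLEN plus its aligned payload, and an empty nla_total_size(0) is exactly the cost of a nest header; the corrected size functions above account for one of those per nest and one entry per attribute actually emitted, so the skb allocation can no longer come up short. A standalone calculation using the same arithmetic as the uapi macros (the ndmsg size here is an illustrative stand-in, and the kernel uses NLMSG_ALIGN for the header, which aligns to 4 bytes as well):

#include <stdio.h>
#include <stdint.h>

/* mirrors include/uapi/linux/netlink.h */
#define NLA_ALIGNTO      4
#define NLA_ALIGN(len)   (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN       ((int)NLA_ALIGN(sizeof(struct nlattr)))

struct nlattr { uint16_t nla_len; uint16_t nla_type; };

static int nla_total_size(int payload)
{
    return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
    /* the FDB notification sizing from the hunk above:
     * family header + MAC address attribute + VLAN id attribute
     */
    int ndmsg = 12;                              /* sizeof(struct ndmsg) */
    int size = NLA_ALIGN(ndmsg) +
               nla_total_size(6) +               /* NDA_LLADDR, ETH_ALEN */
               nla_total_size(sizeof(uint16_t)); /* NDA_VLAN */

    printf("nest header alone  = %d bytes\n", nla_total_size(0));
    printf("fdb notify message = %d bytes\n", size);
    return 0;
}
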
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c3b80478226e..161fc0f0d752 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -151,7 +151,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old,
151 151
152int fib_unmerge(struct net *net) 152int fib_unmerge(struct net *net)
153{ 153{
154 struct fib_table *old, *new; 154 struct fib_table *old, *new, *main_table;
155 155
156 /* attempt to fetch local table if it has been allocated */ 156 /* attempt to fetch local table if it has been allocated */
157 old = fib_get_table(net, RT_TABLE_LOCAL); 157 old = fib_get_table(net, RT_TABLE_LOCAL);
@@ -162,11 +162,21 @@ int fib_unmerge(struct net *net)
162 if (!new) 162 if (!new)
163 return -ENOMEM; 163 return -ENOMEM;
164 164
165 /* table is already unmerged */
166 if (new == old)
167 return 0;
168
165 /* replace merged table with clean table */ 169 /* replace merged table with clean table */
166 if (new != old) { 170 fib_replace_table(net, old, new);
167 fib_replace_table(net, old, new); 171 fib_free_table(old);
168 fib_free_table(old); 172
169 } 173 /* attempt to fetch main table if it has been allocated */
174 main_table = fib_get_table(net, RT_TABLE_MAIN);
175 if (!main_table)
176 return 0;
177
178 /* flush local entries from main table */
179 fib_table_flush_external(main_table);
170 180
171 return 0; 181 return 0;
172} 182}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 4cff74d4133f..026f309c51e9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1743,8 +1743,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
1743 local_l = fib_find_node(lt, &local_tp, l->key); 1743 local_l = fib_find_node(lt, &local_tp, l->key);
1744 1744
1745 if (fib_insert_alias(lt, local_tp, local_l, new_fa, 1745 if (fib_insert_alias(lt, local_tp, local_l, new_fa,
1746 NULL, l->key)) 1746 NULL, l->key)) {
1747 kmem_cache_free(fn_alias_kmem, new_fa);
1747 goto out; 1748 goto out;
1749 }
1748 } 1750 }
1749 1751
1750 /* stop loop if key wrapped back to 0 */ 1752 /* stop loop if key wrapped back to 0 */
@@ -1760,6 +1762,71 @@ out:
1760 return NULL; 1762 return NULL;
1761} 1763}
1762 1764
1765/* Caller must hold RTNL */
1766void fib_table_flush_external(struct fib_table *tb)
1767{
1768 struct trie *t = (struct trie *)tb->tb_data;
1769 struct key_vector *pn = t->kv;
1770 unsigned long cindex = 1;
1771 struct hlist_node *tmp;
1772 struct fib_alias *fa;
1773
1774 /* walk trie in reverse order */
1775 for (;;) {
1776 unsigned char slen = 0;
1777 struct key_vector *n;
1778
1779 if (!(cindex--)) {
1780 t_key pkey = pn->key;
1781
1782 /* cannot resize the trie vector */
1783 if (IS_TRIE(pn))
1784 break;
1785
1786 /* resize completed node */
1787 pn = resize(t, pn);
1788 cindex = get_index(pkey, pn);
1789
1790 continue;
1791 }
1792
1793 /* grab the next available node */
1794 n = get_child(pn, cindex);
1795 if (!n)
1796 continue;
1797
1798 if (IS_TNODE(n)) {
1799 /* record pn and cindex for leaf walking */
1800 pn = n;
1801 cindex = 1ul << n->bits;
1802
1803 continue;
1804 }
1805
1806 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1807 /* if alias was cloned to local then we just
1808 * need to remove the local copy from main
1809 */
1810 if (tb->tb_id != fa->tb_id) {
1811 hlist_del_rcu(&fa->fa_list);
1812 alias_free_mem_rcu(fa);
1813 continue;
1814 }
1815
1816 /* record local slen */
1817 slen = fa->fa_slen;
1818 }
1819
1820 /* update leaf slen */
1821 n->slen = slen;
1822
1823 if (hlist_empty(&n->leaf)) {
1824 put_child_root(pn, n->key, NULL);
1825 node_free(n);
1826 }
1827 }
1828}
1829
1763/* Caller must hold RTNL. */ 1830/* Caller must hold RTNL. */
1764int fib_table_flush(struct net *net, struct fib_table *tb) 1831int fib_table_flush(struct net *net, struct fib_table *tb)
1765{ 1832{
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 606cc3e85d2b..15db786d50ed 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -162,7 +162,7 @@ static int unsolicited_report_interval(struct in_device *in_dev)
162} 162}
163 163
164static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); 164static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
165static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); 165static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
166static void igmpv3_clear_delrec(struct in_device *in_dev); 166static void igmpv3_clear_delrec(struct in_device *in_dev);
167static int sf_setstate(struct ip_mc_list *pmc); 167static int sf_setstate(struct ip_mc_list *pmc);
168static void sf_markstate(struct ip_mc_list *pmc); 168static void sf_markstate(struct ip_mc_list *pmc);
@@ -1130,10 +1130,15 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1130 spin_unlock_bh(&in_dev->mc_tomb_lock); 1130 spin_unlock_bh(&in_dev->mc_tomb_lock);
1131} 1131}
1132 1132
1133static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) 1133/*
1134 * restore ip_mc_list deleted records
1135 */
1136static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1134{ 1137{
1135 struct ip_mc_list *pmc, *pmc_prev; 1138 struct ip_mc_list *pmc, *pmc_prev;
1136 struct ip_sf_list *psf, *psf_next; 1139 struct ip_sf_list *psf;
1140 struct net *net = dev_net(in_dev->dev);
1141 __be32 multiaddr = im->multiaddr;
1137 1142
1138 spin_lock_bh(&in_dev->mc_tomb_lock); 1143 spin_lock_bh(&in_dev->mc_tomb_lock);
1139 pmc_prev = NULL; 1144 pmc_prev = NULL;
@@ -1149,16 +1154,26 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
1149 in_dev->mc_tomb = pmc->next; 1154 in_dev->mc_tomb = pmc->next;
1150 } 1155 }
1151 spin_unlock_bh(&in_dev->mc_tomb_lock); 1156 spin_unlock_bh(&in_dev->mc_tomb_lock);
1157
1158 spin_lock_bh(&im->lock);
1152 if (pmc) { 1159 if (pmc) {
1153 for (psf = pmc->tomb; psf; psf = psf_next) { 1160 im->interface = pmc->interface;
1154 psf_next = psf->sf_next; 1161 im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
1155 kfree(psf); 1162 im->sfmode = pmc->sfmode;
1163 if (pmc->sfmode == MCAST_INCLUDE) {
1164 im->tomb = pmc->tomb;
1165 im->sources = pmc->sources;
1166 for (psf = im->sources; psf; psf = psf->sf_next)
1167 psf->sf_crcount = im->crcount;
1156 } 1168 }
1157 in_dev_put(pmc->interface); 1169 in_dev_put(pmc->interface);
1158 kfree(pmc);
1159 } 1170 }
1171 spin_unlock_bh(&im->lock);
1160} 1172}
1161 1173
1174/*
1175 * flush ip_mc_list deleted records
1176 */
1162static void igmpv3_clear_delrec(struct in_device *in_dev) 1177static void igmpv3_clear_delrec(struct in_device *in_dev)
1163{ 1178{
1164 struct ip_mc_list *pmc, *nextpmc; 1179 struct ip_mc_list *pmc, *nextpmc;
@@ -1366,7 +1381,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1366 ip_mc_hash_add(in_dev, im); 1381 ip_mc_hash_add(in_dev, im);
1367 1382
1368#ifdef CONFIG_IP_MULTICAST 1383#ifdef CONFIG_IP_MULTICAST
1369 igmpv3_del_delrec(in_dev, im->multiaddr); 1384 igmpv3_del_delrec(in_dev, im);
1370#endif 1385#endif
1371 igmp_group_added(im); 1386 igmp_group_added(im);
1372 if (!in_dev->dead) 1387 if (!in_dev->dead)
@@ -1626,8 +1641,12 @@ void ip_mc_remap(struct in_device *in_dev)
1626 1641
1627 ASSERT_RTNL(); 1642 ASSERT_RTNL();
1628 1643
1629 for_each_pmc_rtnl(in_dev, pmc) 1644 for_each_pmc_rtnl(in_dev, pmc) {
1645#ifdef CONFIG_IP_MULTICAST
1646 igmpv3_del_delrec(in_dev, pmc);
1647#endif
1630 igmp_group_added(pmc); 1648 igmp_group_added(pmc);
1649 }
1631} 1650}
1632 1651
1633/* Device going down */ 1652/* Device going down */
@@ -1648,7 +1667,6 @@ void ip_mc_down(struct in_device *in_dev)
1648 in_dev->mr_gq_running = 0; 1667 in_dev->mr_gq_running = 0;
1649 if (del_timer(&in_dev->mr_gq_timer)) 1668 if (del_timer(&in_dev->mr_gq_timer))
1650 __in_dev_put(in_dev); 1669 __in_dev_put(in_dev);
1651 igmpv3_clear_delrec(in_dev);
1652#endif 1670#endif
1653 1671
1654 ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); 1672 ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
@@ -1688,8 +1706,12 @@ void ip_mc_up(struct in_device *in_dev)
1688#endif 1706#endif
1689 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1707 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1690 1708
1691 for_each_pmc_rtnl(in_dev, pmc) 1709 for_each_pmc_rtnl(in_dev, pmc) {
1710#ifdef CONFIG_IP_MULTICAST
1711 igmpv3_del_delrec(in_dev, pmc);
1712#endif
1692 igmp_group_added(pmc); 1713 igmp_group_added(pmc);
1714 }
1693} 1715}
1694 1716
1695/* 1717/*
@@ -1704,13 +1726,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1704 1726
1705 /* Deactivate timers */ 1727 /* Deactivate timers */
1706 ip_mc_down(in_dev); 1728 ip_mc_down(in_dev);
1729#ifdef CONFIG_IP_MULTICAST
1730 igmpv3_clear_delrec(in_dev);
1731#endif
1707 1732
1708 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { 1733 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1709 in_dev->mc_list = i->next_rcu; 1734 in_dev->mc_list = i->next_rcu;
1710 in_dev->mc_count--; 1735 in_dev->mc_count--;
1711
1712 /* We've dropped the groups in ip_mc_down already */
1713 ip_mc_clear_src(i);
1714 ip_ma_put(i); 1736 ip_ma_put(i);
1715 } 1737 }
1716} 1738}
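
igmpv3_del_delrec() now restores the saved record into the live ip_mc_list (interface, crcount, sfmode, and for INCLUDE mode the tomb and source lists with refreshed retransmit counts) instead of freeing it, and the tombstones survive ip_mc_down() so ip_mc_up() and ip_mc_remap() can replay them. A toy of the save/restore idea on a singly linked tomb list, with simplified stand-in fields:

#include <stdio.h>
#include <stdlib.h>

struct rec {
    struct rec *next;
    unsigned addr;
};

static struct rec *tomb;  /* models in_dev->mc_tomb */

static void save_rec(unsigned addr)
{
    struct rec *r = calloc(1, sizeof(*r));
    r->addr = addr;
    r->next = tomb;
    tomb = r;
}

/* unlink the tombstone for addr and fold its state back into the group */
static void restore_rec(unsigned addr, int *live_crcount)
{
    struct rec **pp = &tomb, *r;

    while ((r = *pp) && r->addr != addr)
        pp = &r->next;
    if (!r)
        return;
    *pp = r->next;
    *live_crcount = 2;    /* models qrv: restart the report retransmits */
    free(r);
}

int main(void)
{
    int crcount = 0;

    save_rec(0xe0000101);                 /* group deleted: keep a tombstone */
    restore_rec(0xe0000101, &crcount);    /* group re-added: replay state */
    printf("crcount after restore: %d\n", crcount);
    printf("tomb list empty: %s\n", tomb ? "no" : "yes");
    return 0;
}
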
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1294af4e0127..f9038d6b109e 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -200,8 +200,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
200 icsk->icsk_ca_ops = ca; 200 icsk->icsk_ca_ops = ca;
201 icsk->icsk_ca_setsockopt = 1; 201 icsk->icsk_ca_setsockopt = 1;
202 202
203 if (sk->sk_state != TCP_CLOSE) 203 if (sk->sk_state != TCP_CLOSE) {
204 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
204 tcp_init_congestion_control(sk); 205 tcp_init_congestion_control(sk);
206 }
205} 207}
206 208
207/* Manage refcounts on socket close. */ 209/* Manage refcounts on socket close. */
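
icsk_ca_priv is one fixed scratch buffer that every congestion-control module overlays with its own private struct; when setsockopt switches algorithms on a live socket, the new module's init would otherwise read whatever the previous module left behind. A small demonstration of why the memset matters, with made-up stand-in state structs:

#include <stdio.h>
#include <string.h>

static char ca_priv[64];   /* models icsk_ca_priv: shared scratch space */

struct cubic_state { unsigned last_max_cwnd; };
struct vegas_state { unsigned base_rtt; };   /* expects 0 = "not measured" */

int main(void)
{
    /* the first algorithm writes its state into the shared buffer */
    ((struct cubic_state *)ca_priv)->last_max_cwnd = 12345;

    /* switch without clearing: stale bytes masquerade as vegas state */
    printf("base_rtt without memset: %u\n",
           ((struct vegas_state *)ca_priv)->base_rtt);

    memset(ca_priv, 0, sizeof(ca_priv));     /* what the fix adds */
    printf("base_rtt after memset:   %u\n",
           ((struct vegas_state *)ca_priv)->base_rtt);
    return 0;
}
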
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d123d68f4d1d..0de9d5d2b9ae 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1652,10 +1652,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1652 1652
1653 if (use_hash2) { 1653 if (use_hash2) {
1654 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & 1654 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
1655 udp_table.mask; 1655 udptable->mask;
1656 hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; 1656 hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
1657start_lookup: 1657start_lookup:
1658 hslot = &udp_table.hash2[hash2]; 1658 hslot = &udptable->hash2[hash2];
1659 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 1659 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
1660 } 1660 }
1661 1661
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 87784560dc46..0a4759b89da2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1034,6 +1034,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1034 int mtu; 1034 int mtu;
1035 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; 1035 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1036 unsigned int max_headroom = psh_hlen; 1036 unsigned int max_headroom = psh_hlen;
1037 bool use_cache = false;
1037 u8 hop_limit; 1038 u8 hop_limit;
1038 int err = -1; 1039 int err = -1;
1039 1040
@@ -1066,7 +1067,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1066 1067
1067 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1068 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1068 neigh_release(neigh); 1069 neigh_release(neigh);
1069 } else if (!fl6->flowi6_mark) 1070 } else if (!(t->parms.flags &
1071 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1072 /* enable the cache only only if the routing decision does
1073 * not depend on the current inner header value
1074 */
1075 use_cache = true;
1076 }
1077
1078 if (use_cache)
1070 dst = dst_cache_get(&t->dst_cache); 1079 dst = dst_cache_get(&t->dst_cache);
1071 1080
1072 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) 1081 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
@@ -1150,7 +1159,7 @@ route_lookup:
1150 if (t->encap.type != TUNNEL_ENCAP_NONE) 1159 if (t->encap.type != TUNNEL_ENCAP_NONE)
1151 goto tx_err_dst_release; 1160 goto tx_err_dst_release;
1152 } else { 1161 } else {
1153 if (!fl6->flowi6_mark && ndst) 1162 if (use_cache && ndst)
1154 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); 1163 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1155 } 1164 }
1156 skb_dst_set(skb, dst); 1165 skb_dst_set(skb, dst);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b2ef061e6836..e5056d4873d1 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -706,10 +706,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
706 706
707 if (use_hash2) { 707 if (use_hash2) {
708 hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & 708 hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
709 udp_table.mask; 709 udptable->mask;
710 hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; 710 hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
711start_lookup: 711start_lookup:
712 hslot = &udp_table.hash2[hash2]; 712 hslot = &udptable->hash2[hash2];
713 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 713 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
714 } 714 }
715 715
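
Both the IPv4 and IPv6 multicast-deliver paths receive a udptable pointer but previously hashed with the global udp_table's mask and slot array; with a differently sized table the computed slot can land out of bounds or simply in the wrong table. A sketch of the slot math the two hunks make consistent:

#include <stdio.h>

struct udp_table { unsigned mask; /* the slots array follows in the kernel */ };

static unsigned slot_of(const struct udp_table *t, unsigned hash)
{
    /* always reduce the hash with the mask of the table being searched */
    return hash & t->mask;
}

int main(void)
{
    struct udp_table global = { .mask = 1023 };
    struct udp_table small  = { .mask = 127 };
    unsigned hash = 0x5a3f;

    printf("slot in the right table: %u\n", slot_of(&small, hash));
    printf("slot with the global mask: %u (out of range for 128 slots)\n",
           hash & global.mask);
    return 0;
}
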
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 965f7e344cef..3dc97b4f982b 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -97,7 +97,7 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
97 unsigned int len = skb->len; 97 unsigned int len = skb->len;
98 int ret = l2tp_xmit_skb(session, skb, session->hdr_len); 98 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
99 99
100 if (likely(ret == NET_XMIT_SUCCESS)) { 100 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
101 atomic_long_add(len, &priv->tx_bytes); 101 atomic_long_add(len, &priv->tx_bytes);
102 atomic_long_inc(&priv->tx_packets); 102 atomic_long_inc(&priv->tx_packets);
103 } else { 103 } else {
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index fce25afb652a..982f6c44ea01 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
251 int ret; 251 int ret;
252 int chk_addr_ret; 252 int chk_addr_ret;
253 253
254 if (!sock_flag(sk, SOCK_ZAPPED))
255 return -EINVAL;
256 if (addr_len < sizeof(struct sockaddr_l2tpip)) 254 if (addr_len < sizeof(struct sockaddr_l2tpip))
257 return -EINVAL; 255 return -EINVAL;
258 if (addr->l2tp_family != AF_INET) 256 if (addr->l2tp_family != AF_INET)
@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
267 read_unlock_bh(&l2tp_ip_lock); 265 read_unlock_bh(&l2tp_ip_lock);
268 266
269 lock_sock(sk); 267 lock_sock(sk);
268 if (!sock_flag(sk, SOCK_ZAPPED))
269 goto out;
270
270 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) 271 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
271 goto out; 272 goto out;
272 273
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index ad3468c32b53..9978d01ba0ba 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -269,8 +269,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
269 int addr_type; 269 int addr_type;
270 int err; 270 int err;
271 271
272 if (!sock_flag(sk, SOCK_ZAPPED))
273 return -EINVAL;
274 if (addr->l2tp_family != AF_INET6) 272 if (addr->l2tp_family != AF_INET6)
275 return -EINVAL; 273 return -EINVAL;
276 if (addr_len < sizeof(*addr)) 274 if (addr_len < sizeof(*addr))
@@ -296,6 +294,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
296 lock_sock(sk); 294 lock_sock(sk);
297 295
298 err = -EINVAL; 296 err = -EINVAL;
297 if (!sock_flag(sk, SOCK_ZAPPED))
298 goto out_unlock;
299
299 if (sk->sk_state != TCP_CLOSE) 300 if (sk->sk_state != TCP_CLOSE)
300 goto out_unlock; 301 goto out_unlock;
301 302
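
Moving the SOCK_ZAPPED test under lock_sock() in both l2tp_ip_bind() and l2tp_ip6_bind() turns a check-then-act race into an atomic check: two threads binding the same socket could previously both pass the unlocked test and both proceed. The same pattern in miniature with a pthread mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static bool zapped = true;   /* models SOCK_ZAPPED: "not yet bound" */

static int bind_once(void)
{
    int err = -1;            /* -EINVAL in the kernel */

    pthread_mutex_lock(&sk_lock);
    if (!zapped)             /* checked and acted on under the same lock */
        goto out;
    zapped = false;          /* ...so only one caller can ever win */
    err = 0;
out:
    pthread_mutex_unlock(&sk_lock);
    return err;
}

int main(void)
{
    printf("first bind:  %d\n", bind_once());
    printf("second bind: %d\n", bind_once());
    return 0;
}
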
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 78e9ecbc96e6..8e05032689f0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
688 } 688 }
689 689
690 /* No need to do anything if the driver does all */ 690 /* No need to do anything if the driver does all */
691 if (!local->ops->set_tim) 691 if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
692 return; 692 return;
693 693
694 if (sta->dead) 694 if (sta->dead)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1c56abc49627..bd5f4be89435 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1501,7 +1501,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
1501 struct sta_info *sta, 1501 struct sta_info *sta,
1502 struct sk_buff *skb) 1502 struct sk_buff *skb)
1503{ 1503{
1504 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1505 struct fq *fq = &local->fq; 1504 struct fq *fq = &local->fq;
1506 struct ieee80211_vif *vif; 1505 struct ieee80211_vif *vif;
1507 struct txq_info *txqi; 1506 struct txq_info *txqi;
@@ -1526,8 +1525,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
1526 if (!txqi) 1525 if (!txqi)
1527 return false; 1526 return false;
1528 1527
1529 info->control.vif = vif;
1530
1531 spin_lock_bh(&fq->lock); 1528 spin_lock_bh(&fq->lock);
1532 ieee80211_txq_enqueue(local, txqi, skb); 1529 ieee80211_txq_enqueue(local, txqi, skb);
1533 spin_unlock_bh(&fq->lock); 1530 spin_unlock_bh(&fq->lock);
@@ -3213,7 +3210,6 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
3213 3210
3214 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { 3211 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
3215 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 3212 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3216 *ieee80211_get_qos_ctl(hdr) = tid;
3217 hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); 3213 hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
3218 } else { 3214 } else {
3219 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; 3215 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
@@ -3338,6 +3334,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
3338 (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); 3334 (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
3339 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; 3335 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
3340 3336
3337 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
3338 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3339 *ieee80211_get_qos_ctl(hdr) = tid;
3340 }
3341
3341 __skb_queue_head_init(&tx.skbs); 3342 __skb_queue_head_init(&tx.skbs);
3342 3343
3343 tx.flags = IEEE80211_TX_UNICAST; 3344 tx.flags = IEEE80211_TX_UNICAST;
@@ -3426,6 +3427,11 @@ begin:
3426 goto begin; 3427 goto begin;
3427 } 3428 }
3428 3429
3430 if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
3431 info->flags |= IEEE80211_TX_CTL_AMPDU;
3432 else
3433 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
3434
3429 if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { 3435 if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
3430 struct sta_info *sta = container_of(txq->sta, struct sta_info, 3436 struct sta_info *sta = container_of(txq->sta, struct sta_info,
3431 sta); 3437 sta);
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ee715764a828..6832bf6ab69f 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -270,6 +270,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
270 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); 270 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
271 } 271 }
272 272
273 /*
274 * This is a workaround for VHT-enabled STAs which break the spec
275 * and have the VHT-MCS Rx map filled in with value 3 for all eight
276 * spatial streams, an example is AR9462.
277 *
278 * As per spec, in section 22.1.1 Introduction to the VHT PHY
279 * A VHT STA shall support at least single spatial stream VHT-MCSs
280 * 0 to 7 (transmit and receive) in all supported channel widths.
281 */
282 if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
283 vht_cap->vht_supported = false;
284 sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n",
285 sta->addr);
286 return;
287 }
288
273 /* finally set up the bandwidth */ 289 /* finally set up the bandwidth */
274 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 290 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
275 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 291 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2b2a7974e4bb..8e93d4afe5ea 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -112,7 +112,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
112 112
113 for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL; 113 for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
114 it_chain = &tp->next) 114 it_chain = &tp->next)
115 tfilter_notify(net, oskb, n, tp, 0, event, false); 115 tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false);
116} 116}
117 117
118/* Select new prio value from the range, managed by kernel. */ 118/* Select new prio value from the range, managed by kernel. */
@@ -430,7 +430,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
430 if (!skb) 430 if (!skb)
431 return -ENOBUFS; 431 return -ENOBUFS;
432 432
433 if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) { 433 if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
434 n->nlmsg_flags, event) <= 0) {
434 kfree_skb(skb); 435 kfree_skb(skb);
435 return -EINVAL; 436 return -EINVAL;
436 } 437 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f9f5f3c3dab5..db32777ab591 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB 4 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -129,54 +129,8 @@ static const struct proto_ops packet_ops;
129static const struct proto_ops stream_ops; 129static const struct proto_ops stream_ops;
130static const struct proto_ops msg_ops; 130static const struct proto_ops msg_ops;
131static struct proto tipc_proto; 131static struct proto tipc_proto;
132
133static const struct rhashtable_params tsk_rht_params; 132static const struct rhashtable_params tsk_rht_params;
134 133
135/*
136 * Revised TIPC socket locking policy:
137 *
138 * Most socket operations take the standard socket lock when they start
139 * and hold it until they finish (or until they need to sleep). Acquiring
140 * this lock grants the owner exclusive access to the fields of the socket
141 * data structures, with the exception of the backlog queue. A few socket
142 * operations can be done without taking the socket lock because they only
143 * read socket information that never changes during the life of the socket.
144 *
145 * Socket operations may acquire the lock for the associated TIPC port if they
146 * need to perform an operation on the port. If any routine needs to acquire
147 * both the socket lock and the port lock it must take the socket lock first
148 * to avoid the risk of deadlock.
149 *
150 * The dispatcher handling incoming messages cannot grab the socket lock in
151 * the standard fashion, since invoked it runs at the BH level and cannot block.
152 * Instead, it checks to see if the socket lock is currently owned by someone,
153 * and either handles the message itself or adds it to the socket's backlog
154 * queue; in the latter case the queued message is processed once the process
155 * owning the socket lock releases it.
156 *
157 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
158 * the problem of a blocked socket operation preventing any other operations
159 * from occurring. However, applications must be careful if they have
160 * multiple threads trying to send (or receive) on the same socket, as these
161 * operations might interfere with each other. For example, doing a connect
162 * and a receive at the same time might allow the receive to consume the
163 * ACK message meant for the connect. While additional work could be done
164 * to try and overcome this, it doesn't seem to be worthwhile at the present.
165 *
166 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
167 * that another operation that must be performed in a non-blocking manner is
168 * not delayed for very long because the lock has already been taken.
169 *
170 * NOTE: This code assumes that certain fields of a port/socket pair are
171 * constant over its lifetime; such fields can be examined without taking
172 * the socket lock and/or port lock, and do not need to be re-read even
173 * after resuming processing after waiting. These fields include:
174 * - socket type
175 * - pointer to socket sk structure (aka tipc_sock structure)
176 * - pointer to port structure
177 * - port reference
178 */
179
180static u32 tsk_own_node(struct tipc_sock *tsk) 134static u32 tsk_own_node(struct tipc_sock *tsk)
181{ 135{
182 return msg_prevnode(&tsk->phdr); 136 return msg_prevnode(&tsk->phdr);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5d1c14a2f268..2358f2690ec5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2199,7 +2199,8 @@ out:
2199 * Sleep until more data has arrived. But check for races.. 2199 * Sleep until more data has arrived. But check for races..
2200 */ 2200 */
2201static long unix_stream_data_wait(struct sock *sk, long timeo, 2201static long unix_stream_data_wait(struct sock *sk, long timeo,
2202 struct sk_buff *last, unsigned int last_len) 2202 struct sk_buff *last, unsigned int last_len,
2203 bool freezable)
2203{ 2204{
2204 struct sk_buff *tail; 2205 struct sk_buff *tail;
2205 DEFINE_WAIT(wait); 2206 DEFINE_WAIT(wait);
@@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
2220 2221
2221 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2222 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2222 unix_state_unlock(sk); 2223 unix_state_unlock(sk);
2223 timeo = freezable_schedule_timeout(timeo); 2224 if (freezable)
2225 timeo = freezable_schedule_timeout(timeo);
2226 else
2227 timeo = schedule_timeout(timeo);
2224 unix_state_lock(sk); 2228 unix_state_lock(sk);
2225 2229
2226 if (sock_flag(sk, SOCK_DEAD)) 2230 if (sock_flag(sk, SOCK_DEAD))
@@ -2250,7 +2254,8 @@ struct unix_stream_read_state {
2250 unsigned int splice_flags; 2254 unsigned int splice_flags;
2251}; 2255};
2252 2256
2253static int unix_stream_read_generic(struct unix_stream_read_state *state) 2257static int unix_stream_read_generic(struct unix_stream_read_state *state,
2258 bool freezable)
2254{ 2259{
2255 struct scm_cookie scm; 2260 struct scm_cookie scm;
2256 struct socket *sock = state->socket; 2261 struct socket *sock = state->socket;
@@ -2330,7 +2335,7 @@ again:
2330 mutex_unlock(&u->iolock); 2335 mutex_unlock(&u->iolock);
2331 2336
2332 timeo = unix_stream_data_wait(sk, timeo, last, 2337 timeo = unix_stream_data_wait(sk, timeo, last,
2333 last_len); 2338 last_len, freezable);
2334 2339
2335 if (signal_pending(current)) { 2340 if (signal_pending(current)) {
2336 err = sock_intr_errno(timeo); 2341 err = sock_intr_errno(timeo);
@@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2472 .flags = flags 2477 .flags = flags
2473 }; 2478 };
2474 2479
2475 return unix_stream_read_generic(&state); 2480 return unix_stream_read_generic(&state, true);
2476} 2481}
2477 2482
2478static int unix_stream_splice_actor(struct sk_buff *skb, 2483static int unix_stream_splice_actor(struct sk_buff *skb,
@@ -2503,7 +2508,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2503 flags & SPLICE_F_NONBLOCK) 2508 flags & SPLICE_F_NONBLOCK)
2504 state.flags = MSG_DONTWAIT; 2509 state.flags = MSG_DONTWAIT;
2505 2510
2506 return unix_stream_read_generic(&state); 2511 return unix_stream_read_generic(&state, false);
2507} 2512}
2508 2513
2509static int unix_shutdown(struct socket *sock, int mode) 2514static int unix_shutdown(struct socket *sock, int mode)
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 08d2e948c9ad..f0c0c8a48c92 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
71 struct list_head bss_list; 71 struct list_head bss_list;
72 struct rb_root bss_tree; 72 struct rb_root bss_tree;
73 u32 bss_generation; 73 u32 bss_generation;
74 u32 bss_entries;
74 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 75 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
75 struct sk_buff *scan_msg; 76 struct sk_buff *scan_msg;
76 struct cfg80211_sched_scan_request __rcu *sched_scan_req; 77 struct cfg80211_sched_scan_request __rcu *sched_scan_req;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b5bd58d0f731..35ad69fd0838 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -57,6 +57,19 @@
57 * also linked into the probe response struct. 57 * also linked into the probe response struct.
58 */ 58 */
59 59
60/*
61 * Limit the number of BSS entries stored in mac80211. Each one is
62 * a bit over 4k at most, so this limits to roughly 4-5M of memory.
63 * If somebody wants to really attack this though, they'd likely
64 * use small beacons, and only one type of frame, limiting each of
65 * the entries to a much smaller size (in order to generate more
66 * entries in total, so overhead is bigger.)
67 */
68static int bss_entries_limit = 1000;
69module_param(bss_entries_limit, int, 0644);
70MODULE_PARM_DESC(bss_entries_limit,
71 "limit to number of scan BSS entries (per wiphy, default 1000)");
72
60#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) 73#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
61 74
62static void bss_free(struct cfg80211_internal_bss *bss) 75static void bss_free(struct cfg80211_internal_bss *bss)
@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
137 150
138 list_del_init(&bss->list); 151 list_del_init(&bss->list);
139 rb_erase(&bss->rbn, &rdev->bss_tree); 152 rb_erase(&bss->rbn, &rdev->bss_tree);
153 rdev->bss_entries--;
154 WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
155 "rdev bss entries[%d]/list[empty:%d] corruption\n",
156 rdev->bss_entries, list_empty(&rdev->bss_list));
140 bss_ref_put(rdev, bss); 157 bss_ref_put(rdev, bss);
141 return true; 158 return true;
142} 159}
@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
 	rdev->bss_generation++;
 }
 
+static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
+{
+	struct cfg80211_internal_bss *bss, *oldest = NULL;
+	bool ret;
+
+	lockdep_assert_held(&rdev->bss_lock);
+
+	list_for_each_entry(bss, &rdev->bss_list, list) {
+		if (atomic_read(&bss->hold))
+			continue;
+
+		if (!list_empty(&bss->hidden_list) &&
+		    !bss->pub.hidden_beacon_bss)
+			continue;
+
+		if (oldest && time_before(oldest->ts, bss->ts))
+			continue;
+		oldest = bss;
+	}
+
+	if (WARN_ON(!oldest))
+		return false;
+
+	/*
+	 * The callers make sure to increase rdev->bss_generation if anything
+	 * gets removed (and a new entry added), so there's no need to also do
+	 * it here.
+	 */
+
+	ret = __cfg80211_unlink_bss(rdev, oldest);
+	WARN_ON(!ret);
+	return ret;
+}
+
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
 			   bool send_message)
 {
@@ -689,6 +740,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
 	const u8 *ie;
 	int i, ssidlen;
 	u8 fold = 0;
+	u32 n_entries = 0;
 
 	ies = rcu_access_pointer(new->pub.beacon_ies);
 	if (WARN_ON(!ies))
@@ -712,6 +764,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
 	/* This is the bad part ... */
 
 	list_for_each_entry(bss, &rdev->bss_list, list) {
+		/*
+		 * we're iterating all the entries anyway, so take the
+		 * opportunity to validate the list length accounting
+		 */
+		n_entries++;
+
 		if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
 			continue;
 		if (bss->pub.channel != new->pub.channel)
@@ -740,6 +798,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
 					       new->pub.beacon_ies);
 	}
 
+	WARN_ONCE(n_entries != rdev->bss_entries,
+		  "rdev bss entries[%d]/list[len:%d] corruption\n",
+		  rdev->bss_entries, n_entries);
+
 	return true;
 }
 
@@ -894,7 +956,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 			}
 		}
 
+		if (rdev->bss_entries >= bss_entries_limit &&
+		    !cfg80211_bss_expire_oldest(rdev)) {
+			kfree(new);
+			goto drop;
+		}
+
 		list_add_tail(&new->list, &rdev->bss_list);
+		rdev->bss_entries++;
 		rb_insert_bss(rdev, new);
 		found = new;
 	}
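
Taken together, the scan.c changes cap the per-wiphy cache: before a new
entry is linked in, a full cache first tries to evict, preferring the oldest
entry that is neither held by a driver nor the visible parent of a
hidden-SSID group; if nothing is evictable, the new entry is dropped instead.
A toy model of the victim selection (userspace C; the names are invented, and
the plain comparison ignores the jiffies wraparound that time_before()
handles in the kernel):

	#include <stdio.h>
	#include <stddef.h>

	struct toy_bss {
		unsigned long ts;  /* last-seen time, smaller is older */
		int held;          /* stands in for atomic_read(&bss->hold) */
	};

	static struct toy_bss *pick_oldest(struct toy_bss *arr, size_t n)
	{
		struct toy_bss *oldest = NULL;

		for (size_t i = 0; i < n; i++) {
			if (arr[i].held)
				continue;  /* pinned, not evictable */
			if (oldest && oldest->ts < arr[i].ts)
				continue;  /* already have an older candidate */
			oldest = &arr[i];
		}
		return oldest;  /* NULL when everything is pinned */
	}

	int main(void)
	{
		struct toy_bss cache[] = {
			{ .ts = 100, .held = 1 },  /* oldest, but pinned */
			{ .ts = 150, .held = 0 },  /* this one goes */
			{ .ts = 200, .held = 0 },
		};
		struct toy_bss *victim = pick_oldest(cache, 3);

		if (victim)
			printf("evicting entry with ts=%lu\n", victim->ts);
		return 0;
	}

Since bss_entries_limit is registered with mode 0644, it can also be tuned
at runtime through /sys/module/cfg80211/parameters/bss_entries_limit.
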
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 5ea12afc7706..659b507b347d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1158,7 +1158,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
 		   58500000,
 		   65000000,
 		   78000000,
-		   0,
+		/* not in the spec, but some devices use this: */
+		   86500000,
 		},
 		{  13500000,
 		   27000000,
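
The replaced zero is the 20 MHz, single-stream slot for VHT MCS 9. 802.11ac
leaves that combination undefined, but devices report it anyway, and it
corresponds to 86.5 Mbps. The surrounding helper (not shown here) scales the
table value by the number of spatial streams and reports the result in units
of 100 kbps; a worked check of the new entry, with the rounding constant as
an assumption:

	#include <stdio.h>

	int main(void)
	{
		unsigned int base = 86500000;   /* new table entry */
		unsigned int nss  = 1;          /* spatial streams */
		unsigned int bitrate = base * nss;

		/* assumed rounding: add half a unit before dividing down
		 * to 100 kbps units */
		unsigned int units = (bitrate + 50000) / 100000;

		printf("%u.%u Mbps\n", units / 10, units % 10);  /* 86.5 Mbps */
		return 0;
	}
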
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index fc3036b34e51..a4d90aa1045a 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
 	/* released below */
 	cred = get_current_cred();
 	cxt = cred_cxt(cred);
-	profile = aa_cred_profile(cred);
-	previous_profile = cxt->previous;
+	profile = aa_get_newest_profile(aa_cred_profile(cred));
+	previous_profile = aa_get_newest_profile(cxt->previous);
 
 	if (unconfined(profile)) {
 		info = "unconfined";
@@ -718,6 +718,8 @@ audit:
 out:
 	aa_put_profile(hat);
 	kfree(name);
+	aa_put_profile(profile);
+	aa_put_profile(previous_profile);
 	put_cred(cred);
 
 	return error;
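
The AppArmor change addresses lookups against a stale profile: after a policy
replacement, the task's cred can still point at the old profile, so resolving
a hat from it fails. aa_get_newest_profile() follows the replacement chain to
the live profile and takes a counted reference, and the two aa_put_profile()
calls added on the exit path balance those references. A toy model of the
"get newest" walk (invented names; only the shape of the chain-following and
refcounting matches the real helpers):

	#include <assert.h>
	#include <stddef.h>

	struct profile {
		struct profile *replaced_by;  /* set when policy is replaced */
		int refcount;
	};

	/* follow replacements to the live profile, then pin it;
	 * the caller owns one reference and must drop it later */
	static struct profile *get_newest(struct profile *p)
	{
		while (p && p->replaced_by)
			p = p->replaced_by;
		if (p)
			p->refcount++;
		return p;
	}

	int main(void)
	{
		struct profile new_p = { .replaced_by = NULL, .refcount = 0 };
		struct profile old_p = { .replaced_by = &new_p, .refcount = 0 };

		struct profile *live = get_newest(&old_p);

		assert(live == &new_p && live->refcount == 1);
		return 0;
	}
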