 Documentation/ABI/testing/sysfs-kernel-iommu_groups | 12
 Documentation/DMA-attributes.txt | 10
 MAINTAINERS | 3
 Makefile | 4
 arch/arm/mm/dma-mapping.c | 60
 arch/arm64/crypto/aes-modes.S | 88
 arch/arm64/mm/dma-mapping.c | 7
 arch/powerpc/Kconfig | 2
 arch/powerpc/include/asm/cpu_has_feature.h | 2
 arch/powerpc/include/asm/mmu.h | 2
 arch/powerpc/include/asm/module.h | 4
 arch/powerpc/include/asm/stackprotector.h | 40
 arch/powerpc/kernel/Makefile | 4
 arch/powerpc/kernel/asm-offsets.c | 3
 arch/powerpc/kernel/eeh_driver.c | 2
 arch/powerpc/kernel/entry_32.S | 6
 arch/powerpc/kernel/module_64.c | 8
 arch/powerpc/kernel/process.c | 6
 arch/powerpc/kernel/prom_init.c | 3
 arch/powerpc/mm/pgtable-radix.c | 4
 arch/sparc/include/asm/mmu_context_64.h | 8
 arch/sparc/kernel/irq_64.c | 2
 arch/sparc/kernel/sstate.c | 6
 arch/sparc/kernel/traps_64.c | 73
 arch/x86/events/intel/rapl.c | 60
 arch/x86/events/intel/uncore.c | 232
 arch/x86/include/asm/microcode.h | 1
 arch/x86/kernel/apic/io_apic.c | 2
 arch/x86/kernel/cpu/mcheck/mce.c | 31
 arch/x86/kernel/cpu/microcode/amd.c | 5
 arch/x86/kernel/cpu/microcode/core.c | 22
 arch/x86/kernel/cpu/microcode/intel.c | 9
 arch/x86/kernel/fpu/core.c | 4
 arch/x86/kernel/hpet.c | 1
 arch/x86/kvm/x86.c | 1
 arch/x86/platform/efi/efi_64.c | 16
 arch/xtensa/kernel/setup.c | 2
 crypto/algapi.c | 1
 drivers/acpi/arm64/iort.c | 2
 drivers/ata/libata-core.c | 6
 drivers/ata/sata_mv.c | 3
 drivers/base/firmware_class.c | 5
 drivers/base/memory.c | 12
 drivers/bcma/bcma_private.h | 3
 drivers/bcma/driver_chipcommon.c | 11
 drivers/bcma/driver_mips.c | 3
 drivers/dma/cppi41.c | 69
 drivers/dma/pl330.c | 24
 drivers/firmware/efi/libstub/fdt.c | 14
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4
 drivers/gpu/drm/drm_atomic.c | 13
 drivers/gpu/drm/drm_atomic_helper.c | 9
 drivers/gpu/drm/drm_connector.c | 23
 drivers/gpu/drm/drm_drv.c | 4
 drivers/gpu/drm/i915/i915_drv.h | 16
 drivers/gpu/drm/i915/intel_atomic_plane.c | 20
 drivers/gpu/drm/i915/intel_display.c | 125
 drivers/gpu/drm/i915/intel_drv.h | 9
 drivers/gpu/drm/i915/intel_fbc.c | 52
 drivers/gpu/drm/i915/intel_fbdev.c | 4
 drivers/gpu/drm/i915/intel_sprite.c | 8
 drivers/gpu/drm/nouveau/dispnv04/hw.c | 3
 drivers/gpu/drm/nouveau/nouveau_fence.h | 1
 drivers/gpu/drm/nouveau/nouveau_led.h | 2
 drivers/gpu/drm/nouveau/nouveau_usif.c | 3
 drivers/gpu/drm/nouveau/nv50_display.c | 6
 drivers/gpu/drm/nouveau/nv84_fence.c | 6
 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | 2
 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 2
 drivers/gpu/drm/radeon/radeon_drv.c | 3
 drivers/gpu/drm/radeon/radeon_gem.c | 4
 drivers/hid/hid-cp2112.c | 28
 drivers/hid/hid-ids.h | 3
 drivers/hid/hid-lg.c | 2
 drivers/hid/usbhid/hid-quirks.c | 1
 drivers/hid/wacom_wac.c | 28
 drivers/hv/ring_buffer.c | 1
 drivers/iio/adc/palmas_gpadc.c | 4
 drivers/iio/health/afe4403.c | 4
 drivers/iio/health/afe4404.c | 4
 drivers/iio/health/max30100.c | 2
 drivers/iio/humidity/dht11.c | 6
 drivers/input/rmi4/rmi_driver.c | 4
 drivers/input/touchscreen/wm97xx-core.c | 2
 drivers/iommu/Kconfig | 3
 drivers/iommu/amd_iommu.c | 72
 drivers/iommu/amd_iommu_init.c | 11
 drivers/iommu/amd_iommu_types.h | 4
 drivers/iommu/arm-smmu-v3.c | 90
 drivers/iommu/arm-smmu.c | 135
 drivers/iommu/dma-iommu.c | 183
 drivers/iommu/dmar.c | 20
 drivers/iommu/exynos-iommu.c | 55
 drivers/iommu/intel-iommu.c | 111
 drivers/iommu/io-pgtable-arm-v7s.c | 6
 drivers/iommu/io-pgtable-arm.c | 5
 drivers/iommu/iommu-sysfs.c | 61
 drivers/iommu/iommu.c | 285
 drivers/iommu/iova.c | 23
 drivers/iommu/ipmmu-vmsa.c | 2
 drivers/iommu/msm_iommu.c | 73
 drivers/iommu/msm_iommu.h | 3
 drivers/iommu/mtk_iommu.c | 27
 drivers/iommu/mtk_iommu.h | 2
 drivers/iommu/of_iommu.c | 4
 drivers/irqchip/irq-gic-v3-its.c | 1
 drivers/mmc/host/sdhci.c | 3
 drivers/net/ethernet/adaptec/starfire.c | 45
 drivers/net/ethernet/cadence/macb.c | 188
 drivers/net/ethernet/cadence/macb.h | 20
 drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 3
 drivers/net/ethernet/emulex/benet/be_main.c | 33
 drivers/net/ethernet/freescale/gianfar.c | 4
 drivers/net/ethernet/mellanox/mlx4/catas.c | 2
 drivers/net/ethernet/mellanox/mlx4/intf.c | 12
 drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 7
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 11
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 41
 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 202
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 13
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 10
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 36
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
 drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 4
 drivers/net/hyperv/netvsc.c | 6
 drivers/net/phy/micrel.c | 2
 drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 2
 drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 7
 drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 12
 drivers/pci/pcie/aspm.c | 19
 drivers/pinctrl/berlin/berlin-bg4ct.c | 2
 drivers/pinctrl/intel/pinctrl-baytrail.c | 27
 drivers/pinctrl/intel/pinctrl-merrifield.c | 3
 drivers/pinctrl/sunxi/pinctrl-sunxi.c | 3
 drivers/regulator/axp20x-regulator.c | 2
 drivers/regulator/fixed.c | 46
 drivers/regulator/twl6030-regulator.c | 2
 drivers/rtc/Kconfig | 5
 drivers/rtc/rtc-jz4740.c | 12
 drivers/scsi/virtio_scsi.c | 11
 drivers/staging/greybus/timesync_platform.c | 6
 drivers/usb/core/quirks.c | 4
 drivers/usb/gadget/function/f_fs.c | 13
 drivers/usb/musb/musb_core.c | 26
 drivers/usb/musb/musb_core.h | 1
 drivers/usb/serial/option.c | 1
 drivers/usb/serial/pl2303.c | 1
 drivers/usb/serial/pl2303.h | 1
 drivers/usb/serial/qcserial.c | 1
 drivers/vfio/vfio_iommu_spapr_tce.c | 11
 drivers/vfio/vfio_iommu_type1.c | 40
 drivers/vhost/vhost.c | 10
 drivers/virtio/virtio_ring.c | 7
 fs/cifs/readdir.c | 1
 fs/dax.c | 5
 fs/fscache/cookie.c | 5
 fs/fscache/netfs.c | 1
 fs/fscache/object.c | 32
 fs/iomap.c | 3
 fs/nfsd/nfs4layouts.c | 5
 fs/nfsd/nfs4state.c | 19
 fs/nfsd/state.h | 4
 fs/nfsd/vfs.c | 97
 include/asm-generic/export.h | 11
 include/drm/drmP.h | 1
 include/drm/drm_connector.h | 16
 include/linux/can/core.h | 7
 include/linux/cpuhotplug.h | 3
 include/linux/dma-iommu.h | 10
 include/linux/dma-mapping.h | 7
 include/linux/export.h | 17
 include/linux/fscache-cache.h | 1
 include/linux/hyperv.h | 32
 include/linux/intel-iommu.h | 3
 include/linux/iommu.h | 138
 include/linux/irq.h | 17
 include/linux/irqdomain.h | 36
 include/linux/log2.h | 13
 include/linux/memory_hotplug.h | 3
 include/linux/module.h | 14
 include/linux/netdevice.h | 29
 include/linux/of_iommu.h | 11
 include/linux/percpu-refcount.h | 4
 include/net/ipv6.h | 5
 include/uapi/linux/ethtool.h | 4
 init/Kconfig | 4
 kernel/cgroup.c | 13
 kernel/events/core.c | 69
 kernel/irq/irqdomain.c | 83
 kernel/irq/msi.c | 4
 kernel/module.c | 53
 kernel/trace/trace_hwlat.c | 8
 kernel/trace/trace_kprobe.c | 2
 mm/filemap.c | 5
 mm/kasan/report.c | 3
 mm/memory_hotplug.c | 28
 mm/shmem.c | 11
 mm/zswap.c | 30
 net/can/af_can.c | 12
 net/can/af_can.h | 3
 net/can/bcm.c | 27
 net/can/gw.c | 2
 net/can/raw.c | 4
 net/ipv4/tcp_output.c | 6
 net/ipv6/ip6_output.c | 2
 net/ipv6/ip6_tunnel.c | 2
 net/sched/cls_flower.c | 4
 net/sched/cls_matchall.c | 127
 net/sunrpc/auth_gss/gss_rpc_xdr.c | 2
 scripts/Makefile.build | 2
 scripts/genksyms/genksyms.c | 19
 scripts/kallsyms.c | 12
 scripts/mod/modpost.c | 10
 tools/objtool/arch/x86/decode.c | 2
 222 files changed, 2862 insertions(+), 1659 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
index 9b31556cfdda..35c64e00b35c 100644
--- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -12,3 +12,15 @@ Description: /sys/kernel/iommu_groups/ contains a number of sub-
 		file if the IOMMU driver has chosen to register a more
 		common name for the group.
 Users:
+
+What:		/sys/kernel/iommu_groups/reserved_regions
+Date:		January 2017
+KernelVersion:	v4.11
+Contact:	Eric Auger <eric.auger@redhat.com>
+Description:	/sys/kernel/iommu_groups/reserved_regions list IOVA
+		regions that are reserved. Not necessarily all
+		reserved regions are listed. This is typically used to
+		output direct-mapped, MSI, non mappable regions. Each
+		region is described on a single line: the 1st field is
+		the base IOVA, the second is the end IOVA and the third
+		field describes the type of the region.
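For reference, a read of this file therefore yields one line per region of the form <base IOVA> <end IOVA> <type>; a hypothetical example for an x86 MSI window might look like:

	0x00000000fee00000 0x00000000feefffff msi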
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 98bf7ac29aad..44c6bc496eee 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -143,3 +143,13 @@ So, this provides a way for drivers to avoid those error messages on calls
 where allocation failures are not a problem, and shouldn't bother the logs.
 
 NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
+
+DMA_ATTR_PRIVILEGED
+------------------------------
+
+Some advanced peripherals such as remote processors and GPUs perform
+accesses to DMA buffers in both privileged "supervisor" and unprivileged
+"user" modes.  This attribute is used to indicate to the DMA-mapping
+subsystem that the buffer is fully accessible at the elevated privilege
+level (and ideally inaccessible or at least read-only at the
+lesser-privileged levels).
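As a usage sketch (not part of the patch; `dev` stands for a driver's struct device), the attribute is passed like any other DMA attribute:

	dma_addr_t dma_handle;
	void *buf;

	/* Hypothetical call site: buffer for supervisor-only device access. */
	buf = dma_alloc_attrs(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL,
			      DMA_ATTR_PRIVILEGED);
	if (!buf)
		return -ENOMEM;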
diff --git a/MAINTAINERS b/MAINTAINERS
index 5f10c28b2e15..187b9615e31a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10195,7 +10195,6 @@ F: drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/
@@ -13066,7 +13065,7 @@ F: drivers/input/serio/userio.c
 F:	include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:	Amit Shah <amit.shah@redhat.com>
+M:	Amit Shah <amit@kernel.org>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c
diff --git a/Makefile b/Makefile
index 96b27a888285..8e223e081c9d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ab7710002ba6..82d3e79ec82b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1171,6 +1171,25 @@ core_initcall(dma_debug_do_init);
 
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 
+static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
+{
+	int prot = 0;
+
+	if (attrs & DMA_ATTR_PRIVILEGED)
+		prot |= IOMMU_PRIV;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		return prot | IOMMU_READ | IOMMU_WRITE;
+	case DMA_TO_DEVICE:
+		return prot | IOMMU_READ;
+	case DMA_FROM_DEVICE:
+		return prot | IOMMU_WRITE;
+	default:
+		return prot;
+	}
+}
+
 /* IOMMU */
 
 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
@@ -1394,7 +1413,8 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
-__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
+		       unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -1419,7 +1439,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 
 		len = (j - i) << PAGE_SHIFT;
 		ret = iommu_map(mapping->domain, iova, phys, len,
-				IOMMU_READ|IOMMU_WRITE);
+				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1476,7 +1496,8 @@ static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 }
 
 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
-				  dma_addr_t *handle, int coherent_flag)
+				  dma_addr_t *handle, int coherent_flag,
+				  unsigned long attrs)
 {
 	struct page *page;
 	void *addr;
@@ -1488,7 +1509,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
 	if (!addr)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, &page, size);
+	*handle = __iommu_create_mapping(dev, &page, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_mapping;
 
@@ -1522,7 +1543,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 
 	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
 		return __iommu_alloc_simple(dev, size, gfp, handle,
-					    coherent_flag);
+					    coherent_flag, attrs);
 
 	/*
 	 * Following is a work-around (a.k.a. hack) to prevent pages
@@ -1537,7 +1558,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (!pages)
 		return NULL;
 
-	*handle = __iommu_create_mapping(dev, pages, size);
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
 
@@ -1672,27 +1693,6 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 			 GFP_KERNEL);
 }
 
-static int __dma_direction_to_prot(enum dma_data_direction dir)
-{
-	int prot;
-
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
-
-	return prot;
-}
-
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1722,7 +1722,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-	prot = __dma_direction_to_prot(dir);
+	prot = __dma_info_to_prot(dir, attrs);
 
 	ret = iommu_map(mapping->domain, iova, phys, len, prot);
 	if (ret < 0)
@@ -1930,7 +1930,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	prot = __dma_direction_to_prot(dir);
+	prot = __dma_info_to_prot(dir, attrs);
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
@@ -2036,7 +2036,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
 
 	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
 	if (ret < 0)
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index c53dbeae79f2..838dad5c209f 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz		w6, .Lcbcencloop
 
 	ld1		{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6
 
 .Lcbcencloop:
 	ld1		{v1.16b}, [x1], #16		/* get next pt block */
 	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1		{v0.16b}, [x0], #16
 	subs		w4, w4, #1
 	bne		.Lcbcencloop
+	st1		{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)
 
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz		w6, .LcbcdecloopNx
 
 	ld1		{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1		{v1.16b}, [x1], #16		/* get next ct block */
 	mov		v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov		v7.16b, v1.16b			/* ct is next iv */
 	st1		{v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne		.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1		{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)
 
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz		w6, .Lctrfirst		/* 1st time around? */
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
-#if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
-	bcs		.Lctrinc
-	add		x5, x5, #1		/* increment BE ctr */
-	b		.LctrincNx
-#else
-	b		.Lctrinc
-#endif
-.Lctrfirst:
+	cbz		w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1		{v4.16b}, [x5]
-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev		x5, x5
+
+.Lctrnotfirst:
+	umov		x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev		x8, x8
 #if INTERLEAVE >= 2
-	cmn		w5, w4			/* 32 bit overflow? */
+	cmn		w8, w4			/* 32 bit overflow? */
 	bcs		.Lctrloop
 .LctrloopNx:
 	subs		w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 	mov		v0.8b, v4.8b
 	mov		v1.8b, v4.8b
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v0.d[1], x7
-	rev		x7, x5
-	add		x5, x5, #1
+	rev		x7, x8
+	add		x8, x8, #1
 	ins		v1.d[1], x7
 	ld1		{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 	st1		{v0.16b-v1.16b}, [x0], #32
 #else
 	ldr		q8, =0x30000000200000001	/* addends 1,2,3[,0] */
-	dup		v7.4s, w5
+	dup		v7.4s, w8
 	mov		v0.16b, v4.16b
 	add		v7.4s, v7.4s, v8.4s
 	mov		v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 	eor		v2.16b, v7.16b, v2.16b
 	eor		v3.16b, v5.16b, v3.16b
 	st1		{v0.16b-v3.16b}, [x0], #64
-	add		x5, x5, #INTERLEAVE
+	add		x8, x8, #INTERLEAVE
 #endif
-	cbz		w4, .LctroutNx
-.LctrincNx:
-	rev		x7, x5
+	rev		x7, x8
 	ins		v4.d[1], x7
+	cbz		w4, .Lctrout
 	b		.LctrloopNx
-.LctroutNx:
-	sub		x5, x5, #1
-	rev		x7, x5
-	ins		v4.d[1], x7
-	b		.Lctrout
 .Lctr1x:
 	adds		w4, w4, #INTERLEAVE
 	beq		.Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 	mov		v0.16b, v4.16b
 	encrypt_block	v0, w3, x2, x6, w7
+
+	adds		x8, x8, #1		/* increment BE ctr */
+	rev		x7, x8
+	ins		v4.d[1], x7
+	bcs		.Lctrcarry		/* overflow? */
+
+.Lctrcarrydone:
 	subs		w4, w4, #1
 	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
 	ld1		{v3.16b}, [x1], #16
 	eor		v3.16b, v0.16b, v3.16b
 	st1		{v3.16b}, [x0], #16
-	beq		.Lctrout
-.Lctrinc:
-	adds		x5, x5, #1		/* increment BE ctr */
-	rev		x7, x5
-	ins		v4.d[1], x7
-	bcc		.Lctrloop		/* no overflow? */
-	umov		x7, v4.d[0]		/* load upper word of ctr */
-	rev		x7, x7			/* ... to handle the carry */
-	add		x7, x7, #1
-	rev		x7, x7
-	ins		v4.d[0], x7
-	b		.Lctrloop
+	bne		.Lctrloop
+
+.Lctrout:
+	st1		{v4.16b}, [x5]		/* return next CTR value */
+	FRAME_POP
+	ret
+
 .Lctrhalfblock:
 	ld1		{v3.8b}, [x1]
 	eor		v3.8b, v0.8b, v3.8b
 	st1		{v3.8b}, [x0]
-.Lctrout:
 	FRAME_POP
 	ret
+
+.Lctrcarry:
+	umov		x7, v4.d[0]		/* load upper word of ctr */
+	rev		x7, x7			/* ... to handle the carry */
+	add		x7, x7, #1
+	rev		x7, x7
+	ins		v4.d[0], x7
+	b		.Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
 	.ltorg
 
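The reworked CTR path keeps the byte-swapped low 64 bits of the counter in x8 and branches to .Lctrcarry when they wrap. A rough C model of that counter handling (illustrative only, not kernel code):

	#include <stdint.h>

	/* ctr[0]: high 64 bits, ctr[1]: low 64 bits, in native byte order
	 * (the assembly byte-swaps around each increment). */
	static void ctr128_inc(uint64_t ctr[2])
	{
		if (++ctr[1] == 0)	/* carry out of the low half... */
			ctr[0]++;	/* ...propagates into the high half */
	}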
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e04082700bb1..4a14b25163fb 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -558,7 +558,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 			      unsigned long attrs)
 {
 	bool coherent = is_device_dma_coherent(dev);
-	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	size_t iosize = size;
 	void *addr;
 
@@ -712,7 +712,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
 				   unsigned long attrs)
 {
 	bool coherent = is_device_dma_coherent(dev);
-	int prot = dma_direction_to_prot(dir, coherent);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
 	if (!iommu_dma_mapping_error(dev, dev_addr) &&
@@ -770,7 +770,7 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
 
 	return iommu_dma_map_sg(dev, sgl, nelems,
-				dma_direction_to_prot(dir, coherent));
+				dma_info_to_prot(dir, coherent, attrs));
 }
 
 static void __iommu_unmap_sg_attrs(struct device *dev,
@@ -799,7 +799,6 @@ static struct dma_map_ops iommu_dma_ops = {
 	.sync_sg_for_device = __iommu_sync_sg_for_device,
 	.map_resource = iommu_dma_map_resource,
 	.unmap_resource = iommu_dma_unmap_resource,
-	.dma_supported = iommu_dma_supported,
 	.mapping_error = iommu_dma_mapping_error,
 };
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8ee573fe610..281f4f1fcd1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,7 +164,6 @@ config PPC
 	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
-	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 	select NONSTATIC_KERNEL
+	select MODULE_REL_CRCS if MODVERSIONS
 	help
 	  This builds a kernel image that is capable of running at the
 	  location the kernel is loaded at. For ppc32, there is no any
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index b312b152461b..6e834caa3720 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index a34c764ca8dd..233a7e8cc8e3 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 	int i;
 
+#ifndef __clang__ /* clang can't cope with this */
 	BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 	if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cc12c61ef315..53885512b8d3 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644
index 6720190eabec..000000000000
--- a/arch/powerpc/include/asm/stackprotector.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-	unsigned long canary;
-
-	/* Try to get a semi random initial value. */
-	get_random_bytes(&canary, sizeof(canary));
-	canary ^= mftb();
-	canary ^= LINUX_VERSION_CODE;
-
-	current->stack_canary = canary;
-	__stack_chk_guard = current->stack_canary;
-}
-
-#endif	/* _ASM_STACKPROTECTOR_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 23f8082d7bfa..f4c2b52e58b3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0601e6a7297c..195a9fc8f81c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,6 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d88573bdd090..b94887165a10 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 	struct eeh_pe *pe = (struct eeh_pe *)data;
-	bool *clear_sw_state = flag;
+	bool clear_sw_state = *(bool *)flag;
 	int i, rc = 1;
 
 	for (i = 0; rc && i < 3; i++)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 5742dbdbee46..3841d749a430 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	lwz	r0,TSK_STACK_CANARY(r2)
-	lis	r4,__stack_chk_guard@ha
-	stw	r0,__stack_chk_guard@l(r4)
-#endif
+
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index bb1807184bad..0b0f89685b67 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 	for (end = (void *)vers + size; vers < end; vers++)
 		if (vers->name[0] == '.') {
 			memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-			/* The TOC symbol has no CRC computed. To avoid CRC
-			 * check failing, we must force it to the expected
-			 * value (see CRC check in module.c).
-			 */
-			if (!strcmp(vers->name, "TOC."))
-				vers->crc = -(unsigned long)reloc_start;
-#endif
 		}
 }
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 04885cec24df..5dd056df0baa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec47a939cbdd..ac83eb04a8b8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+	if (!PHANDLE_VALID(cpu_pkg))
+		return;
+
 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 	prom.cpu = be32_to_cpu(rval);
 
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfa53ccc8baf..34f1a0dbc898 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 		if (!pmdp)
 			return -ENOMEM;
 		if (map_page_size == PMD_SIZE) {
-			ptep = (pte_t *)pudp;
+			ptep = pmdp_ptep(pmdp);
 			goto set_the_pte;
 		}
 		ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 	}
 	pmdp = pmd_offset(pudp, ea);
 	if (map_page_size == PMD_SIZE) {
-		ptep = (pte_t *)pudp;
+		ptep = pmdp_ptep(pmdp);
 		goto set_the_pte;
 	}
 	if (!pmd_present(*pmdp)) {
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be675e507..d0317993e947 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
 	__tsb_context_switch(__pa(mm->pgd),
-			     &mm->context.tsb_block[0],
+			     &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-			     (mm->context.tsb_block[1].tsb ?
-			      &mm->context.tsb_block[1] :
+			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+			      &mm->context.tsb_block[MM_TSB_HUGE] :
 			      NULL)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 3bebf395252c..4d0248aa0928 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 	unsigned long order = get_order(size);
 	unsigned long p;
 
-	p = __get_free_pages(GFP_KERNEL, order);
+	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate queue.\n");
 		prom_halt();
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index c59af546f522..3caed4023589 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
43 "Linux powering off"; 43 "Linux powering off";
44static const char rebooting_msg[32] __attribute__((aligned(32))) = 44static const char rebooting_msg[32] __attribute__((aligned(32))) =
45 "Linux rebooting"; 45 "Linux rebooting";
46static const char panicing_msg[32] __attribute__((aligned(32))) = 46static const char panicking_msg[32] __attribute__((aligned(32))) =
47 "Linux panicing"; 47 "Linux panicking";
48 48
49static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) 49static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
50{ 50{
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
-	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+	do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
 	return NOTIFY_DONE;
 }
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4bc10e44d1ca..dfc97a47c9a0 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
 	atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+	unsigned int insn;
+
+	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+		return compute_effective_address(regs, insn,
+						 (insn >> 25) & 0x1f);
+	}
+	return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+				       struct sun4v_error_entry *ent) {
+
+	unsigned int attrs = ent->err_attrs;
+
+	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+		unsigned long addr = ent->err_raddr;
+		siginfo_t info;
+
+		if (addr == ~(u64)0) {
+			/* This seems highly unlikely to ever occur */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+		} else {
+			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+							      PAGE_SIZE);
+
+			/* Break the unfortunate news. */
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+				 addr);
+			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
+				 page_cnt);
+
+			while (page_cnt-- > 0) {
+				if (pfn_valid(addr >> PAGE_SHIFT))
+					get_page(pfn_to_page(addr >> PAGE_SHIFT));
+				addr += PAGE_SIZE;
+			}
+		}
+		info.si_signo = SIGKILL;
+		info.si_errno = 0;
+		info.si_trapno = 0;
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+	if (attrs & SUN4V_ERR_ATTRS_PIO) {
+		siginfo_t info;
+
+		info.si_signo = SIGBUS;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+		force_sig_info(info.si_signo, &info, current);
+
+		return true;
+	}
+
+	/* Default to doing nothing */
+	return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
 	put_cpu();
 
+	if (!(regs->tstate & TSTATE_PRIV) &&
+	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+		/* DON'T PANIC: This userspace error was handled. */
+		return;
+	}
+
 #ifdef CONFIG_PCI
 	/* Check for the special PCI poke sequence. */
 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
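The page count in the memory-error path above rounds up, so a partially covered trailing page is claimed as well; e.g. with sparc64's 8 KiB pages, an err_size of 10240 bytes gives DIV_ROUND_UP(10240, 8192) == 2 claimed pages (hypothetical numbers, for illustration).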
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 17c3564d087a..22ef4f72cf32 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
+
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
 	return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
 	/*
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
-
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;
 
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;
 
 	rapl_advertise();
 	return 0;
 
-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }
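The net effect of the RAPL change is that the separate CPUHP prepare state disappears: the online callback allocates the per-package PMU on first use, so a single state pair suffices. A sketch of the resulting setup, using the callbacks defined above:

	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
				"perf/x86/rapl:online",
				rapl_cpu_online,	/* allocates pmu if needed */
				rapl_cpu_offline);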
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 8c4ccdc3a3f3..1ab45976474d 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-				uncore_box_init(box);
-		}
-	}
-
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
+		goto unref;
 
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
1194 1098
1195 uncore_change_context(uncore_msr_uncores, cpu, target); 1099 uncore_change_context(uncore_msr_uncores, cpu, target);
1196 uncore_change_context(uncore_pci_uncores, cpu, target); 1100 uncore_change_context(uncore_pci_uncores, cpu, target);
1101
1102unref:
1103 /* Clear the references */
1104 pkg = topology_logical_package_id(cpu);
1105 for (; *types; types++) {
1106 type = *types;
1107 pmu = type->pmus;
1108 for (i = 0; i < type->num_boxes; i++, pmu++) {
1109 box = pmu->boxes[pkg];
1110 if (box && atomic_dec_return(&box->refcnt) == 0)
1111 uncore_box_exit(box);
1112 }
1113 }
1197 return 0; 1114 return 0;
1198} 1115}
1199 1116
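The refcount handling above is the classic first-on/last-off idiom: atomic_add_return()/atomic_dec_return() return the new count, so the CPU that takes the count from zero initializes the box and the CPU that drops it back to zero exits it. Below is a minimal standalone sketch of the same idiom using C11 atomics; the names (box_get, box_put, struct box) are invented for illustration and are not kernel APIs.

    /* Hypothetical sketch of the first-on/last-off refcount idiom. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct box { atomic_int refcnt; int initialized; };

    static void box_get(struct box *b)
    {
            /* First user transitions 0 -> 1 and pays the init cost. */
            if (atomic_fetch_add(&b->refcnt, 1) + 1 == 1)
                    b->initialized = 1;
    }

    static void box_put(struct box *b)
    {
            /* Last user transitions 1 -> 0 and tears the box down. */
            if (atomic_fetch_sub(&b->refcnt, 1) - 1 == 0)
                    b->initialized = 0;
    }

    int main(void)
    {
            struct box b = { .refcnt = 0, .initialized = 0 };

            box_get(&b);    /* first CPU online: init runs  */
            box_get(&b);    /* second CPU online: no-op     */
            box_put(&b);    /* one CPU offline: still live  */
            box_put(&b);    /* last CPU offline: exit runs  */
            printf("initialized=%d\n", b.initialized);
            return 0;
    }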
1117static int allocate_boxes(struct intel_uncore_type **types,
1118 unsigned int pkg, unsigned int cpu)
1119{
1120 struct intel_uncore_box *box, *tmp;
1121 struct intel_uncore_type *type;
1122 struct intel_uncore_pmu *pmu;
1123 LIST_HEAD(allocated);
1124 int i;
1125
1126 /* Try to allocate all required boxes */
1127 for (; *types; types++) {
1128 type = *types;
1129 pmu = type->pmus;
1130 for (i = 0; i < type->num_boxes; i++, pmu++) {
1131 if (pmu->boxes[pkg])
1132 continue;
1133 box = uncore_alloc_box(type, cpu_to_node(cpu));
1134 if (!box)
1135 goto cleanup;
1136 box->pmu = pmu;
1137 box->pkgid = pkg;
1138 list_add(&box->active_list, &allocated);
1139 }
1140 }
1141 /* Install them in the pmus */
1142 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1143 list_del_init(&box->active_list);
1144 box->pmu->boxes[pkg] = box;
1145 }
1146 return 0;
1147
1148cleanup:
1149 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1150 list_del_init(&box->active_list);
1151 kfree(box);
1152 }
1153 return -ENOMEM;
1154}
1155
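allocate_boxes() stages every allocation on a local list and publishes the boxes to pmu->boxes[] only once all of them have succeeded, so a half-allocated package is never visible to the hotplug path. A minimal userspace sketch of that all-or-nothing pattern follows; the names (alloc_all, struct res) are hypothetical.

    #include <stdlib.h>

    struct res { int id; };

    /* Allocate n resources all-or-nothing: stage into a scratch array,
     * publish to *out only when every allocation succeeded, and free
     * the partial set otherwise. The commit phase cannot fail. */
    static int alloc_all(struct res **out, int n)
    {
            struct res **staged = calloc(n, sizeof(*staged));
            int i;

            if (!staged)
                    return -1;
            for (i = 0; i < n; i++) {
                    staged[i] = malloc(sizeof(struct res));
                    if (!staged[i])
                            goto cleanup;
                    staged[i]->id = i;
            }
            for (i = 0; i < n; i++)         /* commit: make them visible */
                    out[i] = staged[i];
            free(staged);
            return 0;

    cleanup:
            while (i--)                     /* free only what we staged  */
                    free(staged[i]);
            free(staged);
            return -1;
    }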
1200static int uncore_event_cpu_online(unsigned int cpu) 1156static int uncore_event_cpu_online(unsigned int cpu)
1201{ 1157{
1202 int target; 1158 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1159 struct intel_uncore_pmu *pmu;
1160 struct intel_uncore_box *box;
1161 int i, ret, pkg, target;
1162
1163 pkg = topology_logical_package_id(cpu);
1164 ret = allocate_boxes(types, pkg, cpu);
1165 if (ret)
1166 return ret;
1167
1168 for (; *types; types++) {
1169 type = *types;
1170 pmu = type->pmus;
1171 for (i = 0; i < type->num_boxes; i++, pmu++) {
1172 box = pmu->boxes[pkg];
1173 if (box && atomic_inc_return(&box->refcnt) == 1)
1174 uncore_box_init(box);
1175 }
1176 }
1203 1177
1204 /* 1178 /*
1205 * Check if there is an online cpu in the package 1179 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
1389 if (cret && pret) 1363 if (cret && pret)
1390 return -ENODEV; 1364 return -ENODEV;
1391 1365
1392 /* 1366 /* Install hotplug callbacks to set up the targets for each package */
1393 * Install callbacks. Core will call them for each online cpu. 1367 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1394 * 1368 "perf/x86/intel/uncore:online",
1395 * The first online cpu of each package allocates and takes 1369 uncore_event_cpu_online,
1396 * the refcounts for all other online cpus in that package. 1370 uncore_event_cpu_offline);
1397 * If msrs are not enabled no allocation is required and 1371 if (ret)
1398 * uncore_cpu_prepare() is not called for each online cpu. 1372 goto err;
1399 */
1400 if (!cret) {
1401 ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
1402 "perf/x86/intel/uncore:prepare",
1403 uncore_cpu_prepare, NULL);
1404 if (ret)
1405 goto err;
1406 } else {
1407 cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
1408 "perf/x86/intel/uncore:prepare",
1409 uncore_cpu_prepare, NULL);
1410 }
1411 first_init = 1;
1412 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
1413 "perf/x86/uncore:starting",
1414 uncore_cpu_starting, uncore_cpu_dying);
1415 first_init = 0;
1416 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1417 "perf/x86/uncore:online",
1418 uncore_event_cpu_online, uncore_event_cpu_offline);
1419 return 0; 1373 return 0;
1420 1374
1421err: 1375err:
1422 /* Undo box->init_box() */
1423 on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
1424 uncore_types_exit(uncore_msr_uncores); 1376 uncore_types_exit(uncore_msr_uncores);
1425 uncore_pci_exit(); 1377 uncore_pci_exit();
1426 return ret; 1378 return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
1429 1381
1430static void __exit intel_uncore_exit(void) 1382static void __exit intel_uncore_exit(void)
1431{ 1383{
1432 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); 1384 cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1433 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
1434 cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
1435 uncore_types_exit(uncore_msr_uncores); 1385 uncore_types_exit(uncore_msr_uncores);
1436 uncore_pci_exit(); 1386 uncore_pci_exit();
1437} 1387}
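The rework above collapses three hotplug states (prepare, starting, online) into a single online/offline pair. The sketch below shows how a driver might use that API; it assumes a dynamically allocated state (CPUHP_AP_ONLINE_DYN) rather than the fixed uncore state, and the demo_* names are invented. cpuhp_setup_state() invokes the online callback for every CPU that is already up, which is why no separate first-init pass is needed.

    #include <linux/module.h>
    #include <linux/cpuhotplug.h>

    static enum cpuhp_state hp_state;

    static int demo_cpu_online(unsigned int cpu)
    {
            pr_info("cpu %u came online\n", cpu);
            return 0;
    }

    static int demo_cpu_offline(unsigned int cpu)
    {
            pr_info("cpu %u going offline\n", cpu);
            return 0;
    }

    static int __init demo_init(void)
    {
            int ret;

            /* A dynamic AP state: the online callback also runs right
             * here for all CPUs that are already up. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                    demo_cpu_online, demo_cpu_offline);
            if (ret < 0)
                    return ret;
            hp_state = ret;
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* Invokes the offline callback on all online CPUs first. */
            cpuhp_remove_state(hp_state);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");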
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 38711df3bcb5..2266f864b747 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
140extern void load_ucode_ap(void); 140extern void load_ucode_ap(void);
141void reload_early_microcode(void); 141void reload_early_microcode(void);
142extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); 142extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
143extern bool initrd_gone;
143#else 144#else
144static inline int __init microcode_init(void) { return 0; }; 145static inline int __init microcode_init(void) { return 0; };
145static inline void __init load_ucode_bsp(void) { } 146static inline void __init load_ucode_bsp(void) { }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1e35dd06b090..52f352b063fd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
2117 if (idx != -1 && irq_trigger(idx)) 2117 if (idx != -1 && irq_trigger(idx))
2118 unmask_ioapic_irq(irq_get_chip_data(0)); 2118 unmask_ioapic_irq(irq_get_chip_data(0));
2119 } 2119 }
2120 irq_domain_deactivate_irq(irq_data);
2120 irq_domain_activate_irq(irq_data); 2121 irq_domain_activate_irq(irq_data);
2121 if (timer_irq_works()) { 2122 if (timer_irq_works()) {
2122 if (disable_timer_pin_1 > 0) 2123 if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
2138 * legacy devices should be connected to IO APIC #0 2139 * legacy devices should be connected to IO APIC #0
2139 */ 2140 */
2140 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); 2141 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
2142 irq_domain_deactivate_irq(irq_data);
2141 irq_domain_activate_irq(irq_data); 2143 irq_domain_activate_irq(irq_data);
2142 legacy_pic->unmask(0); 2144 legacy_pic->unmask(0);
2143 if (timer_irq_works()) { 2145 if (timer_irq_works()) {
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 00ef43233e03..537c6647d84c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
1373 1373
1374static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; 1374static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1375 1375
1376static void __restart_timer(struct timer_list *t, unsigned long interval) 1376static void __start_timer(struct timer_list *t, unsigned long interval)
1377{ 1377{
1378 unsigned long when = jiffies + interval; 1378 unsigned long when = jiffies + interval;
1379 unsigned long flags; 1379 unsigned long flags;
1380 1380
1381 local_irq_save(flags); 1381 local_irq_save(flags);
1382 1382
1383 if (timer_pending(t)) { 1383 if (!timer_pending(t) || time_before(when, t->expires))
1384 if (time_before(when, t->expires)) 1384 mod_timer(t, round_jiffies(when));
1385 mod_timer(t, when);
1386 } else {
1387 t->expires = round_jiffies(when);
1388 add_timer_on(t, smp_processor_id());
1389 }
1390 1385
1391 local_irq_restore(flags); 1386 local_irq_restore(flags);
1392} 1387}
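The rewritten __start_timer() folds the pending and non-pending cases into one test; this works because mod_timer() also activates a timer that is not currently pending. Below is a small self-contained harness, a model rather than kernel code, checking that the old two-branch logic and the new single condition agree on when the timer gets (re)armed. It only models the arm/no-arm decision; details such as round_jiffies() and CPU pinning via add_timer_on() are deliberately left out.

    #include <stdbool.h>
    #include <stdio.h>

    /* Model: should the timer be (re)armed for a new deadline 'when'? */
    static bool old_logic(bool pending, long expires, long when)
    {
            if (pending)
                    return when < expires;  /* mod_timer() only if earlier   */
            return true;                    /* add_timer_on() if not pending */
    }

    static bool new_logic(bool pending, long expires, long when)
    {
            /* mod_timer() activates inactive timers too: one test suffices */
            return !pending || when < expires;
    }

    int main(void)
    {
            long when, expires = 100;
            int p;

            for (p = 0; p <= 1; p++)
                    for (when = 99; when <= 101; when++)
                            printf("pending=%d when=%ld: old=%d new=%d\n",
                                   p, when, old_logic(p, expires, when),
                                   new_logic(p, expires, when));
            return 0;
    }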
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
1421 1416
1422done: 1417done:
1423 __this_cpu_write(mce_next_interval, iv); 1418 __this_cpu_write(mce_next_interval, iv);
1424 __restart_timer(t, iv); 1419 __start_timer(t, iv);
1425} 1420}
1426 1421
1427/* 1422/*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
1432 struct timer_list *t = this_cpu_ptr(&mce_timer); 1427 struct timer_list *t = this_cpu_ptr(&mce_timer);
1433 unsigned long iv = __this_cpu_read(mce_next_interval); 1428 unsigned long iv = __this_cpu_read(mce_next_interval);
1434 1429
1435 __restart_timer(t, interval); 1430 __start_timer(t, interval);
1436 1431
1437 if (interval < iv) 1432 if (interval < iv)
1438 __this_cpu_write(mce_next_interval, interval); 1433 __this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1779 } 1774 }
1780} 1775}
1781 1776
1782static void mce_start_timer(unsigned int cpu, struct timer_list *t) 1777static void mce_start_timer(struct timer_list *t)
1783{ 1778{
1784 unsigned long iv = check_interval * HZ; 1779 unsigned long iv = check_interval * HZ;
1785 1780
1786 if (mca_cfg.ignore_ce || !iv) 1781 if (mca_cfg.ignore_ce || !iv)
1787 return; 1782 return;
1788 1783
1789 per_cpu(mce_next_interval, cpu) = iv; 1784 this_cpu_write(mce_next_interval, iv);
1790 1785 __start_timer(t, iv);
1791 t->expires = round_jiffies(jiffies + iv);
1792 add_timer_on(t, cpu);
1793} 1786}
1794 1787
1795static void __mcheck_cpu_setup_timer(void) 1788static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
1806 unsigned int cpu = smp_processor_id(); 1799 unsigned int cpu = smp_processor_id();
1807 1800
1808 setup_pinned_timer(t, mce_timer_fn, cpu); 1801 setup_pinned_timer(t, mce_timer_fn, cpu);
1809 mce_start_timer(cpu, t); 1802 mce_start_timer(t);
1810} 1803}
1811 1804
1812/* Handle unconfigured int18 (should never happen) */ 1805/* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
2566 2559
2567static int mce_cpu_online(unsigned int cpu) 2560static int mce_cpu_online(unsigned int cpu)
2568{ 2561{
2569 struct timer_list *t = &per_cpu(mce_timer, cpu); 2562 struct timer_list *t = this_cpu_ptr(&mce_timer);
2570 int ret; 2563 int ret;
2571 2564
2572 mce_device_create(cpu); 2565 mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
2577 return ret; 2570 return ret;
2578 } 2571 }
2579 mce_reenable_cpu(); 2572 mce_reenable_cpu();
2580 mce_start_timer(cpu, t); 2573 mce_start_timer(t);
2581 return 0; 2574 return 0;
2582} 2575}
2583 2576
2584static int mce_cpu_pre_down(unsigned int cpu) 2577static int mce_cpu_pre_down(unsigned int cpu)
2585{ 2578{
2586 struct timer_list *t = &per_cpu(mce_timer, cpu); 2579 struct timer_list *t = this_cpu_ptr(&mce_timer);
2587 2580
2588 mce_disable_cpu(); 2581 mce_disable_cpu();
2589 del_timer_sync(t); 2582 del_timer_sync(t);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 6a31e2691f3a..079e81733a58 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
384reget: 384reget:
385 if (!get_builtin_microcode(&cp, family)) { 385 if (!get_builtin_microcode(&cp, family)) {
386#ifdef CONFIG_BLK_DEV_INITRD 386#ifdef CONFIG_BLK_DEV_INITRD
387 cp = find_cpio_data(ucode_path, (void *)initrd_start, 387 if (!initrd_gone)
388 initrd_end - initrd_start, NULL); 388 cp = find_cpio_data(ucode_path, (void *)initrd_start,
389 initrd_end - initrd_start, NULL);
389#endif 390#endif
390 if (!(cp.data && cp.size)) { 391 if (!(cp.data && cp.size)) {
391 /* 392 /*
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 2af69d27da62..73102d932760 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -46,6 +46,8 @@
46static struct microcode_ops *microcode_ops; 46static struct microcode_ops *microcode_ops;
47static bool dis_ucode_ldr = true; 47static bool dis_ucode_ldr = true;
48 48
49bool initrd_gone;
50
49LIST_HEAD(microcode_cache); 51LIST_HEAD(microcode_cache);
50 52
51/* 53/*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
190static int __init save_microcode_in_initrd(void) 192static int __init save_microcode_in_initrd(void)
191{ 193{
192 struct cpuinfo_x86 *c = &boot_cpu_data; 194 struct cpuinfo_x86 *c = &boot_cpu_data;
195 int ret = -EINVAL;
193 196
194 switch (c->x86_vendor) { 197 switch (c->x86_vendor) {
195 case X86_VENDOR_INTEL: 198 case X86_VENDOR_INTEL:
196 if (c->x86 >= 6) 199 if (c->x86 >= 6)
197 return save_microcode_in_initrd_intel(); 200 ret = save_microcode_in_initrd_intel();
198 break; 201 break;
199 case X86_VENDOR_AMD: 202 case X86_VENDOR_AMD:
200 if (c->x86 >= 0x10) 203 if (c->x86 >= 0x10)
201 return save_microcode_in_initrd_amd(c->x86); 204 ret = save_microcode_in_initrd_amd(c->x86);
202 break; 205 break;
203 default: 206 default:
204 break; 207 break;
205 } 208 }
206 209
207 return -EINVAL; 210 initrd_gone = true;
211
212 return ret;
208} 213}
209 214
210struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) 215struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
247 * has the virtual address of the beginning of the initrd. It also 252 * has the virtual address of the beginning of the initrd. It also
248 * possibly relocates the ramdisk. In either case, initrd_start contains 253 * possibly relocates the ramdisk. In either case, initrd_start contains
249 * the updated address so use that instead. 254 * the updated address so use that instead.
255 *
256 * initrd_gone is for the hotplug case where we've thrown out initrd
257 * already.
250 */ 258 */
251 if (!use_pa && initrd_start) 259 if (!use_pa) {
252 start = initrd_start; 260 if (initrd_gone)
261 return (struct cpio_data){ NULL, 0, "" };
262 if (initrd_start)
263 start = initrd_start;
264 }
253 265
254 return find_cpio_data(path, (void *)start, size, NULL); 266 return find_cpio_data(path, (void *)start, size, NULL);
255#else /* !CONFIG_BLK_DEV_INITRD */ 267#else /* !CONFIG_BLK_DEV_INITRD */
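The initrd_gone flag closes a use-after-free window: once save_microcode_in_initrd() has run, the initrd pages go back to the page allocator, and a later CPU-hotplug microcode load must get an explicit empty result instead of scanning freed memory. A sketch of the guard in isolation, with hypothetical names (find_in_initrd, struct blob):

    #include <stdbool.h>
    #include <stddef.h>

    struct blob { const void *data; size_t size; };

    static bool initrd_gone;        /* set once the initrd is freed */

    static struct blob find_in_initrd(const void *initrd, size_t len)
    {
            /* After the free, hand back "not found" rather than walking
             * memory that was returned to the allocator. */
            if (initrd_gone)
                    return (struct blob){ NULL, 0 };
            /* ... real lookup over [initrd, initrd + len) goes here ... */
            return (struct blob){ initrd, len };
    }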
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 3f329b74e040..8325d8a09ab0 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -41,7 +41,7 @@
41 41
42static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; 42static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
43 43
44/* Current microcode patch used in early patching */ 44/* Current microcode patch used in early patching on the APs. */
45struct microcode_intel *intel_ucode_patch; 45struct microcode_intel *intel_ucode_patch;
46 46
47static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, 47static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
607 struct ucode_cpu_info uci; 607 struct ucode_cpu_info uci;
608 struct cpio_data cp; 608 struct cpio_data cp;
609 609
610 /*
611 * AP loading didn't find any microcode patch, no need to save anything.
612 */
613 if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
614 return 0;
615
616 if (!load_builtin_intel_microcode(&cp)) 610 if (!load_builtin_intel_microcode(&cp))
617 cp = find_microcode_in_initrd(ucode_path, false); 611 cp = find_microcode_in_initrd(ucode_path, false);
618 612
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
628 return 0; 622 return 0;
629} 623}
630 624
631
632/* 625/*
633 * @res_patch, output: a pointer to the patch we found. 626 * @res_patch, output: a pointer to the patch we found.
634 */ 627 */
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e4e97a5355ce..de7234401275 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -9,6 +9,7 @@
9#include <asm/fpu/regset.h> 9#include <asm/fpu/regset.h>
10#include <asm/fpu/signal.h> 10#include <asm/fpu/signal.h>
11#include <asm/fpu/types.h> 11#include <asm/fpu/types.h>
12#include <asm/fpu/xstate.h>
12#include <asm/traps.h> 13#include <asm/traps.h>
13 14
14#include <linux/hardirq.h> 15#include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
183 * it will #GP. Make sure it is replaced after the memset(). 184 * it will #GP. Make sure it is replaced after the memset().
184 */ 185 */
185 if (static_cpu_has(X86_FEATURE_XSAVES)) 186 if (static_cpu_has(X86_FEATURE_XSAVES))
186 state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT; 187 state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
188 xfeatures_mask;
187 189
188 if (static_cpu_has(X86_FEATURE_FXSR)) 190 if (static_cpu_has(X86_FEATURE_FXSR))
189 fpstate_init_fxstate(&state->fxsave); 191 fpstate_init_fxstate(&state->fxsave);
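The fpstate_init() fix ORs the enabled-feature bitmap into xcomp_bv because, with XSAVES, the compacted-format bit alone does not describe a valid buffer: the header must advertise the same state components the kernel's compacted save area is laid out for, or XRSTORS raises #GP. A tiny sketch of the composition, with a hypothetical feature mask standing in for xfeatures_mask:

    #include <stdint.h>
    #include <stdio.h>

    #define XCOMP_BV_COMPACTED_FORMAT (1ULL << 63)

    int main(void)
    {
            /* Hypothetical enabled-features mask: FP | SSE | AVX */
            uint64_t xfeatures_mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 2);

            /* Compacted-format bit plus the component bitmap. */
            uint64_t xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;

            printf("xcomp_bv = %#llx\n", (unsigned long long)xcomp_bv);
            return 0;
    }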
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 85e87b46c318..dc6ba5bda9fc 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
352 } else { 352 } else {
353 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); 353 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
354 354
355 irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
355 irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); 356 irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
356 disable_irq(hdev->irq); 357 disable_irq(hdev->irq);
357 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); 358 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d153be8929a6..e52c9088660f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3182 memcpy(dest, xsave, XSAVE_HDR_OFFSET); 3182 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3183 3183
3184 /* Set XSTATE_BV */ 3184 /* Set XSTATE_BV */
3185 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
3185 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; 3186 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3186 3187
3187 /* 3188 /*
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 319148bd4b05..2f25a363068c 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
269 efi_scratch.use_pgd = true; 269 efi_scratch.use_pgd = true;
270 270
271 /* 271 /*
272 * Certain firmware versions are way too sentimential and still believe
273 * they are exclusive and unquestionable owners of the first physical page,
274 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
275 * (but then write-access it later during SetVirtualAddressMap()).
276 *
277 * Create a 1:1 mapping for this page, to avoid triple faults during early
278 * boot with such firmware. We are free to hand this page to the BIOS,
279 * as trim_bios_range() will reserve the first page and isolate it away
280 * from memory allocators anyway.
281 */
282 if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
283 pr_err("Failed to create 1:1 mapping for the first page!\n");
284 return 1;
285 }
286
287 /*
272 * When making calls to the firmware everything needs to be 1:1 288 * When making calls to the firmware everything needs to be 1:1
273 * mapped and addressable with 32-bit pointers. Map the kernel 289 * mapped and addressable with 32-bit pointers. Map the kernel
274 * text and allocate a new stack because we can't rely on the 290 * text and allocate a new stack because we can't rely on the
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 848e8568fb3c..8fd4be610607 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
419 419
420void cpu_reset(void) 420void cpu_reset(void)
421{ 421{
422#if XCHAL_HAVE_PTP_MMU 422#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
423 local_irq_disable(); 423 local_irq_disable();
424 /* 424 /*
425 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must 425 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
diff --git a/crypto/algapi.c b/crypto/algapi.c
index df939b54b09f..1fad2a6b3bbb 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
356 struct crypto_larval *larval; 356 struct crypto_larval *larval;
357 int err; 357 int err;
358 358
359 alg->cra_flags &= ~CRYPTO_ALG_DEAD;
359 err = crypto_check_alg(alg); 360 err = crypto_check_alg(alg);
360 if (err) 361 if (err)
361 return err; 362 return err;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e0d2e6e6e40c..3752521c62ab 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
536 if (!iort_fwnode) 536 if (!iort_fwnode)
537 return NULL; 537 return NULL;
538 538
539 ops = iommu_get_instance(iort_fwnode); 539 ops = iommu_ops_from_fwnode(iort_fwnode);
540 if (!ops) 540 if (!ops)
541 return NULL; 541 return NULL;
542 542
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9cd0a2d41816..c2d3785ec227 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1702 1702
1703 if (qc->err_mask & ~AC_ERR_OTHER) 1703 if (qc->err_mask & ~AC_ERR_OTHER)
1704 qc->err_mask &= ~AC_ERR_OTHER; 1704 qc->err_mask &= ~AC_ERR_OTHER;
1705 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1706 qc->result_tf.command |= ATA_SENSE;
1705 } 1707 }
1706 1708
1707 /* finish up */ 1709 /* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4356 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4358 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4357 4359
4358 /* 4360 /*
4359 * Device times out with higher max sects. 4361 * These devices time out with higher max sects.
4360 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4362 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4361 */ 4363 */
4362 { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4364 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4363 4365
4364 /* Devices we expect to fail diagnostics */ 4366 /* Devices we expect to fail diagnostics */
4365 4367
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 823e938c9a78..2f32782cea6d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
4132 host->iomap = NULL; 4132 host->iomap = NULL;
4133 hpriv->base = devm_ioremap(&pdev->dev, res->start, 4133 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4134 resource_size(res)); 4134 resource_size(res));
4135 if (!hpriv->base)
4136 return -ENOMEM;
4137
4135 hpriv->base -= SATAHC0_REG_BASE; 4138 hpriv->base -= SATAHC0_REG_BASE;
4136 4139
4137 hpriv->clk = clk_get(&pdev->dev, NULL); 4140 hpriv->clk = clk_get(&pdev->dev, NULL);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4497d263209f..ac350c518e0c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
558 struct firmware_buf *buf = fw_priv->buf; 558 struct firmware_buf *buf = fw_priv->buf;
559 559
560 __fw_load_abort(buf); 560 __fw_load_abort(buf);
561
562 /* avoid user action after loading abort */
563 fw_priv->buf = NULL;
564} 561}
565 562
566static LIST_HEAD(pending_fw_head); 563static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
713 710
714 mutex_lock(&fw_lock); 711 mutex_lock(&fw_lock);
715 fw_buf = fw_priv->buf; 712 fw_buf = fw_priv->buf;
716 if (!fw_buf) 713 if (fw_state_is_aborted(&fw_buf->fw_st))
717 goto out; 714 goto out;
718 715
719 switch (loading) { 716 switch (loading) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dacb6a8418aa..fa26ffd25fa6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
389{ 389{
390 struct memory_block *mem = to_memory_block(dev); 390 struct memory_block *mem = to_memory_block(dev);
391 unsigned long start_pfn, end_pfn; 391 unsigned long start_pfn, end_pfn;
392 unsigned long valid_start, valid_end, valid_pages;
392 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; 393 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
393 struct page *first_page;
394 struct zone *zone; 394 struct zone *zone;
395 int zone_shift = 0; 395 int zone_shift = 0;
396 396
397 start_pfn = section_nr_to_pfn(mem->start_section_nr); 397 start_pfn = section_nr_to_pfn(mem->start_section_nr);
398 end_pfn = start_pfn + nr_pages; 398 end_pfn = start_pfn + nr_pages;
399 first_page = pfn_to_page(start_pfn);
400 399
401 /* A block that contains more than one zone cannot be offlined. */ 400 /* A block that contains more than one zone cannot be offlined. */
402 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 401 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
403 return sprintf(buf, "none\n"); 402 return sprintf(buf, "none\n");
404 403
405 zone = page_zone(first_page); 404 zone = page_zone(pfn_to_page(valid_start));
405 valid_pages = valid_end - valid_start;
406 406
407 /* MMOP_ONLINE_KEEP */ 407 /* MMOP_ONLINE_KEEP */
408 sprintf(buf, "%s", zone->name); 408 sprintf(buf, "%s", zone->name);
409 409
410 /* MMOP_ONLINE_KERNEL */ 410 /* MMOP_ONLINE_KERNEL */
411 zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift); 411 zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
412 if (zone_shift) { 412 if (zone_shift) {
413 strcat(buf, " "); 413 strcat(buf, " ");
414 strcat(buf, (zone + zone_shift)->name); 414 strcat(buf, (zone + zone_shift)->name);
415 } 415 }
416 416
417 /* MMOP_ONLINE_MOVABLE */ 417 /* MMOP_ONLINE_MOVABLE */
418 zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift); 418 zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
419 if (zone_shift) { 419 if (zone_shift) {
420 strcat(buf, " "); 420 strcat(buf, " ");
421 strcat(buf, (zone + zone_shift)->name); 421 strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index f642c4264c27..168fa175d65a 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
45void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); 45void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
46void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); 46void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
47void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); 47void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
48#ifdef CONFIG_BCMA_DRIVER_MIPS
49void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
50#endif /* CONFIG_BCMA_DRIVER_MIPS */
48 51
49/* driver_chipcommon_b.c */ 52/* driver_chipcommon_b.c */
50int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb); 53int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b4f6520e74f0..62f5bfa5065d 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,8 +15,6 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/bcma/bcma.h> 16#include <linux/bcma/bcma.h>
17 17
18static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
19
20static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, 18static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
21 u32 mask, u32 value) 19 u32 mask, u32 value)
22{ 20{
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
186 if (cc->capabilities & BCMA_CC_CAP_PMU) 184 if (cc->capabilities & BCMA_CC_CAP_PMU)
187 bcma_pmu_early_init(cc); 185 bcma_pmu_early_init(cc);
188 186
189 if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
190 bcma_chipco_serial_init(cc);
191
192 if (bus->hosttype == BCMA_HOSTTYPE_SOC) 187 if (bus->hosttype == BCMA_HOSTTYPE_SOC)
193 bcma_core_chipcommon_flash_detect(cc); 188 bcma_core_chipcommon_flash_detect(cc);
194 189
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
378 return res; 373 return res;
379} 374}
380 375
381static void bcma_chipco_serial_init(struct bcma_drv_cc *cc) 376#ifdef CONFIG_BCMA_DRIVER_MIPS
377void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
382{ 378{
383#if IS_BUILTIN(CONFIG_BCM47XX)
384 unsigned int irq; 379 unsigned int irq;
385 u32 baud_base; 380 u32 baud_base;
386 u32 i; 381 u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
422 ports[i].baud_base = baud_base; 417 ports[i].baud_base = baud_base;
423 ports[i].reg_shift = 0; 418 ports[i].reg_shift = 0;
424 } 419 }
425#endif /* CONFIG_BCM47XX */
426} 420}
421#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 96f171328200..89af807cf29c 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
278 278
279void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) 279void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
280{ 280{
281 struct bcma_bus *bus = mcore->core->bus;
282
281 if (mcore->early_setup_done) 283 if (mcore->early_setup_done)
282 return; 284 return;
283 285
286 bcma_chipco_serial_init(&bus->drv_cc);
284 bcma_core_mips_nvram_init(mcore); 287 bcma_core_mips_nvram_init(mcore);
285 288
286 mcore->early_setup_done = true; 289 mcore->early_setup_done = true;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d5ba43a87a68..200828c60db9 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -153,6 +153,8 @@ struct cppi41_dd {
153 153
154 /* context for suspend/resume */ 154 /* context for suspend/resume */
155 unsigned int dma_tdfdq; 155 unsigned int dma_tdfdq;
156
157 bool is_suspended;
156}; 158};
157 159
158#define FIST_COMPLETION_QUEUE 93 160#define FIST_COMPLETION_QUEUE 93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
257 BUG_ON(desc_num >= ALLOC_DECS_NUM); 259 BUG_ON(desc_num >= ALLOC_DECS_NUM);
258 c = cdd->chan_busy[desc_num]; 260 c = cdd->chan_busy[desc_num];
259 cdd->chan_busy[desc_num] = NULL; 261 cdd->chan_busy[desc_num] = NULL;
262
263 /* Usecount for chan_busy[], paired with push_desc_queue() */
264 pm_runtime_put(cdd->ddev.dev);
265
260 return c; 266 return c;
261} 267}
262 268
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
317 323
318 while (val) { 324 while (val) {
319 u32 desc, len; 325 u32 desc, len;
320 int error;
321 326
322 error = pm_runtime_get(cdd->ddev.dev); 327 /*
323 if (error < 0) 328 * This should never trigger, see the comments in
324 dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", 329 * push_desc_queue()
325 __func__, error); 330 */
331 WARN_ON(cdd->is_suspended);
326 332
327 q_num = __fls(val); 333 q_num = __fls(val);
328 val &= ~(1 << q_num); 334 val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
343 c->residue = pd_trans_len(c->desc->pd6) - len; 349 c->residue = pd_trans_len(c->desc->pd6) - len;
344 dma_cookie_complete(&c->txd); 350 dma_cookie_complete(&c->txd);
345 dmaengine_desc_get_callback_invoke(&c->txd, NULL); 351 dmaengine_desc_get_callback_invoke(&c->txd, NULL);
346
347 pm_runtime_mark_last_busy(cdd->ddev.dev);
348 pm_runtime_put_autosuspend(cdd->ddev.dev);
349 } 352 }
350 } 353 }
351 return IRQ_HANDLED; 354 return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
447 */ 450 */
448 __iowmb(); 451 __iowmb();
449 452
453 /*
454 * DMA transfers can take at least 200ms to complete with USB mass
455 * storage connected. To prevent autosuspend timeouts, we must use
456 * pm_runtime_get/put() when chan_busy[] is modified. This will get
457 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
458 * outcome of the transfer.
459 */
460 pm_runtime_get(cdd->ddev.dev);
461
450 desc_phys = lower_32_bits(c->desc_phys); 462 desc_phys = lower_32_bits(c->desc_phys);
451 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); 463 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
452 WARN_ON(cdd->chan_busy[desc_num]); 464 WARN_ON(cdd->chan_busy[desc_num]);
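The cppi41 change ties a runtime-PM reference to the lifetime of each in-flight descriptor: pm_runtime_get() in push_desc_queue(), and the paired put in desc_to_chan() (completion) or cppi41_stop_chan() (teardown). A standalone sketch of that acquire-on-submit/release-on-finish pairing, with made-up names and a plain atomic counter standing in for the runtime-PM usage count:

    #include <stdatomic.h>
    #include <stddef.h>

    struct chan { int busy; };

    static atomic_int usecount;             /* runtime-PM stand-in      */
    static struct chan *chan_busy[4];

    static void submit(struct chan *c, int slot)
    {
            atomic_fetch_add(&usecount, 1); /* get: transfer in flight  */
            chan_busy[slot] = c;
    }

    /* Runs from either the completion path or channel teardown,
     * whichever ends the transfer's life. */
    static struct chan *finish(int slot)
    {
            struct chan *c = chan_busy[slot];

            chan_busy[slot] = NULL;
            atomic_fetch_sub(&usecount, 1); /* put: paired with submit() */
            return c;
    }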
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
457 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); 469 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
458} 470}
459 471
460static void pending_desc(struct cppi41_channel *c) 472/*
473 * Caller must hold cdd->lock to prevent push_desc_queue()
474 * getting called out of order. We have both cppi41_dma_issue_pending()
475 * and cppi41_runtime_resume() call this function.
476 */
477static void cppi41_run_queue(struct cppi41_dd *cdd)
461{ 478{
462 struct cppi41_dd *cdd = c->cdd; 479 struct cppi41_channel *c, *_c;
463 unsigned long flags;
464 480
465 spin_lock_irqsave(&cdd->lock, flags); 481 list_for_each_entry_safe(c, _c, &cdd->pending, node) {
466 list_add_tail(&c->node, &cdd->pending); 482 push_desc_queue(c);
467 spin_unlock_irqrestore(&cdd->lock, flags); 483 list_del(&c->node);
484 }
468} 485}
469 486
470static void cppi41_dma_issue_pending(struct dma_chan *chan) 487static void cppi41_dma_issue_pending(struct dma_chan *chan)
471{ 488{
472 struct cppi41_channel *c = to_cpp41_chan(chan); 489 struct cppi41_channel *c = to_cpp41_chan(chan);
473 struct cppi41_dd *cdd = c->cdd; 490 struct cppi41_dd *cdd = c->cdd;
491 unsigned long flags;
474 int error; 492 int error;
475 493
476 error = pm_runtime_get(cdd->ddev.dev); 494 error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
482 return; 500 return;
483 } 501 }
484 502
485 if (likely(pm_runtime_active(cdd->ddev.dev))) 503 spin_lock_irqsave(&cdd->lock, flags);
486 push_desc_queue(c); 504 list_add_tail(&c->node, &cdd->pending);
487 else 505 if (!cdd->is_suspended)
488 pending_desc(c); 506 cppi41_run_queue(cdd);
507 spin_unlock_irqrestore(&cdd->lock, flags);
489 508
490 pm_runtime_mark_last_busy(cdd->ddev.dev); 509 pm_runtime_mark_last_busy(cdd->ddev.dev);
491 pm_runtime_put_autosuspend(cdd->ddev.dev); 510 pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
705 WARN_ON(!cdd->chan_busy[desc_num]); 724 WARN_ON(!cdd->chan_busy[desc_num]);
706 cdd->chan_busy[desc_num] = NULL; 725 cdd->chan_busy[desc_num] = NULL;
707 726
727 /* Usecount for chan_busy[], paired with push_desc_queue() */
728 pm_runtime_put(cdd->ddev.dev);
729
708 return 0; 730 return 0;
709} 731}
710 732
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
1150static int __maybe_unused cppi41_runtime_suspend(struct device *dev) 1172static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1151{ 1173{
1152 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1174 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1175 unsigned long flags;
1153 1176
1177 spin_lock_irqsave(&cdd->lock, flags);
1178 cdd->is_suspended = true;
1154 WARN_ON(!list_empty(&cdd->pending)); 1179 WARN_ON(!list_empty(&cdd->pending));
1180 spin_unlock_irqrestore(&cdd->lock, flags);
1155 1181
1156 return 0; 1182 return 0;
1157} 1183}
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1159static int __maybe_unused cppi41_runtime_resume(struct device *dev) 1185static int __maybe_unused cppi41_runtime_resume(struct device *dev)
1160{ 1186{
1161 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1187 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1162 struct cppi41_channel *c, *_c;
1163 unsigned long flags; 1188 unsigned long flags;
1164 1189
1165 spin_lock_irqsave(&cdd->lock, flags); 1190 spin_lock_irqsave(&cdd->lock, flags);
1166 list_for_each_entry_safe(c, _c, &cdd->pending, node) { 1191 cdd->is_suspended = false;
1167 push_desc_queue(c); 1192 cppi41_run_queue(cdd);
1168 list_del(&c->node);
1169 }
1170 spin_unlock_irqrestore(&cdd->lock, flags); 1193 spin_unlock_irqrestore(&cdd->lock, flags);
1171 1194
1172 return 0; 1195 return 0;
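The issue_pending/runtime_resume interplay above amounts to a suspend-aware queue: submissions are always queued under the lock, pushed to hardware only while the device is up, and the resume path drains whatever accumulated while it was suspended. A userspace sketch of the same structure, assuming pthreads and invented names; ordering details such as FIFO insertion are elided.

    #include <pthread.h>

    struct item { struct item *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *pending;
    static int is_suspended;

    static void run_queue_locked(void)      /* caller holds 'lock' */
    {
            while (pending) {
                    struct item *it = pending;
                    pending = it->next;
                    /* ... push the descriptor to hardware here ... */
            }
    }

    static void issue(struct item *it)
    {
            pthread_mutex_lock(&lock);
            it->next = pending;             /* always queue first ...   */
            pending = it;
            if (!is_suspended)              /* ... run only when awake  */
                    run_queue_locked();
            pthread_mutex_unlock(&lock);
    }

    static void resume(void)
    {
            pthread_mutex_lock(&lock);
            is_suspended = 0;
            run_queue_locked();             /* drain the backlog        */
            pthread_mutex_unlock(&lock);
    }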
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 740bbb942594..f37f4978dabb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
1699static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) 1699static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1700{ 1700{
1701 struct pl330_thread *thrd = NULL; 1701 struct pl330_thread *thrd = NULL;
1702 unsigned long flags;
1703 int chans, i; 1702 int chans, i;
1704 1703
1705 if (pl330->state == DYING) 1704 if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1707 1706
1708 chans = pl330->pcfg.num_chan; 1707 chans = pl330->pcfg.num_chan;
1709 1708
1710 spin_lock_irqsave(&pl330->lock, flags);
1711
1712 for (i = 0; i < chans; i++) { 1709 for (i = 0; i < chans; i++) {
1713 thrd = &pl330->channels[i]; 1710 thrd = &pl330->channels[i];
1714 if ((thrd->free) && (!_manager_ns(thrd) || 1711 if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1726 thrd = NULL; 1723 thrd = NULL;
1727 } 1724 }
1728 1725
1729 spin_unlock_irqrestore(&pl330->lock, flags);
1730
1731 return thrd; 1726 return thrd;
1732} 1727}
1733 1728
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
1745static void pl330_release_channel(struct pl330_thread *thrd) 1740static void pl330_release_channel(struct pl330_thread *thrd)
1746{ 1741{
1747 struct pl330_dmac *pl330; 1742 struct pl330_dmac *pl330;
1748 unsigned long flags;
1749 1743
1750 if (!thrd || thrd->free) 1744 if (!thrd || thrd->free)
1751 return; 1745 return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
1757 1751
1758 pl330 = thrd->dmac; 1752 pl330 = thrd->dmac;
1759 1753
1760 spin_lock_irqsave(&pl330->lock, flags);
1761 _free_event(thrd, thrd->ev); 1754 _free_event(thrd, thrd->ev);
1762 thrd->free = true; 1755 thrd->free = true;
1763 spin_unlock_irqrestore(&pl330->lock, flags);
1764} 1756}
1765 1757
1766/* Initialize the structure for PL330 configuration, that can be used 1758/* Initialize the structure for PL330 configuration, that can be used
@@ -1867,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
1867 * Alloc MicroCode buffer for 'chans' Channel threads. 1859 * Alloc MicroCode buffer for 'chans' Channel threads.
1868 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) 1860 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
1869 */ 1861 */
1870 pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev, 1862 pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
1871 chans * pl330->mcbufsz, 1863 chans * pl330->mcbufsz,
1872 &pl330->mcode_bus, GFP_KERNEL); 1864 &pl330->mcode_bus, GFP_KERNEL,
1865 DMA_ATTR_PRIVILEGED);
1873 if (!pl330->mcode_cpu) { 1866 if (!pl330->mcode_cpu) {
1874 dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", 1867 dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
1875 __func__, __LINE__); 1868 __func__, __LINE__);
@@ -2122,20 +2115,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2122 struct pl330_dmac *pl330 = pch->dmac; 2115 struct pl330_dmac *pl330 = pch->dmac;
2123 unsigned long flags; 2116 unsigned long flags;
2124 2117
2125 spin_lock_irqsave(&pch->lock, flags); 2118 spin_lock_irqsave(&pl330->lock, flags);
2126 2119
2127 dma_cookie_init(chan); 2120 dma_cookie_init(chan);
2128 pch->cyclic = false; 2121 pch->cyclic = false;
2129 2122
2130 pch->thread = pl330_request_channel(pl330); 2123 pch->thread = pl330_request_channel(pl330);
2131 if (!pch->thread) { 2124 if (!pch->thread) {
2132 spin_unlock_irqrestore(&pch->lock, flags); 2125 spin_unlock_irqrestore(&pl330->lock, flags);
2133 return -ENOMEM; 2126 return -ENOMEM;
2134 } 2127 }
2135 2128
2136 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); 2129 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
2137 2130
2138 spin_unlock_irqrestore(&pch->lock, flags); 2131 spin_unlock_irqrestore(&pl330->lock, flags);
2139 2132
2140 return 1; 2133 return 1;
2141} 2134}
@@ -2238,12 +2231,13 @@ static int pl330_pause(struct dma_chan *chan)
2238static void pl330_free_chan_resources(struct dma_chan *chan) 2231static void pl330_free_chan_resources(struct dma_chan *chan)
2239{ 2232{
2240 struct dma_pl330_chan *pch = to_pchan(chan); 2233 struct dma_pl330_chan *pch = to_pchan(chan);
2234 struct pl330_dmac *pl330 = pch->dmac;
2241 unsigned long flags; 2235 unsigned long flags;
2242 2236
2243 tasklet_kill(&pch->task); 2237 tasklet_kill(&pch->task);
2244 2238
2245 pm_runtime_get_sync(pch->dmac->ddma.dev); 2239 pm_runtime_get_sync(pch->dmac->ddma.dev);
2246 spin_lock_irqsave(&pch->lock, flags); 2240 spin_lock_irqsave(&pl330->lock, flags);
2247 2241
2248 pl330_release_channel(pch->thread); 2242 pl330_release_channel(pch->thread);
2249 pch->thread = NULL; 2243 pch->thread = NULL;
@@ -2251,7 +2245,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2251 if (pch->cyclic) 2245 if (pch->cyclic)
2252 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); 2246 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2253 2247
2254 spin_unlock_irqrestore(&pch->lock, flags); 2248 spin_unlock_irqrestore(&pl330->lock, flags);
2255 pm_runtime_mark_last_busy(pch->dmac->ddma.dev); 2249 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2256 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); 2250 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2257} 2251}
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 921dfa047202..260c4b4b492e 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
187struct exit_boot_struct { 187struct exit_boot_struct {
188 efi_memory_desc_t *runtime_map; 188 efi_memory_desc_t *runtime_map;
189 int *runtime_entry_count; 189 int *runtime_entry_count;
190 void *new_fdt_addr;
190}; 191};
191 192
192static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, 193static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
202 efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, 203 efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
203 p->runtime_map, p->runtime_entry_count); 204 p->runtime_map, p->runtime_entry_count);
204 205
205 return EFI_SUCCESS; 206 return update_fdt_memmap(p->new_fdt_addr, map);
206} 207}
207 208
208/* 209/*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
300 301
301 priv.runtime_map = runtime_map; 302 priv.runtime_map = runtime_map;
302 priv.runtime_entry_count = &runtime_entry_count; 303 priv.runtime_entry_count = &runtime_entry_count;
304 priv.new_fdt_addr = (void *)*new_fdt_addr;
303 status = efi_exit_boot_services(sys_table, handle, &map, &priv, 305 status = efi_exit_boot_services(sys_table, handle, &map, &priv,
304 exit_boot_func); 306 exit_boot_func);
305 307
306 if (status == EFI_SUCCESS) { 308 if (status == EFI_SUCCESS) {
307 efi_set_virtual_address_map_t *svam; 309 efi_set_virtual_address_map_t *svam;
308 310
309 status = update_fdt_memmap((void *)*new_fdt_addr, &map);
310 if (status != EFI_SUCCESS) {
311 /*
312 * The kernel won't get far without the memory map, but
313 * may still be able to print something meaningful so
314 * return success here.
315 */
316 return EFI_SUCCESS;
317 }
318
319 /* Install the new virtual address map */ 311 /* Install the new virtual address map */
320 svam = sys_table->runtime->set_virtual_address_map; 312 svam = sys_table->runtime->set_virtual_address_map;
321 status = svam(runtime_entry_count * desc_size, desc_size, 313 status = svam(runtime_entry_count * desc_size, desc_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e2b0b1646f99..0635829b18cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
254 } 254 }
255 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); 255 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
256 256
257 if (adev->mode_info.num_crtc)
258 amdgpu_display_set_vga_render_state(adev, false);
259
257 gmc_v6_0_mc_stop(adev, &save); 260 gmc_v6_0_mc_stop(adev, &save);
258 261
259 if (gmc_v6_0_wait_for_idle((void *)adev)) { 262 if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
283 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 286 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
284 } 287 }
285 gmc_v6_0_mc_resume(adev, &save); 288 gmc_v6_0_mc_resume(adev, &save);
286 amdgpu_display_set_vga_render_state(adev, false);
287} 289}
288 290
289static int gmc_v6_0_mc_init(struct amdgpu_device *adev) 291static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 50f5cf7b69d1..fdfb1ec17e66 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
2032 } 2032 }
2033 2033
2034 for_each_crtc_in_state(state, crtc, crtc_state, i) { 2034 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2035 struct drm_pending_vblank_event *event = crtc_state->event;
2035 /* 2036 /*
2036 * TEST_ONLY and PAGE_FLIP_EVENT are mutually 2037 * Free the allocated event. drm_atomic_helper_setup_commit
2037 * exclusive, if they weren't, this code should be 2038 * can allocate an event too, so only free it if it's ours
2038 * called on success for TEST_ONLY too. 2039 * to prevent a double free in drm_atomic_state_clear.
2039 */ 2040 */
2040 if (crtc_state->event) 2041 if (event && (event->base.fence || event->base.file_priv)) {
2041 drm_event_cancel_free(dev, &crtc_state->event->base); 2042 drm_event_cancel_free(dev, &event->base);
2043 crtc_state->event = NULL;
2044 }
2042 } 2045 }
2043 2046
2044 if (!fence_state) 2047 if (!fence_state)
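The double-free fix keys off ownership: only events that carry userspace completion state (an out-fence or a file_priv) were allocated by this path, so only those are freed here; helper-allocated events are left for drm_atomic_state_clear(). A small sketch of that ownership test with hypothetical types:

    #include <stdlib.h>

    struct event {
            void *file_priv;  /* set when userspace asked for the event */
            void *fence;      /* set when an out-fence was attached     */
    };

    /* Free only events this path created; events without userspace
     * state belong to another allocator and its teardown frees them. */
    static void cancel_event(struct event **slot)
    {
            struct event *e = *slot;

            if (e && (e->fence || e->file_priv)) {
                    free(e);
                    *slot = NULL;   /* clear the pointer: no double free */
            }
    }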
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 34f757bcabae..4594477dee00 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1666 1666
1667 funcs = plane->helper_private; 1667 funcs = plane->helper_private;
1668 1668
1669 if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
1670 continue;
1671
1672 if (funcs->prepare_fb) { 1669 if (funcs->prepare_fb) {
1673 ret = funcs->prepare_fb(plane, plane_state); 1670 ret = funcs->prepare_fb(plane, plane_state);
1674 if (ret) 1671 if (ret)
@@ -1685,9 +1682,6 @@ fail:
1685 if (j >= i) 1682 if (j >= i)
1686 continue; 1683 continue;
1687 1684
1688 if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
1689 continue;
1690
1691 funcs = plane->helper_private; 1685 funcs = plane->helper_private;
1692 1686
1693 if (funcs->cleanup_fb) 1687 if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
1954 for_each_plane_in_state(old_state, plane, plane_state, i) { 1948 for_each_plane_in_state(old_state, plane, plane_state, i) {
1955 const struct drm_plane_helper_funcs *funcs; 1949 const struct drm_plane_helper_funcs *funcs;
1956 1950
1957 if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
1958 continue;
1959
1960 funcs = plane->helper_private; 1951 funcs = plane->helper_private;
1961 1952
1962 if (funcs->cleanup_fb) 1953 if (funcs->cleanup_fb)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5a4526289392..7a7019ac9388 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
225 225
226 INIT_LIST_HEAD(&connector->probed_modes); 226 INIT_LIST_HEAD(&connector->probed_modes);
227 INIT_LIST_HEAD(&connector->modes); 227 INIT_LIST_HEAD(&connector->modes);
228 mutex_init(&connector->mutex);
228 connector->edid_blob_ptr = NULL; 229 connector->edid_blob_ptr = NULL;
229 connector->status = connector_status_unknown; 230 connector->status = connector_status_unknown;
230 231
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
359 connector->funcs->atomic_destroy_state(connector, 360 connector->funcs->atomic_destroy_state(connector,
360 connector->state); 361 connector->state);
361 362
363 mutex_destroy(&connector->mutex);
364
362 memset(connector, 0, sizeof(*connector)); 365 memset(connector, 0, sizeof(*connector));
363} 366}
364EXPORT_SYMBOL(drm_connector_cleanup); 367EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
374 */ 377 */
375int drm_connector_register(struct drm_connector *connector) 378int drm_connector_register(struct drm_connector *connector)
376{ 379{
377 int ret; 380 int ret = 0;
378 381
379 if (connector->registered) 382 if (!connector->dev->registered)
380 return 0; 383 return 0;
381 384
385 mutex_lock(&connector->mutex);
386 if (connector->registered)
387 goto unlock;
388
382 ret = drm_sysfs_connector_add(connector); 389 ret = drm_sysfs_connector_add(connector);
383 if (ret) 390 if (ret)
384 return ret; 391 goto unlock;
385 392
386 ret = drm_debugfs_connector_add(connector); 393 ret = drm_debugfs_connector_add(connector);
387 if (ret) { 394 if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
397 drm_mode_object_register(connector->dev, &connector->base); 404 drm_mode_object_register(connector->dev, &connector->base);
398 405
399 connector->registered = true; 406 connector->registered = true;
400 return 0; 407 goto unlock;
401 408
402err_debugfs: 409err_debugfs:
403 drm_debugfs_connector_remove(connector); 410 drm_debugfs_connector_remove(connector);
404err_sysfs: 411err_sysfs:
405 drm_sysfs_connector_remove(connector); 412 drm_sysfs_connector_remove(connector);
413unlock:
414 mutex_unlock(&connector->mutex);
406 return ret; 415 return ret;
407} 416}
408EXPORT_SYMBOL(drm_connector_register); 417EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
415 */ 424 */
416void drm_connector_unregister(struct drm_connector *connector) 425void drm_connector_unregister(struct drm_connector *connector)
417{ 426{
418 if (!connector->registered) 427 mutex_lock(&connector->mutex);
428 if (!connector->registered) {
429 mutex_unlock(&connector->mutex);
419 return; 430 return;
431 }
420 432
421 if (connector->funcs->early_unregister) 433 if (connector->funcs->early_unregister)
422 connector->funcs->early_unregister(connector); 434 connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
425 drm_debugfs_connector_remove(connector); 437 drm_debugfs_connector_remove(connector);
426 438
427 connector->registered = false; 439 connector->registered = false;
440 mutex_unlock(&connector->mutex);
428} 441}
429EXPORT_SYMBOL(drm_connector_unregister); 442EXPORT_SYMBOL(drm_connector_unregister);
430 443
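The connector changes make register/unregister idempotent and race-free by pairing a 'registered' flag with a per-object mutex, and by refusing to register at all before the device itself is registered. A condensed sketch of the locking pattern alone, using pthreads and invented names:

    #include <pthread.h>

    struct conn {
            pthread_mutex_t mutex;
            int registered;
    };

    static int conn_register(struct conn *c)
    {
            int ret = 0;

            pthread_mutex_lock(&c->mutex);
            if (c->registered)      /* already done: succeed quietly */
                    goto unlock;
            /* ... sysfs/debugfs-style registration steps go here,
             *     jumping to unlock with ret set on failure ... */
            c->registered = 1;
    unlock:
            pthread_mutex_unlock(&c->mutex);
            return ret;
    }

    static void conn_unregister(struct conn *c)
    {
            pthread_mutex_lock(&c->mutex);
            if (c->registered) {
                    /* ... teardown in reverse order ... */
                    c->registered = 0;
            }
            pthread_mutex_unlock(&c->mutex);
    }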
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a525751b4559..6594b4088f11 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
745 if (ret) 745 if (ret)
746 goto err_minors; 746 goto err_minors;
747 747
748 dev->registered = true;
749
748 if (dev->driver->load) { 750 if (dev->driver->load) {
749 ret = dev->driver->load(dev, flags); 751 ret = dev->driver->load(dev, flags);
750 if (ret) 752 if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
785 787
786 drm_lastclose(dev); 788 drm_lastclose(dev);
787 789
790 dev->registered = false;
791
788 if (drm_core_check_feature(dev, DRIVER_MODESET)) 792 if (drm_core_check_feature(dev, DRIVER_MODESET))
789 drm_modeset_unregister_all(dev); 793 drm_modeset_unregister_all(dev);
790 794
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 69bc3b0c4390..8493e19b563a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1012,6 +1012,8 @@ struct intel_fbc {
1012 struct work_struct underrun_work; 1012 struct work_struct underrun_work;
1013 1013
1014 struct intel_fbc_state_cache { 1014 struct intel_fbc_state_cache {
1015 struct i915_vma *vma;
1016
1015 struct { 1017 struct {
1016 unsigned int mode_flags; 1018 unsigned int mode_flags;
1017 uint32_t hsw_bdw_pixel_rate; 1019 uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
1025 } plane; 1027 } plane;
1026 1028
1027 struct { 1029 struct {
1028 u64 ilk_ggtt_offset;
1029 uint32_t pixel_format; 1030 uint32_t pixel_format;
1030 unsigned int stride; 1031 unsigned int stride;
1031 int fence_reg;
1032 unsigned int tiling_mode;
1033 } fb; 1032 } fb;
1034 } state_cache; 1033 } state_cache;
1035 1034
1036 struct intel_fbc_reg_params { 1035 struct intel_fbc_reg_params {
1036 struct i915_vma *vma;
1037
1037 struct { 1038 struct {
1038 enum pipe pipe; 1039 enum pipe pipe;
1039 enum plane plane; 1040 enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
1041 } crtc; 1042 } crtc;
1042 1043
1043 struct { 1044 struct {
1044 u64 ggtt_offset;
1045 uint32_t pixel_format; 1045 uint32_t pixel_format;
1046 unsigned int stride; 1046 unsigned int stride;
1047 int fence_reg;
1048 } fb; 1047 } fb;
1049 1048
1050 int cfb_size; 1049 int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
3168 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); 3167 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
3169} 3168}
3170 3169
3171static inline unsigned long
3172i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
3173 const struct i915_ggtt_view *view)
3174{
3175 return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
3176}
3177
3178/* i915_gem_fence_reg.c */ 3170/* i915_gem_fence_reg.c */
3179int __must_check i915_vma_get_fence(struct i915_vma *vma); 3171int __must_check i915_vma_get_fence(struct i915_vma *vma);
3180int __must_check i915_vma_put_fence(struct i915_vma *vma); 3172int __must_check i915_vma_put_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dbe9fb41ae53..8d3e515f27ba 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
85 85
86 __drm_atomic_helper_plane_duplicate_state(plane, state); 86 __drm_atomic_helper_plane_duplicate_state(plane, state);
87 87
88 intel_state->vma = NULL;
89
88 return state; 90 return state;
89} 91}
90 92
@@ -100,6 +102,24 @@ void
100intel_plane_destroy_state(struct drm_plane *plane, 102intel_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state) 103 struct drm_plane_state *state)
102{ 104{
105 struct i915_vma *vma;
106
107 vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
108
109 /*
110 * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
111 * We currently don't clear all planes during driver unload, so we have
112 * to be able to unpin vma here for now.
113 *
114 * Normally this can only happen during unload when kmscon is disabled
115 * and userspace doesn't attempt to set a framebuffer at all.
116 */
117 if (vma) {
118 mutex_lock(&plane->dev->struct_mutex);
119 intel_unpin_fb_vma(vma);
120 mutex_unlock(&plane->dev->struct_mutex);
121 }
122
103 drm_atomic_helper_plane_destroy_state(plane, state); 123 drm_atomic_helper_plane_destroy_state(plane, state);
104} 124}
105 125
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0b9aa7a0483..f1e4a21d4664 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2235 i915_vma_pin_fence(vma); 2235 i915_vma_pin_fence(vma);
2236 } 2236 }
2237 2237
2238 i915_vma_get(vma);
2238err: 2239err:
2239 intel_runtime_pm_put(dev_priv); 2240 intel_runtime_pm_put(dev_priv);
2240 return vma; 2241 return vma;
2241} 2242}
2242 2243
2243void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2244void intel_unpin_fb_vma(struct i915_vma *vma)
2244{ 2245{
2245 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2246 lockdep_assert_held(&vma->vm->dev->struct_mutex);
2246 struct i915_ggtt_view view;
2247 struct i915_vma *vma;
2248
2249 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2250
2251 intel_fill_fb_ggtt_view(&view, fb, rotation);
2252 vma = i915_gem_object_to_ggtt(obj, &view);
2253 2247
2254 if (WARN_ON_ONCE(!vma)) 2248 if (WARN_ON_ONCE(!vma))
2255 return; 2249 return;
2256 2250
2257 i915_vma_unpin_fence(vma); 2251 i915_vma_unpin_fence(vma);
2258 i915_gem_object_unpin_from_display_plane(vma); 2252 i915_gem_object_unpin_from_display_plane(vma);
2253 i915_vma_put(vma);
2259} 2254}
2260 2255
2261static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2256static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2750 struct drm_device *dev = intel_crtc->base.dev; 2745 struct drm_device *dev = intel_crtc->base.dev;
2751 struct drm_i915_private *dev_priv = to_i915(dev); 2746 struct drm_i915_private *dev_priv = to_i915(dev);
2752 struct drm_crtc *c; 2747 struct drm_crtc *c;
2753 struct intel_crtc *i;
2754 struct drm_i915_gem_object *obj; 2748 struct drm_i915_gem_object *obj;
2755 struct drm_plane *primary = intel_crtc->base.primary; 2749 struct drm_plane *primary = intel_crtc->base.primary;
2756 struct drm_plane_state *plane_state = primary->state; 2750 struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2775 * an fb with another CRTC instead 2769 * an fb with another CRTC instead
2776 */ 2770 */
2777 for_each_crtc(dev, c) { 2771 for_each_crtc(dev, c) {
2778 i = to_intel_crtc(c); 2772 struct intel_plane_state *state;
2779 2773
2780 if (c == &intel_crtc->base) 2774 if (c == &intel_crtc->base)
2781 continue; 2775 continue;
2782 2776
2783 if (!i->active) 2777 if (!to_intel_crtc(c)->active)
2784 continue; 2778 continue;
2785 2779
2786 fb = c->primary->fb; 2780 state = to_intel_plane_state(c->primary->state);
2787 if (!fb) 2781 if (!state->vma)
2788 continue; 2782 continue;
2789 2783
2790 obj = intel_fb_obj(fb); 2784 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2791 if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { 2785 fb = c->primary->fb;
2792 drm_framebuffer_reference(fb); 2786 drm_framebuffer_reference(fb);
2793 goto valid_fb; 2787 goto valid_fb;
2794 } 2788 }
@@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2809 return; 2803 return;
2810 2804
2811valid_fb: 2805valid_fb:
2806 mutex_lock(&dev->struct_mutex);
2807 intel_state->vma =
2808 intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
2809 mutex_unlock(&dev->struct_mutex);
2810 if (IS_ERR(intel_state->vma)) {
2811 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2812 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2813
2814 intel_state->vma = NULL;
2815 drm_framebuffer_unreference(fb);
2816 return;
2817 }
2818
2812 plane_state->src_x = 0; 2819 plane_state->src_x = 0;
2813 plane_state->src_y = 0; 2820 plane_state->src_y = 0;
2814 plane_state->src_w = fb->width << 16; 2821 plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
3104 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 3111 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
3105 if (INTEL_GEN(dev_priv) >= 4) { 3112 if (INTEL_GEN(dev_priv) >= 4) {
3106 I915_WRITE(DSPSURF(plane), 3113 I915_WRITE(DSPSURF(plane),
3107 intel_fb_gtt_offset(fb, rotation) + 3114 intel_plane_ggtt_offset(plane_state) +
3108 intel_crtc->dspaddr_offset); 3115 intel_crtc->dspaddr_offset);
3109 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 3116 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3110 I915_WRITE(DSPLINOFF(plane), linear_offset); 3117 I915_WRITE(DSPLINOFF(plane), linear_offset);
3111 } else { 3118 } else {
3112 I915_WRITE(DSPADDR(plane), 3119 I915_WRITE(DSPADDR(plane),
3113 intel_fb_gtt_offset(fb, rotation) + 3120 intel_plane_ggtt_offset(plane_state) +
3114 intel_crtc->dspaddr_offset); 3121 intel_crtc->dspaddr_offset);
3115 } 3122 }
3116 POSTING_READ(reg); 3123 POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
3207 3214
3208 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 3215 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
3209 I915_WRITE(DSPSURF(plane), 3216 I915_WRITE(DSPSURF(plane),
3210 intel_fb_gtt_offset(fb, rotation) + 3217 intel_plane_ggtt_offset(plane_state) +
3211 intel_crtc->dspaddr_offset); 3218 intel_crtc->dspaddr_offset);
3212 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3219 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3213 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 3220 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
3230 } 3237 }
3231} 3238}
3232 3239
3233u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
3234 unsigned int rotation)
3235{
3236 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3237 struct i915_ggtt_view view;
3238 struct i915_vma *vma;
3239
3240 intel_fill_fb_ggtt_view(&view, fb, rotation);
3241
3242 vma = i915_gem_object_to_ggtt(obj, &view);
3243 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
3244 view.type))
3245 return -1;
3246
3247 return i915_ggtt_offset(vma);
3248}
3249
3250static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3240static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3251{ 3241{
3252 struct drm_device *dev = intel_crtc->base.dev; 3242 struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
3441 } 3431 }
3442 3432
3443 I915_WRITE(PLANE_SURF(pipe, 0), 3433 I915_WRITE(PLANE_SURF(pipe, 0),
3444 intel_fb_gtt_offset(fb, rotation) + surf_addr); 3434 intel_plane_ggtt_offset(plane_state) + surf_addr);
3445 3435
3446 POSTING_READ(PLANE_SURF(pipe, 0)); 3436 POSTING_READ(PLANE_SURF(pipe, 0));
3447} 3437}
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
11536 flush_work(&work->mmio_work); 11526 flush_work(&work->mmio_work);
11537 11527
11538 mutex_lock(&dev->struct_mutex); 11528 mutex_lock(&dev->struct_mutex);
11539 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 11529 intel_unpin_fb_vma(work->old_vma);
11540 i915_gem_object_put(work->pending_flip_obj); 11530 i915_gem_object_put(work->pending_flip_obj);
11541 mutex_unlock(&dev->struct_mutex); 11531 mutex_unlock(&dev->struct_mutex);
11542 11532
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
12246 goto cleanup_pending; 12236 goto cleanup_pending;
12247 } 12237 }
12248 12238
12249 work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); 12239 work->old_vma = to_intel_plane_state(primary->state)->vma;
12250 work->gtt_offset += intel_crtc->dspaddr_offset; 12240 to_intel_plane_state(primary->state)->vma = vma;
12241
12242 work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
12251 work->rotation = crtc->primary->state->rotation; 12243 work->rotation = crtc->primary->state->rotation;
12252 12244
12253 /* 12245 /*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
12301cleanup_request: 12293cleanup_request:
12302 i915_add_request_no_flush(request); 12294 i915_add_request_no_flush(request);
12303cleanup_unpin: 12295cleanup_unpin:
12304 intel_unpin_fb_obj(fb, crtc->primary->state->rotation); 12296 to_intel_plane_state(primary->state)->vma = work->old_vma;
12297 intel_unpin_fb_vma(vma);
12305cleanup_pending: 12298cleanup_pending:
12306 atomic_dec(&intel_crtc->unpin_work_count); 12299 atomic_dec(&intel_crtc->unpin_work_count);
12307unlock: 12300unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
14794 DRM_DEBUG_KMS("failed to pin object\n"); 14787 DRM_DEBUG_KMS("failed to pin object\n");
14795 return PTR_ERR(vma); 14788 return PTR_ERR(vma);
14796 } 14789 }
14790
14791 to_intel_plane_state(new_state)->vma = vma;
14797 } 14792 }
14798 14793
14799 return 0; 14794 return 0;
@@ -14812,19 +14807,12 @@ void
14812intel_cleanup_plane_fb(struct drm_plane *plane, 14807intel_cleanup_plane_fb(struct drm_plane *plane,
14813 struct drm_plane_state *old_state) 14808 struct drm_plane_state *old_state)
14814{ 14809{
14815 struct drm_i915_private *dev_priv = to_i915(plane->dev); 14810 struct i915_vma *vma;
14816 struct intel_plane_state *old_intel_state;
14817 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
14818 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
14819
14820 old_intel_state = to_intel_plane_state(old_state);
14821
14822 if (!obj && !old_obj)
14823 return;
14824 14811
14825 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || 14812 /* Should only be called after a successful intel_prepare_plane_fb()! */
14826 !INTEL_INFO(dev_priv)->cursor_needs_physical)) 14813 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
14827 intel_unpin_fb_obj(old_state->fb, old_state->rotation); 14814 if (vma)
14815 intel_unpin_fb_vma(vma);
14828} 14816}
14829 14817
14830int 14818int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
15166 if (!obj) 15154 if (!obj)
15167 addr = 0; 15155 addr = 0;
15168 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) 15156 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
15169 addr = i915_gem_object_ggtt_offset(obj, NULL); 15157 addr = intel_plane_ggtt_offset(state);
15170 else 15158 else
15171 addr = obj->phys_handle->busaddr; 15159 addr = obj->phys_handle->busaddr;
15172 15160
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
17066void intel_modeset_gem_init(struct drm_device *dev) 17054void intel_modeset_gem_init(struct drm_device *dev)
17067{ 17055{
17068 struct drm_i915_private *dev_priv = to_i915(dev); 17056 struct drm_i915_private *dev_priv = to_i915(dev);
17069 struct drm_crtc *c;
17070 struct drm_i915_gem_object *obj;
17071 17057
17072 intel_init_gt_powersave(dev_priv); 17058 intel_init_gt_powersave(dev_priv);
17073 17059
17074 intel_modeset_init_hw(dev); 17060 intel_modeset_init_hw(dev);
17075 17061
17076 intel_setup_overlay(dev_priv); 17062 intel_setup_overlay(dev_priv);
17077
17078 /*
17079 * Make sure any fbs we allocated at startup are properly
17080 * pinned & fenced. When we do the allocation it's too early
17081 * for this.
17082 */
17083 for_each_crtc(dev, c) {
17084 struct i915_vma *vma;
17085
17086 obj = intel_fb_obj(c->primary->fb);
17087 if (obj == NULL)
17088 continue;
17089
17090 mutex_lock(&dev->struct_mutex);
17091 vma = intel_pin_and_fence_fb_obj(c->primary->fb,
17092 c->primary->state->rotation);
17093 mutex_unlock(&dev->struct_mutex);
17094 if (IS_ERR(vma)) {
17095 DRM_ERROR("failed to pin boot fb on pipe %d\n",
17096 to_intel_crtc(c)->pipe);
17097 drm_framebuffer_unreference(c->primary->fb);
17098 c->primary->fb = NULL;
17099 c->primary->crtc = c->primary->state->crtc = NULL;
17100 update_state_fb(c->primary);
17101 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
17102 }
17103 }
17104} 17063}
17105 17064
17106int intel_connector_register(struct drm_connector *connector) 17065int intel_connector_register(struct drm_connector *connector)
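Taken together, the intel_display.c hunks switch framebuffer pinning to a simple ownership contract: intel_prepare_plane_fb() pins the framebuffer and stores the resulting vma in the plane state, every register writer reads the offset from that vma, and intel_cleanup_plane_fb() takes the vma back out and unpins it. A condensed restatement of that contract (hypothetical sketch_* wrappers; both paths run under struct_mutex in the real driver):

static int sketch_prepare_plane_fb(struct drm_plane_state *new_state)
{
	struct i915_vma *vma;

	vma = intel_pin_and_fence_fb_obj(new_state->fb, new_state->rotation);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* The plane state now owns the pin and the vma reference. */
	to_intel_plane_state(new_state)->vma = vma;
	return 0;
}

static void sketch_cleanup_plane_fb(struct drm_plane_state *old_state)
{
	struct i915_vma *vma;

	/* Take sole ownership back, then drop fence, display pin, ref. */
	vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
	if (vma)
		intel_unpin_fb_vma(vma);
}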
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd72ae171eeb..03a2112004f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -377,6 +377,7 @@ struct intel_atomic_state {
377struct intel_plane_state { 377struct intel_plane_state {
378 struct drm_plane_state base; 378 struct drm_plane_state base;
379 struct drm_rect clip; 379 struct drm_rect clip;
380 struct i915_vma *vma;
380 381
381 struct { 382 struct {
382 u32 offset; 383 u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
1046 struct work_struct mmio_work; 1047 struct work_struct mmio_work;
1047 1048
1048 struct drm_crtc *crtc; 1049 struct drm_crtc *crtc;
1050 struct i915_vma *old_vma;
1049 struct drm_framebuffer *old_fb; 1051 struct drm_framebuffer *old_fb;
1050 struct drm_i915_gem_object *pending_flip_obj; 1052 struct drm_i915_gem_object *pending_flip_obj;
1051 struct drm_pending_vblank_event *event; 1053 struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1273 struct drm_modeset_acquire_ctx *ctx); 1275 struct drm_modeset_acquire_ctx *ctx);
1274struct i915_vma * 1276struct i915_vma *
1275intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1277intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
1276void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1278void intel_unpin_fb_vma(struct i915_vma *vma);
1277struct drm_framebuffer * 1279struct drm_framebuffer *
1278__intel_framebuffer_create(struct drm_device *dev, 1280__intel_framebuffer_create(struct drm_device *dev,
1279 struct drm_mode_fb_cmd2 *mode_cmd, 1281 struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
1362int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); 1364int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1363int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); 1365int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
1364 1366
1365u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); 1367static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
1368{
1369 return i915_ggtt_offset(state->vma);
1370}
1366 1371
1367u32 skl_plane_ctl_format(uint32_t pixel_format); 1372u32 skl_plane_ctl_format(uint32_t pixel_format);
1368u32 skl_plane_ctl_tiling(uint64_t fb_modifier); 1373u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
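One caveat worth spelling out: intel_plane_ggtt_offset() dereferences state->vma unconditionally, so it is only safe after intel_prepare_plane_fb() has successfully pinned the framebuffer; callers that can race with teardown check state->vma first, as intel_find_initial_plane_obj() does above. A hypothetical guarded caller:

static u32 sketch_surf_address(const struct intel_plane_state *state,
			       u32 dspaddr_offset)
{
	if (WARN_ON(!state->vma))	/* pin must have succeeded first */
		return 0;
	return intel_plane_ggtt_offset(state) + dspaddr_offset;
}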
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 62f215b12eb5..f3a1d6a5cabe 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
173 if (IS_I945GM(dev_priv)) 173 if (IS_I945GM(dev_priv))
174 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 174 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
175 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 175 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
176 fbc_ctl |= params->fb.fence_reg; 176 fbc_ctl |= params->vma->fence->id;
177 I915_WRITE(FBC_CONTROL, fbc_ctl); 177 I915_WRITE(FBC_CONTROL, fbc_ctl);
178} 178}
179 179
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
193 else 193 else
194 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 194 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
195 195
196 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 196 if (params->vma->fence) {
197 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; 197 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
198 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 198 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
199 } else { 199 } else {
200 I915_WRITE(DPFC_FENCE_YOFF, 0); 200 I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
251 break; 251 break;
252 } 252 }
253 253
254 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 254 if (params->vma->fence) {
255 dpfc_ctl |= DPFC_CTL_FENCE_EN; 255 dpfc_ctl |= DPFC_CTL_FENCE_EN;
256 if (IS_GEN5(dev_priv)) 256 if (IS_GEN5(dev_priv))
257 dpfc_ctl |= params->fb.fence_reg; 257 dpfc_ctl |= params->vma->fence->id;
258 if (IS_GEN6(dev_priv)) { 258 if (IS_GEN6(dev_priv)) {
259 I915_WRITE(SNB_DPFC_CTL_SA, 259 I915_WRITE(SNB_DPFC_CTL_SA,
260 SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); 260 SNB_CPU_FENCE_ENABLE |
261 params->vma->fence->id);
261 I915_WRITE(DPFC_CPU_FENCE_OFFSET, 262 I915_WRITE(DPFC_CPU_FENCE_OFFSET,
262 params->crtc.fence_y_offset); 263 params->crtc.fence_y_offset);
263 } 264 }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
269 } 270 }
270 271
271 I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 272 I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
272 I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); 273 I915_WRITE(ILK_FBC_RT_BASE,
274 i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
273 /* enable it... */ 275 /* enable it... */
274 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 276 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
275 277
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
319 break; 321 break;
320 } 322 }
321 323
322 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 324 if (params->vma->fence) {
323 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 325 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
324 I915_WRITE(SNB_DPFC_CTL_SA, 326 I915_WRITE(SNB_DPFC_CTL_SA,
325 SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); 327 SNB_CPU_FENCE_ENABLE |
328 params->vma->fence->id);
326 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); 329 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
327 } else { 330 } else {
328 I915_WRITE(SNB_DPFC_CTL_SA,0); 331 I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
727 return effective_w <= max_w && effective_h <= max_h; 730 return effective_w <= max_w && effective_h <= max_h;
728} 731}
729 732
730/* XXX replace me when we have VMA tracking for intel_plane_state */
731static int get_fence_id(struct drm_framebuffer *fb)
732{
733 struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
734
735 return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
736}
737
738static void intel_fbc_update_state_cache(struct intel_crtc *crtc, 733static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
739 struct intel_crtc_state *crtc_state, 734 struct intel_crtc_state *crtc_state,
740 struct intel_plane_state *plane_state) 735 struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
743 struct intel_fbc *fbc = &dev_priv->fbc; 738 struct intel_fbc *fbc = &dev_priv->fbc;
744 struct intel_fbc_state_cache *cache = &fbc->state_cache; 739 struct intel_fbc_state_cache *cache = &fbc->state_cache;
745 struct drm_framebuffer *fb = plane_state->base.fb; 740 struct drm_framebuffer *fb = plane_state->base.fb;
746 struct drm_i915_gem_object *obj; 741
742 cache->vma = NULL;
747 743
748 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; 744 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
749 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 745 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
758 if (!cache->plane.visible) 754 if (!cache->plane.visible)
759 return; 755 return;
760 756
761 obj = intel_fb_obj(fb);
762
763 /* FIXME: We lack the proper locking here, so only run this on the
764 * platforms that need. */
765 if (IS_GEN(dev_priv, 5, 6))
766 cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
767 cache->fb.pixel_format = fb->pixel_format; 757 cache->fb.pixel_format = fb->pixel_format;
768 cache->fb.stride = fb->pitches[0]; 758 cache->fb.stride = fb->pitches[0];
769 cache->fb.fence_reg = get_fence_id(fb); 759
770 cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); 760 cache->vma = plane_state->vma;
771} 761}
772 762
773static bool intel_fbc_can_activate(struct intel_crtc *crtc) 763static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
784 return false; 774 return false;
785 } 775 }
786 776
787 if (!cache->plane.visible) { 777 if (!cache->vma) {
788 fbc->no_fbc_reason = "primary plane not visible"; 778 fbc->no_fbc_reason = "primary plane not visible";
789 return false; 779 return false;
790 } 780 }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
807 * so have no fence associated with it) due to aperture constraints 797 * so have no fence associated with it) due to aperture constraints
808 * at the time of pinning. 798 * at the time of pinning.
809 */ 799 */
810 if (cache->fb.tiling_mode != I915_TILING_X || 800 if (!cache->vma->fence) {
811 cache->fb.fence_reg == I915_FENCE_REG_NONE) {
812 fbc->no_fbc_reason = "framebuffer not tiled or fenced"; 801 fbc->no_fbc_reason = "framebuffer not tiled or fenced";
813 return false; 802 return false;
814 } 803 }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
888 * zero. */ 877 * zero. */
889 memset(params, 0, sizeof(*params)); 878 memset(params, 0, sizeof(*params));
890 879
880 params->vma = cache->vma;
881
891 params->crtc.pipe = crtc->pipe; 882 params->crtc.pipe = crtc->pipe;
892 params->crtc.plane = crtc->plane; 883 params->crtc.plane = crtc->plane;
893 params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); 884 params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
894 885
895 params->fb.pixel_format = cache->fb.pixel_format; 886 params->fb.pixel_format = cache->fb.pixel_format;
896 params->fb.stride = cache->fb.stride; 887 params->fb.stride = cache->fb.stride;
897 params->fb.fence_reg = cache->fb.fence_reg;
898 888
899 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); 889 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
900
901 params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
902} 890}
903 891
904static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, 892static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
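With the vma cached directly in the FBC state, the old tiling-mode/fence-register pair collapses into NULL checks: a missing vma stands in for "plane not visible", and a vma without a fence covers both untiled and fence-starved framebuffers, since only X-tiled buffers receive a fence at pin time. A sketch of the resulting tests, assuming the state_cache shape from the hunks above:

static bool sketch_fbc_plane_usable(const struct intel_fbc_state_cache *cache)
{
	if (!cache->vma)		/* primary plane not visible/pinned */
		return false;
	if (!cache->vma->fence)		/* not X-tiled, or no fence free */
		return false;
	return true;
}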
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 8cf2d80f2254..f4a8c4fc57c4 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
284out_destroy_fbi: 284out_destroy_fbi:
285 drm_fb_helper_release_fbi(helper); 285 drm_fb_helper_release_fbi(helper);
286out_unpin: 286out_unpin:
287 intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); 287 intel_unpin_fb_vma(vma);
288out_unlock: 288out_unlock:
289 mutex_unlock(&dev->struct_mutex); 289 mutex_unlock(&dev->struct_mutex);
290 return ret; 290 return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
549 549
550 if (ifbdev->fb) { 550 if (ifbdev->fb) {
551 mutex_lock(&ifbdev->helper.dev->struct_mutex); 551 mutex_lock(&ifbdev->helper.dev->struct_mutex);
552 intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); 552 intel_unpin_fb_vma(ifbdev->vma);
553 mutex_unlock(&ifbdev->helper.dev->struct_mutex); 553 mutex_unlock(&ifbdev->helper.dev->struct_mutex);
554 554
555 drm_framebuffer_remove(&ifbdev->fb->base); 555 drm_framebuffer_remove(&ifbdev->fb->base);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8f131a08d440..242a73e66d82 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
273 273
274 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); 274 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
275 I915_WRITE(PLANE_SURF(pipe, plane), 275 I915_WRITE(PLANE_SURF(pipe, plane),
276 intel_fb_gtt_offset(fb, rotation) + surf_addr); 276 intel_plane_ggtt_offset(plane_state) + surf_addr);
277 POSTING_READ(PLANE_SURF(pipe, plane)); 277 POSTING_READ(PLANE_SURF(pipe, plane));
278} 278}
279 279
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
458 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); 458 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
459 I915_WRITE(SPCNTR(pipe, plane), sprctl); 459 I915_WRITE(SPCNTR(pipe, plane), sprctl);
460 I915_WRITE(SPSURF(pipe, plane), 460 I915_WRITE(SPSURF(pipe, plane),
461 intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); 461 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
462 POSTING_READ(SPSURF(pipe, plane)); 462 POSTING_READ(SPSURF(pipe, plane));
463} 463}
464 464
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
594 I915_WRITE(SPRSCALE(pipe), sprscale); 594 I915_WRITE(SPRSCALE(pipe), sprscale);
595 I915_WRITE(SPRCTL(pipe), sprctl); 595 I915_WRITE(SPRCTL(pipe), sprctl);
596 I915_WRITE(SPRSURF(pipe), 596 I915_WRITE(SPRSURF(pipe),
597 intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); 597 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
598 POSTING_READ(SPRSURF(pipe)); 598 POSTING_READ(SPRSURF(pipe));
599} 599}
600 600
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
721 I915_WRITE(DVSSCALE(pipe), dvsscale); 721 I915_WRITE(DVSSCALE(pipe), dvsscale);
722 I915_WRITE(DVSCNTR(pipe), dvscntr); 722 I915_WRITE(DVSCNTR(pipe), dvscntr);
723 I915_WRITE(DVSSURF(pipe), 723 I915_WRITE(DVSSURF(pipe),
724 intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); 724 intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
725 POSTING_READ(DVSSURF(pipe)); 725 POSTING_READ(DVSSURF(pipe));
726} 726}
727 727
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 74856a8b8f35..e64f52464ecf 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
222 uint32_t mpllP; 222 uint32_t mpllP;
223 223
224 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); 224 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
225 mpllP = (mpllP >> 8) & 0xf;
225 if (!mpllP) 226 if (!mpllP)
226 mpllP = 4; 227 mpllP = 4;
227 228
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
232 uint32_t clock; 233 uint32_t clock;
233 234
234 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); 235 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
235 return clock; 236 return clock / 1000;
236 } 237 }
237 238
238 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); 239 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
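Both hw.c fixes correct how a raw register value is interpreted: per the patch, the MPLL post-divider is the 4-bit field at bits 11:8 of the config-space dword (previously the whole dword was tested and used), and the nForce 540 clock register reports Hz where every caller expects kHz. Illustrative helpers, with the field placement taken from the patch rather than verified against hardware documentation:

static inline uint32_t sketch_mpllP_field(uint32_t dword)
{
	return (dword >> 8) & 0xf;	/* 4-bit post-divider, bits 11:8 */
}

static inline uint32_t sketch_hz_to_khz(uint32_t hz)
{
	return hz / 1000;		/* callers work in kHz */
}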
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index ccdce1b4eec4..d5e58a38f160 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
99 struct nouveau_bo *bo; 99 struct nouveau_bo *bo;
100 struct nouveau_bo *bo_gart; 100 struct nouveau_bo *bo_gart;
101 u32 *suspend; 101 u32 *suspend;
102 struct mutex mutex;
102}; 103};
103 104
104int nv84_fence_context_new(struct nouveau_channel *); 105int nv84_fence_context_new(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
index 187ecdb82002..21a5775028cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.h
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
42} 42}
43 43
44/* nouveau_led.c */ 44/* nouveau_led.c */
45#if IS_ENABLED(CONFIG_LEDS_CLASS) 45#if IS_REACHABLE(CONFIG_LEDS_CLASS)
46int nouveau_led_init(struct drm_device *dev); 46int nouveau_led_init(struct drm_device *dev);
47void nouveau_led_suspend(struct drm_device *dev); 47void nouveau_led_suspend(struct drm_device *dev);
48void nouveau_led_resume(struct drm_device *dev); 48void nouveau_led_resume(struct drm_device *dev);
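The IS_ENABLED() to IS_REACHABLE() switch matters for the CONFIG_LEDS_CLASS=m, nouveau=y combination: IS_ENABLED() is true for both =y and =m, so built-in nouveau would reference symbols that only exist in a module and fail to link. IS_REACHABLE() is true only when the symbols can actually be resolved from the current object, i.e. the option is built-in, or it is modular and the referencing code is modular too. The usual pairing with inline stubs (stub shape assumed, matching the guard above):

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
int nouveau_led_init(struct drm_device *dev);
#else
static inline int nouveau_led_init(struct drm_device *dev)
{
	return 0;	/* LED class compiled out or unreachable */
}
#endif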
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6fa0f7f..1fba38622744 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
313 if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { 313 if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
314 /* block access to objects not created via this interface */ 314 /* block access to objects not created via this interface */
315 owner = argv->v0.owner; 315 owner = argv->v0.owner;
316 if (argv->v0.object == 0ULL) 316 if (argv->v0.object == 0ULL &&
317 argv->v0.type != NVIF_IOCTL_V0_DEL)
317 argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ 318 argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
318 else 319 else
319 argv->v0.owner = NVDRM_OBJECT_USIF; 320 argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2c2c64507661..32097fd615fd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4052 } 4052 }
4053 } 4053 }
4054 4054
4055 for_each_crtc_in_state(state, crtc, crtc_state, i) {
4056 if (crtc->state->event)
4057 drm_crtc_vblank_get(crtc);
4058 }
4059
4055 /* Update plane(s). */ 4060 /* Update plane(s). */
4056 for_each_plane_in_state(state, plane, plane_state, i) { 4061 for_each_plane_in_state(state, plane, plane_state, i) {
4057 struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); 4062 struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4101 drm_crtc_send_vblank_event(crtc, crtc->state->event); 4106 drm_crtc_send_vblank_event(crtc, crtc->state->event);
4102 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4107 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4103 crtc->state->event = NULL; 4108 crtc->state->event = NULL;
4109 drm_crtc_vblank_put(crtc);
4104 } 4110 }
4105 } 4111 }
4106 4112
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 52b87ae83e7b..f0b322bec7df 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
107 struct nv84_fence_chan *fctx = chan->fence; 107 struct nv84_fence_chan *fctx = chan->fence;
108 108
109 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); 109 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
110 mutex_lock(&priv->mutex);
110 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); 111 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
111 nouveau_bo_vma_del(priv->bo, &fctx->vma); 112 nouveau_bo_vma_del(priv->bo, &fctx->vma);
113 mutex_unlock(&priv->mutex);
112 nouveau_fence_context_del(&fctx->base); 114 nouveau_fence_context_del(&fctx->base);
113 chan->fence = NULL; 115 chan->fence = NULL;
114 nouveau_fence_context_free(&fctx->base); 116 nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
134 fctx->base.sync32 = nv84_fence_sync32; 136 fctx->base.sync32 = nv84_fence_sync32;
135 fctx->base.sequence = nv84_fence_read(chan); 137 fctx->base.sequence = nv84_fence_read(chan);
136 138
139 mutex_lock(&priv->mutex);
137 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); 140 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
138 if (ret == 0) { 141 if (ret == 0) {
139 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, 142 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
140 &fctx->vma_gart); 143 &fctx->vma_gart);
141 } 144 }
145 mutex_unlock(&priv->mutex);
142 146
143 if (ret) 147 if (ret)
144 nv84_fence_context_del(chan); 148 nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
212 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); 216 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
213 priv->base.uevent = true; 217 priv->base.uevent = true;
214 218
219 mutex_init(&priv->mutex);
220
215 /* Use VRAM if there is any ; otherwise fallback to system memory */ 221 /* Use VRAM if there is any ; otherwise fallback to system memory */
216 domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : 222 domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
217 /* 223 /*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 6f0436df0219..f8f2f16c22a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
59 ); 59 );
60 } 60 }
61 for (i = 0; i < size; i++) 61 for (i = 0; i < size; i++)
62 nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]); 62 nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
63 for (; i < 0x60; i++) 63 for (; i < 0x60; i++)
64 nvkm_wr32(device, 0x61c440 + soff, (i << 8)); 64 nvkm_wr32(device, 0x61c440 + soff, (i << 8));
65 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); 65 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 567466f93cd5..0db8efbf1c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
433 case 0x94: 433 case 0x94:
434 case 0x96: 434 case 0x96:
435 case 0x98: 435 case 0x98:
436 case 0xaa:
437 case 0xac:
438 return true; 436 return true;
439 default: 437 default:
440 break; 438 break;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e0c143b865f3..30bd4a6a9d46 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen 97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen
98 * 2.47.0 - Add UVD_NO_OP register support 98 * 2.47.0 - Add UVD_NO_OP register support
99 * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI 99 * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
100 * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
100 */ 101 */
101#define KMS_DRIVER_MAJOR 2 102#define KMS_DRIVER_MAJOR 2
102#define KMS_DRIVER_MINOR 48 103#define KMS_DRIVER_MINOR 49
103#define KMS_DRIVER_PATCHLEVEL 0 104#define KMS_DRIVER_PATCHLEVEL 0
104int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 105int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
105int radeon_driver_unload_kms(struct drm_device *dev); 106int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0bcffd8a7bd3..96683f5b2b1b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
220 220
221 man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 221 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
222 222
223 args->vram_size = rdev->mc.real_vram_size; 223 args->vram_size = (u64)man->size << PAGE_SHIFT;
224 args->vram_visible = (u64)man->size << PAGE_SHIFT; 224 args->vram_visible = rdev->mc.visible_vram_size;
225 args->vram_visible -= rdev->vram_pin_size; 225 args->vram_visible -= rdev->vram_pin_size;
226 args->gart_size = rdev->mc.gtt_size; 226 args->gart_size = rdev->mc.gtt_size;
227 args->gart_size -= rdev->gart_pin_size; 227 args->gart_size -= rdev->gart_pin_size;
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f31a778b0851..b22d0f83f8e3 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -168,7 +168,7 @@ struct cp2112_device {
168 atomic_t xfer_avail; 168 atomic_t xfer_avail;
169 struct gpio_chip gc; 169 struct gpio_chip gc;
170 u8 *in_out_buffer; 170 u8 *in_out_buffer;
171 spinlock_t lock; 171 struct mutex lock;
172 172
173 struct gpio_desc *desc[8]; 173 struct gpio_desc *desc[8];
174 bool gpio_poll; 174 bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
186 struct cp2112_device *dev = gpiochip_get_data(chip); 186 struct cp2112_device *dev = gpiochip_get_data(chip);
187 struct hid_device *hdev = dev->hdev; 187 struct hid_device *hdev = dev->hdev;
188 u8 *buf = dev->in_out_buffer; 188 u8 *buf = dev->in_out_buffer;
189 unsigned long flags;
190 int ret; 189 int ret;
191 190
192 spin_lock_irqsave(&dev->lock, flags); 191 mutex_lock(&dev->lock);
193 192
194 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 193 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
195 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 194 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
213 ret = 0; 212 ret = 0;
214 213
215exit: 214exit:
216 spin_unlock_irqrestore(&dev->lock, flags); 215 mutex_unlock(&dev->lock);
217 return ret <= 0 ? ret : -EIO; 216 return ret < 0 ? ret : -EIO;
218} 217}
219 218
220static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 219static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
222 struct cp2112_device *dev = gpiochip_get_data(chip); 221 struct cp2112_device *dev = gpiochip_get_data(chip);
223 struct hid_device *hdev = dev->hdev; 222 struct hid_device *hdev = dev->hdev;
224 u8 *buf = dev->in_out_buffer; 223 u8 *buf = dev->in_out_buffer;
225 unsigned long flags;
226 int ret; 224 int ret;
227 225
228 spin_lock_irqsave(&dev->lock, flags); 226 mutex_lock(&dev->lock);
229 227
230 buf[0] = CP2112_GPIO_SET; 228 buf[0] = CP2112_GPIO_SET;
231 buf[1] = value ? 0xff : 0; 229 buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
237 if (ret < 0) 235 if (ret < 0)
238 hid_err(hdev, "error setting GPIO values: %d\n", ret); 236 hid_err(hdev, "error setting GPIO values: %d\n", ret);
239 237
240 spin_unlock_irqrestore(&dev->lock, flags); 238 mutex_unlock(&dev->lock);
241} 239}
242 240
243static int cp2112_gpio_get_all(struct gpio_chip *chip) 241static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
245 struct cp2112_device *dev = gpiochip_get_data(chip); 243 struct cp2112_device *dev = gpiochip_get_data(chip);
246 struct hid_device *hdev = dev->hdev; 244 struct hid_device *hdev = dev->hdev;
247 u8 *buf = dev->in_out_buffer; 245 u8 *buf = dev->in_out_buffer;
248 unsigned long flags;
249 int ret; 246 int ret;
250 247
251 spin_lock_irqsave(&dev->lock, flags); 248 mutex_lock(&dev->lock);
252 249
253 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, 250 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
254 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, 251 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
262 ret = buf[1]; 259 ret = buf[1];
263 260
264exit: 261exit:
265 spin_unlock_irqrestore(&dev->lock, flags); 262 mutex_unlock(&dev->lock);
266 263
267 return ret; 264 return ret;
268} 265}
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
284 struct cp2112_device *dev = gpiochip_get_data(chip); 281 struct cp2112_device *dev = gpiochip_get_data(chip);
285 struct hid_device *hdev = dev->hdev; 282 struct hid_device *hdev = dev->hdev;
286 u8 *buf = dev->in_out_buffer; 283 u8 *buf = dev->in_out_buffer;
287 unsigned long flags;
288 int ret; 284 int ret;
289 285
290 spin_lock_irqsave(&dev->lock, flags); 286 mutex_lock(&dev->lock);
291 287
292 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 288 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
293 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 289 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
308 goto fail; 304 goto fail;
309 } 305 }
310 306
311 spin_unlock_irqrestore(&dev->lock, flags); 307 mutex_unlock(&dev->lock);
312 308
313 /* 309 /*
314 * Set gpio value when output direction is already set, 310 * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
319 return 0; 315 return 0;
320 316
321fail: 317fail:
322 spin_unlock_irqrestore(&dev->lock, flags); 318 mutex_unlock(&dev->lock);
323 return ret < 0 ? ret : -EIO; 319 return ret < 0 ? ret : -EIO;
324} 320}
325 321
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1235 if (!dev->in_out_buffer) 1231 if (!dev->in_out_buffer)
1236 return -ENOMEM; 1232 return -ENOMEM;
1237 1233
1238 spin_lock_init(&dev->lock); 1234 mutex_init(&dev->lock);
1239 1235
1240 ret = hid_parse(hdev); 1236 ret = hid_parse(hdev);
1241 if (ret) { 1237 if (ret) {
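The underlying reason for the cp2112 lock conversion: hid_hw_raw_request() performs a USB control transfer and can sleep, which is forbidden while holding a spinlock with interrupts disabled. A mutex makes the critical section sleepable; the trade-off is that these GPIO callbacks must never be reached from atomic context. Shape of the fixed path, a sketch with arguments as used in the driver (reqtype assumed to be HID_REQ_GET_REPORT):

static int sketch_gpio_get(struct cp2112_device *dev)
{
	int ret;

	mutex_lock(&dev->lock);		/* sleeping here is now legal */
	ret = hid_hw_raw_request(dev->hdev, CP2112_GPIO_GET,
				 dev->in_out_buffer, CP2112_GPIO_GET_LENGTH,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	mutex_unlock(&dev->lock);
	return ret;
}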
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f46f2c5117fa..350accfee8e8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,6 +76,9 @@
76#define USB_VENDOR_ID_ALPS_JP 0x044E 76#define USB_VENDOR_ID_ALPS_JP 0x044E
77#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B 77#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
78 78
79#define USB_VENDOR_ID_AMI 0x046b
80#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
81
79#define USB_VENDOR_ID_ANTON 0x1130 82#define USB_VENDOR_ID_ANTON 0x1130
80#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 83#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
81 84
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c5c5fbe9d605..52026dc94d5c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
872 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), 872 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
873 .driver_data = LG_NOGET | LG_FF4 }, 873 .driver_data = LG_NOGET | LG_FF4 },
874 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), 874 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
875 .driver_data = LG_FF2 }, 875 .driver_data = LG_NOGET | LG_FF2 },
876 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), 876 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
877 .driver_data = LG_FF3 }, 877 .driver_data = LG_FF3 },
878 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), 878 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e9d6cc7cdfc5..30a2977e2645 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, 57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, 58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
59 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, 59 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
60 { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
60 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, 61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, 62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0884dc9554fd..672145b0d8f5 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
166 wacom->id[0] = STYLUS_DEVICE_ID; 166 wacom->id[0] = STYLUS_DEVICE_ID;
167 } 167 }
168 168
169 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); 169 if (prox) {
170 if (features->pressure_max > 255) 170 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
171 pressure = (pressure << 1) | ((data[4] >> 6) & 1); 171 if (features->pressure_max > 255)
172 pressure += (features->pressure_max + 1) / 2; 172 pressure = (pressure << 1) | ((data[4] >> 6) & 1);
173 173 pressure += (features->pressure_max + 1) / 2;
174 input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14)); 174
175 input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14)); 175 input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
176 input_report_abs(input, ABS_PRESSURE, pressure); 176 input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
177 177 input_report_abs(input, ABS_PRESSURE, pressure);
178 input_report_key(input, BTN_TOUCH, data[4] & 0x08); 178
179 input_report_key(input, BTN_STYLUS, data[4] & 0x10); 179 input_report_key(input, BTN_TOUCH, data[4] & 0x08);
180 /* Only allow the stylus2 button to be reported for the pen tool. */ 180 input_report_key(input, BTN_STYLUS, data[4] & 0x10);
181 input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20)); 181 /* Only allow the stylus2 button to be reported for the pen tool. */
182 input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
183 }
182 184
183 if (!prox) 185 if (!prox)
184 wacom->id[0] = 0; 186 wacom->id[0] = 0;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index cd49cb17eb7f..308dbda700eb 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
383 return ret; 383 return ret;
384 } 384 }
385 385
386 init_cached_read_index(channel);
386 next_read_location = hv_get_next_read_location(inring_info); 387 next_read_location = hv_get_next_read_location(inring_info);
387 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, 388 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
388 sizeof(desc), 389 sizeof(desc),
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 2bbf0c521beb..7d61b566e148 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
775 775
776static int palmas_gpadc_suspend(struct device *dev) 776static int palmas_gpadc_suspend(struct device *dev)
777{ 777{
778 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 778 struct iio_dev *indio_dev = dev_get_drvdata(dev);
779 struct palmas_gpadc *adc = iio_priv(indio_dev); 779 struct palmas_gpadc *adc = iio_priv(indio_dev);
780 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; 780 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
781 int ret; 781 int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
798 798
799static int palmas_gpadc_resume(struct device *dev) 799static int palmas_gpadc_resume(struct device *dev)
800{ 800{
801 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 801 struct iio_dev *indio_dev = dev_get_drvdata(dev);
802 struct palmas_gpadc *adc = iio_priv(indio_dev); 802 struct palmas_gpadc *adc = iio_priv(indio_dev);
803 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; 803 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
804 int ret; 804 int ret;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 9a081465c42f..6bb23a49e81e 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
422 422
423static int __maybe_unused afe4403_suspend(struct device *dev) 423static int __maybe_unused afe4403_suspend(struct device *dev)
424{ 424{
425 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 425 struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
426 struct afe4403_data *afe = iio_priv(indio_dev); 426 struct afe4403_data *afe = iio_priv(indio_dev);
427 int ret; 427 int ret;
428 428
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
443 443
444static int __maybe_unused afe4403_resume(struct device *dev) 444static int __maybe_unused afe4403_resume(struct device *dev)
445{ 445{
446 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 446 struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
447 struct afe4403_data *afe = iio_priv(indio_dev); 447 struct afe4403_data *afe = iio_priv(indio_dev);
448 int ret; 448 int ret;
449 449
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 45266404f7e3..964f5231a831 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
428 428
429static int __maybe_unused afe4404_suspend(struct device *dev) 429static int __maybe_unused afe4404_suspend(struct device *dev)
430{ 430{
431 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 431 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
432 struct afe4404_data *afe = iio_priv(indio_dev); 432 struct afe4404_data *afe = iio_priv(indio_dev);
433 int ret; 433 int ret;
434 434
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
449 449
450static int __maybe_unused afe4404_resume(struct device *dev) 450static int __maybe_unused afe4404_resume(struct device *dev)
451{ 451{
452 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 452 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
453 struct afe4404_data *afe = iio_priv(indio_dev); 453 struct afe4404_data *afe = iio_priv(indio_dev);
454 int ret; 454 int ret;
455 455
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 90ab8a2d2846..183c14329d6e 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
238 238
239 mutex_lock(&data->lock); 239 mutex_lock(&data->lock);
240 240
241 while (cnt || (cnt = max30100_fifo_count(data) > 0)) { 241 while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
242 ret = max30100_read_measurement(data); 242 ret = max30100_read_measurement(data);
243 if (ret) 243 if (ret)
244 break; 244 break;
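The max30100 change is a pure operator-precedence fix: ">" binds tighter than "=", so the old (cnt = max30100_fifo_count(data) > 0) stored the boolean result of the comparison (0 or 1) in cnt instead of the FIFO depth, draining at most one extra sample per pass. A standalone demonstration of the two groupings:

#include <stdio.h>

static int fifo_count(void) { return 5; }	/* stand-in for the FIFO read */

int main(void)
{
	int cnt;

	cnt = fifo_count() > 0;			/* buggy grouping: cnt == 1 */
	printf("buggy grouping: %d\n", cnt);

	if ((cnt = fifo_count()) > 0)		/* fixed grouping: cnt == 5 */
		printf("fixed grouping: %d\n", cnt);
	return 0;
}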
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 9c47bc98f3ac..2a22ad920333 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -71,7 +71,8 @@
71 * a) select an implementation using busy loop polling on those systems 71 * a) select an implementation using busy loop polling on those systems
72 * b) use the checksum to do some probabilistic decoding 72 * b) use the checksum to do some probabilistic decoding
73 */ 73 */
74#define DHT11_START_TRANSMISSION 18 /* ms */ 74#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
75#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
75#define DHT11_MIN_TIMERES 34000 /* ns */ 76#define DHT11_MIN_TIMERES 34000 /* ns */
76#define DHT11_THRESHOLD 49000 /* ns */ 77#define DHT11_THRESHOLD 49000 /* ns */
77#define DHT11_AMBIG_LOW 23000 /* ns */ 78#define DHT11_AMBIG_LOW 23000 /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
228 ret = gpio_direction_output(dht11->gpio, 0); 229 ret = gpio_direction_output(dht11->gpio, 0);
229 if (ret) 230 if (ret)
230 goto err; 231 goto err;
231 msleep(DHT11_START_TRANSMISSION); 232 usleep_range(DHT11_START_TRANSMISSION_MIN,
233 DHT11_START_TRANSMISSION_MAX);
232 ret = gpio_direction_input(dht11->gpio); 234 ret = gpio_direction_input(dht11->gpio);
233 if (ret) 235 if (ret)
234 goto err; 236 goto err;
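Replacing msleep() with usleep_range() is the standard fix for short, bounded delays: msleep() is jiffy-based and can overshoot substantially (at HZ=100 the granularity alone is 10 ms), while usleep_range() is hrtimer-backed and keeps the start pulse inside the 18-20 ms window the sensor protocol expects. The resulting start sequence, condensed from the hunk above into a hypothetical helper:

static int sketch_dht11_start(struct dht11 *dht11)
{
	int ret = gpio_direction_output(dht11->gpio, 0);	/* pull low */

	if (ret)
		return ret;
	usleep_range(DHT11_START_TRANSMISSION_MIN,	/* >= 18 ms */
		     DHT11_START_TRANSMISSION_MAX);	/* <= 20 ms */
	return gpio_direction_input(dht11->gpio);	/* release line */
}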
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 11447ab1055c..bf5c36e229ba 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
901 data->enabled = true; 901 data->enabled = true;
902 if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { 902 if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
903 retval = disable_irq_wake(irq); 903 retval = disable_irq_wake(irq);
904 if (!retval) 904 if (retval)
905 dev_warn(&rmi_dev->dev, 905 dev_warn(&rmi_dev->dev,
906 "Failed to disable irq for wake: %d\n", 906 "Failed to disable irq for wake: %d\n",
907 retval); 907 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
936 disable_irq(irq); 936 disable_irq(irq);
937 if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { 937 if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
938 retval = enable_irq_wake(irq); 938 retval = enable_irq_wake(irq);
939 if (!retval) 939 if (retval)
940 dev_warn(&rmi_dev->dev, 940 dev_warn(&rmi_dev->dev,
941 "Failed to enable irq for wake: %d\n", 941 "Failed to enable irq for wake: %d\n",
942 retval); 942 retval);
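Both rmi4 hunks fix the same inverted test: enable_irq_wake() and disable_irq_wake() return 0 on success and a nonzero error code on failure, so warning on !retval printed "Failed ...: 0" on every success and stayed silent on actual failures. The conventional shape for 0-on-success kernel APIs, sketched as a small wrapper:

static void sketch_enable_wake(struct rmi_device *rmi_dev, int irq)
{
	int retval = enable_irq_wake(irq);

	if (retval)	/* 0 == success; nonzero == failure */
		dev_warn(&rmi_dev->dev,
			 "Failed to enable irq for wake: %d\n", retval);
}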
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 83cf11312fd9..c9d1c91e1887 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
682 } 682 }
683 platform_set_drvdata(wm->battery_dev, wm); 683 platform_set_drvdata(wm->battery_dev, wm);
684 wm->battery_dev->dev.parent = dev; 684 wm->battery_dev->dev.parent = dev;
685 wm->battery_dev->dev.platform_data = pdata->batt_pdata; 685 wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
686 ret = platform_device_add(wm->battery_dev); 686 ret = platform_device_add(wm->battery_dev);
687 if (ret < 0) 687 if (ret < 0)
688 goto batt_reg_err; 688 goto batt_reg_err;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8ee54d71c7eb..37e204f3d9be 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
352 select IOMMU_API 352 select IOMMU_API
353 select MEMORY 353 select MEMORY
354 select MTK_SMI 354 select MTK_SMI
355 select COMMON_CLK_MT2701_MMSYS
356 select COMMON_CLK_MT2701_IMGSYS
357 select COMMON_CLK_MT2701_VDECSYS
358 help 355 help
359 Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is 356 Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
360 Multimedia Memory Management Unit. This option enables remapping of 357 Multimedia Memory Management Unit. This option enables remapping of
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 3ef0f42984f2..1b5b8c5361c5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
112 * Domain for untranslated devices - only allocated 112 * Domain for untranslated devices - only allocated
113 * if iommu=pt passed on kernel cmd line. 113 * if iommu=pt passed on kernel cmd line.
114 */ 114 */
115static const struct iommu_ops amd_iommu_ops; 115const struct iommu_ops amd_iommu_ops;
116 116
117static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 117static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
118int amd_iommu_max_glx_val = -1; 118int amd_iommu_max_glx_val = -1;
@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
445static int iommu_init_device(struct device *dev) 445static int iommu_init_device(struct device *dev)
446{ 446{
447 struct iommu_dev_data *dev_data; 447 struct iommu_dev_data *dev_data;
448 struct amd_iommu *iommu;
448 int devid; 449 int devid;
449 450
450 if (dev->archdata.iommu) 451 if (dev->archdata.iommu)
@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
454 if (devid < 0) 455 if (devid < 0)
455 return devid; 456 return devid;
456 457
458 iommu = amd_iommu_rlookup_table[devid];
459
457 dev_data = find_dev_data(devid); 460 dev_data = find_dev_data(devid);
458 if (!dev_data) 461 if (!dev_data)
459 return -ENOMEM; 462 return -ENOMEM;
@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
469 472
470 dev->archdata.iommu = dev_data; 473 dev->archdata.iommu = dev_data;
471 474
472 iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, 475 iommu_device_link(&iommu->iommu, dev);
473 dev);
474 476
475 return 0; 477 return 0;
476} 478}
@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
495 497
496static void iommu_uninit_device(struct device *dev) 498static void iommu_uninit_device(struct device *dev)
497{ 499{
498 int devid;
499 struct iommu_dev_data *dev_data; 500 struct iommu_dev_data *dev_data;
501 struct amd_iommu *iommu;
502 int devid;
500 503
501 devid = get_device_id(dev); 504 devid = get_device_id(dev);
502 if (devid < 0) 505 if (devid < 0)
503 return; 506 return;
504 507
508 iommu = amd_iommu_rlookup_table[devid];
509
505 dev_data = search_dev_data(devid); 510 dev_data = search_dev_data(devid);
506 if (!dev_data) 511 if (!dev_data)
507 return; 512 return;
@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
509 if (dev_data->domain) 514 if (dev_data->domain)
510 detach_device(dev); 515 detach_device(dev);
511 516
512 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, 517 iommu_device_unlink(&iommu->iommu, dev);
513 dev);
514 518
515 iommu_group_remove_device(dev); 519 iommu_group_remove_device(dev);
516 520
@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
3161 return false; 3165 return false;
3162} 3166}
3163 3167
3164static void amd_iommu_get_dm_regions(struct device *dev, 3168static void amd_iommu_get_resv_regions(struct device *dev,
3165 struct list_head *head) 3169 struct list_head *head)
3166{ 3170{
3171 struct iommu_resv_region *region;
3167 struct unity_map_entry *entry; 3172 struct unity_map_entry *entry;
3168 int devid; 3173 int devid;
3169 3174
@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
3172 return; 3177 return;
3173 3178
3174 list_for_each_entry(entry, &amd_iommu_unity_map, list) { 3179 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3175 struct iommu_dm_region *region; 3180 size_t length;
3181 int prot = 0;
3176 3182
3177 if (devid < entry->devid_start || devid > entry->devid_end) 3183 if (devid < entry->devid_start || devid > entry->devid_end)
3178 continue; 3184 continue;
3179 3185
3180 region = kzalloc(sizeof(*region), GFP_KERNEL); 3186 length = entry->address_end - entry->address_start;
3187 if (entry->prot & IOMMU_PROT_IR)
3188 prot |= IOMMU_READ;
3189 if (entry->prot & IOMMU_PROT_IW)
3190 prot |= IOMMU_WRITE;
3191
3192 region = iommu_alloc_resv_region(entry->address_start,
3193 length, prot,
3194 IOMMU_RESV_DIRECT);
3181 if (!region) { 3195 if (!region) {
3182 pr_err("Out of memory allocating dm-regions for %s\n", 3196 pr_err("Out of memory allocating dm-regions for %s\n",
3183 dev_name(dev)); 3197 dev_name(dev));
3184 return; 3198 return;
3185 } 3199 }
3186
3187 region->start = entry->address_start;
3188 region->length = entry->address_end - entry->address_start;
3189 if (entry->prot & IOMMU_PROT_IR)
3190 region->prot |= IOMMU_READ;
3191 if (entry->prot & IOMMU_PROT_IW)
3192 region->prot |= IOMMU_WRITE;
3193
3194 list_add_tail(&region->list, head); 3200 list_add_tail(&region->list, head);
3195 } 3201 }
3202
3203 region = iommu_alloc_resv_region(MSI_RANGE_START,
3204 MSI_RANGE_END - MSI_RANGE_START + 1,
3205 0, IOMMU_RESV_RESERVED);
3206 if (!region)
3207 return;
3208 list_add_tail(&region->list, head);
3209
3210 region = iommu_alloc_resv_region(HT_RANGE_START,
3211 HT_RANGE_END - HT_RANGE_START + 1,
3212 0, IOMMU_RESV_RESERVED);
3213 if (!region)
3214 return;
3215 list_add_tail(&region->list, head);
3196} 3216}
3197 3217
3198static void amd_iommu_put_dm_regions(struct device *dev, 3218static void amd_iommu_put_resv_regions(struct device *dev,
3199 struct list_head *head) 3219 struct list_head *head)
3200{ 3220{
3201 struct iommu_dm_region *entry, *next; 3221 struct iommu_resv_region *entry, *next;
3202 3222
3203 list_for_each_entry_safe(entry, next, head, list) 3223 list_for_each_entry_safe(entry, next, head, list)
3204 kfree(entry); 3224 kfree(entry);
3205} 3225}
3206 3226
3207static void amd_iommu_apply_dm_region(struct device *dev, 3227static void amd_iommu_apply_resv_region(struct device *dev,
3208 struct iommu_domain *domain, 3228 struct iommu_domain *domain,
3209 struct iommu_dm_region *region) 3229 struct iommu_resv_region *region)
3210{ 3230{
3211 struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); 3231 struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
3212 unsigned long start, end; 3232 unsigned long start, end;
@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
3217 WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); 3237 WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
3218} 3238}
3219 3239
3220static const struct iommu_ops amd_iommu_ops = { 3240const struct iommu_ops amd_iommu_ops = {
3221 .capable = amd_iommu_capable, 3241 .capable = amd_iommu_capable,
3222 .domain_alloc = amd_iommu_domain_alloc, 3242 .domain_alloc = amd_iommu_domain_alloc,
3223 .domain_free = amd_iommu_domain_free, 3243 .domain_free = amd_iommu_domain_free,
@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = {
3230 .add_device = amd_iommu_add_device, 3250 .add_device = amd_iommu_add_device,
3231 .remove_device = amd_iommu_remove_device, 3251 .remove_device = amd_iommu_remove_device,
3232 .device_group = amd_iommu_device_group, 3252 .device_group = amd_iommu_device_group,
3233 .get_dm_regions = amd_iommu_get_dm_regions, 3253 .get_resv_regions = amd_iommu_get_resv_regions,
3234 .put_dm_regions = amd_iommu_put_dm_regions, 3254 .put_resv_regions = amd_iommu_put_resv_regions,
3235 .apply_dm_region = amd_iommu_apply_dm_region, 3255 .apply_resv_region = amd_iommu_apply_resv_region,
3236 .pgsize_bitmap = AMD_IOMMU_PGSIZES, 3256 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
3237}; 3257};
3238 3258
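
The amd_iommu.c hunks above replace the driver-private iommu_dm_region machinery with the generic iommu_resv_region one: unity-mapped ranges are now reported as IOMMU_RESV_DIRECT with their protection bits translated, and the MSI and HyperTransport windows are reported as IOMMU_RESV_RESERVED. A sketch of a consumer walking the result through the core helpers this series introduces (iommu_get_resv_regions()/iommu_put_resv_regions(); field names follow struct iommu_resv_region):

        static void dump_resv_regions(struct device *dev)
        {
                struct iommu_resv_region *region;
                LIST_HEAD(head);

                iommu_get_resv_regions(dev, &head);
                list_for_each_entry(region, &head, list) {
                        phys_addr_t end = region->start + region->length - 1;

                        dev_info(dev, "resv %pa..%pa prot %#x type %d\n",
                                 &region->start, &end, region->prot,
                                 region->type);
                }
                iommu_put_resv_regions(dev, &head);
        }
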
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 6799cf9713f7..04cdac7ab3e3 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -94,6 +94,8 @@
94 * out of it. 94 * out of it.
95 */ 95 */
96 96
97extern const struct iommu_ops amd_iommu_ops;
98
97/* 99/*
98 * structure describing one IOMMU in the ACPI table. Typically followed by one 100 * structure describing one IOMMU in the ACPI table. Typically followed by one
99 * or more ivhd_entrys. 101 * or more ivhd_entrys.
@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
1635 amd_iommu_erratum_746_workaround(iommu); 1637 amd_iommu_erratum_746_workaround(iommu);
1636 amd_iommu_ats_write_check_workaround(iommu); 1638 amd_iommu_ats_write_check_workaround(iommu);
1637 1639
1638 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, 1640 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1639 amd_iommu_groups, "ivhd%d", 1641 amd_iommu_groups, "ivhd%d", iommu->index);
1640 iommu->index); 1642 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1643 iommu_device_register(&iommu->iommu);
1641 1644
1642 return pci_enable_device(iommu->dev); 1645 return pci_enable_device(iommu->dev);
1643} 1646}
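
iommu_device_create() is gone; the sysfs handle now lives embedded in struct amd_iommu (see the amd_iommu_types.h hunk below) and registration becomes a three-step sequence. The hunk above drops the return values on the floor; a sketch of the same sequence with error handling and the matching teardown calls used by the dmar.c hunk further down (the wrapper function is illustrative, not part of the patch):

        static int example_register_iommu(struct amd_iommu *iommu)
        {
                int ret;

                ret = iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
                                             amd_iommu_groups, "ivhd%d",
                                             iommu->index);
                if (ret)
                        return ret;

                iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);

                ret = iommu_device_register(&iommu->iommu);
                if (ret)
                        iommu_device_sysfs_remove(&iommu->iommu);
                return ret;
        }
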
@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
2230 */ 2233 */
2231 ret = check_ivrs_checksum(ivrs_base); 2234 ret = check_ivrs_checksum(ivrs_base);
2232 if (ret) 2235 if (ret)
2233 return ret; 2236 goto out;
2234 2237
2235 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); 2238 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2236 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); 2239 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 0d91785ebdc3..af00f381a7b1 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -535,8 +535,8 @@ struct amd_iommu {
535 /* if one, we need to send a completion wait command */ 535 /* if one, we need to send a completion wait command */
536 bool need_sync; 536 bool need_sync;
537 537
538 /* IOMMU sysfs device */ 538 /* Handle for IOMMU core code */
539 struct device *iommu_dev; 539 struct iommu_device iommu;
540 540
541 /* 541 /*
542 * We can't rely on the BIOS to restore all values on reinit, so we 542 * We can't rely on the BIOS to restore all values on reinit, so we
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4d6ec444a9d6..5806a6acc94e 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -269,9 +269,6 @@
269#define STRTAB_STE_1_SHCFG_INCOMING 1UL 269#define STRTAB_STE_1_SHCFG_INCOMING 1UL
270#define STRTAB_STE_1_SHCFG_SHIFT 44 270#define STRTAB_STE_1_SHCFG_SHIFT 44
271 271
272#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
273#define STRTAB_STE_1_PRIVCFG_SHIFT 48
274
275#define STRTAB_STE_2_S2VMID_SHIFT 0 272#define STRTAB_STE_2_S2VMID_SHIFT 0
276#define STRTAB_STE_2_S2VMID_MASK 0xffffUL 273#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
277#define STRTAB_STE_2_VTCR_SHIFT 32 274#define STRTAB_STE_2_VTCR_SHIFT 32
@@ -412,6 +409,9 @@
412/* High-level queue structures */ 409/* High-level queue structures */
413#define ARM_SMMU_POLL_TIMEOUT_US 100 410#define ARM_SMMU_POLL_TIMEOUT_US 100
414 411
412#define MSI_IOVA_BASE 0x8000000
413#define MSI_IOVA_LENGTH 0x100000
414
415static bool disable_bypass; 415static bool disable_bypass;
416module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); 416module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
417MODULE_PARM_DESC(disable_bypass, 417MODULE_PARM_DESC(disable_bypass,
@@ -616,6 +616,9 @@ struct arm_smmu_device {
616 unsigned int sid_bits; 616 unsigned int sid_bits;
617 617
618 struct arm_smmu_strtab_cfg strtab_cfg; 618 struct arm_smmu_strtab_cfg strtab_cfg;
619
620 /* IOMMU core code handle */
621 struct iommu_device iommu;
619}; 622};
620 623
621/* SMMU private data for each master */ 624/* SMMU private data for each master */
@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
1042 } 1045 }
1043 } 1046 }
1044 1047
1045 /* Nuke the existing Config, as we're going to rewrite it */ 1048 /* Nuke the existing STE_0 value, as we're going to rewrite it */
1046 val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT); 1049 val = ste->valid ? STRTAB_STE_0_V : 0;
1047
1048 if (ste->valid)
1049 val |= STRTAB_STE_0_V;
1050 else
1051 val &= ~STRTAB_STE_0_V;
1052 1050
1053 if (ste->bypass) { 1051 if (ste->bypass) {
1054 val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT 1052 val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
1073#ifdef CONFIG_PCI_ATS 1071#ifdef CONFIG_PCI_ATS
1074 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | 1072 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
1075#endif 1073#endif
1076 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT | 1074 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
1077 STRTAB_STE_1_PRIVCFG_UNPRIV <<
1078 STRTAB_STE_1_PRIVCFG_SHIFT);
1079 1075
1080 if (smmu->features & ARM_SMMU_FEAT_STALLS) 1076 if (smmu->features & ARM_SMMU_FEAT_STALLS)
1081 dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); 1077 dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
1083 val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK 1079 val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
1084 << STRTAB_STE_0_S1CTXPTR_SHIFT) | 1080 << STRTAB_STE_0_S1CTXPTR_SHIFT) |
1085 STRTAB_STE_0_CFG_S1_TRANS; 1081 STRTAB_STE_0_CFG_S1_TRANS;
1086
1087 } 1082 }
1088 1083
1089 if (ste->s2_cfg) { 1084 if (ste->s2_cfg) {
@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
1372 switch (cap) { 1367 switch (cap) {
1373 case IOMMU_CAP_CACHE_COHERENCY: 1368 case IOMMU_CAP_CACHE_COHERENCY:
1374 return true; 1369 return true;
1375 case IOMMU_CAP_INTR_REMAP:
1376 return true; /* MSIs are just memory writes */
1377 case IOMMU_CAP_NOEXEC: 1370 case IOMMU_CAP_NOEXEC:
1378 return true; 1371 return true;
1379 default: 1372 default:
@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
1795 } 1788 }
1796 1789
1797 group = iommu_group_get_for_dev(dev); 1790 group = iommu_group_get_for_dev(dev);
1798 if (!IS_ERR(group)) 1791 if (!IS_ERR(group)) {
1799 iommu_group_put(group); 1792 iommu_group_put(group);
1793 iommu_device_link(&smmu->iommu, dev);
1794 }
1800 1795
1801 return PTR_ERR_OR_ZERO(group); 1796 return PTR_ERR_OR_ZERO(group);
1802} 1797}
@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
1805{ 1800{
1806 struct iommu_fwspec *fwspec = dev->iommu_fwspec; 1801 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1807 struct arm_smmu_master_data *master; 1802 struct arm_smmu_master_data *master;
1803 struct arm_smmu_device *smmu;
1808 1804
1809 if (!fwspec || fwspec->ops != &arm_smmu_ops) 1805 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1810 return; 1806 return;
1811 1807
1812 master = fwspec->iommu_priv; 1808 master = fwspec->iommu_priv;
1809 smmu = master->smmu;
1813 if (master && master->ste.valid) 1810 if (master && master->ste.valid)
1814 arm_smmu_detach_dev(dev); 1811 arm_smmu_detach_dev(dev);
1815 iommu_group_remove_device(dev); 1812 iommu_group_remove_device(dev);
1813 iommu_device_unlink(&smmu->iommu, dev);
1816 kfree(master); 1814 kfree(master);
1817 iommu_fwspec_free(dev); 1815 iommu_fwspec_free(dev);
1818} 1816}
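
arm_smmu_add_device() and arm_smmu_remove_device() now maintain the sysfs link between an endpoint and its SMMU, with remove_device caching master->smmu up front so the unlink still has a valid handle after the master is freed. Note, though, that "smmu = master->smmu" executes before the existing "if (master && ...)" NULL check; if master can really be NULL there, the assignment is a latent NULL dereference. A defensive reordering, offered as a sketch only:

        master = fwspec->iommu_priv;
        if (!master)
                return;
        smmu = master->smmu;
        if (master->ste.valid)
                arm_smmu_detach_dev(dev);
        iommu_group_remove_device(dev);
        iommu_device_unlink(&smmu->iommu, dev);
        kfree(master);
        iommu_fwspec_free(dev);
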
@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1883 return iommu_fwspec_add_ids(dev, args->args, 1); 1881 return iommu_fwspec_add_ids(dev, args->args, 1);
1884} 1882}
1885 1883
1884static void arm_smmu_get_resv_regions(struct device *dev,
1885 struct list_head *head)
1886{
1887 struct iommu_resv_region *region;
1888 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1889
1890 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1891 prot, IOMMU_RESV_MSI);
1892 if (!region)
1893 return;
1894
1895 list_add_tail(&region->list, head);
1896}
1897
1898static void arm_smmu_put_resv_regions(struct device *dev,
1899 struct list_head *head)
1900{
1901 struct iommu_resv_region *entry, *next;
1902
1903 list_for_each_entry_safe(entry, next, head, list)
1904 kfree(entry);
1905}
1906
1886static struct iommu_ops arm_smmu_ops = { 1907static struct iommu_ops arm_smmu_ops = {
1887 .capable = arm_smmu_capable, 1908 .capable = arm_smmu_capable,
1888 .domain_alloc = arm_smmu_domain_alloc, 1909 .domain_alloc = arm_smmu_domain_alloc,
@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = {
1898 .domain_get_attr = arm_smmu_domain_get_attr, 1919 .domain_get_attr = arm_smmu_domain_get_attr,
1899 .domain_set_attr = arm_smmu_domain_set_attr, 1920 .domain_set_attr = arm_smmu_domain_set_attr,
1900 .of_xlate = arm_smmu_of_xlate, 1921 .of_xlate = arm_smmu_of_xlate,
1922 .get_resv_regions = arm_smmu_get_resv_regions,
1923 .put_resv_regions = arm_smmu_put_resv_regions,
1901 .pgsize_bitmap = -1UL, /* Restricted during device attach */ 1924 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1902}; 1925};
1903 1926
@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
1983 u32 size, l1size; 2006 u32 size, l1size;
1984 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; 2007 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1985 2008
1986 /* 2009 /* Calculate the L1 size, capped to the SIDSIZE. */
1987 * If we can resolve everything with a single L2 table, then we 2010 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
1988 * just need a single L1 descriptor. Otherwise, calculate the L1 2011 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
1989 * size, capped to the SIDSIZE.
1990 */
1991 if (smmu->sid_bits < STRTAB_SPLIT) {
1992 size = 0;
1993 } else {
1994 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
1995 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
1996 }
1997 cfg->num_l1_ents = 1 << size; 2012 cfg->num_l1_ents = 1 << size;
1998 2013
1999 size += STRTAB_SPLIT; 2014 size += STRTAB_SPLIT;
@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
2504 smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK; 2519 smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
2505 smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK; 2520 smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
2506 2521
2522 /*
2523 * If the SMMU supports fewer bits than would fill a single L2 stream
2524 * table, use a linear table instead.
2525 */
2526 if (smmu->sid_bits <= STRTAB_SPLIT)
2527 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
2528
2507 /* IDR5 */ 2529 /* IDR5 */
2508 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); 2530 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
2509 2531
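
The two stream-table hunks above work together: probe clears ARM_SMMU_FEAT_2_LVL_STRTAB whenever the stream IDs fit in one L2 table, so arm_smmu_init_strtab_2lvl() no longer needs its size = 0 special case. Worked numbers, assuming the driver's usual STRTAB_SPLIT of 8 and an L1 cap of at least 8:

        /* sid_bits = 7:  7 <= STRTAB_SPLIT, 2-level disabled; a linear
         *                table with 1 << 7 = 128 STEs is used instead.
         * sid_bits = 16: size = min(cap, 16 - 8) = 8, so 1 << 8 = 256
         *                L1 descriptors, each spanning an L2 table of
         *                1 << STRTAB_SPLIT = 256 STEs. */
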
@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
2613{ 2635{
2614 int irq, ret; 2636 int irq, ret;
2615 struct resource *res; 2637 struct resource *res;
2638 resource_size_t ioaddr;
2616 struct arm_smmu_device *smmu; 2639 struct arm_smmu_device *smmu;
2617 struct device *dev = &pdev->dev; 2640 struct device *dev = &pdev->dev;
2618 bool bypass; 2641 bool bypass;
@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
2630 dev_err(dev, "MMIO region too small (%pr)\n", res); 2653 dev_err(dev, "MMIO region too small (%pr)\n", res);
2631 return -EINVAL; 2654 return -EINVAL;
2632 } 2655 }
2656 ioaddr = res->start;
2633 2657
2634 smmu->base = devm_ioremap_resource(dev, res); 2658 smmu->base = devm_ioremap_resource(dev, res);
2635 if (IS_ERR(smmu->base)) 2659 if (IS_ERR(smmu->base))
@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
2682 return ret; 2706 return ret;
2683 2707
2684 /* And we're up. Go go go! */ 2708 /* And we're up. Go go go! */
2685 iommu_register_instance(dev->fwnode, &arm_smmu_ops); 2709 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
2710 "smmu3.%pa", &ioaddr);
2711 if (ret)
2712 return ret;
2713
2714 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2715 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2716
2717 ret = iommu_device_register(&smmu->iommu);
2686 2718
2687#ifdef CONFIG_PCI 2719#ifdef CONFIG_PCI
2688 if (pci_bus_type.iommu_ops != &arm_smmu_ops) { 2720 if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a60cded8a6ed..abf6496843a6 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,6 +24,7 @@
24 * - v7/v8 long-descriptor format 24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU 25 * - Non-secure access to the SMMU
26 * - Context fault reporting 26 * - Context fault reporting
27 * - Extended Stream ID (16 bit)
27 */ 28 */
28 29
29#define pr_fmt(fmt) "arm-smmu: " fmt 30#define pr_fmt(fmt) "arm-smmu: " fmt
@@ -87,6 +88,7 @@
87#define sCR0_CLIENTPD (1 << 0) 88#define sCR0_CLIENTPD (1 << 0)
88#define sCR0_GFRE (1 << 1) 89#define sCR0_GFRE (1 << 1)
89#define sCR0_GFIE (1 << 2) 90#define sCR0_GFIE (1 << 2)
91#define sCR0_EXIDENABLE (1 << 3)
90#define sCR0_GCFGFRE (1 << 4) 92#define sCR0_GCFGFRE (1 << 4)
91#define sCR0_GCFGFIE (1 << 5) 93#define sCR0_GCFGFIE (1 << 5)
92#define sCR0_USFCFG (1 << 10) 94#define sCR0_USFCFG (1 << 10)
@@ -126,6 +128,7 @@
126#define ID0_NUMIRPT_MASK 0xff 128#define ID0_NUMIRPT_MASK 0xff
127#define ID0_NUMSIDB_SHIFT 9 129#define ID0_NUMSIDB_SHIFT 9
128#define ID0_NUMSIDB_MASK 0xf 130#define ID0_NUMSIDB_MASK 0xf
131#define ID0_EXIDS (1 << 8)
129#define ID0_NUMSMRG_SHIFT 0 132#define ID0_NUMSMRG_SHIFT 0
130#define ID0_NUMSMRG_MASK 0xff 133#define ID0_NUMSMRG_MASK 0xff
131 134
@@ -169,6 +172,7 @@
169#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) 172#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
170#define S2CR_CBNDX_SHIFT 0 173#define S2CR_CBNDX_SHIFT 0
171#define S2CR_CBNDX_MASK 0xff 174#define S2CR_CBNDX_MASK 0xff
175#define S2CR_EXIDVALID (1 << 10)
172#define S2CR_TYPE_SHIFT 16 176#define S2CR_TYPE_SHIFT 16
173#define S2CR_TYPE_MASK 0x3 177#define S2CR_TYPE_MASK 0x3
174enum arm_smmu_s2cr_type { 178enum arm_smmu_s2cr_type {
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
260 264
261#define TTBCR2_SEP_SHIFT 15 265#define TTBCR2_SEP_SHIFT 15
262#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) 266#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
267#define TTBCR2_AS (1 << 4)
263 268
264#define TTBRn_ASID_SHIFT 48 269#define TTBRn_ASID_SHIFT 48
265 270
@@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg {
281 286
282#define FSYNR0_WNR (1 << 4) 287#define FSYNR0_WNR (1 << 4)
283 288
289#define MSI_IOVA_BASE 0x8000000
290#define MSI_IOVA_LENGTH 0x100000
291
284static int force_stage; 292static int force_stage;
285module_param(force_stage, int, S_IRUGO); 293module_param(force_stage, int, S_IRUGO);
286MODULE_PARM_DESC(force_stage, 294MODULE_PARM_DESC(force_stage,
@@ -351,6 +359,7 @@ struct arm_smmu_device {
351#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) 359#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
352#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) 360#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
353#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) 361#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
362#define ARM_SMMU_FEAT_EXIDS (1 << 12)
354 u32 features; 363 u32 features;
355 364
356#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) 365#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,6 +389,9 @@ struct arm_smmu_device {
380 unsigned int *irqs; 389 unsigned int *irqs;
381 390
382 u32 cavium_id_base; /* Specific to Cavium */ 391 u32 cavium_id_base; /* Specific to Cavium */
392
393 /* IOMMU core code handle */
394 struct iommu_device iommu;
383}; 395};
384 396
385enum arm_smmu_context_fmt { 397enum arm_smmu_context_fmt {
@@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
778 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; 790 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
779 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; 791 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
780 reg2 |= TTBCR2_SEP_UPSTREAM; 792 reg2 |= TTBCR2_SEP_UPSTREAM;
793 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
794 reg2 |= TTBCR2_AS;
781 } 795 }
782 if (smmu->version > ARM_SMMU_V1) 796 if (smmu->version > ARM_SMMU_V1)
783 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2); 797 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
@@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1048 struct arm_smmu_smr *smr = smmu->smrs + idx; 1062 struct arm_smmu_smr *smr = smmu->smrs + idx;
1049 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; 1063 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
1050 1064
1051 if (smr->valid) 1065 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
1052 reg |= SMR_VALID; 1066 reg |= SMR_VALID;
1053 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); 1067 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1054} 1068}
@@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1060 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | 1074 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1061 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; 1075 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1062 1076
1077 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
1078 smmu->smrs[idx].valid)
1079 reg |= S2CR_EXIDVALID;
1063 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); 1080 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1064} 1081}
1065 1082
@@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1070 arm_smmu_write_smr(smmu, idx); 1087 arm_smmu_write_smr(smmu, idx);
1071} 1088}
1072 1089
1090/*
1091 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1092 * should be called after sCR0 is written.
1093 */
1094static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1095{
1096 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1097 u32 smr;
1098
1099 if (!smmu->smrs)
1100 return;
1101
1102 /*
1103 * SMR.ID bits may not be preserved if the corresponding MASK
1104 * bits are set, so check each one separately. We can reject
1105 * masters later if they try to claim IDs outside these masks.
1106 */
1107 smr = smmu->streamid_mask << SMR_ID_SHIFT;
1108 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1109 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1110 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1111
1112 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1113 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1114 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1115 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1116}
1117
1073static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) 1118static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
1074{ 1119{
1075 struct arm_smmu_smr *smrs = smmu->smrs; 1120 struct arm_smmu_smr *smrs = smmu->smrs;
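
arm_smmu_test_smr_masks() is the SMR-probing logic lifted out of arm_smmu_device_cfg_probe() (see the deletion further down) so that it can run after sCR0 is written, since sCR0_EXIDENABLE changes the field widths being probed. The underlying write/read-back idiom, as a generic sketch rather than a kernel API:

        /* Probe which bits of a register the hardware actually stores. */
        static u32 probe_writable_bits(void __iomem *reg, u32 candidate)
        {
                writel_relaxed(candidate, reg);
                return readl_relaxed(reg) & candidate;
        }

Called with candidate = smmu->streamid_mask << SMR_ID_SHIFT on SMR(0), this reproduces the first half of the function above.
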
@@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1214 continue; 1259 continue;
1215 1260
1216 s2cr[idx].type = type; 1261 s2cr[idx].type = type;
1217 s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV; 1262 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1218 s2cr[idx].cbndx = cbndx; 1263 s2cr[idx].cbndx = cbndx;
1219 arm_smmu_write_s2cr(smmu, idx); 1264 arm_smmu_write_s2cr(smmu, idx);
1220 } 1265 }
@@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
1371 * requests. 1416 * requests.
1372 */ 1417 */
1373 return true; 1418 return true;
1374 case IOMMU_CAP_INTR_REMAP:
1375 return true; /* MSIs are just memory writes */
1376 case IOMMU_CAP_NOEXEC: 1419 case IOMMU_CAP_NOEXEC:
1377 return true; 1420 return true;
1378 default: 1421 default:
@@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev)
1444 if (ret) 1487 if (ret)
1445 goto out_free; 1488 goto out_free;
1446 1489
1490 iommu_device_link(&smmu->iommu, dev);
1491
1447 return 0; 1492 return 0;
1448 1493
1449out_free: 1494out_free:
@@ -1456,10 +1501,17 @@ out_free:
1456static void arm_smmu_remove_device(struct device *dev) 1501static void arm_smmu_remove_device(struct device *dev)
1457{ 1502{
1458 struct iommu_fwspec *fwspec = dev->iommu_fwspec; 1503 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1504 struct arm_smmu_master_cfg *cfg;
1505 struct arm_smmu_device *smmu;
1506
1459 1507
1460 if (!fwspec || fwspec->ops != &arm_smmu_ops) 1508 if (!fwspec || fwspec->ops != &arm_smmu_ops)
1461 return; 1509 return;
1462 1510
1511 cfg = fwspec->iommu_priv;
1512 smmu = cfg->smmu;
1513
1514 iommu_device_unlink(&smmu->iommu, dev);
1463 arm_smmu_master_free_smes(fwspec); 1515 arm_smmu_master_free_smes(fwspec);
1464 iommu_group_remove_device(dev); 1516 iommu_group_remove_device(dev);
1465 kfree(fwspec->iommu_priv); 1517 kfree(fwspec->iommu_priv);
@@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1549 return iommu_fwspec_add_ids(dev, &fwid, 1); 1601 return iommu_fwspec_add_ids(dev, &fwid, 1);
1550} 1602}
1551 1603
1604static void arm_smmu_get_resv_regions(struct device *dev,
1605 struct list_head *head)
1606{
1607 struct iommu_resv_region *region;
1608 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1609
1610 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1611 prot, IOMMU_RESV_MSI);
1612 if (!region)
1613 return;
1614
1615 list_add_tail(&region->list, head);
1616}
1617
1618static void arm_smmu_put_resv_regions(struct device *dev,
1619 struct list_head *head)
1620{
1621 struct iommu_resv_region *entry, *next;
1622
1623 list_for_each_entry_safe(entry, next, head, list)
1624 kfree(entry);
1625}
1626
1552static struct iommu_ops arm_smmu_ops = { 1627static struct iommu_ops arm_smmu_ops = {
1553 .capable = arm_smmu_capable, 1628 .capable = arm_smmu_capable,
1554 .domain_alloc = arm_smmu_domain_alloc, 1629 .domain_alloc = arm_smmu_domain_alloc,
@@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = {
1564 .domain_get_attr = arm_smmu_domain_get_attr, 1639 .domain_get_attr = arm_smmu_domain_get_attr,
1565 .domain_set_attr = arm_smmu_domain_set_attr, 1640 .domain_set_attr = arm_smmu_domain_set_attr,
1566 .of_xlate = arm_smmu_of_xlate, 1641 .of_xlate = arm_smmu_of_xlate,
1642 .get_resv_regions = arm_smmu_get_resv_regions,
1643 .put_resv_regions = arm_smmu_put_resv_regions,
1567 .pgsize_bitmap = -1UL, /* Restricted during device attach */ 1644 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1568}; 1645};
1569 1646
@@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1648 if (smmu->features & ARM_SMMU_FEAT_VMID16) 1725 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1649 reg |= sCR0_VMID16EN; 1726 reg |= sCR0_VMID16EN;
1650 1727
1728 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1729 reg |= sCR0_EXIDENABLE;
1730
1651 /* Push the button */ 1731 /* Push the button */
1652 __arm_smmu_tlb_sync(smmu); 1732 __arm_smmu_tlb_sync(smmu);
1653 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); 1733 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1735 "\t(IDR0.CTTW overridden by FW configuration)\n"); 1815 "\t(IDR0.CTTW overridden by FW configuration)\n");
1736 1816
1737 /* Max. number of entries we have for stream matching/indexing */ 1817 /* Max. number of entries we have for stream matching/indexing */
1738 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); 1818 if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1819 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1820 size = 1 << 16;
1821 } else {
1822 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1823 }
1739 smmu->streamid_mask = size - 1; 1824 smmu->streamid_mask = size - 1;
1740 if (id & ID0_SMS) { 1825 if (id & ID0_SMS) {
1741 u32 smr;
1742
1743 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; 1826 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1744 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; 1827 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1745 if (size == 0) { 1828 if (size == 0) {
@@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1748 return -ENODEV; 1831 return -ENODEV;
1749 } 1832 }
1750 1833
1751 /*
1752 * SMR.ID bits may not be preserved if the corresponding MASK
1753 * bits are set, so check each one separately. We can reject
1754 * masters later if they try to claim IDs outside these masks.
1755 */
1756 smr = smmu->streamid_mask << SMR_ID_SHIFT;
1757 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1758 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1759 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1760
1761 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1762 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1763 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1764 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1765
1766 /* Zero-initialised to mark as invalid */ 1834 /* Zero-initialised to mark as invalid */
1767 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), 1835 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1768 GFP_KERNEL); 1836 GFP_KERNEL);
@@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1770 return -ENOMEM; 1838 return -ENOMEM;
1771 1839
1772 dev_notice(smmu->dev, 1840 dev_notice(smmu->dev,
1773 "\tstream matching with %lu register groups, mask 0x%x", 1841 "\tstream matching with %lu register groups", size);
1774 size, smmu->smr_mask_mask);
1775 } 1842 }
1776 /* s2cr->type == 0 means translation, so initialise explicitly */ 1843 /* s2cr->type == 0 means translation, so initialise explicitly */
1777 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), 1844 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2011static int arm_smmu_device_probe(struct platform_device *pdev) 2078static int arm_smmu_device_probe(struct platform_device *pdev)
2012{ 2079{
2013 struct resource *res; 2080 struct resource *res;
2081 resource_size_t ioaddr;
2014 struct arm_smmu_device *smmu; 2082 struct arm_smmu_device *smmu;
2015 struct device *dev = &pdev->dev; 2083 struct device *dev = &pdev->dev;
2016 int num_irqs, i, err; 2084 int num_irqs, i, err;
@@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
2031 return err; 2099 return err;
2032 2100
2033 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2101 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2102 ioaddr = res->start;
2034 smmu->base = devm_ioremap_resource(dev, res); 2103 smmu->base = devm_ioremap_resource(dev, res);
2035 if (IS_ERR(smmu->base)) 2104 if (IS_ERR(smmu->base))
2036 return PTR_ERR(smmu->base); 2105 return PTR_ERR(smmu->base);
@@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
2091 } 2160 }
2092 } 2161 }
2093 2162
2094 iommu_register_instance(dev->fwnode, &arm_smmu_ops); 2163 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2164 "smmu.%pa", &ioaddr);
2165 if (err) {
2166 dev_err(dev, "Failed to register iommu in sysfs\n");
2167 return err;
2168 }
2169
2170 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2171 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2172
2173 err = iommu_device_register(&smmu->iommu);
2174 if (err) {
2175 dev_err(dev, "Failed to register iommu\n");
2176 return err;
2177 }
2178
2095 platform_set_drvdata(pdev, smmu); 2179 platform_set_drvdata(pdev, smmu);
2096 arm_smmu_device_reset(smmu); 2180 arm_smmu_device_reset(smmu);
2181 arm_smmu_test_smr_masks(smmu);
2097 2182
2098 /* Oh, for a proper bus abstraction */ 2183 /* Oh, for a proper bus abstraction */
2099 if (!iommu_present(&platform_bus_type)) 2184 if (!iommu_present(&platform_bus_type))
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2db0d641cf45..48d36ce59efb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
37 phys_addr_t phys; 37 phys_addr_t phys;
38}; 38};
39 39
40enum iommu_dma_cookie_type {
41 IOMMU_DMA_IOVA_COOKIE,
42 IOMMU_DMA_MSI_COOKIE,
43};
44
40struct iommu_dma_cookie { 45struct iommu_dma_cookie {
41 struct iova_domain iovad; 46 enum iommu_dma_cookie_type type;
42 struct list_head msi_page_list; 47 union {
43 spinlock_t msi_lock; 48 /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
49 struct iova_domain iovad;
50 /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
51 dma_addr_t msi_iova;
52 };
53 struct list_head msi_page_list;
54 spinlock_t msi_lock;
44}; 55};
45 56
57static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
58{
59 if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
60 return cookie->iovad.granule;
61 return PAGE_SIZE;
62}
63
46static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) 64static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
47{ 65{
48 return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; 66 struct iommu_dma_cookie *cookie = domain->iova_cookie;
67
68 if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
69 return &cookie->iovad;
70 return NULL;
71}
72
73static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
74{
75 struct iommu_dma_cookie *cookie;
76
77 cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
78 if (cookie) {
79 spin_lock_init(&cookie->msi_lock);
80 INIT_LIST_HEAD(&cookie->msi_page_list);
81 cookie->type = type;
82 }
83 return cookie;
49} 84}
50 85
51int iommu_dma_init(void) 86int iommu_dma_init(void)
@@ -62,25 +97,53 @@ int iommu_dma_init(void)
62 */ 97 */
63int iommu_get_dma_cookie(struct iommu_domain *domain) 98int iommu_get_dma_cookie(struct iommu_domain *domain)
64{ 99{
100 if (domain->iova_cookie)
101 return -EEXIST;
102
103 domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
104 if (!domain->iova_cookie)
105 return -ENOMEM;
106
107 return 0;
108}
109EXPORT_SYMBOL(iommu_get_dma_cookie);
110
111/**
112 * iommu_get_msi_cookie - Acquire just MSI remapping resources
113 * @domain: IOMMU domain to prepare
114 * @base: Start address of IOVA region for MSI mappings
115 *
116 * Users who manage their own IOVA allocation and do not want DMA API support,
117 * but would still like to take advantage of automatic MSI remapping, can use
118 * this to initialise their own domain appropriately. Users should reserve a
119 * contiguous IOVA region, starting at @base, large enough to accommodate the
120 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
121 * used by the devices attached to @domain.
122 */
123int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
124{
65 struct iommu_dma_cookie *cookie; 125 struct iommu_dma_cookie *cookie;
66 126
127 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
128 return -EINVAL;
129
67 if (domain->iova_cookie) 130 if (domain->iova_cookie)
68 return -EEXIST; 131 return -EEXIST;
69 132
70 cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); 133 cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
71 if (!cookie) 134 if (!cookie)
72 return -ENOMEM; 135 return -ENOMEM;
73 136
74 spin_lock_init(&cookie->msi_lock); 137 cookie->msi_iova = base;
75 INIT_LIST_HEAD(&cookie->msi_page_list);
76 domain->iova_cookie = cookie; 138 domain->iova_cookie = cookie;
77 return 0; 139 return 0;
78} 140}
79EXPORT_SYMBOL(iommu_get_dma_cookie); 141EXPORT_SYMBOL(iommu_get_msi_cookie);
80 142
81/** 143/**
82 * iommu_put_dma_cookie - Release a domain's DMA mapping resources 144 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
83 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() 145 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
146 * iommu_get_msi_cookie()
84 * 147 *
85 * IOMMU drivers should normally call this from their domain_free callback. 148 * IOMMU drivers should normally call this from their domain_free callback.
86 */ 149 */
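
iommu_get_msi_cookie() gives an unmanaged domain just enough state for MSI doorbells: instead of a full iova_domain, the MSI cookie is a trivial bump allocator (msi_iova) that hands out PAGE_SIZE slots starting at @base. A hedged usage sketch for a VFIO-style caller; MSI_IOVA_BASE stands in for whatever IOMMU_RESV_MSI region the caller discovered for its device:

        domain = iommu_domain_alloc(bus);       /* IOMMU_DOMAIN_UNMANAGED */
        if (!domain)
                return -ENOMEM;

        ret = iommu_get_msi_cookie(domain, MSI_IOVA_BASE);
        if (ret)
                goto out_free_domain;
        /* ... attach devices; MSI writes are now remapped via the cookie */
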
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
92 if (!cookie) 155 if (!cookie)
93 return; 156 return;
94 157
95 if (cookie->iovad.granule) 158 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
96 put_iova_domain(&cookie->iovad); 159 put_iova_domain(&cookie->iovad);
97 160
98 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { 161 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
137int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, 200int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
138 u64 size, struct device *dev) 201 u64 size, struct device *dev)
139{ 202{
140 struct iova_domain *iovad = cookie_iovad(domain); 203 struct iommu_dma_cookie *cookie = domain->iova_cookie;
204 struct iova_domain *iovad = &cookie->iovad;
141 unsigned long order, base_pfn, end_pfn; 205 unsigned long order, base_pfn, end_pfn;
206 bool pci = dev && dev_is_pci(dev);
142 207
143 if (!iovad) 208 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
144 return -ENODEV; 209 return -EINVAL;
145 210
146 /* Use the smallest supported page size for IOVA granularity */ 211 /* Use the smallest supported page size for IOVA granularity */
147 order = __ffs(domain->pgsize_bitmap); 212 order = __ffs(domain->pgsize_bitmap);
@@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
161 end_pfn = min_t(unsigned long, end_pfn, 226 end_pfn = min_t(unsigned long, end_pfn,
162 domain->geometry.aperture_end >> order); 227 domain->geometry.aperture_end >> order);
163 } 228 }
229 /*
230 * PCI devices may have larger DMA masks, but still prefer allocating
231 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
232 * apply to the typical platform device, so for those we may as well
233 * leave the cache limit at the top of their range to save an rb_last()
234 * traversal on every allocation.
235 */
236 if (pci)
237 end_pfn &= DMA_BIT_MASK(32) >> order;
164 238
165 /* All we can safely do with an existing domain is enlarge it */ 239 /* start_pfn is always nonzero for an already-initialised domain */
166 if (iovad->start_pfn) { 240 if (iovad->start_pfn) {
167 if (1UL << order != iovad->granule || 241 if (1UL << order != iovad->granule ||
168 base_pfn != iovad->start_pfn || 242 base_pfn != iovad->start_pfn) {
169 end_pfn < iovad->dma_32bit_pfn) {
170 pr_warn("Incompatible range for DMA domain\n"); 243 pr_warn("Incompatible range for DMA domain\n");
171 return -EFAULT; 244 return -EFAULT;
172 } 245 }
173 iovad->dma_32bit_pfn = end_pfn; 246 /*
247 * If we have devices with different DMA masks, move the free
248 * area cache limit down for the benefit of the smaller one.
249 */
250 iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
174 } else { 251 } else {
175 init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); 252 init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
176 if (dev && dev_is_pci(dev)) 253 if (pci)
177 iova_reserve_pci_windows(to_pci_dev(dev), iovad); 254 iova_reserve_pci_windows(to_pci_dev(dev), iovad);
178 } 255 }
179 return 0; 256 return 0;
@@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
181EXPORT_SYMBOL(iommu_dma_init_domain); 258EXPORT_SYMBOL(iommu_dma_init_domain);
182 259
183/** 260/**
184 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags 261 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
262 * page flags.
185 * @dir: Direction of DMA transfer 263 * @dir: Direction of DMA transfer
186 * @coherent: Is the DMA master cache-coherent? 264 * @coherent: Is the DMA master cache-coherent?
265 * @attrs: DMA attributes for the mapping
187 * 266 *
188 * Return: corresponding IOMMU API page protection flags 267 * Return: corresponding IOMMU API page protection flags
189 */ 268 */
190int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) 269int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
270 unsigned long attrs)
191{ 271{
192 int prot = coherent ? IOMMU_CACHE : 0; 272 int prot = coherent ? IOMMU_CACHE : 0;
193 273
274 if (attrs & DMA_ATTR_PRIVILEGED)
275 prot |= IOMMU_PRIV;
276
194 switch (dir) { 277 switch (dir) {
195 case DMA_BIDIRECTIONAL: 278 case DMA_BIDIRECTIONAL:
196 return prot | IOMMU_READ | IOMMU_WRITE; 279 return prot | IOMMU_READ | IOMMU_WRITE;
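
dma_info_to_prot() extends the old direction-only translation with the mapping's DMA attributes; for now only DMA_ATTR_PRIVILEGED is consumed, turning into IOMMU_PRIV. A usage sketch:

        /* Coherent master, privileged, bidirectional mapping: */
        int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, true,
                                    DMA_ATTR_PRIVILEGED);
        /* prot == IOMMU_CACHE | IOMMU_PRIV | IOMMU_READ | IOMMU_WRITE */
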
@@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
204} 287}
205 288
206static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, 289static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
207 dma_addr_t dma_limit) 290 dma_addr_t dma_limit, struct device *dev)
208{ 291{
209 struct iova_domain *iovad = cookie_iovad(domain); 292 struct iova_domain *iovad = cookie_iovad(domain);
210 unsigned long shift = iova_shift(iovad); 293 unsigned long shift = iova_shift(iovad);
211 unsigned long length = iova_align(iovad, size) >> shift; 294 unsigned long length = iova_align(iovad, size) >> shift;
295 struct iova *iova = NULL;
212 296
213 if (domain->geometry.force_aperture) 297 if (domain->geometry.force_aperture)
214 dma_limit = min(dma_limit, domain->geometry.aperture_end); 298 dma_limit = min(dma_limit, domain->geometry.aperture_end);
299
300 /* Try to get PCI devices a SAC address */
301 if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
302 iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
303 true);
215 /* 304 /*
216 * Enforce size-alignment to be safe - there could perhaps be an 305 * Enforce size-alignment to be safe - there could perhaps be an
217 * attribute to control this per-device, or at least per-domain... 306 * attribute to control this per-device, or at least per-domain...
218 */ 307 */
219 return alloc_iova(iovad, length, dma_limit >> shift, true); 308 if (!iova)
309 iova = alloc_iova(iovad, length, dma_limit >> shift, true);
310
311 return iova;
220} 312}
221 313
222/* The IOVA allocator knows what we mapped, so just unmap whatever that was */ 314/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
@@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
369 if (!pages) 461 if (!pages)
370 return NULL; 462 return NULL;
371 463
372 iova = __alloc_iova(domain, size, dev->coherent_dma_mask); 464 iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
373 if (!iova) 465 if (!iova)
374 goto out_free_pages; 466 goto out_free_pages;
375 467
@@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
440 struct iova_domain *iovad = cookie_iovad(domain); 532 struct iova_domain *iovad = cookie_iovad(domain);
441 size_t iova_off = iova_offset(iovad, phys); 533 size_t iova_off = iova_offset(iovad, phys);
442 size_t len = iova_align(iovad, size + iova_off); 534 size_t len = iova_align(iovad, size + iova_off);
443 struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); 535 struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
444 536
445 if (!iova) 537 if (!iova)
446 return DMA_ERROR_CODE; 538 return DMA_ERROR_CODE;
@@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
598 prev = s; 690 prev = s;
599 } 691 }
600 692
601 iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); 693 iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
602 if (!iova) 694 if (!iova)
603 goto out_restore_sg; 695 goto out_restore_sg;
604 696
@@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
633 size_t size, enum dma_data_direction dir, unsigned long attrs) 725 size_t size, enum dma_data_direction dir, unsigned long attrs)
634{ 726{
635 return __iommu_dma_map(dev, phys, size, 727 return __iommu_dma_map(dev, phys, size,
636 dma_direction_to_prot(dir, false) | IOMMU_MMIO); 728 dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
637} 729}
638 730
639void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, 731void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
642 __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); 734 __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
643} 735}
644 736
645int iommu_dma_supported(struct device *dev, u64 mask)
646{
647 /*
648 * 'Special' IOMMUs which don't have the same addressing capability
649 * as the CPU will have to wait until we have some way to query that
650 * before they'll be able to use this framework.
651 */
652 return 1;
653}
654
655int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 737int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
656{ 738{
657 return dma_addr == DMA_ERROR_CODE; 739 return dma_addr == DMA_ERROR_CODE;
@@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
662{ 744{
663 struct iommu_dma_cookie *cookie = domain->iova_cookie; 745 struct iommu_dma_cookie *cookie = domain->iova_cookie;
664 struct iommu_dma_msi_page *msi_page; 746 struct iommu_dma_msi_page *msi_page;
665 struct iova_domain *iovad = &cookie->iovad; 747 struct iova_domain *iovad = cookie_iovad(domain);
666 struct iova *iova; 748 struct iova *iova;
667 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 749 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
750 size_t size = cookie_msi_granule(cookie);
668 751
669 msi_addr &= ~(phys_addr_t)iova_mask(iovad); 752 msi_addr &= ~(phys_addr_t)(size - 1);
670 list_for_each_entry(msi_page, &cookie->msi_page_list, list) 753 list_for_each_entry(msi_page, &cookie->msi_page_list, list)
671 if (msi_page->phys == msi_addr) 754 if (msi_page->phys == msi_addr)
672 return msi_page; 755 return msi_page;
@@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
675 if (!msi_page) 758 if (!msi_page)
676 return NULL; 759 return NULL;
677 760
678 iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
679 if (!iova)
680 goto out_free_page;
681
682 msi_page->phys = msi_addr; 761 msi_page->phys = msi_addr;
683 msi_page->iova = iova_dma_addr(iovad, iova); 762 if (iovad) {
684 if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) 763 iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
764 if (!iova)
765 goto out_free_page;
766 msi_page->iova = iova_dma_addr(iovad, iova);
767 } else {
768 msi_page->iova = cookie->msi_iova;
769 cookie->msi_iova += size;
770 }
771
772 if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
685 goto out_free_iova; 773 goto out_free_iova;
686 774
687 INIT_LIST_HEAD(&msi_page->list); 775 INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
689 return msi_page; 777 return msi_page;
690 778
691out_free_iova: 779out_free_iova:
692 __free_iova(iovad, iova); 780 if (iovad)
781 __free_iova(iovad, iova);
782 else
783 cookie->msi_iova -= size;
693out_free_page: 784out_free_page:
694 kfree(msi_page); 785 kfree(msi_page);
695 return NULL; 786 return NULL;
@@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
730 msg->data = ~0U; 821 msg->data = ~0U;
731 } else { 822 } else {
732 msg->address_hi = upper_32_bits(msi_page->iova); 823 msg->address_hi = upper_32_bits(msi_page->iova);
733 msg->address_lo &= iova_mask(&cookie->iovad); 824 msg->address_lo &= cookie_msi_granule(cookie) - 1;
734 msg->address_lo += lower_32_bits(msi_page->iova); 825 msg->address_lo += lower_32_bits(msi_page->iova);
735 } 826 }
736} 827}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8ccbd7023194..d9c0decfc91a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
74static int alloc_iommu(struct dmar_drhd_unit *drhd); 74static int alloc_iommu(struct dmar_drhd_unit *drhd);
75static void free_iommu(struct intel_iommu *iommu); 75static void free_iommu(struct intel_iommu *iommu);
76 76
77extern const struct iommu_ops intel_iommu_ops;
78
77static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) 79static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
78{ 80{
79 /* 81 /*
@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
1078 raw_spin_lock_init(&iommu->register_lock); 1080 raw_spin_lock_init(&iommu->register_lock);
1079 1081
1080 if (intel_iommu_enabled) { 1082 if (intel_iommu_enabled) {
1081 iommu->iommu_dev = iommu_device_create(NULL, iommu, 1083 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1082 intel_iommu_groups, 1084 intel_iommu_groups,
1083 "%s", iommu->name); 1085 "%s", iommu->name);
1086 if (err)
1087 goto err_unmap;
1084 1088
1085 if (IS_ERR(iommu->iommu_dev)) { 1089 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
1086 err = PTR_ERR(iommu->iommu_dev); 1090
1091 err = iommu_device_register(&iommu->iommu);
1092 if (err)
1087 goto err_unmap; 1093 goto err_unmap;
1088 }
1089 } 1094 }
1090 1095
1091 drhd->iommu = iommu; 1096 drhd->iommu = iommu;
@@ -1103,7 +1108,8 @@ error:
1103 1108
1104static void free_iommu(struct intel_iommu *iommu) 1109static void free_iommu(struct intel_iommu *iommu)
1105{ 1110{
1106 iommu_device_destroy(iommu->iommu_dev); 1111 iommu_device_sysfs_remove(&iommu->iommu);
1112 iommu_device_unregister(&iommu->iommu);
1107 1113
1108 if (iommu->irq) { 1114 if (iommu->irq) {
1109 if (iommu->pr_irq) { 1115 if (iommu->pr_irq) {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 57ba0d3091ea..a7e0821c9967 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -276,6 +276,8 @@ struct sysmmu_drvdata {
276 struct list_head owner_node; /* node for owner controllers list */ 276 struct list_head owner_node; /* node for owner controllers list */
277 phys_addr_t pgtable; /* assigned page table structure */ 277 phys_addr_t pgtable; /* assigned page table structure */
278 unsigned int version; /* our version */ 278 unsigned int version; /* our version */
279
280 struct iommu_device iommu; /* IOMMU core handle */
279}; 281};
280 282
281static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) 283static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data,
381{ 383{
382 sysmmu_pte_t *ent; 384 sysmmu_pte_t *ent;
383 385
384 dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n", 386 dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
385 finfo->name, fault_addr, &data->pgtable); 387 dev_name(data->master), finfo->name, fault_addr);
388 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
386 ent = section_entry(phys_to_virt(data->pgtable), fault_addr); 389 ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
387 dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent); 390 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
388 if (lv1ent_page(ent)) { 391 if (lv1ent_page(ent)) {
389 ent = page_entry(ent, fault_addr); 392 ent = page_entry(ent, fault_addr);
390 dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); 393 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
391 } 394 }
392} 395}
393 396
@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
611 data->sysmmu = dev; 614 data->sysmmu = dev;
612 spin_lock_init(&data->lock); 615 spin_lock_init(&data->lock);
613 616
617 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
618 dev_name(data->sysmmu));
619 if (ret)
620 return ret;
621
622 iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
623 iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
624
625 ret = iommu_device_register(&data->iommu);
626 if (ret)
627 return ret;
628
614 platform_set_drvdata(pdev, data); 629 platform_set_drvdata(pdev, data);
615 630
616 __sysmmu_get_version(data); 631 __sysmmu_get_version(data);
@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
628 643
629 pm_runtime_enable(dev); 644 pm_runtime_enable(dev);
630 645
631 of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
632
633 return 0; 646 return 0;
634} 647}
635 648
@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
743 DMA_TO_DEVICE); 756 DMA_TO_DEVICE);
744 /* For mapping page table entries we rely on dma == phys */ 757 /* For mapping page table entries we rely on dma == phys */
745 BUG_ON(handle != virt_to_phys(domain->pgtable)); 758 BUG_ON(handle != virt_to_phys(domain->pgtable));
759 if (dma_mapping_error(dma_dev, handle))
760 goto err_lv2ent;
746 761
747 spin_lock_init(&domain->lock); 762 spin_lock_init(&domain->lock);
748 spin_lock_init(&domain->pgtablelock); 763 spin_lock_init(&domain->pgtablelock);
@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
754 769
755 return &domain->domain; 770 return &domain->domain;
756 771
772err_lv2ent:
773 free_pages((unsigned long)domain->lv2entcnt, 1);
757err_counter: 774err_counter:
758 free_pages((unsigned long)domain->pgtable, 2); 775 free_pages((unsigned long)domain->pgtable, 2);
759err_dma_cookie: 776err_dma_cookie:
@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
897 } 914 }
898 915
899 if (lv1ent_fault(sent)) { 916 if (lv1ent_fault(sent)) {
917 dma_addr_t handle;
900 sysmmu_pte_t *pent; 918 sysmmu_pte_t *pent;
901 bool need_flush_flpd_cache = lv1ent_zero(sent); 919 bool need_flush_flpd_cache = lv1ent_zero(sent);
902 920
@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
908 update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); 926 update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
909 kmemleak_ignore(pent); 927 kmemleak_ignore(pent);
910 *pgcounter = NUM_LV2ENTRIES; 928 *pgcounter = NUM_LV2ENTRIES;
911 dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE); 929 handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
930 DMA_TO_DEVICE);
931 if (dma_mapping_error(dma_dev, handle)) {
932 kmem_cache_free(lv2table_kmem_cache, pent);
933 return ERR_PTR(-EADDRINUSE);
934 }
912 935
913 /* 936 /*
914 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, 937 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
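
Both exynos hunks above add the previously missing dma_mapping_error() checks: dma_map_single() may fail (under SWIOTLB, CONFIG_DMA_API_DEBUG, and so on) and its result must be validated before the address is used. The general idiom, sketched outside this driver:

        dma_addr_t handle;

        handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle)) {
                /* handle is not a usable DMA address: unwind and bail */
                kfree(buf);
                return -ENOMEM;
        }
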
@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev)
 
 static void exynos_iommu_remove_device(struct device *dev)
 {
+	struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
 	if (!has_sysmmu(dev))
 		return;
 
+	if (owner->domain) {
+		struct iommu_group *group = iommu_group_get(dev);
+
+		if (group) {
+			WARN_ON(owner->domain !=
+				iommu_group_default_domain(group));
+			exynos_iommu_detach_device(owner->domain, dev);
+			iommu_group_put(group);
+		}
+	}
 	iommu_group_remove_device(dev);
 }
 
@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 {
 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
 	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
-	struct sysmmu_drvdata *data;
+	struct sysmmu_drvdata *data, *entry;
 
 	if (!sysmmu)
 		return -ENODEV;
@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev,
 		dev->archdata.iommu = owner;
 	}
 
+	list_for_each_entry(entry, &owner->controllers, owner_node)
+		if (entry == data)
+			return 0;
+
 	list_add_tail(&data->owner_node, &owner->controllers);
 	data->master = dev;
 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5d179c8765c6..f5e02f8e7371 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
 	u64	end_address;		/* reserved end address */
 	struct dmar_dev_scope *devices;	/* target devices */
 	int	devices_cnt;		/* target device count */
+	struct iommu_resv_region *resv;	/* reserved region handle */
 };
 
 struct dmar_atsr_unit {
@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
-static const struct iommu_ops intel_iommu_ops;
+const struct iommu_ops intel_iommu_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
 {
@@ -4247,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {}
 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
+	int prot = DMA_PTE_READ|DMA_PTE_WRITE;
 	struct dmar_rmrr_unit *rmrru;
+	size_t length;
 
 	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
 	if (!rmrru)
-		return -ENOMEM;
+		goto out;
 
 	rmrru->hdr = header;
 	rmrr = (struct acpi_dmar_reserved_memory *)header;
 	rmrru->base_address = rmrr->base_address;
 	rmrru->end_address = rmrr->end_address;
+
+	length = rmrr->end_address - rmrr->base_address + 1;
+	rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+					      IOMMU_RESV_DIRECT);
+	if (!rmrru->resv)
+		goto free_rmrru;
+
 	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
 				((void *)rmrr) + rmrr->header.length,
 				&rmrru->devices_cnt);
-	if (rmrru->devices_cnt && rmrru->devices == NULL) {
-		kfree(rmrru);
-		return -ENOMEM;
-	}
+	if (rmrru->devices_cnt && rmrru->devices == NULL)
+		goto free_all;
 
 	list_add(&rmrru->list, &dmar_rmrr_units);
 
 	return 0;
+free_all:
+	kfree(rmrru->resv);
+free_rmrru:
+	kfree(rmrru);
+out:
+	return -ENOMEM;
 }
 
 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4481,6 +4495,7 @@ static void intel_iommu_free_dmars(void)
 	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
 		list_del(&rmrru->list);
 		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+		kfree(rmrru->resv);
 		kfree(rmrru);
 	}
 
@@ -4854,10 +4869,13 @@ int __init intel_iommu_init(void)
 
 	init_iommu_pm_ops();
 
-	for_each_active_iommu(iommu, drhd)
-		iommu->iommu_dev = iommu_device_create(NULL, iommu,
-						       intel_iommu_groups,
-						       "%s", iommu->name);
+	for_each_active_iommu(iommu, drhd) {
+		iommu_device_sysfs_add(&iommu->iommu, NULL,
+				       intel_iommu_groups,
+				       "%s", iommu->name);
+		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+		iommu_device_register(&iommu->iommu);
+	}
 
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 	bus_register_notifier(&pci_bus_type, &device_nb);
@@ -5179,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev)
 	if (!iommu)
 		return -ENODEV;
 
-	iommu_device_link(iommu->iommu_dev, dev);
+	iommu_device_link(&iommu->iommu, dev);
 
 	group = iommu_group_get_for_dev(dev);
 
@@ -5201,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev)
 
 	iommu_group_remove_device(dev);
 
-	iommu_device_unlink(iommu->iommu_dev, dev);
+	iommu_device_unlink(&iommu->iommu, dev);
+}
+
+static void intel_iommu_get_resv_regions(struct device *device,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *reg;
+	struct dmar_rmrr_unit *rmrr;
+	struct device *i_dev;
+	int i;
+
+	rcu_read_lock();
+	for_each_rmrr_units(rmrr) {
+		for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+					  i, i_dev) {
+			if (i_dev != device)
+				continue;
+
+			list_add_tail(&rmrr->resv->list, head);
+		}
+	}
+	rcu_read_unlock();
+
+	reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+				      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+				      0, IOMMU_RESV_RESERVED);
+	if (!reg)
+		return;
+	list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+					 struct list_head *head)
+{
+	struct iommu_resv_region *entry, *next;
+
+	list_for_each_entry_safe(entry, next, head, list) {
+		if (entry->type == IOMMU_RESV_RESERVED)
+			kfree(entry);
+	}
 }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -5333,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 }
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
-static const struct iommu_ops intel_iommu_ops = {
+const struct iommu_ops intel_iommu_ops = {
 	.capable		= intel_iommu_capable,
 	.domain_alloc		= intel_iommu_domain_alloc,
 	.domain_free		= intel_iommu_domain_free,
 	.attach_dev		= intel_iommu_attach_device,
 	.detach_dev		= intel_iommu_detach_device,
 	.map			= intel_iommu_map,
 	.unmap			= intel_iommu_unmap,
 	.map_sg			= default_iommu_map_sg,
 	.iova_to_phys		= intel_iommu_iova_to_phys,
 	.add_device		= intel_iommu_add_device,
 	.remove_device		= intel_iommu_remove_device,
-	.device_group		= pci_device_group,
-	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
+	.get_resv_regions	= intel_iommu_get_resv_regions,
+	.put_resv_regions	= intel_iommu_put_resv_regions,
+	.device_group		= pci_device_group,
+	.pgsize_bitmap		= INTEL_IOMMU_PGSIZES,
 };
 
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 0769276c0537..1c049e2e12bf 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
 	if (!(prot & IOMMU_MMIO))
 		pte |= ARM_V7S_ATTR_TEX(1);
 	if (ap) {
-		pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
+		pte |= ARM_V7S_PTE_AF;
+		if (!(prot & IOMMU_PRIV))
+			pte |= ARM_V7S_PTE_AP_UNPRIV;
 		if (!(prot & IOMMU_WRITE))
 			pte |= ARM_V7S_PTE_AP_RDONLY;
 	}
@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
 
 	if (!(attr & ARM_V7S_PTE_AP_RDONLY))
 		prot |= IOMMU_WRITE;
+	if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
+		prot |= IOMMU_PRIV;
 	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
 		prot |= IOMMU_MMIO;
 	else if (pte & ARM_V7S_ATTR_C)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a40ce3406fef..feacc54bec68 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 
 	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 	    data->iop.fmt == ARM_32_LPAE_S1) {
-		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+		pte = ARM_LPAE_PTE_nG;
 
 		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 			pte |= ARM_LPAE_PTE_AP_RDONLY;
 
+		if (!(prot & IOMMU_PRIV))
+			pte |= ARM_LPAE_PTE_AP_UNPRIV;
+
 		if (prot & IOMMU_MMIO)
 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index 39b2d9127dbf..c58351ed61c1 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void)
 postcore_initcall(iommu_dev_init);
 
 /*
- * Create an IOMMU device and return a pointer to it. IOMMU specific
- * attributes can be provided as an attribute group, allowing a unique
- * namespace per IOMMU type.
+ * Init the struct device for the IOMMU. IOMMU specific attributes can
+ * be provided as an attribute group, allowing a unique namespace per
+ * IOMMU type.
  */
-struct device *iommu_device_create(struct device *parent, void *drvdata,
-				   const struct attribute_group **groups,
-				   const char *fmt, ...)
+int iommu_device_sysfs_add(struct iommu_device *iommu,
+			   struct device *parent,
+			   const struct attribute_group **groups,
+			   const char *fmt, ...)
 {
-	struct device *dev;
 	va_list vargs;
 	int ret;
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
+	device_initialize(&iommu->dev);
 
-	device_initialize(dev);
-
-	dev->class = &iommu_class;
-	dev->parent = parent;
-	dev->groups = groups;
-	dev_set_drvdata(dev, drvdata);
+	iommu->dev.class = &iommu_class;
+	iommu->dev.parent = parent;
+	iommu->dev.groups = groups;
 
 	va_start(vargs, fmt);
-	ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+	ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
 	va_end(vargs);
 	if (ret)
 		goto error;
 
-	ret = device_add(dev);
+	ret = device_add(&iommu->dev);
 	if (ret)
 		goto error;
 
-	return dev;
+	return 0;
 
 error:
-	put_device(dev);
-	return ERR_PTR(ret);
+	put_device(&iommu->dev);
+	return ret;
 }
 
-void iommu_device_destroy(struct device *dev)
+void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
-	if (!dev || IS_ERR(dev))
-		return;
-
-	device_unregister(dev);
+	device_unregister(&iommu->dev);
 }
-
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
  * this interface. A link to the device will be created in the "devices"
  * directory of the IOMMU device in sysfs and an "iommu" link will be
  * created under the linked device, pointing back at the IOMMU device.
  */
-int iommu_device_link(struct device *dev, struct device *link)
+int iommu_device_link(struct iommu_device *iommu, struct device *link)
 {
 	int ret;
 
-	if (!dev || IS_ERR(dev))
+	if (!iommu || IS_ERR(iommu))
 		return -ENODEV;
 
-	ret = sysfs_add_link_to_group(&dev->kobj, "devices",
+	ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
 				      &link->kobj, dev_name(link));
 	if (ret)
 		return ret;
 
-	ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
+	ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
 	if (ret)
-		sysfs_remove_link_from_group(&dev->kobj, "devices",
+		sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
 					     dev_name(link));
 
 	return ret;
 }
 
-void iommu_device_unlink(struct device *dev, struct device *link)
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 {
-	if (!dev || IS_ERR(dev))
+	if (!iommu || IS_ERR(iommu))
 		return;
 
 	sysfs_remove_link(&link->kobj, "iommu");
-	sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
+	sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
 }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dbe7f653bb7c..8ea14f41a979 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -55,7 +55,7 @@ struct iommu_group {
 	struct iommu_domain *domain;
 };
 
-struct iommu_device {
+struct group_device {
 	struct list_head list;
 	struct device *dev;
 	char *name;
@@ -68,6 +68,12 @@ struct iommu_group_attribute {
 			 const char *buf, size_t count);
 };
 
+static const char * const iommu_group_resv_type_string[] = {
+	[IOMMU_RESV_DIRECT]	= "direct",
+	[IOMMU_RESV_RESERVED]	= "reserved",
+	[IOMMU_RESV_MSI]	= "msi",
+};
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
 struct iommu_group_attribute iommu_group_attr_##_name =	\
 	__ATTR(_name, _mode, _show, _store)
@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
 #define to_iommu_group(_kobj)		\
 	container_of(_kobj, struct iommu_group, kobj)
 
+static LIST_HEAD(iommu_device_list);
+static DEFINE_SPINLOCK(iommu_device_lock);
+
+int iommu_device_register(struct iommu_device *iommu)
+{
+	spin_lock(&iommu_device_lock);
+	list_add_tail(&iommu->list, &iommu_device_list);
+	spin_unlock(&iommu_device_lock);
+
+	return 0;
+}
+
+void iommu_device_unregister(struct iommu_device *iommu)
+{
+	spin_lock(&iommu_device_lock);
+	list_del(&iommu->list);
+	spin_unlock(&iommu_device_lock);
+}
+
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
 						 unsigned type);
 static int __iommu_attach_device(struct iommu_domain *domain,
@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 	return sprintf(buf, "%s\n", group->name);
 }
 
+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is sorted by address with respect to the other
+ * regions of the same type. In case it overlaps with another
+ * region of the same type, regions are merged. In case it
+ * overlaps with another region of different type, regions are
+ * not merged.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+				    struct list_head *regions)
+{
+	struct iommu_resv_region *region;
+	phys_addr_t start = new->start;
+	phys_addr_t end = new->start + new->length - 1;
+	struct list_head *pos = regions->next;
+
+	while (pos != regions) {
+		struct iommu_resv_region *entry =
+			list_entry(pos, struct iommu_resv_region, list);
+		phys_addr_t a = entry->start;
+		phys_addr_t b = entry->start + entry->length - 1;
+		int type = entry->type;
+
+		if (end < a) {
+			goto insert;
+		} else if (start > b) {
+			pos = pos->next;
+		} else if ((start >= a) && (end <= b)) {
+			if (new->type == type)
+				goto done;
+			else
+				pos = pos->next;
+		} else {
+			if (new->type == type) {
+				phys_addr_t new_start = min(a, start);
+				phys_addr_t new_end = max(b, end);
+
+				list_del(&entry->list);
+				entry->start = new_start;
+				entry->length = new_end - new_start + 1;
+				iommu_insert_resv_region(entry, regions);
+			} else {
+				pos = pos->next;
+			}
+		}
+	}
+insert:
+	region = iommu_alloc_resv_region(new->start, new->length,
+					 new->prot, new->type);
+	if (!region)
+		return -ENOMEM;
+
+	list_add_tail(&region->list, pos);
+done:
+	return 0;
+}
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+				 struct list_head *group_resv_regions)
+{
+	struct iommu_resv_region *entry;
+	int ret = 0;
+
+	list_for_each_entry(entry, dev_resv_regions, list) {
+		ret = iommu_insert_resv_region(entry, group_resv_regions);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+				 struct list_head *head)
+{
+	struct group_device *device;
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(device, &group->devices, list) {
+		struct list_head dev_resv_regions;
+
+		INIT_LIST_HEAD(&dev_resv_regions);
+		iommu_get_resv_regions(device->dev, &dev_resv_regions);
+		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+		iommu_put_resv_regions(device->dev, &dev_resv_regions);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+					     char *buf)
+{
+	struct iommu_resv_region *region, *next;
+	struct list_head group_resv_regions;
+	char *str = buf;
+
+	INIT_LIST_HEAD(&group_resv_regions);
+	iommu_get_group_resv_regions(group, &group_resv_regions);
+
+	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+			       (long long int)region->start,
+			       (long long int)(region->start +
+						region->length - 1),
+			       iommu_group_resv_type_string[region->type]);
+		kfree(region);
+	}
+
+	return (str - buf);
+}
+
 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 
+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+			iommu_group_show_resv_regions, NULL);
+
 static void iommu_group_release(struct kobject *kobj)
 {
 	struct iommu_group *group = to_iommu_group(kobj);
@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void)
 	 */
 	kobject_put(&group->kobj);
 
+	ret = iommu_group_create_file(group,
+				      &iommu_group_attr_reserved_regions);
+	if (ret)
+		return ERR_PTR(ret);
+
 	pr_debug("Allocated group %d\n", group->id);
 
 	return group;
@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 					      struct device *dev)
 {
 	struct iommu_domain *domain = group->default_domain;
-	struct iommu_dm_region *entry;
+	struct iommu_resv_region *entry;
 	struct list_head mappings;
 	unsigned long pg_size;
 	int ret = 0;
@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 	INIT_LIST_HEAD(&mappings);
 
-	iommu_get_dm_regions(dev, &mappings);
+	iommu_get_resv_regions(dev, &mappings);
 
 	/* We need to consider overlapping regions for different devices */
 	list_for_each_entry(entry, &mappings, list) {
 		dma_addr_t start, end, addr;
 
-		if (domain->ops->apply_dm_region)
-			domain->ops->apply_dm_region(dev, domain, entry);
+		if (domain->ops->apply_resv_region)
+			domain->ops->apply_resv_region(dev, domain, entry);
 
 		start = ALIGN(entry->start, pg_size);
 		end = ALIGN(entry->start + entry->length, pg_size);
 
+		if (entry->type != IOMMU_RESV_DIRECT)
+			continue;
+
 		for (addr = start; addr < end; addr += pg_size) {
 			phys_addr_t phys_addr;
 
@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 	}
 
 out:
-	iommu_put_dm_regions(dev, &mappings);
+	iommu_put_resv_regions(dev, &mappings);
 
 	return ret;
 }
@@ -374,7 +530,7 @@ out:
 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 {
 	int ret, i = 0;
-	struct iommu_device *device;
+	struct group_device *device;
 
 	device = kzalloc(sizeof(*device), GFP_KERNEL);
 	if (!device)
@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 	device->dev = dev;
 
 	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
-	if (ret) {
-		kfree(device);
-		return ret;
-	}
+	if (ret)
+		goto err_free_device;
 
 	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
 rename:
 	if (!device->name) {
-		sysfs_remove_link(&dev->kobj, "iommu_group");
-		kfree(device);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_remove_link;
 	}
 
 	ret = sysfs_create_link_nowarn(group->devices_kobj,
 				       &dev->kobj, device->name);
 	if (ret) {
-		kfree(device->name);
 		if (ret == -EEXIST && i >= 0) {
 			/*
 			 * Account for the slim chance of collision
 			 * and append an instance to the name.
 			 */
+			kfree(device->name);
 			device->name = kasprintf(GFP_KERNEL, "%s.%d",
 						 kobject_name(&dev->kobj), i++);
 			goto rename;
 		}
-
-		sysfs_remove_link(&dev->kobj, "iommu_group");
-		kfree(device);
-		return ret;
+		goto err_free_name;
 	}
 
 	kobject_get(group->devices_kobj);
@@ -424,8 +574,10 @@ rename:
 	mutex_lock(&group->mutex);
 	list_add_tail(&device->list, &group->devices);
 	if (group->domain)
-		__iommu_attach_device(group->domain, dev);
+		ret = __iommu_attach_device(group->domain, dev);
 	mutex_unlock(&group->mutex);
+	if (ret)
+		goto err_put_group;
 
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
@@ -436,6 +588,21 @@ rename:
 	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
 
 	return 0;
+
+err_put_group:
+	mutex_lock(&group->mutex);
+	list_del(&device->list);
+	mutex_unlock(&group->mutex);
+	dev->iommu_group = NULL;
+	kobject_put(group->devices_kobj);
+err_free_name:
+	kfree(device->name);
+err_remove_link:
+	sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+	kfree(device);
+	pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
 
@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
 void iommu_group_remove_device(struct device *dev)
 {
 	struct iommu_group *group = dev->iommu_group;
-	struct iommu_device *tmp_device, *device = NULL;
+	struct group_device *tmp_device, *device = NULL;
 
 	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
 
@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);
 
 static int iommu_group_device_count(struct iommu_group *group)
 {
-	struct iommu_device *entry;
+	struct group_device *entry;
 	int ret = 0;
 
 	list_for_each_entry(entry, &group->devices, list)
@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group)
 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
 				      int (*fn)(struct device *, void *))
 {
-	struct iommu_device *device;
+	struct group_device *device;
 	int ret = 0;
 
 	list_for_each_entry(device, &group->devices, list) {
@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
 
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-	if (ops && ops->get_dm_regions)
-		ops->get_dm_regions(dev, list);
+	if (ops && ops->get_resv_regions)
+		ops->get_resv_regions(dev, list);
 }
 
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 
-	if (ops && ops->put_dm_regions)
-		ops->put_dm_regions(dev, list);
+	if (ops && ops->put_resv_regions)
+		ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+						  size_t length,
+						  int prot, int type)
+{
+	struct iommu_resv_region *region;
+
+	region = kzalloc(sizeof(*region), GFP_KERNEL);
+	if (!region)
+		return NULL;
+
+	INIT_LIST_HEAD(&region->list);
+	region->start = start;
+	region->length = length;
+	region->prot = prot;
+	region->type = type;
+	return region;
 }
 
 /* Request that a device is direct mapped by the IOMMU */
@@ -1628,43 +1813,18 @@ out:
 	return ret;
 }
 
-struct iommu_instance {
-	struct list_head list;
-	struct fwnode_handle *fwnode;
-	const struct iommu_ops *ops;
-};
-static LIST_HEAD(iommu_instance_list);
-static DEFINE_SPINLOCK(iommu_instance_lock);
-
-void iommu_register_instance(struct fwnode_handle *fwnode,
-			     const struct iommu_ops *ops)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
-	struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
-	if (WARN_ON(!iommu))
-		return;
-
-	of_node_get(to_of_node(fwnode));
-	INIT_LIST_HEAD(&iommu->list);
-	iommu->fwnode = fwnode;
-	iommu->ops = ops;
-	spin_lock(&iommu_instance_lock);
-	list_add_tail(&iommu->list, &iommu_instance_list);
-	spin_unlock(&iommu_instance_lock);
-}
-
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
-{
-	struct iommu_instance *instance;
 	const struct iommu_ops *ops = NULL;
+	struct iommu_device *iommu;
 
-	spin_lock(&iommu_instance_lock);
-	list_for_each_entry(instance, &iommu_instance_list, list)
-		if (instance->fwnode == fwnode) {
-			ops = instance->ops;
+	spin_lock(&iommu_device_lock);
+	list_for_each_entry(iommu, &iommu_device_list, list)
+		if (iommu->fwnode == fwnode) {
+			ops = iommu->ops;
 			break;
 		}
-	spin_unlock(&iommu_instance_lock);
+	spin_unlock(&iommu_device_lock);
 	return ops;
 }
 
@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
 		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
 		if (!fwspec)
 			return -ENOMEM;
+
+		dev->iommu_fwspec = fwspec;
 	}
 
 	for (i = 0; i < num_ids; i++)
 		fwspec->ids[fwspec->num_ids + i] = ids[i];
 
 	fwspec->num_ids += num_ids;
-	dev->iommu_fwspec = fwspec;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 080beca0197d..b7268a14184f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 	else {
 		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
 		struct iova *curr_iova =
-			container_of(iovad->cached32_node, struct iova, node);
+			rb_entry(iovad->cached32_node, struct iova, node);
 		*limit_pfn = curr_iova->pfn_lo - 1;
 		return prev_node;
 	}
@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	if (!iovad->cached32_node)
 		return;
 	curr = iovad->cached32_node;
-	cached_iova = container_of(curr, struct iova, node);
+	cached_iova = rb_entry(curr, struct iova, node);
 
 	if (free->pfn_lo >= cached_iova->pfn_lo) {
 		struct rb_node *node = rb_next(&free->node);
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 
 		/* only cache if it's below 32bit pfn */
 		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	curr = __get_cached_rbnode(iovad, &limit_pfn);
 	prev = curr;
 	while (curr) {
-		struct iova *curr_iova = container_of(curr, struct iova, node);
+		struct iova *curr_iova = rb_entry(curr, struct iova, node);
 
 		if (limit_pfn < curr_iova->pfn_lo)
 			goto move_left;
@@ -171,8 +171,7 @@ move_left:
 
 	/* Figure out where to put new node */
 	while (*entry) {
-		struct iova *this = container_of(*entry,
-						struct iova, node);
+		struct iova *this = rb_entry(*entry, struct iova, node);
 		parent = *entry;
 
 		if (new->pfn_lo < this->pfn_lo)
@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
 	struct rb_node **new = &(root->rb_node), *parent = NULL;
 	/* Figure out where to put new node */
 	while (*new) {
-		struct iova *this = container_of(*new, struct iova, node);
+		struct iova *this = rb_entry(*new, struct iova, node);
 
 		parent = *new;
 
@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
 	assert_spin_locked(&iovad->iova_rbtree_lock);
 
 	while (node) {
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 
 		/* If pfn falls within iova's range, return iova */
 		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	node = rb_first(&iovad->rbroot);
 	while (node) {
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 
 		rb_erase(node, &iovad->rbroot);
 		free_iova_mem(iova);
@@ -477,7 +476,7 @@ static int
 __is_range_overlap(struct rb_node *node,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
-	struct iova *iova = container_of(node, struct iova, node);
+	struct iova *iova = rb_entry(node, struct iova, node);
 
 	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
 		return 1;
@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-			iova = container_of(node, struct iova, node);
+			iova = rb_entry(node, struct iova, node);
 			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
 			if ((pfn_lo >= iova->pfn_lo) &&
 				(pfn_hi <= iova->pfn_hi))
@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 
 	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *iova = rb_entry(node, struct iova, node);
 		struct iova *new_iova;
 
 		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ace331da6459..b7e14ee863f9 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	domain->cfg.ias = 32;
 	domain->cfg.oas = 40;
 	domain->cfg.tlb = &ipmmu_gather_ops;
+	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+	domain->io_domain.geometry.force_aperture = true;
 	/*
 	 * TODO: Add support for coherent walk through CCI with DVM and remove
 	 * cache handling. For now, delegate it to the io-pgtable code.
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index b09692bb5b0a..d0448353d501 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
 	return 0;
 }
 
+/* Must be called under msm_iommu_lock */
+static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
+{
+	struct msm_iommu_dev *iommu, *ret = NULL;
+	struct msm_iommu_ctx_dev *master;
+
+	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+		master = list_first_entry(&iommu->ctx_list,
+					  struct msm_iommu_ctx_dev,
+					  list);
+		if (master->of_node == dev->of_node) {
+			ret = iommu;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int msm_iommu_add_device(struct device *dev)
+{
+	struct msm_iommu_dev *iommu;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&msm_iommu_lock, flags);
+
+	iommu = find_iommu_for_dev(dev);
+	if (iommu)
+		iommu_device_link(&iommu->iommu, dev);
+	else
+		ret = -ENODEV;
+
+	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+	return ret;
+}
+
+static void msm_iommu_remove_device(struct device *dev)
+{
+	struct msm_iommu_dev *iommu;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_iommu_lock, flags);
+
+	iommu = find_iommu_for_dev(dev);
+	if (iommu)
+		iommu_device_unlink(&iommu->iommu, dev);
+
+	spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret = 0;
@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.unmap = msm_iommu_unmap,
 	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = msm_iommu_iova_to_phys,
+	.add_device = msm_iommu_add_device,
+	.remove_device = msm_iommu_remove_device,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 	.of_xlate = qcom_iommu_of_xlate,
 };
@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = {
 static int msm_iommu_probe(struct platform_device *pdev)
 {
 	struct resource *r;
+	resource_size_t ioaddr;
 	struct msm_iommu_dev *iommu;
 	int ret, par, val;
 
@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
 		ret = PTR_ERR(iommu->base);
 		goto fail;
 	}
+	ioaddr = r->start;
 
 	iommu->irq = platform_get_irq(pdev, 0);
 	if (iommu->irq < 0) {
@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	}
 
 	list_add(&iommu->dev_node, &qcom_iommu_devices);
-	of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
+				     "msm-smmu.%pa", &ioaddr);
+	if (ret) {
+		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
+		goto fail;
+	}
+
+	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
+	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+	ret = iommu_device_register(&iommu->iommu);
+	if (ret) {
+		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
+		goto fail;
+	}
 
 	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
 		iommu->base, iommu->irq, iommu->ncb);
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 4ca25d50d679..ae92d2779c42 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -19,6 +19,7 @@
 #define MSM_IOMMU_H
 
 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/clk.h>
 
 /* Sharability attributes of MSM IOMMU mappings */
@@ -68,6 +69,8 @@ struct msm_iommu_dev {
 	struct list_head dom_node;
 	struct list_head ctx_list;
 	DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
+
+	struct iommu_device iommu;
 };
 
 /**
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 1479c76ece9e..5d14cd15198d 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 
 static int mtk_iommu_add_device(struct device *dev)
 {
+	struct mtk_iommu_data *data;
 	struct iommu_group *group;
 
 	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
 		return -ENODEV; /* Not a iommu client device */
 
+	data = dev->iommu_fwspec->iommu_priv;
+	iommu_device_link(&data->iommu, dev);
+
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group))
 		return PTR_ERR(group);
@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev)
 
 static void mtk_iommu_remove_device(struct device *dev)
 {
+	struct mtk_iommu_data *data;
+
 	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
 		return;
 
+	data = dev->iommu_fwspec->iommu_priv;
+	iommu_device_unlink(&data->iommu, dev);
+
 	iommu_group_remove_device(dev);
 	iommu_fwspec_free(dev);
 }
@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 	struct mtk_iommu_data *data;
 	struct device *dev = &pdev->dev;
 	struct resource *res;
+	resource_size_t ioaddr;
 	struct component_match *match = NULL;
 	void *protect;
 	int i, larb_nr, ret;
@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 	data->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(data->base))
 		return PTR_ERR(data->base);
+	ioaddr = res->start;
 
 	data->irq = platform_get_irq(pdev, 0);
 	if (data->irq < 0)
@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+				     "mtk-iommu.%pa", &ioaddr);
+	if (ret)
+		return ret;
+
+	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
+
+	ret = iommu_device_register(&data->iommu);
+	if (ret)
+		return ret;
+
 	if (!iommu_present(&platform_bus_type))
 		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
 
@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
 {
 	struct mtk_iommu_data *data = platform_get_drvdata(pdev);
 
+	iommu_device_sysfs_remove(&data->iommu);
+	iommu_device_unregister(&data->iommu);
+
 	if (iommu_present(&platform_bus_type))
 		bus_set_iommu(&platform_bus_type, NULL);
 
@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np)
 		return ret;
 	}
 
-	of_iommu_set_ops(np, &mtk_iommu_ops);
 	return 0;
 }
 
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 50177f738e4e..2a28eadeea0e 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -47,6 +47,8 @@ struct mtk_iommu_data {
 	struct iommu_group		*m4u_group;
 	struct mtk_smi_iommu		smi_imu;      /* SMI larb iommu info */
 	bool                            enable_4GB;
+
+	struct iommu_device		iommu;
 };
 
 static inline int compare_of(struct device *dev, void *data)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 0f57ddc4ecc2..2683e9fc0dcf 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -127,7 +127,7 @@ static const struct iommu_ops
 			       "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
 		return NULL;
 
-	ops = of_iommu_get_ops(iommu_spec.np);
+	ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
 	if (!ops || !ops->of_xlate ||
 	    iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
 	    ops->of_xlate(&pdev->dev, &iommu_spec))
@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 					   "#iommu-cells", idx,
 					   &iommu_spec)) {
 		np = iommu_spec.np;
-		ops = of_iommu_get_ops(np);
+		ops = iommu_ops_from_fwnode(&np->fwnode);
 
 		if (!ops || !ops->of_xlate ||
 		    iommu_fwspec_init(dev, &np->fwnode, ops) ||
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 69b040f47d56..9d4fefc59827 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1642,6 +1642,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
 
 	inner_domain->parent = its_parent;
 	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
 	info->ops = &its_msi_domain_ops;
 	info->data = its;
 	inner_domain->host_data = info;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 23909804ffb8..0def99590d16 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
 		if (intmask & SDHCI_INT_RETUNE)
 			mmc_retune_needed(host->mmc);
 
-		if (intmask & SDHCI_INT_CARD_INT) {
+		if ((intmask & SDHCI_INT_CARD_INT) &&
+		    (host->ier & SDHCI_INT_CARD_INT)) {
 			sdhci_enable_sdio_irq_nolock(host, false);
 			host->thread_isr |= SDHCI_INT_CARD_INT;
 			result = IRQ_WAKE_THREAD;
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
1569 break; /* Better luck next round. */ 1604 break; /* Better luck next round. */
1570 np->rx_info[entry].mapping = 1605 np->rx_info[entry].mapping =
1571 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1606 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1607 if (pci_dma_mapping_error(np->pci_dev,
1608 np->rx_info[entry].mapping)) {
1609 dev_kfree_skb(skb);
1610 np->rx_info[entry].skb = NULL;
1611 break;
1612 }
1572 np->rx_ring[entry].rxaddr = 1613 np->rx_ring[entry].rxaddr =
1573 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); 1614 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1574 } 1615 }
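
The starfire hunks above all apply one pattern: every pci_map_single() is now followed by a pci_dma_mapping_error() check, with the skb freed (RX refill) or the already-mapped fragments unwound (TX) on failure. A minimal sketch of the map-then-check step, assuming a hypothetical helper name; the two PCI DMA calls are the real legacy API:

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    /* Minimal sketch; example_map_rx_buffer() is not a function in the
     * driver, just an illustration of the pattern added above.
     */
    static int example_map_rx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
                                     unsigned int len, dma_addr_t *mapping)
    {
            *mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(pdev, *mapping)) {
                    dev_kfree_skb(skb);     /* drop the buffer; retry later */
                    return -ENOMEM;
            }
            return 0;
    }
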
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80acc2da..baba2db9d9c2 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -43,13 +43,13 @@
43#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ 43#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
44#define MIN_RX_RING_SIZE 64 44#define MIN_RX_RING_SIZE 64
45#define MAX_RX_RING_SIZE 8192 45#define MAX_RX_RING_SIZE 8192
46#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ 46#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
47 * (bp)->rx_ring_size) 47 * (bp)->rx_ring_size)
48 48
49#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ 49#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
50#define MIN_TX_RING_SIZE 64 50#define MIN_TX_RING_SIZE 64
51#define MAX_TX_RING_SIZE 4096 51#define MAX_TX_RING_SIZE 4096
52#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ 52#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
53 * (bp)->tx_ring_size) 53 * (bp)->tx_ring_size)
54 54
55/* level of occupied TX descriptors under which we wake up TX process */ 55/* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
78 */ 78 */
79#define MACB_HALT_TIMEOUT 1230 79#define MACB_HALT_TIMEOUT 1230
80 80
 81/* The DMA buffer descriptor size may vary
 82 * depending on the hardware configuration.
 83 */
84static unsigned int macb_dma_desc_get_size(struct macb *bp)
85{
86#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
87 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
88 return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
89#endif
90 return sizeof(struct macb_dma_desc);
91}
92
93static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
94{
95#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 96 /* The DMA buffer descriptor is 4 words long (instead of 2 words)
 97 * on 64b GEM.
 98 */
99 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
100 idx <<= 1;
101#endif
102 return idx;
103}
104
105#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
106static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
107{
108 return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
109}
110#endif
111
81/* Ring buffer accessors */ 112/* Ring buffer accessors */
82static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 113static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
83{ 114{
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
87static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 118static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
88 unsigned int index) 119 unsigned int index)
89{ 120{
90 return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)]; 121 index = macb_tx_ring_wrap(queue->bp, index);
122 index = macb_adj_dma_desc_idx(queue->bp, index);
123 return &queue->tx_ring[index];
91} 124}
92 125
93static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 126static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
101 dma_addr_t offset; 134 dma_addr_t offset;
102 135
103 offset = macb_tx_ring_wrap(queue->bp, index) * 136 offset = macb_tx_ring_wrap(queue->bp, index) *
104 sizeof(struct macb_dma_desc); 137 macb_dma_desc_get_size(queue->bp);
105 138
106 return queue->tx_ring_dma + offset; 139 return queue->tx_ring_dma + offset;
107} 140}
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
113 146
114static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) 147static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
115{ 148{
116 return &bp->rx_ring[macb_rx_ring_wrap(bp, index)]; 149 index = macb_rx_ring_wrap(bp, index);
150 index = macb_adj_dma_desc_idx(bp, index);
151 return &bp->rx_ring[index];
117} 152}
118 153
119static void *macb_rx_buffer(struct macb *bp, unsigned int index) 154static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
560 } 595 }
561} 596}
562 597
563static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) 598static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
564{ 599{
565 desc->addr = (u32)addr;
566#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 600#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
567 desc->addrh = (u32)(addr >> 32); 601 struct macb_dma_desc_64 *desc_64;
602
603 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
604 desc_64 = macb_64b_desc(bp, desc);
605 desc_64->addrh = upper_32_bits(addr);
606 }
568#endif 607#endif
608 desc->addr = lower_32_bits(addr);
609}
610
611static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
612{
613 dma_addr_t addr = 0;
614#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
615 struct macb_dma_desc_64 *desc_64;
616
617 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
618 desc_64 = macb_64b_desc(bp, desc);
619 addr = ((u64)(desc_64->addrh) << 32);
620 }
621#endif
622 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
623 return addr;
569} 624}
570 625
571static void macb_tx_error_task(struct work_struct *work) 626static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
649 704
650 /* Set end of TX queue */ 705 /* Set end of TX queue */
651 desc = macb_tx_desc(queue, 0); 706 desc = macb_tx_desc(queue, 0);
652 macb_set_addr(desc, 0); 707 macb_set_addr(bp, desc, 0);
653 desc->ctrl = MACB_BIT(TX_USED); 708 desc->ctrl = MACB_BIT(TX_USED);
654 709
655 /* Make descriptor updates visible to hardware */ 710 /* Make descriptor updates visible to hardware */
656 wmb(); 711 wmb();
657 712
658 /* Reinitialize the TX desc queue */ 713 /* Reinitialize the TX desc queue */
659 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 714 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
660#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 715#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
661 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 716 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
717 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
662#endif 718#endif
663 /* Make TX ring reflect state of hardware */ 719 /* Make TX ring reflect state of hardware */
664 queue->tx_head = 0; 720 queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
750 unsigned int entry; 806 unsigned int entry;
751 struct sk_buff *skb; 807 struct sk_buff *skb;
752 dma_addr_t paddr; 808 dma_addr_t paddr;
809 struct macb_dma_desc *desc;
753 810
754 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, 811 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
755 bp->rx_ring_size) > 0) { 812 bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
759 rmb(); 816 rmb();
760 817
761 bp->rx_prepared_head++; 818 bp->rx_prepared_head++;
819 desc = macb_rx_desc(bp, entry);
762 820
763 if (!bp->rx_skbuff[entry]) { 821 if (!bp->rx_skbuff[entry]) {
764 /* allocate sk_buff for this free entry in ring */ 822 /* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
782 840
783 if (entry == bp->rx_ring_size - 1) 841 if (entry == bp->rx_ring_size - 1)
784 paddr |= MACB_BIT(RX_WRAP); 842 paddr |= MACB_BIT(RX_WRAP);
785 macb_set_addr(&(bp->rx_ring[entry]), paddr); 843 macb_set_addr(bp, desc, paddr);
786 bp->rx_ring[entry].ctrl = 0; 844 desc->ctrl = 0;
787 845
788 /* properly align Ethernet header */ 846 /* properly align Ethernet header */
789 skb_reserve(skb, NET_IP_ALIGN); 847 skb_reserve(skb, NET_IP_ALIGN);
790 } else { 848 } else {
791 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); 849 desc->addr &= ~MACB_BIT(RX_USED);
792 bp->rx_ring[entry].ctrl = 0; 850 desc->ctrl = 0;
793 } 851 }
794 } 852 }
795 853
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
835 bool rxused; 893 bool rxused;
836 894
837 entry = macb_rx_ring_wrap(bp, bp->rx_tail); 895 entry = macb_rx_ring_wrap(bp, bp->rx_tail);
838 desc = &bp->rx_ring[entry]; 896 desc = macb_rx_desc(bp, entry);
839 897
840 /* Make hw descriptor updates visible to CPU */ 898 /* Make hw descriptor updates visible to CPU */
841 rmb(); 899 rmb();
842 900
843 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 901 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
844 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 902 addr = macb_get_addr(bp, desc);
845#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
846 addr |= ((u64)(desc->addrh) << 32);
847#endif
848 ctrl = desc->ctrl; 903 ctrl = desc->ctrl;
849 904
850 if (!rxused) 905 if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
987static inline void macb_init_rx_ring(struct macb *bp) 1042static inline void macb_init_rx_ring(struct macb *bp)
988{ 1043{
989 dma_addr_t addr; 1044 dma_addr_t addr;
1045 struct macb_dma_desc *desc = NULL;
990 int i; 1046 int i;
991 1047
992 addr = bp->rx_buffers_dma; 1048 addr = bp->rx_buffers_dma;
993 for (i = 0; i < bp->rx_ring_size; i++) { 1049 for (i = 0; i < bp->rx_ring_size; i++) {
994 bp->rx_ring[i].addr = addr; 1050 desc = macb_rx_desc(bp, i);
995 bp->rx_ring[i].ctrl = 0; 1051 macb_set_addr(bp, desc, addr);
1052 desc->ctrl = 0;
996 addr += bp->rx_buffer_size; 1053 addr += bp->rx_buffer_size;
997 } 1054 }
998 bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP); 1055 desc->addr |= MACB_BIT(RX_WRAP);
999 bp->rx_tail = 0; 1056 bp->rx_tail = 0;
1000} 1057}
1001 1058
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
1008 1065
1009 for (tail = bp->rx_tail; budget > 0; tail++) { 1066 for (tail = bp->rx_tail; budget > 0; tail++) {
1010 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); 1067 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
1011 u32 addr, ctrl; 1068 u32 ctrl;
1012 1069
1013 /* Make hw descriptor updates visible to CPU */ 1070 /* Make hw descriptor updates visible to CPU */
1014 rmb(); 1071 rmb();
1015 1072
1016 addr = desc->addr;
1017 ctrl = desc->ctrl; 1073 ctrl = desc->ctrl;
1018 1074
1019 if (!(addr & MACB_BIT(RX_USED))) 1075 if (!(desc->addr & MACB_BIT(RX_USED)))
1020 break; 1076 break;
1021 1077
1022 if (ctrl & MACB_BIT(RX_SOF)) { 1078 if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1336 i = tx_head; 1392 i = tx_head;
1337 entry = macb_tx_ring_wrap(bp, i); 1393 entry = macb_tx_ring_wrap(bp, i);
1338 ctrl = MACB_BIT(TX_USED); 1394 ctrl = MACB_BIT(TX_USED);
1339 desc = &queue->tx_ring[entry]; 1395 desc = macb_tx_desc(queue, entry);
1340 desc->ctrl = ctrl; 1396 desc->ctrl = ctrl;
1341 1397
1342 if (lso_ctrl) { 1398 if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1358 i--; 1414 i--;
1359 entry = macb_tx_ring_wrap(bp, i); 1415 entry = macb_tx_ring_wrap(bp, i);
1360 tx_skb = &queue->tx_skb[entry]; 1416 tx_skb = &queue->tx_skb[entry];
1361 desc = &queue->tx_ring[entry]; 1417 desc = macb_tx_desc(queue, entry);
1362 1418
1363 ctrl = (u32)tx_skb->size; 1419 ctrl = (u32)tx_skb->size;
1364 if (eof) { 1420 if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1379 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1435 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1380 1436
1381 /* Set TX buffer descriptor */ 1437 /* Set TX buffer descriptor */
1382 macb_set_addr(desc, tx_skb->mapping); 1438 macb_set_addr(bp, desc, tx_skb->mapping);
1383 /* desc->addr must be visible to hardware before clearing 1439 /* desc->addr must be visible to hardware before clearing
1384 * 'TX_USED' bit in desc->ctrl. 1440 * 'TX_USED' bit in desc->ctrl.
1385 */ 1441 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
1586 if (!skb) 1642 if (!skb)
1587 continue; 1643 continue;
1588 1644
1589 desc = &bp->rx_ring[i]; 1645 desc = macb_rx_desc(bp, i);
1590 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1646 addr = macb_get_addr(bp, desc);
1591#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1647
1592 addr |= ((u64)(desc->addrh) << 32);
1593#endif
1594 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1648 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1595 DMA_FROM_DEVICE); 1649 DMA_FROM_DEVICE);
1596 dev_kfree_skb_any(skb); 1650 dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
1711static void gem_init_rings(struct macb *bp) 1765static void gem_init_rings(struct macb *bp)
1712{ 1766{
1713 struct macb_queue *queue; 1767 struct macb_queue *queue;
1768 struct macb_dma_desc *desc = NULL;
1714 unsigned int q; 1769 unsigned int q;
1715 int i; 1770 int i;
1716 1771
1717 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1772 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1718 for (i = 0; i < bp->tx_ring_size; i++) { 1773 for (i = 0; i < bp->tx_ring_size; i++) {
1719 queue->tx_ring[i].addr = 0; 1774 desc = macb_tx_desc(queue, i);
1720 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1775 macb_set_addr(bp, desc, 0);
1776 desc->ctrl = MACB_BIT(TX_USED);
1721 } 1777 }
1722 queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); 1778 desc->ctrl |= MACB_BIT(TX_WRAP);
1723 queue->tx_head = 0; 1779 queue->tx_head = 0;
1724 queue->tx_tail = 0; 1780 queue->tx_tail = 0;
1725 } 1781 }
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
1733static void macb_init_rings(struct macb *bp) 1789static void macb_init_rings(struct macb *bp)
1734{ 1790{
1735 int i; 1791 int i;
1792 struct macb_dma_desc *desc = NULL;
1736 1793
1737 macb_init_rx_ring(bp); 1794 macb_init_rx_ring(bp);
1738 1795
1739 for (i = 0; i < bp->tx_ring_size; i++) { 1796 for (i = 0; i < bp->tx_ring_size; i++) {
1740 bp->queues[0].tx_ring[i].addr = 0; 1797 desc = macb_tx_desc(&bp->queues[0], i);
1741 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); 1798 macb_set_addr(bp, desc, 0);
1799 desc->ctrl = MACB_BIT(TX_USED);
1742 } 1800 }
1743 bp->queues[0].tx_head = 0; 1801 bp->queues[0].tx_head = 0;
1744 bp->queues[0].tx_tail = 0; 1802 bp->queues[0].tx_tail = 0;
1745 bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); 1803 desc->ctrl |= MACB_BIT(TX_WRAP);
1746} 1804}
1747 1805
1748static void macb_reset_hw(struct macb *bp) 1806static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
1863 dmacfg &= ~GEM_BIT(TXCOEN); 1921 dmacfg &= ~GEM_BIT(TXCOEN);
1864 1922
1865#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1923#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1866 dmacfg |= GEM_BIT(ADDR64); 1924 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1925 dmacfg |= GEM_BIT(ADDR64);
1867#endif 1926#endif
1868 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 1927 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1869 dmacfg); 1928 dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
1910 macb_configure_dma(bp); 1969 macb_configure_dma(bp);
1911 1970
1912 /* Initialize TX and RX buffers */ 1971 /* Initialize TX and RX buffers */
1913 macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma)); 1972 macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
1914#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1973#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1915 macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32)); 1974 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1975 macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
1916#endif 1976#endif
1917 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1977 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1918 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 1978 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1919#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1979#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1920 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 1980 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1981 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1921#endif 1982#endif
1922 1983
1923 /* Enable interrupts */ 1984 /* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
2627 queue->IMR = GEM_IMR(hw_q - 1); 2688 queue->IMR = GEM_IMR(hw_q - 1);
2628 queue->TBQP = GEM_TBQP(hw_q - 1); 2689 queue->TBQP = GEM_TBQP(hw_q - 1);
2629#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2690#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2630 queue->TBQPH = GEM_TBQPH(hw_q -1); 2691 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2692 queue->TBQPH = GEM_TBQPH(hw_q - 1);
2631#endif 2693#endif
2632 } else { 2694 } else {
2633 /* queue0 uses legacy registers */ 2695 /* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
2637 queue->IMR = MACB_IMR; 2699 queue->IMR = MACB_IMR;
2638 queue->TBQP = MACB_TBQP; 2700 queue->TBQP = MACB_TBQP;
2639#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2701#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2640 queue->TBQPH = MACB_TBQPH; 2702 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2703 queue->TBQPH = MACB_TBQPH;
2641#endif 2704#endif
2642 } 2705 }
2643 2706
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
2730static int at91ether_start(struct net_device *dev) 2793static int at91ether_start(struct net_device *dev)
2731{ 2794{
2732 struct macb *lp = netdev_priv(dev); 2795 struct macb *lp = netdev_priv(dev);
2796 struct macb_dma_desc *desc;
2733 dma_addr_t addr; 2797 dma_addr_t addr;
2734 u32 ctl; 2798 u32 ctl;
2735 int i; 2799 int i;
2736 2800
2737 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 2801 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2738 (AT91ETHER_MAX_RX_DESCR * 2802 (AT91ETHER_MAX_RX_DESCR *
2739 sizeof(struct macb_dma_desc)), 2803 macb_dma_desc_get_size(lp)),
2740 &lp->rx_ring_dma, GFP_KERNEL); 2804 &lp->rx_ring_dma, GFP_KERNEL);
2741 if (!lp->rx_ring) 2805 if (!lp->rx_ring)
2742 return -ENOMEM; 2806 return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
2748 if (!lp->rx_buffers) { 2812 if (!lp->rx_buffers) {
2749 dma_free_coherent(&lp->pdev->dev, 2813 dma_free_coherent(&lp->pdev->dev,
2750 AT91ETHER_MAX_RX_DESCR * 2814 AT91ETHER_MAX_RX_DESCR *
2751 sizeof(struct macb_dma_desc), 2815 macb_dma_desc_get_size(lp),
2752 lp->rx_ring, lp->rx_ring_dma); 2816 lp->rx_ring, lp->rx_ring_dma);
2753 lp->rx_ring = NULL; 2817 lp->rx_ring = NULL;
2754 return -ENOMEM; 2818 return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
2756 2820
2757 addr = lp->rx_buffers_dma; 2821 addr = lp->rx_buffers_dma;
2758 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 2822 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2759 lp->rx_ring[i].addr = addr; 2823 desc = macb_rx_desc(lp, i);
2760 lp->rx_ring[i].ctrl = 0; 2824 macb_set_addr(lp, desc, addr);
2825 desc->ctrl = 0;
2761 addr += AT91ETHER_MAX_RBUFF_SZ; 2826 addr += AT91ETHER_MAX_RBUFF_SZ;
2762 } 2827 }
2763 2828
2764 /* Set the Wrap bit on the last descriptor */ 2829 /* Set the Wrap bit on the last descriptor */
2765 lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); 2830 desc->addr |= MACB_BIT(RX_WRAP);
2766 2831
2767 /* Reset buffer index */ 2832 /* Reset buffer index */
2768 lp->rx_tail = 0; 2833 lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
2834 2899
2835 dma_free_coherent(&lp->pdev->dev, 2900 dma_free_coherent(&lp->pdev->dev,
2836 AT91ETHER_MAX_RX_DESCR * 2901 AT91ETHER_MAX_RX_DESCR *
2837 sizeof(struct macb_dma_desc), 2902 macb_dma_desc_get_size(lp),
2838 lp->rx_ring, lp->rx_ring_dma); 2903 lp->rx_ring, lp->rx_ring_dma);
2839 lp->rx_ring = NULL; 2904 lp->rx_ring = NULL;
2840 2905
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2885static void at91ether_rx(struct net_device *dev) 2950static void at91ether_rx(struct net_device *dev)
2886{ 2951{
2887 struct macb *lp = netdev_priv(dev); 2952 struct macb *lp = netdev_priv(dev);
2953 struct macb_dma_desc *desc;
2888 unsigned char *p_recv; 2954 unsigned char *p_recv;
2889 struct sk_buff *skb; 2955 struct sk_buff *skb;
2890 unsigned int pktlen; 2956 unsigned int pktlen;
2891 2957
2892 while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { 2958 desc = macb_rx_desc(lp, lp->rx_tail);
2959 while (desc->addr & MACB_BIT(RX_USED)) {
2893 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 2960 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2894 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); 2961 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
2895 skb = netdev_alloc_skb(dev, pktlen + 2); 2962 skb = netdev_alloc_skb(dev, pktlen + 2);
2896 if (skb) { 2963 if (skb) {
2897 skb_reserve(skb, 2); 2964 skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
2905 lp->stats.rx_dropped++; 2972 lp->stats.rx_dropped++;
2906 } 2973 }
2907 2974
2908 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) 2975 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
2909 lp->stats.multicast++; 2976 lp->stats.multicast++;
2910 2977
2911 /* reset ownership bit */ 2978 /* reset ownership bit */
2912 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); 2979 desc->addr &= ~MACB_BIT(RX_USED);
2913 2980
2914 /* wrap after last buffer */ 2981 /* wrap after last buffer */
2915 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 2982 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2916 lp->rx_tail = 0; 2983 lp->rx_tail = 0;
2917 else 2984 else
2918 lp->rx_tail++; 2985 lp->rx_tail++;
2986
2987 desc = macb_rx_desc(lp, lp->rx_tail);
2919 } 2988 }
2920} 2989}
2921 2990
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
3211 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 3280 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3212 3281
3213#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3282#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3214 if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) 3283 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3215 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 3284 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3285 bp->hw_dma_cap = HW_DMA_CAP_64B;
3286 } else
3287 bp->hw_dma_cap = HW_DMA_CAP_32B;
3216#endif 3288#endif
3217 3289
3218 spin_lock_init(&bp->lock); 3290 spin_lock_init(&bp->lock);
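
The macb changes hinge on one layout fact: on a GEM with 64-bit DMA (DCFG6.DAW64 set), each descriptor slot occupies two 32-bit word pairs, addr/ctrl plus addrh/resvd, instead of one, so ring indices and byte sizes must be scaled. A sketch of the index math, with a hypothetical function name; the shift mirrors macb_adj_dma_desc_idx() above:

    #include <linux/types.h>

    /* hw_index() is illustrative only: a logical slot i maps to hardware
     * index i << 1 when descriptors are 4 words instead of 2.
     */
    static inline unsigned int hw_index(bool cap_64b, unsigned int idx)
    {
            return cap_64b ? idx << 1 : idx;
    }

With this scaling, tx_ring[hw_index(cap, i)] is the base addr/ctrl pair and the following array element aliases the high-address extension for the same logical slot.
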
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad67be1..fc8550a5d47f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -385,6 +385,8 @@
385/* Bitfields in DCFG6. */ 385/* Bitfields in DCFG6. */
386#define GEM_PBUF_LSO_OFFSET 27 386#define GEM_PBUF_LSO_OFFSET 27
387#define GEM_PBUF_LSO_SIZE 1 387#define GEM_PBUF_LSO_SIZE 1
388#define GEM_DAW64_OFFSET 23
389#define GEM_DAW64_SIZE 1
388 390
389/* Constants for CLK */ 391/* Constants for CLK */
390#define MACB_CLK_DIV8 0 392#define MACB_CLK_DIV8 0
@@ -487,12 +489,20 @@
487struct macb_dma_desc { 489struct macb_dma_desc {
488 u32 addr; 490 u32 addr;
489 u32 ctrl; 491 u32 ctrl;
492};
493
490#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 494#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
491 u32 addrh; 495enum macb_hw_dma_cap {
492 u32 resvd; 496 HW_DMA_CAP_32B,
493#endif 497 HW_DMA_CAP_64B,
494}; 498};
495 499
500struct macb_dma_desc_64 {
501 u32 addrh;
502 u32 resvd;
503};
504#endif
505
496/* DMA descriptor bitfields */ 506/* DMA descriptor bitfields */
497#define MACB_RX_USED_OFFSET 0 507#define MACB_RX_USED_OFFSET 0
498#define MACB_RX_USED_SIZE 1 508#define MACB_RX_USED_SIZE 1
@@ -874,6 +884,10 @@ struct macb {
874 unsigned int jumbo_max_len; 884 unsigned int jumbo_max_len;
875 885
876 u32 wol; 886 u32 wol;
887
888#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
889 enum macb_hw_dma_cap hw_dma_cap;
890#endif
877}; 891};
878 892
879static inline bool macb_is_gem(struct macb *bp) 893static inline bool macb_is_gem(struct macb *bp)
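
With the extension words split into macb_dma_desc_64, a 64-bit DMA address is written as two 32-bit halves: the high word goes into the trailing extension first, the low word into desc->addr last, since desc->addr is the word the controller examines. A condensed sketch of macb_set_addr() from the diff above; upper_32_bits()/lower_32_bits() are the real kernel helpers:

    /* Condensed from macb_set_addr(); desc_64 is NULL on 32b-only HW. */
    static void sketch_set_addr(struct macb_dma_desc *desc,
                                struct macb_dma_desc_64 *desc_64,
                                dma_addr_t addr)
    {
            if (desc_64)
                    desc_64->addrh = upper_32_bits(addr);   /* high word first */
            desc->addr = lower_32_bits(addr);               /* low word last */
    }
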
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befedef709..578c7f8f11bf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
116 int speed = 2; 116 int speed = 2;
117 117
118 if (!xcv) { 118 if (!xcv) {
119 dev_err(&xcv->pdev->dev, 119 pr_err("XCV init not done, probe may have failed\n");
120 "XCV init not done, probe may have failed\n");
121 return; 120 return;
122 } 121 }
123 122
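
The thunder_xcv change is a straight NULL-dereference fix: the error path used dev_err(&xcv->pdev->dev, ...) inside the very branch that had just tested !xcv. The shape of the fix, as a sketch with a hypothetical type name:

    #include <linux/printk.h>

    static void sketch_setup_link(struct xcv_priv *xcv)  /* hypothetical type */
    {
            if (!xcv) {
                    /* dev_err(&xcv->pdev->dev, ...) would dereference the
                     * NULL pointer just tested; pr_err() needs no device.
                     */
                    pr_err("XCV init not done, probe may have failed\n");
                    return;
            }
            /* ... normal link setup ... */
    }
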
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1a7f8ad7b9c6..cd49a54c538d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
362 status = -EPERM; 362 status = -EPERM;
363 goto err; 363 goto err;
364 } 364 }
365done: 365
366 /* Remember currently programmed MAC */
366 ether_addr_copy(adapter->dev_mac, addr->sa_data); 367 ether_addr_copy(adapter->dev_mac, addr->sa_data);
368done:
367 ether_addr_copy(netdev->dev_addr, addr->sa_data); 369 ether_addr_copy(netdev->dev_addr, addr->sa_data);
368 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); 370 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
369 return 0; 371 return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
3618{ 3620{
3619 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ 3621 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3620 if (!BEx_chip(adapter) || !be_virtfn(adapter) || 3622 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3621 check_privilege(adapter, BE_PRIV_FILTMGMT)) 3623 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3622 be_dev_mac_del(adapter, adapter->pmac_id[0]); 3624 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3625 eth_zero_addr(adapter->dev_mac);
3626 }
3623 3627
3624 be_clear_uc_list(adapter); 3628 be_clear_uc_list(adapter);
3625 be_clear_mc_list(adapter); 3629 be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
3773 if (status) 3777 if (status)
3774 return status; 3778 return status;
3775 3779
3776 /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ 3780 /* Normally this condition is true, as ->dev_mac is zeroed.
3777 if (!BEx_chip(adapter) || !be_virtfn(adapter) || 3781 * But on BE3 VFs the initial MAC is pre-programmed by the PF and
3778 check_privilege(adapter, BE_PRIV_FILTMGMT)) { 3782 * a subsequent be_dev_mac_add() can fail (after a fresh boot)
3783 */
3784 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3785 int old_pmac_id = -1;
3786
3787 /* Remember old programmed MAC if any - can happen on BE3 VF */
3788 if (!is_zero_ether_addr(adapter->dev_mac))
3789 old_pmac_id = adapter->pmac_id[0];
3790
3779 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); 3791 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3780 if (status) 3792 if (status)
3781 return status; 3793 return status;
3794
3795 /* Delete the old programmed MAC as we successfully programmed
3796 * a new MAC
3797 */
3798 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3799 be_dev_mac_del(adapter, old_pmac_id);
3800
3782 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr); 3801 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
3783 } 3802 }
3784 3803
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
4552 4571
4553 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4572 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4554 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4573 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4574
4575 /* Initial MAC for BE3 VFs is already programmed by PF */
4576 if (BEx_chip(adapter) && be_virtfn(adapter))
4577 memcpy(adapter->dev_mac, mac, ETH_ALEN);
4555 } 4578 }
4556 4579
4557 return 0; 4580 return 0;
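
Taken together, the be2net hunks make adapter->dev_mac a shadow of the MAC actually programmed in hardware: seeded at probe for BE3 VFs, zeroed when filters are torn down, and compared before any re-add. Condensed from the hunks above, the re-enable sequence is:

    /* Condensed: reprogram only on change, then retire the old filter. */
    if (!ether_addr_equal(adapter->dev_mac, netdev->dev_addr)) {
            int old_pmac_id = -1;

            if (!is_zero_ether_addr(adapter->dev_mac))
                    old_pmac_id = adapter->pmac_id[0];  /* e.g. BE3 VF MAC set by PF */

            status = be_dev_mac_add(adapter, netdev->dev_addr);
            if (status)
                    return status;
            if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
                    be_dev_mac_del(adapter, old_pmac_id);
            ether_addr_copy(adapter->dev_mac, netdev->dev_addr);
    }
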
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c1b671667920..957bfc220978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
2010 if (!rxb->page) 2010 if (!rxb->page)
2011 continue; 2011 continue;
2012 2012
2013 dma_unmap_single(rx_queue->dev, rxb->dma, 2013 dma_unmap_page(rx_queue->dev, rxb->dma,
2014 PAGE_SIZE, DMA_FROM_DEVICE); 2014 PAGE_SIZE, DMA_FROM_DEVICE);
2015 __free_page(rxb->page); 2015 __free_page(rxb->page);
2016 2016
2017 rxb->page = NULL; 2017 rxb->page = NULL;
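
The gianfar fix is about matching DMA API flavors: these RX buffers are mapped with dma_map_page(), so they must be released with dma_unmap_page(); mixing in dma_unmap_single() trips CONFIG_DMA_API_DEBUG warnings and can confuse IOMMU bookkeeping. A self-contained sketch of the paired calls, with a hypothetical function name:

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Sketch: map and unmap flavors must match for each buffer. */
    static int sketch_rx_page_cycle(struct device *dev, struct page *page)
    {
            dma_addr_t d = dma_map_page(dev, page, 0, PAGE_SIZE,
                                        DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, d))
                    return -ENOMEM;
            /* ... hardware fills the page ... */
            dma_unmap_page(dev, d, PAGE_SIZE, DMA_FROM_DEVICE);
            return 0;
    }
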
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e939945259..53daa6ca5d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
158 return -ETIMEDOUT; 158 return -ETIMEDOUT;
159} 159}
160 160
161static int mlx4_comm_internal_err(u32 slave_read) 161int mlx4_comm_internal_err(u32 slave_read)
162{ 162{
163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == 163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; 164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c44931f..8258d08acd8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
222 return; 222 return;
223 223
224 mlx4_stop_catas_poll(dev); 224 mlx4_stop_catas_poll(dev);
225 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
226 mlx4_is_slave(dev)) {
227 /* In mlx4_remove_one on a VF */
228 u32 slave_read =
229 swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
230
231 if (mlx4_comm_internal_err(slave_read)) {
232 mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
233 __func__);
234 mlx4_enter_error_state(dev->persist);
235 }
236 }
225 mutex_lock(&intf_mutex); 237 mutex_lock(&intf_mutex);
226 238
227 list_for_each_entry(intf, &intf_list, list) 239 list_for_each_entry(intf, &intf_list, list)
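
On VF removal, the comm channel word is read via readl() and byte-swapped before mlx4_comm_internal_err(), made non-static in catas.c above, decides whether to enter the error state. The predicate itself is just a bit test; restated equivalently:

    /* Restated from catas.c: true iff the internal-error event bit is set. */
    static int sketch_comm_internal_err(u32 slave_read)
    {
            return (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ==
                   (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
    }
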
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8a5923..086920b615af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
1220void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); 1220void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
1221 1221
1222void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); 1222void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
1223int mlx4_comm_internal_err(u32 slave_read);
1223 1224
1224int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, 1225int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
1225 enum mlx4_port_type *type); 1226 enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1728 if (cmd->cmdif_rev > CMD_IF_REV) { 1728 if (cmd->cmdif_rev > CMD_IF_REV) {
1729 dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", 1729 dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
1730 CMD_IF_REV, cmd->cmdif_rev); 1730 CMD_IF_REV, cmd->cmdif_rev);
1731 err = -ENOTSUPP; 1731 err = -EOPNOTSUPP;
1732 goto err_free_page; 1732 goto err_free_page;
1733 } 1733 }
1734 1734
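
The ENOTSUPP to EOPNOTSUPP conversions running through the mlx5 hunks matter at the UAPI boundary: ENOTSUPP (524) is a kernel-internal NFSv3 errno with no userspace string, so tools print "Unknown error 524", while EOPNOTSUPP renders as "Operation not supported". A tiny userspace illustration:

    /* Userspace view of the two error codes. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            printf("%s\n", strerror(EOPNOTSUPP)); /* "Operation not supported" */
            printf("%s\n", strerror(524));        /* "Unknown error 524" (ENOTSUPP) */
            return 0;
    }
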
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..d5ecb8f53fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
791int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); 791int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
792 792
793int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix); 793int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
794void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); 794void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
795 enum mlx5e_traffic_types tt);
795 796
796int mlx5e_open_locked(struct net_device *netdev); 797int mlx5e_open_locked(struct net_device *netdev);
797int mlx5e_close_locked(struct net_device *netdev); 798int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
863 864
864static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) 865static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
865{ 866{
866 return -ENOTSUPP; 867 return -EOPNOTSUPP;
867} 868}
868 869
869static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) 870static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
870{ 871{
871 return -ENOTSUPP; 872 return -EOPNOTSUPP;
872} 873}
873#else 874#else
874int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); 875int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f0b460f47f29..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
89 int i; 89 int i;
90 90
91 if (!MLX5_CAP_GEN(priv->mdev, ets)) 91 if (!MLX5_CAP_GEN(priv->mdev, ets))
92 return -ENOTSUPP; 92 return -EOPNOTSUPP;
93 93
94 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; 94 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
95 for (i = 0; i < ets->ets_cap; i++) { 95 for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
236 int err; 236 int err;
237 237
238 if (!MLX5_CAP_GEN(priv->mdev, ets)) 238 if (!MLX5_CAP_GEN(priv->mdev, ets))
239 return -ENOTSUPP; 239 return -EOPNOTSUPP;
240 240
241 err = mlx5e_dbcnl_validate_ets(netdev, ets); 241 err = mlx5e_dbcnl_validate_ets(netdev, ets);
242 if (err) 242 if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
402 struct mlx5_core_dev *mdev = priv->mdev; 402 struct mlx5_core_dev *mdev = priv->mdev;
403 struct ieee_ets ets; 403 struct ieee_ets ets;
404 struct ieee_pfc pfc; 404 struct ieee_pfc pfc;
405 int err = -ENOTSUPP; 405 int err = -EOPNOTSUPP;
406 int i; 406 int i;
407 407
408 if (!MLX5_CAP_GEN(mdev, ets)) 408 if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
511 struct mlx5e_priv *priv = netdev_priv(netdev); 511 struct mlx5e_priv *priv = netdev_priv(netdev);
512 struct mlx5_core_dev *mdev = priv->mdev; 512 struct mlx5_core_dev *mdev = priv->mdev;
513 513
514 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
515 netdev_err(netdev, "%s, ets is not supported\n", __func__);
516 return;
517 }
518
514 if (priority >= CEE_DCBX_MAX_PRIO) { 519 if (priority >= CEE_DCBX_MAX_PRIO) {
515 netdev_err(netdev, 520 netdev_err(netdev,
516 "%s, priority is out of range\n", __func__); 521 "%s, priority is out of range\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5197817e4b2f..bb67863aa361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
595 struct mlx5e_priv *priv = netdev_priv(netdev); 595 struct mlx5e_priv *priv = netdev_priv(netdev);
596 596
597 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) 597 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
598 return -ENOTSUPP; 598 return -EOPNOTSUPP;
599 599
600 coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; 600 coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
601 coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; 601 coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
620 int i; 620 int i;
621 621
622 if (!MLX5_CAP_GEN(mdev, cq_moderation)) 622 if (!MLX5_CAP_GEN(mdev, cq_moderation))
623 return -ENOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 mutex_lock(&priv->state_lock); 625 mutex_lock(&priv->state_lock);
626 626
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
980 980
981static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) 981static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
982{ 982{
983 struct mlx5_core_dev *mdev = priv->mdev;
984 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); 983 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
985 int i; 984 struct mlx5_core_dev *mdev = priv->mdev;
985 int ctxlen = MLX5_ST_SZ_BYTES(tirc);
986 int tt;
986 987
987 MLX5_SET(modify_tir_in, in, bitmask.hash, 1); 988 MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
988 mlx5e_build_tir_ctx_hash(tirc, priv);
989 989
990 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 990 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
991 mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen); 991 memset(tirc, 0, ctxlen);
992 mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
993 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
994 }
992} 995}
993 996
994static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, 997static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
996{ 999{
997 struct mlx5e_priv *priv = netdev_priv(dev); 1000 struct mlx5e_priv *priv = netdev_priv(dev);
998 int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 1001 int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1002 bool hash_changed = false;
999 void *in; 1003 void *in;
1000 1004
1001 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && 1005 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1017 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); 1021 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
1018 } 1022 }
1019 1023
1020 if (key) 1024 if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
1025 hfunc != priv->params.rss_hfunc) {
1026 priv->params.rss_hfunc = hfunc;
1027 hash_changed = true;
1028 }
1029
1030 if (key) {
1021 memcpy(priv->params.toeplitz_hash_key, key, 1031 memcpy(priv->params.toeplitz_hash_key, key,
1022 sizeof(priv->params.toeplitz_hash_key)); 1032 sizeof(priv->params.toeplitz_hash_key));
1033 hash_changed = hash_changed ||
1034 priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
1035 }
1023 1036
1024 if (hfunc != ETH_RSS_HASH_NO_CHANGE) 1037 if (hash_changed)
1025 priv->params.rss_hfunc = hfunc; 1038 mlx5e_modify_tirs_hash(priv, in, inlen);
1026
1027 mlx5e_modify_tirs_hash(priv, in, inlen);
1028 1039
1029 mutex_unlock(&priv->state_lock); 1040 mutex_unlock(&priv->state_lock);
1030 1041
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1296 u32 mlx5_wol_mode; 1307 u32 mlx5_wol_mode;
1297 1308
1298 if (!wol_supported) 1309 if (!wol_supported)
1299 return -ENOTSUPP; 1310 return -EOPNOTSUPP;
1300 1311
1301 if (wol->wolopts & ~wol_supported) 1312 if (wol->wolopts & ~wol_supported)
1302 return -EINVAL; 1313 return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
1426 1437
1427 if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && 1438 if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
1428 !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) 1439 !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
1429 return -ENOTSUPP; 1440 return -EOPNOTSUPP;
1430 1441
1431 if (!rx_mode_changed) 1442 if (!rx_mode_changed)
1432 return 0; 1443 return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
1452 bool reset; 1463 bool reset;
1453 1464
1454 if (!MLX5_CAP_GEN(mdev, cqe_compression)) 1465 if (!MLX5_CAP_GEN(mdev, cqe_compression))
1455 return -ENOTSUPP; 1466 return -EOPNOTSUPP;
1456 1467
1457 if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { 1468 if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
1458 netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n"); 1469 netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
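
The set_rxfh rework above only rewrites the TIR hash contexts when the effective hash actually changes; since the XOR hash ignores the Toeplitz key, a key update alone is a no-op unless ETH_RSS_HASH_TOP is, or becomes, the active function. The decision logic, condensed into a standalone sketch (the helper name is illustrative; the ethtool constants are real):

    #include <linux/ethtool.h>

    /* Condensed from mlx5e_set_rxfh(): rewrite TIRs iff the hash function
     * changed, or a key was given while Toeplitz ends up active.
     */
    static bool rxfh_hash_changed(u8 old_hfunc, u8 new_hfunc, bool key_given)
    {
            u8 hfunc = (new_hfunc == ETH_RSS_HASH_NO_CHANGE) ? old_hfunc
                                                             : new_hfunc;

            return hfunc != old_hfunc ||
                   (key_given && hfunc == ETH_RSS_HASH_TOP);
    }
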
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..a0e5a69402b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1089 MLX5_FLOW_NAMESPACE_KERNEL); 1089 MLX5_FLOW_NAMESPACE_KERNEL);
1090 1090
1091 if (!priv->fs.ns) 1091 if (!priv->fs.ns)
1092 return -EINVAL; 1092 return -EOPNOTSUPP;
1093 1093
1094 err = mlx5e_arfs_create_tables(priv); 1094 err = mlx5e_arfs_create_tables(priv);
1095 if (err) { 1095 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088effd7160..f33f72d0237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
92 ns = mlx5_get_flow_namespace(priv->mdev, 92 ns = mlx5_get_flow_namespace(priv->mdev,
93 MLX5_FLOW_NAMESPACE_ETHTOOL); 93 MLX5_FLOW_NAMESPACE_ETHTOOL);
94 if (!ns) 94 if (!ns)
95 return ERR_PTR(-ENOTSUPP); 95 return ERR_PTR(-EOPNOTSUPP);
96 96
97 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev, 97 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
98 flow_table_properties_nic_receive.log_max_ft_size)), 98 flow_table_properties_nic_receive.log_max_ft_size)),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020c..f14ca3385fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
2022 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout); 2022 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
2023} 2023}
2024 2024
2025void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) 2025void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
2026 enum mlx5e_traffic_types tt)
2026{ 2027{
2028 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2029
2030#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2031 MLX5_HASH_FIELD_SEL_DST_IP)
2032
2033#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2034 MLX5_HASH_FIELD_SEL_DST_IP |\
2035 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2036 MLX5_HASH_FIELD_SEL_L4_DPORT)
2037
2038#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2039 MLX5_HASH_FIELD_SEL_DST_IP |\
2040 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2041
2027 MLX5_SET(tirc, tirc, rx_hash_fn, 2042 MLX5_SET(tirc, tirc, rx_hash_fn,
2028 mlx5e_rx_hash_fn(priv->params.rss_hfunc)); 2043 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
2029 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { 2044 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
2035 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2050 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2036 memcpy(rss_key, priv->params.toeplitz_hash_key, len); 2051 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
2037 } 2052 }
2053
2054 switch (tt) {
2055 case MLX5E_TT_IPV4_TCP:
2056 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2057 MLX5_L3_PROT_TYPE_IPV4);
2058 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2059 MLX5_L4_PROT_TYPE_TCP);
2060 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2061 MLX5_HASH_IP_L4PORTS);
2062 break;
2063
2064 case MLX5E_TT_IPV6_TCP:
2065 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2066 MLX5_L3_PROT_TYPE_IPV6);
2067 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2068 MLX5_L4_PROT_TYPE_TCP);
2069 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2070 MLX5_HASH_IP_L4PORTS);
2071 break;
2072
2073 case MLX5E_TT_IPV4_UDP:
2074 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2075 MLX5_L3_PROT_TYPE_IPV4);
2076 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2077 MLX5_L4_PROT_TYPE_UDP);
2078 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2079 MLX5_HASH_IP_L4PORTS);
2080 break;
2081
2082 case MLX5E_TT_IPV6_UDP:
2083 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2084 MLX5_L3_PROT_TYPE_IPV6);
2085 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2086 MLX5_L4_PROT_TYPE_UDP);
2087 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2088 MLX5_HASH_IP_L4PORTS);
2089 break;
2090
2091 case MLX5E_TT_IPV4_IPSEC_AH:
2092 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2093 MLX5_L3_PROT_TYPE_IPV4);
2094 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2095 MLX5_HASH_IP_IPSEC_SPI);
2096 break;
2097
2098 case MLX5E_TT_IPV6_IPSEC_AH:
2099 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2100 MLX5_L3_PROT_TYPE_IPV6);
2101 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2102 MLX5_HASH_IP_IPSEC_SPI);
2103 break;
2104
2105 case MLX5E_TT_IPV4_IPSEC_ESP:
2106 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2107 MLX5_L3_PROT_TYPE_IPV4);
2108 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2109 MLX5_HASH_IP_IPSEC_SPI);
2110 break;
2111
2112 case MLX5E_TT_IPV6_IPSEC_ESP:
2113 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2114 MLX5_L3_PROT_TYPE_IPV6);
2115 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2116 MLX5_HASH_IP_IPSEC_SPI);
2117 break;
2118
2119 case MLX5E_TT_IPV4:
2120 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2121 MLX5_L3_PROT_TYPE_IPV4);
2122 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2123 MLX5_HASH_IP);
2124 break;
2125
2126 case MLX5E_TT_IPV6:
2127 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2128 MLX5_L3_PROT_TYPE_IPV6);
2129 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2130 MLX5_HASH_IP);
2131 break;
2132 default:
2133 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2134 }
2038} 2135}
2039 2136
2040static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) 2137static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2404static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, 2501static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2405 enum mlx5e_traffic_types tt) 2502 enum mlx5e_traffic_types tt)
2406{ 2503{
2407 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2408
2409 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); 2504 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2410 2505
2411#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2412 MLX5_HASH_FIELD_SEL_DST_IP)
2413
2414#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2415 MLX5_HASH_FIELD_SEL_DST_IP |\
2416 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2417 MLX5_HASH_FIELD_SEL_L4_DPORT)
2418
2419#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2420 MLX5_HASH_FIELD_SEL_DST_IP |\
2421 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2422
2423 mlx5e_build_tir_ctx_lro(tirc, priv); 2506 mlx5e_build_tir_ctx_lro(tirc, priv);
2424 2507
2425 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); 2508 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2426 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); 2509 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2427 mlx5e_build_tir_ctx_hash(tirc, priv); 2510 mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
2428
2429 switch (tt) {
2430 case MLX5E_TT_IPV4_TCP:
2431 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2432 MLX5_L3_PROT_TYPE_IPV4);
2433 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2434 MLX5_L4_PROT_TYPE_TCP);
2435 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2436 MLX5_HASH_IP_L4PORTS);
2437 break;
2438
2439 case MLX5E_TT_IPV6_TCP:
2440 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2441 MLX5_L3_PROT_TYPE_IPV6);
2442 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2443 MLX5_L4_PROT_TYPE_TCP);
2444 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2445 MLX5_HASH_IP_L4PORTS);
2446 break;
2447
2448 case MLX5E_TT_IPV4_UDP:
2449 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2450 MLX5_L3_PROT_TYPE_IPV4);
2451 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2452 MLX5_L4_PROT_TYPE_UDP);
2453 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2454 MLX5_HASH_IP_L4PORTS);
2455 break;
2456
2457 case MLX5E_TT_IPV6_UDP:
2458 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2459 MLX5_L3_PROT_TYPE_IPV6);
2460 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2461 MLX5_L4_PROT_TYPE_UDP);
2462 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2463 MLX5_HASH_IP_L4PORTS);
2464 break;
2465
2466 case MLX5E_TT_IPV4_IPSEC_AH:
2467 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2468 MLX5_L3_PROT_TYPE_IPV4);
2469 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2470 MLX5_HASH_IP_IPSEC_SPI);
2471 break;
2472
2473 case MLX5E_TT_IPV6_IPSEC_AH:
2474 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2475 MLX5_L3_PROT_TYPE_IPV6);
2476 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2477 MLX5_HASH_IP_IPSEC_SPI);
2478 break;
2479
2480 case MLX5E_TT_IPV4_IPSEC_ESP:
2481 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2482 MLX5_L3_PROT_TYPE_IPV4);
2483 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2484 MLX5_HASH_IP_IPSEC_SPI);
2485 break;
2486
2487 case MLX5E_TT_IPV6_IPSEC_ESP:
2488 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2489 MLX5_L3_PROT_TYPE_IPV6);
2490 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2491 MLX5_HASH_IP_IPSEC_SPI);
2492 break;
2493
2494 case MLX5E_TT_IPV4:
2495 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2496 MLX5_L3_PROT_TYPE_IPV4);
2497 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2498 MLX5_HASH_IP);
2499 break;
2500
2501 case MLX5E_TT_IPV6:
2502 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2503 MLX5_L3_PROT_TYPE_IPV6);
2504 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2505 MLX5_HASH_IP);
2506 break;
2507 default:
2508 WARN_ONCE(true,
2509 "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
2510 }
2511} 2511}
2512 2512
2513static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, 2513static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
3331static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 3331static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3332{ 3332{
3333 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 3333 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3334 return -ENOTSUPP; 3334 return -EOPNOTSUPP;
3335 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || 3335 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3336 !MLX5_CAP_GEN(mdev, nic_flow_table) || 3336 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3337 !MLX5_CAP_ETH(mdev, csum_cap) || 3337 !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3343 < 3) { 3343 < 3) {
3344 mlx5_core_warn(mdev, 3344 mlx5_core_warn(mdev,
3345 "Not creating net device, some required device capabilities are missing\n"); 3345 "Not creating net device, some required device capabilities are missing\n");
3346 return -ENOTSUPP; 3346 return -EOPNOTSUPP;
3347 } 3347 }
3348 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) 3348 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3349 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); 3349 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
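
The large en_main.c move is mechanical but purposeful: the per-traffic-type field-selector switch now lives in mlx5e_build_indir_tir_ctx_hash(), so the ethtool path (en_ethtool.c above) can zero the context and rebuild it whole for each TT; rebuilding only the hash function and key would leave the field selectors unset. The resulting modify loop, condensed from the hunk above:

    /* Condensed from mlx5e_modify_tirs_hash(): one full rebuild per TT. */
    for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
            memset(tirc, 0, ctxlen);        /* drop any previously set fields */
            mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
            mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
    }
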
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 46bef6a26a8c..c5282b6aba8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
663 __be32 *saddr, 663 __be32 *saddr,
664 int *out_ttl) 664 int *out_ttl)
665{ 665{
666 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
666 struct rtable *rt; 667 struct rtable *rt;
667 struct neighbour *n = NULL; 668 struct neighbour *n = NULL;
668 int ttl; 669 int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
677#else 678#else
678 return -EOPNOTSUPP; 679 return -EOPNOTSUPP;
679#endif 680#endif
680 681 /* if the egress device isn't on the same HW e-switch, we use the uplink */
681 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) { 682 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
682 pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__); 683 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
683 ip_rt_put(rt); 684 else
684 return -EOPNOTSUPP; 685 *out_dev = rt->dst.dev;
685 }
686 686
687 ttl = ip4_dst_hoplimit(&rt->dst); 687 ttl = ip4_dst_hoplimit(&rt->dst);
688 n = dst_neigh_lookup(&rt->dst, &fl4->daddr); 688 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
693 *out_n = n; 693 *out_n = n;
694 *saddr = fl4->saddr; 694 *saddr = fl4->saddr;
695 *out_ttl = ttl; 695 *out_ttl = ttl;
696 *out_dev = rt->dst.dev;
697 696
698 return 0; 697 return 0;
699} 698}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9ba773..d0c8bf014453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
133 133
134 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || 134 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
135 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) 135 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
136 return -ENOTSUPP; 136 return -EOPNOTSUPP;
137 137
138 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", 138 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
139 vport, vlan, qos, set_flags); 139 vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
353 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 353 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
354 if (!root_ns) { 354 if (!root_ns) {
355 esw_warn(dev, "Failed to get FDB flow namespace\n"); 355 esw_warn(dev, "Failed to get FDB flow namespace\n");
356 return -ENOMEM; 356 return -EOPNOTSUPP;
357 } 357 }
358 358
359 flow_group_in = mlx5_vzalloc(inlen); 359 flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
962 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); 962 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
963 if (!root_ns) { 963 if (!root_ns) {
964 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); 964 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
965 return -EIO; 965 return -EOPNOTSUPP;
966 } 966 }
967 967
968 flow_group_in = mlx5_vzalloc(inlen); 968 flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1079 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); 1079 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
1080 if (!root_ns) { 1080 if (!root_ns) {
1081 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); 1081 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
1082 return -EIO; 1082 return -EOPNOTSUPP;
1083 } 1083 }
1084 1084
1085 flow_group_in = mlx5_vzalloc(inlen); 1085 flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1630 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || 1630 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1631 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1631 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1632 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); 1632 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1633 return -ENOTSUPP; 1633 return -EOPNOTSUPP;
1634 } 1634 }
1635 1635
1636 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) 1636 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
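The ENOTSUPP-to-EOPNOTSUPP conversions above matter because ENOTSUPP (524) is a kernel-internal code with no userspace definition: anything that leaks it to an ioctl caller prints as "Unknown error 524". A minimal userspace sketch, with the 524 value copied in by hand since errno.h does not provide it:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ENOTSUPP 524	/* kernel-internal; absent from userspace errno.h */

int main(void)
{
	/* Typically "Unknown error 524" vs. "Operation not supported". */
	printf("ENOTSUPP:   %s\n", strerror(ENOTSUPP));
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
	return 0;
}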
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed1cc22..595f7c7383b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
166 return 0; 166 return 0;
167 167
168out_notsupp: 168out_notsupp:
169 return -ENOTSUPP; 169 return -EOPNOTSUPP;
170} 170}
171 171
172int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, 172int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
424 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 424 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
425 if (!root_ns) { 425 if (!root_ns) {
426 esw_warn(dev, "Failed to get FDB flow namespace\n"); 426 esw_warn(dev, "Failed to get FDB flow namespace\n");
427 err = -EOPNOTSUPP;
427 goto ns_err; 428 goto ns_err;
428 } 429 }
429 430
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
535 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 536 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
536 if (!ns) { 537 if (!ns) {
537 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 538 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
538 return -ENOMEM; 539 return -EOPNOTSUPP;
539 } 540 }
540 541
541 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0); 542 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
655 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); 656 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
656 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 657 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
657 if (err1) 658 if (err1)
658 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); 659 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
659 } 660 }
660 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 661 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
661 if (mlx5_eswitch_inline_mode_get(esw, 662 if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
674 int vport; 675 int vport;
675 int err; 676 int err;
676 677
678 /* disable PF RoCE so missed packets don't go through RoCE steering */
679 mlx5_dev_list_lock();
680 mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
681 mlx5_dev_list_unlock();
682
677 err = esw_create_offloads_fdb_table(esw, nvports); 683 err = esw_create_offloads_fdb_table(esw, nvports);
678 if (err) 684 if (err)
679 return err; 685 goto create_fdb_err;
680 686
681 err = esw_create_offloads_table(esw); 687 err = esw_create_offloads_table(esw);
682 if (err) 688 if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
696 goto err_reps; 702 goto err_reps;
697 } 703 }
698 704
699 /* disable PF RoCE so missed packets don't go through RoCE steering */
700 mlx5_dev_list_lock();
701 mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
702 mlx5_dev_list_unlock();
703
704 return 0; 705 return 0;
705 706
706err_reps: 707err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
717 718
718create_ft_err: 719create_ft_err:
719 esw_destroy_offloads_fdb_table(esw); 720 esw_destroy_offloads_fdb_table(esw);
721
722create_fdb_err:
723 /* enable back PF RoCE */
724 mlx5_dev_list_lock();
725 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
726 mlx5_dev_list_unlock();
727
720 return err; 728 return err;
721} 729}
722 730
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
724{ 732{
725 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; 733 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
726 734
727 /* enable back PF RoCE */
728 mlx5_dev_list_lock();
729 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
730 mlx5_dev_list_unlock();
731
732 mlx5_eswitch_disable_sriov(esw); 735 mlx5_eswitch_disable_sriov(esw);
733 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 736 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
734 if (err) { 737 if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
738 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); 741 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
739 } 742 }
740 743
744 /* enable back PF RoCE */
745 mlx5_dev_list_lock();
746 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
747 mlx5_dev_list_unlock();
748
741 return err; 749 return err;
742} 750}
743 751
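esw_offloads_init() now disables PF RoCE before creating the FDB table and re-enables it on every failure path, so the side effect can never outlive an aborted init. A minimal sketch of that goto-unwind shape, with all helpers hypothetical:

#include <stdio.h>

static int roce_disabled;

static void disable_roce(void) { roce_disabled = 1; }
static void enable_roce(void)  { roce_disabled = 0; }
static int create_fdb_table(void) { return -1; }	/* force a failure */

static int offloads_init(void)
{
	int err;

	disable_roce();			/* side effect taken first ... */

	err = create_fdb_table();
	if (err)
		goto create_fdb_err;	/* ... so each error path undoes it */

	return 0;

create_fdb_err:
	enable_roce();
	return err;
}

int main(void)
{
	printf("init: %d, roce_disabled: %d\n", offloads_init(), roce_disabled);
	return 0;
}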
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b53fc85a2375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
322 flow_table_properties_nic_receive. 322 flow_table_properties_nic_receive.
323 flow_modify_en); 323 flow_modify_en);
324 if (!atomic_mod_cap) 324 if (!atomic_mod_cap)
325 return -ENOTSUPP; 325 return -EOPNOTSUPP;
326 opmod = 1; 326 opmod = 1;
327 327
328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); 328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2fc916c..6346a8f5883b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
1822 struct mlx5_flow_table *ft; 1822 struct mlx5_flow_table *ft;
1823 1823
1824 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); 1824 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
1825 if (!ns) 1825 if (WARN_ON(!ns))
1826 return -EINVAL; 1826 return -EINVAL;
1827 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0); 1827 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
1828 if (IS_ERR(ft)) { 1828 if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d469..3c315eb8d270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
807 return 0; 807 return 0;
808 } 808 }
809 809
810 return -ENOTSUPP; 810 return -EOPNOTSUPP;
811} 811}
812 812
813 813
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..fd12e0a377a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
620 u32 out[MLX5_ST_SZ_DW(qtct_reg)]; 620 u32 out[MLX5_ST_SZ_DW(qtct_reg)];
621 621
622 if (!MLX5_CAP_GEN(mdev, ets)) 622 if (!MLX5_CAP_GEN(mdev, ets))
623 return -ENOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), 625 return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
626 MLX5_REG_QETCR, 0, 1); 626 MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
632 u32 in[MLX5_ST_SZ_DW(qtct_reg)]; 632 u32 in[MLX5_ST_SZ_DW(qtct_reg)];
633 633
634 if (!MLX5_CAP_GEN(mdev, ets)) 634 if (!MLX5_CAP_GEN(mdev, ets))
635 return -ENOTSUPP; 635 return -EOPNOTSUPP;
636 636
637 memset(in, 0, sizeof(in)); 637 memset(in, 0, sizeof(in));
638 return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, 638 return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..7129c30a2ab4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
532 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) 532 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
533 return -EACCES; 533 return -EACCES;
534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) 534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
535 return -ENOTSUPP; 535 return -EOPNOTSUPP;
536 536
537 in = mlx5_vzalloc(inlen); 537 in = mlx5_vzalloc(inlen);
538 if (!in) 538 if (!in)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index be3c91c7f211..5484fd726d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
305{ 305{
306 void __iomem *ioaddr = hw->pcsr; 306 void __iomem *ioaddr = hw->pcsr;
307 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 307 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
308 u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
308 int ret = 0; 309 int ret = 0;
309 310
311 /* Discard masked bits */
312 intr_status &= ~intr_mask;
313
310 /* Not used events (e.g. MMC interrupts) are not handled. */ 314 /* Not used events (e.g. MMC interrupts) are not handled. */
311 if ((intr_status & GMAC_INT_STATUS_MMCTIS)) 315 if ((intr_status & GMAC_INT_STATUS_MMCTIS))
312 x->mmc_tx_irq_n++; 316 x->mmc_tx_irq_n++;
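dwmac1000_irq_status() previously acted on raw GMAC_INT_STATUS bits even when the corresponding interrupt was masked in GMAC_INT_MASK. A standalone sketch of the masking step, with made-up bit names:

#include <stdint.h>
#include <stdio.h>

/* Bit layout is illustrative, not the real GMAC register map. */
#define INT_MMC_TX	(1u << 0)
#define INT_PCS		(1u << 1)

static uint32_t irq_pending(uint32_t status, uint32_t mask)
{
	/* Discard masked bits, as the dwmac1000 hunk above now does. */
	return status & ~mask;
}

int main(void)
{
	uint32_t status = INT_MMC_TX | INT_PCS;
	uint32_t mask = INT_PCS;	/* PCS interrupt is masked off */

	printf("serviced bits: %#x\n", irq_pending(status, mask));
	return 0;
}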
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a1cc089acb7..86e5749226ef 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context)
1295 ndev = hv_get_drvdata(device); 1295 ndev = hv_get_drvdata(device);
1296 buffer = get_per_channel_state(channel); 1296 buffer = get_per_channel_state(channel);
1297 1297
1298 /* commit_rd_index() -> hv_signal_on_read() needs this. */
1299 init_cached_read_index(channel);
1300
1298 do { 1301 do {
1299 desc = get_next_pkt_raw(channel); 1302 desc = get_next_pkt_raw(channel);
1300 if (desc != NULL) { 1303 if (desc != NULL) {
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context)
1347 1350
1348 bufferlen = bytes_recvd; 1351 bufferlen = bytes_recvd;
1349 } 1352 }
1353
1354 init_cached_read_index(channel);
1355
1350 } while (1); 1356 } while (1);
1351 1357
1352 if (bufferlen > NETVSC_PACKET_SIZE) 1358 if (bufferlen > NETVSC_PACKET_SIZE)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e55809c5beb7..6742070ca676 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
1012 .phy_id = PHY_ID_KSZ8795, 1012 .phy_id = PHY_ID_KSZ8795,
1013 .phy_id_mask = MICREL_PHY_ID_MASK, 1013 .phy_id_mask = MICREL_PHY_ID_MASK,
1014 .name = "Micrel KSZ8795", 1014 .name = "Micrel KSZ8795",
1015 .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), 1015 .features = PHY_BASIC_FEATURES,
1016 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 1016 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
1017 .config_init = kszphy_config_init, 1017 .config_init = kszphy_config_init,
1018 .config_aneg = ksz8873mll_config_aneg, 1018 .config_aneg = ksz8873mll_config_aneg,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index d02ca1491d16..8d3e53fac1da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -91,7 +91,7 @@
91 91
92#define IWL8000_FW_PRE "iwlwifi-8000C-" 92#define IWL8000_FW_PRE "iwlwifi-8000C-"
93#define IWL8000_MODULE_FIRMWARE(api) \ 93#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 94 IWL8000_FW_PRE __stringify(api) ".ucode"
95 95
96#define IWL8265_FW_PRE "iwlwifi-8265-" 96#define IWL8265_FW_PRE "iwlwifi-8265-"
97#define IWL8265_MODULE_FIRMWARE(api) \ 97#define IWL8265_MODULE_FIRMWARE(api) \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 636c8b03e318..09e9e2e3ed04 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1164 .frame_limit = IWL_FRAME_LIMIT, 1164 .frame_limit = IWL_FRAME_LIMIT,
1165 }; 1165 };
1166 1166
1167 /* Make sure reserved queue is still marked as such (or allocated) */ 1167 /* Make sure reserved queue is still marked as such (if allocated) */
1168 mvm->queue_info[mvm_sta->reserved_queue].status = 1168 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1169 IWL_MVM_QUEUE_RESERVED; 1169 mvm->queue_info[mvm_sta->reserved_queue].status =
1170 IWL_MVM_QUEUE_RESERVED;
1170 1171
1171 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 1172 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1172 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; 1173 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051be832e..bec7d9c46087 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
843 return; 843 return;
844 844
845 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); 845 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
846 thermal_zone_device_unregister(mvm->tz_device.tzone); 846 if (mvm->tz_device.tzone) {
847 mvm->tz_device.tzone = NULL; 847 thermal_zone_device_unregister(mvm->tz_device.tzone);
848 mvm->tz_device.tzone = NULL;
849 }
848} 850}
849 851
850static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) 852static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
853 return; 855 return;
854 856
855 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); 857 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
856 thermal_cooling_device_unregister(mvm->cooling_dev.cdev); 858 if (mvm->cooling_dev.cdev) {
857 mvm->cooling_dev.cdev = NULL; 859 thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
860 mvm->cooling_dev.cdev = NULL;
861 }
858} 862}
859#endif /* CONFIG_THERMAL */ 863#endif /* CONFIG_THERMAL */
860 864
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 17ac1dce3286..3dd8bcbb3011 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
532 link = kzalloc(sizeof(*link), GFP_KERNEL); 532 link = kzalloc(sizeof(*link), GFP_KERNEL);
533 if (!link) 533 if (!link)
534 return NULL; 534 return NULL;
535
535 INIT_LIST_HEAD(&link->sibling); 536 INIT_LIST_HEAD(&link->sibling);
536 INIT_LIST_HEAD(&link->children); 537 INIT_LIST_HEAD(&link->children);
537 INIT_LIST_HEAD(&link->link); 538 INIT_LIST_HEAD(&link->link);
538 link->pdev = pdev; 539 link->pdev = pdev;
539 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) { 540
541 /*
542 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
543 * hierarchies.
544 */
545 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
546 pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
547 link->root = link;
548 } else {
540 struct pcie_link_state *parent; 549 struct pcie_link_state *parent;
550
541 parent = pdev->bus->parent->self->link_state; 551 parent = pdev->bus->parent->self->link_state;
542 if (!parent) { 552 if (!parent) {
543 kfree(link); 553 kfree(link);
544 return NULL; 554 return NULL;
545 } 555 }
556
546 link->parent = parent; 557 link->parent = parent;
558 link->root = link->parent->root;
547 list_add(&link->link, &parent->children); 559 list_add(&link->link, &parent->children);
548 } 560 }
549 /* Setup a pointer to the root port link */
550 if (!link->parent)
551 link->root = link;
552 else
553 link->root = link->parent->root;
554 561
555 list_add(&link->sibling, &link_list); 562 list_add(&link->sibling, &link_list);
556 pdev->link_state = link; 563 pdev->link_state = link;
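The ASPM change computes link->root at allocation time from the port type rather than patching it up afterwards: Root Ports and PCI/PCI-X-to-PCIe bridges become their own root, everything else inherits the parent's. A minimal sketch of that invariant, with a hypothetical struct link:

#include <stdio.h>
#include <stdlib.h>

struct link {
	struct link *parent;
	struct link *root;
};

/* is_root mirrors the Root Port / PCIe Bridge test in the hunk above;
 * non-root callers must pass a valid parent. */
static struct link *alloc_link(struct link *parent, int is_root)
{
	struct link *l = calloc(1, sizeof(*l));

	if (!l)
		return NULL;
	if (is_root) {
		l->root = l;		/* roots point at themselves */
	} else {
		l->parent = parent;
		l->root = parent->root;	/* inherit the hierarchy's root */
	}
	return l;
}

int main(void)
{
	struct link *root = alloc_link(NULL, 1);
	struct link *leaf = alloc_link(root, 0);

	printf("leaf's root is root: %d\n", leaf->root == root);
	free(leaf);
	free(root);
	return 0;
}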
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 09172043d589..c617ec49e9ed 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
217 BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, 217 BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
218 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ 218 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
219 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ 219 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
220 BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ 220 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
221 BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, 221 BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
222 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ 222 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
223 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ 223 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index c123488266ce..d94aef17348b 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
731 int reg) 731 int reg)
732{ 732{
733 struct byt_community *comm = byt_get_community(vg, offset); 733 struct byt_community *comm = byt_get_community(vg, offset);
734 u32 reg_offset = 0; 734 u32 reg_offset;
735 735
736 if (!comm) 736 if (!comm)
737 return NULL; 737 return NULL;
738 738
739 offset -= comm->pin_base; 739 offset -= comm->pin_base;
740 if (reg == BYT_INT_STAT_REG) 740 switch (reg) {
741 case BYT_INT_STAT_REG:
741 reg_offset = (offset / 32) * 4; 742 reg_offset = (offset / 32) * 4;
742 else 743 break;
744 case BYT_DEBOUNCE_REG:
745 reg_offset = 0;
746 break;
747 default:
743 reg_offset = comm->pad_map[offset] * 16; 748 reg_offset = comm->pad_map[offset] * 16;
749 break;
750 }
744 751
745 return comm->reg_base + reg_offset + reg; 752 return comm->reg_base + reg_offset + reg;
746} 753}
@@ -1243,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1243 debounce = readl(db_reg); 1250 debounce = readl(db_reg);
1244 debounce &= ~BYT_DEBOUNCE_PULSE_MASK; 1251 debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
1245 1252
1253 if (arg)
1254 conf |= BYT_DEBOUNCE_EN;
1255 else
1256 conf &= ~BYT_DEBOUNCE_EN;
1257
1246 switch (arg) { 1258 switch (arg) {
1247 case 0:
1248 conf &= BYT_DEBOUNCE_EN;
1249 break;
1250 case 375: 1259 case 375:
1251 debounce |= BYT_DEBOUNCE_PULSE_375US; 1260 debounce |= BYT_DEBOUNCE_PULSE_375US;
1252 break; 1261 break;
@@ -1269,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1269 debounce |= BYT_DEBOUNCE_PULSE_24MS; 1278 debounce |= BYT_DEBOUNCE_PULSE_24MS;
1270 break; 1279 break;
1271 default: 1280 default:
1272 ret = -EINVAL; 1281 if (arg)
1282 ret = -EINVAL;
1283 break;
1273 } 1284 }
1274 1285
1275 if (!ret) 1286 if (!ret)
@@ -1612,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1612 continue; 1623 continue;
1613 } 1624 }
1614 1625
1626 raw_spin_lock(&vg->lock);
1615 pending = readl(reg); 1627 pending = readl(reg);
1628 raw_spin_unlock(&vg->lock);
1616 for_each_set_bit(pin, &pending, 32) { 1629 for_each_set_bit(pin, &pending, 32) {
1617 virq = irq_find_mapping(vg->chip.irqdomain, base + pin); 1630 virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
1618 generic_handle_irq(virq); 1631 generic_handle_irq(virq);
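In byt_pin_config_set(), the old case 0 did `conf &= BYT_DEBOUNCE_EN`, which masks the whole register down to the enable bit instead of clearing it; the fix hoists the enable/disable decision out of the switch so arg == 0 reliably disables debounce and only non-zero arguments select a pulse width. Sketch of the corrected flow (DEBOUNCE_EN is a stand-in bit, the pulse values are abbreviated):

#include <stdint.h>
#include <stdio.h>

#define DEBOUNCE_EN	(1u << 0)

static int set_debounce(uint32_t *conf, unsigned int arg)
{
	if (arg)
		*conf |= DEBOUNCE_EN;
	else
		*conf &= ~DEBOUNCE_EN;

	switch (arg) {
	case 0:		/* disable only; no pulse width to program */
	case 375:
	case 750:
		return 0;
	default:
		return -1;	/* unsupported pulse width */
	}
}

int main(void)
{
	uint32_t conf = DEBOUNCE_EN;

	set_debounce(&conf, 0);
	printf("conf after disable: %#x\n", conf);
	return 0;
}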
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index b21896126f76..4d4ef42a39b5 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
794 unsigned int i; 794 unsigned int i;
795 int ret; 795 int ret;
796 796
797 if (!mrfld_buf_available(mp, pin))
798 return -ENOTSUPP;
799
797 for (i = 0; i < nconfigs; i++) { 800 for (i = 0; i < nconfigs; i++) {
798 switch (pinconf_to_config_param(configs[i])) { 801 switch (pinconf_to_config_param(configs[i])) {
799 case PIN_CONFIG_BIAS_DISABLE: 802 case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0eb51e33cb1b..207a8de4e1ed 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
564 val = arg / 10 - 1; 564 val = arg / 10 - 1;
565 break; 565 break;
566 case PIN_CONFIG_BIAS_DISABLE: 566 case PIN_CONFIG_BIAS_DISABLE:
567 val = 0; 567 continue;
568 break;
569 case PIN_CONFIG_BIAS_PULL_UP: 568 case PIN_CONFIG_BIAS_PULL_UP:
570 if (arg == 0) 569 if (arg == 0)
571 return -EINVAL; 570 return -EINVAL;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index e6a512ebeae2..a3ade9e4ef47 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
272 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1, 272 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
273 BIT(3)), 273 BIT(3)),
274 AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100, 274 AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
275 AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), 275 AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
276 AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100, 276 AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
277 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)), 277 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
278 AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100, 278 AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index a43b0e8a438d..988a7472c2ab 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -30,9 +30,6 @@
30#include <linux/of_gpio.h> 30#include <linux/of_gpio.h>
31#include <linux/regulator/of_regulator.h> 31#include <linux/regulator/of_regulator.h>
32#include <linux/regulator/machine.h> 32#include <linux/regulator/machine.h>
33#include <linux/acpi.h>
34#include <linux/property.h>
35#include <linux/gpio/consumer.h>
36 33
37struct fixed_voltage_data { 34struct fixed_voltage_data {
38 struct regulator_desc desc; 35 struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
97 return config; 94 return config;
98} 95}
99 96
100/**
101 * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
102 * @dev: device requesting for fixed_voltage_config
103 * @desc: regulator description
104 *
105 * Populates fixed_voltage_config structure by extracting data through ACPI
 106 * interface, returns a pointer to the populated structure or NULL if memory
107 * alloc fails.
108 */
109static struct fixed_voltage_config *
110acpi_get_fixed_voltage_config(struct device *dev,
111 const struct regulator_desc *desc)
112{
113 struct fixed_voltage_config *config;
114 const char *supply_name;
115 struct gpio_desc *gpiod;
116 int ret;
117
118 config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
119 if (!config)
120 return ERR_PTR(-ENOMEM);
121
122 ret = device_property_read_string(dev, "supply-name", &supply_name);
123 if (!ret)
124 config->supply_name = supply_name;
125
126 gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
127 if (IS_ERR(gpiod))
128 return ERR_PTR(-ENODEV);
129
130 config->gpio = desc_to_gpio(gpiod);
131 config->enable_high = device_property_read_bool(dev,
132 "enable-active-high");
133 gpiod_put(gpiod);
134
135 return config;
136}
137
138static struct regulator_ops fixed_voltage_ops = { 97static struct regulator_ops fixed_voltage_ops = {
139}; 98};
140 99
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
155 &drvdata->desc); 114 &drvdata->desc);
156 if (IS_ERR(config)) 115 if (IS_ERR(config))
157 return PTR_ERR(config); 116 return PTR_ERR(config);
158 } else if (ACPI_HANDLE(&pdev->dev)) {
159 config = acpi_get_fixed_voltage_config(&pdev->dev,
160 &drvdata->desc);
161 if (IS_ERR(config))
162 return PTR_ERR(config);
163 } else { 117 } else {
164 config = dev_get_platdata(&pdev->dev); 118 config = dev_get_platdata(&pdev->dev);
165 } 119 }
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 4864b9d742c0..716191046a70 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
452 vsel = 62; 452 vsel = 62;
453 else if ((min_uV > 1800000) && (min_uV <= 1900000)) 453 else if ((min_uV > 1800000) && (min_uV <= 1900000))
454 vsel = 61; 454 vsel = 61;
455 else if ((min_uV > 1350000) && (min_uV <= 1800000)) 455 else if ((min_uV > 1500000) && (min_uV <= 1800000))
456 vsel = 60; 456 vsel = 60;
457 else if ((min_uV > 1350000) && (min_uV <= 1500000)) 457 else if ((min_uV > 1350000) && (min_uV <= 1500000))
458 vsel = 59; 458 vsel = 59;
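The twl6030 fix closes a range bug: the old vsel 60 branch claimed (1350000, 1800000], making the later vsel 59 branch for (1350000, 1500000] unreachable. The ranges must tile the voltage space without overlap so each request maps to exactly one selector; a compact sketch of the corrected boundaries:

#include <stdio.h>

static int map_voltage(int min_uV)
{
	if (min_uV > 1800000 && min_uV <= 1900000)
		return 61;
	if (min_uV > 1500000 && min_uV <= 1800000)	/* was > 1350000 */
		return 60;
	if (min_uV > 1350000 && min_uV <= 1500000)	/* now reachable */
		return 59;
	return -1;
}

int main(void)
{
	printf("1.4V -> vsel %d\n", map_voltage(1400000));
	return 0;
}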
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index c93c5a8fba32..5dc673dc9487 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
1551 will be called rtc-mpc5121. 1551 will be called rtc-mpc5121.
1552 1552
1553config RTC_DRV_JZ4740 1553config RTC_DRV_JZ4740
1554 bool "Ingenic JZ4740 SoC" 1554 tristate "Ingenic JZ4740 SoC"
1555 depends on MACH_INGENIC || COMPILE_TEST 1555 depends on MACH_INGENIC || COMPILE_TEST
1556 help 1556 help
1557 If you say yes here you get support for the Ingenic JZ47xx SoCs RTC 1557 If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
1558 controllers. 1558 controllers.
1559 1559
 1560 This driver can also be built as a module. If so, the module
1561 will be called rtc-jz4740.
1562
1560config RTC_DRV_LPC24XX 1563config RTC_DRV_LPC24XX
1561 tristate "NXP RTC for LPC178x/18xx/408x/43xx" 1564 tristate "NXP RTC for LPC178x/18xx/408x/43xx"
1562 depends on ARCH_LPC18XX || COMPILE_TEST 1565 depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 72918c1ba092..64989afffa3d 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -17,6 +17,7 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h>
20#include <linux/of_device.h> 21#include <linux/of_device.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/reboot.h> 23#include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
294 JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks); 295 JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
295 296
296 jz4740_rtc_poweroff(dev_for_power_off); 297 jz4740_rtc_poweroff(dev_for_power_off);
297 machine_halt(); 298 kernel_halt();
298} 299}
299 300
300static const struct of_device_id jz4740_rtc_of_match[] = { 301static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
302 { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 }, 303 { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
303 {}, 304 {},
304}; 305};
306MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
305 307
306static int jz4740_rtc_probe(struct platform_device *pdev) 308static int jz4740_rtc_probe(struct platform_device *pdev)
307{ 309{
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
429 { "jz4780-rtc", ID_JZ4780 }, 431 { "jz4780-rtc", ID_JZ4780 },
430 {} 432 {}
431}; 433};
434MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
432 435
433static struct platform_driver jz4740_rtc_driver = { 436static struct platform_driver jz4740_rtc_driver = {
434 .probe = jz4740_rtc_probe, 437 .probe = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
440 .id_table = jz4740_rtc_ids, 443 .id_table = jz4740_rtc_ids,
441}; 444};
442 445
443builtin_platform_driver(jz4740_rtc_driver); 446module_platform_driver(jz4740_rtc_driver);
447
448MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
449MODULE_LICENSE("GPL");
449MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC");
451MODULE_ALIAS("platform:jz4740-rtc");
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index ec91bd07f00a..c680d7641311 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
534{ 534{
535 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); 535 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
536 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); 536 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
537 unsigned long flags;
537 int req_size; 538 int req_size;
539 int ret;
538 540
539 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); 541 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
540 542
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
562 req_size = sizeof(cmd->req.cmd); 564 req_size = sizeof(cmd->req.cmd);
563 } 565 }
564 566
565 if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) 567 ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
568 if (ret == -EIO) {
569 cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
570 spin_lock_irqsave(&req_vq->vq_lock, flags);
571 virtscsi_complete_cmd(vscsi, cmd);
572 spin_unlock_irqrestore(&req_vq->vq_lock, flags);
573 } else if (ret != 0) {
566 return SCSI_MLQUEUE_HOST_BUSY; 574 return SCSI_MLQUEUE_HOST_BUSY;
575 }
567 return 0; 576 return 0;
568} 577}
569 578
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
index 113f3d6c4b3a..27f75b17679b 100644
--- a/drivers/staging/greybus/timesync_platform.c
+++ b/drivers/staging/greybus/timesync_platform.c
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
45 45
46int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata) 46int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
47{ 47{
48 if (!arche_platform_change_state_cb)
49 return 0;
50
48 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC, 51 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
49 pdata); 52 pdata);
50} 53}
51 54
52void gb_timesync_platform_unlock_bus(void) 55void gb_timesync_platform_unlock_bus(void)
53{ 56{
57 if (!arche_platform_change_state_cb)
58 return;
59
54 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL); 60 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
55} 61}
56 62
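Both greybus helpers now tolerate arche_platform_change_state_cb being unregistered instead of jumping through a NULL pointer. The guard-the-optional-hook pattern in miniature, with hypothetical names:

#include <stdio.h>

/* Optional hook, standing in for arche_platform_change_state_cb. */
static int (*change_state_cb)(int state);

static int lock_bus(void)
{
	if (!change_state_cb)	/* hook not registered: treat as success */
		return 0;
	return change_state_cb(1);
}

int main(void)
{
	printf("lock_bus with no callback: %d\n", lock_bus());
	return 0;
}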
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d2e50a27140c..24f9f98968a5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
37 /* CBM - Flash disk */ 37 /* CBM - Flash disk */
38 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 38 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
39 39
40 /* WORLDE easy key (easykey.25) MIDI controller */
41 { USB_DEVICE(0x0218, 0x0401), .driver_info =
42 USB_QUIRK_CONFIG_INTF_STRINGS },
43
40 /* HP 5300/5370C scanner */ 44 /* HP 5300/5370C scanner */
41 { USB_DEVICE(0x03f0, 0x0701), .driver_info = 45 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
42 USB_QUIRK_STRING_FETCH_255 }, 46 USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5490fc51638e..fd80c1b9c823 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2269 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) 2269 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2270 return -EINVAL; 2270 return -EINVAL;
2271 length = le32_to_cpu(d->dwSize); 2271 length = le32_to_cpu(d->dwSize);
2272 if (len < length)
2273 return -EINVAL;
2272 type = le32_to_cpu(d->dwPropertyDataType); 2274 type = le32_to_cpu(d->dwPropertyDataType);
2273 if (type < USB_EXT_PROP_UNICODE || 2275 if (type < USB_EXT_PROP_UNICODE ||
2274 type > USB_EXT_PROP_UNICODE_MULTI) { 2276 type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2277 return -EINVAL; 2279 return -EINVAL;
2278 } 2280 }
2279 pnl = le16_to_cpu(d->wPropertyNameLength); 2281 pnl = le16_to_cpu(d->wPropertyNameLength);
2282 if (length < 14 + pnl) {
2283 pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
2284 length, pnl, type);
2285 return -EINVAL;
2286 }
2280 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); 2287 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2281 if (length != 14 + pnl + pdl) { 2288 if (length != 14 + pnl + pdl) {
2282 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", 2289 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
2363 } 2370 }
2364 } 2371 }
2365 if (flags & (1 << i)) { 2372 if (flags & (1 << i)) {
2373 if (len < 4) {
2374 goto error;
2375 }
2366 os_descs_count = get_unaligned_le32(data); 2376 os_descs_count = get_unaligned_le32(data);
2367 data += 4; 2377 data += 4;
2368 len -= 4; 2378 len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
2435 2445
2436 ENTER(); 2446 ENTER();
2437 2447
2438 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || 2448 if (unlikely(len < 16 ||
2449 get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2439 get_unaligned_le32(data + 4) != len)) 2450 get_unaligned_le32(data + 4) != len))
2440 goto error; 2451 goto error;
2441 str_count = get_unaligned_le32(data + 8); 2452 str_count = get_unaligned_le32(data + 8);
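The f_fs.c hunks all follow one rule: validate every length read from the user-supplied descriptor buffer against the bytes actually available before dereferencing anything past it. A simplified userspace sketch of that style of parser (the layout and offsets here are illustrative, not the real OS-descriptor format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int parse_desc(const uint8_t *data, size_t len)
{
	uint32_t length;
	uint16_t pnl;

	if (len < 16)			/* fixed header must fit */
		return -1;
	memcpy(&length, data, sizeof(length));
	if (len < length)		/* claimed size must fit too */
		return -1;
	memcpy(&pnl, data + 8, sizeof(pnl));
	if (length < 14 + (uint32_t)pnl)	/* name must fit inside it */
		return -1;
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 0 };

	printf("short buffer rejected: %d\n", parse_desc(buf, sizeof(buf)));
	return 0;
}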
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fca288bbc800..772f15821242 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
594 | MUSB_PORT_STAT_RESUME; 594 | MUSB_PORT_STAT_RESUME;
595 musb->rh_timer = jiffies 595 musb->rh_timer = jiffies
596 + msecs_to_jiffies(USB_RESUME_TIMEOUT); 596 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
597 musb->need_finish_resume = 1;
598
599 musb->xceiv->otg->state = OTG_STATE_A_HOST; 597 musb->xceiv->otg->state = OTG_STATE_A_HOST;
600 musb->is_active = 1; 598 musb->is_active = 1;
601 musb_host_resume_root_hub(musb); 599 musb_host_resume_root_hub(musb);
600 schedule_delayed_work(&musb->finish_resume_work,
601 msecs_to_jiffies(USB_RESUME_TIMEOUT));
602 break; 602 break;
603 case OTG_STATE_B_WAIT_ACON: 603 case OTG_STATE_B_WAIT_ACON:
604 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 604 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1925static void musb_irq_work(struct work_struct *data) 1925static void musb_irq_work(struct work_struct *data)
1926{ 1926{
1927 struct musb *musb = container_of(data, struct musb, irq_work.work); 1927 struct musb *musb = container_of(data, struct musb, irq_work.work);
1928 int error;
1929
1930 error = pm_runtime_get_sync(musb->controller);
1931 if (error < 0) {
1932 dev_err(musb->controller, "Could not enable: %i\n", error);
1933
1934 return;
1935 }
1928 1936
1929 musb_pm_runtime_check_session(musb); 1937 musb_pm_runtime_check_session(musb);
1930 1938
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
1932 musb->xceiv_old_state = musb->xceiv->otg->state; 1940 musb->xceiv_old_state = musb->xceiv->otg->state;
1933 sysfs_notify(&musb->controller->kobj, NULL, "mode"); 1941 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1934 } 1942 }
1943
1944 pm_runtime_mark_last_busy(musb->controller);
1945 pm_runtime_put_autosuspend(musb->controller);
1935} 1946}
1936 1947
1937static void musb_recover_from_babble(struct musb *musb) 1948static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
2710 mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV; 2721 mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
2711 if ((devctl & mask) != (musb->context.devctl & mask)) 2722 if ((devctl & mask) != (musb->context.devctl & mask))
2712 musb->port1_status = 0; 2723 musb->port1_status = 0;
2713 if (musb->need_finish_resume) {
2714 musb->need_finish_resume = 0;
2715 schedule_delayed_work(&musb->finish_resume_work,
2716 msecs_to_jiffies(USB_RESUME_TIMEOUT));
2717 }
2718 2724
2719 /* 2725 /*
2720 * The USB HUB code expects the device to be in RPM_ACTIVE once it came 2726 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
2766 2772
2767 musb_restore_context(musb); 2773 musb_restore_context(musb);
2768 2774
2769 if (musb->need_finish_resume) {
2770 musb->need_finish_resume = 0;
2771 schedule_delayed_work(&musb->finish_resume_work,
2772 msecs_to_jiffies(USB_RESUME_TIMEOUT));
2773 }
2774
2775 spin_lock_irqsave(&musb->lock, flags); 2775 spin_lock_irqsave(&musb->lock, flags);
2776 error = musb_run_resume_work(musb); 2776 error = musb_run_resume_work(musb);
2777 if (error) 2777 if (error)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index ade902ea1221..ce5a18c98c6d 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -410,7 +410,6 @@ struct musb {
410 410
411 /* is_suspended means USB B_PERIPHERAL suspend */ 411 /* is_suspended means USB B_PERIPHERAL suspend */
412 unsigned is_suspended:1; 412 unsigned is_suspended:1;
413 unsigned need_finish_resume :1;
414 413
415 /* may_wakeup means remote wakeup is enabled */ 414 /* may_wakeup means remote wakeup is enabled */
416 unsigned may_wakeup:1; 415 unsigned may_wakeup:1;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7ce31a4c7e7f..42cc72e54c05 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
2007 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, 2007 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
2008 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, 2008 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
2009 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, 2009 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
2010 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
2010 { } /* Terminating entry */ 2011 { } /* Terminating entry */
2011}; 2012};
2012MODULE_DEVICE_TABLE(usb, option_ids); 2013MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 46fca6b75846..1db4b61bdf7b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, 51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
52 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
52 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, 53 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
53 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, 54 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
54 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) }, 55 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index e3b7af8adfb7..09d9be88209e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
27#define ATEN_VENDOR_ID 0x0557 27#define ATEN_VENDOR_ID 0x0557
28#define ATEN_VENDOR_ID2 0x0547 28#define ATEN_VENDOR_ID2 0x0547
29#define ATEN_PRODUCT_ID 0x2008 29#define ATEN_PRODUCT_ID 0x2008
30#define ATEN_PRODUCT_ID2 0x2118
30 31
31#define IODATA_VENDOR_ID 0x04bb 32#define IODATA_VENDOR_ID 0x04bb
32#define IODATA_PRODUCT_ID 0x0a03 33#define IODATA_PRODUCT_ID 0x0a03
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1bc6089b9008..696458db7e3c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
124 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ 124 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
125 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ 125 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
126 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ 126 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
127 {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
127 {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */ 128 {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
128 {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ 129 {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
129 {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */ 130 {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 128d10282d16..7690e5bf3cf1 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
1123 mutex_lock(&container->lock); 1123 mutex_lock(&container->lock);
1124 1124
1125 ret = tce_iommu_create_default_window(container); 1125 ret = tce_iommu_create_default_window(container);
1126 if (ret) 1126 if (!ret)
1127 return ret; 1127 ret = tce_iommu_create_window(container,
1128 1128 create.page_shift,
1129 ret = tce_iommu_create_window(container, create.page_shift, 1129 create.window_size, create.levels,
1130 create.window_size, create.levels, 1130 &create.start_addr);
1131 &create.start_addr);
1132 1131
1133 mutex_unlock(&container->lock); 1132 mutex_unlock(&container->lock);
1134 1133
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b3cc33fa6d26..bd6f293c4ebd 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -38,6 +38,8 @@
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/mdev.h> 39#include <linux/mdev.h>
40#include <linux/notifier.h> 40#include <linux/notifier.h>
41#include <linux/dma-iommu.h>
42#include <linux/irqdomain.h>
41 43
42#define DRIVER_VERSION "0.2" 44#define DRIVER_VERSION "0.2"
43#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" 45#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
1179 return NULL; 1181 return NULL;
1180} 1182}
1181 1183
1184static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
1185 phys_addr_t *base)
1186{
1187 struct list_head group_resv_regions;
1188 struct iommu_resv_region *region, *next;
1189 bool ret = false;
1190
1191 INIT_LIST_HEAD(&group_resv_regions);
1192 iommu_get_group_resv_regions(group, &group_resv_regions);
1193 list_for_each_entry(region, &group_resv_regions, list) {
1194 if (region->type & IOMMU_RESV_MSI) {
1195 *base = region->start;
1196 ret = true;
1197 goto out;
1198 }
1199 }
1200out:
1201 list_for_each_entry_safe(region, next, &group_resv_regions, list)
1202 kfree(region);
1203 return ret;
1204}
1205
1182static int vfio_iommu_type1_attach_group(void *iommu_data, 1206static int vfio_iommu_type1_attach_group(void *iommu_data,
1183 struct iommu_group *iommu_group) 1207 struct iommu_group *iommu_group)
1184{ 1208{
@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1187 struct vfio_domain *domain, *d; 1211 struct vfio_domain *domain, *d;
1188 struct bus_type *bus = NULL, *mdev_bus; 1212 struct bus_type *bus = NULL, *mdev_bus;
1189 int ret; 1213 int ret;
1214 bool resv_msi, msi_remap;
1215 phys_addr_t resv_msi_base;
1190 1216
1191 mutex_lock(&iommu->lock); 1217 mutex_lock(&iommu->lock);
1192 1218
@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1256 if (ret) 1282 if (ret)
1257 goto out_domain; 1283 goto out_domain;
1258 1284
1285 resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
1286
1259 INIT_LIST_HEAD(&domain->group_list); 1287 INIT_LIST_HEAD(&domain->group_list);
1260 list_add(&group->next, &domain->group_list); 1288 list_add(&group->next, &domain->group_list);
1261 1289
1262 if (!allow_unsafe_interrupts && 1290 msi_remap = resv_msi ? irq_domain_check_msi_remap() :
1263 !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) { 1291 iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
1292
1293 if (!allow_unsafe_interrupts && !msi_remap) {
1264 pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", 1294 pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
1265 __func__); 1295 __func__);
1266 ret = -EPERM; 1296 ret = -EPERM;
@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1302 if (ret) 1332 if (ret)
1303 goto out_detach; 1333 goto out_detach;
1304 1334
1335 if (resv_msi) {
1336 ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
1337 if (ret)
1338 goto out_detach;
1339 }
1340
1305 list_add(&domain->next, &iommu->domain_list); 1341 list_add(&domain->next, &iommu->domain_list);
1306 1342
1307 mutex_unlock(&iommu->lock); 1343 mutex_unlock(&iommu->lock);
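vfio_iommu_has_resv_msi() walks the group's reserved regions looking for an IOMMU_RESV_MSI window and reports its base; attach_group() then requires MSI isolation either from that window or from IOMMU_CAP_INTR_REMAP. A sketch of the scan over a plain array instead of the kernel list:

#include <stdbool.h>
#include <stdio.h>

enum { RESV_MSI = 1, RESV_OTHER = 2 };	/* illustrative type flags */

struct resv_region {
	int type;
	unsigned long start;
};

static bool has_resv_msi(const struct resv_region *r, int n,
			 unsigned long *base)
{
	int i;

	for (i = 0; i < n; i++) {
		if (r[i].type & RESV_MSI) {
			*base = r[i].start;	/* first MSI window wins */
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct resv_region regions[] = {
		{ RESV_OTHER, 0x1000 },
		{ RESV_MSI, 0x8000000 },
	};
	unsigned long base = 0;

	printf("msi: %d base: %#lx\n",
	       has_resv_msi(regions, 2, &base), base);
	return 0;
}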
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d6432603880c..8f99fe08de02 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
130 130
131static void vhost_init_is_le(struct vhost_virtqueue *vq) 131static void vhost_init_is_le(struct vhost_virtqueue *vq)
132{ 132{
133 if (vhost_has_feature(vq, VIRTIO_F_VERSION_1)) 133 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
134 vq->is_le = true; 134 || virtio_legacy_is_little_endian();
135} 135}
136#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */ 136#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
137 137
138static void vhost_reset_is_le(struct vhost_virtqueue *vq) 138static void vhost_reset_is_le(struct vhost_virtqueue *vq)
139{ 139{
140 vq->is_le = virtio_legacy_is_little_endian(); 140 vhost_init_is_le(vq);
141} 141}
142 142
143struct vhost_flush_struct { 143struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
1714 int r; 1714 int r;
1715 bool is_le = vq->is_le; 1715 bool is_le = vq->is_le;
1716 1716
1717 if (!vq->private_data) { 1717 if (!vq->private_data)
1718 vhost_reset_is_le(vq);
1719 return 0; 1718 return 0;
1720 }
1721 1719
1722 vhost_init_is_le(vq); 1720 vhost_init_is_le(vq);
1723 1721
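After this change a virtqueue's byte order is decided by one rule in one place: little-endian if the device negotiated VIRTIO_F_VERSION_1 or the legacy layout is already little-endian, and vhost_reset_is_le() simply reapplies it so init and reset cannot drift apart. A standalone sketch with stand-in helpers:

#include <stdbool.h>
#include <stdio.h>

static bool has_version_1;	/* stand-in for vhost_has_feature() */

static bool legacy_is_little_endian(void)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return true;
#else
	return false;
#endif
}

/* One rule shared by init and reset, as in the hunk above. */
static bool init_is_le(void)
{
	return has_version_1 || legacy_is_little_endian();
}

int main(void)
{
	has_version_1 = true;
	printf("is_le: %d\n", init_is_le());
	return 0;
}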
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 7e38ed79c3fc..409aeaa49246 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
159 if (xen_domain()) 159 if (xen_domain())
160 return true; 160 return true;
161 161
162 /*
163 * On ARM-based machines, the DMA ops will do the right thing,
164 * so always use them with legacy devices.
165 */
166 if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
167 return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
168
169 return false; 162 return false;
170} 163}
171 164
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8f6a2a5863b9..a27fc8791551 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
285 rc = -ENOMEM; 285 rc = -ENOMEM;
286 goto error_exit; 286 goto error_exit;
287 } 287 }
288 spin_lock_init(&cifsFile->file_info_lock);
288 file->private_data = cifsFile; 289 file->private_data = cifsFile;
289 cifsFile->tlink = cifs_get_tlink(tlink); 290 cifsFile->tlink = cifs_get_tlink(tlink);
290 tcon = tlink_tcon(tlink); 291 tcon = tlink_tcon(tlink);
diff --git a/fs/dax.c b/fs/dax.c
index 3af2da5e64ce..c45598b912e1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1031,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1031 struct blk_dax_ctl dax = { 0 }; 1031 struct blk_dax_ctl dax = { 0 };
1032 ssize_t map_len; 1032 ssize_t map_len;
1033 1033
1034 if (fatal_signal_pending(current)) {
1035 ret = -EINTR;
1036 break;
1037 }
1038
1034 dax.sector = dax_iomap_sector(iomap, pos); 1039 dax.sector = dax_iomap_sector(iomap, pos);
1035 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; 1040 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1036 map_len = dax_map_atomic(iomap->bdev, &dax); 1041 map_len = dax_map_atomic(iomap->bdev, &dax);
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 4304072161aa..40d61077bead 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
542 hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { 542 hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
543 if (invalidate) 543 if (invalidate)
544 set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); 544 set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
545 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
545 fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); 546 fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
546 } 547 }
547 } else { 548 } else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
560 wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, 561 wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
561 TASK_UNINTERRUPTIBLE); 562 TASK_UNINTERRUPTIBLE);
562 563
564 /* Make sure any pending writes are cancelled. */
565 if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
566 fscache_invalidate_writes(cookie);
567
563 /* Reset the cookie state if it wasn't relinquished */ 568 /* Reset the cookie state if it wasn't relinquished */
564 if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { 569 if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
565 atomic_inc(&cookie->n_active); 570 atomic_inc(&cookie->n_active);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index 9b28649df3a1..a8aa00be4444 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
48 cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; 48 cookie->flags = 1 << FSCACHE_COOKIE_ENABLED;
49 49
50 spin_lock_init(&cookie->lock); 50 spin_lock_init(&cookie->lock);
51 spin_lock_init(&cookie->stores_lock);
51 INIT_HLIST_HEAD(&cookie->backing_objects); 52 INIT_HLIST_HEAD(&cookie->backing_objects);
52 53
53 /* check the netfs type is not already present */ 54 /* check the netfs type is not already present */
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9e792e30f4db..7a182c87f378 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT,    "KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,    "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,    "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,    "DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT,  "?INI",
 		  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ execute_work_state:
 	event = -1;
 	if (new_state == NO_TRANSIT) {
 		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
+			_leave(" [dead]");
+			return;
+		}
 		fscache_enqueue_object(object);
 		event_mask = object->oob_event_mask;
 		goto unmask_events;
@@ -239,7 +244,7 @@ execute_work_state:
 	object->state = state = new_state;
 
 	if (state->work) {
-		if (unlikely(state->work == ((void *)2UL))) {
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
 			_leave(" [dead]");
 			return;
 		}
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
 	fscache_mark_object_dead(object);
 	object->oob_event_mask = 0;
 
+	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+		/* Reject any new read/write ops and abort any that are pending. */
+		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+		fscache_cancel_all_ops(object);
+	}
+
 	if (list_empty(&object->dependents) &&
 	    object->n_ops == 0 &&
 	    object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
 	}
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+						       int event)
+{
+	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+			      &object->flags))
+		return NO_TRANSIT;
+
+	WARN(true, "FS-Cache object redispatched after death");
+	return NO_TRANSIT;
+}
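
A note on the hunks above: the OBJECT_DEAD state previously carried the magic sentinel (void *)2UL as its work function, which could never be called; the patch gives it a real handler so the dispatcher compares state pointers instead of casts. Condensed from the two checks shown:

	/* before: compare the work pointer against a magic, non-callable value */
	if (unlikely(state->work == ((void *)2UL)))
		return;

	/* after: compare against the state itself; a redispatch after death
	 * now lands in fscache_object_dead(), which can WARN exactly once. */
	if (unlikely(state == STATE(OBJECT_DEAD)))
		return;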
diff --git a/fs/iomap.c b/fs/iomap.c
index 354a123f170e..a51cb4c07d4d 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 
 	BUG_ON(pos + len > iomap->offset + iomap->length);
 
+	if (fatal_signal_pending(current))
+		return -EINTR;
+
 	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
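
fatal_signal_pending() is the usual way for a long-running kernel path to notice its task has been SIGKILLed; bailing out with -EINTR here keeps a doomed buffered write from grinding through its remaining pages. A minimal sketch of the same pattern in a hypothetical loop (copy_chunk() is an illustrative helper, not a kernel API):

	while (bytes_left > 0) {
		if (fatal_signal_pending(current))
			return -EINTR;		/* task was killed; stop early */
		bytes_left -= copy_chunk();	/* hypothetical per-iteration work */
	}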
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 596205d939a1..1fc07a9c70e9 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
 	struct nfs4_layout_stateid *ls;
 	struct nfs4_stid *stp;
 
-	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+					nfsd4_free_layout_stateid);
 	if (!stp)
 		return NULL;
-	stp->sc_free = nfsd4_free_layout_stateid;
+
 	get_nfs4_file(fp);
 	stp->sc_file = fp;
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4b4beaaa4eaa..a0dee8ae9f97 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -633,8 +633,8 @@ out:
 	return co;
 }
 
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-					 struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+				  void (*sc_free)(struct nfs4_stid *))
 {
 	struct nfs4_stid *stid;
 	int new_id;
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
 	idr_preload_end();
 	if (new_id < 0)
 		goto out_free;
+
+	stid->sc_free = sc_free;
 	stid->sc_client = cl;
 	stid->sc_stateid.si_opaque.so_id = new_id;
 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
 	struct nfs4_stid *stid;
-	struct nfs4_ol_stateid *stp;
 
-	stid = nfs4_alloc_stid(clp, stateid_slab);
+	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
 	if (!stid)
 		return NULL;
 
-	stp = openlockstateid(stid);
-	stp->st_stid.sc_free = nfs4_free_ol_stateid;
-	return stp;
+	return openlockstateid(stid);
 }
 
 static void nfs4_free_deleg(struct nfs4_stid *stid)
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
 		goto out_dec;
 	if (delegation_blocked(&current_fh->fh_handle))
 		goto out_dec;
-	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
 	if (dp == NULL)
 		goto out_dec;
 
-	dp->dl_stid.sc_free = nfs4_free_deleg;
 	/*
 	 * delegation seqid's are never incremented.  The 4.1 special
 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
 	get_nfs4_file(fp);
 	stp->st_stid.sc_file = fp;
-	stp->st_stid.sc_free = nfs4_free_lock_stateid;
 	stp->st_access_bmap = 0;
 	stp->st_deny_bmap = open_stp->st_deny_bmap;
 	stp->st_openstp = open_stp;
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
 	lst = find_lock_stateid(lo, fi);
 	if (lst == NULL) {
 		spin_unlock(&clp->cl_lock);
-		ns = nfs4_alloc_stid(clp, stateid_slab);
+		ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
 		if (ns == NULL)
 			return NULL;
 
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index c9399366f9df..4516e8b7d776 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 		     stateid_t *stateid, unsigned char typemask,
 		     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-		struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+				  void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
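
The new prototype makes the destructor part of the allocation, so a stateid whose ID is already visible in the IDR can never lack a valid sc_free callback. A condensed before/after of the calling convention, using functions from the hunks above:

	/* before: a window existed between allocation and destructor setup */
	stid = nfs4_alloc_stid(clp, stateid_slab);
	stid->sc_free = nfs4_free_ol_stateid;

	/* after: the destructor is wired up inside the allocator itself */
	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);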
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 26c6fdb4bf67..ca13236dbb1f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -332,37 +332,6 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 	}
 }
 
-static __be32
-nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
-		struct iattr *iap)
-{
-	struct inode *inode = d_inode(fhp->fh_dentry);
-	int host_err;
-
-	if (iap->ia_size < inode->i_size) {
-		__be32 err;
-
-		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
-				NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
-		if (err)
-			return err;
-	}
-
-	host_err = get_write_access(inode);
-	if (host_err)
-		goto out_nfserrno;
-
-	host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-	if (host_err)
-		goto out_put_write_access;
-	return 0;
-
-out_put_write_access:
-	put_write_access(inode);
-out_nfserrno:
-	return nfserrno(host_err);
-}
-
 /*
  * Set various file attributes. After this call fhp needs an fh_put.
  */
@@ -377,7 +346,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 	__be32		err;
 	int		host_err;
 	bool		get_write_count;
-	int		size_change = 0;
 
 	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
 		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
@@ -390,11 +358,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 	/* Get inode */
 	err = fh_verify(rqstp, fhp, ftype, accmode);
 	if (err)
-		goto out;
+		return err;
 	if (get_write_count) {
 		host_err = fh_want_write(fhp);
 		if (host_err)
-			return nfserrno(host_err);
+			goto out_host_err;
 	}
 
 	dentry = fhp->fh_dentry;
@@ -405,50 +373,59 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 		iap->ia_valid &= ~ATTR_MODE;
 
 	if (!iap->ia_valid)
-		goto out;
+		return 0;
 
 	nfsd_sanitize_attrs(inode, iap);
 
+	if (check_guard && guardtime != inode->i_ctime.tv_sec)
+		return nfserr_notsync;
+
 	/*
 	 * The size case is special, it changes the file in addition to the
-	 * attributes.
+	 * attributes, and file systems don't expect it to be mixed with
+	 * "random" attribute changes.  We thus split out the size change
+	 * into a separate call for vfs_truncate, and do the rest as
+	 * a separate setattr call.
 	 */
 	if (iap->ia_valid & ATTR_SIZE) {
-		err = nfsd_get_write_access(rqstp, fhp, iap);
-		if (err)
-			goto out;
-		size_change = 1;
+		struct path path = {
+			.mnt	= fhp->fh_export->ex_path.mnt,
+			.dentry	= dentry,
+		};
+		bool implicit_mtime = false;
 
 		/*
-		 * RFC5661, Section 18.30.4:
-		 *   Changing the size of a file with SETATTR indirectly
-		 *   changes the time_modify and change attributes.
-		 *
-		 * (and similar for the older RFCs)
+		 * vfs_truncate implicitly updates the mtime IFF the file size
+		 * actually changes.  Avoid the additional setattr call below if
+		 * the only other attribute that the client sends is the mtime.
 		 */
-		if (iap->ia_size != i_size_read(inode))
-			iap->ia_valid |= ATTR_MTIME;
-	}
+		if (iap->ia_size != i_size_read(inode) &&
+		    ((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0))
+			implicit_mtime = true;
 
-	iap->ia_valid |= ATTR_CTIME;
+		host_err = vfs_truncate(&path, iap->ia_size);
+		if (host_err)
+			goto out_host_err;
 
-	if (check_guard && guardtime != inode->i_ctime.tv_sec) {
-		err = nfserr_notsync;
-		goto out_put_write_access;
+		iap->ia_valid &= ~ATTR_SIZE;
+		if (implicit_mtime)
+			iap->ia_valid &= ~ATTR_MTIME;
+		if (!iap->ia_valid)
+			goto done;
 	}
 
+	iap->ia_valid |= ATTR_CTIME;
+
 	fh_lock(fhp);
 	host_err = notify_change(dentry, iap, NULL);
 	fh_unlock(fhp);
-	err = nfserrno(host_err);
+	if (host_err)
+		goto out_host_err;
 
-out_put_write_access:
-	if (size_change)
-		put_write_access(inode);
-	if (!err)
-		err = nfserrno(commit_metadata(fhp));
-out:
-	return err;
+done:
+	host_err = commit_metadata(fhp);
+out_host_err:
+	return nfserrno(host_err);
 }
 
 #if defined(CONFIG_NFSD_V4)
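
The rewritten nfsd_setattr() now performs size changes through vfs_truncate(), which handles the write-access and lease checks that the removed nfsd_get_write_access() helper open-coded, and sends only the remaining attributes through one notify_change() call. A condensed sketch of the resulting control flow, error handling elided:

	if (iap->ia_valid & ATTR_SIZE) {
		vfs_truncate(&path, iap->ia_size);	/* size, plus mtime if it changed */
		iap->ia_valid &= ~ATTR_SIZE;
		if (!iap->ia_valid)
			goto done;			/* nothing else to set */
	}
	iap->ia_valid |= ATTR_CTIME;
	notify_change(dentry, iap, NULL);		/* the "random" attributes */
done:
	commit_metadata(fhp);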
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 63554e9f6e0c..719db1968d81 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -9,18 +9,15 @@
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 8
 #endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 8
-#endif
 #else
 #define __put .long
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 4
 #endif
+#endif
 #ifndef KCRC_ALIGN
 #define KCRC_ALIGN 4
 #endif
-#endif
 
 #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
 #define KSYM(name) _##name
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name):
 	.section ___kcrctab\sec+\name,"a"
 	.balign KCRC_ALIGN
 KSYM(__kcrctab_\name):
-	__put KSYM(__crc_\name)
+#if defined(CONFIG_MODULE_REL_CRCS)
+	.long KSYM(__crc_\name) - .
+#else
+	.long KSYM(__crc_\name)
+#endif
 	.weak KSYM(__crc_\name)
 	.previous
 #endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 192016e2b518..9c4ee144b5f6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -517,6 +517,7 @@ struct drm_device {
 	struct drm_minor *control;		/**< Control node */
 	struct drm_minor *primary;		/**< Primary node */
 	struct drm_minor *render;		/**< Render node */
+	bool registered;
 
 	/* currently active master for this device. Protected by master_mutex */
 	struct drm_master *master;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index a9b95246e26e..045a97cbeba2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
 	 * core drm connector interfaces. Everything added from this callback
 	 * should be unregistered in the early_unregister callback.
 	 *
+	 * This is called while holding drm_connector->mutex.
+	 *
 	 * Returns:
 	 *
 	 * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
 	 * late_register(). It is called from drm_connector_unregister(),
 	 * early in the driver unload sequence to disable userspace access
 	 * before data structures are torndown.
+	 *
+	 * This is called while holding drm_connector->mutex.
 	 */
 	void (*early_unregister)(struct drm_connector *connector);
 
@@ -559,7 +563,6 @@ struct drm_cmdline_mode {
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
  * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -608,6 +611,13 @@ struct drm_connector {
 	char *name;
 
 	/**
+	 * @mutex: Lock for general connector state, but currently only protects
+	 * @registered. Most of the connector state is still protected by the
+	 * mutex in &drm_mode_config.
+	 */
+	struct mutex mutex;
+
+	/**
 	 * @index: Compacted connector index, which matches the position inside
 	 * the mode_config.list for drivers not supporting hot-add/removing. Can
 	 * be used as an array index. It is invariant over the lifetime of the
@@ -620,6 +630,10 @@ struct drm_connector {
 	bool interlace_allowed;
 	bool doublescan_allowed;
 	bool stereo_allowed;
+	/**
+	 * @registered: Is this connector exposed (registered) with userspace?
+	 * Protected by @mutex.
+	 */
 	bool registered;
 	struct list_head modes; /* list of modes on this connector */
 
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a0875001b13c..df08a41d5be5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-extern int can_rx_register(struct net_device *dev, canid_t can_id,
-			   canid_t mask,
-			   void (*func)(struct sk_buff *, void *),
-			   void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+		    void (*func)(struct sk_buff *, void *),
+		    void *data, char *ident, struct sock *sk);
 
 extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
 			      canid_t mask,
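
Callers that register a receiver on behalf of a socket now pass that socket through, letting the CAN core tie the receiver's lifetime to it. A hedged sketch of the updated call, where the callback and ident string are placeholders; the socket commonly doubles as the callback's private data:

	err = can_rx_register(dev, can_id, mask, my_rcv, sk, "myproto", sk);
	if (err)
		return err;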
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d936a0021839..921acaaa1601 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,9 +8,7 @@ enum cpuhp_state {
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_PREPARE,
 	CPUHP_PERF_X86_PREPARE,
-	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_PERF_X86_AMD_UNCORE_PREP,
-	CPUHP_PERF_X86_RAPL_PREP,
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
 	CPUHP_PERF_SUPERH,
@@ -86,7 +84,6 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
 	CPUHP_AP_IRQ_BCM2836_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
-	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7e3839..5725c94b1f12 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
 
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
@@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		u64 size, struct device *dev);
 
 /* General helpers for DMA-API <-> IOMMU-API interaction */
-int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
+int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+		     unsigned long attrs);
 
 /*
  * These implement the bulk of the relevant DMA mapping callbacks, but require
@@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 /* The DMA API isn't _quite_ the whole story, though... */
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
 	return -ENODEV;
 }
 
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+	return -ENODEV;
+}
+
 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 10c5a17b1f51..c24721a33b4c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -63,6 +63,13 @@
 #define DMA_ATTR_NO_WARN	(1UL << 8)
 
 /*
+ * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
+ * accessible at an elevated privilege level (and ideally inaccessible or
+ * at least read-only at lesser-privileged levels).
+ */
+#define DMA_ATTR_PRIVILEGED		(1UL << 9)
+
+/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
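
Attributes such as this one travel through the *_attrs variants of the DMA API. A minimal sketch of a driver requesting a privileged buffer (the device pointer and size are assumptions; the attribute is the one defined above):

	dma_addr_t dma;
	void *buf;

	buf = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL,
			      DMA_ATTR_PRIVILEGED);
	if (!buf)
		return -ENOMEM;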
diff --git a/include/linux/export.h b/include/linux/export.h
index 2a0f61fbc731..1a1dfdb2a5c6 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,12 +43,19 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
+#if defined(CONFIG_MODULE_REL_CRCS)
 #define __CRC_SYMBOL(sym, sec)						\
-	extern __visible void *__crc_##sym __attribute__((weak));	\
-	static const unsigned long __kcrctab_##sym			\
-	__used								\
-	__attribute__((section("___kcrctab" sec "+" #sym), used))	\
-	= (unsigned long) &__crc_##sym;
+	asm("	.section \"___kcrctab" sec "+" #sym "\", \"a\"	\n"	\
+	    "	.weak	" VMLINUX_SYMBOL_STR(__crc_##sym) "	\n"	\
+	    "	.long	" VMLINUX_SYMBOL_STR(__crc_##sym) " - .	\n"	\
+	    "	.previous					\n");
+#else
+#define __CRC_SYMBOL(sym, sec)						\
+	asm("	.section \"___kcrctab" sec "+" #sym "\", \"a\"	\n"	\
+	    "	.weak	" VMLINUX_SYMBOL_STR(__crc_##sym) "	\n"	\
+	    "	.long	" VMLINUX_SYMBOL_STR(__crc_##sym) "	\n"	\
+	    "	.previous					\n");
+#endif
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
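
Under CONFIG_MODULE_REL_CRCS the entry stores ".long __crc_sym - .", a 32-bit place-relative offset rather than an absolute address, so it fits in an s32 and needs no load-time relocation. A sketch of how a consumer can turn such an entry back into the CRC value (an illustrative helper, assuming the relative layout above):

	static u32 resolve_rel_crc(const s32 *crc)
	{
		/* the stored value is the offset from this entry to the CRC */
		return *(const u32 *)((const void *)crc + *crc);
	}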
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552e6c09..4c467ef50159 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_AVAILABLE	5	/* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED		6	/* T if object was retired on relinquishment */
 #define FSCACHE_OBJECT_KILLED_BY_CACHE	7	/* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD	8	/* T if object has been dispatched after death */
 
 	struct list_head	cache_link;	/* link in cache->object_list */
 	struct hlist_node	cookie_link;	/* link in cookie->backing_objects */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 42fe43fb0c80..183efde54269 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
 	u32 ring_data_startoffset;
 	u32 priv_write_index;
 	u32 priv_read_index;
+	u32 cached_read_index;
 };
 
 /*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
 	return write;
 }
 
+static inline u32 hv_get_cached_bytes_to_write(
+	const struct hv_ring_buffer_info *rbi)
+{
+	u32 read_loc, write_loc, dsize, write;
+
+	dsize = rbi->ring_datasize;
+	read_loc = rbi->cached_read_index;
+	write_loc = rbi->ring_buffer->write_index;
+
+	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+		read_loc - write_loc;
+	return write;
+}
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
 
 static inline void hv_signal_on_read(struct vmbus_channel *channel)
 {
-	u32 cur_write_sz;
+	u32 cur_write_sz, cached_write_sz;
 	u32 pending_sz;
 	struct hv_ring_buffer_info *rbi = &channel->inbound;
 
@@ -1512,12 +1526,24 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
 
 	cur_write_sz = hv_get_bytes_to_write(rbi);
 
-	if (cur_write_sz >= pending_sz)
+	if (cur_write_sz < pending_sz)
+		return;
+
+	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+	if (cached_write_sz < pending_sz)
 		vmbus_setevent(channel);
 
 	return;
 }
 
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+	struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+	rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
 /*
  * An API to support in-place processing of incoming VMBUS packets.
  */
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
 * This call commits the read index and potentially signals the host.
 * Here is the pattern for using the "in-place" consumption APIs:
 *
+ * init_cached_read_index();
+ *
 * while (get_next_pkt_raw() {
 *	process the packet "in-place";
 *	put_pkt_raw();
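
Taken together: init_cached_read_index() snapshots the read index at the start of a batch, and hv_signal_on_read() then only signals the host when the space freed by this batch crosses the host's pending threshold, rather than on every packet. A hedged sketch of the documented consumption pattern (the packet handler is an assumption; the commit call is the one the comment above refers to):

	init_cached_read_index(channel);

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		process_packet(desc);		/* hypothetical in-place handler */
		put_pkt_raw(channel, desc);
	}
	commit_rd_index(channel);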
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 23e129ef6726..c573a52ae440 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
 #include <linux/dma_remapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/list.h>
+#include <linux/iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
@@ -439,7 +440,7 @@ struct intel_iommu {
 	struct irq_domain *ir_domain;
 	struct irq_domain *ir_msi_domain;
 #endif
-	struct device	*iommu_dev; /* IOMMU-sysfs device */
+	struct iommu_device iommu;  /* IOMMU core code handle */
 	int		node;
 	u32		flags;      /* Software defined flags */
 };
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0ff5111f6959..6a6de187ddc0 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,6 +31,13 @@
 #define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
 #define IOMMU_NOEXEC	(1 << 3)
 #define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
+/*
+ * This is to make the IOMMU API setup privileged
+ * mappings accessible by the master only at higher
+ * privileged execution level and inaccessible at
+ * less privileged levels.
+ */
+#define IOMMU_PRIV	(1 << 5)
 
 struct iommu_ops;
 struct iommu_group;
@@ -117,18 +124,25 @@ enum iommu_attr {
 	DOMAIN_ATTR_MAX,
 };
 
+/* These are the possible reserved region types */
+#define IOMMU_RESV_DIRECT	(1 << 0)
+#define IOMMU_RESV_RESERVED	(1 << 1)
+#define IOMMU_RESV_MSI		(1 << 2)
+
 /**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
  * @list: Linked list pointers
  * @start: System physical start address of the region
  * @length: Length of the region in bytes
  * @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
  */
-struct iommu_dm_region {
+struct iommu_resv_region {
 	struct list_head list;
 	phys_addr_t start;
 	size_t length;
 	int prot;
+	int type;
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
 	int (*domain_set_attr)(struct iommu_domain *domain,
 			       enum iommu_attr attr, void *data);
 
-	/* Request/Free a list of direct mapping requirements for a device */
-	void (*get_dm_regions)(struct device *dev, struct list_head *list);
-	void (*put_dm_regions)(struct device *dev, struct list_head *list);
-	void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
-				struct iommu_dm_region *region);
+	/* Request/Free a list of reserved regions for a device */
+	void (*get_resv_regions)(struct device *dev, struct list_head *list);
+	void (*put_resv_regions)(struct device *dev, struct list_head *list);
+	void (*apply_resv_region)(struct device *dev,
+				  struct iommu_domain *domain,
+				  struct iommu_resv_region *region);
 
 	/* Window handling functions */
 	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -204,6 +219,42 @@ struct iommu_ops {
 	unsigned long pgsize_bitmap;
 };
 
+/**
+ * struct iommu_device - IOMMU core representation of one IOMMU hardware
+ *			 instance
+ * @list: Used by the iommu-core to keep a list of registered iommus
+ * @ops: iommu-ops for talking to this iommu
+ * @dev: struct device for sysfs handling
+ */
+struct iommu_device {
+	struct list_head list;
+	const struct iommu_ops *ops;
+	struct fwnode_handle *fwnode;
+	struct device dev;
+};
+
+int  iommu_device_register(struct iommu_device *iommu);
+void iommu_device_unregister(struct iommu_device *iommu);
+int  iommu_device_sysfs_add(struct iommu_device *iommu,
+			    struct device *parent,
+			    const struct attribute_group **groups,
+			    const char *fmt, ...) __printf(4, 5);
+void iommu_device_sysfs_remove(struct iommu_device *iommu);
+int  iommu_device_link(struct iommu_device *iommu, struct device *link);
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+					const struct iommu_ops *ops)
+{
+	iommu->ops = ops;
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+					   struct fwnode_handle *fwnode)
+{
+	iommu->fwnode = fwnode;
+}
+
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
 #define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
@@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
 
-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+					struct list_head *head);
 
 extern int iommu_attach_group(struct iommu_domain *domain,
 			      struct iommu_group *group);
@@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
 				 void *data);
 extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
 				 void *data);
-struct device *iommu_device_create(struct device *parent, void *drvdata,
-				   const struct attribute_group **groups,
-				   const char *fmt, ...) __printf(4, 5);
-void iommu_device_destroy(struct device *dev);
-int iommu_device_link(struct device *dev, struct device *link);
-void iommu_device_unlink(struct device *dev, struct device *link);
 
 /* Window handling function prototypes */
 extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
 		      const struct iommu_ops *ops);
 void iommu_fwspec_free(struct device *dev);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
-void iommu_register_instance(struct fwnode_handle *fwnode,
-			     const struct iommu_ops *ops);
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
 struct iommu_group {};
 struct iommu_fwspec {};
+struct iommu_device {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 {
 }
 
-static inline void iommu_get_dm_regions(struct device *dev,
+static inline void iommu_get_resv_regions(struct device *dev,
 					struct list_head *list)
 {
 }
 
-static inline void iommu_put_dm_regions(struct device *dev,
+static inline void iommu_put_resv_regions(struct device *dev,
 					struct list_head *list)
 {
 }
 
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+					       struct list_head *head)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_request_dm_for_dev(struct device *dev)
 {
 	return -ENODEV;
@@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
 	return -EINVAL;
 }
 
-static inline struct device *iommu_device_create(struct device *parent,
-						 void *drvdata,
-						 const struct attribute_group **groups,
-						 const char *fmt, ...)
+static inline int iommu_device_register(struct iommu_device *iommu)
+{
+	return -ENODEV;
+}
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+					const struct iommu_ops *ops)
+{
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+					   struct fwnode_handle *fwnode)
+{
+}
+
+static inline void iommu_device_unregister(struct iommu_device *iommu)
 {
-	return ERR_PTR(-ENODEV);
 }
 
-static inline void iommu_device_destroy(struct device *dev)
+static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
+					 struct device *parent,
+					 const struct attribute_group **groups,
+					 const char *fmt, ...)
+{
+	return -ENODEV;
+}
+
+static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
 }
 
@@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
 	return -ENODEV;
 }
 
-static inline void iommu_register_instance(struct fwnode_handle *fwnode,
-					   const struct iommu_ops *ops)
-{
-}
-
 static inline
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
 	return NULL;
 }
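
On the driver side, an IOMMU driver's get_resv_regions() callback can now report, for example, its MSI doorbell window through iommu_alloc_resv_region(), combining the prot and type flags introduced above. A hedged sketch (the driver name, base address and size are placeholders):

	static void foo_iommu_get_resv_regions(struct device *dev,
					       struct list_head *head)
	{
		struct iommu_resv_region *region;

		region = iommu_alloc_resv_region(0x08000000, SZ_1M,
						 IOMMU_WRITE | IOMMU_MMIO,
						 IOMMU_RESV_MSI);
		if (region)
			list_add_tail(&region->list, head);
	}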
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e79875574b39..39e3254e5769 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -184,6 +184,7 @@ struct irq_data {
 *
 * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
+ * IRQD_ACTIVATED		- Interrupt has already been activated
 * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
 * IRQD_PER_CPU		- Interrupt is per cpu
 * IRQD_AFFINITY_SET		- Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
 	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
+	IRQD_ACTIVATED			= (1 <<  9),
 	IRQD_NO_BALANCING		= (1 << 10),
 	IRQD_PER_CPU			= (1 << 11),
 	IRQD_AFFINITY_SET		= (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
 }
 
+static inline bool irqd_is_activated(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_ACTIVATED;
+}
+
+static inline void irqd_set_activated(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_ACTIVATED;
+}
+
+static inline void irqd_clr_activated(struct irq_data *d)
+{
+	__irqd_to_state(d) &= ~IRQD_ACTIVATED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ffb84604c1de..188eced6813e 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -183,6 +183,12 @@ enum {
 	/* Irq domain is an IPI domain with single virq */
 	IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
 
+	/* Irq domain implements MSIs */
+	IRQ_DOMAIN_FLAG_MSI = (1 << 4),
+
+	/* Irq domain implements MSI remapping */
+	IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
+
 	/*
 	 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
 	 * for implementation specific purposes and ignored by the
@@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 					 void *host_data);
 extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 						   enum irq_domain_bus_token bus_token);
+extern bool irq_domain_check_msi_remap(void);
 extern void irq_set_default_host(struct irq_domain *host);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
 				  irq_hw_number_t hwirq, int node,
@@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
 	return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_MSI;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+	return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+}
+
+extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+
 #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 static inline void irq_domain_activate_irq(struct irq_data *data) { }
 static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
 {
 	return false;
 }
+
+static inline bool irq_domain_is_msi(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+{
+	return false;
+}
+
+static inline bool
+irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+{
+	return false;
+}
 #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 #else /* CONFIG_IRQ_DOMAIN */
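
The remap accessors exist for callers such as VFIO that must decide whether device-originated MSIs are translated before they reach the CPU, and hence whether a device is safe to hand to userspace. A hedged sketch of such a policy check (the opt-out flag is illustrative, not an API shown here):

	if (!irq_domain_check_msi_remap() && !allow_unsafe_interrupts)
		return -EPERM;	/* unremapped MSIs could forge host interrupts */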
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d91e6a..ef3d4f67118c 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 * ... and so on.
 */
 
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+	return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
 
+#define order_base_2(n)				\
+(						\
+	__builtin_constant_p(n) ? (		\
+		((n) == 0 || (n) == 1) ? 0 :	\
+		ilog2((n) - 1) + 1) :		\
+	__order_base_2(n)			\
+)
 #endif /* _LINUX_LOG2_H */
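
Both variants compute the smallest order such that 2^order >= n, with 0 and 1 special-cased to order 0; the constant-folding branch is what lets order_base_2() size static arrays again. A few worked values under that definition:

	order_base_2(0) == 0	/* special case, like order_base_2(1) */
	order_base_2(1) == 0	/* 2^0 = 1 >= 1 */
	order_base_2(4) == 2	/* 2^2 = 4 >= 4 */
	order_base_2(5) == 3	/* 2^3 = 8 >= 5 */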
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index c1784c0b4f35..134a2f69c21a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+	unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
diff --git a/include/linux/module.h b/include/linux/module.h
index 7c84273d60b9..cc7cba219b20 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -346,7 +346,7 @@ struct module {
 
 	/* Exported symbols */
 	const struct kernel_symbol *syms;
-	const unsigned long *crcs;
+	const s32 *crcs;
 	unsigned int num_syms;
 
 	/* Kernel parameters. */
@@ -359,18 +359,18 @@ struct module {
 	/* GPL-only exported symbols. */
 	unsigned int num_gpl_syms;
 	const struct kernel_symbol *gpl_syms;
-	const unsigned long *gpl_crcs;
+	const s32 *gpl_crcs;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
 	/* unused exported symbols. */
 	const struct kernel_symbol *unused_syms;
-	const unsigned long *unused_crcs;
+	const s32 *unused_crcs;
 	unsigned int num_unused_syms;
 
 	/* GPL-only, unused exported symbols. */
 	unsigned int num_unused_gpl_syms;
 	const struct kernel_symbol *unused_gpl_syms;
-	const unsigned long *unused_gpl_crcs;
+	const s32 *unused_gpl_crcs;
 #endif
 
 #ifdef CONFIG_MODULE_SIG
@@ -382,7 +382,7 @@ struct module {
 
 	/* symbols that will be GPL-only in the near future. */
 	const struct kernel_symbol *gpl_future_syms;
-	const unsigned long *gpl_future_crcs;
+	const s32 *gpl_future_crcs;
 	unsigned int num_gpl_future_syms;
 
 	/* Exception table */
@@ -523,7 +523,7 @@ struct module *find_module(const char *name);
 
 struct symsearch {
 	const struct kernel_symbol *start, *stop;
-	const unsigned long *crcs;
+	const s32 *crcs;
 	enum {
 		NOT_GPL_ONLY,
 		GPL_ONLY,
@@ -539,7 +539,7 @@ struct symsearch {
 */
 const struct kernel_symbol *find_symbol(const char *name,
 					struct module **owner,
-					const unsigned long **crc,
+					const s32 **crc,
 					bool gplok,
 					bool warn);
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9bde9558b596..70ad0291d517 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -866,11 +866,15 @@ struct netdev_xdp {
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *		netdev_features_t features);
- *	Adjusts the requested feature flags according to device-specific
- *	constraints, and returns the resulting flags. Must not modify
- *	the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *					   struct net_device *dev
+ *					   netdev_features_t features);
+ *	Called by core transmit path to determine if device is capable of
+ *	performing offload operations on a given packet. This is to give
+ *	the device an opportunity to implement any restrictions that cannot
+ *	be otherwise expressed by feature flags. The check is called with
+ *	the set of features that the stack has calculated and it returns
+ *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *			   void *accel_priv, select_queue_fallback_t fallback);
@@ -1028,6 +1032,12 @@ struct netdev_xdp {
 *	Called to release previously enslaved netdev.
 *
 * Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *		netdev_features_t features);
+ *	Adjusts the requested feature flags according to device-specific
+ *	constraints, and returns the resulting flags. Must not modify
+ *	the device state.
+ *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1110,6 @@ struct netdev_xdp {
 *	Callback to use for xmit over the accelerated station. This
 *	is used in place of ndo_start_xmit on accelerated net
 *	devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *					   struct net_device *dev
- *					   netdev_features_t features);
- *	Called by core transmit path to determine if device is capable of
- *	performing offload operations on a given packet. This is to give
- *	the device an opportunity to implement any restrictions that cannot
- *	be otherwise expressed by feature flags. The check is called with
- *	the set of features that the stack has calculated and it returns
- *	those the driver believes to be appropriate.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			      int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 6a7fc5051099..13394ac83c66 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
 
 #endif	/* CONFIG_OF_IOMMU */
 
-static inline void of_iommu_set_ops(struct device_node *np,
-				    const struct iommu_ops *ops)
-{
-	iommu_register_instance(&np->fwnode, ops);
-}
-
-static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
-{
-	return iommu_get_instance(&np->fwnode);
-}
-
 extern struct of_device_id __iommu_of_table;
 
 typedef int (*of_iommu_init_fn)(struct device_node *);
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 1c7eec09e5eb..3a481a49546e 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret;
+	bool ret;
 
 	rcu_read_lock_sched();
 
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret = false;
+	bool ret = false;
 
 	rcu_read_lock_sched();
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7afe991e900e..dbf0abba33b8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
776{ 776{
777 u32 hash; 777 u32 hash;
778 778
779 /* @flowlabel may include more than a flow label, e.g. the traffic class.
780 * Here we want only the flow label value.
781 */
782 flowlabel &= IPV6_FLOWLABEL_MASK;
783
779 if (flowlabel || 784 if (flowlabel ||
780 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || 785 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
781 (!autolabel && 786 (!autolabel &&
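The new masking matters because the value passed in is often flowi6_flowlabel, where the low 20 bits hold the label and the higher bits carry the traffic class. Without the mask, a non-zero traffic class alone made the condition treat the label as user-supplied and skip auto-generation. An illustrative sketch:

    #include <net/ipv6.h>

    static __be32 example_label_only(__be32 flowinfo)
    {
            /* IPV6_FLOWLABEL_MASK keeps only the low 20 bits, so a
             * flowinfo carrying just a traffic class reduces to 0 and
             * auto flow labels can still kick in */
            return flowinfo & IPV6_FLOWLABEL_MASK;
    }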
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f0db7788f887..3dc91a46e8b8 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, 1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, 1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, 1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
1387 ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
1388 ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
1387 1389
1388 1390
1389 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1391 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
1393 */ 1395 */
1394 1396
1395 __ETHTOOL_LINK_MODE_LAST 1397 __ETHTOOL_LINK_MODE_LAST
1396 = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 1398 = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
1397}; 1399};
1398 1400
1399#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ 1401#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
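Bits 47 and 48 sit above the legacy 32-bit boundary, so they are only reachable through the link_ksettings bitmap helpers, never through the old u32 advertising masks (which is why __ETHTOOL_LINK_MODE_LEGACY_MASK stays capped below). A sketch of a driver advertising the new modes; ks is assumed to be the ksettings structure being filled in:

    #include <linux/ethtool.h>

    static void example_add_modes(struct ethtool_link_ksettings *ks)
    {
            ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full);
            ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full);
    }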
diff --git a/init/Kconfig b/init/Kconfig
index e1a937348a3e..4dd8bd232a1d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1987,6 +1987,10 @@ config MODVERSIONS
1987 make them incompatible with the kernel you are running. If 1987 make them incompatible with the kernel you are running. If
1988 unsure, say N. 1988 unsure, say N.
1989 1989
1990config MODULE_REL_CRCS
1991 bool
1992 depends on MODVERSIONS
1993
1990config MODULE_SRCVERSION_ALL 1994config MODULE_SRCVERSION_ALL
1991 bool "Source checksum for all modules" 1995 bool "Source checksum for all modules"
1992 help 1996 help
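MODULE_REL_CRCS has no prompt, so it can only be switched on by an architecture selecting it together with MODVERSIONS. A hedged sketch of what such an opt-in might look like in an arch Kconfig (the entry name and placement are hypothetical):

    # hypothetical architecture entry opting in to relative CRCs
    config EXAMPLE_ARCH
            def_bool y
            select MODULE_REL_CRCS if MODVERSIONS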
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2ee9ec3051b2..688dd02af985 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5221,6 +5221,11 @@ err_free_css:
5221 return ERR_PTR(err); 5221 return ERR_PTR(err);
5222} 5222}
5223 5223
5224/*
5225 * The returned cgroup is fully initialized including its control mask, but
5226 * it isn't associated with its kernfs_node and doesn't have the control
5227 * mask applied.
5228 */
5224static struct cgroup *cgroup_create(struct cgroup *parent) 5229static struct cgroup *cgroup_create(struct cgroup *parent)
5225{ 5230{
5226 struct cgroup_root *root = parent->root; 5231 struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
5288 5293
5289 cgroup_propagate_control(cgrp); 5294 cgroup_propagate_control(cgrp);
5290 5295
5291 /* @cgrp doesn't have dir yet so the following will only create csses */
5292 ret = cgroup_apply_control_enable(cgrp);
5293 if (ret)
5294 goto out_destroy;
5295
5296 return cgrp; 5296 return cgrp;
5297 5297
5298out_cancel_ref: 5298out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
5300out_free_cgrp: 5300out_free_cgrp:
5301 kfree(cgrp); 5301 kfree(cgrp);
5302 return ERR_PTR(ret); 5302 return ERR_PTR(ret);
5303out_destroy:
5304 cgroup_destroy_locked(cgrp);
5305 return ERR_PTR(ret);
5306} 5303}
5307 5304
5308static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 5305static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
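With css creation pulled out of cgroup_create(), the caller finishes initialization once the kernfs node exists, matching the new comment above. A hedged sketch of the resulting ordering in the mkdir path (simplified; locking and most error handling elided):

    /* sketch of the split creation path */
    static int example_mkdir(struct cgroup *parent, const char *name)
    {
            struct cgroup *cgrp;
            int ret;

            cgrp = cgroup_create(parent);   /* fully set up, but no csses */
            if (IS_ERR(cgrp))
                    return PTR_ERR(cgrp);

            /* create the kernfs directory for @cgrp here */

            ret = cgroup_apply_control_enable(cgrp);  /* now create csses */
            if (ret)
                    cgroup_destroy_locked(cgrp);
            return ret;
    }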
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 110b38a58493..e5aaa806702d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1469static void 1469static void
1470list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1470list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1471{ 1471{
1472
1473 lockdep_assert_held(&ctx->lock); 1472 lockdep_assert_held(&ctx->lock);
1474 1473
1475 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 1474 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
1624{ 1623{
1625 struct perf_event *group_leader = event->group_leader, *pos; 1624 struct perf_event *group_leader = event->group_leader, *pos;
1626 1625
1626 lockdep_assert_held(&event->ctx->lock);
1627
1627 /* 1628 /*
1628 * We can have double attach due to group movement in perf_event_open. 1629 * We can have double attach due to group movement in perf_event_open.
1629 */ 1630 */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
1697 struct perf_event *sibling, *tmp; 1698 struct perf_event *sibling, *tmp;
1698 struct list_head *list = NULL; 1699 struct list_head *list = NULL;
1699 1700
1701 lockdep_assert_held(&event->ctx->lock);
1702
1700 /* 1703 /*
1701 * We can have double detach due to exit/hot-unplug + close. 1704 * We can have double detach due to exit/hot-unplug + close.
1702 */ 1705 */
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
1895 */ 1898 */
1896static void perf_remove_from_context(struct perf_event *event, unsigned long flags) 1899static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
1897{ 1900{
1898 lockdep_assert_held(&event->ctx->mutex); 1901 struct perf_event_context *ctx = event->ctx;
1902
1903 lockdep_assert_held(&ctx->mutex);
1899 1904
1900 event_function_call(event, __perf_remove_from_context, (void *)flags); 1905 event_function_call(event, __perf_remove_from_context, (void *)flags);
1906
1907 /*
1908 * The above event_function_call() can NO-OP when it hits
1909 * TASK_TOMBSTONE. In that case we must already have been detached
1910 * from the context (by perf_event_exit_event()) but the grouping
1911 * might still be intact.
1912 */
1913 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1914 if ((flags & DETACH_GROUP) &&
1915 (event->attach_state & PERF_ATTACH_GROUP)) {
1916 /*
1917 * Since in that case we cannot possibly be scheduled, simply
1918 * detach now.
1919 */
1920 raw_spin_lock_irq(&ctx->lock);
1921 perf_group_detach(event);
1922 raw_spin_unlock_irq(&ctx->lock);
1923 }
1901} 1924}
1902 1925
1903/* 1926/*
@@ -6609,6 +6632,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6609 char *buf = NULL; 6632 char *buf = NULL;
6610 char *name; 6633 char *name;
6611 6634
6635 if (vma->vm_flags & VM_READ)
6636 prot |= PROT_READ;
6637 if (vma->vm_flags & VM_WRITE)
6638 prot |= PROT_WRITE;
6639 if (vma->vm_flags & VM_EXEC)
6640 prot |= PROT_EXEC;
6641
6642 if (vma->vm_flags & VM_MAYSHARE)
6643 flags = MAP_SHARED;
6644 else
6645 flags = MAP_PRIVATE;
6646
6647 if (vma->vm_flags & VM_DENYWRITE)
6648 flags |= MAP_DENYWRITE;
6649 if (vma->vm_flags & VM_MAYEXEC)
6650 flags |= MAP_EXECUTABLE;
6651 if (vma->vm_flags & VM_LOCKED)
6652 flags |= MAP_LOCKED;
6653 if (vma->vm_flags & VM_HUGETLB)
6654 flags |= MAP_HUGETLB;
6655
6612 if (file) { 6656 if (file) {
6613 struct inode *inode; 6657 struct inode *inode;
6614 dev_t dev; 6658 dev_t dev;
@@ -6635,27 +6679,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6635 maj = MAJOR(dev); 6679 maj = MAJOR(dev);
6636 min = MINOR(dev); 6680 min = MINOR(dev);
6637 6681
6638 if (vma->vm_flags & VM_READ)
6639 prot |= PROT_READ;
6640 if (vma->vm_flags & VM_WRITE)
6641 prot |= PROT_WRITE;
6642 if (vma->vm_flags & VM_EXEC)
6643 prot |= PROT_EXEC;
6644
6645 if (vma->vm_flags & VM_MAYSHARE)
6646 flags = MAP_SHARED;
6647 else
6648 flags = MAP_PRIVATE;
6649
6650 if (vma->vm_flags & VM_DENYWRITE)
6651 flags |= MAP_DENYWRITE;
6652 if (vma->vm_flags & VM_MAYEXEC)
6653 flags |= MAP_EXECUTABLE;
6654 if (vma->vm_flags & VM_LOCKED)
6655 flags |= MAP_LOCKED;
6656 if (vma->vm_flags & VM_HUGETLB)
6657 flags |= MAP_HUGETLB;
6658
6659 goto got_name; 6682 goto got_name;
6660 } else { 6683 } else {
6661 if (vma->vm_ops && vma->vm_ops->name) { 6684 if (vma->vm_ops && vma->vm_ops->name) {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8c0a0ae43521..31805f237396 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -278,6 +278,31 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
278EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); 278EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
279 279
280/** 280/**
281 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
282 * IRQ remapping
283 *
284 * Return: false if any MSI irq domain does not support IRQ remapping,
285 * true otherwise (including if there is no MSI irq domain)
286 */
287bool irq_domain_check_msi_remap(void)
288{
289 struct irq_domain *h;
290 bool ret = true;
291
292 mutex_lock(&irq_domain_mutex);
293 list_for_each_entry(h, &irq_domain_list, link) {
294 if (irq_domain_is_msi(h) &&
295 !irq_domain_hierarchical_is_msi_remap(h)) {
296 ret = false;
297 break;
298 }
299 }
300 mutex_unlock(&irq_domain_mutex);
301 return ret;
302}
303EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
304
305/**
281 * irq_set_default_host() - Set a "default" irq domain 306 * irq_set_default_host() - Set a "default" irq domain
282 * @domain: default domain pointer 307 * @domain: default domain pointer
283 * 308 *
@@ -1346,6 +1371,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
1346} 1371}
1347EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); 1372EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
1348 1373
1374static void __irq_domain_activate_irq(struct irq_data *irq_data)
1375{
1376 if (irq_data && irq_data->domain) {
1377 struct irq_domain *domain = irq_data->domain;
1378
1379 if (irq_data->parent_data)
1380 __irq_domain_activate_irq(irq_data->parent_data);
1381 if (domain->ops->activate)
1382 domain->ops->activate(domain, irq_data);
1383 }
1384}
1385
1386static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1387{
1388 if (irq_data && irq_data->domain) {
1389 struct irq_domain *domain = irq_data->domain;
1390
1391 if (domain->ops->deactivate)
1392 domain->ops->deactivate(domain, irq_data);
1393 if (irq_data->parent_data)
1394 __irq_domain_deactivate_irq(irq_data->parent_data);
1395 }
1396}
1397
1349/** 1398/**
1350 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1399 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1351 * interrupt 1400 * interrupt
@@ -1356,13 +1405,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
1356 */ 1405 */
1357void irq_domain_activate_irq(struct irq_data *irq_data) 1406void irq_domain_activate_irq(struct irq_data *irq_data)
1358{ 1407{
1359 if (irq_data && irq_data->domain) { 1408 if (!irqd_is_activated(irq_data)) {
1360 struct irq_domain *domain = irq_data->domain; 1409 __irq_domain_activate_irq(irq_data);
1361 1410 irqd_set_activated(irq_data);
1362 if (irq_data->parent_data)
1363 irq_domain_activate_irq(irq_data->parent_data);
1364 if (domain->ops->activate)
1365 domain->ops->activate(domain, irq_data);
1366 } 1411 }
1367} 1412}
1368 1413
@@ -1376,13 +1421,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
1376 */ 1421 */
1377void irq_domain_deactivate_irq(struct irq_data *irq_data) 1422void irq_domain_deactivate_irq(struct irq_data *irq_data)
1378{ 1423{
1379 if (irq_data && irq_data->domain) { 1424 if (irqd_is_activated(irq_data)) {
1380 struct irq_domain *domain = irq_data->domain; 1425 __irq_domain_deactivate_irq(irq_data);
1381 1426 irqd_clr_activated(irq_data);
1382 if (domain->ops->deactivate)
1383 domain->ops->deactivate(domain, irq_data);
1384 if (irq_data->parent_data)
1385 irq_domain_deactivate_irq(irq_data->parent_data);
1386 } 1427 }
1387} 1428}
1388 1429
@@ -1392,6 +1433,20 @@ static void irq_domain_check_hierarchy(struct irq_domain *domain)
1392 if (domain->ops->alloc) 1433 if (domain->ops->alloc)
1393 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; 1434 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1394} 1435}
1436
1437/**
1438 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
1439 * parent has MSI remapping support
1440 * @domain: domain pointer
1441 */
1442bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
1443{
1444 for (; domain; domain = domain->parent) {
1445 if (irq_domain_is_msi_remap(domain))
1446 return true;
1447 }
1448 return false;
1449}
1395#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ 1450#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1396/** 1451/**
1397 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain 1452 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
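Factoring the recursion into __irq_domain_activate_irq()/__irq_domain_deactivate_irq() makes the public entry points idempotent: the per-irq_data activated flag is checked first, so repeated calls are safe. A minimal sketch of the resulting guarantee:

    #include <linux/irqdomain.h>

    static void example(struct irq_data *d)
    {
            irq_domain_activate_irq(d);    /* walks hierarchy, sets ACTIVATED */
            irq_domain_activate_irq(d);    /* no-op: already activated */

            irq_domain_deactivate_irq(d);  /* walks hierarchy, clears flag */
            irq_domain_deactivate_irq(d);  /* no-op: already deactivated */
    }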
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index ee230063f033..ddc2f5427f75 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -270,8 +270,8 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
270 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 270 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
271 msi_domain_update_chip_ops(info); 271 msi_domain_update_chip_ops(info);
272 272
273 return irq_domain_create_hierarchy(parent, 0, 0, fwnode, 273 return irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
274 &msi_domain_ops, info); 274 fwnode, &msi_domain_ops, info);
275} 275}
276 276
277int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, 277int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
diff --git a/kernel/module.c b/kernel/module.c
index 38d4270925d4..3d8f126208e3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
389extern const struct kernel_symbol __stop___ksymtab_gpl[]; 389extern const struct kernel_symbol __stop___ksymtab_gpl[];
390extern const struct kernel_symbol __start___ksymtab_gpl_future[]; 390extern const struct kernel_symbol __start___ksymtab_gpl_future[];
391extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; 391extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
392extern const unsigned long __start___kcrctab[]; 392extern const s32 __start___kcrctab[];
393extern const unsigned long __start___kcrctab_gpl[]; 393extern const s32 __start___kcrctab_gpl[];
394extern const unsigned long __start___kcrctab_gpl_future[]; 394extern const s32 __start___kcrctab_gpl_future[];
395#ifdef CONFIG_UNUSED_SYMBOLS 395#ifdef CONFIG_UNUSED_SYMBOLS
396extern const struct kernel_symbol __start___ksymtab_unused[]; 396extern const struct kernel_symbol __start___ksymtab_unused[];
397extern const struct kernel_symbol __stop___ksymtab_unused[]; 397extern const struct kernel_symbol __stop___ksymtab_unused[];
398extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; 398extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
399extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; 399extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
400extern const unsigned long __start___kcrctab_unused[]; 400extern const s32 __start___kcrctab_unused[];
401extern const unsigned long __start___kcrctab_unused_gpl[]; 401extern const s32 __start___kcrctab_unused_gpl[];
402#endif 402#endif
403 403
404#ifndef CONFIG_MODVERSIONS 404#ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
497 497
498 /* Output */ 498 /* Output */
499 struct module *owner; 499 struct module *owner;
500 const unsigned long *crc; 500 const s32 *crc;
501 const struct kernel_symbol *sym; 501 const struct kernel_symbol *sym;
502}; 502};
503 503
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
563 * (optional) module which owns it. Needs preempt disabled or module_mutex. */ 563 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
564const struct kernel_symbol *find_symbol(const char *name, 564const struct kernel_symbol *find_symbol(const char *name,
565 struct module **owner, 565 struct module **owner,
566 const unsigned long **crc, 566 const s32 **crc,
567 bool gplok, 567 bool gplok,
568 bool warn) 568 bool warn)
569{ 569{
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
1249} 1249}
1250 1250
1251#ifdef CONFIG_MODVERSIONS 1251#ifdef CONFIG_MODVERSIONS
1252/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */ 1252
1253static unsigned long maybe_relocated(unsigned long crc, 1253static u32 resolve_rel_crc(const s32 *crc)
1254 const struct module *crc_owner)
1255{ 1254{
1256#ifdef ARCH_RELOCATES_KCRCTAB 1255 return *(u32 *)((void *)crc + *crc);
1257 if (crc_owner == NULL)
1258 return crc - (unsigned long)reloc_start;
1259#endif
1260 return crc;
1261} 1256}
1262 1257
1263static int check_version(Elf_Shdr *sechdrs, 1258static int check_version(Elf_Shdr *sechdrs,
1264 unsigned int versindex, 1259 unsigned int versindex,
1265 const char *symname, 1260 const char *symname,
1266 struct module *mod, 1261 struct module *mod,
1267 const unsigned long *crc, 1262 const s32 *crc)
1268 const struct module *crc_owner)
1269{ 1263{
1270 unsigned int i, num_versions; 1264 unsigned int i, num_versions;
1271 struct modversion_info *versions; 1265 struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
1283 / sizeof(struct modversion_info); 1277 / sizeof(struct modversion_info);
1284 1278
1285 for (i = 0; i < num_versions; i++) { 1279 for (i = 0; i < num_versions; i++) {
1280 u32 crcval;
1281
1286 if (strcmp(versions[i].name, symname) != 0) 1282 if (strcmp(versions[i].name, symname) != 0)
1287 continue; 1283 continue;
1288 1284
1289 if (versions[i].crc == maybe_relocated(*crc, crc_owner)) 1285 if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
1286 crcval = resolve_rel_crc(crc);
1287 else
1288 crcval = *crc;
1289 if (versions[i].crc == crcval)
1290 return 1; 1290 return 1;
1291 pr_debug("Found checksum %lX vs module %lX\n", 1291 pr_debug("Found checksum %X vs module %lX\n",
1292 maybe_relocated(*crc, crc_owner), versions[i].crc); 1292 crcval, versions[i].crc);
1293 goto bad_version; 1293 goto bad_version;
1294 } 1294 }
1295 1295
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1307 unsigned int versindex, 1307 unsigned int versindex,
1308 struct module *mod) 1308 struct module *mod)
1309{ 1309{
1310 const unsigned long *crc; 1310 const s32 *crc;
1311 1311
1312 /* 1312 /*
1313 * Since this should be found in kernel (which can't be removed), no 1313 * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1321 } 1321 }
1322 preempt_enable(); 1322 preempt_enable();
1323 return check_version(sechdrs, versindex, 1323 return check_version(sechdrs, versindex,
1324 VMLINUX_SYMBOL_STR(module_layout), mod, crc, 1324 VMLINUX_SYMBOL_STR(module_layout), mod, crc);
1325 NULL);
1326} 1325}
1327 1326
1328/* First part is kernel version, which we ignore if module has crcs. */ 1327/* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
1340 unsigned int versindex, 1339 unsigned int versindex,
1341 const char *symname, 1340 const char *symname,
1342 struct module *mod, 1341 struct module *mod,
1343 const unsigned long *crc, 1342 const s32 *crc)
1344 const struct module *crc_owner)
1345{ 1343{
1346 return 1; 1344 return 1;
1347} 1345}
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
1368{ 1366{
1369 struct module *owner; 1367 struct module *owner;
1370 const struct kernel_symbol *sym; 1368 const struct kernel_symbol *sym;
1371 const unsigned long *crc; 1369 const s32 *crc;
1372 int err; 1370 int err;
1373 1371
1374 /* 1372 /*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
1383 if (!sym) 1381 if (!sym)
1384 goto unlock; 1382 goto unlock;
1385 1383
1386 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc, 1384 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
1387 owner)) {
1388 sym = ERR_PTR(-EINVAL); 1385 sym = ERR_PTR(-EINVAL);
1389 goto getname; 1386 goto getname;
1390 } 1387 }
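resolve_rel_crc() reads the CRC through a self-relative offset: the 32-bit slot stores the signed distance from itself to the u32 holding the real CRC, which is the layout the genksyms linker-script change further down emits. A worked userspace sketch of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t resolve_rel_crc(const int32_t *crc)
    {
            /* the slot holds the offset from its own address to the CRC */
            return *(const uint32_t *)((const char *)crc + *crc);
    }

    int main(void)
    {
            uint32_t real_crc = 0xdeadbeef;
            int32_t slot = (int32_t)((const char *)&real_crc -
                                     (const char *)&slot);

            /* prints 0xdeadbeef */
            printf("0x%x\n", (unsigned int)resolve_rel_crc(&slot));
            return 0;
    }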
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 775569ec50d0..af344a1bf0d0 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -266,7 +266,7 @@ out:
266static struct cpumask save_cpumask; 266static struct cpumask save_cpumask;
267static bool disable_migrate; 267static bool disable_migrate;
268 268
269static void move_to_next_cpu(void) 269static void move_to_next_cpu(bool initmask)
270{ 270{
271 static struct cpumask *current_mask; 271 static struct cpumask *current_mask;
272 int next_cpu; 272 int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
275 return; 275 return;
276 276
277 /* Just pick the first CPU on first iteration */ 277 /* Just pick the first CPU on first iteration */
278 if (!current_mask) { 278 if (initmask) {
279 current_mask = &save_cpumask; 279 current_mask = &save_cpumask;
280 get_online_cpus(); 280 get_online_cpus();
281 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); 281 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
330static int kthread_fn(void *data) 330static int kthread_fn(void *data)
331{ 331{
332 u64 interval; 332 u64 interval;
333 bool initmask = true;
333 334
334 while (!kthread_should_stop()) { 335 while (!kthread_should_stop()) {
335 336
336 move_to_next_cpu(); 337 move_to_next_cpu(initmask);
338 initmask = false;
337 339
338 local_irq_disable(); 340 local_irq_disable();
339 get_sample(); 341 get_sample();
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a133ecd741e4..7ad9e53ad174 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
1372 return a1 + a2 + a3 + a4 + a5 + a6; 1372 return a1 + a2 + a3 + a4 + a5 + a6;
1373} 1373}
1374 1374
1375static struct __init trace_event_file * 1375static __init struct trace_event_file *
1376find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) 1376find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1377{ 1377{
1378 struct trace_event_file *file; 1378 struct trace_event_file *file;
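The one-word swap is about attribute placement: __init marks the function, not the return type, so it cannot sit between struct and its tag. A sketch of the two forms:

    #include <linux/init.h>

    struct foo;     /* hypothetical */

    /* wrong: 'struct __init foo' attaches the attribute to the type,
     * which is not what was meant and draws compiler warnings:
     *   static struct __init foo *bad_lookup(void);
     */

    /* right: the attribute qualifies the function itself */
    static __init struct foo *good_lookup(void)
    {
            return NULL;
    }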
diff --git a/mm/filemap.c b/mm/filemap.c
index b772a33ef640..3f9afded581b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1791 1791
1792 cond_resched(); 1792 cond_resched();
1793find_page: 1793find_page:
1794 if (fatal_signal_pending(current)) {
1795 error = -EINTR;
1796 goto out;
1797 }
1798
1794 page = find_get_page(mapping, index); 1799 page = find_get_page(mapping, index);
1795 if (!page) { 1800 if (!page) {
1796 page_cache_sync_readahead(mapping, 1801 page_cache_sync_readahead(mapping,
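Checking fatal_signal_pending() at the top of the per-page loop lets a dying task abandon a long buffered read instead of faulting in the rest of the file. The idiom generalizes to any long kernel loop; a minimal sketch:

    #include <linux/sched.h>
    #include <linux/errno.h>

    static int example_long_loop(unsigned long nr_items)
    {
            unsigned long i;

            for (i = 0; i < nr_items; i++) {
                    if (fatal_signal_pending(current))
                            return -EINTR;  /* unwind so the task can exit */
                    cond_resched();
                    /* process item i */
            }
            return 0;
    }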
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b82b3e215157..f479365530b6 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/ftrace.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/mm.h> 18#include <linux/mm.h>
18#include <linux/printk.h> 19#include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
300 if (likely(!kasan_report_enabled())) 301 if (likely(!kasan_report_enabled()))
301 return; 302 return;
302 303
304 disable_trace_on_warning();
305
303 info.access_addr = (void *)addr; 306 info.access_addr = (void *)addr;
304 info.access_size = size; 307 info.access_size = size;
305 info.is_write = is_write; 308 info.is_write = is_write;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ca2723d47338..b8c11e063ff0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1483} 1483}
1484 1484
1485/* 1485/*
1486 * Confirm all pages in a range [start, end) is belongs to the same zone. 1486 * Confirm all pages in a range [start, end) belong to the same zone.
1487 * When true, return its valid [start, end).
1487 */ 1488 */
1488int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) 1489int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1490 unsigned long *valid_start, unsigned long *valid_end)
1489{ 1491{
1490 unsigned long pfn, sec_end_pfn; 1492 unsigned long pfn, sec_end_pfn;
1493 unsigned long start, end;
1491 struct zone *zone = NULL; 1494 struct zone *zone = NULL;
1492 struct page *page; 1495 struct page *page;
1493 int i; 1496 int i;
1494 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); 1497 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1495 pfn < end_pfn; 1498 pfn < end_pfn;
1496 pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { 1499 pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
1497 /* Make sure the memory section is present first */ 1500 /* Make sure the memory section is present first */
1498 if (!present_section_nr(pfn_to_section_nr(pfn))) 1501 if (!present_section_nr(pfn_to_section_nr(pfn)))
1499 continue; 1502 continue;
@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
1509 page = pfn_to_page(pfn + i); 1512 page = pfn_to_page(pfn + i);
1510 if (zone && page_zone(page) != zone) 1513 if (zone && page_zone(page) != zone)
1511 return 0; 1514 return 0;
1515 if (!zone)
1516 start = pfn + i;
1512 zone = page_zone(page); 1517 zone = page_zone(page);
1518 end = pfn + MAX_ORDER_NR_PAGES;
1513 } 1519 }
1514 } 1520 }
1515 return 1; 1521
1522 if (zone) {
1523 *valid_start = start;
1524 *valid_end = end;
1525 return 1;
1526 } else {
1527 return 0;
1528 }
1516} 1529}
1517 1530
1518/* 1531/*
@@ -1839,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
1839 long offlined_pages; 1852 long offlined_pages;
1840 int ret, drain, retry_max, node; 1853 int ret, drain, retry_max, node;
1841 unsigned long flags; 1854 unsigned long flags;
1855 unsigned long valid_start, valid_end;
1842 struct zone *zone; 1856 struct zone *zone;
1843 struct memory_notify arg; 1857 struct memory_notify arg;
1844 1858
@@ -1849,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
1849 return -EINVAL; 1863 return -EINVAL;
1850 /* This makes hotplug much easier...and readable. 1864 /* This makes hotplug much easier...and readable.
1851 we assume this for now. .*/ 1865 we assume this for now. .*/
1852 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 1866 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
1853 return -EINVAL; 1867 return -EINVAL;
1854 1868
1855 zone = page_zone(pfn_to_page(start_pfn)); 1869 zone = page_zone(pfn_to_page(valid_start));
1856 node = zone_to_nid(zone); 1870 node = zone_to_nid(zone);
1857 nr_pages = end_pfn - start_pfn; 1871 nr_pages = end_pfn - start_pfn;
1858 1872
diff --git a/mm/shmem.c b/mm/shmem.c
index bb53285a1d99..3a7587a0314d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
415 struct shrink_control *sc, unsigned long nr_to_split) 415 struct shrink_control *sc, unsigned long nr_to_split)
416{ 416{
417 LIST_HEAD(list), *pos, *next; 417 LIST_HEAD(list), *pos, *next;
418 LIST_HEAD(to_remove);
418 struct inode *inode; 419 struct inode *inode;
419 struct shmem_inode_info *info; 420 struct shmem_inode_info *info;
420 struct page *page; 421 struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
441 /* Check if there's anything to gain */ 442 /* Check if there's anything to gain */
442 if (round_up(inode->i_size, PAGE_SIZE) == 443 if (round_up(inode->i_size, PAGE_SIZE) ==
443 round_up(inode->i_size, HPAGE_PMD_SIZE)) { 444 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
444 list_del_init(&info->shrinklist); 445 list_move(&info->shrinklist, &to_remove);
445 removed++; 446 removed++;
446 iput(inode);
447 goto next; 447 goto next;
448 } 448 }
449 449
@@ -454,6 +454,13 @@ next:
454 } 454 }
455 spin_unlock(&sbinfo->shrinklist_lock); 455 spin_unlock(&sbinfo->shrinklist_lock);
456 456
457 list_for_each_safe(pos, next, &to_remove) {
458 info = list_entry(pos, struct shmem_inode_info, shrinklist);
459 inode = &info->vfs_inode;
460 list_del_init(&info->shrinklist);
461 iput(inode);
462 }
463
457 list_for_each_safe(pos, next, &list) { 464 list_for_each_safe(pos, next, &list) {
458 int ret; 465 int ret;
459 466
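Moving the inodes onto a private to_remove list defers iput() until after shrinklist_lock is dropped; iput() can sleep and re-enter the filesystem, so it must not run under a spinlock. The collect-then-release shape in isolation, as a hedged sketch:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head node;
    };

    static void example_drain(spinlock_t *lock, struct list_head *src,
                              void (*release)(struct item *))
    {
            LIST_HEAD(to_remove);
            struct item *it, *next;

            spin_lock(lock);
            list_for_each_entry_safe(it, next, src, node)
                    list_move(&it->node, &to_remove);  /* cheap, under lock */
            spin_unlock(lock);

            list_for_each_entry_safe(it, next, &to_remove, node) {
                    list_del_init(&it->node);
                    release(it);            /* may sleep, now safe */
            }
    }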
diff --git a/mm/zswap.c b/mm/zswap.c
index 067a0d62f318..cabf09e0128b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
78 78
79/* Enable/disable zswap (disabled by default) */ 79/* Enable/disable zswap (disabled by default) */
80static bool zswap_enabled; 80static bool zswap_enabled;
81module_param_named(enabled, zswap_enabled, bool, 0644); 81static int zswap_enabled_param_set(const char *,
82 const struct kernel_param *);
83static struct kernel_param_ops zswap_enabled_param_ops = {
84 .set = zswap_enabled_param_set,
85 .get = param_get_bool,
86};
87module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
82 88
83/* Crypto compressor to use */ 89/* Crypto compressor to use */
84#define ZSWAP_COMPRESSOR_DEFAULT "lzo" 90#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
176/* used by param callback function */ 182/* used by param callback function */
177static bool zswap_init_started; 183static bool zswap_init_started;
178 184
185/* fatal error during init */
186static bool zswap_init_failed;
187
179/********************************* 188/*********************************
180* helpers and fwd declarations 189* helpers and fwd declarations
181**********************************/ 190**********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
624 char *s = strstrip((char *)val); 633 char *s = strstrip((char *)val);
625 int ret; 634 int ret;
626 635
636 if (zswap_init_failed) {
637 pr_err("can't set param, initialization failed\n");
638 return -ENODEV;
639 }
640
627 /* no change required */ 641 /* no change required */
628 if (!strcmp(s, *(char **)kp->arg)) 642 if (!strcmp(s, *(char **)kp->arg))
629 return 0; 643 return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
703 return __zswap_param_set(val, kp, NULL, zswap_compressor); 717 return __zswap_param_set(val, kp, NULL, zswap_compressor);
704} 718}
705 719
720static int zswap_enabled_param_set(const char *val,
721 const struct kernel_param *kp)
722{
723 if (zswap_init_failed) {
724 pr_err("can't enable, initialization failed\n");
725 return -ENODEV;
726 }
727
728 return param_set_bool(val, kp);
729}
730
706/********************************* 731/*********************************
707* writeback code 732* writeback code
708**********************************/ 733**********************************/
@@ -1201,6 +1226,9 @@ hp_fail:
1201dstmem_fail: 1226dstmem_fail:
1202 zswap_entry_cache_destroy(); 1227 zswap_entry_cache_destroy();
1203cache_fail: 1228cache_fail:
1229 /* if built-in, we aren't unloaded on failure; don't allow use */
1230 zswap_init_failed = true;
1231 zswap_enabled = false;
1204 return -ENOMEM; 1232 return -ENOMEM;
1205} 1233}
1206/* must be late so crypto has time to come up */ 1234/* must be late so crypto has time to come up */
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 1108079d934f..5488e4a6ccd0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
445 * @func: callback function on filter match 445 * @func: callback function on filter match
446 * @data: returned parameter for callback function 446 * @data: returned parameter for callback function
447 * @ident: string for calling module identification 447 * @ident: string for calling module identification
448 * @sk: socket pointer (might be NULL)
448 * 449 *
449 * Description: 450 * Description:
450 * Invokes the callback function with the received sk_buff and the given 451 * Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
468 */ 469 */
469int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, 470int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
470 void (*func)(struct sk_buff *, void *), void *data, 471 void (*func)(struct sk_buff *, void *), void *data,
471 char *ident) 472 char *ident, struct sock *sk)
472{ 473{
473 struct receiver *r; 474 struct receiver *r;
474 struct hlist_head *rl; 475 struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
496 r->func = func; 497 r->func = func;
497 r->data = data; 498 r->data = data;
498 r->ident = ident; 499 r->ident = ident;
500 r->sk = sk;
499 501
500 hlist_add_head_rcu(&r->list, rl); 502 hlist_add_head_rcu(&r->list, rl);
501 d->entries++; 503 d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
520static void can_rx_delete_receiver(struct rcu_head *rp) 522static void can_rx_delete_receiver(struct rcu_head *rp)
521{ 523{
522 struct receiver *r = container_of(rp, struct receiver, rcu); 524 struct receiver *r = container_of(rp, struct receiver, rcu);
525 struct sock *sk = r->sk;
523 526
524 kmem_cache_free(rcv_cache, r); 527 kmem_cache_free(rcv_cache, r);
528 if (sk)
529 sock_put(sk);
525} 530}
526 531
527/** 532/**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
596 spin_unlock(&can_rcvlists_lock); 601 spin_unlock(&can_rcvlists_lock);
597 602
598 /* schedule the receiver item for deletion */ 603 /* schedule the receiver item for deletion */
599 if (r) 604 if (r) {
605 if (r->sk)
606 sock_hold(r->sk);
600 call_rcu(&r->rcu, can_rx_delete_receiver); 607 call_rcu(&r->rcu, can_rx_delete_receiver);
608 }
601} 609}
602EXPORT_SYMBOL(can_rx_unregister); 610EXPORT_SYMBOL(can_rx_unregister);
603 611
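The saved sk may be dropped by its owner before the RCU callback runs, so the unregister path pins it with sock_hold() and the callback releases it after freeing the receiver. The lifetime pattern in isolation, hedged:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <net/sock.h>

    struct rx_entry {
            struct sock *sk;        /* may be NULL */
            struct rcu_head rcu;
    };

    static void rx_entry_free(struct rcu_head *rp)
    {
            struct rx_entry *e = container_of(rp, struct rx_entry, rcu);
            struct sock *sk = e->sk;

            kfree(e);
            if (sk)
                    sock_put(sk);   /* drop the ref taken below */
    }

    static void rx_entry_unregister(struct rx_entry *e)
    {
            if (e->sk)
                    sock_hold(e->sk);  /* keep sk alive across grace period */
            call_rcu(&e->rcu, rx_entry_free);
    }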
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fca0fe9fc45a..b86f5129e838 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -50,13 +50,14 @@
50 50
51struct receiver { 51struct receiver {
52 struct hlist_node list; 52 struct hlist_node list;
53 struct rcu_head rcu;
54 canid_t can_id; 53 canid_t can_id;
55 canid_t mask; 54 canid_t mask;
56 unsigned long matches; 55 unsigned long matches;
57 void (*func)(struct sk_buff *, void *); 56 void (*func)(struct sk_buff *, void *);
58 void *data; 57 void *data;
59 char *ident; 58 char *ident;
59 struct sock *sk;
60 struct rcu_head rcu;
60}; 61};
61 62
62#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) 63#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 21ac75390e3d..95d13b233c65 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
734 734
735static void bcm_remove_op(struct bcm_op *op) 735static void bcm_remove_op(struct bcm_op *op)
736{ 736{
737 hrtimer_cancel(&op->timer); 737 if (op->tsklet.func) {
738 hrtimer_cancel(&op->thrtimer); 738 while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
739 739 test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
740 if (op->tsklet.func) 740 hrtimer_active(&op->timer)) {
741 tasklet_kill(&op->tsklet); 741 hrtimer_cancel(&op->timer);
742 tasklet_kill(&op->tsklet);
743 }
744 }
742 745
743 if (op->thrtsklet.func) 746 if (op->thrtsklet.func) {
744 tasklet_kill(&op->thrtsklet); 747 while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
748 test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
749 hrtimer_active(&op->thrtimer)) {
750 hrtimer_cancel(&op->thrtimer);
751 tasklet_kill(&op->thrtsklet);
752 }
753 }
745 754
746 if ((op->frames) && (op->frames != &op->sframe)) 755 if ((op->frames) && (op->frames != &op->sframe))
747 kfree(op->frames); 756 kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1216 err = can_rx_register(dev, op->can_id, 1225 err = can_rx_register(dev, op->can_id,
1217 REGMASK(op->can_id), 1226 REGMASK(op->can_id),
1218 bcm_rx_handler, op, 1227 bcm_rx_handler, op,
1219 "bcm"); 1228 "bcm", sk);
1220 1229
1221 op->rx_reg_dev = dev; 1230 op->rx_reg_dev = dev;
1222 dev_put(dev); 1231 dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1225 } else 1234 } else
1226 err = can_rx_register(NULL, op->can_id, 1235 err = can_rx_register(NULL, op->can_id,
1227 REGMASK(op->can_id), 1236 REGMASK(op->can_id),
1228 bcm_rx_handler, op, "bcm"); 1237 bcm_rx_handler, op, "bcm", sk);
1229 if (err) { 1238 if (err) {
1230 /* this bcm rx op is broken -> remove it */ 1239 /* this bcm rx op is broken -> remove it */
1231 list_del(&op->list); 1240 list_del(&op->list);
diff --git a/net/can/gw.c b/net/can/gw.c
index a54ab0c82104..7056a1a2bb70 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
442{ 442{
443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, 443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
444 gwj->ccgw.filter.can_mask, can_can_gw_rcv, 444 gwj->ccgw.filter.can_mask, can_can_gw_rcv,
445 gwj, "gw"); 445 gwj, "gw", NULL);
446} 446}
447 447
448static inline void cgw_unregister_filter(struct cgw_job *gwj) 448static inline void cgw_unregister_filter(struct cgw_job *gwj)
diff --git a/net/can/raw.c b/net/can/raw.c
index b075f028d7e2..6dc546a06673 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
190 for (i = 0; i < count; i++) { 190 for (i = 0; i < count; i++) {
191 err = can_rx_register(dev, filter[i].can_id, 191 err = can_rx_register(dev, filter[i].can_id,
192 filter[i].can_mask, 192 filter[i].can_mask,
193 raw_rcv, sk, "raw"); 193 raw_rcv, sk, "raw", sk);
194 if (err) { 194 if (err) {
195 /* clean up successfully registered filters */ 195 /* clean up successfully registered filters */
196 while (--i >= 0) 196 while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
211 211
212 if (err_mask) 212 if (err_mask)
213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, 213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
214 raw_rcv, sk, "raw"); 214 raw_rcv, sk, "raw", sk);
215 215
216 return err; 216 return err;
217} 217}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1d5331a1b1dc..8ce50dc3ab8c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
2518 int full_space = min_t(int, tp->window_clamp, allowed_space); 2518 int full_space = min_t(int, tp->window_clamp, allowed_space);
2519 int window; 2519 int window;
2520 2520
2521 if (mss > full_space) 2521 if (unlikely(mss > full_space)) {
2522 mss = full_space; 2522 mss = full_space;
2523 2523 if (mss <= 0)
2524 return 0;
2525 }
2524 if (free_space < (full_space >> 1)) { 2526 if (free_space < (full_space >> 1)) {
2525 icsk->icsk_ack.quick = 0; 2527 icsk->icsk_ack.quick = 0;
2526 2528
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2c0df09e9036..b6a94ff0bbd0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1344,7 +1344,7 @@ emsgsize:
1344 */ 1344 */
1345 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && 1345 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1346 headersize == sizeof(struct ipv6hdr) && 1346 headersize == sizeof(struct ipv6hdr) &&
1347 length < mtu - headersize && 1347 length <= mtu - headersize &&
1348 !(flags & MSG_MORE) && 1348 !(flags & MSG_MORE) &&
1349 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) 1349 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1350 csummode = CHECKSUM_PARTIAL; 1350 csummode = CHECKSUM_PARTIAL;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index ff8ee06491c3..75fac933c209 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -441,7 +441,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
441 if (i + sizeof(*tel) > optlen) 441 if (i + sizeof(*tel) > optlen)
442 break; 442 break;
443 443
444 tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i; 444 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
445 /* return index of option if found and valid */ 445 /* return index of option if found and valid */
446 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && 446 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
447 tel->length == 1) 447 tel->length == 1)
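The added parentheses fix an operator-precedence bug: a cast binds tighter than +, so the old expression advanced a struct pointer by off + i elements rather than bytes. A runnable userspace sketch of the difference:

    #include <stdio.h>

    struct tel { unsigned char type, length; };  /* 2 bytes, illustrative */

    int main(void)
    {
            unsigned char buf[64] = { 0 };
            unsigned int off = 4, i = 2;

            /* wrong: (struct tel *)buf + 6 steps 6 * sizeof(struct tel) */
            struct tel *bad  = (struct tel *) buf + off + i;
            /* right: byte offset first, then reinterpret */
            struct tel *good = (struct tel *)(buf + off + i);

            /* prints bad=+12 good=+6 */
            printf("bad=+%td good=+%td\n",
                   (unsigned char *)bad - buf, (unsigned char *)good - buf);
            return 0;
    }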
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 970db7a41684..5752789acc13 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
568 &mask->icmp.type, 568 &mask->icmp.type,
569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
570 sizeof(key->icmp.type)); 570 sizeof(key->icmp.type));
571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
572 &mask->icmp.code, 572 &mask->icmp.code,
573 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 573 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
574 sizeof(key->icmp.code)); 574 sizeof(key->icmp.code));
575 } 575 }
576 576
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index f935429bd5ef..b12bc2abea93 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -16,16 +16,11 @@
16#include <net/sch_generic.h> 16#include <net/sch_generic.h>
17#include <net/pkt_cls.h> 17#include <net/pkt_cls.h>
18 18
19struct cls_mall_filter { 19struct cls_mall_head {
20 struct tcf_exts exts; 20 struct tcf_exts exts;
21 struct tcf_result res; 21 struct tcf_result res;
22 u32 handle; 22 u32 handle;
23 struct rcu_head rcu;
24 u32 flags; 23 u32 flags;
25};
26
27struct cls_mall_head {
28 struct cls_mall_filter *filter;
29 struct rcu_head rcu; 24 struct rcu_head rcu;
30}; 25};
31 26
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
33 struct tcf_result *res) 28 struct tcf_result *res)
34{ 29{
35 struct cls_mall_head *head = rcu_dereference_bh(tp->root); 30 struct cls_mall_head *head = rcu_dereference_bh(tp->root);
36 struct cls_mall_filter *f = head->filter;
37 31
38 if (tc_skip_sw(f->flags)) 32 if (tc_skip_sw(head->flags))
39 return -1; 33 return -1;
40 34
41 return tcf_exts_exec(skb, &f->exts, res); 35 return tcf_exts_exec(skb, &head->exts, res);
42} 36}
43 37
44static int mall_init(struct tcf_proto *tp) 38static int mall_init(struct tcf_proto *tp)
45{ 39{
46 struct cls_mall_head *head;
47
48 head = kzalloc(sizeof(*head), GFP_KERNEL);
49 if (!head)
50 return -ENOBUFS;
51
52 rcu_assign_pointer(tp->root, head);
53
54 return 0; 40 return 0;
55} 41}
56 42
57static void mall_destroy_filter(struct rcu_head *head) 43static void mall_destroy_rcu(struct rcu_head *rcu)
58{ 44{
59 struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); 45 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
46 rcu);
60 47
61 tcf_exts_destroy(&f->exts); 48 tcf_exts_destroy(&head->exts);
62 49 kfree(head);
63 kfree(f);
64} 50}
65 51
66static int mall_replace_hw_filter(struct tcf_proto *tp, 52static int mall_replace_hw_filter(struct tcf_proto *tp,
67 struct cls_mall_filter *f, 53 struct cls_mall_head *head,
68 unsigned long cookie) 54 unsigned long cookie)
69{ 55{
70 struct net_device *dev = tp->q->dev_queue->dev; 56 struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
74 offload.type = TC_SETUP_MATCHALL; 60 offload.type = TC_SETUP_MATCHALL;
75 offload.cls_mall = &mall_offload; 61 offload.cls_mall = &mall_offload;
76 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; 62 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
77 offload.cls_mall->exts = &f->exts; 63 offload.cls_mall->exts = &head->exts;
78 offload.cls_mall->cookie = cookie; 64 offload.cls_mall->cookie = cookie;
79 65
80 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, 66 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
82} 68}
83 69
84static void mall_destroy_hw_filter(struct tcf_proto *tp, 70static void mall_destroy_hw_filter(struct tcf_proto *tp,
85 struct cls_mall_filter *f, 71 struct cls_mall_head *head,
86 unsigned long cookie) 72 unsigned long cookie)
87{ 73{
88 struct net_device *dev = tp->q->dev_queue->dev; 74 struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
103{ 89{
104 struct cls_mall_head *head = rtnl_dereference(tp->root); 90 struct cls_mall_head *head = rtnl_dereference(tp->root);
105 struct net_device *dev = tp->q->dev_queue->dev; 91 struct net_device *dev = tp->q->dev_queue->dev;
106 struct cls_mall_filter *f = head->filter;
107 92
108 if (!force && f) 93 if (!head)
109 return false; 94 return true;
110 95
111 if (f) { 96 if (tc_should_offload(dev, tp, head->flags))
112 if (tc_should_offload(dev, tp, f->flags)) 97 mall_destroy_hw_filter(tp, head, (unsigned long) head);
113 mall_destroy_hw_filter(tp, f, (unsigned long) f);
114 98
115 call_rcu(&f->rcu, mall_destroy_filter); 99 call_rcu(&head->rcu, mall_destroy_rcu);
116 }
117 kfree_rcu(head, rcu);
118 return true; 100 return true;
119} 101}
120 102
121static unsigned long mall_get(struct tcf_proto *tp, u32 handle) 103static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
122{ 104{
123 struct cls_mall_head *head = rtnl_dereference(tp->root); 105 return 0UL;
124 struct cls_mall_filter *f = head->filter;
125
126 if (f && f->handle == handle)
127 return (unsigned long) f;
128 return 0;
129} 106}
130 107
131static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { 108static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
134}; 111};
135 112
136static int mall_set_parms(struct net *net, struct tcf_proto *tp, 113static int mall_set_parms(struct net *net, struct tcf_proto *tp,
137 struct cls_mall_filter *f, 114 struct cls_mall_head *head,
138 unsigned long base, struct nlattr **tb, 115 unsigned long base, struct nlattr **tb,
139 struct nlattr *est, bool ovr) 116 struct nlattr *est, bool ovr)
140{ 117{
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
147 return err; 124 return err;
148 125
149 if (tb[TCA_MATCHALL_CLASSID]) { 126 if (tb[TCA_MATCHALL_CLASSID]) {
150 f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); 127 head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
151 tcf_bind_filter(tp, &f->res, base); 128 tcf_bind_filter(tp, &head->res, base);
152 } 129 }
153 130
154 tcf_exts_change(tp, &f->exts, &e); 131 tcf_exts_change(tp, &head->exts, &e);
155 132
156 return 0; 133 return 0;
157} 134}
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
162 unsigned long *arg, bool ovr) 139 unsigned long *arg, bool ovr)
163{ 140{
164 struct cls_mall_head *head = rtnl_dereference(tp->root); 141 struct cls_mall_head *head = rtnl_dereference(tp->root);
165 struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
166 struct net_device *dev = tp->q->dev_queue->dev; 142 struct net_device *dev = tp->q->dev_queue->dev;
167 struct cls_mall_filter *f;
168 struct nlattr *tb[TCA_MATCHALL_MAX + 1]; 143 struct nlattr *tb[TCA_MATCHALL_MAX + 1];
144 struct cls_mall_head *new;
169 u32 flags = 0; 145 u32 flags = 0;
170 int err; 146 int err;
171 147
172 if (!tca[TCA_OPTIONS]) 148 if (!tca[TCA_OPTIONS])
173 return -EINVAL; 149 return -EINVAL;
174 150
175 if (head->filter) 151 if (head)
176 return -EBUSY; 152 return -EEXIST;
177
178 if (fold)
179 return -EINVAL;
180 153
181 err = nla_parse_nested(tb, TCA_MATCHALL_MAX, 154 err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
182 tca[TCA_OPTIONS], mall_policy); 155 tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
189 return -EINVAL; 162 return -EINVAL;
190 } 163 }
191 164
192 f = kzalloc(sizeof(*f), GFP_KERNEL); 165 new = kzalloc(sizeof(*new), GFP_KERNEL);
193 if (!f) 166 if (!new)
194 return -ENOBUFS; 167 return -ENOBUFS;
195 168
196 tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); 169 tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
197 170
198 if (!handle) 171 if (!handle)
199 handle = 1; 172 handle = 1;
200 f->handle = handle; 173 new->handle = handle;
201 f->flags = flags; 174 new->flags = flags;
202 175
203 err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); 176 err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
204 if (err) 177 if (err)
205 goto errout; 178 goto errout;
206 179
207 if (tc_should_offload(dev, tp, flags)) { 180 if (tc_should_offload(dev, tp, flags)) {
208 err = mall_replace_hw_filter(tp, f, (unsigned long) f); 181 err = mall_replace_hw_filter(tp, new, (unsigned long) new);
209 if (err) { 182 if (err) {
210 if (tc_skip_sw(flags)) 183 if (tc_skip_sw(flags))
211 goto errout; 184 goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
214 } 187 }
215 } 188 }
216 189
217 *arg = (unsigned long) f; 190 *arg = (unsigned long) head;
218 rcu_assign_pointer(head->filter, f); 191 rcu_assign_pointer(tp->root, new);
219 192 if (head)
193 call_rcu(&head->rcu, mall_destroy_rcu);
220 return 0; 194 return 0;
221 195
222errout: 196errout:
223 kfree(f); 197 kfree(new);
224 return err; 198 return err;
225} 199}
226 200
227static int mall_delete(struct tcf_proto *tp, unsigned long arg) 201static int mall_delete(struct tcf_proto *tp, unsigned long arg)
228{ 202{
229 struct cls_mall_head *head = rtnl_dereference(tp->root); 203 return -EOPNOTSUPP;
230 struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
231 struct net_device *dev = tp->q->dev_queue->dev;
232
233 if (tc_should_offload(dev, tp, f->flags))
234 mall_destroy_hw_filter(tp, f, (unsigned long) f);
235
236 RCU_INIT_POINTER(head->filter, NULL);
237 tcf_unbind_filter(tp, &f->res);
238 call_rcu(&f->rcu, mall_destroy_filter);
239 return 0;
240} 204}
241 205
242static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) 206static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
243{ 207{
244 struct cls_mall_head *head = rtnl_dereference(tp->root); 208 struct cls_mall_head *head = rtnl_dereference(tp->root);
245 struct cls_mall_filter *f = head->filter;
246 209
247 if (arg->count < arg->skip) 210 if (arg->count < arg->skip)
248 goto skip; 211 goto skip;
249 if (arg->fn(tp, (unsigned long) f, arg) < 0) 212 if (arg->fn(tp, (unsigned long) head, arg) < 0)
250 arg->stop = 1; 213 arg->stop = 1;
251skip: 214skip:
252 arg->count++; 215 arg->count++;
@@ -255,28 +218,28 @@ skip:
255static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, 218static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
256 struct sk_buff *skb, struct tcmsg *t) 219 struct sk_buff *skb, struct tcmsg *t)
257{ 220{
258 struct cls_mall_filter *f = (struct cls_mall_filter *) fh; 221 struct cls_mall_head *head = (struct cls_mall_head *) fh;
259 struct nlattr *nest; 222 struct nlattr *nest;
260 223
261 if (!f) 224 if (!head)
262 return skb->len; 225 return skb->len;
263 226
264 t->tcm_handle = f->handle; 227 t->tcm_handle = head->handle;
265 228
266 nest = nla_nest_start(skb, TCA_OPTIONS); 229 nest = nla_nest_start(skb, TCA_OPTIONS);
267 if (!nest) 230 if (!nest)
268 goto nla_put_failure; 231 goto nla_put_failure;
269 232
270 if (f->res.classid && 233 if (head->res.classid &&
271 nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid)) 234 nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
272 goto nla_put_failure; 235 goto nla_put_failure;
273 236
274 if (tcf_exts_dump(skb, &f->exts)) 237 if (tcf_exts_dump(skb, &head->exts))
275 goto nla_put_failure; 238 goto nla_put_failure;
276 239
277 nla_nest_end(skb, nest); 240 nla_nest_end(skb, nest);
278 241
279 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 242 if (tcf_exts_dump_stats(skb, &head->exts) < 0)
280 goto nla_put_failure; 243 goto nla_put_failure;
281 244
282 return skb->len; 245 return skb->len;
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index dc6fb79a361f..25d9a9cf7b66 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
260 if (!oa->data) 260 if (!oa->data)
261 return -ENOMEM; 261 return -ENOMEM;
262 262
263 creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); 263 creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
264 if (!creds) { 264 if (!creds) {
265 kfree(oa->data); 265 kfree(oa->data);
266 return -ENOMEM; 266 return -ENOMEM;
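kzalloc() matters here because the svc_cred is filled in field by field afterwards; with kmalloc(), any member the decode path never touches is left as heap garbage and can crash the later release path. The rule in miniature (struct and fields are illustrative):

    #include <linux/slab.h>

    struct cred_like {
            unsigned int uid;
            void *group_info;  /* released later by a generic free path */
    };

    static struct cred_like *make_cred(gfp_t gfp)
    {
            /* zeroed allocation: unset pointers start NULL, so a
             * partial fill followed by the free path stays safe */
            return kzalloc(sizeof(struct cred_like), gfp);
    }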
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index eadcd4d359d9..d883116ebaa4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -164,6 +164,7 @@ cmd_gensymtypes_c = \
164 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ 164 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \
165 $(GENKSYMS) $(if $(1), -T $(2)) \ 165 $(GENKSYMS) $(if $(1), -T $(2)) \
166 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 166 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
167 $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
167 $(if $(KBUILD_PRESERVE),-p) \ 168 $(if $(KBUILD_PRESERVE),-p) \
168 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) 169 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
169 170
@@ -337,6 +338,7 @@ cmd_gensymtypes_S = \
337 $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ 338 $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
338 $(GENKSYMS) $(if $(1), -T $(2)) \ 339 $(GENKSYMS) $(if $(1), -T $(2)) \
339 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 340 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
341 $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
340 $(if $(KBUILD_PRESERVE),-p) \ 342 $(if $(KBUILD_PRESERVE),-p) \
341 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) 343 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
342 344
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 06121ce524a7..c9235d8340f1 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -44,7 +44,7 @@ char *cur_filename, *source_file;
44int in_source_file; 44int in_source_file;
45 45
46static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, 46static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
47 flag_preserve, flag_warnings; 47 flag_preserve, flag_warnings, flag_rel_crcs;
48static const char *mod_prefix = ""; 48static const char *mod_prefix = "";
49 49
50static int errors; 50static int errors;
@@ -693,7 +693,10 @@ void export_symbol(const char *name)
693 fputs(">\n", debugfile); 693 fputs(">\n", debugfile);
694 694
695 /* Used as a linker script. */ 695 /* Used as a linker script. */
696 printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc); 696 printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
697 "SECTIONS { .rodata : ALIGN(4) { "
698 "%s__crc_%s = .; LONG(0x%08lx); } }\n",
699 mod_prefix, name, crc);
697 } 700 }
698} 701}
699 702
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...)
 
 static void genksyms_usage(void)
 {
-	fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n"
+	fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
 #ifdef __GNU_LIBRARY__
 	      " -s, --symbol-prefix   Select symbol prefix\n"
 	      " -d, --debug           Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
 	      " -q, --quiet           Disable warnings (default)\n"
 	      " -h, --help            Print this message\n"
 	      " -V, --version         Print the release version\n"
+	      " -R, --relative-crc    Emit section relative symbol CRCs\n"
 #else /* __GNU_LIBRARY__ */
 	      " -s                    Select symbol prefix\n"
 	      " -d                    Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
 	      " -q                    Disable warnings (default)\n"
 	      " -h                    Print this message\n"
 	      " -V                    Print the release version\n"
+	      " -R                    Emit section relative symbol CRCs\n"
 #endif /* __GNU_LIBRARY__ */
 	      , stderr);
 }
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
 		{"preserve", 0, 0, 'p'},
 		{"version", 0, 0, 'V'},
 		{"help", 0, 0, 'h'},
+		{"relative-crc", 0, 0, 'R'},
 		{0, 0, 0, 0}
 	};
 
-	while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+	while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
 				&long_opts[0], NULL)) != EOF)
 #else /* __GNU_LIBRARY__ */
-	while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
+	while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
 #endif /* __GNU_LIBRARY__ */
 		switch (o) {
 		case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
 		case 'h':
 			genksyms_usage();
 			return 0;
+		case 'R':
+			flag_rel_crcs = 1;
+			break;
 		default:
 			genksyms_usage();
 			return 1;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 299b92ca1ae0..5d554419170b 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
 		"_SDA2_BASE_",		/* ppc */
 		NULL };
 
+	static char *special_prefixes[] = {
+		"__crc_",		/* modversions */
+		NULL };
+
 	static char *special_suffixes[] = {
 		"_veneer",		/* arm */
 		"_from_arm",		/* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
 		if (strcmp(sym_name, special_symbols[i]) == 0)
 			return 0;
 
+	for (i = 0; special_prefixes[i]; i++) {
+		int l = strlen(special_prefixes[i]);
+
+		if (l <= strlen(sym_name) &&
+		    strncmp(sym_name, special_prefixes[i], l) == 0)
+			return 0;
+	}
+
 	for (i = 0; special_suffixes[i]; i++) {
 		int l = strlen(sym_name) - strlen(special_suffixes[i]);
 
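With relative CRCs, the __crc_ symbols are no longer absolute and would otherwise be swept into the kallsyms table like any other section-relative symbol; the new prefix filter excludes them. A standalone restatement of the check, with a hypothetical function name (not the kallsyms code itself):

	#include <string.h>

	static int has_crc_prefix(const char *sym_name)
	{
		static const char *special_prefixes[] = { "__crc_", NULL };
		int i;

		for (i = 0; special_prefixes[i]; i++) {
			size_t l = strlen(special_prefixes[i]);

			/* reject symbols that start with "__crc_" */
			if (strncmp(sym_name, special_prefixes[i], l) == 0)
				return 1;
		}
		return 0;
	}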
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 29c89a6bad3d..4dedd0d3d3a7 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 	if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
 		is_crc = true;
 		crc = (unsigned int) sym->st_value;
+		if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
+			unsigned int *crcp;
+
+			/* symbol points to the CRC in the ELF object */
+			crcp = (void *)info->hdr + sym->st_value +
+			       info->sechdrs[sym->st_shndx].sh_offset -
+			       (info->hdr->e_type != ET_REL ?
+				info->sechdrs[sym->st_shndx].sh_addr : 0);
+			crc = *crcp;
+		}
 		sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
 			       export);
 	}
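Previously the CRC was the symbol's absolute value; a section-relative __crc_ symbol instead points at a 32-bit constant stored in the object, so modpost must translate the symbol value into a file offset and dereference it. A sketch of that translation as a hypothetical helper (assuming <elf.h> types and the GNU void-pointer arithmetic modpost already relies on):

	#include <elf.h>

	/*
	 * In an ET_REL object, st_value is an offset into the symbol's
	 * section, so the file offset is sh_offset + st_value.  In a
	 * linked image, st_value is a virtual address, so the section's
	 * load address is subtracted first: sh_offset + (st_value - sh_addr).
	 */
	static unsigned int crc_from_elf(void *hdr, Elf64_Ehdr *ehdr,
					 Elf64_Shdr *shdr, Elf64_Sym *sym)
	{
		unsigned long bias = ehdr->e_type != ET_REL ? shdr->sh_addr : 0;

		return *(unsigned int *)(hdr + shdr->sh_offset +
					 sym->st_value - bias);
	}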
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 5e0dea2cdc01..039636ffb6c8 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		*type = INSN_RETURN;
 		break;
 
-	case 0xc5: /* iret */
 	case 0xca: /* retf */
 	case 0xcb: /* retf */
+	case 0xcf: /* iret */
 		*type = INSN_CONTEXT_SWITCH;
 		break;
 
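The objtool hunk is a plain bug fix: 0xC5 had been labeled iret, but in the one-byte opcode map 0xC5 is LDS (and the two-byte VEX prefix in 64-bit mode), not a return at all; the real IRET opcode is 0xCF, alongside the far returns 0xCA/0xCB. In comment form:

	/*
	 * One-byte x86 opcodes handled here:
	 *   0xC5  LDS (32-bit) / VEX two-byte prefix (64-bit), not a return
	 *   0xCA  RETF imm16
	 *   0xCB  RETF
	 *   0xCF  IRET / IRETD / IRETQ
	 */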