-rw-r--r--  Documentation/devicetree/bindings/dsp/fsl,dsp.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/media/rc.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml | 2
-rw-r--r--  Documentation/process/embargoed-hardware-issues.rst | 42
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/arm/boot/dts/am3517-evm.dts | 23
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv.dtsi | 26
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi | 37
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/include/asm/xen/xen-ops.h | 6
-rw-r--r--  arch/arm/mach-aspeed/Kconfig | 1
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 4
-rw-r--r--  arch/arm/xen/Makefile | 1
-rw-r--r--  arch/arm/xen/efi.c | 28
-rw-r--r--  arch/arm/xen/enlighten.c | 3
-rw-r--r--  arch/arm/xen/mm.c | 5
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 24
-rw-r--r--  arch/arm64/include/asm/xen/xen-ops.h | 7
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 17
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c | 36
-rw-r--r--  arch/arm64/xen/Makefile | 1
-rw-r--r--  arch/csky/abiv1/alignment.c | 62
-rw-r--r--  arch/csky/abiv1/cacheflush.c | 70
-rw-r--r--  arch/csky/abiv1/inc/abi/cacheflush.h | 45
-rw-r--r--  arch/csky/abiv1/inc/abi/page.h | 5
-rw-r--r--  arch/csky/abiv1/mmap.c | 75
-rw-r--r--  arch/csky/include/asm/barrier.h | 15
-rw-r--r--  arch/csky/include/asm/cache.h | 1
-rw-r--r--  arch/csky/include/asm/io.h | 23
-rw-r--r--  arch/csky/include/asm/pgtable.h | 10
-rw-r--r--  arch/csky/kernel/entry.S | 54
-rw-r--r--  arch/csky/kernel/perf_event.c | 4
-rw-r--r--  arch/csky/kernel/process.c | 2
-rw-r--r--  arch/csky/mm/cachev1.c | 7
-rw-r--r--  arch/csky/mm/cachev2.c | 11
-rw-r--r--  arch/csky/mm/dma-mapping.c | 76
-rw-r--r--  arch/csky/mm/init.c | 16
-rw-r--r--  arch/csky/mm/ioremap.c | 27
-rw-r--r--  arch/mips/boot/dts/qca/ar9331.dtsi | 2
-rw-r--r--  arch/mips/fw/arc/memory.c | 1
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-ipd.h | 1
-rw-r--r--  arch/mips/include/asm/unistd.h | 1
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c | 14
-rw-r--r--  arch/mips/kernel/setup.c | 5
-rw-r--r--  arch/mips/kernel/syscall.c | 1
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n32.tbl | 2
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n64.tbl | 2
-rw-r--r--  arch/mips/kernel/syscalls/syscall_o32.tbl | 2
-rw-r--r--  arch/mips/loongson64/common/mem.c | 35
-rw-r--r--  arch/mips/loongson64/common/serial.c | 2
-rw-r--r--  arch/mips/loongson64/loongson-3/numa.c | 11
-rw-r--r--  arch/mips/pmcs-msp71xx/msp_prom.c | 4
-rw-r--r--  arch/mips/vdso/Makefile | 2
-rw-r--r--  arch/mips/vdso/gettimeofday.c | 269
-rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h | 8
-rw-r--r--  arch/powerpc/kvm/book3s.c | 8
-rw-r--r--  arch/powerpc/mm/book3s64/hash_pgtable.c | 2
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c | 7
-rw-r--r--  arch/powerpc/mm/init_64.c | 17
-rw-r--r--  arch/riscv/include/asm/asm.h | 1
-rw-r--r--  arch/riscv/kernel/entry.S | 21
-rw-r--r--  arch/riscv/mm/init.c | 12
-rw-r--r--  arch/s390/configs/debug_defconfig | 24
-rw-r--r--  arch/s390/configs/defconfig | 25
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 2
-rw-r--r--  arch/s390/include/asm/atomic_ops.h | 2
-rw-r--r--  arch/s390/include/asm/bitops.h | 8
-rw-r--r--  arch/s390/include/asm/cpacf.h | 2
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 8
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 9
-rw-r--r--  arch/s390/include/asm/jump_label.h | 4
-rw-r--r--  arch/s390/include/asm/pgtable.h | 25
-rw-r--r--  arch/s390/include/asm/qdio.h | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_diag.c | 4
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 8
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 6
-rw-r--r--  arch/s390/pci/pci_clp.c | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 7
-rw-r--r--  arch/x86/kvm/cpuid.c | 102
-rw-r--r--  arch/x86/kvm/lapic.c | 13
-rw-r--r--  arch/x86/kvm/mmu.c | 65
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 2
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c | 7
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 15
-rw-r--r--  arch/x86/kvm/x86.c | 72
-rw-r--r--  arch/x86/xen/efi.c | 14
-rw-r--r--  block/blk-mq.c | 12
-rw-r--r--  block/sed-opal.c | 6
-rw-r--r--  drivers/block/loop.c | 10
-rw-r--r--  drivers/bus/ti-sysc.c | 52
-rw-r--r--  drivers/char/random.c | 64
-rw-r--r--  drivers/clocksource/timer-of.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/reset.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 7
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 13
-rw-r--r--  drivers/gpu/drm/drm_writeback.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 184
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_writeback.c | 4
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-riic.c | 1
-rw-r--r--  drivers/i2c/i2c-slave-eeprom.c | 14
-rw-r--r--  drivers/iommu/amd_iommu.c | 229
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 4
-rw-r--r--  drivers/mfd/db8500-prcmu.c | 53
-rw-r--r--  drivers/mmc/host/Kconfig | 1
-rw-r--r--  drivers/mmc/host/Makefile | 2
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 352
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 5
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 48
-rw-r--r--  drivers/mmc/host/sdhci.c | 27
-rw-r--r--  drivers/mmc/host/sdhci.h | 1
-rw-r--r--  drivers/nvdimm/btt.c | 8
-rw-r--r--  drivers/nvdimm/bus.c | 2
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 7
-rw-r--r--  drivers/nvdimm/nd.h | 6
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 77
-rw-r--r--  drivers/nvdimm/region.c | 4
-rw-r--r--  drivers/nvdimm/region_devs.c | 7
-rw-r--r--  drivers/nvdimm/security.c | 4
-rw-r--r--  drivers/nvme/host/core.c | 132
-rw-r--r--  drivers/nvme/host/nvme.h | 2
-rw-r--r--  drivers/nvme/host/pci.c | 20
-rw-r--r--  drivers/nvme/host/rdma.c | 19
-rw-r--r--  drivers/nvme/host/tcp.c | 4
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 16
-rw-r--r--  drivers/nvme/target/tcp.c | 12
-rw-r--r--  drivers/reset/reset-scmi.c | 1
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 81
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 2
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 3
-rw-r--r--  drivers/thermal/Kconfig | 2
-rw-r--r--  drivers/thermal/db8500_thermal.c | 486
-rw-r--r--  drivers/thermal/thermal_mmio.c | 7
-rw-r--r--  drivers/xen/balloon.c | 24
-rw-r--r--  drivers/xen/efi.c | 84
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 20
-rw-r--r--  fs/afs/dynroot.c | 7
-rw-r--r--  fs/afs/inode.c | 2
-rw-r--r--  fs/afs/internal.h | 1
-rw-r--r--  fs/btrfs/extent_io.c | 13
-rw-r--r--  fs/btrfs/qgroup.c | 38
-rw-r--r--  fs/btrfs/relocation.c | 9
-rw-r--r--  fs/btrfs/tests/btrfs-tests.c | 8
-rw-r--r--  fs/btrfs/volumes.c | 8
-rw-r--r--  fs/cifs/cifs_ioctl.h | 9
-rw-r--r--  fs/cifs/cifsacl.h | 81
-rw-r--r--  fs/cifs/cifsglob.h | 6
-rw-r--r--  fs/cifs/cifsproto.h | 3
-rw-r--r--  fs/cifs/cifssmb.c | 3
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/cifs/ioctl.c | 29
-rw-r--r--  fs/cifs/sess.c | 3
-rw-r--r--  fs/cifs/smb2inode.c | 34
-rw-r--r--  fs/cifs/smb2ops.c | 10
-rw-r--r--  fs/cifs/smb2pdu.c | 23
-rw-r--r--  fs/cifs/smb2proto.h | 3
-rw-r--r--  fs/cifs/smbfsctl.h | 11
-rw-r--r--  fs/cifs/xattr.c | 2
-rw-r--r--  fs/erofs/data.c | 10
-rw-r--r--  fs/erofs/super.c | 4
-rw-r--r--  fs/erofs/zdata.c | 12
-rw-r--r--  fs/ext4/inode.c | 3
-rw-r--r--  fs/fhandle.c | 2
-rw-r--r--  fs/io_uring.c | 8
-rw-r--r--  fs/statfs.c | 17
-rw-r--r--  include/linux/bitops.h | 7
-rw-r--r--  include/linux/huge_mm.h | 7
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/memremap.h | 1
-rw-r--r--  include/linux/platform_data/db8500_thermal.h | 29
-rw-r--r--  include/linux/uaccess.h | 70
-rw-r--r--  include/trace/events/kmem.h | 7
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 2
-rw-r--r--  include/uapi/linux/nvme_ioctl.h | 23
-rw-r--r--  include/uapi/linux/pg.h | 5
-rw-r--r--  include/uapi/linux/sched.h | 30
-rw-r--r--  include/xen/xen-ops.h | 25
-rw-r--r--  kernel/events/core.c | 47
-rw-r--r--  kernel/fork.c | 45
-rw-r--r--  kernel/sched/core.c | 43
-rw-r--r--  kernel/sched/membarrier.c | 1
-rw-r--r--  kernel/time/tick-broadcast-hrtimer.c | 62
-rw-r--r--  kernel/trace/trace.h | 10
-rw-r--r--  kernel/trace/trace_events_filter.c | 6
-rw-r--r--  kernel/trace/trace_probe.c | 16
-rw-r--r--  lib/strnlen_user.c | 8
-rw-r--r--  lib/test_user_copy.c | 136
-rw-r--r--  lib/usercopy.c | 55
-rw-r--r--  tools/testing/nvdimm/test/nfit_test.h | 4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc | 2
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 3
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/processor.h | 3
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/vmx.h | 14
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util_internal.h | 3
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/vmx.c | 201
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c | 156
-rw-r--r--  tools/testing/selftests/pidfd/Makefile | 2
-rw-r--r--  virt/kvm/arm/vgic/trace.h | 2
-rw-r--r--  virt/kvm/kvm_main.c | 10
239 files changed, 3195 insertions(+), 2178 deletions(-)
diff --git a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
index 3248595dc93c..f04870d84542 100644
--- a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
+++ b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
@@ -85,4 +85,5 @@ examples:
               <&pd IMX_SC_R_DSP_RAM>;
         mbox-names = "txdb0", "txdb1", "rxdb0", "rxdb1";
         mboxes = <&lsio_mu13 2 0>, <&lsio_mu13 2 1>, <&lsio_mu13 3 0>, <&lsio_mu13 3 1>;
+        memory-region = <&dsp_reserved>;
     };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
index 676ec42e1438..567a33a83dce 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
@@ -43,13 +43,9 @@ properties:
 
   dvdd-supply:
     description: DVdd voltage supply
-    items:
-      - const: dvdd
 
   avdd-supply:
     description: AVdd voltage supply
-    items:
-      - const: avdd
 
   adi,rejection-60-Hz-enable:
     description: |
@@ -99,6 +95,9 @@ required:
 examples:
   - |
     spi0 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
       adc@0 {
         compatible = "adi,ad7192";
         reg = <0>;
diff --git a/Documentation/devicetree/bindings/media/rc.yaml b/Documentation/devicetree/bindings/media/rc.yaml
index 3d5c154fd230..9054555e6608 100644
--- a/Documentation/devicetree/bindings/media/rc.yaml
+++ b/Documentation/devicetree/bindings/media/rc.yaml
@@ -73,7 +73,6 @@ properties:
           - rc-genius-tvgo-a11mce
           - rc-gotview7135
           - rc-hauppauge
-          - rc-hauppauge
           - rc-hisi-poplar
           - rc-hisi-tv-demo
           - rc-imon-mce
diff --git a/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
index 8a56a8526cef..a97482179cf5 100644
--- a/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
@@ -37,7 +37,7 @@ properties:
       - description: exclusive PHY reset line
       - description: shared reset line between the PCIe PHY and PCIe controller
 
-  resets-names:
+  reset-names:
     items:
       - const: phy
       - const: pcie
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index 402636356fbe..a3c3349046c4 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -143,6 +143,20 @@ via their employer, they cannot enter individual non-disclosure agreements
 in their role as Linux kernel developers. They will, however, agree to
 adhere to this documented process and the Memorandum of Understanding.
 
+The disclosing party should provide a list of contacts for all other
+entities who have already been, or should be, informed about the issue.
+This serves several purposes:
+
+ - The list of disclosed entities allows communication across the
+   industry, e.g. other OS vendors, HW vendors, etc.
+
+ - The disclosed entities can be contacted to name experts who should
+   participate in the mitigation development.
+
+ - If an expert who is required to handle an issue is employed by a listed
+   entity or is a member of a listed entity, then the response teams can
+   request the disclosure of that expert from that entity. This ensures
+   that the expert is also part of the entity's response team.
 
 Disclosure
 """"""""""
@@ -158,10 +172,7 @@ Mitigation development
 """"""""""""""""""""""
 
 The initial response team sets up an encrypted mailing-list or repurposes
-an existing one if appropriate. The disclosing party should provide a list
-of contacts for all other parties who have already been, or should be,
-informed about the issue. The response team contacts these parties so they
-can name experts who should be subscribed to the mailing-list.
+an existing one if appropriate.
 
 Using a mailing-list is close to the normal Linux development process and
 has been successfully used in developing mitigations for various hardware
@@ -175,9 +186,24 @@ development branch against the mainline kernel and backport branches for
 stable kernel versions as necessary.
 
 The initial response team will identify further experts from the Linux
-kernel developer community as needed and inform the disclosing party about
-their participation. Bringing in experts can happen at any time of the
-development process and often needs to be handled in a timely manner.
+kernel developer community as needed. Bringing in experts can happen at any
+time of the development process and needs to be handled in a timely manner.
+
+If an expert is employed by or is a member of an entity on the disclosure
+list provided by the disclosing party, then participation will be requested
+from the relevant entity.
+
+If not, then the disclosing party will be informed about the expert's
+participation. The experts are covered by the Memorandum of Understanding
+and the disclosing party is requested to acknowledge their participation.
+If the disclosing party has a compelling reason to object, the objection
+has to be raised within five work days and resolved with the incident team
+immediately. If the disclosing party does not react within five work days
+this is taken as silent acknowledgement.
+
+After acknowledgement or resolution of an objection the expert is disclosed
+by the incident team and brought into the development process.
+
 
 Coordinated release
 """""""""""""""""""
@@ -216,7 +242,7 @@ an involved disclosed party. The current ambassadors list:
   ARM
   AMD
   IBM
-  Intel
+  Intel         Tony Luck <tony.luck@intel.com>
   Qualcomm      Trilok Soni <tsoni@codeaurora.org>
 
   Microsoft     Sasha Levin <sashal@kernel.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 296de2b51c83..55199ef7fa74 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6112,7 +6112,10 @@ M: Gao Xiang <gaoxiang25@huawei.com>
 M:  Chao Yu <yuchao0@huawei.com>
 L:  linux-erofs@lists.ozlabs.org
 S:  Maintained
+T:  git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
+F:  Documentation/filesystems/erofs.txt
 F:  fs/erofs/
+F:  include/trace/events/erofs.h
 
 ERRSEQ ERROR TRACKING INFRASTRUCTURE
 M:  Jeff Layton <jlayton@kernel.org>
@@ -9075,6 +9078,7 @@ F: security/keys/
 KGDB / KDB /debug_core
 M:  Jason Wessel <jason.wessel@windriver.com>
 M:  Daniel Thompson <daniel.thompson@linaro.org>
+R:  Douglas Anderson <dianders@chromium.org>
 W:  http://kgdb.wiki.kernel.org/
 L:  kgdb-bugreport@lists.sourceforge.net
 T:  git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
diff --git a/Makefile b/Makefile
index d456746da347..6f54f2f95743 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
-PATCHLEVEL = 3
+PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Bobtail Squid
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index ebfe28c2f544..a1fd3e63e86e 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -124,10 +124,11 @@
     };
 
     lcd0: display@0 {
-        compatible = "panel-dpi";
+        /* This isn't the exact LCD, but the timings meet spec */
+        /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
+        compatible = "newhaven,nhd-4.3-480272ef-atxl";
         label = "15";
-        status = "okay";
-        pinctrl-names = "default";
+        backlight = <&bl>;
         enable-gpios = <&gpio6 16 GPIO_ACTIVE_HIGH>;    /* gpio176, lcd INI */
         vcc-supply = <&vdd_io_reg>;
 
@@ -136,22 +137,6 @@
                 remote-endpoint = <&dpi_out>;
             };
         };
-
-        panel-timing {
-            clock-frequency = <9000000>;
-            hactive = <480>;
-            vactive = <272>;
-            hfront-porch = <3>;
-            hback-porch = <2>;
-            hsync-len = <42>;
-            vback-porch = <3>;
-            vfront-porch = <4>;
-            vsync-len = <11>;
-            hsync-active = <0>;
-            vsync-active = <0>;
-            de-active = <1>;
-            pixelclk-active = <1>;
-        };
     };
 
     bl: backlight {
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 5563ee54c960..b56524cc7fe2 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -228,6 +228,20 @@
         >;
     };
 
+    i2c2_pins: pinmux_i2c2_pins {
+        pinctrl-single,pins = <
+            OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0)    /* i2c2_scl */
+            OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0)    /* i2c2_sda */
+        >;
+    };
+
+    i2c3_pins: pinmux_i2c3_pins {
+        pinctrl-single,pins = <
+            OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0)    /* i2c3_scl */
+            OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0)    /* i2c3_sda */
+        >;
+    };
+
     tsc2004_pins: pinmux_tsc2004_pins {
         pinctrl-single,pins = <
             OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE4)    /* mcbsp4_dr.gpio_153 */
@@ -249,18 +263,6 @@
             OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)    /* sys_boot1.gpio_3 */
         >;
     };
-    i2c2_pins: pinmux_i2c2_pins {
-        pinctrl-single,pins = <
-            OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0)    /* i2c2_scl */
-            OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0)    /* i2c2_sda */
-        >;
-    };
-    i2c3_pins: pinmux_i2c3_pins {
-        pinctrl-single,pins = <
-            OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0)    /* i2c3_scl */
-            OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0)    /* i2c3_sda */
-        >;
-    };
 };
 
 &omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
index 642e809e757a..449cc7616da6 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
@@ -108,7 +108,6 @@
 &dss {
     status = "ok";
     vdds_dsi-supply = <&vpll2>;
-    vdda_video-supply = <&video_reg>;
     pinctrl-names = "default";
     pinctrl-0 = <&dss_dpi_pins1>;
     port {
@@ -124,44 +123,20 @@
         display0 = &lcd0;
     };
 
-    video_reg: video_reg {
-        pinctrl-names = "default";
-        pinctrl-0 = <&panel_pwr_pins>;
-        compatible = "regulator-fixed";
-        regulator-name = "fixed-supply";
-        regulator-min-microvolt = <3300000>;
-        regulator-max-microvolt = <3300000>;
-        gpio = <&gpio5 27 GPIO_ACTIVE_HIGH>;    /* gpio155, lcd INI */
-    };
-
     lcd0: display {
-        compatible = "panel-dpi";
+        /* This isn't the exact LCD, but the timings meet spec */
+        /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
+        compatible = "newhaven,nhd-4.3-480272ef-atxl";
         label = "15";
-        status = "okay";
-        /* default-on; */
         pinctrl-names = "default";
-
+        pinctrl-0 = <&panel_pwr_pins>;
+        backlight = <&bl>;
+        enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
         port {
             lcd_in: endpoint {
                 remote-endpoint = <&dpi_out>;
             };
         };
-
-        panel-timing {
-            clock-frequency = <9000000>;
-            hactive = <480>;
-            vactive = <272>;
-            hfront-porch = <3>;
-            hback-porch = <2>;
-            hsync-len = <42>;
-            vback-porch = <3>;
-            vfront-porch = <4>;
-            vsync-len = <11>;
-            hsync-active = <0>;
-            vsync-active = <0>;
-            de-active = <1>;
-            pixelclk-active = <1>;
-        };
     };
 
     bl: backlight {
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c7bf9c493646..64eb896907bf 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -363,6 +363,7 @@ CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
 CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
 CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
 CONFIG_DRM_TILCDC=m
+CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
deleted file mode 100644
index ec154e719b11..000000000000
--- a/arch/arm/include/asm/xen/xen-ops.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm/mach-aspeed/Kconfig b/arch/arm/mach-aspeed/Kconfig
index 56007b0b6120..e8d6e9957d65 100644
--- a/arch/arm/mach-aspeed/Kconfig
+++ b/arch/arm/mach-aspeed/Kconfig
@@ -26,7 +26,6 @@ config MACH_ASPEED_G4
 config MACH_ASPEED_G5
     bool "Aspeed SoC 5th Generation"
     depends on ARCH_MULTI_V6
-    select CPU_V6
     select PINCTRL_ASPEED_G5
     select FTTMR010_TIMER
     help
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 6c6f8fce854e..d942a3357090 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -491,11 +491,11 @@ static int ti_sysc_clkdm_init(struct device *dev,
                   struct clk *fck, struct clk *ick,
                   struct ti_sysc_cookie *cookie)
 {
-    if (fck)
+    if (!IS_ERR(fck))
         cookie->clkdm = ti_sysc_find_one_clockdomain(fck);
     if (cookie->clkdm)
         return 0;
-    if (ick)
+    if (!IS_ERR(ick))
         cookie->clkdm = ti_sysc_find_one_clockdomain(ick);
     if (cookie->clkdm)
         return 0;
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 7ed28982c4c3..c32d04713ba0 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1,3 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
-obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
deleted file mode 100644
index d687a73044bf..000000000000
--- a/arch/arm/xen/efi.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2015, Linaro Limited, Shannon Zhao
- */
-
-#include <linux/efi.h>
-#include <xen/xen-ops.h>
-#include <asm/xen/xen-ops.h>
-
-/* Set XEN EFI runtime services function pointers. Other fields of struct efi,
- * e.g. efi.systab, will be set like normal EFI.
- */
-void __init xen_efi_runtime_setup(void)
-{
-    efi.get_time = xen_efi_get_time;
-    efi.set_time = xen_efi_set_time;
-    efi.get_wakeup_time = xen_efi_get_wakeup_time;
-    efi.set_wakeup_time = xen_efi_set_wakeup_time;
-    efi.get_variable = xen_efi_get_variable;
-    efi.get_next_variable = xen_efi_get_next_variable;
-    efi.set_variable = xen_efi_set_variable;
-    efi.query_variable_info = xen_efi_query_variable_info;
-    efi.update_capsule = xen_efi_update_capsule;
-    efi.query_capsule_caps = xen_efi_query_capsule_caps;
-    efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
-    efi.reset_system = xen_efi_reset_system;
-}
-EXPORT_SYMBOL_GPL(xen_efi_runtime_setup);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 1e57692552d9..dd6804a64f1a 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -15,7 +15,6 @@
 #include <xen/xen-ops.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
-#include <asm/xen/xen-ops.h>
 #include <asm/system_misc.h>
 #include <asm/efi.h>
 #include <linux/interrupt.h>
@@ -437,7 +436,7 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
-EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
 EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
 EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 2b2c208408bb..38fa917c8585 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -28,7 +28,10 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 
     for_each_memblock(memory, reg) {
         if (reg->base < (phys_addr_t)0xffffffff) {
-            flags |= __GFP_DMA;
+            if (IS_ENABLED(CONFIG_ZONE_DMA32))
+                flags |= __GFP_DMA32;
+            else
+                flags |= __GFP_DMA;
             break;
         }
     }
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 86825aa20852..97f21cc66657 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -47,30 +47,6 @@
 #define read_sysreg_el2(r)      read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)   write_sysreg_elx(v, r, _EL2, _EL1)
 
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond)    \
-typeof(orig) * __hyp_text fname(void)                   \
-{                                                       \
-    typeof(alt) *val = orig;                            \
-    asm volatile(ALTERNATIVE("nop       \n",            \
-                             "mov %0, %1 \n",           \
-                             cond)                      \
-                 : "+r" (val) : "r" (alt));             \
-    return val;                                         \
-}
-
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
deleted file mode 100644
index e6e784051932..000000000000
--- a/arch/arm64/include/asm/xen/xen-ops.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index bd978ad71936..3d3815020e36 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -229,20 +229,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
     }
 }
 
-static bool __hyp_text __true_value(void)
-{
-    return true;
-}
-
-static bool __hyp_text __false_value(void)
-{
-    return false;
-}
-
-static hyp_alternate_select(__check_arm_834220,
-                            __false_value, __true_value,
-                            ARM64_WORKAROUND_834220);
-
 static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 {
     u64 par, tmp;
@@ -298,7 +284,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
      * resolve the IPA using the AT instruction.
      */
     if (!(esr & ESR_ELx_S1PTW) &&
-        (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+        (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+         (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
         if (!__translate_far_to_hpfar(far, &hpfar))
             return false;
     } else {
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index c466060b76d6..eb0efc5557f3 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
     isb();
 }
 
-static hyp_alternate_select(__tlb_switch_to_guest,
-                            __tlb_switch_to_guest_nvhe,
-                            __tlb_switch_to_guest_vhe,
-                            ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+                                             struct tlb_inv_context *cxt)
+{
+    if (has_vhe())
+        __tlb_switch_to_guest_vhe(kvm, cxt);
+    else
+        __tlb_switch_to_guest_nvhe(kvm, cxt);
+}
 
 static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
                                                 struct tlb_inv_context *cxt)
@@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
     write_sysreg(0, vttbr_el2);
 }
 
-static hyp_alternate_select(__tlb_switch_to_host,
-                            __tlb_switch_to_host_nvhe,
-                            __tlb_switch_to_host_vhe,
-                            ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+                                            struct tlb_inv_context *cxt)
+{
+    if (has_vhe())
+        __tlb_switch_to_host_vhe(kvm, cxt);
+    else
+        __tlb_switch_to_host_nvhe(kvm, cxt);
+}
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
@@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 
     /* Switch to requested VMID */
     kvm = kern_hyp_va(kvm);
-    __tlb_switch_to_guest()(kvm, &cxt);
+    __tlb_switch_to_guest(kvm, &cxt);
 
     /*
      * We could do so much better if we had the VA as well.
@@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
     if (!has_vhe() && icache_is_vpipt())
         __flush_icache_all();
 
-    __tlb_switch_to_host()(kvm, &cxt);
+    __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
     /* Switch to requested VMID */
     kvm = kern_hyp_va(kvm);
-    __tlb_switch_to_guest()(kvm, &cxt);
+    __tlb_switch_to_guest(kvm, &cxt);
 
     __tlbi(vmalls12e1is);
     dsb(ish);
     isb();
 
-    __tlb_switch_to_host()(kvm, &cxt);
+    __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -180,13 +188,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
     struct tlb_inv_context cxt;
 
     /* Switch to requested VMID */
-    __tlb_switch_to_guest()(kvm, &cxt);
+    __tlb_switch_to_guest(kvm, &cxt);
 
     __tlbi(vmalle1);
     dsb(nsh);
     isb();
 
-    __tlb_switch_to_host()(kvm, &cxt);
+    __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index a4fc65f3928d..b66215e8658e 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y := xen-arm.o hypercall.o
-obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)
diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
index 27ef5b2c43ab..cb2a0d94a144 100644
--- a/arch/csky/abiv1/alignment.c
+++ b/arch/csky/abiv1/alignment.c
@@ -5,8 +5,10 @@
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 
-static int align_enable = 1;
-static int align_count;
+static int align_kern_enable = 1;
+static int align_usr_enable = 1;
+static int align_kern_count = 0;
+static int align_usr_count = 0;
 
 static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
 {
@@ -32,9 +34,6 @@ static int ldb_asm(uint32_t addr, uint32_t *valp)
     uint32_t val;
     int err;
 
-    if (!access_ok((void *)addr, 1))
-        return 1;
-
     asm volatile (
         "movi   %0, 0\n"
         "1:\n"
@@ -67,9 +66,6 @@ static int stb_asm(uint32_t addr, uint32_t val)
 {
     int err;
 
-    if (!access_ok((void *)addr, 1))
-        return 1;
-
     asm volatile (
         "movi   %0, 0\n"
         "1:\n"
@@ -203,8 +199,6 @@ static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
     if (stb_asm(addr, byte3))
         return 1;
 
-    align_count++;
-
     return 0;
 }
 
@@ -226,7 +220,14 @@ void csky_alignment(struct pt_regs *regs)
     uint32_t addr = 0;
 
     if (!user_mode(regs))
+        goto kernel_area;
+
+    if (!align_usr_enable) {
+        pr_err("%s user disabled.\n", __func__);
         goto bad_area;
+    }
+
+    align_usr_count++;
 
     ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
     if (ret) {
@@ -234,6 +235,19 @@ void csky_alignment(struct pt_regs *regs)
         goto bad_area;
     }
 
+    goto good_area;
+
+kernel_area:
+    if (!align_kern_enable) {
+        pr_err("%s kernel disabled.\n", __func__);
+        goto bad_area;
+    }
+
+    align_kern_count++;
+
+    tmp = *(uint16_t *)instruction_pointer(regs);
+
+good_area:
     opcode = (uint32_t)tmp;
 
     rx = opcode & 0xf;
@@ -286,18 +300,32 @@ bad_area:
     force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
 }
 
-static struct ctl_table alignment_tbl[4] = {
+static struct ctl_table alignment_tbl[5] = {
+    {
+        .procname = "kernel_enable",
+        .data = &align_kern_enable,
+        .maxlen = sizeof(align_kern_enable),
+        .mode = 0666,
+        .proc_handler = &proc_dointvec
+    },
+    {
+        .procname = "user_enable",
+        .data = &align_usr_enable,
+        .maxlen = sizeof(align_usr_enable),
+        .mode = 0666,
+        .proc_handler = &proc_dointvec
+    },
     {
-        .procname = "enable",
-        .data = &align_enable,
-        .maxlen = sizeof(align_enable),
+        .procname = "kernel_count",
+        .data = &align_kern_count,
+        .maxlen = sizeof(align_kern_count),
         .mode = 0666,
         .proc_handler = &proc_dointvec
     },
     {
-        .procname = "count",
-        .data = &align_count,
-        .maxlen = sizeof(align_count),
+        .procname = "user_count",
+        .data = &align_usr_count,
+        .maxlen = sizeof(align_usr_count),
         .mode = 0666,
         .proc_handler = &proc_dointvec
     },
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index 10af8b6fe322..9f1fe80cc847 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -11,42 +11,66 @@
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 
+#define PG_dcache_clean     PG_arch_1
+
 void flush_dcache_page(struct page *page)
 {
-    struct address_space *mapping = page_mapping(page);
-    unsigned long addr;
+    struct address_space *mapping;
 
-    if (mapping && !mapping_mapped(mapping)) {
-        set_bit(PG_arch_1, &(page)->flags);
+    if (page == ZERO_PAGE(0))
         return;
-    }
 
-    /*
-     * We could delay the flush for the !page_mapping case too. But that
-     * case is for exec env/arg pages and those are %99 certainly going to
-     * get faulted into the tlb (and thus flushed) anyways.
-     */
-    addr = (unsigned long) page_address(page);
-    dcache_wb_range(addr, addr + PAGE_SIZE);
+    mapping = page_mapping_file(page);
+
+    if (mapping && !page_mapcount(page))
+        clear_bit(PG_dcache_clean, &page->flags);
+    else {
+        dcache_wbinv_all();
+        if (mapping)
+            icache_inv_all();
+        set_bit(PG_dcache_clean, &page->flags);
+    }
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-    pte_t *pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+    pte_t *ptep)
 {
-    unsigned long addr;
+    unsigned long pfn = pte_pfn(*ptep);
     struct page *page;
-    unsigned long pfn;
 
-    pfn = pte_pfn(*pte);
-    if (unlikely(!pfn_valid(pfn)))
+    if (!pfn_valid(pfn))
         return;
 
     page = pfn_to_page(pfn);
-    addr = (unsigned long) page_address(page);
+    if (page == ZERO_PAGE(0))
+        return;
+
+    if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+        dcache_wbinv_all();
 
-    if (vma->vm_flags & VM_EXEC ||
-        pages_do_alias(addr, address & PAGE_MASK))
-        cache_wbinv_all();
+    if (page_mapping_file(page)) {
+        if (vma->vm_flags & VM_EXEC)
+            icache_inv_all();
+    }
+}
+
+void flush_kernel_dcache_page(struct page *page)
+{
+    struct address_space *mapping;
+
+    mapping = page_mapping_file(page);
+
+    if (!mapping || mapping_mapped(mapping))
+        dcache_wbinv_all();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+    unsigned long end)
+{
+    dcache_wbinv_all();
 
-    clear_bit(PG_arch_1, &(page)->flags);
+    if (vma->vm_flags & VM_EXEC)
+        icache_inv_all();
 }
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index 5f663aef9b1b..79ef9e8c1afd 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -4,46 +4,63 @@
4#ifndef __ABI_CSKY_CACHEFLUSH_H 4#ifndef __ABI_CSKY_CACHEFLUSH_H
5#define __ABI_CSKY_CACHEFLUSH_H 5#define __ABI_CSKY_CACHEFLUSH_H
6 6
7#include <linux/compiler.h> 7#include <linux/mm.h>
8#include <asm/string.h> 8#include <asm/string.h>
9#include <asm/cache.h> 9#include <asm/cache.h>
10 10
11#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 11#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
12extern void flush_dcache_page(struct page *); 12extern void flush_dcache_page(struct page *);
13 13
14#define flush_cache_mm(mm) cache_wbinv_all() 14#define flush_cache_mm(mm) dcache_wbinv_all()
15#define flush_cache_page(vma, page, pfn) cache_wbinv_all() 15#define flush_cache_page(vma, page, pfn) cache_wbinv_all()
16#define flush_cache_dup_mm(mm) cache_wbinv_all() 16#define flush_cache_dup_mm(mm) cache_wbinv_all()
17 17
18#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
19extern void flush_kernel_dcache_page(struct page *);
20
21#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
22#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
23
24static inline void flush_kernel_vmap_range(void *addr, int size)
25{
26 dcache_wbinv_all();
27}
28static inline void invalidate_kernel_vmap_range(void *addr, int size)
29{
30 dcache_wbinv_all();
31}
32
33#define ARCH_HAS_FLUSH_ANON_PAGE
34static inline void flush_anon_page(struct vm_area_struct *vma,
35 struct page *page, unsigned long vmaddr)
36{
37 if (PageAnon(page))
38 cache_wbinv_all();
39}
40
18/* 41/*
19 * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken. 42 * if (current_mm != vma->mm) cache_wbinv_range(start, end) will be broken.
20 * Use cache_wbinv_all() here and need to be improved in future. 43 * Use cache_wbinv_all() here and need to be improved in future.
21 */ 44 */
22#define flush_cache_range(vma, start, end) cache_wbinv_all() 45extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
23#define flush_cache_vmap(start, end) cache_wbinv_range(start, end) 46#define flush_cache_vmap(start, end) cache_wbinv_all()
24#define flush_cache_vunmap(start, end) cache_wbinv_range(start, end) 47#define flush_cache_vunmap(start, end) cache_wbinv_all()
25 48
26#define flush_icache_page(vma, page) cache_wbinv_all() 49#define flush_icache_page(vma, page) do {} while (0);
27#define flush_icache_range(start, end) cache_wbinv_range(start, end) 50#define flush_icache_range(start, end) cache_wbinv_range(start, end)
28 51
29#define flush_icache_user_range(vma, pg, adr, len) \ 52#define flush_icache_user_range(vma,page,addr,len) \
30 cache_wbinv_range(adr, adr + len) 53 flush_dcache_page(page)
31 54
32#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 55#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
33do { \ 56do { \
34 cache_wbinv_all(); \
35 memcpy(dst, src, len); \ 57 memcpy(dst, src, len); \
36 cache_wbinv_all(); \
37} while (0) 58} while (0)
38 59
39#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 60#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
40do { \ 61do { \
41 cache_wbinv_all(); \
42 memcpy(dst, src, len); \ 62 memcpy(dst, src, len); \
43 cache_wbinv_all(); \ 63 cache_wbinv_all(); \
44} while (0) 64} while (0)
45 65
46#define flush_dcache_mmap_lock(mapping) do {} while (0)
47#define flush_dcache_mmap_unlock(mapping) do {} while (0)
48
49#endif /* __ABI_CSKY_CACHEFLUSH_H */ 66#endif /* __ABI_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/abiv1/inc/abi/page.h b/arch/csky/abiv1/inc/abi/page.h
index 6336e92a103a..c864519117c7 100644
--- a/arch/csky/abiv1/inc/abi/page.h
+++ b/arch/csky/abiv1/inc/abi/page.h
@@ -1,13 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3 3
4extern unsigned long shm_align_mask; 4#include <asm/shmparam.h>
5
5extern void flush_dcache_page(struct page *page); 6extern void flush_dcache_page(struct page *page);
6 7
7static inline unsigned long pages_do_alias(unsigned long addr1, 8static inline unsigned long pages_do_alias(unsigned long addr1,
8 unsigned long addr2) 9 unsigned long addr2)
9{ 10{
10 return (addr1 ^ addr2) & shm_align_mask; 11 return (addr1 ^ addr2) & (SHMLBA-1);
11} 12}
12 13
13static inline void clear_user_page(void *addr, unsigned long vaddr, 14static inline void clear_user_page(void *addr, unsigned long vaddr,
diff --git a/arch/csky/abiv1/mmap.c b/arch/csky/abiv1/mmap.c
index b462fd50b23a..6792aca49999 100644
--- a/arch/csky/abiv1/mmap.c
+++ b/arch/csky/abiv1/mmap.c
@@ -9,58 +9,63 @@
9#include <linux/random.h> 9#include <linux/random.h>
10#include <linux/io.h> 10#include <linux/io.h>
11 11
12unsigned long shm_align_mask = (0x4000 >> 1) - 1; /* Sane caches */ 12#define COLOUR_ALIGN(addr,pgoff) \
13 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
14 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
13 15
14#define COLOUR_ALIGN(addr, pgoff) \ 16/*
15 ((((addr) + shm_align_mask) & ~shm_align_mask) + \ 17 * We need to ensure that shared mappings are correctly aligned to
16 (((pgoff) << PAGE_SHIFT) & shm_align_mask)) 18 * avoid aliasing issues with VIPT caches. We need to ensure that
17 19 * a specific page of an object is always mapped at a multiple of
18unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, 20 * SHMLBA bytes.
21 *
22 * We unconditionally provide this function for all cases.
23 */
24unsigned long
25arch_get_unmapped_area(struct file *filp, unsigned long addr,
19 unsigned long len, unsigned long pgoff, unsigned long flags) 26 unsigned long len, unsigned long pgoff, unsigned long flags)
20{ 27{
21 struct vm_area_struct *vmm; 28 struct mm_struct *mm = current->mm;
22 int do_color_align; 29 struct vm_area_struct *vma;
30 int do_align = 0;
31 struct vm_unmapped_area_info info;
32
33 /*
34 * We only need to do colour alignment if either the I or D
35 * caches alias.
36 */
37 do_align = filp || (flags & MAP_SHARED);
23 38
39 /*
40 * We enforce the MAP_FIXED case.
41 */
24 if (flags & MAP_FIXED) { 42 if (flags & MAP_FIXED) {
25 /* 43 if (flags & MAP_SHARED &&
26 * We do not accept a shared mapping if it would violate 44 (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
27 * cache aliasing constraints.
28 */
29 if ((flags & MAP_SHARED) &&
30 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
31 return -EINVAL; 45 return -EINVAL;
32 return addr; 46 return addr;
33 } 47 }
34 48
35 if (len > TASK_SIZE) 49 if (len > TASK_SIZE)
36 return -ENOMEM; 50 return -ENOMEM;
37 do_color_align = 0; 51
38 if (filp || (flags & MAP_SHARED))
39 do_color_align = 1;
40 if (addr) { 52 if (addr) {
41 if (do_color_align) 53 if (do_align)
42 addr = COLOUR_ALIGN(addr, pgoff); 54 addr = COLOUR_ALIGN(addr, pgoff);
43 else 55 else
44 addr = PAGE_ALIGN(addr); 56 addr = PAGE_ALIGN(addr);
45 vmm = find_vma(current->mm, addr); 57
58 vma = find_vma(mm, addr);
46 if (TASK_SIZE - len >= addr && 59 if (TASK_SIZE - len >= addr &&
47 (!vmm || addr + len <= vmm->vm_start)) 60 (!vma || addr + len <= vm_start_gap(vma)))
48 return addr; 61 return addr;
49 } 62 }
50 addr = TASK_UNMAPPED_BASE;
51 if (do_color_align)
52 addr = COLOUR_ALIGN(addr, pgoff);
53 else
54 addr = PAGE_ALIGN(addr);
55 63
56 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { 64 info.flags = 0;
57 /* At this point: (!vmm || addr < vmm->vm_end). */ 65 info.length = len;
58 if (TASK_SIZE - len < addr) 66 info.low_limit = mm->mmap_base;
59 return -ENOMEM; 67 info.high_limit = TASK_SIZE;
60 if (!vmm || addr + len <= vmm->vm_start) 68 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
61 return addr; 69 info.align_offset = pgoff << PAGE_SHIFT;
62 addr = vmm->vm_end; 70 return vm_unmapped_area(&info);
63 if (do_color_align)
64 addr = COLOUR_ALIGN(addr, pgoff);
65 }
66} 71}
diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h
index 476eb786f22d..a430e7fddf35 100644
--- a/arch/csky/include/asm/barrier.h
+++ b/arch/csky/include/asm/barrier.h
@@ -9,11 +9,12 @@
9#define nop() asm volatile ("nop\n":::"memory") 9#define nop() asm volatile ("nop\n":::"memory")
10 10
11/* 11/*
12 * sync: completion barrier 12 * sync: completion barrier, all sync.xx instructions
13 * sync.s: completion barrier and shareable to other cores 13 * guarantee the last response recieved by bus transaction
14 * sync.i: completion barrier with flush cpu pipeline 14 * made by ld/st instructions before sync.s
15 * sync.is: completion barrier with flush cpu pipeline and shareable to 15 * sync.s: inherit from sync, but also shareable to other cores
16 * other cores 16 * sync.i: inherit from sync, but also flush cpu pipeline
17 * sync.is: the same with sync.i + sync.s
17 * 18 *
18 * bar.brwarw: ordering barrier for all load/store instructions before it 19 * bar.brwarw: ordering barrier for all load/store instructions before it
19 * bar.brwarws: ordering barrier for all load/store instructions before it 20 * bar.brwarws: ordering barrier for all load/store instructions before it
@@ -27,9 +28,7 @@
27 */ 28 */
28 29
29#ifdef CONFIG_CPU_HAS_CACHEV2 30#ifdef CONFIG_CPU_HAS_CACHEV2
30#define mb() asm volatile ("bar.brwarw\n":::"memory") 31#define mb() asm volatile ("sync.s\n":::"memory")
31#define rmb() asm volatile ("bar.brar\n":::"memory")
32#define wmb() asm volatile ("bar.bwaw\n":::"memory")
33 32
34#ifdef CONFIG_SMP 33#ifdef CONFIG_SMP
35#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory") 34#define __smp_mb() asm volatile ("bar.brwarws\n":::"memory")
diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h
index d68373463676..1d5fc2f78fd7 100644
--- a/arch/csky/include/asm/cache.h
+++ b/arch/csky/include/asm/cache.h
@@ -24,6 +24,7 @@ void cache_wbinv_range(unsigned long start, unsigned long end);
 void cache_wbinv_all(void);
 
 void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_inv_range(unsigned long start, unsigned long end);
 void dma_wb_range(unsigned long start, unsigned long end);
 
 #endif
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index c1dfa9c10e36..80d071e2567f 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -4,17 +4,10 @@
 #ifndef __ASM_CSKY_IO_H
 #define __ASM_CSKY_IO_H
 
-#include <abi/pgtable-bits.h>
+#include <asm/pgtable.h>
 #include <linux/types.h>
 #include <linux/version.h>
 
-extern void __iomem *ioremap(phys_addr_t offset, size_t size);
-
-extern void iounmap(void *addr);
-
-extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
-    size_t size, unsigned long flags);
-
 /*
  * I/O memory access primitives. Reads are ordered relative to any
  * following Normal memory access. Writes are ordered relative to any prior
@@ -40,9 +33,17 @@ extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 #define writel(v,c)     ({ wmb(); writel_relaxed((v),(c)); mb(); })
 #endif
 
-#define ioremap_nocache(phy, sz)    ioremap(phy, sz)
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+/*
+ * I/O memory mapping functions.
+ */
+extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
+extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
+extern void iounmap(void *addr);
+
+#define ioremap(addr, size)         __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
+#define ioremap_wc(addr, size)      __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
+#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_cache               ioremap_cache
 
 #include <asm-generic/io.h>
 
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 0040b3a05b61..7c21985c60dc 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -258,6 +258,16 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
258{ 258{
259 unsigned long prot = pgprot_val(_prot); 259 unsigned long prot = pgprot_val(_prot);
260 260
261 prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
262
263 return __pgprot(prot);
264}
265
266#define pgprot_writecombine pgprot_writecombine
267static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
268{
269 unsigned long prot = pgprot_val(_prot);
270
261 prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; 271 prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
262 272
263 return __pgprot(prot); 273 return __pgprot(prot);
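
The only difference between the two helpers is _PAGE_SO, the csky strong-order bit: a write-combined mapping stays uncached but lets stores merge and reorder. The same arithmetic as standalone code; the bit positions here are invented purely for the demo and are not the csky definitions.

#include <stdio.h>

#define _CACHE_MASK     (3UL << 9)      /* assumed field, demo only */
#define _CACHE_UNCACHED (2UL << 9)
#define _PAGE_SO        (1UL << 5)      /* assumed strong-order bit */

static unsigned long noncached(unsigned long prot)
{
        return (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
}

static unsigned long writecombine(unsigned long prot)
{
        return (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
}

int main(void)
{
        unsigned long base = (3UL << 9) | 1;    /* some cached pgprot */

        printf("noncached:    %#lx\n", noncached(base));
        printf("writecombine: %#lx\n", writecombine(base));
        return 0;
}
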
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index a7e84ccccbd8..a7a5b67df898 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -17,6 +17,12 @@
17#define PTE_INDX_SHIFT 10 17#define PTE_INDX_SHIFT 10
18#define _PGDIR_SHIFT 22 18#define _PGDIR_SHIFT 22
19 19
20.macro zero_fp
21#ifdef CONFIG_STACKTRACE
22 movi r8, 0
23#endif
24.endm
25
20.macro tlbop_begin name, val0, val1, val2 26.macro tlbop_begin name, val0, val1, val2
21ENTRY(csky_\name) 27ENTRY(csky_\name)
22 mtcr a3, ss2 28 mtcr a3, ss2
@@ -96,6 +102,7 @@ ENTRY(csky_\name)
96 SAVE_ALL 0 102 SAVE_ALL 0
97.endm 103.endm
98.macro tlbop_end is_write 104.macro tlbop_end is_write
105 zero_fp
99 RD_MEH a2 106 RD_MEH a2
100 psrset ee, ie 107 psrset ee, ie
101 mov a0, sp 108 mov a0, sp
@@ -120,6 +127,7 @@ tlbop_end 1
120 127
121ENTRY(csky_systemcall) 128ENTRY(csky_systemcall)
122 SAVE_ALL TRAP0_SIZE 129 SAVE_ALL TRAP0_SIZE
130 zero_fp
123 131
124 psrset ee, ie 132 psrset ee, ie
125 133
@@ -136,9 +144,9 @@ ENTRY(csky_systemcall)
136 mov r9, sp 144 mov r9, sp
137 bmaski r10, THREAD_SHIFT 145 bmaski r10, THREAD_SHIFT
138 andn r9, r10 146 andn r9, r10
139 ldw r8, (r9, TINFO_FLAGS) 147 ldw r12, (r9, TINFO_FLAGS)
140 ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT) 148 ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
141 cmpnei r8, 0 149 cmpnei r12, 0
142 bt csky_syscall_trace 150 bt csky_syscall_trace
143#if defined(__CSKYABIV2__) 151#if defined(__CSKYABIV2__)
144 subi sp, 8 152 subi sp, 8
@@ -180,7 +188,7 @@ csky_syscall_trace:
180 188
181ENTRY(ret_from_kernel_thread) 189ENTRY(ret_from_kernel_thread)
182 jbsr schedule_tail 190 jbsr schedule_tail
183 mov a0, r8 191 mov a0, r10
184 jsr r9 192 jsr r9
185 jbsr ret_from_exception 193 jbsr ret_from_exception
186 194
@@ -189,9 +197,9 @@ ENTRY(ret_from_fork)
189 mov r9, sp 197 mov r9, sp
190 bmaski r10, THREAD_SHIFT 198 bmaski r10, THREAD_SHIFT
191 andn r9, r10 199 andn r9, r10
192 ldw r8, (r9, TINFO_FLAGS) 200 ldw r12, (r9, TINFO_FLAGS)
193 ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT) 201 ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
194 cmpnei r8, 0 202 cmpnei r12, 0
195 bf ret_from_exception 203 bf ret_from_exception
196 mov a0, sp /* sp = pt_regs pointer */ 204 mov a0, sp /* sp = pt_regs pointer */
197 jbsr syscall_trace_exit 205 jbsr syscall_trace_exit
@@ -209,9 +217,9 @@ ret_from_exception:
209 bmaski r10, THREAD_SHIFT 217 bmaski r10, THREAD_SHIFT
210 andn r9, r10 218 andn r9, r10
211 219
212 ldw r8, (r9, TINFO_FLAGS) 220 ldw r12, (r9, TINFO_FLAGS)
213 andi r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) 221 andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
214 cmpnei r8, 0 222 cmpnei r12, 0
215 bt exit_work 223 bt exit_work
2161: 2241:
217 RESTORE_ALL 225 RESTORE_ALL
@@ -220,11 +228,11 @@ exit_work:
220 lrw syscallid, ret_from_exception 228 lrw syscallid, ret_from_exception
221 mov lr, syscallid 229 mov lr, syscallid
222 230
223 btsti r8, TIF_NEED_RESCHED 231 btsti r12, TIF_NEED_RESCHED
224 bt work_resched 232 bt work_resched
225 233
226 mov a0, sp 234 mov a0, sp
227 mov a1, r8 235 mov a1, r12
228 jmpi do_notify_resume 236 jmpi do_notify_resume
229 237
230work_resched: 238work_resched:
@@ -232,6 +240,7 @@ work_resched:
232 240
233ENTRY(csky_trap) 241ENTRY(csky_trap)
234 SAVE_ALL 0 242 SAVE_ALL 0
243 zero_fp
235 psrset ee 244 psrset ee
236 mov a0, sp /* Push Stack pointer arg */ 245 mov a0, sp /* Push Stack pointer arg */
237 jbsr trap_c /* Call C-level trap handler */ 246 jbsr trap_c /* Call C-level trap handler */
@@ -265,6 +274,7 @@ ENTRY(csky_get_tls)
265 274
266ENTRY(csky_irq) 275ENTRY(csky_irq)
267 SAVE_ALL 0 276 SAVE_ALL 0
277 zero_fp
268 psrset ee 278 psrset ee
269 279
270#ifdef CONFIG_PREEMPT 280#ifdef CONFIG_PREEMPT
@@ -276,27 +286,23 @@ ENTRY(csky_irq)
276 * Get task_struct->stack.preempt_count for current, 286 * Get task_struct->stack.preempt_count for current,
277 * and increase 1. 287 * and increase 1.
278 */ 288 */
279 ldw r8, (r9, TINFO_PREEMPT) 289 ldw r12, (r9, TINFO_PREEMPT)
280 addi r8, 1 290 addi r12, 1
281 stw r8, (r9, TINFO_PREEMPT) 291 stw r12, (r9, TINFO_PREEMPT)
282#endif 292#endif
283 293
284 mov a0, sp 294 mov a0, sp
285 jbsr csky_do_IRQ 295 jbsr csky_do_IRQ
286 296
287#ifdef CONFIG_PREEMPT 297#ifdef CONFIG_PREEMPT
288 subi r8, 1 298 subi r12, 1
289 stw r8, (r9, TINFO_PREEMPT) 299 stw r12, (r9, TINFO_PREEMPT)
290 cmpnei r8, 0 300 cmpnei r12, 0
291 bt 2f 301 bt 2f
292 ldw r8, (r9, TINFO_FLAGS) 302 ldw r12, (r9, TINFO_FLAGS)
293 btsti r8, TIF_NEED_RESCHED 303 btsti r12, TIF_NEED_RESCHED
294 bf 2f 304 bf 2f
2951:
296 jbsr preempt_schedule_irq /* irq en/disable is done inside */ 305 jbsr preempt_schedule_irq /* irq en/disable is done inside */
297 ldw r7, (r9, TINFO_FLAGS) /* get new tasks TI_FLAGS */
298 btsti r7, TIF_NEED_RESCHED
299 bt 1b /* go again */
300#endif 306#endif
3012: 3072:
302 jmpi ret_from_exception 308 jmpi ret_from_exception
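
The r8-to-r12 churn in this file frees r8 to serve as a frame pointer when CONFIG_STACKTRACE is set, and the new zero_fp macro terminates unwinds at kernel entry. A userspace model of an fp-chain unwinder; the frame layout is assumed for illustration and is not the csky ABI definition:

#include <stdio.h>

struct frame {
        unsigned long fp;       /* caller's frame pointer, 0 at the root */
        unsigned long lr;       /* return address */
};

static void walk_stack(unsigned long fp)
{
        while (fp) {
                const struct frame *f = (const struct frame *)fp;

                printf("pc: %#lx\n", f->lr);
                fp = f->fp;     /* the 0 stored by zero_fp stops the walk */
        }
}

int main(void)
{
        struct frame leaf = { 0, 0x300 };       /* entry path zeroed fp */
        struct frame mid = { (unsigned long)&leaf, 0x200 };

        walk_stack((unsigned long)&mid);
        return 0;
}
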
diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c
index 4c1a1934d76a..1a29f1157449 100644
--- a/arch/csky/kernel/perf_event.c
+++ b/arch/csky/kernel/perf_event.c
@@ -1306,7 +1306,7 @@ int csky_pmu_device_probe(struct platform_device *pdev,
1306 &csky_pmu.count_width)) { 1306 &csky_pmu.count_width)) {
1307 csky_pmu.count_width = DEFAULT_COUNT_WIDTH; 1307 csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
1308 } 1308 }
1309 csky_pmu.max_period = BIT(csky_pmu.count_width) - 1; 1309 csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;
1310 1310
1311 csky_pmu.plat_device = pdev; 1311 csky_pmu.plat_device = pdev;
1312 1312
@@ -1337,7 +1337,7 @@ int csky_pmu_device_probe(struct platform_device *pdev,
1337 return ret; 1337 return ret;
1338} 1338}
1339 1339
1340const static struct of_device_id csky_pmu_of_device_ids[] = { 1340static const struct of_device_id csky_pmu_of_device_ids[] = {
1341 {.compatible = "csky,csky-pmu"}, 1341 {.compatible = "csky,csky-pmu"},
1342 {}, 1342 {},
1343}; 1343};
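
Two independent fixes here: BIT() yields an unsigned long, so on 32-bit csky "BIT(count_width) - 1" is undefined once count_width reaches 32, and "const static" is reordered to the idiomatic "static const". A runnable demo of the first; the macro is copied from the kernel's definition:

#include <stdio.h>

#define BIT_ULL(n)      (1ULL << (n))

int main(void)
{
        int count_width = 48;   /* e.g. a 48-bit PMU counter */

        /* The old BIT(48) shifts past a 32-bit unsigned long: UB.
         * BIT_ULL keeps the arithmetic in 64 bits. */
        printf("max_period = %#llx\n", BIT_ULL(count_width) - 1);
        return 0;
}
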
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index e555740c0be5..f320d9248a22 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -55,7 +55,7 @@ int copy_thread(unsigned long clone_flags,
55 if (unlikely(p->flags & PF_KTHREAD)) { 55 if (unlikely(p->flags & PF_KTHREAD)) {
56 memset(childregs, 0, sizeof(struct pt_regs)); 56 memset(childregs, 0, sizeof(struct pt_regs));
57 childstack->r15 = (unsigned long) ret_from_kernel_thread; 57 childstack->r15 = (unsigned long) ret_from_kernel_thread;
58 childstack->r8 = kthread_arg; 58 childstack->r10 = kthread_arg;
59 childstack->r9 = usp; 59 childstack->r9 = usp;
60 childregs->sr = mfcr("psr"); 60 childregs->sr = mfcr("psr");
61 } else { 61 } else {
diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c
index b8a75cce0b8c..494ec912abff 100644
--- a/arch/csky/mm/cachev1.c
+++ b/arch/csky/mm/cachev1.c
@@ -120,7 +120,12 @@ void dma_wbinv_range(unsigned long start, unsigned long end)
120 cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); 120 cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
121} 121}
122 122
123void dma_inv_range(unsigned long start, unsigned long end)
124{
125 cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
126}
127
123void dma_wb_range(unsigned long start, unsigned long end) 128void dma_wb_range(unsigned long start, unsigned long end)
124{ 129{
125 cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1); 130 cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
126} 131}
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
index baaf05d69f44..b61be6518e21 100644
--- a/arch/csky/mm/cachev2.c
+++ b/arch/csky/mm/cachev2.c
@@ -69,11 +69,20 @@ void dma_wbinv_range(unsigned long start, unsigned long end)
69 sync_is(); 69 sync_is();
70} 70}
71 71
72void dma_inv_range(unsigned long start, unsigned long end)
73{
74 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
75
76 for (; i < end; i += L1_CACHE_BYTES)
77 asm volatile("dcache.iva %0\n"::"r"(i):"memory");
78 sync_is();
79}
80
72void dma_wb_range(unsigned long start, unsigned long end) 81void dma_wb_range(unsigned long start, unsigned long end)
73{ 82{
74 unsigned long i = start & ~(L1_CACHE_BYTES - 1); 83 unsigned long i = start & ~(L1_CACHE_BYTES - 1);
75 84
76 for (; i < end; i += L1_CACHE_BYTES) 85 for (; i < end; i += L1_CACHE_BYTES)
77 asm volatile("dcache.civa %0\n"::"r"(i):"memory"); 86 asm volatile("dcache.cva %0\n"::"r"(i):"memory");
78 sync_is(); 87 sync_is();
79} 88}
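
The new dma_inv_range() invalidates with dcache.iva, while dma_wb_range() now cleans with dcache.cva instead of the clean-plus-invalidate dcache.civa, so a pure write-back no longer discards live lines. The shared loop shape, extracted as a runnable sketch with an assumed 64-byte line size:

#include <stdio.h>

#define L1_CACHE_BYTES  64UL    /* assumed line size, demo only */

int main(void)
{
        unsigned long start = 0x1010, end = 0x10a0;
        unsigned long i;

        /* Round start down to a line boundary, step line by line past end. */
        for (i = start & ~(L1_CACHE_BYTES - 1); i < end; i += L1_CACHE_BYTES)
                printf("cache op on line %#lx\n", i);   /* dcache.iva/.cva */
        return 0;
}
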
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 602a60d47a94..06e85b565454 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -14,69 +14,50 @@
14#include <linux/version.h> 14#include <linux/version.h>
15#include <asm/cache.h> 15#include <asm/cache.h>
16 16
17void arch_dma_prep_coherent(struct page *page, size_t size)
18{
19 if (PageHighMem(page)) {
20 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
21
22 do {
23 void *ptr = kmap_atomic(page);
24 size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;
25
26 memset(ptr, 0, _size);
27 dma_wbinv_range((unsigned long)ptr,
28 (unsigned long)ptr + _size);
29
30 kunmap_atomic(ptr);
31
32 page++;
33 size -= PAGE_SIZE;
34 count--;
35 } while (count);
36 } else {
37 void *ptr = page_address(page);
38
39 memset(ptr, 0, size);
40 dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
41 }
42}
43
44static inline void cache_op(phys_addr_t paddr, size_t size, 17static inline void cache_op(phys_addr_t paddr, size_t size,
45 void (*fn)(unsigned long start, unsigned long end)) 18 void (*fn)(unsigned long start, unsigned long end))
46{ 19{
47 struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); 20 struct page *page = phys_to_page(paddr);
48 unsigned int offset = paddr & ~PAGE_MASK; 21 void *start = __va(page_to_phys(page));
49 size_t left = size; 22 unsigned long offset = offset_in_page(paddr);
50 unsigned long start; 23 size_t left = size;
51 24
52 do { 25 do {
53 size_t len = left; 26 size_t len = left;
54 27
28 if (offset + len > PAGE_SIZE)
29 len = PAGE_SIZE - offset;
30
55 if (PageHighMem(page)) { 31 if (PageHighMem(page)) {
56 void *addr; 32 start = kmap_atomic(page);
57 33
58 if (offset + len > PAGE_SIZE) { 34 fn((unsigned long)start + offset,
59 if (offset >= PAGE_SIZE) { 35 (unsigned long)start + offset + len);
60 page += offset >> PAGE_SHIFT;
61 offset &= ~PAGE_MASK;
62 }
63 len = PAGE_SIZE - offset;
64 }
65 36
66 addr = kmap_atomic(page); 37 kunmap_atomic(start);
67 start = (unsigned long)(addr + offset);
68 fn(start, start + len);
69 kunmap_atomic(addr);
70 } else { 38 } else {
71 start = (unsigned long)phys_to_virt(paddr); 39 fn((unsigned long)start + offset,
72 fn(start, start + size); 40 (unsigned long)start + offset + len);
73 } 41 }
74 offset = 0; 42 offset = 0;
43
75 page++; 44 page++;
45 start += PAGE_SIZE;
76 left -= len; 46 left -= len;
77 } while (left); 47 } while (left);
78} 48}
79 49
50static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
51{
52 memset((void *)start, 0, end - start);
53 dma_wbinv_range(start, end);
54}
55
56void arch_dma_prep_coherent(struct page *page, size_t size)
57{
58 cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
59}
60
80void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 61void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
81 size_t size, enum dma_data_direction dir) 62 size_t size, enum dma_data_direction dir)
82{ 63{
@@ -98,11 +79,10 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
98{ 79{
99 switch (dir) { 80 switch (dir) {
100 case DMA_TO_DEVICE: 81 case DMA_TO_DEVICE:
101 cache_op(paddr, size, dma_wb_range); 82 return;
102 break;
103 case DMA_FROM_DEVICE: 83 case DMA_FROM_DEVICE:
104 case DMA_BIDIRECTIONAL: 84 case DMA_BIDIRECTIONAL:
105 cache_op(paddr, size, dma_wbinv_range); 85 cache_op(paddr, size, dma_inv_range);
106 break; 86 break;
107 default: 87 default:
108 BUG(); 88 BUG();
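
Only the for_cpu() half appears in the hunk: after this change the CPU side does nothing for DMA_TO_DEVICE and only invalidates for DMA_FROM_DEVICE, so clean data is never written back over a buffer the device just filled. Sketched below, as an assumption consistent with the visible code rather than a quote from the patch, is the for_device() policy it pairs with:

#include <linux/bug.h>
#include <linux/dma-direction.h>
#include <linux/types.h>

/* cache_op(), dma_wb_range() and dma_wbinv_range() as in this file. */
static void demo_sync_for_device(phys_addr_t paddr, size_t size,
                                 enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                cache_op(paddr, size, dma_wb_range);    /* clean only */
                break;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                cache_op(paddr, size, dma_wbinv_range); /* clean + invalidate */
                break;
        default:
                BUG();
        }
}
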
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index eb0dc9e5065f..d4c2292ea46b 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -60,22 +60,6 @@ void __init mem_init(void)
60 mem_init_print_info(NULL); 60 mem_init_print_info(NULL);
61} 61}
62 62
63#ifdef CONFIG_BLK_DEV_INITRD
64void free_initrd_mem(unsigned long start, unsigned long end)
65{
66 if (start < end)
67 pr_info("Freeing initrd memory: %ldk freed\n",
68 (end - start) >> 10);
69
70 for (; start < end; start += PAGE_SIZE) {
71 ClearPageReserved(virt_to_page(start));
72 init_page_count(virt_to_page(start));
73 free_page(start);
74 totalram_pages_inc();
75 }
76}
77#endif
78
79extern char __init_begin[], __init_end[]; 63extern char __init_begin[], __init_end[];
80 64
81void free_initmem(void) 65void free_initmem(void)
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index 8473b6bdf512..e13cd3497628 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -8,12 +8,12 @@
8 8
9#include <asm/pgtable.h> 9#include <asm/pgtable.h>
10 10
11void __iomem *ioremap(phys_addr_t addr, size_t size) 11static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
12 pgprot_t prot, void *caller)
12{ 13{
13 phys_addr_t last_addr; 14 phys_addr_t last_addr;
14 unsigned long offset, vaddr; 15 unsigned long offset, vaddr;
15 struct vm_struct *area; 16 struct vm_struct *area;
16 pgprot_t prot;
17 17
18 last_addr = addr + size - 1; 18 last_addr = addr + size - 1;
19 if (!size || last_addr < addr) 19 if (!size || last_addr < addr)
@@ -23,15 +23,12 @@ void __iomem *ioremap(phys_addr_t addr, size_t size)
23 addr &= PAGE_MASK; 23 addr &= PAGE_MASK;
24 size = PAGE_ALIGN(size + offset); 24 size = PAGE_ALIGN(size + offset);
25 25
26 area = get_vm_area_caller(size, VM_ALLOC, __builtin_return_address(0)); 26 area = get_vm_area_caller(size, VM_IOREMAP, caller);
27 if (!area) 27 if (!area)
28 return NULL; 28 return NULL;
29 29
30 vaddr = (unsigned long)area->addr; 30 vaddr = (unsigned long)area->addr;
31 31
32 prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
33 _PAGE_GLOBAL | _CACHE_UNCACHED | _PAGE_SO);
34
35 if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) { 32 if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
36 free_vm_area(area); 33 free_vm_area(area);
37 return NULL; 34 return NULL;
@@ -39,7 +36,20 @@ void __iomem *ioremap(phys_addr_t addr, size_t size)
39 36
40 return (void __iomem *)(vaddr + offset); 37 return (void __iomem *)(vaddr + offset);
41} 38}
42EXPORT_SYMBOL(ioremap); 39
40void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
41{
42 return __ioremap_caller(phys_addr, size, prot,
43 __builtin_return_address(0));
44}
45EXPORT_SYMBOL(__ioremap);
46
47void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
48{
49 return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
50 __builtin_return_address(0));
51}
52EXPORT_SYMBOL(ioremap_cache);
43 53
44void iounmap(void __iomem *addr) 54void iounmap(void __iomem *addr)
45{ 55{
@@ -51,10 +61,9 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
51 unsigned long size, pgprot_t vma_prot) 61 unsigned long size, pgprot_t vma_prot)
52{ 62{
53 if (!pfn_valid(pfn)) { 63 if (!pfn_valid(pfn)) {
54 vma_prot.pgprot |= _PAGE_SO;
55 return pgprot_noncached(vma_prot); 64 return pgprot_noncached(vma_prot);
56 } else if (file->f_flags & O_SYNC) { 65 } else if (file->f_flags & O_SYNC) {
57 return pgprot_noncached(vma_prot); 66 return pgprot_writecombine(vma_prot);
58 } 67 }
59 68
60 return vma_prot; 69 return vma_prot;
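
The __ioremap_caller() split means diagnostics can attribute a mapping to the driver that requested it rather than to the wrapper itself. The pattern in userspace miniature, with invented names:

#include <stddef.h>
#include <stdio.h>

static void *alloc_caller(size_t size, void *caller)
{
        printf("alloc %zu bytes for caller %p\n", size, caller);
        return NULL;    /* allocation elided */
}

static __attribute__((noinline)) void *my_alloc(size_t size)
{
        /* Forward our own call site, as __ioremap() does above. */
        return alloc_caller(size, __builtin_return_address(0));
}

int main(void)
{
        my_alloc(64);
        return 0;
}
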
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index 63a9f33aa43e..5cfc9d347826 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -99,7 +99,7 @@
99 99
100 miscintc: interrupt-controller@18060010 { 100 miscintc: interrupt-controller@18060010 {
101 compatible = "qca,ar7240-misc-intc"; 101 compatible = "qca,ar7240-misc-intc";
102 reg = <0x18060010 0x4>; 102 reg = <0x18060010 0x8>;
103 103
104 interrupt-parent = <&cpuintc>; 104 interrupt-parent = <&cpuintc>;
105 interrupts = <6>; 105 interrupts = <6>;
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index af44b35d79a1..b4328b3b5288 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -160,7 +160,6 @@ void __init prom_meminit(void)
160 160
161void __init prom_free_prom_memory(void) 161void __init prom_free_prom_memory(void)
162{ 162{
163 unsigned long addr;
164 int i; 163 int i;
165 164
166 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP) 165 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index cbdc14b77435..adab7b54c3b4 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -36,6 +36,7 @@
36#include <asm/octeon/octeon-feature.h> 36#include <asm/octeon/octeon-feature.h>
37 37
38#include <asm/octeon/cvmx-ipd-defs.h> 38#include <asm/octeon/cvmx-ipd-defs.h>
39#include <asm/octeon/cvmx-pip-defs.h>
39 40
40enum cvmx_ipd_mode { 41enum cvmx_ipd_mode {
41 CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */ 42 CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 071053ece677..5d70babfc9ee 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -52,6 +52,7 @@
52# endif 52# endif
53#define __ARCH_WANT_SYS_FORK 53#define __ARCH_WANT_SYS_FORK
54#define __ARCH_WANT_SYS_CLONE 54#define __ARCH_WANT_SYS_CLONE
55#define __ARCH_WANT_SYS_CLONE3
55 56
56/* whitelists for checksyscalls */ 57/* whitelists for checksyscalls */
57#define __IGNORE_fadvise64_64 58#define __IGNORE_fadvise64_64
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index fa62cd1dff93..6a7afe7ef4d3 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -24,7 +24,8 @@ static char r4kwar[] __initdata =
24static char daddiwar[] __initdata = 24static char daddiwar[] __initdata =
25 "Enable CPU_DADDI_WORKAROUNDS to rectify."; 25 "Enable CPU_DADDI_WORKAROUNDS to rectify.";
26 26
27static inline void align_mod(const int align, const int mod) 27static __always_inline __init
28void align_mod(const int align, const int mod)
28{ 29{
29 asm volatile( 30 asm volatile(
30 ".set push\n\t" 31 ".set push\n\t"
@@ -38,8 +39,9 @@ static inline void align_mod(const int align, const int mod)
38 : "n"(align), "n"(mod)); 39 : "n"(align), "n"(mod));
39} 40}
40 41
41static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w, 42static __always_inline __init
42 const int align, const int mod) 43void mult_sh_align_mod(long *v1, long *v2, long *w,
44 const int align, const int mod)
43{ 45{
44 unsigned long flags; 46 unsigned long flags;
45 int m1, m2; 47 int m1, m2;
@@ -113,7 +115,7 @@ static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
113 *w = lw; 115 *w = lw;
114} 116}
115 117
116static inline void check_mult_sh(void) 118static __always_inline __init void check_mult_sh(void)
117{ 119{
118 long v1[8], v2[8], w[8]; 120 long v1[8], v2[8], w[8];
119 int bug, fix, i; 121 int bug, fix, i;
@@ -176,7 +178,7 @@ asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
176 exception_exit(prev_state); 178 exception_exit(prev_state);
177} 179}
178 180
179static inline void check_daddi(void) 181static __init void check_daddi(void)
180{ 182{
181 extern asmlinkage void handle_daddi_ov(void); 183 extern asmlinkage void handle_daddi_ov(void);
182 unsigned long flags; 184 unsigned long flags;
@@ -242,7 +244,7 @@ static inline void check_daddi(void)
242 244
243int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1; 245int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
244 246
245static inline void check_daddiu(void) 247static __init void check_daddiu(void)
246{ 248{
247 long v, w, tmp; 249 long v, w, tmp;
248 250
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index b8249c233754..5eec13b8d222 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -108,6 +108,9 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
108 return; 108 return;
109 } 109 }
110 110
111 if (start < PHYS_OFFSET)
112 return;
113
111 memblock_add(start, size); 114 memblock_add(start, size);
112 /* Reserve any memory except the ordinary RAM ranges. */ 115 /* Reserve any memory except the ordinary RAM ranges. */
113 switch (type) { 116 switch (type) {
@@ -321,7 +324,7 @@ static void __init bootmem_init(void)
321 * Reserve any memory between the start of RAM and PHYS_OFFSET 324 * Reserve any memory between the start of RAM and PHYS_OFFSET
322 */ 325 */
323 if (ramstart > PHYS_OFFSET) 326 if (ramstart > PHYS_OFFSET)
324 memblock_reserve(PHYS_OFFSET, PFN_UP(ramstart) - PHYS_OFFSET); 327 memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
325 328
326 if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) { 329 if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
327 pr_info("Wasting %lu bytes for tracking %lu unused pages\n", 330 pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
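
Two fixes in this file: memory below PHYS_OFFSET is now ignored by add_memory_region(), and the size handed to memblock_reserve() is in bytes, where the old PFN_UP(ramstart) was a page frame number, roughly PAGE_SIZE times too small. A runnable illustration of the unit bug, with the macros copied from the kernel:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
        unsigned long ramstart = 0x01000000, phys_offset = 0;

        printf("old (wrong): %lu bytes\n", PFN_UP(ramstart) - phys_offset);
        printf("new (right): %lu bytes\n", ramstart - phys_offset);
        return 0;
}
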
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b0e25e913bdb..3f16f3823031 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -80,6 +80,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
80 80
81save_static_function(sys_fork); 81save_static_function(sys_fork);
82save_static_function(sys_clone); 82save_static_function(sys_clone);
83save_static_function(sys_clone3);
83 84
84SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) 85SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
85{ 86{
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index c9c879ec9b6d..e7c5ab38e403 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -373,4 +373,4 @@
373432 n32 fsmount sys_fsmount 373432 n32 fsmount sys_fsmount
374433 n32 fspick sys_fspick 374433 n32 fspick sys_fspick
375434 n32 pidfd_open sys_pidfd_open 375434 n32 pidfd_open sys_pidfd_open
376# 435 reserved for clone3 376435 n32 clone3 __sys_clone3
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index bbce9159caa1..13cd66581f3b 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -349,4 +349,4 @@
349432 n64 fsmount sys_fsmount 349432 n64 fsmount sys_fsmount
350433 n64 fspick sys_fspick 350433 n64 fspick sys_fspick
351434 n64 pidfd_open sys_pidfd_open 351434 n64 pidfd_open sys_pidfd_open
352# 435 reserved for clone3 352435 n64 clone3 __sys_clone3
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 9653591428ec..353539ea4140 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -422,4 +422,4 @@
422432 o32 fsmount sys_fsmount 422432 o32 fsmount sys_fsmount
423433 o32 fspick sys_fspick 423433 o32 fspick sys_fspick
424434 o32 pidfd_open sys_pidfd_open 424434 o32 pidfd_open sys_pidfd_open
425# 435 reserved for clone3 425435 o32 clone3 __sys_clone3
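
With the table slots filled and sys_clone3 given the save_static treatment, clone3 becomes reachable on all three MIPS ABIs. A hypothetical smoke test via raw syscall(2); glibc had no wrapper at the time, and the headers must define __NR_clone3 and struct clone_args:

#include <linux/sched.h>        /* struct clone_args */
#include <sys/syscall.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct clone_args args;
        long pid;

        memset(&args, 0, sizeof(args));
        args.exit_signal = SIGCHLD;     /* fork-like child */

        pid = syscall(__NR_clone3, &args, sizeof(args));
        if (pid == 0)
                _exit(0);               /* child */
        if (pid < 0) {
                perror("clone3");
                return 1;
        }
        waitpid(pid, NULL, 0);
        puts("clone3 works");
        return 0;
}
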
diff --git a/arch/mips/loongson64/common/mem.c b/arch/mips/loongson64/common/mem.c
index 4abb92e0fc39..4254ac4ec616 100644
--- a/arch/mips/loongson64/common/mem.c
+++ b/arch/mips/loongson64/common/mem.c
@@ -3,6 +3,7 @@
3 */ 3 */
4#include <linux/fs.h> 4#include <linux/fs.h>
5#include <linux/fcntl.h> 5#include <linux/fcntl.h>
6#include <linux/memblock.h>
6#include <linux/mm.h> 7#include <linux/mm.h>
7 8
8#include <asm/bootinfo.h> 9#include <asm/bootinfo.h>
@@ -64,24 +65,22 @@ void __init prom_init_memory(void)
64 node_id = loongson_memmap->map[i].node_id; 65 node_id = loongson_memmap->map[i].node_id;
65 mem_type = loongson_memmap->map[i].mem_type; 66 mem_type = loongson_memmap->map[i].mem_type;
66 67
67 if (node_id == 0) { 68 if (node_id != 0)
68 switch (mem_type) { 69 continue;
69 case SYSTEM_RAM_LOW: 70
70 add_memory_region(loongson_memmap->map[i].mem_start, 71 switch (mem_type) {
71 (u64)loongson_memmap->map[i].mem_size << 20, 72 case SYSTEM_RAM_LOW:
72 BOOT_MEM_RAM); 73 memblock_add(loongson_memmap->map[i].mem_start,
73 break; 74 (u64)loongson_memmap->map[i].mem_size << 20);
74 case SYSTEM_RAM_HIGH: 75 break;
75 add_memory_region(loongson_memmap->map[i].mem_start, 76 case SYSTEM_RAM_HIGH:
76 (u64)loongson_memmap->map[i].mem_size << 20, 77 memblock_add(loongson_memmap->map[i].mem_start,
77 BOOT_MEM_RAM); 78 (u64)loongson_memmap->map[i].mem_size << 20);
78 break; 79 break;
79 case SYSTEM_RAM_RESERVED: 80 case SYSTEM_RAM_RESERVED:
80 add_memory_region(loongson_memmap->map[i].mem_start, 81 memblock_reserve(loongson_memmap->map[i].mem_start,
81 (u64)loongson_memmap->map[i].mem_size << 20, 82 (u64)loongson_memmap->map[i].mem_size << 20);
82 BOOT_MEM_RESERVED); 83 break;
83 break;
84 }
85 } 84 }
86 } 85 }
87} 86}
diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
index ffefc1cb2612..98c3a7feb10f 100644
--- a/arch/mips/loongson64/common/serial.c
+++ b/arch/mips/loongson64/common/serial.c
@@ -110,7 +110,7 @@ static int __init serial_init(void)
110} 110}
111module_init(serial_init); 111module_init(serial_init);
112 112
113static void __init serial_exit(void) 113static void __exit serial_exit(void)
114{ 114{
115 platform_device_unregister(&uart8250_device); 115 platform_device_unregister(&uart8250_device);
116} 116}
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 414e97de5dc0..8f20d2cb3767 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -142,8 +142,6 @@ static void __init szmem(unsigned int node)
142 (u32)node_id, mem_type, mem_start, mem_size); 142 (u32)node_id, mem_type, mem_start, mem_size);
143 pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", 143 pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
144 start_pfn, end_pfn, num_physpages); 144 start_pfn, end_pfn, num_physpages);
145 add_memory_region((node_id << 44) + mem_start,
146 (u64)mem_size << 20, BOOT_MEM_RAM);
147 memblock_add_node(PFN_PHYS(start_pfn), 145 memblock_add_node(PFN_PHYS(start_pfn),
148 PFN_PHYS(end_pfn - start_pfn), node); 146 PFN_PHYS(end_pfn - start_pfn), node);
149 break; 147 break;
@@ -156,16 +154,12 @@ static void __init szmem(unsigned int node)
156 (u32)node_id, mem_type, mem_start, mem_size); 154 (u32)node_id, mem_type, mem_start, mem_size);
157 pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", 155 pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
158 start_pfn, end_pfn, num_physpages); 156 start_pfn, end_pfn, num_physpages);
159 add_memory_region((node_id << 44) + mem_start,
160 (u64)mem_size << 20, BOOT_MEM_RAM);
161 memblock_add_node(PFN_PHYS(start_pfn), 157 memblock_add_node(PFN_PHYS(start_pfn),
162 PFN_PHYS(end_pfn - start_pfn), node); 158 PFN_PHYS(end_pfn - start_pfn), node);
163 break; 159 break;
164 case SYSTEM_RAM_RESERVED: 160 case SYSTEM_RAM_RESERVED:
165 pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", 161 pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
166 (u32)node_id, mem_type, mem_start, mem_size); 162 (u32)node_id, mem_type, mem_start, mem_size);
167 add_memory_region((node_id << 44) + mem_start,
168 (u64)mem_size << 20, BOOT_MEM_RESERVED);
169 memblock_reserve(((node_id << 44) + mem_start), 163 memblock_reserve(((node_id << 44) + mem_start),
170 mem_size << 20); 164 mem_size << 20);
171 break; 165 break;
@@ -191,8 +185,6 @@ static void __init node_mem_init(unsigned int node)
191 NODE_DATA(node)->node_start_pfn = start_pfn; 185 NODE_DATA(node)->node_start_pfn = start_pfn;
192 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; 186 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
193 187
194 free_bootmem_with_active_regions(node, end_pfn);
195
196 if (node == 0) { 188 if (node == 0) {
197 /* kernel end address */ 189 /* kernel end address */
198 unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end)); 190 unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
@@ -209,8 +201,6 @@ static void __init node_mem_init(unsigned int node)
209 memblock_reserve((node_addrspace_offset | 0xfe000000), 201 memblock_reserve((node_addrspace_offset | 0xfe000000),
210 32 << 20); 202 32 << 20);
211 } 203 }
212
213 sparse_memory_present_with_active_regions(node);
214} 204}
215 205
216static __init void prom_meminit(void) 206static __init void prom_meminit(void)
@@ -227,6 +217,7 @@ static __init void prom_meminit(void)
227 cpumask_clear(&__node_data[(node)]->cpumask); 217 cpumask_clear(&__node_data[(node)]->cpumask);
228 } 218 }
229 } 219 }
220 memblocks_present();
230 max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); 221 max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
231 222
232 for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { 223 for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
index dfb527961a27..800a21b8b8b0 100644
--- a/arch/mips/pmcs-msp71xx/msp_prom.c
+++ b/arch/mips/pmcs-msp71xx/msp_prom.c
@@ -61,6 +61,7 @@ int init_debug = 1;
61/* memory blocks */ 61/* memory blocks */
62struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS]; 62struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
63 63
64#define MAX_PROM_MEM 5
64static phys_addr_t prom_mem_base[MAX_PROM_MEM] __initdata; 65static phys_addr_t prom_mem_base[MAX_PROM_MEM] __initdata;
65static phys_addr_t prom_mem_size[MAX_PROM_MEM] __initdata; 66static phys_addr_t prom_mem_size[MAX_PROM_MEM] __initdata;
66static unsigned int nr_prom_mem __initdata; 67static unsigned int nr_prom_mem __initdata;
@@ -358,7 +359,7 @@ void __init prom_meminit(void)
358 p++; 359 p++;
359 360
360 if (type == BOOT_MEM_ROM_DATA) { 361 if (type == BOOT_MEM_ROM_DATA) {
361 if (nr_prom_mem >= 5) { 362 if (nr_prom_mem >= MAX_PROM_MEM) {
362 pr_err("Too many ROM DATA regions"); 363 pr_err("Too many ROM DATA regions");
363 continue; 364 continue;
364 } 365 }
@@ -377,7 +378,6 @@ void __init prom_free_prom_memory(void)
377 char *ptr; 378 char *ptr;
378 int len = 0; 379 int len = 0;
379 int i; 380 int i;
380 unsigned long addr;
381 381
382 /* 382 /*
383 * preserve environment variables and command line from pmon/bbload 383 * preserve environment variables and command line from pmon/bbload
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 69cfa0a5339e..807f0f782f75 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -59,7 +59,7 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg
59ifndef CONFIG_CPU_MIPSR6 59ifndef CONFIG_CPU_MIPSR6
60 ifeq ($(call ld-ifversion, -lt, 225000000, y),y) 60 ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
61 $(warning MIPS VDSO requires binutils >= 2.25) 61 $(warning MIPS VDSO requires binutils >= 2.25)
62 obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y)) 62 obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
63 ccflags-vdso += -DDISABLE_MIPS_VDSO 63 ccflags-vdso += -DDISABLE_MIPS_VDSO
64 endif 64 endif
65endif 65endif
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c
deleted file mode 100644
index e8243c7fd5b5..000000000000
--- a/arch/mips/vdso/gettimeofday.c
+++ /dev/null
@@ -1,269 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2015 Imagination Technologies
4 * Author: Alex Smith <alex.smith@imgtec.com>
5 */
6
7#include "vdso.h"
8
9#include <linux/compiler.h>
10#include <linux/time.h>
11
12#include <asm/clocksource.h>
13#include <asm/io.h>
14#include <asm/unistd.h>
15#include <asm/vdso.h>
16
17#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
18
19static __always_inline long gettimeofday_fallback(struct timeval *_tv,
20 struct timezone *_tz)
21{
22 register struct timezone *tz asm("a1") = _tz;
23 register struct timeval *tv asm("a0") = _tv;
24 register long ret asm("v0");
25 register long nr asm("v0") = __NR_gettimeofday;
26 register long error asm("a3");
27
28 asm volatile(
29 " syscall\n"
30 : "=r" (ret), "=r" (error)
31 : "r" (tv), "r" (tz), "r" (nr)
32 : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
33 "$14", "$15", "$24", "$25", "hi", "lo", "memory");
34
35 return error ? -ret : ret;
36}
37
38#endif
39
40static __always_inline long clock_gettime_fallback(clockid_t _clkid,
41 struct timespec *_ts)
42{
43 register struct timespec *ts asm("a1") = _ts;
44 register clockid_t clkid asm("a0") = _clkid;
45 register long ret asm("v0");
46 register long nr asm("v0") = __NR_clock_gettime;
47 register long error asm("a3");
48
49 asm volatile(
50 " syscall\n"
51 : "=r" (ret), "=r" (error)
52 : "r" (clkid), "r" (ts), "r" (nr)
53 : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
54 "$14", "$15", "$24", "$25", "hi", "lo", "memory");
55
56 return error ? -ret : ret;
57}
58
59static __always_inline int do_realtime_coarse(struct timespec *ts,
60 const union mips_vdso_data *data)
61{
62 u32 start_seq;
63
64 do {
65 start_seq = vdso_data_read_begin(data);
66
67 ts->tv_sec = data->xtime_sec;
68 ts->tv_nsec = data->xtime_nsec >> data->cs_shift;
69 } while (vdso_data_read_retry(data, start_seq));
70
71 return 0;
72}
73
74static __always_inline int do_monotonic_coarse(struct timespec *ts,
75 const union mips_vdso_data *data)
76{
77 u32 start_seq;
78 u64 to_mono_sec;
79 u64 to_mono_nsec;
80
81 do {
82 start_seq = vdso_data_read_begin(data);
83
84 ts->tv_sec = data->xtime_sec;
85 ts->tv_nsec = data->xtime_nsec >> data->cs_shift;
86
87 to_mono_sec = data->wall_to_mono_sec;
88 to_mono_nsec = data->wall_to_mono_nsec;
89 } while (vdso_data_read_retry(data, start_seq));
90
91 ts->tv_sec += to_mono_sec;
92 timespec_add_ns(ts, to_mono_nsec);
93
94 return 0;
95}
96
97#ifdef CONFIG_CSRC_R4K
98
99static __always_inline u64 read_r4k_count(void)
100{
101 unsigned int count;
102
103 __asm__ __volatile__(
104 " .set push\n"
105 " .set mips32r2\n"
106 " rdhwr %0, $2\n"
107 " .set pop\n"
108 : "=r" (count));
109
110 return count;
111}
112
113#endif
114
115#ifdef CONFIG_CLKSRC_MIPS_GIC
116
117static __always_inline u64 read_gic_count(const union mips_vdso_data *data)
118{
119 void __iomem *gic = get_gic(data);
120 u32 hi, hi2, lo;
121
122 do {
123 hi = __raw_readl(gic + sizeof(lo));
124 lo = __raw_readl(gic);
125 hi2 = __raw_readl(gic + sizeof(lo));
126 } while (hi2 != hi);
127
128 return (((u64)hi) << 32) + lo;
129}
130
131#endif
132
133static __always_inline u64 get_ns(const union mips_vdso_data *data)
134{
135 u64 cycle_now, delta, nsec;
136
137 switch (data->clock_mode) {
138#ifdef CONFIG_CSRC_R4K
139 case VDSO_CLOCK_R4K:
140 cycle_now = read_r4k_count();
141 break;
142#endif
143#ifdef CONFIG_CLKSRC_MIPS_GIC
144 case VDSO_CLOCK_GIC:
145 cycle_now = read_gic_count(data);
146 break;
147#endif
148 default:
149 return 0;
150 }
151
152 delta = (cycle_now - data->cs_cycle_last) & data->cs_mask;
153
154 nsec = (delta * data->cs_mult) + data->xtime_nsec;
155 nsec >>= data->cs_shift;
156
157 return nsec;
158}
159
160static __always_inline int do_realtime(struct timespec *ts,
161 const union mips_vdso_data *data)
162{
163 u32 start_seq;
164 u64 ns;
165
166 do {
167 start_seq = vdso_data_read_begin(data);
168
169 if (data->clock_mode == VDSO_CLOCK_NONE)
170 return -ENOSYS;
171
172 ts->tv_sec = data->xtime_sec;
173 ns = get_ns(data);
174 } while (vdso_data_read_retry(data, start_seq));
175
176 ts->tv_nsec = 0;
177 timespec_add_ns(ts, ns);
178
179 return 0;
180}
181
182static __always_inline int do_monotonic(struct timespec *ts,
183 const union mips_vdso_data *data)
184{
185 u32 start_seq;
186 u64 ns;
187 u64 to_mono_sec;
188 u64 to_mono_nsec;
189
190 do {
191 start_seq = vdso_data_read_begin(data);
192
193 if (data->clock_mode == VDSO_CLOCK_NONE)
194 return -ENOSYS;
195
196 ts->tv_sec = data->xtime_sec;
197 ns = get_ns(data);
198
199 to_mono_sec = data->wall_to_mono_sec;
200 to_mono_nsec = data->wall_to_mono_nsec;
201 } while (vdso_data_read_retry(data, start_seq));
202
203 ts->tv_sec += to_mono_sec;
204 ts->tv_nsec = 0;
205 timespec_add_ns(ts, ns + to_mono_nsec);
206
207 return 0;
208}
209
210#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
211
212/*
213 * This is behind the ifdef so that we don't provide the symbol when there's no
214 * possibility of there being a usable clocksource, because there's nothing we
215 * can do without it. When libc fails the symbol lookup it should fall back on
216 * the standard syscall path.
217 */
218int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
219{
220 const union mips_vdso_data *data = get_vdso_data();
221 struct timespec ts;
222 int ret;
223
224 ret = do_realtime(&ts, data);
225 if (ret)
226 return gettimeofday_fallback(tv, tz);
227
228 if (tv) {
229 tv->tv_sec = ts.tv_sec;
230 tv->tv_usec = ts.tv_nsec / 1000;
231 }
232
233 if (tz) {
234 tz->tz_minuteswest = data->tz_minuteswest;
235 tz->tz_dsttime = data->tz_dsttime;
236 }
237
238 return 0;
239}
240
241#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
242
243int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
244{
245 const union mips_vdso_data *data = get_vdso_data();
246 int ret = -1;
247
248 switch (clkid) {
249 case CLOCK_REALTIME_COARSE:
250 ret = do_realtime_coarse(ts, data);
251 break;
252 case CLOCK_MONOTONIC_COARSE:
253 ret = do_monotonic_coarse(ts, data);
254 break;
255 case CLOCK_REALTIME:
256 ret = do_realtime(ts, data);
257 break;
258 case CLOCK_MONOTONIC:
259 ret = do_monotonic(ts, data);
260 break;
261 default:
262 break;
263 }
264
265 if (ret)
266 ret = clock_gettime_fallback(clkid, ts);
267
268 return ret;
269}
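
Together with the Makefile hunk above that now filters vgettimeofday.o, this deletion accompanies the MIPS move to the generic vDSO implementation. Userspace keeps the same contract, a clock read that normally never enters the kernel:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* libc resolves __vdso_clock_gettime and only falls back to the
         * real syscall when the vDSO declines (e.g. no usable clocksource). */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
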
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 574eca33f893..d97db3ad9aae 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -254,7 +254,13 @@ extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
254extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 254extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
255extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, 255extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
256 unsigned long addr, pmd_t *pmdp); 256 unsigned long addr, pmd_t *pmdp);
257extern int radix__has_transparent_hugepage(void); 257static inline int radix__has_transparent_hugepage(void)
258{
259 /* For radix 2M at PMD level means thp */
260 if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
261 return 1;
262 return 0;
263}
258#endif 264#endif
259 265
260extern int __meminit radix__vmemmap_create_mapping(unsigned long start, 266extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d7fcdfa7fee4..ec2547cc5ecb 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -36,8 +36,8 @@
36#include "book3s.h" 36#include "book3s.h"
37#include "trace.h" 37#include "trace.h"
38 38
39#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM 39#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
40#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 40#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
41 41
42/* #define EXIT_DEBUG */ 42/* #define EXIT_DEBUG */
43 43
@@ -69,8 +69,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
69 { "pthru_all", VCPU_STAT(pthru_all) }, 69 { "pthru_all", VCPU_STAT(pthru_all) },
70 { "pthru_host", VCPU_STAT(pthru_host) }, 70 { "pthru_host", VCPU_STAT(pthru_host) },
71 { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) }, 71 { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
72 { "largepages_2M", VM_STAT(num_2M_pages) }, 72 { "largepages_2M", VM_STAT(num_2M_pages, .mode = 0444) },
73 { "largepages_1G", VM_STAT(num_1G_pages) }, 73 { "largepages_1G", VM_STAT(num_1G_pages, .mode = 0444) },
74 { NULL } 74 { NULL }
75}; 75};
76 76
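
The ", ## __VA_ARGS__" form (a GNU extension the kernel relies on) swallows the comma when no extra argument is given, so existing VM_STAT(x)/VCPU_STAT(x) users compile unchanged while the largepages entries append ".mode = 0444" to become world-readable. A standalone demo with simplified types:

#include <stdio.h>

struct item {
        const char *name;
        int kind;
        int mode;       /* stays 0 unless overridden */
};

#define KIND_VM 1
#define VM_STAT(n, ...) .name = #n, .kind = KIND_VM, ## __VA_ARGS__

static const struct item items[] = {
        { VM_STAT(plain) },                     /* comma dropped, still valid */
        { VM_STAT(readonly, .mode = 0444) },    /* extra initializer appended */
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < 2; i++)
                printf("%s mode=%o\n", items[i].name, items[i].mode);
        return 0;
}
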
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index d1f390ac9cdb..64733b9cb20a 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -406,6 +406,8 @@ int hash__has_transparent_hugepage(void)
406 406
407 return 1; 407 return 1;
408} 408}
409EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
410
409#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 411#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
410 412
411#ifdef CONFIG_STRICT_KERNEL_RWX 413#ifdef CONFIG_STRICT_KERNEL_RWX
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 3a1fbf9cb8f8..6ee17d09649c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1027,13 +1027,6 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1027 return old_pmd; 1027 return old_pmd;
1028} 1028}
1029 1029
1030int radix__has_transparent_hugepage(void)
1031{
1032 /* For radix 2M at PMD level means thp */
1033 if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
1034 return 1;
1035 return 0;
1036}
1037#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1030#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1038 1031
1039void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, 1032void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index a44f6281ca3a..4e08246acd79 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -172,6 +172,21 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
172 vmemmap_list = vmem_back; 172 vmemmap_list = vmem_back;
173} 173}
174 174
175static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
176 unsigned long page_size)
177{
178 unsigned long nr_pfn = page_size / sizeof(struct page);
179 unsigned long start_pfn = page_to_pfn((struct page *)start);
180
181 if ((start_pfn + nr_pfn) > altmap->end_pfn)
182 return true;
183
184 if (start_pfn < altmap->base_pfn)
185 return true;
186
187 return false;
188}
189
175int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, 190int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
176 struct vmem_altmap *altmap) 191 struct vmem_altmap *altmap)
177{ 192{
@@ -194,7 +209,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
194 * fail due to alignment issues when using 16MB hugepages, so 209 * fail due to alignment issues when using 16MB hugepages, so
195 * fall back to system memory if the altmap allocation fail. 210 * fall back to system memory if the altmap allocation fail.
196 */ 211 */
197 if (altmap) { 212 if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
198 p = altmap_alloc_block_buf(page_size, altmap); 213 p = altmap_alloc_block_buf(page_size, altmap);
199 if (!p) 214 if (!p)
200 pr_debug("altmap block allocation failed, falling back to system memory"); 215 pr_debug("altmap block allocation failed, falling back to system memory");
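
The new altmap_cross_boundary() keeps vmemmap_populate() from asking the altmap for an allocation whose backing pfns spill outside [base_pfn, end_pfn); such requests now fall back to system memory up front. The containment test reduced to plain arithmetic, with invented pfn values:

#include <stdbool.h>
#include <stdio.h>

static bool crosses(unsigned long start_pfn, unsigned long nr_pfn,
                    unsigned long base_pfn, unsigned long end_pfn)
{
        return start_pfn < base_pfn || start_pfn + nr_pfn > end_pfn;
}

int main(void)
{
        printf("inside:  %d\n", crosses(100, 16, 96, 128));     /* 0: safe */
        printf("crosses: %d\n", crosses(120, 16, 96, 128));     /* 1: fall back */
        return 0;
}
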
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 5a02b7d50940..9c992a88d858 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -22,6 +22,7 @@
22 22
23#define REG_L __REG_SEL(ld, lw) 23#define REG_L __REG_SEL(ld, lw)
24#define REG_S __REG_SEL(sd, sw) 24#define REG_S __REG_SEL(sd, sw)
25#define REG_SC __REG_SEL(sc.d, sc.w)
25#define SZREG __REG_SEL(8, 4) 26#define SZREG __REG_SEL(8, 4)
26#define LGREG __REG_SEL(3, 2) 27#define LGREG __REG_SEL(3, 2)
27 28
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index da7aa88113c2..2d592da1e776 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -98,7 +98,26 @@ _save_context:
98 */ 98 */
99 .macro RESTORE_ALL 99 .macro RESTORE_ALL
100 REG_L a0, PT_SSTATUS(sp) 100 REG_L a0, PT_SSTATUS(sp)
101 REG_L a2, PT_SEPC(sp) 101 /*
102 * The current load reservation is effectively part of the processor's
103 * state, in the sense that load reservations cannot be shared between
104 * different hart contexts. We can't actually save and restore a load
105 * reservation, so instead here we clear any existing reservation --
106 * it's always legal for implementations to clear load reservations at
107 * any point (as long as the forward progress guarantee is kept, but
108 * we'll ignore that here).
109 *
110 * Dangling load reservations can be the result of taking a trap in the
111 * middle of an LR/SC sequence, but can also be the result of a taken
112 * forward branch around an SC -- which is how we implement CAS. As a
113 * result we need to clear reservations between the last CAS and the
114 * jump back to the new context. While it is unlikely the store
115 * completes, implementations are allowed to expand reservations to be
116 * arbitrarily large.
117 */
118 REG_L a2, PT_SEPC(sp)
119 REG_SC x0, a2, PT_SEPC(sp)
120
102 csrw CSR_SSTATUS, a0 121 csrw CSR_SSTATUS, a0
103 csrw CSR_SEPC, a2 122 csrw CSR_SEPC, a2
104 123
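
A userspace sketch (riscv toolchain only) of the lr/sc loop the comment above is protecting: if a trap fires between lr.w and sc.w and the old reservation survived into the new context, a later sc could succeed when it must not. The REG_SC added above conditionally stores the just-loaded sepc back to its own slot, discarding the result in x0, which clears any dangling reservation on the way out.

static inline int cmpxchg_w(int *p, int old, int new)
{
        int prev, fail;

        asm volatile(
        "1:     lr.w    %0, (%2)\n"     /* load and acquire reservation */
        "       bne     %0, %3, 2f\n"   /* mismatch: taken branch over sc */
        "       sc.w    %1, %4, (%2)\n" /* store iff reservation still held */
        "       bnez    %1, 1b\n"       /* nonzero = lost reservation, retry */
        "2:\n"
                : "=&r" (prev), "=&r" (fail)
                : "r" (p), "r" (old), "r" (new)
                : "memory");
        return prev;
}
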
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index f0ba71304b6e..83f7d12042fb 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -11,6 +11,7 @@
11#include <linux/swap.h> 11#include <linux/swap.h>
12#include <linux/sizes.h> 12#include <linux/sizes.h>
13#include <linux/of_fdt.h> 13#include <linux/of_fdt.h>
14#include <linux/libfdt.h>
14 15
15#include <asm/fixmap.h> 16#include <asm/fixmap.h>
16#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
@@ -82,6 +83,8 @@ disable:
82} 83}
83#endif /* CONFIG_BLK_DEV_INITRD */ 84#endif /* CONFIG_BLK_DEV_INITRD */
84 85
86static phys_addr_t dtb_early_pa __initdata;
87
85void __init setup_bootmem(void) 88void __init setup_bootmem(void)
86{ 89{
87 struct memblock_region *reg; 90 struct memblock_region *reg;
@@ -117,7 +120,12 @@ void __init setup_bootmem(void)
117 setup_initrd(); 120 setup_initrd();
118#endif /* CONFIG_BLK_DEV_INITRD */ 121#endif /* CONFIG_BLK_DEV_INITRD */
119 122
120 early_init_fdt_reserve_self(); 123 /*
124 * Avoid using early_init_fdt_reserve_self() since __pa() does
125 * not work for DTB pointers that are fixmap addresses
126 */
127 memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
128
121 early_init_fdt_scan_reserved_mem(); 129 early_init_fdt_scan_reserved_mem();
122 memblock_allow_resize(); 130 memblock_allow_resize();
123 memblock_dump_all(); 131 memblock_dump_all();
@@ -393,6 +401,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
393 401
394 /* Save pointer to DTB for early FDT parsing */ 402 /* Save pointer to DTB for early FDT parsing */
395 dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK); 403 dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
404 /* Save physical address for memblock reservation */
405 dtb_early_pa = dtb_pa;
396} 406}
397 407
398static void __init setup_vm_final(void) 408static void __init setup_vm_final(void)
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 347f48702edb..38d64030aacf 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -44,6 +44,7 @@ CONFIG_NR_CPUS=512
44CONFIG_NUMA=y 44CONFIG_NUMA=y
45CONFIG_HZ_100=y 45CONFIG_HZ_100=y
46CONFIG_KEXEC_FILE=y 46CONFIG_KEXEC_FILE=y
47CONFIG_KEXEC_SIG=y
47CONFIG_EXPOLINE=y 48CONFIG_EXPOLINE=y
48CONFIG_EXPOLINE_AUTO=y 49CONFIG_EXPOLINE_AUTO=y
49CONFIG_CHSC_SCH=y 50CONFIG_CHSC_SCH=y
@@ -69,12 +70,13 @@ CONFIG_MODULE_UNLOAD=y
69CONFIG_MODULE_FORCE_UNLOAD=y 70CONFIG_MODULE_FORCE_UNLOAD=y
70CONFIG_MODVERSIONS=y 71CONFIG_MODVERSIONS=y
71CONFIG_MODULE_SRCVERSION_ALL=y 72CONFIG_MODULE_SRCVERSION_ALL=y
72CONFIG_MODULE_SIG=y
73CONFIG_MODULE_SIG_SHA256=y 73CONFIG_MODULE_SIG_SHA256=y
74CONFIG_UNUSED_SYMBOLS=y
74CONFIG_BLK_DEV_INTEGRITY=y 75CONFIG_BLK_DEV_INTEGRITY=y
75CONFIG_BLK_DEV_THROTTLING=y 76CONFIG_BLK_DEV_THROTTLING=y
76CONFIG_BLK_WBT=y 77CONFIG_BLK_WBT=y
77CONFIG_BLK_CGROUP_IOLATENCY=y 78CONFIG_BLK_CGROUP_IOLATENCY=y
79CONFIG_BLK_CGROUP_IOCOST=y
78CONFIG_PARTITION_ADVANCED=y 80CONFIG_PARTITION_ADVANCED=y
79CONFIG_IBM_PARTITION=y 81CONFIG_IBM_PARTITION=y
80CONFIG_BSD_DISKLABEL=y 82CONFIG_BSD_DISKLABEL=y
@@ -370,6 +372,7 @@ CONFIG_NETLINK_DIAG=m
370CONFIG_CGROUP_NET_PRIO=y 372CONFIG_CGROUP_NET_PRIO=y
371CONFIG_BPF_JIT=y 373CONFIG_BPF_JIT=y
372CONFIG_NET_PKTGEN=m 374CONFIG_NET_PKTGEN=m
375# CONFIG_NET_DROP_MONITOR is not set
373CONFIG_PCI=y 376CONFIG_PCI=y
374CONFIG_PCI_DEBUG=y 377CONFIG_PCI_DEBUG=y
375CONFIG_HOTPLUG_PCI=y 378CONFIG_HOTPLUG_PCI=y
@@ -424,6 +427,7 @@ CONFIG_DM_CRYPT=m
424CONFIG_DM_SNAPSHOT=m 427CONFIG_DM_SNAPSHOT=m
425CONFIG_DM_THIN_PROVISIONING=m 428CONFIG_DM_THIN_PROVISIONING=m
426CONFIG_DM_WRITECACHE=m 429CONFIG_DM_WRITECACHE=m
430CONFIG_DM_CLONE=m
427CONFIG_DM_MIRROR=m 431CONFIG_DM_MIRROR=m
428CONFIG_DM_LOG_USERSPACE=m 432CONFIG_DM_LOG_USERSPACE=m
429CONFIG_DM_RAID=m 433CONFIG_DM_RAID=m
@@ -435,6 +439,7 @@ CONFIG_DM_DELAY=m
435CONFIG_DM_UEVENT=y 439CONFIG_DM_UEVENT=y
436CONFIG_DM_FLAKEY=m 440CONFIG_DM_FLAKEY=m
437CONFIG_DM_VERITY=m 441CONFIG_DM_VERITY=m
442CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
438CONFIG_DM_SWITCH=m 443CONFIG_DM_SWITCH=m
439CONFIG_NETDEVICES=y 444CONFIG_NETDEVICES=y
440CONFIG_BONDING=m 445CONFIG_BONDING=m
@@ -489,6 +494,7 @@ CONFIG_MLX5_CORE_EN=y
489# CONFIG_NET_VENDOR_NVIDIA is not set 494# CONFIG_NET_VENDOR_NVIDIA is not set
490# CONFIG_NET_VENDOR_OKI is not set 495# CONFIG_NET_VENDOR_OKI is not set
491# CONFIG_NET_VENDOR_PACKET_ENGINES is not set 496# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
497# CONFIG_NET_VENDOR_PENSANDO is not set
492# CONFIG_NET_VENDOR_QLOGIC is not set 498# CONFIG_NET_VENDOR_QLOGIC is not set
493# CONFIG_NET_VENDOR_QUALCOMM is not set 499# CONFIG_NET_VENDOR_QUALCOMM is not set
494# CONFIG_NET_VENDOR_RDC is not set 500# CONFIG_NET_VENDOR_RDC is not set
@@ -538,15 +544,16 @@ CONFIG_WATCHDOG=y
538CONFIG_WATCHDOG_NOWAYOUT=y 544CONFIG_WATCHDOG_NOWAYOUT=y
539CONFIG_SOFT_WATCHDOG=m 545CONFIG_SOFT_WATCHDOG=m
540CONFIG_DIAG288_WATCHDOG=m 546CONFIG_DIAG288_WATCHDOG=m
541CONFIG_DRM=y 547CONFIG_FB=y
542CONFIG_DRM_VIRTIO_GPU=y
543CONFIG_FRAMEBUFFER_CONSOLE=y 548CONFIG_FRAMEBUFFER_CONSOLE=y
549CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
544# CONFIG_HID is not set 550# CONFIG_HID is not set
545# CONFIG_USB_SUPPORT is not set 551# CONFIG_USB_SUPPORT is not set
546CONFIG_INFINIBAND=m 552CONFIG_INFINIBAND=m
547CONFIG_INFINIBAND_USER_ACCESS=m 553CONFIG_INFINIBAND_USER_ACCESS=m
548CONFIG_MLX4_INFINIBAND=m 554CONFIG_MLX4_INFINIBAND=m
549CONFIG_MLX5_INFINIBAND=m 555CONFIG_MLX5_INFINIBAND=m
556CONFIG_SYNC_FILE=y
550CONFIG_VFIO=m 557CONFIG_VFIO=m
551CONFIG_VFIO_PCI=m 558CONFIG_VFIO_PCI=m
552CONFIG_VFIO_MDEV=m 559CONFIG_VFIO_MDEV=m
@@ -580,6 +587,8 @@ CONFIG_NILFS2_FS=m
580CONFIG_FS_DAX=y 587CONFIG_FS_DAX=y
581CONFIG_EXPORTFS_BLOCK_OPS=y 588CONFIG_EXPORTFS_BLOCK_OPS=y
582CONFIG_FS_ENCRYPTION=y 589CONFIG_FS_ENCRYPTION=y
590CONFIG_FS_VERITY=y
591CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
583CONFIG_FANOTIFY=y 592CONFIG_FANOTIFY=y
584CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 593CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
585CONFIG_QUOTA_NETLINK_INTERFACE=y 594CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -589,6 +598,7 @@ CONFIG_QFMT_V2=m
589CONFIG_AUTOFS4_FS=m 598CONFIG_AUTOFS4_FS=m
590CONFIG_FUSE_FS=y 599CONFIG_FUSE_FS=y
591CONFIG_CUSE=m 600CONFIG_CUSE=m
601CONFIG_VIRTIO_FS=m
592CONFIG_OVERLAY_FS=m 602CONFIG_OVERLAY_FS=m
593CONFIG_FSCACHE=m 603CONFIG_FSCACHE=m
594CONFIG_CACHEFILES=m 604CONFIG_CACHEFILES=m
@@ -648,12 +658,15 @@ CONFIG_FORTIFY_SOURCE=y
648CONFIG_SECURITY_SELINUX=y 658CONFIG_SECURITY_SELINUX=y
649CONFIG_SECURITY_SELINUX_BOOTPARAM=y 659CONFIG_SECURITY_SELINUX_BOOTPARAM=y
650CONFIG_SECURITY_SELINUX_DISABLE=y 660CONFIG_SECURITY_SELINUX_DISABLE=y
661CONFIG_SECURITY_LOCKDOWN_LSM=y
662CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
651CONFIG_INTEGRITY_SIGNATURE=y 663CONFIG_INTEGRITY_SIGNATURE=y
652CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y 664CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
653CONFIG_IMA=y 665CONFIG_IMA=y
654CONFIG_IMA_DEFAULT_HASH_SHA256=y 666CONFIG_IMA_DEFAULT_HASH_SHA256=y
655CONFIG_IMA_WRITE_POLICY=y 667CONFIG_IMA_WRITE_POLICY=y
656CONFIG_IMA_APPRAISE=y 668CONFIG_IMA_APPRAISE=y
669CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
657CONFIG_CRYPTO_USER=m 670CONFIG_CRYPTO_USER=m
658# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 671# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
659CONFIG_CRYPTO_PCRYPT=m 672CONFIG_CRYPTO_PCRYPT=m
@@ -664,10 +677,6 @@ CONFIG_CRYPTO_ECDH=m
664CONFIG_CRYPTO_ECRDSA=m 677CONFIG_CRYPTO_ECRDSA=m
665CONFIG_CRYPTO_CHACHA20POLY1305=m 678CONFIG_CRYPTO_CHACHA20POLY1305=m
666CONFIG_CRYPTO_AEGIS128=m 679CONFIG_CRYPTO_AEGIS128=m
667CONFIG_CRYPTO_AEGIS128L=m
668CONFIG_CRYPTO_AEGIS256=m
669CONFIG_CRYPTO_MORUS640=m
670CONFIG_CRYPTO_MORUS1280=m
671CONFIG_CRYPTO_CFB=m 680CONFIG_CRYPTO_CFB=m
672CONFIG_CRYPTO_LRW=m 681CONFIG_CRYPTO_LRW=m
673CONFIG_CRYPTO_PCBC=m 682CONFIG_CRYPTO_PCBC=m
@@ -739,7 +748,6 @@ CONFIG_DEBUG_INFO=y
739CONFIG_DEBUG_INFO_DWARF4=y 748CONFIG_DEBUG_INFO_DWARF4=y
740CONFIG_GDB_SCRIPTS=y 749CONFIG_GDB_SCRIPTS=y
741CONFIG_FRAME_WARN=1024 750CONFIG_FRAME_WARN=1024
742CONFIG_UNUSED_SYMBOLS=y
743CONFIG_HEADERS_INSTALL=y 751CONFIG_HEADERS_INSTALL=y
744CONFIG_HEADERS_CHECK=y 752CONFIG_HEADERS_CHECK=y
745CONFIG_DEBUG_SECTION_MISMATCH=y 753CONFIG_DEBUG_SECTION_MISMATCH=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 8514b8b9500f..25f799849582 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -44,6 +44,7 @@ CONFIG_NUMA=y
 # CONFIG_NUMA_EMU is not set
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
@@ -66,11 +67,12 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG=y
 CONFIG_MODULE_SIG_SHA256=y
+CONFIG_UNUSED_SYMBOLS=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -363,6 +365,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
+# CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -418,6 +421,7 @@ CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_WRITECACHE=m
+CONFIG_DM_CLONE=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -429,6 +433,7 @@ CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
 CONFIG_DM_INTEGRITY=m
 CONFIG_NETDEVICES=y
@@ -484,6 +489,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RDC is not set
@@ -533,16 +539,16 @@ CONFIG_WATCHDOG_CORE=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
-CONFIG_DRM=y
-CONFIG_DRM_VIRTIO_GPU=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
@@ -573,6 +579,8 @@ CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -581,6 +589,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -639,12 +648,15 @@ CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -656,10 +668,6 @@ CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_OFB=m
@@ -727,7 +735,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index be09a208b608..20c51e5d9353 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -61,7 +61,7 @@ CONFIG_RAW_DRIVER=y
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_DIMLIB is not set
+CONFIG_LSM="yama,loadpin,safesetid,integrity"
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index d3f09526ee19..61467b9eecc7 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -41,7 +41,7 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
 #undef __ATOMIC_OP
 
 #define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier)	\
-static inline void op_name(op_type val, op_type *ptr)			\
+static __always_inline void op_name(op_type val, op_type *ptr)		\
 {									\
 	asm volatile(							\
 		op_string "	%[ptr],%[val]\n"			\
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index b8833ac983fa..eb7eed43e780 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -56,7 +56,7 @@ __bitops_byte(unsigned long nr, volatile unsigned long *ptr)
 	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
 }
 
-static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
@@ -77,7 +77,7 @@ static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	__atomic64_or(mask, (long *)addr);
 }
 
-static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
@@ -98,8 +98,8 @@ static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	__atomic64_and(mask, (long *)addr);
 }
 
-static inline void arch_change_bit(unsigned long nr,
-				   volatile unsigned long *ptr)
+static __always_inline void arch_change_bit(unsigned long nr,
+					    volatile unsigned long *ptr)
 {
 	unsigned long *addr = __bitops_word(nr, ptr);
 	unsigned long mask;
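
The `static inline` to `static __always_inline` conversions in this series (atomic_ops.h, bitops.h, cpacf.h, and the headers below) all address the same hazard: once CONFIG_OPTIMIZE_INLINING lets the compiler emit these helpers out of line, asm operands that must be compile-time constants stop compiling. The following is a minimal sketch of that failure mode, using a hypothetical x86 helper rather than the s390 code, and it assumes an optimizing build (e.g. gcc -O2); without forced inlining, or without optimization, the "i" constraint below cannot be satisfied.

#include <stdio.h>

/*
 * "i" demands an immediate operand. That only works when the call is
 * inlined with a constant argument so the compiler can fold "k" into
 * the instruction; __always_inline guarantees the inlining happens
 * even when the compiler would otherwise keep the function out of line.
 */
static inline __attribute__((always_inline)) int add_imm(int x, const int k)
{
	asm("addl %1, %0" : "+r"(x) : "i"(k));
	return x;
}

int main(void)
{
	printf("%d\n", add_imm(26, 16));	/* prints 42 */
	return 0;
}

Dropping the always_inline attribute (or building the kernel helpers with CONFIG_OPTIMIZE_INLINING and an unlucky inlining decision) turns this into an "impossible constraint" build error, which is the regression these hunks prevent.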
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index a092f63aac6a..c0f3bfeddcbe 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -171,7 +171,7 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
  *
  * Returns 1 if @func is available for @opcode, 0 otherwise
  */
-static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
 	register unsigned long r0 asm("0") = 0;	/* query function */
 	register unsigned long r1 asm("1") = (unsigned long) mask;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index ceeb552d3472..819803a97c2b 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -28,6 +28,8 @@ asm(".include \"asm/cpu_mf-insn.h\"\n");
 			  CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA|	\
 			  CPU_MF_INT_SF_LSDA)
 
+#define CPU_MF_SF_RIBM_NOTAV	0x1		/* Sampling unavailable */
+
 /* CPU measurement facility support */
 static inline int cpum_cf_avail(void)
 {
@@ -69,7 +71,8 @@ struct hws_qsi_info_block {	    /* Bit(s) */
 	unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
 	unsigned long tear;	    /* 24-31: TEAR contents		 */
 	unsigned long dear;	    /* 32-39: DEAR contents		 */
-	unsigned int rsvrd0;	    /* 40-43: reserved			 */
+	unsigned int rsvrd0:24;	    /* 40-42: reserved			 */
+	unsigned int ribm:8;	    /* 43: Reserved by IBM		 */
 	unsigned int cpu_speed;     /* 44-47: CPU speed			 */
 	unsigned long long rsvrd1;  /* 48-55: reserved			 */
 	unsigned long long rsvrd2;  /* 56-63: reserved			 */
@@ -220,7 +223,8 @@ enum stcctm_ctr_set {
 	MT_DIAG = 5,
 	MT_DIAG_CLEARING = 9,	/* clears loss-of-MT-ctr-data alert */
 };
-static inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
+
+static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
 {
 	int cc;
 
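
The QSI hunk above splits a four-byte reserved word so that byte 43, "reserved by IBM", becomes addressable, and bit 0x1 in it now signals that sampling is temporarily unavailable (consumed by the perf_cpum_sf.c change further down). A compilable sketch of the same idea follows; note that bit-field allocation order is ABI-dependent (s390 is big-endian), so this struct fragment is illustrative, not a byte-exact replica of the hardware block.

#include <stdio.h>

/* Fragment mirroring the changed field split in hws_qsi_info_block. */
struct qsi_fragment {
	unsigned int rsvrd0 : 24;	/* bytes 40-42: still reserved */
	unsigned int ribm   : 8;	/* byte 43: reserved by IBM */
};

#define CPU_MF_SF_RIBM_NOTAV 0x1	/* sampling temporarily unavailable */

int main(void)
{
	struct qsi_fragment q = { .ribm = CPU_MF_SF_RIBM_NOTAV };

	if (q.ribm & CPU_MF_SF_RIBM_NOTAV)
		printf("sampling temporarily unavailable\n");
	return 0;
}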
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index bb59dd964590..de8f0bf5f238 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -12,8 +12,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
-
-#define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range			free_pgd_range
 #define hugepages_supported()			(MACHINE_HAS_EDAT1)
 
@@ -23,6 +21,13 @@ pte_t huge_ptep_get(pte_t *ptep);
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 			      unsigned long addr, pte_t *ptep);
 
+static inline bool is_hugepage_only_range(struct mm_struct *mm,
+					  unsigned long addr,
+					  unsigned long len)
+{
+	return false;
+}
+
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index e548ec1ec12c..39f747d63758 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -20,7 +20,7 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
 			  ".pushsection __jump_table,\"aw\"\n"
@@ -34,7 +34,7 @@ label:
 	return true;
 }
 
-static inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("0:	brcl 15,%l[label]\n"
 			  ".pushsection __jump_table,\"aw\"\n"
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 36c578c0ff96..5ff98d76a66c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -997,9 +997,9 @@ static inline pte_t pte_mkhuge(pte_t pte)
 #define IPTE_NODAT	0x400
 #define IPTE_GUEST_ASCE	0x800
 
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
-			       unsigned long opt, unsigned long asce,
-			       int local)
+static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
+					unsigned long opt, unsigned long asce,
+					int local)
 {
 	unsigned long pto = (unsigned long) ptep;
 
@@ -1020,8 +1020,8 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
 		: [r1] "a" (pto), [m4] "i" (local) : "memory");
 }
 
-static inline void __ptep_ipte_range(unsigned long address, int nr,
-				     pte_t *ptep, int local)
+static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
+					      pte_t *ptep, int local)
 {
 	unsigned long pto = (unsigned long) ptep;
 
@@ -1269,7 +1269,8 @@ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
 
 #define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
+
+static inline void pte_unmap(pte_t *pte) { }
 
 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 {
@@ -1435,9 +1436,9 @@ static inline void __pmdp_csp(pmd_t *pmdp)
 #define IDTE_NODAT	0x1000
 #define IDTE_GUEST_ASCE	0x2000
 
-static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
-			       unsigned long opt, unsigned long asce,
-			       int local)
+static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
+					unsigned long opt, unsigned long asce,
+					int local)
 {
 	unsigned long sto;
 
@@ -1461,9 +1462,9 @@ static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
 	}
 }
 
-static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
-			       unsigned long opt, unsigned long asce,
-			       int local)
+static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
+					unsigned long opt, unsigned long asce,
+					int local)
 {
 	unsigned long r3o;
 
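
Besides the __always_inline conversions, the hunk above also turns the empty `pte_unmap()` macro into an empty `static inline` function. The win is that the argument is now type-checked and counts as "used". A small sketch of the difference, with hypothetical names standing in for the kernel types:

/* Sketch: empty static inline vs. empty macro for a no-op unmap hook. */
typedef struct { unsigned long val; } pte_t;

static inline void pte_unmap(pte_t *pte) { }	/* argument evaluated and type-checked */

int main(void)
{
	pte_t pte = { 0 };
	pte_t *p = &pte;

	pte_unmap(p);	/* with "#define pte_unmap(pte) do { } while (0)"
			 * p would be set but never used, so builds with
			 * -Wunused-but-set-variable start to warn */
	return 0;
}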
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 78e8a888306d..e3f238e8c611 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -111,7 +111,7 @@ struct qib {
 	/* private: */
 	u8 res[88];
 	/* public: */
-	u8 parm[QDIO_MAX_BUFFERS_PER_Q];
+	u8 parm[128];
 } __attribute__ ((packed, aligned(256)));
 
 /**
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index 5f1fd1581330..2654e348801a 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -390,7 +390,7 @@ static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
 
 	debug_sprintf_event(cf_diag_dbg, 6,
 			    "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
-			    " need %zd rc:%d\n",
+			    " need %zd rc %d\n",
 			    __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
 			    cpuhw->info.csvn, need, rc);
 	return need;
@@ -567,7 +567,7 @@ static int cf_diag_add(struct perf_event *event, int flags)
 	int err = 0;
 
 	debug_sprintf_event(cf_diag_dbg, 5,
-			    "%s event %p cpu %d flags %#x cpuhw:%p\n",
+			    "%s event %p cpu %d flags %#x cpuhw %p\n",
 			    __func__, event, event->cpu, flags, cpuhw);
 
 	if (cpuhw->flags & PMU_F_IN_USE) {
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 544a02e944c6..3d8b12a9a6ff 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -803,6 +803,12 @@ static int __hw_perf_event_init(struct perf_event *event)
 		goto out;
 	}
 
+	if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
+		pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
+		err = -EBUSY;
+		goto out;
+	}
+
 	/* Always enable basic sampling */
 	SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
 
@@ -895,7 +901,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
 
 	/* Check online status of the CPU to which the event is pinned */
 	if (event->cpu >= 0 && !cpu_online(event->cpu))
-	        return -ENODEV;
+		return -ENODEV;
 
 	/* Force reset of idle/hv excludes regardless of what the
 	 * user requested.
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f6db0f1bc867..d047e846e1b9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -332,7 +332,7 @@ static inline int plo_test_bit(unsigned char nr)
 	return cc == 0;
 }
 
-static inline void __insn32_query(unsigned int opcode, u8 query[32])
+static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
 {
 	register unsigned long r0 asm("0") = 0;	/* query function */
 	register unsigned long r1 asm("1") = (unsigned long) query;
@@ -340,9 +340,9 @@ static inline void __insn32_query(unsigned int opcode, u8 query[32])
 	asm volatile(
 		/* Parameter regs are ignored */
 		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
-		: "=m" (*query)
+		:
 		: "d" (r0), "a" (r1), [opc] "i" (opcode)
-		: "cc");
+		: "cc", "memory");
 }
 
 #define INSN_SORTL 0xb938
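
The kvm-s390 hunk replaces an `"=m" (*query)` output with a "memory" clobber: the instruction writes 32 bytes through the pointer in r1, and the compiler only sees the pointer, not the store, so without the clobber it may keep stale reads of the buffer cached. A self-contained x86 sketch of the same rule (not the s390 instruction, just the clobber idiom):

#include <stdio.h>

/*
 * rep stosb fills len bytes at buf with AL. The compiler cannot see
 * this store, so the "memory" clobber is what forces it to reload any
 * cached reads of buf[] afterwards, exactly as in the patched query.
 */
static void fill_ones(unsigned char *buf, unsigned long len)
{
	asm volatile("rep stosb"
		     : "+D"(buf), "+c"(len)
		     : "a"(0xff)
		     : "memory");
}

int main(void)
{
	unsigned char q[32] = { 0 };

	fill_ones(q, sizeof(q));
	printf("%#x\n", q[0]);	/* 0xff */
	return 0;
}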
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 9bdff4defef1..e585a62d6530 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -66,7 +66,7 @@ static inline int clp_get_ilp(unsigned long *ilp)
 /*
  * Call Logical Processor with c=0, the give constant lps and an lpcb request.
  */
-static inline int clp_req(void *data, unsigned int lps)
+static __always_inline int clp_req(void *data, unsigned int lps)
 {
 	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
 	u64 ignored;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 23edf56cf577..50eb430b0ad8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -219,13 +219,6 @@ enum {
 				 PFERR_WRITE_MASK |		\
 				 PFERR_PRESENT_MASK)
 
-/*
- * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
- * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
- * with the SVE bit in EPT PTEs.
- */
-#define SPTE_SPECIAL_MASK (1ULL << 62)
-
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC	0
 /*
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 63316036f85a..9c5029cf6f3f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -485,6 +485,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+		F(CLZERO) | F(XSAVEERPTR) |
 		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
 		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);
 
@@ -618,16 +619,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 	 */
 	case 0x1f:
 	case 0xb: {
-		int i, level_type;
+		int i;
 
-		/* read more entries until level_type is zero */
-		for (i = 1; ; ++i) {
+		/*
+		 * We filled in entry[0] for CPUID(EAX=<function>,
+		 * ECX=00H) above.  If its level type (ECX[15:8]) is
+		 * zero, then the leaf is unimplemented, and we're
+		 * done.  Otherwise, continue to populate entries
+		 * until the level type (ECX[15:8]) of the previously
+		 * added entry is zero.
+		 */
+		for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
 			if (*nent >= maxnent)
 				goto out;
 
-			level_type = entry[i - 1].ecx & 0xff00;
-			if (!level_type)
-				break;
 			do_host_cpuid(&entry[i], function, i);
 			++*nent;
 		}
@@ -969,53 +974,66 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
 
 /*
- * If no match is found, check whether we exceed the vCPU's limit
- * and return the content of the highest valid _standard_ leaf instead.
- * This is to satisfy the CPUID specification.
+ * If the basic or extended CPUID leaf requested is higher than the
+ * maximum supported basic or extended leaf, respectively, then it is
+ * out of range.
  */
-static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
-						  u32 function, u32 index)
+static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
 {
-	struct kvm_cpuid_entry2 *maxlevel;
+	struct kvm_cpuid_entry2 *max;
 
-	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
-	if (!maxlevel || maxlevel->eax >= function)
-		return NULL;
-	if (function & 0x80000000) {
-		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
-		if (!maxlevel)
-			return NULL;
-	}
-	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+	max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+	return max && function <= max->eax;
 }
 
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
 	       u32 *ecx, u32 *edx, bool check_limit)
 {
 	u32 function = *eax, index = *ecx;
-	struct kvm_cpuid_entry2 *best;
-	bool entry_found = true;
-
-	best = kvm_find_cpuid_entry(vcpu, function, index);
-
-	if (!best) {
-		entry_found = false;
-		if (!check_limit)
-			goto out;
+	struct kvm_cpuid_entry2 *entry;
+	struct kvm_cpuid_entry2 *max;
+	bool found;
 
-		best = check_cpuid_limit(vcpu, function, index);
+	entry = kvm_find_cpuid_entry(vcpu, function, index);
+	found = entry;
+	/*
+	 * Intel CPUID semantics treats any query for an out-of-range
+	 * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
+	 * requested. AMD CPUID semantics returns all zeroes for any
+	 * undefined leaf, whether or not the leaf is in range.
+	 */
+	if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
+	    !cpuid_function_in_range(vcpu, function)) {
+		max = kvm_find_cpuid_entry(vcpu, 0, 0);
+		if (max) {
+			function = max->eax;
+			entry = kvm_find_cpuid_entry(vcpu, function, index);
+		}
 	}
-
-out:
-	if (best) {
-		*eax = best->eax;
-		*ebx = best->ebx;
-		*ecx = best->ecx;
-		*edx = best->edx;
-	} else
+	if (entry) {
+		*eax = entry->eax;
+		*ebx = entry->ebx;
+		*ecx = entry->ecx;
+		*edx = entry->edx;
+	} else {
 		*eax = *ebx = *ecx = *edx = 0;
-	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
-	return entry_found;
+		/*
+		 * When leaf 0BH or 1FH is defined, CL is pass-through
+		 * and EDX is always the x2APIC ID, even for undefined
+		 * subleaves. Index 1 will exist iff the leaf is
+		 * implemented, so we pass through CL iff leaf 1
+		 * exists. EDX can be copied from any existing index.
+		 */
+		if (function == 0xb || function == 0x1f) {
+			entry = kvm_find_cpuid_entry(vcpu, function, 1);
+			if (entry) {
+				*ecx = index & 0xff;
+				*edx = entry->edx;
+			}
+		}
+	}
+	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
+	return found;
 }
 EXPORT_SYMBOL_GPL(kvm_cpuid);
 
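
The loop condition the cpuid.c hunk introduces mirrors how topology leaves are enumerated on real hardware: keep reading CPUID.0BH (or 1FH) subleaves until the level type in ECX[15:8] reads zero, with EDX carrying the x2APIC ID in every subleaf. A small x86-only user-space sketch of that walk, using GCC's <cpuid.h> helpers:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, i;

	if (__get_cpuid_max(0, 0) < 0xb) {
		puts("CPUID leaf 0BH not supported");
		return 0;
	}
	for (i = 0; i < 8; i++) {
		__cpuid_count(0xb, i, eax, ebx, ecx, edx);
		if (!(ecx & 0xff00))	/* level type 0: enumeration done */
			break;
		printf("subleaf %u: level type %u, x2APIC id %u\n",
		       i, (ecx >> 8) & 0xff, edx);
	}
	return 0;
}

The kvm_cpuid() change encodes the same hardware behavior for guests: out-of-range leaves redirect to the highest basic leaf on Intel, return zeroes on AMD, and for leaves 0BH/1FH the CL byte and x2APIC ID in EDX stay valid even for undefined subleaves.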
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3a3a6854dcca..87b0fcc23ef8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -66,9 +66,10 @@
 #define X2APIC_BROADCAST		0xFFFFFFFFul
 
 static bool lapic_timer_advance_dynamic __read_mostly;
-#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100
-#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	5000
-#define LAPIC_TIMER_ADVANCE_ADJUST_INIT 1000
+#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
+#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
+#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
+#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
 /* step-by-step approximation to mitigate fluctuation */
 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
 
@@ -1504,8 +1505,8 @@ static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
 		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
 	}
 
-	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_ADJUST_MAX))
-		timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
+	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
+		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
 	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
 }
 
@@ -2302,7 +2303,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
 		     HRTIMER_MODE_ABS_HARD);
 	apic->lapic_timer.timer.function = apic_timer_fn;
 	if (timer_advance_ns == -1) {
-		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
+		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
 		lapic_timer_advance_dynamic = true;
 	} else {
 		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
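
The lapic.c rename separates the cycle-denominated adjustment bounds from the nanosecond-denominated advance value, which the old names conflated. The scheme itself is a step-by-step approximation: nudge the advance by an eighth of the measured error each tick, and reset to the initial value if it runs away past the maximum. A simplified stand-alone sketch of that feedback loop (the constants match the patch; the error source is invented for the demo):

#include <stdio.h>

#define ADJUST_STEP 8		/* step-by-step approximation */
#define NS_INIT     1000
#define NS_MAX      5000

static long adjust(long advance_ns, long error_ns)
{
	advance_ns += error_ns / ADJUST_STEP;	/* one eighth of the error */
	if (advance_ns < 0)
		advance_ns = 0;
	if (advance_ns > NS_MAX)
		advance_ns = NS_INIT;	/* runaway value: fall back to init */
	return advance_ns;
}

int main(void)
{
	long advance = NS_INIT;
	int i;

	for (i = 0; i < 5; i++) {
		advance = adjust(advance, 800);	/* timer fired 800 ns late */
		printf("advance = %ld ns\n", advance);
	}
	return 0;
}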
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5269aa057dfa..24c23c66b226 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -83,7 +83,17 @@ module_param(dbg, bool, 0644);
 #define PTE_PREFETCH_NUM		8
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 10
-#define PT64_SECOND_AVAIL_BITS_SHIFT 52
+#define PT64_SECOND_AVAIL_BITS_SHIFT 54
+
+/*
+ * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
+ * Access Tracking SPTEs.
+ */
+#define SPTE_SPECIAL_MASK (3ULL << 52)
+#define SPTE_AD_ENABLED_MASK (0ULL << 52)
+#define SPTE_AD_DISABLED_MASK (1ULL << 52)
+#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
+#define SPTE_MMIO_MASK (3ULL << 52)
 
 #define PT64_LEVEL_BITS 9
 
@@ -219,12 +229,11 @@ static u64 __read_mostly shadow_present_mask;
 static u64 __read_mostly shadow_me_mask;
 
 /*
- * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
- * Non-present SPTEs with shadow_acc_track_value set are in place for access
- * tracking.
+ * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
+ * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
+ * pages.
  */
 static u64 __read_mostly shadow_acc_track_mask;
-static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;
 
 /*
  * The mask/shift to use for saving the original R/X bits when marking the PTE
@@ -304,7 +313,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
 	BUG_ON((mmio_mask & mmio_value) != mmio_value);
-	shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
+	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
 	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 	shadow_mmio_access_mask = access_mask;
 }
@@ -320,10 +329,27 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
 	return sp->role.ad_disabled;
 }
 
+static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * When using the EPT page-modification log, the GPAs in the log
+	 * would come from L2 rather than L1.  Therefore, we need to rely
+	 * on write protection to record dirty pages.  This also bypasses
+	 * PML, since writes now result in a vmexit.
+	 */
+	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
+}
+
 static inline bool spte_ad_enabled(u64 spte)
 {
 	MMU_WARN_ON(is_mmio_spte(spte));
-	return !(spte & shadow_acc_track_value);
+	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
+}
+
+static inline bool spte_ad_need_write_protect(u64 spte)
+{
+	MMU_WARN_ON(is_mmio_spte(spte));
+	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
 }
 
 static inline u64 spte_shadow_accessed_mask(u64 spte)
@@ -461,7 +487,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 {
 	BUG_ON(!dirty_mask != !accessed_mask);
 	BUG_ON(!accessed_mask && !acc_track_mask);
-	BUG_ON(acc_track_mask & shadow_acc_track_value);
+	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);
 
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
@@ -1589,16 +1615,16 @@ static bool spte_clear_dirty(u64 *sptep)
 
 	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
 
+	MMU_WARN_ON(!spte_ad_enabled(spte));
 	spte &= ~shadow_dirty_mask;
-
 	return mmu_spte_update(sptep, spte);
 }
 
-static bool wrprot_ad_disabled_spte(u64 *sptep)
+static bool spte_wrprot_for_clear_dirty(u64 *sptep)
 {
 	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
 					       (unsigned long *)sptep);
-	if (was_writable)
+	if (was_writable && !spte_ad_enabled(*sptep))
 		kvm_set_pfn_dirty(spte_to_pfn(*sptep));
 
 	return was_writable;
@@ -1617,10 +1643,10 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
 	bool flush = false;
 
 	for_each_rmap_spte(rmap_head, &iter, sptep)
-		if (spte_ad_enabled(*sptep))
-			flush |= spte_clear_dirty(sptep);
+		if (spte_ad_need_write_protect(*sptep))
+			flush |= spte_wrprot_for_clear_dirty(sptep);
 		else
-			flush |= wrprot_ad_disabled_spte(sptep);
+			flush |= spte_clear_dirty(sptep);
 
 	return flush;
 }
@@ -1631,6 +1657,11 @@ static bool spte_set_dirty(u64 *sptep)
 
 	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
 
+	/*
+	 * Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
+	 * do not bother adding back write access to pages marked
+	 * SPTE_AD_WRPROT_ONLY_MASK.
+	 */
 	spte |= shadow_dirty_mask;
 
 	return mmu_spte_update(sptep, spte);
@@ -2622,7 +2653,7 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 	       shadow_user_mask | shadow_x_mask | shadow_me_mask;
 
 	if (sp_ad_disabled(sp))
-		spte |= shadow_acc_track_value;
+		spte |= SPTE_AD_DISABLED_MASK;
 	else
 		spte |= shadow_accessed_mask;
 
@@ -2968,7 +2999,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 	sp = page_header(__pa(sptep));
 	if (sp_ad_disabled(sp))
-		spte |= shadow_acc_track_value;
+		spte |= SPTE_AD_DISABLED_MASK;
+	else if (kvm_vcpu_ad_need_write_protect(vcpu))
+		spte |= SPTE_AD_WRPROT_ONLY_MASK;
 
 	/*
 	 * For the EPT case, shadow_present_mask is 0 if hardware
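
Taken together with the kvm_host.h hunk above, this change moves the "special SPTE" marker from a single flag in bit 62 to a two-bit field in bits 53:52, which is what frees room for the new write-protect-only state used when dirty logging runs for a nested (L2) MMU. A compilable sketch of just the decode logic, using the same mask values as the patch:

#include <stdio.h>

#define SPTE_SPECIAL_MASK	 (3ULL << 52)
#define SPTE_AD_ENABLED_MASK	 (0ULL << 52)
#define SPTE_AD_DISABLED_MASK	 (1ULL << 52)
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
#define SPTE_MMIO_MASK		 (3ULL << 52)

/* Decode the two-bit state the way spte_ad_enabled() and friends do. */
static const char *spte_kind(unsigned long long spte)
{
	switch (spte & SPTE_SPECIAL_MASK) {
	case SPTE_AD_ENABLED_MASK:	return "A/D bits enabled";
	case SPTE_AD_DISABLED_MASK:	return "A/D bits disabled (access tracking)";
	case SPTE_AD_WRPROT_ONLY_MASK:	return "write-protect only (bypass PML)";
	default:			return "MMIO";
	}
}

int main(void)
{
	printf("%s\n", spte_kind(SPTE_AD_WRPROT_ONLY_MASK | 0x1000));
	return 0;
}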
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 41abc62c9a8a..e76eb4f07f6c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2610,7 +2610,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
 
 	/* VM-entry exception error code */
 	if (CC(has_error_code &&
-	       vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)))
+	       vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
 		return -EINVAL;
 
 	/* VM-entry interruption-info field: reserved bits */
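
GENMASK(h, l) expands to a mask with bits h down to l set, so the one-character fix above stops the nested consistency check from rejecting bit 15 of the injected error code, which real hardware accepts; only bits 31:16 remain reserved. A small sketch with a simplified 32-bit GENMASK (the kernel's version in linux/bits.h is wider and type-generic):

#include <stdio.h>

#define GENMASK(h, l) \
	(((~0U) << (l)) & (~0U >> (31 - (h))))

int main(void)
{
	printf("GENMASK(31, 15) = %#010x\n", GENMASK(31, 15));	/* 0xffff8000 */
	printf("GENMASK(31, 16) = %#010x\n", GENMASK(31, 16));	/* 0xffff0000 */
	return 0;
}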
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 4dea0e0e7e39..3e9c059099e9 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -262,6 +262,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct x86_pmu_capability x86_pmu;
 	struct kvm_cpuid_entry2 *entry;
 	union cpuid10_eax eax;
 	union cpuid10_edx edx;
@@ -283,8 +284,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (!pmu->version)
 		return;
 
+	perf_get_x86_pmu_capability(&x86_pmu);
+
 	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
-					 INTEL_PMC_MAX_GENERIC);
+					 x86_pmu.num_counters_gp);
 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
 	pmu->available_event_types = ~entry->ebx &
 			((1ull << eax.split.mask_length) - 1);
@@ -294,7 +297,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	} else {
 		pmu->nr_arch_fixed_counters =
 			min_t(int, edx.split.num_counters_fixed,
-			      INTEL_PMC_MAX_FIXED);
+			      x86_pmu.num_counters_fixed);
 		pmu->counter_bitmask[KVM_PMC_FIXED] =
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d4575ffb3cec..e7970a2e8eae 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -209,6 +209,11 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 	struct page *page;
 	unsigned int i;
 
+	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
+		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+		return 0;
+	}
+
 	if (!enable_ept) {
 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
 		return 0;
@@ -7995,12 +8000,10 @@ static int __init vmx_init(void)
 	 * contain 'auto' which will be turned into the default 'cond'
 	 * mitigation mode.
 	 */
-	if (boot_cpu_has(X86_BUG_L1TF)) {
-		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
-		if (r) {
-			vmx_exit();
-			return r;
-		}
+	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+	if (r) {
+		vmx_exit();
+		return r;
 	}
 
 #ifdef CONFIG_KEXEC_CORE
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0ed07d8d2caa..661e2bf38526 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -92,8 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #endif
 
-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
+#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
 
 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
 				    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
@@ -212,7 +212,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
-	{ "largepages", VM_STAT(lpages) },
+	{ "largepages", VM_STAT(lpages, .mode = 0444) },
 	{ "max_mmu_page_hash_collisions",
 		VM_STAT(max_mmu_page_hash_collisions) },
 	{ NULL }
@@ -885,34 +885,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	unsigned long old_cr4 = kvm_read_cr4(vcpu);
-	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
 	if (cr4 & CR4_RESERVED_BITS)
-		return 1;
+		return -EINVAL;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-		return 1;
+		return -EINVAL;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-		return 1;
+		return -EINVAL;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-		return 1;
+		return -EINVAL;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-		return 1;
+		return -EINVAL;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-		return 1;
+		return -EINVAL;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-		return 1;
+		return -EINVAL;
 
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+		return -EINVAL;
+
+	return 0;
+}
+
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	unsigned long old_cr4 = kvm_read_cr4(vcpu);
+	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+
+	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
 
 	if (is_long_mode(vcpu)) {
@@ -1161,13 +1169,6 @@ static u32 msrs_to_save[] = {
 	MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
 	MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
 	MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
-	MSR_ARCH_PERFMON_PERFCTR0 + 18, MSR_ARCH_PERFMON_PERFCTR0 + 19,
-	MSR_ARCH_PERFMON_PERFCTR0 + 20, MSR_ARCH_PERFMON_PERFCTR0 + 21,
-	MSR_ARCH_PERFMON_PERFCTR0 + 22, MSR_ARCH_PERFMON_PERFCTR0 + 23,
-	MSR_ARCH_PERFMON_PERFCTR0 + 24, MSR_ARCH_PERFMON_PERFCTR0 + 25,
-	MSR_ARCH_PERFMON_PERFCTR0 + 26, MSR_ARCH_PERFMON_PERFCTR0 + 27,
-	MSR_ARCH_PERFMON_PERFCTR0 + 28, MSR_ARCH_PERFMON_PERFCTR0 + 29,
-	MSR_ARCH_PERFMON_PERFCTR0 + 30, MSR_ARCH_PERFMON_PERFCTR0 + 31,
 	MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
@@ -1177,13 +1178,6 @@ static u32 msrs_to_save[] = {
 	MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 18, MSR_ARCH_PERFMON_EVENTSEL0 + 19,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 20, MSR_ARCH_PERFMON_EVENTSEL0 + 21,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 22, MSR_ARCH_PERFMON_EVENTSEL0 + 23,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 24, MSR_ARCH_PERFMON_EVENTSEL0 + 25,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 26, MSR_ARCH_PERFMON_EVENTSEL0 + 27,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 28, MSR_ARCH_PERFMON_EVENTSEL0 + 29,
-	MSR_ARCH_PERFMON_EVENTSEL0 + 30, MSR_ARCH_PERFMON_EVENTSEL0 + 31,
 };
 
 static unsigned num_msrs_to_save;
@@ -5097,13 +5091,14 @@ out:
 
 static void kvm_init_msr_list(void)
 {
+	struct x86_pmu_capability x86_pmu;
 	u32 dummy[2];
 	unsigned i, j;
 
 	BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
 			 "Please update the fixed PMCs in msrs_to_save[]");
-	BUILD_BUG_ON_MSG(INTEL_PMC_MAX_GENERIC != 32,
-			 "Please update the generic perfctr/eventsel MSRs in msrs_to_save[]");
+
+	perf_get_x86_pmu_capability(&x86_pmu);
 
 	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
@@ -5145,6 +5140,15 @@ static void kvm_init_msr_list(void)
 			    intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
 				continue;
 			break;
+		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
+			if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+				continue;
+			break;
+		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
+			if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+				continue;
 		}
 		default:
 			break;
@@ -8714,10 +8718,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-	    (sregs->cr4 & X86_CR4_OSXSAVE))
-		return -EINVAL;
-
 	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
 		/*
 		 * When EFER.LME and CR0.PG are set, the processor is in
@@ -8736,7 +8736,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 			return -EINVAL;
 	}
 
-	return 0;
+	return kvm_valid_cr4(vcpu, sregs->cr4);
 }
 
 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
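
Alongside the CR4 validation refactor, the x86.c change stops advertising perf-counter MSRs the host PMU does not implement: instead of hard-coding 32 PERFCTRn/EVENTSELn entries, the list is filtered against the count reported by perf_get_x86_pmu_capability(). A stand-alone sketch of that filtering follows; the num_counters_gp value of 8 is a made-up stand-in for what the perf subsystem would report on a given host.

#include <stdio.h>

#define INTEL_PMC_MAX_GENERIC     32
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int num_counters_gp = 8;	/* stand-in for the probed PMU capability */
	unsigned int msr;
	int i;

	for (i = 0; i < 18; i++) {
		msr = MSR_ARCH_PERFMON_PERFCTR0 + i;
		/* same test as the new case in kvm_init_msr_list() */
		if ((int)(msr - MSR_ARCH_PERFMON_PERFCTR0) >=
		    min_int(INTEL_PMC_MAX_GENERIC, num_counters_gp))
			continue;	/* not backed by a real counter */
		printf("exposing MSR %#x\n", msr);
	}
	return 0;
}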
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 0d3365cb64de..a04551ee5568 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -57,19 +57,7 @@ static efi_system_table_t __init *xen_efi_probe(void)
 		return NULL;
 
 	/* Here we know that Xen runs on EFI platform. */
-
-	efi.get_time = xen_efi_get_time;
-	efi.set_time = xen_efi_set_time;
-	efi.get_wakeup_time = xen_efi_get_wakeup_time;
-	efi.set_wakeup_time = xen_efi_set_wakeup_time;
-	efi.get_variable = xen_efi_get_variable;
-	efi.get_next_variable = xen_efi_get_next_variable;
-	efi.set_variable = xen_efi_set_variable;
-	efi.query_variable_info = xen_efi_query_variable_info;
-	efi.update_capsule = xen_efi_update_capsule;
-	efi.query_capsule_caps = xen_efi_query_capsule_caps;
-	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
-	efi.reset_system = xen_efi_reset_system;
+	xen_efi_runtime_setup();
 
 	efi_systab_xen.tables = info->cfg.addr;
 	efi_systab_xen.nr_tables = info->cfg.nent;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6e3b15f70cd7..ec791156e9cc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1992,10 +1992,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		/* bypass scheduler for flush rq */
 		blk_insert_flush(rq);
 		blk_mq_run_hw_queue(data.hctx, true);
-	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
+	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+			    !blk_queue_nonrot(q))) {
 		/*
 		 * Use plugging if we have a ->commit_rqs() hook as well, as
 		 * we know the driver uses bd->last in a smart fashion.
+		 *
+		 * Use normal plugging if this disk is slow HDD, as sequential
+		 * IO may benefit a lot from plug merging.
 		 */
 		unsigned int request_count = plug->rq_count;
 		struct request *last = NULL;
@@ -2012,6 +2016,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 
 		blk_add_rq_to_plug(plug, rq);
+	} else if (q->elevator) {
+		blk_mq_sched_insert_request(rq, false, true, true);
 	} else if (plug && !blk_queue_nomerges(q)) {
 		/*
 		 * We do limited plugging. If the bio can be merged, do that.
@@ -2035,8 +2041,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
 		}
-	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
-			!data.hctx->dispatch_busy)) {
+	} else if ((q->nr_hw_queues > 1 && is_sync) ||
+			!data.hctx->dispatch_busy) {
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
 		blk_mq_sched_insert_request(rq, false, true, true);
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 4e95a9792162..b4c761973ac1 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -129,7 +129,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
 		{ 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x84, 0x01 },
 
 	/* tables */
-	[OPAL_TABLE_TABLE]
+	[OPAL_TABLE_TABLE] =
 		{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01 },
 	[OPAL_LOCKINGRANGE_GLOBAL] =
 		{ 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, 0x01 },
@@ -372,8 +372,8 @@ static void check_geometry(struct opal_dev *dev, const void *data)
 {
 	const struct d0_geometry_features *geo = data;
 
-	dev->align = geo->alignment_granularity;
-	dev->lowest_lba = geo->lowest_aligned_lba;
+	dev->align = be64_to_cpu(geo->alignment_granularity);
+	dev->lowest_lba = be64_to_cpu(geo->lowest_aligned_lba);
 }
 
 static int execute_step(struct opal_dev *dev,
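
The sed-opal fix is a classic endianness bug: the TCG Opal level-0 discovery fields arrive big-endian on the wire, so on a little-endian host they must pass through be64_to_cpu() before use. A user-space sketch of what that conversion does, with a hand-rolled byte-order helper standing in for the kernel's:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Portable equivalent of the kernel's be64_to_cpu(). */
static uint64_t be64_to_host(uint64_t v)
{
	const unsigned char *p = (const unsigned char *)&v;

	return ((uint64_t)p[0] << 56) | ((uint64_t)p[1] << 48) |
	       ((uint64_t)p[2] << 40) | ((uint64_t)p[3] << 32) |
	       ((uint64_t)p[4] << 24) | ((uint64_t)p[5] << 16) |
	       ((uint64_t)p[6] << 8)  |  (uint64_t)p[7];
}

int main(void)
{
	/* an alignment granularity of 8 as it appears in a raw response */
	unsigned char raw[8] = { 0, 0, 0, 0, 0, 0, 0, 8 };
	uint64_t wire;

	memcpy(&wire, raw, sizeof(wire));
	printf("raw %#llx -> host %llu\n",
	       (unsigned long long)wire,
	       (unsigned long long)be64_to_host(wire));
	return 0;
}

On a little-endian machine the raw value reads as 0x0800000000000000, which is exactly the kind of garbage alignment the unfixed code stored in dev->align.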
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1410fa893653..f6f77eaa7217 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_write_cache(lo->lo_queue, true, false);
 
+	if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
+		/* In case of direct I/O, match underlying block size */
+		unsigned short bsize = bdev_logical_block_size(
+			inode->i_sb->s_bdev);
+
+		blk_queue_logical_block_size(lo->lo_queue, bsize);
+		blk_queue_physical_block_size(lo->lo_queue, bsize);
+		blk_queue_io_min(lo->lo_queue, bsize);
+	}
+
 	loop_update_rotational(lo);
 	loop_update_dio(lo);
 	set_capacity(lo->lo_disk, size);
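
The block size that loop_set_fd() now propagates from the backing device can be checked from user space with the BLKSSZGET ioctl; after this patch, a loop device configured for direct I/O over, say, a 4096-byte-sector disk should report 4096 here instead of the old default of 512. A small sketch of that check (pass a device node such as /dev/loop0 as the argument):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd, ssz;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/loopN\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKSSZGET, &ssz) < 0) {	/* logical block size */
		perror("BLKSSZGET");
		close(fd);
		return 1;
	}
	printf("%s: logical block size %d bytes\n", argv[1], ssz);
	close(fd);
	return 0;
}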
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 9207ac291341..ad50efb470aa 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -280,9 +280,6 @@ static int sysc_get_one_clock(struct sysc *ddata, const char *name)
280 280
281 ddata->clocks[index] = devm_clk_get(ddata->dev, name); 281 ddata->clocks[index] = devm_clk_get(ddata->dev, name);
282 if (IS_ERR(ddata->clocks[index])) { 282 if (IS_ERR(ddata->clocks[index])) {
283 if (PTR_ERR(ddata->clocks[index]) == -ENOENT)
284 return 0;
285
286 dev_err(ddata->dev, "clock get error for %s: %li\n", 283 dev_err(ddata->dev, "clock get error for %s: %li\n",
287 name, PTR_ERR(ddata->clocks[index])); 284 name, PTR_ERR(ddata->clocks[index]));
288 285
@@ -357,7 +354,7 @@ static int sysc_get_clocks(struct sysc *ddata)
357 continue; 354 continue;
358 355
359 error = sysc_get_one_clock(ddata, name); 356 error = sysc_get_one_clock(ddata, name);
360 if (error && error != -ENOENT) 357 if (error)
361 return error; 358 return error;
362 } 359 }
363 360
@@ -1632,17 +1629,19 @@ static int sysc_init_module(struct sysc *ddata)
1632 if (error) 1629 if (error)
1633 return error; 1630 return error;
1634 1631
1635 if (manage_clocks) { 1632 sysc_clkdm_deny_idle(ddata);
1636 sysc_clkdm_deny_idle(ddata);
1637 1633
1638 error = sysc_enable_opt_clocks(ddata); 1634 /*
1639 if (error) 1635 * Always enable clocks. The bootloader may or may not have enabled
1640 return error; 1636 * the related clocks.
1637 */
1638 error = sysc_enable_opt_clocks(ddata);
1639 if (error)
1640 return error;
1641 1641
1642 error = sysc_enable_main_clocks(ddata); 1642 error = sysc_enable_main_clocks(ddata);
1643 if (error) 1643 if (error)
1644 goto err_opt_clocks; 1644 goto err_opt_clocks;
1645 }
1646 1645
1647 if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) { 1646 if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
1648 error = sysc_rstctrl_reset_deassert(ddata, true); 1647 error = sysc_rstctrl_reset_deassert(ddata, true);
@@ -1660,7 +1659,7 @@ static int sysc_init_module(struct sysc *ddata)
1660 goto err_main_clocks; 1659 goto err_main_clocks;
1661 } 1660 }
1662 1661
1663 if (!ddata->legacy_mode && manage_clocks) { 1662 if (!ddata->legacy_mode) {
1664 error = sysc_enable_module(ddata->dev); 1663 error = sysc_enable_module(ddata->dev);
1665 if (error) 1664 if (error)
1666 goto err_main_clocks; 1665 goto err_main_clocks;
@@ -1677,6 +1676,7 @@ err_main_clocks:
1677 if (manage_clocks) 1676 if (manage_clocks)
1678 sysc_disable_main_clocks(ddata); 1677 sysc_disable_main_clocks(ddata);
1679err_opt_clocks: 1678err_opt_clocks:
1679 /* No re-enable of clockdomain autoidle to prevent module autoidle */
1680 if (manage_clocks) { 1680 if (manage_clocks) {
1681 sysc_disable_opt_clocks(ddata); 1681 sysc_disable_opt_clocks(ddata);
1682 sysc_clkdm_allow_idle(ddata); 1682 sysc_clkdm_allow_idle(ddata);
@@ -2357,6 +2357,27 @@ static void ti_sysc_idle(struct work_struct *work)
2357 2357
2358 ddata = container_of(work, struct sysc, idle_work.work); 2358 ddata = container_of(work, struct sysc, idle_work.work);
2359 2359
2360 /*
2361 * One time decrement of clock usage counts if left on from init.
2362 * Note that we disable opt clocks unconditionally in this case
2363 * as they are enabled unconditionally during init without
2364 * considering sysc_opt_clks_needed() at that point.
2365 */
2366 if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
2367 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
2368 sysc_disable_main_clocks(ddata);
2369 sysc_disable_opt_clocks(ddata);
2370 sysc_clkdm_allow_idle(ddata);
2371 }
2372
2373 /* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
2374 if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
2375 return;
2376
2377 /*
2378 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
2379 * and SYSC_QUIRK_NO_RESET_ON_INIT
2380 */
2360 if (pm_runtime_active(ddata->dev)) 2381 if (pm_runtime_active(ddata->dev))
2361 pm_runtime_put_sync(ddata->dev); 2382 pm_runtime_put_sync(ddata->dev);
2362} 2383}
@@ -2445,7 +2466,8 @@ static int sysc_probe(struct platform_device *pdev)
2445 INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle); 2466 INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
2446 2467
2447 /* At least earlycon won't survive without deferred idle */ 2468 /* At least earlycon won't survive without deferred idle */
2448 if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE_ON_INIT | 2469 if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
2470 SYSC_QUIRK_NO_IDLE_ON_INIT |
2449 SYSC_QUIRK_NO_RESET_ON_INIT)) { 2471 SYSC_QUIRK_NO_RESET_ON_INIT)) {
2450 schedule_delayed_work(&ddata->idle_work, 3000); 2472 schedule_delayed_work(&ddata->idle_work, 3000);
2451 } else { 2473 } else {
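
The ti-sysc changes above enable main and optional clocks unconditionally during init (the bootloader may have left them in either state) and balance the references exactly once from the deferred idle work, keyed off SYSC_QUIRK_NO_IDLE and SYSC_QUIRK_NO_IDLE_ON_INIT. A stripped-down sketch of that "enable eagerly, release once from deferred work" shape; the my_* names and quirk bits are made up, while the workqueue and PM runtime calls are the real kernel APIs (note that schedule_delayed_work() takes its delay in jiffies):

#define MY_QUIRK_NO_IDLE		BIT(0)
#define MY_QUIRK_NO_IDLE_ON_INIT	BIT(1)

struct my_dev {
	struct device *dev;
	struct delayed_work idle_work;
	unsigned long quirks;
};

static void my_idle_work_fn(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev,
					 idle_work.work);

	/* One-time decrement of the init-time clock references;
	 * my_disable_clocks() is a placeholder.
	 */
	if (md->quirks & (MY_QUIRK_NO_IDLE | MY_QUIRK_NO_IDLE_ON_INIT))
		my_disable_clocks(md);

	/* NO_IDLE keeps a permanent PM runtime usage count. */
	if (md->quirks & MY_QUIRK_NO_IDLE)
		return;

	if (pm_runtime_active(md->dev))
		pm_runtime_put_sync(md->dev);
}

static int my_init(struct my_dev *md)
{
	my_enable_clocks(md);	/* unconditional: boot state unknown */
	INIT_DELAYED_WORK(&md->idle_work, my_idle_work_fn);
	schedule_delayed_work(&md->idle_work, msecs_to_jiffies(3000));
	return 0;
}
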
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d3beed084c0a..de434feb873a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1732,6 +1732,56 @@ void get_random_bytes(void *buf, int nbytes)
1732} 1732}
1733EXPORT_SYMBOL(get_random_bytes); 1733EXPORT_SYMBOL(get_random_bytes);
1734 1734
1735
1736/*
1737 * Each time the timer fires, we expect that we got an unpredictable
1738 * jump in the cycle counter. Even if the timer is running on another
1739 * CPU, the timer activity will be touching the stack of the CPU that is
1740 * generating entropy..
1741 *
1742 * Note that we don't re-arm the timer in the timer itself - we are
1743 * happy to be scheduled away, since that just makes the load more
1744 * complex, but we do not want the timer to keep ticking unless the
1745 * entropy loop is running.
1746 *
1747 * So the re-arming always happens in the entropy loop itself.
1748 */
1749static void entropy_timer(struct timer_list *t)
1750{
1751 credit_entropy_bits(&input_pool, 1);
1752}
1753
1754/*
1755 * If we have an actual cycle counter, see if we can
1756 * generate enough entropy with timing noise
1757 */
1758static void try_to_generate_entropy(void)
1759{
1760 struct {
1761 unsigned long now;
1762 struct timer_list timer;
1763 } stack;
1764
1765 stack.now = random_get_entropy();
1766
1767 /* Slow counter - or none. Don't even bother */
1768 if (stack.now == random_get_entropy())
1769 return;
1770
1771 timer_setup_on_stack(&stack.timer, entropy_timer, 0);
1772 while (!crng_ready()) {
1773 if (!timer_pending(&stack.timer))
1774 mod_timer(&stack.timer, jiffies+1);
1775 mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
1776 schedule();
1777 stack.now = random_get_entropy();
1778 }
1779
1780 del_timer_sync(&stack.timer);
1781 destroy_timer_on_stack(&stack.timer);
1782 mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
1783}
1784
1735/* 1785/*
1736 * Wait for the urandom pool to be seeded and thus guaranteed to supply 1786 * Wait for the urandom pool to be seeded and thus guaranteed to supply
1737 * cryptographically secure random numbers. This applies to: the /dev/urandom 1787 * cryptographically secure random numbers. This applies to: the /dev/urandom
@@ -1746,7 +1796,17 @@ int wait_for_random_bytes(void)
1746{ 1796{
1747 if (likely(crng_ready())) 1797 if (likely(crng_ready()))
1748 return 0; 1798 return 0;
1749 return wait_event_interruptible(crng_init_wait, crng_ready()); 1799
1800 do {
1801 int ret;
1802 ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
1803 if (ret)
1804 return ret > 0 ? 0 : ret;
1805
1806 try_to_generate_entropy();
1807 } while (!crng_ready());
1808
1809 return 0;
1750} 1810}
1751EXPORT_SYMBOL(wait_for_random_bytes); 1811EXPORT_SYMBOL(wait_for_random_bytes);
1752 1812
@@ -2460,4 +2520,4 @@ void add_bootloader_randomness(const void *buf, unsigned int size)
2460 else 2520 else
2461 add_device_randomness(buf, size); 2521 add_device_randomness(buf, size);
2462} 2522}
2463EXPORT_SYMBOL_GPL(add_bootloader_randomness); \ No newline at end of file 2523EXPORT_SYMBOL_GPL(add_bootloader_randomness);
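
try_to_generate_entropy() above bootstraps the CRNG when a caller would otherwise block forever at boot: it arms a one-jiffy timer on the stack and mixes the cycle counter into the input pool from the interrupted loop, crediting one bit per timer tick, on the theory that timer-interrupt jitter measured against a fast cycle counter is unpredictable. wait_for_random_bytes() correspondingly switches from an indefinite sleep to a one-second timed wait interleaved with active generation. A rough userspace analogue of the jitter idea (not the kernel implementation, and not a way to seed a real CSPRNG):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t pool = 0;

	for (int i = 0; i < 1 << 16; i++) {
		uint64_t t = now_ns();

		/* Fold in the low, jittery bits; the kernel mixes into
		 * input_pool with a proper mixing function instead.
		 */
		pool = pool * 6364136223846793005ull + (t & 0xff);
	}
	printf("pool: %016llx\n", (unsigned long long)pool);
	return 0;
}
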
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index d8c2bd4391d0..11ff701ff4bb 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -25,7 +25,9 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
25 25
26 struct clock_event_device *clkevt = &to->clkevt; 26 struct clock_event_device *clkevt = &to->clkevt;
27 27
28 of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) : 28 if (of_irq->percpu)
29 free_percpu_irq(of_irq->irq, clkevt);
30 else
29 free_irq(of_irq->irq, clkevt); 31 free_irq(of_irq->irq, clkevt);
30} 32}
31 33
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 64cc81915581..ab42c21c5517 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -150,7 +150,7 @@ static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
150 dom = t->tx.buf; 150 dom = t->tx.buf;
151 dom->domain_id = cpu_to_le32(domain); 151 dom->domain_id = cpu_to_le32(domain);
152 dom->flags = cpu_to_le32(flags); 152 dom->flags = cpu_to_le32(flags);
153 dom->domain_id = cpu_to_le32(state); 153 dom->reset_state = cpu_to_le32(state);
154 154
155 if (rdom->async_reset) 155 if (rdom->async_reset)
156 ret = scmi_do_xfer_with_response(handle, t); 156 ret = scmi_do_xfer_with_response(handle, t);
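
The one-line SCMI fix above corrects a copy-paste slip: the reset state was written into domain_id a second time, clobbering the target domain and never transmitting the state. A sketch of the intended message layout and assignments; the field names come from the hunk, and the struct shape shown here is an assumption:

/* Assumed wire layout for the RESET command payload. */
struct scmi_msg_reset_domain_reset {
	__le32 domain_id;
	__le32 flags;
	__le32 reset_state;
};

/* Each field gets its own value; before the fix, state overwrote
 * domain_id and reset_state went out uninitialized.
 */
dom->domain_id = cpu_to_le32(domain);
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
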
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 42e2c1f57152..00962a659009 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
54 amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ 54 amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
55 amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ 55 amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
56 amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ 56 amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
57 amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o 57 amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
58 58
59amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o 59amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
60 60
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index eba42c752bca..82155ac3288a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
189 u32 val = 0; 189 u32 val = 0;
190 u32 count = 0; 190 u32 count = 0;
191 struct device *dev; 191 struct device *dev;
192 struct i2s_platform_data *i2s_pdata; 192 struct i2s_platform_data *i2s_pdata = NULL;
193 193
194 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 194 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
195 195
@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
231 adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), 231 adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
232 GFP_KERNEL); 232 GFP_KERNEL);
233 233
234 if (adev->acp.acp_cell == NULL) 234 if (adev->acp.acp_cell == NULL) {
235 return -ENOMEM; 235 r = -ENOMEM;
236 goto failure;
237 }
236 238
237 adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); 239 adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
238 if (adev->acp.acp_res == NULL) { 240 if (adev->acp.acp_res == NULL) {
239 kfree(adev->acp.acp_cell); 241 r = -ENOMEM;
240 return -ENOMEM; 242 goto failure;
241 } 243 }
242 244
243 i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); 245 i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
244 if (i2s_pdata == NULL) { 246 if (i2s_pdata == NULL) {
245 kfree(adev->acp.acp_res); 247 r = -ENOMEM;
246 kfree(adev->acp.acp_cell); 248 goto failure;
247 return -ENOMEM;
248 } 249 }
249 250
250 switch (adev->asic_type) { 251 switch (adev->asic_type) {
@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
341 r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 342 r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
342 ACP_DEVS); 343 ACP_DEVS);
343 if (r) 344 if (r)
344 return r; 345 goto failure;
345 346
346 for (i = 0; i < ACP_DEVS ; i++) { 347 for (i = 0; i < ACP_DEVS ; i++) {
347 dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); 348 dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
348 r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); 349 r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
349 if (r) { 350 if (r) {
350 dev_err(dev, "Failed to add dev to genpd\n"); 351 dev_err(dev, "Failed to add dev to genpd\n");
351 return r; 352 goto failure;
352 } 353 }
353 } 354 }
354 355
@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
367 break; 368 break;
368 if (--count == 0) { 369 if (--count == 0) {
369 dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); 370 dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
370 return -ETIMEDOUT; 371 r = -ETIMEDOUT;
372 goto failure;
371 } 373 }
372 udelay(100); 374 udelay(100);
373 } 375 }
@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
384 break; 386 break;
385 if (--count == 0) { 387 if (--count == 0) {
386 dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); 388 dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
387 return -ETIMEDOUT; 389 r = -ETIMEDOUT;
390 goto failure;
388 } 391 }
389 udelay(100); 392 udelay(100);
390 } 393 }
@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
393 val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; 396 val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
394 cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); 397 cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
395 return 0; 398 return 0;
399
400failure:
401 kfree(i2s_pdata);
402 kfree(adev->acp.acp_res);
403 kfree(adev->acp.acp_cell);
404 kfree(adev->acp.acp_genpd);
405 return r;
396} 406}
397 407
398/** 408/**
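
The acp_hw_init() rework above replaces piecemeal kfree-then-return sequences with a single failure label, which plugs the paths that previously leaked (the mfd_add_hotplug_devices, genpd, and reset-timeout errors returned without freeing anything) and stays correct because kfree(NULL) is a no-op, so i2s_pdata can be freed even when failure happens before its allocation; hence the new = NULL initialization. The general shape, with my_* names standing in for the driver specifics:

static int my_init(struct my_ctx *c)
{
	void *a = NULL, *b = NULL;	/* NULL so one label frees all */
	int r;

	a = kcalloc(4, sizeof(u32), GFP_KERNEL);
	if (!a) {
		r = -ENOMEM;
		goto failure;
	}

	b = kcalloc(4, sizeof(u32), GFP_KERNEL);
	if (!b) {
		r = -ENOMEM;
		goto failure;
	}

	r = my_hw_bringup(c, a, b);	/* placeholder for the real work */
	if (r)
		goto failure;

	return 0;

failure:
	kfree(b);	/* kfree(NULL) is safe */
	kfree(a);
	return r;
}
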
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 264677ab248a..6f8aaf655a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -81,9 +81,10 @@
81 * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS. 81 * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
82 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS. 82 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
83 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches 83 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
84 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
84 */ 85 */
85#define KMS_DRIVER_MAJOR 3 86#define KMS_DRIVER_MAJOR 3
86#define KMS_DRIVER_MINOR 34 87#define KMS_DRIVER_MINOR 35
87#define KMS_DRIVER_PATCHLEVEL 0 88#define KMS_DRIVER_PATCHLEVEL 0
88 89
89#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256 90#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 554a59b3c4a6..6ee4021910e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -165,6 +165,7 @@ struct amdgpu_gfx_config {
165 uint32_t num_sc_per_sh; 165 uint32_t num_sc_per_sh;
166 uint32_t num_packer_per_sc; 166 uint32_t num_packer_per_sc;
167 uint32_t pa_sc_tile_steering_override; 167 uint32_t pa_sc_tile_steering_override;
168 uint64_t tcc_disabled_mask;
168}; 169};
169 170
170struct amdgpu_cu_info { 171struct amdgpu_cu_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index f6147528be64..f2c097983f48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -787,6 +787,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
787 dev_info.pa_sc_tile_steering_override = 787 dev_info.pa_sc_tile_steering_override =
788 adev->gfx.config.pa_sc_tile_steering_override; 788 adev->gfx.config.pa_sc_tile_steering_override;
789 789
790 dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
791
790 return copy_to_user(out, &dev_info, 792 return copy_to_user(out, &dev_info,
791 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; 793 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
792 } 794 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e2fb141ff2e5..5251352f5922 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -603,14 +603,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
603 struct ttm_bo_global *glob = adev->mman.bdev.glob; 603 struct ttm_bo_global *glob = adev->mman.bdev.glob;
604 struct amdgpu_vm_bo_base *bo_base; 604 struct amdgpu_vm_bo_base *bo_base;
605 605
606#if 0
607 if (vm->bulk_moveable) { 606 if (vm->bulk_moveable) {
608 spin_lock(&glob->lru_lock); 607 spin_lock(&glob->lru_lock);
609 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); 608 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
610 spin_unlock(&glob->lru_lock); 609 spin_unlock(&glob->lru_lock);
611 return; 610 return;
612 } 611 }
613#endif
614 612
615 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); 613 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
616 614
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 638c821611ab..957811b73672 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1691,6 +1691,17 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
1691 } 1691 }
1692} 1692}
1693 1693
1694static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
1695{
1696 /* TCCs are global (not instanced). */
1697 uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
1698 RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
1699
1700 adev->gfx.config.tcc_disabled_mask =
1701 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
1702 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
1703}
1704
1694static void gfx_v10_0_constants_init(struct amdgpu_device *adev) 1705static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
1695{ 1706{
1696 u32 tmp; 1707 u32 tmp;
@@ -1702,6 +1713,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
1702 1713
1703 gfx_v10_0_setup_rb(adev); 1714 gfx_v10_0_setup_rb(adev);
1704 gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info); 1715 gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
1716 gfx_v10_0_get_tcc_info(adev);
1705 adev->gfx.config.pa_sc_tile_steering_override = 1717 adev->gfx.config.pa_sc_tile_steering_override =
1706 gfx_v10_0_init_pa_sc_tile_steering_override(adev); 1718 gfx_v10_0_init_pa_sc_tile_steering_override(adev);
1707 1719
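
gfx_v10_0_get_tcc_info() above folds the TCC harvesting state into one mask: the TCC_DISABLE field in the low 16 bits and the HI_TCC_DISABLE field shifted into bits 16-31, ORing in the USER register as well. The result is exported to userspace through the new tcc_disabled_mask field (see the amdgpu_kms.c and amdgpu_drv.c hunks, which bump the KMS driver to 3.35.0). A plain-C sketch of the field packing; the example register value and the 16-bit field positions here are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

/* (reg >> shift) & mask — the same job REG_GET_FIELD() does with
 * named register fields in the driver.
 */
static uint32_t get_field(uint32_t reg, unsigned int shift, uint32_t mask)
{
	return (reg >> shift) & mask;
}

int main(void)
{
	uint32_t tcc_disable = 0x00050003;	/* example value */
	uint64_t mask = get_field(tcc_disable, 0, 0xffff) |
			((uint64_t)get_field(tcc_disable, 16, 0xffff) << 16);

	printf("tcc_disabled_mask = 0x%llx\n", (unsigned long long)mask);
	return 0;
}
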
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 85393a99a848..de9b995b65b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -317,10 +317,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
317 struct smu_context *smu = &adev->smu; 317 struct smu_context *smu = &adev->smu;
318 318
319 if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 319 if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
320 amdgpu_inc_vram_lost(adev); 320 if (!adev->in_suspend)
321 amdgpu_inc_vram_lost(adev);
321 ret = smu_baco_reset(smu); 322 ret = smu_baco_reset(smu);
322 } else { 323 } else {
323 amdgpu_inc_vram_lost(adev); 324 if (!adev->in_suspend)
325 amdgpu_inc_vram_lost(adev);
324 ret = nv_asic_mode1_reset(adev); 326 ret = nv_asic_mode1_reset(adev);
325 } 327 }
326 328
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f70658a536a9..f8ab80c8801b 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -558,12 +558,14 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
558{ 558{
559 switch (soc15_asic_reset_method(adev)) { 559 switch (soc15_asic_reset_method(adev)) {
560 case AMD_RESET_METHOD_BACO: 560 case AMD_RESET_METHOD_BACO:
561 amdgpu_inc_vram_lost(adev); 561 if (!adev->in_suspend)
562 amdgpu_inc_vram_lost(adev);
562 return soc15_asic_baco_reset(adev); 563 return soc15_asic_baco_reset(adev);
563 case AMD_RESET_METHOD_MODE2: 564 case AMD_RESET_METHOD_MODE2:
564 return soc15_mode2_reset(adev); 565 return soc15_mode2_reset(adev);
565 default: 566 default:
566 amdgpu_inc_vram_lost(adev); 567 if (!adev->in_suspend)
568 amdgpu_inc_vram_lost(adev);
567 return soc15_asic_mode1_reset(adev); 569 return soc15_asic_mode1_reset(adev);
568 } 570 }
569} 571}
@@ -771,8 +773,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
771#if defined(CONFIG_DRM_AMD_DC) 773#if defined(CONFIG_DRM_AMD_DC)
772 else if (amdgpu_device_has_dc_support(adev)) 774 else if (amdgpu_device_has_dc_support(adev))
773 amdgpu_device_ip_block_add(adev, &dm_ip_block); 775 amdgpu_device_ip_block_add(adev, &dm_ip_block);
774#else
775# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
776#endif 776#endif
777 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 777 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
778 break; 778 break;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8cab6da512a0..a52f0b13a2c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2385,8 +2385,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2385 2385
2386 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 2386 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
2387 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; 2387 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
2388 if (adev->asic_type == CHIP_RENOIR)
2389 dm->dc->debug.disable_stutter = true;
2390 2388
2391 return 0; 2389 return 0;
2392fail: 2390fail:
@@ -6019,7 +6017,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6019 struct drm_crtc *crtc; 6017 struct drm_crtc *crtc;
6020 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 6018 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6021 int i; 6019 int i;
6020#ifdef CONFIG_DEBUG_FS
6022 enum amdgpu_dm_pipe_crc_source source; 6021 enum amdgpu_dm_pipe_crc_source source;
6022#endif
6023 6023
6024 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 6024 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6025 new_crtc_state, i) { 6025 new_crtc_state, i) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 1787b9bf800a..76d54885374a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
668 return &clk_src->base; 668 return &clk_src->base;
669 } 669 }
670 670
671 kfree(clk_src);
671 BREAK_TO_DEBUGGER(); 672 BREAK_TO_DEBUGGER();
672 return NULL; 673 return NULL;
673} 674}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 318e9c2e2ca8..89620adc81d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
714 return &clk_src->base; 714 return &clk_src->base;
715 } 715 }
716 716
717 kfree(clk_src);
717 BREAK_TO_DEBUGGER(); 718 BREAK_TO_DEBUGGER();
718 return NULL; 719 return NULL;
719} 720}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 83e1878161c9..21a657e79306 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
687 return &clk_src->base; 687 return &clk_src->base;
688 } 688 }
689 689
690 kfree(clk_src);
690 BREAK_TO_DEBUGGER(); 691 BREAK_TO_DEBUGGER();
691 return NULL; 692 return NULL;
692} 693}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 8b85e5274bba..7c52f7f9196c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
500 return &clk_src->base; 500 return &clk_src->base;
501 } 501 }
502 502
503 kfree(clk_src);
503 BREAK_TO_DEBUGGER(); 504 BREAK_TO_DEBUGGER();
504 return NULL; 505 return NULL;
505} 506}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 4625df9f9fd2..643ccb0ade00 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
701 return &clk_src->base; 701 return &clk_src->base;
702 } 702 }
703 703
704 kfree(clk_src);
704 BREAK_TO_DEBUGGER(); 705 BREAK_TO_DEBUGGER();
705 return NULL; 706 return NULL;
706} 707}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 59305e411a66..1599bb971111 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
786 return &clk_src->base; 786 return &clk_src->base;
787 } 787 }
788 788
789 kfree(clk_src);
789 BREAK_TO_DEBUGGER(); 790 BREAK_TO_DEBUGGER();
790 return NULL; 791 return NULL;
791} 792}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index b4e3ce22ed52..5a2763daff4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1077,6 +1077,7 @@ struct clock_source *dcn20_clock_source_create(
1077 return &clk_src->base; 1077 return &clk_src->base;
1078 } 1078 }
1079 1079
1080 kfree(clk_src);
1080 BREAK_TO_DEBUGGER(); 1081 BREAK_TO_DEBUGGER();
1081 return NULL; 1082 return NULL;
1082} 1083}
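
The repeated one-line fixes across the dce100/dce110/dce112/dce120/dce80/dcn10/dcn20 clock_source_create() variants all close the same leak: when construction fails after the kzalloc, the function used to return NULL with the allocation still live. The fixed constructor shape, with my_* names standing in for the DC specifics:

static struct my_clk_src *my_clock_source_create(struct my_ctx *ctx)
{
	struct my_clk_src *clk_src;

	clk_src = kzalloc(sizeof(*clk_src), GFP_KERNEL);
	if (!clk_src)
		return NULL;

	if (my_clk_src_construct(clk_src, ctx))
		return clk_src;		/* constructed OK */

	kfree(clk_src);			/* the previously missing free */
	BREAK_TO_DEBUGGER();		/* matches the DC error idiom */
	return NULL;
}
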
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 8cd9de8b1a7a..ef673bffc241 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -3,7 +3,17 @@
3 3
4DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o 4DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
5 5
6CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4 6ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
7 cc_stack_align := -mpreferred-stack-boundary=4
8else ifneq ($(call cc-option, -mstack-alignment=16),)
9 cc_stack_align := -mstack-alignment=16
10endif
11
12CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
13
14ifdef CONFIG_CC_IS_CLANG
15CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
16endif
7 17
8AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) 18AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
9 19
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 456cd0e3289c..3b6ed60dcd35 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -39,9 +39,6 @@
39 * ways. Unless there is something clearly wrong with it the code should 39 * ways. Unless there is something clearly wrong with it the code should
40 * remain as-is as it provides us with a guarantee from HW that it is correct. 40 * remain as-is as it provides us with a guarantee from HW that it is correct.
41 */ 41 */
42
43typedef unsigned int uint;
44
45typedef struct { 42typedef struct {
46 double DPPCLK; 43 double DPPCLK;
47 double DISPCLK; 44 double DISPCLK;
@@ -4774,7 +4771,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
4774 mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0; 4771 mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
4775 mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0; 4772 mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
4776 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { 4773 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
4777 uint m; 4774 unsigned int m;
4778 4775
4779 locals->cursor_bw[k] = 0; 4776 locals->cursor_bw[k] = 0;
4780 locals->cursor_bw_pre[k] = 0; 4777 locals->cursor_bw_pre[k] = 0;
@@ -5285,7 +5282,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
5285 double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank; 5282 double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
5286 double FullDETBufferingTimeYStutterCriticalPlane = 0; 5283 double FullDETBufferingTimeYStutterCriticalPlane = 0;
5287 double TimeToFinishSwathTransferStutterCriticalPlane = 0; 5284 double TimeToFinishSwathTransferStutterCriticalPlane = 0;
5288 uint k, j; 5285 unsigned int k, j;
5289 5286
5290 mode_lib->vba.TotalActiveDPP = 0; 5287 mode_lib->vba.TotalActiveDPP = 0;
5291 mode_lib->vba.TotalDCCActiveDPP = 0; 5288 mode_lib->vba.TotalDCCActiveDPP = 0;
@@ -5507,7 +5504,7 @@ static void CalculateDCFCLKDeepSleep(
5507 double DPPCLK[], 5504 double DPPCLK[],
5508 double *DCFCLKDeepSleep) 5505 double *DCFCLKDeepSleep)
5509{ 5506{
5510 uint k; 5507 unsigned int k;
5511 double DisplayPipeLineDeliveryTimeLuma; 5508 double DisplayPipeLineDeliveryTimeLuma;
5512 double DisplayPipeLineDeliveryTimeChroma; 5509 double DisplayPipeLineDeliveryTimeChroma;
5513 //double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX]; 5510 //double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
@@ -5727,7 +5724,7 @@ static void CalculatePixelDeliveryTimes(
5727 double DisplayPipeRequestDeliveryTimeChromaPrefetch[]) 5724 double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
5728{ 5725{
5729 double req_per_swath_ub; 5726 double req_per_swath_ub;
5730 uint k; 5727 unsigned int k;
5731 5728
5732 for (k = 0; k < NumberOfActivePlanes; ++k) { 5729 for (k = 0; k < NumberOfActivePlanes; ++k) {
5733 if (VRatio[k] <= 1) { 5730 if (VRatio[k] <= 1) {
@@ -5869,7 +5866,7 @@ static void CalculateMetaAndPTETimes(
5869 unsigned int dpte_groups_per_row_chroma_ub; 5866 unsigned int dpte_groups_per_row_chroma_ub;
5870 unsigned int num_group_per_lower_vm_stage; 5867 unsigned int num_group_per_lower_vm_stage;
5871 unsigned int num_req_per_lower_vm_stage; 5868 unsigned int num_req_per_lower_vm_stage;
5872 uint k; 5869 unsigned int k;
5873 5870
5874 for (k = 0; k < NumberOfActivePlanes; ++k) { 5871 for (k = 0; k < NumberOfActivePlanes; ++k) {
5875 if (GPUVMEnable == true) { 5872 if (GPUVMEnable == true) {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 33960fb38a5d..4acf139ea014 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -843,6 +843,8 @@ static int smu_sw_init(void *handle)
843 smu->smu_baco.state = SMU_BACO_STATE_EXIT; 843 smu->smu_baco.state = SMU_BACO_STATE_EXIT;
844 smu->smu_baco.platform_support = false; 844 smu->smu_baco.platform_support = false;
845 845
846 mutex_init(&smu->sensor_lock);
847
846 smu->watermarks_bitmap = 0; 848 smu->watermarks_bitmap = 0;
847 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 849 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
848 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 850 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index f1f072012fac..d493a3f8c07a 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1018,6 +1018,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
1018 if (!data || !size) 1018 if (!data || !size)
1019 return -EINVAL; 1019 return -EINVAL;
1020 1020
1021 mutex_lock(&smu->sensor_lock);
1021 switch (sensor) { 1022 switch (sensor) {
1022 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 1023 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1023 *(uint32_t *)data = pptable->FanMaximumRpm; 1024 *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1044,6 +1045,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
1044 default: 1045 default:
1045 ret = smu_smc_read_sensor(smu, sensor, data, size); 1046 ret = smu_smc_read_sensor(smu, sensor, data, size);
1046 } 1047 }
1048 mutex_unlock(&smu->sensor_lock);
1047 1049
1048 return ret; 1050 return ret;
1049} 1051}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 6109815a0401..23171a4d9a31 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -344,6 +344,7 @@ struct smu_context
344 const struct smu_funcs *funcs; 344 const struct smu_funcs *funcs;
345 const struct pptable_funcs *ppt_funcs; 345 const struct pptable_funcs *ppt_funcs;
346 struct mutex mutex; 346 struct mutex mutex;
347 struct mutex sensor_lock;
347 uint64_t pool_size; 348 uint64_t pool_size;
348 349
349 struct smu_table_context smu_table; 350 struct smu_table_context smu_table;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 12c0e469bf35..0b461404af6b 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -547,7 +547,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
547 struct smu_table_context *smu_table= &smu->smu_table; 547 struct smu_table_context *smu_table= &smu->smu_table;
548 int ret = 0; 548 int ret = 0;
549 549
550 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { 550 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
551 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 551 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
552 (void *)smu_table->metrics_table, false); 552 (void *)smu_table->metrics_table, false);
553 if (ret) { 553 if (ret) {
@@ -1386,6 +1386,7 @@ static int navi10_read_sensor(struct smu_context *smu,
1386 if(!data || !size) 1386 if(!data || !size)
1387 return -EINVAL; 1387 return -EINVAL;
1388 1388
1389 mutex_lock(&smu->sensor_lock);
1389 switch (sensor) { 1390 switch (sensor) {
1390 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 1391 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
1391 *(uint32_t *)data = pptable->FanMaximumRpm; 1392 *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1409,6 +1410,7 @@ static int navi10_read_sensor(struct smu_context *smu,
1409 default: 1410 default:
1410 ret = smu_smc_read_sensor(smu, sensor, data, size); 1411 ret = smu_smc_read_sensor(smu, sensor, data, size);
1411 } 1412 }
1413 mutex_unlock(&smu->sensor_lock);
1412 1414
1413 return ret; 1415 return ret;
1414} 1416}
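
In the navi10 metrics hunk above, the cache window "HZ / 1000" only means one jiffy when HZ=1000; with HZ=250 or HZ=100 the integer division yields 0 and the metrics table is re-fetched on nearly every call. msecs_to_jiffies(100) expresses the intended 100 ms window independent of the configured HZ. A standalone illustration of the arithmetic (MY_HZ stands in for CONFIG_HZ):

#include <stdio.h>

#define MY_HZ 250	/* a common CONFIG_HZ choice */

int main(void)
{
	/* Integer division: HZ / 1000 == 0 whenever HZ < 1000, so
	 * "time_after(jiffies, stamp + HZ / 1000)" expires immediately.
	 */
	printf("HZ/1000               = %d jiffies\n", MY_HZ / 1000);
	/* msecs_to_jiffies(100) amounts to 100 * HZ / 1000, rounded up. */
	printf("msecs_to_jiffies(100) ~= %d jiffies\n", 100 * MY_HZ / 1000);
	return 0;
}
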
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 64386ee3f878..bbd8ebd58434 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3023,6 +3023,7 @@ static int vega20_read_sensor(struct smu_context *smu,
3023 if(!data || !size) 3023 if(!data || !size)
3024 return -EINVAL; 3024 return -EINVAL;
3025 3025
3026 mutex_lock(&smu->sensor_lock);
3026 switch (sensor) { 3027 switch (sensor) {
3027 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 3028 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
3028 *(uint32_t *)data = pptable->FanMaximumRpm; 3029 *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3048,6 +3049,7 @@ static int vega20_read_sensor(struct smu_context *smu,
3048 default: 3049 default:
3049 ret = smu_smc_read_sensor(smu, sensor, data, size); 3050 ret = smu_smc_read_sensor(smu, sensor, data, size);
3050 } 3051 }
3052 mutex_unlock(&smu->sensor_lock);
3051 3053
3052 return ret; 3054 return ret;
3053} 3055}
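
The new smu->sensor_lock, initialized once in smu_sw_init() and taken across the whole switch in each read_sensor() implementation (arcturus, navi10, vega20), keeps concurrent sensor reads from interleaving updates to shared SMU state. The pattern, with my_* names standing in for the driver specifics:

struct my_smu {
	struct mutex sensor_lock;
	/* ... cached tables and other shared state ... */
};

static int my_sw_init(struct my_smu *smu)
{
	mutex_init(&smu->sensor_lock);	/* once, before first use */
	return 0;
}

static int my_read_sensor(struct my_smu *smu, int sensor,
			  void *data, u32 *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;		/* cheap checks before the lock */

	mutex_lock(&smu->sensor_lock);
	switch (sensor) {
	/* ... each case reads or refreshes shared state ... */
	default:
		ret = -EOPNOTSUPP;
	}
	mutex_unlock(&smu->sensor_lock);

	return ret;
}
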
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 2851cac94d86..b72840c06ab7 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
43 struct komeda_data_flow_cfg dflow; 43 struct komeda_data_flow_cfg dflow;
44 int err; 44 int err;
45 45
46 if (!writeback_job || !writeback_job->fb) { 46 if (!writeback_job)
47 return 0; 47 return 0;
48 }
49 48
50 if (!crtc_st->active) { 49 if (!crtc_st->active) {
51 DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n"); 50 DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n");
@@ -166,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
166 &komeda_wb_encoder_helper_funcs, 165 &komeda_wb_encoder_helper_funcs,
167 formats, n_formats); 166 formats, n_formats);
168 komeda_put_fourcc_list(formats); 167 komeda_put_fourcc_list(formats);
169 if (err) 168 if (err) {
169 kfree(kwb_conn);
170 return err; 170 return err;
171 }
171 172
172 drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs); 173 drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
173 174
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 22c0847986df..875a3a9eabfa 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -131,7 +131,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
131 struct drm_framebuffer *fb; 131 struct drm_framebuffer *fb;
132 int i, n_planes; 132 int i, n_planes;
133 133
134 if (!conn_state->writeback_job || !conn_state->writeback_job->fb) 134 if (!conn_state->writeback_job)
135 return 0; 135 return 0;
136 136
137 fb = conn_state->writeback_job->fb; 137 fb = conn_state->writeback_job->fb;
@@ -248,7 +248,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
248 248
249 mw_state = to_mw_state(conn_state); 249 mw_state = to_mw_state(conn_state);
250 250
251 if (conn_state->writeback_job && conn_state->writeback_job->fb) { 251 if (conn_state->writeback_job) {
252 struct drm_framebuffer *fb = conn_state->writeback_job->fb; 252 struct drm_framebuffer *fb = conn_state->writeback_job->fb;
253 253
254 DRM_DEV_DEBUG_DRIVER(drm->dev, 254 DRM_DEV_DEBUG_DRIVER(drm->dev,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 419381abbdd1..14aeaf736321 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
430 return -EINVAL; 430 return -EINVAL;
431 } 431 }
432 432
433 if (writeback_job->out_fence && !writeback_job->fb) { 433 if (!writeback_job->fb) {
434 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", 434 if (writeback_job->out_fence) {
435 connector->base.id, connector->name); 435 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
436 return -EINVAL; 436 connector->base.id, connector->name);
437 return -EINVAL;
438 }
439
440 drm_writeback_cleanup_job(writeback_job);
441 state->writeback_job = NULL;
437 } 442 }
438 443
439 return 0; 444 return 0;
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index ff138b6ec48b..43d9e3bb3a94 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
324 if (job->fb) 324 if (job->fb)
325 drm_framebuffer_put(job->fb); 325 drm_framebuffer_put(job->fb);
326 326
327 if (job->out_fence)
328 dma_fence_put(job->out_fence);
329
327 kfree(job); 330 kfree(job);
328} 331}
329EXPORT_SYMBOL(drm_writeback_cleanup_job); 332EXPORT_SYMBOL(drm_writeback_cleanup_job);
@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
366{ 369{
367 unsigned long flags; 370 unsigned long flags;
368 struct drm_writeback_job *job; 371 struct drm_writeback_job *job;
372 struct dma_fence *out_fence;
369 373
370 spin_lock_irqsave(&wb_connector->job_lock, flags); 374 spin_lock_irqsave(&wb_connector->job_lock, flags);
371 job = list_first_entry_or_null(&wb_connector->job_queue, 375 job = list_first_entry_or_null(&wb_connector->job_queue,
372 struct drm_writeback_job, 376 struct drm_writeback_job,
373 list_entry); 377 list_entry);
374 if (job) { 378 if (job)
375 list_del(&job->list_entry); 379 list_del(&job->list_entry);
376 if (job->out_fence) { 380
377 if (status)
378 dma_fence_set_error(job->out_fence, status);
379 dma_fence_signal(job->out_fence);
380 dma_fence_put(job->out_fence);
381 }
382 }
383 spin_unlock_irqrestore(&wb_connector->job_lock, flags); 381 spin_unlock_irqrestore(&wb_connector->job_lock, flags);
384 382
385 if (WARN_ON(!job)) 383 if (WARN_ON(!job))
386 return; 384 return;
387 385
386 out_fence = job->out_fence;
387 if (out_fence) {
388 if (status)
389 dma_fence_set_error(out_fence, status);
390 dma_fence_signal(out_fence);
391 dma_fence_put(out_fence);
392 job->out_fence = NULL;
393 }
394
388 INIT_WORK(&job->cleanup_work, cleanup_work); 395 INIT_WORK(&job->cleanup_work, cleanup_work);
389 queue_work(system_long_wq, &job->cleanup_work); 396 queue_work(system_long_wq, &job->cleanup_work);
390} 397}
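
The drm_writeback_signal_completion() rework above shrinks the job_lock critical section to just dequeuing the job; the fence is signalled and dropped only after the spinlock is released, and job->out_fence is cleared so the dma_fence_put newly added to drm_writeback_cleanup_job() cannot drop the reference a second time. A sketch of the detach-under-lock, act-after-unlock shape, with my_* names standing in for the writeback types:

static void my_complete(struct my_conn *conn, int status)
{
	struct my_job *job;
	unsigned long flags;

	/* Hold the lock only long enough to detach the job. */
	spin_lock_irqsave(&conn->job_lock, flags);
	job = list_first_entry_or_null(&conn->job_queue,
				       struct my_job, list_entry);
	if (job)
		list_del(&job->list_entry);
	spin_unlock_irqrestore(&conn->job_lock, flags);

	if (WARN_ON(!job))
		return;

	/* Fence callbacks may run here; do it outside the spinlock. */
	if (job->out_fence) {
		if (status)
			dma_fence_set_error(job->out_fence, status);
		dma_fence_signal(job->out_fence);
		dma_fence_put(job->out_fence);
		job->out_fence = NULL;	/* cleanup won't put it again */
	}
}
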
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b51d1ceb8739..ce05e805b08f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7261,7 +7261,7 @@ retry:
7261 pipe_config->fdi_lanes = lane; 7261 pipe_config->fdi_lanes = lane;
7262 7262
7263 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7263 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7264 link_bw, &pipe_config->fdi_m_n, false); 7264 link_bw, &pipe_config->fdi_m_n, false, false);
7265 7265
7266 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7266 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7267 if (ret == -EDEADLK) 7267 if (ret == -EDEADLK)
@@ -7508,11 +7508,15 @@ void
7508intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7508intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7509 int pixel_clock, int link_clock, 7509 int pixel_clock, int link_clock,
7510 struct intel_link_m_n *m_n, 7510 struct intel_link_m_n *m_n,
7511 bool constant_n) 7511 bool constant_n, bool fec_enable)
7512{ 7512{
7513 m_n->tu = 64; 7513 u32 data_clock = bits_per_pixel * pixel_clock;
7514
7515 if (fec_enable)
7516 data_clock = intel_dp_mode_to_fec_clock(data_clock);
7514 7517
7515 compute_m_n(bits_per_pixel * pixel_clock, 7518 m_n->tu = 64;
7519 compute_m_n(data_clock,
7516 link_clock * nlanes * 8, 7520 link_clock * nlanes * 8,
7517 &m_n->gmch_m, &m_n->gmch_n, 7521 &m_n->gmch_m, &m_n->gmch_n,
7518 constant_n); 7522 constant_n);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index e57e6969051d..01fa87ad3270 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -414,7 +414,7 @@ enum phy_fia {
414void intel_link_compute_m_n(u16 bpp, int nlanes, 414void intel_link_compute_m_n(u16 bpp, int nlanes,
415 int pixel_clock, int link_clock, 415 int pixel_clock, int link_clock,
416 struct intel_link_m_n *m_n, 416 struct intel_link_m_n *m_n,
417 bool constant_n); 417 bool constant_n, bool fec_enable);
418bool is_ccs_modifier(u64 modifier); 418bool is_ccs_modifier(u64 modifier);
419void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); 419void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
420u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 420u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 921ad0a2f7ba..57e9f0ba331b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -78,8 +78,8 @@
78#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 78#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
79#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 79#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
80 80
81/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */ 81/* DP DSC FEC Overhead factor = 1/(0.972261) */
82#define DP_DSC_FEC_OVERHEAD_FACTOR 976 82#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
83 83
84/* Compliance test status bits */ 84/* Compliance test status bits */
85#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 85#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -494,6 +494,97 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
494 return 0; 494 return 0;
495} 495}
496 496
497u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
498{
499 return div_u64(mul_u32_u32(mode_clock, 1000000U),
500 DP_DSC_FEC_OVERHEAD_FACTOR);
501}
502
503static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
504 u32 mode_clock, u32 mode_hdisplay)
505{
506 u32 bits_per_pixel, max_bpp_small_joiner_ram;
507 int i;
508
509 /*
510 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
511 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
512 * for SST -> TimeSlotsPerMTP is 1,
513 * for MST -> TimeSlotsPerMTP has to be calculated
514 */
515 bits_per_pixel = (link_clock * lane_count * 8) /
516 intel_dp_mode_to_fec_clock(mode_clock);
517 DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
518
519 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
520 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
521 DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
522
523 /*
524 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
525 * check, output bpp from small joiner RAM check)
526 */
527 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
528
529 /* Error out if the max bpp is less than smallest allowed valid bpp */
530 if (bits_per_pixel < valid_dsc_bpp[0]) {
531 DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
532 bits_per_pixel, valid_dsc_bpp[0]);
533 return 0;
534 }
535
536 /* Find the nearest match in the array of known BPPs from VESA */
537 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
538 if (bits_per_pixel < valid_dsc_bpp[i + 1])
539 break;
540 }
541 bits_per_pixel = valid_dsc_bpp[i];
542
543 /*
544 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
545 * fractional part is 0
546 */
547 return bits_per_pixel << 4;
548}
549
550static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
551 int mode_clock, int mode_hdisplay)
552{
553 u8 min_slice_count, i;
554 int max_slice_width;
555
556 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
557 min_slice_count = DIV_ROUND_UP(mode_clock,
558 DP_DSC_MAX_ENC_THROUGHPUT_0);
559 else
560 min_slice_count = DIV_ROUND_UP(mode_clock,
561 DP_DSC_MAX_ENC_THROUGHPUT_1);
562
563 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
564 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
565 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
566 max_slice_width);
567 return 0;
568 }
569 /* Also take into account max slice width */
570 min_slice_count = min_t(u8, min_slice_count,
571 DIV_ROUND_UP(mode_hdisplay,
572 max_slice_width));
573
574 /* Find the closest match to the valid slice count values */
575 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
576 if (valid_dsc_slicecount[i] >
577 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
578 false))
579 break;
580 if (min_slice_count <= valid_dsc_slicecount[i])
581 return valid_dsc_slicecount[i];
582 }
583
584 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
585 return 0;
586}
587
497static enum drm_mode_status 588static enum drm_mode_status
498intel_dp_mode_valid(struct drm_connector *connector, 589intel_dp_mode_valid(struct drm_connector *connector,
499 struct drm_display_mode *mode) 590 struct drm_display_mode *mode)
@@ -2226,7 +2317,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
2226 adjusted_mode->crtc_clock, 2317 adjusted_mode->crtc_clock,
2227 pipe_config->port_clock, 2318 pipe_config->port_clock,
2228 &pipe_config->dp_m_n, 2319 &pipe_config->dp_m_n,
2229 constant_n); 2320 constant_n, pipe_config->fec_enable);
2230 2321
2231 if (intel_connector->panel.downclock_mode != NULL && 2322 if (intel_connector->panel.downclock_mode != NULL &&
2232 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2323 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2236,7 +2327,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
2236 intel_connector->panel.downclock_mode->clock, 2327 intel_connector->panel.downclock_mode->clock,
2237 pipe_config->port_clock, 2328 pipe_config->port_clock,
2238 &pipe_config->dp_m2_n2, 2329 &pipe_config->dp_m2_n2,
2239 constant_n); 2330 constant_n, pipe_config->fec_enable);
2240 } 2331 }
2241 2332
2242 if (!HAS_DDI(dev_priv)) 2333 if (!HAS_DDI(dev_priv))
@@ -4323,91 +4414,6 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4323 DP_DPRX_ESI_LEN; 4414 DP_DPRX_ESI_LEN;
4324} 4415}
4325 4416
4326u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
4327 int mode_clock, int mode_hdisplay)
4328{
4329 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4330 int i;
4331
4332 /*
4333 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4334 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4335 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4336 * for MST -> TimeSlotsPerMTP has to be calculated
4337 */
4338 bits_per_pixel = (link_clock * lane_count * 8 *
4339 DP_DSC_FEC_OVERHEAD_FACTOR) /
4340 mode_clock;
4341
4342 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4343 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4344 mode_hdisplay;
4345
4346 /*
4347 * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
4348 * check, output bpp from small joiner RAM check)
4349 */
4350 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4351
4352 /* Error out if the max bpp is less than smallest allowed valid bpp */
4353 if (bits_per_pixel < valid_dsc_bpp[0]) {
4354 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4355 return 0;
4356 }
4357
4358 /* Find the nearest match in the array of known BPPs from VESA */
4359 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4360 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4361 break;
4362 }
4363 bits_per_pixel = valid_dsc_bpp[i];
4364
4365 /*
4366 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
4367 * fractional part is 0
4368 */
4369 return bits_per_pixel << 4;
4370}
4371
4372u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4373 int mode_clock,
4374 int mode_hdisplay)
4375{
4376 u8 min_slice_count, i;
4377 int max_slice_width;
4378
4379 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4380 min_slice_count = DIV_ROUND_UP(mode_clock,
4381 DP_DSC_MAX_ENC_THROUGHPUT_0);
4382 else
4383 min_slice_count = DIV_ROUND_UP(mode_clock,
4384 DP_DSC_MAX_ENC_THROUGHPUT_1);
4385
4386 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4387 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4388 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4389 max_slice_width);
4390 return 0;
4391 }
4392 /* Also take into account max slice width */
4393 min_slice_count = min_t(u8, min_slice_count,
4394 DIV_ROUND_UP(mode_hdisplay,
4395 max_slice_width));
4396
4397 /* Find the closest match to the valid slice count values */
4398 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4399 if (valid_dsc_slicecount[i] >
4400 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4401 false))
4402 break;
4403 if (min_slice_count <= valid_dsc_slicecount[i])
4404 return valid_dsc_slicecount[i];
4405 }
4406
4407 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4408 return 0;
4409}
4410
4411static void 4417static void
4412intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp, 4418intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
4413 const struct intel_crtc_state *crtc_state) 4419 const struct intel_crtc_state *crtc_state)
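
The new intel_dp_mode_to_fec_clock() above replaces the old "scale the link budget by 0.976" approximation with the 0.972261 effective-rate factor from the hunk's comment, and applies it in the opposite direction: instead of shrinking the available bandwidth, intel_link_compute_m_n() inflates the required pixel data rate by dividing by the factor whenever fec_enable is set. For example, a 600000 kHz stream costs 600000 * 1000000 / 972261 ≈ 617118 kHz once FEC overhead is included, about 2.85% more. A standalone check of that arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors div_u64(mul_u32_u32(mode_clock, 1000000), 972261):
 * the effective clock after DP FEC overhead is added.
 */
static uint32_t mode_to_fec_clock(uint32_t mode_clock)
{
	return (uint32_t)(((uint64_t)mode_clock * 1000000u) / 972261u);
}

int main(void)
{
	uint32_t c = 600000;	/* kHz, example mode clock */

	printf("%" PRIu32 " kHz -> %" PRIu32 " kHz with FEC\n",
	       c, mode_to_fec_clock(c));
	return 0;
}
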
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 657bbb1f5ed0..00981fb9414b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -102,10 +102,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
102bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); 102bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
103bool 103bool
104intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status); 104intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
105u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
106 int mode_clock, int mode_hdisplay);
107u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
108 int mode_hdisplay);
109 105
110bool intel_dp_read_dpcd(struct intel_dp *intel_dp); 106bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
111bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); 107bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
@@ -118,4 +114,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
118 return ~((1 << lane_count) - 1) & 0xf; 114 return ~((1 << lane_count) - 1) & 0xf;
119} 115}
120 116
117u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
118
121#endif /* __INTEL_DP_H__ */ 119#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 6df240a01b8c..600873c796d0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
81 adjusted_mode->crtc_clock, 81 adjusted_mode->crtc_clock,
82 crtc_state->port_clock, 82 crtc_state->port_clock,
83 &crtc_state->dp_m_n, 83 &crtc_state->dp_m_n,
84 constant_n); 84 constant_n, crtc_state->fec_enable);
85 crtc_state->dp_m_n.tu = slots; 85 crtc_state->dp_m_n.tu = slots;
86 86
87 return 0; 87 return 0;
@@ -615,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
615 intel_encoder->type = INTEL_OUTPUT_DP_MST; 615 intel_encoder->type = INTEL_OUTPUT_DP_MST;
616 intel_encoder->power_domain = intel_dig_port->base.power_domain; 616 intel_encoder->power_domain = intel_dig_port->base.power_domain;
617 intel_encoder->port = intel_dig_port->base.port; 617 intel_encoder->port = intel_dig_port->base.port;
618 intel_encoder->crtc_mask = BIT(pipe); 618 intel_encoder->crtc_mask = 0x7;
619 intel_encoder->cloneable = 0; 619 intel_encoder->cloneable = 0;
620 620
621 intel_encoder->compute_config = intel_dp_mst_compute_config; 621 intel_encoder->compute_config = intel_dp_mst_compute_config;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index dea63be1964f..cae25e493128 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -1528,6 +1528,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
1528 int src_x, src_w, src_h, crtc_w, crtc_h; 1528 int src_x, src_w, src_h, crtc_w, crtc_h;
1529 const struct drm_display_mode *adjusted_mode = 1529 const struct drm_display_mode *adjusted_mode =
1530 &crtc_state->base.adjusted_mode; 1530 &crtc_state->base.adjusted_mode;
1531 unsigned int stride = plane_state->color_plane[0].stride;
1531 unsigned int cpp = fb->format->cpp[0]; 1532 unsigned int cpp = fb->format->cpp[0];
1532 unsigned int width_bytes; 1533 unsigned int width_bytes;
1533 int min_width, min_height; 1534 int min_width, min_height;
@@ -1569,9 +1570,9 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
1569 return -EINVAL; 1570 return -EINVAL;
1570 } 1571 }
1571 1572
1572 if (width_bytes > 4096 || fb->pitches[0] > 4096) { 1573 if (stride > 4096) {
1573 DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n", 1574 DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
1574 fb->pitches[0], 4096); 1575 stride, 4096);
1575 return -EINVAL; 1576 return -EINVAL;
1576 } 1577 }
1577 1578
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index e226324adb69..4bdd63b57100 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1083,7 +1083,7 @@ static const struct dss_features omap34xx_dss_feats = {
1083 1083
1084static const struct dss_features omap3630_dss_feats = { 1084static const struct dss_features omap3630_dss_feats = {
1085 .model = DSS_MODEL_OMAP3, 1085 .model = DSS_MODEL_OMAP3,
1086 .fck_div_max = 32, 1086 .fck_div_max = 31,
1087 .fck_freq_max = 173000000, 1087 .fck_freq_max = 173000000,
1088 .dss_fck_multiplier = 1, 1088 .dss_fck_multiplier = 1,
1089 .parent_clk_name = "dpll4_ck", 1089 .parent_clk_name = "dpll4_ck",
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index ae07290bba6a..04efa78d70b6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
147 struct drm_device *dev = encoder->dev; 147 struct drm_device *dev = encoder->dev;
148 struct drm_framebuffer *fb; 148 struct drm_framebuffer *fb;
149 149
150 if (!conn_state->writeback_job || !conn_state->writeback_job->fb) 150 if (!conn_state->writeback_job)
151 return 0; 151 return 0;
152 152
153 fb = conn_state->writeback_job->fb; 153 fb = conn_state->writeback_job->fb;
@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
221 unsigned int i; 221 unsigned int i;
222 222
223 state = rcrtc->writeback.base.state; 223 state = rcrtc->writeback.base.state;
224 if (!state || !state->writeback_job || !state->writeback_job->fb) 224 if (!state || !state->writeback_job)
225 return; 225 return;
226 226
227 fb = state->writeback_job->fb; 227 fb = state->writeback_job->fb;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 525dc1c0f1c1..530edb3b51cc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -7,6 +7,7 @@
7#include <linux/gpio.h> 7#include <linux/gpio.h>
8#include <linux/mod_devicetable.h> 8#include <linux/mod_devicetable.h>
9#include <linux/of_gpio.h> 9#include <linux/of_gpio.h>
10#include <linux/pinctrl/consumer.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11 12
12#include <drm/drm_atomic_helper.h> 13#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 1ce4d7142b6e..bf720206727f 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -231,7 +231,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
231 int i; 231 int i;
232 232
233 conn_state = drm_atomic_get_new_connector_state(state, conn); 233 conn_state = drm_atomic_get_new_connector_state(state, conn);
234 if (!conn_state->writeback_job || !conn_state->writeback_job->fb) 234 if (!conn_state->writeback_job)
235 return 0; 235 return 0;
236 236
237 crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); 237 crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -271,8 +271,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
 	u32 ctrl;
 	int i;
 
-	if (WARN_ON(!conn_state->writeback_job ||
-		    !conn_state->writeback_job->fb))
+	if (WARN_ON(!conn_state->writeback_job))
 		return;
 
 	mode = &conn_state->crtc->state->adjusted_mode;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c09791fb4929..f1c714acc280 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1736,6 +1736,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
 	case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
 	case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
+		priv->features |= FEATURE_BLOCK_PROC;
 		priv->features |= FEATURE_I2C_BLOCK_READ;
 		priv->features |= FEATURE_IRQ;
 		priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index a89bfce5388e..17abf60c94ae 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -355,11 +355,13 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 {
 	dma_addr_t rx_dma;
 	unsigned long time_left;
-	void *dma_buf;
+	void *dma_buf = NULL;
 	struct geni_se *se = &gi2c->se;
 	size_t len = msg->len;
 
-	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+	if (!of_machine_is_compatible("lenovo,yoga-c630"))
+		dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+
 	if (dma_buf)
 		geni_se_select_mode(se, GENI_SE_DMA);
 	else
@@ -394,11 +396,13 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 {
 	dma_addr_t tx_dma;
 	unsigned long time_left;
-	void *dma_buf;
+	void *dma_buf = NULL;
 	struct geni_se *se = &gi2c->se;
 	size_t len = msg->len;
 
-	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+	if (!of_machine_is_compatible("lenovo,yoga-c630"))
+		dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+
 	if (dma_buf)
 		geni_se_select_mode(se, GENI_SE_DMA);
 	else
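
The two hunks above share one pattern: i2c_get_dma_safe_msg_buf() returns NULL when a message is too small for DMA to pay off, and the driver then falls back to FIFO mode; the quirk simply forces the NULL path on one machine. A minimal standalone sketch of that select-or-fall-back flow, with invented names (get_dma_safe_buf, DMA_MIN_LEN) rather than the real kernel API:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define DMA_MIN_LEN 32	/* below this, FIFO mode is cheaper */

	/* Returns a bounce buffer, or NULL meaning "use FIFO mode". */
	static void *get_dma_safe_buf(const char *msg, size_t len, int dma_allowed)
	{
		if (!dma_allowed || len < DMA_MIN_LEN)
			return NULL;

		void *buf = malloc(len);	/* stands in for a DMA-safe copy */
		if (buf)
			memcpy(buf, msg, len);
		return buf;
	}

	int main(void)
	{
		const char msg[64] = "payload";
		/* dma_allowed = 0 models the quirked board that must avoid DMA */
		void *dma_buf = get_dma_safe_buf(msg, sizeof(msg), 0);

		puts(dma_buf ? "DMA mode selected" : "FIFO mode selected");
		free(dma_buf);
		return 0;
	}
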
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index f31413fd9521..800414886f6b 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -202,6 +202,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
 	if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
 		/* We got a NACKIE */
 		readb(riic->base + RIIC_ICDRR);	/* dummy read */
+		riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
 		riic->err = -ENXIO;
 	} else if (riic->bytes_left) {
 		return IRQ_NONE;
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 92ff9991bae8..db9763cb4dae 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -33,11 +33,13 @@ struct eeprom_data {
 	u16 address_mask;
 	u8 num_address_bytes;
 	u8 idx_write_cnt;
+	bool read_only;
 	u8 buffer[];
 };
 
 #define I2C_SLAVE_BYTELEN GENMASK(15, 0)
 #define I2C_SLAVE_FLAG_ADDR16 BIT(16)
+#define I2C_SLAVE_FLAG_RO BIT(17)
 #define I2C_SLAVE_DEVICE_MAGIC(_len, _flags) ((_flags) | (_len))
 
 static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
@@ -53,9 +55,11 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client,
 			eeprom->buffer_idx = *val | (eeprom->buffer_idx << 8);
 			eeprom->idx_write_cnt++;
 		} else {
-			spin_lock(&eeprom->buffer_lock);
-			eeprom->buffer[eeprom->buffer_idx++ & eeprom->address_mask] = *val;
-			spin_unlock(&eeprom->buffer_lock);
+			if (!eeprom->read_only) {
+				spin_lock(&eeprom->buffer_lock);
+				eeprom->buffer[eeprom->buffer_idx++ & eeprom->address_mask] = *val;
+				spin_unlock(&eeprom->buffer_lock);
+			}
 		}
 		break;
 
@@ -130,6 +134,7 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_de
 	eeprom->idx_write_cnt = 0;
 	eeprom->num_address_bytes = flag_addr16 ? 2 : 1;
 	eeprom->address_mask = size - 1;
+	eeprom->read_only = FIELD_GET(I2C_SLAVE_FLAG_RO, id->driver_data);
 	spin_lock_init(&eeprom->buffer_lock);
 	i2c_set_clientdata(client, eeprom);
 
@@ -165,8 +170,11 @@ static int i2c_slave_eeprom_remove(struct i2c_client *client)
 
 static const struct i2c_device_id i2c_slave_eeprom_id[] = {
 	{ "slave-24c02", I2C_SLAVE_DEVICE_MAGIC(2048 / 8, 0) },
+	{ "slave-24c02ro", I2C_SLAVE_DEVICE_MAGIC(2048 / 8, I2C_SLAVE_FLAG_RO) },
 	{ "slave-24c32", I2C_SLAVE_DEVICE_MAGIC(32768 / 8, I2C_SLAVE_FLAG_ADDR16) },
+	{ "slave-24c32ro", I2C_SLAVE_DEVICE_MAGIC(32768 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) },
 	{ "slave-24c64", I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16) },
+	{ "slave-24c64ro", I2C_SLAVE_DEVICE_MAGIC(65536 / 8, I2C_SLAVE_FLAG_ADDR16 | I2C_SLAVE_FLAG_RO) },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, i2c_slave_eeprom_id);
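
The driver_data word used in the table above packs the EEPROM byte length into the low 16 bits and boolean flags above it. A small standalone sketch of the same encode/decode arithmetic, with the kernel's GENMASK()/BIT() expanded into plain constants:

	#include <stdio.h>

	#define BYTELEN_MASK	0x0000ffffUL		/* GENMASK(15, 0) */
	#define FLAG_ADDR16	(1UL << 16)		/* BIT(16) */
	#define FLAG_RO		(1UL << 17)		/* BIT(17) */
	#define DEVICE_MAGIC(len, flags)	((flags) | (len))

	int main(void)
	{
		/* same encoding as the "slave-24c32ro" entry above */
		unsigned long data = DEVICE_MAGIC(32768 / 8, FLAG_ADDR16 | FLAG_RO);

		unsigned long size = data & BYTELEN_MASK;	/* FIELD_GET analogue */
		int addr16 = !!(data & FLAG_ADDR16);
		int read_only = !!(data & FLAG_RO);

		printf("size=%lu bytes, 16-bit addressing=%d, read-only=%d\n",
		       size, addr16, read_only);
		return 0;
	}
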
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 97975bb7f347..2369b8af81f3 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -70,7 +70,6 @@
  */
 #define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
 
-static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 
 /* List of all available dev_data structures */
@@ -202,6 +201,7 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid)
 	if (!dev_data)
 		return NULL;
 
+	spin_lock_init(&dev_data->lock);
 	dev_data->devid = devid;
 	ratelimit_default_init(&dev_data->rs);
 
@@ -501,6 +501,29 @@ static void iommu_uninit_device(struct device *dev)
  */
 }
 
+/*
+ * Helper function to get the first pte of a large mapping
+ */
+static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
+			 unsigned long *count)
+{
+	unsigned long pte_mask, pg_size, cnt;
+	u64 *fpte;
+
+	pg_size  = PTE_PAGE_SIZE(*pte);
+	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
+	pte_mask = ~((cnt << 3) - 1);
+	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);
+
+	if (page_size)
+		*page_size = pg_size;
+
+	if (count)
+		*count = cnt;
+
+	return fpte;
+}
+
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -1311,8 +1334,12 @@ static void domain_flush_np_cache(struct protection_domain *domain,
 				       dma_addr_t iova, size_t size)
 {
 	if (unlikely(amd_iommu_np_cache)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&domain->lock, flags);
 		domain_flush_pages(domain, iova, size);
 		domain_flush_complete(domain);
+		spin_unlock_irqrestore(&domain->lock, flags);
 	}
 }
 
@@ -1425,7 +1452,7 @@ static void free_pagetable(struct protection_domain *domain)
 	BUG_ON(domain->mode < PAGE_MODE_NONE ||
 	       domain->mode > PAGE_MODE_6_LEVEL);
 
-	free_sub_pt(root, domain->mode, freelist);
+	freelist = free_sub_pt(root, domain->mode, freelist);
 
 	free_page_list(freelist);
 }
@@ -1435,10 +1462,11 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static void increase_address_space(struct protection_domain *domain,
+static bool increase_address_space(struct protection_domain *domain,
 				   gfp_t gfp)
 {
 	unsigned long flags;
+	bool ret = false;
 	u64 *pte;
 
 	spin_lock_irqsave(&domain->lock, flags);
@@ -1455,19 +1483,21 @@ static void increase_address_space(struct protection_domain *domain,
 					iommu_virt_to_phys(domain->pt_root));
 	domain->pt_root  = pte;
 	domain->mode    += 1;
-	domain->updated  = true;
+
+	ret = true;
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
 
-	return;
+	return ret;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
 		      unsigned long address,
 		      unsigned long page_size,
 		      u64 **pte_page,
-		      gfp_t gfp)
+		      gfp_t gfp,
+		      bool *updated)
 {
 	int level, end_lvl;
 	u64 *pte, *page;
@@ -1475,7 +1505,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 	BUG_ON(!is_power_of_2(page_size));
 
 	while (address > PM_LEVEL_SIZE(domain->mode))
-		increase_address_space(domain, gfp);
+		*updated = increase_address_space(domain, gfp) || *updated;
 
 	level = domain->mode - 1;
 	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
@@ -1489,9 +1519,32 @@ static u64 *alloc_pte(struct protection_domain *domain,
 		__pte     = *pte;
 		pte_level = PM_PTE_LEVEL(__pte);
 
-		if (!IOMMU_PTE_PRESENT(__pte) ||
+		/*
+		 * If we replace a series of large PTEs, we need
+		 * to tear down all of them.
+		 */
+		if (IOMMU_PTE_PRESENT(__pte) &&
 		    pte_level == PAGE_MODE_7_LEVEL) {
+			unsigned long count, i;
+			u64 *lpte;
+
+			lpte = first_pte_l7(pte, NULL, &count);
+
+			/*
+			 * Unmap the replicated PTEs that still match the
+			 * original large mapping
+			 */
+			for (i = 0; i < count; ++i)
+				cmpxchg64(&lpte[i], __pte, 0ULL);
+
+			*updated = true;
+			continue;
+		}
+
+		if (!IOMMU_PTE_PRESENT(__pte) ||
+		    pte_level == PAGE_MODE_NONE) {
 			page = (u64 *)get_zeroed_page(gfp);
+
 			if (!page)
 				return NULL;
 
@@ -1500,8 +1553,8 @@ static u64 *alloc_pte(struct protection_domain *domain,
 			/* pte could have been changed somewhere. */
 			if (cmpxchg64(pte, __pte, __npte) != __pte)
 				free_page((unsigned long)page);
-			else if (pte_level == PAGE_MODE_7_LEVEL)
-				domain->updated = true;
+			else if (IOMMU_PTE_PRESENT(__pte))
+				*updated = true;
 
 			continue;
 		}
@@ -1566,17 +1619,12 @@ static u64 *fetch_pte(struct protection_domain *domain,
 		*page_size = PTE_LEVEL_PAGE_SIZE(level);
 	}
 
-	if (PM_PTE_LEVEL(*pte) == 0x07) {
-		unsigned long pte_mask;
-
-		/*
-		 * If we have a series of large PTEs, make
-		 * sure to return a pointer to the first one.
-		 */
-		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
-		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
-		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
-	}
+	/*
+	 * If we have a series of large PTEs, make
+	 * sure to return a pointer to the first one.
+	 */
+	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
+		pte = first_pte_l7(pte, page_size, NULL);
 
 	return pte;
 }
@@ -1615,26 +1663,29 @@ static int iommu_map_page(struct protection_domain *dom,
 			  gfp_t gfp)
 {
 	struct page *freelist = NULL;
+	bool updated = false;
 	u64 __pte, *pte;
-	int i, count;
+	int ret, i, count;
 
 	BUG_ON(!IS_ALIGNED(bus_addr, page_size));
 	BUG_ON(!IS_ALIGNED(phys_addr, page_size));
 
+	ret = -EINVAL;
 	if (!(prot & IOMMU_PROT_MASK))
-		return -EINVAL;
+		goto out;
 
 	count = PAGE_SIZE_PTE_COUNT(page_size);
-	pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
+	pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
 
+	ret = -ENOMEM;
 	if (!pte)
-		return -ENOMEM;
+		goto out;
 
 	for (i = 0; i < count; ++i)
 		freelist = free_clear_pte(&pte[i], pte[i], freelist);
 
 	if (freelist != NULL)
-		dom->updated = true;
+		updated = true;
 
 	if (count > 1) {
 		__pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
@@ -1650,12 +1701,21 @@ static int iommu_map_page(struct protection_domain *dom,
 	for (i = 0; i < count; ++i)
 		pte[i] = __pte;
 
-	update_domain(dom);
+	ret = 0;
+
+out:
+	if (updated) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dom->lock, flags);
+		update_domain(dom);
+		spin_unlock_irqrestore(&dom->lock, flags);
+	}
 
 	/* Everything flushed out, free pages now */
 	free_page_list(freelist);
 
-	return 0;
+	return ret;
 }
 
 static unsigned long iommu_unmap_page(struct protection_domain *dom,
@@ -1806,8 +1866,12 @@ static void free_gcr3_table(struct protection_domain *domain)
 
 static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&dom->domain.lock, flags);
 	domain_flush_tlb(&dom->domain);
 	domain_flush_complete(&dom->domain);
+	spin_unlock_irqrestore(&dom->domain.lock, flags);
 }
 
 static void iova_domain_flush_tlb(struct iova_domain *iovad)
@@ -2022,36 +2086,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
 	domain->dev_cnt                 -= 1;
 }
 
-/*
- * If a device is not yet associated with a domain, this function makes the
- * device visible in the domain
- */
-static int __attach_device(struct iommu_dev_data *dev_data,
-			   struct protection_domain *domain)
-{
-	int ret;
-
-	/* lock domain */
-	spin_lock(&domain->lock);
-
-	ret = -EBUSY;
-	if (dev_data->domain != NULL)
-		goto out_unlock;
-
-	/* Attach alias group root */
-	do_attach(dev_data, domain);
-
-	ret = 0;
-
-out_unlock:
-
-	/* ready */
-	spin_unlock(&domain->lock);
-
-	return ret;
-}
-
-
 static void pdev_iommuv2_disable(struct pci_dev *pdev)
 {
 	pci_disable_ats(pdev);
@@ -2133,19 +2167,28 @@ static int attach_device(struct device *dev,
 	unsigned long flags;
 	int ret;
 
+	spin_lock_irqsave(&domain->lock, flags);
+
 	dev_data = get_dev_data(dev);
 
+	spin_lock(&dev_data->lock);
+
+	ret = -EBUSY;
+	if (dev_data->domain != NULL)
+		goto out;
+
 	if (!dev_is_pci(dev))
 		goto skip_ats_check;
 
 	pdev = to_pci_dev(dev);
 	if (domain->flags & PD_IOMMUV2_MASK) {
+		ret = -EINVAL;
 		if (!dev_data->passthrough)
-			return -EINVAL;
+			goto out;
 
 		if (dev_data->iommu_v2) {
 			if (pdev_iommuv2_enable(pdev) != 0)
-				return -EINVAL;
+				goto out;
 
 			dev_data->ats.enabled = true;
 			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
@@ -2158,9 +2201,9 @@ static int attach_device(struct device *dev,
 	}
 
 skip_ats_check:
-	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	ret = __attach_device(dev_data, domain);
-	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	ret = 0;
+
+	do_attach(dev_data, domain);
 
 	/*
 	 * We might boot into a crash-kernel here. The crashed kernel
@@ -2169,23 +2212,14 @@ skip_ats_check:
 	 */
 	domain_flush_tlb_pde(domain);
 
-	return ret;
-}
-
-/*
- * Removes a device from a protection domain (unlocked)
- */
-static void __detach_device(struct iommu_dev_data *dev_data)
-{
-	struct protection_domain *domain;
-
-	domain = dev_data->domain;
+	domain_flush_complete(domain);
 
-	spin_lock(&domain->lock);
+out:
+	spin_unlock(&dev_data->lock);
 
-	do_detach(dev_data);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
-	spin_unlock(&domain->lock);
+	return ret;
 }
 
 /*
@@ -2200,6 +2234,10 @@ static void detach_device(struct device *dev)
 	dev_data = get_dev_data(dev);
 	domain   = dev_data->domain;
 
+	spin_lock_irqsave(&domain->lock, flags);
+
+	spin_lock(&dev_data->lock);
+
 	/*
 	 * First check if the device is still attached. It might already
 	 * be detached from its domain because the generic
@@ -2207,15 +2245,12 @@ static void detach_device(struct device *dev)
 	 * our alias handling.
 	 */
 	if (WARN_ON(!dev_data->domain))
-		return;
+		goto out;
 
-	/* lock device table */
-	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	__detach_device(dev_data);
-	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	do_detach(dev_data);
 
 	if (!dev_is_pci(dev))
-		return;
+		goto out;
 
 	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
 		pdev_iommuv2_disable(to_pci_dev(dev));
@@ -2223,6 +2258,11 @@ static void detach_device(struct device *dev)
 		pci_disable_ats(to_pci_dev(dev));
 
 	dev_data->ats.enabled = false;
+
+out:
+	spin_unlock(&dev_data->lock);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static int amd_iommu_add_device(struct device *dev)
@@ -2354,15 +2394,10 @@ static void update_device_table(struct protection_domain *domain)
 
 static void update_domain(struct protection_domain *domain)
 {
-	if (!domain->updated)
-		return;
-
 	update_device_table(domain);
 
 	domain_flush_devices(domain);
 	domain_flush_tlb_pde(domain);
-
-	domain->updated = false;
 }
 
 static int dir2prot(enum dma_data_direction direction)
@@ -2392,6 +2427,7 @@ static dma_addr_t __map_single(struct device *dev,
 {
 	dma_addr_t offset = paddr & ~PAGE_MASK;
 	dma_addr_t address, start, ret;
+	unsigned long flags;
 	unsigned int pages;
 	int prot = 0;
 	int i;
@@ -2429,8 +2465,10 @@ out_unmap:
 		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
 	}
 
+	spin_lock_irqsave(&dma_dom->domain.lock, flags);
 	domain_flush_tlb(&dma_dom->domain);
 	domain_flush_complete(&dma_dom->domain);
+	spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 
 	dma_ops_free_iova(dma_dom, address, pages);
 
@@ -2459,8 +2497,12 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 	}
 
 	if (amd_iommu_unmap_flush) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dma_dom->domain.lock, flags);
 		domain_flush_tlb(&dma_dom->domain);
 		domain_flush_complete(&dma_dom->domain);
+		spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 		dma_ops_free_iova(dma_dom, dma_addr, pages);
 	} else {
 		pages = __roundup_pow_of_two(pages);
@@ -2866,16 +2908,16 @@ static void cleanup_domain(struct protection_domain *domain)
 	struct iommu_dev_data *entry;
 	unsigned long flags;
 
-	spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	spin_lock_irqsave(&domain->lock, flags);
 
 	while (!list_empty(&domain->dev_list)) {
 		entry = list_first_entry(&domain->dev_list,
 					 struct iommu_dev_data, list);
 		BUG_ON(!entry->domain);
-		__detach_device(entry);
+		do_detach(entry);
 	}
 
-	spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static void protection_domain_free(struct protection_domain *domain)
@@ -3226,9 +3268,12 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
 
+	spin_lock_irqsave(&dom->lock, flags);
 	domain_flush_tlb_pde(dom);
 	domain_flush_complete(dom);
+	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
@@ -3290,7 +3335,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 
 	/* Update data structure */
 	domain->mode = PAGE_MODE_NONE;
-	domain->updated = true;
 
 	/* Make changes visible to IOMMUs */
 	update_domain(domain);
@@ -3336,7 +3380,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 
 	domain->glx    = levels;
 	domain->flags |= PD_IOMMUV2_MASK;
-	domain->updated = true;
 
 	update_domain(domain);
 
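
first_pte_l7() above relies on a level-7 mapping being stored as a run of identical 8-byte PTEs whose start is naturally aligned to the run's size, so masking any PTE's address with ~((count << 3) - 1) lands on the first replica. A standalone sketch of just that pointer math, with an artificially aligned array standing in for a page-table page (alignment via C11 _Alignas, values invented):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* one page-table page; 4 KiB alignment mimics the real thing */
		static _Alignas(4096) uint64_t table[512];
		unsigned long count = 16;	/* replicated PTEs in the mapping */
		uint64_t *pte = &table[37];	/* somewhere inside the run */

		/* 8 bytes per PTE, so the run covers count << 3 bytes */
		unsigned long pte_mask = ~((count << 3) - 1);
		uint64_t *first = (uint64_t *)((uintptr_t)pte & pte_mask);

		/* prints "pte index 37 -> first replica index 32" */
		printf("pte index %ld -> first replica index %ld\n",
		       (long)(pte - table), (long)(first - table));
		return 0;
	}
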
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 9ac229e92b07..c9c1612d52e0 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -475,7 +475,6 @@ struct protection_domain {
 	int glx;		/* Number of levels for GCR3 table */
 	u64 *gcr3_tbl;		/* Guest CR3 table */
 	unsigned long flags;	/* flags to find out type of domain */
-	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 };
@@ -634,6 +633,9 @@ struct devid_map {
  * This struct contains device specific data for the IOMMU
  */
 struct iommu_dev_data {
+	/* Protect against attach/detach races */
+	spinlock_t lock;
+
 	struct list_head list;		  /* For domain->dev_list */
 	struct llist_node dev_data_list;  /* For global dev_data_list */
 	struct protection_domain *domain; /* Domain the device is bound to */
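
The new per-device lock only works because attach_device() and detach_device() above both take domain->lock first and dev_data->lock second; a fixed acquisition order is what keeps the nesting deadlock-free. A minimal pthread model of that ordering plus the -EBUSY check (all names invented, a sketch rather than the driver's code):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t dev_data_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *attached_domain;	/* NULL when detached */

	static int attach(void *domain)
	{
		int ret = -1;

		pthread_mutex_lock(&domain_lock);	/* outer lock, always first */
		pthread_mutex_lock(&dev_data_lock);	/* inner lock, always second */

		if (!attached_domain) {			/* the -EBUSY check */
			attached_domain = domain;
			ret = 0;
		}

		pthread_mutex_unlock(&dev_data_lock);
		pthread_mutex_unlock(&domain_lock);
		return ret;
	}

	int main(void)
	{
		int dummy_domain;

		printf("first attach: %d\n", attach(&dummy_domain));	/* 0 */
		printf("second attach: %d\n", attach(&dummy_domain));	/* -1, busy */
		return 0;
	}
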
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 0e019cc5da42..dfac6afa82ca 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -36,7 +36,6 @@
 #include <linux/regulator/db8500-prcmu.h>
 #include <linux/regulator/machine.h>
 #include <linux/platform_data/ux500_wdt.h>
-#include <linux/platform_data/db8500_thermal.h>
 #include "dbx500-prcmu-regs.h"
 
 /* Index of different voltages to be used when accessing AVSData */
@@ -3014,53 +3013,6 @@ static struct ux500_wdt_data db8500_wdt_pdata = {
 	.timeout = 600, /* 10 minutes */
 	.has_28_bits_resolution = true,
 };
-/*
- * Thermal Sensor
- */
-
-static struct resource db8500_thsens_resources[] = {
-	{
-		.name = "IRQ_HOTMON_LOW",
-		.start = IRQ_PRCMU_HOTMON_LOW,
-		.end = IRQ_PRCMU_HOTMON_LOW,
-		.flags = IORESOURCE_IRQ,
-	},
-	{
-		.name = "IRQ_HOTMON_HIGH",
-		.start = IRQ_PRCMU_HOTMON_HIGH,
-		.end = IRQ_PRCMU_HOTMON_HIGH,
-		.flags = IORESOURCE_IRQ,
-	},
-};
-
-static struct db8500_thsens_platform_data db8500_thsens_data = {
-	.trip_points[0] = {
-		.temp = 70000,
-		.type = THERMAL_TRIP_ACTIVE,
-		.cdev_name = {
-			[0] = "thermal-cpufreq-0",
-		},
-	},
-	.trip_points[1] = {
-		.temp = 75000,
-		.type = THERMAL_TRIP_ACTIVE,
-		.cdev_name = {
-			[0] = "thermal-cpufreq-0",
-		},
-	},
-	.trip_points[2] = {
-		.temp = 80000,
-		.type = THERMAL_TRIP_ACTIVE,
-		.cdev_name = {
-			[0] = "thermal-cpufreq-0",
-		},
-	},
-	.trip_points[3] = {
-		.temp = 85000,
-		.type = THERMAL_TRIP_CRITICAL,
-	},
-	.num_trips = 4,
-};
 
 static const struct mfd_cell common_prcmu_devs[] = {
 	{
@@ -3084,10 +3036,7 @@ static const struct mfd_cell db8500_prcmu_devs[] = {
 	},
 	{
 		.name = "db8500-thermal",
-		.num_resources = ARRAY_SIZE(db8500_thsens_resources),
-		.resources = db8500_thsens_resources,
-		.platform_data = &db8500_thsens_data,
-		.pdata_size = sizeof(db8500_thsens_data),
+		.of_compatible = "stericsson,db8500-thermal",
 	},
 };
 
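
The cell above now carries only an of_compatible string; the trip-point table that used to travel as platform data is expected to come from the device tree instead. A toy sketch of that indirection, with a fake DT table standing in for real OF lookups (all names invented):

	#include <stdio.h>
	#include <string.h>

	struct dt_node {
		const char *compatible;
		int critical_temp;	/* stands in for DT-provided trip data, millidegrees C */
	};

	static const struct dt_node fake_dt[] = {
		{ "stericsson,db8500-thermal", 85000 },
	};

	static const struct dt_node *find_by_compatible(const char *compat)
	{
		for (size_t i = 0; i < sizeof(fake_dt) / sizeof(fake_dt[0]); i++)
			if (!strcmp(fake_dt[i].compatible, compat))
				return &fake_dt[i];
		return NULL;
	}

	int main(void)
	{
		const struct dt_node *node =
			find_by_compatible("stericsson,db8500-thermal");

		if (node)
			printf("critical trip: %d millidegrees C\n", node->critical_temp);
		return 0;
	}
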
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 3a52f5703286..49ea02c467bf 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -94,6 +94,7 @@ config MMC_SDHCI_PCI
 	depends on MMC_SDHCI && PCI
 	select MMC_CQHCI
 	select IOSF_MBI if X86
+	select MMC_SDHCI_IO_ACCESSORS
 	help
 	  This selects the PCI Secure Digital Host Controller Interface.
 	  Most controllers found today are PCI devices.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 390ee162fe71..11c4598e91d9 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
 obj-$(CONFIG_MMC_SDHCI)		+= sdhci.o
 obj-$(CONFIG_MMC_SDHCI_PCI)	+= sdhci-pci.o
 sdhci-pci-y			+= sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o \
-				   sdhci-pci-dwc-mshc.o
+				   sdhci-pci-dwc-mshc.o sdhci-pci-gli.o
 obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI))	+= sdhci-pci-data.o
 obj-$(CONFIG_MMC_SDHCI_ACPI)	+= sdhci-acpi.o
 obj-$(CONFIG_MMC_SDHCI_PXAV3)	+= sdhci-pxav3.o
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 3271c2d76629..1d1953dfc54b 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -495,7 +495,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
 	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
 
 	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
-	value |= ESDHC_DMA_SNOOP;
+
+	if (of_dma_is_coherent(dev->of_node))
+		value |= ESDHC_DMA_SNOOP;
+	else
+		value &= ~ESDHC_DMA_SNOOP;
+
 	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
 	return 0;
 }
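
The esdhc fix above is a set-or-clear correction: the old code could only OR the snoop bit in, so a previously-set bit survived on non-coherent systems; deciding both directions on every call keeps the register consistent with of_dma_is_coherent(). A standalone sketch of the idiom, with an invented bit position:

	#include <stdio.h>
	#include <stdint.h>

	#define DMA_SNOOP_BIT	(1u << 6)	/* illustrative, not the real layout */

	static uint32_t update_sysctl(uint32_t value, int dma_coherent)
	{
		if (dma_coherent)
			value |= DMA_SNOOP_BIT;		/* snooping wanted */
		else
			value &= ~DMA_SNOOP_BIT;	/* explicitly clear stale state */
		return value;
	}

	int main(void)
	{
		uint32_t reg = DMA_SNOOP_BIT;	/* left over from firmware, say */

		printf("coherent:     0x%08x\n", update_sysctl(reg, 1));
		printf("non-coherent: 0x%08x\n", update_sysctl(reg, 0));
		return 0;
	}
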
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index e1ca185d7328..eaffa85bc728 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1685,6 +1685,8 @@ static const struct pci_device_id pci_ids[] = {
 	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
 	SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
 	SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
+	SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
+	SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
 	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
 	/* Generic SD host controller */
 	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
new file mode 100644
index 000000000000..5eea8d70a85d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Genesys Logic, Inc.
+ *
+ * Authors: Ben Chuang <ben.chuang@genesyslogic.com.tw>
+ *
+ * Version: v0.9.0 (2019-08-08)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/pci.h>
+#include <linux/mmc/mmc.h>
+#include <linux/delay.h>
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+/* Genesys Logic extra registers */
+#define SDHCI_GLI_9750_WT		0x800
+#define SDHCI_GLI_9750_WT_EN		BIT(0)
+#define GLI_9750_WT_EN_ON		0x1
+#define GLI_9750_WT_EN_OFF		0x0
+
+#define SDHCI_GLI_9750_DRIVING		0x860
+#define SDHCI_GLI_9750_DRIVING_1	GENMASK(11, 0)
+#define SDHCI_GLI_9750_DRIVING_2	GENMASK(27, 26)
+#define GLI_9750_DRIVING_1_VALUE	0xFFF
+#define GLI_9750_DRIVING_2_VALUE	0x3
+
+#define SDHCI_GLI_9750_PLL		0x864
+#define SDHCI_GLI_9750_PLL_TX2_INV	BIT(23)
+#define SDHCI_GLI_9750_PLL_TX2_DLY	GENMASK(22, 20)
+#define GLI_9750_PLL_TX2_INV_VALUE	0x1
+#define GLI_9750_PLL_TX2_DLY_VALUE	0x0
+
+#define SDHCI_GLI_9750_SW_CTRL		0x874
+#define SDHCI_GLI_9750_SW_CTRL_4	GENMASK(7, 6)
+#define GLI_9750_SW_CTRL_4_VALUE	0x3
+
+#define SDHCI_GLI_9750_MISC		0x878
+#define SDHCI_GLI_9750_MISC_TX1_INV	BIT(2)
+#define SDHCI_GLI_9750_MISC_RX_INV	BIT(3)
+#define SDHCI_GLI_9750_MISC_TX1_DLY	GENMASK(6, 4)
+#define GLI_9750_MISC_TX1_INV_VALUE	0x0
+#define GLI_9750_MISC_RX_INV_ON		0x1
+#define GLI_9750_MISC_RX_INV_OFF	0x0
+#define GLI_9750_MISC_RX_INV_VALUE	GLI_9750_MISC_RX_INV_OFF
+#define GLI_9750_MISC_TX1_DLY_VALUE	0x5
+
+#define SDHCI_GLI_9750_TUNING_CONTROL			0x540
+#define SDHCI_GLI_9750_TUNING_CONTROL_EN		BIT(4)
+#define GLI_9750_TUNING_CONTROL_EN_ON			0x1
+#define GLI_9750_TUNING_CONTROL_EN_OFF			0x0
+#define SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1		BIT(16)
+#define SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2		GENMASK(20, 19)
+#define GLI_9750_TUNING_CONTROL_GLITCH_1_VALUE		0x1
+#define GLI_9750_TUNING_CONTROL_GLITCH_2_VALUE		0x2
+
+#define SDHCI_GLI_9750_TUNING_PARAMETERS		0x544
+#define SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY		GENMASK(2, 0)
+#define GLI_9750_TUNING_PARAMETERS_RX_DLY_VALUE		0x1
+
+#define GLI_MAX_TUNING_LOOP	40
+
+/* Genesys Logic chipset */
+static inline void gl9750_wt_on(struct sdhci_host *host)
+{
+	u32 wt_value;
+	u32 wt_enable;
+
+	wt_value = sdhci_readl(host, SDHCI_GLI_9750_WT);
+	wt_enable = FIELD_GET(SDHCI_GLI_9750_WT_EN, wt_value);
+
+	if (wt_enable == GLI_9750_WT_EN_ON)
+		return;
+
+	wt_value &= ~SDHCI_GLI_9750_WT_EN;
+	wt_value |= FIELD_PREP(SDHCI_GLI_9750_WT_EN, GLI_9750_WT_EN_ON);
+
+	sdhci_writel(host, wt_value, SDHCI_GLI_9750_WT);
+}
+
+static inline void gl9750_wt_off(struct sdhci_host *host)
+{
+	u32 wt_value;
+	u32 wt_enable;
+
+	wt_value = sdhci_readl(host, SDHCI_GLI_9750_WT);
+	wt_enable = FIELD_GET(SDHCI_GLI_9750_WT_EN, wt_value);
+
+	if (wt_enable == GLI_9750_WT_EN_OFF)
+		return;
+
+	wt_value &= ~SDHCI_GLI_9750_WT_EN;
+	wt_value |= FIELD_PREP(SDHCI_GLI_9750_WT_EN, GLI_9750_WT_EN_OFF);
+
+	sdhci_writel(host, wt_value, SDHCI_GLI_9750_WT);
+}
+
+static void gli_set_9750(struct sdhci_host *host)
+{
+	u32 driving_value;
+	u32 pll_value;
+	u32 sw_ctrl_value;
+	u32 misc_value;
+	u32 parameter_value;
+	u32 control_value;
+	u16 ctrl2;
+
+	gl9750_wt_on(host);
+
+	driving_value = sdhci_readl(host, SDHCI_GLI_9750_DRIVING);
+	pll_value = sdhci_readl(host, SDHCI_GLI_9750_PLL);
+	sw_ctrl_value = sdhci_readl(host, SDHCI_GLI_9750_SW_CTRL);
+	misc_value = sdhci_readl(host, SDHCI_GLI_9750_MISC);
+	parameter_value = sdhci_readl(host, SDHCI_GLI_9750_TUNING_PARAMETERS);
+	control_value = sdhci_readl(host, SDHCI_GLI_9750_TUNING_CONTROL);
+
+	driving_value &= ~(SDHCI_GLI_9750_DRIVING_1);
+	driving_value &= ~(SDHCI_GLI_9750_DRIVING_2);
+	driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_1,
+				    GLI_9750_DRIVING_1_VALUE);
+	driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
+				    GLI_9750_DRIVING_2_VALUE);
+	sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
+
+	sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
+	sw_ctrl_value |= FIELD_PREP(SDHCI_GLI_9750_SW_CTRL_4,
+				    GLI_9750_SW_CTRL_4_VALUE);
+	sdhci_writel(host, sw_ctrl_value, SDHCI_GLI_9750_SW_CTRL);
+
+	/* reset the tuning flow after reinit and before starting tuning */
+	pll_value &= ~SDHCI_GLI_9750_PLL_TX2_INV;
+	pll_value &= ~SDHCI_GLI_9750_PLL_TX2_DLY;
+	pll_value |= FIELD_PREP(SDHCI_GLI_9750_PLL_TX2_INV,
+				GLI_9750_PLL_TX2_INV_VALUE);
+	pll_value |= FIELD_PREP(SDHCI_GLI_9750_PLL_TX2_DLY,
+				GLI_9750_PLL_TX2_DLY_VALUE);
+
+	misc_value &= ~SDHCI_GLI_9750_MISC_TX1_INV;
+	misc_value &= ~SDHCI_GLI_9750_MISC_RX_INV;
+	misc_value &= ~SDHCI_GLI_9750_MISC_TX1_DLY;
+	misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_TX1_INV,
+				 GLI_9750_MISC_TX1_INV_VALUE);
+	misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV,
+				 GLI_9750_MISC_RX_INV_VALUE);
+	misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_TX1_DLY,
+				 GLI_9750_MISC_TX1_DLY_VALUE);
+
+	parameter_value &= ~SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY;
+	parameter_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_PARAMETERS_RX_DLY,
+				      GLI_9750_TUNING_PARAMETERS_RX_DLY_VALUE);
+
+	control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1;
+	control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2;
+	control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_1,
+				    GLI_9750_TUNING_CONTROL_GLITCH_1_VALUE);
+	control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_GLITCH_2,
+				    GLI_9750_TUNING_CONTROL_GLITCH_2_VALUE);
+
+	sdhci_writel(host, pll_value, SDHCI_GLI_9750_PLL);
+	sdhci_writel(host, misc_value, SDHCI_GLI_9750_MISC);
+
+	/* disable tuned clk */
+	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+	ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
+	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+
+	/* enable tuning parameters control */
+	control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_EN;
+	control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_EN,
+				    GLI_9750_TUNING_CONTROL_EN_ON);
+	sdhci_writel(host, control_value, SDHCI_GLI_9750_TUNING_CONTROL);
+
+	/* write tuning parameters */
+	sdhci_writel(host, parameter_value, SDHCI_GLI_9750_TUNING_PARAMETERS);
+
+	/* disable tuning parameters control */
+	control_value &= ~SDHCI_GLI_9750_TUNING_CONTROL_EN;
+	control_value |= FIELD_PREP(SDHCI_GLI_9750_TUNING_CONTROL_EN,
+				    GLI_9750_TUNING_CONTROL_EN_OFF);
+	sdhci_writel(host, control_value, SDHCI_GLI_9750_TUNING_CONTROL);
+
+	/* clear tuned clk */
+	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+	ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
+	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+
+	gl9750_wt_off(host);
+}
+
+static void gli_set_9750_rx_inv(struct sdhci_host *host, bool b)
+{
+	u32 misc_value;
+
+	gl9750_wt_on(host);
+
+	misc_value = sdhci_readl(host, SDHCI_GLI_9750_MISC);
+	misc_value &= ~SDHCI_GLI_9750_MISC_RX_INV;
+	if (b) {
+		misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV,
+					 GLI_9750_MISC_RX_INV_ON);
+	} else {
+		misc_value |= FIELD_PREP(SDHCI_GLI_9750_MISC_RX_INV,
+					 GLI_9750_MISC_RX_INV_OFF);
+	}
+	sdhci_writel(host, misc_value, SDHCI_GLI_9750_MISC);
+
+	gl9750_wt_off(host);
+}
+
+static int __sdhci_execute_tuning_9750(struct sdhci_host *host, u32 opcode)
+{
+	int i;
+	int rx_inv;
+
+	for (rx_inv = 0; rx_inv < 2; rx_inv++) {
+		gli_set_9750_rx_inv(host, !!rx_inv);
+		sdhci_start_tuning(host);
+
+		for (i = 0; i < GLI_MAX_TUNING_LOOP; i++) {
+			u16 ctrl;
+
+			sdhci_send_tuning(host, opcode);
+
+			if (!host->tuning_done) {
+				sdhci_abort_tuning(host, opcode);
+				break;
+			}
+
+			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+			if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
+				if (ctrl & SDHCI_CTRL_TUNED_CLK)
+					return 0; /* Success! */
+				break;
+			}
+		}
+	}
+	if (!host->tuning_done) {
+		pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+			mmc_hostname(host->mmc));
+		return -ETIMEDOUT;
+	}
+
+	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
+		mmc_hostname(host->mmc));
+	sdhci_reset_tuning(host);
+
+	return -EAGAIN;
+}
+
+static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+	host->mmc->retune_period = 0;
+	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+		host->mmc->retune_period = host->tuning_count;
+
+	gli_set_9750(host);
+	host->tuning_err = __sdhci_execute_tuning_9750(host, opcode);
+	sdhci_end_tuning(host);
+
+	return 0;
+}
+
+static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
+{
+	struct sdhci_host *host = slot->host;
+
+	slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+	sdhci_enable_v4_mode(host);
+
+	return 0;
+}
+
+static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
+{
+	struct sdhci_host *host = slot->host;
+
+	slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+	sdhci_enable_v4_mode(host);
+
+	return 0;
+}
+
+static void sdhci_gli_voltage_switch(struct sdhci_host *host)
+{
+	/*
+	 * According to Section 3.6.1 signal voltage switch procedure in
+	 * SD Host Controller Simplified Spec. 4.20, steps 6~8 are as
+	 * follows:
+	 * (6) Set 1.8V Signal Enable in the Host Control 2 register.
+	 * (7) Wait 5ms. 1.8V voltage regulator shall be stable within this
+	 *     period.
+	 * (8) If 1.8V Signal Enable is cleared by Host Controller, go to
+	 *     step (12).
+	 *
+	 * Wait 5ms after set 1.8V signal enable in Host Control 2 register
+	 * to ensure 1.8V signal enable bit is set by GL9750/GL9755.
+	 */
+	usleep_range(5000, 5500);
+}
+
+static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
+{
+	sdhci_reset(host, mask);
+	gli_set_9750(host);
+}
+
+static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
+{
+	u32 value;
+
+	value = readl(host->ioaddr + reg);
+	if (unlikely(reg == SDHCI_MAX_CURRENT && !(value & 0xff)))
+		value |= 0xc8;
+
+	return value;
+}
+
+static const struct sdhci_ops sdhci_gl9755_ops = {
+	.set_clock		= sdhci_set_clock,
+	.enable_dma		= sdhci_pci_enable_dma,
+	.set_bus_width		= sdhci_set_bus_width,
+	.reset			= sdhci_reset,
+	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+	.voltage_switch		= sdhci_gli_voltage_switch,
+};
+
+const struct sdhci_pci_fixes sdhci_gl9755 = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_DDR50,
+	.probe_slot	= gli_probe_slot_gl9755,
+	.ops		= &sdhci_gl9755_ops,
+};
+
+static const struct sdhci_ops sdhci_gl9750_ops = {
+	.read_l			= sdhci_gl9750_readl,
+	.set_clock		= sdhci_set_clock,
+	.enable_dma		= sdhci_pci_enable_dma,
+	.set_bus_width		= sdhci_set_bus_width,
+	.reset			= sdhci_gl9750_reset,
+	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+	.voltage_switch		= sdhci_gli_voltage_switch,
+	.platform_execute_tuning = gl9750_execute_tuning,
+};
+
+const struct sdhci_pci_fixes sdhci_gl9750 = {
+	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2	= SDHCI_QUIRK2_BROKEN_DDR50,
+	.probe_slot	= gli_probe_slot_gl9750,
+	.ops		= &sdhci_gl9750_ops,
+};
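
Nearly every register access in the new driver follows the FIELD_PREP()/FIELD_GET() read-modify-write idiom: read the register, clear the field through its mask, OR in the shifted new value. A standalone model of those helpers for one invented register layout (the real driver gets them from <linux/bitfield.h>; __builtin_ctz is a GCC/Clang builtin):

	#include <stdio.h>
	#include <stdint.h>

	#define FIELD_SHIFT(mask)	(__builtin_ctz(mask))
	#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))
	#define FIELD_PREP(mask, val)	(((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))

	#define DEMO_TX_DLY	0x00000070u	/* GENMASK(6, 4), invented layout */
	#define DEMO_RX_INV	0x00000008u	/* BIT(3), invented layout */

	int main(void)
	{
		uint32_t reg = 0x000000ffu;	/* pretend sdhci_readl() result */

		reg &= ~DEMO_TX_DLY;			/* clear the field first */
		reg |= FIELD_PREP(DEMO_TX_DLY, 0x5);	/* write the new delay */
		reg &= ~DEMO_RX_INV;			/* RX invert off */

		/* prints reg=0x000000d7 tx_dly=5 rx_inv=0 */
		printf("reg=0x%08x tx_dly=%u rx_inv=%u\n", reg,
		       (unsigned)FIELD_GET(DEMO_TX_DLY, reg),
		       (unsigned)FIELD_GET(DEMO_RX_INV, reg));
		return 0;
	}
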
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 1abc9d47a4c0..558202fe64c6 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -68,6 +68,9 @@
 
 #define PCI_DEVICE_ID_SYNOPSYS_DWC_MSHC	0xc202
 
+#define PCI_DEVICE_ID_GLI_9755		0x9755
+#define PCI_DEVICE_ID_GLI_9750		0x9750
+
 /*
  * PCI device class and mask
  */
@@ -188,5 +191,7 @@ int sdhci_pci_enable_dma(struct sdhci_host *host);
 extern const struct sdhci_pci_fixes sdhci_arasan;
 extern const struct sdhci_pci_fixes sdhci_snps;
 extern const struct sdhci_pci_fixes sdhci_o2;
+extern const struct sdhci_pci_fixes sdhci_gl9750;
+extern const struct sdhci_pci_fixes sdhci_gl9755;
 
 #endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 02d8f524bb9e..7bc950520fd9 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -104,6 +105,7 @@
 
 struct sdhci_tegra_soc_data {
 	const struct sdhci_pltfm_data *pdata;
+	u64 dma_mask;
 	u32 nvquirks;
 	u8 min_tap_delay;
 	u8 max_tap_delay;
@@ -1233,11 +1235,25 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
 };
 
+static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *platform = sdhci_priv(host);
+	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
+	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
+	struct device *dev = mmc_dev(host->mmc);
+
+	if (soc->dma_mask)
+		return dma_set_mask_and_coherent(dev, soc->dma_mask);
+
+	return 0;
+}
+
 static const struct sdhci_ops tegra_sdhci_ops = {
 	.get_ro = tegra_sdhci_get_ro,
 	.read_w = tegra_sdhci_readw,
 	.write_l = tegra_sdhci_writel,
 	.set_clock = tegra_sdhci_set_clock,
+	.set_dma_mask = tegra_sdhci_set_dma_mask,
 	.set_bus_width = sdhci_set_bus_width,
 	.reset = tegra_sdhci_reset,
 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
@@ -1257,6 +1273,7 @@ static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
 
 static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
 	.pdata = &sdhci_tegra20_pdata,
+	.dma_mask = DMA_BIT_MASK(32),
 	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
 		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
 };
@@ -1283,6 +1300,7 @@ static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
 
 static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
 	.pdata = &sdhci_tegra30_pdata,
+	.dma_mask = DMA_BIT_MASK(32),
 	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
 		    NVQUIRK_ENABLE_SDR50 |
 		    NVQUIRK_ENABLE_SDR104 |
@@ -1295,6 +1313,7 @@ static const struct sdhci_ops tegra114_sdhci_ops = {
 	.write_w = tegra_sdhci_writew,
 	.write_l = tegra_sdhci_writel,
 	.set_clock = tegra_sdhci_set_clock,
+	.set_dma_mask = tegra_sdhci_set_dma_mask,
 	.set_bus_width = sdhci_set_bus_width,
 	.reset = tegra_sdhci_reset,
 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
@@ -1316,6 +1335,7 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
 
 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
 	.pdata = &sdhci_tegra114_pdata,
+	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
@@ -1325,22 +1345,13 @@ static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
 		  SDHCI_QUIRK_NO_HISPD_BIT |
 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
-		   /*
-		    * The TRM states that the SD/MMC controller found on
-		    * Tegra124 can address 34 bits (the maximum supported by
-		    * the Tegra memory controller), but tests show that DMA
-		    * to or from above 4 GiB doesn't work. This is possibly
-		    * caused by missing programming, though it's not obvious
-		    * what sequence is required. Mark 64-bit DMA broken for
-		    * now to fix this for existing users (e.g. Nyan boards).
-		    */
-		   SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 	.ops  = &tegra114_sdhci_ops,
 };
 
 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
 	.pdata = &sdhci_tegra124_pdata,
+	.dma_mask = DMA_BIT_MASK(34),
 };
 
 static const struct sdhci_ops tegra210_sdhci_ops = {
1346static const struct sdhci_ops tegra210_sdhci_ops = { 1357static const struct sdhci_ops tegra210_sdhci_ops = {
@@ -1349,6 +1360,7 @@ static const struct sdhci_ops tegra210_sdhci_ops = {
1349 .write_w = tegra210_sdhci_writew, 1360 .write_w = tegra210_sdhci_writew,
1350 .write_l = tegra_sdhci_writel, 1361 .write_l = tegra_sdhci_writel,
1351 .set_clock = tegra_sdhci_set_clock, 1362 .set_clock = tegra_sdhci_set_clock,
1363 .set_dma_mask = tegra_sdhci_set_dma_mask,
1352 .set_bus_width = sdhci_set_bus_width, 1364 .set_bus_width = sdhci_set_bus_width,
1353 .reset = tegra_sdhci_reset, 1365 .reset = tegra_sdhci_reset,
1354 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, 1366 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
@@ -1369,6 +1381,7 @@ static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
 
 static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
 	.pdata = &sdhci_tegra210_pdata,
+	.dma_mask = DMA_BIT_MASK(34),
 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
 		    NVQUIRK_HAS_PADCALIB |
 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
@@ -1383,6 +1396,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
 	.read_w = tegra_sdhci_readw,
 	.write_l = tegra_sdhci_writel,
 	.set_clock = tegra_sdhci_set_clock,
+	.set_dma_mask = tegra_sdhci_set_dma_mask,
 	.set_bus_width = sdhci_set_bus_width,
 	.reset = tegra_sdhci_reset,
 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
@@ -1398,20 +1412,13 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
 		  SDHCI_QUIRK_NO_HISPD_BIT |
 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
-		   /* SDHCI controllers on Tegra186 support 40-bit addressing.
-		    * IOVA addresses are 48-bit wide on Tegra186.
-		    * With 64-bit dma mask used for SDHCI, accesses can
-		    * be broken. Disable 64-bit dma, which would fall back
-		    * to 32-bit dma mask. Ideally 40-bit dma mask would work,
-		    * But it is not supported as of now.
-		    */
-		   SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
+	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 	.ops  = &tegra186_sdhci_ops,
 };
 
 static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
 	.pdata = &sdhci_tegra186_pdata,
+	.dma_mask = DMA_BIT_MASK(40),
 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
 		    NVQUIRK_HAS_PADCALIB |
 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
@@ -1424,6 +1431,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
 
 static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
 	.pdata = &sdhci_tegra186_pdata,
+	.dma_mask = DMA_BIT_MASK(39),
 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
 		    NVQUIRK_HAS_PADCALIB |
 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
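
The per-SoC dma_mask values above are DMA_BIT_MASK() expansions: the macro yields the largest address representable in n bits, which is also how the 32-bit entries pin DMA below 4 GiB while the 34/39/40-bit entries unlock the wider windows newer Tegra generations can address. A standalone sketch printing what each width used by this patch expands to (the macro matches the kernel's definition):

	#include <stdio.h>

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	int main(void)
	{
		static const int widths[] = { 32, 34, 39, 40 };

		for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
			printf("DMA_BIT_MASK(%d) = 0x%016llx (%llu GiB addressable)\n",
			       widths[i], DMA_BIT_MASK(widths[i]),
			       (DMA_BIT_MASK(widths[i]) + 1) >> 30);
		return 0;
	}
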
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 4b297f397326..b056400e34b1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2874,6 +2874,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
 static void sdhci_adma_show_error(struct sdhci_host *host)
 {
 	void *desc = host->adma_table;
+	dma_addr_t dma = host->adma_addr;
 
 	sdhci_dumpregs(host);
 
@@ -2881,18 +2882,21 @@ static void sdhci_adma_show_error(struct sdhci_host *host)
2881 struct sdhci_adma2_64_desc *dma_desc = desc; 2882 struct sdhci_adma2_64_desc *dma_desc = desc;
2882 2883
2883 if (host->flags & SDHCI_USE_64_BIT_DMA) 2884 if (host->flags & SDHCI_USE_64_BIT_DMA)
2884 DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 2885 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2885 desc, le32_to_cpu(dma_desc->addr_hi), 2886 (unsigned long long)dma,
2887 le32_to_cpu(dma_desc->addr_hi),
2886 le32_to_cpu(dma_desc->addr_lo), 2888 le32_to_cpu(dma_desc->addr_lo),
2887 le16_to_cpu(dma_desc->len), 2889 le16_to_cpu(dma_desc->len),
2888 le16_to_cpu(dma_desc->cmd)); 2890 le16_to_cpu(dma_desc->cmd));
2889 else 2891 else
2890 DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 2892 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2891 desc, le32_to_cpu(dma_desc->addr_lo), 2893 (unsigned long long)dma,
2894 le32_to_cpu(dma_desc->addr_lo),
2892 le16_to_cpu(dma_desc->len), 2895 le16_to_cpu(dma_desc->len),
2893 le16_to_cpu(dma_desc->cmd)); 2896 le16_to_cpu(dma_desc->cmd));
2894 2897
2895 desc += host->desc_sz; 2898 desc += host->desc_sz;
2899 dma += host->desc_sz;
2896 2900
2897 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 2901 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2898 break; 2902 break;
@@ -2968,7 +2972,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2968 != MMC_BUS_TEST_R) 2972 != MMC_BUS_TEST_R)
2969 host->data->error = -EILSEQ; 2973 host->data->error = -EILSEQ;
2970 else if (intmask & SDHCI_INT_ADMA_ERROR) { 2974 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2971 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); 2975 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2976 intmask);
2972 sdhci_adma_show_error(host); 2977 sdhci_adma_show_error(host);
2973 host->data->error = -EIO; 2978 host->data->error = -EIO;
2974 if (host->ops->adma_workaround) 2979 if (host->ops->adma_workaround)
@@ -3776,18 +3781,14 @@ int sdhci_setup_host(struct sdhci_host *host)
3776 host->flags &= ~SDHCI_USE_ADMA; 3781 host->flags &= ~SDHCI_USE_ADMA;
3777 } 3782 }
3778 3783
3779 /*
3780 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3781 * and *must* do 64-bit DMA. A driver has the opportunity to change
3782 * that during the first call to ->enable_dma(). Similarly
3783 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3784 * implement.
3785 */
3786 if (sdhci_can_64bit_dma(host)) 3784 if (sdhci_can_64bit_dma(host))
3787 host->flags |= SDHCI_USE_64_BIT_DMA; 3785 host->flags |= SDHCI_USE_64_BIT_DMA;
3788 3786
3789 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3787 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3790 ret = sdhci_set_dma_mask(host); 3788 if (host->ops->set_dma_mask)
3789 ret = host->ops->set_dma_mask(host);
3790 else
3791 ret = sdhci_set_dma_mask(host);
3791 3792
3792 if (!ret && host->ops->enable_dma) 3793 if (!ret && host->ops->enable_dma)
3793 ret = host->ops->enable_dma(host); 3794 ret = host->ops->enable_dma(host);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a29c4cd2d92e..0ed3e0eaef5f 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -622,6 +622,7 @@ struct sdhci_ops {
622 622
623 u32 (*irq)(struct sdhci_host *host, u32 intmask); 623 u32 (*irq)(struct sdhci_host *host, u32 intmask);
624 624
625 int (*set_dma_mask)(struct sdhci_host *host);
625 int (*enable_dma)(struct sdhci_host *host); 626 int (*enable_dma)(struct sdhci_host *host);
626 unsigned int (*get_max_clock)(struct sdhci_host *host); 627 unsigned int (*get_max_clock)(struct sdhci_host *host);
627 unsigned int (*get_min_clock)(struct sdhci_host *host); 628 unsigned int (*get_min_clock)(struct sdhci_host *host);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index a8d56887ec88..3e9f45aec8d1 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -392,9 +392,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
392 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub; 392 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
393 if (++(arena->freelist[lane].seq) == 4) 393 if (++(arena->freelist[lane].seq) == 4)
394 arena->freelist[lane].seq = 1; 394 arena->freelist[lane].seq = 1;
395 if (ent_e_flag(ent->old_map)) 395 if (ent_e_flag(le32_to_cpu(ent->old_map)))
396 arena->freelist[lane].has_err = 1; 396 arena->freelist[lane].has_err = 1;
397 arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map)); 397 arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
398 398
399 return ret; 399 return ret;
400} 400}
@@ -560,8 +560,8 @@ static int btt_freelist_init(struct arena_info *arena)
560 * FIXME: if error clearing fails during init, we want to make 560 * FIXME: if error clearing fails during init, we want to make
561 * the BTT read-only 561 * the BTT read-only
562 */ 562 */
563 if (ent_e_flag(log_new.old_map) && 563 if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
564 !ent_normal(log_new.old_map)) { 564 !ent_normal(le32_to_cpu(log_new.old_map))) {
565 arena->freelist[i].has_err = 1; 565 arena->freelist[i].has_err = 1;
566 ret = arena_clear_freelist_error(arena, i); 566 ret = arena_clear_freelist_error(arena, i);
567 if (ret) 567 if (ret)
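The btt change above fixes operand ordering: ent_e_flag() and ent_lba() mask bits out of a value, so they must be applied to the CPU-order integer, not to the raw little-endian encoding. A self-contained illustration of why the two orderings diverge on a big-endian host (the flag position is an assumption for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define ENT_E_MASK 0x80000000u                /* assumed flag bit */
    #define ent_e_flag(x) ((x) & ENT_E_MASK)

    /* What le32_to_cpu() amounts to on a big-endian host */
    static uint32_t le32_to_cpu_be(uint32_t le)
    {
            return __builtin_bswap32(le);
    }

    int main(void)
    {
            /* LE byte stream 00 00 00 80 (i.e. 0x80000000) read as a
             * native u32 on a big-endian machine: */
            uint32_t on_media = 0x00000080;

            printf("mask raw LE value:  %d\n", !!ent_e_flag(on_media));          /* 0: flag lost */
            printf("convert, then mask: %d\n", !!ent_e_flag(le32_to_cpu_be(on_media))); /* 1 */
            return 0;
    }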
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 75a58a6e9615..d47412dcdf38 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -180,7 +180,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
180 sector_t sector; 180 sector_t sector;
181 181
182 /* make sure device is a region */ 182 /* make sure device is a region */
183 if (!is_nd_pmem(dev)) 183 if (!is_memory(dev))
184 return 0; 184 return 0;
185 185
186 nd_region = to_nd_region(dev); 186 nd_region = to_nd_region(dev);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 43401325c874..cca0a3ba1d2c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1987,7 +1987,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
1987 nd_mapping = &nd_region->mapping[i]; 1987 nd_mapping = &nd_region->mapping[i];
1988 label_ent = list_first_entry_or_null(&nd_mapping->labels, 1988 label_ent = list_first_entry_or_null(&nd_mapping->labels,
1989 typeof(*label_ent), list); 1989 typeof(*label_ent), list);
1990 label0 = label_ent ? label_ent->label : 0; 1990 label0 = label_ent ? label_ent->label : NULL;
1991 1991
1992 if (!label0) { 1992 if (!label0) {
1993 WARN_ON(1); 1993 WARN_ON(1);
@@ -2322,8 +2322,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
2322 continue; 2322 continue;
2323 2323
2324 /* skip labels that describe extents outside of the region */ 2324 /* skip labels that describe extents outside of the region */
2325 if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end) 2325 if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
2326 continue; 2326 __le64_to_cpu(nd_label->dpa) > map_end)
2327 continue;
2327 2328
2328 i = add_namespace_resource(nd_region, nd_label, devs, count); 2329 i = add_namespace_resource(nd_region, nd_label, devs, count);
2329 if (i < 0) 2330 if (i < 0)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index e89af4b2d8e9..ee5c04070ef9 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -289,11 +289,7 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)
289struct nd_pfn *to_nd_pfn(struct device *dev); 289struct nd_pfn *to_nd_pfn(struct device *dev);
290#if IS_ENABLED(CONFIG_NVDIMM_PFN) 290#if IS_ENABLED(CONFIG_NVDIMM_PFN)
291 291
292#ifdef CONFIG_TRANSPARENT_HUGEPAGE 292#define MAX_NVDIMM_ALIGN 4
293#define PFN_DEFAULT_ALIGNMENT HPAGE_PMD_SIZE
294#else
295#define PFN_DEFAULT_ALIGNMENT PAGE_SIZE
296#endif
297 293
298int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns); 294int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
299bool is_nd_pfn(struct device *dev); 295bool is_nd_pfn(struct device *dev);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index bb9cc5cf0873..60d81fae06ee 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -103,39 +103,42 @@ static ssize_t align_show(struct device *dev,
103 return sprintf(buf, "%ld\n", nd_pfn->align); 103 return sprintf(buf, "%ld\n", nd_pfn->align);
104} 104}
105 105
106static const unsigned long *nd_pfn_supported_alignments(void) 106static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
107{ 107{
108 /*
109 * This needs to be a non-static variable because the *_SIZE
110 * macros aren't always constants.
111 */
112 const unsigned long supported_alignments[] = {
113 PAGE_SIZE,
114#ifdef CONFIG_TRANSPARENT_HUGEPAGE
115 HPAGE_PMD_SIZE,
116#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
117 HPAGE_PUD_SIZE,
118#endif
119#endif
120 0,
121 };
122 static unsigned long data[ARRAY_SIZE(supported_alignments)];
123 108
124 memcpy(data, supported_alignments, sizeof(data)); 109 alignments[0] = PAGE_SIZE;
110
111 if (has_transparent_hugepage()) {
112 alignments[1] = HPAGE_PMD_SIZE;
113 if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
114 alignments[2] = HPAGE_PUD_SIZE;
115 }
116
117 return alignments;
118}
119
120/*
121 * Use pmd mapping if supported as default alignment
122 */
123static unsigned long nd_pfn_default_alignment(void)
124{
125 125
126 return data; 126 if (has_transparent_hugepage())
127 return HPAGE_PMD_SIZE;
128 return PAGE_SIZE;
127} 129}
128 130
129static ssize_t align_store(struct device *dev, 131static ssize_t align_store(struct device *dev,
130 struct device_attribute *attr, const char *buf, size_t len) 132 struct device_attribute *attr, const char *buf, size_t len)
131{ 133{
132 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); 134 struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
135 unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
133 ssize_t rc; 136 ssize_t rc;
134 137
135 nd_device_lock(dev); 138 nd_device_lock(dev);
136 nvdimm_bus_lock(dev); 139 nvdimm_bus_lock(dev);
137 rc = nd_size_select_store(dev, buf, &nd_pfn->align, 140 rc = nd_size_select_store(dev, buf, &nd_pfn->align,
138 nd_pfn_supported_alignments()); 141 nd_pfn_supported_alignments(aligns));
139 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, 142 dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
140 buf[len - 1] == '\n' ? "" : "\n"); 143 buf[len - 1] == '\n' ? "" : "\n");
141 nvdimm_bus_unlock(dev); 144 nvdimm_bus_unlock(dev);
@@ -259,7 +262,10 @@ static DEVICE_ATTR_RO(size);
259static ssize_t supported_alignments_show(struct device *dev, 262static ssize_t supported_alignments_show(struct device *dev,
260 struct device_attribute *attr, char *buf) 263 struct device_attribute *attr, char *buf)
261{ 264{
262 return nd_size_select_show(0, nd_pfn_supported_alignments(), buf); 265 unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
266
267 return nd_size_select_show(0,
268 nd_pfn_supported_alignments(aligns), buf);
263} 269}
264static DEVICE_ATTR_RO(supported_alignments); 270static DEVICE_ATTR_RO(supported_alignments);
265 271
@@ -302,7 +308,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
302 return NULL; 308 return NULL;
303 309
304 nd_pfn->mode = PFN_MODE_NONE; 310 nd_pfn->mode = PFN_MODE_NONE;
305 nd_pfn->align = PFN_DEFAULT_ALIGNMENT; 311 nd_pfn->align = nd_pfn_default_alignment();
306 dev = &nd_pfn->dev; 312 dev = &nd_pfn->dev;
307 device_initialize(&nd_pfn->dev); 313 device_initialize(&nd_pfn->dev);
308 if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) { 314 if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
@@ -412,6 +418,21 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
412 return 0; 418 return 0;
413} 419}
414 420
421static bool nd_supported_alignment(unsigned long align)
422{
423 int i;
424 unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };
425
426 if (align == 0)
427 return false;
428
429 nd_pfn_supported_alignments(supported);
430 for (i = 0; supported[i]; i++)
431 if (align == supported[i])
432 return true;
433 return false;
434}
435
415/** 436/**
416 * nd_pfn_validate - read and validate info-block 437 * nd_pfn_validate - read and validate info-block
417 * @nd_pfn: fsdax namespace runtime state / properties 438 * @nd_pfn: fsdax namespace runtime state / properties
@@ -496,6 +517,18 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
496 return -EOPNOTSUPP; 517 return -EOPNOTSUPP;
497 } 518 }
498 519
520 /*
 521 * Check whether we support the alignment. For DAX, if the
 522 * superblock alignment is not supported, we won't initialize
523 * the device.
524 */
525 if (!nd_supported_alignment(align) &&
526 !memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
527 dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
528 "%ld:%ld\n", nd_pfn->align, align);
529 return -EOPNOTSUPP;
530 }
531
499 if (!nd_pfn->uuid) { 532 if (!nd_pfn->uuid) {
500 /* 533 /*
 501 * When probing a namespace via nd_pfn_probe() the uuid 534 * When probing a namespace via nd_pfn_probe() the uuid
@@ -639,9 +672,11 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
639 struct nd_namespace_common *ndns = nd_pfn->ndns; 672 struct nd_namespace_common *ndns = nd_pfn->ndns;
640 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); 673 struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
641 resource_size_t base = nsio->res.start + start_pad; 674 resource_size_t base = nsio->res.start + start_pad;
675 resource_size_t end = nsio->res.end - end_trunc;
642 struct vmem_altmap __altmap = { 676 struct vmem_altmap __altmap = {
643 .base_pfn = init_altmap_base(base), 677 .base_pfn = init_altmap_base(base),
644 .reserve = init_altmap_reserve(base), 678 .reserve = init_altmap_reserve(base),
679 .end_pfn = PHYS_PFN(end),
645 }; 680 };
646 681
647 memcpy(res, &nsio->res, sizeof(*res)); 682 memcpy(res, &nsio->res, sizeof(*res));
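With the static table gone, callers now supply scratch storage and the list is built against the running kernel's capabilities, so a THP-disabled boot no longer advertises huge-page alignments. Usage, as in the hunks above (zeroed slots terminate the walk):

    unsigned long aligns[MAX_NVDIMM_ALIGN] = { 0 };
    unsigned long *supported = nd_pfn_supported_alignments(aligns);
    int i;

    for (i = 0; supported[i]; i++)
            pr_debug("supported alignment: %lu\n", supported[i]);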
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 37bf8719a2a4..0f6978e72e7c 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -34,7 +34,7 @@ static int nd_region_probe(struct device *dev)
34 if (rc) 34 if (rc)
35 return rc; 35 return rc;
36 36
37 if (is_nd_pmem(&nd_region->dev)) { 37 if (is_memory(&nd_region->dev)) {
38 struct resource ndr_res; 38 struct resource ndr_res;
39 39
40 if (devm_init_badblocks(dev, &nd_region->bb)) 40 if (devm_init_badblocks(dev, &nd_region->bb))
@@ -123,7 +123,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
123 struct nd_region *nd_region = to_nd_region(dev); 123 struct nd_region *nd_region = to_nd_region(dev);
124 struct resource res; 124 struct resource res;
125 125
126 if (is_nd_pmem(&nd_region->dev)) { 126 if (is_memory(&nd_region->dev)) {
127 res.start = nd_region->ndr_start; 127 res.start = nd_region->ndr_start;
128 res.end = nd_region->ndr_start + 128 res.end = nd_region->ndr_start +
129 nd_region->ndr_size - 1; 129 nd_region->ndr_size - 1;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 3fd6b59abd33..ef423ba1a711 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -632,11 +632,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
632 if (!is_memory(dev) && a == &dev_attr_dax_seed.attr) 632 if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
633 return 0; 633 return 0;
634 634
635 if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr) 635 if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
636 return 0; 636 return 0;
637 637
638 if (a == &dev_attr_resource.attr) { 638 if (a == &dev_attr_resource.attr) {
639 if (is_nd_pmem(dev)) 639 if (is_memory(dev))
640 return 0400; 640 return 0400;
641 else 641 else
642 return 0; 642 return 0;
@@ -1168,6 +1168,9 @@ EXPORT_SYMBOL_GPL(nvdimm_has_cache);
1168 1168
1169bool is_nvdimm_sync(struct nd_region *nd_region) 1169bool is_nvdimm_sync(struct nd_region *nd_region)
1170{ 1170{
1171 if (is_nd_volatile(&nd_region->dev))
1172 return true;
1173
1171 return is_nd_pmem(&nd_region->dev) && 1174 return is_nd_pmem(&nd_region->dev) &&
1172 !test_bit(ND_REGION_ASYNC, &nd_region->flags); 1175 !test_bit(ND_REGION_ASYNC, &nd_region->flags);
1173} 1176}
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 9e45b207ff01..89b85970912d 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
177 || !nvdimm->sec.flags) 177 || !nvdimm->sec.flags)
178 return -EIO; 178 return -EIO;
179 179
180 /* No need to go further if security is disabled */
181 if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
182 return 0;
183
180 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) { 184 if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
181 dev_dbg(dev, "Security operation in progress.\n"); 185 dev_dbg(dev, "Security operation in progress.\n");
182 return -EBUSY; 186 return -EBUSY;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 108f60b46804..fd7dea36c3b6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -102,10 +102,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
102 */ 102 */
103 if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) 103 if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
104 return; 104 return;
105 revalidate_disk(ns->disk);
106 blk_set_queue_dying(ns->queue); 105 blk_set_queue_dying(ns->queue);
107 /* Forcibly unquiesce queues to avoid blocking dispatch */ 106 /* Forcibly unquiesce queues to avoid blocking dispatch */
108 blk_mq_unquiesce_queue(ns->queue); 107 blk_mq_unquiesce_queue(ns->queue);
108 /*
 109 * Revalidate after unblocking dispatchers that may be holding bd_mutex
110 */
111 revalidate_disk(ns->disk);
109} 112}
110 113
111static void nvme_queue_scan(struct nvme_ctrl *ctrl) 114static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -847,7 +850,7 @@ out:
847static int nvme_submit_user_cmd(struct request_queue *q, 850static int nvme_submit_user_cmd(struct request_queue *q,
848 struct nvme_command *cmd, void __user *ubuffer, 851 struct nvme_command *cmd, void __user *ubuffer,
849 unsigned bufflen, void __user *meta_buffer, unsigned meta_len, 852 unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
850 u32 meta_seed, u32 *result, unsigned timeout) 853 u32 meta_seed, u64 *result, unsigned timeout)
851{ 854{
852 bool write = nvme_is_write(cmd); 855 bool write = nvme_is_write(cmd);
853 struct nvme_ns *ns = q->queuedata; 856 struct nvme_ns *ns = q->queuedata;
@@ -888,7 +891,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
888 else 891 else
889 ret = nvme_req(req)->status; 892 ret = nvme_req(req)->status;
890 if (result) 893 if (result)
891 *result = le32_to_cpu(nvme_req(req)->result.u32); 894 *result = le64_to_cpu(nvme_req(req)->result.u64);
892 if (meta && !ret && !write) { 895 if (meta && !ret && !write) {
893 if (copy_to_user(meta_buffer, meta, meta_len)) 896 if (copy_to_user(meta_buffer, meta, meta_len))
894 ret = -EFAULT; 897 ret = -EFAULT;
@@ -1335,6 +1338,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1335 struct nvme_command c; 1338 struct nvme_command c;
1336 unsigned timeout = 0; 1339 unsigned timeout = 0;
1337 u32 effects; 1340 u32 effects;
1341 u64 result;
1342 int status;
1343
1344 if (!capable(CAP_SYS_ADMIN))
1345 return -EACCES;
1346 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
1347 return -EFAULT;
1348 if (cmd.flags)
1349 return -EINVAL;
1350
1351 memset(&c, 0, sizeof(c));
1352 c.common.opcode = cmd.opcode;
1353 c.common.flags = cmd.flags;
1354 c.common.nsid = cpu_to_le32(cmd.nsid);
1355 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
1356 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
1357 c.common.cdw10 = cpu_to_le32(cmd.cdw10);
1358 c.common.cdw11 = cpu_to_le32(cmd.cdw11);
1359 c.common.cdw12 = cpu_to_le32(cmd.cdw12);
1360 c.common.cdw13 = cpu_to_le32(cmd.cdw13);
1361 c.common.cdw14 = cpu_to_le32(cmd.cdw14);
1362 c.common.cdw15 = cpu_to_le32(cmd.cdw15);
1363
1364 if (cmd.timeout_ms)
1365 timeout = msecs_to_jiffies(cmd.timeout_ms);
1366
1367 effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
1368 status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
1369 (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
1370 (void __user *)(uintptr_t)cmd.metadata,
1371 cmd.metadata_len, 0, &result, timeout);
1372 nvme_passthru_end(ctrl, effects);
1373
1374 if (status >= 0) {
1375 if (put_user(result, &ucmd->result))
1376 return -EFAULT;
1377 }
1378
1379 return status;
1380}
1381
1382static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1383 struct nvme_passthru_cmd64 __user *ucmd)
1384{
1385 struct nvme_passthru_cmd64 cmd;
1386 struct nvme_command c;
1387 unsigned timeout = 0;
1388 u32 effects;
1338 int status; 1389 int status;
1339 1390
1340 if (!capable(CAP_SYS_ADMIN)) 1391 if (!capable(CAP_SYS_ADMIN))
@@ -1405,6 +1456,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
1405 srcu_read_unlock(&head->srcu, idx); 1456 srcu_read_unlock(&head->srcu, idx);
1406} 1457}
1407 1458
1459static bool is_ctrl_ioctl(unsigned int cmd)
1460{
1461 if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
1462 return true;
1463 if (is_sed_ioctl(cmd))
1464 return true;
1465 return false;
1466}
1467
1468static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
1469 void __user *argp,
1470 struct nvme_ns_head *head,
1471 int srcu_idx)
1472{
1473 struct nvme_ctrl *ctrl = ns->ctrl;
1474 int ret;
1475
1476 nvme_get_ctrl(ns->ctrl);
1477 nvme_put_ns_from_disk(head, srcu_idx);
1478
1479 switch (cmd) {
1480 case NVME_IOCTL_ADMIN_CMD:
1481 ret = nvme_user_cmd(ctrl, NULL, argp);
1482 break;
1483 case NVME_IOCTL_ADMIN64_CMD:
1484 ret = nvme_user_cmd64(ctrl, NULL, argp);
1485 break;
1486 default:
1487 ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1488 break;
1489 }
1490 nvme_put_ctrl(ctrl);
1491 return ret;
1492}
1493
1408static int nvme_ioctl(struct block_device *bdev, fmode_t mode, 1494static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1409 unsigned int cmd, unsigned long arg) 1495 unsigned int cmd, unsigned long arg)
1410{ 1496{
@@ -1422,20 +1508,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1422 * separately and drop the ns SRCU reference early. This avoids a 1508 * separately and drop the ns SRCU reference early. This avoids a
1423 * deadlock when deleting namespaces using the passthrough interface. 1509 * deadlock when deleting namespaces using the passthrough interface.
1424 */ 1510 */
1425 if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) { 1511 if (is_ctrl_ioctl(cmd))
1426 struct nvme_ctrl *ctrl = ns->ctrl; 1512 return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
1427
1428 nvme_get_ctrl(ns->ctrl);
1429 nvme_put_ns_from_disk(head, srcu_idx);
1430
1431 if (cmd == NVME_IOCTL_ADMIN_CMD)
1432 ret = nvme_user_cmd(ctrl, NULL, argp);
1433 else
1434 ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
1435
1436 nvme_put_ctrl(ctrl);
1437 return ret;
1438 }
1439 1513
1440 switch (cmd) { 1514 switch (cmd) {
1441 case NVME_IOCTL_ID: 1515 case NVME_IOCTL_ID:
@@ -1448,6 +1522,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
1448 case NVME_IOCTL_SUBMIT_IO: 1522 case NVME_IOCTL_SUBMIT_IO:
1449 ret = nvme_submit_io(ns, argp); 1523 ret = nvme_submit_io(ns, argp);
1450 break; 1524 break;
1525 case NVME_IOCTL_IO64_CMD:
1526 ret = nvme_user_cmd64(ns->ctrl, ns, argp);
1527 break;
1451 default: 1528 default:
1452 if (ns->ndev) 1529 if (ns->ndev)
1453 ret = nvme_nvm_ioctl(ns, cmd, arg); 1530 ret = nvme_nvm_ioctl(ns, cmd, arg);
@@ -2289,6 +2366,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
2289 .vid = 0x14a4, 2366 .vid = 0x14a4,
2290 .fr = "22301111", 2367 .fr = "22301111",
2291 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2368 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2369 },
2370 {
2371 /*
2372 * This Kingston E8FK11.T firmware version has no interrupt
2373 * after resume with actions related to suspend to idle
2374 * https://bugzilla.kernel.org/show_bug.cgi?id=204887
2375 */
2376 .vid = 0x2646,
2377 .fr = "E8FK11.T",
2378 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2292 } 2379 }
2293}; 2380};
2294 2381
@@ -2540,8 +2627,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2540 list_add_tail(&subsys->entry, &nvme_subsystems); 2627 list_add_tail(&subsys->entry, &nvme_subsystems);
2541 } 2628 }
2542 2629
2543 if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2630 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
2544 dev_name(ctrl->device))) { 2631 dev_name(ctrl->device));
2632 if (ret) {
2545 dev_err(ctrl->device, 2633 dev_err(ctrl->device,
2546 "failed to create sysfs link from subsystem.\n"); 2634 "failed to create sysfs link from subsystem.\n");
2547 goto out_put_subsystem; 2635 goto out_put_subsystem;
@@ -2838,6 +2926,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
2838 switch (cmd) { 2926 switch (cmd) {
2839 case NVME_IOCTL_ADMIN_CMD: 2927 case NVME_IOCTL_ADMIN_CMD:
2840 return nvme_user_cmd(ctrl, NULL, argp); 2928 return nvme_user_cmd(ctrl, NULL, argp);
2929 case NVME_IOCTL_ADMIN64_CMD:
2930 return nvme_user_cmd64(ctrl, NULL, argp);
2841 case NVME_IOCTL_IO_CMD: 2931 case NVME_IOCTL_IO_CMD:
2842 return nvme_dev_user_cmd(ctrl, argp); 2932 return nvme_dev_user_cmd(ctrl, argp);
2843 case NVME_IOCTL_RESET: 2933 case NVME_IOCTL_RESET:
@@ -3045,6 +3135,8 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
3045 3135
3046nvme_show_int_function(cntlid); 3136nvme_show_int_function(cntlid);
3047nvme_show_int_function(numa_node); 3137nvme_show_int_function(numa_node);
3138nvme_show_int_function(queue_count);
3139nvme_show_int_function(sqsize);
3048 3140
3049static ssize_t nvme_sysfs_delete(struct device *dev, 3141static ssize_t nvme_sysfs_delete(struct device *dev,
3050 struct device_attribute *attr, const char *buf, 3142 struct device_attribute *attr, const char *buf,
@@ -3125,6 +3217,8 @@ static struct attribute *nvme_dev_attrs[] = {
3125 &dev_attr_address.attr, 3217 &dev_attr_address.attr,
3126 &dev_attr_state.attr, 3218 &dev_attr_state.attr,
3127 &dev_attr_numa_node.attr, 3219 &dev_attr_numa_node.attr,
3220 &dev_attr_queue_count.attr,
3221 &dev_attr_sqsize.attr,
3128 NULL 3222 NULL
3129}; 3223};
3130 3224
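NVME_IOCTL_ADMIN64_CMD and NVME_IOCTL_IO64_CMD mirror the existing passthrough commands but hand back the full 64-bit completion result. A hedged userspace sketch, assuming uapi headers that include struct nvme_passthru_cmd64; the Identify opcode is only an example:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/nvme_ioctl.h>

    int main(void)
    {
            static uint8_t buf[4096];
            struct nvme_passthru_cmd64 cmd;
            int fd = open("/dev/nvme0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&cmd, 0, sizeof(cmd));
            cmd.opcode = 0x06;                    /* Identify (admin) */
            cmd.cdw10 = 1;                        /* CNS: identify controller */
            cmd.addr = (uint64_t)(uintptr_t)buf;
            cmd.data_len = sizeof(buf);

            if (!ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd))
                    printf("64-bit result: %llu\n",
                           (unsigned long long)cmd.result);
            return 0;
    }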
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b5013c101b35..38a83ef5bcd3 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -221,6 +221,7 @@ struct nvme_ctrl {
221 u16 oacs; 221 u16 oacs;
222 u16 nssa; 222 u16 nssa;
223 u16 nr_streams; 223 u16 nr_streams;
224 u16 sqsize;
224 u32 max_namespaces; 225 u32 max_namespaces;
225 atomic_t abort_limit; 226 atomic_t abort_limit;
226 u8 vwc; 227 u8 vwc;
@@ -269,7 +270,6 @@ struct nvme_ctrl {
269 u16 hmmaxd; 270 u16 hmmaxd;
270 271
271 /* Fabrics only */ 272 /* Fabrics only */
272 u16 sqsize;
273 u32 ioccsz; 273 u32 ioccsz;
274 u32 iorcsz; 274 u32 iorcsz;
275 u16 icdoff; 275 u16 icdoff;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c0808f9eb8ab..bb88681f4dc3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2946,11 +2946,21 @@ static int nvme_suspend(struct device *dev)
2946 if (ret < 0) 2946 if (ret < 0)
2947 goto unfreeze; 2947 goto unfreeze;
2948 2948
2949 /*
2950 * A saved state prevents pci pm from generically controlling the
2951 * device's power. If we're using protocol specific settings, we don't
2952 * want pci interfering.
2953 */
2954 pci_save_state(pdev);
2955
2949 ret = nvme_set_power_state(ctrl, ctrl->npss); 2956 ret = nvme_set_power_state(ctrl, ctrl->npss);
2950 if (ret < 0) 2957 if (ret < 0)
2951 goto unfreeze; 2958 goto unfreeze;
2952 2959
2953 if (ret) { 2960 if (ret) {
2961 /* discard the saved state */
2962 pci_load_saved_state(pdev, NULL);
2963
2954 /* 2964 /*
2955 * Clearing npss forces a controller reset on resume. The 2965 * Clearing npss forces a controller reset on resume. The
2956 * correct value will be rediscovered then. 2966 * correct value will be rediscovered then.
@@ -2958,14 +2968,7 @@ static int nvme_suspend(struct device *dev)
2958 nvme_dev_disable(ndev, true); 2968 nvme_dev_disable(ndev, true);
2959 ctrl->npss = 0; 2969 ctrl->npss = 0;
2960 ret = 0; 2970 ret = 0;
2961 goto unfreeze;
2962 } 2971 }
2963 /*
2964 * A saved state prevents pci pm from generically controlling the
2965 * device's power. If we're using protocol specific settings, we don't
2966 * want pci interfering.
2967 */
2968 pci_save_state(pdev);
2969unfreeze: 2972unfreeze:
2970 nvme_unfreeze(ctrl); 2973 nvme_unfreeze(ctrl);
2971 return ret; 2974 return ret;
@@ -3090,6 +3093,9 @@ static const struct pci_device_id nvme_id_table[] = {
3090 .driver_data = NVME_QUIRK_LIGHTNVM, }, 3093 .driver_data = NVME_QUIRK_LIGHTNVM, },
3091 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ 3094 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
3092 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3095 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3096 { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
3097 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3098 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3093 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 3099 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
3094 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, 3100 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
3095 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 3101 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dfa07bb9dfeb..4d280160dd3f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
427static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev) 427static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
428{ 428{
429 return min_t(u32, NVME_RDMA_MAX_SEGMENTS, 429 return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
430 ibdev->attrs.max_fast_reg_page_list_len); 430 ibdev->attrs.max_fast_reg_page_list_len - 1);
431} 431}
432 432
433static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) 433static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
437 const int cq_factor = send_wr_factor + 1; /* + RECV */ 437 const int cq_factor = send_wr_factor + 1; /* + RECV */
438 int comp_vector, idx = nvme_rdma_queue_idx(queue); 438 int comp_vector, idx = nvme_rdma_queue_idx(queue);
439 enum ib_poll_context poll_ctx; 439 enum ib_poll_context poll_ctx;
440 int ret; 440 int ret, pages_per_mr;
441 441
442 queue->device = nvme_rdma_find_get_device(queue->cm_id); 442 queue->device = nvme_rdma_find_get_device(queue->cm_id);
443 if (!queue->device) { 443 if (!queue->device) {
@@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
479 goto out_destroy_qp; 479 goto out_destroy_qp;
480 } 480 }
481 481
482 /*
483 * Currently we don't use SG_GAPS MR's so if the first entry is
484 * misaligned we'll end up using two entries for a single data page,
485 * so one additional entry is required.
486 */
487 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
482 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, 488 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
483 queue->queue_size, 489 queue->queue_size,
484 IB_MR_TYPE_MEM_REG, 490 IB_MR_TYPE_MEM_REG,
485 nvme_rdma_get_max_fr_pages(ibdev), 0); 491 pages_per_mr, 0);
486 if (ret) { 492 if (ret) {
487 dev_err(queue->ctrl->ctrl.device, 493 dev_err(queue->ctrl->ctrl.device,
488 "failed to initialize MR pool sized %d for QID %d\n", 494 "failed to initialize MR pool sized %d for QID %d\n",
@@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
614 if (!ret) { 620 if (!ret) {
615 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); 621 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
616 } else { 622 } else {
617 __nvme_rdma_stop_queue(queue); 623 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
624 __nvme_rdma_stop_queue(queue);
618 dev_info(ctrl->ctrl.device, 625 dev_info(ctrl->ctrl.device,
619 "failed to connect queue: %d ret=%d\n", idx, ret); 626 "failed to connect queue: %d ret=%d\n", idx, ret);
620 } 627 }
@@ -820,8 +827,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
820 if (error) 827 if (error)
821 goto out_stop_queue; 828 goto out_stop_queue;
822 829
823 ctrl->ctrl.max_hw_sectors = 830 ctrl->ctrl.max_segments = ctrl->max_fr_pages;
824 (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); 831 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
825 832
826 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 833 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
827 834
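The pages_per_mr bump accounts for a first scatter entry that does not start on a page boundary: N pages of payload can then straddle N + 1 pages. A tiny arithmetic check:

    #include <stdio.h>

    #define PAGE_SZ 4096ul

    int main(void)
    {
            unsigned long offset = 0x200, len = 2 * PAGE_SZ;  /* 8 KiB, misaligned start */
            unsigned long first = offset / PAGE_SZ;
            unsigned long last = (offset + len - 1) / PAGE_SZ;

            /* prints 3: two pages of data need three MR page-list entries */
            printf("entries: %lu\n", last - first + 1);
            return 0;
    }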
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4ffd5957637a..385a5212c10f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1042,7 +1042,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
1042{ 1042{
1043 struct nvme_tcp_queue *queue = 1043 struct nvme_tcp_queue *queue =
1044 container_of(w, struct nvme_tcp_queue, io_work); 1044 container_of(w, struct nvme_tcp_queue, io_work);
1045 unsigned long start = jiffies + msecs_to_jiffies(1); 1045 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1046 1046
1047 do { 1047 do {
1048 bool pending = false; 1048 bool pending = false;
@@ -1067,7 +1067,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
1067 if (!pending) 1067 if (!pending)
1068 return; 1068 return;
1069 1069
1070 } while (time_after(jiffies, start)); /* quota is exhausted */ 1070 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1071 1071
1072 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 1072 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1073} 1073}
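The io_work fix is an inverted time comparison: time_after(jiffies, start) is false the moment the loop begins, so the 1 ms quota loop ran exactly once. The corrected shape, as applied above:

    unsigned long deadline = jiffies + msecs_to_jiffies(1);

    do {
            /* ... send/receive work, setting 'pending' ... */
    } while (!time_after(jiffies, deadline));   /* loop until the quota expires */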
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index de0bff70ebb6..32008d85172b 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -11,10 +11,10 @@
11void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) 11void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
12{ 12{
13 const struct queue_limits *ql = &bdev_get_queue(bdev)->limits; 13 const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
14 /* Number of physical blocks per logical block. */ 14 /* Number of logical blocks per physical block. */
15 const u32 ppl = ql->physical_block_size / ql->logical_block_size; 15 const u32 lpp = ql->physical_block_size / ql->logical_block_size;
16 /* Physical blocks per logical block, 0's based. */ 16 /* Logical blocks per physical block, 0's based. */
17 const __le16 ppl0b = to0based(ppl); 17 const __le16 lpp0b = to0based(lpp);
18 18
19 /* 19 /*
20 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN, 20 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
25 * field from the identify controller data structure should be used. 25 * field from the identify controller data structure should be used.
26 */ 26 */
27 id->nsfeat |= 1 << 1; 27 id->nsfeat |= 1 << 1;
28 id->nawun = ppl0b; 28 id->nawun = lpp0b;
29 id->nawupf = ppl0b; 29 id->nawupf = lpp0b;
30 id->nacwu = ppl0b; 30 id->nacwu = lpp0b;
31 31
32 /* 32 /*
33 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and 33 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
36 */ 36 */
37 id->nsfeat |= 1 << 4; 37 id->nsfeat |= 1 << 4;
38 /* NPWG = Namespace Preferred Write Granularity. 0's based */ 38 /* NPWG = Namespace Preferred Write Granularity. 0's based */
39 id->npwg = ppl0b; 39 id->npwg = lpp0b;
40 /* NPWA = Namespace Preferred Write Alignment. 0's based */ 40 /* NPWA = Namespace Preferred Write Alignment. 0's based */
41 id->npwa = id->npwg; 41 id->npwa = id->npwg;
42 /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */ 42 /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
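With the corrected naming, the ratio counts logical blocks per physical block. For example, on a drive with 4096-byte physical and 512-byte logical sectors:

    const u32 lpp = 4096 / 512;             /* 8 logical blocks per physical */
    const __le16 lpp0b = to0based(lpp);     /* NAWUN/NAWUPF/NACWU report 7 */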
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index bf4f03474e89..d535080b781f 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,8 +348,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
348 348
349 return 0; 349 return 0;
350err: 350err:
351 if (cmd->req.sg_cnt) 351 sgl_free(cmd->req.sg);
352 sgl_free(cmd->req.sg);
353 return NVME_SC_INTERNAL; 352 return NVME_SC_INTERNAL;
354} 353}
355 354
@@ -554,8 +553,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
554 553
555 if (queue->nvme_sq.sqhd_disabled) { 554 if (queue->nvme_sq.sqhd_disabled) {
556 kfree(cmd->iov); 555 kfree(cmd->iov);
557 if (cmd->req.sg_cnt) 556 sgl_free(cmd->req.sg);
558 sgl_free(cmd->req.sg);
559 } 557 }
560 558
561 return 1; 559 return 1;
@@ -586,8 +584,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
586 return -EAGAIN; 584 return -EAGAIN;
587 585
588 kfree(cmd->iov); 586 kfree(cmd->iov);
589 if (cmd->req.sg_cnt) 587 sgl_free(cmd->req.sg);
590 sgl_free(cmd->req.sg);
591 cmd->queue->snd_cmd = NULL; 588 cmd->queue->snd_cmd = NULL;
592 nvmet_tcp_put_cmd(cmd); 589 nvmet_tcp_put_cmd(cmd);
593 return 1; 590 return 1;
@@ -1310,8 +1307,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
1310 nvmet_req_uninit(&cmd->req); 1307 nvmet_req_uninit(&cmd->req);
1311 nvmet_tcp_unmap_pdu_iovec(cmd); 1308 nvmet_tcp_unmap_pdu_iovec(cmd);
1312 kfree(cmd->iov); 1309 kfree(cmd->iov);
1313 if (cmd->req.sg_cnt) 1310 sgl_free(cmd->req.sg);
1314 sgl_free(cmd->req.sg);
1315} 1311}
1316 1312
1317static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) 1313static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
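The dropped sg_cnt guards rely on sgl_free() tolerating a NULL scatterlist; a sketch of why that holds, assuming the lib/scatterlist.c implementation of this era:

    /* sgl_free() -> sgl_free_order() -> sgl_free_n_order(), whose walk
     * breaks out on a NULL entry before touching any page and ends in
     * kfree(sgl), a no-op for NULL -- so the callers above can free
     * unconditionally. */
    void sgl_free(struct scatterlist *sgl)
    {
            sgl_free_order(sgl, 0);
    }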
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c
index c6d3c8427f14..b46df80ec6c3 100644
--- a/drivers/reset/reset-scmi.c
+++ b/drivers/reset/reset-scmi.c
@@ -102,6 +102,7 @@ static int scmi_reset_probe(struct scmi_device *sdev)
102 data->rcdev.owner = THIS_MODULE; 102 data->rcdev.owner = THIS_MODULE;
103 data->rcdev.of_node = np; 103 data->rcdev.of_node = np;
104 data->rcdev.nr_resets = handle->reset_ops->num_domains_get(handle); 104 data->rcdev.nr_resets = handle->reset_ops->num_domains_get(handle);
105 data->handle = handle;
105 106
106 return devm_reset_controller_register(dev, &data->rcdev); 107 return devm_reset_controller_register(dev, &data->rcdev);
107} 108}
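Without that assignment, data->handle stayed NULL and the first reset request dereferenced it. A hedged sketch of the consumer side (the container_of helper and op names follow the SCMI reset protocol but are assumptions here):

    static int scmi_reset_assert(struct reset_controller_dev *rcdev,
                                 unsigned long id)
    {
            struct scmi_reset_data *data = to_scmi_reset_data(rcdev);

            /* data->handle was NULL before probe set it above */
            return data->handle->reset_ops->assert(data->handle, id);
    }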
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fc53e1e221f0..c94184d080f8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1553,8 +1553,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
1553 if (rc == 0) { 1553 if (rc == 0) {
1554 memcpy(&private->vsq, vsq, sizeof(*vsq)); 1554 memcpy(&private->vsq, vsq, sizeof(*vsq));
1555 } else { 1555 } else {
1556 dev_warn(&device->cdev->dev, 1556 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1557 "Reading the volume storage information failed with rc=%d\n", rc); 1557 "Reading the volume storage information failed with rc=%d", rc);
1558 } 1558 }
1559 1559
1560 if (useglobal) 1560 if (useglobal)
@@ -1737,8 +1737,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
1737 if (rc == 0) { 1737 if (rc == 0) {
1738 dasd_eckd_cpy_ext_pool_data(device, lcq); 1738 dasd_eckd_cpy_ext_pool_data(device, lcq);
1739 } else { 1739 } else {
1740 dev_warn(&device->cdev->dev, 1740 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1741 "Reading the logical configuration failed with rc=%d\n", rc); 1741 "Reading the logical configuration failed with rc=%d", rc);
1742 } 1742 }
1743 1743
1744 dasd_sfree_request(cqr, cqr->memdev); 1744 dasd_sfree_request(cqr, cqr->memdev);
@@ -2020,14 +2020,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
2020 dasd_eckd_read_features(device); 2020 dasd_eckd_read_features(device);
2021 2021
2022 /* Read Volume Information */ 2022 /* Read Volume Information */
2023 rc = dasd_eckd_read_vol_info(device); 2023 dasd_eckd_read_vol_info(device);
2024 if (rc)
2025 goto out_err3;
2026 2024
2027 /* Read Extent Pool Information */ 2025 /* Read Extent Pool Information */
2028 rc = dasd_eckd_read_ext_pool_info(device); 2026 dasd_eckd_read_ext_pool_info(device);
2029 if (rc)
2030 goto out_err3;
2031 2027
2032 /* Read Device Characteristics */ 2028 /* Read Device Characteristics */
2033 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 2029 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -2059,9 +2055,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
2059 if (readonly) 2055 if (readonly)
2060 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 2056 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2061 2057
2062 if (dasd_eckd_is_ese(device))
2063 dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
2064
2065 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 2058 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2066 "with %d cylinders, %d heads, %d sectors%s\n", 2059 "with %d cylinders, %d heads, %d sectors%s\n",
2067 private->rdc_data.dev_type, 2060 private->rdc_data.dev_type,
@@ -3695,14 +3688,6 @@ static int dasd_eckd_release_space(struct dasd_device *device,
3695 return -EINVAL; 3688 return -EINVAL;
3696} 3689}
3697 3690
3698static struct dasd_ccw_req *
3699dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
3700 struct request *req, sector_t first_trk,
3701 sector_t last_trk)
3702{
3703 return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
3704}
3705
3706static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 3691static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3707 struct dasd_device *startdev, 3692 struct dasd_device *startdev,
3708 struct dasd_block *block, 3693 struct dasd_block *block,
@@ -4447,10 +4432,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4447 cmdwtd = private->features.feature[12] & 0x40; 4432 cmdwtd = private->features.feature[12] & 0x40;
4448 use_prefix = private->features.feature[8] & 0x01; 4433 use_prefix = private->features.feature[8] & 0x01;
4449 4434
4450 if (req_op(req) == REQ_OP_DISCARD)
4451 return dasd_eckd_build_cp_discard(startdev, block, req,
4452 first_trk, last_trk);
4453
4454 cqr = NULL; 4435 cqr = NULL;
4455 if (cdlspecial || dasd_page_cache) { 4436 if (cdlspecial || dasd_page_cache) {
4456 /* do nothing, just fall through to the cmd mode single case */ 4437 /* do nothing, just fall through to the cmd mode single case */
@@ -4729,14 +4710,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4729 struct dasd_block *block, 4710 struct dasd_block *block,
4730 struct request *req) 4711 struct request *req)
4731{ 4712{
4732 struct dasd_device *startdev = NULL;
4733 struct dasd_eckd_private *private; 4713 struct dasd_eckd_private *private;
4734 struct dasd_ccw_req *cqr; 4714 struct dasd_device *startdev;
4735 unsigned long flags; 4715 unsigned long flags;
4716 struct dasd_ccw_req *cqr;
4736 4717
4737 /* Discard requests can only be processed on base devices */ 4718 startdev = dasd_alias_get_start_dev(base);
4738 if (req_op(req) != REQ_OP_DISCARD)
4739 startdev = dasd_alias_get_start_dev(base);
4740 if (!startdev) 4719 if (!startdev)
4741 startdev = base; 4720 startdev = base;
4742 private = startdev->private; 4721 private = startdev->private;
@@ -5663,14 +5642,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
5663 dasd_eckd_read_features(device); 5642 dasd_eckd_read_features(device);
5664 5643
5665 /* Read Volume Information */ 5644 /* Read Volume Information */
5666 rc = dasd_eckd_read_vol_info(device); 5645 dasd_eckd_read_vol_info(device);
5667 if (rc)
5668 goto out_err2;
5669 5646
5670 /* Read Extent Pool Information */ 5647 /* Read Extent Pool Information */
5671 rc = dasd_eckd_read_ext_pool_info(device); 5648 dasd_eckd_read_ext_pool_info(device);
5672 if (rc)
5673 goto out_err2;
5674 5649
5675 /* Read Device Characteristics */ 5650 /* Read Device Characteristics */
5676 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 5651 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -6521,20 +6496,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6521 unsigned int logical_block_size = block->bp_block; 6496 unsigned int logical_block_size = block->bp_block;
6522 struct request_queue *q = block->request_queue; 6497 struct request_queue *q = block->request_queue;
6523 struct dasd_device *device = block->base; 6498 struct dasd_device *device = block->base;
6524 struct dasd_eckd_private *private;
6525 unsigned int max_discard_sectors;
6526 unsigned int max_bytes;
6527 unsigned int ext_bytes; /* Extent Size in Bytes */
6528 int recs_per_trk;
6529 int trks_per_cyl;
6530 int ext_limit;
6531 int ext_size; /* Extent Size in Cylinders */
6532 int max; 6499 int max;
6533 6500
6534 private = device->private;
6535 trks_per_cyl = private->rdc_data.trk_per_cyl;
6536 recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
6537
6538 if (device->features & DASD_FEATURE_USERAW) { 6501 if (device->features & DASD_FEATURE_USERAW) {
6539 /* 6502 /*
6540 * the max_blocks value for raw_track access is 256 6503 * the max_blocks value for raw_track access is 256
@@ -6555,28 +6518,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6555 /* With page sized segments each segment can be translated into one idaw/tidaw */ 6518 /* With page sized segments each segment can be translated into one idaw/tidaw */
6556 blk_queue_max_segment_size(q, PAGE_SIZE); 6519 blk_queue_max_segment_size(q, PAGE_SIZE);
6557 blk_queue_segment_boundary(q, PAGE_SIZE - 1); 6520 blk_queue_segment_boundary(q, PAGE_SIZE - 1);
6558
6559 if (dasd_eckd_is_ese(device)) {
6560 /*
6561 * Depending on the extent size, up to UINT_MAX bytes can be
6562 * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
6563 * device limits should be exceeded.
6564 */
6565 ext_size = dasd_eckd_ext_size(device);
6566 ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
6567 ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
6568 logical_block_size;
6569 max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
6570 if (max_bytes / ext_bytes > ext_limit)
6571 max_bytes = ext_bytes * ext_limit;
6572
6573 max_discard_sectors = max_bytes / 512;
6574
6575 blk_queue_max_discard_sectors(q, max_discard_sectors);
6576 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
6577 q->limits.discard_granularity = ext_bytes;
6578 q->limits.discard_alignment = ext_bytes;
6579 }
6580} 6521}
6581 6522
6582static struct ccw_driver dasd_eckd_driver = { 6523static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index f4ca1d29d61b..cd164886132f 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -113,7 +113,7 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
113 irq_ptr->qib.pfmt = qib_param_field_format; 113 irq_ptr->qib.pfmt = qib_param_field_format;
114 if (qib_param_field) 114 if (qib_param_field)
115 memcpy(irq_ptr->qib.parm, qib_param_field, 115 memcpy(irq_ptr->qib.parm, qib_param_field,
116 QDIO_MAX_BUFFERS_PER_Q); 116 sizeof(irq_ptr->qib.parm));
117 117
118 if (!input_slib_elements) 118 if (!input_slib_elements)
119 goto output; 119 goto output;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a7868c8133ee..dda274351c21 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4715,8 +4715,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
4715 4715
4716 QETH_CARD_TEXT(card, 2, "qdioest"); 4716 QETH_CARD_TEXT(card, 2, "qdioest");
4717 4717
4718 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q, 4718 qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
4719 GFP_KERNEL);
4720 if (!qib_param_field) { 4719 if (!qib_param_field) {
4721 rc = -ENOMEM; 4720 rc = -ENOMEM;
4722 goto out_free_nothing; 4721 goto out_free_nothing;
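Both hunks derive the buffer size from the qib.parm member itself, so the kzalloc() in qeth can never again be smaller than the memcpy() bound in qdio_setup. FIELD_SIZEOF keeps the two sides in lockstep:

    /* FIELD_SIZEOF(t, f) expands to sizeof(((t *)0)->f) */
    qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
    /* ... later, in qdio_setup.c, the copy uses the same bound: */
    memcpy(irq_ptr->qib.parm, qib_param_field, sizeof(irq_ptr->qib.parm));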
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 9966364a6deb..001a21abcc28 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -310,7 +310,7 @@ config DOVE_THERMAL
310 310
311config DB8500_THERMAL 311config DB8500_THERMAL
312 tristate "DB8500 thermal management" 312 tristate "DB8500 thermal management"
313 depends on MFD_DB8500_PRCMU 313 depends on MFD_DB8500_PRCMU && OF
314 default y 314 default y
315 help 315 help
316 Adds DB8500 thermal management implementation according to the thermal 316 Adds DB8500 thermal management implementation according to the thermal
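The rewrite below drops the platform-data trip table in favour of the OF thermal core, which reads trip points from the device tree; that is why the Kconfig entry now also depends on OF. A minimal registration sketch against that API (the probe wiring is an assumption; the ops table matches the one added below):

    th->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, th,
                                                  &thdev_ops);
    if (IS_ERR(th->tz)) {
            dev_err(&pdev->dev, "failed to register thermal zone sensor\n");
            return PTR_ERR(th->tz);
    }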
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index b71a999d17d6..372dbbaaafb8 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -3,9 +3,9 @@
3 * db8500_thermal.c - DB8500 Thermal Management Implementation 3 * db8500_thermal.c - DB8500 Thermal Management Implementation
4 * 4 *
5 * Copyright (C) 2012 ST-Ericsson 5 * Copyright (C) 2012 ST-Ericsson
6 * Copyright (C) 2012 Linaro Ltd. 6 * Copyright (C) 2012-2019 Linaro Ltd.
7 * 7 *
8 * Author: Hongbo Zhang <hongbo.zhang@linaro.com> 8 * Authors: Hongbo Zhang, Linus Walleij
9 */ 9 */
10 10
11#include <linux/cpu_cooling.h> 11#include <linux/cpu_cooling.h>
@@ -13,7 +13,6 @@
13#include <linux/mfd/dbx500-prcmu.h> 13#include <linux/mfd/dbx500-prcmu.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/platform_data/db8500_thermal.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include <linux/thermal.h> 18#include <linux/thermal.h>
@@ -21,453 +20,201 @@
21#define PRCMU_DEFAULT_MEASURE_TIME 0xFFF 20#define PRCMU_DEFAULT_MEASURE_TIME 0xFFF
22#define PRCMU_DEFAULT_LOW_TEMP 0 21#define PRCMU_DEFAULT_LOW_TEMP 0
23 22
23/**
24 * db8500_thermal_points - the interpolation points that trigger
25 * interrupts
26 */
27static const unsigned long db8500_thermal_points[] = {
28 15000,
29 20000,
30 25000,
31 30000,
32 35000,
33 40000,
34 45000,
35 50000,
36 55000,
37 60000,
38 65000,
39 70000,
40 75000,
41 80000,
42 /*
43 * This is where things start to get really bad for the
44 * SoC and the thermal zones should be set up to trigger
45 * critical temperature at 85000 mC so we don't get above
46 * this point.
47 */
48 85000,
49 90000,
50 95000,
51 100000,
52};
53
24struct db8500_thermal_zone { 54struct db8500_thermal_zone {
25 struct thermal_zone_device *therm_dev; 55 struct thermal_zone_device *tz;
26 struct mutex th_lock;
27 struct work_struct therm_work;
28 struct db8500_thsens_platform_data *trip_tab;
29 enum thermal_device_mode mode;
30 enum thermal_trend trend; 56 enum thermal_trend trend;
31 unsigned long cur_temp_pseudo; 57 unsigned long interpolated_temp;
32 unsigned int cur_index; 58 unsigned int cur_index;
33}; 59};
34 60
35/* Local function to check if thermal zone matches cooling devices */
36static int db8500_thermal_match_cdev(struct thermal_cooling_device *cdev,
37 struct db8500_trip_point *trip_point)
38{
39 int i;
40
41 if (!strlen(cdev->type))
42 return -EINVAL;
43
44 for (i = 0; i < COOLING_DEV_MAX; i++) {
45 if (!strcmp(trip_point->cdev_name[i], cdev->type))
46 return 0;
47 }
48
49 return -ENODEV;
50}
51
52/* Callback to bind cooling device to thermal zone */
53static int db8500_cdev_bind(struct thermal_zone_device *thermal,
54 struct thermal_cooling_device *cdev)
55{
56 struct db8500_thermal_zone *pzone = thermal->devdata;
57 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
58 unsigned long max_state, upper, lower;
59 int i, ret = -EINVAL;
60
61 cdev->ops->get_max_state(cdev, &max_state);
62
63 for (i = 0; i < ptrips->num_trips; i++) {
64 if (db8500_thermal_match_cdev(cdev, &ptrips->trip_points[i]))
65 continue;
66
67 upper = lower = i > max_state ? max_state : i;
68
69 ret = thermal_zone_bind_cooling_device(thermal, i, cdev,
70 upper, lower, THERMAL_WEIGHT_DEFAULT);
71
72 dev_info(&cdev->device, "%s bind to %d: %d-%s\n", cdev->type,
73 i, ret, ret ? "fail" : "succeed");
74 }
75
76 return ret;
77}
78
79/* Callback to unbind cooling device from thermal zone */
80static int db8500_cdev_unbind(struct thermal_zone_device *thermal,
81 struct thermal_cooling_device *cdev)
82{
83 struct db8500_thermal_zone *pzone = thermal->devdata;
84 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
85 int i, ret = -EINVAL;
86
87 for (i = 0; i < ptrips->num_trips; i++) {
88 if (db8500_thermal_match_cdev(cdev, &ptrips->trip_points[i]))
89 continue;
90
91 ret = thermal_zone_unbind_cooling_device(thermal, i, cdev);
92
93 dev_info(&cdev->device, "%s unbind from %d: %s\n", cdev->type,
94 i, ret ? "fail" : "succeed");
95 }
96
97 return ret;
98}
99
100/* Callback to get current temperature */ 61/* Callback to get current temperature */
101static int db8500_sys_get_temp(struct thermal_zone_device *thermal, int *temp) 62static int db8500_thermal_get_temp(void *data, int *temp)
102{ 63{
103 struct db8500_thermal_zone *pzone = thermal->devdata; 64 struct db8500_thermal_zone *th = data;
104 65
105 /* 66 /*
106 * TODO: There is no PRCMU interface to get temperature data currently, 67 * TODO: There is no PRCMU interface to get temperature data currently,
107 * so a pseudo temperature is returned; it works for the thermal framework 68 * so a pseudo temperature is returned; it works for the thermal framework
108 * and this will be fixed when the PRCMU interface is available. 69 * and this will be fixed when the PRCMU interface is available.
109 */ 70 */
110 *temp = pzone->cur_temp_pseudo; 71 *temp = th->interpolated_temp;
111 72
112 return 0; 73 return 0;
113} 74}
114 75
115/* Callback to get temperature changing trend */ 76/* Callback to get temperature changing trend */
116static int db8500_sys_get_trend(struct thermal_zone_device *thermal, 77static int db8500_thermal_get_trend(void *data, int trip, enum thermal_trend *trend)
117 int trip, enum thermal_trend *trend)
118{
119 struct db8500_thermal_zone *pzone = thermal->devdata;
120
121 *trend = pzone->trend;
122
123 return 0;
124}
125
126/* Callback to get thermal zone mode */
127static int db8500_sys_get_mode(struct thermal_zone_device *thermal,
128 enum thermal_device_mode *mode)
129{
130 struct db8500_thermal_zone *pzone = thermal->devdata;
131
132 mutex_lock(&pzone->th_lock);
133 *mode = pzone->mode;
134 mutex_unlock(&pzone->th_lock);
135
136 return 0;
137}
138
139/* Callback to set thermal zone mode */
140static int db8500_sys_set_mode(struct thermal_zone_device *thermal,
141 enum thermal_device_mode mode)
142{
143 struct db8500_thermal_zone *pzone = thermal->devdata;
144
145 mutex_lock(&pzone->th_lock);
146
147 pzone->mode = mode;
148 if (mode == THERMAL_DEVICE_ENABLED)
149 schedule_work(&pzone->therm_work);
150
151 mutex_unlock(&pzone->th_lock);
152
153 return 0;
154}
155
156/* Callback to get trip point type */
157static int db8500_sys_get_trip_type(struct thermal_zone_device *thermal,
158 int trip, enum thermal_trip_type *type)
159{
160 struct db8500_thermal_zone *pzone = thermal->devdata;
161 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
162
163 if (trip >= ptrips->num_trips)
164 return -EINVAL;
165
166 *type = ptrips->trip_points[trip].type;
167
168 return 0;
169}
170
171/* Callback to get trip point temperature */
172static int db8500_sys_get_trip_temp(struct thermal_zone_device *thermal,
173 int trip, int *temp)
174{ 78{
175 struct db8500_thermal_zone *pzone = thermal->devdata; 79 struct db8500_thermal_zone *th = data;
176 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
177 80
178 if (trip >= ptrips->num_trips) 81 *trend = th->trend;
179 return -EINVAL;
180
181 *temp = ptrips->trip_points[trip].temp;
182 82
183 return 0; 83 return 0;
184} 84}
185 85
186/* Callback to get critical trip point temperature */ 86static struct thermal_zone_of_device_ops thdev_ops = {
187static int db8500_sys_get_crit_temp(struct thermal_zone_device *thermal, 87 .get_temp = db8500_thermal_get_temp,
188 int *temp) 88 .get_trend = db8500_thermal_get_trend,
189{
190 struct db8500_thermal_zone *pzone = thermal->devdata;
191 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
192 int i;
193
194 for (i = ptrips->num_trips - 1; i > 0; i--) {
195 if (ptrips->trip_points[i].type == THERMAL_TRIP_CRITICAL) {
196 *temp = ptrips->trip_points[i].temp;
197 return 0;
198 }
199 }
200
201 return -EINVAL;
202}
203
204static struct thermal_zone_device_ops thdev_ops = {
205 .bind = db8500_cdev_bind,
206 .unbind = db8500_cdev_unbind,
207 .get_temp = db8500_sys_get_temp,
208 .get_trend = db8500_sys_get_trend,
209 .get_mode = db8500_sys_get_mode,
210 .set_mode = db8500_sys_set_mode,
211 .get_trip_type = db8500_sys_get_trip_type,
212 .get_trip_temp = db8500_sys_get_trip_temp,
213 .get_crit_temp = db8500_sys_get_crit_temp,
214}; 89};
215 90
216static void db8500_thermal_update_config(struct db8500_thermal_zone *pzone, 91static void db8500_thermal_update_config(struct db8500_thermal_zone *th,
217 unsigned int idx, enum thermal_trend trend, 92 unsigned int idx,
218 unsigned long next_low, unsigned long next_high) 93 enum thermal_trend trend,
94 unsigned long next_low,
95 unsigned long next_high)
219{ 96{
220 prcmu_stop_temp_sense(); 97 prcmu_stop_temp_sense();
221 98
222 pzone->cur_index = idx; 99 th->cur_index = idx;
223 pzone->cur_temp_pseudo = (next_low + next_high)/2; 100 th->interpolated_temp = (next_low + next_high)/2;
224 pzone->trend = trend; 101 th->trend = trend;
225 102
103 /*
104 * The PRCMU accept absolute temperatures in celsius so divide
105 * down the millicelsius with 1000
106 */
226 prcmu_config_hotmon((u8)(next_low/1000), (u8)(next_high/1000)); 107 prcmu_config_hotmon((u8)(next_low/1000), (u8)(next_high/1000));
227 prcmu_start_temp_sense(PRCMU_DEFAULT_MEASURE_TIME); 108 prcmu_start_temp_sense(PRCMU_DEFAULT_MEASURE_TIME);
228} 109}
229 110
230static irqreturn_t prcmu_low_irq_handler(int irq, void *irq_data) 111static irqreturn_t prcmu_low_irq_handler(int irq, void *irq_data)
231{ 112{
232 struct db8500_thermal_zone *pzone = irq_data; 113 struct db8500_thermal_zone *th = irq_data;
233 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab; 114 unsigned int idx = th->cur_index;
234 unsigned int idx = pzone->cur_index;
235 unsigned long next_low, next_high; 115 unsigned long next_low, next_high;
236 116
237 if (unlikely(idx == 0)) 117 if (idx == 0)
238 /* Meaningless for thermal management, ignoring it */ 118 /* Meaningless for thermal management, ignoring it */
239 return IRQ_HANDLED; 119 return IRQ_HANDLED;
240 120
241 if (idx == 1) { 121 if (idx == 1) {
242 next_high = ptrips->trip_points[0].temp; 122 next_high = db8500_thermal_points[0];
243 next_low = PRCMU_DEFAULT_LOW_TEMP; 123 next_low = PRCMU_DEFAULT_LOW_TEMP;
244 } else { 124 } else {
245 next_high = ptrips->trip_points[idx-1].temp; 125 next_high = db8500_thermal_points[idx - 1];
246 next_low = ptrips->trip_points[idx-2].temp; 126 next_low = db8500_thermal_points[idx - 2];
247 } 127 }
248 idx -= 1; 128 idx -= 1;
249 129
250 db8500_thermal_update_config(pzone, idx, THERMAL_TREND_DROPPING, 130 db8500_thermal_update_config(th, idx, THERMAL_TREND_DROPPING,
251 next_low, next_high); 131 next_low, next_high);
252 132 dev_dbg(&th->tz->device,
253 dev_dbg(&pzone->therm_dev->device,
254 "PRCMU set max %ld, min %ld\n", next_high, next_low); 133 "PRCMU set max %ld, min %ld\n", next_high, next_low);
255 134
256 schedule_work(&pzone->therm_work); 135 thermal_zone_device_update(th->tz, THERMAL_EVENT_UNSPECIFIED);
257 136
258 return IRQ_HANDLED; 137 return IRQ_HANDLED;
259} 138}
260 139
261static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data) 140static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
262{ 141{
263 struct db8500_thermal_zone *pzone = irq_data; 142 struct db8500_thermal_zone *th = irq_data;
264 struct db8500_thsens_platform_data *ptrips = pzone->trip_tab; 143 unsigned int idx = th->cur_index;
265 unsigned int idx = pzone->cur_index;
266 unsigned long next_low, next_high; 144 unsigned long next_low, next_high;
145 int num_points = ARRAY_SIZE(db8500_thermal_points);
267 146
268 if (idx < ptrips->num_trips - 1) { 147 if (idx < num_points - 1) {
269 next_high = ptrips->trip_points[idx+1].temp; 148 next_high = db8500_thermal_points[idx+1];
270 next_low = ptrips->trip_points[idx].temp; 149 next_low = db8500_thermal_points[idx];
271 idx += 1; 150 idx += 1;
272 151
273 db8500_thermal_update_config(pzone, idx, THERMAL_TREND_RAISING, 152 db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING,
274 next_low, next_high); 153 next_low, next_high);
275 154
276 dev_dbg(&pzone->therm_dev->device, 155 dev_info(&th->tz->device,
277 "PRCMU set max %ld, min %ld\n", next_high, next_low); 156 "PRCMU set max %ld, min %ld\n", next_high, next_low);
278 } else if (idx == ptrips->num_trips - 1) 157 } else if (idx == num_points - 1)
279 pzone->cur_temp_pseudo = ptrips->trip_points[idx].temp + 1; 158 /* So we roof out 1 degree over the max point */
159 th->interpolated_temp = db8500_thermal_points[idx] + 1;
280 160
281 schedule_work(&pzone->therm_work); 161 thermal_zone_device_update(th->tz, THERMAL_EVENT_UNSPECIFIED);
282 162
283 return IRQ_HANDLED; 163 return IRQ_HANDLED;
284} 164}
285 165
286static void db8500_thermal_work(struct work_struct *work)
287{
288 enum thermal_device_mode cur_mode;
289 struct db8500_thermal_zone *pzone;
290
291 pzone = container_of(work, struct db8500_thermal_zone, therm_work);
292
293 mutex_lock(&pzone->th_lock);
294 cur_mode = pzone->mode;
295 mutex_unlock(&pzone->th_lock);
296
297 if (cur_mode == THERMAL_DEVICE_DISABLED)
298 return;
299
300 thermal_zone_device_update(pzone->therm_dev, THERMAL_EVENT_UNSPECIFIED);
301 dev_dbg(&pzone->therm_dev->device, "thermal work finished.\n");
302}
303
304#ifdef CONFIG_OF
305static struct db8500_thsens_platform_data*
306 db8500_thermal_parse_dt(struct platform_device *pdev)
307{
308 struct db8500_thsens_platform_data *ptrips;
309 struct device_node *np = pdev->dev.of_node;
310 char prop_name[32];
311 const char *tmp_str;
312 u32 tmp_data;
313 int i, j;
314
315 ptrips = devm_kzalloc(&pdev->dev, sizeof(*ptrips), GFP_KERNEL);
316 if (!ptrips)
317 return NULL;
318
319 if (of_property_read_u32(np, "num-trips", &tmp_data))
320 goto err_parse_dt;
321
322 if (tmp_data > THERMAL_MAX_TRIPS)
323 goto err_parse_dt;
324
325 ptrips->num_trips = tmp_data;
326
327 for (i = 0; i < ptrips->num_trips; i++) {
328 sprintf(prop_name, "trip%d-temp", i);
329 if (of_property_read_u32(np, prop_name, &tmp_data))
330 goto err_parse_dt;
331
332 ptrips->trip_points[i].temp = tmp_data;
333
334 sprintf(prop_name, "trip%d-type", i);
335 if (of_property_read_string(np, prop_name, &tmp_str))
336 goto err_parse_dt;
337
338 if (!strcmp(tmp_str, "active"))
339 ptrips->trip_points[i].type = THERMAL_TRIP_ACTIVE;
340 else if (!strcmp(tmp_str, "passive"))
341 ptrips->trip_points[i].type = THERMAL_TRIP_PASSIVE;
342 else if (!strcmp(tmp_str, "hot"))
343 ptrips->trip_points[i].type = THERMAL_TRIP_HOT;
344 else if (!strcmp(tmp_str, "critical"))
345 ptrips->trip_points[i].type = THERMAL_TRIP_CRITICAL;
346 else
347 goto err_parse_dt;
348
349 sprintf(prop_name, "trip%d-cdev-num", i);
350 if (of_property_read_u32(np, prop_name, &tmp_data))
351 goto err_parse_dt;
352
353 if (tmp_data > COOLING_DEV_MAX)
354 goto err_parse_dt;
355
356 for (j = 0; j < tmp_data; j++) {
357 sprintf(prop_name, "trip%d-cdev-name%d", i, j);
358 if (of_property_read_string(np, prop_name, &tmp_str))
359 goto err_parse_dt;
360
361 if (strlen(tmp_str) >= THERMAL_NAME_LENGTH)
362 goto err_parse_dt;
363
364 strcpy(ptrips->trip_points[i].cdev_name[j], tmp_str);
365 }
366 }
367 return ptrips;
368
369err_parse_dt:
370 dev_err(&pdev->dev, "Parsing device tree data error.\n");
371 return NULL;
372}
373#else
374static inline struct db8500_thsens_platform_data*
375 db8500_thermal_parse_dt(struct platform_device *pdev)
376{
377 return NULL;
378}
379#endif
380
381static int db8500_thermal_probe(struct platform_device *pdev) 166static int db8500_thermal_probe(struct platform_device *pdev)
382{ 167{
383 struct db8500_thermal_zone *pzone = NULL; 168 struct db8500_thermal_zone *th = NULL;
384 struct db8500_thsens_platform_data *ptrips = NULL; 169 struct device *dev = &pdev->dev;
385 struct device_node *np = pdev->dev.of_node;
386 int low_irq, high_irq, ret = 0; 170 int low_irq, high_irq, ret = 0;
387 unsigned long dft_low, dft_high;
388 171
389 if (np) 172 th = devm_kzalloc(dev, sizeof(*th), GFP_KERNEL);
390 ptrips = db8500_thermal_parse_dt(pdev); 173 if (!th)
391 else
392 ptrips = dev_get_platdata(&pdev->dev);
393
394 if (!ptrips)
395 return -EINVAL;
396
397 pzone = devm_kzalloc(&pdev->dev, sizeof(*pzone), GFP_KERNEL);
398 if (!pzone)
399 return -ENOMEM; 174 return -ENOMEM;
400 175
401 mutex_init(&pzone->th_lock);
402 mutex_lock(&pzone->th_lock);
403
404 pzone->mode = THERMAL_DEVICE_DISABLED;
405 pzone->trip_tab = ptrips;
406
407 INIT_WORK(&pzone->therm_work, db8500_thermal_work);
408
409 low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW"); 176 low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
410 if (low_irq < 0) { 177 if (low_irq < 0) {
411 dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed.\n"); 178 dev_err(dev, "Get IRQ_HOTMON_LOW failed\n");
412 ret = low_irq; 179 return low_irq;
413 goto out_unlock;
414 } 180 }
415 181
416 ret = devm_request_threaded_irq(&pdev->dev, low_irq, NULL, 182 ret = devm_request_threaded_irq(dev, low_irq, NULL,
417 prcmu_low_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT, 183 prcmu_low_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
418 "dbx500_temp_low", pzone); 184 "dbx500_temp_low", th);
419 if (ret < 0) { 185 if (ret < 0) {
420 dev_err(&pdev->dev, "Failed to allocate temp low irq.\n"); 186 dev_err(dev, "failed to allocate temp low irq\n");
421 goto out_unlock; 187 return ret;
422 } 188 }
423 189
424 high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH"); 190 high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
425 if (high_irq < 0) { 191 if (high_irq < 0) {
426 dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed.\n"); 192 dev_err(dev, "Get IRQ_HOTMON_HIGH failed\n");
427 ret = high_irq; 193 return high_irq;
428 goto out_unlock;
429 } 194 }
430 195
431 ret = devm_request_threaded_irq(&pdev->dev, high_irq, NULL, 196 ret = devm_request_threaded_irq(dev, high_irq, NULL,
432 prcmu_high_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT, 197 prcmu_high_irq_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
433 "dbx500_temp_high", pzone); 198 "dbx500_temp_high", th);
434 if (ret < 0) { 199 if (ret < 0) {
435 dev_err(&pdev->dev, "Failed to allocate temp high irq.\n"); 200 dev_err(dev, "failed to allocate temp high irq\n");
436 goto out_unlock; 201 return ret;
437 } 202 }
438 203
439 pzone->therm_dev = thermal_zone_device_register("db8500_thermal_zone", 204 /* register of thermal sensor and get info from DT */
440 ptrips->num_trips, 0, pzone, &thdev_ops, NULL, 0, 0); 205 th->tz = devm_thermal_zone_of_sensor_register(dev, 0, th, &thdev_ops);
441 206 if (IS_ERR(th->tz)) {
442 if (IS_ERR(pzone->therm_dev)) { 207 dev_err(dev, "register thermal zone sensor failed\n");
443 dev_err(&pdev->dev, "Register thermal zone device failed.\n"); 208 return PTR_ERR(th->tz);
444 ret = PTR_ERR(pzone->therm_dev);
445 goto out_unlock;
446 } 209 }
447 dev_info(&pdev->dev, "Thermal zone device registered.\n"); 210 dev_info(dev, "thermal zone sensor registered\n");
448
449 dft_low = PRCMU_DEFAULT_LOW_TEMP;
450 dft_high = ptrips->trip_points[0].temp;
451
452 db8500_thermal_update_config(pzone, 0, THERMAL_TREND_STABLE,
453 dft_low, dft_high);
454
455 platform_set_drvdata(pdev, pzone);
456 pzone->mode = THERMAL_DEVICE_ENABLED;
457 211
458out_unlock: 212 /* Start measuring at the lowest point */
459 mutex_unlock(&pzone->th_lock); 213 db8500_thermal_update_config(th, 0, THERMAL_TREND_STABLE,
214 PRCMU_DEFAULT_LOW_TEMP,
215 db8500_thermal_points[0]);
460 216
461 return ret; 217 platform_set_drvdata(pdev, th);
462}
463
464static int db8500_thermal_remove(struct platform_device *pdev)
465{
466 struct db8500_thermal_zone *pzone = platform_get_drvdata(pdev);
467
468 thermal_zone_device_unregister(pzone->therm_dev);
469 cancel_work_sync(&pzone->therm_work);
470 mutex_destroy(&pzone->th_lock);
471 218
472 return 0; 219 return 0;
473} 220}
@@ -475,9 +222,6 @@ static int db8500_thermal_remove(struct platform_device *pdev)
 static int db8500_thermal_suspend(struct platform_device *pdev,
 		pm_message_t state)
 {
-	struct db8500_thermal_zone *pzone = platform_get_drvdata(pdev);
-
-	flush_work(&pzone->therm_work);
 	prcmu_stop_temp_sense();
 
 	return 0;
@@ -485,26 +229,21 @@ static int db8500_thermal_suspend(struct platform_device *pdev,
 
 static int db8500_thermal_resume(struct platform_device *pdev)
 {
-	struct db8500_thermal_zone *pzone = platform_get_drvdata(pdev);
-	struct db8500_thsens_platform_data *ptrips = pzone->trip_tab;
-	unsigned long dft_low, dft_high;
-
-	dft_low = PRCMU_DEFAULT_LOW_TEMP;
-	dft_high = ptrips->trip_points[0].temp;
+	struct db8500_thermal_zone *th = platform_get_drvdata(pdev);
 
-	db8500_thermal_update_config(pzone, 0, THERMAL_TREND_STABLE,
-		dft_low, dft_high);
+	/* Resume and start measuring at the lowest point */
+	db8500_thermal_update_config(th, 0, THERMAL_TREND_STABLE,
+				     PRCMU_DEFAULT_LOW_TEMP,
+				     db8500_thermal_points[0]);
 
 	return 0;
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id db8500_thermal_match[] = {
 	{ .compatible = "stericsson,db8500-thermal" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, db8500_thermal_match);
-#endif
 
 static struct platform_driver db8500_thermal_driver = {
 	.driver = {
@@ -514,7 +253,6 @@ static struct platform_driver db8500_thermal_driver = {
 	.probe = db8500_thermal_probe,
 	.suspend = db8500_thermal_suspend,
 	.resume = db8500_thermal_resume,
-	.remove = db8500_thermal_remove,
 };
 
 module_platform_driver(db8500_thermal_driver);
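The conversion above follows the standard of-thermal sensor pattern. As a reference, here is a minimal sketch of a sensor driver built the same way; the example_* names and the flat temperature field are hypothetical, and trip points are expected to come from a thermal-zones node in the device tree rather than from driver tables.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct example_sensor {
	struct thermal_zone_device *tz;
	int temp_mc;			/* last reading, millicelsius */
};

static int example_get_temp(void *data, int *temp)
{
	struct example_sensor *s = data;

	*temp = s->temp_mc;		/* of-thermal expects millicelsius */
	return 0;
}

static const struct thermal_zone_of_device_ops example_ops = {
	.get_temp = example_get_temp,
};

static int example_probe(struct platform_device *pdev)
{
	struct example_sensor *s;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* Sensor id 0; trips/cooling maps live in the DT thermal-zones node */
	s->tz = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, s,
						     &example_ops);
	if (IS_ERR(s->tz))
		return PTR_ERR(s->tz);

	platform_set_drvdata(pdev, s);
	return 0;
}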
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index de3cceea23bc..40524fa13533 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -53,13 +53,6 @@ static int thermal_mmio_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (IS_ERR(resource)) {
-		dev_err(&pdev->dev,
-			"fail to get platform memory resource (%ld)\n",
-			PTR_ERR(resource));
-		return PTR_ERR(resource);
-	}
-
 	sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource);
 	if (IS_ERR(sensor->mmio_base)) {
 		dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
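The deleted check above was dead code: platform_get_resource() returns NULL rather than an ERR_PTR() on failure, so IS_ERR() could never trigger, and devm_ioremap_resource() already rejects a NULL resource with -EINVAL and logs its own message. A minimal sketch of the resulting idiom, with a hypothetical example_mmio_probe():

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_mmio_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * platform_get_resource() returns NULL on failure, never an
	 * ERR_PTR(), and devm_ioremap_resource() both rejects a NULL
	 * resource (-EINVAL) and prints its own error, so a single
	 * IS_ERR() check after the ioremap covers every failure mode.
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}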
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 4e11de6cde81..5bae515c8e25 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -156,8 +156,10 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /* balloon_append: add the given page to the balloon. */
-static void __balloon_append(struct page *page)
+static void balloon_append(struct page *page)
 {
+	__SetPageOffline(page);
+
 	/* Lowmem is re-populated first, so highmem pages go at list tail. */
 	if (PageHighMem(page)) {
 		list_add_tail(&page->lru, &ballooned_pages);
@@ -169,11 +171,6 @@ static void __balloon_append(struct page *page)
 	wake_up(&balloon_wq);
 }
 
-static void balloon_append(struct page *page)
-{
-	__balloon_append(page);
-}
-
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
 static struct page *balloon_retrieve(bool require_lowmem)
 {
@@ -192,6 +189,7 @@ static struct page *balloon_retrieve(bool require_lowmem)
 	else
 		balloon_stats.balloon_low--;
 
+	__ClearPageOffline(page);
 	return page;
 }
 
@@ -377,8 +375,7 @@ static void xen_online_page(struct page *page, unsigned int order)
 	for (i = 0; i < size; i++) {
 		p = pfn_to_page(start_pfn + i);
 		__online_page_set_limits(p);
-		__SetPageOffline(p);
-		__balloon_append(p);
+		balloon_append(p);
 	}
 	mutex_unlock(&balloon_mutex);
 }
@@ -444,7 +441,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
 
 		/* Relinquish the page back to the allocator. */
-		__ClearPageOffline(page);
 		free_reserved_page(page);
 	}
 
@@ -471,7 +467,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
-		__SetPageOffline(page);
 		adjust_managed_page_count(page, -1);
 		xenmem_reservation_scrub_page(page);
 		list_add(&page->lru, &pages);
@@ -611,7 +606,6 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 	while (pgno < nr_pages) {
 		page = balloon_retrieve(true);
 		if (page) {
-			__ClearPageOffline(page);
 			pages[pgno++] = page;
 #ifdef CONFIG_XEN_HAVE_PVMMU
 			/*
@@ -653,10 +647,8 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 	mutex_lock(&balloon_mutex);
 
 	for (i = 0; i < nr_pages; i++) {
-		if (pages[i]) {
-			__SetPageOffline(pages[i]);
+		if (pages[i])
 			balloon_append(pages[i]);
-		}
 	}
 
 	balloon_stats.target_unpopulated -= nr_pages;
@@ -674,7 +666,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
 				      unsigned long pages)
 {
 	unsigned long pfn, extra_pfn_end;
-	struct page *page;
 
 	/*
 	 * If the amount of usable memory has been limited (e.g., with
@@ -684,11 +675,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
 	extra_pfn_end = min(max_pfn, start_pfn + pages);
 
 	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-		page = pfn_to_page(pfn);
 		/* totalram_pages and totalhigh_pages do not
 		   include the boot-time balloon extension, so
 		   don't subtract from it. */
-		__balloon_append(page);
+		balloon_append(pfn_to_page(pfn));
 	}
 
 	balloon_stats.total_pages += extra_pfn_end - start_pfn;
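After this cleanup the balloon list carries a simple invariant: every page parked on it is marked PG_offline by balloon_append(), and the flag is cleared in exactly one place, balloon_retrieve(). A condensed sketch of that pairing, with names shortened and the balloon lock omitted:

#include <linux/list.h>
#include <linux/mm.h>

static LIST_HEAD(example_ballooned_pages);

static void example_balloon_append(struct page *page)
{
	__SetPageOffline(page);		/* set in one place only */
	list_add_tail(&page->lru, &example_ballooned_pages);
}

static struct page *example_balloon_retrieve(void)
{
	struct page *page = list_first_entry_or_null(&example_ballooned_pages,
						     struct page, lru);
	if (page) {
		list_del(&page->lru);
		__ClearPageOffline(page);	/* cleared in one place only */
	}
	return page;
}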
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index 89d60f8e3c18..d1ff2186ebb4 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -40,7 +40,7 @@
 
 #define efi_data(op)	(op.u.efi_runtime_call)
 
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
 	struct xen_platform_op op = INIT_EFI_OP(get_time);
 
@@ -61,9 +61,8 @@ efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_time);
 
-efi_status_t xen_efi_set_time(efi_time_t *tm)
+static efi_status_t xen_efi_set_time(efi_time_t *tm)
 {
 	struct xen_platform_op op = INIT_EFI_OP(set_time);
 
@@ -75,10 +74,10 @@ efi_status_t xen_efi_set_time(efi_time_t *tm)
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_time);
 
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
-				     efi_time_t *tm)
+static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
+					    efi_bool_t *pending,
+					    efi_time_t *tm)
 {
 	struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
 
@@ -98,9 +97,8 @@ efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_wakeup_time);
 
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
 	struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
 
@@ -117,11 +115,10 @@ efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_wakeup_time);
 
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
-				  u32 *attr, unsigned long *data_size,
-				  void *data)
+static efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+					 u32 *attr, unsigned long *data_size,
+					 void *data)
 {
 	struct xen_platform_op op = INIT_EFI_OP(get_variable);
 
@@ -141,11 +138,10 @@ efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_variable);
 
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
-				       efi_char16_t *name,
-				       efi_guid_t *vendor)
+static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
+					      efi_char16_t *name,
+					      efi_guid_t *vendor)
 {
 	struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
 
@@ -165,11 +161,10 @@ efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_variable);
 
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
-				  u32 attr, unsigned long data_size,
-				  void *data)
+static efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+					 u32 attr, unsigned long data_size,
+					 void *data)
 {
 	struct xen_platform_op op = INIT_EFI_OP(set_variable);
 
@@ -186,11 +181,10 @@ efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_variable);
 
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
-					 u64 *remaining_space,
-					 u64 *max_variable_size)
+static efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
+						u64 *remaining_space,
+						u64 *max_variable_size)
 {
 	struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
 
@@ -208,9 +202,8 @@ efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_variable_info);
 
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
+static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 {
 	struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
 
@@ -221,10 +214,9 @@ efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_high_mono_count);
 
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
-				    unsigned long count, unsigned long sg_list)
+static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
+					   unsigned long count, unsigned long sg_list)
 {
 	struct xen_platform_op op = INIT_EFI_OP(update_capsule);
 
@@ -241,11 +233,9 @@ efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_update_capsule);
 
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
-					unsigned long count, u64 *max_size,
-					int *reset_type)
+static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+			unsigned long count, u64 *max_size, int *reset_type)
 {
 	struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
 
@@ -264,10 +254,9 @@ efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
 	return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
 
-void xen_efi_reset_system(int reset_type, efi_status_t status,
-			  unsigned long data_size, efi_char16_t *data)
+static void xen_efi_reset_system(int reset_type, efi_status_t status,
+				 unsigned long data_size, efi_char16_t *data)
 {
 	switch (reset_type) {
 	case EFI_RESET_COLD:
@@ -281,4 +270,25 @@ void xen_efi_reset_system(int reset_type, efi_status_t status,
 		BUG();
 	}
 }
-EXPORT_SYMBOL_GPL(xen_efi_reset_system);
+
+/*
+ * Set XEN EFI runtime services function pointers. Other fields of struct efi,
+ * e.g. efi.systab, will be set like normal EFI.
+ */
+void __init xen_efi_runtime_setup(void)
+{
+	efi.get_time = xen_efi_get_time;
+	efi.set_time = xen_efi_set_time;
+	efi.get_wakeup_time = xen_efi_get_wakeup_time;
+	efi.set_wakeup_time = xen_efi_set_wakeup_time;
+	efi.get_variable = xen_efi_get_variable;
+	efi.get_next_variable = xen_efi_get_next_variable;
+	efi.set_variable = xen_efi_set_variable;
+	efi.set_variable_nonblocking = xen_efi_set_variable;
+	efi.query_variable_info = xen_efi_query_variable_info;
+	efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+	efi.update_capsule = xen_efi_update_capsule;
+	efi.query_capsule_caps = xen_efi_query_capsule_caps;
+	efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+	efi.reset_system = xen_efi_reset_system;
+}
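With the setters now static, an architecture only has to make one call instead of wiring each efi.* pointer to an exported xen_efi_* symbol. A hedged sketch of what such a caller can look like; the xen_initial_domain() guard and the function name are illustrative, and the real arch hooks also check how EFI was detected before switching over:

#include <linux/init.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>	/* assumed home of the declaration */

static void __init example_xen_efi_init(void)
{
	if (!xen_initial_domain())
		return;

	/* Route every EFI runtime service through Xen hypercalls */
	xen_efi_runtime_setup();
}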
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 08adc590f631..597af455a522 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
 	wait_queue_head_t read_waitq;
 
 	struct kref kref;
+
+	struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
 	mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
 	struct xenbus_file_priv *u;
 	struct xenbus_transaction_holder *trans, *tmp;
 	struct watch_adapter *watch, *tmp_watch;
 	struct read_buffer *rb, *tmp_rb;
 
-	u = container_of(kref, struct xenbus_file_priv, kref);
+	u = container_of(wq, struct xenbus_file_priv, wq);
 
 	/*
 	 * No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
 	kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+	struct xenbus_file_priv *u;
+
+	/*
+	 * We might be called in xenbus_thread().
+	 * Use workqueue to avoid deadlock.
+	 */
+	u = container_of(kref, struct xenbus_file_priv, kref);
+	schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
 	struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -650,6 +665,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	INIT_LIST_HEAD(&u->watches);
 	INIT_LIST_HEAD(&u->read_buffers);
 	init_waitqueue_head(&u->read_waitq);
+	INIT_WORK(&u->wq, xenbus_worker);
 
 	mutex_init(&u->reply_mutex);
 	mutex_init(&u->msgbuffer_mutex);
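The xenbus change is an instance of a generic pattern: when kref_put() can be reached from a context that must not perform the heavy teardown itself (here, xenbus_thread(), where freeing directly could deadlock), the release callback merely schedules a work item and the real teardown runs later in process context. A self-contained sketch with a hypothetical example_obj, dropped with kref_put(&obj->kref, example_obj_release):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_obj {
	struct kref kref;
	struct work_struct free_work;
};

static void example_obj_workfn(struct work_struct *work)
{
	struct example_obj *obj =
		container_of(work, struct example_obj, free_work);

	kfree(obj);	/* the heavy teardown runs in process context */
}

/* kref release runs wherever the last kref_put() happens, so keep it light */
static void example_obj_release(struct kref *kref)
{
	struct example_obj *obj =
		container_of(kref, struct example_obj, kref);

	schedule_work(&obj->free_work);
}

static struct example_obj *example_obj_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj) {
		kref_init(&obj->kref);
		INIT_WORK(&obj->free_work, example_obj_workfn);
	}
	return obj;
}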
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index bcd1bafb0278..4150280509ff 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,13 +10,6 @@
 #include <linux/dns_resolver.h>
 #include "internal.h"
 
-const struct file_operations afs_dynroot_file_operations = {
-	.open		= dcache_dir_open,
-	.release	= dcache_dir_close,
-	.iterate_shared	= dcache_readdir,
-	.llseek		= dcache_dir_lseek,
-};
-
 /*
  * Probe to see if a cell may exist. This prevents positive dentries from
  * being created unnecessarily.
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 7b1c18c32f48..46d2d7cb461d 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -443,7 +443,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
 	inode->i_mode	= S_IFDIR | S_IRUGO | S_IXUGO;
 	if (root) {
 		inode->i_op	= &afs_dynroot_inode_operations;
-		inode->i_fop	= &afs_dynroot_file_operations;
+		inode->i_fop	= &simple_dir_operations;
 	} else {
 		inode->i_op	= &afs_autocell_inode_operations;
 	}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 9cdfabaeaa0b..759e0578012c 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -910,7 +910,6 @@ extern int afs_silly_iput(struct dentry *, struct inode *);
 /*
  * dynroot.c
  */
-extern const struct file_operations afs_dynroot_file_operations;
 extern const struct inode_operations afs_dynroot_inode_operations;
 extern const struct dentry_operations afs_dynroot_dentry_operations;
 
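simple_dir_operations from fs/libfs.c provides exactly the dcache-backed open/release/readdir/llseek set that the deleted afs_dynroot_file_operations duplicated. A sketch of the pattern for any pseudo directory inode, with a hypothetical helper name:

#include <linux/fs.h>

static struct inode *example_pseudo_dir(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
		inode->i_op = &simple_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;	/* from fs/libfs.c */
	}
	return inode;
}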
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7b32b6af322d..cceaf05aada2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3745,12 +3745,21 @@ err_unlock:
 static void set_btree_ioerr(struct page *page)
 {
 	struct extent_buffer *eb = (struct extent_buffer *)page->private;
+	struct btrfs_fs_info *fs_info;
 
 	SetPageError(page);
 	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
 		return;
 
 	/*
+	 * If we error out, we should add back the dirty_metadata_bytes
+	 * to make it consistent.
+	 */
+	fs_info = eb->fs_info;
+	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+				 eb->len, fs_info->dirty_metadata_batch);
+
+	/*
 	 * If writeback for a btree extent that doesn't belong to a log tree
 	 * failed, increment the counter transaction->eb_write_errors.
 	 * We do this because while the transaction is running and before it's
@@ -3986,6 +3995,10 @@ retry:
 		if (!ret) {
 			free_extent_buffer(eb);
 			continue;
+		} else if (ret < 0) {
+			done = 1;
+			free_extent_buffer(eb);
+			break;
 		}
 
 		ret = write_one_eb(eb, wbc, &epd);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8d3bd799ac7d..c4bb69941c77 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3166,9 +3166,6 @@ out:
 	btrfs_free_path(path);
 
 	mutex_lock(&fs_info->qgroup_rescan_lock);
-	if (!btrfs_fs_closing(fs_info))
-		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
-
 	if (err > 0 &&
 	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -3184,16 +3181,30 @@ out:
 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
+		trans = NULL;
 		btrfs_err(fs_info,
 			  "fail to start transaction for status update: %d",
 			  err);
-		goto done;
 	}
-	ret = update_qgroup_status_item(trans);
-	if (ret < 0) {
-		err = ret;
-		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
+
+	mutex_lock(&fs_info->qgroup_rescan_lock);
+	if (!btrfs_fs_closing(fs_info))
+		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+	if (trans) {
+		ret = update_qgroup_status_item(trans);
+		if (ret < 0) {
+			err = ret;
+			btrfs_err(fs_info, "fail to update qgroup status: %d",
+				  err);
+		}
 	}
+	fs_info->qgroup_rescan_running = false;
+	complete_all(&fs_info->qgroup_rescan_completion);
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+	if (!trans)
+		return;
+
 	btrfs_end_transaction(trans);
 
 	if (btrfs_fs_closing(fs_info)) {
@@ -3204,12 +3215,6 @@ out:
 	} else {
 		btrfs_err(fs_info, "qgroup scan failed with %d", err);
 	}
-
-done:
-	mutex_lock(&fs_info->qgroup_rescan_lock);
-	fs_info->qgroup_rescan_running = false;
-	mutex_unlock(&fs_info->qgroup_rescan_lock);
-	complete_all(&fs_info->qgroup_rescan_completion);
 }
 
 /*
@@ -3437,6 +3442,9 @@ cleanup:
 	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
 		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
 				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
+	/* Also free data bytes of already reserved one */
+	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
+				  orig_reserved, BTRFS_QGROUP_RSV_DATA);
 	extent_changeset_release(reserved);
 	return ret;
 }
@@ -3481,7 +3489,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
 	 * EXTENT_QGROUP_RESERVED, we won't double free.
 	 * So not need to rush.
 	 */
-	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
+	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
 			free_start, free_start + free_len - 1,
 			EXTENT_QGROUP_RESERVED, &changeset);
 	if (ret < 0)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 2f0e25afa486..00504657b602 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1435,6 +1435,13 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
 	int clear_rsv = 0;
 	int ret;
 
+	/*
+	 * The subvolume has reloc tree but the swap is finished, no need to
+	 * create/update the dead reloc tree
+	 */
+	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
+		return 0;
+
 	if (root->reloc_root) {
 		reloc_root = root->reloc_root;
 		reloc_root->last_trans = trans->transid;
@@ -2187,7 +2194,6 @@ static int clean_dirty_subvols(struct reloc_control *rc)
 			/* Merged subvolume, cleanup its reloc root */
 			struct btrfs_root *reloc_root = root->reloc_root;
 
-			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
 			list_del_init(&root->reloc_dirty_list);
 			root->reloc_root = NULL;
 			if (reloc_root) {
@@ -2196,6 +2202,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
 				if (ret2 < 0 && !ret)
 					ret = ret2;
 			}
+			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
 			btrfs_put_fs_root(root);
 		} else {
 			/* Orphan reloc tree, just clean it up */
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index b5e80563efaa..99fe9bf3fdac 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -52,7 +52,13 @@ static struct file_system_type test_type = {
 
 struct inode *btrfs_new_test_inode(void)
 {
-	return new_inode(test_mnt->mnt_sb);
+	struct inode *inode;
+
+	inode = new_inode(test_mnt->mnt_sb);
+	if (inode)
+		inode_init_owner(inode, NULL, S_IFREG);
+
+	return inode;
 }
 
 static int btrfs_init_test_fs(void)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a324480bc88b..cdd7af424033 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4063,7 +4063,13 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
 	}
 
 	num_devices = btrfs_num_devices(fs_info);
-	allowed = 0;
+
+	/*
+	 * SINGLE profile on-disk has no profile bit, but in-memory we have a
+	 * special bit for it, to make it easier to distinguish. Thus we need
+	 * to set it manually, or balance would refuse the profile.
+	 */
+	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
 	for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
 		if (num_devices >= btrfs_raid_array[i].devs_min)
 			allowed |= btrfs_raid_array[i].bg_flag;
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 6c3bd07868d7..0f0dc1c1fe41 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -57,9 +57,18 @@ struct smb_query_info {
 	/* char buffer[]; */
 } __packed;
 
+struct smb3_key_debug_info {
+	__u64	Suid;
+	__u16	cipher_type;
+	__u8	auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
+	__u8	smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
+	__u8	smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+} __packed;
+
 #define CIFS_IOCTL_MAGIC	0xCF
 #define CIFS_IOC_COPYCHUNK_FILE	_IOW(CIFS_IOCTL_MAGIC, 3, int)
 #define CIFS_IOC_SET_INTEGRITY  _IO(CIFS_IOCTL_MAGIC, 4)
 #define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
 #define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
 #define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)
+#define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info)
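A hedged userspace sketch of driving the new CIFS_DUMP_KEY ioctl on a file opened from an encrypted SMB3 mount (CAP_SYS_ADMIN is required by the handler). The struct mirrors smb3_key_debug_info above, assuming SMB3_SIGN_KEY_SIZE is 16 as in this kernel era; in real use the layout must match the kernel header exactly:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>

struct smb3_key_debug_info {
	uint64_t Suid;
	uint16_t cipher_type;
	uint8_t  auth_key[16];		/* SMB2_NTLMV2_SESSKEY_SIZE */
	uint8_t  smb3encryptionkey[16];	/* SMB3_SIGN_KEY_SIZE, assumed 16 */
	uint8_t  smb3decryptionkey[16];
} __attribute__((packed));

#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info)

int main(int argc, char **argv)
{
	struct smb3_key_debug_info info;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-smb3-mount>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, CIFS_DUMP_KEY, &info) < 0) {
		perror("CIFS_DUMP_KEY");
		return 1;
	}
	printf("session 0x%llx cipher 0x%x\n",
	       (unsigned long long)info.Suid, info.cipher_type);
	close(fd);
	return 0;
}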
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index eb428349f29a..439b99cefeb0 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -90,8 +90,39 @@ struct cifs_acl {
 	__le32 num_aces;
 } __attribute__((packed));
 
+/* ACE types - see MS-DTYP 2.4.4.1 */
+#define ACCESS_ALLOWED_ACE_TYPE	0x00
+#define ACCESS_DENIED_ACE_TYPE	0x01
+#define SYSTEM_AUDIT_ACE_TYPE	0x02
+#define SYSTEM_ALARM_ACE_TYPE	0x03
+#define ACCESS_ALLOWED_COMPOUND_ACE_TYPE 0x04
+#define ACCESS_ALLOWED_OBJECT_ACE_TYPE	0x05
+#define ACCESS_DENIED_OBJECT_ACE_TYPE	0x06
+#define SYSTEM_AUDIT_OBJECT_ACE_TYPE	0x07
+#define SYSTEM_ALARM_OBJECT_ACE_TYPE	0x08
+#define ACCESS_ALLOWED_CALLBACK_ACE_TYPE 0x09
+#define ACCESS_DENIED_CALLBACK_ACE_TYPE	0x0A
+#define ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE 0x0B
+#define ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE	0x0C
+#define SYSTEM_AUDIT_CALLBACK_ACE_TYPE	0x0D
+#define SYSTEM_ALARM_CALLBACK_ACE_TYPE	0x0E /* Reserved */
+#define SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE 0x0F
+#define SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE 0x10 /* reserved */
+#define SYSTEM_MANDATORY_LABEL_ACE_TYPE	0x11
+#define SYSTEM_RESOURCE_ATTRIBUTE_ACE_TYPE 0x12
+#define SYSTEM_SCOPED_POLICY_ID_ACE_TYPE 0x13
+
+/* ACE flags */
+#define OBJECT_INHERIT_ACE	0x01
+#define CONTAINER_INHERIT_ACE	0x02
+#define NO_PROPAGATE_INHERIT_ACE 0x04
+#define INHERIT_ONLY_ACE	0x08
+#define INHERITED_ACE		0x10
+#define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
+#define FAILED_ACCESS_ACE_FLAG	0x80
+
 struct cifs_ace {
-	__u8 type;
+	__u8 type; /* see above and MS-DTYP 2.4.4.1 */
 	__u8 flags;
 	__le16 size;
 	__le32 access_req;
@@ -99,6 +130,54 @@ struct cifs_ace {
 } __attribute__((packed));
 
 /*
+ * The current SMB3 form of security descriptor is similar to what was used for
+ * cifs (see above) but some fields are split, and fields in the struct below
+ * matches names of fields to the the spec, MS-DTYP (see sections 2.4.5 and
+ * 2.4.6). Note that "CamelCase" fields are used in this struct in order to
+ * match the MS-DTYP and MS-SMB2 specs which define the wire format.
+ */
+struct smb3_sd {
+	__u8 Revision; /* revision level, MUST be one */
+	__u8 Sbz1; /* only meaningful if 'RM' flag set below */
+	__le16 Control;
+	__le32 OffsetOwner;
+	__le32 OffsetGroup;
+	__le32 OffsetSacl;
+	__le32 OffsetDacl;
+} __packed;
+
+/* Meaning of 'Control' field flags */
+#define ACL_CONTROL_SR	0x0001	/* Self relative */
+#define ACL_CONTROL_RM	0x0002	/* Resource manager control bits */
+#define ACL_CONTROL_PS	0x0004	/* SACL protected from inherits */
+#define ACL_CONTROL_PD	0x0008	/* DACL protected from inherits */
+#define ACL_CONTROL_SI	0x0010	/* SACL Auto-Inherited */
+#define ACL_CONTROL_DI	0x0020	/* DACL Auto-Inherited */
+#define ACL_CONTROL_SC	0x0040	/* SACL computed through inheritance */
+#define ACL_CONTROL_DC	0x0080	/* DACL computed through inheritence */
+#define ACL_CONTROL_SS	0x0100	/* Create server ACL */
+#define ACL_CONTROL_DT	0x0200	/* DACL provided by trusteed source */
+#define ACL_CONTROL_SD	0x0400	/* SACL defaulted */
+#define ACL_CONTROL_SP	0x0800	/* SACL is present on object */
+#define ACL_CONTROL_DD	0x1000	/* DACL defaulted */
+#define ACL_CONTROL_DP	0x2000	/* DACL is present on object */
+#define ACL_CONTROL_GD	0x4000	/* Group was defaulted */
+#define ACL_CONTROL_OD	0x8000	/* User was defaulted */
+
+/* Meaning of AclRevision flags */
+#define ACL_REVISION	0x02 /* See section 2.4.4.1 of MS-DTYP */
+#define ACL_REVISION_DS	0x04 /* Additional AceTypes allowed */
+
+struct smb3_acl {
+	u8 AclRevision; /* revision level */
+	u8 Sbz1; /* MBZ */
+	__le16 AclSize;
+	__le16 AceCount;
+	__le16 Sbz2; /* MBZ */
+} __packed;
+
+
+/*
  * Minimum security identifier can be one for system defined Users
  * and Groups such as NULL SID and World or Built-in accounts such
  * as Administrator and Guest and consists of
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 54e204589cb9..2e960e1049db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -331,8 +331,9 @@ struct smb_version_operations {
 					umode_t mode, struct cifs_tcon *tcon,
 					const char *full_path,
 					struct cifs_sb_info *cifs_sb);
-	int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
-		     struct cifs_sb_info *);
+	int (*mkdir)(const unsigned int xid, struct inode *inode, umode_t mode,
+		     struct cifs_tcon *tcon, const char *name,
+		     struct cifs_sb_info *sb);
 	/* set info on created directory */
 	void (*mkdir_setinfo)(struct inode *, const char *,
 			      struct cifs_sb_info *, struct cifs_tcon *,
@@ -1209,6 +1210,7 @@ struct cifs_search_info {
 	bool smallBuf:1; /* so we know which buf_release function to call */
 };
 
+#define ACL_NO_MODE	-1
 struct cifs_open_parms {
 	struct cifs_tcon *tcon;
 	struct cifs_sb_info *cifs_sb;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 99b1b1ef558c..e53e9f62b87b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -372,7 +372,8 @@ extern int CIFSSMBUnixSetPathInfo(const unsigned int xid,
 				      const struct nls_table *nls_codepage,
 				      int remap);
 
-extern int CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBMkDir(const unsigned int xid, struct inode *inode,
+			umode_t mode, struct cifs_tcon *tcon,
 			const char *name, struct cifs_sb_info *cifs_sb);
 extern int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon,
 			const char *name, struct cifs_sb_info *cifs_sb);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index dbee2132e419..4f554f019a98 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1078,7 +1078,8 @@ RmDirRetry:
 }
 
 int
-CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+CIFSSMBMkDir(const unsigned int xid, struct inode *inode, umode_t mode,
+	     struct cifs_tcon *tcon, const char *name,
 	     struct cifs_sb_info *cifs_sb)
 {
 	int rc = 0;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 26cdfbf1e164..3bae2e53f0b8 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1622,13 +1622,14 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
 	}
 
 	/* BB add setting the equivalent of mode via CreateX w/ACLs */
-	rc = server->ops->mkdir(xid, tcon, full_path, cifs_sb);
+	rc = server->ops->mkdir(xid, inode, mode, tcon, full_path, cifs_sb);
 	if (rc) {
 		cifs_dbg(FYI, "cifs_mkdir returned 0x%x\n", rc);
 		d_drop(direntry);
 		goto mkdir_out;
 	}
 
+	/* TODO: skip this for smb2/smb3 */
 	rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon,
 			      xid);
 mkdir_out:
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 76ddd98b6298..1a01e108d75e 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -164,6 +164,7 @@ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
 long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 {
 	struct inode *inode = file_inode(filep);
+	struct smb3_key_debug_info pkey_inf;
 	int rc = -ENOTTY; /* strange error - but the precedent */
 	unsigned int xid;
 	struct cifsFileInfo *pSMBFile = filep->private_data;
@@ -270,6 +271,34 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 			else
 				rc = -EOPNOTSUPP;
 			break;
+		case CIFS_DUMP_KEY:
+			if (pSMBFile == NULL)
+				break;
+			if (!capable(CAP_SYS_ADMIN)) {
+				rc = -EACCES;
+				break;
+			}
+
+			tcon = tlink_tcon(pSMBFile->tlink);
+			if (!smb3_encryption_required(tcon)) {
+				rc = -EOPNOTSUPP;
+				break;
+			}
+			pkey_inf.cipher_type =
+				le16_to_cpu(tcon->ses->server->cipher_type);
+			pkey_inf.Suid = tcon->ses->Suid;
+			memcpy(pkey_inf.auth_key, tcon->ses->auth_key.response,
+					16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
+			memcpy(pkey_inf.smb3decryptionkey,
+			      tcon->ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+			memcpy(pkey_inf.smb3encryptionkey,
+			      tcon->ses->smb3encryptionkey, SMB3_SIGN_KEY_SIZE);
+			if (copy_to_user((void __user *)arg, &pkey_inf,
+					sizeof(struct smb3_key_debug_info)))
+				rc = -EFAULT;
+			else
+				rc = 0;
+			break;
 		default:
 			cifs_dbg(FYI, "unsupported ioctl\n");
 			break;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 4c764ff7edd2..85bd644f9773 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -698,7 +698,6 @@ sess_auth_lanman(struct sess_data *sess_data)
 	char *bcc_ptr;
 	struct cifs_ses *ses = sess_data->ses;
 	char lnm_session_key[CIFS_AUTH_RESP_SIZE];
-	__u32 capabilities;
 	__u16 bytes_remaining;
 
 	/* lanman 2 style sessionsetup */
@@ -709,7 +708,7 @@ sess_auth_lanman(struct sess_data *sess_data)
 
 	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
 	bcc_ptr = sess_data->iov[2].iov_base;
-	capabilities = cifs_ssetup_hdr(ses, pSMB);
+	(void)cifs_ssetup_hdr(ses, pSMB);
 
 	pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
 
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index d2a3fb7e5c8d..4121ac1163ca 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -51,7 +51,7 @@ static int
 smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 		 struct cifs_sb_info *cifs_sb, const char *full_path,
 		 __u32 desired_access, __u32 create_disposition,
-		 __u32 create_options, void *ptr, int command,
+		 __u32 create_options, umode_t mode, void *ptr, int command,
 		 struct cifsFileInfo *cfile)
 {
 	int rc;
@@ -103,6 +103,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
 		oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
 	oparms.fid = &fid;
 	oparms.reconnect = false;
+	oparms.mode = mode;
 
 	memset(&open_iov, 0, sizeof(open_iov));
 	rqst[num_rqst].rq_iov = open_iov;
@@ -478,7 +479,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 	cifs_get_readable_path(tcon, full_path, &cfile);
 	rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
 			      FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
-			      smb2_data, SMB2_OP_QUERY_INFO, cfile);
+			      ACL_NO_MODE, smb2_data, SMB2_OP_QUERY_INFO, cfile);
 	if (rc == -EOPNOTSUPP) {
 		*symlink = true;
 		create_options |= OPEN_REPARSE_POINT;
@@ -486,8 +487,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 		/* Failed on a symbolic link - query a reparse point info */
 		rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
 				      FILE_READ_ATTRIBUTES, FILE_OPEN,
-				      create_options, smb2_data,
-				      SMB2_OP_QUERY_INFO, NULL);
+				      create_options, ACL_NO_MODE,
+				      smb2_data, SMB2_OP_QUERY_INFO, NULL);
 	}
 	if (rc)
 		goto out;
@@ -499,12 +500,14 @@ out:
499} 500}
500 501
501int 502int
502smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, 503smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
504 struct cifs_tcon *tcon, const char *name,
503 struct cifs_sb_info *cifs_sb) 505 struct cifs_sb_info *cifs_sb)
504{ 506{
505 return smb2_compound_op(xid, tcon, cifs_sb, name, 507 return smb2_compound_op(xid, tcon, cifs_sb, name,
506 FILE_WRITE_ATTRIBUTES, FILE_CREATE, 508 FILE_WRITE_ATTRIBUTES, FILE_CREATE,
507 CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR, NULL); 509 CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR,
510 NULL);
508} 511}
509 512
510void 513void
@@ -525,8 +528,8 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
525 cifs_get_writable_path(tcon, name, &cfile); 528 cifs_get_writable_path(tcon, name, &cfile);
526 tmprc = smb2_compound_op(xid, tcon, cifs_sb, name, 529 tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
527 FILE_WRITE_ATTRIBUTES, FILE_CREATE, 530 FILE_WRITE_ATTRIBUTES, FILE_CREATE,
528 CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO, 531 CREATE_NOT_FILE, ACL_NO_MODE,
529 cfile); 532 &data, SMB2_OP_SET_INFO, cfile);
530 if (tmprc == 0) 533 if (tmprc == 0)
531 cifs_i->cifsAttrs = dosattrs; 534 cifs_i->cifsAttrs = dosattrs;
532} 535}
@@ -536,7 +539,7 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
536 struct cifs_sb_info *cifs_sb) 539 struct cifs_sb_info *cifs_sb)
537{ 540{
538 return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, 541 return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
539 CREATE_NOT_FILE, 542 CREATE_NOT_FILE, ACL_NO_MODE,
540 NULL, SMB2_OP_RMDIR, NULL); 543 NULL, SMB2_OP_RMDIR, NULL);
541} 544}
542 545
@@ -546,7 +549,7 @@ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
546{ 549{
547 return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, 550 return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
548 CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT, 551 CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
549 NULL, SMB2_OP_DELETE, NULL); 552 ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL);
550} 553}
551 554
552static int 555static int
@@ -564,7 +567,8 @@ smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
564 goto smb2_rename_path; 567 goto smb2_rename_path;
565 } 568 }
566 rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access, 569 rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
567 FILE_OPEN, 0, smb2_to_name, command, cfile); 570 FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name,
571 command, cfile);
568smb2_rename_path: 572smb2_rename_path:
569 kfree(smb2_to_name); 573 kfree(smb2_to_name);
570 return rc; 574 return rc;
@@ -601,8 +605,8 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
601 __le64 eof = cpu_to_le64(size); 605 __le64 eof = cpu_to_le64(size);
602 606
603 return smb2_compound_op(xid, tcon, cifs_sb, full_path, 607 return smb2_compound_op(xid, tcon, cifs_sb, full_path,
604 FILE_WRITE_DATA, FILE_OPEN, 0, &eof, 608 FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
605 SMB2_OP_SET_EOF, NULL); 609 &eof, SMB2_OP_SET_EOF, NULL);
606} 610}
607 611
608int 612int
@@ -623,8 +627,8 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
623 return PTR_ERR(tlink); 627 return PTR_ERR(tlink);
624 628
625 rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path, 629 rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path,
626 FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf, 630 FILE_WRITE_ATTRIBUTES, FILE_OPEN,
627 SMB2_OP_SET_INFO, NULL); 631 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, NULL);
628 cifs_put_tlink(tlink); 632 cifs_put_tlink(tlink);
629 return rc; 633 return rc;
630} 634}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index eaed18061314..4c0922596467 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -751,6 +751,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
751 goto oshr_exit; 751 goto oshr_exit;
752 } 752 }
753 753
754 atomic_inc(&tcon->num_remote_opens);
755
754 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; 756 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
755 oparms.fid->persistent_fid = o_rsp->PersistentFileId; 757 oparms.fid->persistent_fid = o_rsp->PersistentFileId;
756 oparms.fid->volatile_fid = o_rsp->VolatileFileId; 758 oparms.fid->volatile_fid = o_rsp->VolatileFileId;
@@ -1176,6 +1178,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1176 1178
1177 rc = compound_send_recv(xid, ses, flags, 3, rqst, 1179 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1178 resp_buftype, rsp_iov); 1180 resp_buftype, rsp_iov);
1181 /* no need to bump num_remote_opens because handle immediately closed */
1179 1182
1180 sea_exit: 1183 sea_exit:
1181 kfree(ea); 1184 kfree(ea);
@@ -1518,6 +1521,8 @@ smb2_ioctl_query_info(const unsigned int xid,
1518 resp_buftype, rsp_iov); 1521 resp_buftype, rsp_iov);
1519 if (rc) 1522 if (rc)
1520 goto iqinf_exit; 1523 goto iqinf_exit;
1524
1525 /* No need to bump num_remote_opens since handle immediately closed */
1521 if (qi.flags & PASSTHRU_FSCTL) { 1526 if (qi.flags & PASSTHRU_FSCTL) {
1522 pqi = (struct smb_query_info __user *)arg; 1527 pqi = (struct smb_query_info __user *)arg;
1523 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base; 1528 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
@@ -3328,6 +3333,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3328 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE) 3333 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3329 return; 3334 return;
3330 3335
3336 /* Check if the server granted an oplock rather than a lease */
3337 if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3338 return smb2_set_oplock_level(cinode, oplock, epoch,
3339 purge_cache);
3340
3331 if (oplock & SMB2_LEASE_READ_CACHING_HE) { 3341 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
3332 new_oplock |= CIFS_CACHE_READ_FLG; 3342 new_oplock |= CIFS_CACHE_READ_FLG;
3333 strcat(message, "R"); 3343 strcat(message, "R");
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 87066f1af12c..85f9d614d968 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -751,6 +751,8 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
751 unsigned int num = *num_iovec; 751 unsigned int num = *num_iovec;
752 752
753 iov[num].iov_base = create_posix_buf(mode); 753 iov[num].iov_base = create_posix_buf(mode);
754 if (mode == -1)
755 cifs_dbg(VFS, "illegal mode\n"); /* BB REMOVEME */
754 if (iov[num].iov_base == NULL) 756 if (iov[num].iov_base == NULL)
755 return -ENOMEM; 757 return -ENOMEM;
756 iov[num].iov_len = sizeof(struct create_posix); 758 iov[num].iov_len = sizeof(struct create_posix);
@@ -2352,6 +2354,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2352 rqst.rq_iov = iov; 2354 rqst.rq_iov = iov;
2353 rqst.rq_nvec = n_iov; 2355 rqst.rq_nvec = n_iov;
2354 2356
2357 /* no need to inc num_remote_opens because we close it just below */
2355 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE, 2358 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
2356 FILE_WRITE_ATTRIBUTES); 2359 FILE_WRITE_ATTRIBUTES);
2357 /* resource #4: response buffer */ 2360 /* resource #4: response buffer */
@@ -2416,6 +2419,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
2416 /* File attributes ignored on open (used in create though) */ 2419 /* File attributes ignored on open (used in create though) */
2417 req->FileAttributes = cpu_to_le32(file_attributes); 2420 req->FileAttributes = cpu_to_le32(file_attributes);
2418 req->ShareAccess = FILE_SHARE_ALL_LE; 2421 req->ShareAccess = FILE_SHARE_ALL_LE;
2422
2419 req->CreateDisposition = cpu_to_le32(oparms->disposition); 2423 req->CreateDisposition = cpu_to_le32(oparms->disposition);
2420 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); 2424 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
2421 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)); 2425 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
@@ -2517,6 +2521,23 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
2517 return rc; 2521 return rc;
2518 } 2522 }
2519 2523
2524 /* TODO: add handling for the mode on create */
2525 if (oparms->disposition == FILE_CREATE)
2526 cifs_dbg(VFS, "mode is 0x%x\n", oparms->mode); /* BB REMOVEME */
2527
2528 if ((oparms->disposition == FILE_CREATE) && (oparms->mode != -1)) {
2529 if (n_iov > 2) {
2530 struct create_context *ccontext =
2531 (struct create_context *)iov[n_iov-1].iov_base;
2532 ccontext->Next =
2533 cpu_to_le32(iov[n_iov-1].iov_len);
2534 }
2535
2536 /* rc = add_sd_context(iov, &n_iov, oparms->mode); */
2537 if (rc)
2538 return rc;
2539 }
2540
2520 if (n_iov > 2) { 2541 if (n_iov > 2) {
2521 struct create_context *ccontext = 2542 struct create_context *ccontext =
2522 (struct create_context *)iov[n_iov-1].iov_base; 2543 (struct create_context *)iov[n_iov-1].iov_base;
@@ -3180,7 +3201,7 @@ SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
3180 * See MS-SMB2 2.2.35 and 2.2.36 3201 * See MS-SMB2 2.2.35 and 2.2.36
3181 */ 3202 */
3182 3203
3183int 3204static int
3184SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst, 3205SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
3185 struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, 3206 struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid,
3186 u32 completion_filter, bool watch_tree) 3207 u32 completion_filter, bool watch_tree)
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 67a91b11fd59..da3a6d580808 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -84,7 +84,8 @@ extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
84 umode_t mode, struct cifs_tcon *tcon, 84 umode_t mode, struct cifs_tcon *tcon,
85 const char *full_path, 85 const char *full_path,
86 struct cifs_sb_info *cifs_sb); 86 struct cifs_sb_info *cifs_sb);
87extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, 87extern int smb2_mkdir(const unsigned int xid, struct inode *inode,
88 umode_t mode, struct cifs_tcon *tcon,
88 const char *name, struct cifs_sb_info *cifs_sb); 89 const char *name, struct cifs_sb_info *cifs_sb);
89extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path, 90extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
90 struct cifs_sb_info *cifs_sb, 91 struct cifs_sb_info *cifs_sb,
diff --git a/fs/cifs/smbfsctl.h b/fs/cifs/smbfsctl.h
index 08628e6a42ac..1ff28529cf4b 100644
--- a/fs/cifs/smbfsctl.h
+++ b/fs/cifs/smbfsctl.h
@@ -144,6 +144,17 @@
144#define IO_REPARSE_APPXSTREAM 0xC0000014 144#define IO_REPARSE_APPXSTREAM 0xC0000014
145/* NFS symlinks, Win 8/SMB3 and later */ 145/* NFS symlinks, Win 8/SMB3 and later */
146#define IO_REPARSE_TAG_NFS 0x80000014 146#define IO_REPARSE_TAG_NFS 0x80000014
147/*
148 * AzureFileSync - see
149 * https://docs.microsoft.com/en-us/azure/storage/files/storage-sync-cloud-tiering
150 */
151#define IO_REPARSE_TAG_AZ_FILE_SYNC 0x8000001e
152/* WSL reparse tags */
153#define IO_REPARSE_TAG_LX_SYMLINK 0xA000001D
154#define IO_REPARSE_TAG_AF_UNIX 0x80000023
155#define IO_REPARSE_TAG_LX_FIFO 0x80000024
156#define IO_REPARSE_TAG_LX_CHR 0x80000025
157#define IO_REPARSE_TAG_LX_BLK 0x80000026
147 158
148/* fsctl flags */ 159/* fsctl flags */
149/* If Flags is set to this value, the request is an FSCTL not ioctl request */ 160/* If Flags is set to this value, the request is an FSCTL not ioctl request */
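The WSL tags above let a client infer special-file types from reparse data. As a rough illustration only — the helper below is invented here and is not part of this patch — a mapping to POSIX file types might look like:

#include <linux/types.h>
#include <linux/stat.h>

/* Hypothetical helper: map a WSL reparse tag to a POSIX file type. */
static umode_t wsl_reparse_tag_to_mode(__u32 tag)
{
	switch (tag) {
	case IO_REPARSE_TAG_LX_SYMLINK:	return S_IFLNK;
	case IO_REPARSE_TAG_AF_UNIX:	return S_IFSOCK;
	case IO_REPARSE_TAG_LX_FIFO:	return S_IFIFO;
	case IO_REPARSE_TAG_LX_CHR:	return S_IFCHR;
	case IO_REPARSE_TAG_LX_BLK:	return S_IFBLK;
	default:			return 0;	/* not a WSL tag */
	}
}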
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 9076150758d8..db4ba8f6077e 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -31,7 +31,7 @@
31#include "cifs_fs_sb.h" 31#include "cifs_fs_sb.h"
32#include "cifs_unicode.h" 32#include "cifs_unicode.h"
33 33
34#define MAX_EA_VALUE_SIZE 65535 34#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
35#define CIFS_XATTR_CIFS_ACL "system.cifs_acl" 35#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
36#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */ 36#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
37#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */ 37#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 8a9fcbd0e8ac..fc3a8d8064f8 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -34,11 +34,15 @@ static void erofs_readendio(struct bio *bio)
34 34
35struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr) 35struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
36{ 36{
37 struct inode *const bd_inode = sb->s_bdev->bd_inode; 37 struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
38 struct address_space *const mapping = bd_inode->i_mapping; 38 struct page *page;
39 39
40 return read_cache_page_gfp(mapping, blkaddr, 40 page = read_cache_page_gfp(mapping, blkaddr,
41 mapping_gfp_constraint(mapping, ~__GFP_FS)); 41 mapping_gfp_constraint(mapping, ~__GFP_FS));
42 /* should already be PageUptodate */
43 if (!IS_ERR(page))
44 lock_page(page);
45 return page;
42} 46}
43 47
44static int erofs_map_blocks_flatmode(struct inode *inode, 48static int erofs_map_blocks_flatmode(struct inode *inode,
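Note the contract change: erofs_get_meta_page() now hands the page back locked as well as uptodate, so callers are expected to unlock and release it. A minimal caller sketch under that assumption (the helper name is invented for illustration):

/* Sketch: copy one metadata block; this helper is not part of the patch. */
static int erofs_read_meta_block(struct super_block *sb,
				 erofs_blk_t blkaddr, void *dst, size_t len)
{
	struct page *page = erofs_get_meta_page(sb, blkaddr);
	void *src;

	if (IS_ERR(page))
		return PTR_ERR(page);

	src = kmap_atomic(page);
	memcpy(dst, src, len);
	kunmap_atomic(src);

	unlock_page(page);	/* page came back locked */
	put_page(page);		/* drop the page cache reference */
	return 0;
}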
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index caf9a95173b0..0e369494f2f2 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -105,9 +105,9 @@ static int erofs_read_superblock(struct super_block *sb)
105 int ret; 105 int ret;
106 106
107 page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL); 107 page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
108 if (!page) { 108 if (IS_ERR(page)) {
109 erofs_err(sb, "cannot read erofs superblock"); 109 erofs_err(sb, "cannot read erofs superblock");
110 return -EIO; 110 return PTR_ERR(page);
111 } 111 }
112 112
113 sbi = EROFS_SB(sb); 113 sbi = EROFS_SB(sb);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 96e34c90f814..fad80c97d247 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -575,7 +575,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
575 struct erofs_map_blocks *const map = &fe->map; 575 struct erofs_map_blocks *const map = &fe->map;
576 struct z_erofs_collector *const clt = &fe->clt; 576 struct z_erofs_collector *const clt = &fe->clt;
577 const loff_t offset = page_offset(page); 577 const loff_t offset = page_offset(page);
578 bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); 578 bool tight = true;
579 579
580 enum z_erofs_cache_alloctype cache_strategy; 580 enum z_erofs_cache_alloctype cache_strategy;
581 enum z_erofs_page_type page_type; 581 enum z_erofs_page_type page_type;
@@ -628,8 +628,16 @@ restart_now:
628 preload_compressed_pages(clt, MNGD_MAPPING(sbi), 628 preload_compressed_pages(clt, MNGD_MAPPING(sbi),
629 cache_strategy, pagepool); 629 cache_strategy, pagepool);
630 630
631 tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
632hitted: 631hitted:
632 /*
633 * Ensure the current partial page belongs to this submit chain rather
634 * than other concurrent submit chains or the noio(bypass) chain since
 635 * those chains are handled asynchronously, so the page cannot be used
 636 * for inplace I/O or pagevec (it must be processed in strict order).
637 */
638 tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
639 clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
640
633 cur = end - min_t(unsigned int, offset + end - map->m_la, end); 641 cur = end - min_t(unsigned int, offset + end - map->m_la, end);
634 if (!(map->m_flags & EROFS_MAP_MAPPED)) { 642 if (!(map->m_flags & EROFS_MAP_MAPPED)) {
635 zero_user_segment(page, cur, end); 643 zero_user_segment(page, cur, end);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 123e3dee7733..516faa280ced 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4551,6 +4551,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
4551 struct buffer_head *bh; 4551 struct buffer_head *bh;
4552 struct super_block *sb = inode->i_sb; 4552 struct super_block *sb = inode->i_sb;
4553 ext4_fsblk_t block; 4553 ext4_fsblk_t block;
4554 struct blk_plug plug;
4554 int inodes_per_block, inode_offset; 4555 int inodes_per_block, inode_offset;
4555 4556
4556 iloc->bh = NULL; 4557 iloc->bh = NULL;
@@ -4639,6 +4640,7 @@ make_io:
4639 * If we need to do any I/O, try to pre-readahead extra 4640 * If we need to do any I/O, try to pre-readahead extra
4640 * blocks from the inode table. 4641 * blocks from the inode table.
4641 */ 4642 */
4643 blk_start_plug(&plug);
4642 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4644 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4643 ext4_fsblk_t b, end, table; 4645 ext4_fsblk_t b, end, table;
4644 unsigned num; 4646 unsigned num;
@@ -4669,6 +4671,7 @@ make_io:
4669 get_bh(bh); 4671 get_bh(bh);
4670 bh->b_end_io = end_buffer_read_sync; 4672 bh->b_end_io = end_buffer_read_sync;
4671 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); 4673 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
4674 blk_finish_plug(&plug);
4672 wait_on_buffer(bh); 4675 wait_on_buffer(bh);
4673 if (!buffer_uptodate(bh)) { 4676 if (!buffer_uptodate(bh)) {
4674 EXT4_ERROR_INODE_BLOCK(inode, block, 4677 EXT4_ERROR_INODE_BLOCK(inode, block,
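The new plug batches the optional readahead bios with the blocking inode-table read, so the block layer can merge and dispatch them as one unit; blk_finish_plug() flushes the batch before the wait. A condensed sketch of the pattern (helper and parameters invented for illustration):

/* Sketch only: plug a readahead burst around one synchronous read. */
static void read_block_with_readahead(struct buffer_head *bh,
				      struct buffer_head **ra, int nr_ra)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr_ra; i++)		/* optional readahead */
		if (ra[i] && !buffer_uptodate(ra[i]))
			ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &ra[i]);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
	blk_finish_plug(&plug);			/* dispatch the whole batch */
	wait_on_buffer(bh);
}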
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 0ee727485615..01263ffbc4c0 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -246,7 +246,7 @@ static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
246 * sys_open_by_handle_at: Open the file handle 246 * sys_open_by_handle_at: Open the file handle
247 * @mountdirfd: directory file descriptor 247 * @mountdirfd: directory file descriptor
248 * @handle: file handle to be opened 248 * @handle: file handle to be opened
249 * @flag: open flags. 249 * @flags: open flags.
250 * 250 *
 251 * @mountdirfd indicates the directory file descriptor 251 * @mountdirfd indicates the directory file descriptor
 252 * of the mount point. The file handle is decoded relative 252 * of the mount point. The file handle is decoded relative
diff --git a/fs/io_uring.c b/fs/io_uring.c
index aa8ac557493c..8a0381f1a43b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1892,15 +1892,15 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1892 unsigned count, req_dist, tail_index; 1892 unsigned count, req_dist, tail_index;
1893 struct io_ring_ctx *ctx = req->ctx; 1893 struct io_ring_ctx *ctx = req->ctx;
1894 struct list_head *entry; 1894 struct list_head *entry;
1895 struct timespec ts; 1895 struct timespec64 ts;
1896 1896
1897 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) 1897 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1898 return -EINVAL; 1898 return -EINVAL;
1899 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags || 1899 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
1900 sqe->len != 1) 1900 sqe->len != 1)
1901 return -EINVAL; 1901 return -EINVAL;
1902 if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr, 1902
1903 sizeof(ts))) 1903 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
1904 return -EFAULT; 1904 return -EFAULT;
1905 1905
1906 /* 1906 /*
@@ -1934,7 +1934,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1934 1934
1935 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1935 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1936 req->timeout.timer.function = io_timeout_fn; 1936 req->timeout.timer.function = io_timeout_fn;
1937 hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts), 1937 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
1938 HRTIMER_MODE_REL); 1938 HRTIMER_MODE_REL);
1939 return 0; 1939 return 0;
1940} 1940}
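With get_timespec64() the timeout SQE is read as a struct __kernel_timespec, which keeps 64-bit seconds regardless of the userspace ABI. A hedged userspace sketch of filling such an SQE — ring setup and submission are elided, and the helper is illustrative, not an existing library API:

#include <stdint.h>
#include <string.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/* Sketch: prepare a relative timeout SQE; sqe points into the SQ ring. */
static void prep_timeout_sqe(struct io_uring_sqe *sqe,
			     struct __kernel_timespec *ts, unsigned int sec)
{
	ts->tv_sec = sec;
	ts->tv_nsec = 0;

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT;
	sqe->addr = (__u64)(uintptr_t)ts;	/* read via get_timespec64() */
	sqe->len = 1;				/* the kernel requires len == 1 */
	sqe->timeout_flags = 0;			/* relative timeout */
}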
diff --git a/fs/statfs.c b/fs/statfs.c
index eea7af6f2f22..2616424012ea 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -318,19 +318,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
318static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf) 318static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
319{ 319{
320 struct compat_statfs64 buf; 320 struct compat_statfs64 buf;
321 if (sizeof(ubuf->f_bsize) == 4) { 321
322 if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen | 322 if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
323 kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL) 323 return -EOVERFLOW;
324 return -EOVERFLOW; 324
325 /* f_files and f_ffree may be -1; it's okay
326 * to stuff that into 32 bits */
327 if (kbuf->f_files != 0xffffffffffffffffULL
328 && (kbuf->f_files & 0xffffffff00000000ULL))
329 return -EOVERFLOW;
330 if (kbuf->f_ffree != 0xffffffffffffffffULL
331 && (kbuf->f_ffree & 0xffffffff00000000ULL))
332 return -EOVERFLOW;
333 }
334 memset(&buf, 0, sizeof(struct compat_statfs64)); 325 memset(&buf, 0, sizeof(struct compat_statfs64));
335 buf.f_type = kbuf->f_type; 326 buf.f_type = kbuf->f_type;
336 buf.f_bsize = kbuf->f_bsize; 327 buf.f_bsize = kbuf->f_bsize;
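The shorter check works because, in struct compat_statfs64, the big counters are already 64-bit. A sketch of the reasoning (commentary, not code from this patch):

/*
 * f_blocks, f_bfree, f_bavail, f_files and f_ffree are __u64 in
 * compat_statfs64, so they cannot truncate. Of the remaining 32-bit
 * fields, f_type, f_namelen and f_flags hold values that fit by
 * construction, which leaves the two block sizes as the only fields
 * worth testing:
 *
 *	if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
 *		return -EOVERFLOW;
 */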
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cf074bce3eb3..c94a9ff9f082 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,6 +4,13 @@
4#include <asm/types.h> 4#include <asm/types.h>
5#include <linux/bits.h> 5#include <linux/bits.h>
6 6
7/* Set bits in the first 'n' bytes when loaded from memory */
8#ifdef __LITTLE_ENDIAN
9# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
10#else
11# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
12#endif
13
7#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) 14#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
8#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) 15#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
9 16
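A quick worked example of the mask, assuming a 64-bit little-endian build (standalone userspace demo; the macro is copied from the hunk above):

#include <stdio.h>

#define aligned_byte_mask(n) ((1UL << 8*(n))-1)	/* little-endian form */

int main(void)
{
	printf("%#lx\n", aligned_byte_mask(1));	/* 0xff: first byte */
	printf("%#lx\n", aligned_byte_mask(3));	/* 0xffffff: first 3 bytes */
	printf("%#lx\n", aligned_byte_mask(4));	/* 0xffffffff: first 4 bytes */
	return 0;
}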
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 61c9ffd89b05..93d5cf0bc716 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -108,7 +108,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
108 108
109 if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) 109 if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
110 return true; 110 return true;
111 111 /*
112 * For dax vmas, try to always use hugepage mappings. If the kernel does
 113 * not support hugepages, fsdax mappings will fall back to PAGE_SIZE
 114 * mappings, and device-dax namespaces, which try to guarantee a given
 115 * mapping size, will fail to enable.
116 */
112 if (vma_is_dax(vma)) 117 if (vma_is_dax(vma))
113 return true; 118 return true;
114 119
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index fcb46b3374c6..719fc3e15ea4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1090,6 +1090,7 @@ enum kvm_stat_kind {
1090 1090
1091struct kvm_stat_data { 1091struct kvm_stat_data {
1092 int offset; 1092 int offset;
1093 int mode;
1093 struct kvm *kvm; 1094 struct kvm *kvm;
1094}; 1095};
1095 1096
@@ -1097,6 +1098,7 @@ struct kvm_stats_debugfs_item {
1097 const char *name; 1098 const char *name;
1098 int offset; 1099 int offset;
1099 enum kvm_stat_kind kind; 1100 enum kvm_stat_kind kind;
1101 int mode;
1100}; 1102};
1101extern struct kvm_stats_debugfs_item debugfs_entries[]; 1103extern struct kvm_stats_debugfs_item debugfs_entries[];
1102extern struct dentry *kvm_debugfs_dir; 1104extern struct dentry *kvm_debugfs_dir;
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index bef51e35d8d2..6fefb09af7c3 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -17,6 +17,7 @@ struct device;
17 */ 17 */
18struct vmem_altmap { 18struct vmem_altmap {
19 const unsigned long base_pfn; 19 const unsigned long base_pfn;
20 const unsigned long end_pfn;
20 const unsigned long reserve; 21 const unsigned long reserve;
21 unsigned long free; 22 unsigned long free;
22 unsigned long align; 23 unsigned long align;
diff --git a/include/linux/platform_data/db8500_thermal.h b/include/linux/platform_data/db8500_thermal.h
deleted file mode 100644
index 55e55750a165..000000000000
--- a/include/linux/platform_data/db8500_thermal.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * db8500_thermal.h - DB8500 Thermal Management Implementation
4 *
5 * Copyright (C) 2012 ST-Ericsson
6 * Copyright (C) 2012 Linaro Ltd.
7 *
8 * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
9 */
10
11#ifndef _DB8500_THERMAL_H_
12#define _DB8500_THERMAL_H_
13
14#include <linux/thermal.h>
15
16#define COOLING_DEV_MAX 8
17
18struct db8500_trip_point {
19 unsigned long temp;
20 enum thermal_trip_type type;
21 char cdev_name[COOLING_DEV_MAX][THERMAL_NAME_LENGTH];
22};
23
24struct db8500_thsens_platform_data {
25 struct db8500_trip_point trip_points[THERMAL_MAX_TRIPS];
26 int num_trips;
27};
28
29#endif /* _DB8500_THERMAL_H_ */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 70bbdc38dc37..e47d0522a1f4 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -231,6 +231,76 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
231 231
232#endif /* ARCH_HAS_NOCACHE_UACCESS */ 232#endif /* ARCH_HAS_NOCACHE_UACCESS */
233 233
234extern __must_check int check_zeroed_user(const void __user *from, size_t size);
235
236/**
237 * copy_struct_from_user: copy a struct from userspace
238 * @dst: Destination address, in kernel space. This buffer must be @ksize
239 * bytes long.
240 * @ksize: Size of @dst struct.
241 * @src: Source address, in userspace.
242 * @usize: (Alleged) size of @src struct.
243 *
244 * Copies a struct from userspace to kernel space, in a way that guarantees
245 * backwards-compatibility for struct syscall arguments (as long as future
246 * struct extensions are made such that all new fields are *appended* to the
247 * old struct, and zeroed-out new fields have the same meaning as the old
248 * struct).
249 *
250 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
251 * The recommended usage is something like the following:
252 *
253 * SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
254 * {
255 * int err;
256 * struct foo karg = {};
257 *
258 * if (usize > PAGE_SIZE)
259 * return -E2BIG;
260 * if (usize < FOO_SIZE_VER0)
261 * return -EINVAL;
262 *
263 * err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
264 * if (err)
265 * return err;
266 *
267 * // ...
268 * }
269 *
270 * There are three cases to consider:
271 * * If @usize == @ksize, then it's copied verbatim.
272 * * If @usize < @ksize, then the userspace has passed an old struct to a
273 * newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
274 * are to be zero-filled.
275 * * If @usize > @ksize, then the userspace has passed a new struct to an
276 * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
277 * are checked to ensure they are zeroed, otherwise -E2BIG is returned.
278 *
279 * Returns (in all cases, some data may have been copied):
280 * * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src.
281 * * -EFAULT: access to userspace failed.
282 */
283static __always_inline __must_check int
284copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
285 size_t usize)
286{
287 size_t size = min(ksize, usize);
288 size_t rest = max(ksize, usize) - size;
289
290 /* Deal with trailing bytes. */
291 if (usize < ksize) {
292 memset(dst + size, 0, rest);
293 } else if (usize > ksize) {
294 int ret = check_zeroed_user(src + size, rest);
295 if (ret <= 0)
296 return ret ?: -E2BIG;
297 }
298 /* Copy the interoperable parts of the struct. */
299 if (copy_from_user(dst, src, size))
300 return -EFAULT;
301 return 0;
302}
303
234/* 304/*
235 * probe_kernel_read(): safely attempt to read from a location 305 * probe_kernel_read(): safely attempt to read from a location
236 * @dst: pointer to the buffer that shall take the data 306 * @dst: pointer to the buffer that shall take the data
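To make the extension rule concrete, a sketch with hypothetical structs (none of these names appear in the patch):

#include <linux/types.h>

struct foo {				/* FOO_SIZE_VER0 == 8 */
	__aligned_u64 flags;
};

struct foo_v1 {				/* a later revision */
	__aligned_u64 flags;
	__aligned_u64 extra;		/* appended; zero keeps old behaviour */
};

/*
 * v1 kernel, v0 userspace (usize 8 < ksize 16):
 *	'extra' is zero-filled and old semantics are preserved.
 * v0 kernel, v1 userspace, extra == 0 (usize 16 > ksize 8):
 *	the trailing bytes are verified zero and the copy succeeds.
 * v0 kernel, v1 userspace, extra != 0:
 *	copy_struct_from_user() returns -E2BIG.
 */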
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index eb57e3037deb..69e8bb8963db 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -35,8 +35,8 @@ DECLARE_EVENT_CLASS(kmem_alloc,
35 __entry->gfp_flags = gfp_flags; 35 __entry->gfp_flags = gfp_flags;
36 ), 36 ),
37 37
38 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", 38 TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
39 __entry->call_site, 39 (void *)__entry->call_site,
40 __entry->ptr, 40 __entry->ptr,
41 __entry->bytes_req, 41 __entry->bytes_req,
42 __entry->bytes_alloc, 42 __entry->bytes_alloc,
@@ -131,7 +131,8 @@ DECLARE_EVENT_CLASS(kmem_free,
131 __entry->ptr = ptr; 131 __entry->ptr = ptr;
132 ), 132 ),
133 133
134 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) 134 TP_printk("call_site=%pS ptr=%p",
135 (void *)__entry->call_site, __entry->ptr)
135); 136);
136 137
137DEFINE_EVENT(kmem_free, kfree, 138DEFINE_EVENT(kmem_free, kfree,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c99b4f2482c6..4fe35d600ab8 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -1003,6 +1003,8 @@ struct drm_amdgpu_info_device {
1003 __u64 high_va_max; 1003 __u64 high_va_max;
1004 /* gfx10 pa_sc_tile_steering_override */ 1004 /* gfx10 pa_sc_tile_steering_override */
1005 __u32 pa_sc_tile_steering_override; 1005 __u32 pa_sc_tile_steering_override;
1006 /* disabled TCCs */
1007 __u64 tcc_disabled_mask;
1006}; 1008};
1007 1009
1008struct drm_amdgpu_info_hw_ip { 1010struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index 1c215ea1798e..e168dc59e9a0 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
45 __u32 result; 45 __u32 result;
46}; 46};
47 47
48struct nvme_passthru_cmd64 {
49 __u8 opcode;
50 __u8 flags;
51 __u16 rsvd1;
52 __u32 nsid;
53 __u32 cdw2;
54 __u32 cdw3;
55 __u64 metadata;
56 __u64 addr;
57 __u32 metadata_len;
58 __u32 data_len;
59 __u32 cdw10;
60 __u32 cdw11;
61 __u32 cdw12;
62 __u32 cdw13;
63 __u32 cdw14;
64 __u32 cdw15;
65 __u32 timeout_ms;
66 __u64 result;
67};
68
48#define nvme_admin_cmd nvme_passthru_cmd 69#define nvme_admin_cmd nvme_passthru_cmd
49 70
50#define NVME_IOCTL_ID _IO('N', 0x40) 71#define NVME_IOCTL_ID _IO('N', 0x40)
@@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
54#define NVME_IOCTL_RESET _IO('N', 0x44) 75#define NVME_IOCTL_RESET _IO('N', 0x44)
55#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) 76#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
56#define NVME_IOCTL_RESCAN _IO('N', 0x46) 77#define NVME_IOCTL_RESCAN _IO('N', 0x46)
78#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
79#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
57 80
58#endif /* _UAPI_LINUX_NVME_IOCTL_H */ 81#endif /* _UAPI_LINUX_NVME_IOCTL_H */
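A hedged userspace sketch of the new 64-bit-result ioctl, issuing an Identify Controller admin command (opcode 0x06, CNS 1). The device path and the minimal error handling are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_passthru_cmd64 cmd;
	void *buf;
	int fd = open("/dev/nvme0", O_RDONLY);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;			/* Identify */
	cmd.cdw10 = 1;				/* CNS 1: controller data */
	cmd.addr = (__u64)(uintptr_t)buf;
	cmd.data_len = 4096;

	if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) < 0) {
		perror("NVME_IOCTL_ADMIN64_CMD");
		return 1;
	}
	printf("result=%llu\n", (unsigned long long)cmd.result);
	free(buf);
	close(fd);
	return 0;
}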
diff --git a/include/uapi/linux/pg.h b/include/uapi/linux/pg.h
index 364c350e85cd..62b6f69bd9fb 100644
--- a/include/uapi/linux/pg.h
+++ b/include/uapi/linux/pg.h
@@ -35,6 +35,9 @@
35 35
36*/ 36*/
37 37
38#ifndef _UAPI_LINUX_PG_H
39#define _UAPI_LINUX_PG_H
40
38#define PG_MAGIC 'P' 41#define PG_MAGIC 'P'
39#define PG_RESET 'Z' 42#define PG_RESET 'Z'
40#define PG_COMMAND 'C' 43#define PG_COMMAND 'C'
@@ -61,4 +64,4 @@ struct pg_read_hdr {
61 64
62}; 65};
63 66
64/* end of pg.h */ 67#endif /* _UAPI_LINUX_PG_H */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index b3105ac1381a..99335e1f4a27 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -33,8 +33,31 @@
33#define CLONE_NEWNET 0x40000000 /* New network namespace */ 33#define CLONE_NEWNET 0x40000000 /* New network namespace */
34#define CLONE_IO 0x80000000 /* Clone io context */ 34#define CLONE_IO 0x80000000 /* Clone io context */
35 35
36/* 36#ifndef __ASSEMBLY__
37 * Arguments for the clone3 syscall 37/**
38 * struct clone_args - arguments for the clone3 syscall
39 * @flags: Flags for the new process as listed above.
40 * All flags are valid except for CSIGNAL and
41 * CLONE_DETACHED.
42 * @pidfd: If CLONE_PIDFD is set, a pidfd will be
43 * returned in this argument.
44 * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the
45 * child process will be returned in the child's
46 * memory.
47 * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of
48 * the child process will be returned in the
49 * parent's memory.
50 * @exit_signal: The exit_signal the parent process will be
51 * sent when the child exits.
52 * @stack: Specify the location of the stack for the
53 * child process.
54 * @stack_size: The size of the stack for the child process.
55 * @tls: If CLONE_SETTLS is set, the tls descriptor
56 * is set to tls.
57 *
58 * The structure is versioned by size and thus extensible.
59 * New struct members must go at the end of the struct and
 60 * must be properly 64-bit aligned.
38 */ 61 */
39struct clone_args { 62struct clone_args {
40 __aligned_u64 flags; 63 __aligned_u64 flags;
@@ -46,6 +69,9 @@ struct clone_args {
46 __aligned_u64 stack_size; 69 __aligned_u64 stack_size;
47 __aligned_u64 tls; 70 __aligned_u64 tls;
48}; 71};
72#endif
73
74#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
49 75
50/* 76/*
51 * Scheduling policies 77 * Scheduling policies
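A hedged userspace sketch of calling clone3 with the size-versioned struct. There was no glibc wrapper at the time, so the raw syscall is used; the fallback syscall number 435 is an assumption for x86_64:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/sched.h>

#ifndef __NR_clone3
#define __NR_clone3 435			/* assumption: x86_64 */
#endif

int main(void)
{
	struct clone_args args;
	pid_t pid;

	memset(&args, 0, sizeof(args));	/* unknown fields must stay zero */
	args.exit_signal = SIGCHLD;

	pid = (pid_t)syscall(__NR_clone3, &args, sizeof(args));
	if (pid < 0) {
		perror("clone3");
		return 1;
	}
	if (pid == 0)
		_exit(0);		/* child */
	waitpid(pid, NULL, 0);
	return 0;
}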
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 98b30c1613b2..d89969aa9942 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -212,30 +212,7 @@ int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
212 212
213bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); 213bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
214 214
215efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc); 215void xen_efi_runtime_setup(void);
216efi_status_t xen_efi_set_time(efi_time_t *tm);
217efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
218 efi_time_t *tm);
219efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm);
220efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
221 u32 *attr, unsigned long *data_size,
222 void *data);
223efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
224 efi_char16_t *name, efi_guid_t *vendor);
225efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
226 u32 attr, unsigned long data_size,
227 void *data);
228efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
229 u64 *remaining_space,
230 u64 *max_variable_size);
231efi_status_t xen_efi_get_next_high_mono_count(u32 *count);
232efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
233 unsigned long count, unsigned long sg_list);
234efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
235 unsigned long count, u64 *max_size,
236 int *reset_type);
237void xen_efi_reset_system(int reset_type, efi_status_t status,
238 unsigned long data_size, efi_char16_t *data);
239 216
240 217
241#ifdef CONFIG_PREEMPT 218#ifdef CONFIG_PREEMPT
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4655adbbae10..3f0cb82e4fbc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10586,55 +10586,26 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
10586 u32 size; 10586 u32 size;
10587 int ret; 10587 int ret;
10588 10588
10589 if (!access_ok(uattr, PERF_ATTR_SIZE_VER0)) 10589 /* Zero the full structure, so that a short copy will be nice. */
10590 return -EFAULT;
10591
10592 /*
10593 * zero the full structure, so that a short copy will be nice.
10594 */
10595 memset(attr, 0, sizeof(*attr)); 10590 memset(attr, 0, sizeof(*attr));
10596 10591
10597 ret = get_user(size, &uattr->size); 10592 ret = get_user(size, &uattr->size);
10598 if (ret) 10593 if (ret)
10599 return ret; 10594 return ret;
10600 10595
10601 if (size > PAGE_SIZE) /* silly large */ 10596 /* ABI compatibility quirk: */
10602 goto err_size; 10597 if (!size)
10603
10604 if (!size) /* abi compat */
10605 size = PERF_ATTR_SIZE_VER0; 10598 size = PERF_ATTR_SIZE_VER0;
10606 10599 if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
10607 if (size < PERF_ATTR_SIZE_VER0)
10608 goto err_size; 10600 goto err_size;
10609 10601
10610 /* 10602 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
10611 * If we're handed a bigger struct than we know of, 10603 if (ret) {
10612 * ensure all the unknown bits are 0 - i.e. new 10604 if (ret == -E2BIG)
10613 * user-space does not rely on any kernel feature 10605 goto err_size;
10614 * extensions we dont know about yet. 10606 return ret;
10615 */
10616 if (size > sizeof(*attr)) {
10617 unsigned char __user *addr;
10618 unsigned char __user *end;
10619 unsigned char val;
10620
10621 addr = (void __user *)uattr + sizeof(*attr);
10622 end = (void __user *)uattr + size;
10623
10624 for (; addr < end; addr++) {
10625 ret = get_user(val, addr);
10626 if (ret)
10627 return ret;
10628 if (val)
10629 goto err_size;
10630 }
10631 size = sizeof(*attr);
10632 } 10607 }
10633 10608
10634 ret = copy_from_user(attr, uattr, size);
10635 if (ret)
10636 return -EFAULT;
10637
10638 attr->size = size; 10609 attr->size = size;
10639 10610
10640 if (attr->__reserved_1) 10611 if (attr->__reserved_1)
diff --git a/kernel/fork.c b/kernel/fork.c
index f9572f416126..1f6c45f6a734 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2525,39 +2525,19 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2525#ifdef __ARCH_WANT_SYS_CLONE3 2525#ifdef __ARCH_WANT_SYS_CLONE3
2526noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, 2526noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
2527 struct clone_args __user *uargs, 2527 struct clone_args __user *uargs,
2528 size_t size) 2528 size_t usize)
2529{ 2529{
2530 int err;
2530 struct clone_args args; 2531 struct clone_args args;
2531 2532
2532 if (unlikely(size > PAGE_SIZE)) 2533 if (unlikely(usize > PAGE_SIZE))
2533 return -E2BIG; 2534 return -E2BIG;
2534 2535 if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
2535 if (unlikely(size < sizeof(struct clone_args)))
2536 return -EINVAL; 2536 return -EINVAL;
2537 2537
2538 if (unlikely(!access_ok(uargs, size))) 2538 err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
2539 return -EFAULT; 2539 if (err)
2540 2540 return err;
2541 if (size > sizeof(struct clone_args)) {
2542 unsigned char __user *addr;
2543 unsigned char __user *end;
2544 unsigned char val;
2545
2546 addr = (void __user *)uargs + sizeof(struct clone_args);
2547 end = (void __user *)uargs + size;
2548
2549 for (; addr < end; addr++) {
2550 if (get_user(val, addr))
2551 return -EFAULT;
2552 if (val)
2553 return -E2BIG;
2554 }
2555
2556 size = sizeof(struct clone_args);
2557 }
2558
2559 if (copy_from_user(&args, uargs, size))
2560 return -EFAULT;
2561 2541
2562 /* 2542 /*
2563 * Verify that higher 32bits of exit_signal are unset and that 2543 * Verify that higher 32bits of exit_signal are unset and that
@@ -2604,6 +2584,17 @@ static bool clone3_args_valid(const struct kernel_clone_args *kargs)
2604 return true; 2584 return true;
2605} 2585}
2606 2586
2587/**
2588 * clone3 - create a new process with specific properties
2589 * @uargs: argument structure
2590 * @size: size of @uargs
2591 *
2592 * clone3() is the extensible successor to clone()/clone2().
2593 * It takes a struct as argument that is versioned by its size.
2594 *
2595 * Return: On success, a positive PID for the child process.
2596 * On error, a negative errno number.
2597 */
2607SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) 2598SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
2608{ 2599{
2609 int err; 2600 int err;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7880f4f64d0e..dd05a378631a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5106,9 +5106,6 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
5106 u32 size; 5106 u32 size;
5107 int ret; 5107 int ret;
5108 5108
5109 if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0))
5110 return -EFAULT;
5111
5112 /* Zero the full structure, so that a short copy will be nice: */ 5109 /* Zero the full structure, so that a short copy will be nice: */
5113 memset(attr, 0, sizeof(*attr)); 5110 memset(attr, 0, sizeof(*attr));
5114 5111
@@ -5116,45 +5113,19 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
5116 if (ret) 5113 if (ret)
5117 return ret; 5114 return ret;
5118 5115
5119 /* Bail out on silly large: */
5120 if (size > PAGE_SIZE)
5121 goto err_size;
5122
5123 /* ABI compatibility quirk: */ 5116 /* ABI compatibility quirk: */
5124 if (!size) 5117 if (!size)
5125 size = SCHED_ATTR_SIZE_VER0; 5118 size = SCHED_ATTR_SIZE_VER0;
5126 5119 if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
5127 if (size < SCHED_ATTR_SIZE_VER0)
5128 goto err_size; 5120 goto err_size;
5129 5121
5130 /* 5122 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
5131 * If we're handed a bigger struct than we know of, 5123 if (ret) {
5132 * ensure all the unknown bits are 0 - i.e. new 5124 if (ret == -E2BIG)
5133 * user-space does not rely on any kernel feature 5125 goto err_size;
5134 * extensions we dont know about yet. 5126 return ret;
5135 */
5136 if (size > sizeof(*attr)) {
5137 unsigned char __user *addr;
5138 unsigned char __user *end;
5139 unsigned char val;
5140
5141 addr = (void __user *)uattr + sizeof(*attr);
5142 end = (void __user *)uattr + size;
5143
5144 for (; addr < end; addr++) {
5145 ret = get_user(val, addr);
5146 if (ret)
5147 return ret;
5148 if (val)
5149 goto err_size;
5150 }
5151 size = sizeof(*attr);
5152 } 5127 }
5153 5128
5154 ret = copy_from_user(attr, uattr, size);
5155 if (ret)
5156 return -EFAULT;
5157
5158 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && 5129 if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
5159 size < SCHED_ATTR_SIZE_VER1) 5130 size < SCHED_ATTR_SIZE_VER1)
5160 return -EINVAL; 5131 return -EINVAL;
@@ -5354,7 +5325,7 @@ sched_attr_copy_to_user(struct sched_attr __user *uattr,
5354 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 5325 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
5355 * @pid: the pid in question. 5326 * @pid: the pid in question.
5356 * @uattr: structure containing the extended parameters. 5327 * @uattr: structure containing the extended parameters.
5357 * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility. 5328 * @usize: sizeof(attr) for fwd/bwd comp.
5358 * @flags: for future extension. 5329 * @flags: for future extension.
5359 */ 5330 */
5360SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 5331SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index a39bed2c784f..168479a7d61b 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -174,7 +174,6 @@ static int membarrier_private_expedited(int flags)
174 */ 174 */
175 if (cpu == raw_smp_processor_id()) 175 if (cpu == raw_smp_processor_id())
176 continue; 176 continue;
177 rcu_read_lock();
178 p = rcu_dereference(cpu_rq(cpu)->curr); 177 p = rcu_dereference(cpu_rq(cpu)->curr);
179 if (p && p->mm == mm) 178 if (p && p->mm == mm)
180 __cpumask_set_cpu(cpu, tmpmask); 179 __cpumask_set_cpu(cpu, tmpmask);
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index c1f5bb590b5e..b5a65e212df2 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -42,39 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt)
42 */ 42 */
43static int bc_set_next(ktime_t expires, struct clock_event_device *bc) 43static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
44{ 44{
45 int bc_moved;
46 /* 45 /*
47 * We try to cancel the timer first. If the callback is on 46 * This is called either from enter/exit idle code or from the
48 * flight on some other cpu then we let it handle it. If we 47 * broadcast handler. In all cases tick_broadcast_lock is held.
49 * were able to cancel the timer nothing can rearm it as we
50 * own broadcast_lock.
51 * 48 *
52 * However we can also be called from the event handler of 49 * hrtimer_cancel() cannot be called here neither from the
53 * ce_broadcast_hrtimer itself when it expires. We cannot 50 * broadcast handler nor from the enter/exit idle code. The idle
54 * restart the timer because we are in the callback, but we 51 * code can run into the problem described in bc_shutdown() and the
55 * can set the expiry time and let the callback return 52 * broadcast handler cannot wait for itself to complete for obvious
56 * HRTIMER_RESTART. 53 * reasons.
57 * 54 *
58 * Since we are in the idle loop at this point and because 55 * Each caller tries to arm the hrtimer on its own CPU, but if the
 59 * hrtimer_{start/cancel} functions call into tracing, 56 * hrtimer callback function is currently running, then
60 * calls to these functions must be bound within RCU_NONIDLE. 57 * hrtimer_start() cannot move it and the timer stays on the CPU on
58 * which it is assigned at the moment.
59 *
60 * As this can be called from idle code, the hrtimer_start()
61 * invocation has to be wrapped with RCU_NONIDLE() as
62 * hrtimer_start() can call into tracing.
61 */ 63 */
62 RCU_NONIDLE( 64 RCU_NONIDLE( {
63 { 65 hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
64 bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0; 66 /*
65 if (bc_moved) { 67 * The core tick broadcast mode expects bc->bound_on to be set
66 hrtimer_start(&bctimer, expires, 68 * correctly to prevent a CPU which has the broadcast hrtimer
67 HRTIMER_MODE_ABS_PINNED_HARD); 69 * armed from going deep idle.
68 } 70 *
69 } 71 * As tick_broadcast_lock is held, nothing can change the cpu
70 ); 72 * base which was just established in hrtimer_start() above. So
71 73 * the below access is safe even without holding the hrtimer
72 if (bc_moved) { 74 * base lock.
73 /* Bind the "device" to the cpu */ 75 */
74 bc->bound_on = smp_processor_id(); 76 bc->bound_on = bctimer.base->cpu_base->cpu;
75 } else if (bc->bound_on == smp_processor_id()) { 77 } );
76 hrtimer_set_expires(&bctimer, expires);
77 }
78 return 0; 78 return 0;
79} 79}
80 80
@@ -100,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
100{ 100{
101 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); 101 ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
102 102
103 if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
104 if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
105 return HRTIMER_RESTART;
106
107 return HRTIMER_NORESTART; 103 return HRTIMER_NORESTART;
108} 104}
109 105
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 26b0a08f3c7d..f801d154ff6a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -365,11 +365,11 @@ static inline struct trace_array *top_trace_array(void)
365 __builtin_types_compatible_p(typeof(var), type *) 365 __builtin_types_compatible_p(typeof(var), type *)
366 366
367#undef IF_ASSIGN 367#undef IF_ASSIGN
368#define IF_ASSIGN(var, entry, etype, id) \ 368#define IF_ASSIGN(var, entry, etype, id) \
369 if (FTRACE_CMP_TYPE(var, etype)) { \ 369 if (FTRACE_CMP_TYPE(var, etype)) { \
370 var = (typeof(var))(entry); \ 370 var = (typeof(var))(entry); \
371 WARN_ON(id && (entry)->type != id); \ 371 WARN_ON(id != 0 && (entry)->type != id); \
372 break; \ 372 break; \
373 } 373 }
374 374
375/* Will cause compile errors if type is not found. */ 375/* Will cause compile errors if type is not found. */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index c773b8fb270c..c9a74f82b14a 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -452,8 +452,10 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
452 452
453 switch (*next) { 453 switch (*next) {
454 case '(': /* #2 */ 454 case '(': /* #2 */
455 if (top - op_stack > nr_parens) 455 if (top - op_stack > nr_parens) {
456 return ERR_PTR(-EINVAL); 456 ret = -EINVAL;
457 goto out_free;
458 }
457 *(++top) = invert; 459 *(++top) = invert;
458 continue; 460 continue;
459 case '!': /* #3 */ 461 case '!': /* #3 */
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index baf58a3612c0..905b10af5d5c 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -178,6 +178,16 @@ void __trace_probe_log_err(int offset, int err_type)
178 if (!command) 178 if (!command)
179 return; 179 return;
180 180
181 if (trace_probe_log.index >= trace_probe_log.argc) {
182 /**
183 * Set the error position is next to the last arg + space.
184 * Note that len includes the terminal null and the cursor
185 * appaers at pos + 1.
186 */
187 pos = len;
188 offset = 0;
189 }
190
181 /* And make a command string from argv array */ 191 /* And make a command string from argv array */
182 p = command; 192 p = command;
183 for (i = 0; i < trace_probe_log.argc; i++) { 193 for (i = 0; i < trace_probe_log.argc; i++) {
@@ -1084,6 +1094,12 @@ int trace_probe_compare_arg_type(struct trace_probe *a, struct trace_probe *b)
1084{ 1094{
1085 int i; 1095 int i;
1086 1096
 1097 /* If the argument counts differ, report the position just past the shorter list */
1098 if (a->nr_args < b->nr_args)
1099 return a->nr_args + 1;
1100 if (a->nr_args > b->nr_args)
1101 return b->nr_args + 1;
1102
1087 for (i = 0; i < a->nr_args; i++) { 1103 for (i = 0; i < a->nr_args; i++) {
1088 if ((b->nr_args <= i) || 1104 if ((b->nr_args <= i) ||
1089 ((a->args[i].type != b->args[i].type) || 1105 ((a->args[i].type != b->args[i].type) ||
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 28ff554a1be8..6c0005d5dd5c 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -3,16 +3,10 @@
3#include <linux/export.h> 3#include <linux/export.h>
4#include <linux/uaccess.h> 4#include <linux/uaccess.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/bitops.h>
6 7
7#include <asm/word-at-a-time.h> 8#include <asm/word-at-a-time.h>
8 9
9/* Set bits in the first 'n' bytes when loaded from memory */
10#ifdef __LITTLE_ENDIAN
11# define aligned_byte_mask(n) ((1ul << 8*(n))-1)
12#else
13# define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
14#endif
15
16/* 10/*
17 * Do a strnlen, return length of string *with* final '\0'. 11 * Do a strnlen, return length of string *with* final '\0'.
18 * 'count' is the user-supplied count, while 'max' is the 12 * 'count' is the user-supplied count, while 'max' is the
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 67bcd5dfd847..e365ace06538 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,14 +31,133 @@
31# define TEST_U64 31# define TEST_U64
32#endif 32#endif
33 33
34#define test(condition, msg) \ 34#define test(condition, msg, ...) \
35({ \ 35({ \
36 int cond = (condition); \ 36 int cond = (condition); \
37 if (cond) \ 37 if (cond) \
38 pr_warn("%s\n", msg); \ 38 pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__); \
39 cond; \ 39 cond; \
40}) 40})
41 41
42static bool is_zeroed(void *from, size_t size)
43{
44 return memchr_inv(from, 0x0, size) == NULL;
45}
46
47static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
48{
49 int ret = 0;
50 size_t start, end, i;
51 size_t zero_start = size / 4;
52 size_t zero_end = size - zero_start;
53
54 /*
55 * We conduct a series of check_nonzero_user() tests on a block of memory
56 * with the following byte-pattern (trying every possible [start,end]
57 * pair):
58 *
59 * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
60 *
61 * And we verify that check_nonzero_user() acts identically to memchr_inv().
62 */
63
64 memset(kmem, 0x0, size);
65 for (i = 1; i < zero_start; i += 2)
66 kmem[i] = 0xff;
67 for (i = zero_end; i < size; i += 2)
68 kmem[i] = 0xff;
69
70 ret |= test(copy_to_user(umem, kmem, size),
71 "legitimate copy_to_user failed");
72
73 for (start = 0; start <= size; start++) {
74 for (end = start; end <= size; end++) {
75 size_t len = end - start;
76 int retval = check_zeroed_user(umem + start, len);
77 int expected = is_zeroed(kmem + start, len);
78
79 ret |= test(retval != expected,
80 "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
81 retval, expected, start, end);
82 }
83 }
84
85 return ret;
86}
87
88static int test_copy_struct_from_user(char *kmem, char __user *umem,
89 size_t size)
90{
91 int ret = 0;
92 char *umem_src = NULL, *expected = NULL;
93 size_t ksize, usize;
94
95 umem_src = kmalloc(size, GFP_KERNEL);
96 if ((ret |= test(umem_src == NULL, "kmalloc failed")))
97 goto out_free;
98
99 expected = kmalloc(size, GFP_KERNEL);
100 if ((ret |= test(expected == NULL, "kmalloc failed")))
101 goto out_free;
102
103 /* Fill umem with a fixed byte pattern. */
104 memset(umem_src, 0x3e, size);
105 ret |= test(copy_to_user(umem, umem_src, size),
106 "legitimate copy_to_user failed");
107
108 /* Check basic case -- (usize == ksize). */
109 ksize = size;
110 usize = size;
111
112 memcpy(expected, umem_src, ksize);
113
114 memset(kmem, 0x0, size);
115 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
116 "copy_struct_from_user(usize == ksize) failed");
117 ret |= test(memcmp(kmem, expected, ksize),
118 "copy_struct_from_user(usize == ksize) gives unexpected copy");
119
120 /* Old userspace case -- (usize < ksize). */
121 ksize = size;
122 usize = size / 2;
123
124 memcpy(expected, umem_src, usize);
125 memset(expected + usize, 0x0, ksize - usize);
126
127 memset(kmem, 0x0, size);
128 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
129 "copy_struct_from_user(usize < ksize) failed");
130 ret |= test(memcmp(kmem, expected, ksize),
131 "copy_struct_from_user(usize < ksize) gives unexpected copy");
132
133 /* New userspace (-E2BIG) case -- (usize > ksize). */
134 ksize = size / 2;
135 usize = size;
136
137 memset(kmem, 0x0, size);
138 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
139 "copy_struct_from_user(usize > ksize) didn't give E2BIG");
140
141 /* New userspace (success) case -- (usize > ksize). */
142 ksize = size / 2;
143 usize = size;
144
145 memcpy(expected, umem_src, ksize);
146 ret |= test(clear_user(umem + ksize, usize - ksize),
147 "legitimate clear_user failed");
148
149 memset(kmem, 0x0, size);
150 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
151 "copy_struct_from_user(usize > ksize) failed");
152 ret |= test(memcmp(kmem, expected, ksize),
153 "copy_struct_from_user(usize > ksize) gives unexpected copy");
154
155out_free:
156 kfree(expected);
157 kfree(umem_src);
158 return ret;
159}
160
42static int __init test_user_copy_init(void) 161static int __init test_user_copy_init(void)
43{ 162{
44 int ret = 0; 163 int ret = 0;
@@ -106,6 +225,11 @@ static int __init test_user_copy_init(void)
106#endif 225#endif
107#undef test_legit 226#undef test_legit
108 227
228 /* Test usage of check_nonzero_user(). */
229 ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
230 /* Test usage of copy_struct_from_user(). */
231 ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
232
109 /* 233 /*
110 * Invalid usage: none of these copies should succeed. 234 * Invalid usage: none of these copies should succeed.
111 */ 235 */
diff --git a/lib/usercopy.c b/lib/usercopy.c
index c2bfbcaeb3dc..cbb4d9ec00f2 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/uaccess.h> 2#include <linux/uaccess.h>
3#include <linux/bitops.h>
3 4
4/* out-of-line parts */ 5/* out-of-line parts */
5 6
@@ -31,3 +32,57 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
31} 32}
32EXPORT_SYMBOL(_copy_to_user); 33EXPORT_SYMBOL(_copy_to_user);
33#endif 34#endif
35
36/**
37 * check_zeroed_user: check if a userspace buffer only contains zero bytes
38 * @from: Source address, in userspace.
39 * @size: Size of buffer.
40 *
41 * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
42 * userspace addresses (and is more efficient because we don't care where the
43 * first non-zero byte is).
44 *
45 * Returns:
46 * * 0: There were non-zero bytes present in the buffer.
47 * * 1: The buffer was full of zero bytes.
48 * * -EFAULT: access to userspace failed.
49 */
50int check_zeroed_user(const void __user *from, size_t size)
51{
52 unsigned long val;
53 uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
54
55 if (unlikely(size == 0))
56 return 1;
57
58 from -= align;
59 size += align;
60
61 if (!user_access_begin(from, size))
62 return -EFAULT;
63
64 unsafe_get_user(val, (unsigned long __user *) from, err_fault);
65 if (align)
66 val &= ~aligned_byte_mask(align);
67
68 while (size > sizeof(unsigned long)) {
69 if (unlikely(val))
70 goto done;
71
72 from += sizeof(unsigned long);
73 size -= sizeof(unsigned long);
74
75 unsafe_get_user(val, (unsigned long __user *) from, err_fault);
76 }
77
78 if (size < sizeof(unsigned long))
79 val &= aligned_byte_mask(size);
80
81done:
82 user_access_end();
83 return (val == 0);
84err_fault:
85 user_access_end();
86 return -EFAULT;
87}
88EXPORT_SYMBOL(check_zeroed_user);
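
The masking above is easier to follow outside the kernel. Below is a userspace model of the same word-at-a-time loop, assuming a little-endian LP64 target, with memcpy() standing in for unsafe_get_user() and BYTE_MASK standing in for aligned_byte_mask() from <linux/bitops.h>. Like the kernel version it reads whole words, so the backing storage must extend to a word boundary (the aligned array in main() guarantees this); unlike the kernel version it has no fault handling.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Little-endian stand-in for aligned_byte_mask(n): the first n bytes of a
 * word.  Only used for n < sizeof(unsigned long), as in the kernel code.
 */
#define BYTE_MASK(n)	((1ul << 8 * (n)) - 1)

static int all_zero(const char *from, size_t size)
{
	unsigned long val;
	uintptr_t align = (uintptr_t)from % sizeof(unsigned long);

	if (size == 0)
		return 1;

	/* Round down to a word boundary and widen the range to match. */
	from -= align;
	size += align;

	memcpy(&val, from, sizeof(val));
	if (align)
		val &= ~BYTE_MASK(align);	/* drop bytes before the buffer */

	while (size > sizeof(unsigned long)) {
		if (val)
			return 0;
		from += sizeof(unsigned long);
		size -= sizeof(unsigned long);
		memcpy(&val, from, sizeof(val));
	}

	if (size < sizeof(unsigned long))
		val &= BYTE_MASK(size);		/* drop bytes past the buffer */

	return val == 0;
}

int main(void)
{
	unsigned long storage[4] = { 0 };	/* word-aligned backing */
	char *buf = (char *)storage;

	printf("%d\n", all_zero(buf + 3, 20));	/* 1: all zero */
	buf[9] = 1;
	printf("%d\n", all_zero(buf + 3, 20));	/* 0: non-zero byte inside */
	return 0;
}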
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 448d686da8b1..0bf5640f1f07 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -4,6 +4,7 @@
4 */ 4 */
5#ifndef __NFIT_TEST_H__ 5#ifndef __NFIT_TEST_H__
6#define __NFIT_TEST_H__ 6#define __NFIT_TEST_H__
7#include <linux/acpi.h>
7#include <linux/list.h> 8#include <linux/list.h>
8#include <linux/uuid.h> 9#include <linux/uuid.h>
9#include <linux/ioport.h> 10#include <linux/ioport.h>
@@ -202,9 +203,6 @@ struct nd_intel_lss {
202 __u32 status; 203 __u32 status;
203} __packed; 204} __packed;
204 205
205union acpi_object;
206typedef void *acpi_handle;
207
208typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t); 206typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
209typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle, 207typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
210 const guid_t *guid, u64 rev, u64 func, 208 const guid_t *guid, u64 rev, u64 func,
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
index 8a4025e912cb..ef1e9bafb098 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
@@ -95,7 +95,7 @@ echo 'p:kprobes/testevent _do_fork abcd=\1' > kprobe_events
95check_error 'p:kprobes/testevent _do_fork ^bcd=\1' # DIFF_ARG_TYPE 95check_error 'p:kprobes/testevent _do_fork ^bcd=\1' # DIFF_ARG_TYPE
96check_error 'p:kprobes/testevent _do_fork ^abcd=\1:u8' # DIFF_ARG_TYPE 96check_error 'p:kprobes/testevent _do_fork ^abcd=\1:u8' # DIFF_ARG_TYPE
97check_error 'p:kprobes/testevent _do_fork ^abcd=\"foo"' # DIFF_ARG_TYPE 97check_error 'p:kprobes/testevent _do_fork ^abcd=\"foo"' # DIFF_ARG_TYPE
98check_error '^p:kprobes/testevent _do_fork' # SAME_PROBE 98check_error '^p:kprobes/testevent _do_fork abcd=\1' # SAME_PROBE
99fi 99fi
100 100
101exit 0 101exit 0
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 62c591f87dab..c5ec868fa1e5 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -22,6 +22,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
22TEST_GEN_PROGS_x86_64 += x86_64/state_test 22TEST_GEN_PROGS_x86_64 += x86_64/state_test
23TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test 23TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
24TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test 24TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
25TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
25TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test 26TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
26TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test 27TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
27TEST_GEN_PROGS_x86_64 += clear_dirty_log_test 28TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
@@ -48,7 +49,7 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
48 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. 49 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
49 50
50no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \ 51no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
51 $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie) 52 $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
52 53
53# On s390, build the testcases KVM-enabled 54# On s390, build the testcases KVM-enabled
54pgste-option = $(call try-run, echo 'int main() { return 0; }' | \ 55pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 0c17f2ee685e..ff234018219c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1083,6 +1083,9 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
1083#define VMX_BASIC_MEM_TYPE_WB 6LLU 1083#define VMX_BASIC_MEM_TYPE_WB 6LLU
1084#define VMX_BASIC_INOUT 0x0040000000000000LLU 1084#define VMX_BASIC_INOUT 0x0040000000000000LLU
1085 1085
1086/* VMX_EPT_VPID_CAP bits */
1087#define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21)
1088
1086/* MSR_IA32_VMX_MISC bits */ 1089/* MSR_IA32_VMX_MISC bits */
1087#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) 1090#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
1088#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F 1091#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 69b17055f63d..6ae5a47fe067 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -569,6 +569,10 @@ struct vmx_pages {
569 void *enlightened_vmcs_hva; 569 void *enlightened_vmcs_hva;
570 uint64_t enlightened_vmcs_gpa; 570 uint64_t enlightened_vmcs_gpa;
571 void *enlightened_vmcs; 571 void *enlightened_vmcs;
572
573 void *eptp_hva;
574 uint64_t eptp_gpa;
575 void *eptp;
572}; 576};
573 577
574struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); 578struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
@@ -576,4 +580,14 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
576void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); 580void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
577bool load_vmcs(struct vmx_pages *vmx); 581bool load_vmcs(struct vmx_pages *vmx);
578 582
583void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
584 uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
585void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
586 uint64_t nested_paddr, uint64_t paddr, uint64_t size,
587 uint32_t eptp_memslot);
588void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
589 uint32_t memslot, uint32_t eptp_memslot);
590void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
591 uint32_t eptp_memslot);
592
579#endif /* SELFTEST_KVM_VMX_H */ 593#endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 80a338b5403c..41cf45416060 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -705,7 +705,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
705 * on error (e.g. currently no memory region using memslot as a KVM 705 * on error (e.g. currently no memory region using memslot as a KVM
706 * memory slot ID). 706 * memory slot ID).
707 */ 707 */
708static struct userspace_mem_region * 708struct userspace_mem_region *
709memslot2region(struct kvm_vm *vm, uint32_t memslot) 709memslot2region(struct kvm_vm *vm, uint32_t memslot)
710{ 710{
711 struct userspace_mem_region *region; 711 struct userspace_mem_region *region;
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index f36262e0f655..ac50c42750cf 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -68,4 +68,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
68void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent); 68void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
69void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent); 69void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
70 70
71struct userspace_mem_region *
72memslot2region(struct kvm_vm *vm, uint32_t memslot);
73
71#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */ 74#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index c53dbc6bc568..6698cb741e10 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1085,7 +1085,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
1085 for (i = 0; i < nmsrs; i++) 1085 for (i = 0; i < nmsrs; i++)
1086 state->msrs.entries[i].index = list->indices[i]; 1086 state->msrs.entries[i].index = list->indices[i];
1087 r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs); 1087 r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
1088 TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed at %x)", 1088 TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
1089 r, r == nmsrs ? -1 : list->indices[r]); 1089 r, r == nmsrs ? -1 : list->indices[r]);
1090 1090
1091 r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs); 1091 r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 9cef0455b819..fab8f6b0bf52 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -7,11 +7,39 @@
7 7
8#include "test_util.h" 8#include "test_util.h"
9#include "kvm_util.h" 9#include "kvm_util.h"
10#include "../kvm_util_internal.h"
10#include "processor.h" 11#include "processor.h"
11#include "vmx.h" 12#include "vmx.h"
12 13
14#define PAGE_SHIFT_4K 12
15
16#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000
17
13bool enable_evmcs; 18bool enable_evmcs;
14 19
20struct eptPageTableEntry {
21 uint64_t readable:1;
22 uint64_t writable:1;
23 uint64_t executable:1;
24 uint64_t memory_type:3;
25 uint64_t ignore_pat:1;
26 uint64_t page_size:1;
27 uint64_t accessed:1;
28 uint64_t dirty:1;
29 uint64_t ignored_11_10:2;
30 uint64_t address:40;
31 uint64_t ignored_62_52:11;
32 uint64_t suppress_ve:1;
33};
34
35struct eptPageTablePointer {
36 uint64_t memory_type:3;
37 uint64_t page_walk_length:3;
38 uint64_t ad_enabled:1;
39 uint64_t reserved_11_07:5;
40 uint64_t address:40;
41 uint64_t reserved_63_52:12;
42};
15int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) 43int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
16{ 44{
17 uint16_t evmcs_ver; 45 uint16_t evmcs_ver;
@@ -174,15 +202,35 @@ bool load_vmcs(struct vmx_pages *vmx)
174 */ 202 */
175static inline void init_vmcs_control_fields(struct vmx_pages *vmx) 203static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
176{ 204{
205 uint32_t sec_exec_ctl = 0;
206
177 vmwrite(VIRTUAL_PROCESSOR_ID, 0); 207 vmwrite(VIRTUAL_PROCESSOR_ID, 0);
178 vmwrite(POSTED_INTR_NV, 0); 208 vmwrite(POSTED_INTR_NV, 0);
179 209
180 vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS)); 210 vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
181 if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0)) 211
212 if (vmx->eptp_gpa) {
213 uint64_t ept_paddr;
214 struct eptPageTablePointer eptp = {
215 .memory_type = VMX_BASIC_MEM_TYPE_WB,
216 .page_walk_length = 3, /* the walk length is this field + 1: 4 levels */
217 .ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
218 .address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
219 };
220
221 memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
222 vmwrite(EPT_POINTER, ept_paddr);
223 sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
224 }
225
226 if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
182 vmwrite(CPU_BASED_VM_EXEC_CONTROL, 227 vmwrite(CPU_BASED_VM_EXEC_CONTROL,
183 rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS); 228 rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
184 else 229 else {
185 vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS)); 230 vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
231 GUEST_ASSERT(!sec_exec_ctl);
232 }
233
186 vmwrite(EXCEPTION_BITMAP, 0); 234 vmwrite(EXCEPTION_BITMAP, 0);
187 vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0); 235 vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
188 vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */ 236 vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
@@ -327,3 +375,152 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
327 init_vmcs_host_state(); 375 init_vmcs_host_state();
328 init_vmcs_guest_state(guest_rip, guest_rsp); 376 init_vmcs_guest_state(guest_rip, guest_rsp);
329} 377}
378
379void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
380 uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
381{
382 uint16_t index[4];
383 struct eptPageTableEntry *pml4e;
384
385 TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
386 "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
387
388 TEST_ASSERT((nested_paddr % vm->page_size) == 0,
389 "Nested physical address not on page boundary,\n"
390 " nested_paddr: 0x%lx vm->page_size: 0x%x",
391 nested_paddr, vm->page_size);
392 TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
393 "Nested physical address beyond maximum supported,\n"
394 " nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
395 nested_paddr, vm->max_gfn, vm->page_size);
396 TEST_ASSERT((paddr % vm->page_size) == 0,
397 "Physical address not on page boundary,\n"
398 " paddr: 0x%lx vm->page_size: 0x%x",
399 paddr, vm->page_size);
400 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
401 "Physical address beyond beyond maximum supported,\n"
402 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
403 paddr, vm->max_gfn, vm->page_size);
404
405 index[0] = (nested_paddr >> 12) & 0x1ffu;
406 index[1] = (nested_paddr >> 21) & 0x1ffu;
407 index[2] = (nested_paddr >> 30) & 0x1ffu;
408 index[3] = (nested_paddr >> 39) & 0x1ffu;
409
410 /* Allocate page directory pointer table if not present. */
411 pml4e = vmx->eptp_hva;
412 if (!pml4e[index[3]].readable) {
413 pml4e[index[3]].address = vm_phy_page_alloc(vm,
414 KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
415 >> vm->page_shift;
416 pml4e[index[3]].writable = true;
417 pml4e[index[3]].readable = true;
418 pml4e[index[3]].executable = true;
419 }
420
421 /* Allocate page directory table if not present. */
422 struct eptPageTableEntry *pdpe;
423 pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
424 if (!pdpe[index[2]].readable) {
425 pdpe[index[2]].address = vm_phy_page_alloc(vm,
426 KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
427 >> vm->page_shift;
428 pdpe[index[2]].writable = true;
429 pdpe[index[2]].readable = true;
430 pdpe[index[2]].executable = true;
431 }
432
433 /* Allocate page table if not present. */
434 struct eptPageTableEntry *pde;
435 pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
436 if (!pde[index[1]].readable) {
437 pde[index[1]].address = vm_phy_page_alloc(vm,
438 KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
439 >> vm->page_shift;
440 pde[index[1]].writable = true;
441 pde[index[1]].readable = true;
442 pde[index[1]].executable = true;
443 }
444
445 /* Fill in page table entry. */
446 struct eptPageTableEntry *pte;
447 pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
448 pte[index[0]].address = paddr >> vm->page_shift;
449 pte[index[0]].writable = true;
450 pte[index[0]].readable = true;
451 pte[index[0]].executable = true;
452
453 /*
454 * For now mark these as accessed and dirty because the only
455 * testcase we have needs that. Can be reconsidered later.
456 */
457 pte[index[0]].accessed = true;
458 pte[index[0]].dirty = true;
459}
460
461/*
462 * Map a range of EPT guest physical addresses to the VM's physical addresses
463 *
464 * Input Args:
465 * vm - Virtual Machine
466 * nested_paddr - Nested guest physical address to map
467 * paddr - VM Physical Address
468 * size - The size of the range to map
469 * eptp_memslot - Memory region slot for new virtual translation tables
470 *
471 * Output Args: None
472 *
473 * Return: None
474 *
475 * Within the VM given by vm, creates a nested guest translation from the
476 * page range starting at nested_paddr to the page range starting at paddr.
477 */
478void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
479 uint64_t nested_paddr, uint64_t paddr, uint64_t size,
480 uint32_t eptp_memslot)
481{
482 size_t page_size = vm->page_size;
483 size_t npages = size / page_size;
484
485 TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
486 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
487
488 while (npages--) {
489 nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
490 nested_paddr += page_size;
491 paddr += page_size;
492 }
493}
494
495/* Prepare an identity extended page table that maps all the
496 * physical pages in the VM.
497 */
498void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
499 uint32_t memslot, uint32_t eptp_memslot)
500{
501 sparsebit_idx_t i, last;
502 struct userspace_mem_region *region =
503 memslot2region(vm, memslot);
504
505 i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
506 last = i + (region->region.memory_size >> vm->page_shift);
507 for (;;) {
508 i = sparsebit_next_clear(region->unused_phy_pages, i);
509 if (i > last)
510 break;
511
512 nested_map(vmx, vm,
513 (uint64_t)i << vm->page_shift,
514 (uint64_t)i << vm->page_shift,
515 1 << vm->page_shift,
516 eptp_memslot);
517 }
518}
519
520void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
521 uint32_t eptp_memslot)
522{
523 vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
524 vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
525 vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
526}
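
The eptPageTablePointer bitfields above are copied byte-for-byte into the EPT_POINTER value, which relies on the compiler laying bitfields out LSB-first (as gcc does on x86). Under that assumption, the equivalent explicit encoding below makes the resulting EPTP layout visible; the shift names are invented here for illustration.

#include <stdint.h>

#define EPTP_MEMTYPE_SHIFT	0			/* bits 2:0, e.g. WB = 6 */
#define EPTP_WALK_LEN_SHIFT	3			/* bits 5:3, encodes levels - 1 */
#define EPTP_AD_ENABLE		(1ull << 6)
#define EPTP_PFN_MASK		(~((1ull << 12) - 1))	/* 4K-aligned PML4 address */

static uint64_t encode_eptp(uint64_t pml4_gpa, unsigned int memtype,
			    unsigned int levels, int ad_enabled)
{
	uint64_t eptp = 0;

	eptp |= (uint64_t)memtype << EPTP_MEMTYPE_SHIFT;
	eptp |= (uint64_t)(levels - 1) << EPTP_WALK_LEN_SHIFT;
	if (ad_enabled)
		eptp |= EPTP_AD_ENABLE;
	eptp |= pml4_gpa & EPTP_PFN_MASK;
	return eptp;
}

For the init_vmcs_control_fields() path above, encode_eptp(vmx->eptp_gpa, 6 /* WB */, 4, ad) yields the same value that the memcpy() of the bitfield struct builds.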
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
new file mode 100644
index 000000000000..0bca1cfe2c1e
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -0,0 +1,156 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * KVM dirty page logging test
4 *
5 * Copyright (C) 2018, Red Hat, Inc.
6 */
7
8#define _GNU_SOURCE /* for program_invocation_name */
9
10#include <stdio.h>
11#include <stdlib.h>
12#include <linux/bitmap.h>
13#include <linux/bitops.h>
14
15#include "test_util.h"
16#include "kvm_util.h"
17#include "processor.h"
18#include "vmx.h"
19
20#define VCPU_ID 1
21
22/* The memory slot index to track dirty pages */
23#define TEST_MEM_SLOT_INDEX 1
24#define TEST_MEM_SIZE 3 /* pages */
25
26/* L1 guest test virtual memory offset */
27#define GUEST_TEST_MEM 0xc0000000
28
29/* L2 guest test virtual memory offset */
30#define NESTED_TEST_MEM1 0xc0001000
31#define NESTED_TEST_MEM2 0xc0002000
32
33static void l2_guest_code(void)
34{
35 *(volatile uint64_t *)NESTED_TEST_MEM1;
36 *(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
37 GUEST_SYNC(true);
38 GUEST_SYNC(false);
39
40 *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
41 GUEST_SYNC(true);
42 *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
43 GUEST_SYNC(true);
44 GUEST_SYNC(false);
45
46 /* Exit to L1 and never come back. */
47 vmcall();
48}
49
50void l1_guest_code(struct vmx_pages *vmx)
51{
52#define L2_GUEST_STACK_SIZE 64
53 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
54
55 GUEST_ASSERT(vmx->vmcs_gpa);
56 GUEST_ASSERT(prepare_for_vmx_operation(vmx));
57 GUEST_ASSERT(load_vmcs(vmx));
58
59 prepare_vmcs(vmx, l2_guest_code,
60 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
61
62 GUEST_SYNC(false);
63 GUEST_ASSERT(!vmlaunch());
64 GUEST_SYNC(false);
65 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
66 GUEST_DONE();
67}
68
69int main(int argc, char *argv[])
70{
71 vm_vaddr_t vmx_pages_gva = 0;
72 struct vmx_pages *vmx;
73 unsigned long *bmap;
74 uint64_t *host_test_mem;
75
76 struct kvm_vm *vm;
77 struct kvm_run *run;
78 struct ucall uc;
79 bool done = false;
80
81 /* Create VM */
82 vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
83 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
84 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
85 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
86 run = vcpu_state(vm, VCPU_ID);
87
88 /* Add an extra memory slot for testing dirty logging */
89 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
90 GUEST_TEST_MEM,
91 TEST_MEM_SLOT_INDEX,
92 TEST_MEM_SIZE,
93 KVM_MEM_LOG_DIRTY_PAGES);
94
95 /*
96 * Add an identity map for GVA range [0xc0000000, 0xc0003000). This
97 * affects both L1 and L2. However...
98 */
99 virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
100 TEST_MEM_SIZE * 4096, 0);
101
102 /*
103 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
104 * 0xc0000000.
105 *
106 * Note that prepare_eptp should be called only after L1's GPA map is done,
107 * meaning after the last call to virt_map.
108 */
109 prepare_eptp(vmx, vm, 0);
110 nested_map_memslot(vmx, vm, 0, 0);
111 nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
112 nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
113
114 bmap = bitmap_alloc(TEST_MEM_SIZE);
115 host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
116
117 while (!done) {
118 memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
119 _vcpu_run(vm, VCPU_ID);
120 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
121 "Unexpected exit reason: %u (%s),\n",
122 run->exit_reason,
123 exit_reason_str(run->exit_reason));
124
125 switch (get_ucall(vm, VCPU_ID, &uc)) {
126 case UCALL_ABORT:
127 TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
128 __FILE__, uc.args[1]);
129 /* NOT REACHED */
130 case UCALL_SYNC:
131 /*
132 * The nested guest wrote at offset 0x1000 in the memslot, but the
133 * dirty bitmap must be filled in according to L1 GPA, not L2.
134 */
135 kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
136 if (uc.args[1]) {
137 TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
138 TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
139 } else {
140 TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
141 TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
142 }
143
144 TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
145 TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
146 TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
147 TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
148 break;
149 case UCALL_DONE:
150 done = true;
151 break;
152 default:
153 TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
154 }
155 }
156}
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index 464c9b76148f..7550f08822a3 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -1,5 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2CFLAGS += -g -I../../../../usr/include/ -lpthread 2CFLAGS += -g -I../../../../usr/include/ -pthread
3 3
4TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait 4TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait
5 5
diff --git a/virt/kvm/arm/vgic/trace.h b/virt/kvm/arm/vgic/trace.h
index 55fed77a9f73..4fd4f6db181b 100644
--- a/virt/kvm/arm/vgic/trace.h
+++ b/virt/kvm/arm/vgic/trace.h
@@ -30,7 +30,7 @@ TRACE_EVENT(vgic_update_irq_pending,
30#endif /* _TRACE_VGIC_H */ 30#endif /* _TRACE_VGIC_H */
31 31
32#undef TRACE_INCLUDE_PATH 32#undef TRACE_INCLUDE_PATH
33#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic 33#define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic
34#undef TRACE_INCLUDE_FILE 34#undef TRACE_INCLUDE_FILE
35#define TRACE_INCLUDE_FILE trace 35#define TRACE_INCLUDE_FILE trace
36 36
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e6de3159e682..fd68fbe0a75d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -617,8 +617,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
617 617
618 stat_data->kvm = kvm; 618 stat_data->kvm = kvm;
619 stat_data->offset = p->offset; 619 stat_data->offset = p->offset;
620 stat_data->mode = p->mode ? p->mode : 0644;
620 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; 621 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
621 debugfs_create_file(p->name, 0644, kvm->debugfs_dentry, 622 debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
622 stat_data, stat_fops_per_vm[p->kind]); 623 stat_data, stat_fops_per_vm[p->kind]);
623 } 624 }
624 return 0; 625 return 0;
@@ -3929,7 +3930,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
3929 if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) 3930 if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
3930 return -ENOENT; 3931 return -ENOENT;
3931 3932
3932 if (simple_attr_open(inode, file, get, set, fmt)) { 3933 if (simple_attr_open(inode, file, get,
3934 stat_data->mode & S_IWUGO ? set : NULL,
3935 fmt)) {
3933 kvm_put_kvm(stat_data->kvm); 3936 kvm_put_kvm(stat_data->kvm);
3934 return -ENOMEM; 3937 return -ENOMEM;
3935 } 3938 }
@@ -4177,7 +4180,8 @@ static void kvm_init_debug(void)
4177 4180
4178 kvm_debugfs_num_entries = 0; 4181 kvm_debugfs_num_entries = 0;
4179 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { 4182 for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
4180 debugfs_create_file(p->name, 0644, kvm_debugfs_dir, 4183 int mode = p->mode ? p->mode : 0644;
4184 debugfs_create_file(p->name, mode, kvm_debugfs_dir,
4181 (void *)(long)p->offset, 4185 (void *)(long)p->offset,
4182 stat_fops[p->kind]); 4186 stat_fops[p->kind]);
4183 } 4187 }
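
The net effect of the mode plumbing: a debugfs_entries[] item can now carry an explicit mode, 0644 remains the default for a zero mode, and because kvm_debugfs_open() passes a NULL set handler whenever the mode has no write bits, simple_attr_write() in fs/libfs.c rejects writes to such files with -EACCES. A sketch of how an entry might opt into read-only exposure follows; the initializer is illustrative, not taken from this patch.

#include <linux/kvm_host.h>
#include <linux/stddef.h>

/*
 * Illustrative only: a per-VM stat published read-only (0444), following
 * struct kvm_stats_debugfs_item with the new mode field.  A mode of 0
 * keeps the historical 0644 default.
 */
static struct kvm_stats_debugfs_item example_entries[] = {
	{ "remote_tlb_flush", offsetof(struct kvm, stat.remote_tlb_flush),
	  KVM_STAT_VM, .mode = 0444 },
	{ NULL }
};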