aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h11
-rw-r--r--include/linux/adxl.h13
-rw-r--r--include/linux/amba/mmci.h11
-rw-r--r--include/linux/amifd.h63
-rw-r--r--include/linux/amifdreg.h82
-rw-r--r--include/linux/arch_topology.h1
-rw-r--r--include/linux/avf/virtchnl.h17
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/bio.h78
-rw-r--r--include/linux/blk-cgroup.h145
-rw-r--r--include/linux/blk-mq.h4
-rw-r--r--include/linux/blk-pm.h24
-rw-r--r--include/linux/blk_types.h3
-rw-r--r--include/linux/blkdev.h199
-rw-r--r--include/linux/bpf-cgroup.h55
-rw-r--r--include/linux/bpf.h88
-rw-r--r--include/linux/bpf_types.h8
-rw-r--r--include/linux/bpf_verifier.h46
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/bvec.h3
-rw-r--r--include/linux/cgroup-defs.h5
-rw-r--r--include/linux/cgroup.h32
-rw-r--r--include/linux/clocksource.h8
-rw-r--r--include/linux/compat.h108
-rw-r--r--include/linux/compat_time.h32
-rw-r--r--include/linux/compiler-gcc.h21
-rw-r--r--include/linux/compiler.h73
-rw-r--r--include/linux/compiler_types.h9
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/coredump.h4
-rw-r--r--include/linux/coresight.h41
-rw-r--r--include/linux/cpufeature.h2
-rw-r--r--include/linux/cpuidle.h11
-rw-r--r--include/linux/crash_dump.h4
-rw-r--r--include/linux/crc-t10dif.h1
-rw-r--r--include/linux/crypto.h110
-rw-r--r--include/linux/cuda.h4
-rw-r--r--include/linux/debug_locks.h4
-rw-r--r--include/linux/delayacct.h23
-rw-r--r--include/linux/devfreq.h8
-rw-r--r--include/linux/device-mapper.h18
-rw-r--r--include/linux/device.h37
-rw-r--r--include/linux/dma-debug.h8
-rw-r--r--include/linux/dma-direct.h10
-rw-r--r--include/linux/dma-mapping.h42
-rw-r--r--include/linux/dma-noncoherent.h27
-rw-r--r--include/linux/dma/sprd-dma.h69
-rw-r--r--include/linux/dns_resolver.h4
-rw-r--r--include/linux/edac.h5
-rw-r--r--include/linux/efi.h51
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/elfcore-compat.h8
-rw-r--r--include/linux/ethtool.h33
-rw-r--r--include/linux/f2fs_fs.h10
-rw-r--r--include/linux/fanotify.h59
-rw-r--r--include/linux/fb.h11
-rw-r--r--include/linux/filter.h43
-rw-r--r--include/linux/fpga/fpga-bridge.h4
-rw-r--r--include/linux/fpga/fpga-mgr.h24
-rw-r--r--include/linux/fpga/fpga-region.h4
-rw-r--r--include/linux/fs.h107
-rw-r--r--include/linux/fsl/mc.h14
-rw-r--r--include/linux/fsl_ifc.h2
-rw-r--r--include/linux/fsnotify_backend.h30
-rw-r--r--include/linux/genhd.h10
-rw-r--r--include/linux/gpio/consumer.h80
-rw-r--r--include/linux/gpio/driver.h46
-rw-r--r--include/linux/hdmi.h4
-rw-r--r--include/linux/hid.h29
-rw-r--r--include/linux/hmm.h2
-rw-r--r--include/linux/huge_mm.h10
-rw-r--r--include/linux/hugetlb.h14
-rw-r--r--include/linux/hw_random.h3
-rw-r--r--include/linux/hwmon.h2
-rw-r--r--include/linux/hyperv.h14
-rw-r--r--include/linux/idr.h18
-rw-r--r--include/linux/ieee80211.h112
-rw-r--r--include/linux/if_tun.h14
-rw-r--r--include/linux/init.h2
-rw-r--r--include/linux/intel-iommu.h72
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/iomap.h4
-rw-r--r--include/linux/iommu.h10
-rw-r--r--include/linux/iova.h1
-rw-r--r--include/linux/ipmi.h2
-rw-r--r--include/linux/ipmi_smi.h2
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/irq.h3
-rw-r--r--include/linux/irqchip/arm-gic-common.h6
-rw-r--r--include/linux/irqchip/arm-gic-v3.h9
-rw-r--r--include/linux/irqchip/arm-gic.h5
-rw-r--r--include/linux/irqdomain.h1
-rw-r--r--include/linux/jump_label.h65
-rw-r--r--include/linux/kernfs.h9
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/leds.h15
-rw-r--r--include/linux/libfdt_env.h1
-rw-r--r--include/linux/lightnvm.h166
-rw-r--r--include/linux/linkage.h1
-rw-r--r--include/linux/linkmode.h76
-rw-r--r--include/linux/list.h23
-rw-r--r--include/linux/lockdep.h7
-rw-r--r--include/linux/lsm_hooks.h16
-rw-r--r--include/linux/math64.h3
-rw-r--r--include/linux/memblock.h15
-rw-r--r--include/linux/memcontrol.h15
-rw-r--r--include/linux/memremap.h6
-rw-r--r--include/linux/mfd/cros_ec_commands.h11
-rw-r--r--include/linux/mfd/da9063/pdata.h16
-rw-r--r--include/linux/mfd/ingenic-tcu.h56
-rw-r--r--include/linux/mfd/intel_msic.h7
-rw-r--r--include/linux/mfd/intel_soc_pmic.h13
-rw-r--r--include/linux/mfd/intel_soc_pmic_bxtwc.h10
-rw-r--r--include/linux/mfd/madera/core.h2
-rw-r--r--include/linux/mfd/madera/pdata.h1
-rw-r--r--include/linux/mfd/max14577-private.h11
-rw-r--r--include/linux/mfd/max14577.h11
-rw-r--r--include/linux/mfd/max77686-private.h15
-rw-r--r--include/linux/mfd/max77686.h15
-rw-r--r--include/linux/mfd/max77693-common.h6
-rw-r--r--include/linux/mfd/max77693-private.h15
-rw-r--r--include/linux/mfd/max77693.h15
-rw-r--r--include/linux/mfd/max77843-private.h6
-rw-r--r--include/linux/mfd/max8997-private.h15
-rw-r--r--include/linux/mfd/max8997.h16
-rw-r--r--include/linux/mfd/max8998-private.h15
-rw-r--r--include/linux/mfd/max8998.h15
-rw-r--r--include/linux/mfd/mc13xxx.h1
-rw-r--r--include/linux/mfd/rohm-bd718x7.h372
-rw-r--r--include/linux/mfd/samsung/core.h11
-rw-r--r--include/linux/mfd/samsung/irq.h10
-rw-r--r--include/linux/mfd/samsung/rtc.h15
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h7
-rw-r--r--include/linux/mfd/samsung/s2mps11.h9
-rw-r--r--include/linux/mfd/samsung/s2mps13.h14
-rw-r--r--include/linux/mfd/samsung/s2mps14.h14
-rw-r--r--include/linux/mfd/samsung/s2mps15.h11
-rw-r--r--include/linux/mfd/samsung/s2mpu02.h14
-rw-r--r--include/linux/mfd/samsung/s5m8763.h10
-rw-r--r--include/linux/mfd/samsung/s5m8767.h10
-rw-r--r--include/linux/mfd/ti-lmu.h3
-rw-r--r--include/linux/mfd/tmio.h7
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mii.h101
-rw-r--r--include/linux/mlx5/cq.h1
-rw-r--r--include/linux/mlx5/device.h10
-rw-r--r--include/linux/mlx5/driver.h92
-rw-r--r--include/linux/mlx5/fs.h40
-rw-r--r--include/linux/mlx5/mlx5_ifc.h264
-rw-r--r--include/linux/mlx5/qp.h1
-rw-r--r--include/linux/mlx5/srq.h1
-rw-r--r--include/linux/mlx5/transobj.h2
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h72
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mm_types_task.h2
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmu_notifier.h27
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mroute_base.h11
-rw-r--r--include/linux/msi.h17
-rw-r--r--include/linux/mtd/blktrans.h5
-rw-r--r--include/linux/mtd/jedec.h91
-rw-r--r--include/linux/mtd/nand_bch.h11
-rw-r--r--include/linux/mtd/nand_ecc.h12
-rw-r--r--include/linux/mtd/onfi.h178
-rw-r--r--include/linux/mtd/platnand.h74
-rw-r--r--include/linux/mtd/rawnand.h635
-rw-r--r--include/linux/mtd/spi-nor.h119
-rw-r--r--include/linux/ndctl.h22
-rw-r--r--include/linux/netdevice.h51
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h3
-rw-r--r--include/linux/netlink.h3
-rw-r--r--include/linux/netpoll.h9
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_xdr.h28
-rw-r--r--include/linux/nvme.h1
-rw-r--r--include/linux/nvmem-consumer.h100
-rw-r--r--include/linux/nvmem-provider.h50
-rw-r--r--include/linux/of.h40
-rw-r--r--include/linux/of_device.h3
-rw-r--r--include/linux/of_pci.h10
-rw-r--r--include/linux/page-flags.h14
-rw-r--r--include/linux/pagemap.h10
-rw-r--r--include/linux/pagevec.h8
-rw-r--r--include/linux/pci-dma-compat.h18
-rw-r--r--include/linux/pci-dma.h12
-rw-r--r--include/linux/pci-p2pdma.h114
-rw-r--r--include/linux/pci.h14
-rw-r--r--include/linux/pci_hotplug.h43
-rw-r--r--include/linux/pci_ids.h8
-rw-r--r--include/linux/percpu-refcount.h1
-rw-r--r--include/linux/perf/arm_pmu.h1
-rw-r--r--include/linux/pfn_t.h4
-rw-r--r--include/linux/phy.h41
-rw-r--r--include/linux/phy/phy-qcom-ufs.h38
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/platform_data/dma-ep93xx.h2
-rw-r--r--include/linux/platform_data/dma-mcf-edma.h38
-rw-r--r--include/linux/platform_data/ehci-sh.h16
-rw-r--r--include/linux/platform_data/gpio-davinci.h34
-rw-r--r--include/linux/platform_data/gpio-omap.h15
-rw-r--r--include/linux/platform_data/gpio-ts5500.h27
-rw-r--r--include/linux/platform_data/hsmmc-omap.h3
-rw-r--r--include/linux/platform_data/mv_usb.h1
-rw-r--r--include/linux/platform_data/pxa_sdhci.h4
-rw-r--r--include/linux/platform_data/shmob_drm.h6
-rw-r--r--include/linux/platform_data/spi-davinci.h4
-rw-r--r--include/linux/platform_device.h1
-rw-r--r--include/linux/pm_domain.h35
-rw-r--r--include/linux/pm_opp.h6
-rw-r--r--include/linux/pmu.h4
-rw-r--r--include/linux/posix-timers.h2
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/psi.h53
-rw-r--r--include/linux/psi_types.h92
-rw-r--r--include/linux/pstore_ram.h3
-rw-r--r--include/linux/ptrace.h38
-rw-r--r--include/linux/pxa2xx_ssp.h3
-rw-r--r--include/linux/qcom-geni-se.h13
-rw-r--r--include/linux/qcom_scm.h4
-rw-r--r--include/linux/qed/common_hsi.h10
-rw-r--r--include/linux/qed/iscsi_common.h2
-rw-r--r--include/linux/qed/qed_if.h35
-rw-r--r--include/linux/qed/qed_rdma_if.h11
-rw-r--r--include/linux/radix-tree.h178
-rw-r--r--include/linux/rculist.h32
-rw-r--r--include/linux/rcupdate.h154
-rw-r--r--include/linux/rcupdate_wait.h14
-rw-r--r--include/linux/rcutiny.h53
-rw-r--r--include/linux/rcutree.h31
-rw-r--r--include/linux/regmap.h31
-rw-r--r--include/linux/regulator/driver.h20
-rw-r--r--include/linux/regulator/fixed.h3
-rw-r--r--include/linux/regulator/machine.h6
-rw-r--r--include/linux/restart_block.h4
-rw-r--r--include/linux/rtc.h21
-rw-r--r--include/linux/rtnetlink.h7
-rw-r--r--include/linux/rwsem.h4
-rw-r--r--include/linux/sched.h27
-rw-r--r--include/linux/sched/loadavg.h24
-rw-r--r--include/linux/sched/signal.h23
-rw-r--r--include/linux/sched/topology.h6
-rw-r--r--include/linux/security.h6
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/serial_sci.h1
-rw-r--r--include/linux/signal.h18
-rw-r--r--include/linux/signal_types.h8
-rw-r--r--include/linux/skbuff.h55
-rw-r--r--include/linux/skmsg.h434
-rw-r--r--include/linux/slab.h56
-rw-r--r--include/linux/smp.h4
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/soundwire/sdw.h12
-rw-r--r--include/linux/spi/spi-mem.h7
-rw-r--r--include/linux/spi/spi.h36
-rw-r--r--include/linux/srcutree.h13
-rw-r--r--include/linux/start_kernel.h2
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/sunrpc/auth.h18
-rw-r--r--include/linux/sunrpc/auth_gss.h1
-rw-r--r--include/linux/sunrpc/bc_xprt.h1
-rw-r--r--include/linux/sunrpc/gss_krb5.h33
-rw-r--r--include/linux/sunrpc/sched.h10
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xdr.h11
-rw-r--r--include/linux/sunrpc/xprt.h35
-rw-r--r--include/linux/sunrpc/xprtsock.h36
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h37
-rw-r--r--include/linux/swapops.h19
-rw-r--r--include/linux/swiotlb.h9
-rw-r--r--include/linux/syscalls.h21
-rw-r--r--include/linux/tc.h1
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/thunderbolt.h5
-rw-r--r--include/linux/time32.h78
-rw-r--r--include/linux/timekeeping.h12
-rw-r--r--include/linux/timekeeping32.h53
-rw-r--r--include/linux/torture.h2
-rw-r--r--include/linux/tracehook.h13
-rw-r--r--include/linux/tracepoint-defs.h6
-rw-r--r--include/linux/tracepoint.h36
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_driver.h3
-rw-r--r--include/linux/tty_ldisc.h10
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/uio_driver.h1
-rw-r--r--include/linux/umh.h1
-rw-r--r--include/linux/usb/chipidea.h6
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/usbnet.h2
-rw-r--r--include/linux/vga_switcheroo.h3
-rw-r--r--include/linux/virtio_net.h18
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmacache.h5
-rw-r--r--include/linux/vt_kern.h7
-rw-r--r--include/linux/wait.h20
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/xarray.h1293
306 files changed, 6482 insertions, 3220 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index de8d3d3fa651..ed80f147bd50 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -831,8 +831,6 @@ static inline int acpi_dma_configure(struct device *dev,
831 return 0; 831 return 0;
832} 832}
833 833
834static inline void acpi_dma_deconfigure(struct device *dev) { }
835
836#define ACPI_PTR(_ptr) (NULL) 834#define ACPI_PTR(_ptr) (NULL)
837 835
838static inline void acpi_device_set_enumerated(struct acpi_device *adev) 836static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -1074,6 +1072,15 @@ static inline int acpi_node_get_property_reference(
1074 NR_FWNODE_REFERENCE_ARGS, args); 1072 NR_FWNODE_REFERENCE_ARGS, args);
1075} 1073}
1076 1074
1075static inline bool acpi_dev_has_props(const struct acpi_device *adev)
1076{
1077 return !list_empty(&adev->data.properties);
1078}
1079
1080struct acpi_device_properties *
1081acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
1082 const union acpi_object *properties);
1083
1077int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, 1084int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
1078 void **valptr); 1085 void **valptr);
1079int acpi_dev_prop_read_single(struct acpi_device *adev, 1086int acpi_dev_prop_read_single(struct acpi_device *adev,
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
new file mode 100644
index 000000000000..2a629acb4c3f
--- /dev/null
+++ b/include/linux/adxl.h
@@ -0,0 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Address translation interface via ACPI DSM.
4 * Copyright (C) 2018 Intel Corporation
5 */
6
7#ifndef _LINUX_ADXL_H
8#define _LINUX_ADXL_H
9
10const char * const *adxl_get_component_names(void);
11int adxl_decode(u64 addr, u64 component_values[]);
12
13#endif /* _LINUX_ADXL_H */
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index da8357ba11bc..c92ebc39fc1f 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -18,20 +18,13 @@
18 * mask into a value to be binary (or set some other custom bits 18 * mask into a value to be binary (or set some other custom bits
19 * in MMCIPWR) or:ed and written into the MMCIPWR register of the 19 * in MMCIPWR) or:ed and written into the MMCIPWR register of the
20 * block. May also control external power based on the power_mode. 20 * block. May also control external power based on the power_mode.
21 * @status: if no GPIO read function was given to the block in 21 * @status: if no GPIO line was given to the block in this function will
22 * gpio_wp (below) this function will be called to determine 22 * be called to determine whether a card is present in the MMC slot or not
23 * whether a card is present in the MMC slot or not
24 * @gpio_wp: read this GPIO pin to see if the card is write protected
25 * @gpio_cd: read this GPIO pin to detect card insertion
26 * @cd_invert: true if the gpio_cd pin value is active low
27 */ 23 */
28struct mmci_platform_data { 24struct mmci_platform_data {
29 unsigned int ocr_mask; 25 unsigned int ocr_mask;
30 int (*ios_handler)(struct device *, struct mmc_ios *); 26 int (*ios_handler)(struct device *, struct mmc_ios *);
31 unsigned int (*status)(struct device *); 27 unsigned int (*status)(struct device *);
32 int gpio_wp;
33 int gpio_cd;
34 bool cd_invert;
35}; 28};
36 29
37#endif 30#endif
diff --git a/include/linux/amifd.h b/include/linux/amifd.h
deleted file mode 100644
index 202a77dbe46d..000000000000
--- a/include/linux/amifd.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _AMIFD_H
3#define _AMIFD_H
4
5/* Definitions for the Amiga floppy driver */
6
7#include <linux/fd.h>
8
9#define FD_MAX_UNITS 4 /* Max. Number of drives */
10#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
11
12#ifndef ASSEMBLER
13
14struct fd_data_type {
15 char *name; /* description of data type */
16 int sects; /* sectors per track */
17#ifdef __STDC__
18 int (*read_fkt)(int);
19 void (*write_fkt)(int);
20#else
21 int (*read_fkt)(); /* read whole track */
22 void (*write_fkt)(); /* write whole track */
23#endif
24};
25
26/*
27** Floppy type descriptions
28*/
29
30struct fd_drive_type {
31 unsigned long code; /* code returned from drive */
32 char *name; /* description of drive */
33 unsigned int tracks; /* number of tracks */
34 unsigned int heads; /* number of heads */
35 unsigned int read_size; /* raw read size for one track */
36 unsigned int write_size; /* raw write size for one track */
37 unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
38 unsigned int precomp1; /* start track for precomp 1 */
39 unsigned int precomp2; /* start track for precomp 2 */
40 unsigned int step_delay; /* time (in ms) for delay after step */
41 unsigned int settle_time; /* time to settle after dir change */
42 unsigned int side_time; /* time needed to change sides */
43};
44
45struct amiga_floppy_struct {
46 struct fd_drive_type *type; /* type of floppy for this unit */
47 struct fd_data_type *dtype; /* type of floppy for this unit */
48 int track; /* current track (-1 == unknown) */
49 unsigned char *trackbuf; /* current track (kmaloc()'d */
50
51 int blocks; /* total # blocks on disk */
52
53 int changed; /* true when not known */
54 int disk; /* disk in drive (-1 == unknown) */
55 int motor; /* true when motor is at speed */
56 int busy; /* true when drive is active */
57 int dirty; /* true when trackbuf is not on disk */
58 int status; /* current error code for unit */
59 struct gendisk *gendisk;
60};
61#endif
62
63#endif
diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h
deleted file mode 100644
index 9b514d05ec70..000000000000
--- a/include/linux/amifdreg.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_AMIFDREG_H
3#define _LINUX_AMIFDREG_H
4
5/*
6** CIAAPRA bits (read only)
7*/
8
9#define DSKRDY (0x1<<5) /* disk ready when low */
10#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
11#define DSKPROT (0x1<<3) /* disk protected when low */
12#define DSKCHANGE (0x1<<2) /* low when disk removed */
13
14/*
15** CIAAPRB bits (read/write)
16*/
17
18#define DSKMOTOR (0x1<<7) /* motor on when low */
19#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
20#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
21#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
22#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
23#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
24#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
25#define DSKSTEP (0x1) /* pulse low to step head 1 track */
26
27/*
28** DSKBYTR bits (read only)
29*/
30
31#define DSKBYT (1<<15) /* register contains valid byte when set */
32#define DMAON (1<<14) /* disk DMA enabled */
33#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
34#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
35/* bits 7-0 are data */
36
37/*
38** ADKCON/ADKCONR bits
39*/
40
41#ifndef SETCLR
42#define ADK_SETCLR (1<<15) /* control bit */
43#endif
44#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
45#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
46#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
47#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
48#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
49#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
50
51/*
52** DSKLEN bits
53*/
54
55#define DSKLEN_DMAEN (1<<15)
56#define DSKLEN_WRITE (1<<14)
57
58/*
59** INTENA/INTREQ bits
60*/
61
62#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
63
64/*
65** Misc
66*/
67
68#define MFM_SYNC 0x4489 /* standard MFM sync value */
69
70/* Values for FD_COMMAND */
71#define FD_RECALIBRATE 0x07 /* move to track 0 */
72#define FD_SEEK 0x0F /* seek track */
73#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
74#define FD_WRITE 0xC5 /* write with MT, MFM */
75#define FD_SENSEI 0x08 /* Sense Interrupt Status */
76#define FD_SPECIFY 0x03 /* specify HUT etc */
77#define FD_FORMAT 0x4D /* format one track */
78#define FD_VERSION 0x10 /* get version code */
79#define FD_CONFIGURE 0x13 /* configure FIFO operation */
80#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
81
82#endif /* _LINUX_AMIFDREG_H */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 2b709416de05..d9bdc1a7f4e7 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -9,6 +9,7 @@
9#include <linux/percpu.h> 9#include <linux/percpu.h>
10 10
11void topology_normalize_cpu_scale(void); 11void topology_normalize_cpu_scale(void);
12int topology_update_cpu_topology(void);
12 13
13struct device_node; 14struct device_node;
14bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); 15bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 212b3822d180..2c9756bd9c4c 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -252,6 +252,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
252#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 252#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
253#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 253#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
254 254
255/* Define below the capability flags that are not offloads */
256#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
255#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ 257#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
256 VIRTCHNL_VF_OFFLOAD_VLAN | \ 258 VIRTCHNL_VF_OFFLOAD_VLAN | \
257 VIRTCHNL_VF_OFFLOAD_RSS_PF) 259 VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -573,7 +575,7 @@ struct virtchnl_filter {
573 enum virtchnl_flow_type flow_type; 575 enum virtchnl_flow_type flow_type;
574 enum virtchnl_action action; 576 enum virtchnl_action action;
575 u32 action_meta; 577 u32 action_meta;
576 __u8 field_flags; 578 u8 field_flags;
577}; 579};
578 580
579VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); 581VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
@@ -596,10 +598,23 @@ enum virtchnl_event_codes {
596struct virtchnl_pf_event { 598struct virtchnl_pf_event {
597 enum virtchnl_event_codes event; 599 enum virtchnl_event_codes event;
598 union { 600 union {
601 /* If the PF driver does not support the new speed reporting
602 * capabilities then use link_event else use link_event_adv to
603 * get the speed and link information. The ability to understand
604 * new speeds is indicated by setting the capability flag
605 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
606 * in virtchnl_vf_resource struct and can be used to determine
607 * which link event struct to use below.
608 */
599 struct { 609 struct {
600 enum virtchnl_link_speed link_speed; 610 enum virtchnl_link_speed link_speed;
601 bool link_status; 611 bool link_status;
602 } link_event; 612 } link_event;
613 struct {
614 /* link_speed provided in Mbps */
615 u32 link_speed;
616 u8 link_status;
617 } link_event_adv;
603 } event_data; 618 } event_data;
604 619
605 int severity; 620 int severity;
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index c05f24fac4f6..e9f5fe69df31 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -78,7 +78,7 @@ struct linux_binprm {
78 78
79/* Function parameter for binfmt->coredump */ 79/* Function parameter for binfmt->coredump */
80struct coredump_params { 80struct coredump_params {
81 const siginfo_t *siginfo; 81 const kernel_siginfo_t *siginfo;
82 struct pt_regs *regs; 82 struct pt_regs *regs;
83 struct file *file; 83 struct file *file;
84 unsigned long limit; 84 unsigned long limit;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 51371740d2a8..b47c7f716731 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -21,12 +21,8 @@
21#include <linux/highmem.h> 21#include <linux/highmem.h>
22#include <linux/mempool.h> 22#include <linux/mempool.h>
23#include <linux/ioprio.h> 23#include <linux/ioprio.h>
24#include <linux/bug.h>
25 24
26#ifdef CONFIG_BLOCK 25#ifdef CONFIG_BLOCK
27
28#include <asm/io.h>
29
30/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ 26/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
31#include <linux/blk_types.h> 27#include <linux/blk_types.h>
32 28
@@ -133,32 +129,6 @@ static inline bool bio_full(struct bio *bio)
133} 129}
134 130
135/* 131/*
136 * will die
137 */
138#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
139
140/*
141 * merge helpers etc
142 */
143
144/* Default implementation of BIOVEC_PHYS_MERGEABLE */
145#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
146 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
147
148/*
149 * allow arch override, for eg virtualized architectures (put in asm/io.h)
150 */
151#ifndef BIOVEC_PHYS_MERGEABLE
152#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
153 __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
154#endif
155
156#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
157 (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
158#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
159 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
160
161/*
162 * drivers should _never_ use the all version - the bio may have been split 132 * drivers should _never_ use the all version - the bio may have been split
163 * before it got to the driver and the driver won't own all of it 133 * before it got to the driver and the driver won't own all of it
164 */ 134 */
@@ -170,27 +140,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
170{ 140{
171 iter->bi_sector += bytes >> 9; 141 iter->bi_sector += bytes >> 9;
172 142
173 if (bio_no_advance_iter(bio)) { 143 if (bio_no_advance_iter(bio))
174 iter->bi_size -= bytes; 144 iter->bi_size -= bytes;
175 iter->bi_done += bytes; 145 else
176 } else {
177 bvec_iter_advance(bio->bi_io_vec, iter, bytes); 146 bvec_iter_advance(bio->bi_io_vec, iter, bytes);
178 /* TODO: It is reasonable to complete bio with error here. */ 147 /* TODO: It is reasonable to complete bio with error here. */
179 }
180}
181
182static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
183 unsigned int bytes)
184{
185 iter->bi_sector -= bytes >> 9;
186
187 if (bio_no_advance_iter(bio)) {
188 iter->bi_size += bytes;
189 iter->bi_done -= bytes;
190 return true;
191 }
192
193 return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
194} 148}
195 149
196#define __bio_for_each_segment(bvl, bio, iter, start) \ 150#define __bio_for_each_segment(bvl, bio, iter, start) \
@@ -353,6 +307,8 @@ struct bio_integrity_payload {
353 unsigned short bip_max_vcnt; /* integrity bio_vec slots */ 307 unsigned short bip_max_vcnt; /* integrity bio_vec slots */
354 unsigned short bip_flags; /* control flags */ 308 unsigned short bip_flags; /* control flags */
355 309
310 struct bvec_iter bio_iter; /* for rewinding parent bio */
311
356 struct work_struct bip_work; /* I/O completion */ 312 struct work_struct bip_work; /* I/O completion */
357 313
358 struct bio_vec *bip_vec; 314 struct bio_vec *bip_vec;
@@ -547,23 +503,31 @@ do { \
547 disk_devt((bio)->bi_disk) 503 disk_devt((bio)->bi_disk)
548 504
549#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) 505#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
550int bio_associate_blkcg_from_page(struct bio *bio, struct page *page); 506int bio_associate_blkg_from_page(struct bio *bio, struct page *page);
551#else 507#else
552static inline int bio_associate_blkcg_from_page(struct bio *bio, 508static inline int bio_associate_blkg_from_page(struct bio *bio,
553 struct page *page) { return 0; } 509 struct page *page) { return 0; }
554#endif 510#endif
555 511
556#ifdef CONFIG_BLK_CGROUP 512#ifdef CONFIG_BLK_CGROUP
557int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
558int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg); 513int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
514int bio_associate_blkg_from_css(struct bio *bio,
515 struct cgroup_subsys_state *css);
516int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
517int bio_reassociate_blkg(struct request_queue *q, struct bio *bio);
559void bio_disassociate_task(struct bio *bio); 518void bio_disassociate_task(struct bio *bio);
560void bio_clone_blkcg_association(struct bio *dst, struct bio *src); 519void bio_clone_blkg_association(struct bio *dst, struct bio *src);
561#else /* CONFIG_BLK_CGROUP */ 520#else /* CONFIG_BLK_CGROUP */
562static inline int bio_associate_blkcg(struct bio *bio, 521static inline int bio_associate_blkg_from_css(struct bio *bio,
563 struct cgroup_subsys_state *blkcg_css) { return 0; } 522 struct cgroup_subsys_state *css)
523{ return 0; }
524static inline int bio_associate_create_blkg(struct request_queue *q,
525 struct bio *bio) { return 0; }
526static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
527{ return 0; }
564static inline void bio_disassociate_task(struct bio *bio) { } 528static inline void bio_disassociate_task(struct bio *bio) { }
565static inline void bio_clone_blkcg_association(struct bio *dst, 529static inline void bio_clone_blkg_association(struct bio *dst,
566 struct bio *src) { } 530 struct bio *src) { }
567#endif /* CONFIG_BLK_CGROUP */ 531#endif /* CONFIG_BLK_CGROUP */
568 532
569#ifdef CONFIG_HIGHMEM 533#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6d766a19f2bb..1e76ceebeb5d 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -126,7 +126,7 @@ struct blkcg_gq {
126 struct request_list rl; 126 struct request_list rl;
127 127
128 /* reference count */ 128 /* reference count */
129 atomic_t refcnt; 129 struct percpu_ref refcnt;
130 130
131 /* is this blkg online? protected by both blkcg and q locks */ 131 /* is this blkg online? protected by both blkcg and q locks */
132 bool online; 132 bool online;
@@ -184,6 +184,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
184 184
185struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, 185struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
186 struct request_queue *q, bool update_hint); 186 struct request_queue *q, bool update_hint);
187struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
188 struct request_queue *q);
187struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, 189struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
188 struct request_queue *q); 190 struct request_queue *q);
189int blkcg_init_queue(struct request_queue *q); 191int blkcg_init_queue(struct request_queue *q);
@@ -230,22 +232,59 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
230 char *input, struct blkg_conf_ctx *ctx); 232 char *input, struct blkg_conf_ctx *ctx);
231void blkg_conf_finish(struct blkg_conf_ctx *ctx); 233void blkg_conf_finish(struct blkg_conf_ctx *ctx);
232 234
235/**
236 * blkcg_css - find the current css
237 *
238 * Find the css associated with either the kthread or the current task.
239 * This may return a dying css, so it is up to the caller to use tryget logic
240 * to confirm it is alive and well.
241 */
242static inline struct cgroup_subsys_state *blkcg_css(void)
243{
244 struct cgroup_subsys_state *css;
245
246 css = kthread_blkcg();
247 if (css)
248 return css;
249 return task_css(current, io_cgrp_id);
250}
233 251
234static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) 252static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
235{ 253{
236 return css ? container_of(css, struct blkcg, css) : NULL; 254 return css ? container_of(css, struct blkcg, css) : NULL;
237} 255}
238 256
239static inline struct blkcg *bio_blkcg(struct bio *bio) 257/**
258 * __bio_blkcg - internal version of bio_blkcg for bfq and cfq
259 *
260 * DO NOT USE.
261 * There is a flaw using this version of the function. In particular, this was
262 * used in a broken paradigm where association was called on the given css. It
263 * is possible though that the returned css from task_css() is in the process
264 * of dying due to migration of the current task. So it is improper to assume
265 * *_get() is going to succeed. Both BFQ and CFQ rely on this logic and will
266 * take additional work to handle more gracefully.
267 */
268static inline struct blkcg *__bio_blkcg(struct bio *bio)
240{ 269{
241 struct cgroup_subsys_state *css; 270 if (bio && bio->bi_blkg)
271 return bio->bi_blkg->blkcg;
272 return css_to_blkcg(blkcg_css());
273}
242 274
243 if (bio && bio->bi_css) 275/**
244 return css_to_blkcg(bio->bi_css); 276 * bio_blkcg - grab the blkcg associated with a bio
245 css = kthread_blkcg(); 277 * @bio: target bio
246 if (css) 278 *
247 return css_to_blkcg(css); 279 * This returns the blkcg associated with a bio, NULL if not associated.
248 return css_to_blkcg(task_css(current, io_cgrp_id)); 280 * Callers are expected to either handle NULL or know association has been
281 * done prior to calling this.
282 */
283static inline struct blkcg *bio_blkcg(struct bio *bio)
284{
285 if (bio && bio->bi_blkg)
286 return bio->bi_blkg->blkcg;
287 return NULL;
249} 288}
250 289
251static inline bool blk_cgroup_congested(void) 290static inline bool blk_cgroup_congested(void)
@@ -451,26 +490,35 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
451 */ 490 */
452static inline void blkg_get(struct blkcg_gq *blkg) 491static inline void blkg_get(struct blkcg_gq *blkg)
453{ 492{
454 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); 493 percpu_ref_get(&blkg->refcnt);
455 atomic_inc(&blkg->refcnt);
456} 494}
457 495
458/** 496/**
459 * blkg_try_get - try and get a blkg reference 497 * blkg_tryget - try and get a blkg reference
460 * @blkg: blkg to get 498 * @blkg: blkg to get
461 * 499 *
462 * This is for use when doing an RCU lookup of the blkg. We may be in the midst 500 * This is for use when doing an RCU lookup of the blkg. We may be in the midst
463 * of freeing this blkg, so we can only use it if the refcnt is not zero. 501 * of freeing this blkg, so we can only use it if the refcnt is not zero.
464 */ 502 */
465static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg) 503static inline bool blkg_tryget(struct blkcg_gq *blkg)
466{ 504{
467 if (atomic_inc_not_zero(&blkg->refcnt)) 505 return percpu_ref_tryget(&blkg->refcnt);
468 return blkg;
469 return NULL;
470} 506}
471 507
508/**
509 * blkg_tryget_closest - try and get a blkg ref on the closet blkg
510 * @blkg: blkg to get
511 *
512 * This walks up the blkg tree to find the closest non-dying blkg and returns
513 * the blkg that it did association with as it may not be the passed in blkg.
514 */
515static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
516{
517 while (!percpu_ref_tryget(&blkg->refcnt))
518 blkg = blkg->parent;
472 519
473void __blkg_release_rcu(struct rcu_head *rcu); 520 return blkg;
521}
474 522
475/** 523/**
476 * blkg_put - put a blkg reference 524 * blkg_put - put a blkg reference
@@ -478,9 +526,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
478 */ 526 */
479static inline void blkg_put(struct blkcg_gq *blkg) 527static inline void blkg_put(struct blkcg_gq *blkg)
480{ 528{
481 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); 529 percpu_ref_put(&blkg->refcnt);
482 if (atomic_dec_and_test(&blkg->refcnt))
483 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
484} 530}
485 531
486/** 532/**
@@ -533,25 +579,36 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
533 579
534 rcu_read_lock(); 580 rcu_read_lock();
535 581
536 blkcg = bio_blkcg(bio); 582 if (bio && bio->bi_blkg) {
583 blkcg = bio->bi_blkg->blkcg;
584 if (blkcg == &blkcg_root)
585 goto rl_use_root;
586
587 blkg_get(bio->bi_blkg);
588 rcu_read_unlock();
589 return &bio->bi_blkg->rl;
590 }
537 591
538 /* bypass blkg lookup and use @q->root_rl directly for root */ 592 blkcg = css_to_blkcg(blkcg_css());
539 if (blkcg == &blkcg_root) 593 if (blkcg == &blkcg_root)
540 goto root_rl; 594 goto rl_use_root;
541 595
542 /*
543 * Try to use blkg->rl. blkg lookup may fail under memory pressure
544 * or if either the blkcg or queue is going away. Fall back to
545 * root_rl in such cases.
546 */
547 blkg = blkg_lookup(blkcg, q); 596 blkg = blkg_lookup(blkcg, q);
548 if (unlikely(!blkg)) 597 if (unlikely(!blkg))
549 goto root_rl; 598 blkg = __blkg_lookup_create(blkcg, q);
599
600 if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg))
601 goto rl_use_root;
550 602
551 blkg_get(blkg);
552 rcu_read_unlock(); 603 rcu_read_unlock();
553 return &blkg->rl; 604 return &blkg->rl;
554root_rl: 605
606 /*
607 * Each blkg has its own request_list, however, the root blkcg
608 * uses the request_queue's root_rl. This is to avoid most
609 * overhead for the root blkcg.
610 */
611rl_use_root:
555 rcu_read_unlock(); 612 rcu_read_unlock();
556 return &q->root_rl; 613 return &q->root_rl;
557} 614}
@@ -797,32 +854,26 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
797 struct bio *bio) { return false; } 854 struct bio *bio) { return false; }
798#endif 855#endif
799 856
857
858static inline void blkcg_bio_issue_init(struct bio *bio)
859{
860 bio_issue_init(&bio->bi_issue, bio_sectors(bio));
861}
862
800static inline bool blkcg_bio_issue_check(struct request_queue *q, 863static inline bool blkcg_bio_issue_check(struct request_queue *q,
801 struct bio *bio) 864 struct bio *bio)
802{ 865{
803 struct blkcg *blkcg;
804 struct blkcg_gq *blkg; 866 struct blkcg_gq *blkg;
805 bool throtl = false; 867 bool throtl = false;
806 868
807 rcu_read_lock(); 869 rcu_read_lock();
808 blkcg = bio_blkcg(bio);
809
810 /* associate blkcg if bio hasn't attached one */
811 bio_associate_blkcg(bio, &blkcg->css);
812 870
813 blkg = blkg_lookup(blkcg, q); 871 bio_associate_create_blkg(q, bio);
814 if (unlikely(!blkg)) { 872 blkg = bio->bi_blkg;
815 spin_lock_irq(q->queue_lock);
816 blkg = blkg_lookup_create(blkcg, q);
817 if (IS_ERR(blkg))
818 blkg = NULL;
819 spin_unlock_irq(q->queue_lock);
820 }
821 873
822 throtl = blk_throtl_bio(q, blkg, bio); 874 throtl = blk_throtl_bio(q, blkg, bio);
823 875
824 if (!throtl) { 876 if (!throtl) {
825 blkg = blkg ?: q->root_blkg;
826 /* 877 /*
827 * If the bio is flagged with BIO_QUEUE_ENTERED it means this 878 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
828 * is a split bio and we would have already accounted for the 879 * is a split bio and we would have already accounted for the
@@ -834,6 +885,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
834 blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); 885 blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
835 } 886 }
836 887
888 blkcg_bio_issue_init(bio);
889
837 rcu_read_unlock(); 890 rcu_read_unlock();
838 return !throtl; 891 return !throtl;
839} 892}
@@ -930,6 +983,7 @@ static inline int blkcg_activate_policy(struct request_queue *q,
930static inline void blkcg_deactivate_policy(struct request_queue *q, 983static inline void blkcg_deactivate_policy(struct request_queue *q,
931 const struct blkcg_policy *pol) { } 984 const struct blkcg_policy *pol) { }
932 985
986static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
933static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } 987static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
934 988
935static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, 989static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -945,6 +999,7 @@ static inline void blk_put_rl(struct request_list *rl) { }
945static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } 999static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
946static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } 1000static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
947 1001
1002static inline void blkcg_bio_issue_init(struct bio *bio) { }
948static inline bool blkcg_bio_issue_check(struct request_queue *q, 1003static inline bool blkcg_bio_issue_check(struct request_queue *q,
949 struct bio *bio) { return true; } 1004 struct bio *bio) { return true; }
950 1005
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1da59c16f637..2286dc12c6bc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -203,6 +203,10 @@ enum {
203struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); 203struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
204struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, 204struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
205 struct request_queue *q); 205 struct request_queue *q);
206struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
207 const struct blk_mq_ops *ops,
208 unsigned int queue_depth,
209 unsigned int set_flags);
206int blk_mq_register_dev(struct device *, struct request_queue *); 210int blk_mq_register_dev(struct device *, struct request_queue *);
207void blk_mq_unregister_dev(struct device *, struct request_queue *); 211void blk_mq_unregister_dev(struct device *, struct request_queue *);
208 212
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 000000000000..b80c65aba249
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,24 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _BLK_PM_H_
4#define _BLK_PM_H_
5
6struct device;
7struct request_queue;
8
9/*
10 * block layer runtime pm functions
11 */
12#ifdef CONFIG_PM
13extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
14extern int blk_pre_runtime_suspend(struct request_queue *q);
15extern void blk_post_runtime_suspend(struct request_queue *q, int err);
16extern void blk_pre_runtime_resume(struct request_queue *q);
17extern void blk_post_runtime_resume(struct request_queue *q, int err);
18extern void blk_set_runtime_active(struct request_queue *q);
19#else
20static inline void blk_pm_runtime_init(struct request_queue *q,
21 struct device *dev) {}
22#endif
23
24#endif /* _BLK_PM_H_ */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f6dfb30737d8..093a818c5b68 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -178,7 +178,6 @@ struct bio {
178 * release. Read comment on top of bio_associate_current(). 178 * release. Read comment on top of bio_associate_current().
179 */ 179 */
180 struct io_context *bi_ioc; 180 struct io_context *bi_ioc;
181 struct cgroup_subsys_state *bi_css;
182 struct blkcg_gq *bi_blkg; 181 struct blkcg_gq *bi_blkg;
183 struct bio_issue bi_issue; 182 struct bio_issue bi_issue;
184#endif 183#endif
@@ -284,8 +283,6 @@ enum req_opf {
284 REQ_OP_FLUSH = 2, 283 REQ_OP_FLUSH = 2,
285 /* discard sectors */ 284 /* discard sectors */
286 REQ_OP_DISCARD = 3, 285 REQ_OP_DISCARD = 3,
287 /* get zone information */
288 REQ_OP_ZONE_REPORT = 4,
289 /* securely erase sectors */ 286 /* securely erase sectors */
290 REQ_OP_SECURE_ERASE = 5, 287 REQ_OP_SECURE_ERASE = 5,
291 /* seset a zone write pointer */ 288 /* seset a zone write pointer */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d6869e0e2b64..4293dc1cd160 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,7 +54,7 @@ struct blk_stat_callback;
54 * Maximum number of blkcg policies allowed to be registered concurrently. 54 * Maximum number of blkcg policies allowed to be registered concurrently.
55 * Defined here to simplify include dependency. 55 * Defined here to simplify include dependency.
56 */ 56 */
57#define BLKCG_MAX_POLS 3 57#define BLKCG_MAX_POLS 5
58 58
59typedef void (rq_end_io_fn)(struct request *, blk_status_t); 59typedef void (rq_end_io_fn)(struct request *, blk_status_t);
60 60
@@ -108,7 +108,7 @@ typedef __u32 __bitwise req_flags_t;
108#define RQF_QUIET ((__force req_flags_t)(1 << 11)) 108#define RQF_QUIET ((__force req_flags_t)(1 << 11))
109/* elevator private data attached */ 109/* elevator private data attached */
110#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) 110#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
111/* account I/O stat */ 111/* account into disk and partition IO statistics */
112#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) 112#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
113/* request came from our alloc pool */ 113/* request came from our alloc pool */
114#define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) 114#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
@@ -116,7 +116,7 @@ typedef __u32 __bitwise req_flags_t;
116#define RQF_PM ((__force req_flags_t)(1 << 15)) 116#define RQF_PM ((__force req_flags_t)(1 << 15))
117/* on IO scheduler merge hash */ 117/* on IO scheduler merge hash */
118#define RQF_HASHED ((__force req_flags_t)(1 << 16)) 118#define RQF_HASHED ((__force req_flags_t)(1 << 16))
119/* IO stats tracking on */ 119/* track IO completion time */
120#define RQF_STATS ((__force req_flags_t)(1 << 17)) 120#define RQF_STATS ((__force req_flags_t)(1 << 17))
121/* Look at ->special_vec for the actual data payload instead of the 121/* Look at ->special_vec for the actual data payload instead of the
122 bio chain. */ 122 bio chain. */
@@ -396,16 +396,13 @@ struct queue_limits {
396 396
397#ifdef CONFIG_BLK_DEV_ZONED 397#ifdef CONFIG_BLK_DEV_ZONED
398 398
399struct blk_zone_report_hdr { 399extern unsigned int blkdev_nr_zones(struct block_device *bdev);
400 unsigned int nr_zones;
401 u8 padding[60];
402};
403
404extern int blkdev_report_zones(struct block_device *bdev, 400extern int blkdev_report_zones(struct block_device *bdev,
405 sector_t sector, struct blk_zone *zones, 401 sector_t sector, struct blk_zone *zones,
406 unsigned int *nr_zones, gfp_t gfp_mask); 402 unsigned int *nr_zones, gfp_t gfp_mask);
407extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, 403extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
408 sector_t nr_sectors, gfp_t gfp_mask); 404 sector_t nr_sectors, gfp_t gfp_mask);
405extern int blk_revalidate_disk_zones(struct gendisk *disk);
409 406
410extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, 407extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
411 unsigned int cmd, unsigned long arg); 408 unsigned int cmd, unsigned long arg);
@@ -414,6 +411,16 @@ extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
414 411
415#else /* CONFIG_BLK_DEV_ZONED */ 412#else /* CONFIG_BLK_DEV_ZONED */
416 413
414static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
415{
416 return 0;
417}
418
419static inline int blk_revalidate_disk_zones(struct gendisk *disk)
420{
421 return 0;
422}
423
417static inline int blkdev_report_zones_ioctl(struct block_device *bdev, 424static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
418 fmode_t mode, unsigned int cmd, 425 fmode_t mode, unsigned int cmd,
419 unsigned long arg) 426 unsigned long arg)
@@ -504,6 +511,12 @@ struct request_queue {
504 * various queue flags, see QUEUE_* below 511 * various queue flags, see QUEUE_* below
505 */ 512 */
506 unsigned long queue_flags; 513 unsigned long queue_flags;
514 /*
515 * Number of contexts that have called blk_set_pm_only(). If this
516 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
517 * processed.
518 */
519 atomic_t pm_only;
507 520
508 /* 521 /*
509 * ida allocated id for this queue. Used to index queues from 522 * ida allocated id for this queue. Used to index queues from
@@ -679,7 +692,7 @@ struct request_queue {
679#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ 692#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
680#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */ 693#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
681#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 694#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
682#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */ 695#define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */
683#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */ 696#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
684#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */ 697#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
685#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */ 698#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
@@ -693,12 +706,12 @@ struct request_queue {
693#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */ 706#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
694#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */ 707#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */
695#define QUEUE_FLAG_DAX 23 /* device supports DAX */ 708#define QUEUE_FLAG_DAX 23 /* device supports DAX */
696#define QUEUE_FLAG_STATS 24 /* track rq completion times */ 709#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */
697#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */ 710#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
698#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ 711#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
699#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ 712#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
700#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ 713#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
701#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */ 714#define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */
702 715
703#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 716#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
704 (1 << QUEUE_FLAG_SAME_COMP) | \ 717 (1 << QUEUE_FLAG_SAME_COMP) | \
@@ -731,17 +744,18 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
731#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) 744#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
732#define blk_queue_scsi_passthrough(q) \ 745#define blk_queue_scsi_passthrough(q) \
733 test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) 746 test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
747#define blk_queue_pci_p2pdma(q) \
748 test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
734 749
735#define blk_noretry_request(rq) \ 750#define blk_noretry_request(rq) \
736 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 751 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
737 REQ_FAILFAST_DRIVER)) 752 REQ_FAILFAST_DRIVER))
738#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) 753#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
739#define blk_queue_preempt_only(q) \ 754#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
740 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
741#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) 755#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
742 756
743extern int blk_set_preempt_only(struct request_queue *q); 757extern void blk_set_pm_only(struct request_queue *q);
744extern void blk_clear_preempt_only(struct request_queue *q); 758extern void blk_clear_pm_only(struct request_queue *q);
745 759
746static inline int queue_in_flight(struct request_queue *q) 760static inline int queue_in_flight(struct request_queue *q)
747{ 761{
@@ -799,6 +813,11 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
799} 813}
800 814
801#ifdef CONFIG_BLK_DEV_ZONED 815#ifdef CONFIG_BLK_DEV_ZONED
816static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
817{
818 return blk_queue_is_zoned(q) ? q->nr_zones : 0;
819}
820
802static inline unsigned int blk_queue_zone_no(struct request_queue *q, 821static inline unsigned int blk_queue_zone_no(struct request_queue *q,
803 sector_t sector) 822 sector_t sector)
804{ 823{
@@ -814,6 +833,11 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
814 return false; 833 return false;
815 return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); 834 return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
816} 835}
836#else /* CONFIG_BLK_DEV_ZONED */
837static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
838{
839 return 0;
840}
817#endif /* CONFIG_BLK_DEV_ZONED */ 841#endif /* CONFIG_BLK_DEV_ZONED */
818 842
819static inline bool rq_is_sync(struct request *rq) 843static inline bool rq_is_sync(struct request *rq)
@@ -1281,29 +1305,6 @@ extern void blk_put_queue(struct request_queue *);
1281extern void blk_set_queue_dying(struct request_queue *); 1305extern void blk_set_queue_dying(struct request_queue *);
1282 1306
1283/* 1307/*
1284 * block layer runtime pm functions
1285 */
1286#ifdef CONFIG_PM
1287extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1288extern int blk_pre_runtime_suspend(struct request_queue *q);
1289extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1290extern void blk_pre_runtime_resume(struct request_queue *q);
1291extern void blk_post_runtime_resume(struct request_queue *q, int err);
1292extern void blk_set_runtime_active(struct request_queue *q);
1293#else
1294static inline void blk_pm_runtime_init(struct request_queue *q,
1295 struct device *dev) {}
1296static inline int blk_pre_runtime_suspend(struct request_queue *q)
1297{
1298 return -ENOSYS;
1299}
1300static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1301static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1302static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1303static inline void blk_set_runtime_active(struct request_queue *q) {}
1304#endif
1305
1306/*
1307 * blk_plug permits building a queue of related requests by holding the I/O 1308 * blk_plug permits building a queue of related requests by holding the I/O
1308 * fragments for a short period. This allows merging of sequential requests 1309 * fragments for a short period. This allows merging of sequential requests
1309 * into single larger request. As the requests are moved from a per-task list to 1310 * into single larger request. As the requests are moved from a per-task list to
@@ -1676,94 +1677,6 @@ static inline void put_dev_sector(Sector p)
1676 put_page(p.v); 1677 put_page(p.v);
1677} 1678}
1678 1679
1679static inline bool __bvec_gap_to_prev(struct request_queue *q,
1680 struct bio_vec *bprv, unsigned int offset)
1681{
1682 return offset ||
1683 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1684}
1685
1686/*
1687 * Check if adding a bio_vec after bprv with offset would create a gap in
1688 * the SG list. Most drivers don't care about this, but some do.
1689 */
1690static inline bool bvec_gap_to_prev(struct request_queue *q,
1691 struct bio_vec *bprv, unsigned int offset)
1692{
1693 if (!queue_virt_boundary(q))
1694 return false;
1695 return __bvec_gap_to_prev(q, bprv, offset);
1696}
1697
1698/*
1699 * Check if the two bvecs from two bios can be merged to one segment.
1700 * If yes, no need to check gap between the two bios since the 1st bio
1701 * and the 1st bvec in the 2nd bio can be handled in one segment.
1702 */
1703static inline bool bios_segs_mergeable(struct request_queue *q,
1704 struct bio *prev, struct bio_vec *prev_last_bv,
1705 struct bio_vec *next_first_bv)
1706{
1707 if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
1708 return false;
1709 if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
1710 return false;
1711 if (prev->bi_seg_back_size + next_first_bv->bv_len >
1712 queue_max_segment_size(q))
1713 return false;
1714 return true;
1715}
1716
1717static inline bool bio_will_gap(struct request_queue *q,
1718 struct request *prev_rq,
1719 struct bio *prev,
1720 struct bio *next)
1721{
1722 if (bio_has_data(prev) && queue_virt_boundary(q)) {
1723 struct bio_vec pb, nb;
1724
1725 /*
1726 * don't merge if the 1st bio starts with non-zero
1727 * offset, otherwise it is quite difficult to respect
1728 * sg gap limit. We work hard to merge a huge number of small
1729 * single bios in case of mkfs.
1730 */
1731 if (prev_rq)
1732 bio_get_first_bvec(prev_rq->bio, &pb);
1733 else
1734 bio_get_first_bvec(prev, &pb);
1735 if (pb.bv_offset)
1736 return true;
1737
1738 /*
1739 * We don't need to worry about the situation that the
1740 * merged segment ends in unaligned virt boundary:
1741 *
1742 * - if 'pb' ends aligned, the merged segment ends aligned
1743 * - if 'pb' ends unaligned, the next bio must include
1744 * one single bvec of 'nb', otherwise the 'nb' can't
1745 * merge with 'pb'
1746 */
1747 bio_get_last_bvec(prev, &pb);
1748 bio_get_first_bvec(next, &nb);
1749
1750 if (!bios_segs_mergeable(q, prev, &pb, &nb))
1751 return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1752 }
1753
1754 return false;
1755}
1756
1757static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1758{
1759 return bio_will_gap(req->q, req, req->biotail, bio);
1760}
1761
1762static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1763{
1764 return bio_will_gap(req->q, NULL, bio, req->bio);
1765}
1766
1767int kblockd_schedule_work(struct work_struct *work); 1680int kblockd_schedule_work(struct work_struct *work);
1768int kblockd_schedule_work_on(int cpu, struct work_struct *work); 1681int kblockd_schedule_work_on(int cpu, struct work_struct *work);
1769int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 1682int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
@@ -1843,26 +1756,6 @@ queue_max_integrity_segments(struct request_queue *q)
1843 return q->limits.max_integrity_segments; 1756 return q->limits.max_integrity_segments;
1844} 1757}
1845 1758
1846static inline bool integrity_req_gap_back_merge(struct request *req,
1847 struct bio *next)
1848{
1849 struct bio_integrity_payload *bip = bio_integrity(req->bio);
1850 struct bio_integrity_payload *bip_next = bio_integrity(next);
1851
1852 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1853 bip_next->bip_vec[0].bv_offset);
1854}
1855
1856static inline bool integrity_req_gap_front_merge(struct request *req,
1857 struct bio *bio)
1858{
1859 struct bio_integrity_payload *bip = bio_integrity(bio);
1860 struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1861
1862 return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1863 bip_next->bip_vec[0].bv_offset);
1864}
1865
1866/** 1759/**
1867 * bio_integrity_intervals - Return number of integrity intervals for a bio 1760 * bio_integrity_intervals - Return number of integrity intervals for a bio
1868 * @bi: blk_integrity profile for device 1761 * @bi: blk_integrity profile for device
@@ -1947,17 +1840,6 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1947 return true; 1840 return true;
1948} 1841}
1949 1842
1950static inline bool integrity_req_gap_back_merge(struct request *req,
1951 struct bio *next)
1952{
1953 return false;
1954}
1955static inline bool integrity_req_gap_front_merge(struct request *req,
1956 struct bio *bio)
1957{
1958 return false;
1959}
1960
1961static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, 1843static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1962 unsigned int sectors) 1844 unsigned int sectors)
1963{ 1845{
@@ -1987,6 +1869,9 @@ struct block_device_operations {
1987 int (*getgeo)(struct block_device *, struct hd_geometry *); 1869 int (*getgeo)(struct block_device *, struct hd_geometry *);
1988 /* this callback is with swap_lock and sometimes page table lock held */ 1870 /* this callback is with swap_lock and sometimes page table lock held */
1989 void (*swap_slot_free_notify) (struct block_device *, unsigned long); 1871 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1872 int (*report_zones)(struct gendisk *, sector_t sector,
1873 struct blk_zone *zones, unsigned int *nr_zones,
1874 gfp_t gfp_mask);
1990 struct module *owner; 1875 struct module *owner;
1991 const struct pr_ops *pr_ops; 1876 const struct pr_ops *pr_ops;
1992}; 1877};
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index f91b0f8ff3a9..588dd5f0bd85 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,6 +2,7 @@
2#ifndef _BPF_CGROUP_H 2#ifndef _BPF_CGROUP_H
3#define _BPF_CGROUP_H 3#define _BPF_CGROUP_H
4 4
5#include <linux/bpf.h>
5#include <linux/errno.h> 6#include <linux/errno.h>
6#include <linux/jump_label.h> 7#include <linux/jump_label.h>
7#include <linux/percpu.h> 8#include <linux/percpu.h>
@@ -22,7 +23,11 @@ struct bpf_cgroup_storage;
22extern struct static_key_false cgroup_bpf_enabled_key; 23extern struct static_key_false cgroup_bpf_enabled_key;
23#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) 24#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
24 25
25DECLARE_PER_CPU(void*, bpf_cgroup_storage); 26DECLARE_PER_CPU(struct bpf_cgroup_storage*,
27 bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
28
29#define for_each_cgroup_storage_type(stype) \
30 for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
26 31
27struct bpf_cgroup_storage_map; 32struct bpf_cgroup_storage_map;
28 33
@@ -32,7 +37,10 @@ struct bpf_storage_buffer {
32}; 37};
33 38
34struct bpf_cgroup_storage { 39struct bpf_cgroup_storage {
35 struct bpf_storage_buffer *buf; 40 union {
41 struct bpf_storage_buffer *buf;
42 void __percpu *percpu_buf;
43 };
36 struct bpf_cgroup_storage_map *map; 44 struct bpf_cgroup_storage_map *map;
37 struct bpf_cgroup_storage_key key; 45 struct bpf_cgroup_storage_key key;
38 struct list_head list; 46 struct list_head list;
@@ -43,7 +51,7 @@ struct bpf_cgroup_storage {
43struct bpf_prog_list { 51struct bpf_prog_list {
44 struct list_head node; 52 struct list_head node;
45 struct bpf_prog *prog; 53 struct bpf_prog *prog;
46 struct bpf_cgroup_storage *storage; 54 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
47}; 55};
48 56
49struct bpf_prog_array; 57struct bpf_prog_array;
@@ -101,18 +109,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
101int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, 109int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
102 short access, enum bpf_attach_type type); 110 short access, enum bpf_attach_type type);
103 111
104static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) 112static inline enum bpf_cgroup_storage_type cgroup_storage_type(
113 struct bpf_map *map)
105{ 114{
106 struct bpf_storage_buffer *buf; 115 if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
116 return BPF_CGROUP_STORAGE_PERCPU;
117
118 return BPF_CGROUP_STORAGE_SHARED;
119}
107 120
108 if (!storage) 121static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
109 return; 122 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
123{
124 enum bpf_cgroup_storage_type stype;
110 125
111 buf = READ_ONCE(storage->buf); 126 for_each_cgroup_storage_type(stype)
112 this_cpu_write(bpf_cgroup_storage, &buf->data[0]); 127 this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
113} 128}
114 129
115struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); 130struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
131 enum bpf_cgroup_storage_type stype);
116void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); 132void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
117void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, 133void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
118 struct cgroup *cgroup, 134 struct cgroup *cgroup,
@@ -121,6 +137,10 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
121int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); 137int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
122void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); 138void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
123 139
140int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
141int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
142 void *value, u64 flags);
143
124/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ 144/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
125#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ 145#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
126({ \ 146({ \
@@ -265,15 +285,24 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
265 return -EINVAL; 285 return -EINVAL;
266} 286}
267 287
268static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {} 288static inline void bpf_cgroup_storage_set(
289 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
269static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, 290static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
270 struct bpf_map *map) { return 0; } 291 struct bpf_map *map) { return 0; }
271static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, 292static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
272 struct bpf_map *map) {} 293 struct bpf_map *map) {}
273static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( 294static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
274 struct bpf_prog *prog) { return 0; } 295 struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; }
275static inline void bpf_cgroup_storage_free( 296static inline void bpf_cgroup_storage_free(
276 struct bpf_cgroup_storage *storage) {} 297 struct bpf_cgroup_storage *storage) {}
298static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
299 void *value) {
300 return 0;
301}
302static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
303 void *key, void *value, u64 flags) {
304 return 0;
305}
277 306
278#define cgroup_bpf_enabled (0) 307#define cgroup_bpf_enabled (0)
279#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) 308#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
@@ -293,6 +322,8 @@ static inline void bpf_cgroup_storage_free(
293#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) 322#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
294#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) 323#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
295 324
325#define for_each_cgroup_storage_type(stype) for (; false; )
326
296#endif /* CONFIG_CGROUP_BPF */ 327#endif /* CONFIG_CGROUP_BPF */
297 328
298#endif /* _BPF_CGROUP_H */ 329#endif /* _BPF_CGROUP_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 523481a3471b..33014ae73103 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -39,6 +39,9 @@ struct bpf_map_ops {
39 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 39 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
40 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); 40 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
41 int (*map_delete_elem)(struct bpf_map *map, void *key); 41 int (*map_delete_elem)(struct bpf_map *map, void *key);
42 int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
43 int (*map_pop_elem)(struct bpf_map *map, void *value);
44 int (*map_peek_elem)(struct bpf_map *map, void *value);
42 45
43 /* funcs called by prog_array and perf_event_array map */ 46 /* funcs called by prog_array and perf_event_array map */
44 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, 47 void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -138,6 +141,7 @@ enum bpf_arg_type {
138 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ 141 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
139 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ 142 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
140 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ 143 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
144 ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
141 145
142 /* the following constraints used to prototype bpf_memcmp() and other 146 /* the following constraints used to prototype bpf_memcmp() and other
143 * functions that access data on eBPF program stack 147 * functions that access data on eBPF program stack
@@ -154,6 +158,7 @@ enum bpf_arg_type {
154 158
155 ARG_PTR_TO_CTX, /* pointer to context */ 159 ARG_PTR_TO_CTX, /* pointer to context */
156 ARG_ANYTHING, /* any (initialized) argument is ok */ 160 ARG_ANYTHING, /* any (initialized) argument is ok */
161 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
157}; 162};
158 163
159/* type of values returned from helper functions */ 164/* type of values returned from helper functions */
@@ -162,6 +167,7 @@ enum bpf_return_type {
162 RET_VOID, /* function doesn't return anything */ 167 RET_VOID, /* function doesn't return anything */
163 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ 168 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
164 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ 169 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
170 RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
165}; 171};
166 172
167/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs 173/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -212,6 +218,9 @@ enum bpf_reg_type {
212 PTR_TO_PACKET_META, /* skb->data - meta_len */ 218 PTR_TO_PACKET_META, /* skb->data - meta_len */
213 PTR_TO_PACKET, /* reg points to skb->data */ 219 PTR_TO_PACKET, /* reg points to skb->data */
214 PTR_TO_PACKET_END, /* skb->data + headlen */ 220 PTR_TO_PACKET_END, /* skb->data + headlen */
221 PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
222 PTR_TO_SOCKET, /* reg points to struct bpf_sock */
223 PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
215}; 224};
216 225
217/* The information passed from prog-specific *_is_valid_access 226/* The information passed from prog-specific *_is_valid_access
@@ -258,6 +267,7 @@ struct bpf_verifier_ops {
258struct bpf_prog_offload_ops { 267struct bpf_prog_offload_ops {
259 int (*insn_hook)(struct bpf_verifier_env *env, 268 int (*insn_hook)(struct bpf_verifier_env *env,
260 int insn_idx, int prev_insn_idx); 269 int insn_idx, int prev_insn_idx);
270 int (*finalize)(struct bpf_verifier_env *env);
261}; 271};
262 272
263struct bpf_prog_offload { 273struct bpf_prog_offload {
@@ -271,6 +281,14 @@ struct bpf_prog_offload {
271 u32 jited_len; 281 u32 jited_len;
272}; 282};
273 283
284enum bpf_cgroup_storage_type {
285 BPF_CGROUP_STORAGE_SHARED,
286 BPF_CGROUP_STORAGE_PERCPU,
287 __BPF_CGROUP_STORAGE_MAX
288};
289
290#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
291
274struct bpf_prog_aux { 292struct bpf_prog_aux {
275 atomic_t refcnt; 293 atomic_t refcnt;
276 u32 used_map_cnt; 294 u32 used_map_cnt;
@@ -288,7 +306,7 @@ struct bpf_prog_aux {
288 struct bpf_prog *prog; 306 struct bpf_prog *prog;
289 struct user_struct *user; 307 struct user_struct *user;
290 u64 load_time; /* ns since boottime */ 308 u64 load_time; /* ns since boottime */
291 struct bpf_map *cgroup_storage; 309 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
292 char name[BPF_OBJ_NAME_LEN]; 310 char name[BPF_OBJ_NAME_LEN];
293#ifdef CONFIG_SECURITY 311#ifdef CONFIG_SECURITY
294 void *security; 312 void *security;
@@ -334,6 +352,11 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
334 352
335typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, 353typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
336 unsigned long off, unsigned long len); 354 unsigned long off, unsigned long len);
355typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
356 const struct bpf_insn *src,
357 struct bpf_insn *dst,
358 struct bpf_prog *prog,
359 u32 *target_size);
337 360
338u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 361u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
339 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); 362 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
@@ -357,7 +380,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
357 */ 380 */
358struct bpf_prog_array_item { 381struct bpf_prog_array_item {
359 struct bpf_prog *prog; 382 struct bpf_prog *prog;
360 struct bpf_cgroup_storage *cgroup_storage; 383 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
361}; 384};
362 385
363struct bpf_prog_array { 386struct bpf_prog_array {
@@ -718,33 +741,18 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
718} 741}
719#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 742#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
720 743
721#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) 744#if defined(CONFIG_BPF_STREAM_PARSER)
722struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); 745int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
723struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); 746int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
724int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
725int sockmap_get_from_fd(const union bpf_attr *attr, int type,
726 struct bpf_prog *prog);
727#else 747#else
728static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) 748static inline int sock_map_prog_update(struct bpf_map *map,
729{ 749 struct bpf_prog *prog, u32 which)
730 return NULL;
731}
732
733static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map,
734 void *key)
735{
736 return NULL;
737}
738
739static inline int sock_map_prog(struct bpf_map *map,
740 struct bpf_prog *prog,
741 u32 type)
742{ 750{
743 return -EOPNOTSUPP; 751 return -EOPNOTSUPP;
744} 752}
745 753
746static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, 754static inline int sock_map_get_from_fd(const union bpf_attr *attr,
747 struct bpf_prog *prog) 755 struct bpf_prog *prog)
748{ 756{
749 return -EINVAL; 757 return -EINVAL;
750} 758}
@@ -806,6 +814,9 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
806extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 814extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
807extern const struct bpf_func_proto bpf_map_update_elem_proto; 815extern const struct bpf_func_proto bpf_map_update_elem_proto;
808extern const struct bpf_func_proto bpf_map_delete_elem_proto; 816extern const struct bpf_func_proto bpf_map_delete_elem_proto;
817extern const struct bpf_func_proto bpf_map_push_elem_proto;
818extern const struct bpf_func_proto bpf_map_pop_elem_proto;
819extern const struct bpf_func_proto bpf_map_peek_elem_proto;
809 820
810extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 821extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
811extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 822extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
@@ -820,6 +831,10 @@ extern const struct bpf_func_proto bpf_get_stack_proto;
820extern const struct bpf_func_proto bpf_sock_map_update_proto; 831extern const struct bpf_func_proto bpf_sock_map_update_proto;
821extern const struct bpf_func_proto bpf_sock_hash_update_proto; 832extern const struct bpf_func_proto bpf_sock_hash_update_proto;
822extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 833extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
834extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
835extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
836extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
837extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
823 838
824extern const struct bpf_func_proto bpf_get_local_storage_proto; 839extern const struct bpf_func_proto bpf_get_local_storage_proto;
825 840
@@ -827,4 +842,29 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
827void bpf_user_rnd_init_once(void); 842void bpf_user_rnd_init_once(void);
828u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 843u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
829 844
845#if defined(CONFIG_NET)
846bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
847 struct bpf_insn_access_aux *info);
848u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
849 const struct bpf_insn *si,
850 struct bpf_insn *insn_buf,
851 struct bpf_prog *prog,
852 u32 *target_size);
853#else
854static inline bool bpf_sock_is_valid_access(int off, int size,
855 enum bpf_access_type type,
856 struct bpf_insn_access_aux *info)
857{
858 return false;
859}
860static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
861 const struct bpf_insn *si,
862 struct bpf_insn *insn_buf,
863 struct bpf_prog *prog,
864 u32 *target_size)
865{
866 return 0;
867}
868#endif
869
830#endif /* _LINUX_BPF_H */ 870#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index cd26c090e7c0..44d9ab4809bd 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -16,6 +16,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
16BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) 16BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
17BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) 17BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
18BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) 18BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
19BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector)
19#endif 20#endif
20#ifdef CONFIG_BPF_EVENTS 21#ifdef CONFIG_BPF_EVENTS
21BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) 22BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
@@ -42,6 +43,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
42#endif 43#endif
43#ifdef CONFIG_CGROUP_BPF 44#ifdef CONFIG_CGROUP_BPF
44BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops) 45BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
46BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
45#endif 47#endif
46BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) 48BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
47BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) 49BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
@@ -49,13 +51,13 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
49BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops) 51BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
50BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops) 52BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
51#ifdef CONFIG_PERF_EVENTS 53#ifdef CONFIG_PERF_EVENTS
52BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops) 54BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
53#endif 55#endif
54BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) 56BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
55BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) 57BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
56#ifdef CONFIG_NET 58#ifdef CONFIG_NET
57BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) 59BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
58#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) 60#if defined(CONFIG_BPF_STREAM_PARSER)
59BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) 61BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
60BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) 62BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
61#endif 63#endif
@@ -67,3 +69,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
67BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) 69BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
68#endif 70#endif
69#endif 71#endif
72BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
73BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 38b04f559ad3..9e8056ec20fa 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -41,6 +41,7 @@ enum bpf_reg_liveness {
41}; 41};
42 42
43struct bpf_reg_state { 43struct bpf_reg_state {
44 /* Ordering of fields matters. See states_equal() */
44 enum bpf_reg_type type; 45 enum bpf_reg_type type;
45 union { 46 union {
46 /* valid when type == PTR_TO_PACKET */ 47 /* valid when type == PTR_TO_PACKET */
@@ -57,9 +58,10 @@ struct bpf_reg_state {
57 * offset, so they can share range knowledge. 58 * offset, so they can share range knowledge.
58 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we 59 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
59 * came from, when one is tested for != NULL. 60 * came from, when one is tested for != NULL.
61 * For PTR_TO_SOCKET this is used to share which pointers retain the
62 * same reference to the socket, to determine proper reference freeing.
60 */ 63 */
61 u32 id; 64 u32 id;
62 /* Ordering of fields matters. See states_equal() */
63 /* For scalar types (SCALAR_VALUE), this represents our knowledge of 65 /* For scalar types (SCALAR_VALUE), this represents our knowledge of
64 * the actual value. 66 * the actual value.
65 * For pointer types, this represents the variable part of the offset 67 * For pointer types, this represents the variable part of the offset
@@ -76,15 +78,15 @@ struct bpf_reg_state {
76 s64 smax_value; /* maximum possible (s64)value */ 78 s64 smax_value; /* maximum possible (s64)value */
77 u64 umin_value; /* minimum possible (u64)value */ 79 u64 umin_value; /* minimum possible (u64)value */
78 u64 umax_value; /* maximum possible (u64)value */ 80 u64 umax_value; /* maximum possible (u64)value */
81 /* parentage chain for liveness checking */
82 struct bpf_reg_state *parent;
79 /* Inside the callee two registers can be both PTR_TO_STACK like 83 /* Inside the callee two registers can be both PTR_TO_STACK like
80 * R1=fp-8 and R2=fp-8, but one of them points to this function stack 84 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
81 * while another to the caller's stack. To differentiate them 'frameno' 85 * while another to the caller's stack. To differentiate them 'frameno'
82 * is used which is an index in bpf_verifier_state->frame[] array 86 * is used which is an index in bpf_verifier_state->frame[] array
83 * pointing to bpf_func_state. 87 * pointing to bpf_func_state.
84 * This field must be second to last, for states_equal() reasons.
85 */ 88 */
86 u32 frameno; 89 u32 frameno;
87 /* This field must be last, for states_equal() reasons. */
88 enum bpf_reg_liveness live; 90 enum bpf_reg_liveness live;
89}; 91};
90 92
@@ -102,12 +104,22 @@ struct bpf_stack_state {
102 u8 slot_type[BPF_REG_SIZE]; 104 u8 slot_type[BPF_REG_SIZE];
103}; 105};
104 106
107struct bpf_reference_state {
108 /* Track each reference created with a unique id, even if the same
109 * instruction creates the reference multiple times (eg, via CALL).
110 */
111 int id;
112 /* Instruction where the allocation of this reference occurred. This
113 * is used purely to inform the user of a reference leak.
114 */
115 int insn_idx;
116};
117
105/* state of the program: 118/* state of the program:
106 * type of all registers and stack info 119 * type of all registers and stack info
107 */ 120 */
108struct bpf_func_state { 121struct bpf_func_state {
109 struct bpf_reg_state regs[MAX_BPF_REG]; 122 struct bpf_reg_state regs[MAX_BPF_REG];
110 struct bpf_verifier_state *parent;
111 /* index of call instruction that called into this func */ 123 /* index of call instruction that called into this func */
112 int callsite; 124 int callsite;
113 /* stack frame number of this function state from pov of 125 /* stack frame number of this function state from pov of
@@ -120,7 +132,9 @@ struct bpf_func_state {
120 */ 132 */
121 u32 subprogno; 133 u32 subprogno;
122 134
123 /* should be second to last. See copy_func_state() */ 135 /* The following fields should be last. See copy_func_state() */
136 int acquired_refs;
137 struct bpf_reference_state *refs;
124 int allocated_stack; 138 int allocated_stack;
125 struct bpf_stack_state *stack; 139 struct bpf_stack_state *stack;
126}; 140};
@@ -129,10 +143,20 @@ struct bpf_func_state {
129struct bpf_verifier_state { 143struct bpf_verifier_state {
130 /* call stack tracking */ 144 /* call stack tracking */
131 struct bpf_func_state *frame[MAX_CALL_FRAMES]; 145 struct bpf_func_state *frame[MAX_CALL_FRAMES];
132 struct bpf_verifier_state *parent;
133 u32 curframe; 146 u32 curframe;
134}; 147};
135 148
149#define bpf_get_spilled_reg(slot, frame) \
150 (((slot < frame->allocated_stack / BPF_REG_SIZE) && \
151 (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
152 ? &frame->stack[slot].spilled_ptr : NULL)
153
154/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
155#define bpf_for_each_spilled_reg(iter, frame, reg) \
156 for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
157 iter < frame->allocated_stack / BPF_REG_SIZE; \
158 iter++, reg = bpf_get_spilled_reg(iter, frame))
159
136/* linked list of verifier states used to prune search */ 160/* linked list of verifier states used to prune search */
137struct bpf_verifier_state_list { 161struct bpf_verifier_state_list {
138 struct bpf_verifier_state state; 162 struct bpf_verifier_state state;
@@ -206,15 +230,21 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
206__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, 230__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
207 const char *fmt, ...); 231 const char *fmt, ...);
208 232
209static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) 233static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
210{ 234{
211 struct bpf_verifier_state *cur = env->cur_state; 235 struct bpf_verifier_state *cur = env->cur_state;
212 236
213 return cur->frame[cur->curframe]->regs; 237 return cur->frame[cur->curframe];
238}
239
240static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
241{
242 return cur_func(env)->regs;
214} 243}
215 244
216int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); 245int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
217int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, 246int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
218 int insn_idx, int prev_insn_idx); 247 int insn_idx, int prev_insn_idx);
248int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
219 249
220#endif /* _LINUX_BPF_VERIFIER_H */ 250#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 96225a77c112..7b73ef7f902d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -242,7 +242,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to);
242int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 242int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
243 get_block_t get_block); 243 get_block_t get_block);
244/* Convert errno to return value from ->page_mkwrite() call */ 244/* Convert errno to return value from ->page_mkwrite() call */
245static inline int block_page_mkwrite_return(int err) 245static inline vm_fault_t block_page_mkwrite_return(int err)
246{ 246{
247 if (err == 0) 247 if (err == 0)
248 return VM_FAULT_LOCKED; 248 return VM_FAULT_LOCKED;
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22dd133b..02c73c6aa805 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -40,8 +40,6 @@ struct bvec_iter {
40 40
41 unsigned int bi_idx; /* current index into bvl_vec */ 41 unsigned int bi_idx; /* current index into bvl_vec */
42 42
43 unsigned int bi_done; /* number of bytes completed */
44
45 unsigned int bi_bvec_done; /* number of bytes completed in 43 unsigned int bi_bvec_done; /* number of bytes completed in
46 current bvec */ 44 current bvec */
47}; 45};
@@ -85,7 +83,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
85 bytes -= len; 83 bytes -= len;
86 iter->bi_size -= len; 84 iter->bi_size -= len;
87 iter->bi_bvec_done += len; 85 iter->bi_bvec_done += len;
88 iter->bi_done += len;
89 86
90 if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { 87 if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
91 iter->bi_bvec_done = 0; 88 iter->bi_bvec_done = 0;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ff20b677fb9f..5e1694fe035b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -20,6 +20,7 @@
20#include <linux/u64_stats_sync.h> 20#include <linux/u64_stats_sync.h>
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22#include <linux/bpf-cgroup.h> 22#include <linux/bpf-cgroup.h>
23#include <linux/psi_types.h>
23 24
24#ifdef CONFIG_CGROUPS 25#ifdef CONFIG_CGROUPS
25 26
@@ -412,6 +413,7 @@ struct cgroup {
412 * specific task are charged to the dom_cgrp. 413 * specific task are charged to the dom_cgrp.
413 */ 414 */
414 struct cgroup *dom_cgrp; 415 struct cgroup *dom_cgrp;
416 struct cgroup *old_dom_cgrp; /* used while enabling threaded */
415 417
416 /* per-cpu recursive resource statistics */ 418 /* per-cpu recursive resource statistics */
417 struct cgroup_rstat_cpu __percpu *rstat_cpu; 419 struct cgroup_rstat_cpu __percpu *rstat_cpu;
@@ -435,6 +437,9 @@ struct cgroup {
435 /* used to schedule release agent */ 437 /* used to schedule release agent */
436 struct work_struct release_agent_work; 438 struct work_struct release_agent_work;
437 439
440 /* used to track pressure stalls */
441 struct psi_group psi;
442
438 /* used to store eBPF programs */ 443 /* used to store eBPF programs */
439 struct cgroup_bpf bpf; 444 struct cgroup_bpf bpf;
440 445
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 32c553556bbd..9968332cceed 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
93 93
94bool css_has_online_children(struct cgroup_subsys_state *css); 94bool css_has_online_children(struct cgroup_subsys_state *css);
95struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); 95struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
96struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
97 struct cgroup_subsys *ss);
96struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, 98struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
97 struct cgroup_subsys *ss); 99 struct cgroup_subsys *ss);
98struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, 100struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
@@ -567,20 +569,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
567static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, 569static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
568 int ancestor_level) 570 int ancestor_level)
569{ 571{
570 struct cgroup *ptr;
571
572 if (cgrp->level < ancestor_level) 572 if (cgrp->level < ancestor_level)
573 return NULL; 573 return NULL;
574 574 while (cgrp && cgrp->level > ancestor_level)
575 for (ptr = cgrp; 575 cgrp = cgroup_parent(cgrp);
576 ptr && ptr->level > ancestor_level; 576 return cgrp;
577 ptr = cgroup_parent(ptr))
578 ;
579
580 if (ptr && ptr->level == ancestor_level)
581 return ptr;
582
583 return NULL;
584} 577}
585 578
586/** 579/**
@@ -657,6 +650,11 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
657 pr_cont_kernfs_path(cgrp->kn); 650 pr_cont_kernfs_path(cgrp->kn);
658} 651}
659 652
653static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
654{
655 return &cgrp->psi;
656}
657
660static inline void cgroup_init_kthreadd(void) 658static inline void cgroup_init_kthreadd(void)
661{ 659{
662 /* 660 /*
@@ -710,6 +708,16 @@ static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
710 return NULL; 708 return NULL;
711} 709}
712 710
711static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
712{
713 return NULL;
714}
715
716static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
717{
718 return NULL;
719}
720
713static inline bool task_under_cgroup_hierarchy(struct task_struct *task, 721static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
714 struct cgroup *ancestor) 722 struct cgroup *ancestor)
715{ 723{
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 308918928767..b21db536fd52 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -241,6 +241,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
241 __clocksource_update_freq_scale(cs, 1000, khz); 241 __clocksource_update_freq_scale(cs, 1000, khz);
242} 242}
243 243
244#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
245extern void clocksource_arch_init(struct clocksource *cs);
246#else
247static inline void clocksource_arch_init(struct clocksource *cs) { }
248#endif
244 249
245extern int timekeeping_notify(struct clocksource *clock); 250extern int timekeeping_notify(struct clocksource *clock);
246 251
@@ -257,9 +262,6 @@ extern int clocksource_i8253_init(void);
257#define TIMER_OF_DECLARE(name, compat, fn) \ 262#define TIMER_OF_DECLARE(name, compat, fn) \
258 OF_DECLARE_1_RET(timer, name, compat, fn) 263 OF_DECLARE_1_RET(timer, name, compat, fn)
259 264
260#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
261 TIMER_OF_DECLARE(name, compat, fn)
262
263#ifdef CONFIG_TIMER_PROBE 265#ifdef CONFIG_TIMER_PROBE
264extern void timer_probe(void); 266extern void timer_probe(void);
265#else 267#else
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 1a3c4f37e908..d30e4dbd4be2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -7,7 +7,7 @@
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/compat_time.h> 10#include <linux/time.h>
11 11
12#include <linux/stat.h> 12#include <linux/stat.h>
13#include <linux/param.h> /* for HZ */ 13#include <linux/param.h> /* for HZ */
@@ -103,6 +103,9 @@ typedef struct compat_sigaltstack {
103 compat_size_t ss_size; 103 compat_size_t ss_size;
104} compat_stack_t; 104} compat_stack_t;
105#endif 105#endif
106#ifndef COMPAT_MINSIGSTKSZ
107#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
108#endif
106 109
107#define compat_jiffies_to_clock_t(x) \ 110#define compat_jiffies_to_clock_t(x) \
108 (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) 111 (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
@@ -110,19 +113,12 @@ typedef struct compat_sigaltstack {
110typedef __compat_uid32_t compat_uid_t; 113typedef __compat_uid32_t compat_uid_t;
111typedef __compat_gid32_t compat_gid_t; 114typedef __compat_gid32_t compat_gid_t;
112 115
113typedef compat_ulong_t compat_aio_context_t;
114
115struct compat_sel_arg_struct; 116struct compat_sel_arg_struct;
116struct rusage; 117struct rusage;
117 118
118struct compat_utimbuf {
119 compat_time_t actime;
120 compat_time_t modtime;
121};
122
123struct compat_itimerval { 119struct compat_itimerval {
124 struct compat_timeval it_interval; 120 struct old_timeval32 it_interval;
125 struct compat_timeval it_value; 121 struct old_timeval32 it_value;
126}; 122};
127 123
128struct itimerval; 124struct itimerval;
@@ -146,7 +142,7 @@ struct compat_timex {
146 compat_long_t constant; 142 compat_long_t constant;
147 compat_long_t precision; 143 compat_long_t precision;
148 compat_long_t tolerance; 144 compat_long_t tolerance;
149 struct compat_timeval time; 145 struct old_timeval32 time;
150 compat_long_t tick; 146 compat_long_t tick;
151 compat_long_t ppsfreq; 147 compat_long_t ppsfreq;
152 compat_long_t jitter; 148 compat_long_t jitter;
@@ -307,8 +303,8 @@ struct compat_rlimit {
307}; 303};
308 304
309struct compat_rusage { 305struct compat_rusage {
310 struct compat_timeval ru_utime; 306 struct old_timeval32 ru_utime;
311 struct compat_timeval ru_stime; 307 struct old_timeval32 ru_stime;
312 compat_long_t ru_maxrss; 308 compat_long_t ru_maxrss;
313 compat_long_t ru_ixrss; 309 compat_long_t ru_ixrss;
314 compat_long_t ru_idrss; 310 compat_long_t ru_idrss;
@@ -452,13 +448,13 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
452 unsigned long bitmap_size); 448 unsigned long bitmap_size);
453long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, 449long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
454 unsigned long bitmap_size); 450 unsigned long bitmap_size);
455int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from); 451int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from);
456int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); 452int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from);
457int get_compat_sigevent(struct sigevent *event, 453int get_compat_sigevent(struct sigevent *event,
458 const struct compat_sigevent __user *u_event); 454 const struct compat_sigevent __user *u_event);
459 455
460static inline int compat_timeval_compare(struct compat_timeval *lhs, 456static inline int old_timeval32_compare(struct old_timeval32 *lhs,
461 struct compat_timeval *rhs) 457 struct old_timeval32 *rhs)
462{ 458{
463 if (lhs->tv_sec < rhs->tv_sec) 459 if (lhs->tv_sec < rhs->tv_sec)
464 return -1; 460 return -1;
@@ -467,8 +463,8 @@ static inline int compat_timeval_compare(struct compat_timeval *lhs,
467 return lhs->tv_usec - rhs->tv_usec; 463 return lhs->tv_usec - rhs->tv_usec;
468} 464}
469 465
470static inline int compat_timespec_compare(struct compat_timespec *lhs, 466static inline int old_timespec32_compare(struct old_timespec32 *lhs,
471 struct compat_timespec *rhs) 467 struct old_timespec32 *rhs)
472{ 468{
473 if (lhs->tv_sec < rhs->tv_sec) 469 if (lhs->tv_sec < rhs->tv_sec)
474 return -1; 470 return -1;
@@ -552,12 +548,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
552 compat_long_t min_nr, 548 compat_long_t min_nr,
553 compat_long_t nr, 549 compat_long_t nr,
554 struct io_event __user *events, 550 struct io_event __user *events,
555 struct compat_timespec __user *timeout); 551 struct old_timespec32 __user *timeout);
556asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, 552asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
557 compat_long_t min_nr, 553 compat_long_t min_nr,
558 compat_long_t nr, 554 compat_long_t nr,
559 struct io_event __user *events, 555 struct io_event __user *events,
560 struct compat_timespec __user *timeout, 556 struct old_timespec32 __user *timeout,
561 const struct __compat_aio_sigset __user *usig); 557 const struct __compat_aio_sigset __user *usig);
562 558
563/* fs/cookies.c */ 559/* fs/cookies.c */
@@ -642,11 +638,11 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
642asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, 638asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
643 compat_ulong_t __user *outp, 639 compat_ulong_t __user *outp,
644 compat_ulong_t __user *exp, 640 compat_ulong_t __user *exp,
645 struct compat_timespec __user *tsp, 641 struct old_timespec32 __user *tsp,
646 void __user *sig); 642 void __user *sig);
647asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, 643asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
648 unsigned int nfds, 644 unsigned int nfds,
649 struct compat_timespec __user *tsp, 645 struct old_timespec32 __user *tsp,
650 const compat_sigset_t __user *sigmask, 646 const compat_sigset_t __user *sigmask,
651 compat_size_t sigsetsize); 647 compat_size_t sigsetsize);
652 648
@@ -671,15 +667,15 @@ asmlinkage long compat_sys_newfstat(unsigned int fd,
671 667
672/* fs/timerfd.c */ 668/* fs/timerfd.c */
673asmlinkage long compat_sys_timerfd_gettime(int ufd, 669asmlinkage long compat_sys_timerfd_gettime(int ufd,
674 struct compat_itimerspec __user *otmr); 670 struct old_itimerspec32 __user *otmr);
675asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, 671asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
676 const struct compat_itimerspec __user *utmr, 672 const struct old_itimerspec32 __user *utmr,
677 struct compat_itimerspec __user *otmr); 673 struct old_itimerspec32 __user *otmr);
678 674
679/* fs/utimes.c */ 675/* fs/utimes.c */
680asmlinkage long compat_sys_utimensat(unsigned int dfd, 676asmlinkage long compat_sys_utimensat(unsigned int dfd,
681 const char __user *filename, 677 const char __user *filename,
682 struct compat_timespec __user *t, 678 struct old_timespec32 __user *t,
683 int flags); 679 int flags);
684 680
685/* kernel/exit.c */ 681/* kernel/exit.c */
@@ -691,7 +687,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
691 687
692/* kernel/futex.c */ 688/* kernel/futex.c */
693asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, 689asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
694 struct compat_timespec __user *utime, u32 __user *uaddr2, 690 struct old_timespec32 __user *utime, u32 __user *uaddr2,
695 u32 val3); 691 u32 val3);
696asmlinkage long 692asmlinkage long
697compat_sys_set_robust_list(struct compat_robust_list_head __user *head, 693compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
@@ -701,8 +697,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
701 compat_size_t __user *len_ptr); 697 compat_size_t __user *len_ptr);
702 698
703/* kernel/hrtimer.c */ 699/* kernel/hrtimer.c */
704asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, 700asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp,
705 struct compat_timespec __user *rmtp); 701 struct old_timespec32 __user *rmtp);
706 702
707/* kernel/itimer.c */ 703/* kernel/itimer.c */
708asmlinkage long compat_sys_getitimer(int which, 704asmlinkage long compat_sys_getitimer(int which,
@@ -722,19 +718,19 @@ asmlinkage long compat_sys_timer_create(clockid_t which_clock,
722 struct compat_sigevent __user *timer_event_spec, 718 struct compat_sigevent __user *timer_event_spec,
723 timer_t __user *created_timer_id); 719 timer_t __user *created_timer_id);
724asmlinkage long compat_sys_timer_gettime(timer_t timer_id, 720asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
725 struct compat_itimerspec __user *setting); 721 struct old_itimerspec32 __user *setting);
726asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, 722asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
727 struct compat_itimerspec __user *new, 723 struct old_itimerspec32 __user *new,
728 struct compat_itimerspec __user *old); 724 struct old_itimerspec32 __user *old);
729asmlinkage long compat_sys_clock_settime(clockid_t which_clock, 725asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
730 struct compat_timespec __user *tp); 726 struct old_timespec32 __user *tp);
731asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, 727asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
732 struct compat_timespec __user *tp); 728 struct old_timespec32 __user *tp);
733asmlinkage long compat_sys_clock_getres(clockid_t which_clock, 729asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
734 struct compat_timespec __user *tp); 730 struct old_timespec32 __user *tp);
735asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, 731asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
736 struct compat_timespec __user *rqtp, 732 struct old_timespec32 __user *rqtp,
737 struct compat_timespec __user *rmtp); 733 struct old_timespec32 __user *rmtp);
738 734
739/* kernel/ptrace.c */ 735/* kernel/ptrace.c */
740asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, 736asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
@@ -748,7 +744,7 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
748 unsigned int len, 744 unsigned int len,
749 compat_ulong_t __user *user_mask_ptr); 745 compat_ulong_t __user *user_mask_ptr);
750asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, 746asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
751 struct compat_timespec __user *interval); 747 struct old_timespec32 __user *interval);
752 748
753/* kernel/signal.c */ 749/* kernel/signal.c */
754asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, 750asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
@@ -768,7 +764,7 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
768 compat_size_t sigsetsize); 764 compat_size_t sigsetsize);
769asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, 765asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
770 struct compat_siginfo __user *uinfo, 766 struct compat_siginfo __user *uinfo,
771 struct compat_timespec __user *uts, compat_size_t sigsetsize); 767 struct old_timespec32 __user *uts, compat_size_t sigsetsize);
772asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, 768asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
773 struct compat_siginfo __user *uinfo); 769 struct compat_siginfo __user *uinfo);
774/* No generic prototype for rt_sigreturn */ 770/* No generic prototype for rt_sigreturn */
@@ -782,9 +778,9 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
782asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); 778asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
783 779
784/* kernel/time.c */ 780/* kernel/time.c */
785asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, 781asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
786 struct timezone __user *tz); 782 struct timezone __user *tz);
787asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, 783asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
788 struct timezone __user *tz); 784 struct timezone __user *tz);
789asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); 785asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
790 786
@@ -798,11 +794,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
798asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, 794asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
799 const char __user *u_msg_ptr, 795 const char __user *u_msg_ptr,
800 compat_size_t msg_len, unsigned int msg_prio, 796 compat_size_t msg_len, unsigned int msg_prio,
801 const struct compat_timespec __user *u_abs_timeout); 797 const struct old_timespec32 __user *u_abs_timeout);
802asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, 798asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
803 char __user *u_msg_ptr, 799 char __user *u_msg_ptr,
804 compat_size_t msg_len, unsigned int __user *u_msg_prio, 800 compat_size_t msg_len, unsigned int __user *u_msg_prio,
805 const struct compat_timespec __user *u_abs_timeout); 801 const struct old_timespec32 __user *u_abs_timeout);
806asmlinkage long compat_sys_mq_notify(mqd_t mqdes, 802asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
807 const struct compat_sigevent __user *u_notification); 803 const struct compat_sigevent __user *u_notification);
808asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, 804asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
@@ -819,7 +815,7 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
819/* ipc/sem.c */ 815/* ipc/sem.c */
820asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); 816asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
821asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, 817asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
822 unsigned nsems, const struct compat_timespec __user *timeout); 818 unsigned nsems, const struct old_timespec32 __user *timeout);
823 819
824/* ipc/shm.c */ 820/* ipc/shm.c */
825asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); 821asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
@@ -876,7 +872,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
876 struct compat_siginfo __user *uinfo); 872 struct compat_siginfo __user *uinfo);
877asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, 873asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
878 unsigned vlen, unsigned int flags, 874 unsigned vlen, unsigned int flags,
879 struct compat_timespec __user *timeout); 875 struct old_timespec32 __user *timeout);
880asmlinkage long compat_sys_wait4(compat_pid_t pid, 876asmlinkage long compat_sys_wait4(compat_pid_t pid,
881 compat_uint_t __user *stat_addr, int options, 877 compat_uint_t __user *stat_addr, int options,
882 struct compat_rusage __user *ru); 878 struct compat_rusage __user *ru);
@@ -928,7 +924,7 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
928asmlinkage long compat_sys_open(const char __user *filename, int flags, 924asmlinkage long compat_sys_open(const char __user *filename, int flags,
929 umode_t mode); 925 umode_t mode);
930asmlinkage long compat_sys_utimes(const char __user *filename, 926asmlinkage long compat_sys_utimes(const char __user *filename,
931 struct compat_timeval __user *t); 927 struct old_timeval32 __user *t);
932 928
933/* __ARCH_WANT_SYSCALL_NO_FLAGS */ 929/* __ARCH_WANT_SYSCALL_NO_FLAGS */
934asmlinkage long compat_sys_signalfd(int ufd, 930asmlinkage long compat_sys_signalfd(int ufd,
@@ -942,15 +938,15 @@ asmlinkage long compat_sys_newlstat(const char __user *filename,
942 struct compat_stat __user *statbuf); 938 struct compat_stat __user *statbuf);
943 939
944/* __ARCH_WANT_SYSCALL_DEPRECATED */ 940/* __ARCH_WANT_SYSCALL_DEPRECATED */
945asmlinkage long compat_sys_time(compat_time_t __user *tloc); 941asmlinkage long compat_sys_time(old_time32_t __user *tloc);
946asmlinkage long compat_sys_utime(const char __user *filename, 942asmlinkage long compat_sys_utime(const char __user *filename,
947 struct compat_utimbuf __user *t); 943 struct old_utimbuf32 __user *t);
948asmlinkage long compat_sys_futimesat(unsigned int dfd, 944asmlinkage long compat_sys_futimesat(unsigned int dfd,
949 const char __user *filename, 945 const char __user *filename,
950 struct compat_timeval __user *t); 946 struct old_timeval32 __user *t);
951asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, 947asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
952 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 948 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
953 struct compat_timeval __user *tvp); 949 struct old_timeval32 __user *tvp);
954asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); 950asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
955asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, 951asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
956 unsigned flags); 952 unsigned flags);
@@ -983,7 +979,7 @@ asmlinkage long compat_sys_sigaction(int sig,
983#endif 979#endif
984 980
985/* obsolete: kernel/time/time.c */ 981/* obsolete: kernel/time/time.c */
986asmlinkage long compat_sys_stime(compat_time_t __user *tptr); 982asmlinkage long compat_sys_stime(old_time32_t __user *tptr);
987 983
988/* obsolete: net/socket.c */ 984/* obsolete: net/socket.c */
989asmlinkage long compat_sys_socketcall(int call, u32 __user *args); 985asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
@@ -1002,15 +998,15 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); }
1002#endif 998#endif
1003 999
1004/** 1000/**
1005 * ns_to_compat_timeval - Compat version of ns_to_timeval 1001 * ns_to_old_timeval32 - Compat version of ns_to_timeval
1006 * @nsec: the nanoseconds value to be converted 1002 * @nsec: the nanoseconds value to be converted
1007 * 1003 *
1008 * Returns the compat_timeval representation of the nsec parameter. 1004 * Returns the old_timeval32 representation of the nsec parameter.
1009 */ 1005 */
1010static inline struct compat_timeval ns_to_compat_timeval(s64 nsec) 1006static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
1011{ 1007{
1012 struct timeval tv; 1008 struct timeval tv;
1013 struct compat_timeval ctv; 1009 struct old_timeval32 ctv;
1014 1010
1015 tv = ns_to_timeval(nsec); 1011 tv = ns_to_timeval(nsec);
1016 ctv.tv_sec = tv.tv_sec; 1012 ctv.tv_sec = tv.tv_sec;
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
deleted file mode 100644
index e70bfd1d2c3f..000000000000
--- a/include/linux/compat_time.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_COMPAT_TIME_H
3#define _LINUX_COMPAT_TIME_H
4
5#include <linux/types.h>
6#include <linux/time64.h>
7
8typedef s32 compat_time_t;
9
10struct compat_timespec {
11 compat_time_t tv_sec;
12 s32 tv_nsec;
13};
14
15struct compat_timeval {
16 compat_time_t tv_sec;
17 s32 tv_usec;
18};
19
20struct compat_itimerspec {
21 struct compat_timespec it_interval;
22 struct compat_timespec it_value;
23};
24
25extern int compat_get_timespec64(struct timespec64 *, const void __user *);
26extern int compat_put_timespec64(const struct timespec64 *, void __user *);
27extern int get_compat_itimerspec64(struct itimerspec64 *its,
28 const struct compat_itimerspec __user *uits);
29extern int put_compat_itimerspec64(const struct itimerspec64 *its,
30 struct compat_itimerspec __user *uits);
31
32#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 763bbad1e258..90ddfefb6c2b 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,20 +79,6 @@
79#define __noretpoline __attribute__((indirect_branch("keep"))) 79#define __noretpoline __attribute__((indirect_branch("keep")))
80#endif 80#endif
81 81
82/*
83 * it doesn't make sense on ARM (currently the only user of __naked)
84 * to trace naked functions because then mcount is called without
85 * stack and frame pointer being set up and there is no chance to
86 * restore the lr register to the value before mcount was called.
87 *
88 * The asm() bodies of naked functions often depend on standard calling
89 * conventions, therefore they must be noinline and noclone.
90 *
91 * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
92 * See GCC PR44290.
93 */
94#define __naked __attribute__((naked)) noinline __noclone notrace
95
96#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) 82#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
97 83
98#define __optimize(level) __attribute__((__optimize__(level))) 84#define __optimize(level) __attribute__((__optimize__(level)))
@@ -208,6 +194,12 @@
208 * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 194 * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
209 */ 195 */
210#define __no_sanitize_address __attribute__((no_sanitize_address)) 196#define __no_sanitize_address __attribute__((no_sanitize_address))
197#ifdef CONFIG_KASAN
198#define __no_sanitize_address_or_inline \
199 __no_sanitize_address __maybe_unused notrace
200#else
201#define __no_sanitize_address_or_inline inline
202#endif
211#endif 203#endif
212 204
213#if GCC_VERSION >= 50100 205#if GCC_VERSION >= 50100
@@ -225,6 +217,7 @@
225 217
226#if !defined(__no_sanitize_address) 218#if !defined(__no_sanitize_address)
227#define __no_sanitize_address 219#define __no_sanitize_address
220#define __no_sanitize_address_or_inline inline
228#endif 221#endif
229 222
230/* 223/*
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 681d866efb1e..4170fcee5adb 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
99 * unique, to convince GCC not to merge duplicate inline asm statements. 99 * unique, to convince GCC not to merge duplicate inline asm statements.
100 */ 100 */
101#define annotate_reachable() ({ \ 101#define annotate_reachable() ({ \
102 asm volatile("%c0:\n\t" \ 102 asm volatile("ANNOTATE_REACHABLE counter=%c0" \
103 ".pushsection .discard.reachable\n\t" \ 103 : : "i" (__COUNTER__)); \
104 ".long %c0b - .\n\t" \
105 ".popsection\n\t" : : "i" (__COUNTER__)); \
106}) 104})
107#define annotate_unreachable() ({ \ 105#define annotate_unreachable() ({ \
108 asm volatile("%c0:\n\t" \ 106 asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \
109 ".pushsection .discard.unreachable\n\t" \ 107 : : "i" (__COUNTER__)); \
110 ".long %c0b - .\n\t" \
111 ".popsection\n\t" : : "i" (__COUNTER__)); \
112}) 108})
113#define ASM_UNREACHABLE \
114 "999:\n\t" \
115 ".pushsection .discard.unreachable\n\t" \
116 ".long 999b - .\n\t" \
117 ".popsection\n\t"
118#else 109#else
119#define annotate_reachable() 110#define annotate_reachable()
120#define annotate_unreachable() 111#define annotate_unreachable()
@@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off)
299 return (void *)((unsigned long)off + *off); 290 return (void *)((unsigned long)off + *off);
300} 291}
301 292
293#else /* __ASSEMBLY__ */
294
295#ifdef __KERNEL__
296#ifndef LINKER_SCRIPT
297
298#ifdef CONFIG_STACK_VALIDATION
299.macro ANNOTATE_UNREACHABLE counter:req
300\counter:
301 .pushsection .discard.unreachable
302 .long \counter\()b -.
303 .popsection
304.endm
305
306.macro ANNOTATE_REACHABLE counter:req
307\counter:
308 .pushsection .discard.reachable
309 .long \counter\()b -.
310 .popsection
311.endm
312
313.macro ASM_UNREACHABLE
314999:
315 .pushsection .discard.unreachable
316 .long 999b - .
317 .popsection
318.endm
319#else /* CONFIG_STACK_VALIDATION */
320.macro ANNOTATE_UNREACHABLE counter:req
321.endm
322
323.macro ANNOTATE_REACHABLE counter:req
324.endm
325
326.macro ASM_UNREACHABLE
327.endm
328#endif /* CONFIG_STACK_VALIDATION */
329
330#endif /* LINKER_SCRIPT */
331#endif /* __KERNEL__ */
302#endif /* __ASSEMBLY__ */ 332#endif /* __ASSEMBLY__ */
303 333
304#ifndef __optimize 334#ifndef __optimize
@@ -314,29 +344,14 @@ static inline void *offset_to_ptr(const int *off)
314#endif 344#endif
315#ifndef __compiletime_error 345#ifndef __compiletime_error
316# define __compiletime_error(message) 346# define __compiletime_error(message)
317/*
318 * Sparse complains of variable sized arrays due to the temporary variable in
319 * __compiletime_assert. Unfortunately we can't just expand it out to make
320 * sparse see a constant array size without breaking compiletime_assert on old
321 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
322 */
323# ifndef __CHECKER__
324# define __compiletime_error_fallback(condition) \
325 do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
326# endif
327#endif
328#ifndef __compiletime_error_fallback
329# define __compiletime_error_fallback(condition) do { } while (0)
330#endif 347#endif
331 348
332#ifdef __OPTIMIZE__ 349#ifdef __OPTIMIZE__
333# define __compiletime_assert(condition, msg, prefix, suffix) \ 350# define __compiletime_assert(condition, msg, prefix, suffix) \
334 do { \ 351 do { \
335 int __cond = !(condition); \
336 extern void prefix ## suffix(void) __compiletime_error(msg); \ 352 extern void prefix ## suffix(void) __compiletime_error(msg); \
337 if (__cond) \ 353 if (!(condition)) \
338 prefix ## suffix(); \ 354 prefix ## suffix(); \
339 __compiletime_error_fallback(__cond); \
340 } while (0) 355 } while (0)
341#else 356#else
342# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) 357# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3525c179698c..97cfe29b3f0a 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -198,7 +198,6 @@ struct ftrace_likely_data {
198 */ 198 */
199#define __pure __attribute__((pure)) 199#define __pure __attribute__((pure))
200#define __aligned(x) __attribute__((aligned(x))) 200#define __aligned(x) __attribute__((aligned(x)))
201#define __aligned_largest __attribute__((aligned))
202#define __printf(a, b) __attribute__((format(printf, a, b))) 201#define __printf(a, b) __attribute__((format(printf, a, b)))
203#define __scanf(a, b) __attribute__((format(scanf, a, b))) 202#define __scanf(a, b) __attribute__((format(scanf, a, b)))
204#define __maybe_unused __attribute__((unused)) 203#define __maybe_unused __attribute__((unused))
@@ -226,6 +225,14 @@ struct ftrace_likely_data {
226#define notrace __attribute__((no_instrument_function)) 225#define notrace __attribute__((no_instrument_function))
227#endif 226#endif
228 227
228/*
229 * it doesn't make sense on ARM (currently the only user of __naked)
230 * to trace naked functions because then mcount is called without
231 * stack and frame pointer being set up and there is no chance to
232 * restore the lr register to the value before mcount was called.
233 */
234#define __naked __attribute__((naked)) notrace
235
229#define __compiler_offsetof(a, b) __builtin_offsetof(a, b) 236#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
230 237
231/* 238/*
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index fea64f2692a0..ab137f97ecbd 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -141,7 +141,6 @@ struct vc_data {
141 struct uni_pagedir *vc_uni_pagedir; 141 struct uni_pagedir *vc_uni_pagedir;
142 struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ 142 struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
143 struct uni_screen *vc_uni_screen; /* unicode screen content */ 143 struct uni_screen *vc_uni_screen; /* unicode screen content */
144 bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
145 /* additional information is in vt_kern.h */ 144 /* additional information is in vt_kern.h */
146}; 145};
147 146
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 207aed96a5b7..abf4b4e65dbb 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -17,9 +17,9 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
17extern int dump_align(struct coredump_params *cprm, int align); 17extern int dump_align(struct coredump_params *cprm, int align);
18extern void dump_truncate(struct coredump_params *cprm); 18extern void dump_truncate(struct coredump_params *cprm);
19#ifdef CONFIG_COREDUMP 19#ifdef CONFIG_COREDUMP
20extern void do_coredump(const siginfo_t *siginfo); 20extern void do_coredump(const kernel_siginfo_t *siginfo);
21#else 21#else
22static inline void do_coredump(const siginfo_t *siginfo) {} 22static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
23#endif 23#endif
24 24
25#endif /* _LINUX_COREDUMP_H */ 25#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index d828a6efe0b1..46c67a764877 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -94,20 +94,15 @@ union coresight_dev_subtype {
94 * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. 94 * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
95 * @name: name of the component as shown under sysfs. 95 * @name: name of the component as shown under sysfs.
96 * @nr_inport: number of input ports for this component. 96 * @nr_inport: number of input ports for this component.
97 * @outports: list of remote endpoint port number.
98 * @child_names:name of all child components connected to this device.
99 * @child_ports:child component port number the current component is
100 connected to.
101 * @nr_outport: number of output ports for this component. 97 * @nr_outport: number of output ports for this component.
98 * @conns: Array of nr_outport connections from this component
102 */ 99 */
103struct coresight_platform_data { 100struct coresight_platform_data {
104 int cpu; 101 int cpu;
105 const char *name; 102 const char *name;
106 int nr_inport; 103 int nr_inport;
107 int *outports;
108 const char **child_names;
109 int *child_ports;
110 int nr_outport; 104 int nr_outport;
105 struct coresight_connection *conns;
111}; 106};
112 107
113/** 108/**
@@ -190,23 +185,15 @@ struct coresight_device {
190 * @disable: disables the sink. 185 * @disable: disables the sink.
191 * @alloc_buffer: initialises perf's ring buffer for trace collection. 186 * @alloc_buffer: initialises perf's ring buffer for trace collection.
192 * @free_buffer: release memory allocated in @get_config. 187 * @free_buffer: release memory allocated in @get_config.
193 * @set_buffer: initialises buffer mechanic before a trace session.
194 * @reset_buffer: finalises buffer mechanic after a trace session.
195 * @update_buffer: update buffer pointers after a trace session. 188 * @update_buffer: update buffer pointers after a trace session.
196 */ 189 */
197struct coresight_ops_sink { 190struct coresight_ops_sink {
198 int (*enable)(struct coresight_device *csdev, u32 mode); 191 int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
199 void (*disable)(struct coresight_device *csdev); 192 void (*disable)(struct coresight_device *csdev);
200 void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, 193 void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
201 void **pages, int nr_pages, bool overwrite); 194 void **pages, int nr_pages, bool overwrite);
202 void (*free_buffer)(void *config); 195 void (*free_buffer)(void *config);
203 int (*set_buffer)(struct coresight_device *csdev, 196 unsigned long (*update_buffer)(struct coresight_device *csdev,
204 struct perf_output_handle *handle,
205 void *sink_config);
206 unsigned long (*reset_buffer)(struct coresight_device *csdev,
207 struct perf_output_handle *handle,
208 void *sink_config);
209 void (*update_buffer)(struct coresight_device *csdev,
210 struct perf_output_handle *handle, 197 struct perf_output_handle *handle,
211 void *sink_config); 198 void *sink_config);
212}; 199};
@@ -270,6 +257,13 @@ extern int coresight_enable(struct coresight_device *csdev);
270extern void coresight_disable(struct coresight_device *csdev); 257extern void coresight_disable(struct coresight_device *csdev);
271extern int coresight_timeout(void __iomem *addr, u32 offset, 258extern int coresight_timeout(void __iomem *addr, u32 offset,
272 int position, int value); 259 int position, int value);
260
261extern int coresight_claim_device(void __iomem *base);
262extern int coresight_claim_device_unlocked(void __iomem *base);
263
264extern void coresight_disclaim_device(void __iomem *base);
265extern void coresight_disclaim_device_unlocked(void __iomem *base);
266
273#else 267#else
274static inline struct coresight_device * 268static inline struct coresight_device *
275coresight_register(struct coresight_desc *desc) { return NULL; } 269coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -279,6 +273,19 @@ coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
279static inline void coresight_disable(struct coresight_device *csdev) {} 273static inline void coresight_disable(struct coresight_device *csdev) {}
280static inline int coresight_timeout(void __iomem *addr, u32 offset, 274static inline int coresight_timeout(void __iomem *addr, u32 offset,
281 int position, int value) { return 1; } 275 int position, int value) { return 1; }
276static inline int coresight_claim_device_unlocked(void __iomem *base)
277{
278 return -EINVAL;
279}
280
281static inline int coresight_claim_device(void __iomem *base)
282{
283 return -EINVAL;
284}
285
286static inline void coresight_disclaim_device(void __iomem *base) {}
287static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
288
282#endif 289#endif
283 290
284#ifdef CONFIG_OF 291#ifdef CONFIG_OF
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..84d3c81b5978 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -45,7 +45,7 @@
45 * 'asm/cpufeature.h' of your favorite architecture. 45 * 'asm/cpufeature.h' of your favorite architecture.
46 */ 46 */
47#define module_cpu_feature_match(x, __initfunc) \ 47#define module_cpu_feature_match(x, __initfunc) \
48static struct cpu_feature const cpu_feature_match_ ## x[] = \ 48static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
49 { { .feature = cpu_feature(x) }, { } }; \ 49 { { .feature = cpu_feature(x) }, { } }; \
50MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ 50MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
51 \ 51 \
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4325d6fdde9b..faed7a8977e8 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -81,6 +81,7 @@ struct cpuidle_device {
81 unsigned int registered:1; 81 unsigned int registered:1;
82 unsigned int enabled:1; 82 unsigned int enabled:1;
83 unsigned int use_deepest_state:1; 83 unsigned int use_deepest_state:1;
84 unsigned int poll_time_limit:1;
84 unsigned int cpu; 85 unsigned int cpu;
85 86
86 int last_residency; 87 int last_residency;
@@ -99,16 +100,6 @@ struct cpuidle_device {
99DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); 100DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
100DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); 101DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
101 102
102/**
103 * cpuidle_get_last_residency - retrieves the last state's residency time
104 * @dev: the target CPU
105 */
106static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
107{
108 return dev->last_residency;
109}
110
111
112/**************************** 103/****************************
113 * CPUIDLE DRIVER INTERFACE * 104 * CPUIDLE DRIVER INTERFACE *
114 ****************************/ 105 ****************************/
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3e4ba9d753c8..f774c5eb9e3c 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -26,6 +26,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
26 26
27extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 27extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
28 unsigned long, int); 28 unsigned long, int);
29extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
30 size_t csize, unsigned long offset,
31 int userbuf);
32
29void vmcore_cleanup(void); 33void vmcore_cleanup(void);
30 34
31/* Architecture code defines this if there are other possible ELF 35/* Architecture code defines this if there are other possible ELF
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 1fe0cfcdea30..6bb0c0bf357b 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,6 +6,7 @@
6 6
7#define CRC_T10DIF_DIGEST_SIZE 2 7#define CRC_T10DIF_DIGEST_SIZE 2
8#define CRC_T10DIF_BLOCK_SIZE 1 8#define CRC_T10DIF_BLOCK_SIZE 1
9#define CRC_T10DIF_STRING "crct10dif"
9 10
10extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, 11extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
11 size_t len); 12 size_t len);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e8839d3a7559..3634ad6fe202 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -454,6 +454,33 @@ struct compress_alg {
454 * @cra_refcnt: internally used 454 * @cra_refcnt: internally used
455 * @cra_destroy: internally used 455 * @cra_destroy: internally used
456 * 456 *
457 * All following statistics are for this crypto_alg
458 * @encrypt_cnt: number of encrypt requests
459 * @decrypt_cnt: number of decrypt requests
460 * @compress_cnt: number of compress requests
461 * @decompress_cnt: number of decompress requests
462 * @generate_cnt: number of RNG generate requests
463 * @seed_cnt: number of times the rng was seeded
464 * @hash_cnt: number of hash requests
465 * @sign_cnt: number of sign requests
466 * @setsecret_cnt: number of setsecrey operation
467 * @generate_public_key_cnt: number of generate_public_key operation
468 * @verify_cnt: number of verify operation
469 * @compute_shared_secret_cnt: number of compute_shared_secret operation
470 * @encrypt_tlen: total data size handled by encrypt requests
471 * @decrypt_tlen: total data size handled by decrypt requests
472 * @compress_tlen: total data size handled by compress requests
473 * @decompress_tlen: total data size handled by decompress requests
474 * @generate_tlen: total data size of generated data by the RNG
475 * @hash_tlen: total data size hashed
476 * @akcipher_err_cnt: number of error for akcipher requests
477 * @cipher_err_cnt: number of error for akcipher requests
478 * @compress_err_cnt: number of error for akcipher requests
479 * @aead_err_cnt: number of error for akcipher requests
480 * @hash_err_cnt: number of error for akcipher requests
481 * @rng_err_cnt: number of error for akcipher requests
482 * @kpp_err_cnt: number of error for akcipher requests
483 *
457 * The struct crypto_alg describes a generic Crypto API algorithm and is common 484 * The struct crypto_alg describes a generic Crypto API algorithm and is common
458 * for all of the transformations. Any variable not documented here shall not 485 * for all of the transformations. Any variable not documented here shall not
459 * be used by a cipher implementation as it is internal to the Crypto API. 486 * be used by a cipher implementation as it is internal to the Crypto API.
@@ -487,6 +514,45 @@ struct crypto_alg {
487 void (*cra_destroy)(struct crypto_alg *alg); 514 void (*cra_destroy)(struct crypto_alg *alg);
488 515
489 struct module *cra_module; 516 struct module *cra_module;
517
518 union {
519 atomic_t encrypt_cnt;
520 atomic_t compress_cnt;
521 atomic_t generate_cnt;
522 atomic_t hash_cnt;
523 atomic_t setsecret_cnt;
524 };
525 union {
526 atomic64_t encrypt_tlen;
527 atomic64_t compress_tlen;
528 atomic64_t generate_tlen;
529 atomic64_t hash_tlen;
530 };
531 union {
532 atomic_t akcipher_err_cnt;
533 atomic_t cipher_err_cnt;
534 atomic_t compress_err_cnt;
535 atomic_t aead_err_cnt;
536 atomic_t hash_err_cnt;
537 atomic_t rng_err_cnt;
538 atomic_t kpp_err_cnt;
539 };
540 union {
541 atomic_t decrypt_cnt;
542 atomic_t decompress_cnt;
543 atomic_t seed_cnt;
544 atomic_t generate_public_key_cnt;
545 };
546 union {
547 atomic64_t decrypt_tlen;
548 atomic64_t decompress_tlen;
549 };
550 union {
551 atomic_t verify_cnt;
552 atomic_t compute_shared_secret_cnt;
553 };
554 atomic_t sign_cnt;
555
490} CRYPTO_MINALIGN_ATTR; 556} CRYPTO_MINALIGN_ATTR;
491 557
492/* 558/*
@@ -907,6 +973,38 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
907 return __crypto_ablkcipher_cast(req->base.tfm); 973 return __crypto_ablkcipher_cast(req->base.tfm);
908} 974}
909 975
976static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
977 int ret)
978{
979#ifdef CONFIG_CRYPTO_STATS
980 struct ablkcipher_tfm *crt =
981 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
982
983 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
984 atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
985 } else {
986 atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt);
987 atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
988 }
989#endif
990}
991
992static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
993 int ret)
994{
995#ifdef CONFIG_CRYPTO_STATS
996 struct ablkcipher_tfm *crt =
997 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
998
999 if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
1000 atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
1001 } else {
1002 atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt);
1003 atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
1004 }
1005#endif
1006}
1007
910/** 1008/**
911 * crypto_ablkcipher_encrypt() - encrypt plaintext 1009 * crypto_ablkcipher_encrypt() - encrypt plaintext
912 * @req: reference to the ablkcipher_request handle that holds all information 1010 * @req: reference to the ablkcipher_request handle that holds all information
@@ -922,7 +1020,11 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
922{ 1020{
923 struct ablkcipher_tfm *crt = 1021 struct ablkcipher_tfm *crt =
924 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 1022 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
925 return crt->encrypt(req); 1023 int ret;
1024
1025 ret = crt->encrypt(req);
1026 crypto_stat_ablkcipher_encrypt(req, ret);
1027 return ret;
926} 1028}
927 1029
928/** 1030/**
@@ -940,7 +1042,11 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
940{ 1042{
941 struct ablkcipher_tfm *crt = 1043 struct ablkcipher_tfm *crt =
942 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 1044 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
943 return crt->decrypt(req); 1045 int ret;
1046
1047 ret = crt->decrypt(req);
1048 crypto_stat_ablkcipher_decrypt(req, ret);
1049 return ret;
944} 1050}
945 1051
946/** 1052/**
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index 056867f09a01..45bfe9d61271 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -8,6 +8,7 @@
8#ifndef _LINUX_CUDA_H 8#ifndef _LINUX_CUDA_H
9#define _LINUX_CUDA_H 9#define _LINUX_CUDA_H
10 10
11#include <linux/rtc.h>
11#include <uapi/linux/cuda.h> 12#include <uapi/linux/cuda.h>
12 13
13 14
@@ -16,4 +17,7 @@ extern int cuda_request(struct adb_request *req,
16 void (*done)(struct adb_request *), int nbytes, ...); 17 void (*done)(struct adb_request *), int nbytes, ...);
17extern void cuda_poll(void); 18extern void cuda_poll(void);
18 19
20extern time64_t cuda_get_time(void);
21extern int cuda_set_rtc_time(struct rtc_time *tm);
22
19#endif /* _LINUX_CUDA_H */ 23#endif /* _LINUX_CUDA_H */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 120225e9a366..257ab3c92cb8 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -8,8 +8,8 @@
8 8
9struct task_struct; 9struct task_struct;
10 10
11extern int debug_locks; 11extern int debug_locks __read_mostly;
12extern int debug_locks_silent; 12extern int debug_locks_silent __read_mostly;
13 13
14 14
15static inline int __debug_locks_off(void) 15static inline int __debug_locks_off(void)
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 31c865d1842e..577d1b25fccd 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -57,7 +57,12 @@ struct task_delay_info {
57 57
58 u64 freepages_start; 58 u64 freepages_start;
59 u64 freepages_delay; /* wait for memory reclaim */ 59 u64 freepages_delay; /* wait for memory reclaim */
60
61 u64 thrashing_start;
62 u64 thrashing_delay; /* wait for thrashing page */
63
60 u32 freepages_count; /* total count of memory reclaim */ 64 u32 freepages_count; /* total count of memory reclaim */
65 u32 thrashing_count; /* total count of thrash waits */
61}; 66};
62#endif 67#endif
63 68
@@ -76,6 +81,8 @@ extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
76extern __u64 __delayacct_blkio_ticks(struct task_struct *); 81extern __u64 __delayacct_blkio_ticks(struct task_struct *);
77extern void __delayacct_freepages_start(void); 82extern void __delayacct_freepages_start(void);
78extern void __delayacct_freepages_end(void); 83extern void __delayacct_freepages_end(void);
84extern void __delayacct_thrashing_start(void);
85extern void __delayacct_thrashing_end(void);
79 86
80static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) 87static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
81{ 88{
@@ -156,6 +163,18 @@ static inline void delayacct_freepages_end(void)
156 __delayacct_freepages_end(); 163 __delayacct_freepages_end();
157} 164}
158 165
166static inline void delayacct_thrashing_start(void)
167{
168 if (current->delays)
169 __delayacct_thrashing_start();
170}
171
172static inline void delayacct_thrashing_end(void)
173{
174 if (current->delays)
175 __delayacct_thrashing_end();
176}
177
159#else 178#else
160static inline void delayacct_set_flag(int flag) 179static inline void delayacct_set_flag(int flag)
161{} 180{}
@@ -182,6 +201,10 @@ static inline void delayacct_freepages_start(void)
182{} 201{}
183static inline void delayacct_freepages_end(void) 202static inline void delayacct_freepages_end(void)
184{} 203{}
204static inline void delayacct_thrashing_start(void)
205{}
206static inline void delayacct_thrashing_end(void)
207{}
185 208
186#endif /* CONFIG_TASK_DELAY_ACCT */ 209#endif /* CONFIG_TASK_DELAY_ACCT */
187 210
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 3aae5b3af87c..e4963b0f45da 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -198,6 +198,14 @@ extern void devm_devfreq_remove_device(struct device *dev,
198extern int devfreq_suspend_device(struct devfreq *devfreq); 198extern int devfreq_suspend_device(struct devfreq *devfreq);
199extern int devfreq_resume_device(struct devfreq *devfreq); 199extern int devfreq_resume_device(struct devfreq *devfreq);
200 200
201/**
202 * update_devfreq() - Reevaluate the device and configure frequency
203 * @devfreq: the devfreq device
204 *
205 * Note: devfreq->lock must be held
206 */
207extern int update_devfreq(struct devfreq *devfreq);
208
201/* Helper functions for devfreq user device driver with OPP. */ 209/* Helper functions for devfreq user device driver with OPP. */
202extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, 210extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
203 unsigned long *freq, u32 flags); 211 unsigned long *freq, u32 flags);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 6fb0808e87c8..e528baebad69 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -26,9 +26,8 @@ enum dm_queue_mode {
26 DM_TYPE_NONE = 0, 26 DM_TYPE_NONE = 0,
27 DM_TYPE_BIO_BASED = 1, 27 DM_TYPE_BIO_BASED = 1,
28 DM_TYPE_REQUEST_BASED = 2, 28 DM_TYPE_REQUEST_BASED = 2,
29 DM_TYPE_MQ_REQUEST_BASED = 3, 29 DM_TYPE_DAX_BIO_BASED = 3,
30 DM_TYPE_DAX_BIO_BASED = 4, 30 DM_TYPE_NVME_BIO_BASED = 4,
31 DM_TYPE_NVME_BIO_BASED = 5,
32}; 31};
33 32
34typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; 33typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -92,6 +91,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
92 91
93typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); 92typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
94 93
94typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
95 struct blk_zone *zones,
96 unsigned int *nr_zones,
97 gfp_t gfp_mask);
98
95/* 99/*
96 * These iteration functions are typically used to check (and combine) 100 * These iteration functions are typically used to check (and combine)
97 * properties of underlying devices. 101 * properties of underlying devices.
@@ -180,6 +184,9 @@ struct target_type {
180 dm_status_fn status; 184 dm_status_fn status;
181 dm_message_fn message; 185 dm_message_fn message;
182 dm_prepare_ioctl_fn prepare_ioctl; 186 dm_prepare_ioctl_fn prepare_ioctl;
187#ifdef CONFIG_BLK_DEV_ZONED
188 dm_report_zones_fn report_zones;
189#endif
183 dm_busy_fn busy; 190 dm_busy_fn busy;
184 dm_iterate_devices_fn iterate_devices; 191 dm_iterate_devices_fn iterate_devices;
185 dm_io_hints_fn io_hints; 192 dm_io_hints_fn io_hints;
@@ -420,8 +427,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
420int dm_suspended(struct dm_target *ti); 427int dm_suspended(struct dm_target *ti);
421int dm_noflush_suspending(struct dm_target *ti); 428int dm_noflush_suspending(struct dm_target *ti);
422void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); 429void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
423void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, 430void dm_remap_zone_report(struct dm_target *ti, sector_t start,
424 sector_t start); 431 struct blk_zone *zones, unsigned int *nr_zones);
425union map_info *dm_get_rq_mapinfo(struct request *rq); 432union map_info *dm_get_rq_mapinfo(struct request *rq);
426 433
427struct queue_limits *dm_get_queue_limits(struct mapped_device *md); 434struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
@@ -490,6 +497,7 @@ sector_t dm_table_get_size(struct dm_table *t);
490unsigned int dm_table_get_num_targets(struct dm_table *t); 497unsigned int dm_table_get_num_targets(struct dm_table *t);
491fmode_t dm_table_get_mode(struct dm_table *t); 498fmode_t dm_table_get_mode(struct dm_table *t);
492struct mapped_device *dm_table_get_md(struct dm_table *t); 499struct mapped_device *dm_table_get_md(struct dm_table *t);
500const char *dm_table_device_name(struct dm_table *t);
493 501
494/* 502/*
495 * Trigger an event. 503 * Trigger an event.
diff --git a/include/linux/device.h b/include/linux/device.h
index 8f882549edee..1b25c7a43f4c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -55,6 +55,8 @@ struct bus_attribute {
55 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) 55 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
56#define BUS_ATTR_RO(_name) \ 56#define BUS_ATTR_RO(_name) \
57 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) 57 struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
58#define BUS_ATTR_WO(_name) \
59 struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
58 60
59extern int __must_check bus_create_file(struct bus_type *, 61extern int __must_check bus_create_file(struct bus_type *,
60 struct bus_attribute *); 62 struct bus_attribute *);
@@ -692,8 +694,10 @@ static inline void *devm_kcalloc(struct device *dev,
692{ 694{
693 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); 695 return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
694} 696}
695extern void devm_kfree(struct device *dev, void *p); 697extern void devm_kfree(struct device *dev, const void *p);
696extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; 698extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
699extern const char *devm_kstrdup_const(struct device *dev,
700 const char *s, gfp_t gfp);
697extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, 701extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
698 gfp_t gfp); 702 gfp_t gfp);
699 703
@@ -774,6 +778,30 @@ void device_connection_add(struct device_connection *con);
774void device_connection_remove(struct device_connection *con); 778void device_connection_remove(struct device_connection *con);
775 779
776/** 780/**
781 * device_connections_add - Add multiple device connections at once
782 * @cons: Zero terminated array of device connection descriptors
783 */
784static inline void device_connections_add(struct device_connection *cons)
785{
786 struct device_connection *c;
787
788 for (c = cons; c->endpoint[0]; c++)
789 device_connection_add(c);
790}
791
792/**
793 * device_connections_remove - Remove multiple device connections at once
794 * @cons: Zero terminated array of device connection descriptors
795 */
796static inline void device_connections_remove(struct device_connection *cons)
797{
798 struct device_connection *c;
799
800 for (c = cons; c->endpoint[0]; c++)
801 device_connection_remove(c);
802}
803
804/**
777 * enum device_link_state - Device link states. 805 * enum device_link_state - Device link states.
778 * @DL_STATE_NONE: The presence of the drivers is not being tracked. 806 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
779 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. 807 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
@@ -927,6 +955,8 @@ struct dev_links_info {
927 * @offline: Set after successful invocation of bus type's .offline(). 955 * @offline: Set after successful invocation of bus type's .offline().
928 * @of_node_reused: Set if the device-tree node is shared with an ancestor 956 * @of_node_reused: Set if the device-tree node is shared with an ancestor
929 * device. 957 * device.
958 * @dma_coherent: this particular device is dma coherent, even if the
959 * architecture supports non-coherent devices.
930 * 960 *
931 * At the lowest level, every device in a Linux system is represented by an 961 * At the lowest level, every device in a Linux system is represented by an
932 * instance of struct device. The device structure contains the information 962 * instance of struct device. The device structure contains the information
@@ -1016,6 +1046,11 @@ struct device {
1016 bool offline_disabled:1; 1046 bool offline_disabled:1;
1017 bool offline:1; 1047 bool offline:1;
1018 bool of_node_reused:1; 1048 bool of_node_reused:1;
1049#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
1050 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
1051 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
1052 bool dma_coherent:1;
1053#endif
1019}; 1054};
1020 1055
1021static inline struct device *kobj_to_dev(struct kobject *kobj) 1056static inline struct device *kobj_to_dev(struct kobject *kobj)
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index a785f2507159..30213adbb6b9 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -32,6 +32,9 @@ extern void dma_debug_add_bus(struct bus_type *bus);
32 32
33extern int dma_debug_resize_entries(u32 num_entries); 33extern int dma_debug_resize_entries(u32 num_entries);
34 34
35extern void debug_dma_map_single(struct device *dev, const void *addr,
36 unsigned long len);
37
35extern void debug_dma_map_page(struct device *dev, struct page *page, 38extern void debug_dma_map_page(struct device *dev, struct page *page,
36 size_t offset, size_t size, 39 size_t offset, size_t size,
37 int direction, dma_addr_t dma_addr, 40 int direction, dma_addr_t dma_addr,
@@ -103,6 +106,11 @@ static inline int dma_debug_resize_entries(u32 num_entries)
103 return 0; 106 return 0;
104} 107}
105 108
109static inline void debug_dma_map_single(struct device *dev, const void *addr,
110 unsigned long len)
111{
112}
113
106static inline void debug_dma_map_page(struct device *dev, struct page *page, 114static inline void debug_dma_map_page(struct device *dev, struct page *page,
107 size_t offset, size_t size, 115 size_t offset, size_t size,
108 int direction, dma_addr_t dma_addr, 116 int direction, dma_addr_t dma_addr,
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 8d9f33febde5..bd73e7a91410 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,6 +5,8 @@
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6#include <linux/mem_encrypt.h> 6#include <linux/mem_encrypt.h>
7 7
8#define DIRECT_MAPPING_ERROR 0
9
8#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA 10#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
9#include <asm/dma-direct.h> 11#include <asm/dma-direct.h>
10#else 12#else
@@ -27,7 +29,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
27 if (!dev->dma_mask) 29 if (!dev->dma_mask)
28 return false; 30 return false;
29 31
30 return addr + size - 1 <= *dev->dma_mask; 32 return addr + size - 1 <=
33 min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
31} 34}
32#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ 35#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
33 36
@@ -55,10 +58,15 @@ static inline void dma_mark_clean(void *addr, size_t size)
55} 58}
56#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ 59#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
57 60
61u64 dma_direct_get_required_mask(struct device *dev);
58void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 62void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
59 gfp_t gfp, unsigned long attrs); 63 gfp_t gfp, unsigned long attrs);
60void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, 64void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
61 dma_addr_t dma_addr, unsigned long attrs); 65 dma_addr_t dma_addr, unsigned long attrs);
66void *dma_direct_alloc_pages(struct device *dev, size_t size,
67 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
68void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
69 dma_addr_t dma_addr, unsigned long attrs);
62dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, 70dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
63 unsigned long offset, size_t size, enum dma_data_direction dir, 71 unsigned long offset, size_t size, enum dma_data_direction dir,
64 unsigned long attrs); 72 unsigned long attrs);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 1db6a6b46d0d..15bd41447025 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -130,13 +130,10 @@ struct dma_map_ops {
130 enum dma_data_direction direction); 130 enum dma_data_direction direction);
131 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); 131 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
132 int (*dma_supported)(struct device *dev, u64 mask); 132 int (*dma_supported)(struct device *dev, u64 mask);
133#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
134 u64 (*get_required_mask)(struct device *dev); 133 u64 (*get_required_mask)(struct device *dev);
135#endif
136}; 134};
137 135
138extern const struct dma_map_ops dma_direct_ops; 136extern const struct dma_map_ops dma_direct_ops;
139extern const struct dma_map_ops dma_noncoherent_ops;
140extern const struct dma_map_ops dma_virt_ops; 137extern const struct dma_map_ops dma_virt_ops;
141 138
142#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) 139#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -232,6 +229,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
232 dma_addr_t addr; 229 dma_addr_t addr;
233 230
234 BUG_ON(!valid_dma_direction(dir)); 231 BUG_ON(!valid_dma_direction(dir));
232 debug_dma_map_single(dev, ptr, size);
235 addr = ops->map_page(dev, virt_to_page(ptr), 233 addr = ops->map_page(dev, virt_to_page(ptr),
236 offset_in_page(ptr), size, 234 offset_in_page(ptr), size,
237 dir, attrs); 235 dir, attrs);
@@ -445,7 +443,8 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
445} 443}
446 444
447extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 445extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
448 void *cpu_addr, dma_addr_t dma_addr, size_t size); 446 void *cpu_addr, dma_addr_t dma_addr, size_t size,
447 unsigned long attrs);
449 448
450void *dma_common_contiguous_remap(struct page *page, size_t size, 449void *dma_common_contiguous_remap(struct page *page, size_t size,
451 unsigned long vm_flags, 450 unsigned long vm_flags,
@@ -477,14 +476,14 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
477 BUG_ON(!ops); 476 BUG_ON(!ops);
478 if (ops->mmap) 477 if (ops->mmap)
479 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 478 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
480 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); 479 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
481} 480}
482 481
483#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) 482#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
484 483
485int 484int
486dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 485dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
487 void *cpu_addr, dma_addr_t dma_addr, size_t size); 486 dma_addr_t dma_addr, size_t size, unsigned long attrs);
488 487
489static inline int 488static inline int
490dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, 489dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
@@ -496,7 +495,8 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
496 if (ops->get_sgtable) 495 if (ops->get_sgtable)
497 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, 496 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
498 attrs); 497 attrs);
499 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); 498 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
499 attrs);
500} 500}
501 501
502#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) 502#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
@@ -558,9 +558,11 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
558} 558}
559 559
560static inline void *dma_alloc_coherent(struct device *dev, size_t size, 560static inline void *dma_alloc_coherent(struct device *dev, size_t size,
561 dma_addr_t *dma_handle, gfp_t flag) 561 dma_addr_t *dma_handle, gfp_t gfp)
562{ 562{
563 return dma_alloc_attrs(dev, size, dma_handle, flag, 0); 563
564 return dma_alloc_attrs(dev, size, dma_handle, gfp,
565 (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
564} 566}
565 567
566static inline void dma_free_coherent(struct device *dev, size_t size, 568static inline void dma_free_coherent(struct device *dev, size_t size,
@@ -753,18 +755,6 @@ dma_mark_declared_memory_occupied(struct device *dev,
753} 755}
754#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ 756#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
755 757
756#ifdef CONFIG_HAS_DMA
757int dma_configure(struct device *dev);
758void dma_deconfigure(struct device *dev);
759#else
760static inline int dma_configure(struct device *dev)
761{
762 return 0;
763}
764
765static inline void dma_deconfigure(struct device *dev) {}
766#endif
767
768/* 758/*
769 * Managed DMA API 759 * Managed DMA API
770 */ 760 */
@@ -806,8 +796,12 @@ static inline void dmam_release_declared_memory(struct device *dev)
806static inline void *dma_alloc_wc(struct device *dev, size_t size, 796static inline void *dma_alloc_wc(struct device *dev, size_t size,
807 dma_addr_t *dma_addr, gfp_t gfp) 797 dma_addr_t *dma_addr, gfp_t gfp)
808{ 798{
809 return dma_alloc_attrs(dev, size, dma_addr, gfp, 799 unsigned long attrs = DMA_ATTR_NO_WARN;
810 DMA_ATTR_WRITE_COMBINE); 800
801 if (gfp & __GFP_NOWARN)
802 attrs |= DMA_ATTR_NO_WARN;
803
804 return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
811} 805}
812#ifndef dma_alloc_writecombine 806#ifndef dma_alloc_writecombine
813#define dma_alloc_writecombine dma_alloc_wc 807#define dma_alloc_writecombine dma_alloc_wc
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index a0aa00cc909d..9051b055beec 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -4,18 +4,35 @@
4 4
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6 6
7#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
8#include <asm/dma-coherence.h>
9#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
10 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
11 defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
12static inline bool dev_is_dma_coherent(struct device *dev)
13{
14 return dev->dma_coherent;
15}
16#else
17static inline bool dev_is_dma_coherent(struct device *dev)
18{
19 return true;
20}
21#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
22
7void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 23void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
8 gfp_t gfp, unsigned long attrs); 24 gfp_t gfp, unsigned long attrs);
9void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, 25void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
10 dma_addr_t dma_addr, unsigned long attrs); 26 dma_addr_t dma_addr, unsigned long attrs);
27long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
28 dma_addr_t dma_addr);
11 29
12#ifdef CONFIG_DMA_NONCOHERENT_MMAP 30#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
13int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, 31pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
14 void *cpu_addr, dma_addr_t dma_addr, size_t size,
15 unsigned long attrs); 32 unsigned long attrs);
16#else 33#else
17#define arch_dma_mmap NULL 34# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot)
18#endif /* CONFIG_DMA_NONCOHERENT_MMAP */ 35#endif
19 36
20#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC 37#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
21void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, 38void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index b0115e340fbc..b42b80e52cc2 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -58,4 +58,73 @@ enum sprd_dma_int_type {
58 SPRD_DMA_CFGERR_INT, 58 SPRD_DMA_CFGERR_INT,
59}; 59};
60 60
61/*
62 * struct sprd_dma_linklist - DMA link-list address structure
63 * @virt_addr: link-list virtual address to configure link-list node
64 * @phy_addr: link-list physical address to link DMA transfer
65 *
66 * The Spreadtrum DMA controller supports the link-list mode, that means slaves
67 * can supply several groups configurations (each configuration represents one
68 * DMA transfer) saved in memory, and DMA controller will link these groups
69 * configurations by writing the physical address of each configuration into the
70 * link-list register.
71 *
72 * Just as shown below, the link-list pointer register will be pointed to the
73 * physical address of 'configuration 1', and the 'configuration 1' link-list
74 * pointer will be pointed to 'configuration 2', and so on.
75 * Once trigger the DMA transfer, the DMA controller will load 'configuration
76 * 1' to its registers automatically, after 'configuration 1' transaction is
77 * done, DMA controller will load 'configuration 2' automatically, until all
78 * DMA transactions are done.
79 *
80 * Note: The last link-list pointer should point to the physical address
81 * of 'configuration 1', which can avoid DMA controller loads incorrect
82 * configuration when the last configuration transaction is done.
83 *
84 * DMA controller linklist memory
85 * ====================== -----------------------
86 *| | | configuration 1 |<---
87 *| DMA controller | ------->| | |
88 *| | | | | |
89 *| | | | | |
90 *| | | | | |
91 *| linklist pointer reg |---- ----| linklist pointer | |
92 * ====================== | ----------------------- |
93 * | |
94 * | ----------------------- |
95 * | | configuration 2 | |
96 * --->| | |
97 * | | |
98 * | | |
99 * | | |
100 * ----| linklist pointer | |
101 * | ----------------------- |
102 * | |
103 * | ----------------------- |
104 * | | configuration 3 | |
105 * --->| | |
106 * | | |
107 * | . | |
108 * . |
109 * . |
110 * . |
111 * | . |
112 * | ----------------------- |
113 * | | configuration n | |
114 * --->| | |
115 * | | |
116 * | | |
117 * | | |
118 * | linklist pointer |----
119 * -----------------------
120 *
121 * To support the link-list mode, DMA slaves should allocate one segment memory
122 * from always-on IRAM or dma coherent memory to store these groups of DMA
123 * configuration, and pass the virtual and physical address to DMA controller.
124 */
125struct sprd_dma_linklist {
126 unsigned long virt_addr;
127 phys_addr_t phy_addr;
128};
129
61#endif 130#endif
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index 6ac3cad9aef1..34a744a1bafc 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -24,11 +24,9 @@
24#ifndef _LINUX_DNS_RESOLVER_H 24#ifndef _LINUX_DNS_RESOLVER_H
25#define _LINUX_DNS_RESOLVER_H 25#define _LINUX_DNS_RESOLVER_H
26 26
27#ifdef __KERNEL__ 27#include <uapi/linux/dns_resolver.h>
28 28
29extern int dns_query(const char *type, const char *name, size_t namelen, 29extern int dns_query(const char *type, const char *name, size_t namelen,
30 const char *options, char **_result, time64_t *_expiry); 30 const char *options, char **_result, time64_t *_expiry);
31 31
32#endif /* KERNEL */
33
34#endif /* _LINUX_DNS_RESOLVER_H */ 32#endif /* _LINUX_DNS_RESOLVER_H */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index bffb97828ed6..1d0c9ea8825d 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
17#include <linux/completion.h> 17#include <linux/completion.h>
18#include <linux/workqueue.h> 18#include <linux/workqueue.h>
19#include <linux/debugfs.h> 19#include <linux/debugfs.h>
20#include <linux/numa.h>
20 21
21#define EDAC_DEVICE_NAME_LEN 31 22#define EDAC_DEVICE_NAME_LEN 31
22 23
@@ -451,6 +452,8 @@ struct dimm_info {
451 u32 nr_pages; /* number of pages on this dimm */ 452 u32 nr_pages; /* number of pages on this dimm */
452 453
453 unsigned csrow, cschannel; /* Points to the old API data */ 454 unsigned csrow, cschannel; /* Points to the old API data */
455
456 u16 smbios_handle; /* Handle for SMBIOS type 17 */
454}; 457};
455 458
456/** 459/**
@@ -670,6 +673,6 @@ struct mem_ctl_info {
670/* 673/*
671 * Maximum number of memory controllers in the coherent fabric. 674 * Maximum number of memory controllers in the coherent fabric.
672 */ 675 */
673#define EDAC_MAX_MCS 16 676#define EDAC_MAX_MCS 2 * MAX_NUMNODES
674 677
675#endif 678#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 401e4b254e30..845174e113ce 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -672,6 +672,7 @@ void efi_native_runtime_setup(void);
672#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) 672#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
673#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) 673#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
674#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) 674#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
675#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
675 676
676typedef struct { 677typedef struct {
677 efi_guid_t guid; 678 efi_guid_t guid;
@@ -957,6 +958,7 @@ extern struct efi {
957 unsigned long mem_attr_table; /* memory attributes table */ 958 unsigned long mem_attr_table; /* memory attributes table */
958 unsigned long rng_seed; /* UEFI firmware random seed */ 959 unsigned long rng_seed; /* UEFI firmware random seed */
959 unsigned long tpm_log; /* TPM2 Event Log table */ 960 unsigned long tpm_log; /* TPM2 Event Log table */
961 unsigned long mem_reserve; /* Linux EFI memreserve table */
960 efi_get_time_t *get_time; 962 efi_get_time_t *get_time;
961 efi_set_time_t *set_time; 963 efi_set_time_t *set_time;
962 efi_get_wakeup_time_t *get_wakeup_time; 964 efi_get_wakeup_time_t *get_wakeup_time;
@@ -1041,6 +1043,7 @@ extern int __init efi_uart_console_only (void);
1041extern u64 efi_mem_desc_end(efi_memory_desc_t *md); 1043extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
1042extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); 1044extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
1043extern void efi_mem_reserve(phys_addr_t addr, u64 size); 1045extern void efi_mem_reserve(phys_addr_t addr, u64 size);
1046extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
1044extern void efi_initialize_iomem_resources(struct resource *code_resource, 1047extern void efi_initialize_iomem_resources(struct resource *code_resource,
1045 struct resource *data_resource, struct resource *bss_resource); 1048 struct resource *data_resource, struct resource *bss_resource);
1046extern void efi_reserve_boot_services(void); 1049extern void efi_reserve_boot_services(void);
@@ -1659,7 +1662,55 @@ struct linux_efi_tpm_eventlog {
1659 1662
1660extern int efi_tpm_eventlog_init(void); 1663extern int efi_tpm_eventlog_init(void);
1661 1664
1665/*
1666 * efi_runtime_service() function identifiers.
1667 * "NONE" is used by efi_recover_from_page_fault() to check if the page
1668 * fault happened while executing an efi runtime service.
1669 */
1670enum efi_rts_ids {
1671 NONE,
1672 GET_TIME,
1673 SET_TIME,
1674 GET_WAKEUP_TIME,
1675 SET_WAKEUP_TIME,
1676 GET_VARIABLE,
1677 GET_NEXT_VARIABLE,
1678 SET_VARIABLE,
1679 QUERY_VARIABLE_INFO,
1680 GET_NEXT_HIGH_MONO_COUNT,
1681 RESET_SYSTEM,
1682 UPDATE_CAPSULE,
1683 QUERY_CAPSULE_CAPS,
1684};
1685
1686/*
1687 * efi_runtime_work: Details of EFI Runtime Service work
1688 * @arg<1-5>: EFI Runtime Service function arguments
1689 * @status: Status of executing EFI Runtime Service
1690 * @efi_rts_id: EFI Runtime Service function identifier
1691 * @efi_rts_comp: Struct used for handling completions
1692 */
1693struct efi_runtime_work {
1694 void *arg1;
1695 void *arg2;
1696 void *arg3;
1697 void *arg4;
1698 void *arg5;
1699 efi_status_t status;
1700 struct work_struct work;
1701 enum efi_rts_ids efi_rts_id;
1702 struct completion efi_rts_comp;
1703};
1704
1705extern struct efi_runtime_work efi_rts_work;
1706
1662/* Workqueue to queue EFI Runtime Services */ 1707/* Workqueue to queue EFI Runtime Services */
1663extern struct workqueue_struct *efi_rts_wq; 1708extern struct workqueue_struct *efi_rts_wq;
1664 1709
1710struct linux_efi_memreserve {
1711 phys_addr_t next;
1712 phys_addr_t base;
1713 phys_addr_t size;
1714};
1715
1665#endif /* _LINUX_EFI_H */ 1716#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a02deea30185..015bb59c0331 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -111,7 +111,7 @@ struct elevator_mq_ops {
111 void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); 111 void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
112 struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); 112 struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
113 bool (*has_work)(struct blk_mq_hw_ctx *); 113 bool (*has_work)(struct blk_mq_hw_ctx *);
114 void (*completed_request)(struct request *); 114 void (*completed_request)(struct request *, u64);
115 void (*started_request)(struct request *); 115 void (*started_request)(struct request *);
116 void (*requeue_request)(struct request *); 116 void (*requeue_request)(struct request *);
117 struct request *(*former_request)(struct request_queue *, struct request *); 117 struct request *(*former_request)(struct request_queue *, struct request *);
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index b5f2efdd05e0..7a37f4ce9fd2 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -27,10 +27,10 @@ struct compat_elf_prstatus
27 compat_pid_t pr_ppid; 27 compat_pid_t pr_ppid;
28 compat_pid_t pr_pgrp; 28 compat_pid_t pr_pgrp;
29 compat_pid_t pr_sid; 29 compat_pid_t pr_sid;
30 struct compat_timeval pr_utime; 30 struct old_timeval32 pr_utime;
31 struct compat_timeval pr_stime; 31 struct old_timeval32 pr_stime;
32 struct compat_timeval pr_cutime; 32 struct old_timeval32 pr_cutime;
33 struct compat_timeval pr_cstime; 33 struct old_timeval32 pr_cstime;
34 compat_elf_gregset_t pr_reg; 34 compat_elf_gregset_t pr_reg;
35#ifdef CONFIG_BINFMT_ELF_FDPIC 35#ifdef CONFIG_BINFMT_ELF_FDPIC
36 compat_ulong_t pr_exec_fdpic_loadmap; 36 compat_ulong_t pr_exec_fdpic_loadmap;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index f8a2245b70ac..afd9596ce636 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -183,14 +183,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
183 183
184/** 184/**
185 * struct ethtool_ops - optional netdev operations 185 * struct ethtool_ops - optional netdev operations
186 * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
187 * API. Get various device settings including Ethernet link
188 * settings. The @cmd parameter is expected to have been cleared
189 * before get_settings is called. Returns a negative error code
190 * or zero.
191 * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
192 * API. Set various device settings including Ethernet link
193 * settings. Returns a negative error code or zero.
194 * @get_drvinfo: Report driver/device information. Should only set the 186 * @get_drvinfo: Report driver/device information. Should only set the
195 * @driver, @version, @fw_version and @bus_info fields. If not 187 * @driver, @version, @fw_version and @bus_info fields. If not
196 * implemented, the @driver and @bus_info fields will be filled in 188 * implemented, the @driver and @bus_info fields will be filled in
@@ -297,19 +289,16 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
297 * a TX queue has this number, return -EINVAL. If only a RX queue or a TX 289 * a TX queue has this number, return -EINVAL. If only a RX queue or a TX
298 * queue has this number, ignore the inapplicable fields. 290 * queue has this number, ignore the inapplicable fields.
299 * Returns a negative error code or zero. 291 * Returns a negative error code or zero.
300 * @get_link_ksettings: When defined, takes precedence over the 292 * @get_link_ksettings: Get various device settings including Ethernet link
301 * %get_settings method. Get various device settings 293 * settings. The %cmd and %link_mode_masks_nwords fields should be
302 * including Ethernet link settings. The %cmd and 294 * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
303 * %link_mode_masks_nwords fields should be ignored (use 295 * any change to them will be overwritten by kernel. Returns a negative
304 * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any 296 * error code or zero.
305 * change to them will be overwritten by kernel. Returns a 297 * @set_link_ksettings: Set various device settings including Ethernet link
306 * negative error code or zero. 298 * settings. The %cmd and %link_mode_masks_nwords fields should be
307 * @set_link_ksettings: When defined, takes precedence over the 299 * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
308 * %set_settings method. Set various device settings including 300 * any change to them will be overwritten by kernel. Returns a negative
309 * Ethernet link settings. The %cmd and %link_mode_masks_nwords 301 * error code or zero.
310 * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
311 * instead of the latter), any change to them will be overwritten
312 * by kernel. Returns a negative error code or zero.
313 * @get_fecparam: Get the network device Forward Error Correction parameters. 302 * @get_fecparam: Get the network device Forward Error Correction parameters.
314 * @set_fecparam: Set the network device Forward Error Correction parameters. 303 * @set_fecparam: Set the network device Forward Error Correction parameters.
315 * @get_ethtool_phy_stats: Return extended statistics about the PHY device. 304 * @get_ethtool_phy_stats: Return extended statistics about the PHY device.
@@ -329,8 +318,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
329 * of the generic netdev features interface. 318 * of the generic netdev features interface.
330 */ 319 */
331struct ethtool_ops { 320struct ethtool_ops {
332 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
333 int (*set_settings)(struct net_device *, struct ethtool_cmd *);
334 void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); 321 void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
335 int (*get_regs_len)(struct net_device *); 322 int (*get_regs_len)(struct net_device *);
336 void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); 323 void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index f70f8ac9c4f4..d7711048ef93 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/** 2/**
2 * include/linux/f2fs_fs.h 3 * include/linux/f2fs_fs.h
3 * 4 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/ 6 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */ 7 */
11#ifndef _LINUX_F2FS_FS_H 8#ifndef _LINUX_F2FS_FS_H
12#define _LINUX_F2FS_FS_H 9#define _LINUX_F2FS_FS_H
@@ -112,12 +109,15 @@ struct f2fs_super_block {
112 struct f2fs_device devs[MAX_DEVICES]; /* device list */ 109 struct f2fs_device devs[MAX_DEVICES]; /* device list */
113 __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ 110 __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
114 __u8 hot_ext_count; /* # of hot file extension */ 111 __u8 hot_ext_count; /* # of hot file extension */
115 __u8 reserved[314]; /* valid reserved region */ 112 __u8 reserved[310]; /* valid reserved region */
113 __le32 crc; /* checksum of superblock */
116} __packed; 114} __packed;
117 115
118/* 116/*
119 * For checkpoint 117 * For checkpoint
120 */ 118 */
119#define CP_DISABLED_FLAG 0x00001000
120#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
121#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 121#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
122#define CP_NOCRC_RECOVERY_FLAG 0x00000200 122#define CP_NOCRC_RECOVERY_FLAG 0x00000200
123#define CP_TRIMMED_FLAG 0x00000100 123#define CP_TRIMMED_FLAG 0x00000100
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 096c96f4f16a..a5a60691e48b 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -4,6 +4,61 @@
4 4
5#include <uapi/linux/fanotify.h> 5#include <uapi/linux/fanotify.h>
6 6
7/* not valid from userspace, only kernel internal */ 7#define FAN_GROUP_FLAG(group, flag) \
8#define FAN_MARK_ONDIR 0x00000100 8 ((group)->fanotify_data.flags & (flag))
9
10/*
11 * Flags allowed to be passed from/to userspace.
12 *
13 * We intentionally do not add new bits to the old FAN_ALL_* constants, because
14 * they are uapi exposed constants. If there are programs out there using
15 * these constant, the programs may break if re-compiled with new uapi headers
16 * and then run on an old kernel.
17 */
18#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
19 FAN_CLASS_PRE_CONTENT)
20
21#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \
22 FAN_REPORT_TID | \
23 FAN_CLOEXEC | FAN_NONBLOCK | \
24 FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
25
26#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
27 FAN_MARK_FILESYSTEM)
28
29#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
30 FAN_MARK_ADD | \
31 FAN_MARK_REMOVE | \
32 FAN_MARK_DONT_FOLLOW | \
33 FAN_MARK_ONLYDIR | \
34 FAN_MARK_IGNORED_MASK | \
35 FAN_MARK_IGNORED_SURV_MODIFY | \
36 FAN_MARK_FLUSH)
37
38/* Events that user can request to be notified on */
39#define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \
40 FAN_CLOSE | FAN_OPEN)
41
42/* Events that require a permission response from user */
43#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM)
44
45/* Extra flags that may be reported with event or control handling of events */
46#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
47
48/* Events that may be reported to user */
49#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \
50 FANOTIFY_PERM_EVENTS | \
51 FAN_Q_OVERFLOW)
52
53#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
54 FANOTIFY_EVENT_FLAGS)
55
56/* Do not use these old uapi constants internally */
57#undef FAN_ALL_CLASS_BITS
58#undef FAN_ALL_INIT_FLAGS
59#undef FAN_ALL_MARK_FLAGS
60#undef FAN_ALL_EVENTS
61#undef FAN_ALL_PERM_EVENTS
62#undef FAN_ALL_OUTGOING_EVENTS
63
9#endif /* _LINUX_FANOTIFY_H */ 64#endif /* _LINUX_FANOTIFY_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3e7e75383d32..a3cab6dc9b44 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -456,10 +456,13 @@ struct fb_tile_ops {
456 * and host endianness. Drivers should not use this flag. 456 * and host endianness. Drivers should not use this flag.
457 */ 457 */
458#define FBINFO_BE_MATH 0x100000 458#define FBINFO_BE_MATH 0x100000
459/*
460 * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
461 * drivers to stop userspace from trying to share buffers behind the kernel's
462 * back. Instead dma-buf based buffer sharing should be used.
463 */
464#define FBINFO_HIDE_SMEM_START 0x200000
459 465
460/* report to the VT layer that this fb driver can accept forced console
461 output like oopses */
462#define FBINFO_CAN_FORCE_OUTPUT 0x200000
463 466
464struct fb_info { 467struct fb_info {
465 atomic_t count; 468 atomic_t count;
@@ -632,6 +635,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
632extern int register_framebuffer(struct fb_info *fb_info); 635extern int register_framebuffer(struct fb_info *fb_info);
633extern int unregister_framebuffer(struct fb_info *fb_info); 636extern int unregister_framebuffer(struct fb_info *fb_info);
634extern int unlink_framebuffer(struct fb_info *fb_info); 637extern int unlink_framebuffer(struct fb_info *fb_info);
638extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
639 const char *name);
635extern int remove_conflicting_framebuffers(struct apertures_struct *a, 640extern int remove_conflicting_framebuffers(struct apertures_struct *a,
636 const char *name, bool primary); 641 const char *name, bool primary);
637extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); 642extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6791a0ac0139..de629b706d1d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -520,24 +520,6 @@ struct bpf_skb_data_end {
520 void *data_end; 520 void *data_end;
521}; 521};
522 522
523struct sk_msg_buff {
524 void *data;
525 void *data_end;
526 __u32 apply_bytes;
527 __u32 cork_bytes;
528 int sg_copybreak;
529 int sg_start;
530 int sg_curr;
531 int sg_end;
532 struct scatterlist sg_data[MAX_SKB_FRAGS];
533 bool sg_copy[MAX_SKB_FRAGS];
534 __u32 flags;
535 struct sock *sk_redir;
536 struct sock *sk;
537 struct sk_buff *skb;
538 struct list_head list;
539};
540
541struct bpf_redirect_info { 523struct bpf_redirect_info {
542 u32 ifindex; 524 u32 ifindex;
543 u32 flags; 525 u32 flags;
@@ -566,6 +548,27 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
566 cb->data_end = skb->data + skb_headlen(skb); 548 cb->data_end = skb->data + skb_headlen(skb);
567} 549}
568 550
551/* Similar to bpf_compute_data_pointers(), except that save orginal
552 * data in cb->data and cb->meta_data for restore.
553 */
554static inline void bpf_compute_and_save_data_end(
555 struct sk_buff *skb, void **saved_data_end)
556{
557 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
558
559 *saved_data_end = cb->data_end;
560 cb->data_end = skb->data + skb_headlen(skb);
561}
562
563/* Restore data saved by bpf_compute_data_pointers(). */
564static inline void bpf_restore_data_end(
565 struct sk_buff *skb, void *saved_data_end)
566{
567 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
568
569 cb->data_end = saved_data_end;
570}
571
569static inline u8 *bpf_skb_cb(struct sk_buff *skb) 572static inline u8 *bpf_skb_cb(struct sk_buff *skb)
570{ 573{
571 /* eBPF programs may read/write skb->cb[] area to transfer meta 574 /* eBPF programs may read/write skb->cb[] area to transfer meta
@@ -833,9 +836,6 @@ void xdp_do_flush_map(void);
833 836
834void bpf_warn_invalid_xdp_action(u32 act); 837void bpf_warn_invalid_xdp_action(u32 act);
835 838
836struct sock *do_sk_redirect_map(struct sk_buff *skb);
837struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
838
839#ifdef CONFIG_INET 839#ifdef CONFIG_INET
840struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, 840struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
841 struct bpf_prog *prog, struct sk_buff *skb, 841 struct bpf_prog *prog, struct sk_buff *skb,
@@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
854extern int bpf_jit_enable; 854extern int bpf_jit_enable;
855extern int bpf_jit_harden; 855extern int bpf_jit_harden;
856extern int bpf_jit_kallsyms; 856extern int bpf_jit_kallsyms;
857extern int bpf_jit_limit;
857 858
858typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); 859typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
859 860
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index ce550fcf6360..817600a32c93 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -69,4 +69,8 @@ void fpga_bridge_free(struct fpga_bridge *br);
69int fpga_bridge_register(struct fpga_bridge *br); 69int fpga_bridge_register(struct fpga_bridge *br);
70void fpga_bridge_unregister(struct fpga_bridge *br); 70void fpga_bridge_unregister(struct fpga_bridge *br);
71 71
72struct fpga_bridge
73*devm_fpga_bridge_create(struct device *dev, const char *name,
74 const struct fpga_bridge_ops *br_ops, void *priv);
75
72#endif /* _LINUX_FPGA_BRIDGE_H */ 76#endif /* _LINUX_FPGA_BRIDGE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8942e61f0028..e8ca62b2cb5b 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
53 FPGA_MGR_STATE_OPERATING, 53 FPGA_MGR_STATE_OPERATING,
54}; 54};
55 55
56/* 56/**
57 * FPGA Manager flags 57 * DOC: FPGA Manager flags
58 * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported 58 *
59 * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting 59 * Flags used in the &fpga_image_info->flags field
60 * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first 60 *
61 * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed 61 * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
62 *
63 * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
64 *
65 * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
66 *
67 * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
68 *
69 * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
62 */ 70 */
63#define FPGA_MGR_PARTIAL_RECONFIG BIT(0) 71#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
64#define FPGA_MGR_EXTERNAL_CONFIG BIT(1) 72#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
@@ -190,4 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr);
190int fpga_mgr_register(struct fpga_manager *mgr); 198int fpga_mgr_register(struct fpga_manager *mgr);
191void fpga_mgr_unregister(struct fpga_manager *mgr); 199void fpga_mgr_unregister(struct fpga_manager *mgr);
192 200
201struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
202 const struct fpga_manager_ops *mops,
203 void *priv);
204
193#endif /*_LINUX_FPGA_MGR_H */ 205#endif /*_LINUX_FPGA_MGR_H */
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 0521b7f577a4..27cb706275db 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -44,4 +44,8 @@ void fpga_region_free(struct fpga_region *region);
44int fpga_region_register(struct fpga_region *region); 44int fpga_region_register(struct fpga_region *region);
45void fpga_region_unregister(struct fpga_region *region); 45void fpga_region_unregister(struct fpga_region *region);
46 46
47struct fpga_region
48*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
49 int (*get_bridges)(struct fpga_region *));
50
47#endif /* _FPGA_REGION_H */ 51#endif /* _FPGA_REGION_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 33322702c910..8252df30b9a1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -403,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
403 loff_t pos, unsigned len, unsigned copied, 403 loff_t pos, unsigned len, unsigned copied,
404 struct page *page, void *fsdata); 404 struct page *page, void *fsdata);
405 405
406/**
407 * struct address_space - Contents of a cacheable, mappable object.
408 * @host: Owner, either the inode or the block_device.
409 * @i_pages: Cached pages.
410 * @gfp_mask: Memory allocation flags to use for allocating pages.
411 * @i_mmap_writable: Number of VM_SHARED mappings.
412 * @i_mmap: Tree of private and shared mappings.
413 * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
414 * @nrpages: Number of page entries, protected by the i_pages lock.
415 * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
416 * @writeback_index: Writeback starts here.
417 * @a_ops: Methods.
418 * @flags: Error bits and flags (AS_*).
419 * @wb_err: The most recent error which has occurred.
420 * @private_lock: For use by the owner of the address_space.
421 * @private_list: For use by the owner of the address_space.
422 * @private_data: For use by the owner of the address_space.
423 */
406struct address_space { 424struct address_space {
407 struct inode *host; /* owner: inode, block_device */ 425 struct inode *host;
408 struct radix_tree_root i_pages; /* cached pages */ 426 struct xarray i_pages;
409 atomic_t i_mmap_writable;/* count VM_SHARED mappings */ 427 gfp_t gfp_mask;
410 struct rb_root_cached i_mmap; /* tree of private and shared mappings */ 428 atomic_t i_mmap_writable;
411 struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ 429 struct rb_root_cached i_mmap;
412 /* Protected by the i_pages lock */ 430 struct rw_semaphore i_mmap_rwsem;
413 unsigned long nrpages; /* number of total pages */ 431 unsigned long nrpages;
414 /* number of shadow or DAX exceptional entries */
415 unsigned long nrexceptional; 432 unsigned long nrexceptional;
416 pgoff_t writeback_index;/* writeback starts here */ 433 pgoff_t writeback_index;
417 const struct address_space_operations *a_ops; /* methods */ 434 const struct address_space_operations *a_ops;
418 unsigned long flags; /* error bits */ 435 unsigned long flags;
419 spinlock_t private_lock; /* for use by the address_space */
420 gfp_t gfp_mask; /* implicit gfp mask for allocations */
421 struct list_head private_list; /* for use by the address_space */
422 void *private_data; /* ditto */
423 errseq_t wb_err; 436 errseq_t wb_err;
437 spinlock_t private_lock;
438 struct list_head private_list;
439 void *private_data;
424} __attribute__((aligned(sizeof(long)))) __randomize_layout; 440} __attribute__((aligned(sizeof(long)))) __randomize_layout;
425 /* 441 /*
426 * On most architectures that alignment is already the case; but 442 * On most architectures that alignment is already the case; but
@@ -467,15 +483,18 @@ struct block_device {
467 struct mutex bd_fsfreeze_mutex; 483 struct mutex bd_fsfreeze_mutex;
468} __randomize_layout; 484} __randomize_layout;
469 485
486/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
487#define PAGECACHE_TAG_DIRTY XA_MARK_0
488#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
489#define PAGECACHE_TAG_TOWRITE XA_MARK_2
490
470/* 491/*
471 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache 492 * Returns true if any of the pages in the mapping are marked with the tag.
472 * radix trees
473 */ 493 */
474#define PAGECACHE_TAG_DIRTY 0 494static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
475#define PAGECACHE_TAG_WRITEBACK 1 495{
476#define PAGECACHE_TAG_TOWRITE 2 496 return xa_marked(&mapping->i_pages, tag);
477 497}
478int mapping_tagged(struct address_space *mapping, int tag);
479 498
480static inline void i_mmap_lock_write(struct address_space *mapping) 499static inline void i_mmap_lock_write(struct address_space *mapping)
481{ 500{
@@ -1393,17 +1412,26 @@ struct super_block {
1393 1412
1394 struct sb_writers s_writers; 1413 struct sb_writers s_writers;
1395 1414
1415 /*
1416 * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
1417 * s_fsnotify_marks together for cache efficiency. They are frequently
1418 * accessed and rarely modified.
1419 */
1420 void *s_fs_info; /* Filesystem private info */
1421
1422 /* Granularity of c/m/atime in ns (cannot be worse than a second) */
1423 u32 s_time_gran;
1424#ifdef CONFIG_FSNOTIFY
1425 __u32 s_fsnotify_mask;
1426 struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
1427#endif
1428
1396 char s_id[32]; /* Informational name */ 1429 char s_id[32]; /* Informational name */
1397 uuid_t s_uuid; /* UUID */ 1430 uuid_t s_uuid; /* UUID */
1398 1431
1399 void *s_fs_info; /* Filesystem private info */
1400 unsigned int s_max_links; 1432 unsigned int s_max_links;
1401 fmode_t s_mode; 1433 fmode_t s_mode;
1402 1434
1403 /* Granularity of c/m/atime in ns.
1404 Cannot be worse than a second */
1405 u32 s_time_gran;
1406
1407 /* 1435 /*
1408 * The next field is for VFS *only*. No filesystems have any business 1436 * The next field is for VFS *only*. No filesystems have any business
1409 * even looking at it. You had been warned. 1437 * even looking at it. You had been warned.
@@ -1428,6 +1456,9 @@ struct super_block {
1428 /* Number of inodes with nlink == 0 but still referenced */ 1456 /* Number of inodes with nlink == 0 but still referenced */
1429 atomic_long_t s_remove_count; 1457 atomic_long_t s_remove_count;
1430 1458
1459 /* Pending fsnotify inode refs */
1460 atomic_long_t s_fsnotify_inode_refs;
1461
1431 /* Being remounted read-only */ 1462 /* Being remounted read-only */
1432 int s_readonly_remount; 1463 int s_readonly_remount;
1433 1464
@@ -1763,6 +1794,7 @@ struct file_operations {
1763 u64); 1794 u64);
1764 int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, 1795 int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
1765 u64); 1796 u64);
1797 int (*fadvise)(struct file *, loff_t, loff_t, int);
1766} __randomize_layout; 1798} __randomize_layout;
1767 1799
1768struct inode_operations { 1800struct inode_operations {
@@ -1827,8 +1859,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
1827extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, 1859extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
1828 struct inode *inode_out, loff_t pos_out, 1860 struct inode *inode_out, loff_t pos_out,
1829 u64 *len, bool is_dedupe); 1861 u64 *len, bool is_dedupe);
1862extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
1863 struct file *file_out, loff_t pos_out, u64 len);
1830extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, 1864extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1831 struct file *file_out, loff_t pos_out, u64 len); 1865 struct file *file_out, loff_t pos_out, u64 len);
1832extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, 1866extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1833 struct inode *dest, loff_t destoff, 1867 struct inode *dest, loff_t destoff,
1834 loff_t len, bool *is_same); 1868 loff_t len, bool *is_same);
@@ -2772,19 +2806,6 @@ static inline void file_end_write(struct file *file)
2772 __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); 2806 __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
2773} 2807}
2774 2808
2775static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
2776 struct file *file_out, loff_t pos_out,
2777 u64 len)
2778{
2779 int ret;
2780
2781 file_start_write(file_out);
2782 ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
2783 file_end_write(file_out);
2784
2785 return ret;
2786}
2787
2788/* 2809/*
2789 * get_write_access() gets write permission for a file. 2810 * get_write_access() gets write permission for a file.
2790 * put_write_access() releases this write permission. 2811 * put_write_access() releases this write permission.
@@ -3459,4 +3480,8 @@ static inline bool dir_relax_shared(struct inode *inode)
3459extern bool path_noexec(const struct path *path); 3480extern bool path_noexec(const struct path *path);
3460extern void inode_nohighmem(struct inode *inode); 3481extern void inode_nohighmem(struct inode *inode);
3461 3482
3483/* mm/fadvise.c */
3484extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
3485 int advice);
3486
3462#endif /* _LINUX_FS_H */ 3487#endif /* _LINUX_FS_H */
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index f27cb14088a4..9d3f668df7df 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -351,6 +351,14 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
351#define dev_is_fsl_mc(_dev) (0) 351#define dev_is_fsl_mc(_dev) (0)
352#endif 352#endif
353 353
354/* Macro to check if a device is a container device */
355#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
356 FSL_MC_IS_DPRC)
357
358/* Macro to get the container device of a MC device */
359#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
360 (_dev) : (_dev)->parent)
361
354/* 362/*
355 * module_fsl_mc_driver() - Helper macro for drivers that don't do 363 * module_fsl_mc_driver() - Helper macro for drivers that don't do
356 * anything special in module init/exit. This eliminates a lot of 364 * anything special in module init/exit. This eliminates a lot of
@@ -405,6 +413,7 @@ extern struct device_type fsl_mc_bus_dpcon_type;
405extern struct device_type fsl_mc_bus_dpmcp_type; 413extern struct device_type fsl_mc_bus_dpmcp_type;
406extern struct device_type fsl_mc_bus_dpmac_type; 414extern struct device_type fsl_mc_bus_dpmac_type;
407extern struct device_type fsl_mc_bus_dprtc_type; 415extern struct device_type fsl_mc_bus_dprtc_type;
416extern struct device_type fsl_mc_bus_dpseci_type;
408 417
409static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) 418static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
410{ 419{
@@ -451,6 +460,11 @@ static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
451 return mc_dev->dev.type == &fsl_mc_bus_dprtc_type; 460 return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
452} 461}
453 462
463static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
464{
465 return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
466}
467
454/* 468/*
455 * Data Path Buffer Pool (DPBP) API 469 * Data Path Buffer Pool (DPBP) API
456 * Contains initialization APIs and runtime control APIs for DPBP 470 * Contains initialization APIs and runtime control APIs for DPBP
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 3fdfede2f0f3..5f343b796ad9 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -274,6 +274,8 @@
274 */ 274 */
275/* Auto Boot Mode */ 275/* Auto Boot Mode */
276#define IFC_NAND_NCFGR_BOOT 0x80000000 276#define IFC_NAND_NCFGR_BOOT 0x80000000
277/* SRAM Initialization */
278#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000
277/* Addressing Mode-ROW0+n/COL0 */ 279/* Addressing Mode-ROW0+n/COL0 */
278#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 280#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
279/* Addressing Mode-ROW0+n/COL0+n */ 281/* Addressing Mode-ROW0+n/COL0+n */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index b8f4182f42f1..135b973e44d1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -68,15 +68,20 @@
68 68
69#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) 69#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
70 70
71/* Events that can be reported to backends */
71#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ 72#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
72 FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ 73 FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
73 FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ 74 FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
74 FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ 75 FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
75 FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ 76 FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
76 FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \ 77 FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME)
77 FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \ 78
79/* Extra flags that may be reported with event or control handling of events */
80#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
78 FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) 81 FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
79 82
83#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
84
80struct fsnotify_group; 85struct fsnotify_group;
81struct fsnotify_event; 86struct fsnotify_event;
82struct fsnotify_mark; 87struct fsnotify_mark;
@@ -189,10 +194,10 @@ struct fsnotify_group {
189 /* allows a group to block waiting for a userspace response */ 194 /* allows a group to block waiting for a userspace response */
190 struct list_head access_list; 195 struct list_head access_list;
191 wait_queue_head_t access_waitq; 196 wait_queue_head_t access_waitq;
192 int f_flags; 197 int flags; /* flags from fanotify_init() */
198 int f_flags; /* event_f_flags from fanotify_init() */
193 unsigned int max_marks; 199 unsigned int max_marks;
194 struct user_struct *user; 200 struct user_struct *user;
195 bool audit;
196 } fanotify_data; 201 } fanotify_data;
197#endif /* CONFIG_FANOTIFY */ 202#endif /* CONFIG_FANOTIFY */
198 }; 203 };
@@ -206,12 +211,14 @@ struct fsnotify_group {
206enum fsnotify_obj_type { 211enum fsnotify_obj_type {
207 FSNOTIFY_OBJ_TYPE_INODE, 212 FSNOTIFY_OBJ_TYPE_INODE,
208 FSNOTIFY_OBJ_TYPE_VFSMOUNT, 213 FSNOTIFY_OBJ_TYPE_VFSMOUNT,
214 FSNOTIFY_OBJ_TYPE_SB,
209 FSNOTIFY_OBJ_TYPE_COUNT, 215 FSNOTIFY_OBJ_TYPE_COUNT,
210 FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT 216 FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
211}; 217};
212 218
213#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) 219#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
214#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) 220#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
221#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
215#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) 222#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
216 223
217static inline bool fsnotify_valid_obj_type(unsigned int type) 224static inline bool fsnotify_valid_obj_type(unsigned int type)
@@ -255,6 +262,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
255 262
256FSNOTIFY_ITER_FUNCS(inode, INODE) 263FSNOTIFY_ITER_FUNCS(inode, INODE)
257FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) 264FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
265FSNOTIFY_ITER_FUNCS(sb, SB)
258 266
259#define fsnotify_foreach_obj_type(type) \ 267#define fsnotify_foreach_obj_type(type) \
260 for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++) 268 for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
@@ -267,8 +275,8 @@ struct fsnotify_mark_connector;
267typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; 275typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
268 276
269/* 277/*
270 * Inode / vfsmount point to this structure which tracks all marks attached to 278 * Inode/vfsmount/sb point to this structure which tracks all marks attached to
271 * the inode / vfsmount. The reference to inode / vfsmount is held by this 279 * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this
272 * structure. We destroy this structure when there are no more marks attached 280 * structure. We destroy this structure when there are no more marks attached
273 * to it. The structure is protected by fsnotify_mark_srcu. 281 * to it. The structure is protected by fsnotify_mark_srcu.
274 */ 282 */
@@ -335,6 +343,7 @@ extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int dat
335extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); 343extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
336extern void __fsnotify_inode_delete(struct inode *inode); 344extern void __fsnotify_inode_delete(struct inode *inode);
337extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); 345extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
346extern void fsnotify_sb_delete(struct super_block *sb);
338extern u32 fsnotify_get_cookie(void); 347extern u32 fsnotify_get_cookie(void);
339 348
340static inline int fsnotify_inode_watches_children(struct inode *inode) 349static inline int fsnotify_inode_watches_children(struct inode *inode)
@@ -455,9 +464,13 @@ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *gr
455{ 464{
456 fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL); 465 fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
457} 466}
467/* run all the marks in a group, and clear all of the sn marks */
468static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
469{
470 fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
471}
458extern void fsnotify_get_mark(struct fsnotify_mark *mark); 472extern void fsnotify_get_mark(struct fsnotify_mark *mark);
459extern void fsnotify_put_mark(struct fsnotify_mark *mark); 473extern void fsnotify_put_mark(struct fsnotify_mark *mark);
460extern void fsnotify_unmount_inodes(struct super_block *sb);
461extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); 474extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
462extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); 475extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
463 476
@@ -484,6 +497,9 @@ static inline void __fsnotify_inode_delete(struct inode *inode)
484static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) 497static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
485{} 498{}
486 499
500static inline void fsnotify_sb_delete(struct super_block *sb)
501{}
502
487static inline void fsnotify_update_flags(struct dentry *dentry) 503static inline void fsnotify_update_flags(struct dentry *dentry)
488{} 504{}
489 505
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 57864422a2c8..70fc838e6773 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -83,10 +83,10 @@ struct partition {
83} __attribute__((packed)); 83} __attribute__((packed));
84 84
85struct disk_stats { 85struct disk_stats {
86 u64 nsecs[NR_STAT_GROUPS];
86 unsigned long sectors[NR_STAT_GROUPS]; 87 unsigned long sectors[NR_STAT_GROUPS];
87 unsigned long ios[NR_STAT_GROUPS]; 88 unsigned long ios[NR_STAT_GROUPS];
88 unsigned long merges[NR_STAT_GROUPS]; 89 unsigned long merges[NR_STAT_GROUPS];
89 unsigned long ticks[NR_STAT_GROUPS];
90 unsigned long io_ticks; 90 unsigned long io_ticks;
91 unsigned long time_in_queue; 91 unsigned long time_in_queue;
92}; 92};
@@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part)
354 354
355#endif /* CONFIG_SMP */ 355#endif /* CONFIG_SMP */
356 356
357#define part_stat_read_msecs(part, which) \
358 div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
359
357#define part_stat_read_accum(part, field) \ 360#define part_stat_read_accum(part, field) \
358 (part_stat_read(part, field[STAT_READ]) + \ 361 (part_stat_read(part, field[STAT_READ]) + \
359 part_stat_read(part, field[STAT_WRITE]) + \ 362 part_stat_read(part, field[STAT_WRITE]) + \
@@ -399,10 +402,11 @@ static inline void free_part_info(struct hd_struct *part)
399extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part); 402extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
400 403
401/* block/genhd.c */ 404/* block/genhd.c */
402extern void device_add_disk(struct device *parent, struct gendisk *disk); 405extern void device_add_disk(struct device *parent, struct gendisk *disk,
406 const struct attribute_group **groups);
403static inline void add_disk(struct gendisk *disk) 407static inline void add_disk(struct gendisk *disk)
404{ 408{
405 device_add_disk(NULL, disk); 409 device_add_disk(NULL, disk, NULL);
406} 410}
407extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); 411extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
408static inline void add_disk_no_queue_reg(struct gendisk *disk) 412static inline void add_disk_no_queue_reg(struct gendisk *disk)
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 21ddbe440030..f2f887795d43 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -18,10 +18,19 @@ struct device;
18struct gpio_desc; 18struct gpio_desc;
19 19
20/** 20/**
21 * Opaque descriptor for a structure of GPIO array attributes. This structure
22 * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be
23 * passed back to get/set array functions in order to activate fast processing
24 * path if applicable.
25 */
26struct gpio_array;
27
28/**
21 * Struct containing an array of descriptors that can be obtained using 29 * Struct containing an array of descriptors that can be obtained using
22 * gpiod_get_array(). 30 * gpiod_get_array().
23 */ 31 */
24struct gpio_descs { 32struct gpio_descs {
33 struct gpio_array *info;
25 unsigned int ndescs; 34 unsigned int ndescs;
26 struct gpio_desc *desc[]; 35 struct gpio_desc *desc[];
27}; 36};
@@ -30,6 +39,7 @@ struct gpio_descs {
30#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) 39#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
31#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) 40#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
32#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3) 41#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
42#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
33 43
34/** 44/**
35 * Optional flags that can be passed to one of gpiod_* to configure direction 45 * Optional flags that can be passed to one of gpiod_* to configure direction
@@ -104,36 +114,46 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
104/* Value get/set from non-sleeping context */ 114/* Value get/set from non-sleeping context */
105int gpiod_get_value(const struct gpio_desc *desc); 115int gpiod_get_value(const struct gpio_desc *desc);
106int gpiod_get_array_value(unsigned int array_size, 116int gpiod_get_array_value(unsigned int array_size,
107 struct gpio_desc **desc_array, int *value_array); 117 struct gpio_desc **desc_array,
118 struct gpio_array *array_info,
119 unsigned long *value_bitmap);
108void gpiod_set_value(struct gpio_desc *desc, int value); 120void gpiod_set_value(struct gpio_desc *desc, int value);
109void gpiod_set_array_value(unsigned int array_size, 121int gpiod_set_array_value(unsigned int array_size,
110 struct gpio_desc **desc_array, int *value_array); 122 struct gpio_desc **desc_array,
123 struct gpio_array *array_info,
124 unsigned long *value_bitmap);
111int gpiod_get_raw_value(const struct gpio_desc *desc); 125int gpiod_get_raw_value(const struct gpio_desc *desc);
112int gpiod_get_raw_array_value(unsigned int array_size, 126int gpiod_get_raw_array_value(unsigned int array_size,
113 struct gpio_desc **desc_array, 127 struct gpio_desc **desc_array,
114 int *value_array); 128 struct gpio_array *array_info,
129 unsigned long *value_bitmap);
115void gpiod_set_raw_value(struct gpio_desc *desc, int value); 130void gpiod_set_raw_value(struct gpio_desc *desc, int value);
116int gpiod_set_raw_array_value(unsigned int array_size, 131int gpiod_set_raw_array_value(unsigned int array_size,
117 struct gpio_desc **desc_array, 132 struct gpio_desc **desc_array,
118 int *value_array); 133 struct gpio_array *array_info,
134 unsigned long *value_bitmap);
119 135
120/* Value get/set from sleeping context */ 136/* Value get/set from sleeping context */
121int gpiod_get_value_cansleep(const struct gpio_desc *desc); 137int gpiod_get_value_cansleep(const struct gpio_desc *desc);
122int gpiod_get_array_value_cansleep(unsigned int array_size, 138int gpiod_get_array_value_cansleep(unsigned int array_size,
123 struct gpio_desc **desc_array, 139 struct gpio_desc **desc_array,
124 int *value_array); 140 struct gpio_array *array_info,
141 unsigned long *value_bitmap);
125void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); 142void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
126void gpiod_set_array_value_cansleep(unsigned int array_size, 143int gpiod_set_array_value_cansleep(unsigned int array_size,
127 struct gpio_desc **desc_array, 144 struct gpio_desc **desc_array,
128 int *value_array); 145 struct gpio_array *array_info,
146 unsigned long *value_bitmap);
129int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); 147int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
130int gpiod_get_raw_array_value_cansleep(unsigned int array_size, 148int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
131 struct gpio_desc **desc_array, 149 struct gpio_desc **desc_array,
132 int *value_array); 150 struct gpio_array *array_info,
151 unsigned long *value_bitmap);
133void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); 152void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
134int gpiod_set_raw_array_value_cansleep(unsigned int array_size, 153int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
135 struct gpio_desc **desc_array, 154 struct gpio_desc **desc_array,
136 int *value_array); 155 struct gpio_array *array_info,
156 unsigned long *value_bitmap);
137 157
138int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); 158int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
139int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); 159int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
@@ -330,7 +350,8 @@ static inline int gpiod_get_value(const struct gpio_desc *desc)
330} 350}
331static inline int gpiod_get_array_value(unsigned int array_size, 351static inline int gpiod_get_array_value(unsigned int array_size,
332 struct gpio_desc **desc_array, 352 struct gpio_desc **desc_array,
333 int *value_array) 353 struct gpio_array *array_info,
354 unsigned long *value_bitmap)
334{ 355{
335 /* GPIO can never have been requested */ 356 /* GPIO can never have been requested */
336 WARN_ON(1); 357 WARN_ON(1);
@@ -341,12 +362,14 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
341 /* GPIO can never have been requested */ 362 /* GPIO can never have been requested */
342 WARN_ON(1); 363 WARN_ON(1);
343} 364}
344static inline void gpiod_set_array_value(unsigned int array_size, 365static inline int gpiod_set_array_value(unsigned int array_size,
345 struct gpio_desc **desc_array, 366 struct gpio_desc **desc_array,
346 int *value_array) 367 struct gpio_array *array_info,
368 unsigned long *value_bitmap)
347{ 369{
348 /* GPIO can never have been requested */ 370 /* GPIO can never have been requested */
349 WARN_ON(1); 371 WARN_ON(1);
372 return 0;
350} 373}
351static inline int gpiod_get_raw_value(const struct gpio_desc *desc) 374static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
352{ 375{
@@ -356,7 +379,8 @@ static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
356} 379}
357static inline int gpiod_get_raw_array_value(unsigned int array_size, 380static inline int gpiod_get_raw_array_value(unsigned int array_size,
358 struct gpio_desc **desc_array, 381 struct gpio_desc **desc_array,
359 int *value_array) 382 struct gpio_array *array_info,
383 unsigned long *value_bitmap)
360{ 384{
361 /* GPIO can never have been requested */ 385 /* GPIO can never have been requested */
362 WARN_ON(1); 386 WARN_ON(1);
@@ -368,8 +392,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
368 WARN_ON(1); 392 WARN_ON(1);
369} 393}
370static inline int gpiod_set_raw_array_value(unsigned int array_size, 394static inline int gpiod_set_raw_array_value(unsigned int array_size,
371 struct gpio_desc **desc_array, 395 struct gpio_desc **desc_array,
372 int *value_array) 396 struct gpio_array *array_info,
397 unsigned long *value_bitmap)
373{ 398{
374 /* GPIO can never have been requested */ 399 /* GPIO can never have been requested */
375 WARN_ON(1); 400 WARN_ON(1);
@@ -384,7 +409,8 @@ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
384} 409}
385static inline int gpiod_get_array_value_cansleep(unsigned int array_size, 410static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
386 struct gpio_desc **desc_array, 411 struct gpio_desc **desc_array,
387 int *value_array) 412 struct gpio_array *array_info,
413 unsigned long *value_bitmap)
388{ 414{
389 /* GPIO can never have been requested */ 415 /* GPIO can never have been requested */
390 WARN_ON(1); 416 WARN_ON(1);
@@ -395,12 +421,14 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
395 /* GPIO can never have been requested */ 421 /* GPIO can never have been requested */
396 WARN_ON(1); 422 WARN_ON(1);
397} 423}
398static inline void gpiod_set_array_value_cansleep(unsigned int array_size, 424static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
399 struct gpio_desc **desc_array, 425 struct gpio_desc **desc_array,
400 int *value_array) 426 struct gpio_array *array_info,
427 unsigned long *value_bitmap)
401{ 428{
402 /* GPIO can never have been requested */ 429 /* GPIO can never have been requested */
403 WARN_ON(1); 430 WARN_ON(1);
431 return 0;
404} 432}
405static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) 433static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
406{ 434{
@@ -410,7 +438,8 @@ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
410} 438}
411static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, 439static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
412 struct gpio_desc **desc_array, 440 struct gpio_desc **desc_array,
413 int *value_array) 441 struct gpio_array *array_info,
442 unsigned long *value_bitmap)
414{ 443{
415 /* GPIO can never have been requested */ 444 /* GPIO can never have been requested */
416 WARN_ON(1); 445 WARN_ON(1);
@@ -424,7 +453,8 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
424} 453}
425static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, 454static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
426 struct gpio_desc **desc_array, 455 struct gpio_desc **desc_array,
427 int *value_array) 456 struct gpio_array *array_info,
457 unsigned long *value_bitmap)
428{ 458{
429 /* GPIO can never have been requested */ 459 /* GPIO can never have been requested */
430 WARN_ON(1); 460 WARN_ON(1);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 0ea328e71ec9..2db62b550b95 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -66,9 +66,15 @@ struct gpio_irq_chip {
66 /** 66 /**
67 * @lock_key: 67 * @lock_key:
68 * 68 *
69 * Per GPIO IRQ chip lockdep classes. 69 * Per GPIO IRQ chip lockdep class for IRQ lock.
70 */ 70 */
71 struct lock_class_key *lock_key; 71 struct lock_class_key *lock_key;
72
73 /**
74 * @request_key:
75 *
76 * Per GPIO IRQ chip lockdep class for IRQ request.
77 */
72 struct lock_class_key *request_key; 78 struct lock_class_key *request_key;
73 79
74 /** 80 /**
@@ -95,6 +101,13 @@ struct gpio_irq_chip {
95 unsigned int num_parents; 101 unsigned int num_parents;
96 102
97 /** 103 /**
104 * @parent_irq:
105 *
106 * For use by gpiochip_set_cascaded_irqchip()
107 */
108 unsigned int parent_irq;
109
110 /**
98 * @parents: 111 * @parents:
99 * 112 *
100 * A list of interrupt parents of a GPIO chip. This is owned by the 113 * A list of interrupt parents of a GPIO chip. This is owned by the
@@ -138,6 +151,20 @@ struct gpio_irq_chip {
138 * will allocate and map all IRQs during initialization. 151 * will allocate and map all IRQs during initialization.
139 */ 152 */
140 unsigned int first; 153 unsigned int first;
154
155 /**
156 * @irq_enable:
157 *
158 * Store old irq_chip irq_enable callback
159 */
160 void (*irq_enable)(struct irq_data *data);
161
162 /**
163 * @irq_disable:
164 *
165 * Store old irq_chip irq_disable callback
166 */
167 void (*irq_disable)(struct irq_data *data);
141}; 168};
142 169
143static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip) 170static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
@@ -158,9 +185,13 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
158 * @free: optional hook for chip-specific deactivation, such as 185 * @free: optional hook for chip-specific deactivation, such as
159 * disabling module power and clock; may sleep 186 * disabling module power and clock; may sleep
160 * @get_direction: returns direction for signal "offset", 0=out, 1=in, 187 * @get_direction: returns direction for signal "offset", 0=out, 1=in,
161 * (same as GPIOF_DIR_XXX), or negative error 188 * (same as GPIOF_DIR_XXX), or negative error.
189 * It is recommended to always implement this function, even on
190 * input-only or output-only gpio chips.
162 * @direction_input: configures signal "offset" as input, or returns error 191 * @direction_input: configures signal "offset" as input, or returns error
192 * This can be omitted on input-only or output-only gpio chips.
163 * @direction_output: configures signal "offset" as output, or returns error 193 * @direction_output: configures signal "offset" as output, or returns error
194 * This can be omitted on input-only or output-only gpio chips.
164 * @get: returns value for signal "offset", 0=low, 1=high, or negative error 195 * @get: returns value for signal "offset", 0=low, 1=high, or negative error
165 * @get_multiple: reads values for multiple signals defined by "mask" and 196 * @get_multiple: reads values for multiple signals defined by "mask" and
166 * stores them in "bits", returns 0 on success or negative error 197 * stores them in "bits", returns 0 on success or negative error
@@ -256,6 +287,9 @@ struct gpio_chip {
256 287
257 void (*dbg_show)(struct seq_file *s, 288 void (*dbg_show)(struct seq_file *s,
258 struct gpio_chip *chip); 289 struct gpio_chip *chip);
290
291 int (*init_valid_mask)(struct gpio_chip *chip);
292
259 int base; 293 int base;
260 u16 ngpio; 294 u16 ngpio;
261 const char *const *names; 295 const char *const *names;
@@ -294,7 +328,9 @@ struct gpio_chip {
294 /** 328 /**
295 * @need_valid_mask: 329 * @need_valid_mask:
296 * 330 *
297 * If set core allocates @valid_mask with all bits set to one. 331 * If set core allocates @valid_mask with all its values initialized
332 * with init_valid_mask() or set to one if init_valid_mask() is not
333 * defined
298 */ 334 */
299 bool need_valid_mask; 335 bool need_valid_mask;
300 336
@@ -395,6 +431,10 @@ extern struct gpio_chip *gpiochip_find(void *data,
395int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); 431int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
396void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); 432void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
397bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); 433bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
434int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset);
435void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset);
436void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset);
437void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset);
398 438
399/* Line status inquiry for drivers */ 439/* Line status inquiry for drivers */
400bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); 440bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index d271ff23984f..4f3febc0f971 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -101,8 +101,8 @@ enum hdmi_extended_colorimetry {
101 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601, 101 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
102 HDMI_EXTENDED_COLORIMETRY_XV_YCC_709, 102 HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
103 HDMI_EXTENDED_COLORIMETRY_S_YCC_601, 103 HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
104 HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601, 104 HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
105 HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB, 105 HDMI_EXTENDED_COLORIMETRY_OPRGB,
106 106
107 /* The following EC values are only defined in CEA-861-F. */ 107 /* The following EC values are only defined in CEA-861-F. */
108 HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM, 108 HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 834e6461a690..2827b87590d8 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -526,6 +526,7 @@ struct hid_input {
526 const char *name; 526 const char *name;
527 bool registered; 527 bool registered;
528 struct list_head reports; /* the list of reports */ 528 struct list_head reports; /* the list of reports */
529 unsigned int application; /* application usage for this input */
529}; 530};
530 531
531enum hid_type { 532enum hid_type {
@@ -1138,6 +1139,34 @@ static inline u32 hid_report_len(struct hid_report *report)
1138int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, 1139int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1139 int interrupt); 1140 int interrupt);
1140 1141
1142
1143/**
1144 * struct hid_scroll_counter - Utility class for processing high-resolution
1145 * scroll events.
1146 * @dev: the input device for which events should be reported.
1147 * @microns_per_hi_res_unit: the amount moved by the user's finger for each
1148 * high-resolution unit reported by the mouse, in
1149 * microns.
1150 * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
1151 * multiple of its lower resolution. For example, if
1152 * moving the wheel by one "notch" would result in a
1153 * value of 1 in low-resolution mode but 8 in
1154 * high-resolution, the multiplier is 8.
1155 * @remainder: counts the number of high-resolution units moved since the last
1156 * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
1157 * only be used by class methods.
1158 */
1159struct hid_scroll_counter {
1160 struct input_dev *dev;
1161 int microns_per_hi_res_unit;
1162 int resolution_multiplier;
1163
1164 int remainder;
1165};
1166
1167void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
1168 int hi_res_value);
1169
1141/* HID quirks API */ 1170/* HID quirks API */
1142unsigned long hid_lookup_quirk(const struct hid_device *hdev); 1171unsigned long hid_lookup_quirk(const struct hid_device *hdev);
1143int hid_quirks_init(char **quirks_param, __u16 bus, int count); 1172int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 4c92e3ba3e16..dde947083d4e 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -107,7 +107,7 @@ enum hmm_pfn_flag_e {
107 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory 107 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
108 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() 108 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
109 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the 109 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
110 * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not 110 * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
111 * be mirrored by a device, because the entry will never have HMM_PFN_VALID 111 * be mirrored by a device, because the entry will never have HMM_PFN_VALID
112 * set and the pfn value is undefined. 112 * set and the pfn value is undefined.
113 * 113 *
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 99c19b06d9a4..4663ee96cf59 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -43,7 +43,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
43 unsigned char *vec); 43 unsigned char *vec);
44extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 44extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
45 unsigned long new_addr, unsigned long old_end, 45 unsigned long new_addr, unsigned long old_end,
46 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); 46 pmd_t *old_pmd, pmd_t *new_pmd);
47extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 47extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
48 unsigned long addr, pgprot_t newprot, 48 unsigned long addr, pgprot_t newprot,
49 int prot_numa); 49 int prot_numa);
@@ -213,9 +213,9 @@ static inline int hpage_nr_pages(struct page *page)
213} 213}
214 214
215struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 215struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
216 pmd_t *pmd, int flags); 216 pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
217struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 217struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
218 pud_t *pud, int flags); 218 pud_t *pud, int flags, struct dev_pagemap **pgmap);
219 219
220extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); 220extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
221 221
@@ -344,13 +344,13 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm)
344} 344}
345 345
346static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, 346static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
347 unsigned long addr, pmd_t *pmd, int flags) 347 unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
348{ 348{
349 return NULL; 349 return NULL;
350} 350}
351 351
352static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, 352static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
353 unsigned long addr, pud_t *pud, int flags) 353 unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
354{ 354{
355 return NULL; 355 return NULL;
356} 356}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b68e345f0ca..087fd5f48c91 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
140pte_t *huge_pte_offset(struct mm_struct *mm, 140pte_t *huge_pte_offset(struct mm_struct *mm,
141 unsigned long addr, unsigned long sz); 141 unsigned long addr, unsigned long sz);
142int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); 142int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
143void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
144 unsigned long *start, unsigned long *end);
143struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 145struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
144 int write); 146 int write);
145struct page *follow_huge_pd(struct vm_area_struct *vma, 147struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
170 return 0; 172 return 0;
171} 173}
172 174
175static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
176 pte_t *ptep)
177{
178 return 0;
179}
180
181static inline void adjust_range_if_pmd_sharing_possible(
182 struct vm_area_struct *vma,
183 unsigned long *start, unsigned long *end)
184{
185}
186
173#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) 187#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
174#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) 188#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
175#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) 189#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index bee0827766a3..c0b93e0ff0c0 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -33,7 +33,8 @@
33 * and max is a multiple of 4 and >= 32 bytes. 33 * and max is a multiple of 4 and >= 32 bytes.
34 * @priv: Private data, for use by the RNG driver. 34 * @priv: Private data, for use by the RNG driver.
35 * @quality: Estimation of true entropy in RNG's bitstream 35 * @quality: Estimation of true entropy in RNG's bitstream
36 * (per mill). 36 * (in bits of entropy per 1024 bits of input;
37 * valid values: 1 to 1024, or 0 for unknown).
37 */ 38 */
38struct hwrng { 39struct hwrng {
39 const char *name; 40 const char *name;
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9493d4a388db..99e0c1b0b5fb 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -118,6 +118,7 @@ enum hwmon_in_attributes {
118 hwmon_in_max_alarm, 118 hwmon_in_max_alarm,
119 hwmon_in_lcrit_alarm, 119 hwmon_in_lcrit_alarm,
120 hwmon_in_crit_alarm, 120 hwmon_in_crit_alarm,
121 hwmon_in_enable,
121}; 122};
122 123
123#define HWMON_I_INPUT BIT(hwmon_in_input) 124#define HWMON_I_INPUT BIT(hwmon_in_input)
@@ -135,6 +136,7 @@ enum hwmon_in_attributes {
135#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm) 136#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
136#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm) 137#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
137#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm) 138#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
139#define HWMON_I_ENABLE BIT(hwmon_in_enable)
138 140
139enum hwmon_curr_attributes { 141enum hwmon_curr_attributes {
140 hwmon_curr_input, 142 hwmon_curr_input,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index efda23cf32c7..b3e24368930a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -739,8 +739,9 @@ struct vmbus_channel {
739 u32 ringbuffer_gpadlhandle; 739 u32 ringbuffer_gpadlhandle;
740 740
741 /* Allocated memory for ring buffer */ 741 /* Allocated memory for ring buffer */
742 void *ringbuffer_pages; 742 struct page *ringbuffer_page;
743 u32 ringbuffer_pagecount; 743 u32 ringbuffer_pagecount;
744 u32 ringbuffer_send_offset;
744 struct hv_ring_buffer_info outbound; /* send to parent */ 745 struct hv_ring_buffer_info outbound; /* send to parent */
745 struct hv_ring_buffer_info inbound; /* receive from parent */ 746 struct hv_ring_buffer_info inbound; /* receive from parent */
746 747
@@ -1021,6 +1022,14 @@ struct vmbus_packet_mpb_array {
1021 struct hv_mpb_array range; 1022 struct hv_mpb_array range;
1022} __packed; 1023} __packed;
1023 1024
1025int vmbus_alloc_ring(struct vmbus_channel *channel,
1026 u32 send_size, u32 recv_size);
1027void vmbus_free_ring(struct vmbus_channel *channel);
1028
1029int vmbus_connect_ring(struct vmbus_channel *channel,
1030 void (*onchannel_callback)(void *context),
1031 void *context);
1032int vmbus_disconnect_ring(struct vmbus_channel *channel);
1024 1033
1025extern int vmbus_open(struct vmbus_channel *channel, 1034extern int vmbus_open(struct vmbus_channel *channel,
1026 u32 send_ringbuffersize, 1035 u32 send_ringbuffersize,
@@ -1125,6 +1134,7 @@ struct hv_device {
1125 u16 device_id; 1134 u16 device_id;
1126 1135
1127 struct device device; 1136 struct device device;
1137 char *driver_override; /* Driver name to force a match */
1128 1138
1129 struct vmbus_channel *channel; 1139 struct vmbus_channel *channel;
1130 struct kset *channels_kset; 1140 struct kset *channels_kset;
@@ -1442,7 +1452,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1442 const int *srv_version, int srv_vercnt, 1452 const int *srv_version, int srv_vercnt,
1443 int *nego_fw_version, int *nego_srv_version); 1453 int *nego_fw_version, int *nego_srv_version);
1444 1454
1445void hv_process_channel_removal(u32 relid); 1455void hv_process_channel_removal(struct vmbus_channel *channel);
1446 1456
1447void vmbus_setevent(struct vmbus_channel *channel); 1457void vmbus_setevent(struct vmbus_channel *channel);
1448/* 1458/*
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 3ec8628ce17f..60daf34b625d 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -214,8 +214,7 @@ static inline void idr_preload_end(void)
214 ++id, (entry) = idr_get_next((idr), &(id))) 214 ++id, (entry) = idr_get_next((idr), &(id)))
215 215
216/* 216/*
217 * IDA - IDR based id allocator, use when translation from id to 217 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
218 * pointer isn't necessary.
219 */ 218 */
220#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ 219#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
221#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) 220#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
@@ -225,14 +224,14 @@ struct ida_bitmap {
225 unsigned long bitmap[IDA_BITMAP_LONGS]; 224 unsigned long bitmap[IDA_BITMAP_LONGS];
226}; 225};
227 226
228DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
229
230struct ida { 227struct ida {
231 struct radix_tree_root ida_rt; 228 struct xarray xa;
232}; 229};
233 230
231#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
232
234#define IDA_INIT(name) { \ 233#define IDA_INIT(name) { \
235 .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \ 234 .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
236} 235}
237#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) 236#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
238 237
@@ -292,7 +291,7 @@ static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
292 291
293static inline void ida_init(struct ida *ida) 292static inline void ida_init(struct ida *ida)
294{ 293{
295 INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); 294 xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
296} 295}
297 296
298#define ida_simple_get(ida, start, end, gfp) \ 297#define ida_simple_get(ida, start, end, gfp) \
@@ -301,9 +300,6 @@ static inline void ida_init(struct ida *ida)
301 300
302static inline bool ida_is_empty(const struct ida *ida) 301static inline bool ida_is_empty(const struct ida *ida)
303{ 302{
304 return radix_tree_empty(&ida->ida_rt); 303 return xa_empty(&ida->xa);
305} 304}
306
307/* in lib/radix-tree.c */
308int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
309#endif /* __IDR_H__ */ 305#endif /* __IDR_H__ */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 9c03a7d5e400..0ef67f837ae1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1460,13 +1460,16 @@ struct ieee80211_ht_operation {
1460 * STA can receive. Rate expressed in units of 1 Mbps. 1460 * STA can receive. Rate expressed in units of 1 Mbps.
1461 * If this field is 0 this value should not be used to 1461 * If this field is 0 this value should not be used to
1462 * consider the highest RX data rate supported. 1462 * consider the highest RX data rate supported.
1463 * The top 3 bits of this field are reserved. 1463 * The top 3 bits of this field indicate the Maximum NSTS,total
1464 * (a beamformee capability.)
1464 * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams 1465 * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
1465 * @tx_highest: Indicates highest long GI VHT PPDU data rate 1466 * @tx_highest: Indicates highest long GI VHT PPDU data rate
1466 * STA can transmit. Rate expressed in units of 1 Mbps. 1467 * STA can transmit. Rate expressed in units of 1 Mbps.
1467 * If this field is 0 this value should not be used to 1468 * If this field is 0 this value should not be used to
1468 * consider the highest TX data rate supported. 1469 * consider the highest TX data rate supported.
1469 * The top 3 bits of this field are reserved. 1470 * The top 2 bits of this field are reserved, the
1471 * 3rd bit from the top indiciates VHT Extended NSS BW
1472 * Capability.
1470 */ 1473 */
1471struct ieee80211_vht_mcs_info { 1474struct ieee80211_vht_mcs_info {
1472 __le16 rx_mcs_map; 1475 __le16 rx_mcs_map;
@@ -1475,6 +1478,13 @@ struct ieee80211_vht_mcs_info {
1475 __le16 tx_highest; 1478 __le16 tx_highest;
1476} __packed; 1479} __packed;
1477 1480
1481/* for rx_highest */
1482#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
1483#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
1484
1485/* for tx_highest */
1486#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
1487
1478/** 1488/**
1479 * enum ieee80211_vht_mcs_support - VHT MCS support definitions 1489 * enum ieee80211_vht_mcs_support - VHT MCS support definitions
1480 * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the 1490 * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
@@ -1545,11 +1555,11 @@ struct ieee80211_vht_operation {
1545 * struct ieee80211_he_cap_elem - HE capabilities element 1555 * struct ieee80211_he_cap_elem - HE capabilities element
1546 * 1556 *
1547 * This structure is the "HE capabilities element" fixed fields as 1557 * This structure is the "HE capabilities element" fixed fields as
1548 * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3 1558 * described in P802.11ax_D3.0 section 9.4.2.237.2 and 9.4.2.237.3
1549 */ 1559 */
1550struct ieee80211_he_cap_elem { 1560struct ieee80211_he_cap_elem {
1551 u8 mac_cap_info[5]; 1561 u8 mac_cap_info[6];
1552 u8 phy_cap_info[9]; 1562 u8 phy_cap_info[11];
1553} __packed; 1563} __packed;
1554 1564
1555#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5 1565#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
@@ -1650,6 +1660,7 @@ struct ieee80211_mu_edca_param_set {
1650#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 1660#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
1651#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 1661#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
1652#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C 1662#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
1663#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
1653#define IEEE80211_VHT_CAP_RXLDPC 0x00000010 1664#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
1654#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 1665#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
1655#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 1666#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
@@ -1659,6 +1670,7 @@ struct ieee80211_mu_edca_param_set {
1659#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 1670#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
1660#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 1671#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
1661#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 1672#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
1673#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
1662#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 1674#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
1663#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 1675#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
1664#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13 1676#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
@@ -1678,6 +1690,26 @@ struct ieee80211_mu_edca_param_set {
1678#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 1690#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
1679#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 1691#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
1680#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 1692#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
1693#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
1694#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
1695
1696/**
1697 * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
1698 * @cap: VHT capabilities of the peer
1699 * @bw: bandwidth to use
1700 * @mcs: MCS index to use
1701 * @ext_nss_bw_capable: indicates whether or not the local transmitter
1702 * (rate scaling algorithm) can deal with the new logic
1703 * (dot11VHTExtendedNSSBWCapable)
1704 *
1705 * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
1706 * vary for a given BW/MCS. This function parses the data.
1707 *
1708 * Note: This function is exported by cfg80211.
1709 */
1710int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
1711 enum ieee80211_vht_chanwidth bw,
1712 int mcs, bool ext_nss_bw_capable);
1681 1713
1682/* 802.11ax HE MAC capabilities */ 1714/* 802.11ax HE MAC capabilities */
1683#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 1715#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
@@ -1707,15 +1739,15 @@ struct ieee80211_mu_edca_param_set {
1707#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 1739#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
1708#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 1740#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
1709#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c 1741#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
1710#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00 1742#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
1711#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10 1743#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
1712#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20 1744#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
1713#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30 1745#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
1714#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40 1746#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
1715#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50 1747#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
1716#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60 1748#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
1717#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70 1749#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
1718#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70 1750#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
1719 1751
1720/* Link adaptation is split between byte HE_MAC_CAP1 and 1752/* Link adaptation is split between byte HE_MAC_CAP1 and
1721 * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE 1753 * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
@@ -1729,14 +1761,13 @@ struct ieee80211_mu_edca_param_set {
1729 1761
1730#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 1762#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
1731#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 1763#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
1732#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04 1764#define IEEE80211_HE_MAC_CAP2_TRS 0x04
1733#define IEEE80211_HE_MAC_CAP2_BSR 0x08 1765#define IEEE80211_HE_MAC_CAP2_BSR 0x08
1734#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 1766#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
1735#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 1767#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
1736#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 1768#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
1737#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 1769#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
1738 1770
1739#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
1740#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 1771#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
1741#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 1772#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
1742 1773
@@ -1744,25 +1775,34 @@ struct ieee80211_mu_edca_param_set {
1744 * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the 1775 * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the
1745 * same field in the HE capabilities. 1776 * same field in the HE capabilities.
1746 */ 1777 */
1747#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00 1778#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00
1748#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08 1779#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08
1749#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10 1780#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10
1750#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18 1781#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18
1751#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18 1782#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
1752#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20 1783#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
1753#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 1784#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
1754#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 1785#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
1755 1786
1756#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 1787#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
1757#define IEEE80211_HE_MAC_CAP4_QTP 0x02 1788#define IEEE80211_HE_MAC_CAP4_QTP 0x02
1758#define IEEE80211_HE_MAC_CAP4_BQR 0x04 1789#define IEEE80211_HE_MAC_CAP4_BQR 0x04
1759#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08 1790#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08
1760#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 1791#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
1761#define IEEE80211_HE_MAC_CAP4_OPS 0x20 1792#define IEEE80211_HE_MAC_CAP4_OPS 0x20
1762#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 1793#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
1794/* Multi TID agg TX is split between byte #4 and #5
1795 * The value is a combination of B39,B40,B41
1796 */
1797#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
1798
1799#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
1800#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
1801#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04
1802#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
1803#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
1763 1804
1764/* 802.11ax HE PHY capabilities */ 1805/* 802.11ax HE PHY capabilities */
1765#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
1766#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 1806#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
1767#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 1807#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
1768#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 1808#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
@@ -1779,10 +1819,10 @@ struct ieee80211_mu_edca_param_set {
1779#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 1819#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
1780#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 1820#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
1781#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 1821#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
1782/* Midamble RX Max NSTS is split between byte #2 and byte #3 */ 1822/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
1783#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80 1823#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
1784 1824
1785#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01 1825#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
1786#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 1826#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
1787#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 1827#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
1788#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 1828#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
@@ -1883,7 +1923,19 @@ struct ieee80211_mu_edca_param_set {
1883#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 1923#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
1884#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 1924#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
1885#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 1925#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
1886#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20 1926#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
1927#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00
1928#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40
1929#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80
1930#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0
1931#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0
1932
1933#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
1934#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
1935#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
1936#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
1937#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
1938#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
1887 1939
1888/* 802.11ax HE TX/RX MCS NSS Support */ 1940/* 802.11ax HE TX/RX MCS NSS Support */
1889#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) 1941#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
@@ -1963,8 +2015,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
1963#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200 2015#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
1964#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00 2016#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
1965#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10 2017#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
1966#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000 2018#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000
1967#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000 2019#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000
1968#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000 2020#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
1969#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000 2021#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
1970#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000 2022#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 3d2996dc7d85..12e3eebf0ce6 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -16,9 +16,23 @@
16#define __IF_TUN_H 16#define __IF_TUN_H
17 17
18#include <uapi/linux/if_tun.h> 18#include <uapi/linux/if_tun.h>
19#include <uapi/linux/virtio_net.h>
19 20
20#define TUN_XDP_FLAG 0x1UL 21#define TUN_XDP_FLAG 0x1UL
21 22
23#define TUN_MSG_UBUF 1
24#define TUN_MSG_PTR 2
25struct tun_msg_ctl {
26 unsigned short type;
27 unsigned short num;
28 void *ptr;
29};
30
31struct tun_xdp_hdr {
32 int buflen;
33 struct virtio_net_hdr gso;
34};
35
22#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) 36#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
23struct socket *tun_get_socket(struct file *); 37struct socket *tun_get_socket(struct file *);
24struct ptr_ring *tun_get_tx_ring(struct file *file); 38struct ptr_ring *tun_get_tx_ring(struct file *file);
diff --git a/include/linux/init.h b/include/linux/init.h
index 2538d176dd1f..9c2aba1dbabf 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -133,7 +133,6 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
133#endif 133#endif
134 134
135extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; 135extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
136extern initcall_entry_t __security_initcall_start[], __security_initcall_end[];
137 136
138/* Used for contructor calls. */ 137/* Used for contructor calls. */
139typedef void (*ctor_fn_t)(void); 138typedef void (*ctor_fn_t)(void);
@@ -236,7 +235,6 @@ extern bool initcall_debug;
236 static exitcall_t __exitcall_##fn __exit_call = fn 235 static exitcall_t __exitcall_##fn __exit_call = fn
237 236
238#define console_initcall(fn) ___define_initcall(fn,, .con_initcall) 237#define console_initcall(fn) ___define_initcall(fn,, .con_initcall)
239#define security_initcall(fn) ___define_initcall(fn,, .security_initcall)
240 238
241struct obs_kernel_param { 239struct obs_kernel_param {
242 const char *str; 240 const char *str;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 28004d74ae04..b0ae25837361 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -72,6 +72,42 @@
72#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ 72#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
73#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ 73#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
74#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ 74#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
75#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
76#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
77#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
78#define DMAR_MTRR_FIX16K_80000_REG 0x128
79#define DMAR_MTRR_FIX16K_A0000_REG 0x130
80#define DMAR_MTRR_FIX4K_C0000_REG 0x138
81#define DMAR_MTRR_FIX4K_C8000_REG 0x140
82#define DMAR_MTRR_FIX4K_D0000_REG 0x148
83#define DMAR_MTRR_FIX4K_D8000_REG 0x150
84#define DMAR_MTRR_FIX4K_E0000_REG 0x158
85#define DMAR_MTRR_FIX4K_E8000_REG 0x160
86#define DMAR_MTRR_FIX4K_F0000_REG 0x168
87#define DMAR_MTRR_FIX4K_F8000_REG 0x170
88#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
89#define DMAR_MTRR_PHYSMASK0_REG 0x188
90#define DMAR_MTRR_PHYSBASE1_REG 0x190
91#define DMAR_MTRR_PHYSMASK1_REG 0x198
92#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
93#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
94#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
95#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
96#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
97#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
98#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
99#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
100#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
101#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
102#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
103#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
104#define DMAR_MTRR_PHYSBASE8_REG 0x200
105#define DMAR_MTRR_PHYSMASK8_REG 0x208
106#define DMAR_MTRR_PHYSBASE9_REG 0x210
107#define DMAR_MTRR_PHYSMASK9_REG 0x218
108#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
109#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
110#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
75 111
76#define OFFSET_STRIDE (9) 112#define OFFSET_STRIDE (9)
77 113
@@ -389,6 +425,33 @@ struct pasid_entry;
389struct pasid_state_entry; 425struct pasid_state_entry;
390struct page_req_dsc; 426struct page_req_dsc;
391 427
428/*
429 * 0: Present
430 * 1-11: Reserved
431 * 12-63: Context Ptr (12 - (haw-1))
432 * 64-127: Reserved
433 */
434struct root_entry {
435 u64 lo;
436 u64 hi;
437};
438
439/*
440 * low 64 bits:
441 * 0: present
442 * 1: fault processing disable
443 * 2-3: translation type
444 * 12-63: address space root
445 * high 64 bits:
446 * 0-2: address width
447 * 3-6: aval
448 * 8-23: domain id
449 */
450struct context_entry {
451 u64 lo;
452 u64 hi;
453};
454
392struct dmar_domain { 455struct dmar_domain {
393 int nid; /* node id */ 456 int nid; /* node id */
394 457
@@ -558,6 +621,15 @@ extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_
558extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); 621extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
559#endif 622#endif
560 623
624#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
625void intel_iommu_debugfs_init(void);
626#else
627static inline void intel_iommu_debugfs_init(void) {}
628#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
629
561extern const struct attribute_group *intel_iommu_groups[]; 630extern const struct attribute_group *intel_iommu_groups[];
631bool context_present(struct context_entry *context);
632struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
633 u8 devfn, int alloc);
562 634
563#endif 635#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index eeceac3376fc..1d6711c28271 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -45,7 +45,7 @@
45 * IRQF_PERCPU - Interrupt is per cpu 45 * IRQF_PERCPU - Interrupt is per cpu
46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing 46 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
47 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is 47 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
48 * registered first in an shared interrupt is considered for 48 * registered first in a shared interrupt is considered for
49 * performance reasons) 49 * performance reasons)
50 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. 50 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
51 * Used by threaded interrupts which need to keep the 51 * Used by threaded interrupts which need to keep the
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3555d54bf79a..9a4258154b25 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -6,6 +6,7 @@
6#include <linux/bitmap.h> 6#include <linux/bitmap.h>
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <linux/types.h> 8#include <linux/types.h>
9#include <linux/mm_types.h>
9 10
10struct address_space; 11struct address_space;
11struct fiemap_extent_info; 12struct fiemap_extent_info;
@@ -141,7 +142,8 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
141 bool *did_zero, const struct iomap_ops *ops); 142 bool *did_zero, const struct iomap_ops *ops);
142int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 143int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
143 const struct iomap_ops *ops); 144 const struct iomap_ops *ops);
144int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); 145vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
146 const struct iomap_ops *ops);
145int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 147int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
146 loff_t start, loff_t len, const struct iomap_ops *ops); 148 loff_t start, loff_t len, const struct iomap_ops *ops);
147loff_t iomap_seek_hole(struct inode *inode, loff_t offset, 149loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 87994c265bf5..a1d28f42cb77 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -124,6 +124,7 @@ enum iommu_attr {
124 DOMAIN_ATTR_FSL_PAMU_ENABLE, 124 DOMAIN_ATTR_FSL_PAMU_ENABLE,
125 DOMAIN_ATTR_FSL_PAMUV1, 125 DOMAIN_ATTR_FSL_PAMUV1,
126 DOMAIN_ATTR_NESTING, /* two stages of translation */ 126 DOMAIN_ATTR_NESTING, /* two stages of translation */
127 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
127 DOMAIN_ATTR_MAX, 128 DOMAIN_ATTR_MAX,
128}; 129};
129 130
@@ -181,8 +182,6 @@ struct iommu_resv_region {
181 * @apply_resv_region: Temporary helper call-back for iova reserved ranges 182 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
182 * @domain_window_enable: Configure and enable a particular window for a domain 183 * @domain_window_enable: Configure and enable a particular window for a domain
183 * @domain_window_disable: Disable a particular window for a domain 184 * @domain_window_disable: Disable a particular window for a domain
184 * @domain_set_windows: Set the number of windows for a domain
185 * @domain_get_windows: Return the number of windows for a domain
186 * @of_xlate: add OF master IDs to iommu grouping 185 * @of_xlate: add OF master IDs to iommu grouping
187 * @pgsize_bitmap: bitmap of all possible supported page sizes 186 * @pgsize_bitmap: bitmap of all possible supported page sizes
188 */ 187 */
@@ -223,10 +222,6 @@ struct iommu_ops {
223 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, 222 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
224 phys_addr_t paddr, u64 size, int prot); 223 phys_addr_t paddr, u64 size, int prot);
225 void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); 224 void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
226 /* Set the number of windows per domain */
227 int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
228 /* Get the number of windows per domain */
229 u32 (*domain_get_windows)(struct iommu_domain *domain);
230 225
231 int (*of_xlate)(struct device *dev, struct of_phandle_args *args); 226 int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
232 bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); 227 bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
@@ -293,6 +288,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
293extern void iommu_detach_device(struct iommu_domain *domain, 288extern void iommu_detach_device(struct iommu_domain *domain,
294 struct device *dev); 289 struct device *dev);
295extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); 290extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
291extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
296extern int iommu_map(struct iommu_domain *domain, unsigned long iova, 292extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
297 phys_addr_t paddr, size_t size, int prot); 293 phys_addr_t paddr, size_t size, int prot);
298extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, 294extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -377,6 +373,8 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain)
377extern struct iommu_group *pci_device_group(struct device *dev); 373extern struct iommu_group *pci_device_group(struct device *dev);
378/* Generic device grouping function */ 374/* Generic device grouping function */
379extern struct iommu_group *generic_device_group(struct device *dev); 375extern struct iommu_group *generic_device_group(struct device *dev);
376/* FSL-MC device grouping function */
377struct iommu_group *fsl_mc_device_group(struct device *dev);
380 378
381/** 379/**
382 * struct iommu_fwspec - per-device IOMMU instance data 380 * struct iommu_fwspec - per-device IOMMU instance data
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 928442dda565..0b93bf96693e 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -75,6 +75,7 @@ struct iova_domain {
75 unsigned long granule; /* pfn granularity for this domain */ 75 unsigned long granule; /* pfn granularity for this domain */
76 unsigned long start_pfn; /* Lower limit for this domain */ 76 unsigned long start_pfn; /* Lower limit for this domain */
77 unsigned long dma_32bit_pfn; 77 unsigned long dma_32bit_pfn;
78 unsigned long max32_alloc_size; /* Size of last failed allocation */
78 struct iova anchor; /* rbtree lookup anchor */ 79 struct iova anchor; /* rbtree lookup anchor */
79 struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ 80 struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
80 81
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 41f5c086f670..ef61676cfe05 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -27,7 +27,7 @@ struct device;
27 * Opaque type for a IPMI message user. One of these is needed to 27 * Opaque type for a IPMI message user. One of these is needed to
28 * send and receive messages. 28 * send and receive messages.
29 */ 29 */
30typedef struct ipmi_user *ipmi_user_t; 30struct ipmi_user;
31 31
32/* 32/*
33 * Stuff coming from the receive interface comes as one of these. 33 * Stuff coming from the receive interface comes as one of these.
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 7d5fd38d5282..8c4e2ab696c3 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -28,7 +28,7 @@ struct device;
28 */ 28 */
29 29
30/* Structure for the low-level drivers. */ 30/* Structure for the low-level drivers. */
31typedef struct ipmi_smi *ipmi_smi_t; 31struct ipmi_smi;
32 32
33/* 33/*
34 * Messages to/from the lower layer. The smi interface will take one 34 * Messages to/from the lower layer. The smi interface will take one
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8415bf1a9776..495e834c1367 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -274,7 +274,8 @@ struct ipv6_pinfo {
274 */ 274 */
275 dontfrag:1, 275 dontfrag:1,
276 autoflowlabel:1, 276 autoflowlabel:1,
277 autoflowlabel_set:1; 277 autoflowlabel_set:1,
278 mc_all:1;
278 __u8 min_hopcount; 279 __u8 min_hopcount;
279 __u8 tclass; 280 __u8 tclass;
280 __be32 rcv_flowinfo; 281 __be32 rcv_flowinfo;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..c9bffda04a45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
1151void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); 1151void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
1152int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); 1152int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
1153void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); 1153void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
1154int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu); 1154int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
1155 unsigned int *mapped_cpu);
1155void irq_matrix_reserve(struct irq_matrix *m); 1156void irq_matrix_reserve(struct irq_matrix *m);
1156void irq_matrix_remove_reserved(struct irq_matrix *m); 1157void irq_matrix_remove_reserved(struct irq_matrix *m);
1157int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, 1158int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index 0a83b4379f34..9a1a479a2bf4 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -13,6 +13,12 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/ioport.h> 14#include <linux/ioport.h>
15 15
16#define GICD_INT_DEF_PRI 0xa0
17#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
18 (GICD_INT_DEF_PRI << 16) |\
19 (GICD_INT_DEF_PRI << 8) |\
20 GICD_INT_DEF_PRI)
21
16enum gic_type { 22enum gic_type {
17 GIC_V2, 23 GIC_V2,
18 GIC_V3, 24 GIC_V3,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8bdbb5f29494..071b4cbdf010 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -357,6 +357,8 @@
357#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) 357#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
358#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) 358#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
359 359
360#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12))
361
360#define GITS_BASER_NR_REGS 8 362#define GITS_BASER_NR_REGS 8
361 363
362#define GITS_BASER_VALID (1ULL << 63) 364#define GITS_BASER_VALID (1ULL << 63)
@@ -388,6 +390,9 @@
388#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) 390#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
389#define GITS_BASER_PHYS_52_to_48(phys) \ 391#define GITS_BASER_PHYS_52_to_48(phys) \
390 (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) 392 (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
393#define GITS_BASER_ADDR_48_to_52(baser) \
394 (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
395
391#define GITS_BASER_SHAREABILITY_SHIFT (10) 396#define GITS_BASER_SHAREABILITY_SHIFT (10)
392#define GITS_BASER_InnerShareable \ 397#define GITS_BASER_InnerShareable \
393 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) 398 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -585,8 +590,10 @@ struct rdists {
585 void __iomem *rd_base; 590 void __iomem *rd_base;
586 struct page *pend_page; 591 struct page *pend_page;
587 phys_addr_t phys_base; 592 phys_addr_t phys_base;
593 bool lpi_enabled;
588 } __percpu *rdist; 594 } __percpu *rdist;
589 struct page *prop_page; 595 phys_addr_t prop_table_pa;
596 void *prop_table_va;
590 u64 flags; 597 u64 flags;
591 u32 gicd_typer; 598 u32 gicd_typer;
592 bool has_vlpis; 599 bool has_vlpis;
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 6c4aaf04046c..626179077bb0 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -65,11 +65,6 @@
65#define GICD_INT_EN_CLR_X32 0xffffffff 65#define GICD_INT_EN_CLR_X32 0xffffffff
66#define GICD_INT_EN_SET_SGI 0x0000ffff 66#define GICD_INT_EN_SET_SGI 0x0000ffff
67#define GICD_INT_EN_CLR_PPI 0xffff0000 67#define GICD_INT_EN_CLR_PPI 0xffff0000
68#define GICD_INT_DEF_PRI 0xa0
69#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
70 (GICD_INT_DEF_PRI << 16) |\
71 (GICD_INT_DEF_PRI << 8) |\
72 GICD_INT_DEF_PRI)
73 68
74#define GICD_IIDR_IMPLEMENTER_SHIFT 0 69#define GICD_IIDR_IMPLEMENTER_SHIFT 0
75#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) 70#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index dccfa65aee96..068aa46f0d55 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -75,6 +75,7 @@ struct irq_fwspec {
75enum irq_domain_bus_token { 75enum irq_domain_bus_token {
76 DOMAIN_BUS_ANY = 0, 76 DOMAIN_BUS_ANY = 0,
77 DOMAIN_BUS_WIRED, 77 DOMAIN_BUS_WIRED,
78 DOMAIN_BUS_GENERIC_MSI,
78 DOMAIN_BUS_PCI_MSI, 79 DOMAIN_BUS_PCI_MSI,
79 DOMAIN_BUS_PLATFORM_MSI, 80 DOMAIN_BUS_PLATFORM_MSI,
80 DOMAIN_BUS_NEXUS, 81 DOMAIN_BUS_NEXUS,
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 1a0b6f17a5d6..5df6a621e464 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -119,6 +119,68 @@ struct static_key {
119 119
120#ifdef HAVE_JUMP_LABEL 120#ifdef HAVE_JUMP_LABEL
121#include <asm/jump_label.h> 121#include <asm/jump_label.h>
122
123#ifndef __ASSEMBLY__
124#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
125
126struct jump_entry {
127 s32 code;
128 s32 target;
129 long key; // key may be far away from the core kernel under KASLR
130};
131
132static inline unsigned long jump_entry_code(const struct jump_entry *entry)
133{
134 return (unsigned long)&entry->code + entry->code;
135}
136
137static inline unsigned long jump_entry_target(const struct jump_entry *entry)
138{
139 return (unsigned long)&entry->target + entry->target;
140}
141
142static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
143{
144 long offset = entry->key & ~3L;
145
146 return (struct static_key *)((unsigned long)&entry->key + offset);
147}
148
149#else
150
151static inline unsigned long jump_entry_code(const struct jump_entry *entry)
152{
153 return entry->code;
154}
155
156static inline unsigned long jump_entry_target(const struct jump_entry *entry)
157{
158 return entry->target;
159}
160
161static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
162{
163 return (struct static_key *)((unsigned long)entry->key & ~3UL);
164}
165
166#endif
167
168static inline bool jump_entry_is_branch(const struct jump_entry *entry)
169{
170 return (unsigned long)entry->key & 1UL;
171}
172
173static inline bool jump_entry_is_init(const struct jump_entry *entry)
174{
175 return (unsigned long)entry->key & 2UL;
176}
177
178static inline void jump_entry_set_init(struct jump_entry *entry)
179{
180 entry->key |= 2;
181}
182
183#endif
122#endif 184#endif
123 185
124#ifndef __ASSEMBLY__ 186#ifndef __ASSEMBLY__
@@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[];
151extern struct jump_entry __stop___jump_table[]; 213extern struct jump_entry __stop___jump_table[];
152 214
153extern void jump_label_init(void); 215extern void jump_label_init(void);
154extern void jump_label_invalidate_initmem(void);
155extern void jump_label_lock(void); 216extern void jump_label_lock(void);
156extern void jump_label_unlock(void); 217extern void jump_label_unlock(void);
157extern void arch_jump_label_transform(struct jump_entry *entry, 218extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void)
199 static_key_initialized = true; 260 static_key_initialized = true;
200} 261}
201 262
202static inline void jump_label_invalidate_initmem(void) {}
203
204static __always_inline bool static_key_false(struct static_key *key) 263static __always_inline bool static_key_false(struct static_key *key)
205{ 264{
206 if (unlikely(static_key_count(key) > 0)) 265 if (unlikely(static_key_count(key) > 0))
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 814643f7ee52..5b36b1287a5a 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -477,10 +477,11 @@ static inline void kernfs_init(void) { }
477 * @buf: buffer to copy @kn's name into 477 * @buf: buffer to copy @kn's name into
478 * @buflen: size of @buf 478 * @buflen: size of @buf
479 * 479 *
480 * Builds and returns the full path of @kn in @buf of @buflen bytes. The 480 * If @kn is NULL result will be "(null)".
481 * path is built from the end of @buf so the returned pointer usually 481 *
482 * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated 482 * Returns the length of the full path. If the full length is equal to or
483 * and %NULL is returned. 483 * greater than @buflen, @buf contains the truncated path with the trailing
484 * '\0'. On error, -errno is returned.
484 */ 485 */
485static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen) 486static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
486{ 487{
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0205aee44ded..c926698040e0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
733void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 733void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
734int kvm_vcpu_yield_to(struct kvm_vcpu *target); 734int kvm_vcpu_yield_to(struct kvm_vcpu *target);
735void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); 735void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
736void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
737void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
738 736
739void kvm_flush_remote_tlbs(struct kvm *kvm); 737void kvm_flush_remote_tlbs(struct kvm *kvm);
740void kvm_reload_remote_mmus(struct kvm *kvm); 738void kvm_reload_remote_mmus(struct kvm *kvm);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 834683d603f9..7393a316d9fa 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -22,6 +22,7 @@
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23 23
24struct device; 24struct device;
25struct led_pattern;
25/* 26/*
26 * LED Core 27 * LED Core
27 */ 28 */
@@ -88,6 +89,10 @@ struct led_classdev {
88 unsigned long *delay_on, 89 unsigned long *delay_on,
89 unsigned long *delay_off); 90 unsigned long *delay_off);
90 91
92 int (*pattern_set)(struct led_classdev *led_cdev,
93 struct led_pattern *pattern, u32 len, int repeat);
94 int (*pattern_clear)(struct led_classdev *led_cdev);
95
91 struct device *dev; 96 struct device *dev;
92 const struct attribute_group **groups; 97 const struct attribute_group **groups;
93 98
@@ -472,4 +477,14 @@ static inline void led_classdev_notify_brightness_hw_changed(
472 struct led_classdev *led_cdev, enum led_brightness brightness) { } 477 struct led_classdev *led_cdev, enum led_brightness brightness) { }
473#endif 478#endif
474 479
480/**
481 * struct led_pattern - pattern interval settings
482 * @delta_t: pattern interval delay, in milliseconds
483 * @brightness: pattern interval brightness
484 */
485struct led_pattern {
486 u32 delta_t;
487 int brightness;
488};
489
475#endif /* __LINUX_LEDS_H_INCLUDED */ 490#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index c6ac1fe7ec68..edb0f0c30904 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -2,6 +2,7 @@
2#ifndef LIBFDT_ENV_H 2#ifndef LIBFDT_ENV_H
3#define LIBFDT_ENV_H 3#define LIBFDT_ENV_H
4 4
5#include <linux/kernel.h> /* For INT_MAX */
5#include <linux/string.h> 6#include <linux/string.h>
6 7
7#include <asm/byteorder.h> 8#include <asm/byteorder.h>
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index e9e0d1c7eaf5..2fdeac1a420d 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -86,8 +86,8 @@ struct nvm_chk_meta;
86typedef int (nvm_id_fn)(struct nvm_dev *); 86typedef int (nvm_id_fn)(struct nvm_dev *);
87typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); 87typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
88typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); 88typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
89typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *, 89typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
90 sector_t, int); 90 struct nvm_chk_meta *);
91typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); 91typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
92typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); 92typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
93typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); 93typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -305,6 +305,8 @@ struct nvm_rq {
305 u64 ppa_status; /* ppa media status */ 305 u64 ppa_status; /* ppa media status */
306 int error; 306 int error;
307 307
308 int is_seq; /* Sequential hint flag. 1.2 only */
309
308 void *private; 310 void *private;
309}; 311};
310 312
@@ -318,6 +320,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
318 return rqdata + 1; 320 return rqdata + 1;
319} 321}
320 322
323static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
324{
325 return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
326}
327
321enum { 328enum {
322 NVM_BLK_ST_FREE = 0x1, /* Free block */ 329 NVM_BLK_ST_FREE = 0x1, /* Free block */
323 NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ 330 NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
@@ -485,6 +492,144 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
485 return l; 492 return l;
486} 493}
487 494
495static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
496 struct ppa_addr p)
497{
498 struct nvm_geo *geo = &dev->geo;
499 u64 caddr;
500
501 if (geo->version == NVM_OCSSD_SPEC_12) {
502 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
503
504 caddr = (u64)p.g.pg << ppaf->pg_offset;
505 caddr |= (u64)p.g.pl << ppaf->pln_offset;
506 caddr |= (u64)p.g.sec << ppaf->sec_offset;
507 } else {
508 caddr = p.m.sec;
509 }
510
511 return caddr;
512}
513
514static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
515 void *addrf, u32 ppa32)
516{
517 struct ppa_addr ppa64;
518
519 ppa64.ppa = 0;
520
521 if (ppa32 == -1) {
522 ppa64.ppa = ADDR_EMPTY;
523 } else if (ppa32 & (1U << 31)) {
524 ppa64.c.line = ppa32 & ((~0U) >> 1);
525 ppa64.c.is_cached = 1;
526 } else {
527 struct nvm_geo *geo = &dev->geo;
528
529 if (geo->version == NVM_OCSSD_SPEC_12) {
530 struct nvm_addrf_12 *ppaf = addrf;
531
532 ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
533 ppaf->ch_offset;
534 ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
535 ppaf->lun_offset;
536 ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
537 ppaf->blk_offset;
538 ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
539 ppaf->pg_offset;
540 ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
541 ppaf->pln_offset;
542 ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
543 ppaf->sec_offset;
544 } else {
545 struct nvm_addrf *lbaf = addrf;
546
547 ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
548 lbaf->ch_offset;
549 ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
550 lbaf->lun_offset;
551 ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
552 lbaf->chk_offset;
553 ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
554 lbaf->sec_offset;
555 }
556 }
557
558 return ppa64;
559}
560
561static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
562 void *addrf, struct ppa_addr ppa64)
563{
564 u32 ppa32 = 0;
565
566 if (ppa64.ppa == ADDR_EMPTY) {
567 ppa32 = ~0U;
568 } else if (ppa64.c.is_cached) {
569 ppa32 |= ppa64.c.line;
570 ppa32 |= 1U << 31;
571 } else {
572 struct nvm_geo *geo = &dev->geo;
573
574 if (geo->version == NVM_OCSSD_SPEC_12) {
575 struct nvm_addrf_12 *ppaf = addrf;
576
577 ppa32 |= ppa64.g.ch << ppaf->ch_offset;
578 ppa32 |= ppa64.g.lun << ppaf->lun_offset;
579 ppa32 |= ppa64.g.blk << ppaf->blk_offset;
580 ppa32 |= ppa64.g.pg << ppaf->pg_offset;
581 ppa32 |= ppa64.g.pl << ppaf->pln_offset;
582 ppa32 |= ppa64.g.sec << ppaf->sec_offset;
583 } else {
584 struct nvm_addrf *lbaf = addrf;
585
586 ppa32 |= ppa64.m.grp << lbaf->ch_offset;
587 ppa32 |= ppa64.m.pu << lbaf->lun_offset;
588 ppa32 |= ppa64.m.chk << lbaf->chk_offset;
589 ppa32 |= ppa64.m.sec << lbaf->sec_offset;
590 }
591 }
592
593 return ppa32;
594}
595
596static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
597 struct ppa_addr *ppa)
598{
599 struct nvm_geo *geo = &dev->geo;
600 int last = 0;
601
602 if (geo->version == NVM_OCSSD_SPEC_12) {
603 int sec = ppa->g.sec;
604
605 sec++;
606 if (sec == geo->ws_min) {
607 int pg = ppa->g.pg;
608
609 sec = 0;
610 pg++;
611 if (pg == geo->num_pg) {
612 int pl = ppa->g.pl;
613
614 pg = 0;
615 pl++;
616 if (pl == geo->num_pln)
617 last = 1;
618
619 ppa->g.pl = pl;
620 }
621 ppa->g.pg = pg;
622 }
623 ppa->g.sec = sec;
624 } else {
625 ppa->m.sec++;
626 if (ppa->m.sec == geo->clba)
627 last = 1;
628 }
629
630 return last;
631}
632
488typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); 633typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
489typedef sector_t (nvm_tgt_capacity_fn)(void *); 634typedef sector_t (nvm_tgt_capacity_fn)(void *);
490typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, 635typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -493,9 +638,15 @@ typedef void (nvm_tgt_exit_fn)(void *, bool);
493typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); 638typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
494typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); 639typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
495 640
641enum {
642 NVM_TGT_F_DEV_L2P = 0,
643 NVM_TGT_F_HOST_L2P = 1 << 0,
644};
645
496struct nvm_tgt_type { 646struct nvm_tgt_type {
497 const char *name; 647 const char *name;
498 unsigned int version[3]; 648 unsigned int version[3];
649 int flags;
499 650
500 /* target entry points */ 651 /* target entry points */
501 nvm_tgt_make_rq_fn *make_rq; 652 nvm_tgt_make_rq_fn *make_rq;
@@ -524,18 +675,13 @@ extern struct nvm_dev *nvm_alloc_dev(int);
524extern int nvm_register(struct nvm_dev *); 675extern int nvm_register(struct nvm_dev *);
525extern void nvm_unregister(struct nvm_dev *); 676extern void nvm_unregister(struct nvm_dev *);
526 677
527 678extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
528extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, 679 int, struct nvm_chk_meta *);
529 struct nvm_chk_meta *meta, struct ppa_addr ppa, 680extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
530 int nchks);
531
532extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
533 int, int); 681 int, int);
534extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); 682extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
535extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); 683extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
536extern void nvm_end_io(struct nvm_rq *); 684extern void nvm_end_io(struct nvm_rq *);
537extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
538extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
539 685
540#else /* CONFIG_NVM */ 686#else /* CONFIG_NVM */
541struct nvm_dev_ops; 687struct nvm_dev_ops;
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d7618c41f74c..7c47b1a471d4 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -90,6 +90,7 @@
90#ifndef WEAK 90#ifndef WEAK
91#define WEAK(name) \ 91#define WEAK(name) \
92 .weak name ASM_NL \ 92 .weak name ASM_NL \
93 ALIGN ASM_NL \
93 name: 94 name:
94#endif 95#endif
95 96
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
new file mode 100644
index 000000000000..22443d7fb5cd
--- /dev/null
+++ b/include/linux/linkmode.h
@@ -0,0 +1,76 @@
1#ifndef __LINKMODE_H
2#define __LINKMODE_H
3
4#include <linux/bitmap.h>
5#include <linux/ethtool.h>
6#include <uapi/linux/ethtool.h>
7
8static inline void linkmode_zero(unsigned long *dst)
9{
10 bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
11}
12
13static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
14{
15 bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
16}
17
18static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
19 const unsigned long *b)
20{
21 bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
22}
23
24static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
25 const unsigned long *b)
26{
27 bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
28}
29
30static inline bool linkmode_empty(const unsigned long *src)
31{
32 return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
33}
34
35static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
36 const unsigned long *src2)
37{
38 return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
39}
40
41static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
42{
43 __set_bit(nr, addr);
44}
45
46static inline void linkmode_set_bit_array(const int *array, int array_size,
47 unsigned long *addr)
48{
49 int i;
50
51 for (i = 0; i < array_size; i++)
52 linkmode_set_bit(array[i], addr);
53}
54
55static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
56{
57 __clear_bit(nr, addr);
58}
59
60static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
61{
62 __change_bit(nr, addr);
63}
64
65static inline int linkmode_test_bit(int nr, volatile unsigned long *addr)
66{
67 return test_bit(nr, addr);
68}
69
70static inline int linkmode_equal(const unsigned long *src1,
71 const unsigned long *src2)
72{
73 return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
74}
75
76#endif /* __LINKMODE_H */
diff --git a/include/linux/list.h b/include/linux/list.h
index de04cc5ed536..edb7628e46ed 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -184,6 +184,29 @@ static inline void list_move_tail(struct list_head *list,
184} 184}
185 185
186/** 186/**
187 * list_bulk_move_tail - move a subsection of a list to its tail
188 * @head: the head that will follow our entry
189 * @first: first entry to move
190 * @last: last entry to move, can be the same as first
191 *
192 * Move all entries between @first and including @last before @head.
193 * All three entries must belong to the same linked list.
194 */
195static inline void list_bulk_move_tail(struct list_head *head,
196 struct list_head *first,
197 struct list_head *last)
198{
199 first->prev->next = last->next;
200 last->next->prev = first->prev;
201
202 head->prev->next = first;
203 first->prev = head->prev;
204
205 last->next = head;
206 head->prev = last;
207}
208
209/**
187 * list_is_last - tests whether @list is the last entry in list @head 210 * list_is_last - tests whether @list is the last entry in list @head
188 * @list: the entry to test 211 * @list: the entry to test
189 * @head: the head of the list 212 * @head: the head of the list
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b0d0b51c4d85..1fd82ff99c65 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -99,13 +99,8 @@ struct lock_class {
99 */ 99 */
100 unsigned int version; 100 unsigned int version;
101 101
102 /*
103 * Statistics counter:
104 */
105 unsigned long ops;
106
107 const char *name;
108 int name_version; 102 int name_version;
103 const char *name;
109 104
110#ifdef CONFIG_LOCK_STAT 105#ifdef CONFIG_LOCK_STAT
111 unsigned long contention_point[LOCKSTAT_POINTS]; 106 unsigned long contention_point[LOCKSTAT_POINTS];
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 97a020c616ad..aaeb7fa24dc4 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -672,7 +672,7 @@
672 * Return 0 if permission is granted. 672 * Return 0 if permission is granted.
673 * @task_kill: 673 * @task_kill:
674 * Check permission before sending signal @sig to @p. @info can be NULL, 674 * Check permission before sending signal @sig to @p. @info can be NULL,
675 * the constant 1, or a pointer to a siginfo structure. If @info is 1 or 675 * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or
676 * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming 676 * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
677 * from the kernel and should typically be permitted. 677 * from the kernel and should typically be permitted.
678 * SIGIO signals are handled separately by the send_sigiotask hook in 678 * SIGIO signals are handled separately by the send_sigiotask hook in
@@ -1606,7 +1606,7 @@ union security_list_options {
1606 int (*task_setscheduler)(struct task_struct *p); 1606 int (*task_setscheduler)(struct task_struct *p);
1607 int (*task_getscheduler)(struct task_struct *p); 1607 int (*task_getscheduler)(struct task_struct *p);
1608 int (*task_movememory)(struct task_struct *p); 1608 int (*task_movememory)(struct task_struct *p);
1609 int (*task_kill)(struct task_struct *p, struct siginfo *info, 1609 int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info,
1610 int sig, const struct cred *cred); 1610 int sig, const struct cred *cred);
1611 int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, 1611 int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
1612 unsigned long arg4, unsigned long arg5); 1612 unsigned long arg4, unsigned long arg5);
@@ -2039,6 +2039,18 @@ extern char *lsm_names;
2039extern void security_add_hooks(struct security_hook_list *hooks, int count, 2039extern void security_add_hooks(struct security_hook_list *hooks, int count,
2040 char *lsm); 2040 char *lsm);
2041 2041
2042struct lsm_info {
2043 const char *name; /* Required. */
2044 int (*init)(void); /* Required. */
2045};
2046
2047extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
2048
2049#define DEFINE_LSM(lsm) \
2050 static struct lsm_info __lsm_##lsm \
2051 __used __section(.lsm_info.init) \
2052 __aligned(sizeof(unsigned long))
2053
2042#ifdef CONFIG_SECURITY_SELINUX_DISABLE 2054#ifdef CONFIG_SECURITY_SELINUX_DISABLE
2043/* 2055/*
2044 * Assuring the safety of deleting a security module is up to 2056 * Assuring the safety of deleting a security module is up to
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 837f2f2d1d34..bb2c84afb80c 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -281,4 +281,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
281} 281}
282#endif /* mul_u64_u32_div */ 282#endif /* mul_u64_u32_div */
283 283
284#define DIV64_U64_ROUND_UP(ll, d) \
285 ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
286
284#endif /* _LINUX_MATH64_H */ 287#endif /* _LINUX_MATH64_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 516920549378..2acdd046df2d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -265,21 +265,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
265 for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ 265 for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
266 nid, flags, p_start, p_end, p_nid) 266 nid, flags, p_start, p_end, p_nid)
267 267
268/**
269 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
270 * @i: u64 used as loop variable
271 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
272 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
273 *
274 * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
275 * Available as soon as memblock is initialized.
276 * Note: because this memory does not belong to any physical node, flags and
277 * nid arguments do not make sense and thus not exported as arguments.
278 */
279#define for_each_resv_unavail_range(i, p_start, p_end) \
280 for_each_mem_range(i, &memblock.reserved, &memblock.memory, \
281 NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
282
283static inline void memblock_set_region_flags(struct memblock_region *r, 268static inline void memblock_set_region_flags(struct memblock_region *r,
284 enum memblock_flags flags) 269 enum memblock_flags flags)
285{ 270{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 652f602167df..7ab2120155a4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -78,7 +78,7 @@ struct mem_cgroup_reclaim_cookie {
78 78
79struct mem_cgroup_id { 79struct mem_cgroup_id {
80 int id; 80 int id;
81 atomic_t ref; 81 refcount_t ref;
82}; 82};
83 83
84/* 84/*
@@ -1268,10 +1268,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
1268void memcg_kmem_put_cache(struct kmem_cache *cachep); 1268void memcg_kmem_put_cache(struct kmem_cache *cachep);
1269int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, 1269int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
1270 struct mem_cgroup *memcg); 1270 struct mem_cgroup *memcg);
1271
1272#ifdef CONFIG_MEMCG_KMEM
1271int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); 1273int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
1272void memcg_kmem_uncharge(struct page *page, int order); 1274void memcg_kmem_uncharge(struct page *page, int order);
1273 1275
1274#ifdef CONFIG_MEMCG_KMEM
1275extern struct static_key_false memcg_kmem_enabled_key; 1276extern struct static_key_false memcg_kmem_enabled_key;
1276extern struct workqueue_struct *memcg_kmem_cache_wq; 1277extern struct workqueue_struct *memcg_kmem_cache_wq;
1277 1278
@@ -1307,6 +1308,16 @@ extern int memcg_expand_shrinker_maps(int new_id);
1307extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, 1308extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1308 int nid, int shrinker_id); 1309 int nid, int shrinker_id);
1309#else 1310#else
1311
1312static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
1313{
1314 return 0;
1315}
1316
1317static inline void memcg_kmem_uncharge(struct page *page, int order)
1318{
1319}
1320
1310#define for_each_memcg_cache_index(_idx) \ 1321#define for_each_memcg_cache_index(_idx) \
1311 for (; NULL; ) 1322 for (; NULL; )
1312 1323
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f91f9e763557..0ac69ddf5fc4 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -53,11 +53,16 @@ struct vmem_altmap {
53 * wakeup event whenever a page is unpinned and becomes idle. This 53 * wakeup event whenever a page is unpinned and becomes idle. This
54 * wakeup is used to coordinate physical address space management (ex: 54 * wakeup is used to coordinate physical address space management (ex:
55 * fs truncate/hole punch) vs pinned pages (ex: device dma). 55 * fs truncate/hole punch) vs pinned pages (ex: device dma).
56 *
57 * MEMORY_DEVICE_PCI_P2PDMA:
58 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
59 * transactions.
56 */ 60 */
57enum memory_type { 61enum memory_type {
58 MEMORY_DEVICE_PRIVATE = 1, 62 MEMORY_DEVICE_PRIVATE = 1,
59 MEMORY_DEVICE_PUBLIC, 63 MEMORY_DEVICE_PUBLIC,
60 MEMORY_DEVICE_FS_DAX, 64 MEMORY_DEVICE_FS_DAX,
65 MEMORY_DEVICE_PCI_P2PDMA,
61}; 66};
62 67
63/* 68/*
@@ -120,6 +125,7 @@ struct dev_pagemap {
120 struct device *dev; 125 struct device *dev;
121 void *data; 126 void *data;
122 enum memory_type type; 127 enum memory_type type;
128 u64 pci_p2pdma_bus_offset;
123}; 129};
124 130
125#ifdef CONFIG_ZONE_DEVICE 131#ifdef CONFIG_ZONE_DEVICE
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 6e1ab9bead28..5fd0e429f472 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2132,6 +2132,7 @@ struct ec_response_get_next_event_v1 {
2132/* Switches */ 2132/* Switches */
2133#define EC_MKBP_LID_OPEN 0 2133#define EC_MKBP_LID_OPEN 0
2134#define EC_MKBP_TABLET_MODE 1 2134#define EC_MKBP_TABLET_MODE 1
2135#define EC_MKBP_BASE_ATTACHED 2
2135 2136
2136/*****************************************************************************/ 2137/*****************************************************************************/
2137/* Temperature sensor commands */ 2138/* Temperature sensor commands */
@@ -3102,6 +3103,16 @@ struct ec_params_usb_pd_info_request {
3102 uint8_t port; 3103 uint8_t port;
3103} __packed; 3104} __packed;
3104 3105
3106/*
3107 * This command will return the number of USB PD charge port + the number
3108 * of dedicated port present.
3109 * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports
3110 */
3111#define EC_CMD_CHARGE_PORT_COUNT 0x0105
3112struct ec_response_charge_port_count {
3113 uint8_t port_count;
3114} __packed;
3115
3105/* Read USB-PD Device discovery info */ 3116/* Read USB-PD Device discovery info */
3106#define EC_CMD_USB_PD_DISCOVERY 0x0113 3117#define EC_CMD_USB_PD_DISCOVERY 0x0113
3107struct ec_params_usb_pd_discovery_entry { 3118struct ec_params_usb_pd_discovery_entry {
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 8a125701ef7b..50bed4f89c1a 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -21,7 +21,7 @@
21/* 21/*
22 * Regulator configuration 22 * Regulator configuration
23 */ 23 */
24/* DA9063 regulator IDs */ 24/* DA9063 and DA9063L regulator IDs */
25enum { 25enum {
26 /* BUCKs */ 26 /* BUCKs */
27 DA9063_ID_BCORE1, 27 DA9063_ID_BCORE1,
@@ -37,18 +37,20 @@ enum {
37 DA9063_ID_BMEM_BIO_MERGED, 37 DA9063_ID_BMEM_BIO_MERGED,
38 /* When two BUCKs are merged, they cannot be reused separately */ 38 /* When two BUCKs are merged, they cannot be reused separately */
39 39
40 /* LDOs */ 40 /* LDOs on both DA9063 and DA9063L */
41 DA9063_ID_LDO3,
42 DA9063_ID_LDO7,
43 DA9063_ID_LDO8,
44 DA9063_ID_LDO9,
45 DA9063_ID_LDO11,
46
47 /* DA9063-only LDOs */
41 DA9063_ID_LDO1, 48 DA9063_ID_LDO1,
42 DA9063_ID_LDO2, 49 DA9063_ID_LDO2,
43 DA9063_ID_LDO3,
44 DA9063_ID_LDO4, 50 DA9063_ID_LDO4,
45 DA9063_ID_LDO5, 51 DA9063_ID_LDO5,
46 DA9063_ID_LDO6, 52 DA9063_ID_LDO6,
47 DA9063_ID_LDO7,
48 DA9063_ID_LDO8,
49 DA9063_ID_LDO9,
50 DA9063_ID_LDO10, 53 DA9063_ID_LDO10,
51 DA9063_ID_LDO11,
52}; 54};
53 55
54/* Regulators platform data */ 56/* Regulators platform data */
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
new file mode 100644
index 000000000000..ab16ad283def
--- /dev/null
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -0,0 +1,56 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Header file for the Ingenic JZ47xx TCU driver
4 */
5#ifndef __LINUX_MFD_INGENIC_TCU_H_
6#define __LINUX_MFD_INGENIC_TCU_H_
7
8#include <linux/bitops.h>
9
10#define TCU_REG_WDT_TDR 0x00
11#define TCU_REG_WDT_TCER 0x04
12#define TCU_REG_WDT_TCNT 0x08
13#define TCU_REG_WDT_TCSR 0x0c
14#define TCU_REG_TER 0x10
15#define TCU_REG_TESR 0x14
16#define TCU_REG_TECR 0x18
17#define TCU_REG_TSR 0x1c
18#define TCU_REG_TFR 0x20
19#define TCU_REG_TFSR 0x24
20#define TCU_REG_TFCR 0x28
21#define TCU_REG_TSSR 0x2c
22#define TCU_REG_TMR 0x30
23#define TCU_REG_TMSR 0x34
24#define TCU_REG_TMCR 0x38
25#define TCU_REG_TSCR 0x3c
26#define TCU_REG_TDFR0 0x40
27#define TCU_REG_TDHR0 0x44
28#define TCU_REG_TCNT0 0x48
29#define TCU_REG_TCSR0 0x4c
30#define TCU_REG_OST_DR 0xe0
31#define TCU_REG_OST_CNTL 0xe4
32#define TCU_REG_OST_CNTH 0xe8
33#define TCU_REG_OST_TCSR 0xec
34#define TCU_REG_TSTR 0xf0
35#define TCU_REG_TSTSR 0xf4
36#define TCU_REG_TSTCR 0xf8
37#define TCU_REG_OST_CNTHBUF 0xfc
38
39#define TCU_TCSR_RESERVED_BITS 0x3f
40#define TCU_TCSR_PARENT_CLOCK_MASK 0x07
41#define TCU_TCSR_PRESCALE_LSB 3
42#define TCU_TCSR_PRESCALE_MASK 0x38
43
44#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */
45#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
46#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
47
48#define TCU_WDT_TCER_TCEN BIT(0) /* Watchdog timer enable */
49
50#define TCU_CHANNEL_STRIDE 0x10
51#define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE))
52#define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE))
53#define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE))
54#define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE))
55
56#endif /* __LINUX_MFD_INGENIC_TCU_H_ */
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
index 439a7a617bc9..317e8608cf41 100644
--- a/include/linux/mfd/intel_msic.h
+++ b/include/linux/mfd/intel_msic.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC 3 * Core interface for Intel MSIC
3 * 4 *
4 * Copyright (C) 2011, Intel Corporation 5 * Copyright (C) 2011, Intel Corporation
5 * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */ 7 */
11 8
12#ifndef __LINUX_MFD_INTEL_MSIC_H__ 9#ifndef __LINUX_MFD_INTEL_MSIC_H__
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 5aacdb017a9f..ed1dfba5e5f9 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -1,17 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * intel_soc_pmic.h - Intel SoC PMIC Driver 3 * Intel SoC PMIC Driver
3 * 4 *
4 * Copyright (C) 2012-2014 Intel Corporation. All rights reserved. 5 * Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
5 * 6 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * Author: Yang, Bin <bin.yang@intel.com> 7 * Author: Yang, Bin <bin.yang@intel.com>
16 * Author: Zhu, Lejun <lejun.zhu@linux.intel.com> 8 * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
17 */ 9 */
@@ -25,6 +17,7 @@ struct intel_soc_pmic {
25 int irq; 17 int irq;
26 struct regmap *regmap; 18 struct regmap *regmap;
27 struct regmap_irq_chip_data *irq_chip_data; 19 struct regmap_irq_chip_data *irq_chip_data;
20 struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
28 struct regmap_irq_chip_data *irq_chip_data_tmu; 21 struct regmap_irq_chip_data *irq_chip_data_tmu;
29 struct regmap_irq_chip_data *irq_chip_data_bcu; 22 struct regmap_irq_chip_data *irq_chip_data_bcu;
30 struct regmap_irq_chip_data *irq_chip_data_adc; 23 struct regmap_irq_chip_data *irq_chip_data_adc;
diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 0c351bc85d2d..9be566cc58c6 100644
--- a/include/linux/mfd/intel_soc_pmic_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,16 +1,8 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Header file for Intel Broxton Whiskey Cove PMIC 3 * Header file for Intel Broxton Whiskey Cove PMIC
3 * 4 *
4 * Copyright (C) 2015 Intel Corporation. All rights reserved. 5 * Copyright (C) 2015 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */ 6 */
15 7
16#ifndef __INTEL_BXTWC_H__ 8#ifndef __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index c332681848ef..fe69c0f4398f 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -148,6 +148,7 @@ struct snd_soc_dapm_context;
148 * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1 148 * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1
149 * @pdata: our pdata 149 * @pdata: our pdata
150 * @irq_dev: the irqchip child driver device 150 * @irq_dev: the irqchip child driver device
151 * @irq_data: pointer to irqchip data for the child irqchip driver
151 * @irq: host irq number from SPI or I2C configuration 152 * @irq: host irq number from SPI or I2C configuration
152 * @out_clamp: indicates output clamp state for each analogue output 153 * @out_clamp: indicates output clamp state for each analogue output
153 * @out_shorted: indicates short circuit state for each analogue output 154 * @out_shorted: indicates short circuit state for each analogue output
@@ -175,6 +176,7 @@ struct madera {
175 struct madera_pdata pdata; 176 struct madera_pdata pdata;
176 177
177 struct device *irq_dev; 178 struct device *irq_dev;
179 struct regmap_irq_chip_data *irq_data;
178 int irq; 180 int irq;
179 181
180 unsigned int num_micbias; 182 unsigned int num_micbias;
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 0b311f39c8f4..8dc852402dbb 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -24,7 +24,6 @@
24 24
25struct gpio_desc; 25struct gpio_desc;
26struct pinctrl_map; 26struct pinctrl_map;
27struct madera_irqchip_pdata;
28struct madera_codec_pdata; 27struct madera_codec_pdata;
29 28
30/** 29/**
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index df75234f979d..a21374f8ad26 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -1,19 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip 3 * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
3 * 4 *
 4 * Copyright (C) 2014 Samsung Electronics 5 * Copyright (C) 2014 Samsung Electronics
5 * Chanwoo Choi <cw00.choi@samsung.com> 6 * Chanwoo Choi <cw00.choi@samsung.com>
6 * Krzysztof Kozlowski <krzk@kernel.org> 7 * Krzysztof Kozlowski <krzk@kernel.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */ 8 */
18 9
19#ifndef __MAX14577_PRIVATE_H__ 10#ifndef __MAX14577_PRIVATE_H__
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index d81b52bb8bee..8b3ef891ba42 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max14577.h - Driver for the Maxim 14577/77836 3 * max14577.h - Driver for the Maxim 14577/77836
3 * 4 *
@@ -5,16 +6,6 @@
5 * Chanwoo Choi <cw00.choi@samsung.com> 6 * Chanwoo Choi <cw00.choi@samsung.com>
6 * Krzysztof Kozlowski <krzk@kernel.org> 7 * Krzysztof Kozlowski <krzk@kernel.org>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * This driver is based on max8997.h 9 * This driver is based on max8997.h
19 * 10 *
20 * MAX14577 has MUIC, Charger devices. 11 * MAX14577 has MUIC, Charger devices.
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 643dae777b43..833e578e051e 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -1,22 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max77686-private.h - Voltage regulator driver for the Maxim 77686/802 3 * max77686-private.h - Voltage regulator driver for the Maxim 77686/802
3 * 4 *
4 * Copyright (C) 2012 Samsung Electrnoics 5 * Copyright (C) 2012 Samsung Electrnoics
5 * Chiwoong Byun <woong.byun@samsung.com> 6 * Chiwoong Byun <woong.byun@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 7 */
21 8
22#ifndef __LINUX_MFD_MAX77686_PRIV_H 9#ifndef __LINUX_MFD_MAX77686_PRIV_H
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d4b72d519115..d0fb510875e6 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max77686.h - Driver for the Maxim 77686/802 3 * max77686.h - Driver for the Maxim 77686/802
3 * 4 *
4 * Copyright (C) 2012 Samsung Electrnoics 5 * Copyright (C) 2012 Samsung Electrnoics
5 * Chiwoong Byun <woong.byun@samsung.com> 6 * Chiwoong Byun <woong.byun@samsung.com>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * This driver is based on max8997.h 8 * This driver is based on max8997.h
22 * 9 *
23 * MAX77686 has PMIC, RTC devices. 10 * MAX77686 has PMIC, RTC devices.
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index 095b121aa725..a5bce099f1ed 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,12 +1,8 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Common data shared between Maxim 77693 and 77843 drivers 3 * Common data shared between Maxim 77693 and 77843 drivers
3 * 4 *
4 * Copyright (C) 2015 Samsung Electronics 5 * Copyright (C) 2015 Samsung Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */ 6 */
11 7
12#ifndef __LINUX_MFD_MAX77693_COMMON_H 8#ifndef __LINUX_MFD_MAX77693_COMMON_H
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 3c7a63b98ad6..e798c81aec31 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max77693-private.h - Voltage regulator driver for the Maxim 77693 3 * max77693-private.h - Voltage regulator driver for the Maxim 77693
3 * 4 *
@@ -5,20 +6,6 @@
5 * SangYoung Son <hello.son@samsung.com> 6 * SangYoung Son <hello.son@samsung.com>
6 * 7 *
7 * This program is not provided / owned by Maxim Integrated Products. 8 * This program is not provided / owned by Maxim Integrated Products.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 9 */
23 10
24#ifndef __LINUX_MFD_MAX77693_PRIV_H 11#ifndef __LINUX_MFD_MAX77693_PRIV_H
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index d450f687301b..c67c16ba8649 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max77693.h - Driver for the Maxim 77693 3 * max77693.h - Driver for the Maxim 77693
3 * 4 *
@@ -6,20 +7,6 @@
6 * 7 *
7 * This program is not provided / owned by Maxim Integrated Products. 8 * This program is not provided / owned by Maxim Integrated Products.
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * This driver is based on max8997.h 10 * This driver is based on max8997.h
24 * 11 *
25 * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices. 12 * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index b8908bf8d315..0bc7454c4dbe 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -1,14 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Common variables for the Maxim MAX77843 driver 3 * Common variables for the Maxim MAX77843 driver
3 * 4 *
4 * Copyright (C) 2015 Samsung Electronics 5 * Copyright (C) 2015 Samsung Electronics
5 * Author: Jaewon Kim <jaewon02.kim@samsung.com> 6 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
6 * Author: Beomho Seo <beomho.seo@samsung.com> 7 * Author: Beomho Seo <beomho.seo@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */ 8 */
13 9
14#ifndef __MAX77843_PRIVATE_H_ 10#ifndef __MAX77843_PRIVATE_H_
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 78c76cd4d37b..a10cd6945232 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -1,22 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max8997-private.h - Voltage regulator driver for the Maxim 8997 3 * max8997-private.h - Voltage regulator driver for the Maxim 8997
3 * 4 *
4 * Copyright (C) 2010 Samsung Electrnoics 5 * Copyright (C) 2010 Samsung Electrnoics
5 * MyungJoo Ham <myungjoo.ham@samsung.com> 6 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 7 */
21 8
22#ifndef __LINUX_MFD_MAX8997_PRIV_H 9#ifndef __LINUX_MFD_MAX8997_PRIV_H
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..e955e2f0a2cc 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max8997.h - Driver for the Maxim 8997/8966 3 * max8997.h - Driver for the Maxim 8997/8966
3 * 4 *
4 * Copyright (C) 2009-2010 Samsung Electrnoics 5 * Copyright (C) 2009-2010 Samsung Electrnoics
5 * MyungJoo Ham <myungjoo.ham@samsung.com> 6 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * This driver is based on max8998.h 8 * This driver is based on max8998.h
22 * 9 *
23 * MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices. 10 * MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices.
@@ -178,7 +165,6 @@ struct max8997_led_platform_data {
178struct max8997_platform_data { 165struct max8997_platform_data {
179 /* IRQ */ 166 /* IRQ */
180 int ono; 167 int ono;
181 int wakeup;
182 168
183 /* ---- PMIC ---- */ 169 /* ---- PMIC ---- */
184 struct max8997_regulator_data *regulators; 170 struct max8997_regulator_data *regulators;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index d68ada502ff3..6deb5f577602 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max8998-private.h - Voltage regulator driver for the Maxim 8998 3 * max8998-private.h - Voltage regulator driver for the Maxim 8998
3 * 4 *
4 * Copyright (C) 2009-2010 Samsung Electrnoics 5 * Copyright (C) 2009-2010 Samsung Electrnoics
5 * Kyungmin Park <kyungmin.park@samsung.com> 6 * Kyungmin Park <kyungmin.park@samsung.com>
6 * Marek Szyprowski <m.szyprowski@samsung.com> 7 * Marek Szyprowski <m.szyprowski@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 8 */
22 9
23#ifndef __LINUX_MFD_MAX8998_PRIV_H 10#ifndef __LINUX_MFD_MAX8998_PRIV_H
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index e3956a654cbc..061af220dcd3 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * max8998.h - Voltage regulator driver for the Maxim 8998 3 * max8998.h - Voltage regulator driver for the Maxim 8998
3 * 4 *
4 * Copyright (C) 2009-2010 Samsung Electrnoics 5 * Copyright (C) 2009-2010 Samsung Electrnoics
5 * Kyungmin Park <kyungmin.park@samsung.com> 6 * Kyungmin Park <kyungmin.park@samsung.com>
6 * Marek Szyprowski <m.szyprowski@samsung.com> 7 * Marek Szyprowski <m.szyprowski@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 8 */
22 9
23#ifndef __LINUX_MFD_MAX8998_H 10#ifndef __LINUX_MFD_MAX8998_H
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 54a3cd808f9e..2ad9bdc0a5ec 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -249,6 +249,7 @@ struct mc13xxx_platform_data {
249#define MC13XXX_ADC0_TSMOD0 (1 << 12) 249#define MC13XXX_ADC0_TSMOD0 (1 << 12)
250#define MC13XXX_ADC0_TSMOD1 (1 << 13) 250#define MC13XXX_ADC0_TSMOD1 (1 << 13)
251#define MC13XXX_ADC0_TSMOD2 (1 << 14) 251#define MC13XXX_ADC0_TSMOD2 (1 << 14)
252#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
252#define MC13XXX_ADC0_ADINC1 (1 << 16) 253#define MC13XXX_ADC0_ADINC1 (1 << 16)
253#define MC13XXX_ADC0_ADINC2 (1 << 17) 254#define MC13XXX_ADC0_ADINC2 (1 << 17)
254 255
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index a528747f8aed..fd194bfc836f 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -1,112 +1,127 @@
1/* SPDX-License-Identifier: GPL-2.0-or-later */ 1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/* Copyright (C) 2018 ROHM Semiconductors */ 2/* Copyright (C) 2018 ROHM Semiconductors */
3 3
4#ifndef __LINUX_MFD_BD71837_H__ 4#ifndef __LINUX_MFD_BD718XX_H__
5#define __LINUX_MFD_BD71837_H__ 5#define __LINUX_MFD_BD718XX_H__
6 6
7#include <linux/regmap.h> 7#include <linux/regmap.h>
8 8
9enum { 9enum {
10 BD71837_BUCK1 = 0, 10 BD718XX_TYPE_BD71837 = 0,
11 BD71837_BUCK2, 11 BD718XX_TYPE_BD71847,
12 BD71837_BUCK3, 12 BD718XX_TYPE_AMOUNT
13 BD71837_BUCK4,
14 BD71837_BUCK5,
15 BD71837_BUCK6,
16 BD71837_BUCK7,
17 BD71837_BUCK8,
18 BD71837_LDO1,
19 BD71837_LDO2,
20 BD71837_LDO3,
21 BD71837_LDO4,
22 BD71837_LDO5,
23 BD71837_LDO6,
24 BD71837_LDO7,
25 BD71837_REGULATOR_CNT,
26}; 13};
27 14
28#define BD71837_BUCK1_VOLTAGE_NUM 0x40 15enum {
29#define BD71837_BUCK2_VOLTAGE_NUM 0x40 16 BD718XX_BUCK1 = 0,
30#define BD71837_BUCK3_VOLTAGE_NUM 0x40 17 BD718XX_BUCK2,
31#define BD71837_BUCK4_VOLTAGE_NUM 0x40 18 BD718XX_BUCK3,
19 BD718XX_BUCK4,
20 BD718XX_BUCK5,
21 BD718XX_BUCK6,
22 BD718XX_BUCK7,
23 BD718XX_BUCK8,
24 BD718XX_LDO1,
25 BD718XX_LDO2,
26 BD718XX_LDO3,
27 BD718XX_LDO4,
28 BD718XX_LDO5,
29 BD718XX_LDO6,
30 BD718XX_LDO7,
31 BD718XX_REGULATOR_AMOUNT,
32};
33
34/* Common voltage configurations */
35#define BD718XX_DVS_BUCK_VOLTAGE_NUM 0x3D
36#define BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM 0x3D
37
38#define BD718XX_LDO1_VOLTAGE_NUM 0x08
39#define BD718XX_LDO2_VOLTAGE_NUM 0x02
40#define BD718XX_LDO3_VOLTAGE_NUM 0x10
41#define BD718XX_LDO4_VOLTAGE_NUM 0x0A
42#define BD718XX_LDO6_VOLTAGE_NUM 0x0A
32 43
33#define BD71837_BUCK5_VOLTAGE_NUM 0x08 44/* BD71837 specific voltage configurations */
45#define BD71837_BUCK5_VOLTAGE_NUM 0x10
34#define BD71837_BUCK6_VOLTAGE_NUM 0x04 46#define BD71837_BUCK6_VOLTAGE_NUM 0x04
35#define BD71837_BUCK7_VOLTAGE_NUM 0x08 47#define BD71837_BUCK7_VOLTAGE_NUM 0x08
36#define BD71837_BUCK8_VOLTAGE_NUM 0x40
37
38#define BD71837_LDO1_VOLTAGE_NUM 0x04
39#define BD71837_LDO2_VOLTAGE_NUM 0x02
40#define BD71837_LDO3_VOLTAGE_NUM 0x10
41#define BD71837_LDO4_VOLTAGE_NUM 0x10
42#define BD71837_LDO5_VOLTAGE_NUM 0x10 48#define BD71837_LDO5_VOLTAGE_NUM 0x10
43#define BD71837_LDO6_VOLTAGE_NUM 0x10
44#define BD71837_LDO7_VOLTAGE_NUM 0x10 49#define BD71837_LDO7_VOLTAGE_NUM 0x10
45 50
51/* BD71847 specific voltage configurations */
52#define BD71847_BUCK3_VOLTAGE_NUM 0x18
53#define BD71847_BUCK4_VOLTAGE_NUM 0x08
54#define BD71847_LDO5_VOLTAGE_NUM 0x20
55
56/* Registers specific to BD71837 */
57enum {
58 BD71837_REG_BUCK3_CTRL = 0x07,
59 BD71837_REG_BUCK4_CTRL = 0x08,
60 BD71837_REG_BUCK3_VOLT_RUN = 0x12,
61 BD71837_REG_BUCK4_VOLT_RUN = 0x13,
62 BD71837_REG_LDO7_VOLT = 0x1E,
63};
64
65/* Registers common for BD71837 and BD71847 */
46enum { 66enum {
47 BD71837_REG_REV = 0x00, 67 BD718XX_REG_REV = 0x00,
48 BD71837_REG_SWRESET = 0x01, 68 BD718XX_REG_SWRESET = 0x01,
49 BD71837_REG_I2C_DEV = 0x02, 69 BD718XX_REG_I2C_DEV = 0x02,
50 BD71837_REG_PWRCTRL0 = 0x03, 70 BD718XX_REG_PWRCTRL0 = 0x03,
51 BD71837_REG_PWRCTRL1 = 0x04, 71 BD718XX_REG_PWRCTRL1 = 0x04,
52 BD71837_REG_BUCK1_CTRL = 0x05, 72 BD718XX_REG_BUCK1_CTRL = 0x05,
53 BD71837_REG_BUCK2_CTRL = 0x06, 73 BD718XX_REG_BUCK2_CTRL = 0x06,
54 BD71837_REG_BUCK3_CTRL = 0x07, 74 BD718XX_REG_1ST_NODVS_BUCK_CTRL = 0x09,
55 BD71837_REG_BUCK4_CTRL = 0x08, 75 BD718XX_REG_2ND_NODVS_BUCK_CTRL = 0x0A,
56 BD71837_REG_BUCK5_CTRL = 0x09, 76 BD718XX_REG_3RD_NODVS_BUCK_CTRL = 0x0B,
57 BD71837_REG_BUCK6_CTRL = 0x0A, 77 BD718XX_REG_4TH_NODVS_BUCK_CTRL = 0x0C,
58 BD71837_REG_BUCK7_CTRL = 0x0B, 78 BD718XX_REG_BUCK1_VOLT_RUN = 0x0D,
59 BD71837_REG_BUCK8_CTRL = 0x0C, 79 BD718XX_REG_BUCK1_VOLT_IDLE = 0x0E,
60 BD71837_REG_BUCK1_VOLT_RUN = 0x0D, 80 BD718XX_REG_BUCK1_VOLT_SUSP = 0x0F,
61 BD71837_REG_BUCK1_VOLT_IDLE = 0x0E, 81 BD718XX_REG_BUCK2_VOLT_RUN = 0x10,
62 BD71837_REG_BUCK1_VOLT_SUSP = 0x0F, 82 BD718XX_REG_BUCK2_VOLT_IDLE = 0x11,
63 BD71837_REG_BUCK2_VOLT_RUN = 0x10, 83 BD718XX_REG_1ST_NODVS_BUCK_VOLT = 0x14,
64 BD71837_REG_BUCK2_VOLT_IDLE = 0x11, 84 BD718XX_REG_2ND_NODVS_BUCK_VOLT = 0x15,
65 BD71837_REG_BUCK3_VOLT_RUN = 0x12, 85 BD718XX_REG_3RD_NODVS_BUCK_VOLT = 0x16,
66 BD71837_REG_BUCK4_VOLT_RUN = 0x13, 86 BD718XX_REG_4TH_NODVS_BUCK_VOLT = 0x17,
67 BD71837_REG_BUCK5_VOLT = 0x14, 87 BD718XX_REG_LDO1_VOLT = 0x18,
68 BD71837_REG_BUCK6_VOLT = 0x15, 88 BD718XX_REG_LDO2_VOLT = 0x19,
69 BD71837_REG_BUCK7_VOLT = 0x16, 89 BD718XX_REG_LDO3_VOLT = 0x1A,
70 BD71837_REG_BUCK8_VOLT = 0x17, 90 BD718XX_REG_LDO4_VOLT = 0x1B,
71 BD71837_REG_LDO1_VOLT = 0x18, 91 BD718XX_REG_LDO5_VOLT = 0x1C,
72 BD71837_REG_LDO2_VOLT = 0x19, 92 BD718XX_REG_LDO6_VOLT = 0x1D,
73 BD71837_REG_LDO3_VOLT = 0x1A, 93 BD718XX_REG_TRANS_COND0 = 0x1F,
74 BD71837_REG_LDO4_VOLT = 0x1B, 94 BD718XX_REG_TRANS_COND1 = 0x20,
75 BD71837_REG_LDO5_VOLT = 0x1C, 95 BD718XX_REG_VRFAULTEN = 0x21,
76 BD71837_REG_LDO6_VOLT = 0x1D, 96 BD718XX_REG_MVRFLTMASK0 = 0x22,
77 BD71837_REG_LDO7_VOLT = 0x1E, 97 BD718XX_REG_MVRFLTMASK1 = 0x23,
78 BD71837_REG_TRANS_COND0 = 0x1F, 98 BD718XX_REG_MVRFLTMASK2 = 0x24,
79 BD71837_REG_TRANS_COND1 = 0x20, 99 BD718XX_REG_RCVCFG = 0x25,
80 BD71837_REG_VRFAULTEN = 0x21, 100 BD718XX_REG_RCVNUM = 0x26,
81 BD71837_REG_MVRFLTMASK0 = 0x22, 101 BD718XX_REG_PWRONCONFIG0 = 0x27,
82 BD71837_REG_MVRFLTMASK1 = 0x23, 102 BD718XX_REG_PWRONCONFIG1 = 0x28,
83 BD71837_REG_MVRFLTMASK2 = 0x24, 103 BD718XX_REG_RESETSRC = 0x29,
84 BD71837_REG_RCVCFG = 0x25, 104 BD718XX_REG_MIRQ = 0x2A,
85 BD71837_REG_RCVNUM = 0x26, 105 BD718XX_REG_IRQ = 0x2B,
86 BD71837_REG_PWRONCONFIG0 = 0x27, 106 BD718XX_REG_IN_MON = 0x2C,
87 BD71837_REG_PWRONCONFIG1 = 0x28, 107 BD718XX_REG_POW_STATE = 0x2D,
88 BD71837_REG_RESETSRC = 0x29, 108 BD718XX_REG_OUT32K = 0x2E,
89 BD71837_REG_MIRQ = 0x2A, 109 BD718XX_REG_REGLOCK = 0x2F,
90 BD71837_REG_IRQ = 0x2B, 110 BD718XX_REG_OTPVER = 0xFF,
91 BD71837_REG_IN_MON = 0x2C, 111 BD718XX_MAX_REGISTER = 0x100,
92 BD71837_REG_POW_STATE = 0x2D,
93 BD71837_REG_OUT32K = 0x2E,
94 BD71837_REG_REGLOCK = 0x2F,
95 BD71837_REG_OTPVER = 0xFF,
96 BD71837_MAX_REGISTER = 0x100,
97}; 112};
98 113
99#define REGLOCK_PWRSEQ 0x1 114#define REGLOCK_PWRSEQ 0x1
100#define REGLOCK_VREG 0x10 115#define REGLOCK_VREG 0x10
101 116
102/* Generic BUCK control masks */ 117/* Generic BUCK control masks */
103#define BD71837_BUCK_SEL 0x02 118#define BD718XX_BUCK_SEL 0x02
104#define BD71837_BUCK_EN 0x01 119#define BD718XX_BUCK_EN 0x01
105#define BD71837_BUCK_RUN_ON 0x04 120#define BD718XX_BUCK_RUN_ON 0x04
106 121
107/* Generic LDO masks */ 122/* Generic LDO masks */
108#define BD71837_LDO_SEL 0x80 123#define BD718XX_LDO_SEL 0x80
109#define BD71837_LDO_EN 0x40 124#define BD718XX_LDO_EN 0x40
110 125
111/* BD71837 BUCK ramp rate CTRL reg bits */ 126/* BD71837 BUCK ramp rate CTRL reg bits */
112#define BUCK_RAMPRATE_MASK 0xC0 127#define BUCK_RAMPRATE_MASK 0xC0
@@ -115,51 +130,64 @@ enum {
115#define BUCK_RAMPRATE_2P50MV 0x2 130#define BUCK_RAMPRATE_2P50MV 0x2
116#define BUCK_RAMPRATE_1P25MV 0x3 131#define BUCK_RAMPRATE_1P25MV 0x3
117 132
118/* BD71837_REG_BUCK1_VOLT_RUN bits */ 133#define DVS_BUCK_RUN_MASK 0x3F
119#define BUCK1_RUN_MASK 0x3F 134#define DVS_BUCK_SUSP_MASK 0x3F
120#define BUCK1_RUN_DEFAULT 0x14 135#define DVS_BUCK_IDLE_MASK 0x3F
121 136
122/* BD71837_REG_BUCK1_VOLT_SUSP bits */ 137#define BD718XX_1ST_NODVS_BUCK_MASK 0x07
123#define BUCK1_SUSP_MASK 0x3F 138#define BD718XX_3RD_NODVS_BUCK_MASK 0x07
124#define BUCK1_SUSP_DEFAULT 0x14 139#define BD718XX_4TH_NODVS_BUCK_MASK 0x3F
125 140
126/* BD71837_REG_BUCK1_VOLT_IDLE bits */ 141#define BD71847_BUCK3_MASK 0x07
127#define BUCK1_IDLE_MASK 0x3F 142#define BD71847_BUCK3_RANGE_MASK 0xC0
128#define BUCK1_IDLE_DEFAULT 0x14 143#define BD71847_BUCK4_MASK 0x03
129 144#define BD71847_BUCK4_RANGE_MASK 0x40
130/* BD71837_REG_BUCK2_VOLT_RUN bits */ 145
131#define BUCK2_RUN_MASK 0x3F 146#define BD71837_BUCK5_MASK 0x07
132#define BUCK2_RUN_DEFAULT 0x1E 147#define BD71837_BUCK5_RANGE_MASK 0x80
133 148#define BD71837_BUCK6_MASK 0x03
134/* BD71837_REG_BUCK2_VOLT_IDLE bits */ 149
135#define BUCK2_IDLE_MASK 0x3F 150#define BD718XX_LDO1_MASK 0x03
136#define BUCK2_IDLE_DEFAULT 0x14 151#define BD718XX_LDO1_RANGE_MASK 0x20
137 152#define BD718XX_LDO2_MASK 0x20
138/* BD71837_REG_BUCK3_VOLT_RUN bits */ 153#define BD718XX_LDO3_MASK 0x0F
139#define BUCK3_RUN_MASK 0x3F 154#define BD718XX_LDO4_MASK 0x0F
140#define BUCK3_RUN_DEFAULT 0x1E 155#define BD718XX_LDO6_MASK 0x0F
141 156
142/* BD71837_REG_BUCK4_VOLT_RUN bits */ 157#define BD71837_LDO5_MASK 0x0F
143#define BUCK4_RUN_MASK 0x3F 158#define BD71847_LDO5_MASK 0x0F
144#define BUCK4_RUN_DEFAULT 0x1E 159#define BD71847_LDO5_RANGE_MASK 0x20
145 160
146/* BD71837_REG_BUCK5_VOLT bits */ 161#define BD71837_LDO7_MASK 0x0F
147#define BUCK5_MASK 0x07 162
148#define BUCK5_DEFAULT 0x02 163/* BD718XX Voltage monitoring masks */
149 164#define BD718XX_BUCK1_VRMON80 0x1
150/* BD71837_REG_BUCK6_VOLT bits */ 165#define BD718XX_BUCK1_VRMON130 0x2
151#define BUCK6_MASK 0x03 166#define BD718XX_BUCK2_VRMON80 0x4
152#define BUCK6_DEFAULT 0x03 167#define BD718XX_BUCK2_VRMON130 0x8
153 168#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1
154/* BD71837_REG_BUCK7_VOLT bits */ 169#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
155#define BUCK7_MASK 0x07 170#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4
156#define BUCK7_DEFAULT 0x03 171#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
157 172#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10
158/* BD71837_REG_BUCK8_VOLT bits */ 173#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
159#define BUCK8_MASK 0x3F 174#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40
160#define BUCK8_DEFAULT 0x1E 175#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
161 176#define BD718XX_LDO1_VRMON80 0x1
162/* BD71837_REG_IRQ bits */ 177#define BD718XX_LDO2_VRMON80 0x2
178#define BD718XX_LDO3_VRMON80 0x4
179#define BD718XX_LDO4_VRMON80 0x8
180#define BD718XX_LDO5_VRMON80 0x10
181#define BD718XX_LDO6_VRMON80 0x20
182
183/* BD71837 specific voltage monitoring masks */
184#define BD71837_BUCK3_VRMON80 0x10
185#define BD71837_BUCK3_VRMON130 0x20
186#define BD71837_BUCK4_VRMON80 0x40
187#define BD71837_BUCK4_VRMON130 0x80
188#define BD71837_LDO7_VRMON80 0x40
189
190/* BD718XX_REG_IRQ bits */
163#define IRQ_SWRST 0x40 191#define IRQ_SWRST 0x40
164#define IRQ_PWRON_S 0x20 192#define IRQ_PWRON_S 0x20
165#define IRQ_PWRON_L 0x10 193#define IRQ_PWRON_L 0x10
@@ -168,52 +196,31 @@ enum {
168#define IRQ_ON_REQ 0x02 196#define IRQ_ON_REQ 0x02
169#define IRQ_STBY_REQ 0x01 197#define IRQ_STBY_REQ 0x01
170 198
171/* BD71837_REG_OUT32K bits */ 199/* BD718XX_REG_OUT32K bits */
172#define BD71837_OUT32K_EN 0x01 200#define BD718XX_OUT32K_EN 0x01
173 201
174/* BD71837 gated clock rate */ 202/* BD7183XX gated clock rate */
175#define BD71837_CLK_RATE 32768 203#define BD718XX_CLK_RATE 32768
176 204
177/* ROHM BD71837 irqs */ 205/* ROHM BD718XX irqs */
178enum { 206enum {
179 BD71837_INT_STBY_REQ, 207 BD718XX_INT_STBY_REQ,
180 BD71837_INT_ON_REQ, 208 BD718XX_INT_ON_REQ,
181 BD71837_INT_WDOG, 209 BD718XX_INT_WDOG,
182 BD71837_INT_PWRBTN, 210 BD718XX_INT_PWRBTN,
183 BD71837_INT_PWRBTN_L, 211 BD718XX_INT_PWRBTN_L,
184 BD71837_INT_PWRBTN_S, 212 BD718XX_INT_PWRBTN_S,
185 BD71837_INT_SWRST 213 BD718XX_INT_SWRST
186}; 214};
187 215
188/* ROHM BD71837 interrupt masks */ 216/* ROHM BD718XX interrupt masks */
189#define BD71837_INT_SWRST_MASK 0x40 217#define BD718XX_INT_SWRST_MASK 0x40
190#define BD71837_INT_PWRBTN_S_MASK 0x20 218#define BD718XX_INT_PWRBTN_S_MASK 0x20
191#define BD71837_INT_PWRBTN_L_MASK 0x10 219#define BD718XX_INT_PWRBTN_L_MASK 0x10
192#define BD71837_INT_PWRBTN_MASK 0x8 220#define BD718XX_INT_PWRBTN_MASK 0x8
193#define BD71837_INT_WDOG_MASK 0x4 221#define BD718XX_INT_WDOG_MASK 0x4
194#define BD71837_INT_ON_REQ_MASK 0x2 222#define BD718XX_INT_ON_REQ_MASK 0x2
195#define BD71837_INT_STBY_REQ_MASK 0x1 223#define BD718XX_INT_STBY_REQ_MASK 0x1
196
197/* BD71837_REG_LDO1_VOLT bits */
198#define LDO1_MASK 0x03
199
200/* BD71837_REG_LDO1_VOLT bits */
201#define LDO2_MASK 0x20
202
203/* BD71837_REG_LDO3_VOLT bits */
204#define LDO3_MASK 0x0F
205
206/* BD71837_REG_LDO4_VOLT bits */
207#define LDO4_MASK 0x0F
208
209/* BD71837_REG_LDO5_VOLT bits */
210#define LDO5_MASK 0x0F
211
212/* BD71837_REG_LDO6_VOLT bits */
213#define LDO6_MASK 0x0F
214
215/* BD71837_REG_LDO7_VOLT bits */
216#define LDO7_MASK 0x0F
217 224
218/* Register write induced reset settings */ 225/* Register write induced reset settings */
219 226
@@ -223,13 +230,13 @@ enum {
223 * write 1 to it we will trigger the action. So always write 0 to it when 230 * write 1 to it we will trigger the action. So always write 0 to it when
224 * changning SWRESET action - no matter what we read from it. 231 * changning SWRESET action - no matter what we read from it.
225 */ 232 */
226#define BD71837_SWRESET_TYPE_MASK 7 233#define BD718XX_SWRESET_TYPE_MASK 7
227#define BD71837_SWRESET_TYPE_DISABLED 0 234#define BD718XX_SWRESET_TYPE_DISABLED 0
228#define BD71837_SWRESET_TYPE_COLD 4 235#define BD718XX_SWRESET_TYPE_COLD 4
229#define BD71837_SWRESET_TYPE_WARM 6 236#define BD718XX_SWRESET_TYPE_WARM 6
230 237
231#define BD71837_SWRESET_RESET_MASK 1 238#define BD718XX_SWRESET_RESET_MASK 1
232#define BD71837_SWRESET_RESET 1 239#define BD718XX_SWRESET_RESET 1
233 240
234/* Poweroff state transition conditions */ 241/* Poweroff state transition conditions */
235 242
@@ -314,10 +321,10 @@ enum {
314 BD718XX_PWRBTN_LONG_PRESS_15S 321 BD718XX_PWRBTN_LONG_PRESS_15S
315}; 322};
316 323
317struct bd71837_pmic; 324struct bd718xx_clk;
318struct bd71837_clk;
319 325
320struct bd71837 { 326struct bd718xx {
327 unsigned int chip_type;
321 struct device *dev; 328 struct device *dev;
322 struct regmap *regmap; 329 struct regmap *regmap;
323 unsigned long int id; 330 unsigned long int id;
@@ -325,8 +332,7 @@ struct bd71837 {
325 int chip_irq; 332 int chip_irq;
326 struct regmap_irq_chip_data *irq_data; 333 struct regmap_irq_chip_data *irq_data;
327 334
328 struct bd71837_pmic *pmic; 335 struct bd718xx_clk *clk;
329 struct bd71837_clk *clk;
330}; 336};
331 337
332#endif /* __LINUX_MFD_BD71837_H__ */ 338#endif /* __LINUX_MFD_BD718XX_H__ */
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 28f4ae76271d..3ca17eb89aa2 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -1,14 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * core.h 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd
3 *
4 * copyright (c) 2011 Samsung Electronics Co., Ltd
5 * http://www.samsung.com 4 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */ 5 */
13 6
14#ifndef __LINUX_MFD_SEC_CORE_H 7#ifndef __LINUX_MFD_SEC_CORE_H
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 667aa40486dd..6cfe4201a106 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -1,13 +1,7 @@
1/* irq.h 1/* SPDX-License-Identifier: GPL-2.0+ */
2 * 2/*
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd
4 * http://www.samsung.com 4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */ 5 */
12 6
13#ifndef __LINUX_MFD_SEC_IRQ_H 7#ifndef __LINUX_MFD_SEC_IRQ_H
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 9ed2871ea335..0204decfc9aa 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -1,18 +1,7 @@
1/* rtc.h 1/* SPDX-License-Identifier: GPL-2.0+ */
2 * 2/*
3 * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd 3 * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
4 * http://www.samsung.com 4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */ 5 */
17 6
18#ifndef __LINUX_MFD_SEC_RTC_H 7#ifndef __LINUX_MFD_SEC_RTC_H
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index 2766108bca2f..0762e9de6f2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -1,12 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Copyright (c) 2013 Samsung Electronics Co., Ltd 3 * Copyright (c) 2013 Samsung Electronics Co., Ltd
3 * http://www.samsung.com 4 * http://www.samsung.com
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 */ 5 */
11 6
12#ifndef __LINUX_MFD_S2MPA01_H 7#ifndef __LINUX_MFD_S2MPA01_H
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 2c14eeca46f0..6e7668a389a1 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -1,14 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * s2mps11.h
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd
5 * http://www.samsung.com 4 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */ 5 */
13 6
14#ifndef __LINUX_MFD_S2MPS11_H 7#ifndef __LINUX_MFD_S2MPS11_H
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index 239e977ba45d..b96d8a11dcd3 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -1,19 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * s2mps13.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd 3 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 * http://www.samsung.com 4 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */ 5 */
18 6
19#ifndef __LINUX_MFD_S2MPS13_H 7#ifndef __LINUX_MFD_S2MPS13_H
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index c92f4782afb5..f4afa0cfc24f 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -1,19 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * s2mps14.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd 3 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 * http://www.samsung.com 4 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */ 5 */
18 6
19#ifndef __LINUX_MFD_S2MPS14_H 7#ifndef __LINUX_MFD_S2MPS14_H
diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h
index 36d35287c3c0..eac6bf74b72e 100644
--- a/include/linux/mfd/samsung/s2mps15.h
+++ b/include/linux/mfd/samsung/s2mps15.h
@@ -1,16 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Copyright (c) 2015 Samsung Electronics Co., Ltd 3 * Copyright (c) 2015 Samsung Electronics Co., Ltd
3 * http://www.samsung.com 4 * http://www.samsung.com
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */ 5 */
15 6
16#ifndef __LINUX_MFD_S2MPS15_H 7#ifndef __LINUX_MFD_S2MPS15_H
diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h
index 47ae9bc583a7..76cd5380cf0f 100644
--- a/include/linux/mfd/samsung/s2mpu02.h
+++ b/include/linux/mfd/samsung/s2mpu02.h
@@ -1,19 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * s2mpu02.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd 3 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 * http://www.samsung.com 4 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */ 5 */
18 6
19#ifndef __LINUX_MFD_S2MPU02_H 7#ifndef __LINUX_MFD_S2MPU02_H
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
index e025418e5589..c534f086ca16 100644
--- a/include/linux/mfd/samsung/s5m8763.h
+++ b/include/linux/mfd/samsung/s5m8763.h
@@ -1,13 +1,7 @@
1/* s5m8763.h 1/* SPDX-License-Identifier: GPL-2.0+ */
2 * 2/*
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd
4 * http://www.samsung.com 4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */ 5 */
12 6
13#ifndef __LINUX_MFD_S5M8763_H 7#ifndef __LINUX_MFD_S5M8763_H
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
index 243b58fec33d..704f8d80e96e 100644
--- a/include/linux/mfd/samsung/s5m8767.h
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -1,13 +1,7 @@
1/* s5m8767.h 1/* SPDX-License-Identifier: GPL-2.0+ */
2 * 2/*
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd
4 * http://www.samsung.com 4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */ 5 */
12 6
13#ifndef __LINUX_MFD_S5M8767_H 7#ifndef __LINUX_MFD_S5M8767_H
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
index 09d5f30384e5..1ef51ed36be5 100644
--- a/include/linux/mfd/ti-lmu.h
+++ b/include/linux/mfd/ti-lmu.h
@@ -16,6 +16,7 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/regmap.h> 18#include <linux/regmap.h>
19#include <linux/gpio/consumer.h>
19 20
20/* Notifier event */ 21/* Notifier event */
21#define LMU_EVENT_MONITOR_DONE 0x01 22#define LMU_EVENT_MONITOR_DONE 0x01
@@ -81,7 +82,7 @@ enum lm363x_regulator_id {
81struct ti_lmu { 82struct ti_lmu {
82 struct device *dev; 83 struct device *dev;
83 struct regmap *regmap; 84 struct regmap *regmap;
84 int en_gpio; 85 struct gpio_desc *en_gpio;
85 struct blocking_notifier_head notifier; 86 struct blocking_notifier_head notifier;
86}; 87};
87#endif 88#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 77866214ab51..1e70060c92ce 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -62,13 +62,6 @@
62#define TMIO_MMC_USE_GPIO_CD BIT(5) 62#define TMIO_MMC_USE_GPIO_CD BIT(5)
63 63
64/* 64/*
65 * Some controllers doesn't have over 0x100 register.
66 * it is used to checking accessibility of
67 * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL
68 */
69#define TMIO_MMC_HAVE_HIGH_REG BIT(6)
70
71/*
72 * Some controllers have CMD12 automatically 65 * Some controllers have CMD12 automatically
73 * issue/non-issue register 66 * issue/non-issue register
74 */ 67 */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 472fa4d4ea62..7361cd3fddc1 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -31,6 +31,7 @@
31#define PHY_ID_KSZ8081 0x00221560 31#define PHY_ID_KSZ8081 0x00221560
32#define PHY_ID_KSZ8061 0x00221570 32#define PHY_ID_KSZ8061 0x00221570
33#define PHY_ID_KSZ9031 0x00221620 33#define PHY_ID_KSZ9031 0x00221620
34#define PHY_ID_KSZ9131 0x00221640
34 35
35#define PHY_ID_KSZ886X 0x00221430 36#define PHY_ID_KSZ886X 0x00221430
36#define PHY_ID_KSZ8863 0x00221435 37#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 55000ee5c6ad..2da85b02e1c0 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -10,6 +10,7 @@
10 10
11 11
12#include <linux/if.h> 12#include <linux/if.h>
13#include <linux/linkmode.h>
13#include <uapi/linux/mii.h> 14#include <uapi/linux/mii.h>
14 15
15struct ethtool_cmd; 16struct ethtool_cmd;
@@ -132,6 +133,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
132} 133}
133 134
134/** 135/**
136 * linkmode_adv_to_mii_adv_t
137 * @advertising: the linkmode advertisement settings
138 *
139 * A small helper function that translates linkmode advertisement
140 * settings to phy autonegotiation advertisements for the
141 * MII_ADVERTISE register.
142 */
143static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
144{
145 u32 result = 0;
146
147 if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising))
148 result |= ADVERTISE_10HALF;
149 if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising))
150 result |= ADVERTISE_10FULL;
151 if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising))
152 result |= ADVERTISE_100HALF;
153 if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising))
154 result |= ADVERTISE_100FULL;
155 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
156 result |= ADVERTISE_PAUSE_CAP;
157 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
158 result |= ADVERTISE_PAUSE_ASYM;
159
160 return result;
161}
162
163/**
135 * mii_adv_to_ethtool_adv_t 164 * mii_adv_to_ethtool_adv_t
136 * @adv: value of the MII_ADVERTISE register 165 * @adv: value of the MII_ADVERTISE register
137 * 166 *
@@ -179,6 +208,28 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
179} 208}
180 209
181/** 210/**
211 * linkmode_adv_to_mii_ctrl1000_t
212 * advertising: the linkmode advertisement settings
213 *
214 * A small helper function that translates linkmode advertisement
215 * settings to phy autonegotiation advertisements for the
216 * MII_CTRL1000 register when in 1000T mode.
217 */
218static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
219{
220 u32 result = 0;
221
222 if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
223 advertising))
224 result |= ADVERTISE_1000HALF;
225 if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
226 advertising))
227 result |= ADVERTISE_1000FULL;
228
229 return result;
230}
231
232/**
182 * mii_ctrl1000_to_ethtool_adv_t 233 * mii_ctrl1000_to_ethtool_adv_t
183 * @adv: value of the MII_CTRL1000 register 234 * @adv: value of the MII_CTRL1000 register
184 * 235 *
@@ -303,6 +354,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
303} 354}
304 355
305/** 356/**
357 * mii_adv_to_linkmode_adv_t
358 * @advertising:pointer to destination link mode.
359 * @adv: value of the MII_ADVERTISE register
360 *
361 * A small helper function that translates MII_ADVERTISE bits
362 * to linkmode advertisement settings.
363 */
364static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
365 u32 adv)
366{
367 linkmode_zero(advertising);
368
369 if (adv & ADVERTISE_10HALF)
370 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
371 advertising);
372 if (adv & ADVERTISE_10FULL)
373 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
374 advertising);
375 if (adv & ADVERTISE_100HALF)
376 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
377 advertising);
378 if (adv & ADVERTISE_100FULL)
379 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
380 advertising);
381 if (adv & ADVERTISE_PAUSE_CAP)
382 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
383 if (adv & ADVERTISE_PAUSE_ASYM)
384 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
385}
386
387/**
388 * ethtool_adv_to_lcl_adv_t
389 * @advertising:pointer to ethtool advertising
390 *
391 * A small helper function that translates ethtool advertising to LVL
392 * pause capabilities.
393 */
394static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
395{
396 u32 lcl_adv = 0;
397
398 if (advertising & ADVERTISED_Pause)
399 lcl_adv |= ADVERTISE_PAUSE_CAP;
400 if (advertising & ADVERTISED_Asym_Pause)
401 lcl_adv |= ADVERTISE_PAUSE_ASYM;
402
403 return lcl_adv;
404}
405
406/**
306 * mii_advertise_flowctrl - get flow control advertisement flags 407 * mii_advertise_flowctrl - get flow control advertisement flags
307 * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) 408 * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
308 */ 409 */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 0ef6138eca49..31a750570c38 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -61,6 +61,7 @@ struct mlx5_core_cq {
61 int reset_notify_added; 61 int reset_notify_added;
62 struct list_head reset_notify; 62 struct list_head reset_notify;
63 struct mlx5_eq *eq; 63 struct mlx5_eq *eq;
64 u16 uid;
64}; 65};
65 66
66 67
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 11fa4e66afc5..b4c0457fbebd 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -504,6 +504,10 @@ struct health_buffer {
504 __be16 ext_synd; 504 __be16 ext_synd;
505}; 505};
506 506
507enum mlx5_cmd_addr_l_sz_offset {
508 MLX5_NIC_IFC_OFFSET = 8,
509};
510
507struct mlx5_init_seg { 511struct mlx5_init_seg {
508 __be32 fw_rev; 512 __be32 fw_rev;
509 __be32 cmdif_rev_fw_sub; 513 __be32 cmdif_rev_fw_sub;
@@ -1120,6 +1124,12 @@ enum mlx5_qcam_feature_groups {
1120#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ 1124#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1121 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) 1125 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1122 1126
1127#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
1128 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
1129
1130#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
1131 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
1132
1123#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ 1133#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
1124 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) 1134 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
1125 1135
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 7a452716de4b..aa5963b5d38e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -97,14 +97,15 @@ enum {
97}; 97};
98 98
99enum { 99enum {
100 MLX5_ATOMIC_MODE_IB_COMP = 1 << 16, 100 MLX5_ATOMIC_MODE_OFFSET = 16,
101 MLX5_ATOMIC_MODE_CX = 2 << 16, 101 MLX5_ATOMIC_MODE_IB_COMP = 1,
102 MLX5_ATOMIC_MODE_8B = 3 << 16, 102 MLX5_ATOMIC_MODE_CX = 2,
103 MLX5_ATOMIC_MODE_16B = 4 << 16, 103 MLX5_ATOMIC_MODE_8B = 3,
104 MLX5_ATOMIC_MODE_32B = 5 << 16, 104 MLX5_ATOMIC_MODE_16B = 4,
105 MLX5_ATOMIC_MODE_64B = 6 << 16, 105 MLX5_ATOMIC_MODE_32B = 5,
106 MLX5_ATOMIC_MODE_128B = 7 << 16, 106 MLX5_ATOMIC_MODE_64B = 6,
107 MLX5_ATOMIC_MODE_256B = 8 << 16, 107 MLX5_ATOMIC_MODE_128B = 7,
108 MLX5_ATOMIC_MODE_256B = 8,
108}; 109};
109 110
110enum { 111enum {
@@ -133,6 +134,7 @@ enum {
133 MLX5_REG_PVLC = 0x500f, 134 MLX5_REG_PVLC = 0x500f,
134 MLX5_REG_PCMR = 0x5041, 135 MLX5_REG_PCMR = 0x5041,
135 MLX5_REG_PMLP = 0x5002, 136 MLX5_REG_PMLP = 0x5002,
137 MLX5_REG_PPLM = 0x5023,
136 MLX5_REG_PCAM = 0x507f, 138 MLX5_REG_PCAM = 0x507f,
137 MLX5_REG_NODE_DESC = 0x6001, 139 MLX5_REG_NODE_DESC = 0x6001,
138 MLX5_REG_HOST_ENDIANNESS = 0x7004, 140 MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -162,16 +164,11 @@ enum mlx5_dcbx_oper_mode {
162 MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, 164 MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
163}; 165};
164 166
165enum mlx5_dct_atomic_mode {
166 MLX5_ATOMIC_MODE_DCT_OFF = 20,
167 MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
168 MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
169 MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
170};
171
172enum { 167enum {
173 MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, 168 MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
174 MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, 169 MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
170 MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
171 MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
175}; 172};
176 173
177enum mlx5_page_fault_resume_flags { 174enum mlx5_page_fault_resume_flags {
@@ -360,10 +357,10 @@ struct mlx5_frag_buf {
360}; 357};
361 358
362struct mlx5_frag_buf_ctrl { 359struct mlx5_frag_buf_ctrl {
363 struct mlx5_frag_buf frag_buf; 360 struct mlx5_buf_list *frags;
364 u32 sz_m1; 361 u32 sz_m1;
365 u32 frag_sz_m1; 362 u16 frag_sz_m1;
366 u32 strides_offset; 363 u16 strides_offset;
367 u8 log_sz; 364 u8 log_sz;
368 u8 log_stride; 365 u8 log_stride;
369 u8 log_frag_strides; 366 u8 log_frag_strides;
@@ -477,6 +474,7 @@ struct mlx5_core_srq {
477 474
478 atomic_t refcount; 475 atomic_t refcount;
479 struct completion free; 476 struct completion free;
477 u16 uid;
480}; 478};
481 479
482struct mlx5_eq_table { 480struct mlx5_eq_table {
@@ -583,10 +581,11 @@ struct mlx5_irq_info {
583}; 581};
584 582
585struct mlx5_fc_stats { 583struct mlx5_fc_stats {
586 struct rb_root counters; 584 spinlock_t counters_idr_lock; /* protects counters_idr */
587 struct list_head addlist; 585 struct idr counters_idr;
588 /* protect addlist add/splice operations */ 586 struct list_head counters;
589 spinlock_t addlist_lock; 587 struct llist_head addlist;
588 struct llist_head dellist;
590 589
591 struct workqueue_struct *wq; 590 struct workqueue_struct *wq;
592 struct delayed_work work; 591 struct delayed_work work;
@@ -804,7 +803,7 @@ struct mlx5_pps {
804}; 803};
805 804
806struct mlx5_clock { 805struct mlx5_clock {
807 rwlock_t lock; 806 seqlock_t lock;
808 struct cyclecounter cycles; 807 struct cyclecounter cycles;
809 struct timecounter tc; 808 struct timecounter tc;
810 struct hwtstamp_config hwtstamp_config; 809 struct hwtstamp_config hwtstamp_config;
@@ -837,6 +836,7 @@ struct mlx5_core_dev {
837 u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; 836 u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
838 u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; 837 u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
839 } caps; 838 } caps;
839 u64 sys_image_guid;
840 phys_addr_t iseg_base; 840 phys_addr_t iseg_base;
841 struct mlx5_init_seg __iomem *iseg; 841 struct mlx5_init_seg __iomem *iseg;
842 enum mlx5_device_state state; 842 enum mlx5_device_state state;
@@ -994,10 +994,12 @@ static inline u32 mlx5_base_mkey(const u32 key)
994 return key & 0xffffff00u; 994 return key & 0xffffff00u;
995} 995}
996 996
997static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, 997static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
998 u32 strides_offset, 998 u8 log_stride, u8 log_sz,
999 u16 strides_offset,
999 struct mlx5_frag_buf_ctrl *fbc) 1000 struct mlx5_frag_buf_ctrl *fbc)
1000{ 1001{
1002 fbc->frags = frags;
1001 fbc->log_stride = log_stride; 1003 fbc->log_stride = log_stride;
1002 fbc->log_sz = log_sz; 1004 fbc->log_sz = log_sz;
1003 fbc->sz_m1 = (1 << fbc->log_sz) - 1; 1005 fbc->sz_m1 = (1 << fbc->log_sz) - 1;
@@ -1006,18 +1008,11 @@ static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
1006 fbc->strides_offset = strides_offset; 1008 fbc->strides_offset = strides_offset;
1007} 1009}
1008 1010
1009static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, 1011static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
1012 u8 log_stride, u8 log_sz,
1010 struct mlx5_frag_buf_ctrl *fbc) 1013 struct mlx5_frag_buf_ctrl *fbc)
1011{ 1014{
1012 mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc); 1015 mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
1013}
1014
1015static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
1016 void *cqc)
1017{
1018 mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
1019 MLX5_GET(cqc, cqc, log_cq_size),
1020 fbc);
1021} 1016}
1022 1017
1023static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, 1018static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
@@ -1028,8 +1023,15 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
1028 ix += fbc->strides_offset; 1023 ix += fbc->strides_offset;
1029 frag = ix >> fbc->log_frag_strides; 1024 frag = ix >> fbc->log_frag_strides;
1030 1025
1031 return fbc->frag_buf.frags[frag].buf + 1026 return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
1032 ((fbc->frag_sz_m1 & ix) << fbc->log_stride); 1027}
1028
1029static inline u32
1030mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
1031{
1032 u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
1033
1034 return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
1033} 1035}
1034 1036
1035int mlx5_cmd_init(struct mlx5_core_dev *dev); 1037int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -1052,7 +1054,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
1052void mlx5_health_cleanup(struct mlx5_core_dev *dev); 1054void mlx5_health_cleanup(struct mlx5_core_dev *dev);
1053int mlx5_health_init(struct mlx5_core_dev *dev); 1055int mlx5_health_init(struct mlx5_core_dev *dev);
1054void mlx5_start_health_poll(struct mlx5_core_dev *dev); 1056void mlx5_start_health_poll(struct mlx5_core_dev *dev);
1055void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 1057void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
1056void mlx5_drain_health_wq(struct mlx5_core_dev *dev); 1058void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
1057void mlx5_trigger_health_work(struct mlx5_core_dev *dev); 1059void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
1058void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); 1060void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
@@ -1226,21 +1228,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
1226struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); 1228struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
1227void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); 1229void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
1228 1230
1229#ifndef CONFIG_MLX5_CORE_IPOIB 1231#ifdef CONFIG_MLX5_CORE_IPOIB
1230static inline
1231struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
1232 struct ib_device *ibdev,
1233 const char *name,
1234 void (*setup)(struct net_device *))
1235{
1236 return ERR_PTR(-EOPNOTSUPP);
1237}
1238#else
1239struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, 1232struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
1240 struct ib_device *ibdev, 1233 struct ib_device *ibdev,
1241 const char *name, 1234 const char *name,
1242 void (*setup)(struct net_device *)); 1235 void (*setup)(struct net_device *));
1243#endif /* CONFIG_MLX5_CORE_IPOIB */ 1236#endif /* CONFIG_MLX5_CORE_IPOIB */
1237int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
1238 struct ib_device *device,
1239 struct rdma_netdev_alloc_params *params);
1244 1240
1245struct mlx5_profile { 1241struct mlx5_profile {
1246 u64 mask; 1242 u64 mask;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 804516e4f483..5660f07d3be0 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -45,7 +45,8 @@ enum {
45}; 45};
46 46
47enum { 47enum {
48 MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0), 48 MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
49 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
49}; 50};
50 51
51#define LEFTOVERS_RULE_NUM 2 52#define LEFTOVERS_RULE_NUM 2
@@ -91,7 +92,7 @@ struct mlx5_flow_destination {
91 u32 tir_num; 92 u32 tir_num;
92 u32 ft_num; 93 u32 ft_num;
93 struct mlx5_flow_table *ft; 94 struct mlx5_flow_table *ft;
94 struct mlx5_fc *counter; 95 u32 counter_id;
95 struct { 96 struct {
96 u16 num; 97 u16 num;
97 u16 vhca_id; 98 u16 vhca_id;
@@ -101,6 +102,8 @@ struct mlx5_flow_destination {
101}; 102};
102 103
103struct mlx5_flow_namespace * 104struct mlx5_flow_namespace *
105mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
106struct mlx5_flow_namespace *
104mlx5_get_flow_namespace(struct mlx5_core_dev *dev, 107mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
105 enum mlx5_flow_namespace_type type); 108 enum mlx5_flow_namespace_type type);
106struct mlx5_flow_namespace * 109struct mlx5_flow_namespace *
@@ -155,20 +158,28 @@ struct mlx5_fs_vlan {
155 158
156#define MLX5_FS_VLAN_DEPTH 2 159#define MLX5_FS_VLAN_DEPTH 2
157 160
161enum {
162 FLOW_ACT_HAS_TAG = BIT(0),
163 FLOW_ACT_NO_APPEND = BIT(1),
164};
165
158struct mlx5_flow_act { 166struct mlx5_flow_act {
159 u32 action; 167 u32 action;
160 bool has_flow_tag;
161 u32 flow_tag; 168 u32 flow_tag;
162 u32 encap_id; 169 u32 reformat_id;
163 u32 modify_id; 170 u32 modify_id;
164 uintptr_t esp_id; 171 uintptr_t esp_id;
172 u32 flags;
165 struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; 173 struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
166 struct ib_counters *counters; 174 struct ib_counters *counters;
167}; 175};
168 176
169#define MLX5_DECLARE_FLOW_ACT(name) \ 177#define MLX5_DECLARE_FLOW_ACT(name) \
170 struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ 178 struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
171 MLX5_FS_DEFAULT_FLOW_TAG, 0, 0} 179 .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \
180 .reformat_id = 0, \
181 .modify_id = 0, \
182 .flags = 0, }
172 183
173/* Single destination per rule. 184/* Single destination per rule.
174 * Group ID is implied by the match criteria. 185 * Group ID is implied by the match criteria.
@@ -185,15 +196,30 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
185 struct mlx5_flow_destination *new_dest, 196 struct mlx5_flow_destination *new_dest,
186 struct mlx5_flow_destination *old_dest); 197 struct mlx5_flow_destination *old_dest);
187 198
188struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
189struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); 199struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
190void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); 200void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
191void mlx5_fc_query_cached(struct mlx5_fc *counter, 201void mlx5_fc_query_cached(struct mlx5_fc *counter,
192 u64 *bytes, u64 *packets, u64 *lastuse); 202 u64 *bytes, u64 *packets, u64 *lastuse);
193int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, 203int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
194 u64 *packets, u64 *bytes); 204 u64 *packets, u64 *bytes);
205u32 mlx5_fc_id(struct mlx5_fc *counter);
195 206
196int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); 207int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
197int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); 208int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
198 209
210int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
211 u8 namespace, u8 num_actions,
212 void *modify_actions, u32 *modify_header_id);
213void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
214 u32 modify_header_id);
215
216int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
217 int reformat_type,
218 size_t size,
219 void *reformat_data,
220 enum mlx5_flow_namespace_type namespace,
221 u32 *packet_reformat_id);
222void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
223 u32 packet_reformat_id);
224
199#endif 225#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f043d65b9bac..dbff9ff28f2c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -243,8 +243,8 @@ enum {
243 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, 243 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
244 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, 244 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
245 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, 245 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
246 MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, 246 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
247 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, 247 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
248 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, 248 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
249 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, 249 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
250 MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, 250 MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
@@ -336,7 +336,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
336 u8 modify_root[0x1]; 336 u8 modify_root[0x1];
337 u8 identified_miss_table_mode[0x1]; 337 u8 identified_miss_table_mode[0x1];
338 u8 flow_table_modify[0x1]; 338 u8 flow_table_modify[0x1];
339 u8 encap[0x1]; 339 u8 reformat[0x1];
340 u8 decap[0x1]; 340 u8 decap[0x1];
341 u8 reserved_at_9[0x1]; 341 u8 reserved_at_9[0x1];
342 u8 pop_vlan[0x1]; 342 u8 pop_vlan[0x1];
@@ -344,8 +344,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
344 u8 reserved_at_c[0x1]; 344 u8 reserved_at_c[0x1];
345 u8 pop_vlan_2[0x1]; 345 u8 pop_vlan_2[0x1];
346 u8 push_vlan_2[0x1]; 346 u8 push_vlan_2[0x1];
347 u8 reserved_at_f[0x11]; 347 u8 reformat_and_vlan_action[0x1];
348 348 u8 reserved_at_10[0x2];
349 u8 reformat_l3_tunnel_to_l2[0x1];
350 u8 reformat_l2_to_l3_tunnel[0x1];
351 u8 reformat_and_modify_action[0x1];
352 u8 reserved_at_14[0xb];
349 u8 reserved_at_20[0x2]; 353 u8 reserved_at_20[0x2];
350 u8 log_max_ft_size[0x6]; 354 u8 log_max_ft_size[0x6];
351 u8 log_max_modify_header_context[0x8]; 355 u8 log_max_modify_header_context[0x8];
@@ -554,7 +558,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
554 u8 nic_rx_multi_path_tirs[0x1]; 558 u8 nic_rx_multi_path_tirs[0x1];
555 u8 nic_rx_multi_path_tirs_fts[0x1]; 559 u8 nic_rx_multi_path_tirs_fts[0x1];
556 u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; 560 u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
557 u8 reserved_at_3[0x1fd]; 561 u8 reserved_at_3[0x1d];
562 u8 encap_general_header[0x1];
563 u8 reserved_at_21[0xa];
564 u8 log_max_packet_reformat_context[0x5];
565 u8 reserved_at_30[0x6];
566 u8 max_encap_header_size[0xa];
567 u8 reserved_at_40[0x1c0];
558 568
559 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; 569 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
560 570
@@ -574,7 +584,9 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
574struct mlx5_ifc_flow_table_eswitch_cap_bits { 584struct mlx5_ifc_flow_table_eswitch_cap_bits {
575 u8 reserved_at_0[0x1c]; 585 u8 reserved_at_0[0x1c];
576 u8 fdb_multi_path_to_table[0x1]; 586 u8 fdb_multi_path_to_table[0x1];
577 u8 reserved_at_1d[0x1e3]; 587 u8 reserved_at_1d[0x1];
588 u8 multi_fdb_encap[0x1];
589 u8 reserved_at_1e[0x1e1];
578 590
579 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; 591 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
580 592
@@ -599,7 +611,7 @@ struct mlx5_ifc_e_switch_cap_bits {
599 u8 vxlan_encap_decap[0x1]; 611 u8 vxlan_encap_decap[0x1];
600 u8 nvgre_encap_decap[0x1]; 612 u8 nvgre_encap_decap[0x1];
601 u8 reserved_at_22[0x9]; 613 u8 reserved_at_22[0x9];
602 u8 log_max_encap_headers[0x5]; 614 u8 log_max_packet_reformat_context[0x5];
603 u8 reserved_2b[0x6]; 615 u8 reserved_2b[0x6];
604 u8 max_encap_header_size[0xa]; 616 u8 max_encap_header_size[0xa];
605 617
@@ -896,7 +908,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
896 u8 log_max_mkey[0x6]; 908 u8 log_max_mkey[0x6];
897 u8 reserved_at_f0[0x8]; 909 u8 reserved_at_f0[0x8];
898 u8 dump_fill_mkey[0x1]; 910 u8 dump_fill_mkey[0x1];
899 u8 reserved_at_f9[0x3]; 911 u8 reserved_at_f9[0x2];
912 u8 fast_teardown[0x1];
900 u8 log_max_eq[0x4]; 913 u8 log_max_eq[0x4];
901 914
902 u8 max_indirection[0x8]; 915 u8 max_indirection[0x8];
@@ -995,7 +1008,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
995 u8 umr_modify_atomic_disabled[0x1]; 1008 u8 umr_modify_atomic_disabled[0x1];
996 u8 umr_indirect_mkey_disabled[0x1]; 1009 u8 umr_indirect_mkey_disabled[0x1];
997 u8 umr_fence[0x2]; 1010 u8 umr_fence[0x2];
998 u8 reserved_at_20c[0x3]; 1011 u8 dc_req_scat_data_cqe[0x1];
1012 u8 reserved_at_20d[0x2];
999 u8 drain_sigerr[0x1]; 1013 u8 drain_sigerr[0x1];
1000 u8 cmdif_checksum[0x2]; 1014 u8 cmdif_checksum[0x2];
1001 u8 sigerr_cqe[0x1]; 1015 u8 sigerr_cqe[0x1];
@@ -1280,7 +1294,9 @@ struct mlx5_ifc_wq_bits {
1280 u8 reserved_at_118[0x3]; 1294 u8 reserved_at_118[0x3];
1281 u8 log_wq_sz[0x5]; 1295 u8 log_wq_sz[0x5];
1282 1296
1283 u8 reserved_at_120[0x3]; 1297 u8 dbr_umem_valid[0x1];
1298 u8 wq_umem_valid[0x1];
1299 u8 reserved_at_122[0x1];
1284 u8 log_hairpin_num_packets[0x5]; 1300 u8 log_hairpin_num_packets[0x5];
1285 u8 reserved_at_128[0x3]; 1301 u8 reserved_at_128[0x3];
1286 u8 log_hairpin_data_sz[0x5]; 1302 u8 log_hairpin_data_sz[0x5];
@@ -2354,7 +2370,10 @@ struct mlx5_ifc_qpc_bits {
2354 2370
2355 u8 dc_access_key[0x40]; 2371 u8 dc_access_key[0x40];
2356 2372
2357 u8 reserved_at_680[0xc0]; 2373 u8 reserved_at_680[0x3];
2374 u8 dbr_umem_valid[0x1];
2375
2376 u8 reserved_at_684[0xbc];
2358}; 2377};
2359 2378
2360struct mlx5_ifc_roce_addr_layout_bits { 2379struct mlx5_ifc_roce_addr_layout_bits {
@@ -2394,7 +2413,7 @@ enum {
2394 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, 2413 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
2395 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, 2414 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
2396 MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, 2415 MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
2397 MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, 2416 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
2398 MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, 2417 MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
2399 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, 2418 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
2400 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, 2419 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
@@ -2427,7 +2446,7 @@ struct mlx5_ifc_flow_context_bits {
2427 u8 reserved_at_a0[0x8]; 2446 u8 reserved_at_a0[0x8];
2428 u8 flow_counter_list_size[0x18]; 2447 u8 flow_counter_list_size[0x18];
2429 2448
2430 u8 encap_id[0x20]; 2449 u8 packet_reformat_id[0x20];
2431 2450
2432 u8 modify_header_id[0x20]; 2451 u8 modify_header_id[0x20];
2433 2452
@@ -2454,7 +2473,7 @@ struct mlx5_ifc_xrc_srqc_bits {
2454 2473
2455 u8 wq_signature[0x1]; 2474 u8 wq_signature[0x1];
2456 u8 cont_srq[0x1]; 2475 u8 cont_srq[0x1];
2457 u8 reserved_at_22[0x1]; 2476 u8 dbr_umem_valid[0x1];
2458 u8 rlky[0x1]; 2477 u8 rlky[0x1];
2459 u8 basic_cyclic_rcv_wqe[0x1]; 2478 u8 basic_cyclic_rcv_wqe[0x1];
2460 u8 log_rq_stride[0x3]; 2479 u8 log_rq_stride[0x3];
@@ -2549,8 +2568,8 @@ enum {
2549}; 2568};
2550 2569
2551enum { 2570enum {
2552 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1, 2571 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
2553 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2, 2572 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
2554}; 2573};
2555 2574
2556struct mlx5_ifc_tirc_bits { 2575struct mlx5_ifc_tirc_bits {
@@ -3118,7 +3137,9 @@ enum {
3118 3137
3119struct mlx5_ifc_cqc_bits { 3138struct mlx5_ifc_cqc_bits {
3120 u8 status[0x4]; 3139 u8 status[0x4];
3121 u8 reserved_at_4[0x4]; 3140 u8 reserved_at_4[0x2];
3141 u8 dbr_umem_valid[0x1];
3142 u8 reserved_at_7[0x1];
3122 u8 cqe_sz[0x3]; 3143 u8 cqe_sz[0x3];
3123 u8 cc[0x1]; 3144 u8 cc[0x1];
3124 u8 reserved_at_c[0x1]; 3145 u8 reserved_at_c[0x1];
@@ -3352,12 +3373,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
3352 3373
3353 u8 reserved_at_40[0x3f]; 3374 u8 reserved_at_40[0x3f];
3354 3375
3355 u8 force_state[0x1]; 3376 u8 state[0x1];
3356}; 3377};
3357 3378
3358enum { 3379enum {
3359 MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, 3380 MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
3360 MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, 3381 MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
3382 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
3361}; 3383};
3362 3384
3363struct mlx5_ifc_teardown_hca_in_bits { 3385struct mlx5_ifc_teardown_hca_in_bits {
@@ -3384,7 +3406,7 @@ struct mlx5_ifc_sqerr2rts_qp_out_bits {
3384 3406
3385struct mlx5_ifc_sqerr2rts_qp_in_bits { 3407struct mlx5_ifc_sqerr2rts_qp_in_bits {
3386 u8 opcode[0x10]; 3408 u8 opcode[0x10];
3387 u8 reserved_at_10[0x10]; 3409 u8 uid[0x10];
3388 3410
3389 u8 reserved_at_20[0x10]; 3411 u8 reserved_at_20[0x10];
3390 u8 op_mod[0x10]; 3412 u8 op_mod[0x10];
@@ -3414,7 +3436,7 @@ struct mlx5_ifc_sqd2rts_qp_out_bits {
3414 3436
3415struct mlx5_ifc_sqd2rts_qp_in_bits { 3437struct mlx5_ifc_sqd2rts_qp_in_bits {
3416 u8 opcode[0x10]; 3438 u8 opcode[0x10];
3417 u8 reserved_at_10[0x10]; 3439 u8 uid[0x10];
3418 3440
3419 u8 reserved_at_20[0x10]; 3441 u8 reserved_at_20[0x10];
3420 u8 op_mod[0x10]; 3442 u8 op_mod[0x10];
@@ -3619,7 +3641,7 @@ struct mlx5_ifc_rts2rts_qp_out_bits {
3619 3641
3620struct mlx5_ifc_rts2rts_qp_in_bits { 3642struct mlx5_ifc_rts2rts_qp_in_bits {
3621 u8 opcode[0x10]; 3643 u8 opcode[0x10];
3622 u8 reserved_at_10[0x10]; 3644 u8 uid[0x10];
3623 3645
3624 u8 reserved_at_20[0x10]; 3646 u8 reserved_at_20[0x10];
3625 u8 op_mod[0x10]; 3647 u8 op_mod[0x10];
@@ -3649,7 +3671,7 @@ struct mlx5_ifc_rtr2rts_qp_out_bits {
3649 3671
3650struct mlx5_ifc_rtr2rts_qp_in_bits { 3672struct mlx5_ifc_rtr2rts_qp_in_bits {
3651 u8 opcode[0x10]; 3673 u8 opcode[0x10];
3652 u8 reserved_at_10[0x10]; 3674 u8 uid[0x10];
3653 3675
3654 u8 reserved_at_20[0x10]; 3676 u8 reserved_at_20[0x10];
3655 u8 op_mod[0x10]; 3677 u8 op_mod[0x10];
@@ -3679,7 +3701,7 @@ struct mlx5_ifc_rst2init_qp_out_bits {
3679 3701
3680struct mlx5_ifc_rst2init_qp_in_bits { 3702struct mlx5_ifc_rst2init_qp_in_bits {
3681 u8 opcode[0x10]; 3703 u8 opcode[0x10];
3682 u8 reserved_at_10[0x10]; 3704 u8 uid[0x10];
3683 3705
3684 u8 reserved_at_20[0x10]; 3706 u8 reserved_at_20[0x10];
3685 u8 op_mod[0x10]; 3707 u8 op_mod[0x10];
@@ -4802,19 +4824,19 @@ struct mlx5_ifc_query_eq_in_bits {
4802 u8 reserved_at_60[0x20]; 4824 u8 reserved_at_60[0x20];
4803}; 4825};
4804 4826
4805struct mlx5_ifc_encap_header_in_bits { 4827struct mlx5_ifc_packet_reformat_context_in_bits {
4806 u8 reserved_at_0[0x5]; 4828 u8 reserved_at_0[0x5];
4807 u8 header_type[0x3]; 4829 u8 reformat_type[0x3];
4808 u8 reserved_at_8[0xe]; 4830 u8 reserved_at_8[0xe];
4809 u8 encap_header_size[0xa]; 4831 u8 reformat_data_size[0xa];
4810 4832
4811 u8 reserved_at_20[0x10]; 4833 u8 reserved_at_20[0x10];
4812 u8 encap_header[2][0x8]; 4834 u8 reformat_data[2][0x8];
4813 4835
4814 u8 more_encap_header[0][0x8]; 4836 u8 more_reformat_data[0][0x8];
4815}; 4837};
4816 4838
4817struct mlx5_ifc_query_encap_header_out_bits { 4839struct mlx5_ifc_query_packet_reformat_context_out_bits {
4818 u8 status[0x8]; 4840 u8 status[0x8];
4819 u8 reserved_at_8[0x18]; 4841 u8 reserved_at_8[0x18];
4820 4842
@@ -4822,33 +4844,41 @@ struct mlx5_ifc_query_encap_header_out_bits {
4822 4844
4823 u8 reserved_at_40[0xa0]; 4845 u8 reserved_at_40[0xa0];
4824 4846
4825 struct mlx5_ifc_encap_header_in_bits encap_header[0]; 4847 struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0];
4826}; 4848};
4827 4849
4828struct mlx5_ifc_query_encap_header_in_bits { 4850struct mlx5_ifc_query_packet_reformat_context_in_bits {
4829 u8 opcode[0x10]; 4851 u8 opcode[0x10];
4830 u8 reserved_at_10[0x10]; 4852 u8 reserved_at_10[0x10];
4831 4853
4832 u8 reserved_at_20[0x10]; 4854 u8 reserved_at_20[0x10];
4833 u8 op_mod[0x10]; 4855 u8 op_mod[0x10];
4834 4856
4835 u8 encap_id[0x20]; 4857 u8 packet_reformat_id[0x20];
4836 4858
4837 u8 reserved_at_60[0xa0]; 4859 u8 reserved_at_60[0xa0];
4838}; 4860};
4839 4861
4840struct mlx5_ifc_alloc_encap_header_out_bits { 4862struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
4841 u8 status[0x8]; 4863 u8 status[0x8];
4842 u8 reserved_at_8[0x18]; 4864 u8 reserved_at_8[0x18];
4843 4865
4844 u8 syndrome[0x20]; 4866 u8 syndrome[0x20];
4845 4867
4846 u8 encap_id[0x20]; 4868 u8 packet_reformat_id[0x20];
4847 4869
4848 u8 reserved_at_60[0x20]; 4870 u8 reserved_at_60[0x20];
4849}; 4871};
4850 4872
4851struct mlx5_ifc_alloc_encap_header_in_bits { 4873enum {
4874 MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
4875 MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
4876 MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
4877 MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
4878 MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
4879};
4880
4881struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
4852 u8 opcode[0x10]; 4882 u8 opcode[0x10];
4853 u8 reserved_at_10[0x10]; 4883 u8 reserved_at_10[0x10];
4854 4884
@@ -4857,10 +4887,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits {
4857 4887
4858 u8 reserved_at_40[0xa0]; 4888 u8 reserved_at_40[0xa0];
4859 4889
4860 struct mlx5_ifc_encap_header_in_bits encap_header; 4890 struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context;
4861}; 4891};
4862 4892
4863struct mlx5_ifc_dealloc_encap_header_out_bits { 4893struct mlx5_ifc_dealloc_packet_reformat_context_out_bits {
4864 u8 status[0x8]; 4894 u8 status[0x8];
4865 u8 reserved_at_8[0x18]; 4895 u8 reserved_at_8[0x18];
4866 4896
@@ -4869,14 +4899,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits {
4869 u8 reserved_at_40[0x40]; 4899 u8 reserved_at_40[0x40];
4870}; 4900};
4871 4901
4872struct mlx5_ifc_dealloc_encap_header_in_bits { 4902struct mlx5_ifc_dealloc_packet_reformat_context_in_bits {
4873 u8 opcode[0x10]; 4903 u8 opcode[0x10];
4874 u8 reserved_at_10[0x10]; 4904 u8 reserved_at_10[0x10];
4875 4905
4876 u8 reserved_20[0x10]; 4906 u8 reserved_20[0x10];
4877 u8 op_mod[0x10]; 4907 u8 op_mod[0x10];
4878 4908
4879 u8 encap_id[0x20]; 4909 u8 packet_reformat_id[0x20];
4880 4910
4881 u8 reserved_60[0x20]; 4911 u8 reserved_60[0x20];
4882}; 4912};
@@ -5174,7 +5204,7 @@ struct mlx5_ifc_qp_2rst_out_bits {
5174 5204
5175struct mlx5_ifc_qp_2rst_in_bits { 5205struct mlx5_ifc_qp_2rst_in_bits {
5176 u8 opcode[0x10]; 5206 u8 opcode[0x10];
5177 u8 reserved_at_10[0x10]; 5207 u8 uid[0x10];
5178 5208
5179 u8 reserved_at_20[0x10]; 5209 u8 reserved_at_20[0x10];
5180 u8 op_mod[0x10]; 5210 u8 op_mod[0x10];
@@ -5196,7 +5226,7 @@ struct mlx5_ifc_qp_2err_out_bits {
5196 5226
5197struct mlx5_ifc_qp_2err_in_bits { 5227struct mlx5_ifc_qp_2err_in_bits {
5198 u8 opcode[0x10]; 5228 u8 opcode[0x10];
5199 u8 reserved_at_10[0x10]; 5229 u8 uid[0x10];
5200 5230
5201 u8 reserved_at_20[0x10]; 5231 u8 reserved_at_20[0x10];
5202 u8 op_mod[0x10]; 5232 u8 op_mod[0x10];
@@ -5296,7 +5326,7 @@ struct mlx5_ifc_modify_tis_bitmask_bits {
5296 5326
5297struct mlx5_ifc_modify_tis_in_bits { 5327struct mlx5_ifc_modify_tis_in_bits {
5298 u8 opcode[0x10]; 5328 u8 opcode[0x10];
5299 u8 reserved_at_10[0x10]; 5329 u8 uid[0x10];
5300 5330
5301 u8 reserved_at_20[0x10]; 5331 u8 reserved_at_20[0x10];
5302 u8 op_mod[0x10]; 5332 u8 op_mod[0x10];
@@ -5335,7 +5365,7 @@ struct mlx5_ifc_modify_tir_out_bits {
5335 5365
5336struct mlx5_ifc_modify_tir_in_bits { 5366struct mlx5_ifc_modify_tir_in_bits {
5337 u8 opcode[0x10]; 5367 u8 opcode[0x10];
5338 u8 reserved_at_10[0x10]; 5368 u8 uid[0x10];
5339 5369
5340 u8 reserved_at_20[0x10]; 5370 u8 reserved_at_20[0x10];
5341 u8 op_mod[0x10]; 5371 u8 op_mod[0x10];
@@ -5363,7 +5393,7 @@ struct mlx5_ifc_modify_sq_out_bits {
5363 5393
5364struct mlx5_ifc_modify_sq_in_bits { 5394struct mlx5_ifc_modify_sq_in_bits {
5365 u8 opcode[0x10]; 5395 u8 opcode[0x10];
5366 u8 reserved_at_10[0x10]; 5396 u8 uid[0x10];
5367 5397
5368 u8 reserved_at_20[0x10]; 5398 u8 reserved_at_20[0x10];
5369 u8 op_mod[0x10]; 5399 u8 op_mod[0x10];
@@ -5436,7 +5466,7 @@ struct mlx5_ifc_rqt_bitmask_bits {
5436 5466
5437struct mlx5_ifc_modify_rqt_in_bits { 5467struct mlx5_ifc_modify_rqt_in_bits {
5438 u8 opcode[0x10]; 5468 u8 opcode[0x10];
5439 u8 reserved_at_10[0x10]; 5469 u8 uid[0x10];
5440 5470
5441 u8 reserved_at_20[0x10]; 5471 u8 reserved_at_20[0x10];
5442 u8 op_mod[0x10]; 5472 u8 op_mod[0x10];
@@ -5470,7 +5500,7 @@ enum {
5470 5500
5471struct mlx5_ifc_modify_rq_in_bits { 5501struct mlx5_ifc_modify_rq_in_bits {
5472 u8 opcode[0x10]; 5502 u8 opcode[0x10];
5473 u8 reserved_at_10[0x10]; 5503 u8 uid[0x10];
5474 5504
5475 u8 reserved_at_20[0x10]; 5505 u8 reserved_at_20[0x10];
5476 u8 op_mod[0x10]; 5506 u8 op_mod[0x10];
@@ -5506,7 +5536,7 @@ struct mlx5_ifc_rmp_bitmask_bits {
5506 5536
5507struct mlx5_ifc_modify_rmp_in_bits { 5537struct mlx5_ifc_modify_rmp_in_bits {
5508 u8 opcode[0x10]; 5538 u8 opcode[0x10];
5509 u8 reserved_at_10[0x10]; 5539 u8 uid[0x10];
5510 5540
5511 u8 reserved_at_20[0x10]; 5541 u8 reserved_at_20[0x10];
5512 u8 op_mod[0x10]; 5542 u8 op_mod[0x10];
@@ -5611,7 +5641,7 @@ enum {
5611 5641
5612struct mlx5_ifc_modify_cq_in_bits { 5642struct mlx5_ifc_modify_cq_in_bits {
5613 u8 opcode[0x10]; 5643 u8 opcode[0x10];
5614 u8 reserved_at_10[0x10]; 5644 u8 uid[0x10];
5615 5645
5616 u8 reserved_at_20[0x10]; 5646 u8 reserved_at_20[0x10];
5617 u8 op_mod[0x10]; 5647 u8 op_mod[0x10];
@@ -5623,7 +5653,10 @@ struct mlx5_ifc_modify_cq_in_bits {
5623 5653
5624 struct mlx5_ifc_cqc_bits cq_context; 5654 struct mlx5_ifc_cqc_bits cq_context;
5625 5655
5626 u8 reserved_at_280[0x600]; 5656 u8 reserved_at_280[0x40];
5657
5658 u8 cq_umem_valid[0x1];
5659 u8 reserved_at_2c1[0x5bf];
5627 5660
5628 u8 pas[0][0x40]; 5661 u8 pas[0][0x40];
5629}; 5662};
@@ -5771,7 +5804,7 @@ struct mlx5_ifc_init2rtr_qp_out_bits {
5771 5804
5772struct mlx5_ifc_init2rtr_qp_in_bits { 5805struct mlx5_ifc_init2rtr_qp_in_bits {
5773 u8 opcode[0x10]; 5806 u8 opcode[0x10];
5774 u8 reserved_at_10[0x10]; 5807 u8 uid[0x10];
5775 5808
5776 u8 reserved_at_20[0x10]; 5809 u8 reserved_at_20[0x10];
5777 u8 op_mod[0x10]; 5810 u8 op_mod[0x10];
@@ -5801,7 +5834,7 @@ struct mlx5_ifc_init2init_qp_out_bits {
5801 5834
5802struct mlx5_ifc_init2init_qp_in_bits { 5835struct mlx5_ifc_init2init_qp_in_bits {
5803 u8 opcode[0x10]; 5836 u8 opcode[0x10];
5804 u8 reserved_at_10[0x10]; 5837 u8 uid[0x10];
5805 5838
5806 u8 reserved_at_20[0x10]; 5839 u8 reserved_at_20[0x10];
5807 u8 op_mod[0x10]; 5840 u8 op_mod[0x10];
@@ -5900,7 +5933,7 @@ struct mlx5_ifc_drain_dct_out_bits {
5900 5933
5901struct mlx5_ifc_drain_dct_in_bits { 5934struct mlx5_ifc_drain_dct_in_bits {
5902 u8 opcode[0x10]; 5935 u8 opcode[0x10];
5903 u8 reserved_at_10[0x10]; 5936 u8 uid[0x10];
5904 5937
5905 u8 reserved_at_20[0x10]; 5938 u8 reserved_at_20[0x10];
5906 u8 op_mod[0x10]; 5939 u8 op_mod[0x10];
@@ -5944,7 +5977,7 @@ struct mlx5_ifc_detach_from_mcg_out_bits {
5944 5977
5945struct mlx5_ifc_detach_from_mcg_in_bits { 5978struct mlx5_ifc_detach_from_mcg_in_bits {
5946 u8 opcode[0x10]; 5979 u8 opcode[0x10];
5947 u8 reserved_at_10[0x10]; 5980 u8 uid[0x10];
5948 5981
5949 u8 reserved_at_20[0x10]; 5982 u8 reserved_at_20[0x10];
5950 u8 op_mod[0x10]; 5983 u8 op_mod[0x10];
@@ -5968,7 +6001,7 @@ struct mlx5_ifc_destroy_xrq_out_bits {
5968 6001
5969struct mlx5_ifc_destroy_xrq_in_bits { 6002struct mlx5_ifc_destroy_xrq_in_bits {
5970 u8 opcode[0x10]; 6003 u8 opcode[0x10];
5971 u8 reserved_at_10[0x10]; 6004 u8 uid[0x10];
5972 6005
5973 u8 reserved_at_20[0x10]; 6006 u8 reserved_at_20[0x10];
5974 u8 op_mod[0x10]; 6007 u8 op_mod[0x10];
@@ -5990,7 +6023,7 @@ struct mlx5_ifc_destroy_xrc_srq_out_bits {
5990 6023
5991struct mlx5_ifc_destroy_xrc_srq_in_bits { 6024struct mlx5_ifc_destroy_xrc_srq_in_bits {
5992 u8 opcode[0x10]; 6025 u8 opcode[0x10];
5993 u8 reserved_at_10[0x10]; 6026 u8 uid[0x10];
5994 6027
5995 u8 reserved_at_20[0x10]; 6028 u8 reserved_at_20[0x10];
5996 u8 op_mod[0x10]; 6029 u8 op_mod[0x10];
@@ -6012,7 +6045,7 @@ struct mlx5_ifc_destroy_tis_out_bits {
6012 6045
6013struct mlx5_ifc_destroy_tis_in_bits { 6046struct mlx5_ifc_destroy_tis_in_bits {
6014 u8 opcode[0x10]; 6047 u8 opcode[0x10];
6015 u8 reserved_at_10[0x10]; 6048 u8 uid[0x10];
6016 6049
6017 u8 reserved_at_20[0x10]; 6050 u8 reserved_at_20[0x10];
6018 u8 op_mod[0x10]; 6051 u8 op_mod[0x10];
@@ -6034,7 +6067,7 @@ struct mlx5_ifc_destroy_tir_out_bits {
6034 6067
6035struct mlx5_ifc_destroy_tir_in_bits { 6068struct mlx5_ifc_destroy_tir_in_bits {
6036 u8 opcode[0x10]; 6069 u8 opcode[0x10];
6037 u8 reserved_at_10[0x10]; 6070 u8 uid[0x10];
6038 6071
6039 u8 reserved_at_20[0x10]; 6072 u8 reserved_at_20[0x10];
6040 u8 op_mod[0x10]; 6073 u8 op_mod[0x10];
@@ -6056,7 +6089,7 @@ struct mlx5_ifc_destroy_srq_out_bits {
6056 6089
6057struct mlx5_ifc_destroy_srq_in_bits { 6090struct mlx5_ifc_destroy_srq_in_bits {
6058 u8 opcode[0x10]; 6091 u8 opcode[0x10];
6059 u8 reserved_at_10[0x10]; 6092 u8 uid[0x10];
6060 6093
6061 u8 reserved_at_20[0x10]; 6094 u8 reserved_at_20[0x10];
6062 u8 op_mod[0x10]; 6095 u8 op_mod[0x10];
@@ -6078,7 +6111,7 @@ struct mlx5_ifc_destroy_sq_out_bits {
6078 6111
6079struct mlx5_ifc_destroy_sq_in_bits { 6112struct mlx5_ifc_destroy_sq_in_bits {
6080 u8 opcode[0x10]; 6113 u8 opcode[0x10];
6081 u8 reserved_at_10[0x10]; 6114 u8 uid[0x10];
6082 6115
6083 u8 reserved_at_20[0x10]; 6116 u8 reserved_at_20[0x10];
6084 u8 op_mod[0x10]; 6117 u8 op_mod[0x10];
@@ -6124,7 +6157,7 @@ struct mlx5_ifc_destroy_rqt_out_bits {
6124 6157
6125struct mlx5_ifc_destroy_rqt_in_bits { 6158struct mlx5_ifc_destroy_rqt_in_bits {
6126 u8 opcode[0x10]; 6159 u8 opcode[0x10];
6127 u8 reserved_at_10[0x10]; 6160 u8 uid[0x10];
6128 6161
6129 u8 reserved_at_20[0x10]; 6162 u8 reserved_at_20[0x10];
6130 u8 op_mod[0x10]; 6163 u8 op_mod[0x10];
@@ -6146,7 +6179,7 @@ struct mlx5_ifc_destroy_rq_out_bits {
6146 6179
6147struct mlx5_ifc_destroy_rq_in_bits { 6180struct mlx5_ifc_destroy_rq_in_bits {
6148 u8 opcode[0x10]; 6181 u8 opcode[0x10];
6149 u8 reserved_at_10[0x10]; 6182 u8 uid[0x10];
6150 6183
6151 u8 reserved_at_20[0x10]; 6184 u8 reserved_at_20[0x10];
6152 u8 op_mod[0x10]; 6185 u8 op_mod[0x10];
@@ -6190,7 +6223,7 @@ struct mlx5_ifc_destroy_rmp_out_bits {
6190 6223
6191struct mlx5_ifc_destroy_rmp_in_bits { 6224struct mlx5_ifc_destroy_rmp_in_bits {
6192 u8 opcode[0x10]; 6225 u8 opcode[0x10];
6193 u8 reserved_at_10[0x10]; 6226 u8 uid[0x10];
6194 6227
6195 u8 reserved_at_20[0x10]; 6228 u8 reserved_at_20[0x10];
6196 u8 op_mod[0x10]; 6229 u8 op_mod[0x10];
@@ -6212,7 +6245,7 @@ struct mlx5_ifc_destroy_qp_out_bits {
6212 6245
6213struct mlx5_ifc_destroy_qp_in_bits { 6246struct mlx5_ifc_destroy_qp_in_bits {
6214 u8 opcode[0x10]; 6247 u8 opcode[0x10];
6215 u8 reserved_at_10[0x10]; 6248 u8 uid[0x10];
6216 6249
6217 u8 reserved_at_20[0x10]; 6250 u8 reserved_at_20[0x10];
6218 u8 op_mod[0x10]; 6251 u8 op_mod[0x10];
@@ -6364,7 +6397,7 @@ struct mlx5_ifc_destroy_dct_out_bits {
6364 6397
6365struct mlx5_ifc_destroy_dct_in_bits { 6398struct mlx5_ifc_destroy_dct_in_bits {
6366 u8 opcode[0x10]; 6399 u8 opcode[0x10];
6367 u8 reserved_at_10[0x10]; 6400 u8 uid[0x10];
6368 6401
6369 u8 reserved_at_20[0x10]; 6402 u8 reserved_at_20[0x10];
6370 u8 op_mod[0x10]; 6403 u8 op_mod[0x10];
@@ -6386,7 +6419,7 @@ struct mlx5_ifc_destroy_cq_out_bits {
6386 6419
6387struct mlx5_ifc_destroy_cq_in_bits { 6420struct mlx5_ifc_destroy_cq_in_bits {
6388 u8 opcode[0x10]; 6421 u8 opcode[0x10];
6389 u8 reserved_at_10[0x10]; 6422 u8 uid[0x10];
6390 6423
6391 u8 reserved_at_20[0x10]; 6424 u8 reserved_at_20[0x10];
6392 u8 op_mod[0x10]; 6425 u8 op_mod[0x10];
@@ -6489,7 +6522,7 @@ struct mlx5_ifc_dealloc_xrcd_out_bits {
6489 6522
6490struct mlx5_ifc_dealloc_xrcd_in_bits { 6523struct mlx5_ifc_dealloc_xrcd_in_bits {
6491 u8 opcode[0x10]; 6524 u8 opcode[0x10];
6492 u8 reserved_at_10[0x10]; 6525 u8 uid[0x10];
6493 6526
6494 u8 reserved_at_20[0x10]; 6527 u8 reserved_at_20[0x10];
6495 u8 op_mod[0x10]; 6528 u8 op_mod[0x10];
@@ -6577,7 +6610,7 @@ struct mlx5_ifc_dealloc_pd_out_bits {
6577 6610
6578struct mlx5_ifc_dealloc_pd_in_bits { 6611struct mlx5_ifc_dealloc_pd_in_bits {
6579 u8 opcode[0x10]; 6612 u8 opcode[0x10];
6580 u8 reserved_at_10[0x10]; 6613 u8 uid[0x10];
6581 6614
6582 u8 reserved_at_20[0x10]; 6615 u8 reserved_at_20[0x10];
6583 u8 op_mod[0x10]; 6616 u8 op_mod[0x10];
@@ -6623,7 +6656,7 @@ struct mlx5_ifc_create_xrq_out_bits {
6623 6656
6624struct mlx5_ifc_create_xrq_in_bits { 6657struct mlx5_ifc_create_xrq_in_bits {
6625 u8 opcode[0x10]; 6658 u8 opcode[0x10];
6626 u8 reserved_at_10[0x10]; 6659 u8 uid[0x10];
6627 6660
6628 u8 reserved_at_20[0x10]; 6661 u8 reserved_at_20[0x10];
6629 u8 op_mod[0x10]; 6662 u8 op_mod[0x10];
@@ -6647,7 +6680,7 @@ struct mlx5_ifc_create_xrc_srq_out_bits {
6647 6680
6648struct mlx5_ifc_create_xrc_srq_in_bits { 6681struct mlx5_ifc_create_xrc_srq_in_bits {
6649 u8 opcode[0x10]; 6682 u8 opcode[0x10];
6650 u8 reserved_at_10[0x10]; 6683 u8 uid[0x10];
6651 6684
6652 u8 reserved_at_20[0x10]; 6685 u8 reserved_at_20[0x10];
6653 u8 op_mod[0x10]; 6686 u8 op_mod[0x10];
@@ -6656,7 +6689,9 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
6656 6689
6657 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 6690 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
6658 6691
6659 u8 reserved_at_280[0x600]; 6692 u8 reserved_at_280[0x40];
6693 u8 xrc_srq_umem_valid[0x1];
6694 u8 reserved_at_2c1[0x5bf];
6660 6695
6661 u8 pas[0][0x40]; 6696 u8 pas[0][0x40];
6662}; 6697};
@@ -6675,7 +6710,7 @@ struct mlx5_ifc_create_tis_out_bits {
6675 6710
6676struct mlx5_ifc_create_tis_in_bits { 6711struct mlx5_ifc_create_tis_in_bits {
6677 u8 opcode[0x10]; 6712 u8 opcode[0x10];
6678 u8 reserved_at_10[0x10]; 6713 u8 uid[0x10];
6679 6714
6680 u8 reserved_at_20[0x10]; 6715 u8 reserved_at_20[0x10];
6681 u8 op_mod[0x10]; 6716 u8 op_mod[0x10];
@@ -6699,7 +6734,7 @@ struct mlx5_ifc_create_tir_out_bits {
6699 6734
6700struct mlx5_ifc_create_tir_in_bits { 6735struct mlx5_ifc_create_tir_in_bits {
6701 u8 opcode[0x10]; 6736 u8 opcode[0x10];
6702 u8 reserved_at_10[0x10]; 6737 u8 uid[0x10];
6703 6738
6704 u8 reserved_at_20[0x10]; 6739 u8 reserved_at_20[0x10];
6705 u8 op_mod[0x10]; 6740 u8 op_mod[0x10];
@@ -6723,7 +6758,7 @@ struct mlx5_ifc_create_srq_out_bits {
6723 6758
6724struct mlx5_ifc_create_srq_in_bits { 6759struct mlx5_ifc_create_srq_in_bits {
6725 u8 opcode[0x10]; 6760 u8 opcode[0x10];
6726 u8 reserved_at_10[0x10]; 6761 u8 uid[0x10];
6727 6762
6728 u8 reserved_at_20[0x10]; 6763 u8 reserved_at_20[0x10];
6729 u8 op_mod[0x10]; 6764 u8 op_mod[0x10];
@@ -6751,7 +6786,7 @@ struct mlx5_ifc_create_sq_out_bits {
6751 6786
6752struct mlx5_ifc_create_sq_in_bits { 6787struct mlx5_ifc_create_sq_in_bits {
6753 u8 opcode[0x10]; 6788 u8 opcode[0x10];
6754 u8 reserved_at_10[0x10]; 6789 u8 uid[0x10];
6755 6790
6756 u8 reserved_at_20[0x10]; 6791 u8 reserved_at_20[0x10];
6757 u8 op_mod[0x10]; 6792 u8 op_mod[0x10];
@@ -6805,7 +6840,7 @@ struct mlx5_ifc_create_rqt_out_bits {
6805 6840
6806struct mlx5_ifc_create_rqt_in_bits { 6841struct mlx5_ifc_create_rqt_in_bits {
6807 u8 opcode[0x10]; 6842 u8 opcode[0x10];
6808 u8 reserved_at_10[0x10]; 6843 u8 uid[0x10];
6809 6844
6810 u8 reserved_at_20[0x10]; 6845 u8 reserved_at_20[0x10];
6811 u8 op_mod[0x10]; 6846 u8 op_mod[0x10];
@@ -6829,7 +6864,7 @@ struct mlx5_ifc_create_rq_out_bits {
6829 6864
6830struct mlx5_ifc_create_rq_in_bits { 6865struct mlx5_ifc_create_rq_in_bits {
6831 u8 opcode[0x10]; 6866 u8 opcode[0x10];
6832 u8 reserved_at_10[0x10]; 6867 u8 uid[0x10];
6833 6868
6834 u8 reserved_at_20[0x10]; 6869 u8 reserved_at_20[0x10];
6835 u8 op_mod[0x10]; 6870 u8 op_mod[0x10];
@@ -6853,7 +6888,7 @@ struct mlx5_ifc_create_rmp_out_bits {
6853 6888
6854struct mlx5_ifc_create_rmp_in_bits { 6889struct mlx5_ifc_create_rmp_in_bits {
6855 u8 opcode[0x10]; 6890 u8 opcode[0x10];
6856 u8 reserved_at_10[0x10]; 6891 u8 uid[0x10];
6857 6892
6858 u8 reserved_at_20[0x10]; 6893 u8 reserved_at_20[0x10];
6859 u8 op_mod[0x10]; 6894 u8 op_mod[0x10];
@@ -6877,7 +6912,7 @@ struct mlx5_ifc_create_qp_out_bits {
6877 6912
6878struct mlx5_ifc_create_qp_in_bits { 6913struct mlx5_ifc_create_qp_in_bits {
6879 u8 opcode[0x10]; 6914 u8 opcode[0x10];
6880 u8 reserved_at_10[0x10]; 6915 u8 uid[0x10];
6881 6916
6882 u8 reserved_at_20[0x10]; 6917 u8 reserved_at_20[0x10];
6883 u8 op_mod[0x10]; 6918 u8 op_mod[0x10];
@@ -6890,7 +6925,10 @@ struct mlx5_ifc_create_qp_in_bits {
6890 6925
6891 struct mlx5_ifc_qpc_bits qpc; 6926 struct mlx5_ifc_qpc_bits qpc;
6892 6927
6893 u8 reserved_at_800[0x80]; 6928 u8 reserved_at_800[0x60];
6929
6930 u8 wq_umem_valid[0x1];
6931 u8 reserved_at_861[0x1f];
6894 6932
6895 u8 pas[0][0x40]; 6933 u8 pas[0][0x40];
6896}; 6934};
@@ -6952,7 +6990,8 @@ struct mlx5_ifc_create_mkey_in_bits {
6952 u8 reserved_at_40[0x20]; 6990 u8 reserved_at_40[0x20];
6953 6991
6954 u8 pg_access[0x1]; 6992 u8 pg_access[0x1];
6955 u8 reserved_at_61[0x1f]; 6993 u8 mkey_umem_valid[0x1];
6994 u8 reserved_at_62[0x1e];
6956 6995
6957 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 6996 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
6958 6997
@@ -6978,7 +7017,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
6978}; 7017};
6979 7018
6980struct mlx5_ifc_flow_table_context_bits { 7019struct mlx5_ifc_flow_table_context_bits {
6981 u8 encap_en[0x1]; 7020 u8 reformat_en[0x1];
6982 u8 decap_en[0x1]; 7021 u8 decap_en[0x1];
6983 u8 reserved_at_2[0x2]; 7022 u8 reserved_at_2[0x2];
6984 u8 table_miss_action[0x4]; 7023 u8 table_miss_action[0x4];
@@ -7120,7 +7159,7 @@ struct mlx5_ifc_create_dct_out_bits {
7120 7159
7121struct mlx5_ifc_create_dct_in_bits { 7160struct mlx5_ifc_create_dct_in_bits {
7122 u8 opcode[0x10]; 7161 u8 opcode[0x10];
7123 u8 reserved_at_10[0x10]; 7162 u8 uid[0x10];
7124 7163
7125 u8 reserved_at_20[0x10]; 7164 u8 reserved_at_20[0x10];
7126 u8 op_mod[0x10]; 7165 u8 op_mod[0x10];
@@ -7146,7 +7185,7 @@ struct mlx5_ifc_create_cq_out_bits {
7146 7185
7147struct mlx5_ifc_create_cq_in_bits { 7186struct mlx5_ifc_create_cq_in_bits {
7148 u8 opcode[0x10]; 7187 u8 opcode[0x10];
7149 u8 reserved_at_10[0x10]; 7188 u8 uid[0x10];
7150 7189
7151 u8 reserved_at_20[0x10]; 7190 u8 reserved_at_20[0x10];
7152 u8 op_mod[0x10]; 7191 u8 op_mod[0x10];
@@ -7155,7 +7194,10 @@ struct mlx5_ifc_create_cq_in_bits {
7155 7194
7156 struct mlx5_ifc_cqc_bits cq_context; 7195 struct mlx5_ifc_cqc_bits cq_context;
7157 7196
7158 u8 reserved_at_280[0x600]; 7197 u8 reserved_at_280[0x60];
7198
7199 u8 cq_umem_valid[0x1];
7200 u8 reserved_at_2e1[0x59f];
7159 7201
7160 u8 pas[0][0x40]; 7202 u8 pas[0][0x40];
7161}; 7203};
@@ -7203,7 +7245,7 @@ struct mlx5_ifc_attach_to_mcg_out_bits {
7203 7245
7204struct mlx5_ifc_attach_to_mcg_in_bits { 7246struct mlx5_ifc_attach_to_mcg_in_bits {
7205 u8 opcode[0x10]; 7247 u8 opcode[0x10];
7206 u8 reserved_at_10[0x10]; 7248 u8 uid[0x10];
7207 7249
7208 u8 reserved_at_20[0x10]; 7250 u8 reserved_at_20[0x10];
7209 u8 op_mod[0x10]; 7251 u8 op_mod[0x10];
@@ -7254,7 +7296,7 @@ enum {
7254 7296
7255struct mlx5_ifc_arm_xrc_srq_in_bits { 7297struct mlx5_ifc_arm_xrc_srq_in_bits {
7256 u8 opcode[0x10]; 7298 u8 opcode[0x10];
7257 u8 reserved_at_10[0x10]; 7299 u8 uid[0x10];
7258 7300
7259 u8 reserved_at_20[0x10]; 7301 u8 reserved_at_20[0x10];
7260 u8 op_mod[0x10]; 7302 u8 op_mod[0x10];
@@ -7282,7 +7324,7 @@ enum {
7282 7324
7283struct mlx5_ifc_arm_rq_in_bits { 7325struct mlx5_ifc_arm_rq_in_bits {
7284 u8 opcode[0x10]; 7326 u8 opcode[0x10];
7285 u8 reserved_at_10[0x10]; 7327 u8 uid[0x10];
7286 7328
7287 u8 reserved_at_20[0x10]; 7329 u8 reserved_at_20[0x10];
7288 u8 op_mod[0x10]; 7330 u8 op_mod[0x10];
@@ -7330,7 +7372,7 @@ struct mlx5_ifc_alloc_xrcd_out_bits {
7330 7372
7331struct mlx5_ifc_alloc_xrcd_in_bits { 7373struct mlx5_ifc_alloc_xrcd_in_bits {
7332 u8 opcode[0x10]; 7374 u8 opcode[0x10];
7333 u8 reserved_at_10[0x10]; 7375 u8 uid[0x10];
7334 7376
7335 u8 reserved_at_20[0x10]; 7377 u8 reserved_at_20[0x10];
7336 u8 op_mod[0x10]; 7378 u8 op_mod[0x10];
@@ -7418,7 +7460,7 @@ struct mlx5_ifc_alloc_pd_out_bits {
7418 7460
7419struct mlx5_ifc_alloc_pd_in_bits { 7461struct mlx5_ifc_alloc_pd_in_bits {
7420 u8 opcode[0x10]; 7462 u8 opcode[0x10];
7421 u8 reserved_at_10[0x10]; 7463 u8 uid[0x10];
7422 7464
7423 u8 reserved_at_20[0x10]; 7465 u8 reserved_at_20[0x10];
7424 u8 op_mod[0x10]; 7466 u8 op_mod[0x10];
@@ -7786,20 +7828,34 @@ struct mlx5_ifc_pplr_reg_bits {
7786 7828
7787struct mlx5_ifc_pplm_reg_bits { 7829struct mlx5_ifc_pplm_reg_bits {
7788 u8 reserved_at_0[0x8]; 7830 u8 reserved_at_0[0x8];
7789 u8 local_port[0x8]; 7831 u8 local_port[0x8];
7790 u8 reserved_at_10[0x10]; 7832 u8 reserved_at_10[0x10];
7791 7833
7792 u8 reserved_at_20[0x20]; 7834 u8 reserved_at_20[0x20];
7793 7835
7794 u8 port_profile_mode[0x8]; 7836 u8 port_profile_mode[0x8];
7795 u8 static_port_profile[0x8]; 7837 u8 static_port_profile[0x8];
7796 u8 active_port_profile[0x8]; 7838 u8 active_port_profile[0x8];
7797 u8 reserved_at_58[0x8]; 7839 u8 reserved_at_58[0x8];
7798 7840
7799 u8 retransmission_active[0x8]; 7841 u8 retransmission_active[0x8];
7800 u8 fec_mode_active[0x18]; 7842 u8 fec_mode_active[0x18];
7801 7843
7802 u8 reserved_at_80[0x20]; 7844 u8 rs_fec_correction_bypass_cap[0x4];
7845 u8 reserved_at_84[0x8];
7846 u8 fec_override_cap_56g[0x4];
7847 u8 fec_override_cap_100g[0x4];
7848 u8 fec_override_cap_50g[0x4];
7849 u8 fec_override_cap_25g[0x4];
7850 u8 fec_override_cap_10g_40g[0x4];
7851
7852 u8 rs_fec_correction_bypass_admin[0x4];
7853 u8 reserved_at_a4[0x8];
7854 u8 fec_override_admin_56g[0x4];
7855 u8 fec_override_admin_100g[0x4];
7856 u8 fec_override_admin_50g[0x4];
7857 u8 fec_override_admin_25g[0x4];
7858 u8 fec_override_admin_10g_40g[0x4];
7803}; 7859};
7804 7860
7805struct mlx5_ifc_ppcnt_reg_bits { 7861struct mlx5_ifc_ppcnt_reg_bits {
@@ -8084,7 +8140,8 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
8084 u8 rx_icrc_encapsulated_counter[0x1]; 8140 u8 rx_icrc_encapsulated_counter[0x1];
8085 u8 reserved_at_6e[0x8]; 8141 u8 reserved_at_6e[0x8];
8086 u8 pfcc_mask[0x1]; 8142 u8 pfcc_mask[0x1];
8087 u8 reserved_at_77[0x4]; 8143 u8 reserved_at_77[0x3];
8144 u8 per_lane_error_counters[0x1];
8088 u8 rx_buffer_fullness_counters[0x1]; 8145 u8 rx_buffer_fullness_counters[0x1];
8089 u8 ptys_connector_type[0x1]; 8146 u8 ptys_connector_type[0x1];
8090 u8 reserved_at_7d[0x1]; 8147 u8 reserved_at_7d[0x1];
@@ -8095,7 +8152,10 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
8095struct mlx5_ifc_pcam_regs_5000_to_507f_bits { 8152struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
8096 u8 port_access_reg_cap_mask_127_to_96[0x20]; 8153 u8 port_access_reg_cap_mask_127_to_96[0x20];
8097 u8 port_access_reg_cap_mask_95_to_64[0x20]; 8154 u8 port_access_reg_cap_mask_95_to_64[0x20];
8098 u8 port_access_reg_cap_mask_63_to_32[0x20]; 8155
8156 u8 port_access_reg_cap_mask_63_to_36[0x1c];
8157 u8 pplm[0x1];
8158 u8 port_access_reg_cap_mask_34_to_32[0x3];
8099 8159
8100 u8 port_access_reg_cap_mask_31_to_13[0x13]; 8160 u8 port_access_reg_cap_mask_31_to_13[0x13];
8101 u8 pbmc[0x1]; 8161 u8 pbmc[0x1];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 4778d41085d4..fbe322c966bc 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -471,6 +471,7 @@ struct mlx5_core_qp {
471 int qpn; 471 int qpn;
472 struct mlx5_rsc_debug *dbg; 472 struct mlx5_rsc_debug *dbg;
473 int pid; 473 int pid;
474 u16 uid;
474}; 475};
475 476
476struct mlx5_core_dct { 477struct mlx5_core_dct {
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 24ff23e27c8a..1b1f3c20c6a3 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -61,6 +61,7 @@ struct mlx5_srq_attr {
61 u32 tm_next_tag; 61 u32 tm_next_tag;
62 u32 tm_hw_phase_cnt; 62 u32 tm_hw_phase_cnt;
63 u32 tm_sw_phase_cnt; 63 u32 tm_sw_phase_cnt;
64 u16 uid;
64}; 65};
65 66
66struct mlx5_core_dev; 67struct mlx5_core_dev;
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 83a33a1873a6..7f5ca2cd3a32 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
90 90
91 u32 *rqn; 91 u32 *rqn;
92 u32 *sqn; 92 u32 *sqn;
93
94 bool peer_gone;
93}; 95};
94 96
95struct mlx5_hairpin * 97struct mlx5_hairpin *
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 7e7c6dfcfb09..9c694808c212 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -121,4 +121,6 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
121int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, 121int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
122 struct mlx5_core_dev *port_mdev); 122 struct mlx5_core_dev *port_mdev);
123int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); 123int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
124
125u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
124#endif /* __MLX5_VPORT_H__ */ 126#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a61ebe8ad4ca..1e52b8fd1685 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -848,6 +848,8 @@ static inline bool is_zone_device_page(const struct page *page)
848{ 848{
849 return page_zonenum(page) == ZONE_DEVICE; 849 return page_zonenum(page) == ZONE_DEVICE;
850} 850}
851extern void memmap_init_zone_device(struct zone *, unsigned long,
852 unsigned long, struct dev_pagemap *);
851#else 853#else
852static inline bool is_zone_device_page(const struct page *page) 854static inline bool is_zone_device_page(const struct page *page)
853{ 855{
@@ -890,6 +892,19 @@ static inline bool is_device_public_page(const struct page *page)
890 page->pgmap->type == MEMORY_DEVICE_PUBLIC; 892 page->pgmap->type == MEMORY_DEVICE_PUBLIC;
891} 893}
892 894
895#ifdef CONFIG_PCI_P2PDMA
896static inline bool is_pci_p2pdma_page(const struct page *page)
897{
898 return is_zone_device_page(page) &&
899 page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
900}
901#else /* CONFIG_PCI_P2PDMA */
902static inline bool is_pci_p2pdma_page(const struct page *page)
903{
904 return false;
905}
906#endif /* CONFIG_PCI_P2PDMA */
907
893#else /* CONFIG_DEV_PAGEMAP_OPS */ 908#else /* CONFIG_DEV_PAGEMAP_OPS */
894static inline void dev_pagemap_get_ops(void) 909static inline void dev_pagemap_get_ops(void)
895{ 910{
@@ -913,6 +928,11 @@ static inline bool is_device_public_page(const struct page *page)
913{ 928{
914 return false; 929 return false;
915} 930}
931
932static inline bool is_pci_p2pdma_page(const struct page *page)
933{
934 return false;
935}
916#endif /* CONFIG_DEV_PAGEMAP_OPS */ 936#endif /* CONFIG_DEV_PAGEMAP_OPS */
917 937
918static inline void get_page(struct page *page) 938static inline void get_page(struct page *page)
@@ -2286,6 +2306,8 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
2286 unsigned long len, unsigned long prot, unsigned long flags, 2306 unsigned long len, unsigned long prot, unsigned long flags,
2287 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, 2307 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2288 struct list_head *uf); 2308 struct list_head *uf);
2309extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
2310 struct list_head *uf, bool downgrade);
2289extern int do_munmap(struct mm_struct *, unsigned long, size_t, 2311extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2290 struct list_head *uf); 2312 struct list_head *uf);
2291 2313
@@ -2455,6 +2477,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2455 return vma; 2477 return vma;
2456} 2478}
2457 2479
2480static inline bool range_in_vma(struct vm_area_struct *vma,
2481 unsigned long start, unsigned long end)
2482{
2483 return (vma && vma->vm_start <= start && end <= vma->vm_end);
2484}
2485
2458#ifdef CONFIG_MMU 2486#ifdef CONFIG_MMU
2459pgprot_t vm_get_page_prot(unsigned long vm_flags); 2487pgprot_t vm_get_page_prot(unsigned long vm_flags);
2460void vma_set_page_prot(struct vm_area_struct *vma); 2488void vma_set_page_prot(struct vm_area_struct *vma);
@@ -2478,11 +2506,11 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2478int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 2506int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2479 unsigned long pfn, unsigned long size, pgprot_t); 2507 unsigned long pfn, unsigned long size, pgprot_t);
2480int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 2508int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2481int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2509vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2482 unsigned long pfn); 2510 unsigned long pfn);
2483int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2511vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2484 unsigned long pfn, pgprot_t pgprot); 2512 unsigned long pfn, pgprot_t pgprot);
2485int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2513vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2486 pfn_t pfn); 2514 pfn_t pfn);
2487vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 2515vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2488 unsigned long addr, pfn_t pfn); 2516 unsigned long addr, pfn_t pfn);
@@ -2501,32 +2529,6 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
2501 return VM_FAULT_NOPAGE; 2529 return VM_FAULT_NOPAGE;
2502} 2530}
2503 2531
2504static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
2505 unsigned long addr, pfn_t pfn)
2506{
2507 int err = vm_insert_mixed(vma, addr, pfn);
2508
2509 if (err == -ENOMEM)
2510 return VM_FAULT_OOM;
2511 if (err < 0 && err != -EBUSY)
2512 return VM_FAULT_SIGBUS;
2513
2514 return VM_FAULT_NOPAGE;
2515}
2516
2517static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
2518 unsigned long addr, unsigned long pfn)
2519{
2520 int err = vm_insert_pfn(vma, addr, pfn);
2521
2522 if (err == -ENOMEM)
2523 return VM_FAULT_OOM;
2524 if (err < 0 && err != -EBUSY)
2525 return VM_FAULT_SIGBUS;
2526
2527 return VM_FAULT_NOPAGE;
2528}
2529
2530static inline vm_fault_t vmf_error(int err) 2532static inline vm_fault_t vmf_error(int err)
2531{ 2533{
2532 if (err == -ENOMEM) 2534 if (err == -ENOMEM)
@@ -2534,16 +2536,8 @@ static inline vm_fault_t vmf_error(int err)
2534 return VM_FAULT_SIGBUS; 2536 return VM_FAULT_SIGBUS;
2535} 2537}
2536 2538
2537struct page *follow_page_mask(struct vm_area_struct *vma, 2539struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
2538 unsigned long address, unsigned int foll_flags, 2540 unsigned int foll_flags);
2539 unsigned int *page_mask);
2540
2541static inline struct page *follow_page(struct vm_area_struct *vma,
2542 unsigned long address, unsigned int foll_flags)
2543{
2544 unsigned int unused_page_mask;
2545 return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2546}
2547 2541
2548#define FOLL_WRITE 0x01 /* check pte is writable */ 2542#define FOLL_WRITE 0x01 /* check pte is writable */
2549#define FOLL_TOUCH 0x02 /* mark page accessed */ 2543#define FOLL_TOUCH 0x02 /* mark page accessed */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cd2bc939efd0..5ed8f6292a53 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct mm_struct {
341 struct { 341 struct {
342 struct vm_area_struct *mmap; /* list of VMAs */ 342 struct vm_area_struct *mmap; /* list of VMAs */
343 struct rb_root mm_rb; 343 struct rb_root mm_rb;
344 u32 vmacache_seqnum; /* per-thread vmacache */ 344 u64 vmacache_seqnum; /* per-thread vmacache */
345#ifdef CONFIG_MMU 345#ifdef CONFIG_MMU
346 unsigned long (*get_unmapped_area) (struct file *filp, 346 unsigned long (*get_unmapped_area) (struct file *filp,
347 unsigned long addr, unsigned long len, 347 unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5fe87687664c..d7016dcb245e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -32,7 +32,7 @@
32#define VMACACHE_MASK (VMACACHE_SIZE - 1) 32#define VMACACHE_MASK (VMACACHE_SIZE - 1)
33 33
34struct vmacache { 34struct vmacache {
35 u32 seqnum; 35 u64 seqnum;
36 struct vm_area_struct *vmas[VMACACHE_SIZE]; 36 struct vm_area_struct *vmas[VMACACHE_SIZE];
37}; 37};
38 38
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index beed7121c781..2a5fe75dd082 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -569,6 +569,11 @@ static inline bool mmc_can_retune(struct mmc_host *host)
569 return host->can_retune == 1; 569 return host->can_retune == 1;
570} 570}
571 571
572static inline bool mmc_doing_retune(struct mmc_host *host)
573{
574 return host->doing_retune == 1;
575}
576
572static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) 577static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
573{ 578{
574 return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 579 return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 133ba78820ee..9893a6432adf 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -2,7 +2,6 @@
2#ifndef _LINUX_MMU_NOTIFIER_H 2#ifndef _LINUX_MMU_NOTIFIER_H
3#define _LINUX_MMU_NOTIFIER_H 3#define _LINUX_MMU_NOTIFIER_H
4 4
5#include <linux/types.h>
6#include <linux/list.h> 5#include <linux/list.h>
7#include <linux/spinlock.h> 6#include <linux/spinlock.h>
8#include <linux/mm_types.h> 7#include <linux/mm_types.h>
@@ -11,9 +10,6 @@
11struct mmu_notifier; 10struct mmu_notifier;
12struct mmu_notifier_ops; 11struct mmu_notifier_ops;
13 12
14/* mmu_notifier_ops flags */
15#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
16
17#ifdef CONFIG_MMU_NOTIFIER 13#ifdef CONFIG_MMU_NOTIFIER
18 14
19/* 15/*
@@ -31,15 +27,6 @@ struct mmu_notifier_mm {
31 27
32struct mmu_notifier_ops { 28struct mmu_notifier_ops {
33 /* 29 /*
34 * Flags to specify behavior of callbacks for this MMU notifier.
35 * Used to determine which context an operation may be called.
36 *
37 * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
38 * block
39 */
40 int flags;
41
42 /*
43 * Called either by mmu_notifier_unregister or when the mm is 30 * Called either by mmu_notifier_unregister or when the mm is
44 * being destroyed by exit_mmap, always before all pages are 31 * being destroyed by exit_mmap, always before all pages are
45 * freed. This can run concurrently with other mmu notifier 32 * freed. This can run concurrently with other mmu notifier
@@ -153,7 +140,9 @@ struct mmu_notifier_ops {
153 * 140 *
154 * If blockable argument is set to false then the callback cannot 141 * If blockable argument is set to false then the callback cannot
155 * sleep and has to return with -EAGAIN. 0 should be returned 142 * sleep and has to return with -EAGAIN. 0 should be returned
156 * otherwise. 143 * otherwise. Please note that if invalidate_range_start approves
144 * a non-blocking behavior then the same applies to
145 * invalidate_range_end.
157 * 146 *
158 */ 147 */
159 int (*invalidate_range_start)(struct mmu_notifier *mn, 148 int (*invalidate_range_start)(struct mmu_notifier *mn,
@@ -181,10 +170,6 @@ struct mmu_notifier_ops {
181 * Note that this function might be called with just a sub-range 170 * Note that this function might be called with just a sub-range
182 * of what was passed to invalidate_range_start()/end(), if 171 * of what was passed to invalidate_range_start()/end(), if
183 * called between those functions. 172 * called between those functions.
184 *
185 * If this callback cannot block, and invalidate_range_{start,end}
186 * cannot block, mmu_notifier_ops.flags should have
187 * MMU_INVALIDATE_DOES_NOT_BLOCK set.
188 */ 173 */
189 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, 174 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
190 unsigned long start, unsigned long end); 175 unsigned long start, unsigned long end);
@@ -239,7 +224,6 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
239 bool only_end); 224 bool only_end);
240extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, 225extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
241 unsigned long start, unsigned long end); 226 unsigned long start, unsigned long end);
242extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
243 227
244static inline void mmu_notifier_release(struct mm_struct *mm) 228static inline void mmu_notifier_release(struct mm_struct *mm)
245{ 229{
@@ -493,11 +477,6 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
493{ 477{
494} 478}
495 479
496static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
497{
498 return false;
499}
500
501static inline void mmu_notifier_mm_init(struct mm_struct *mm) 480static inline void mmu_notifier_mm_init(struct mm_struct *mm)
502{ 481{
503} 482}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1e22d96734e0..9f0caccd5833 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -161,8 +161,10 @@ enum node_stat_item {
161 NR_SLAB_UNRECLAIMABLE, 161 NR_SLAB_UNRECLAIMABLE,
162 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ 162 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
163 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ 163 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
164 WORKINGSET_NODES,
164 WORKINGSET_REFAULT, 165 WORKINGSET_REFAULT,
165 WORKINGSET_ACTIVATE, 166 WORKINGSET_ACTIVATE,
167 WORKINGSET_RESTORE,
166 WORKINGSET_NODERECLAIM, 168 WORKINGSET_NODERECLAIM,
167 NR_ANON_MAPPED, /* Mapped anonymous pages */ 169 NR_ANON_MAPPED, /* Mapped anonymous pages */
168 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 170 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
@@ -180,7 +182,7 @@ enum node_stat_item {
180 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ 182 NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
181 NR_DIRTIED, /* page dirtyings since bootup */ 183 NR_DIRTIED, /* page dirtyings since bootup */
182 NR_WRITTEN, /* page writings since bootup */ 184 NR_WRITTEN, /* page writings since bootup */
183 NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */ 185 NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
184 NR_VM_NODE_STAT_ITEMS 186 NR_VM_NODE_STAT_ITEMS
185}; 187};
186 188
@@ -668,16 +670,6 @@ typedef struct pglist_data {
668 wait_queue_head_t kcompactd_wait; 670 wait_queue_head_t kcompactd_wait;
669 struct task_struct *kcompactd; 671 struct task_struct *kcompactd;
670#endif 672#endif
671#ifdef CONFIG_NUMA_BALANCING
672 /* Lock serializing the migrate rate limiting window */
673 spinlock_t numabalancing_migrate_lock;
674
675 /* Rate limiting time interval */
676 unsigned long numabalancing_migrate_next_window;
677
678 /* Number of pages migrated during the rate limiting time interval */
679 unsigned long numabalancing_migrate_nr_pages;
680#endif
681 /* 673 /*
682 * This is a per-node reserve of pages that are not available 674 * This is a per-node reserve of pages that are not available
683 * to userspace allocations. 675 * to userspace allocations.
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 1298a7daa57d..01797cb4587e 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -754,6 +754,7 @@ struct tb_service_id {
754 * struct typec_device_id - USB Type-C alternate mode identifiers 754 * struct typec_device_id - USB Type-C alternate mode identifiers
755 * @svid: Standard or Vendor ID 755 * @svid: Standard or Vendor ID
756 * @mode: Mode index 756 * @mode: Mode index
757 * @driver_data: Driver specific data
757 */ 758 */
758struct typec_device_id { 759struct typec_device_id {
759 __u16 svid; 760 __u16 svid;
diff --git a/include/linux/module.h b/include/linux/module.h
index f807f15bebbe..fce6b4335e36 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -20,6 +20,7 @@
20#include <linux/export.h> 20#include <linux/export.h>
21#include <linux/rbtree_latch.h> 21#include <linux/rbtree_latch.h>
22#include <linux/error-injection.h> 22#include <linux/error-injection.h>
23#include <linux/tracepoint-defs.h>
23 24
24#include <linux/percpu.h> 25#include <linux/percpu.h>
25#include <asm/module.h> 26#include <asm/module.h>
@@ -123,7 +124,6 @@ extern void cleanup_module(void);
123#define late_initcall_sync(fn) module_init(fn) 124#define late_initcall_sync(fn) module_init(fn)
124 125
125#define console_initcall(fn) module_init(fn) 126#define console_initcall(fn) module_init(fn)
126#define security_initcall(fn) module_init(fn)
127 127
128/* Each module must use one module_init(). */ 128/* Each module must use one module_init(). */
129#define module_init(initfn) \ 129#define module_init(initfn) \
@@ -430,7 +430,7 @@ struct module {
430 430
431#ifdef CONFIG_TRACEPOINTS 431#ifdef CONFIG_TRACEPOINTS
432 unsigned int num_tracepoints; 432 unsigned int num_tracepoints;
433 struct tracepoint * const *tracepoints_ptrs; 433 tracepoint_ptr_t *tracepoints_ptrs;
434#endif 434#endif
435#ifdef HAVE_JUMP_LABEL 435#ifdef HAVE_JUMP_LABEL
436 struct jump_entry *jump_entries; 436 struct jump_entry *jump_entries;
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 6675b9f81979..34de06b426ef 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -7,6 +7,7 @@
7#include <net/net_namespace.h> 7#include <net/net_namespace.h>
8#include <net/sock.h> 8#include <net/sock.h>
9#include <net/fib_notifier.h> 9#include <net/fib_notifier.h>
10#include <net/ip_fib.h>
10 11
11/** 12/**
12 * struct vif_device - interface representor for multicast routing 13 * struct vif_device - interface representor for multicast routing
@@ -283,6 +284,12 @@ void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
283 284
284int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 285int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
285 struct mr_mfc *c, struct rtmsg *rtm); 286 struct mr_mfc *c, struct rtmsg *rtm);
287int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
288 struct netlink_callback *cb,
289 int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
290 u32 portid, u32 seq, struct mr_mfc *c,
291 int cmd, int flags),
292 spinlock_t *lock, struct fib_dump_filter *filter);
286int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, 293int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
287 struct mr_table *(*iter)(struct net *net, 294 struct mr_table *(*iter)(struct net *net,
288 struct mr_table *mrt), 295 struct mr_table *mrt),
@@ -290,7 +297,7 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
290 struct sk_buff *skb, 297 struct sk_buff *skb,
291 u32 portid, u32 seq, struct mr_mfc *c, 298 u32 portid, u32 seq, struct mr_mfc *c,
292 int cmd, int flags), 299 int cmd, int flags),
293 spinlock_t *lock); 300 spinlock_t *lock, struct fib_dump_filter *filter);
294 301
295int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, 302int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
296 int (*rules_dump)(struct net *net, 303 int (*rules_dump)(struct net *net,
@@ -340,7 +347,7 @@ mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
340 struct sk_buff *skb, 347 struct sk_buff *skb,
341 u32 portid, u32 seq, struct mr_mfc *c, 348 u32 portid, u32 seq, struct mr_mfc *c,
342 int cmd, int flags), 349 int cmd, int flags),
343 spinlock_t *lock) 350 spinlock_t *lock, struct fib_dump_filter *filter)
344{ 351{
345 return -EINVAL; 352 return -EINVAL;
346} 353}
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 5839d8062dfc..0e9c50052ff3 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -317,11 +317,18 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
317int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, 317int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
318 int virq, int nvec, msi_alloc_info_t *args); 318 int virq, int nvec, msi_alloc_info_t *args);
319struct irq_domain * 319struct irq_domain *
320platform_msi_create_device_domain(struct device *dev, 320__platform_msi_create_device_domain(struct device *dev,
321 unsigned int nvec, 321 unsigned int nvec,
322 irq_write_msi_msg_t write_msi_msg, 322 bool is_tree,
323 const struct irq_domain_ops *ops, 323 irq_write_msi_msg_t write_msi_msg,
324 void *host_data); 324 const struct irq_domain_ops *ops,
325 void *host_data);
326
327#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
328 __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
329#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
330 __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
331
325int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, 332int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
326 unsigned int nr_irqs); 333 unsigned int nr_irqs);
327void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, 334void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index e93837f647de..1d3ade69d39a 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -23,7 +23,6 @@
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/kref.h> 24#include <linux/kref.h>
25#include <linux/sysfs.h> 25#include <linux/sysfs.h>
26#include <linux/workqueue.h>
27 26
28struct hd_geometry; 27struct hd_geometry;
29struct mtd_info; 28struct mtd_info;
@@ -44,9 +43,9 @@ struct mtd_blktrans_dev {
44 struct kref ref; 43 struct kref ref;
45 struct gendisk *disk; 44 struct gendisk *disk;
46 struct attribute_group *disk_attributes; 45 struct attribute_group *disk_attributes;
47 struct workqueue_struct *wq;
48 struct work_struct work;
49 struct request_queue *rq; 46 struct request_queue *rq;
47 struct list_head rq_list;
48 struct blk_mq_tag_set *tag_set;
50 spinlock_t queue_lock; 49 spinlock_t queue_lock;
51 void *priv; 50 void *priv;
52 fmode_t file_mode; 51 fmode_t file_mode;
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
new file mode 100644
index 000000000000..0b6b59f7cfbd
--- /dev/null
+++ b/include/linux/mtd/jedec.h
@@ -0,0 +1,91 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
4 * Steven J. Hill <sjhill@realitydiluted.com>
5 * Thomas Gleixner <tglx@linutronix.de>
6 *
7 * Contains all JEDEC related definitions
8 */
9
10#ifndef __LINUX_MTD_JEDEC_H
11#define __LINUX_MTD_JEDEC_H
12
13struct jedec_ecc_info {
14 u8 ecc_bits;
15 u8 codeword_size;
16 __le16 bb_per_lun;
17 __le16 block_endurance;
18 u8 reserved[2];
19} __packed;
20
21/* JEDEC features */
22#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
23
24struct nand_jedec_params {
25 /* rev info and features block */
26 /* 'J' 'E' 'S' 'D' */
27 u8 sig[4];
28 __le16 revision;
29 __le16 features;
30 u8 opt_cmd[3];
31 __le16 sec_cmd;
32 u8 num_of_param_pages;
33 u8 reserved0[18];
34
35 /* manufacturer information block */
36 char manufacturer[12];
37 char model[20];
38 u8 jedec_id[6];
39 u8 reserved1[10];
40
41 /* memory organization block */
42 __le32 byte_per_page;
43 __le16 spare_bytes_per_page;
44 u8 reserved2[6];
45 __le32 pages_per_block;
46 __le32 blocks_per_lun;
47 u8 lun_count;
48 u8 addr_cycles;
49 u8 bits_per_cell;
50 u8 programs_per_page;
51 u8 multi_plane_addr;
52 u8 multi_plane_op_attr;
53 u8 reserved3[38];
54
55 /* electrical parameter block */
56 __le16 async_sdr_speed_grade;
57 __le16 toggle_ddr_speed_grade;
58 __le16 sync_ddr_speed_grade;
59 u8 async_sdr_features;
60 u8 toggle_ddr_features;
61 u8 sync_ddr_features;
62 __le16 t_prog;
63 __le16 t_bers;
64 __le16 t_r;
65 __le16 t_r_multi_plane;
66 __le16 t_ccs;
67 __le16 io_pin_capacitance_typ;
68 __le16 input_pin_capacitance_typ;
69 __le16 clk_pin_capacitance_typ;
70 u8 driver_strength_support;
71 __le16 t_adl;
72 u8 reserved4[36];
73
74 /* ECC and endurance block */
75 u8 guaranteed_good_blocks;
76 __le16 guaranteed_block_endurance;
77 struct jedec_ecc_info ecc_info[4];
78 u8 reserved5[29];
79
80 /* reserved */
81 u8 reserved6[148];
82
83 /* vendor */
84 __le16 vendor_rev_num;
85 u8 reserved7[88];
86
87 /* CRC for Parameter Page */
88 __le16 crc;
89} __packed;
90
91#endif /* __LINUX_MTD_JEDEC_H */
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index 98f20ef05d60..b8106651f807 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -12,6 +12,7 @@
12#define __MTD_NAND_BCH_H__ 12#define __MTD_NAND_BCH_H__
13 13
14struct mtd_info; 14struct mtd_info;
15struct nand_chip;
15struct nand_bch_control; 16struct nand_bch_control;
16 17
17#if defined(CONFIG_MTD_NAND_ECC_BCH) 18#if defined(CONFIG_MTD_NAND_ECC_BCH)
@@ -21,14 +22,14 @@ static inline int mtd_nand_has_bch(void) { return 1; }
21/* 22/*
22 * Calculate BCH ecc code 23 * Calculate BCH ecc code
23 */ 24 */
24int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 25int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
25 u_char *ecc_code); 26 u_char *ecc_code);
26 27
27/* 28/*
28 * Detect and correct bit errors 29 * Detect and correct bit errors
29 */ 30 */
30int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, 31int nand_bch_correct_data(struct nand_chip *chip, u_char *dat,
31 u_char *calc_ecc); 32 u_char *read_ecc, u_char *calc_ecc);
32/* 33/*
33 * Initialize BCH encoder/decoder 34 * Initialize BCH encoder/decoder
34 */ 35 */
@@ -43,14 +44,14 @@ void nand_bch_free(struct nand_bch_control *nbc);
43static inline int mtd_nand_has_bch(void) { return 0; } 44static inline int mtd_nand_has_bch(void) { return 0; }
44 45
45static inline int 46static inline int
46nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 47nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
47 u_char *ecc_code) 48 u_char *ecc_code)
48{ 49{
49 return -1; 50 return -1;
50} 51}
51 52
52static inline int 53static inline int
53nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, 54nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
54 unsigned char *read_ecc, unsigned char *calc_ecc) 55 unsigned char *read_ecc, unsigned char *calc_ecc)
55{ 56{
56 return -ENOTSUPP; 57 return -ENOTSUPP;
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 8a2decf7462c..0b3bb156c344 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -13,28 +13,30 @@
13#ifndef __MTD_NAND_ECC_H__ 13#ifndef __MTD_NAND_ECC_H__
14#define __MTD_NAND_ECC_H__ 14#define __MTD_NAND_ECC_H__
15 15
16struct mtd_info; 16struct nand_chip;
17 17
18/* 18/*
19 * Calculate 3 byte ECC code for eccsize byte block 19 * Calculate 3 byte ECC code for eccsize byte block
20 */ 20 */
21void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, 21void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
22 u_char *ecc_code); 22 u_char *ecc_code, bool sm_order);
23 23
24/* 24/*
25 * Calculate 3 byte ECC code for 256/512 byte block 25 * Calculate 3 byte ECC code for 256/512 byte block
26 */ 26 */
27int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); 27int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
28 u_char *ecc_code);
28 29
29/* 30/*
30 * Detect and correct a 1 bit error for eccsize byte block 31 * Detect and correct a 1 bit error for eccsize byte block
31 */ 32 */
32int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, 33int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
33 unsigned int eccsize); 34 unsigned int eccsize, bool sm_order);
34 35
35/* 36/*
36 * Detect and correct a 1 bit error for 256/512 byte block 37 * Detect and correct a 1 bit error for 256/512 byte block
37 */ 38 */
38int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); 39int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc,
40 u_char *calc_ecc);
39 41
40#endif /* __MTD_NAND_ECC_H__ */ 42#endif /* __MTD_NAND_ECC_H__ */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
new file mode 100644
index 000000000000..339ac798568e
--- /dev/null
+++ b/include/linux/mtd/onfi.h
@@ -0,0 +1,178 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
4 * Steven J. Hill <sjhill@realitydiluted.com>
5 * Thomas Gleixner <tglx@linutronix.de>
6 *
7 * Contains all ONFI related definitions
8 */
9
10#ifndef __LINUX_MTD_ONFI_H
11#define __LINUX_MTD_ONFI_H
12
13#include <linux/types.h>
14
15/* ONFI version bits */
16#define ONFI_VERSION_1_0 BIT(1)
17#define ONFI_VERSION_2_0 BIT(2)
18#define ONFI_VERSION_2_1 BIT(3)
19#define ONFI_VERSION_2_2 BIT(4)
20#define ONFI_VERSION_2_3 BIT(5)
21#define ONFI_VERSION_3_0 BIT(6)
22#define ONFI_VERSION_3_1 BIT(7)
23#define ONFI_VERSION_3_2 BIT(8)
24#define ONFI_VERSION_4_0 BIT(9)
25
26/* ONFI features */
27#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
28#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
29
30/* ONFI timing mode, used in both asynchronous and synchronous mode */
31#define ONFI_TIMING_MODE_0 (1 << 0)
32#define ONFI_TIMING_MODE_1 (1 << 1)
33#define ONFI_TIMING_MODE_2 (1 << 2)
34#define ONFI_TIMING_MODE_3 (1 << 3)
35#define ONFI_TIMING_MODE_4 (1 << 4)
36#define ONFI_TIMING_MODE_5 (1 << 5)
37#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
38
39/* ONFI feature number/address */
40#define ONFI_FEATURE_NUMBER 256
41#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
42
43/* Vendor-specific feature address (Micron) */
44#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
45#define ONFI_FEATURE_ON_DIE_ECC 0x90
46#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
47
48/* ONFI subfeature parameters length */
49#define ONFI_SUBFEATURE_PARAM_LEN 4
50
51/* ONFI optional commands SET/GET FEATURES supported? */
52#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
53
54struct nand_onfi_params {
55 /* rev info and features block */
56 /* 'O' 'N' 'F' 'I' */
57 u8 sig[4];
58 __le16 revision;
59 __le16 features;
60 __le16 opt_cmd;
61 u8 reserved0[2];
62 __le16 ext_param_page_length; /* since ONFI 2.1 */
63 u8 num_of_param_pages; /* since ONFI 2.1 */
64 u8 reserved1[17];
65
66 /* manufacturer information block */
67 char manufacturer[12];
68 char model[20];
69 u8 jedec_id;
70 __le16 date_code;
71 u8 reserved2[13];
72
73 /* memory organization block */
74 __le32 byte_per_page;
75 __le16 spare_bytes_per_page;
76 __le32 data_bytes_per_ppage;
77 __le16 spare_bytes_per_ppage;
78 __le32 pages_per_block;
79 __le32 blocks_per_lun;
80 u8 lun_count;
81 u8 addr_cycles;
82 u8 bits_per_cell;
83 __le16 bb_per_lun;
84 __le16 block_endurance;
85 u8 guaranteed_good_blocks;
86 __le16 guaranteed_block_endurance;
87 u8 programs_per_page;
88 u8 ppage_attr;
89 u8 ecc_bits;
90 u8 interleaved_bits;
91 u8 interleaved_ops;
92 u8 reserved3[13];
93
94 /* electrical parameter block */
95 u8 io_pin_capacitance_max;
96 __le16 async_timing_mode;
97 __le16 program_cache_timing_mode;
98 __le16 t_prog;
99 __le16 t_bers;
100 __le16 t_r;
101 __le16 t_ccs;
102 __le16 src_sync_timing_mode;
103 u8 src_ssync_features;
104 __le16 clk_pin_capacitance_typ;
105 __le16 io_pin_capacitance_typ;
106 __le16 input_pin_capacitance_typ;
107 u8 input_pin_capacitance_max;
108 u8 driver_strength_support;
109 __le16 t_int_r;
110 __le16 t_adl;
111 u8 reserved4[8];
112
113 /* vendor */
114 __le16 vendor_revision;
115 u8 vendor[88];
116
117 __le16 crc;
118} __packed;
119
120#define ONFI_CRC_BASE 0x4F4E
121
122/* Extended ECC information Block Definition (since ONFI 2.1) */
123struct onfi_ext_ecc_info {
124 u8 ecc_bits;
125 u8 codeword_size;
126 __le16 bb_per_lun;
127 __le16 block_endurance;
128 u8 reserved[2];
129} __packed;
130
131#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
132#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
133#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
134struct onfi_ext_section {
135 u8 type;
136 u8 length;
137} __packed;
138
139#define ONFI_EXT_SECTION_MAX 8
140
141/* Extended Parameter Page Definition (since ONFI 2.1) */
142struct onfi_ext_param_page {
143 __le16 crc;
144 u8 sig[4]; /* 'E' 'P' 'P' 'S' */
145 u8 reserved0[10];
146 struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
147
148 /*
149 * The actual size of the Extended Parameter Page is in
150 * @ext_param_page_length of nand_onfi_params{}.
151 * The following are the variable length sections.
152 * So we do not add any fields below. Please see the ONFI spec.
153 */
154} __packed;
155
156/**
157 * struct onfi_params - ONFI specific parameters that will be reused
158 * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
159 * @tPROG: Page program time
160 * @tBERS: Block erase time
161 * @tR: Page read time
162 * @tCCS: Change column setup time
163 * @async_timing_mode: Supported asynchronous timing mode
164 * @vendor_revision: Vendor specific revision number
165 * @vendor: Vendor specific data
166 */
167struct onfi_params {
168 int version;
169 u16 tPROG;
170 u16 tBERS;
171 u16 tR;
172 u16 tCCS;
173 u16 async_timing_mode;
174 u16 vendor_revision;
175 u8 vendor[88];
176};
177
178#endif /* __LINUX_MTD_ONFI_H */
diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h
new file mode 100644
index 000000000000..bc11eb6b593b
--- /dev/null
+++ b/include/linux/mtd/platnand.h
@@ -0,0 +1,74 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
4 * Steven J. Hill <sjhill@realitydiluted.com>
5 * Thomas Gleixner <tglx@linutronix.de>
6 *
7 * Contains all platform NAND related definitions.
8 */
9
10#ifndef __LINUX_MTD_PLATNAND_H
11#define __LINUX_MTD_PLATNAND_H
12
13#include <linux/mtd/partitions.h>
14#include <linux/mtd/rawnand.h>
15#include <linux/platform_device.h>
16
17/**
18 * struct platform_nand_chip - chip level device structure
19 * @nr_chips: max. number of chips to scan for
20 * @chip_offset: chip number offset
21 * @nr_partitions: number of partitions pointed to by partitions (or zero)
22 * @partitions: mtd partition list
23 * @chip_delay: R/B delay value in us
24 * @options: Option flags, e.g. 16bit buswidth
25 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
26 * @part_probe_types: NULL-terminated array of probe types
27 */
28struct platform_nand_chip {
29 int nr_chips;
30 int chip_offset;
31 int nr_partitions;
32 struct mtd_partition *partitions;
33 int chip_delay;
34 unsigned int options;
35 unsigned int bbt_options;
36 const char **part_probe_types;
37};
38
39/**
40 * struct platform_nand_ctrl - controller level device structure
41 * @probe: platform specific function to probe/setup hardware
42 * @remove: platform specific function to remove/teardown hardware
43 * @dev_ready: platform specific function to read ready/busy pin
44 * @select_chip: platform specific chip select function
45 * @cmd_ctrl: platform specific function for controlling
46 * ALE/CLE/nCE. Also used to write command and address
47 * @write_buf: platform specific function for write buffer
48 * @read_buf: platform specific function for read buffer
49 * @priv: private data to transport driver specific settings
50 *
51 * All fields are optional and depend on the hardware driver requirements
52 */
53struct platform_nand_ctrl {
54 int (*probe)(struct platform_device *pdev);
55 void (*remove)(struct platform_device *pdev);
56 int (*dev_ready)(struct nand_chip *chip);
57 void (*select_chip)(struct nand_chip *chip, int cs);
58 void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
59 void (*write_buf)(struct nand_chip *chip, const uint8_t *buf, int len);
60 void (*read_buf)(struct nand_chip *chip, uint8_t *buf, int len);
61 void *priv;
62};
63
64/**
65 * struct platform_nand_data - container structure for platform-specific data
66 * @chip: chip level chip structure
67 * @ctrl: controller level device structure
68 */
69struct platform_nand_data {
70 struct platform_nand_chip chip;
71 struct platform_nand_ctrl ctrl;
72};
73
74#endif /* __LINUX_MTD_PLATNAND_H */
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index efb2345359bb..e10b126e148f 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,22 +21,12 @@
21#include <linux/mtd/mtd.h> 21#include <linux/mtd/mtd.h>
22#include <linux/mtd/flashchip.h> 22#include <linux/mtd/flashchip.h>
23#include <linux/mtd/bbm.h> 23#include <linux/mtd/bbm.h>
24#include <linux/mtd/jedec.h>
25#include <linux/mtd/onfi.h>
24#include <linux/of.h> 26#include <linux/of.h>
25#include <linux/types.h> 27#include <linux/types.h>
26 28
27struct nand_flash_dev; 29struct nand_chip;
28
29/* Scan and identify a NAND device */
30int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
31 struct nand_flash_dev *ids);
32
33static inline int nand_scan(struct mtd_info *mtd, int max_chips)
34{
35 return nand_scan_with_ids(mtd, max_chips, NULL);
36}
37
38/* Internal helper for board drivers which need to override command function */
39void nand_wait_ready(struct mtd_info *mtd);
40 30
41/* The maximum number of NAND chips in an array */ 31/* The maximum number of NAND chips in an array */
42#define NAND_MAX_CHIPS 8 32#define NAND_MAX_CHIPS 8
@@ -131,9 +121,11 @@ enum nand_ecc_algo {
131#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) 121#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
132#define NAND_ECC_MAXIMIZE BIT(1) 122#define NAND_ECC_MAXIMIZE BIT(1)
133 123
134/* Bit mask for flags passed to do_nand_read_ecc */ 124/*
135#define NAND_GET_DEVICE 0x80 125 * When using software implementation of Hamming, we can specify which byte
136 126 * ordering should be used.
127 */
128#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
137 129
138/* 130/*
139 * Option constants for bizarre disfunctionality and real 131 * Option constants for bizarre disfunctionality and real
@@ -175,9 +167,7 @@ enum nand_ecc_algo {
175#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG 167#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
176 168
177/* Macros to identify the above */ 169/* Macros to identify the above */
178#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
179#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) 170#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
180#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE)
181 171
182/* Non chip related options */ 172/* Non chip related options */
183/* This option skips the bbt scan during initialization. */ 173/* This option skips the bbt scan during initialization. */
@@ -198,10 +188,10 @@ enum nand_ecc_algo {
198#define NAND_USE_BOUNCE_BUFFER 0x00100000 188#define NAND_USE_BOUNCE_BUFFER 0x00100000
199 189
200/* 190/*
201 * In case your controller is implementing ->cmd_ctrl() and is relying on the 191 * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
202 * default ->cmdfunc() implementation, you may want to let the core handle the 192 * on the default ->cmdfunc() implementation, you may want to let the core
203 * tCCS delay which is required when a column change (RNDIN or RNDOUT) is 193 * handle the tCCS delay which is required when a column change (RNDIN or
204 * requested. 194 * RNDOUT) is requested.
205 * If your controller already takes care of this delay, you don't need to set 195 * If your controller already takes care of this delay, you don't need to set
206 * this flag. 196 * this flag.
207 */ 197 */
@@ -222,250 +212,6 @@ enum nand_ecc_algo {
222#define NAND_CI_CELLTYPE_MSK 0x0C 212#define NAND_CI_CELLTYPE_MSK 0x0C
223#define NAND_CI_CELLTYPE_SHIFT 2 213#define NAND_CI_CELLTYPE_SHIFT 2
224 214
225/* Keep gcc happy */
226struct nand_chip;
227
228/* ONFI version bits */
229#define ONFI_VERSION_1_0 BIT(1)
230#define ONFI_VERSION_2_0 BIT(2)
231#define ONFI_VERSION_2_1 BIT(3)
232#define ONFI_VERSION_2_2 BIT(4)
233#define ONFI_VERSION_2_3 BIT(5)
234#define ONFI_VERSION_3_0 BIT(6)
235#define ONFI_VERSION_3_1 BIT(7)
236#define ONFI_VERSION_3_2 BIT(8)
237#define ONFI_VERSION_4_0 BIT(9)
238
239/* ONFI features */
240#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
241#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
242
243/* ONFI timing mode, used in both asynchronous and synchronous mode */
244#define ONFI_TIMING_MODE_0 (1 << 0)
245#define ONFI_TIMING_MODE_1 (1 << 1)
246#define ONFI_TIMING_MODE_2 (1 << 2)
247#define ONFI_TIMING_MODE_3 (1 << 3)
248#define ONFI_TIMING_MODE_4 (1 << 4)
249#define ONFI_TIMING_MODE_5 (1 << 5)
250#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
251
252/* ONFI feature number/address */
253#define ONFI_FEATURE_NUMBER 256
254#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
255
256/* Vendor-specific feature address (Micron) */
257#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
258#define ONFI_FEATURE_ON_DIE_ECC 0x90
259#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
260
261/* ONFI subfeature parameters length */
262#define ONFI_SUBFEATURE_PARAM_LEN 4
263
264/* ONFI optional commands SET/GET FEATURES supported? */
265#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
266
267struct nand_onfi_params {
268 /* rev info and features block */
269 /* 'O' 'N' 'F' 'I' */
270 u8 sig[4];
271 __le16 revision;
272 __le16 features;
273 __le16 opt_cmd;
274 u8 reserved0[2];
275 __le16 ext_param_page_length; /* since ONFI 2.1 */
276 u8 num_of_param_pages; /* since ONFI 2.1 */
277 u8 reserved1[17];
278
279 /* manufacturer information block */
280 char manufacturer[12];
281 char model[20];
282 u8 jedec_id;
283 __le16 date_code;
284 u8 reserved2[13];
285
286 /* memory organization block */
287 __le32 byte_per_page;
288 __le16 spare_bytes_per_page;
289 __le32 data_bytes_per_ppage;
290 __le16 spare_bytes_per_ppage;
291 __le32 pages_per_block;
292 __le32 blocks_per_lun;
293 u8 lun_count;
294 u8 addr_cycles;
295 u8 bits_per_cell;
296 __le16 bb_per_lun;
297 __le16 block_endurance;
298 u8 guaranteed_good_blocks;
299 __le16 guaranteed_block_endurance;
300 u8 programs_per_page;
301 u8 ppage_attr;
302 u8 ecc_bits;
303 u8 interleaved_bits;
304 u8 interleaved_ops;
305 u8 reserved3[13];
306
307 /* electrical parameter block */
308 u8 io_pin_capacitance_max;
309 __le16 async_timing_mode;
310 __le16 program_cache_timing_mode;
311 __le16 t_prog;
312 __le16 t_bers;
313 __le16 t_r;
314 __le16 t_ccs;
315 __le16 src_sync_timing_mode;
316 u8 src_ssync_features;
317 __le16 clk_pin_capacitance_typ;
318 __le16 io_pin_capacitance_typ;
319 __le16 input_pin_capacitance_typ;
320 u8 input_pin_capacitance_max;
321 u8 driver_strength_support;
322 __le16 t_int_r;
323 __le16 t_adl;
324 u8 reserved4[8];
325
326 /* vendor */
327 __le16 vendor_revision;
328 u8 vendor[88];
329
330 __le16 crc;
331} __packed;
332
333#define ONFI_CRC_BASE 0x4F4E
334
335/* Extended ECC information Block Definition (since ONFI 2.1) */
336struct onfi_ext_ecc_info {
337 u8 ecc_bits;
338 u8 codeword_size;
339 __le16 bb_per_lun;
340 __le16 block_endurance;
341 u8 reserved[2];
342} __packed;
343
344#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
345#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
346#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
347struct onfi_ext_section {
348 u8 type;
349 u8 length;
350} __packed;
351
352#define ONFI_EXT_SECTION_MAX 8
353
354/* Extended Parameter Page Definition (since ONFI 2.1) */
355struct onfi_ext_param_page {
356 __le16 crc;
357 u8 sig[4]; /* 'E' 'P' 'P' 'S' */
358 u8 reserved0[10];
359 struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
360
361 /*
362 * The actual size of the Extended Parameter Page is in
363 * @ext_param_page_length of nand_onfi_params{}.
364 * The following are the variable length sections.
365 * So we do not add any fields below. Please see the ONFI spec.
366 */
367} __packed;
368
369struct jedec_ecc_info {
370 u8 ecc_bits;
371 u8 codeword_size;
372 __le16 bb_per_lun;
373 __le16 block_endurance;
374 u8 reserved[2];
375} __packed;
376
377/* JEDEC features */
378#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
379
380struct nand_jedec_params {
381 /* rev info and features block */
382 /* 'J' 'E' 'S' 'D' */
383 u8 sig[4];
384 __le16 revision;
385 __le16 features;
386 u8 opt_cmd[3];
387 __le16 sec_cmd;
388 u8 num_of_param_pages;
389 u8 reserved0[18];
390
391 /* manufacturer information block */
392 char manufacturer[12];
393 char model[20];
394 u8 jedec_id[6];
395 u8 reserved1[10];
396
397 /* memory organization block */
398 __le32 byte_per_page;
399 __le16 spare_bytes_per_page;
400 u8 reserved2[6];
401 __le32 pages_per_block;
402 __le32 blocks_per_lun;
403 u8 lun_count;
404 u8 addr_cycles;
405 u8 bits_per_cell;
406 u8 programs_per_page;
407 u8 multi_plane_addr;
408 u8 multi_plane_op_attr;
409 u8 reserved3[38];
410
411 /* electrical parameter block */
412 __le16 async_sdr_speed_grade;
413 __le16 toggle_ddr_speed_grade;
414 __le16 sync_ddr_speed_grade;
415 u8 async_sdr_features;
416 u8 toggle_ddr_features;
417 u8 sync_ddr_features;
418 __le16 t_prog;
419 __le16 t_bers;
420 __le16 t_r;
421 __le16 t_r_multi_plane;
422 __le16 t_ccs;
423 __le16 io_pin_capacitance_typ;
424 __le16 input_pin_capacitance_typ;
425 __le16 clk_pin_capacitance_typ;
426 u8 driver_strength_support;
427 __le16 t_adl;
428 u8 reserved4[36];
429
430 /* ECC and endurance block */
431 u8 guaranteed_good_blocks;
432 __le16 guaranteed_block_endurance;
433 struct jedec_ecc_info ecc_info[4];
434 u8 reserved5[29];
435
436 /* reserved */
437 u8 reserved6[148];
438
439 /* vendor */
440 __le16 vendor_rev_num;
441 u8 reserved7[88];
442
443 /* CRC for Parameter Page */
444 __le16 crc;
445} __packed;
446
447/**
448 * struct onfi_params - ONFI specific parameters that will be reused
449 * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
450 * @tPROG: Page program time
451 * @tBERS: Block erase time
452 * @tR: Page read time
453 * @tCCS: Change column setup time
454 * @async_timing_mode: Supported asynchronous timing mode
455 * @vendor_revision: Vendor specific revision number
456 * @vendor: Vendor specific data
457 */
458struct onfi_params {
459 int version;
460 u16 tPROG;
461 u16 tBERS;
462 u16 tR;
463 u16 tCCS;
464 u16 async_timing_mode;
465 u16 vendor_revision;
466 u8 vendor[88];
467};
468
469/** 215/**
470 * struct nand_parameters - NAND generic parameters from the parameter page 216 * struct nand_parameters - NAND generic parameters from the parameter page
471 * @model: Model name 217 * @model: Model name
@@ -646,31 +392,28 @@ struct nand_ecc_ctrl {
646 void *priv; 392 void *priv;
647 u8 *calc_buf; 393 u8 *calc_buf;
648 u8 *code_buf; 394 u8 *code_buf;
649 void (*hwctl)(struct mtd_info *mtd, int mode); 395 void (*hwctl)(struct nand_chip *chip, int mode);
650 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, 396 int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
651 uint8_t *ecc_code); 397 uint8_t *ecc_code);
652 int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, 398 int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
653 uint8_t *calc_ecc); 399 uint8_t *calc_ecc);
654 int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, 400 int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
655 uint8_t *buf, int oob_required, int page); 401 int oob_required, int page);
656 int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, 402 int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
657 const uint8_t *buf, int oob_required, int page); 403 int oob_required, int page);
658 int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, 404 int (*read_page)(struct nand_chip *chip, uint8_t *buf,
659 uint8_t *buf, int oob_required, int page); 405 int oob_required, int page);
660 int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, 406 int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
661 uint32_t offs, uint32_t len, uint8_t *buf, int page); 407 uint32_t len, uint8_t *buf, int page);
662 int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, 408 int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
663 uint32_t offset, uint32_t data_len, 409 uint32_t data_len, const uint8_t *data_buf,
664 const uint8_t *data_buf, int oob_required, int page); 410 int oob_required, int page);
665 int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, 411 int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
666 const uint8_t *buf, int oob_required, int page); 412 int oob_required, int page);
667 int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, 413 int (*write_oob_raw)(struct nand_chip *chip, int page);
668 int page); 414 int (*read_oob_raw)(struct nand_chip *chip, int page);
669 int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, 415 int (*read_oob)(struct nand_chip *chip, int page);
670 int page); 416 int (*write_oob)(struct nand_chip *chip, int page);
671 int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
672 int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
673 int page);
674}; 417};
675 418
676/** 419/**
@@ -800,24 +543,6 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
800} 543}
801 544
802/** 545/**
803 * struct nand_manufacturer_ops - NAND Manufacturer operations
804 * @detect: detect the NAND memory organization and capabilities
805 * @init: initialize all vendor specific fields (like the ->read_retry()
806 * implementation) if any.
807 * @cleanup: the ->init() function may have allocated resources, ->cleanup()
808 * is here to let vendor specific code release those resources.
809 * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
810 * page. This is called after the checksum is verified.
811 */
812struct nand_manufacturer_ops {
813 void (*detect)(struct nand_chip *chip);
814 int (*init)(struct nand_chip *chip);
815 void (*cleanup)(struct nand_chip *chip);
816 void (*fixup_onfi_param_page)(struct nand_chip *chip,
817 struct nand_onfi_params *p);
818};
819
820/**
821 * struct nand_op_cmd_instr - Definition of a command instruction 546 * struct nand_op_cmd_instr - Definition of a command instruction
822 * @opcode: the command to issue in one cycle 547 * @opcode: the command to issue in one cycle
823 */ 548 */
@@ -1175,44 +900,72 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
1175 const struct nand_operation *op, bool check_only); 900 const struct nand_operation *op, bool check_only);
1176 901
1177/** 902/**
903 * struct nand_legacy - NAND chip legacy fields/hooks
904 * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
905 * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
906 * @read_byte: read one byte from the chip
907 * @write_byte: write a single byte to the chip on the low 8 I/O lines
908 * @write_buf: write data from the buffer to the chip
909 * @read_buf: read data from the chip into the buffer
910 * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
911 * to write command and address
912 * @cmdfunc: hardware specific function for writing commands to the chip.
913 * @dev_ready: hardware specific function for accessing device ready/busy line.
914 * If set to NULL no access to ready/busy is available and the
915 * ready/busy information is read from the chip status register.
916 * @waitfunc: hardware specific function for wait on ready.
917 * @block_bad: check if a block is bad, using OOB markers
918 * @block_markbad: mark a block bad
919 * @erase: erase function
920 * @set_features: set the NAND chip features
921 * @get_features: get the NAND chip features
922 * @chip_delay: chip dependent delay for transferring data from array to read
923 * regs (tR).
924 *
925 * If you look at this structure you're already wrong. These fields/hooks are
926 * all deprecated.
927 */
928struct nand_legacy {
929 void __iomem *IO_ADDR_R;
930 void __iomem *IO_ADDR_W;
931 u8 (*read_byte)(struct nand_chip *chip);
932 void (*write_byte)(struct nand_chip *chip, u8 byte);
933 void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
934 void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
935 void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
936 void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
937 int page_addr);
938 int (*dev_ready)(struct nand_chip *chip);
939 int (*waitfunc)(struct nand_chip *chip);
940 int (*block_bad)(struct nand_chip *chip, loff_t ofs);
941 int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
942 int (*erase)(struct nand_chip *chip, int page);
943 int (*set_features)(struct nand_chip *chip, int feature_addr,
944 u8 *subfeature_para);
945 int (*get_features)(struct nand_chip *chip, int feature_addr,
946 u8 *subfeature_para);
947 int chip_delay;
948};
949
950/**
1178 * struct nand_chip - NAND Private Flash Chip Data 951 * struct nand_chip - NAND Private Flash Chip Data
1179 * @mtd: MTD device registered to the MTD framework 952 * @mtd: MTD device registered to the MTD framework
1180 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the 953 * @legacy: All legacy fields/hooks. If you develop a new driver,
1181 * flash device 954 * don't even try to use any of these fields/hooks, and if
1182 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the 955 * you're modifying an existing driver that is using those
1183 * flash device. 956 * fields/hooks, you should consider reworking the driver
1184 * @read_byte: [REPLACEABLE] read one byte from the chip 957 * avoid using them.
1185 * @read_word: [REPLACEABLE] read one word from the chip
1186 * @write_byte: [REPLACEABLE] write a single byte to the chip on the
1187 * low 8 I/O lines
1188 * @write_buf: [REPLACEABLE] write data from the buffer to the chip
1189 * @read_buf: [REPLACEABLE] read data from the chip into the buffer
1190 * @select_chip: [REPLACEABLE] select chip nr 958 * @select_chip: [REPLACEABLE] select chip nr
1191 * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers
1192 * @block_markbad: [REPLACEABLE] mark a block bad
1193 * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
1194 * ALE/CLE/nCE. Also used to write command and address
1195 * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing
1196 * device ready/busy line. If set to NULL no access to
1197 * ready/busy is available and the ready/busy information
1198 * is read from the chip status register.
1199 * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing
1200 * commands to the chip.
1201 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
1202 * ready.
1203 * @exec_op: controller specific method to execute NAND operations. 959 * @exec_op: controller specific method to execute NAND operations.
1204 * This method replaces ->cmdfunc(), 960 * This method replaces ->cmdfunc(),
1205 * ->{read,write}_{buf,byte,word}(), ->dev_ready() and 961 * ->legacy.{read,write}_{buf,byte,word}(),
1206 * ->waifunc(). 962 * ->legacy.dev_ready() and ->waifunc().
1207 * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for 963 * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
1208 * setting the read-retry mode. Mostly needed for MLC NAND. 964 * setting the read-retry mode. Mostly needed for MLC NAND.
1209 * @ecc: [BOARDSPECIFIC] ECC control structure 965 * @ecc: [BOARDSPECIFIC] ECC control structure
1210 * @buf_align: minimum buffer alignment required by a platform 966 * @buf_align: minimum buffer alignment required by a platform
1211 * @dummy_controller: dummy controller implementation for drivers that can 967 * @dummy_controller: dummy controller implementation for drivers that can
1212 * only control a single chip 968 * only control a single chip
1213 * @erase: [REPLACEABLE] erase function
1214 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
1215 * data from array to read regs (tR).
1216 * @state: [INTERN] the current state of the NAND device 969 * @state: [INTERN] the current state of the NAND device
1217 * @oob_poi: "poison value buffer," used for laying out OOB data 970 * @oob_poi: "poison value buffer," used for laying out OOB data
1218 * before writing 971 * before writing
@@ -1260,8 +1013,6 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
1260 * @blocks_per_die: [INTERN] The number of PEBs in a die 1013 * @blocks_per_die: [INTERN] The number of PEBs in a die
1261 * @data_interface: [INTERN] NAND interface timing information 1014 * @data_interface: [INTERN] NAND interface timing information
1262 * @read_retries: [INTERN] the number of read retry modes supported 1015 * @read_retries: [INTERN] the number of read retry modes supported
1263 * @set_features: [REPLACEABLE] set the NAND chip features
1264 * @get_features: [REPLACEABLE] get the NAND chip features
1265 * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If 1016 * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
1266 * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this 1017 * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
1267 * means the configuration should not be applied but 1018 * means the configuration should not be applied but
@@ -1283,35 +1034,17 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
1283 1034
1284struct nand_chip { 1035struct nand_chip {
1285 struct mtd_info mtd; 1036 struct mtd_info mtd;
1286 void __iomem *IO_ADDR_R;
1287 void __iomem *IO_ADDR_W;
1288 1037
1289 uint8_t (*read_byte)(struct mtd_info *mtd); 1038 struct nand_legacy legacy;
1290 u16 (*read_word)(struct mtd_info *mtd); 1039
1291 void (*write_byte)(struct mtd_info *mtd, uint8_t byte); 1040 void (*select_chip)(struct nand_chip *chip, int cs);
1292 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
1293 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
1294 void (*select_chip)(struct mtd_info *mtd, int chip);
1295 int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
1296 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
1297 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
1298 int (*dev_ready)(struct mtd_info *mtd);
1299 void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
1300 int page_addr);
1301 int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
1302 int (*exec_op)(struct nand_chip *chip, 1041 int (*exec_op)(struct nand_chip *chip,
1303 const struct nand_operation *op, 1042 const struct nand_operation *op,
1304 bool check_only); 1043 bool check_only);
1305 int (*erase)(struct mtd_info *mtd, int page); 1044 int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
1306 int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip, 1045 int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
1307 int feature_addr, uint8_t *subfeature_para);
1308 int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
1309 int feature_addr, uint8_t *subfeature_para);
1310 int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
1311 int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
1312 const struct nand_data_interface *conf); 1046 const struct nand_data_interface *conf);
1313 1047
1314 int chip_delay;
1315 unsigned int options; 1048 unsigned int options;
1316 unsigned int bbt_options; 1049 unsigned int bbt_options;
1317 1050
@@ -1420,27 +1153,6 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
1420} 1153}
1421 1154
1422/* 1155/*
1423 * NAND Flash Manufacturer ID Codes
1424 */
1425#define NAND_MFR_TOSHIBA 0x98
1426#define NAND_MFR_ESMT 0xc8
1427#define NAND_MFR_SAMSUNG 0xec
1428#define NAND_MFR_FUJITSU 0x04
1429#define NAND_MFR_NATIONAL 0x8f
1430#define NAND_MFR_RENESAS 0x07
1431#define NAND_MFR_STMICRO 0x20
1432#define NAND_MFR_HYNIX 0xad
1433#define NAND_MFR_MICRON 0x2c
1434#define NAND_MFR_AMD 0x01
1435#define NAND_MFR_MACRONIX 0xc2
1436#define NAND_MFR_EON 0x92
1437#define NAND_MFR_SANDISK 0x45
1438#define NAND_MFR_INTEL 0x89
1439#define NAND_MFR_ATO 0x9b
1440#define NAND_MFR_WINBOND 0xef
1441
1442
1443/*
1444 * A helper for defining older NAND chips where the second ID byte fully 1156 * A helper for defining older NAND chips where the second ID byte fully
1445 * defined the chip, including the geometry (chip size, eraseblock size, page 1157 * defined the chip, including the geometry (chip size, eraseblock size, page
1446 * size). All these chips have 512 bytes NAND page size. 1158 * size). All these chips have 512 bytes NAND page size.
@@ -1519,114 +1231,7 @@ struct nand_flash_dev {
1519 int onfi_timing_mode_default; 1231 int onfi_timing_mode_default;
1520}; 1232};
1521 1233
1522/**
1523 * struct nand_manufacturer - NAND Flash Manufacturer structure
1524 * @name: Manufacturer name
1525 * @id: manufacturer ID code of device.
1526 * @ops: manufacturer operations
1527*/
1528struct nand_manufacturer {
1529 int id;
1530 char *name;
1531 const struct nand_manufacturer_ops *ops;
1532};
1533
1534const struct nand_manufacturer *nand_get_manufacturer(u8 id);
1535
1536static inline const char *
1537nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
1538{
1539 return manufacturer ? manufacturer->name : "Unknown";
1540}
1541
1542extern struct nand_flash_dev nand_flash_ids[];
1543
1544extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
1545extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
1546extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
1547extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
1548extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
1549extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
1550
1551int nand_create_bbt(struct nand_chip *chip); 1234int nand_create_bbt(struct nand_chip *chip);
1552int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
1553int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
1554int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
1555int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
1556 int allowbbt);
1557
1558/**
1559 * struct platform_nand_chip - chip level device structure
1560 * @nr_chips: max. number of chips to scan for
1561 * @chip_offset: chip number offset
1562 * @nr_partitions: number of partitions pointed to by partitions (or zero)
1563 * @partitions: mtd partition list
1564 * @chip_delay: R/B delay value in us
1565 * @options: Option flags, e.g. 16bit buswidth
1566 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
1567 * @part_probe_types: NULL-terminated array of probe types
1568 */
1569struct platform_nand_chip {
1570 int nr_chips;
1571 int chip_offset;
1572 int nr_partitions;
1573 struct mtd_partition *partitions;
1574 int chip_delay;
1575 unsigned int options;
1576 unsigned int bbt_options;
1577 const char **part_probe_types;
1578};
1579
1580/* Keep gcc happy */
1581struct platform_device;
1582
1583/**
1584 * struct platform_nand_ctrl - controller level device structure
1585 * @probe: platform specific function to probe/setup hardware
1586 * @remove: platform specific function to remove/teardown hardware
1587 * @dev_ready: platform specific function to read ready/busy pin
1588 * @select_chip: platform specific chip select function
1589 * @cmd_ctrl: platform specific function for controlling
1590 * ALE/CLE/nCE. Also used to write command and address
1591 * @write_buf: platform specific function for write buffer
1592 * @read_buf: platform specific function for read buffer
1593 * @priv: private data to transport driver specific settings
1594 *
1595 * All fields are optional and depend on the hardware driver requirements
1596 */
1597struct platform_nand_ctrl {
1598 int (*probe)(struct platform_device *pdev);
1599 void (*remove)(struct platform_device *pdev);
1600 int (*dev_ready)(struct mtd_info *mtd);
1601 void (*select_chip)(struct mtd_info *mtd, int chip);
1602 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
1603 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
1604 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
1605 void *priv;
1606};
1607
1608/**
1609 * struct platform_nand_data - container structure for platform-specific data
1610 * @chip: chip level chip structure
1611 * @ctrl: controller level device structure
1612 */
1613struct platform_nand_data {
1614 struct platform_nand_chip chip;
1615 struct platform_nand_ctrl ctrl;
1616};
1617
1618/* return the supported asynchronous timing mode. */
1619static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
1620{
1621 if (!chip->parameters.onfi)
1622 return ONFI_TIMING_MODE_UNKNOWN;
1623
1624 return chip->parameters.onfi->async_timing_mode;
1625}
1626
1627int onfi_fill_data_interface(struct nand_chip *chip,
1628 enum nand_data_interface_type type,
1629 int timing_mode);
1630 1235
1631/* 1236/*
1632 * Check if it is a SLC nand. 1237 * Check if it is a SLC nand.
@@ -1658,9 +1263,6 @@ static inline int nand_opcode_8bits(unsigned int command)
1658 return 0; 1263 return 0;
1659} 1264}
1660 1265
1661/* get timing characteristics from ONFI timing mode. */
1662const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
1663
1664int nand_check_erased_ecc_chunk(void *data, int datalen, 1266int nand_check_erased_ecc_chunk(void *data, int datalen,
1665 void *ecc, int ecclen, 1267 void *ecc, int ecclen,
1666 void *extraoob, int extraooblen, 1268 void *extraoob, int extraooblen,
@@ -1670,37 +1272,22 @@ int nand_ecc_choose_conf(struct nand_chip *chip,
1670 const struct nand_ecc_caps *caps, int oobavail); 1272 const struct nand_ecc_caps *caps, int oobavail);
1671 1273
1672/* Default write_oob implementation */ 1274/* Default write_oob implementation */
1673int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); 1275int nand_write_oob_std(struct nand_chip *chip, int page);
1674
1675/* Default write_oob syndrome implementation */
1676int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1677 int page);
1678 1276
1679/* Default read_oob implementation */ 1277/* Default read_oob implementation */
1680int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); 1278int nand_read_oob_std(struct nand_chip *chip, int page);
1681 1279
1682/* Default read_oob syndrome implementation */
1683int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1684 int page);
1685
1686/* Wrapper to use in order for controllers/vendors to GET/SET FEATURES */
1687int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
1688int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
1689/* Stub used by drivers that do not support GET/SET FEATURES operations */ 1280/* Stub used by drivers that do not support GET/SET FEATURES operations */
1690int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip, 1281int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
1691 int addr, u8 *subfeature_param); 1282 u8 *subfeature_param);
1692 1283
1693/* Default read_page_raw implementation */ 1284/* Default read_page_raw implementation */
1694int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1285int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
1695 uint8_t *buf, int oob_required, int page); 1286 int page);
1696int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
1697 u8 *buf, int oob_required, int page);
1698 1287
1699/* Default write_page_raw implementation */ 1288/* Default write_page_raw implementation */
1700int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1289int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1701 const uint8_t *buf, int oob_required, int page); 1290 int oob_required, int page);
1702int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
1703 const u8 *buf, int oob_required, int page);
1704 1291
1705/* Reset and initialize a NAND device */ 1292/* Reset and initialize a NAND device */
1706int nand_reset(struct nand_chip *chip, int chipnr); 1293int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1710,7 +1297,6 @@ int nand_reset_op(struct nand_chip *chip);
1710int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, 1297int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1711 unsigned int len); 1298 unsigned int len);
1712int nand_status_op(struct nand_chip *chip, u8 *status); 1299int nand_status_op(struct nand_chip *chip, u8 *status);
1713int nand_exit_status_op(struct nand_chip *chip);
1714int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock); 1300int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
1715int nand_read_page_op(struct nand_chip *chip, unsigned int page, 1301int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1716 unsigned int offset_in_page, void *buf, unsigned int len); 1302 unsigned int offset_in_page, void *buf, unsigned int len);
@@ -1734,16 +1320,25 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1734int nand_write_data_op(struct nand_chip *chip, const void *buf, 1320int nand_write_data_op(struct nand_chip *chip, const void *buf,
1735 unsigned int len, bool force_8bit); 1321 unsigned int len, bool force_8bit);
1736 1322
1323/* Scan and identify a NAND device */
1324int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
1325 struct nand_flash_dev *ids);
1326
1327static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
1328{
1329 return nand_scan_with_ids(chip, max_chips, NULL);
1330}
1331
1332/* Internal helper for board drivers which need to override command function */
1333void nand_wait_ready(struct nand_chip *chip);
1334
1737/* 1335/*
1738 * Free resources held by the NAND device, must be called on error after a 1336 * Free resources held by the NAND device, must be called on error after a
1739 * sucessful nand_scan(). 1337 * sucessful nand_scan().
1740 */ 1338 */
1741void nand_cleanup(struct nand_chip *chip); 1339void nand_cleanup(struct nand_chip *chip);
1742/* Unregister the MTD device and calls nand_cleanup() */ 1340/* Unregister the MTD device and calls nand_cleanup() */
1743void nand_release(struct mtd_info *mtd); 1341void nand_release(struct nand_chip *chip);
1744
1745/* Default extended ID decoding function */
1746void nand_decode_ext_id(struct nand_chip *chip);
1747 1342
1748/* 1343/*
1749 * External helper for controller drivers that have to implement the WAITRDY 1344 * External helper for controller drivers that have to implement the WAITRDY
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index c922e97f205a..7f0c7303575e 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -239,6 +239,94 @@ enum spi_nor_option_flags {
239}; 239};
240 240
241/** 241/**
242 * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
243 * @size: the size of the sector/block erased by the erase type.
244 * JEDEC JESD216B imposes erase sizes to be a power of 2.
245 * @size_shift: @size is a power of 2, the shift is stored in
246 * @size_shift.
247 * @size_mask: the size mask based on @size_shift.
248 * @opcode: the SPI command op code to erase the sector/block.
249 * @idx: Erase Type index as sorted in the Basic Flash Parameter
250 * Table. It will be used to synchronize the supported
251 * Erase Types with the ones identified in the SFDP
252 * optional tables.
253 */
254struct spi_nor_erase_type {
255 u32 size;
256 u32 size_shift;
257 u32 size_mask;
258 u8 opcode;
259 u8 idx;
260};
261
262/**
263 * struct spi_nor_erase_command - Used for non-uniform erases
264 * The structure is used to describe a list of erase commands to be executed
265 * once we validate that the erase can be performed. The elements in the list
266 * are run-length encoded.
267 * @list: for inclusion into the list of erase commands.
268 * @count: how many times the same erase command should be
269 * consecutively used.
270 * @size: the size of the sector/block erased by the command.
271 * @opcode: the SPI command op code to erase the sector/block.
272 */
273struct spi_nor_erase_command {
274 struct list_head list;
275 u32 count;
276 u32 size;
277 u8 opcode;
278};
279
280/**
281 * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
282 * @offset: the offset in the data array of erase region start.
283 * LSB bits are used as a bitmask encoding flags to
284 * determine if this region is overlaid, if this region is
285 * the last in the SPI NOR flash memory and to indicate
286 * all the supported erase commands inside this region.
287 * The erase types are sorted in ascending order with the
288 * smallest Erase Type size being at BIT(0).
289 * @size: the size of the region in bytes.
290 */
291struct spi_nor_erase_region {
292 u64 offset;
293 u64 size;
294};
295
296#define SNOR_ERASE_TYPE_MAX 4
297#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
298
299#define SNOR_LAST_REGION BIT(4)
300#define SNOR_OVERLAID_REGION BIT(5)
301
302#define SNOR_ERASE_FLAGS_MAX 6
303#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
304
305/**
306 * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
307 * @regions: array of erase regions. The regions are consecutive in
308 * address space. Walking through the regions is done
309 * incrementally.
310 * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
311 * sector size (legacy implementation).
312 * @erase_type: an array of erase types shared by all the regions.
313 * The erase types are sorted in ascending order, with the
314 * smallest Erase Type size being the first member in the
315 * erase_type array.
316 * @uniform_erase_type: bitmask encoding erase types that can erase the
317 * entire memory. This member is completed at init by
318 * uniform and non-uniform SPI NOR flash memories if they
319 * support at least one erase type that can erase the
320 * entire memory.
321 */
322struct spi_nor_erase_map {
323 struct spi_nor_erase_region *regions;
324 struct spi_nor_erase_region uniform_region;
325 struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
326 u8 uniform_erase_type;
327};
328
329/**
242 * struct flash_info - Forward declaration of a structure used internally by 330 * struct flash_info - Forward declaration of a structure used internally by
243 * spi_nor_scan() 331 * spi_nor_scan()
244 */ 332 */
@@ -262,6 +350,7 @@ struct flash_info;
262 * @write_proto: the SPI protocol for write operations 350 * @write_proto: the SPI protocol for write operations
263 * @reg_proto the SPI protocol for read_reg/write_reg/erase operations 351 * @reg_proto the SPI protocol for read_reg/write_reg/erase operations
264 * @cmd_buf: used by the write_reg 352 * @cmd_buf: used by the write_reg
353 * @erase_map: the erase map of the SPI NOR
265 * @prepare: [OPTIONAL] do some preparations for the 354 * @prepare: [OPTIONAL] do some preparations for the
266 * read/write/erase/lock/unlock operations 355 * read/write/erase/lock/unlock operations
267 * @unprepare: [OPTIONAL] do some post work after the 356 * @unprepare: [OPTIONAL] do some post work after the
@@ -297,6 +386,7 @@ struct spi_nor {
297 bool sst_write_second; 386 bool sst_write_second;
298 u32 flags; 387 u32 flags;
299 u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; 388 u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
389 struct spi_nor_erase_map erase_map;
300 390
301 int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); 391 int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
302 void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); 392 void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
@@ -317,6 +407,35 @@ struct spi_nor {
317 void *priv; 407 void *priv;
318}; 408};
319 409
410static u64 __maybe_unused
411spi_nor_region_is_last(const struct spi_nor_erase_region *region)
412{
413 return region->offset & SNOR_LAST_REGION;
414}
415
416static u64 __maybe_unused
417spi_nor_region_end(const struct spi_nor_erase_region *region)
418{
419 return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
420}
421
422static void __maybe_unused
423spi_nor_region_mark_end(struct spi_nor_erase_region *region)
424{
425 region->offset |= SNOR_LAST_REGION;
426}
427
428static void __maybe_unused
429spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
430{
431 region->offset |= SNOR_OVERLAID_REGION;
432}
433
434static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
435{
436 return !!nor->erase_map.uniform_erase_type;
437}
438
320static inline void spi_nor_set_flash_node(struct spi_nor *nor, 439static inline void spi_nor_set_flash_node(struct spi_nor *nor,
321 struct device_node *np) 440 struct device_node *np)
322{ 441{
diff --git a/include/linux/ndctl.h b/include/linux/ndctl.h
new file mode 100644
index 000000000000..cd5a293ce3ae
--- /dev/null
+++ b/include/linux/ndctl.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (c) 2014-2016, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU Lesser General Public License,
6 * version 2.1, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT ANY
9 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
10 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
11 * more details.
12 */
13#ifndef _LINUX_NDCTL_H
14#define _LINUX_NDCTL_H
15
16#include <uapi/linux/ndctl.h>
17
18enum {
19 ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
20};
21
22#endif /* _LINUX_NDCTL_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca5ab98053c8..dc1d9ed33b31 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -535,6 +535,32 @@ static inline void napi_synchronize(const struct napi_struct *n)
535 barrier(); 535 barrier();
536} 536}
537 537
538/**
539 * napi_if_scheduled_mark_missed - if napi is running, set the
540 * NAPIF_STATE_MISSED
541 * @n: NAPI context
542 *
543 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
544 * NAPI is scheduled.
545 **/
546static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
547{
548 unsigned long val, new;
549
550 do {
551 val = READ_ONCE(n->state);
552 if (val & NAPIF_STATE_DISABLE)
553 return true;
554
555 if (!(val & NAPIF_STATE_SCHED))
556 return false;
557
558 new = val | NAPIF_STATE_MISSED;
559 } while (cmpxchg(&n->state, val, new) != val);
560
561 return true;
562}
563
538enum netdev_queue_state_t { 564enum netdev_queue_state_t {
539 __QUEUE_STATE_DRV_XOFF, 565 __QUEUE_STATE_DRV_XOFF,
540 __QUEUE_STATE_STACK_XOFF, 566 __QUEUE_STATE_STACK_XOFF,
@@ -583,6 +609,9 @@ struct netdev_queue {
583 609
584 /* Subordinate device that the queue has been assigned to */ 610 /* Subordinate device that the queue has been assigned to */
585 struct net_device *sb_dev; 611 struct net_device *sb_dev;
612#ifdef CONFIG_XDP_SOCKETS
613 struct xdp_umem *umem;
614#endif
586/* 615/*
587 * write-mostly part 616 * write-mostly part
588 */ 617 */
@@ -712,6 +741,9 @@ struct netdev_rx_queue {
712 struct kobject kobj; 741 struct kobject kobj;
713 struct net_device *dev; 742 struct net_device *dev;
714 struct xdp_rxq_info xdp_rxq; 743 struct xdp_rxq_info xdp_rxq;
744#ifdef CONFIG_XDP_SOCKETS
745 struct xdp_umem *umem;
746#endif
715} ____cacheline_aligned_in_smp; 747} ____cacheline_aligned_in_smp;
716 748
717/* 749/*
@@ -1730,6 +1762,8 @@ enum netdev_priv_flags {
1730 * switch driver and used to set the phys state of the 1762 * switch driver and used to set the phys state of the
1731 * switch port. 1763 * switch port.
1732 * 1764 *
1765 * @wol_enabled: Wake-on-LAN is enabled
1766 *
1733 * FIXME: cleanup struct net_device such that network protocol info 1767 * FIXME: cleanup struct net_device such that network protocol info
1734 * moves out. 1768 * moves out.
1735 */ 1769 */
@@ -1974,7 +2008,6 @@ struct net_device {
1974 struct pcpu_lstats __percpu *lstats; 2008 struct pcpu_lstats __percpu *lstats;
1975 struct pcpu_sw_netstats __percpu *tstats; 2009 struct pcpu_sw_netstats __percpu *tstats;
1976 struct pcpu_dstats __percpu *dstats; 2010 struct pcpu_dstats __percpu *dstats;
1977 struct pcpu_vstats __percpu *vstats;
1978 }; 2011 };
1979 2012
1980#if IS_ENABLED(CONFIG_GARP) 2013#if IS_ENABLED(CONFIG_GARP)
@@ -2014,6 +2047,7 @@ struct net_device {
2014 struct lock_class_key *qdisc_tx_busylock; 2047 struct lock_class_key *qdisc_tx_busylock;
2015 struct lock_class_key *qdisc_running_key; 2048 struct lock_class_key *qdisc_running_key;
2016 bool proto_down; 2049 bool proto_down;
2050 unsigned wol_enabled:1;
2017}; 2051};
2018#define to_net_dev(d) container_of(d, struct net_device, dev) 2052#define to_net_dev(d) container_of(d, struct net_device, dev)
2019 2053
@@ -2317,6 +2351,7 @@ static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2317 2351
2318struct packet_type { 2352struct packet_type {
2319 __be16 type; /* This is really htons(ether_type). */ 2353 __be16 type; /* This is really htons(ether_type). */
2354 bool ignore_outgoing;
2320 struct net_device *dev; /* NULL is wildcarded here */ 2355 struct net_device *dev; /* NULL is wildcarded here */
2321 int (*func) (struct sk_buff *, 2356 int (*func) (struct sk_buff *,
2322 struct net_device *, 2357 struct net_device *,
@@ -2355,6 +2390,12 @@ struct pcpu_sw_netstats {
2355 struct u64_stats_sync syncp; 2390 struct u64_stats_sync syncp;
2356}; 2391};
2357 2392
2393struct pcpu_lstats {
2394 u64 packets;
2395 u64 bytes;
2396 struct u64_stats_sync syncp;
2397};
2398
2358#define __netdev_alloc_pcpu_stats(type, gfp) \ 2399#define __netdev_alloc_pcpu_stats(type, gfp) \
2359({ \ 2400({ \
2360 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ 2401 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -2455,6 +2496,13 @@ struct netdev_notifier_info {
2455 struct netlink_ext_ack *extack; 2496 struct netlink_ext_ack *extack;
2456}; 2497};
2457 2498
2499struct netdev_notifier_info_ext {
2500 struct netdev_notifier_info info; /* must be first */
2501 union {
2502 u32 mtu;
2503 } ext;
2504};
2505
2458struct netdev_notifier_change_info { 2506struct netdev_notifier_change_info {
2459 struct netdev_notifier_info info; /* must be first */ 2507 struct netdev_notifier_info info; /* must be first */
2460 unsigned int flags_changed; 2508 unsigned int flags_changed;
@@ -3597,6 +3645,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
3597 return 0; 3645 return 0;
3598} 3646}
3599 3647
3648bool dev_nit_active(struct net_device *dev);
3600void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 3649void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3601 3650
3602extern int netdev_budget; 3651extern int netdev_budget;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 07efffd0c759..bbe99d2b28b4 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
215 break; 215 break;
216 case NFPROTO_ARP: 216 case NFPROTO_ARP:
217#ifdef CONFIG_NETFILTER_FAMILY_ARP 217#ifdef CONFIG_NETFILTER_FAMILY_ARP
218 if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
219 break;
218 hook_head = rcu_dereference(net->nf.hooks_arp[hook]); 220 hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
219#endif 221#endif
220 break; 222 break;
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 03097fa70975..e142b2b5f1ea 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -19,7 +19,4 @@ struct ip_conntrack_stat {
19 unsigned int search_restart; 19 unsigned int search_restart;
20}; 20};
21 21
22/* call to create an explicit dependency on nf_conntrack. */
23void need_conntrack(void);
24
25#endif /* _NF_CONNTRACK_COMMON_H */ 22#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index ecf7dab81e9e..c6000046c966 100644
--- a/include/linux/netfilter/nfnetlink_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -27,6 +27,7 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
27 const struct list_head *nf_osf_fingers); 27 const struct list_head *nf_osf_fingers);
28 28
29const char *nf_osf_find(const struct sk_buff *skb, 29const char *nf_osf_find(const struct sk_buff *skb,
30 const struct list_head *nf_osf_fingers); 30 const struct list_head *nf_osf_fingers,
31 const int ttl_check);
31 32
32#endif /* _NFOSF_H */ 33#endif /* _NFOSF_H */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 71f121b66ca8..4da90a6ab536 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -176,8 +176,11 @@ struct netlink_callback {
176 void *data; 176 void *data;
177 /* the module that dump function belong to */ 177 /* the module that dump function belong to */
178 struct module *module; 178 struct module *module;
179 struct netlink_ext_ack *extack;
179 u16 family; 180 u16 family;
180 u16 min_dump_alloc; 181 u16 min_dump_alloc;
182 bool strict_check;
183 u16 answer_flags;
181 unsigned int prev_seq, seq; 184 unsigned int prev_seq, seq;
182 long args[6]; 185 long args[6];
183}; 186};
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 67662d01130a..676f1ff161a9 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -31,8 +31,6 @@ struct netpoll {
31 bool ipv6; 31 bool ipv6;
32 u16 local_port, remote_port; 32 u16 local_port, remote_port;
33 u8 remote_mac[ETH_ALEN]; 33 u8 remote_mac[ETH_ALEN];
34
35 struct work_struct cleanup_work;
36}; 34};
37 35
38struct netpoll_info { 36struct netpoll_info {
@@ -49,8 +47,9 @@ struct netpoll_info {
49}; 47};
50 48
51#ifdef CONFIG_NETPOLL 49#ifdef CONFIG_NETPOLL
52extern void netpoll_poll_disable(struct net_device *dev); 50void netpoll_poll_dev(struct net_device *dev);
53extern void netpoll_poll_enable(struct net_device *dev); 51void netpoll_poll_disable(struct net_device *dev);
52void netpoll_poll_enable(struct net_device *dev);
54#else 53#else
55static inline void netpoll_poll_disable(struct net_device *dev) { return; } 54static inline void netpoll_poll_disable(struct net_device *dev) { return; }
56static inline void netpoll_poll_enable(struct net_device *dev) { return; } 55static inline void netpoll_poll_enable(struct net_device *dev) { return; }
@@ -62,7 +61,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt);
62int __netpoll_setup(struct netpoll *np, struct net_device *ndev); 61int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
63int netpoll_setup(struct netpoll *np); 62int netpoll_setup(struct netpoll *np);
64void __netpoll_cleanup(struct netpoll *np); 63void __netpoll_cleanup(struct netpoll *np);
65void __netpoll_free_async(struct netpoll *np); 64void __netpoll_free(struct netpoll *np);
66void netpoll_cleanup(struct netpoll *np); 65void netpoll_cleanup(struct netpoll *np);
67void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, 66void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
68 struct net_device *dev); 67 struct net_device *dev);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a0831e9d19c9..6e0417c02279 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -62,6 +62,7 @@ struct nfs_lock_context {
62 struct nfs_open_context *open_context; 62 struct nfs_open_context *open_context;
63 fl_owner_t lockowner; 63 fl_owner_t lockowner;
64 atomic_t io_count; 64 atomic_t io_count;
65 struct rcu_head rcu_head;
65}; 66};
66 67
67struct nfs4_state; 68struct nfs4_state;
@@ -82,6 +83,7 @@ struct nfs_open_context {
82 83
83 struct list_head list; 84 struct list_head list;
84 struct nfs4_threshold *mdsthreshold; 85 struct nfs4_threshold *mdsthreshold;
86 struct rcu_head rcu_head;
85}; 87};
86 88
87struct nfs_open_dir_context { 89struct nfs_open_dir_context {
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index bf39d9c92201..0fc0b9135d46 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -228,6 +228,9 @@ struct nfs_server {
228 unsigned short mountd_port; 228 unsigned short mountd_port;
229 unsigned short mountd_protocol; 229 unsigned short mountd_protocol;
230 struct rpc_wait_queue uoc_rpcwaitq; 230 struct rpc_wait_queue uoc_rpcwaitq;
231
232 /* XDR related information */
233 unsigned int read_hdrsize;
231}; 234};
232 235
233/* Server capabilities */ 236/* Server capabilities */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index bd1c889a9ed9..0e016252cfc6 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -608,8 +608,13 @@ struct nfs_pgio_args {
608 __u32 count; 608 __u32 count;
609 unsigned int pgbase; 609 unsigned int pgbase;
610 struct page ** pages; 610 struct page ** pages;
611 const u32 * bitmask; /* used by write */ 611 union {
612 enum nfs3_stable_how stable; /* used by write */ 612 unsigned int replen; /* used by read */
613 struct {
614 const u32 * bitmask; /* used by write */
615 enum nfs3_stable_how stable; /* used by write */
616 };
617 };
613}; 618};
614 619
615struct nfs_pgio_res { 620struct nfs_pgio_res {
@@ -617,10 +622,16 @@ struct nfs_pgio_res {
617 struct nfs_fattr * fattr; 622 struct nfs_fattr * fattr;
618 __u32 count; 623 __u32 count;
619 __u32 op_status; 624 __u32 op_status;
620 int eof; /* used by read */ 625 union {
621 struct nfs_writeverf * verf; /* used by write */ 626 struct {
622 const struct nfs_server *server; /* used by write */ 627 unsigned int replen; /* used by read */
623 628 int eof; /* used by read */
629 };
630 struct {
631 struct nfs_writeverf * verf; /* used by write */
632 const struct nfs_server *server; /* used by write */
633 };
634 };
624}; 635};
625 636
626/* 637/*
@@ -1471,11 +1482,10 @@ struct nfs_pgio_header {
1471 const struct nfs_rw_ops *rw_ops; 1482 const struct nfs_rw_ops *rw_ops;
1472 struct nfs_io_completion *io_completion; 1483 struct nfs_io_completion *io_completion;
1473 struct nfs_direct_req *dreq; 1484 struct nfs_direct_req *dreq;
1474 spinlock_t lock; 1485
1475 /* fields protected by lock */
1476 int pnfs_error; 1486 int pnfs_error;
1477 int error; /* merge with pnfs_error */ 1487 int error; /* merge with pnfs_error */
1478 unsigned long good_bytes; /* boundary of good data */ 1488 unsigned int good_bytes; /* boundary of good data */
1479 unsigned long flags; 1489 unsigned long flags;
1480 1490
1481 /* 1491 /*
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 68e91ef5494c..818dbe9331be 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1241,6 +1241,7 @@ enum {
1241 NVME_SC_ANA_PERSISTENT_LOSS = 0x301, 1241 NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
1242 NVME_SC_ANA_INACCESSIBLE = 0x302, 1242 NVME_SC_ANA_INACCESSIBLE = 0x302,
1243 NVME_SC_ANA_TRANSITION = 0x303, 1243 NVME_SC_ANA_TRANSITION = 0x303,
1244 NVME_SC_HOST_PATH_ERROR = 0x370,
1244 1245
1245 NVME_SC_DNR = 0x4000, 1246 NVME_SC_DNR = 0x4000,
1246}; 1247};
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 4e85447f7860..312bfa5efd80 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * nvmem framework consumer. 3 * nvmem framework consumer.
3 * 4 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> 6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */ 7 */
11 8
12#ifndef _LINUX_NVMEM_CONSUMER_H 9#ifndef _LINUX_NVMEM_CONSUMER_H
@@ -14,6 +11,7 @@
14 11
15#include <linux/err.h> 12#include <linux/err.h>
16#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/notifier.h>
17 15
18struct device; 16struct device;
19struct device_node; 17struct device_node;
@@ -29,11 +27,36 @@ struct nvmem_cell_info {
29 unsigned int nbits; 27 unsigned int nbits;
30}; 28};
31 29
30/**
31 * struct nvmem_cell_lookup - cell lookup entry
32 *
33 * @nvmem_name: Name of the provider.
34 * @cell_name: Name of the nvmem cell as defined in the name field of
35 * struct nvmem_cell_info.
36 * @dev_id: Name of the consumer device that will be associated with
37 * this cell.
38 * @con_id: Connector id for this cell lookup.
39 */
40struct nvmem_cell_lookup {
41 const char *nvmem_name;
42 const char *cell_name;
43 const char *dev_id;
44 const char *con_id;
45 struct list_head node;
46};
47
48enum {
49 NVMEM_ADD = 1,
50 NVMEM_REMOVE,
51 NVMEM_CELL_ADD,
52 NVMEM_CELL_REMOVE,
53};
54
32#if IS_ENABLED(CONFIG_NVMEM) 55#if IS_ENABLED(CONFIG_NVMEM)
33 56
34/* Cell based interface */ 57/* Cell based interface */
35struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name); 58struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id);
36struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name); 59struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id);
37void nvmem_cell_put(struct nvmem_cell *cell); 60void nvmem_cell_put(struct nvmem_cell *cell);
38void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); 61void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
39void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); 62void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
@@ -55,18 +78,28 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
55int nvmem_device_cell_write(struct nvmem_device *nvmem, 78int nvmem_device_cell_write(struct nvmem_device *nvmem,
56 struct nvmem_cell_info *info, void *buf); 79 struct nvmem_cell_info *info, void *buf);
57 80
81const char *nvmem_dev_name(struct nvmem_device *nvmem);
82
83void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
84 size_t nentries);
85void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries,
86 size_t nentries);
87
88int nvmem_register_notifier(struct notifier_block *nb);
89int nvmem_unregister_notifier(struct notifier_block *nb);
90
58#else 91#else
59 92
60static inline struct nvmem_cell *nvmem_cell_get(struct device *dev, 93static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
61 const char *name) 94 const char *id)
62{ 95{
63 return ERR_PTR(-ENOSYS); 96 return ERR_PTR(-EOPNOTSUPP);
64} 97}
65 98
66static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, 99static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
67 const char *name) 100 const char *id)
68{ 101{
69 return ERR_PTR(-ENOSYS); 102 return ERR_PTR(-EOPNOTSUPP);
70} 103}
71 104
72static inline void devm_nvmem_cell_put(struct device *dev, 105static inline void devm_nvmem_cell_put(struct device *dev,
@@ -80,31 +113,31 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
80 113
81static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) 114static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
82{ 115{
83 return ERR_PTR(-ENOSYS); 116 return ERR_PTR(-EOPNOTSUPP);
84} 117}
85 118
86static inline int nvmem_cell_write(struct nvmem_cell *cell, 119static inline int nvmem_cell_write(struct nvmem_cell *cell,
87 const char *buf, size_t len) 120 const char *buf, size_t len)
88{ 121{
89 return -ENOSYS; 122 return -EOPNOTSUPP;
90} 123}
91 124
92static inline int nvmem_cell_read_u32(struct device *dev, 125static inline int nvmem_cell_read_u32(struct device *dev,
93 const char *cell_id, u32 *val) 126 const char *cell_id, u32 *val)
94{ 127{
95 return -ENOSYS; 128 return -EOPNOTSUPP;
96} 129}
97 130
98static inline struct nvmem_device *nvmem_device_get(struct device *dev, 131static inline struct nvmem_device *nvmem_device_get(struct device *dev,
99 const char *name) 132 const char *name)
100{ 133{
101 return ERR_PTR(-ENOSYS); 134 return ERR_PTR(-EOPNOTSUPP);
102} 135}
103 136
104static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev, 137static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
105 const char *name) 138 const char *name)
106{ 139{
107 return ERR_PTR(-ENOSYS); 140 return ERR_PTR(-EOPNOTSUPP);
108} 141}
109 142
110static inline void nvmem_device_put(struct nvmem_device *nvmem) 143static inline void nvmem_device_put(struct nvmem_device *nvmem)
@@ -120,47 +153,68 @@ static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
120 struct nvmem_cell_info *info, 153 struct nvmem_cell_info *info,
121 void *buf) 154 void *buf)
122{ 155{
123 return -ENOSYS; 156 return -EOPNOTSUPP;
124} 157}
125 158
126static inline int nvmem_device_cell_write(struct nvmem_device *nvmem, 159static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
127 struct nvmem_cell_info *info, 160 struct nvmem_cell_info *info,
128 void *buf) 161 void *buf)
129{ 162{
130 return -ENOSYS; 163 return -EOPNOTSUPP;
131} 164}
132 165
133static inline int nvmem_device_read(struct nvmem_device *nvmem, 166static inline int nvmem_device_read(struct nvmem_device *nvmem,
134 unsigned int offset, size_t bytes, 167 unsigned int offset, size_t bytes,
135 void *buf) 168 void *buf)
136{ 169{
137 return -ENOSYS; 170 return -EOPNOTSUPP;
138} 171}
139 172
140static inline int nvmem_device_write(struct nvmem_device *nvmem, 173static inline int nvmem_device_write(struct nvmem_device *nvmem,
141 unsigned int offset, size_t bytes, 174 unsigned int offset, size_t bytes,
142 void *buf) 175 void *buf)
143{ 176{
144 return -ENOSYS; 177 return -EOPNOTSUPP;
145} 178}
179
180static inline const char *nvmem_dev_name(struct nvmem_device *nvmem)
181{
182 return NULL;
183}
184
185static inline void
186nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
187static inline void
188nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
189
190static inline int nvmem_register_notifier(struct notifier_block *nb)
191{
192 return -EOPNOTSUPP;
193}
194
195static inline int nvmem_unregister_notifier(struct notifier_block *nb)
196{
197 return -EOPNOTSUPP;
198}
199
146#endif /* CONFIG_NVMEM */ 200#endif /* CONFIG_NVMEM */
147 201
148#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) 202#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
149struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, 203struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
150 const char *name); 204 const char *id);
151struct nvmem_device *of_nvmem_device_get(struct device_node *np, 205struct nvmem_device *of_nvmem_device_get(struct device_node *np,
152 const char *name); 206 const char *name);
153#else 207#else
154static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, 208static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
155 const char *name) 209 const char *id)
156{ 210{
157 return ERR_PTR(-ENOSYS); 211 return ERR_PTR(-EOPNOTSUPP);
158} 212}
159 213
160static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, 214static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
161 const char *name) 215 const char *name)
162{ 216{
163 return ERR_PTR(-ENOSYS); 217 return ERR_PTR(-EOPNOTSUPP);
164} 218}
165#endif /* CONFIG_NVMEM && CONFIG_OF */ 219#endif /* CONFIG_NVMEM && CONFIG_OF */
166 220
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 24def6ad09bb..1e3283c2af77 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * nvmem framework provider. 3 * nvmem framework provider.
3 * 4 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 5 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> 6 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */ 7 */
11 8
12#ifndef _LINUX_NVMEM_PROVIDER_H 9#ifndef _LINUX_NVMEM_PROVIDER_H
@@ -67,30 +64,46 @@ struct nvmem_config {
67 struct device *base_dev; 64 struct device *base_dev;
68}; 65};
69 66
67/**
68 * struct nvmem_cell_table - NVMEM cell definitions for given provider
69 *
70 * @nvmem_name: Provider name.
71 * @cells: Array of cell definitions.
72 * @ncells: Number of cell definitions in the array.
73 * @node: List node.
74 *
75 * This structure together with related helper functions is provided for users
76 * that don't can't access the nvmem provided structure but wish to register
77 * cell definitions for it e.g. board files registering an EEPROM device.
78 */
79struct nvmem_cell_table {
80 const char *nvmem_name;
81 const struct nvmem_cell_info *cells;
82 size_t ncells;
83 struct list_head node;
84};
85
70#if IS_ENABLED(CONFIG_NVMEM) 86#if IS_ENABLED(CONFIG_NVMEM)
71 87
72struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); 88struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
73int nvmem_unregister(struct nvmem_device *nvmem); 89void nvmem_unregister(struct nvmem_device *nvmem);
74 90
75struct nvmem_device *devm_nvmem_register(struct device *dev, 91struct nvmem_device *devm_nvmem_register(struct device *dev,
76 const struct nvmem_config *cfg); 92 const struct nvmem_config *cfg);
77 93
78int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); 94int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
79 95
80int nvmem_add_cells(struct nvmem_device *nvmem, 96void nvmem_add_cell_table(struct nvmem_cell_table *table);
81 const struct nvmem_cell_info *info, 97void nvmem_del_cell_table(struct nvmem_cell_table *table);
82 int ncells); 98
83#else 99#else
84 100
85static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) 101static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
86{ 102{
87 return ERR_PTR(-ENOSYS); 103 return ERR_PTR(-EOPNOTSUPP);
88} 104}
89 105
90static inline int nvmem_unregister(struct nvmem_device *nvmem) 106static inline void nvmem_unregister(struct nvmem_device *nvmem) {}
91{
92 return -ENOSYS;
93}
94 107
95static inline struct nvmem_device * 108static inline struct nvmem_device *
96devm_nvmem_register(struct device *dev, const struct nvmem_config *c) 109devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
@@ -101,16 +114,11 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
101static inline int 114static inline int
102devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) 115devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
103{ 116{
104 return nvmem_unregister(nvmem); 117 return -EOPNOTSUPP;
105
106} 118}
107 119
108static inline int nvmem_add_cells(struct nvmem_device *nvmem, 120static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
109 const struct nvmem_cell_info *info, 121static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
110 int ncells)
111{
112 return -ENOSYS;
113}
114 122
115#endif /* CONFIG_NVMEM */ 123#endif /* CONFIG_NVMEM */
116#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ 124#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 99b0ebf49632..a5aee3c438ad 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -247,12 +247,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
247#include <asm/prom.h> 247#include <asm/prom.h>
248#endif 248#endif
249 249
250/* Default #address and #size cells. Allow arch asm/prom.h to override */
251#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
252#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
253#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
254#endif
255
256#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) 250#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
257#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) 251#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
258 252
@@ -353,6 +347,8 @@ extern const void *of_get_property(const struct device_node *node,
353 const char *name, 347 const char *name,
354 int *lenp); 348 int *lenp);
355extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); 349extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
350extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
351
356#define for_each_property_of_node(dn, pp) \ 352#define for_each_property_of_node(dn, pp) \
357 for (pp = dn->properties; pp != NULL; pp = pp->next) 353 for (pp = dn->properties; pp != NULL; pp = pp->next)
358 354
@@ -392,6 +388,9 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
392extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); 388extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
393extern int of_alias_get_id(struct device_node *np, const char *stem); 389extern int of_alias_get_id(struct device_node *np, const char *stem);
394extern int of_alias_get_highest_id(const char *stem); 390extern int of_alias_get_highest_id(const char *stem);
391extern int of_alias_get_alias_list(const struct of_device_id *matches,
392 const char *stem, unsigned long *bitmap,
393 unsigned int nbits);
395 394
396extern int of_machine_is_compatible(const char *compat); 395extern int of_machine_is_compatible(const char *compat);
397 396
@@ -550,6 +549,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
550 549
551extern int of_cpu_node_to_id(struct device_node *np); 550extern int of_cpu_node_to_id(struct device_node *np);
552 551
552int of_map_rid(struct device_node *np, u32 rid,
553 const char *map_name, const char *map_mask_name,
554 struct device_node **target, u32 *id_out);
555
553#else /* CONFIG_OF */ 556#else /* CONFIG_OF */
554 557
555static inline void of_core_init(void) 558static inline void of_core_init(void)
@@ -754,6 +757,11 @@ static inline struct device_node *of_get_cpu_node(int cpu,
754 return NULL; 757 return NULL;
755} 758}
756 759
760static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
761{
762 return NULL;
763}
764
757static inline int of_n_addr_cells(struct device_node *np) 765static inline int of_n_addr_cells(struct device_node *np)
758{ 766{
759 return 0; 767 return 0;
@@ -893,6 +901,13 @@ static inline int of_alias_get_highest_id(const char *stem)
893 return -ENOSYS; 901 return -ENOSYS;
894} 902}
895 903
904static inline int of_alias_get_alias_list(const struct of_device_id *matches,
905 const char *stem, unsigned long *bitmap,
906 unsigned int nbits)
907{
908 return -ENOSYS;
909}
910
896static inline int of_machine_is_compatible(const char *compat) 911static inline int of_machine_is_compatible(const char *compat)
897{ 912{
898 return 0; 913 return 0;
@@ -952,6 +967,13 @@ static inline int of_cpu_node_to_id(struct device_node *np)
952 return -ENODEV; 967 return -ENODEV;
953} 968}
954 969
970static inline int of_map_rid(struct device_node *np, u32 rid,
971 const char *map_name, const char *map_mask_name,
972 struct device_node **target, u32 *id_out)
973{
974 return -EINVAL;
975}
976
955#define of_match_ptr(_ptr) NULL 977#define of_match_ptr(_ptr) NULL
956#define of_match_node(_matches, _node) NULL 978#define of_match_node(_matches, _node) NULL
957#endif /* CONFIG_OF */ 979#endif /* CONFIG_OF */
@@ -990,7 +1012,7 @@ static inline struct device_node *of_find_matching_node(
990 1012
991static inline const char *of_node_get_device_type(const struct device_node *np) 1013static inline const char *of_node_get_device_type(const struct device_node *np)
992{ 1014{
993 return of_get_property(np, "type", NULL); 1015 return of_get_property(np, "device_type", NULL);
994} 1016}
995 1017
996static inline bool of_node_is_type(const struct device_node *np, const char *type) 1018static inline bool of_node_is_type(const struct device_node *np, const char *type)
@@ -1217,6 +1239,10 @@ static inline int of_property_read_s32(const struct device_node *np,
1217 for (child = of_get_next_available_child(parent, NULL); child != NULL; \ 1239 for (child = of_get_next_available_child(parent, NULL); child != NULL; \
1218 child = of_get_next_available_child(parent, child)) 1240 child = of_get_next_available_child(parent, child))
1219 1241
1242#define for_each_of_cpu_node(cpu) \
1243 for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
1244 cpu = of_get_next_cpu_node(cpu))
1245
1220#define for_each_node_with_property(dn, prop_name) \ 1246#define for_each_node_with_property(dn, prop_name) \
1221 for (dn = of_find_node_with_property(NULL, prop_name); dn; \ 1247 for (dn = of_find_node_with_property(NULL, prop_name); dn; \
1222 dn = of_find_node_with_property(dn, prop_name)) 1248 dn = of_find_node_with_property(dn, prop_name))
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 165fd302b442..8d31e39dd564 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -58,7 +58,6 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
58int of_dma_configure(struct device *dev, 58int of_dma_configure(struct device *dev,
59 struct device_node *np, 59 struct device_node *np,
60 bool force_dma); 60 bool force_dma);
61void of_dma_deconfigure(struct device *dev);
62#else /* CONFIG_OF */ 61#else /* CONFIG_OF */
63 62
64static inline int of_driver_match_device(struct device *dev, 63static inline int of_driver_match_device(struct device *dev,
@@ -113,8 +112,6 @@ static inline int of_dma_configure(struct device *dev,
113{ 112{
114 return 0; 113 return 0;
115} 114}
116static inline void of_dma_deconfigure(struct device *dev)
117{}
118#endif /* CONFIG_OF */ 115#endif /* CONFIG_OF */
119 116
120#endif /* _LINUX_OF_DEVICE_H */ 117#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index e83d87fc5673..21a89c4880fa 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -14,9 +14,6 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
14 unsigned int devfn); 14 unsigned int devfn);
15int of_pci_get_devfn(struct device_node *np); 15int of_pci_get_devfn(struct device_node *np);
16void of_pci_check_probe_only(void); 16void of_pci_check_probe_only(void);
17int of_pci_map_rid(struct device_node *np, u32 rid,
18 const char *map_name, const char *map_mask_name,
19 struct device_node **target, u32 *id_out);
20#else 17#else
21static inline struct device_node *of_pci_find_child_device(struct device_node *parent, 18static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
22 unsigned int devfn) 19 unsigned int devfn)
@@ -29,13 +26,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
29 return -EINVAL; 26 return -EINVAL;
30} 27}
31 28
32static inline int of_pci_map_rid(struct device_node *np, u32 rid,
33 const char *map_name, const char *map_mask_name,
34 struct device_node **target, u32 *id_out)
35{
36 return -EINVAL;
37}
38
39static inline void of_pci_check_probe_only(void) { } 29static inline void of_pci_check_probe_only(void) { }
40#endif 30#endif
41 31
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 74bee8cecf4c..50ce1bddaf56 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -69,13 +69,14 @@
69 */ 69 */
70enum pageflags { 70enum pageflags {
71 PG_locked, /* Page is locked. Don't touch. */ 71 PG_locked, /* Page is locked. Don't touch. */
72 PG_error,
73 PG_referenced, 72 PG_referenced,
74 PG_uptodate, 73 PG_uptodate,
75 PG_dirty, 74 PG_dirty,
76 PG_lru, 75 PG_lru,
77 PG_active, 76 PG_active,
77 PG_workingset,
78 PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ 78 PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
79 PG_error,
79 PG_slab, 80 PG_slab,
80 PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ 81 PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
81 PG_arch_1, 82 PG_arch_1,
@@ -162,6 +163,14 @@ static inline int PagePoisoned(const struct page *page)
162 return page->flags == PAGE_POISON_PATTERN; 163 return page->flags == PAGE_POISON_PATTERN;
163} 164}
164 165
166#ifdef CONFIG_DEBUG_VM
167void page_init_poison(struct page *page, size_t size);
168#else
169static inline void page_init_poison(struct page *page, size_t size)
170{
171}
172#endif
173
165/* 174/*
166 * Page flags policies wrt compound pages 175 * Page flags policies wrt compound pages
167 * 176 *
@@ -280,6 +289,8 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
280PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) 289PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
281PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) 290PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
282 TESTCLEARFLAG(Active, active, PF_HEAD) 291 TESTCLEARFLAG(Active, active, PF_HEAD)
292PAGEFLAG(Workingset, workingset, PF_HEAD)
293 TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
283__PAGEFLAG(Slab, slab, PF_NO_TAIL) 294__PAGEFLAG(Slab, slab, PF_NO_TAIL)
284__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) 295__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
285PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ 296PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
@@ -292,6 +303,7 @@ PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
292 303
293PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) 304PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
294 __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) 305 __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
306 __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
295PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) 307PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
296 __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) 308 __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
297 __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) 309 __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b1bd2186e6d2..226f96f0dee0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -241,9 +241,9 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
241 241
242typedef int filler_t(void *, struct page *); 242typedef int filler_t(void *, struct page *);
243 243
244pgoff_t page_cache_next_hole(struct address_space *mapping, 244pgoff_t page_cache_next_miss(struct address_space *mapping,
245 pgoff_t index, unsigned long max_scan); 245 pgoff_t index, unsigned long max_scan);
246pgoff_t page_cache_prev_hole(struct address_space *mapping, 246pgoff_t page_cache_prev_miss(struct address_space *mapping,
247 pgoff_t index, unsigned long max_scan); 247 pgoff_t index, unsigned long max_scan);
248 248
249#define FGP_ACCESSED 0x00000001 249#define FGP_ACCESSED 0x00000001
@@ -363,17 +363,17 @@ static inline unsigned find_get_pages(struct address_space *mapping,
363unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, 363unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
364 unsigned int nr_pages, struct page **pages); 364 unsigned int nr_pages, struct page **pages);
365unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, 365unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
366 pgoff_t end, int tag, unsigned int nr_pages, 366 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
367 struct page **pages); 367 struct page **pages);
368static inline unsigned find_get_pages_tag(struct address_space *mapping, 368static inline unsigned find_get_pages_tag(struct address_space *mapping,
369 pgoff_t *index, int tag, unsigned int nr_pages, 369 pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
370 struct page **pages) 370 struct page **pages)
371{ 371{
372 return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, 372 return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
373 nr_pages, pages); 373 nr_pages, pages);
374} 374}
375unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, 375unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
376 int tag, unsigned int nr_entries, 376 xa_mark_t tag, unsigned int nr_entries,
377 struct page **entries, pgoff_t *indices); 377 struct page **entries, pgoff_t *indices);
378 378
379struct page *grab_cache_page_write_begin(struct address_space *mapping, 379struct page *grab_cache_page_write_begin(struct address_space *mapping,
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 6dc456ac6136..081d934eda64 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -9,6 +9,8 @@
9#ifndef _LINUX_PAGEVEC_H 9#ifndef _LINUX_PAGEVEC_H
10#define _LINUX_PAGEVEC_H 10#define _LINUX_PAGEVEC_H
11 11
12#include <linux/xarray.h>
13
12/* 15 pointers + header align the pagevec structure to a power of two */ 14/* 15 pointers + header align the pagevec structure to a power of two */
13#define PAGEVEC_SIZE 15 15#define PAGEVEC_SIZE 15
14 16
@@ -40,12 +42,12 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec,
40 42
41unsigned pagevec_lookup_range_tag(struct pagevec *pvec, 43unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
42 struct address_space *mapping, pgoff_t *index, pgoff_t end, 44 struct address_space *mapping, pgoff_t *index, pgoff_t end,
43 int tag); 45 xa_mark_t tag);
44unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec, 46unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
45 struct address_space *mapping, pgoff_t *index, pgoff_t end, 47 struct address_space *mapping, pgoff_t *index, pgoff_t end,
46 int tag, unsigned max_pages); 48 xa_mark_t tag, unsigned max_pages);
47static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, 49static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
48 struct address_space *mapping, pgoff_t *index, int tag) 50 struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
49{ 51{
50 return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); 52 return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
51} 53}
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index c3f1b44ade29..cb1adf0b78a9 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -119,29 +119,11 @@ static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
119{ 119{
120 return dma_set_coherent_mask(&dev->dev, mask); 120 return dma_set_coherent_mask(&dev->dev, mask);
121} 121}
122
123static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
124 unsigned int size)
125{
126 return dma_set_max_seg_size(&dev->dev, size);
127}
128
129static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
130 unsigned long mask)
131{
132 return dma_set_seg_boundary(&dev->dev, mask);
133}
134#else 122#else
135static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) 123static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
136{ return -EIO; } 124{ return -EIO; }
137static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) 125static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
138{ return -EIO; } 126{ return -EIO; }
139static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
140 unsigned int size)
141{ return -EIO; }
142static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
143 unsigned long mask)
144{ return -EIO; }
145#endif 127#endif
146 128
147#endif 129#endif
diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h
deleted file mode 100644
index 0f7aa7353ca3..000000000000
--- a/include/linux/pci-dma.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_PCI_DMA_H
3#define _LINUX_PCI_DMA_H
4
5#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME);
6#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME);
7#define pci_unmap_addr dma_unmap_addr
8#define pci_unmap_addr_set dma_unmap_addr_set
9#define pci_unmap_len dma_unmap_len
10#define pci_unmap_len_set dma_unmap_len_set
11
12#endif
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
new file mode 100644
index 000000000000..bca9bc3e5be7
--- /dev/null
+++ b/include/linux/pci-p2pdma.h
@@ -0,0 +1,114 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * PCI Peer 2 Peer DMA support.
4 *
5 * Copyright (c) 2016-2018, Logan Gunthorpe
6 * Copyright (c) 2016-2017, Microsemi Corporation
7 * Copyright (c) 2017, Christoph Hellwig
8 * Copyright (c) 2018, Eideticom Inc.
9 */
10
11#ifndef _LINUX_PCI_P2PDMA_H
12#define _LINUX_PCI_P2PDMA_H
13
14#include <linux/pci.h>
15
16struct block_device;
17struct scatterlist;
18
19#ifdef CONFIG_PCI_P2PDMA
20int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
21 u64 offset);
22int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
23 int num_clients, bool verbose);
24bool pci_has_p2pmem(struct pci_dev *pdev);
25struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
26void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
27void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
28pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr);
29struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
30 unsigned int *nents, u32 length);
31void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
32void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
33int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
34 enum dma_data_direction dir);
35int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
36 bool *use_p2pdma);
37ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
38 bool use_p2pdma);
39#else /* CONFIG_PCI_P2PDMA */
40static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
41 size_t size, u64 offset)
42{
43 return -EOPNOTSUPP;
44}
45static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
46 struct device **clients, int num_clients, bool verbose)
47{
48 return -1;
49}
50static inline bool pci_has_p2pmem(struct pci_dev *pdev)
51{
52 return false;
53}
54static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
55 int num_clients)
56{
57 return NULL;
58}
59static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
60{
61 return NULL;
62}
63static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr,
64 size_t size)
65{
66}
67static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev,
68 void *addr)
69{
70 return 0;
71}
72static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
73 unsigned int *nents, u32 length)
74{
75 return NULL;
76}
77static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
78 struct scatterlist *sgl)
79{
80}
81static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
82{
83}
84static inline int pci_p2pdma_map_sg(struct device *dev,
85 struct scatterlist *sg, int nents, enum dma_data_direction dir)
86{
87 return 0;
88}
89static inline int pci_p2pdma_enable_store(const char *page,
90 struct pci_dev **p2p_dev, bool *use_p2pdma)
91{
92 *use_p2pdma = false;
93 return 0;
94}
95static inline ssize_t pci_p2pdma_enable_show(char *page,
96 struct pci_dev *p2p_dev, bool use_p2pdma)
97{
98 return sprintf(page, "none\n");
99}
100#endif /* CONFIG_PCI_P2PDMA */
101
102
103static inline int pci_p2pdma_distance(struct pci_dev *provider,
104 struct device *client, bool verbose)
105{
106 return pci_p2pdma_distance_many(provider, &client, 1, verbose);
107}
108
109static inline struct pci_dev *pci_p2pmem_find(struct device *client)
110{
111 return pci_p2pmem_find_many(&client, 1);
112}
113
114#endif /* _LINUX_PCI_P2P_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e72ca8dd6241..11c71c4ecf75 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -281,6 +281,7 @@ struct pcie_link_state;
281struct pci_vpd; 281struct pci_vpd;
282struct pci_sriov; 282struct pci_sriov;
283struct pci_ats; 283struct pci_ats;
284struct pci_p2pdma;
284 285
285/* The pci_dev structure describes PCI devices */ 286/* The pci_dev structure describes PCI devices */
286struct pci_dev { 287struct pci_dev {
@@ -325,6 +326,7 @@ struct pci_dev {
325 pci_power_t current_state; /* Current operating state. In ACPI, 326 pci_power_t current_state; /* Current operating state. In ACPI,
326 this is D0-D3, D0 being fully 327 this is D0-D3, D0 being fully
327 functional, and D3 being off. */ 328 functional, and D3 being off. */
329 unsigned int imm_ready:1; /* Supports Immediate Readiness */
328 u8 pm_cap; /* PM capability offset */ 330 u8 pm_cap; /* PM capability offset */
329 unsigned int pme_support:5; /* Bitmask of states from which PME# 331 unsigned int pme_support:5; /* Bitmask of states from which PME#
330 can be generated */ 332 can be generated */
@@ -402,6 +404,7 @@ struct pci_dev {
402 unsigned int has_secondary_link:1; 404 unsigned int has_secondary_link:1;
403 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ 405 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
404 unsigned int is_probed:1; /* Device probing in progress */ 406 unsigned int is_probed:1; /* Device probing in progress */
407 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
405 pci_dev_flags_t dev_flags; 408 pci_dev_flags_t dev_flags;
406 atomic_t enable_cnt; /* pci_enable_device has been called */ 409 atomic_t enable_cnt; /* pci_enable_device has been called */
407 410
@@ -439,6 +442,9 @@ struct pci_dev {
439#ifdef CONFIG_PCI_PASID 442#ifdef CONFIG_PCI_PASID
440 u16 pasid_features; 443 u16 pasid_features;
441#endif 444#endif
445#ifdef CONFIG_PCI_P2PDMA
446 struct pci_p2pdma *p2pdma;
447#endif
442 phys_addr_t rom; /* Physical address if not from BAR */ 448 phys_addr_t rom; /* Physical address if not from BAR */
443 size_t romlen; /* Length if not from BAR */ 449 size_t romlen; /* Length if not from BAR */
444 char *driver_override; /* Driver name to force a match */ 450 char *driver_override; /* Driver name to force a match */
@@ -1235,6 +1241,9 @@ void pci_bus_remove_resources(struct pci_bus *bus);
1235int devm_request_pci_bus_resources(struct device *dev, 1241int devm_request_pci_bus_resources(struct device *dev,
1236 struct list_head *resources); 1242 struct list_head *resources);
1237 1243
1244/* Temporary until new and working PCI SBR API in place */
1245int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1246
1238#define pci_bus_for_each_resource(bus, res, i) \ 1247#define pci_bus_for_each_resource(bus, res, i) \
1239 for (i = 0; \ 1248 for (i = 0; \
1240 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ 1249 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
@@ -1339,7 +1348,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1339 1348
1340/* kmem_cache style wrapper around pci_alloc_consistent() */ 1349/* kmem_cache style wrapper around pci_alloc_consistent() */
1341 1350
1342#include <linux/pci-dma.h>
1343#include <linux/dmapool.h> 1351#include <linux/dmapool.h>
1344 1352
1345#define pci_pool dma_pool 1353#define pci_pool dma_pool
@@ -1702,6 +1710,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1702 unsigned long *out_hwirq, 1710 unsigned long *out_hwirq,
1703 unsigned int *out_type) 1711 unsigned int *out_type)
1704{ return -EINVAL; } 1712{ return -EINVAL; }
1713
1714static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1715 struct pci_dev *dev)
1716{ return NULL; }
1705#endif /* CONFIG_PCI */ 1717#endif /* CONFIG_PCI */
1706 1718
1707/* Include architecture-dependent settings and functions */ 1719/* Include architecture-dependent settings and functions */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a6d6650a0490..7acc9f91e72b 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -16,8 +16,6 @@
16 16
17/** 17/**
18 * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use 18 * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use
19 * @owner: The module owner of this structure
20 * @mod_name: The module name (KBUILD_MODNAME) of this structure
21 * @enable_slot: Called when the user wants to enable a specific pci slot 19 * @enable_slot: Called when the user wants to enable a specific pci slot
22 * @disable_slot: Called when the user wants to disable a specific pci slot 20 * @disable_slot: Called when the user wants to disable a specific pci slot
23 * @set_attention_status: Called to set the specific slot's attention LED to 21 * @set_attention_status: Called to set the specific slot's attention LED to
@@ -25,17 +23,9 @@
25 * @hardware_test: Called to run a specified hardware test on the specified 23 * @hardware_test: Called to run a specified hardware test on the specified
26 * slot. 24 * slot.
27 * @get_power_status: Called to get the current power status of a slot. 25 * @get_power_status: Called to get the current power status of a slot.
28 * If this field is NULL, the value passed in the struct hotplug_slot_info
29 * will be used when this value is requested by a user.
30 * @get_attention_status: Called to get the current attention status of a slot. 26 * @get_attention_status: Called to get the current attention status of a slot.
31 * If this field is NULL, the value passed in the struct hotplug_slot_info
32 * will be used when this value is requested by a user.
33 * @get_latch_status: Called to get the current latch status of a slot. 27 * @get_latch_status: Called to get the current latch status of a slot.
34 * If this field is NULL, the value passed in the struct hotplug_slot_info
35 * will be used when this value is requested by a user.
36 * @get_adapter_status: Called to get see if an adapter is present in the slot or not. 28 * @get_adapter_status: Called to get see if an adapter is present in the slot or not.
37 * If this field is NULL, the value passed in the struct hotplug_slot_info
38 * will be used when this value is requested by a user.
39 * @reset_slot: Optional interface to allow override of a bus reset for the 29 * @reset_slot: Optional interface to allow override of a bus reset for the
40 * slot for cases where a secondary bus reset can result in spurious 30 * slot for cases where a secondary bus reset can result in spurious
41 * hotplug events or where a slot can be reset independent of the bus. 31 * hotplug events or where a slot can be reset independent of the bus.
@@ -46,8 +36,6 @@
46 * set an LED, enable / disable power, etc.) 36 * set an LED, enable / disable power, etc.)
47 */ 37 */
48struct hotplug_slot_ops { 38struct hotplug_slot_ops {
49 struct module *owner;
50 const char *mod_name;
51 int (*enable_slot) (struct hotplug_slot *slot); 39 int (*enable_slot) (struct hotplug_slot *slot);
52 int (*disable_slot) (struct hotplug_slot *slot); 40 int (*disable_slot) (struct hotplug_slot *slot);
53 int (*set_attention_status) (struct hotplug_slot *slot, u8 value); 41 int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -60,37 +48,19 @@ struct hotplug_slot_ops {
60}; 48};
61 49
62/** 50/**
63 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
64 * @power_status: if power is enabled or not (1/0)
65 * @attention_status: if the attention light is enabled or not (1/0)
66 * @latch_status: if the latch (if any) is open or closed (1/0)
67 * @adapter_status: if there is a pci board present in the slot or not (1/0)
68 *
69 * Used to notify the hotplug pci core of the status of a specific slot.
70 */
71struct hotplug_slot_info {
72 u8 power_status;
73 u8 attention_status;
74 u8 latch_status;
75 u8 adapter_status;
76};
77
78/**
79 * struct hotplug_slot - used to register a physical slot with the hotplug pci core 51 * struct hotplug_slot - used to register a physical slot with the hotplug pci core
80 * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot 52 * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
81 * @info: pointer to the &struct hotplug_slot_info for the initial values for 53 * @owner: The module owner of this structure
82 * this slot. 54 * @mod_name: The module name (KBUILD_MODNAME) of this structure
83 * @private: used by the hotplug pci controller driver to store whatever it
84 * needs.
85 */ 55 */
86struct hotplug_slot { 56struct hotplug_slot {
87 struct hotplug_slot_ops *ops; 57 const struct hotplug_slot_ops *ops;
88 struct hotplug_slot_info *info;
89 void *private;
90 58
91 /* Variables below this are for use only by the hotplug pci core. */ 59 /* Variables below this are for use only by the hotplug pci core. */
92 struct list_head slot_list; 60 struct list_head slot_list;
93 struct pci_slot *pci_slot; 61 struct pci_slot *pci_slot;
62 struct module *owner;
63 const char *mod_name;
94}; 64};
95 65
96static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) 66static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
@@ -110,9 +80,6 @@ void pci_hp_del(struct hotplug_slot *slot);
110void pci_hp_destroy(struct hotplug_slot *slot); 80void pci_hp_destroy(struct hotplug_slot *slot);
111void pci_hp_deregister(struct hotplug_slot *slot); 81void pci_hp_deregister(struct hotplug_slot *slot);
112 82
113int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
114 struct hotplug_slot_info *info);
115
116/* use a define to avoid include chaining to get THIS_MODULE & friends */ 83/* use a define to avoid include chaining to get THIS_MODULE & friends */
117#define pci_hp_register(slot, pbus, devnr, name) \ 84#define pci_hp_register(slot, pbus, devnr, name) \
118 __pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME) 85 __pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d157983b84cf..69f0abe1ba1a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -117,6 +117,10 @@
117#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe 117#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
118#define PCI_CLASS_SERIAL_FIBER 0x0c04 118#define PCI_CLASS_SERIAL_FIBER 0x0c04
119#define PCI_CLASS_SERIAL_SMBUS 0x0c05 119#define PCI_CLASS_SERIAL_SMBUS 0x0c05
120#define PCI_CLASS_SERIAL_IPMI 0x0c07
121#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
122#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
123#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
120 124
121#define PCI_BASE_CLASS_WIRELESS 0x0d 125#define PCI_BASE_CLASS_WIRELESS 0x0d
122#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 126#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
@@ -2539,8 +2543,6 @@
2539#define PCI_VENDOR_ID_HUAWEI 0x19e5 2543#define PCI_VENDOR_ID_HUAWEI 0x19e5
2540 2544
2541#define PCI_VENDOR_ID_NETRONOME 0x19ee 2545#define PCI_VENDOR_ID_NETRONOME 0x19ee
2542#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
2543#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
2544#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 2546#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
2545#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 2547#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
2546#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 2548#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
@@ -2561,6 +2563,8 @@
2561 2563
2562#define PCI_VENDOR_ID_AMAZON 0x1d0f 2564#define PCI_VENDOR_ID_AMAZON 0x1d0f
2563 2565
2566#define PCI_VENDOR_ID_HYGON 0x1d94
2567
2564#define PCI_VENDOR_ID_TEKRAM 0x1de1 2568#define PCI_VENDOR_ID_TEKRAM 0x1de1
2565#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 2569#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
2566 2570
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 009cdf3d65b6..b297cd1cd4f1 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -108,6 +108,7 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
108void percpu_ref_switch_to_percpu(struct percpu_ref *ref); 108void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
109void percpu_ref_kill_and_confirm(struct percpu_ref *ref, 109void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
110 percpu_ref_func_t *confirm_kill); 110 percpu_ref_func_t *confirm_kill);
111void percpu_ref_resurrect(struct percpu_ref *ref);
111void percpu_ref_reinit(struct percpu_ref *ref); 112void percpu_ref_reinit(struct percpu_ref *ref);
112 113
113/** 114/**
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 10f92e1d8e7b..bf309ff6f244 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -99,6 +99,7 @@ struct arm_pmu {
99 void (*stop)(struct arm_pmu *); 99 void (*stop)(struct arm_pmu *);
100 void (*reset)(void *); 100 void (*reset)(void *);
101 int (*map_event)(struct perf_event *event); 101 int (*map_event)(struct perf_event *event);
102 int (*filter_match)(struct perf_event *event);
102 int num_events; 103 int num_events;
103 bool secure_access; /* 32-bit ARM only */ 104 bool secure_access; /* 32-bit ARM only */
104#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 105#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 21713dc14ce2..7bb77850c65a 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -9,8 +9,10 @@
9 * PFN_SG_LAST - pfn references a page and is the last scatterlist entry 9 * PFN_SG_LAST - pfn references a page and is the last scatterlist entry
10 * PFN_DEV - pfn is not covered by system memmap by default 10 * PFN_DEV - pfn is not covered by system memmap by default
11 * PFN_MAP - pfn has a dynamic page mapping established by a device driver 11 * PFN_MAP - pfn has a dynamic page mapping established by a device driver
12 * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not
13 * get_user_pages
12 */ 14 */
13#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) 15#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
14#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) 16#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
15#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) 17#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
16#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) 18#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
diff --git a/include/linux/phy.h b/include/linux/phy.h
index cd6f637cbbfb..3ea87f774a76 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -19,6 +19,7 @@
19#include <linux/compiler.h> 19#include <linux/compiler.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/linkmode.h>
22#include <linux/mdio.h> 23#include <linux/mdio.h>
23#include <linux/mii.h> 24#include <linux/mii.h>
24#include <linux/module.h> 25#include <linux/module.h>
@@ -41,13 +42,21 @@
41#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \ 42#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
42 SUPPORTED_1000baseT_Full) 43 SUPPORTED_1000baseT_Full)
43 44
44#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \ 45extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
45 PHY_100BT_FEATURES | \ 46extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
46 PHY_DEFAULT_FEATURES) 47extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
47 48extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
48#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \ 49extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
49 PHY_1000BT_FEATURES) 50extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
50 51extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
52
53#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
54#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
55#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
56#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
57#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
58#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
59#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
51 60
52/* 61/*
53 * Set phydev->irq to PHY_POLL if interrupts are not supported, 62 * Set phydev->irq to PHY_POLL if interrupts are not supported,
@@ -509,7 +518,7 @@ struct phy_driver {
509 u32 phy_id; 518 u32 phy_id;
510 char *name; 519 char *name;
511 u32 phy_id_mask; 520 u32 phy_id_mask;
512 u32 features; 521 const unsigned long * const features;
513 u32 flags; 522 u32 flags;
514 const void *driver_data; 523 const void *driver_data;
515 524
@@ -967,6 +976,12 @@ static inline void phy_device_reset(struct phy_device *phydev, int value)
967#define phydev_err(_phydev, format, args...) \ 976#define phydev_err(_phydev, format, args...) \
968 dev_err(&_phydev->mdio.dev, format, ##args) 977 dev_err(&_phydev->mdio.dev, format, ##args)
969 978
979#define phydev_info(_phydev, format, args...) \
980 dev_info(&_phydev->mdio.dev, format, ##args)
981
982#define phydev_warn(_phydev, format, args...) \
983 dev_warn(&_phydev->mdio.dev, format, ##args)
984
970#define phydev_dbg(_phydev, format, args...) \ 985#define phydev_dbg(_phydev, format, args...) \
971 dev_dbg(&_phydev->mdio.dev, format, ##args) 986 dev_dbg(&_phydev->mdio.dev, format, ##args)
972 987
@@ -1039,7 +1054,7 @@ void phy_change_work(struct work_struct *work);
1039void phy_mac_interrupt(struct phy_device *phydev); 1054void phy_mac_interrupt(struct phy_device *phydev);
1040void phy_start_machine(struct phy_device *phydev); 1055void phy_start_machine(struct phy_device *phydev);
1041void phy_stop_machine(struct phy_device *phydev); 1056void phy_stop_machine(struct phy_device *phydev);
1042void phy_trigger_machine(struct phy_device *phydev, bool sync); 1057void phy_trigger_machine(struct phy_device *phydev);
1043int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); 1058int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
1044void phy_ethtool_ksettings_get(struct phy_device *phydev, 1059void phy_ethtool_ksettings_get(struct phy_device *phydev,
1045 struct ethtool_link_ksettings *cmd); 1060 struct ethtool_link_ksettings *cmd);
@@ -1049,6 +1064,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
1049int phy_start_interrupts(struct phy_device *phydev); 1064int phy_start_interrupts(struct phy_device *phydev);
1050void phy_print_status(struct phy_device *phydev); 1065void phy_print_status(struct phy_device *phydev);
1051int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); 1066int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
1067void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
1068void phy_support_sym_pause(struct phy_device *phydev);
1069void phy_support_asym_pause(struct phy_device *phydev);
1070void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
1071 bool autoneg);
1072void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
1073bool phy_validate_pause(struct phy_device *phydev,
1074 struct ethtool_pauseparam *pp);
1052 1075
1053int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, 1076int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
1054 int (*run)(struct phy_device *)); 1077 int (*run)(struct phy_device *));
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
deleted file mode 100644
index 0a2c18a9771d..000000000000
--- a/include/linux/phy/phy-qcom-ufs.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef PHY_QCOM_UFS_H_
16#define PHY_QCOM_UFS_H_
17
18#include "phy.h"
19
20/**
21 * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
22 * ref clock.
23 * @phy: reference to a generic phy.
24 */
25void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
26
27/**
28 * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
29 * ref clock.
30 * @phy: reference to a generic phy.
31 */
32void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
33
34int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
35void ufs_qcom_phy_save_controller_version(struct phy *phy,
36 u8 major, u16 minor, u16 step);
37
38#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 9713aebdd348..03b319f89a34 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -37,9 +37,11 @@ enum phy_mode {
37 PHY_MODE_USB_OTG, 37 PHY_MODE_USB_OTG,
38 PHY_MODE_SGMII, 38 PHY_MODE_SGMII,
39 PHY_MODE_2500SGMII, 39 PHY_MODE_2500SGMII,
40 PHY_MODE_QSGMII,
40 PHY_MODE_10GKR, 41 PHY_MODE_10GKR,
41 PHY_MODE_UFS_HS_A, 42 PHY_MODE_UFS_HS_A,
42 PHY_MODE_UFS_HS_B, 43 PHY_MODE_UFS_HS_B,
44 PHY_MODE_PCIE,
43}; 45};
44 46
45/** 47/**
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
index f8f1f6b952a6..eb9805bb3fe8 100644
--- a/include/linux/platform_data/dma-ep93xx.h
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
85ep93xx_dma_chan_direction(struct dma_chan *chan) 85ep93xx_dma_chan_direction(struct dma_chan *chan)
86{ 86{
87 if (!ep93xx_dma_chan_is_m2p(chan)) 87 if (!ep93xx_dma_chan_is_m2p(chan))
88 return DMA_NONE; 88 return DMA_TRANS_NONE;
89 89
90 /* even channels are for TX, odd for RX */ 90 /* even channels are for TX, odd for RX */
91 return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 91 return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h
new file mode 100644
index 000000000000..d718ccfa3421
--- /dev/null
+++ b/include/linux/platform_data/dma-mcf-edma.h
@@ -0,0 +1,38 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Freescale eDMA platform data, ColdFire SoC's family.
4 *
5 * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__
18#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__
19
20struct dma_slave_map;
21
22bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
23
24#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch)
25
26/**
27 * struct mcf_edma_platform_data - platform specific data for eDMA engine
28 *
29 * @ver The eDMA module version.
30 * @dma_channels The number of eDMA channels.
31 */
32struct mcf_edma_platform_data {
33 int dma_channels;
34 const struct dma_slave_map *slave_map;
35 int slavecnt;
36};
37
38#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
index 5c15a738e116..219bd79dabfc 100644
--- a/include/linux/platform_data/ehci-sh.h
+++ b/include/linux/platform_data/ehci-sh.h
@@ -1,21 +1,9 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * EHCI SuperH driver platform data 3 * EHCI SuperH driver platform data
3 * 4 *
4 * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> 5 * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
5 * Copyright (C) 2012 Renesas Solutions Corp. 6 * Copyright (C) 2012 Renesas Solutions Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */ 7 */
20 8
21#ifndef __USB_EHCI_SH_H 9#ifndef __USB_EHCI_SH_H
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 57a5a35e0073..f92a47e18034 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -16,46 +16,12 @@
16#ifndef __DAVINCI_GPIO_PLATFORM_H 16#ifndef __DAVINCI_GPIO_PLATFORM_H
17#define __DAVINCI_GPIO_PLATFORM_H 17#define __DAVINCI_GPIO_PLATFORM_H
18 18
19#include <linux/io.h>
20#include <linux/spinlock.h>
21
22#include <asm-generic/gpio.h>
23
24#define MAX_REGS_BANKS 5
25#define MAX_INT_PER_BANK 32
26
27struct davinci_gpio_platform_data { 19struct davinci_gpio_platform_data {
28 u32 ngpio; 20 u32 ngpio;
29 u32 gpio_unbanked; 21 u32 gpio_unbanked;
30}; 22};
31 23
32struct davinci_gpio_irq_data {
33 void __iomem *regs;
34 struct davinci_gpio_controller *chip;
35 int bank_num;
36};
37
38struct davinci_gpio_controller {
39 struct gpio_chip chip;
40 struct irq_domain *irq_domain;
41 /* Serialize access to GPIO registers */
42 spinlock_t lock;
43 void __iomem *regs[MAX_REGS_BANKS];
44 int gpio_unbanked;
45 int irqs[MAX_INT_PER_BANK];
46 unsigned int base;
47};
48
49/*
50 * basic gpio routines
51 */
52#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
53
54/* Convert GPIO signal to GPIO pin number */ 24/* Convert GPIO signal to GPIO pin number */
55#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) 25#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
56 26
57static inline u32 __gpio_mask(unsigned gpio)
58{
59 return 1 << (gpio % 32);
60}
61#endif 27#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 8612855691b2..8485c6a9a383 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -197,23 +197,12 @@ struct omap_gpio_platform_data {
197 bool is_mpuio; /* whether the bank is of type MPUIO */ 197 bool is_mpuio; /* whether the bank is of type MPUIO */
198 u32 non_wakeup_gpios; 198 u32 non_wakeup_gpios;
199 199
200 u32 quirks; /* Version specific quirks mask */
201
200 struct omap_gpio_reg_offs *regs; 202 struct omap_gpio_reg_offs *regs;
201 203
202 /* Return context loss count due to PM states changing */ 204 /* Return context loss count due to PM states changing */
203 int (*get_context_loss_count)(struct device *dev); 205 int (*get_context_loss_count)(struct device *dev);
204}; 206};
205 207
206#if IS_BUILTIN(CONFIG_GPIO_OMAP)
207extern void omap2_gpio_prepare_for_idle(int off_mode);
208extern void omap2_gpio_resume_after_idle(void);
209#else
210static inline void omap2_gpio_prepare_for_idle(int off_mode)
211{
212}
213
214static inline void omap2_gpio_resume_after_idle(void)
215{
216}
217#endif
218
219#endif 208#endif
diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h
deleted file mode 100644
index b10d11c9bb49..000000000000
--- a/include/linux/platform_data/gpio-ts5500.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * GPIO (DIO) header for Technologic Systems TS-5500
3 *
4 * Copyright (c) 2012 Savoir-faire Linux Inc.
5 * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _PDATA_GPIO_TS5500_H
13#define _PDATA_GPIO_TS5500_H
14
15/**
16 * struct ts5500_dio_platform_data - TS-5500 pin block configuration
17 * @base: The GPIO base number to use.
18 * @strap: The only pin connected to an interrupt in a block is input-only.
19 * If you need a bidirectional line which can trigger an IRQ, you
20 * may strap it with an in/out pin. This flag indicates this case.
21 */
22struct ts5500_dio_platform_data {
23 int base;
24 bool strap;
25};
26
27#endif /* _PDATA_GPIO_TS5500_H */
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 73d9098ada2d..85da11916bd5 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -70,9 +70,6 @@ struct omap_hsmmc_platform_data {
70 /* string specifying a particular variant of hardware */ 70 /* string specifying a particular variant of hardware */
71 char *version; 71 char *version;
72 72
73 int gpio_cd; /* gpio (card detect) */
74 int gpio_cod; /* gpio (cover detect) */
75 int gpio_wp; /* gpio (write protect) */
76 /* if we have special card, init it using this callback */ 73 /* if we have special card, init it using this callback */
77 void (*init_card)(struct mmc_card *card); 74 void (*init_card)(struct mmc_card *card);
78 75
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index 98b7925f1a2d..c0f624aca81c 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -48,6 +48,5 @@ struct mv_usb_platform_data {
48 int (*phy_init)(void __iomem *regbase); 48 int (*phy_init)(void __iomem *regbase);
49 void (*phy_deinit)(void __iomem *regbase); 49 void (*phy_deinit)(void __iomem *regbase);
50 int (*set_vbus)(unsigned int vbus); 50 int (*set_vbus)(unsigned int vbus);
51 int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
52}; 51};
53#endif 52#endif
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 9e20c2fb4ffd..4977c06d8a86 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -33,8 +33,6 @@
33 * 1: choose feedback clk + delay value 33 * 1: choose feedback clk + delay value
34 * 2: choose internal clk 34 * 2: choose internal clk
35 * @clk_delay_enable: enable clk_delay or not, used on pxa910 35 * @clk_delay_enable: enable clk_delay or not, used on pxa910
36 * @ext_cd_gpio: gpio pin used for external CD line
37 * @ext_cd_gpio_invert: invert values for external CD gpio line
38 * @max_speed: the maximum speed supported 36 * @max_speed: the maximum speed supported
39 * @host_caps: Standard MMC host capabilities bit field. 37 * @host_caps: Standard MMC host capabilities bit field.
40 * @quirks: quirks of platfrom 38 * @quirks: quirks of platfrom
@@ -46,8 +44,6 @@ struct sdhci_pxa_platdata {
46 unsigned int clk_delay_cycles; 44 unsigned int clk_delay_cycles;
47 unsigned int clk_delay_sel; 45 unsigned int clk_delay_sel;
48 bool clk_delay_enable; 46 bool clk_delay_enable;
49 unsigned int ext_cd_gpio;
50 bool ext_cd_gpio_invert;
51 unsigned int max_speed; 47 unsigned int max_speed;
52 u32 host_caps; 48 u32 host_caps;
53 u32 host_caps2; 49 u32 host_caps2;
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index ee495d707f17..fe815d7d9f58 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -1,14 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * shmob_drm.h -- SH Mobile DRM driver 3 * shmob_drm.h -- SH Mobile DRM driver
3 * 4 *
4 * Copyright (C) 2012 Renesas Corporation 5 * Copyright (C) 2012 Renesas Corporation
5 * 6 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) 7 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */ 8 */
13 9
14#ifndef __SHMOB_DRM_H__ 10#ifndef __SHMOB_DRM_H__
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index f4edcb03c40c..0638fb6353bc 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -36,9 +36,6 @@ enum {
36 * @num_chipselect: number of chipselects supported by this SPI master 36 * @num_chipselect: number of chipselects supported by this SPI master
37 * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt 37 * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
38 * controller withn the SoC. Possible values are 0 and 1. 38 * controller withn the SoC. Possible values are 0 and 1.
39 * @chip_sel: list of GPIOs which can act as chip-selects for the SPI.
40 * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
41 * to populate if all chip-selects are internal.
42 * @cshold_bug: set this to true if the SPI controller on your chip requires 39 * @cshold_bug: set this to true if the SPI controller on your chip requires
43 * a write to CSHOLD bit in between transfers (like in DM355). 40 * a write to CSHOLD bit in between transfers (like in DM355).
44 * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any 41 * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
@@ -48,7 +45,6 @@ struct davinci_spi_platform_data {
48 u8 version; 45 u8 version;
49 u8 num_chipselect; 46 u8 num_chipselect;
50 u8 intr_line; 47 u8 intr_line;
51 u8 *chip_sel;
52 u8 prescaler_limit; 48 u8 prescaler_limit;
53 bool cshold_bug; 49 bool cshold_bug;
54 enum dma_event_q dma_event_q; 50 enum dma_event_q dma_event_q;
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 1a9f38f27f65..c7c081dc6034 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -40,6 +40,7 @@ struct platform_device {
40 40
41#define platform_get_device_id(pdev) ((pdev)->id_entry) 41#define platform_get_device_id(pdev) ((pdev)->id_entry)
42 42
43#define dev_is_platform(dev) ((dev)->bus == &platform_bus_type)
43#define to_platform_device(x) container_of((x), struct platform_device, dev) 44#define to_platform_device(x) container_of((x), struct platform_device, dev)
44 45
45extern int platform_device_register(struct platform_device *); 46extern int platform_device_register(struct platform_device *);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 776c546d581a..3b5d7280e52e 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -17,11 +17,36 @@
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19 19
20/* Defines used for the flags field in the struct generic_pm_domain */ 20/*
21#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ 21 * Flags to control the behaviour of a genpd.
22#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */ 22 *
23#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */ 23 * These flags may be set in the struct generic_pm_domain's flags field by a
24#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) /* Keep devices active if wakeup */ 24 * genpd backend driver. The flags must be set before it calls pm_genpd_init(),
25 * which initializes a genpd.
26 *
27 * GENPD_FLAG_PM_CLK: Instructs genpd to use the PM clk framework,
28 * while powering on/off attached devices.
29 *
30 * GENPD_FLAG_IRQ_SAFE: This informs genpd that its backend callbacks,
31 * ->power_on|off(), doesn't sleep. Hence, these
32 * can be invoked from within atomic context, which
33 * enables genpd to power on/off the PM domain,
34 * even when pm_runtime_is_irq_safe() returns true,
35 * for any of its attached devices. Note that, a
36 * genpd having this flag set, requires its
37 * masterdomains to also have it set.
38 *
39 * GENPD_FLAG_ALWAYS_ON: Instructs genpd to always keep the PM domain
40 * powered on.
41 *
42 * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered
43 * on, in case any of its attached devices is used
44 * in the wakeup path to serve system wakeups.
45 */
46#define GENPD_FLAG_PM_CLK (1U << 0)
47#define GENPD_FLAG_IRQ_SAFE (1U << 1)
48#define GENPD_FLAG_ALWAYS_ON (1U << 2)
49#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
25 50
26enum gpd_status { 51enum gpd_status {
27 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 52 GPD_STATE_ACTIVE = 0, /* PM domain is active */
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 099b31960dec..5d399eeef172 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -79,6 +79,7 @@ struct dev_pm_set_opp_data {
79#if defined(CONFIG_PM_OPP) 79#if defined(CONFIG_PM_OPP)
80 80
81struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); 81struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
82struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
82void dev_pm_opp_put_opp_table(struct opp_table *opp_table); 83void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
83 84
84unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); 85unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
@@ -136,6 +137,11 @@ static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
136 return ERR_PTR(-ENOTSUPP); 137 return ERR_PTR(-ENOTSUPP);
137} 138}
138 139
140static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
141{
142 return ERR_PTR(-ENOTSUPP);
143}
144
139static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} 145static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
140 146
141static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 147static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 9ac8fc60ad49..52453a24a24f 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -9,6 +9,7 @@
9#ifndef _LINUX_PMU_H 9#ifndef _LINUX_PMU_H
10#define _LINUX_PMU_H 10#define _LINUX_PMU_H
11 11
12#include <linux/rtc.h>
12#include <uapi/linux/pmu.h> 13#include <uapi/linux/pmu.h>
13 14
14 15
@@ -36,6 +37,9 @@ static inline void pmu_resume(void)
36 37
37extern void pmu_enable_irled(int on); 38extern void pmu_enable_irled(int on);
38 39
40extern time64_t pmu_get_time(void);
41extern int pmu_set_rtc_time(struct rtc_time *tm);
42
39extern void pmu_restart(void); 43extern void pmu_restart(void);
40extern void pmu_shutdown(void); 44extern void pmu_shutdown(void);
41extern void pmu_unlock(void); 45extern void pmu_unlock(void);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index ee7e987ea1b4..e96581ca7c9d 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -126,5 +126,5 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
126 126
127void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); 127void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
128 128
129void posixtimer_rearm(struct siginfo *info); 129void posixtimer_rearm(struct kernel_siginfo *info);
130#endif 130#endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index d6355f49fbae..507c5e214c42 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -24,6 +24,7 @@ enum bq27xxx_chip {
24 BQ27546, 24 BQ27546,
25 BQ27742, 25 BQ27742,
26 BQ27545, /* bq27545 */ 26 BQ27545, /* bq27545 */
27 BQ27411,
27 BQ27421, /* bq27421, bq27441, bq27621 */ 28 BQ27421, /* bq27421, bq27441, bq27621 */
28 BQ27425, 29 BQ27425,
29 BQ27426, 30 BQ27426,
diff --git a/include/linux/psi.h b/include/linux/psi.h
new file mode 100644
index 000000000000..8e0725aac0aa
--- /dev/null
+++ b/include/linux/psi.h
@@ -0,0 +1,53 @@
1#ifndef _LINUX_PSI_H
2#define _LINUX_PSI_H
3
4#include <linux/psi_types.h>
5#include <linux/sched.h>
6
7struct seq_file;
8struct css_set;
9
10#ifdef CONFIG_PSI
11
12extern bool psi_disabled;
13
14void psi_init(void);
15
16void psi_task_change(struct task_struct *task, int clear, int set);
17
18void psi_memstall_tick(struct task_struct *task, int cpu);
19void psi_memstall_enter(unsigned long *flags);
20void psi_memstall_leave(unsigned long *flags);
21
22int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
23
24#ifdef CONFIG_CGROUPS
25int psi_cgroup_alloc(struct cgroup *cgrp);
26void psi_cgroup_free(struct cgroup *cgrp);
27void cgroup_move_task(struct task_struct *p, struct css_set *to);
28#endif
29
30#else /* CONFIG_PSI */
31
32static inline void psi_init(void) {}
33
34static inline void psi_memstall_enter(unsigned long *flags) {}
35static inline void psi_memstall_leave(unsigned long *flags) {}
36
37#ifdef CONFIG_CGROUPS
38static inline int psi_cgroup_alloc(struct cgroup *cgrp)
39{
40 return 0;
41}
42static inline void psi_cgroup_free(struct cgroup *cgrp)
43{
44}
45static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
46{
47 rcu_assign_pointer(p->cgroups, to);
48}
49#endif
50
51#endif /* CONFIG_PSI */
52
53#endif /* _LINUX_PSI_H */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
new file mode 100644
index 000000000000..2cf422db5d18
--- /dev/null
+++ b/include/linux/psi_types.h
@@ -0,0 +1,92 @@
1#ifndef _LINUX_PSI_TYPES_H
2#define _LINUX_PSI_TYPES_H
3
4#include <linux/seqlock.h>
5#include <linux/types.h>
6
7#ifdef CONFIG_PSI
8
9/* Tracked task states */
10enum psi_task_count {
11 NR_IOWAIT,
12 NR_MEMSTALL,
13 NR_RUNNING,
14 NR_PSI_TASK_COUNTS,
15};
16
17/* Task state bitmasks */
18#define TSK_IOWAIT (1 << NR_IOWAIT)
19#define TSK_MEMSTALL (1 << NR_MEMSTALL)
20#define TSK_RUNNING (1 << NR_RUNNING)
21
22/* Resources that workloads could be stalled on */
23enum psi_res {
24 PSI_IO,
25 PSI_MEM,
26 PSI_CPU,
27 NR_PSI_RESOURCES,
28};
29
30/*
31 * Pressure states for each resource:
32 *
33 * SOME: Stalled tasks & working tasks
34 * FULL: Stalled tasks & no working tasks
35 */
36enum psi_states {
37 PSI_IO_SOME,
38 PSI_IO_FULL,
39 PSI_MEM_SOME,
40 PSI_MEM_FULL,
41 PSI_CPU_SOME,
42 /* Only per-CPU, to weigh the CPU in the global average: */
43 PSI_NONIDLE,
44 NR_PSI_STATES,
45};
46
47struct psi_group_cpu {
48 /* 1st cacheline updated by the scheduler */
49
50 /* Aggregator needs to know of concurrent changes */
51 seqcount_t seq ____cacheline_aligned_in_smp;
52
53 /* States of the tasks belonging to this group */
54 unsigned int tasks[NR_PSI_TASK_COUNTS];
55
56 /* Period time sampling buckets for each state of interest (ns) */
57 u32 times[NR_PSI_STATES];
58
59 /* Time of last task change in this group (rq_clock) */
60 u64 state_start;
61
62 /* 2nd cacheline updated by the aggregator */
63
64 /* Delta detection against the sampling buckets */
65 u32 times_prev[NR_PSI_STATES] ____cacheline_aligned_in_smp;
66};
67
68struct psi_group {
69 /* Protects data updated during an aggregation */
70 struct mutex stat_lock;
71
72 /* Per-cpu task state & time tracking */
73 struct psi_group_cpu __percpu *pcpu;
74
75 /* Periodic aggregation state */
76 u64 total_prev[NR_PSI_STATES - 1];
77 u64 last_update;
78 u64 next_update;
79 struct delayed_work clock_work;
80
81 /* Total stall times and sampled pressure averages */
82 u64 total[NR_PSI_STATES - 1];
83 unsigned long avg[NR_PSI_STATES - 1][3];
84};
85
86#else /* CONFIG_PSI */
87
88struct psi_group { };
89
90#endif /* CONFIG_PSI */
91
92#endif /* _LINUX_PSI_TYPES_H */
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index e6d226464838..602d64725222 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -46,6 +46,7 @@ struct persistent_ram_zone {
46 phys_addr_t paddr; 46 phys_addr_t paddr;
47 size_t size; 47 size_t size;
48 void *vaddr; 48 void *vaddr;
49 char *label;
49 struct persistent_ram_buffer *buffer; 50 struct persistent_ram_buffer *buffer;
50 size_t buffer_size; 51 size_t buffer_size;
51 u32 flags; 52 u32 flags;
@@ -65,7 +66,7 @@ struct persistent_ram_zone {
65 66
66struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, 67struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
67 u32 sig, struct persistent_ram_ecc_info *ecc_info, 68 u32 sig, struct persistent_ram_ecc_info *ecc_info,
68 unsigned int memtype, u32 flags); 69 unsigned int memtype, u32 flags, char *label);
69void persistent_ram_free(struct persistent_ram_zone *prz); 70void persistent_ram_free(struct persistent_ram_zone *prz);
70void persistent_ram_zap(struct persistent_ram_zone *prz); 71void persistent_ram_zap(struct persistent_ram_zone *prz);
71 72
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 4f36431c380b..6c2ffed907f5 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -62,14 +62,17 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
62#define PTRACE_MODE_READ 0x01 62#define PTRACE_MODE_READ 0x01
63#define PTRACE_MODE_ATTACH 0x02 63#define PTRACE_MODE_ATTACH 0x02
64#define PTRACE_MODE_NOAUDIT 0x04 64#define PTRACE_MODE_NOAUDIT 0x04
65#define PTRACE_MODE_FSCREDS 0x08 65#define PTRACE_MODE_FSCREDS 0x08
66#define PTRACE_MODE_REALCREDS 0x10 66#define PTRACE_MODE_REALCREDS 0x10
67#define PTRACE_MODE_SCHED 0x20
68#define PTRACE_MODE_IBPB 0x40
67 69
68/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ 70/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
69#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) 71#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
70#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) 72#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
71#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) 73#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
72#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) 74#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
75#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
73 76
74/** 77/**
75 * ptrace_may_access - check whether the caller is permitted to access 78 * ptrace_may_access - check whether the caller is permitted to access
@@ -87,6 +90,20 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
87 */ 90 */
88extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); 91extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
89 92
93/**
94 * ptrace_may_access - check whether the caller is permitted to access
95 * a target task.
96 * @task: target task
97 * @mode: selects type of access and caller credentials
98 *
99 * Returns true on success, false on denial.
100 *
101 * Similar to ptrace_may_access(). Only to be called from context switch
102 * code. Does not call into audit and the regular LSM hooks due to locking
103 * constraints.
104 */
105extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
106
90static inline int ptrace_reparented(struct task_struct *child) 107static inline int ptrace_reparented(struct task_struct *child)
91{ 108{
92 return !same_thread_group(child->real_parent, child->parent); 109 return !same_thread_group(child->real_parent, child->parent);
@@ -336,14 +353,19 @@ static inline void user_enable_block_step(struct task_struct *task)
336extern void user_enable_block_step(struct task_struct *); 353extern void user_enable_block_step(struct task_struct *);
337#endif /* arch_has_block_step */ 354#endif /* arch_has_block_step */
338 355
339#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO 356#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
340extern void user_single_step_siginfo(struct task_struct *tsk, 357extern void user_single_step_report(struct pt_regs *regs);
341 struct pt_regs *regs, siginfo_t *info);
342#else 358#else
343static inline void user_single_step_siginfo(struct task_struct *tsk, 359static inline void user_single_step_report(struct pt_regs *regs)
344 struct pt_regs *regs, siginfo_t *info)
345{ 360{
346 info->si_signo = SIGTRAP; 361 kernel_siginfo_t info;
362 clear_siginfo(&info);
363 info.si_signo = SIGTRAP;
364 info.si_errno = 0;
365 info.si_code = SI_USER;
366 info.si_pid = 0;
367 info.si_uid = 0;
368 force_sig_info(info.si_signo, &info, current);
347} 369}
348#endif 370#endif
349 371
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 13b4244d44c1..979087e021f3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -196,6 +196,7 @@ enum pxa_ssp_type {
196 PXA27x_SSP, 196 PXA27x_SSP,
197 PXA3xx_SSP, 197 PXA3xx_SSP,
198 PXA168_SSP, 198 PXA168_SSP,
199 MMP2_SSP,
199 PXA910_SSP, 200 PXA910_SSP,
200 CE4100_SSP, 201 CE4100_SSP,
201 QUARK_X1000_SSP, 202 QUARK_X1000_SSP,
@@ -217,7 +218,7 @@ struct ssp_device {
217 218
218 const char *label; 219 const char *label;
219 int port_id; 220 int port_id;
220 int type; 221 enum pxa_ssp_type type;
221 int use_count; 222 int use_count;
222 int irq; 223 int irq;
223 224
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 5d6144977828..3bcd67fd5548 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -225,19 +225,14 @@ struct geni_se {
225#define HW_VER_MINOR_SHFT 16 225#define HW_VER_MINOR_SHFT 16
226#define HW_VER_STEP_MASK GENMASK(15, 0) 226#define HW_VER_STEP_MASK GENMASK(15, 0)
227 227
228#define GENI_SE_VERSION_MAJOR(ver) ((ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT)
229#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
230#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
231
228#if IS_ENABLED(CONFIG_QCOM_GENI_SE) 232#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
229 233
230u32 geni_se_get_qup_hw_version(struct geni_se *se); 234u32 geni_se_get_qup_hw_version(struct geni_se *se);
231 235
232#define geni_se_get_wrapper_version(se, major, minor, step) do { \
233 u32 ver; \
234\
235 ver = geni_se_get_qup_hw_version(se); \
236 major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT; \
237 minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT; \
238 step = version & HW_VER_STEP_MASK; \
239} while (0)
240
241/** 236/**
242 * geni_se_read_proto() - Read the protocol configured for a serial engine 237 * geni_se_read_proto() - Read the protocol configured for a serial engine
243 * @se: Pointer to the concerned serial engine. 238 * @se: Pointer to the concerned serial engine.
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 5d65521260b3..06996ad4f2bc 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,4 +1,4 @@
1/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. 1/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2015 Linaro Ltd. 2 * Copyright (C) 2015 Linaro Ltd.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,8 @@ struct qcom_scm_vmperm {
33 33
34#define QCOM_SCM_VMID_HLOS 0x3 34#define QCOM_SCM_VMID_HLOS 0x3
35#define QCOM_SCM_VMID_MSS_MSA 0xF 35#define QCOM_SCM_VMID_MSS_MSA 0xF
36#define QCOM_SCM_VMID_WLAN 0x18
37#define QCOM_SCM_VMID_WLAN_CE 0x19
36#define QCOM_SCM_PERM_READ 0x4 38#define QCOM_SCM_PERM_READ 0x4
37#define QCOM_SCM_PERM_WRITE 0x2 39#define QCOM_SCM_PERM_WRITE 0x2
38#define QCOM_SCM_PERM_EXEC 0x1 40#define QCOM_SCM_PERM_EXEC 0x1
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 0081fa6d1268..03f59a28fefd 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -110,7 +110,7 @@
110 110
111#define FW_MAJOR_VERSION 8 111#define FW_MAJOR_VERSION 8
112#define FW_MINOR_VERSION 37 112#define FW_MINOR_VERSION 37
113#define FW_REVISION_VERSION 2 113#define FW_REVISION_VERSION 7
114#define FW_ENGINEERING_VERSION 0 114#define FW_ENGINEERING_VERSION 0
115 115
116/***********************/ 116/***********************/
@@ -931,12 +931,12 @@ struct db_rdma_dpm_params {
931#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 931#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
932#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 932#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
933#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 933#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
934#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 934#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1
935#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 935#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28
936#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 936#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
937#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 937#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
938#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 938#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
939#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 939#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30
940#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 940#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
941#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 941#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
942}; 942};
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b34c573f2b30..66aba505ec56 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -896,7 +896,7 @@ struct e4_ustorm_iscsi_task_ag_ctx {
896 __le32 exp_cont_len; 896 __le32 exp_cont_len;
897 __le32 total_data_acked; 897 __le32 total_data_acked;
898 __le32 exp_data_acked; 898 __le32 exp_data_acked;
899 u8 next_tid_valid; 899 u8 byte2;
900 u8 byte3; 900 u8 byte3;
901 __le16 word1; 901 __le16 word1;
902 __le16 next_tid; 902 __le16 next_tid;
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 8cd34645e892..a47321a0d572 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -667,14 +667,35 @@ enum qed_link_mode_bits {
667 QED_LM_Autoneg_BIT = BIT(1), 667 QED_LM_Autoneg_BIT = BIT(1),
668 QED_LM_Asym_Pause_BIT = BIT(2), 668 QED_LM_Asym_Pause_BIT = BIT(2),
669 QED_LM_Pause_BIT = BIT(3), 669 QED_LM_Pause_BIT = BIT(3),
670 QED_LM_1000baseT_Half_BIT = BIT(4), 670 QED_LM_1000baseT_Full_BIT = BIT(4),
671 QED_LM_1000baseT_Full_BIT = BIT(5), 671 QED_LM_10000baseT_Full_BIT = BIT(5),
672 QED_LM_10000baseKR_Full_BIT = BIT(6), 672 QED_LM_10000baseKR_Full_BIT = BIT(6),
673 QED_LM_25000baseKR_Full_BIT = BIT(7), 673 QED_LM_20000baseKR2_Full_BIT = BIT(7),
674 QED_LM_40000baseLR4_Full_BIT = BIT(8), 674 QED_LM_25000baseKR_Full_BIT = BIT(8),
675 QED_LM_50000baseKR2_Full_BIT = BIT(9), 675 QED_LM_40000baseLR4_Full_BIT = BIT(9),
676 QED_LM_100000baseKR4_Full_BIT = BIT(10), 676 QED_LM_50000baseKR2_Full_BIT = BIT(10),
677 QED_LM_COUNT = 11 677 QED_LM_100000baseKR4_Full_BIT = BIT(11),
678 QED_LM_2500baseX_Full_BIT = BIT(12),
679 QED_LM_Backplane_BIT = BIT(13),
680 QED_LM_1000baseKX_Full_BIT = BIT(14),
681 QED_LM_10000baseKX4_Full_BIT = BIT(15),
682 QED_LM_10000baseR_FEC_BIT = BIT(16),
683 QED_LM_40000baseKR4_Full_BIT = BIT(17),
684 QED_LM_40000baseCR4_Full_BIT = BIT(18),
685 QED_LM_40000baseSR4_Full_BIT = BIT(19),
686 QED_LM_25000baseCR_Full_BIT = BIT(20),
687 QED_LM_25000baseSR_Full_BIT = BIT(21),
688 QED_LM_50000baseCR2_Full_BIT = BIT(22),
689 QED_LM_100000baseSR4_Full_BIT = BIT(23),
690 QED_LM_100000baseCR4_Full_BIT = BIT(24),
691 QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25),
692 QED_LM_50000baseSR2_Full_BIT = BIT(26),
693 QED_LM_1000baseX_Full_BIT = BIT(27),
694 QED_LM_10000baseCR_Full_BIT = BIT(28),
695 QED_LM_10000baseSR_Full_BIT = BIT(29),
696 QED_LM_10000baseLR_Full_BIT = BIT(30),
697 QED_LM_10000baseLRM_Full_BIT = BIT(31),
698 QED_LM_COUNT = 32
678}; 699};
679 700
680struct qed_link_params { 701struct qed_link_params {
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index df4d13f7e191..d15f8e4815e3 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -39,15 +39,6 @@
39#include <linux/qed/qed_ll2_if.h> 39#include <linux/qed/qed_ll2_if.h>
40#include <linux/qed/rdma_common.h> 40#include <linux/qed/rdma_common.h>
41 41
42enum qed_roce_ll2_tx_dest {
43 /* Light L2 TX Destination to the Network */
44 QED_ROCE_LL2_TX_DEST_NW,
45
46 /* Light L2 TX Destination to the Loopback */
47 QED_ROCE_LL2_TX_DEST_LB,
48 QED_ROCE_LL2_TX_DEST_MAX
49};
50
51#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) 42#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
52 43
53/* rdma interface */ 44/* rdma interface */
@@ -581,7 +572,7 @@ struct qed_roce_ll2_packet {
581 int n_seg; 572 int n_seg;
582 struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; 573 struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
583 int roce_mode; 574 int roce_mode;
584 enum qed_roce_ll2_tx_dest tx_dest; 575 enum qed_ll2_tx_dest tx_dest;
585}; 576};
586 577
587enum qed_rdma_type { 578enum qed_rdma_type {
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 34149e8b5f73..06c4c7a6c09c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -28,34 +28,30 @@
28#include <linux/rcupdate.h> 28#include <linux/rcupdate.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/xarray.h>
32
33/* Keep unconverted code working */
34#define radix_tree_root xarray
35#define radix_tree_node xa_node
31 36
32/* 37/*
33 * The bottom two bits of the slot determine how the remaining bits in the 38 * The bottom two bits of the slot determine how the remaining bits in the
34 * slot are interpreted: 39 * slot are interpreted:
35 * 40 *
36 * 00 - data pointer 41 * 00 - data pointer
37 * 01 - internal entry 42 * 10 - internal entry
38 * 10 - exceptional entry 43 * x1 - value entry
39 * 11 - this bit combination is currently unused/reserved
40 * 44 *
41 * The internal entry may be a pointer to the next level in the tree, a 45 * The internal entry may be a pointer to the next level in the tree, a
42 * sibling entry, or an indicator that the entry in this slot has been moved 46 * sibling entry, or an indicator that the entry in this slot has been moved
43 * to another location in the tree and the lookup should be restarted. While 47 * to another location in the tree and the lookup should be restarted. While
44 * NULL fits the 'data pointer' pattern, it means that there is no entry in 48 * NULL fits the 'data pointer' pattern, it means that there is no entry in
45 * the tree for this index (no matter what level of the tree it is found at). 49 * the tree for this index (no matter what level of the tree it is found at).
46 * This means that you cannot store NULL in the tree as a value for the index. 50 * This means that storing a NULL entry in the tree is the same as deleting
51 * the entry from the tree.
47 */ 52 */
48#define RADIX_TREE_ENTRY_MASK 3UL 53#define RADIX_TREE_ENTRY_MASK 3UL
49#define RADIX_TREE_INTERNAL_NODE 1UL 54#define RADIX_TREE_INTERNAL_NODE 2UL
50
51/*
52 * Most users of the radix tree store pointers but shmem/tmpfs stores swap
53 * entries in the same tree. They are marked as exceptional entries to
54 * distinguish them from pointers to struct page.
55 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
56 */
57#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
58#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
59 55
60static inline bool radix_tree_is_internal_node(void *ptr) 56static inline bool radix_tree_is_internal_node(void *ptr)
61{ 57{
@@ -65,75 +61,32 @@ static inline bool radix_tree_is_internal_node(void *ptr)
65 61
66/*** radix-tree API starts here ***/ 62/*** radix-tree API starts here ***/
67 63
68#define RADIX_TREE_MAX_TAGS 3 64#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT
69
70#ifndef RADIX_TREE_MAP_SHIFT
71#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
72#endif
73
74#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) 65#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
75#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) 66#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
76 67
77#define RADIX_TREE_TAG_LONGS \ 68#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS
78 ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) 69#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS
79 70
80#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) 71#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
81#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ 72#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
82 RADIX_TREE_MAP_SHIFT)) 73 RADIX_TREE_MAP_SHIFT))
83 74
84/* 75/* The IDR tag is stored in the low bits of xa_flags */
85 * @count is the count of every non-NULL element in the ->slots array
86 * whether that is an exceptional entry, a retry entry, a user pointer,
87 * a sibling entry or a pointer to the next level of the tree.
88 * @exceptional is the count of every element in ->slots which is
89 * either radix_tree_exceptional_entry() or is a sibling entry for an
90 * exceptional entry.
91 */
92struct radix_tree_node {
93 unsigned char shift; /* Bits remaining in each slot */
94 unsigned char offset; /* Slot offset in parent */
95 unsigned char count; /* Total entry count */
96 unsigned char exceptional; /* Exceptional entry count */
97 struct radix_tree_node *parent; /* Used when ascending tree */
98 struct radix_tree_root *root; /* The tree we belong to */
99 union {
100 struct list_head private_list; /* For tree user */
101 struct rcu_head rcu_head; /* Used when freeing node */
102 };
103 void __rcu *slots[RADIX_TREE_MAP_SIZE];
104 unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
105};
106
107/* The IDR tag is stored in the low bits of the GFP flags */
108#define ROOT_IS_IDR ((__force gfp_t)4) 76#define ROOT_IS_IDR ((__force gfp_t)4)
109/* The top bits of gfp_mask are used to store the root tags */ 77/* The top bits of xa_flags are used to store the root tags */
110#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) 78#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
111 79
112struct radix_tree_root { 80#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask)
113 spinlock_t xa_lock;
114 gfp_t gfp_mask;
115 struct radix_tree_node __rcu *rnode;
116};
117
118#define RADIX_TREE_INIT(name, mask) { \
119 .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
120 .gfp_mask = (mask), \
121 .rnode = NULL, \
122}
123 81
124#define RADIX_TREE(name, mask) \ 82#define RADIX_TREE(name, mask) \
125 struct radix_tree_root name = RADIX_TREE_INIT(name, mask) 83 struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
126 84
127#define INIT_RADIX_TREE(root, mask) \ 85#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)
128do { \
129 spin_lock_init(&(root)->xa_lock); \
130 (root)->gfp_mask = (mask); \
131 (root)->rnode = NULL; \
132} while (0)
133 86
134static inline bool radix_tree_empty(const struct radix_tree_root *root) 87static inline bool radix_tree_empty(const struct radix_tree_root *root)
135{ 88{
136 return root->rnode == NULL; 89 return root->xa_head == NULL;
137} 90}
138 91
139/** 92/**
@@ -143,7 +96,6 @@ static inline bool radix_tree_empty(const struct radix_tree_root *root)
143 * @next_index: one beyond the last index for this chunk 96 * @next_index: one beyond the last index for this chunk
144 * @tags: bit-mask for tag-iterating 97 * @tags: bit-mask for tag-iterating
145 * @node: node that contains current slot 98 * @node: node that contains current slot
146 * @shift: shift for the node that holds our slots
147 * 99 *
148 * This radix tree iterator works in terms of "chunks" of slots. A chunk is a 100 * This radix tree iterator works in terms of "chunks" of slots. A chunk is a
149 * subinterval of slots contained within one radix tree leaf node. It is 101 * subinterval of slots contained within one radix tree leaf node. It is
@@ -157,20 +109,8 @@ struct radix_tree_iter {
157 unsigned long next_index; 109 unsigned long next_index;
158 unsigned long tags; 110 unsigned long tags;
159 struct radix_tree_node *node; 111 struct radix_tree_node *node;
160#ifdef CONFIG_RADIX_TREE_MULTIORDER
161 unsigned int shift;
162#endif
163}; 112};
164 113
165static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
166{
167#ifdef CONFIG_RADIX_TREE_MULTIORDER
168 return iter->shift;
169#else
170 return 0;
171#endif
172}
173
174/** 114/**
175 * Radix-tree synchronization 115 * Radix-tree synchronization
176 * 116 *
@@ -194,12 +134,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
194 * radix_tree_lookup_slot 134 * radix_tree_lookup_slot
195 * radix_tree_tag_get 135 * radix_tree_tag_get
196 * radix_tree_gang_lookup 136 * radix_tree_gang_lookup
197 * radix_tree_gang_lookup_slot
198 * radix_tree_gang_lookup_tag 137 * radix_tree_gang_lookup_tag
199 * radix_tree_gang_lookup_tag_slot 138 * radix_tree_gang_lookup_tag_slot
200 * radix_tree_tagged 139 * radix_tree_tagged
201 * 140 *
202 * The first 8 functions are able to be called locklessly, using RCU. The 141 * The first 7 functions are able to be called locklessly, using RCU. The
203 * caller must ensure calls to these functions are made within rcu_read_lock() 142 * caller must ensure calls to these functions are made within rcu_read_lock()
204 * regions. Other readers (lock-free or otherwise) and modifications may be 143 * regions. Other readers (lock-free or otherwise) and modifications may be
205 * running concurrently. 144 * running concurrently.
@@ -269,17 +208,6 @@ static inline int radix_tree_deref_retry(void *arg)
269} 208}
270 209
271/** 210/**
272 * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
273 * @arg: value returned by radix_tree_deref_slot
274 * Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
275 */
276static inline int radix_tree_exceptional_entry(void *arg)
277{
278 /* Not unlikely because radix_tree_exception often tested first */
279 return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
280}
281
282/**
283 * radix_tree_exception - radix_tree_deref_slot returned either exception? 211 * radix_tree_exception - radix_tree_deref_slot returned either exception?
284 * @arg: value returned by radix_tree_deref_slot 212 * @arg: value returned by radix_tree_deref_slot
285 * Returns: 0 if well-aligned pointer, non-0 if either kind of exception. 213 * Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
@@ -289,47 +217,28 @@ static inline int radix_tree_exception(void *arg)
289 return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); 217 return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
290} 218}
291 219
292int __radix_tree_create(struct radix_tree_root *, unsigned long index, 220int radix_tree_insert(struct radix_tree_root *, unsigned long index,
293 unsigned order, struct radix_tree_node **nodep, 221 void *);
294 void __rcu ***slotp);
295int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
296 unsigned order, void *);
297static inline int radix_tree_insert(struct radix_tree_root *root,
298 unsigned long index, void *entry)
299{
300 return __radix_tree_insert(root, index, 0, entry);
301}
302void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, 222void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
303 struct radix_tree_node **nodep, void __rcu ***slotp); 223 struct radix_tree_node **nodep, void __rcu ***slotp);
304void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); 224void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
305void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, 225void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
306 unsigned long index); 226 unsigned long index);
307typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
308void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, 227void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
309 void __rcu **slot, void *entry, 228 void __rcu **slot, void *entry);
310 radix_tree_update_node_t update_node);
311void radix_tree_iter_replace(struct radix_tree_root *, 229void radix_tree_iter_replace(struct radix_tree_root *,
312 const struct radix_tree_iter *, void __rcu **slot, void *entry); 230 const struct radix_tree_iter *, void __rcu **slot, void *entry);
313void radix_tree_replace_slot(struct radix_tree_root *, 231void radix_tree_replace_slot(struct radix_tree_root *,
314 void __rcu **slot, void *entry); 232 void __rcu **slot, void *entry);
315void __radix_tree_delete_node(struct radix_tree_root *,
316 struct radix_tree_node *,
317 radix_tree_update_node_t update_node);
318void radix_tree_iter_delete(struct radix_tree_root *, 233void radix_tree_iter_delete(struct radix_tree_root *,
319 struct radix_tree_iter *iter, void __rcu **slot); 234 struct radix_tree_iter *iter, void __rcu **slot);
320void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); 235void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
321void *radix_tree_delete(struct radix_tree_root *, unsigned long); 236void *radix_tree_delete(struct radix_tree_root *, unsigned long);
322void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
323 void __rcu **slot);
324unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, 237unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
325 void **results, unsigned long first_index, 238 void **results, unsigned long first_index,
326 unsigned int max_items); 239 unsigned int max_items);
327unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
328 void __rcu ***results, unsigned long *indices,
329 unsigned long first_index, unsigned int max_items);
330int radix_tree_preload(gfp_t gfp_mask); 240int radix_tree_preload(gfp_t gfp_mask);
331int radix_tree_maybe_preload(gfp_t gfp_mask); 241int radix_tree_maybe_preload(gfp_t gfp_mask);
332int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
333void radix_tree_init(void); 242void radix_tree_init(void);
334void *radix_tree_tag_set(struct radix_tree_root *, 243void *radix_tree_tag_set(struct radix_tree_root *,
335 unsigned long index, unsigned int tag); 244 unsigned long index, unsigned int tag);
@@ -337,8 +246,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *,
337 unsigned long index, unsigned int tag); 246 unsigned long index, unsigned int tag);
338int radix_tree_tag_get(const struct radix_tree_root *, 247int radix_tree_tag_get(const struct radix_tree_root *,
339 unsigned long index, unsigned int tag); 248 unsigned long index, unsigned int tag);
340void radix_tree_iter_tag_set(struct radix_tree_root *,
341 const struct radix_tree_iter *iter, unsigned int tag);
342void radix_tree_iter_tag_clear(struct radix_tree_root *, 249void radix_tree_iter_tag_clear(struct radix_tree_root *,
343 const struct radix_tree_iter *iter, unsigned int tag); 250 const struct radix_tree_iter *iter, unsigned int tag);
344unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, 251unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
@@ -354,12 +261,6 @@ static inline void radix_tree_preload_end(void)
354 preempt_enable(); 261 preempt_enable();
355} 262}
356 263
357int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
358int radix_tree_split(struct radix_tree_root *, unsigned long index,
359 unsigned new_order);
360int radix_tree_join(struct radix_tree_root *, unsigned long index,
361 unsigned new_order, void *);
362
363void __rcu **idr_get_free(struct radix_tree_root *root, 264void __rcu **idr_get_free(struct radix_tree_root *root,
364 struct radix_tree_iter *iter, gfp_t gfp, 265 struct radix_tree_iter *iter, gfp_t gfp,
365 unsigned long max); 266 unsigned long max);
@@ -465,7 +366,7 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
465static inline unsigned long 366static inline unsigned long
466__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) 367__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
467{ 368{
468 return iter->index + (slots << iter_shift(iter)); 369 return iter->index + slots;
469} 370}
470 371
471/** 372/**
@@ -490,21 +391,9 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
490static __always_inline long 391static __always_inline long
491radix_tree_chunk_size(struct radix_tree_iter *iter) 392radix_tree_chunk_size(struct radix_tree_iter *iter)
492{ 393{
493 return (iter->next_index - iter->index) >> iter_shift(iter); 394 return iter->next_index - iter->index;
494} 395}
495 396
496#ifdef CONFIG_RADIX_TREE_MULTIORDER
497void __rcu **__radix_tree_next_slot(void __rcu **slot,
498 struct radix_tree_iter *iter, unsigned flags);
499#else
500/* Can't happen without sibling entries, but the compiler can't tell that */
501static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
502 struct radix_tree_iter *iter, unsigned flags)
503{
504 return slot;
505}
506#endif
507
508/** 397/**
509 * radix_tree_next_slot - find next slot in chunk 398 * radix_tree_next_slot - find next slot in chunk
510 * 399 *
@@ -563,8 +452,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
563 return NULL; 452 return NULL;
564 453
565 found: 454 found:
566 if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
567 return __radix_tree_next_slot(slot, iter, flags);
568 return slot; 455 return slot;
569} 456}
570 457
@@ -584,23 +471,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
584 slot = radix_tree_next_slot(slot, iter, 0)) 471 slot = radix_tree_next_slot(slot, iter, 0))
585 472
586/** 473/**
587 * radix_tree_for_each_contig - iterate over contiguous slots
588 *
589 * @slot: the void** variable for pointer to slot
590 * @root: the struct radix_tree_root pointer
591 * @iter: the struct radix_tree_iter pointer
592 * @start: iteration starting index
593 *
594 * @slot points to radix tree slot, @iter->index contains its index.
595 */
596#define radix_tree_for_each_contig(slot, root, iter, start) \
597 for (slot = radix_tree_iter_init(iter, start) ; \
598 slot || (slot = radix_tree_next_chunk(root, iter, \
599 RADIX_TREE_ITER_CONTIG)) ; \
600 slot = radix_tree_next_slot(slot, iter, \
601 RADIX_TREE_ITER_CONTIG))
602
603/**
604 * radix_tree_for_each_tagged - iterate over tagged slots 474 * radix_tree_for_each_tagged - iterate over tagged slots
605 * 475 *
606 * @slot: the void** variable for pointer to slot 476 * @slot: the void** variable for pointer to slot
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4786c2235b98..e91ec9ddcd30 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -182,7 +182,7 @@ static inline void list_replace_rcu(struct list_head *old,
182 * @list: the RCU-protected list to splice 182 * @list: the RCU-protected list to splice
183 * @prev: points to the last element of the existing list 183 * @prev: points to the last element of the existing list
184 * @next: points to the first element of the existing list 184 * @next: points to the first element of the existing list
185 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... 185 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
186 * 186 *
187 * The list pointed to by @prev and @next can be RCU-read traversed 187 * The list pointed to by @prev and @next can be RCU-read traversed
188 * concurrently with this function. 188 * concurrently with this function.
@@ -240,7 +240,7 @@ static inline void __list_splice_init_rcu(struct list_head *list,
240 * designed for stacks. 240 * designed for stacks.
241 * @list: the RCU-protected list to splice 241 * @list: the RCU-protected list to splice
242 * @head: the place in the existing list to splice the first list into 242 * @head: the place in the existing list to splice the first list into
243 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... 243 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
244 */ 244 */
245static inline void list_splice_init_rcu(struct list_head *list, 245static inline void list_splice_init_rcu(struct list_head *list,
246 struct list_head *head, 246 struct list_head *head,
@@ -255,7 +255,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
255 * list, designed for queues. 255 * list, designed for queues.
256 * @list: the RCU-protected list to splice 256 * @list: the RCU-protected list to splice
257 * @head: the place in the existing list to splice the first list into 257 * @head: the place in the existing list to splice the first list into
258 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... 258 * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
259 */ 259 */
260static inline void list_splice_tail_init_rcu(struct list_head *list, 260static inline void list_splice_tail_init_rcu(struct list_head *list,
261 struct list_head *head, 261 struct list_head *head,
@@ -359,13 +359,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
359 * @type: the type of the struct this is embedded in. 359 * @type: the type of the struct this is embedded in.
360 * @member: the name of the list_head within the struct. 360 * @member: the name of the list_head within the struct.
361 * 361 *
362 * This primitive may safely run concurrently with the _rcu list-mutation 362 * This primitive may safely run concurrently with the _rcu
363 * primitives such as list_add_rcu(), but requires some implicit RCU 363 * list-mutation primitives such as list_add_rcu(), but requires some
364 * read-side guarding. One example is running within a special 364 * implicit RCU read-side guarding. One example is running within a special
365 * exception-time environment where preemption is disabled and where 365 * exception-time environment where preemption is disabled and where lockdep
366 * lockdep cannot be invoked (in which case updaters must use RCU-sched, 366 * cannot be invoked. Another example is when items are added to the list,
367 * as in synchronize_sched(), call_rcu_sched(), and friends). Another 367 * but never deleted.
368 * example is when items are added to the list, but never deleted.
369 */ 368 */
370#define list_entry_lockless(ptr, type, member) \ 369#define list_entry_lockless(ptr, type, member) \
371 container_of((typeof(ptr))READ_ONCE(ptr), type, member) 370 container_of((typeof(ptr))READ_ONCE(ptr), type, member)
@@ -376,13 +375,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
376 * @head: the head for your list. 375 * @head: the head for your list.
377 * @member: the name of the list_struct within the struct. 376 * @member: the name of the list_struct within the struct.
378 * 377 *
379 * This primitive may safely run concurrently with the _rcu list-mutation 378 * This primitive may safely run concurrently with the _rcu
380 * primitives such as list_add_rcu(), but requires some implicit RCU 379 * list-mutation primitives such as list_add_rcu(), but requires some
381 * read-side guarding. One example is running within a special 380 * implicit RCU read-side guarding. One example is running within a special
382 * exception-time environment where preemption is disabled and where 381 * exception-time environment where preemption is disabled and where lockdep
383 * lockdep cannot be invoked (in which case updaters must use RCU-sched, 382 * cannot be invoked. Another example is when items are added to the list,
384 * as in synchronize_sched(), call_rcu_sched(), and friends). Another 383 * but never deleted.
385 * example is when items are added to the list, but never deleted.
386 */ 384 */
387#define list_for_each_entry_lockless(pos, head, member) \ 385#define list_for_each_entry_lockless(pos, head, member) \
388 for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ 386 for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 75e5b393cf44..4db8bcacc51a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,23 +48,14 @@
48#define ulong2long(a) (*(long *)(&(a))) 48#define ulong2long(a) (*(long *)(&(a)))
49 49
50/* Exported common interfaces */ 50/* Exported common interfaces */
51
52#ifdef CONFIG_PREEMPT_RCU
53void call_rcu(struct rcu_head *head, rcu_callback_t func); 51void call_rcu(struct rcu_head *head, rcu_callback_t func);
54#else /* #ifdef CONFIG_PREEMPT_RCU */
55#define call_rcu call_rcu_sched
56#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
57
58void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
59void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
60void synchronize_sched(void);
61void rcu_barrier_tasks(void); 52void rcu_barrier_tasks(void);
53void synchronize_rcu(void);
62 54
63#ifdef CONFIG_PREEMPT_RCU 55#ifdef CONFIG_PREEMPT_RCU
64 56
65void __rcu_read_lock(void); 57void __rcu_read_lock(void);
66void __rcu_read_unlock(void); 58void __rcu_read_unlock(void);
67void synchronize_rcu(void);
68 59
69/* 60/*
70 * Defined as a macro as it is a very low level header included from 61 * Defined as a macro as it is a very low level header included from
@@ -88,11 +79,6 @@ static inline void __rcu_read_unlock(void)
88 preempt_enable(); 79 preempt_enable();
89} 80}
90 81
91static inline void synchronize_rcu(void)
92{
93 synchronize_sched();
94}
95
96static inline int rcu_preempt_depth(void) 82static inline int rcu_preempt_depth(void)
97{ 83{
98 return 0; 84 return 0;
@@ -103,8 +89,6 @@ static inline int rcu_preempt_depth(void)
103/* Internal to kernel */ 89/* Internal to kernel */
104void rcu_init(void); 90void rcu_init(void);
105extern int rcu_scheduler_active __read_mostly; 91extern int rcu_scheduler_active __read_mostly;
106void rcu_sched_qs(void);
107void rcu_bh_qs(void);
108void rcu_check_callbacks(int user); 92void rcu_check_callbacks(int user);
109void rcu_report_dead(unsigned int cpu); 93void rcu_report_dead(unsigned int cpu);
110void rcutree_migrate_callbacks(int cpu); 94void rcutree_migrate_callbacks(int cpu);
@@ -135,11 +119,10 @@ static inline void rcu_init_nohz(void) { }
135 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers 119 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
136 * @a: Code that RCU needs to pay attention to. 120 * @a: Code that RCU needs to pay attention to.
137 * 121 *
138 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden 122 * RCU read-side critical sections are forbidden in the inner idle loop,
139 * in the inner idle loop, that is, between the rcu_idle_enter() and 123 * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
140 * the rcu_idle_exit() -- RCU will happily ignore any such read-side 124 * will happily ignore any such read-side critical sections. However,
141 * critical sections. However, things like powertop need tracepoints 125 * things like powertop need tracepoints in the inner idle loop.
142 * in the inner idle loop.
143 * 126 *
144 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) 127 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
145 * will tell RCU that it needs to pay attention, invoke its argument 128 * will tell RCU that it needs to pay attention, invoke its argument
@@ -167,20 +150,16 @@ static inline void rcu_init_nohz(void) { }
167 if (READ_ONCE((t)->rcu_tasks_holdout)) \ 150 if (READ_ONCE((t)->rcu_tasks_holdout)) \
168 WRITE_ONCE((t)->rcu_tasks_holdout, false); \ 151 WRITE_ONCE((t)->rcu_tasks_holdout, false); \
169 } while (0) 152 } while (0)
170#define rcu_note_voluntary_context_switch(t) \ 153#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
171 do { \
172 rcu_all_qs(); \
173 rcu_tasks_qs(t); \
174 } while (0)
175void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); 154void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
176void synchronize_rcu_tasks(void); 155void synchronize_rcu_tasks(void);
177void exit_tasks_rcu_start(void); 156void exit_tasks_rcu_start(void);
178void exit_tasks_rcu_finish(void); 157void exit_tasks_rcu_finish(void);
179#else /* #ifdef CONFIG_TASKS_RCU */ 158#else /* #ifdef CONFIG_TASKS_RCU */
180#define rcu_tasks_qs(t) do { } while (0) 159#define rcu_tasks_qs(t) do { } while (0)
181#define rcu_note_voluntary_context_switch(t) rcu_all_qs() 160#define rcu_note_voluntary_context_switch(t) do { } while (0)
182#define call_rcu_tasks call_rcu_sched 161#define call_rcu_tasks call_rcu
183#define synchronize_rcu_tasks synchronize_sched 162#define synchronize_rcu_tasks synchronize_rcu
184static inline void exit_tasks_rcu_start(void) { } 163static inline void exit_tasks_rcu_start(void) { }
185static inline void exit_tasks_rcu_finish(void) { } 164static inline void exit_tasks_rcu_finish(void) { }
186#endif /* #else #ifdef CONFIG_TASKS_RCU */ 165#endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -325,9 +304,8 @@ static inline void rcu_preempt_sleep_check(void) { }
325 * Helper functions for rcu_dereference_check(), rcu_dereference_protected() 304 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
326 * and rcu_assign_pointer(). Some of these could be folded into their 305 * and rcu_assign_pointer(). Some of these could be folded into their
327 * callers, but they are left separate in order to ease introduction of 306 * callers, but they are left separate in order to ease introduction of
328 * multiple flavors of pointers to match the multiple flavors of RCU 307 * multiple pointers markings to match different RCU implementations
329 * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in 308 * (e.g., __srcu), should this make sense in the future.
330 * the future.
331 */ 309 */
332 310
333#ifdef __CHECKER__ 311#ifdef __CHECKER__
@@ -686,14 +664,9 @@ static inline void rcu_read_unlock(void)
686/** 664/**
687 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section 665 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
688 * 666 *
689 * This is equivalent of rcu_read_lock(), but to be used when updates 667 * This is equivalent of rcu_read_lock(), but also disables softirqs.
690 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since 668 * Note that anything else that disables softirqs can also serve as
691 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a 669 * an RCU read-side critical section.
692 * softirq handler to be a quiescent state, a process in RCU read-side
693 * critical section must be protected by disabling softirqs. Read-side
694 * critical sections in interrupt context can use just rcu_read_lock(),
695 * though this should at least be commented to avoid confusing people
696 * reading the code.
697 * 670 *
698 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() 671 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
699 * must occur in the same context, for example, it is illegal to invoke 672 * must occur in the same context, for example, it is illegal to invoke
@@ -726,10 +699,9 @@ static inline void rcu_read_unlock_bh(void)
726/** 699/**
727 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section 700 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
728 * 701 *
729 * This is equivalent of rcu_read_lock(), but to be used when updates 702 * This is equivalent of rcu_read_lock(), but disables preemption.
730 * are being done using call_rcu_sched() or synchronize_rcu_sched(). 703 * Read-side critical sections can also be introduced by anything else
731 * Read-side critical sections can also be introduced by anything that 704 * that disables preemption, including local_irq_disable() and friends.
732 * disables preemption, including local_irq_disable() and friends.
733 * 705 *
734 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() 706 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
735 * must occur in the same context, for example, it is illegal to invoke 707 * must occur in the same context, for example, it is illegal to invoke
@@ -885,4 +857,96 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
885#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ 857#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
886 858
887 859
860/* Has the specified rcu_head structure been handed to call_rcu()? */
861
862/*
863 * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
864 * @rhp: The rcu_head structure to initialize.
865 *
866 * If you intend to invoke rcu_head_after_call_rcu() to test whether a
867 * given rcu_head structure has already been passed to call_rcu(), then
868 * you must also invoke this rcu_head_init() function on it just after
869 * allocating that structure. Calls to this function must not race with
870 * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
871 */
872static inline void rcu_head_init(struct rcu_head *rhp)
873{
874 rhp->func = (rcu_callback_t)~0L;
875}
876
877/*
878 * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
879 * @rhp: The rcu_head structure to test.
880 * @func: The function passed to call_rcu() along with @rhp.
881 *
882 * Returns @true if the @rhp has been passed to call_rcu() with @func,
883 * and @false otherwise. Emits a warning in any other case, including
884 * the case where @rhp has already been invoked after a grace period.
885 * Calls to this function must not race with callback invocation. One way
886 * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
887 * in an RCU read-side critical section that includes a read-side fetch
888 * of the pointer to the structure containing @rhp.
889 */
890static inline bool
891rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
892{
893 if (READ_ONCE(rhp->func) == f)
894 return true;
895 WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
896 return false;
897}
898
899
900/* Transitional pre-consolidation compatibility definitions. */
901
902static inline void synchronize_rcu_bh(void)
903{
904 synchronize_rcu();
905}
906
907static inline void synchronize_rcu_bh_expedited(void)
908{
909 synchronize_rcu_expedited();
910}
911
912static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
913{
914 call_rcu(head, func);
915}
916
917static inline void rcu_barrier_bh(void)
918{
919 rcu_barrier();
920}
921
922static inline void synchronize_sched(void)
923{
924 synchronize_rcu();
925}
926
927static inline void synchronize_sched_expedited(void)
928{
929 synchronize_rcu_expedited();
930}
931
932static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
933{
934 call_rcu(head, func);
935}
936
937static inline void rcu_barrier_sched(void)
938{
939 rcu_barrier();
940}
941
942static inline unsigned long get_state_synchronize_sched(void)
943{
944 return get_state_synchronize_rcu();
945}
946
947static inline void cond_synchronize_sched(unsigned long oldstate)
948{
949 cond_synchronize_rcu(oldstate);
950}
951
888#endif /* __LINUX_RCUPDATE_H */ 952#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 57f371344152..8a16c3eb3dd0 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -33,17 +33,17 @@ do { \
33 33
34/** 34/**
35 * synchronize_rcu_mult - Wait concurrently for multiple grace periods 35 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
36 * @...: List of call_rcu() functions for the flavors to wait on. 36 * @...: List of call_rcu() functions for different grace periods to wait on
37 * 37 *
38 * This macro waits concurrently for multiple flavors of RCU grace periods. 38 * This macro waits concurrently for multiple types of RCU grace periods.
39 * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait 39 * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
40 * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU 40 * on concurrent RCU and RCU-tasks grace periods. Waiting on a give SRCU
41 * domain requires you to write a wrapper function for that SRCU domain's 41 * domain requires you to write a wrapper function for that SRCU domain's
42 * call_srcu() function, supplying the corresponding srcu_struct. 42 * call_srcu() function, supplying the corresponding srcu_struct.
43 * 43 *
44 * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU 44 * If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU,
45 * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called 45 * given that anywhere synchronize_rcu_mult() can be called is automatically
46 * is automatically a grace period. 46 * a grace period.
47 */ 47 */
48#define synchronize_rcu_mult(...) \ 48#define synchronize_rcu_mult(...) \
49 _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) 49 _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 8d9a0ea8f0b5..af65d1f36ddb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,12 +27,6 @@
27 27
28#include <linux/ktime.h> 28#include <linux/ktime.h>
29 29
30struct rcu_dynticks;
31static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
32{
33 return 0;
34}
35
36/* Never flag non-existent other CPUs! */ 30/* Never flag non-existent other CPUs! */
37static inline bool rcu_eqs_special_set(int cpu) { return false; } 31static inline bool rcu_eqs_special_set(int cpu) { return false; }
38 32
@@ -46,53 +40,28 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
46 might_sleep(); 40 might_sleep();
47} 41}
48 42
49static inline unsigned long get_state_synchronize_sched(void) 43extern void rcu_barrier(void);
50{
51 return 0;
52}
53
54static inline void cond_synchronize_sched(unsigned long oldstate)
55{
56 might_sleep();
57}
58
59extern void rcu_barrier_bh(void);
60extern void rcu_barrier_sched(void);
61 44
62static inline void synchronize_rcu_expedited(void) 45static inline void synchronize_rcu_expedited(void)
63{ 46{
64 synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ 47 synchronize_rcu();
65} 48}
66 49
67static inline void rcu_barrier(void) 50static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
68{ 51{
69 rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ 52 call_rcu(head, func);
70}
71
72static inline void synchronize_rcu_bh(void)
73{
74 synchronize_sched();
75}
76
77static inline void synchronize_rcu_bh_expedited(void)
78{
79 synchronize_sched();
80} 53}
81 54
82static inline void synchronize_sched_expedited(void) 55void rcu_qs(void);
83{
84 synchronize_sched();
85}
86 56
87static inline void kfree_call_rcu(struct rcu_head *head, 57static inline void rcu_softirq_qs(void)
88 rcu_callback_t func)
89{ 58{
90 call_rcu(head, func); 59 rcu_qs();
91} 60}
92 61
93#define rcu_note_context_switch(preempt) \ 62#define rcu_note_context_switch(preempt) \
94 do { \ 63 do { \
95 rcu_sched_qs(); \ 64 rcu_qs(); \
96 rcu_tasks_qs(current); \ 65 rcu_tasks_qs(current); \
97 } while (0) 66 } while (0)
98 67
@@ -108,6 +77,7 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
108 */ 77 */
109static inline void rcu_virt_note_context_switch(int cpu) { } 78static inline void rcu_virt_note_context_switch(int cpu) { }
110static inline void rcu_cpu_stall_reset(void) { } 79static inline void rcu_cpu_stall_reset(void) { }
80static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
111static inline void rcu_idle_enter(void) { } 81static inline void rcu_idle_enter(void) { }
112static inline void rcu_idle_exit(void) { } 82static inline void rcu_idle_exit(void) { }
113static inline void rcu_irq_enter(void) { } 83static inline void rcu_irq_enter(void) { }
@@ -115,6 +85,11 @@ static inline void rcu_irq_exit_irqson(void) { }
115static inline void rcu_irq_enter_irqson(void) { } 85static inline void rcu_irq_enter_irqson(void) { }
116static inline void rcu_irq_exit(void) { } 86static inline void rcu_irq_exit(void) { }
117static inline void exit_rcu(void) { } 87static inline void exit_rcu(void) { }
88static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
89{
90 return false;
91}
92static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
118#ifdef CONFIG_SRCU 93#ifdef CONFIG_SRCU
119void rcu_scheduler_starting(void); 94void rcu_scheduler_starting(void);
120#else /* #ifndef CONFIG_SRCU */ 95#else /* #ifndef CONFIG_SRCU */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 914655848ef6..7f83179177d1 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,6 +30,7 @@
30#ifndef __LINUX_RCUTREE_H 30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33void rcu_softirq_qs(void);
33void rcu_note_context_switch(bool preempt); 34void rcu_note_context_switch(bool preempt);
34int rcu_needs_cpu(u64 basem, u64 *nextevt); 35int rcu_needs_cpu(u64 basem, u64 *nextevt);
35void rcu_cpu_stall_reset(void); 36void rcu_cpu_stall_reset(void);
@@ -44,41 +45,13 @@ static inline void rcu_virt_note_context_switch(int cpu)
44 rcu_note_context_switch(false); 45 rcu_note_context_switch(false);
45} 46}
46 47
47void synchronize_rcu_bh(void);
48void synchronize_sched_expedited(void);
49void synchronize_rcu_expedited(void); 48void synchronize_rcu_expedited(void);
50
51void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); 49void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
52 50
53/**
54 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
55 *
56 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
57 * approach to force the grace period to end quickly. This consumes
58 * significant time on all CPUs and is unfriendly to real-time workloads,
59 * so is thus not recommended for any sort of common-case code. In fact,
60 * if you are using synchronize_rcu_bh_expedited() in a loop, please
61 * restructure your code to batch your updates, and then use a single
62 * synchronize_rcu_bh() instead.
63 *
64 * Note that it is illegal to call this function while holding any lock
65 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
66 * to call this function from a CPU-hotplug notifier. Failing to observe
67 * these restriction will result in deadlock.
68 */
69static inline void synchronize_rcu_bh_expedited(void)
70{
71 synchronize_sched_expedited();
72}
73
74void rcu_barrier(void); 51void rcu_barrier(void);
75void rcu_barrier_bh(void);
76void rcu_barrier_sched(void);
77bool rcu_eqs_special_set(int cpu); 52bool rcu_eqs_special_set(int cpu);
78unsigned long get_state_synchronize_rcu(void); 53unsigned long get_state_synchronize_rcu(void);
79void cond_synchronize_rcu(unsigned long oldstate); 54void cond_synchronize_rcu(unsigned long oldstate);
80unsigned long get_state_synchronize_sched(void);
81void cond_synchronize_sched(unsigned long oldstate);
82 55
83void rcu_idle_enter(void); 56void rcu_idle_enter(void);
84void rcu_idle_exit(void); 57void rcu_idle_exit(void);
@@ -93,7 +66,9 @@ void rcu_scheduler_starting(void);
93extern int rcu_scheduler_active __read_mostly; 66extern int rcu_scheduler_active __read_mostly;
94void rcu_end_inkernel_boot(void); 67void rcu_end_inkernel_boot(void);
95bool rcu_is_watching(void); 68bool rcu_is_watching(void);
69#ifndef CONFIG_PREEMPT
96void rcu_all_qs(void); 70void rcu_all_qs(void);
71#endif
97 72
98/* RCUtree hotplug events */ 73/* RCUtree hotplug events */
99int rcutree_prepare_cpu(unsigned int cpu); 74int rcutree_prepare_cpu(unsigned int cpu);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 379505a53722..a367d59c301d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *);
268 * field is NULL but precious_table (see below) is not, the 268 * field is NULL but precious_table (see below) is not, the
269 * check is performed on such table (a register is precious if 269 * check is performed on such table (a register is precious if
270 * it belongs to one of the ranges specified by precious_table). 270 * it belongs to one of the ranges specified by precious_table).
271 * @writeable_noinc_reg: Optional callback returning true if the register
272 * supports multiple write operations without incrementing
273 * the register number. If this field is NULL but
274 * wr_noinc_table (see below) is not, the check is
275 * performed on such table (a register is no increment
276 * writeable if it belongs to one of the ranges specified
277 * by wr_noinc_table).
271 * @readable_noinc_reg: Optional callback returning true if the register 278 * @readable_noinc_reg: Optional callback returning true if the register
272 * supports multiple read operations without incrementing 279 * supports multiple read operations without incrementing
273 * the register number. If this field is NULL but 280 * the register number. If this field is NULL but
@@ -302,6 +309,7 @@ typedef void (*regmap_unlock)(void *);
302 * @rd_table: As above, for read access. 309 * @rd_table: As above, for read access.
303 * @volatile_table: As above, for volatile registers. 310 * @volatile_table: As above, for volatile registers.
304 * @precious_table: As above, for precious registers. 311 * @precious_table: As above, for precious registers.
312 * @wr_noinc_table: As above, for no increment writeable registers.
305 * @rd_noinc_table: As above, for no increment readable registers. 313 * @rd_noinc_table: As above, for no increment readable registers.
306 * @reg_defaults: Power on reset values for registers (for use with 314 * @reg_defaults: Power on reset values for registers (for use with
307 * register cache support). 315 * register cache support).
@@ -315,9 +323,12 @@ typedef void (*regmap_unlock)(void *);
315 * masks are used. 323 * masks are used.
316 * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even 324 * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
317 * if they are both empty. 325 * if they are both empty.
318 * @use_single_rw: If set, converts the bulk read and write operations into 326 * @use_single_read: If set, converts the bulk read operation into a series of
319 * a series of single read and write operations. This is useful 327 * single read operations. This is useful for a device that
320 * for device that does not support bulk read and write. 328 * does not support bulk read.
329 * @use_single_write: If set, converts the bulk write operation into a series of
330 * single write operations. This is useful for a device that
331 * does not support bulk write.
321 * @can_multi_write: If set, the device supports the multi write mode of bulk 332 * @can_multi_write: If set, the device supports the multi write mode of bulk
322 * write operations, if clear multi write requests will be 333 * write operations, if clear multi write requests will be
323 * split into individual write operations 334 * split into individual write operations
@@ -352,6 +363,7 @@ struct regmap_config {
352 bool (*readable_reg)(struct device *dev, unsigned int reg); 363 bool (*readable_reg)(struct device *dev, unsigned int reg);
353 bool (*volatile_reg)(struct device *dev, unsigned int reg); 364 bool (*volatile_reg)(struct device *dev, unsigned int reg);
354 bool (*precious_reg)(struct device *dev, unsigned int reg); 365 bool (*precious_reg)(struct device *dev, unsigned int reg);
366 bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
355 bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); 367 bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
356 368
357 bool disable_locking; 369 bool disable_locking;
@@ -369,6 +381,7 @@ struct regmap_config {
369 const struct regmap_access_table *rd_table; 381 const struct regmap_access_table *rd_table;
370 const struct regmap_access_table *volatile_table; 382 const struct regmap_access_table *volatile_table;
371 const struct regmap_access_table *precious_table; 383 const struct regmap_access_table *precious_table;
384 const struct regmap_access_table *wr_noinc_table;
372 const struct regmap_access_table *rd_noinc_table; 385 const struct regmap_access_table *rd_noinc_table;
373 const struct reg_default *reg_defaults; 386 const struct reg_default *reg_defaults;
374 unsigned int num_reg_defaults; 387 unsigned int num_reg_defaults;
@@ -380,7 +393,8 @@ struct regmap_config {
380 unsigned long write_flag_mask; 393 unsigned long write_flag_mask;
381 bool zero_flag_mask; 394 bool zero_flag_mask;
382 395
383 bool use_single_rw; 396 bool use_single_read;
397 bool use_single_write;
384 bool can_multi_write; 398 bool can_multi_write;
385 399
386 enum regmap_endian reg_format_endian; 400 enum regmap_endian reg_format_endian;
@@ -979,6 +993,8 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
979int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val); 993int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
980int regmap_raw_write(struct regmap *map, unsigned int reg, 994int regmap_raw_write(struct regmap *map, unsigned int reg,
981 const void *val, size_t val_len); 995 const void *val, size_t val_len);
996int regmap_noinc_write(struct regmap *map, unsigned int reg,
997 const void *val, size_t val_len);
982int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 998int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
983 size_t val_count); 999 size_t val_count);
984int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, 1000int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
@@ -1222,6 +1238,13 @@ static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
1222 return -EINVAL; 1238 return -EINVAL;
1223} 1239}
1224 1240
1241static inline int regmap_noinc_write(struct regmap *map, unsigned int reg,
1242 const void *val, size_t val_len)
1243{
1244 WARN_ONCE(1, "regmap API is disabled");
1245 return -EINVAL;
1246}
1247
1225static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, 1248static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
1226 const void *val, size_t val_count) 1249 const void *val, size_t val_count)
1227{ 1250{
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 0fd8fbb74763..a9c030192147 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -271,9 +271,16 @@ enum regulator_type {
271 * @ramp_delay: Time to settle down after voltage change (unit: uV/us) 271 * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
272 * @min_dropout_uV: The minimum dropout voltage this regulator can handle 272 * @min_dropout_uV: The minimum dropout voltage this regulator can handle
273 * @linear_ranges: A constant table of possible voltage ranges. 273 * @linear_ranges: A constant table of possible voltage ranges.
274 * @n_linear_ranges: Number of entries in the @linear_ranges table. 274 * @linear_range_selectors: A constant table of voltage range selectors.
275 * If pickable ranges are used each range must
276 * have corresponding selector here.
277 * @n_linear_ranges: Number of entries in the @linear_ranges (and in
278 * linear_range_selectors if used) table(s).
275 * @volt_table: Voltage mapping table (if table based mapping) 279 * @volt_table: Voltage mapping table (if table based mapping)
276 * 280 *
281 * @vsel_range_reg: Register for range selector when using pickable ranges
282 * and regulator_regmap_X_voltage_X_pickable functions.
283 * @vsel_range_mask: Mask for register bitfield used for range selector
277 * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ 284 * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
278 * @vsel_mask: Mask for register bitfield used for selector 285 * @vsel_mask: Mask for register bitfield used for selector
279 * @csel_reg: Register for TPS65218 LS3 current regulator 286 * @csel_reg: Register for TPS65218 LS3 current regulator
@@ -338,10 +345,14 @@ struct regulator_desc {
338 int min_dropout_uV; 345 int min_dropout_uV;
339 346
340 const struct regulator_linear_range *linear_ranges; 347 const struct regulator_linear_range *linear_ranges;
348 const unsigned int *linear_range_selectors;
349
341 int n_linear_ranges; 350 int n_linear_ranges;
342 351
343 const unsigned int *volt_table; 352 const unsigned int *volt_table;
344 353
354 unsigned int vsel_range_reg;
355 unsigned int vsel_range_mask;
345 unsigned int vsel_reg; 356 unsigned int vsel_reg;
346 unsigned int vsel_mask; 357 unsigned int vsel_mask;
347 unsigned int csel_reg; 358 unsigned int csel_reg;
@@ -498,18 +509,25 @@ int regulator_mode_to_status(unsigned int);
498 509
499int regulator_list_voltage_linear(struct regulator_dev *rdev, 510int regulator_list_voltage_linear(struct regulator_dev *rdev,
500 unsigned int selector); 511 unsigned int selector);
512int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
513 unsigned int selector);
501int regulator_list_voltage_linear_range(struct regulator_dev *rdev, 514int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
502 unsigned int selector); 515 unsigned int selector);
503int regulator_list_voltage_table(struct regulator_dev *rdev, 516int regulator_list_voltage_table(struct regulator_dev *rdev,
504 unsigned int selector); 517 unsigned int selector);
505int regulator_map_voltage_linear(struct regulator_dev *rdev, 518int regulator_map_voltage_linear(struct regulator_dev *rdev,
506 int min_uV, int max_uV); 519 int min_uV, int max_uV);
520int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
521 int min_uV, int max_uV);
507int regulator_map_voltage_linear_range(struct regulator_dev *rdev, 522int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
508 int min_uV, int max_uV); 523 int min_uV, int max_uV);
509int regulator_map_voltage_iterate(struct regulator_dev *rdev, 524int regulator_map_voltage_iterate(struct regulator_dev *rdev,
510 int min_uV, int max_uV); 525 int min_uV, int max_uV);
511int regulator_map_voltage_ascend(struct regulator_dev *rdev, 526int regulator_map_voltage_ascend(struct regulator_dev *rdev,
512 int min_uV, int max_uV); 527 int min_uV, int max_uV);
528int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev);
529int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
530 unsigned int sel);
513int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev); 531int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev);
514int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel); 532int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
515int regulator_is_enabled_regmap(struct regulator_dev *rdev); 533int regulator_is_enabled_regmap(struct regulator_dev *rdev);
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 48918be649d4..1a4340ed8e2b 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -24,8 +24,6 @@ struct regulator_init_data;
24 * @supply_name: Name of the regulator supply 24 * @supply_name: Name of the regulator supply
25 * @input_supply: Name of the input regulator supply 25 * @input_supply: Name of the input regulator supply
26 * @microvolts: Output voltage of regulator 26 * @microvolts: Output voltage of regulator
27 * @gpio: GPIO to use for enable control
28 * set to -EINVAL if not used
29 * @startup_delay: Start-up time in microseconds 27 * @startup_delay: Start-up time in microseconds
30 * @gpio_is_open_drain: Gpio pin is open drain or normal type. 28 * @gpio_is_open_drain: Gpio pin is open drain or normal type.
31 * If it is open drain type then HIGH will be set 29 * If it is open drain type then HIGH will be set
@@ -49,7 +47,6 @@ struct fixed_voltage_config {
49 const char *supply_name; 47 const char *supply_name;
50 const char *input_supply; 48 const char *input_supply;
51 int microvolts; 49 int microvolts;
52 int gpio;
53 unsigned startup_delay; 50 unsigned startup_delay;
54 unsigned gpio_is_open_drain:1; 51 unsigned gpio_is_open_drain:1;
55 unsigned enable_high:1; 52 unsigned enable_high:1;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3468703d663a..a459a5e973a7 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -48,9 +48,9 @@ struct regulator;
48 * DISABLE_IN_SUSPEND - turn off regulator in suspend states 48 * DISABLE_IN_SUSPEND - turn off regulator in suspend states
49 * ENABLE_IN_SUSPEND - keep regulator on in suspend states 49 * ENABLE_IN_SUSPEND - keep regulator on in suspend states
50 */ 50 */
51#define DO_NOTHING_IN_SUSPEND (-1) 51#define DO_NOTHING_IN_SUSPEND 0
52#define DISABLE_IN_SUSPEND 0 52#define DISABLE_IN_SUSPEND 1
53#define ENABLE_IN_SUSPEND 1 53#define ENABLE_IN_SUSPEND 2
54 54
55/* Regulator active discharge flags */ 55/* Regulator active discharge flags */
56enum regulator_active_discharge { 56enum regulator_active_discharge {
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 5d83d0c1d06c..bba2920e9c05 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -10,7 +10,7 @@
10#include <linux/time64.h> 10#include <linux/time64.h>
11 11
12struct timespec; 12struct timespec;
13struct compat_timespec; 13struct old_timespec32;
14struct pollfd; 14struct pollfd;
15 15
16enum timespec_type { 16enum timespec_type {
@@ -40,7 +40,7 @@ struct restart_block {
40 enum timespec_type type; 40 enum timespec_type type;
41 union { 41 union {
42 struct __kernel_timespec __user *rmtp; 42 struct __kernel_timespec __user *rmtp;
43 struct compat_timespec __user *compat_rmtp; 43 struct old_timespec32 __user *compat_rmtp;
44 }; 44 };
45 u64 expires; 45 u64 expires;
46 } nanosleep; 46 } nanosleep;
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 6aedc30003e7..c8bb4a2b48c3 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -167,17 +167,12 @@ struct rtc_device {
167#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ 167#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
168#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ 168#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
169 169
170extern struct rtc_device *rtc_device_register(const char *name,
171 struct device *dev,
172 const struct rtc_class_ops *ops,
173 struct module *owner);
174extern struct rtc_device *devm_rtc_device_register(struct device *dev, 170extern struct rtc_device *devm_rtc_device_register(struct device *dev,
175 const char *name, 171 const char *name,
176 const struct rtc_class_ops *ops, 172 const struct rtc_class_ops *ops,
177 struct module *owner); 173 struct module *owner);
178struct rtc_device *devm_rtc_allocate_device(struct device *dev); 174struct rtc_device *devm_rtc_allocate_device(struct device *dev);
179int __rtc_register_device(struct module *owner, struct rtc_device *rtc); 175int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
180extern void rtc_device_unregister(struct rtc_device *rtc);
181extern void devm_rtc_device_unregister(struct device *dev, 176extern void devm_rtc_device_unregister(struct device *dev,
182 struct rtc_device *rtc); 177 struct rtc_device *rtc);
183 178
@@ -277,4 +272,20 @@ static inline int rtc_nvmem_register(struct rtc_device *rtc,
277static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} 272static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
278#endif 273#endif
279 274
275#ifdef CONFIG_RTC_INTF_SYSFS
276int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp);
277int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps);
278#else
279static inline
280int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp)
281{
282 return 0;
283}
284
285static inline
286int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
287{
288 return 0;
289}
290#endif
280#endif /* _LINUX_RTC_H_ */ 291#endif /* _LINUX_RTC_H_ */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 5225832bd6ff..bb9cb84114c1 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -6,6 +6,7 @@
6#include <linux/mutex.h> 6#include <linux/mutex.h>
7#include <linux/netdevice.h> 7#include <linux/netdevice.h>
8#include <linux/wait.h> 8#include <linux/wait.h>
9#include <linux/refcount.h>
9#include <uapi/linux/rtnetlink.h> 10#include <uapi/linux/rtnetlink.h>
10 11
11extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); 12extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -34,6 +35,7 @@ extern void rtnl_unlock(void);
34extern int rtnl_trylock(void); 35extern int rtnl_trylock(void);
35extern int rtnl_is_locked(void); 36extern int rtnl_is_locked(void);
36extern int rtnl_lock_killable(void); 37extern int rtnl_lock_killable(void);
38extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
37 39
38extern wait_queue_head_t netdev_unregistering_wq; 40extern wait_queue_head_t netdev_unregistering_wq;
39extern struct rw_semaphore pernet_ops_rwsem; 41extern struct rw_semaphore pernet_ops_rwsem;
@@ -83,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
83 return rtnl_dereference(dev->ingress_queue); 85 return rtnl_dereference(dev->ingress_queue);
84} 86}
85 87
88static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
89{
90 return rcu_dereference(dev->ingress_queue);
91}
92
86struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); 93struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
87 94
88#ifdef CONFIG_NET_INGRESS 95#ifdef CONFIG_NET_INGRESS
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6eae696..67dbb57508b1 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,10 +45,10 @@ struct rw_semaphore {
45}; 45};
46 46
47/* 47/*
48 * Setting bit 0 of the owner field with other non-zero bits will indicate 48 * Setting bit 1 of the owner field but not bit 0 will indicate
49 * that the rwsem is writer-owned with an unknown owner. 49 * that the rwsem is writer-owned with an unknown owner.
50 */ 50 */
51#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L) 51#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L)
52 52
53extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); 53extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
54extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); 54extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 977cb57d7bc9..8f8a5418b627 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,6 +25,7 @@
25#include <linux/latencytop.h> 25#include <linux/latencytop.h>
26#include <linux/sched/prio.h> 26#include <linux/sched/prio.h>
27#include <linux/signal_types.h> 27#include <linux/signal_types.h>
28#include <linux/psi_types.h>
28#include <linux/mm_types_task.h> 29#include <linux/mm_types_task.h>
29#include <linux/task_io_accounting.h> 30#include <linux/task_io_accounting.h>
30#include <linux/rseq.h> 31#include <linux/rseq.h>
@@ -571,12 +572,8 @@ union rcu_special {
571 struct { 572 struct {
572 u8 blocked; 573 u8 blocked;
573 u8 need_qs; 574 u8 need_qs;
574 u8 exp_need_qs;
575
576 /* Otherwise the compiler can store garbage here: */
577 u8 pad;
578 } b; /* Bits. */ 575 } b; /* Bits. */
579 u32 s; /* Set of bits. */ 576 u16 s; /* Set of bits. */
580}; 577};
581 578
582enum perf_event_task_context { 579enum perf_event_task_context {
@@ -710,6 +707,10 @@ struct task_struct {
710 unsigned sched_contributes_to_load:1; 707 unsigned sched_contributes_to_load:1;
711 unsigned sched_migrated:1; 708 unsigned sched_migrated:1;
712 unsigned sched_remote_wakeup:1; 709 unsigned sched_remote_wakeup:1;
710#ifdef CONFIG_PSI
711 unsigned sched_psi_wake_requeue:1;
712#endif
713
713 /* Force alignment to the next boundary: */ 714 /* Force alignment to the next boundary: */
714 unsigned :0; 715 unsigned :0;
715 716
@@ -723,9 +724,6 @@ struct task_struct {
723#endif 724#endif
724#ifdef CONFIG_MEMCG 725#ifdef CONFIG_MEMCG
725 unsigned in_user_fault:1; 726 unsigned in_user_fault:1;
726#ifdef CONFIG_MEMCG_KMEM
727 unsigned memcg_kmem_skip_account:1;
728#endif
729#endif 727#endif
730#ifdef CONFIG_COMPAT_BRK 728#ifdef CONFIG_COMPAT_BRK
731 unsigned brk_randomized:1; 729 unsigned brk_randomized:1;
@@ -739,6 +737,12 @@ struct task_struct {
739 unsigned use_memdelay:1; 737 unsigned use_memdelay:1;
740#endif 738#endif
741 739
740 /*
741 * May usercopy functions fault on kernel addresses?
742 * This is not just a single bit because this can potentially nest.
743 */
744 unsigned int kernel_uaccess_faults_ok;
745
742 unsigned long atomic_flags; /* Flags requiring atomic access. */ 746 unsigned long atomic_flags; /* Flags requiring atomic access. */
743 747
744 struct restart_block restart_block; 748 struct restart_block restart_block;
@@ -960,9 +964,13 @@ struct task_struct {
960 964
961 /* Ptrace state: */ 965 /* Ptrace state: */
962 unsigned long ptrace_message; 966 unsigned long ptrace_message;
963 siginfo_t *last_siginfo; 967 kernel_siginfo_t *last_siginfo;
964 968
965 struct task_io_accounting ioac; 969 struct task_io_accounting ioac;
970#ifdef CONFIG_PSI
971 /* Pressure stall state */
972 unsigned int psi_flags;
973#endif
966#ifdef CONFIG_TASK_XACCT 974#ifdef CONFIG_TASK_XACCT
967 /* Accumulated RSS usage: */ 975 /* Accumulated RSS usage: */
968 u64 acct_rss_mem1; 976 u64 acct_rss_mem1;
@@ -1389,6 +1397,7 @@ extern struct pid *cad_pid;
1389#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1397#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1390#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1398#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
1391#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1399#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1400#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
1392#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 1401#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1393#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1402#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1394#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1403#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 80bc84ba5d2a..4859bea47a7b 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -22,10 +22,26 @@ extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
22#define EXP_5 2014 /* 1/exp(5sec/5min) */ 22#define EXP_5 2014 /* 1/exp(5sec/5min) */
23#define EXP_15 2037 /* 1/exp(5sec/15min) */ 23#define EXP_15 2037 /* 1/exp(5sec/15min) */
24 24
25#define CALC_LOAD(load,exp,n) \ 25/*
26 load *= exp; \ 26 * a1 = a0 * e + a * (1 - e)
27 load += n*(FIXED_1-exp); \ 27 */
28 load >>= FSHIFT; 28static inline unsigned long
29calc_load(unsigned long load, unsigned long exp, unsigned long active)
30{
31 unsigned long newload;
32
33 newload = load * exp + active * (FIXED_1 - exp);
34 if (active >= load)
35 newload += FIXED_1-1;
36
37 return newload / FIXED_1;
38}
39
40extern unsigned long calc_load_n(unsigned long load, unsigned long exp,
41 unsigned long active, unsigned int n);
42
43#define LOAD_INT(x) ((x) >> FSHIFT)
44#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
29 45
30extern void calc_global_load(unsigned long ticks); 46extern void calc_global_load(unsigned long ticks);
31 47
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 1be35729c2c5..13789d10a50e 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -270,16 +270,16 @@ static inline int signal_group_exit(const struct signal_struct *sig)
270extern void flush_signals(struct task_struct *); 270extern void flush_signals(struct task_struct *);
271extern void ignore_signals(struct task_struct *); 271extern void ignore_signals(struct task_struct *);
272extern void flush_signal_handlers(struct task_struct *, int force_default); 272extern void flush_signal_handlers(struct task_struct *, int force_default);
273extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); 273extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info);
274 274
275static inline int kernel_dequeue_signal(siginfo_t *info) 275static inline int kernel_dequeue_signal(void)
276{ 276{
277 struct task_struct *tsk = current; 277 struct task_struct *tsk = current;
278 siginfo_t __info; 278 kernel_siginfo_t __info;
279 int ret; 279 int ret;
280 280
281 spin_lock_irq(&tsk->sighand->siglock); 281 spin_lock_irq(&tsk->sighand->siglock);
282 ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); 282 ret = dequeue_signal(tsk, &tsk->blocked, &__info);
283 spin_unlock_irq(&tsk->sighand->siglock); 283 spin_unlock_irq(&tsk->sighand->siglock);
284 284
285 return ret; 285 return ret;
@@ -322,12 +322,12 @@ int force_sig_pkuerr(void __user *addr, u32 pkey);
322 322
323int force_sig_ptrace_errno_trap(int errno, void __user *addr); 323int force_sig_ptrace_errno_trap(int errno, void __user *addr);
324 324
325extern int send_sig_info(int, struct siginfo *, struct task_struct *); 325extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
326extern void force_sigsegv(int sig, struct task_struct *p); 326extern void force_sigsegv(int sig, struct task_struct *p);
327extern int force_sig_info(int, struct siginfo *, struct task_struct *); 327extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *);
328extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); 328extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
329extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); 329extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
330extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, 330extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *,
331 const struct cred *); 331 const struct cred *);
332extern int kill_pgrp(struct pid *pid, int sig, int priv); 332extern int kill_pgrp(struct pid *pid, int sig, int priv);
333extern int kill_pid(struct pid *pid, int sig, int priv); 333extern int kill_pid(struct pid *pid, int sig, int priv);
@@ -475,9 +475,8 @@ static inline int kill_cad_pid(int sig, int priv)
475} 475}
476 476
477/* These can be the second arg to send_sig_info/send_group_sig_info. */ 477/* These can be the second arg to send_sig_info/send_group_sig_info. */
478#define SEND_SIG_NOINFO ((struct siginfo *) 0) 478#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
479#define SEND_SIG_PRIV ((struct siginfo *) 1) 479#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
480#define SEND_SIG_FORCED ((struct siginfo *) 2)
481 480
482/* 481/*
483 * True if we are on the alternate signal stack. 482 * True if we are on the alternate signal stack.
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 26347741ba50..6b9976180c1e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -23,10 +23,10 @@
23#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ 23#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
24#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ 24#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
25#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ 25#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
26#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ 26#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */
27#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ 27#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */
28#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ 28#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
29#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ 29#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */
30#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ 30#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
31#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ 31#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
32#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ 32#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
diff --git a/include/linux/security.h b/include/linux/security.h
index 75f4156c84d7..d170a5b031f3 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -35,7 +35,7 @@
35struct linux_binprm; 35struct linux_binprm;
36struct cred; 36struct cred;
37struct rlimit; 37struct rlimit;
38struct siginfo; 38struct kernel_siginfo;
39struct sembuf; 39struct sembuf;
40struct kern_ipc_perm; 40struct kern_ipc_perm;
41struct audit_context; 41struct audit_context;
@@ -361,7 +361,7 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource,
361int security_task_setscheduler(struct task_struct *p); 361int security_task_setscheduler(struct task_struct *p);
362int security_task_getscheduler(struct task_struct *p); 362int security_task_getscheduler(struct task_struct *p);
363int security_task_movememory(struct task_struct *p); 363int security_task_movememory(struct task_struct *p);
364int security_task_kill(struct task_struct *p, struct siginfo *info, 364int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
365 int sig, const struct cred *cred); 365 int sig, const struct cred *cred);
366int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, 366int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
367 unsigned long arg4, unsigned long arg5); 367 unsigned long arg4, unsigned long arg5);
@@ -1020,7 +1020,7 @@ static inline int security_task_movememory(struct task_struct *p)
1020} 1020}
1021 1021
1022static inline int security_task_kill(struct task_struct *p, 1022static inline int security_task_kill(struct task_struct *p,
1023 struct siginfo *info, int sig, 1023 struct kernel_siginfo *info, int sig,
1024 const struct cred *cred) 1024 const struct cred *cred)
1025{ 1025{
1026 return 0; 1026 return 0;
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 406edae44ca3..047fa67d039b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -144,6 +144,8 @@ struct uart_port {
144 void (*handle_break)(struct uart_port *); 144 void (*handle_break)(struct uart_port *);
145 int (*rs485_config)(struct uart_port *, 145 int (*rs485_config)(struct uart_port *,
146 struct serial_rs485 *rs485); 146 struct serial_rs485 *rs485);
147 int (*iso7816_config)(struct uart_port *,
148 struct serial_iso7816 *iso7816);
147 unsigned int irq; /* irq number */ 149 unsigned int irq; /* irq number */
148 unsigned long irqflags; /* irq flags */ 150 unsigned long irqflags; /* irq flags */
149 unsigned int uartclk; /* base uart clock */ 151 unsigned int uartclk; /* base uart clock */
@@ -260,6 +262,7 @@ struct uart_port {
260 struct attribute_group *attr_group; /* port specific attributes */ 262 struct attribute_group *attr_group; /* port specific attributes */
261 const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ 263 const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
262 struct serial_rs485 rs485; 264 struct serial_rs485 rs485;
265 struct serial_iso7816 iso7816;
263 void *private_data; /* generic platform data pointer */ 266 void *private_data; /* generic platform data pointer */
264}; 267};
265 268
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index c0e795d95477..1c89611e0e06 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -36,6 +36,7 @@ enum {
36 SCIx_SH4_SCIF_FIFODATA_REGTYPE, 36 SCIx_SH4_SCIF_FIFODATA_REGTYPE,
37 SCIx_SH7705_SCIF_REGTYPE, 37 SCIx_SH7705_SCIF_REGTYPE,
38 SCIx_HSCIF_REGTYPE, 38 SCIx_HSCIF_REGTYPE,
39 SCIx_RZ_SCIFA_REGTYPE,
39 40
40 SCIx_NR_REGTYPES, 41 SCIx_NR_REGTYPES,
41}; 42};
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 3d4cd5db30a9..200ed96a05af 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,17 +11,21 @@ struct task_struct;
11/* for sysctl */ 11/* for sysctl */
12extern int print_fatal_signals; 12extern int print_fatal_signals;
13 13
14static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from) 14static inline void copy_siginfo(kernel_siginfo_t *to,
15 const kernel_siginfo_t *from)
15{ 16{
16 memcpy(to, from, sizeof(*to)); 17 memcpy(to, from, sizeof(*to));
17} 18}
18 19
19static inline void clear_siginfo(struct siginfo *info) 20static inline void clear_siginfo(kernel_siginfo_t *info)
20{ 21{
21 memset(info, 0, sizeof(*info)); 22 memset(info, 0, sizeof(*info));
22} 23}
23 24
24int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); 25#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo))
26
27int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from);
28int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from);
25 29
26enum siginfo_layout { 30enum siginfo_layout {
27 SIL_KILL, 31 SIL_KILL,
@@ -36,7 +40,7 @@ enum siginfo_layout {
36 SIL_SYS, 40 SIL_SYS,
37}; 41};
38 42
39enum siginfo_layout siginfo_layout(int sig, int si_code); 43enum siginfo_layout siginfo_layout(unsigned sig, int si_code);
40 44
41/* 45/*
42 * Define some primitives to manipulate sigset_t. 46 * Define some primitives to manipulate sigset_t.
@@ -257,11 +261,11 @@ struct pt_regs;
257enum pid_type; 261enum pid_type;
258 262
259extern int next_signal(struct sigpending *pending, sigset_t *mask); 263extern int next_signal(struct sigpending *pending, sigset_t *mask);
260extern int do_send_sig_info(int sig, struct siginfo *info, 264extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
261 struct task_struct *p, enum pid_type type); 265 struct task_struct *p, enum pid_type type);
262extern int group_send_sig_info(int sig, struct siginfo *info, 266extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
263 struct task_struct *p, enum pid_type type); 267 struct task_struct *p, enum pid_type type);
264extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); 268extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
265extern int sigprocmask(int, sigset_t *, sigset_t *); 269extern int sigprocmask(int, sigset_t *, sigset_t *);
266extern void set_current_blocked(sigset_t *); 270extern void set_current_blocked(sigset_t *);
267extern void __set_current_blocked(const sigset_t *); 271extern void __set_current_blocked(const sigset_t *);
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index 222ae696000b..f8a90ae9c6ec 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -9,6 +9,10 @@
9#include <linux/list.h> 9#include <linux/list.h>
10#include <uapi/linux/signal.h> 10#include <uapi/linux/signal.h>
11 11
12typedef struct kernel_siginfo {
13 __SIGINFO;
14} kernel_siginfo_t;
15
12/* 16/*
13 * Real Time signals may be queued. 17 * Real Time signals may be queued.
14 */ 18 */
@@ -16,7 +20,7 @@
16struct sigqueue { 20struct sigqueue {
17 struct list_head list; 21 struct list_head list;
18 int flags; 22 int flags;
19 siginfo_t info; 23 kernel_siginfo_t info;
20 struct user_struct *user; 24 struct user_struct *user;
21}; 25};
22 26
@@ -60,7 +64,7 @@ struct old_sigaction {
60 64
61struct ksignal { 65struct ksignal {
62 struct k_sigaction ka; 66 struct k_sigaction ka;
63 siginfo_t info; 67 kernel_siginfo_t info;
64 int sig; 68 int sig;
65}; 69};
66 70
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 17a13e4785fc..0ba687454267 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -243,6 +243,8 @@ struct scatterlist;
243struct pipe_inode_info; 243struct pipe_inode_info;
244struct iov_iter; 244struct iov_iter;
245struct napi_struct; 245struct napi_struct;
246struct bpf_prog;
247union bpf_attr;
246 248
247#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 249#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
248struct nf_conntrack { 250struct nf_conntrack {
@@ -689,7 +691,7 @@ struct sk_buff {
689 691
690 union { 692 union {
691 ktime_t tstamp; 693 ktime_t tstamp;
692 u64 skb_mstamp; 694 u64 skb_mstamp_ns; /* earliest departure time */
693 }; 695 };
694 /* 696 /*
695 * This is the control buffer. It is free to use for every 697 * This is the control buffer. It is free to use for every
@@ -1080,11 +1082,6 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
1080} 1082}
1081#define dev_kfree_skb(a) consume_skb(a) 1083#define dev_kfree_skb(a) consume_skb(a)
1082 1084
1083int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
1084 int getfrag(void *from, char *to, int offset,
1085 int len, int odd, struct sk_buff *skb),
1086 void *from, int length);
1087
1088int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 1085int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1089 int offset, size_t size); 1086 int offset, size_t size);
1090 1087
@@ -1192,6 +1189,24 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1192 const struct flow_dissector_key *key, 1189 const struct flow_dissector_key *key,
1193 unsigned int key_count); 1190 unsigned int key_count);
1194 1191
1192#ifdef CONFIG_NET
1193int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1194 struct bpf_prog *prog);
1195
1196int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
1197#else
1198static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
1199 struct bpf_prog *prog)
1200{
1201 return -EOPNOTSUPP;
1202}
1203
1204static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
1205{
1206 return -EOPNOTSUPP;
1207}
1208#endif
1209
1195bool __skb_flow_dissect(const struct sk_buff *skb, 1210bool __skb_flow_dissect(const struct sk_buff *skb,
1196 struct flow_dissector *flow_dissector, 1211 struct flow_dissector *flow_dissector,
1197 void *target_container, 1212 void *target_container,
@@ -1339,6 +1354,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
1339 } 1354 }
1340} 1355}
1341 1356
1357static inline void skb_mark_not_on_list(struct sk_buff *skb)
1358{
1359 skb->next = NULL;
1360}
1361
1362static inline void skb_list_del_init(struct sk_buff *skb)
1363{
1364 __list_del_entry(&skb->list);
1365 skb_mark_not_on_list(skb);
1366}
1367
1342/** 1368/**
1343 * skb_queue_empty - check if a queue is empty 1369 * skb_queue_empty - check if a queue is empty
1344 * @list: queue head 1370 * @list: queue head
@@ -1593,6 +1619,17 @@ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1593} 1619}
1594 1620
1595/** 1621/**
1622 * __skb_peek - peek at the head of a non-empty &sk_buff_head
1623 * @list_: list to peek at
1624 *
1625 * Like skb_peek(), but the caller knows that the list is not empty.
1626 */
1627static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
1628{
1629 return list_->next;
1630}
1631
1632/**
1596 * skb_peek_next - peek skb following the given one from a queue 1633 * skb_peek_next - peek skb following the given one from a queue
1597 * @skb: skb to start from 1634 * @skb: skb to start from
1598 * @list_: list to peek at 1635 * @list_: list to peek at
@@ -3468,13 +3505,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3468#define __it(x, op) (x -= sizeof(u##op)) 3505#define __it(x, op) (x -= sizeof(u##op))
3469#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) 3506#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
3470 case 32: diffs |= __it_diff(a, b, 64); 3507 case 32: diffs |= __it_diff(a, b, 64);
3508 /* fall through */
3471 case 24: diffs |= __it_diff(a, b, 64); 3509 case 24: diffs |= __it_diff(a, b, 64);
3510 /* fall through */
3472 case 16: diffs |= __it_diff(a, b, 64); 3511 case 16: diffs |= __it_diff(a, b, 64);
3512 /* fall through */
3473 case 8: diffs |= __it_diff(a, b, 64); 3513 case 8: diffs |= __it_diff(a, b, 64);
3474 break; 3514 break;
3475 case 28: diffs |= __it_diff(a, b, 64); 3515 case 28: diffs |= __it_diff(a, b, 64);
3516 /* fall through */
3476 case 20: diffs |= __it_diff(a, b, 64); 3517 case 20: diffs |= __it_diff(a, b, 64);
3518 /* fall through */
3477 case 12: diffs |= __it_diff(a, b, 64); 3519 case 12: diffs |= __it_diff(a, b, 64);
3520 /* fall through */
3478 case 4: diffs |= __it_diff(a, b, 32); 3521 case 4: diffs |= __it_diff(a, b, 32);
3479 break; 3522 break;
3480 } 3523 }
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
new file mode 100644
index 000000000000..2a11e9d91dfa
--- /dev/null
+++ b/include/linux/skmsg.h
@@ -0,0 +1,434 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3
4#ifndef _LINUX_SKMSG_H
5#define _LINUX_SKMSG_H
6
7#include <linux/bpf.h>
8#include <linux/filter.h>
9#include <linux/scatterlist.h>
10#include <linux/skbuff.h>
11
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <net/strparser.h>
15
16#define MAX_MSG_FRAGS MAX_SKB_FRAGS
17
18enum __sk_action {
19 __SK_DROP = 0,
20 __SK_PASS,
21 __SK_REDIRECT,
22 __SK_NONE,
23};
24
25struct sk_msg_sg {
26 u32 start;
27 u32 curr;
28 u32 end;
29 u32 size;
30 u32 copybreak;
31 bool copy[MAX_MSG_FRAGS];
32 /* The extra element is used for chaining the front and sections when
33 * the list becomes partitioned (e.g. end < start). The crypto APIs
34 * require the chaining.
35 */
36 struct scatterlist data[MAX_MSG_FRAGS + 1];
37};
38
39struct sk_msg {
40 struct sk_msg_sg sg;
41 void *data;
42 void *data_end;
43 u32 apply_bytes;
44 u32 cork_bytes;
45 u32 flags;
46 struct sk_buff *skb;
47 struct sock *sk_redir;
48 struct sock *sk;
49 struct list_head list;
50};
51
52struct sk_psock_progs {
53 struct bpf_prog *msg_parser;
54 struct bpf_prog *skb_parser;
55 struct bpf_prog *skb_verdict;
56};
57
58enum sk_psock_state_bits {
59 SK_PSOCK_TX_ENABLED,
60};
61
62struct sk_psock_link {
63 struct list_head list;
64 struct bpf_map *map;
65 void *link_raw;
66};
67
68struct sk_psock_parser {
69 struct strparser strp;
70 bool enabled;
71 void (*saved_data_ready)(struct sock *sk);
72};
73
74struct sk_psock_work_state {
75 struct sk_buff *skb;
76 u32 len;
77 u32 off;
78};
79
80struct sk_psock {
81 struct sock *sk;
82 struct sock *sk_redir;
83 u32 apply_bytes;
84 u32 cork_bytes;
85 u32 eval;
86 struct sk_msg *cork;
87 struct sk_psock_progs progs;
88 struct sk_psock_parser parser;
89 struct sk_buff_head ingress_skb;
90 struct list_head ingress_msg;
91 unsigned long state;
92 struct list_head link;
93 spinlock_t link_lock;
94 refcount_t refcnt;
95 void (*saved_unhash)(struct sock *sk);
96 void (*saved_close)(struct sock *sk, long timeout);
97 void (*saved_write_space)(struct sock *sk);
98 struct proto *sk_proto;
99 struct sk_psock_work_state work_state;
100 struct work_struct work;
101 union {
102 struct rcu_head rcu;
103 struct work_struct gc;
104 };
105};
106
107int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
108 int elem_first_coalesce);
109int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
110 u32 off, u32 len);
111void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
112int sk_msg_free(struct sock *sk, struct sk_msg *msg);
113int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
114void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
115void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
116 u32 bytes);
117
118void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
119void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);
120
121int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
122 struct sk_msg *msg, u32 bytes);
123int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
124 struct sk_msg *msg, u32 bytes);
125
126static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
127{
128 WARN_ON(i == msg->sg.end && bytes);
129}
130
131static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
132{
133 if (psock->apply_bytes) {
134 if (psock->apply_bytes < bytes)
135 psock->apply_bytes = 0;
136 else
137 psock->apply_bytes -= bytes;
138 }
139}
140
141#define sk_msg_iter_var_prev(var) \
142 do { \
143 if (var == 0) \
144 var = MAX_MSG_FRAGS - 1; \
145 else \
146 var--; \
147 } while (0)
148
149#define sk_msg_iter_var_next(var) \
150 do { \
151 var++; \
152 if (var == MAX_MSG_FRAGS) \
153 var = 0; \
154 } while (0)
155
156#define sk_msg_iter_prev(msg, which) \
157 sk_msg_iter_var_prev(msg->sg.which)
158
159#define sk_msg_iter_next(msg, which) \
160 sk_msg_iter_var_next(msg->sg.which)
161
162static inline void sk_msg_clear_meta(struct sk_msg *msg)
163{
164 memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
165}
166
167static inline void sk_msg_init(struct sk_msg *msg)
168{
169 BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
170 memset(msg, 0, sizeof(*msg));
171 sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
172}
173
174static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
175 int which, u32 size)
176{
177 dst->sg.data[which] = src->sg.data[which];
178 dst->sg.data[which].length = size;
179 dst->sg.size += size;
180 src->sg.data[which].length -= size;
181 src->sg.data[which].offset += size;
182}
183
184static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
185{
186 memcpy(dst, src, sizeof(*src));
187 sk_msg_init(src);
188}
189
190static inline bool sk_msg_full(const struct sk_msg *msg)
191{
192 return (msg->sg.end == msg->sg.start) && msg->sg.size;
193}
194
195static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
196{
197 if (sk_msg_full(msg))
198 return MAX_MSG_FRAGS;
199
200 return msg->sg.end >= msg->sg.start ?
201 msg->sg.end - msg->sg.start :
202 msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
203}
204
205static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
206{
207 return &msg->sg.data[which];
208}
209
210static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
211{
212 return msg->sg.data[which];
213}
214
215static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
216{
217 return sg_page(sk_msg_elem(msg, which));
218}
219
220static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
221{
222 return msg->flags & BPF_F_INGRESS;
223}
224
225static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
226{
227 struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
228
229 if (msg->sg.copy[msg->sg.start]) {
230 msg->data = NULL;
231 msg->data_end = NULL;
232 } else {
233 msg->data = sg_virt(sge);
234 msg->data_end = msg->data + sge->length;
235 }
236}
237
238static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
239 u32 len, u32 offset)
240{
241 struct scatterlist *sge;
242
243 get_page(page);
244 sge = sk_msg_elem(msg, msg->sg.end);
245 sg_set_page(sge, page, len, offset);
246 sg_unmark_end(sge);
247
248 msg->sg.copy[msg->sg.end] = true;
249 msg->sg.size += len;
250 sk_msg_iter_next(msg, end);
251}
252
253static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
254{
255 do {
256 msg->sg.copy[i] = copy_state;
257 sk_msg_iter_var_next(i);
258 if (i == msg->sg.end)
259 break;
260 } while (1);
261}
262
263static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
264{
265 sk_msg_sg_copy(msg, start, true);
266}
267
268static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
269{
270 sk_msg_sg_copy(msg, start, false);
271}
272
273static inline struct sk_psock *sk_psock(const struct sock *sk)
274{
275 return rcu_dereference_sk_user_data(sk);
276}
277
278static inline void sk_psock_queue_msg(struct sk_psock *psock,
279 struct sk_msg *msg)
280{
281 list_add_tail(&msg->list, &psock->ingress_msg);
282}
283
284static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
285{
286 return psock ? list_empty(&psock->ingress_msg) : true;
287}
288
289static inline void sk_psock_report_error(struct sk_psock *psock, int err)
290{
291 struct sock *sk = psock->sk;
292
293 sk->sk_err = err;
294 sk->sk_error_report(sk);
295}
296
297struct sk_psock *sk_psock_init(struct sock *sk, int node);
298
299int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
300void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
301void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
302
303int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
304 struct sk_msg *msg);
305
306static inline struct sk_psock_link *sk_psock_init_link(void)
307{
308 return kzalloc(sizeof(struct sk_psock_link),
309 GFP_ATOMIC | __GFP_NOWARN);
310}
311
312static inline void sk_psock_free_link(struct sk_psock_link *link)
313{
314 kfree(link);
315}
316
317struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
318#if defined(CONFIG_BPF_STREAM_PARSER)
319void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
320#else
321static inline void sk_psock_unlink(struct sock *sk,
322 struct sk_psock_link *link)
323{
324}
325#endif
326
327void __sk_psock_purge_ingress_msg(struct sk_psock *psock);
328
329static inline void sk_psock_cork_free(struct sk_psock *psock)
330{
331 if (psock->cork) {
332 sk_msg_free(psock->sk, psock->cork);
333 kfree(psock->cork);
334 psock->cork = NULL;
335 }
336}
337
338static inline void sk_psock_update_proto(struct sock *sk,
339 struct sk_psock *psock,
340 struct proto *ops)
341{
342 psock->saved_unhash = sk->sk_prot->unhash;
343 psock->saved_close = sk->sk_prot->close;
344 psock->saved_write_space = sk->sk_write_space;
345
346 psock->sk_proto = sk->sk_prot;
347 sk->sk_prot = ops;
348}
349
350static inline void sk_psock_restore_proto(struct sock *sk,
351 struct sk_psock *psock)
352{
353 if (psock->sk_proto) {
354 sk->sk_prot = psock->sk_proto;
355 psock->sk_proto = NULL;
356 }
357}
358
359static inline void sk_psock_set_state(struct sk_psock *psock,
360 enum sk_psock_state_bits bit)
361{
362 set_bit(bit, &psock->state);
363}
364
365static inline void sk_psock_clear_state(struct sk_psock *psock,
366 enum sk_psock_state_bits bit)
367{
368 clear_bit(bit, &psock->state);
369}
370
371static inline bool sk_psock_test_state(const struct sk_psock *psock,
372 enum sk_psock_state_bits bit)
373{
374 return test_bit(bit, &psock->state);
375}
376
377static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
378{
379 struct sk_psock *psock;
380
381 rcu_read_lock();
382 psock = sk_psock(sk);
383 if (psock) {
384 if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
385 psock = ERR_PTR(-EBUSY);
386 goto out;
387 }
388
389 if (!refcount_inc_not_zero(&psock->refcnt))
390 psock = ERR_PTR(-EBUSY);
391 }
392out:
393 rcu_read_unlock();
394 return psock;
395}
396
397static inline struct sk_psock *sk_psock_get(struct sock *sk)
398{
399 struct sk_psock *psock;
400
401 rcu_read_lock();
402 psock = sk_psock(sk);
403 if (psock && !refcount_inc_not_zero(&psock->refcnt))
404 psock = NULL;
405 rcu_read_unlock();
406 return psock;
407}
408
409void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
410void sk_psock_destroy(struct rcu_head *rcu);
411void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
412
413static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
414{
415 if (refcount_dec_and_test(&psock->refcnt))
416 sk_psock_drop(sk, psock);
417}
418
419static inline void psock_set_prog(struct bpf_prog **pprog,
420 struct bpf_prog *prog)
421{
422 prog = xchg(pprog, prog);
423 if (prog)
424 bpf_prog_put(prog);
425}
426
427static inline void psock_progs_drop(struct sk_psock_progs *progs)
428{
429 psock_set_prog(&progs->msg_parser, NULL);
430 psock_set_prog(&progs->skb_parser, NULL);
431 psock_set_prog(&progs->skb_verdict, NULL);
432}
433
434#endif /* _LINUX_SKMSG_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ed9cbddeb4a6..918f374e7156 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -295,12 +295,43 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
295#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \ 295#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
296 (KMALLOC_MIN_SIZE) : 16) 296 (KMALLOC_MIN_SIZE) : 16)
297 297
298/*
299 * Whenever changing this, take care of that kmalloc_type() and
300 * create_kmalloc_caches() still work as intended.
301 */
302enum kmalloc_cache_type {
303 KMALLOC_NORMAL = 0,
304 KMALLOC_RECLAIM,
305#ifdef CONFIG_ZONE_DMA
306 KMALLOC_DMA,
307#endif
308 NR_KMALLOC_TYPES
309};
310
298#ifndef CONFIG_SLOB 311#ifndef CONFIG_SLOB
299extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; 312extern struct kmem_cache *
313kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
314
315static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
316{
317 int is_dma = 0;
318 int type_dma = 0;
319 int is_reclaimable;
320
300#ifdef CONFIG_ZONE_DMA 321#ifdef CONFIG_ZONE_DMA
301extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; 322 is_dma = !!(flags & __GFP_DMA);
323 type_dma = is_dma * KMALLOC_DMA;
302#endif 324#endif
303 325
326 is_reclaimable = !!(flags & __GFP_RECLAIMABLE);
327
328 /*
329 * If an allocation is both __GFP_DMA and __GFP_RECLAIMABLE, return
330 * KMALLOC_DMA and effectively ignore __GFP_RECLAIMABLE
331 */
332 return type_dma + (is_reclaimable & !is_dma) * KMALLOC_RECLAIM;
333}
334
304/* 335/*
305 * Figure out which kmalloc slab an allocation of a certain size 336 * Figure out which kmalloc slab an allocation of a certain size
306 * belongs to. 337 * belongs to.
@@ -501,18 +532,20 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
501static __always_inline void *kmalloc(size_t size, gfp_t flags) 532static __always_inline void *kmalloc(size_t size, gfp_t flags)
502{ 533{
503 if (__builtin_constant_p(size)) { 534 if (__builtin_constant_p(size)) {
535#ifndef CONFIG_SLOB
536 unsigned int index;
537#endif
504 if (size > KMALLOC_MAX_CACHE_SIZE) 538 if (size > KMALLOC_MAX_CACHE_SIZE)
505 return kmalloc_large(size, flags); 539 return kmalloc_large(size, flags);
506#ifndef CONFIG_SLOB 540#ifndef CONFIG_SLOB
507 if (!(flags & GFP_DMA)) { 541 index = kmalloc_index(size);
508 unsigned int index = kmalloc_index(size);
509 542
510 if (!index) 543 if (!index)
511 return ZERO_SIZE_PTR; 544 return ZERO_SIZE_PTR;
512 545
513 return kmem_cache_alloc_trace(kmalloc_caches[index], 546 return kmem_cache_alloc_trace(
514 flags, size); 547 kmalloc_caches[kmalloc_type(flags)][index],
515 } 548 flags, size);
516#endif 549#endif
517 } 550 }
518 return __kmalloc(size, flags); 551 return __kmalloc(size, flags);
@@ -542,13 +575,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
542{ 575{
543#ifndef CONFIG_SLOB 576#ifndef CONFIG_SLOB
544 if (__builtin_constant_p(size) && 577 if (__builtin_constant_p(size) &&
545 size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { 578 size <= KMALLOC_MAX_CACHE_SIZE) {
546 unsigned int i = kmalloc_index(size); 579 unsigned int i = kmalloc_index(size);
547 580
548 if (!i) 581 if (!i)
549 return ZERO_SIZE_PTR; 582 return ZERO_SIZE_PTR;
550 583
551 return kmem_cache_alloc_node_trace(kmalloc_caches[i], 584 return kmem_cache_alloc_node_trace(
585 kmalloc_caches[kmalloc_type(flags)][i],
552 flags, node, size); 586 flags, node, size);
553 } 587 }
554#endif 588#endif
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..a56f08ff3097 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,6 +53,10 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
53 smp_call_func_t func, void *info, bool wait, 53 smp_call_func_t func, void *info, bool wait,
54 gfp_t gfp_flags); 54 gfp_t gfp_flags);
55 55
56void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
57 smp_call_func_t func, void *info, bool wait,
58 gfp_t gfp_flags, const struct cpumask *mask);
59
56int smp_call_function_single_async(int cpu, call_single_data_t *csd); 60int smp_call_function_single_async(int cpu, call_single_data_t *csd);
57 61
58#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 7ed4713d5337..8b571e9b9f76 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -348,7 +348,7 @@ struct ucred {
348extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); 348extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
349extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); 349extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
350 350
351struct timespec; 351struct timespec64;
352 352
353/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff 353/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
354 * forbid_cmsg_compat==false 354 * forbid_cmsg_compat==false
@@ -358,7 +358,7 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
358extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, 358extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
359 unsigned int flags, bool forbid_cmsg_compat); 359 unsigned int flags, bool forbid_cmsg_compat);
360extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 360extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
361 unsigned int flags, struct timespec *timeout); 361 unsigned int flags, struct timespec64 *timeout);
362extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, 362extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
363 unsigned int vlen, unsigned int flags, 363 unsigned int vlen, unsigned int flags,
364 bool forbid_cmsg_compat); 364 bool forbid_cmsg_compat);
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 962971e6a9c7..df313913e856 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -678,6 +678,9 @@ struct sdw_master_ops {
678 * @defer_msg: Defer message 678 * @defer_msg: Defer message
679 * @clk_stop_timeout: Clock stop timeout computed 679 * @clk_stop_timeout: Clock stop timeout computed
680 * @bank_switch_timeout: Bank switch timeout computed 680 * @bank_switch_timeout: Bank switch timeout computed
681 * @multi_link: Store bus property that indicates if multi links
682 * are supported. This flag is populated by drivers after reading
683 * appropriate firmware (ACPI/DT).
681 */ 684 */
682struct sdw_bus { 685struct sdw_bus {
683 struct device *dev; 686 struct device *dev;
@@ -694,6 +697,7 @@ struct sdw_bus {
694 struct sdw_defer defer_msg; 697 struct sdw_defer defer_msg;
695 unsigned int clk_stop_timeout; 698 unsigned int clk_stop_timeout;
696 u32 bank_switch_timeout; 699 u32 bank_switch_timeout;
700 bool multi_link;
697}; 701};
698 702
699int sdw_add_bus_master(struct sdw_bus *bus); 703int sdw_add_bus_master(struct sdw_bus *bus);
@@ -768,14 +772,18 @@ struct sdw_stream_params {
768 * @params: Stream parameters 772 * @params: Stream parameters
769 * @state: Current state of the stream 773 * @state: Current state of the stream
770 * @type: Stream type PCM or PDM 774 * @type: Stream type PCM or PDM
771 * @m_rt: Master runtime 775 * @master_list: List of Master runtime(s) in this stream.
776 * master_list can contain only one m_rt per Master instance
777 * for a stream
778 * @m_rt_count: Count of Master runtime(s) in this stream
772 */ 779 */
773struct sdw_stream_runtime { 780struct sdw_stream_runtime {
774 char *name; 781 char *name;
775 struct sdw_stream_params params; 782 struct sdw_stream_params params;
776 enum sdw_stream_state state; 783 enum sdw_stream_state state;
777 enum sdw_stream_type type; 784 enum sdw_stream_type type;
778 struct sdw_master_runtime *m_rt; 785 struct list_head master_list;
786 int m_rt_count;
779}; 787};
780 788
781struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name); 789struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name);
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index b2bd4b4127c4..69ee30456864 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
81 * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes 81 * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
82 * @data.buswidth: number of IO lanes used to send/receive the data 82 * @data.buswidth: number of IO lanes used to send/receive the data
83 * @data.dir: direction of the transfer 83 * @data.dir: direction of the transfer
84 * @data.buf.in: input buffer 84 * @data.nbytes: number of data bytes to send/receive. Can be zero if the
85 * @data.buf.out: output buffer 85 * operation does not involve transferring data
86 * @data.buf.in: input buffer (must be DMA-able)
87 * @data.buf.out: output buffer (must be DMA-able)
86 */ 88 */
87struct spi_mem_op { 89struct spi_mem_op {
88 struct { 90 struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
105 u8 buswidth; 107 u8 buswidth;
106 enum spi_mem_data_dir dir; 108 enum spi_mem_data_dir dir;
107 unsigned int nbytes; 109 unsigned int nbytes;
108 /* buf.{in,out} must be DMA-able. */
109 union { 110 union {
110 void *in; 111 void *in;
111 const void *out; 112 const void *out;
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a64235e05321..6be77fa5ab90 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1,15 +1,6 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0-or-later
2 * Copyright (C) 2005 David Brownell
3 * 2 *
4 * This program is free software; you can redistribute it and/or modify 3 * Copyright (C) 2005 David Brownell
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */ 4 */
14 5
15#ifndef __LINUX_SPI_H 6#ifndef __LINUX_SPI_H
@@ -163,10 +154,12 @@ struct spi_device {
163#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ 154#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
164#define SPI_RX_DUAL 0x400 /* receive with 2 wires */ 155#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
165#define SPI_RX_QUAD 0x800 /* receive with 4 wires */ 156#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
157#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
166 int irq; 158 int irq;
167 void *controller_state; 159 void *controller_state;
168 void *controller_data; 160 void *controller_data;
169 char modalias[SPI_NAME_SIZE]; 161 char modalias[SPI_NAME_SIZE];
162 const char *driver_override;
170 int cs_gpio; /* chip select gpio */ 163 int cs_gpio; /* chip select gpio */
171 164
172 /* the statistics */ 165 /* the statistics */
@@ -177,7 +170,6 @@ struct spi_device {
177 * the controller talks to each chip, like: 170 * the controller talks to each chip, like:
178 * - memory packing (12 bit samples into low bits, others zeroed) 171 * - memory packing (12 bit samples into low bits, others zeroed)
179 * - priority 172 * - priority
180 * - drop chipselect after each word
181 * - chipselect delays 173 * - chipselect delays
182 * - ... 174 * - ...
183 */ 175 */
@@ -711,6 +703,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
711 * @delay_usecs: microseconds to delay after this transfer before 703 * @delay_usecs: microseconds to delay after this transfer before
712 * (optionally) changing the chipselect status, then starting 704 * (optionally) changing the chipselect status, then starting
713 * the next transfer or completing this @spi_message. 705 * the next transfer or completing this @spi_message.
706 * @word_delay: clock cycles to inter word delay after each word size
707 * (set by bits_per_word) transmission.
714 * @transfer_list: transfers are sequenced through @spi_message.transfers 708 * @transfer_list: transfers are sequenced through @spi_message.transfers
715 * @tx_sg: Scatterlist for transmit, currently not for client use 709 * @tx_sg: Scatterlist for transmit, currently not for client use
716 * @rx_sg: Scatterlist for receive, currently not for client use 710 * @rx_sg: Scatterlist for receive, currently not for client use
@@ -793,6 +787,7 @@ struct spi_transfer {
793 u8 bits_per_word; 787 u8 bits_per_word;
794 u16 delay_usecs; 788 u16 delay_usecs;
795 u32 speed_hz; 789 u32 speed_hz;
790 u16 word_delay;
796 791
797 struct list_head transfer_list; 792 struct list_head transfer_list;
798}; 793};
@@ -1277,7 +1272,6 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
1277 { return 0; } 1272 { return 0; }
1278#endif 1273#endif
1279 1274
1280
1281/* If you're hotplugging an adapter with devices (parport, usb, etc) 1275/* If you're hotplugging an adapter with devices (parport, usb, etc)
1282 * use spi_new_device() to describe each device. You can also call 1276 * use spi_new_device() to describe each device. You can also call
1283 * spi_unregister_device() to start making that device vanish, but 1277 * spi_unregister_device() to start making that device vanish, but
@@ -1309,6 +1303,22 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
1309 return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); 1303 return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
1310} 1304}
1311 1305
1306/* OF support code */
1307#if IS_ENABLED(CONFIG_OF)
1308
1309/* must call put_device() when done with returned spi_device device */
1310extern struct spi_device *
1311of_find_spi_device_by_node(struct device_node *node);
1312
1313#else
1314
1315static inline struct spi_device *
1316of_find_spi_device_by_node(struct device_node *node)
1317{
1318 return NULL;
1319}
1320
1321#endif /* IS_ENABLED(CONFIG_OF) */
1312 1322
1313/* Compatibility layer */ 1323/* Compatibility layer */
1314#define spi_master spi_controller 1324#define spi_master spi_controller
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 745d4ca4dd50..0ae91b3a7406 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -105,12 +105,13 @@ struct srcu_struct {
105#define SRCU_STATE_SCAN2 2 105#define SRCU_STATE_SCAN2 2
106 106
107#define __SRCU_STRUCT_INIT(name, pcpu_name) \ 107#define __SRCU_STRUCT_INIT(name, pcpu_name) \
108 { \ 108{ \
109 .sda = &pcpu_name, \ 109 .sda = &pcpu_name, \
110 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 110 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
111 .srcu_gp_seq_needed = 0 - 1, \ 111 .srcu_gp_seq_needed = -1UL, \
112 __SRCU_DEP_MAP_INIT(name) \ 112 .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
113 } 113 __SRCU_DEP_MAP_INIT(name) \
114}
114 115
115/* 116/*
116 * Define and initialize a srcu struct at build time. 117 * Define and initialize a srcu struct at build time.
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
index 4b268d86a784..8b369a41c03c 100644
--- a/include/linux/start_kernel.h
+++ b/include/linux/start_kernel.h
@@ -9,5 +9,7 @@
9 up something else. */ 9 up something else. */
10 10
11extern asmlinkage void __init start_kernel(void); 11extern asmlinkage void __init start_kernel(void);
12extern void __init arch_call_rest_init(void);
13extern void __ref rest_init(void);
12 14
13#endif /* _LINUX_START_KERNEL_H */ 15#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c43e9a01b892..7ddfc65586b0 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -30,6 +30,7 @@
30 30
31#define MTL_MAX_RX_QUEUES 8 31#define MTL_MAX_RX_QUEUES 8
32#define MTL_MAX_TX_QUEUES 8 32#define MTL_MAX_TX_QUEUES 8
33#define STMMAC_CH_MAX 8
33 34
34#define STMMAC_RX_COE_NONE 0 35#define STMMAC_RX_COE_NONE 0
35#define STMMAC_RX_COE_TYPE1 1 36#define STMMAC_RX_COE_TYPE1 1
diff --git a/include/linux/string.h b/include/linux/string.h
index 4a5a0eb7df51..27d0482e5e05 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -131,6 +131,13 @@ static inline void *memset_p(void **p, void *v, __kernel_size_t n)
131 return memset64((uint64_t *)p, (uintptr_t)v, n); 131 return memset64((uint64_t *)p, (uintptr_t)v, n);
132} 132}
133 133
134extern void **__memcat_p(void **a, void **b);
135#define memcat_p(a, b) ({ \
136 BUILD_BUG_ON_MSG(!__same_type(*(a), *(b)), \
137 "type mismatch in memcat_p()"); \
138 (typeof(*a) *)__memcat_p((void **)(a), (void **)(b)); \
139})
140
134#ifndef __HAVE_ARCH_MEMCPY 141#ifndef __HAVE_ARCH_MEMCPY
135extern void * memcpy(void *,const void *,__kernel_size_t); 142extern void * memcpy(void *,const void *,__kernel_size_t);
136#endif 143#endif
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 58a6765c1c5e..c4db9424b63b 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -67,7 +67,7 @@ struct rpc_cred {
67 const struct rpc_credops *cr_ops; 67 const struct rpc_credops *cr_ops;
68 unsigned long cr_expire; /* when to gc */ 68 unsigned long cr_expire; /* when to gc */
69 unsigned long cr_flags; /* various flags */ 69 unsigned long cr_flags; /* various flags */
70 atomic_t cr_count; /* ref count */ 70 refcount_t cr_count; /* ref count */
71 71
72 kuid_t cr_uid; 72 kuid_t cr_uid;
73 73
@@ -100,7 +100,7 @@ struct rpc_auth {
100 * differ from the flavor in 100 * differ from the flavor in
101 * au_ops->au_flavor in gss 101 * au_ops->au_flavor in gss
102 * case) */ 102 * case) */
103 atomic_t au_count; /* Reference counter */ 103 refcount_t au_count; /* Reference counter */
104 104
105 struct rpc_cred_cache * au_credcache; 105 struct rpc_cred_cache * au_credcache;
106 /* per-flavor data */ 106 /* per-flavor data */
@@ -157,6 +157,7 @@ struct rpc_credops {
157 int (*crkey_timeout)(struct rpc_cred *); 157 int (*crkey_timeout)(struct rpc_cred *);
158 bool (*crkey_to_expire)(struct rpc_cred *); 158 bool (*crkey_to_expire)(struct rpc_cred *);
159 char * (*crstringify_acceptor)(struct rpc_cred *); 159 char * (*crstringify_acceptor)(struct rpc_cred *);
160 bool (*crneed_reencode)(struct rpc_task *);
160}; 161};
161 162
162extern const struct rpc_authops authunix_ops; 163extern const struct rpc_authops authunix_ops;
@@ -192,6 +193,7 @@ __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
192__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); 193__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
193int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj); 194int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
194int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj); 195int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
196bool rpcauth_xmit_need_reencode(struct rpc_task *task);
195int rpcauth_refreshcred(struct rpc_task *); 197int rpcauth_refreshcred(struct rpc_task *);
196void rpcauth_invalcred(struct rpc_task *); 198void rpcauth_invalcred(struct rpc_task *);
197int rpcauth_uptodatecred(struct rpc_task *); 199int rpcauth_uptodatecred(struct rpc_task *);
@@ -204,11 +206,11 @@ bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *);
204char * rpcauth_stringify_acceptor(struct rpc_cred *); 206char * rpcauth_stringify_acceptor(struct rpc_cred *);
205 207
206static inline 208static inline
207struct rpc_cred * get_rpccred(struct rpc_cred *cred) 209struct rpc_cred *get_rpccred(struct rpc_cred *cred)
208{ 210{
209 if (cred != NULL) 211 if (cred != NULL && refcount_inc_not_zero(&cred->cr_count))
210 atomic_inc(&cred->cr_count); 212 return cred;
211 return cred; 213 return NULL;
212} 214}
213 215
214/** 216/**
@@ -224,9 +226,7 @@ struct rpc_cred * get_rpccred(struct rpc_cred *cred)
224static inline struct rpc_cred * 226static inline struct rpc_cred *
225get_rpccred_rcu(struct rpc_cred *cred) 227get_rpccred_rcu(struct rpc_cred *cred)
226{ 228{
227 if (atomic_inc_not_zero(&cred->cr_count)) 229 return get_rpccred(cred);
228 return cred;
229 return NULL;
230} 230}
231 231
232#endif /* __KERNEL__ */ 232#endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 0c9eac351aab..30427b729070 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -70,6 +70,7 @@ struct gss_cl_ctx {
70 refcount_t count; 70 refcount_t count;
71 enum rpc_gss_proc gc_proc; 71 enum rpc_gss_proc gc_proc;
72 u32 gc_seq; 72 u32 gc_seq;
73 u32 gc_seq_xmit;
73 spinlock_t gc_seq_lock; 74 spinlock_t gc_seq_lock;
74 struct gss_ctx *gc_gss_ctx; 75 struct gss_ctx *gc_gss_ctx;
75 struct xdr_netobj gc_wire_ctx; 76 struct xdr_netobj gc_wire_ctx;
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 4397a4824c81..28721cf73ec3 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -34,6 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34#ifdef CONFIG_SUNRPC_BACKCHANNEL 34#ifdef CONFIG_SUNRPC_BACKCHANNEL
35struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid); 35struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
36void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); 36void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
37void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
37void xprt_free_bc_request(struct rpc_rqst *req); 38void xprt_free_bc_request(struct rpc_rqst *req);
38int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); 39int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
39void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); 40void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 7df625d41e35..131424cefc6a 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
71 const u32 keyed_cksum; /* is it a keyed cksum? */ 71 const u32 keyed_cksum; /* is it a keyed cksum? */
72 const u32 keybytes; /* raw key len, in bytes */ 72 const u32 keybytes; /* raw key len, in bytes */
73 const u32 keylength; /* final key len, in bytes */ 73 const u32 keylength; /* final key len, in bytes */
74 u32 (*encrypt) (struct crypto_skcipher *tfm, 74 u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
75 void *iv, void *in, void *out, 75 void *iv, void *in, void *out,
76 int length); /* encryption function */ 76 int length); /* encryption function */
77 u32 (*decrypt) (struct crypto_skcipher *tfm, 77 u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
78 void *iv, void *in, void *out, 78 void *iv, void *in, void *out,
79 int length); /* decryption function */ 79 int length); /* decryption function */
80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, 80 u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
98 u32 enctype; 98 u32 enctype;
99 u32 flags; 99 u32 flags;
100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ 100 const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
101 struct crypto_skcipher *enc; 101 struct crypto_sync_skcipher *enc;
102 struct crypto_skcipher *seq; 102 struct crypto_sync_skcipher *seq;
103 struct crypto_skcipher *acceptor_enc; 103 struct crypto_sync_skcipher *acceptor_enc;
104 struct crypto_skcipher *initiator_enc; 104 struct crypto_sync_skcipher *initiator_enc;
105 struct crypto_skcipher *acceptor_enc_aux; 105 struct crypto_sync_skcipher *acceptor_enc_aux;
106 struct crypto_skcipher *initiator_enc_aux; 106 struct crypto_sync_skcipher *initiator_enc_aux;
107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ 107 u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
108 u8 cksum[GSS_KRB5_MAX_KEYLEN]; 108 u8 cksum[GSS_KRB5_MAX_KEYLEN];
109 s32 endtime; 109 s32 endtime;
@@ -118,7 +118,8 @@ struct krb5_ctx {
118 u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; 118 u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
119}; 119};
120 120
121extern spinlock_t krb5_seq_lock; 121extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
122extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
122 123
123/* The length of the Kerberos GSS token header */ 124/* The length of the Kerberos GSS token header */
124#define GSS_KRB5_TOK_HDR_LEN (16) 125#define GSS_KRB5_TOK_HDR_LEN (16)
@@ -262,24 +263,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
262 263
263 264
264u32 265u32
265krb5_encrypt(struct crypto_skcipher *key, 266krb5_encrypt(struct crypto_sync_skcipher *key,
266 void *iv, void *in, void *out, int length); 267 void *iv, void *in, void *out, int length);
267 268
268u32 269u32
269krb5_decrypt(struct crypto_skcipher *key, 270krb5_decrypt(struct crypto_sync_skcipher *key,
270 void *iv, void *in, void *out, int length); 271 void *iv, void *in, void *out, int length);
271 272
272int 273int
273gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf, 274gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
274 int offset, struct page **pages); 275 int offset, struct page **pages);
275 276
276int 277int
277gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf, 278gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
278 int offset); 279 int offset);
279 280
280s32 281s32
281krb5_make_seq_num(struct krb5_ctx *kctx, 282krb5_make_seq_num(struct krb5_ctx *kctx,
282 struct crypto_skcipher *key, 283 struct crypto_sync_skcipher *key,
283 int direction, 284 int direction,
284 u32 seqnum, unsigned char *cksum, unsigned char *buf); 285 u32 seqnum, unsigned char *cksum, unsigned char *buf);
285 286
@@ -320,12 +321,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
320 321
321int 322int
322krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, 323krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
323 struct crypto_skcipher *cipher, 324 struct crypto_sync_skcipher *cipher,
324 unsigned char *cksum); 325 unsigned char *cksum);
325 326
326int 327int
327krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, 328krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
328 struct crypto_skcipher *cipher, 329 struct crypto_sync_skcipher *cipher,
329 s32 seqnum); 330 s32 seqnum);
330void 331void
331gss_krb5_make_confounder(char *p, u32 conflen); 332gss_krb5_make_confounder(char *p, u32 conflen);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 592653becd91..7b540c066594 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -140,8 +140,9 @@ struct rpc_task_setup {
140#define RPC_TASK_RUNNING 0 140#define RPC_TASK_RUNNING 0
141#define RPC_TASK_QUEUED 1 141#define RPC_TASK_QUEUED 1
142#define RPC_TASK_ACTIVE 2 142#define RPC_TASK_ACTIVE 2
143#define RPC_TASK_MSG_RECV 3 143#define RPC_TASK_NEED_XMIT 3
144#define RPC_TASK_MSG_RECV_WAIT 4 144#define RPC_TASK_NEED_RECV 4
145#define RPC_TASK_MSG_PIN_WAIT 5
145 146
146#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) 147#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
147#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) 148#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
@@ -188,7 +189,6 @@ struct rpc_timer {
188struct rpc_wait_queue { 189struct rpc_wait_queue {
189 spinlock_t lock; 190 spinlock_t lock;
190 struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ 191 struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
191 pid_t owner; /* process id of last task serviced */
192 unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ 192 unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
193 unsigned char priority; /* current priority */ 193 unsigned char priority; /* current priority */
194 unsigned char nr; /* # tasks remaining for cookie */ 194 unsigned char nr; /* # tasks remaining for cookie */
@@ -204,7 +204,6 @@ struct rpc_wait_queue {
204 * from a single cookie. The aim is to improve 204 * from a single cookie. The aim is to improve
205 * performance of NFS operations such as read/write. 205 * performance of NFS operations such as read/write.
206 */ 206 */
207#define RPC_BATCH_COUNT 16
208#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) 207#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
209 208
210/* 209/*
@@ -234,6 +233,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
234 struct rpc_task *task); 233 struct rpc_task *task);
235void rpc_wake_up_queued_task(struct rpc_wait_queue *, 234void rpc_wake_up_queued_task(struct rpc_wait_queue *,
236 struct rpc_task *); 235 struct rpc_task *);
236void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,
237 struct rpc_task *,
238 int);
237void rpc_wake_up(struct rpc_wait_queue *); 239void rpc_wake_up(struct rpc_wait_queue *);
238struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); 240struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
239struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, 241struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index c3d72066d4b1..6b7a86c4d6e6 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -84,7 +84,6 @@ struct svc_xprt {
84 struct sockaddr_storage xpt_remote; /* remote peer's address */ 84 struct sockaddr_storage xpt_remote; /* remote peer's address */
85 size_t xpt_remotelen; /* length of address */ 85 size_t xpt_remotelen; /* length of address */
86 char xpt_remotebuf[INET6_ADDRSTRLEN + 10]; 86 char xpt_remotebuf[INET6_ADDRSTRLEN + 10];
87 struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
88 struct list_head xpt_users; /* callbacks on free */ 87 struct list_head xpt_users; /* callbacks on free */
89 88
90 struct net *xpt_net; 89 struct net *xpt_net;
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 2bd68177a442..43106ffa6788 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -18,6 +18,7 @@
18#include <asm/unaligned.h> 18#include <asm/unaligned.h>
19#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
20 20
21struct bio_vec;
21struct rpc_rqst; 22struct rpc_rqst;
22 23
23/* 24/*
@@ -52,12 +53,14 @@ struct xdr_buf {
52 struct kvec head[1], /* RPC header + non-page data */ 53 struct kvec head[1], /* RPC header + non-page data */
53 tail[1]; /* Appended after page data */ 54 tail[1]; /* Appended after page data */
54 55
56 struct bio_vec *bvec;
55 struct page ** pages; /* Array of pages */ 57 struct page ** pages; /* Array of pages */
56 unsigned int page_base, /* Start of page data */ 58 unsigned int page_base, /* Start of page data */
57 page_len, /* Length of page data */ 59 page_len, /* Length of page data */
58 flags; /* Flags for data disposition */ 60 flags; /* Flags for data disposition */
59#define XDRBUF_READ 0x01 /* target of file read */ 61#define XDRBUF_READ 0x01 /* target of file read */
60#define XDRBUF_WRITE 0x02 /* source of file write */ 62#define XDRBUF_WRITE 0x02 /* source of file write */
63#define XDRBUF_SPARSE_PAGES 0x04 /* Page array is sparse */
61 64
62 unsigned int buflen, /* Total length of storage buffer */ 65 unsigned int buflen, /* Total length of storage buffer */
63 len; /* Length of XDR encoded message */ 66 len; /* Length of XDR encoded message */
@@ -69,6 +72,8 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
69 buf->head[0].iov_base = start; 72 buf->head[0].iov_base = start;
70 buf->head[0].iov_len = len; 73 buf->head[0].iov_len = len;
71 buf->tail[0].iov_len = 0; 74 buf->tail[0].iov_len = 0;
75 buf->bvec = NULL;
76 buf->pages = NULL;
72 buf->page_len = 0; 77 buf->page_len = 0;
73 buf->flags = 0; 78 buf->flags = 0;
74 buf->len = 0; 79 buf->len = 0;
@@ -115,6 +120,9 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
115void xdr_inline_pages(struct xdr_buf *, unsigned int, 120void xdr_inline_pages(struct xdr_buf *, unsigned int,
116 struct page **, unsigned int, unsigned int); 121 struct page **, unsigned int, unsigned int);
117void xdr_terminate_string(struct xdr_buf *, const u32); 122void xdr_terminate_string(struct xdr_buf *, const u32);
123size_t xdr_buf_pagecount(struct xdr_buf *buf);
124int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
125void xdr_free_bvec(struct xdr_buf *buf);
118 126
119static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) 127static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
120{ 128{
@@ -177,10 +185,7 @@ struct xdr_skb_reader {
177 185
178typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); 186typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
179 187
180size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len);
181extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); 188extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
182extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
183 struct xdr_skb_reader *, xdr_skb_read_actor);
184 189
185extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); 190extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
186extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); 191extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 336fd1a19cca..a4ab4f8d9140 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -82,7 +82,14 @@ struct rpc_rqst {
82 struct page **rq_enc_pages; /* scratch pages for use by 82 struct page **rq_enc_pages; /* scratch pages for use by
83 gss privacy code */ 83 gss privacy code */
84 void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ 84 void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
85 struct list_head rq_list; 85
86 union {
87 struct list_head rq_list; /* Slot allocation list */
88 struct rb_node rq_recv; /* Receive queue */
89 };
90
91 struct list_head rq_xmit; /* Send queue */
92 struct list_head rq_xmit2; /* Send queue */
86 93
87 void *rq_buffer; /* Call XDR encode buffer */ 94 void *rq_buffer; /* Call XDR encode buffer */
88 size_t rq_callsize; 95 size_t rq_callsize;
@@ -103,6 +110,7 @@ struct rpc_rqst {
103 /* A cookie used to track the 110 /* A cookie used to track the
104 state of the transport 111 state of the transport
105 connection */ 112 connection */
113 atomic_t rq_pin;
106 114
107 /* 115 /*
108 * Partial send handling 116 * Partial send handling
@@ -133,7 +141,8 @@ struct rpc_xprt_ops {
133 void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); 141 void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
134 int (*buf_alloc)(struct rpc_task *task); 142 int (*buf_alloc)(struct rpc_task *task);
135 void (*buf_free)(struct rpc_task *task); 143 void (*buf_free)(struct rpc_task *task);
136 int (*send_request)(struct rpc_task *task); 144 void (*prepare_request)(struct rpc_rqst *req);
145 int (*send_request)(struct rpc_rqst *req);
137 void (*set_retrans_timeout)(struct rpc_task *task); 146 void (*set_retrans_timeout)(struct rpc_task *task);
138 void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); 147 void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
139 void (*release_request)(struct rpc_task *task); 148 void (*release_request)(struct rpc_task *task);
@@ -234,9 +243,12 @@ struct rpc_xprt {
234 */ 243 */
235 spinlock_t transport_lock; /* lock transport info */ 244 spinlock_t transport_lock; /* lock transport info */
236 spinlock_t reserve_lock; /* lock slot table */ 245 spinlock_t reserve_lock; /* lock slot table */
237 spinlock_t recv_lock; /* lock receive list */ 246 spinlock_t queue_lock; /* send/receive queue lock */
238 u32 xid; /* Next XID value to use */ 247 u32 xid; /* Next XID value to use */
239 struct rpc_task * snd_task; /* Task blocked in send */ 248 struct rpc_task * snd_task; /* Task blocked in send */
249
250 struct list_head xmit_queue; /* Send queue */
251
240 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ 252 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
241#if defined(CONFIG_SUNRPC_BACKCHANNEL) 253#if defined(CONFIG_SUNRPC_BACKCHANNEL)
242 struct svc_serv *bc_serv; /* The RPC service which will */ 254 struct svc_serv *bc_serv; /* The RPC service which will */
@@ -248,7 +260,8 @@ struct rpc_xprt {
248 struct list_head bc_pa_list; /* List of preallocated 260 struct list_head bc_pa_list; /* List of preallocated
249 * backchannel rpc_rqst's */ 261 * backchannel rpc_rqst's */
250#endif /* CONFIG_SUNRPC_BACKCHANNEL */ 262#endif /* CONFIG_SUNRPC_BACKCHANNEL */
251 struct list_head recv; 263
264 struct rb_root recv_queue; /* Receive queue */
252 265
253 struct { 266 struct {
254 unsigned long bind_count, /* total number of binds */ 267 unsigned long bind_count, /* total number of binds */
@@ -325,15 +338,18 @@ struct xprt_class {
325struct rpc_xprt *xprt_create_transport(struct xprt_create *args); 338struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
326void xprt_connect(struct rpc_task *task); 339void xprt_connect(struct rpc_task *task);
327void xprt_reserve(struct rpc_task *task); 340void xprt_reserve(struct rpc_task *task);
328void xprt_request_init(struct rpc_task *task);
329void xprt_retry_reserve(struct rpc_task *task); 341void xprt_retry_reserve(struct rpc_task *task);
330int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); 342int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
331int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); 343int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
332void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); 344void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
333void xprt_free_slot(struct rpc_xprt *xprt, 345void xprt_free_slot(struct rpc_xprt *xprt,
334 struct rpc_rqst *req); 346 struct rpc_rqst *req);
335void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); 347void xprt_request_prepare(struct rpc_rqst *req);
336bool xprt_prepare_transmit(struct rpc_task *task); 348bool xprt_prepare_transmit(struct rpc_task *task);
349void xprt_request_enqueue_transmit(struct rpc_task *task);
350void xprt_request_enqueue_receive(struct rpc_task *task);
351void xprt_request_wait_receive(struct rpc_task *task);
352bool xprt_request_need_retransmit(struct rpc_task *task);
337void xprt_transmit(struct rpc_task *task); 353void xprt_transmit(struct rpc_task *task);
338void xprt_end_transmit(struct rpc_task *task); 354void xprt_end_transmit(struct rpc_task *task);
339int xprt_adjust_timeout(struct rpc_rqst *req); 355int xprt_adjust_timeout(struct rpc_rqst *req);
@@ -373,8 +389,8 @@ int xprt_load_transport(const char *);
373void xprt_set_retrans_timeout_def(struct rpc_task *task); 389void xprt_set_retrans_timeout_def(struct rpc_task *task);
374void xprt_set_retrans_timeout_rtt(struct rpc_task *task); 390void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
375void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); 391void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
376void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); 392void xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
377void xprt_write_space(struct rpc_xprt *xprt); 393bool xprt_write_space(struct rpc_xprt *xprt);
378void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); 394void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
379struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); 395struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
380void xprt_update_rtt(struct rpc_task *task); 396void xprt_update_rtt(struct rpc_task *task);
@@ -382,6 +398,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied);
382void xprt_pin_rqst(struct rpc_rqst *req); 398void xprt_pin_rqst(struct rpc_rqst *req);
383void xprt_unpin_rqst(struct rpc_rqst *req); 399void xprt_unpin_rqst(struct rpc_rqst *req);
384void xprt_release_rqst_cong(struct rpc_task *task); 400void xprt_release_rqst_cong(struct rpc_task *task);
401bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
385void xprt_disconnect_done(struct rpc_xprt *xprt); 402void xprt_disconnect_done(struct rpc_xprt *xprt);
386void xprt_force_disconnect(struct rpc_xprt *xprt); 403void xprt_force_disconnect(struct rpc_xprt *xprt);
387void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); 404void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
@@ -400,6 +417,8 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
400#define XPRT_BINDING (5) 417#define XPRT_BINDING (5)
401#define XPRT_CLOSING (6) 418#define XPRT_CLOSING (6)
402#define XPRT_CONGESTED (9) 419#define XPRT_CONGESTED (9)
420#define XPRT_CWND_WAIT (10)
421#define XPRT_WRITE_SPACE (11)
403 422
404static inline void xprt_set_connected(struct rpc_xprt *xprt) 423static inline void xprt_set_connected(struct rpc_xprt *xprt)
405{ 424{
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index ae0f99b9b965..458bfe0137f5 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -30,15 +30,25 @@ struct sock_xprt {
30 /* 30 /*
31 * State of TCP reply receive 31 * State of TCP reply receive
32 */ 32 */
33 __be32 tcp_fraghdr, 33 struct {
34 tcp_xid, 34 struct {
35 tcp_calldir; 35 __be32 fraghdr,
36 xid,
37 calldir;
38 } __attribute__((packed));
36 39
37 u32 tcp_offset, 40 u32 offset,
38 tcp_reclen; 41 len;
39 42
40 unsigned long tcp_copied, 43 unsigned long copied;
41 tcp_flags; 44 } recv;
45
46 /*
47 * State of TCP transmit queue
48 */
49 struct {
50 u32 offset;
51 } xmit;
42 52
43 /* 53 /*
44 * Connection of transports 54 * Connection of transports
@@ -68,20 +78,8 @@ struct sock_xprt {
68}; 78};
69 79
70/* 80/*
71 * TCP receive state flags
72 */
73#define TCP_RCV_LAST_FRAG (1UL << 0)
74#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
75#define TCP_RCV_COPY_XID (1UL << 2)
76#define TCP_RCV_COPY_DATA (1UL << 3)
77#define TCP_RCV_READ_CALLDIR (1UL << 4)
78#define TCP_RCV_COPY_CALLDIR (1UL << 5)
79
80/*
81 * TCP RPC flags 81 * TCP RPC flags
82 */ 82 */
83#define TCP_RPC_REPLY (1UL << 6)
84
85#define XPRT_SOCK_CONNECTING 1U 83#define XPRT_SOCK_CONNECTING 1U
86#define XPRT_SOCK_DATA_READY (2) 84#define XPRT_SOCK_DATA_READY (2)
87#define XPRT_SOCK_UPD_TIMEOUT (3) 85#define XPRT_SOCK_UPD_TIMEOUT (3)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a28ac9284f0..3f529ad9a9d2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void)
251 return unlikely(s2idle_state == S2IDLE_STATE_ENTER); 251 return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
252} 252}
253 253
254extern bool pm_suspend_via_s2idle(void);
254extern void __init pm_states_init(void); 255extern void __init pm_states_init(void);
255extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); 256extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
256extern void s2idle_wake(void); 257extern void s2idle_wake(void);
@@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {}
282static inline void pm_set_resume_via_firmware(void) {} 283static inline void pm_set_resume_via_firmware(void) {}
283static inline bool pm_suspend_via_firmware(void) { return false; } 284static inline bool pm_suspend_via_firmware(void) { return false; }
284static inline bool pm_resume_via_firmware(void) { return false; } 285static inline bool pm_resume_via_firmware(void) { return false; }
286static inline bool pm_suspend_via_s2idle(void) { return false; }
285 287
286static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 288static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
287static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } 289static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8e2c11e692ba..d8a07a4f171d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -167,13 +167,14 @@ enum {
167 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ 167 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
168 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ 168 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
169 SWP_BLKDEV = (1 << 6), /* its a block device */ 169 SWP_BLKDEV = (1 << 6), /* its a block device */
170 SWP_FILE = (1 << 7), /* set after swap_activate success */ 170 SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
171 SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */ 171 SWP_FS = (1 << 8), /* swap file goes through fs */
172 SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */ 172 SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */
173 SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */ 173 SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
174 SWP_SYNCHRONOUS_IO = (1 << 11), /* synchronous IO is efficient */ 174 SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
175 SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
175 /* add others here before... */ 176 /* add others here before... */
176 SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */ 177 SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */
177}; 178};
178 179
179#define SWAP_CLUSTER_MAX 32UL 180#define SWAP_CLUSTER_MAX 32UL
@@ -296,20 +297,15 @@ struct vma_swap_readahead {
296 297
297/* linux/mm/workingset.c */ 298/* linux/mm/workingset.c */
298void *workingset_eviction(struct address_space *mapping, struct page *page); 299void *workingset_eviction(struct address_space *mapping, struct page *page);
299bool workingset_refault(void *shadow); 300void workingset_refault(struct page *page, void *shadow);
300void workingset_activation(struct page *page); 301void workingset_activation(struct page *page);
301 302
302/* Do not use directly, use workingset_lookup_update */ 303/* Only track the nodes of mappings with shadow entries */
303void workingset_update_node(struct radix_tree_node *node); 304void workingset_update_node(struct xa_node *node);
304 305#define mapping_set_update(xas, mapping) do { \
305/* Returns workingset_update_node() if the mapping has shadow entries. */ 306 if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \
306#define workingset_lookup_update(mapping) \ 307 xas_set_update(xas, workingset_update_node); \
307({ \ 308} while (0)
308 radix_tree_update_node_t __helper = workingset_update_node; \
309 if (dax_mapping(mapping) || shmem_mapping(mapping)) \
310 __helper = NULL; \
311 __helper; \
312})
313 309
314/* linux/mm/page_alloc.c */ 310/* linux/mm/page_alloc.c */
315extern unsigned long totalram_pages; 311extern unsigned long totalram_pages;
@@ -408,7 +404,7 @@ extern void show_swap_cache_info(void);
408extern int add_to_swap(struct page *page); 404extern int add_to_swap(struct page *page);
409extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); 405extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
410extern int __add_to_swap_cache(struct page *page, swp_entry_t entry); 406extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
411extern void __delete_from_swap_cache(struct page *); 407extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
412extern void delete_from_swap_cache(struct page *); 408extern void delete_from_swap_cache(struct page *);
413extern void free_page_and_swap_cache(struct page *); 409extern void free_page_and_swap_cache(struct page *);
414extern void free_pages_and_swap_cache(struct page **, int); 410extern void free_pages_and_swap_cache(struct page **, int);
@@ -562,7 +558,8 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
562 return -1; 558 return -1;
563} 559}
564 560
565static inline void __delete_from_swap_cache(struct page *page) 561static inline void __delete_from_swap_cache(struct page *page,
562 swp_entry_t entry)
566{ 563{
567} 564}
568 565
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 22af9d8a84ae..4d961668e5fc 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -18,9 +18,8 @@
18 * 18 *
19 * swp_entry_t's are *never* stored anywhere in their arch-dependent format. 19 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
20 */ 20 */
21#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ 21#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
22 (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) 22#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
23#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1)
24 23
25/* 24/*
26 * Store a type+offset into a swp_entry_t in an arch-independent format 25 * Store a type+offset into a swp_entry_t in an arch-independent format
@@ -29,8 +28,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
29{ 28{
30 swp_entry_t ret; 29 swp_entry_t ret;
31 30
32 ret.val = (type << SWP_TYPE_SHIFT(ret)) | 31 ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
33 (offset & SWP_OFFSET_MASK(ret));
34 return ret; 32 return ret;
35} 33}
36 34
@@ -40,7 +38,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
40 */ 38 */
41static inline unsigned swp_type(swp_entry_t entry) 39static inline unsigned swp_type(swp_entry_t entry)
42{ 40{
43 return (entry.val >> SWP_TYPE_SHIFT(entry)); 41 return (entry.val >> SWP_TYPE_SHIFT);
44} 42}
45 43
46/* 44/*
@@ -49,7 +47,7 @@ static inline unsigned swp_type(swp_entry_t entry)
49 */ 47 */
50static inline pgoff_t swp_offset(swp_entry_t entry) 48static inline pgoff_t swp_offset(swp_entry_t entry)
51{ 49{
52 return entry.val & SWP_OFFSET_MASK(entry); 50 return entry.val & SWP_OFFSET_MASK;
53} 51}
54 52
55#ifdef CONFIG_MMU 53#ifdef CONFIG_MMU
@@ -90,16 +88,13 @@ static inline swp_entry_t radix_to_swp_entry(void *arg)
90{ 88{
91 swp_entry_t entry; 89 swp_entry_t entry;
92 90
93 entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; 91 entry.val = xa_to_value(arg);
94 return entry; 92 return entry;
95} 93}
96 94
97static inline void *swp_to_radix_entry(swp_entry_t entry) 95static inline void *swp_to_radix_entry(swp_entry_t entry)
98{ 96{
99 unsigned long value; 97 return xa_mk_value(entry.val);
100
101 value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
102 return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
103} 98}
104 99
105#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) 100#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 965be92c33b5..a387b59640a4 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
67 67
68/* Accessory functions. */ 68/* Accessory functions. */
69 69
70void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
71 gfp_t flags, unsigned long attrs);
72void swiotlb_free(struct device *dev, size_t size, void *vaddr,
73 dma_addr_t dma_addr, unsigned long attrs);
74
75extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, 70extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
76 unsigned long offset, size_t size, 71 unsigned long offset, size_t size,
77 enum dma_data_direction dir, 72 enum dma_data_direction dir,
@@ -107,9 +102,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
107 int nelems, enum dma_data_direction dir); 102 int nelems, enum dma_data_direction dir);
108 103
109extern int 104extern int
110swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
111
112extern int
113swiotlb_dma_supported(struct device *hwdev, u64 mask); 105swiotlb_dma_supported(struct device *hwdev, u64 mask);
114 106
115#ifdef CONFIG_SWIOTLB 107#ifdef CONFIG_SWIOTLB
@@ -121,7 +113,6 @@ static inline unsigned int swiotlb_max_segment(void) { return 0; }
121#endif 113#endif
122 114
123extern void swiotlb_print_info(void); 115extern void swiotlb_print_info(void);
124extern int is_swiotlb_buffer(phys_addr_t paddr);
125extern void swiotlb_set_max_segment(unsigned int); 116extern void swiotlb_set_max_segment(unsigned int);
126 117
127extern const struct dma_map_ops swiotlb_dma_ops; 118extern const struct dma_map_ops swiotlb_dma_ops;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2ff814c92f7f..2ac3d13a915b 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -60,7 +60,7 @@ struct tms;
60struct utimbuf; 60struct utimbuf;
61struct mq_attr; 61struct mq_attr;
62struct compat_stat; 62struct compat_stat;
63struct compat_timeval; 63struct old_timeval32;
64struct robust_list_head; 64struct robust_list_head;
65struct getcpu_cache; 65struct getcpu_cache;
66struct old_linux_dirent; 66struct old_linux_dirent;
@@ -513,7 +513,8 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *
513 513
514/* fs/utimes.c */ 514/* fs/utimes.c */
515asmlinkage long sys_utimensat(int dfd, const char __user *filename, 515asmlinkage long sys_utimensat(int dfd, const char __user *filename,
516 struct timespec __user *utimes, int flags); 516 struct __kernel_timespec __user *utimes,
517 int flags);
517 518
518/* kernel/acct.c */ 519/* kernel/acct.c */
519asmlinkage long sys_acct(const char __user *name); 520asmlinkage long sys_acct(const char __user *name);
@@ -613,7 +614,7 @@ asmlinkage long sys_sched_yield(void);
613asmlinkage long sys_sched_get_priority_max(int policy); 614asmlinkage long sys_sched_get_priority_max(int policy);
614asmlinkage long sys_sched_get_priority_min(int policy); 615asmlinkage long sys_sched_get_priority_min(int policy);
615asmlinkage long sys_sched_rr_get_interval(pid_t pid, 616asmlinkage long sys_sched_rr_get_interval(pid_t pid,
616 struct timespec __user *interval); 617 struct __kernel_timespec __user *interval);
617 618
618/* kernel/signal.c */ 619/* kernel/signal.c */
619asmlinkage long sys_restart_syscall(void); 620asmlinkage long sys_restart_syscall(void);
@@ -634,7 +635,7 @@ asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set,
634asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize); 635asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize);
635asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, 636asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
636 siginfo_t __user *uinfo, 637 siginfo_t __user *uinfo,
637 const struct timespec __user *uts, 638 const struct __kernel_timespec __user *uts,
638 size_t sigsetsize); 639 size_t sigsetsize);
639asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); 640asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
640 641
@@ -829,7 +830,7 @@ asmlinkage long sys_perf_event_open(
829asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); 830asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
830asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, 831asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
831 unsigned int vlen, unsigned flags, 832 unsigned int vlen, unsigned flags,
832 struct timespec __user *timeout); 833 struct __kernel_timespec __user *timeout);
833 834
834asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, 835asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
835 int options, struct rusage __user *ru); 836 int options, struct rusage __user *ru);
@@ -954,8 +955,6 @@ asmlinkage long sys_access(const char __user *filename, int mode);
954asmlinkage long sys_rename(const char __user *oldname, 955asmlinkage long sys_rename(const char __user *oldname,
955 const char __user *newname); 956 const char __user *newname);
956asmlinkage long sys_symlink(const char __user *old, const char __user *new); 957asmlinkage long sys_symlink(const char __user *old, const char __user *new);
957asmlinkage long sys_utimes(char __user *filename,
958 struct timeval __user *utimes);
959#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) 958#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
960asmlinkage long sys_stat64(const char __user *filename, 959asmlinkage long sys_stat64(const char __user *filename,
961 struct stat64 __user *statbuf); 960 struct stat64 __user *statbuf);
@@ -985,14 +984,18 @@ asmlinkage long sys_alarm(unsigned int seconds);
985asmlinkage long sys_getpgrp(void); 984asmlinkage long sys_getpgrp(void);
986asmlinkage long sys_pause(void); 985asmlinkage long sys_pause(void);
987asmlinkage long sys_time(time_t __user *tloc); 986asmlinkage long sys_time(time_t __user *tloc);
987#ifdef __ARCH_WANT_SYS_UTIME
988asmlinkage long sys_utime(char __user *filename, 988asmlinkage long sys_utime(char __user *filename,
989 struct utimbuf __user *times); 989 struct utimbuf __user *times);
990asmlinkage long sys_utimes(char __user *filename,
991 struct timeval __user *utimes);
992asmlinkage long sys_futimesat(int dfd, const char __user *filename,
993 struct timeval __user *utimes);
994#endif
990asmlinkage long sys_creat(const char __user *pathname, umode_t mode); 995asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
991asmlinkage long sys_getdents(unsigned int fd, 996asmlinkage long sys_getdents(unsigned int fd,
992 struct linux_dirent __user *dirent, 997 struct linux_dirent __user *dirent,
993 unsigned int count); 998 unsigned int count);
994asmlinkage long sys_futimesat(int dfd, const char __user *filename,
995 struct timeval __user *utimes);
996asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, 999asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
997 fd_set __user *exp, struct timeval __user *tvp); 1000 fd_set __user *exp, struct timeval __user *tvp);
998asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, 1001asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
diff --git a/include/linux/tc.h b/include/linux/tc.h
index f92511e57cdb..a60639f37963 100644
--- a/include/linux/tc.h
+++ b/include/linux/tc.h
@@ -84,6 +84,7 @@ struct tc_dev {
84 device. */ 84 device. */
85 struct device dev; /* Generic device interface. */ 85 struct device dev; /* Generic device interface. */
86 struct resource resource; /* Address space of this device. */ 86 struct resource resource; /* Address space of this device. */
87 u64 dma_mask; /* DMA addressable range. */
87 char vendor[9]; 88 char vendor[9];
88 char name[9]; 89 char name[9];
89 char firmware[9]; 90 char firmware[9];
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 263e37271afd..8ed77bb4ed86 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -248,6 +248,9 @@ struct tcp_sock {
248 syn_smc:1; /* SYN includes SMC */ 248 syn_smc:1; /* SYN includes SMC */
249 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ 249 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
250 250
251 u64 tcp_wstamp_ns; /* departure time for next sent data packet */
252 u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
253
251/* RTT measurement */ 254/* RTT measurement */
252 u64 tcp_mstamp; /* most recent packet received/sent */ 255 u64 tcp_mstamp; /* most recent packet received/sent */
253 u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 256 u32 srtt_us; /* smoothed round trip time << 3 in usecs */
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index a3ed26082bc1..bf6ec83e60ee 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Thunderbolt service API 3 * Thunderbolt service API
3 * 4 *
@@ -5,10 +6,6 @@
5 * Copyright (C) 2017, Intel Corporation 6 * Copyright (C) 2017, Intel Corporation
6 * Authors: Michael Jamet <michael.jamet@intel.com> 7 * Authors: Michael Jamet <michael.jamet@intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com> 8 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */ 9 */
13 10
14#ifndef THUNDERBOLT_H_ 11#ifndef THUNDERBOLT_H_
diff --git a/include/linux/time32.h b/include/linux/time32.h
index d1ae43c13e25..61904a6c098f 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -13,6 +13,36 @@
13 13
14#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) 14#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
15 15
16typedef s32 old_time32_t;
17
18struct old_timespec32 {
19 old_time32_t tv_sec;
20 s32 tv_nsec;
21};
22
23struct old_timeval32 {
24 old_time32_t tv_sec;
25 s32 tv_usec;
26};
27
28struct old_itimerspec32 {
29 struct old_timespec32 it_interval;
30 struct old_timespec32 it_value;
31};
32
33struct old_utimbuf32 {
34 old_time32_t actime;
35 old_time32_t modtime;
36};
37
38extern int get_old_timespec32(struct timespec64 *, const void __user *);
39extern int put_old_timespec32(const struct timespec64 *, void __user *);
40extern int get_old_itimerspec32(struct itimerspec64 *its,
41 const struct old_itimerspec32 __user *uits);
42extern int put_old_itimerspec32(const struct itimerspec64 *its,
43 struct old_itimerspec32 __user *uits);
44
45
16#if __BITS_PER_LONG == 64 46#if __BITS_PER_LONG == 64
17 47
18/* timespec64 is defined as timespec here */ 48/* timespec64 is defined as timespec here */
@@ -105,16 +135,6 @@ static inline bool timespec_valid(const struct timespec *ts)
105 return true; 135 return true;
106} 136}
107 137
108static inline bool timespec_valid_strict(const struct timespec *ts)
109{
110 if (!timespec_valid(ts))
111 return false;
112 /* Disallow values that could overflow ktime_t */
113 if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
114 return false;
115 return true;
116}
117
118/** 138/**
119 * timespec_to_ns - Convert timespec to nanoseconds 139 * timespec_to_ns - Convert timespec to nanoseconds
120 * @ts: pointer to the timespec variable to be converted 140 * @ts: pointer to the timespec variable to be converted
@@ -149,19 +169,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
149 a->tv_nsec = ns; 169 a->tv_nsec = ns;
150} 170}
151 171
152/**
153 * time_to_tm - converts the calendar time to local broken-down time
154 *
155 * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
156 * Coordinated Universal Time (UTC).
157 * @offset offset seconds adding to totalsecs.
158 * @result pointer to struct tm variable to receive broken-down time
159 */
160static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
161{
162 time64_to_tm(totalsecs, offset, result);
163}
164
165static inline unsigned long mktime(const unsigned int year, 172static inline unsigned long mktime(const unsigned int year,
166 const unsigned int mon, const unsigned int day, 173 const unsigned int mon, const unsigned int day,
167 const unsigned int hour, const unsigned int min, 174 const unsigned int hour, const unsigned int min,
@@ -183,8 +190,6 @@ static inline bool timeval_valid(const struct timeval *tv)
183 return true; 190 return true;
184} 191}
185 192
186extern struct timespec timespec_trunc(struct timespec t, unsigned int gran);
187
188/** 193/**
189 * timeval_to_ns - Convert timeval to nanoseconds 194 * timeval_to_ns - Convert timeval to nanoseconds
190 * @ts: pointer to the timeval variable to be converted 195 * @ts: pointer to the timeval variable to be converted
@@ -208,18 +213,17 @@ extern struct timeval ns_to_timeval(const s64 nsec);
208extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec); 213extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
209 214
210/* 215/*
211 * New aliases for compat time functions. These will be used to replace 216 * Old names for the 32-bit time_t interfaces, these will be removed
212 * the compat code so it can be shared between 32-bit and 64-bit builds 217 * when everything uses the new names.
213 * both of which provide compatibility with old 32-bit tasks.
214 */ 218 */
215#define old_time32_t compat_time_t 219#define compat_time_t old_time32_t
216#define old_timeval32 compat_timeval 220#define compat_timeval old_timeval32
217#define old_timespec32 compat_timespec 221#define compat_timespec old_timespec32
218#define old_itimerspec32 compat_itimerspec 222#define compat_itimerspec old_itimerspec32
219#define ns_to_old_timeval32 ns_to_compat_timeval 223#define ns_to_compat_timeval ns_to_old_timeval32
220#define get_old_itimerspec32 get_compat_itimerspec64 224#define get_compat_itimerspec64 get_old_itimerspec32
221#define put_old_itimerspec32 put_compat_itimerspec64 225#define put_compat_itimerspec64 put_old_itimerspec32
222#define get_old_timespec32 compat_get_timespec64 226#define compat_get_timespec64 get_old_timespec32
223#define put_old_timespec32 compat_put_timespec64 227#define compat_put_timespec64 put_old_timespec32
224 228
225#endif 229#endif
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index a5a3cfc3c2fa..29975e93fcb8 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -266,9 +266,6 @@ extern int update_persistent_clock64(struct timespec64 now);
266 * deprecated aliases, don't use in new code 266 * deprecated aliases, don't use in new code
267 */ 267 */
268#define getnstimeofday64(ts) ktime_get_real_ts64(ts) 268#define getnstimeofday64(ts) ktime_get_real_ts64(ts)
269#define get_monotonic_boottime64(ts) ktime_get_boottime_ts64(ts)
270#define getrawmonotonic64(ts) ktime_get_raw_ts64(ts)
271#define timekeeping_clocktai64(ts) ktime_get_clocktai_ts64(ts)
272 269
273static inline struct timespec64 current_kernel_time64(void) 270static inline struct timespec64 current_kernel_time64(void)
274{ 271{
@@ -279,13 +276,4 @@ static inline struct timespec64 current_kernel_time64(void)
279 return ts; 276 return ts;
280} 277}
281 278
282static inline struct timespec64 get_monotonic_coarse64(void)
283{
284 struct timespec64 ts;
285
286 ktime_get_coarse_ts64(&ts);
287
288 return ts;
289}
290
291#endif 279#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index 8762c2f45f8b..a502616f7e1c 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -6,27 +6,18 @@
6 * over time so we can remove the file here. 6 * over time so we can remove the file here.
7 */ 7 */
8 8
9extern void do_gettimeofday(struct timeval *tv); 9static inline void do_gettimeofday(struct timeval *tv)
10unsigned long get_seconds(void);
11
12static inline struct timespec current_kernel_time(void)
13{ 10{
14 struct timespec64 ts64; 11 struct timespec64 now;
15 12
16 ktime_get_coarse_real_ts64(&ts64); 13 ktime_get_real_ts64(&now);
17 14 tv->tv_sec = now.tv_sec;
18 return timespec64_to_timespec(ts64); 15 tv->tv_usec = now.tv_nsec/1000;
19} 16}
20 17
21/** 18static inline unsigned long get_seconds(void)
22 * Deprecated. Use do_settimeofday64().
23 */
24static inline int do_settimeofday(const struct timespec *ts)
25{ 19{
26 struct timespec64 ts64; 20 return ktime_get_real_seconds();
27
28 ts64 = timespec_to_timespec64(*ts);
29 return do_settimeofday64(&ts64);
30} 21}
31 22
32static inline void getnstimeofday(struct timespec *ts) 23static inline void getnstimeofday(struct timespec *ts)
@@ -45,14 +36,6 @@ static inline void ktime_get_ts(struct timespec *ts)
45 *ts = timespec64_to_timespec(ts64); 36 *ts = timespec64_to_timespec(ts64);
46} 37}
47 38
48static inline void ktime_get_real_ts(struct timespec *ts)
49{
50 struct timespec64 ts64;
51
52 ktime_get_real_ts64(&ts64);
53 *ts = timespec64_to_timespec(ts64);
54}
55
56static inline void getrawmonotonic(struct timespec *ts) 39static inline void getrawmonotonic(struct timespec *ts)
57{ 40{
58 struct timespec64 ts64; 41 struct timespec64 ts64;
@@ -61,15 +44,6 @@ static inline void getrawmonotonic(struct timespec *ts)
61 *ts = timespec64_to_timespec(ts64); 44 *ts = timespec64_to_timespec(ts64);
62} 45}
63 46
64static inline struct timespec get_monotonic_coarse(void)
65{
66 struct timespec64 ts64;
67
68 ktime_get_coarse_ts64(&ts64);
69
70 return timespec64_to_timespec(ts64);
71}
72
73static inline void getboottime(struct timespec *ts) 47static inline void getboottime(struct timespec *ts)
74{ 48{
75 struct timespec64 ts64; 49 struct timespec64 ts64;
@@ -79,19 +53,6 @@ static inline void getboottime(struct timespec *ts)
79} 53}
80 54
81/* 55/*
82 * Timespec interfaces utilizing the ktime based ones
83 */
84static inline void get_monotonic_boottime(struct timespec *ts)
85{
86 *ts = ktime_to_timespec(ktime_get_boottime());
87}
88
89static inline void timekeeping_clocktai(struct timespec *ts)
90{
91 *ts = ktime_to_timespec(ktime_get_clocktai());
92}
93
94/*
95 * Persistent clock related interfaces 56 * Persistent clock related interfaces
96 */ 57 */
97extern void read_persistent_clock(struct timespec *ts); 58extern void read_persistent_clock(struct timespec *ts);
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 61dfd93b6ee4..48fad21109fc 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -77,7 +77,7 @@ void torture_shutdown_absorb(const char *title);
77int torture_shutdown_init(int ssecs, void (*cleanup)(void)); 77int torture_shutdown_init(int ssecs, void (*cleanup)(void));
78 78
79/* Task stuttering, which forces load/no-load transitions. */ 79/* Task stuttering, which forces load/no-load transitions. */
80void stutter_wait(const char *title); 80bool stutter_wait(const char *title);
81int torture_stutter_init(int s); 81int torture_stutter_init(int s);
82 82
83/* Initialization and cleanup. */ 83/* Initialization and cleanup. */
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 05589a3e37f4..40b0b4c1bf7b 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -123,15 +123,10 @@ static inline __must_check int tracehook_report_syscall_entry(
123 */ 123 */
124static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) 124static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
125{ 125{
126 if (step) { 126 if (step)
127 siginfo_t info; 127 user_single_step_report(regs);
128 clear_siginfo(&info); 128 else
129 user_single_step_siginfo(current, regs, &info); 129 ptrace_report_syscall(regs);
130 force_sig_info(SIGTRAP, &info, current);
131 return;
132 }
133
134 ptrace_report_syscall(regs);
135} 130}
136 131
137/** 132/**
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 22c5a46e9693..49ba9cde7e4b 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -35,6 +35,12 @@ struct tracepoint {
35 struct tracepoint_func __rcu *funcs; 35 struct tracepoint_func __rcu *funcs;
36}; 36};
37 37
38#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
39typedef const int tracepoint_ptr_t;
40#else
41typedef struct tracepoint * const tracepoint_ptr_t;
42#endif
43
38struct bpf_raw_event_map { 44struct bpf_raw_event_map {
39 struct tracepoint *tp; 45 struct tracepoint *tp;
40 void *bpf_func; 46 void *bpf_func;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 041f7e56a289..538ba1a58f5b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -99,6 +99,29 @@ extern void syscall_unregfunc(void);
99#define TRACE_DEFINE_ENUM(x) 99#define TRACE_DEFINE_ENUM(x)
100#define TRACE_DEFINE_SIZEOF(x) 100#define TRACE_DEFINE_SIZEOF(x)
101 101
102#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
103static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
104{
105 return offset_to_ptr(p);
106}
107
108#define __TRACEPOINT_ENTRY(name) \
109 asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
110 " .balign 4 \n" \
111 " .long __tracepoint_" #name " - . \n" \
112 " .previous \n")
113#else
114static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
115{
116 return *p;
117}
118
119#define __TRACEPOINT_ENTRY(name) \
120 static tracepoint_ptr_t __tracepoint_ptr_##name __used \
121 __attribute__((section("__tracepoints_ptrs"))) = \
122 &__tracepoint_##name
123#endif
124
102#endif /* _LINUX_TRACEPOINT_H */ 125#endif /* _LINUX_TRACEPOINT_H */
103 126
104/* 127/*
@@ -253,19 +276,6 @@ extern void syscall_unregfunc(void);
253 return static_key_false(&__tracepoint_##name.key); \ 276 return static_key_false(&__tracepoint_##name.key); \
254 } 277 }
255 278
256#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
257#define __TRACEPOINT_ENTRY(name) \
258 asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
259 " .balign 4 \n" \
260 " .long __tracepoint_" #name " - . \n" \
261 " .previous \n")
262#else
263#define __TRACEPOINT_ENTRY(name) \
264 static struct tracepoint * const __tracepoint_ptr_##name __used \
265 __attribute__((section("__tracepoints_ptrs"))) = \
266 &__tracepoint_##name
267#endif
268
269/* 279/*
270 * We have no guarantee that gcc and the linker won't up-align the tracepoint 280 * We have no guarantee that gcc and the linker won't up-align the tracepoint
271 * structures, so we create an array of pointers that will be used for iteration 281 * structures, so we create an array of pointers that will be used for iteration
diff --git a/include/linux/tty.h b/include/linux/tty.h
index c56e3978b00f..414db2bce715 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -746,8 +746,6 @@ static inline int tty_audit_push(void)
746/* tty_ioctl.c */ 746/* tty_ioctl.c */
747extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, 747extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
748 unsigned int cmd, unsigned long arg); 748 unsigned int cmd, unsigned long arg);
749extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
750 unsigned int cmd, unsigned long arg);
751 749
752/* vt.c */ 750/* vt.c */
753 751
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 71dbc891851a..358446247ccd 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -249,6 +249,7 @@
249struct tty_struct; 249struct tty_struct;
250struct tty_driver; 250struct tty_driver;
251struct serial_icounter_struct; 251struct serial_icounter_struct;
252struct serial_struct;
252 253
253struct tty_operations { 254struct tty_operations {
254 struct tty_struct * (*lookup)(struct tty_driver *driver, 255 struct tty_struct * (*lookup)(struct tty_driver *driver,
@@ -287,6 +288,8 @@ struct tty_operations {
287 int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); 288 int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
288 int (*get_icount)(struct tty_struct *tty, 289 int (*get_icount)(struct tty_struct *tty,
289 struct serial_icounter_struct *icount); 290 struct serial_icounter_struct *icount);
291 int (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
292 int (*set_serial)(struct tty_struct *tty, struct serial_struct *p);
290 void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m); 293 void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m);
291#ifdef CONFIG_CONSOLE_POLL 294#ifdef CONFIG_CONSOLE_POLL
292 int (*poll_init)(struct tty_driver *driver, int line, char *options); 295 int (*poll_init)(struct tty_driver *driver, int line, char *options);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 840894ca3fc0..b1e6043e9917 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -54,11 +54,17 @@
54 * low-level driver can "grab" an ioctl request before the line 54 * low-level driver can "grab" an ioctl request before the line
55 * discpline has a chance to see it. 55 * discpline has a chance to see it.
56 * 56 *
57 * long (*compat_ioctl)(struct tty_struct * tty, struct file * file, 57 * int (*compat_ioctl)(struct tty_struct * tty, struct file * file,
58 * unsigned int cmd, unsigned long arg); 58 * unsigned int cmd, unsigned long arg);
59 * 59 *
60 * Process ioctl calls from 32-bit process on 64-bit system 60 * Process ioctl calls from 32-bit process on 64-bit system
61 * 61 *
62 * NOTE: only ioctls that are neither "pointer to compatible
63 * structure" nor tty-generic. Something private that takes
64 * an integer or a pointer to wordsize-sensitive structure
65 * belongs here, but most of ldiscs will happily leave
66 * it NULL.
67 *
62 * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); 68 * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
63 * 69 *
64 * This function notifies the line discpline that a change has 70 * This function notifies the line discpline that a change has
@@ -184,7 +190,7 @@ struct tty_ldisc_ops {
184 const unsigned char *buf, size_t nr); 190 const unsigned char *buf, size_t nr);
185 int (*ioctl)(struct tty_struct *tty, struct file *file, 191 int (*ioctl)(struct tty_struct *tty, struct file *file,
186 unsigned int cmd, unsigned long arg); 192 unsigned int cmd, unsigned long arg);
187 long (*compat_ioctl)(struct tty_struct *tty, struct file *file, 193 int (*compat_ioctl)(struct tty_struct *tty, struct file *file,
188 unsigned int cmd, unsigned long arg); 194 unsigned int cmd, unsigned long arg);
189 void (*set_termios)(struct tty_struct *tty, struct ktermios *old); 195 void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
190 __poll_t (*poll)(struct tty_struct *, struct file *, 196 __poll_t (*poll)(struct tty_struct *, struct file *,
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 409c845d4cd3..422b1c01ee0d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
172static __always_inline __must_check 172static __always_inline __must_check
173size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) 173size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
174{ 174{
175 if (unlikely(!check_copy_size(addr, bytes, false))) 175 if (unlikely(!check_copy_size(addr, bytes, true)))
176 return 0; 176 return 0;
177 else 177 else
178 return _copy_to_iter_mcsafe(addr, bytes, i); 178 return _copy_to_iter_mcsafe(addr, bytes, i);
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6f8b68cd460f..a3cd7cb67a69 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -133,6 +133,7 @@ extern void uio_event_notify(struct uio_info *info);
133#define UIO_MEM_PHYS 1 133#define UIO_MEM_PHYS 1
134#define UIO_MEM_LOGICAL 2 134#define UIO_MEM_LOGICAL 2
135#define UIO_MEM_VIRTUAL 3 135#define UIO_MEM_VIRTUAL 3
136#define UIO_MEM_IOVA 4
136 137
137/* defines for uio_port->porttype */ 138/* defines for uio_port->porttype */
138#define UIO_PORT_NONE 0 139#define UIO_PORT_NONE 0
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 5c812acbb80a..235f51b62c71 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -44,6 +44,7 @@ struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
44 int (*init)(struct subprocess_info *info, struct cred *new), 44 int (*init)(struct subprocess_info *info, struct cred *new),
45 void (*cleanup)(struct subprocess_info *), void *data); 45 void (*cleanup)(struct subprocess_info *), void *data);
46struct umh_info { 46struct umh_info {
47 const char *cmdline;
47 struct file *pipe_to_umh; 48 struct file *pipe_to_umh;
48 struct file *pipe_from_umh; 49 struct file *pipe_from_umh;
49 pid_t pid; 50 pid_t pid;
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 07f99362bc90..63758c399e4e 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -77,6 +77,12 @@ struct ci_hdrc_platform_data {
77 struct ci_hdrc_cable vbus_extcon; 77 struct ci_hdrc_cable vbus_extcon;
78 struct ci_hdrc_cable id_extcon; 78 struct ci_hdrc_cable id_extcon;
79 u32 phy_clkgate_delay_us; 79 u32 phy_clkgate_delay_us;
80
81 /* pins */
82 struct pinctrl *pctl;
83 struct pinctrl_state *pins_default;
84 struct pinctrl_state *pins_host;
85 struct pinctrl_state *pins_device;
80}; 86};
81 87
82/* Default offset of capability registers */ 88/* Default offset of capability registers */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 106551a5616e..1c19f77ed541 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -285,6 +285,8 @@ struct usb_serial_driver {
285 int (*write_room)(struct tty_struct *tty); 285 int (*write_room)(struct tty_struct *tty);
286 int (*ioctl)(struct tty_struct *tty, 286 int (*ioctl)(struct tty_struct *tty,
287 unsigned int cmd, unsigned long arg); 287 unsigned int cmd, unsigned long arg);
288 int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
289 int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
288 void (*set_termios)(struct tty_struct *tty, 290 void (*set_termios)(struct tty_struct *tty,
289 struct usb_serial_port *port, struct ktermios *old); 291 struct usb_serial_port *port, struct ktermios *old);
290 void (*break_ctl)(struct tty_struct *tty, int break_state); 292 void (*break_ctl)(struct tty_struct *tty, int break_state);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index e2ec3582e549..d8860f2d0976 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -28,7 +28,7 @@ struct usbnet {
28 /* housekeeping */ 28 /* housekeeping */
29 struct usb_device *udev; 29 struct usb_device *udev;
30 struct usb_interface *intf; 30 struct usb_interface *intf;
31 struct driver_info *driver_info; 31 const struct driver_info *driver_info;
32 const char *driver_name; 32 const char *driver_name;
33 void *driver_priv; 33 void *driver_priv;
34 wait_queue_head_t wait; 34 wait_queue_head_t wait;
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index a34539b7f750..7e6ac0114d55 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
133 * @can_switch: check if the device is in a position to switch now. 133 * @can_switch: check if the device is in a position to switch now.
134 * Mandatory. The client should return false if a user space process 134 * Mandatory. The client should return false if a user space process
135 * has one of its device files open 135 * has one of its device files open
136 * @gpu_bound: notify the client id to audio client when the GPU is bound.
136 * 137 *
137 * Client callbacks. A client can be either a GPU or an audio device on a GPU. 138 * Client callbacks. A client can be either a GPU or an audio device on a GPU.
138 * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be 139 * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
139 * set to NULL. For audio clients, the @reprobe member is bogus. 140 * set to NULL. For audio clients, the @reprobe member is bogus.
141 * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients.
140 */ 142 */
141struct vga_switcheroo_client_ops { 143struct vga_switcheroo_client_ops {
142 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); 144 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
143 void (*reprobe)(struct pci_dev *dev); 145 void (*reprobe)(struct pci_dev *dev);
144 bool (*can_switch)(struct pci_dev *dev); 146 bool (*can_switch)(struct pci_dev *dev);
147 void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
145}; 148};
146 149
147#if defined(CONFIG_VGA_SWITCHEROO) 150#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 9397628a1967..cb462f9ab7dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -5,6 +5,24 @@
5#include <linux/if_vlan.h> 5#include <linux/if_vlan.h>
6#include <uapi/linux/virtio_net.h> 6#include <uapi/linux/virtio_net.h>
7 7
8static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
9 const struct virtio_net_hdr *hdr)
10{
11 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
12 case VIRTIO_NET_HDR_GSO_TCPV4:
13 case VIRTIO_NET_HDR_GSO_UDP:
14 skb->protocol = cpu_to_be16(ETH_P_IP);
15 break;
16 case VIRTIO_NET_HDR_GSO_TCPV6:
17 skb->protocol = cpu_to_be16(ETH_P_IPV6);
18 break;
19 default:
20 return -EINVAL;
21 }
22
23 return 0;
24}
25
8static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, 26static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
9 const struct virtio_net_hdr *hdr, 27 const struct virtio_net_hdr *hdr,
10 bool little_endian) 28 bool little_endian)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5c7f010676a7..47a3441cf4c4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
105#ifdef CONFIG_DEBUG_VM_VMACACHE 105#ifdef CONFIG_DEBUG_VM_VMACACHE
106 VMACACHE_FIND_CALLS, 106 VMACACHE_FIND_CALLS,
107 VMACACHE_FIND_HITS, 107 VMACACHE_FIND_HITS,
108 VMACACHE_FULL_FLUSHES,
109#endif 108#endif
110#ifdef CONFIG_SWAP 109#ifdef CONFIG_SWAP
111 SWAP_RA, 110 SWAP_RA,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index 3e9a963edd6a..6fce268a4588 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
10 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); 10 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
11} 11}
12 12
13extern void vmacache_flush_all(struct mm_struct *mm);
14extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); 13extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
15extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, 14extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
16 unsigned long addr); 15 unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
24static inline void vmacache_invalidate(struct mm_struct *mm) 23static inline void vmacache_invalidate(struct mm_struct *mm)
25{ 24{
26 mm->vmacache_seqnum++; 25 mm->vmacache_seqnum++;
27
28 /* deal with overflows */
29 if (unlikely(mm->vmacache_seqnum == 0))
30 vmacache_flush_all(mm);
31} 26}
32 27
33#endif /* __LINUX_VMACACHE_H */ 28#endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 3fd07912909c..8dc77e40bc03 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -135,13 +135,6 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
135 int deflt); 135 int deflt);
136int vty_init(const struct file_operations *console_fops); 136int vty_init(const struct file_operations *console_fops);
137 137
138static inline bool vt_force_oops_output(struct vc_data *vc)
139{
140 if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0)
141 return true;
142 return false;
143}
144
145extern char vt_dont_switch; 138extern char vt_dont_switch;
146extern int default_utf8; 139extern int default_utf8;
147extern int global_cursor_default; 140extern int global_cursor_default;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d9f131ecf708..ed7c122cb31f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1052,10 +1052,9 @@ do { \
1052 __ret; \ 1052 __ret; \
1053}) 1053})
1054 1054
1055#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \ 1055#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
1056 lock, timeout) \
1057 ___wait_event(wq_head, ___wait_cond_timeout(condition), \ 1056 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
1058 TASK_INTERRUPTIBLE, 0, timeout, \ 1057 state, 0, timeout, \
1059 spin_unlock_irq(&lock); \ 1058 spin_unlock_irq(&lock); \
1060 __ret = schedule_timeout(__ret); \ 1059 __ret = schedule_timeout(__ret); \
1061 spin_lock_irq(&lock)); 1060 spin_lock_irq(&lock));
@@ -1089,8 +1088,19 @@ do { \
1089({ \ 1088({ \
1090 long __ret = timeout; \ 1089 long __ret = timeout; \
1091 if (!___wait_cond_timeout(condition)) \ 1090 if (!___wait_cond_timeout(condition)) \
1092 __ret = __wait_event_interruptible_lock_irq_timeout( \ 1091 __ret = __wait_event_lock_irq_timeout( \
1093 wq_head, condition, lock, timeout); \ 1092 wq_head, condition, lock, timeout, \
1093 TASK_INTERRUPTIBLE); \
1094 __ret; \
1095})
1096
1097#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
1098({ \
1099 long __ret = timeout; \
1100 if (!___wait_cond_timeout(condition)) \
1101 __ret = __wait_event_lock_irq_timeout( \
1102 wq_head, condition, lock, timeout, \
1103 TASK_UNINTERRUPTIBLE); \
1094 __ret; \ 1104 __ret; \
1095}) 1105})
1096 1106
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fdfd04e348f6..738a0c24874f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
246 * 246 *
247 * @bio is a part of the writeback in progress controlled by @wbc. Perform 247 * @bio is a part of the writeback in progress controlled by @wbc. Perform
248 * writeback specific initialization. This is used to apply the cgroup 248 * writeback specific initialization. This is used to apply the cgroup
249 * writeback context. 249 * writeback context. Must be called after the bio has been associated with
250 * a device.
250 */ 251 */
251static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) 252static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
252{ 253{
@@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
257 * regular writeback instead of writing things out itself. 258 * regular writeback instead of writing things out itself.
258 */ 259 */
259 if (wbc->wb) 260 if (wbc->wb)
260 bio_associate_blkcg(bio, wbc->wb->blkcg_css); 261 bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
261} 262}
262 263
263#else /* CONFIG_CGROUP_WRITEBACK */ 264#else /* CONFIG_CGROUP_WRITEBACK */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 2dfc8006fe64..d9514928ddac 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -4,10 +4,432 @@
4/* 4/*
5 * eXtensible Arrays 5 * eXtensible Arrays
6 * Copyright (c) 2017 Microsoft Corporation 6 * Copyright (c) 2017 Microsoft Corporation
7 * Author: Matthew Wilcox <mawilcox@microsoft.com> 7 * Author: Matthew Wilcox <willy@infradead.org>
8 *
9 * See Documentation/core-api/xarray.rst for how to use the XArray.
8 */ 10 */
9 11
12#include <linux/bug.h>
13#include <linux/compiler.h>
14#include <linux/gfp.h>
15#include <linux/kconfig.h>
16#include <linux/kernel.h>
17#include <linux/rcupdate.h>
10#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/types.h>
20
21/*
22 * The bottom two bits of the entry determine how the XArray interprets
23 * the contents:
24 *
25 * 00: Pointer entry
26 * 10: Internal entry
27 * x1: Value entry or tagged pointer
28 *
29 * Attempting to store internal entries in the XArray is a bug.
30 *
31 * Most internal entries are pointers to the next node in the tree.
32 * The following internal entries have a special meaning:
33 *
34 * 0-62: Sibling entries
35 * 256: Zero entry
36 * 257: Retry entry
37 *
38 * Errors are also represented as internal entries, but use the negative
39 * space (-4094 to -2). They're never stored in the slots array; only
40 * returned by the normal API.
41 */
42
43#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1)
44
45/**
46 * xa_mk_value() - Create an XArray entry from an integer.
47 * @v: Value to store in XArray.
48 *
49 * Context: Any context.
50 * Return: An entry suitable for storing in the XArray.
51 */
52static inline void *xa_mk_value(unsigned long v)
53{
54 WARN_ON((long)v < 0);
55 return (void *)((v << 1) | 1);
56}
57
58/**
59 * xa_to_value() - Get value stored in an XArray entry.
60 * @entry: XArray entry.
61 *
62 * Context: Any context.
63 * Return: The value stored in the XArray entry.
64 */
65static inline unsigned long xa_to_value(const void *entry)
66{
67 return (unsigned long)entry >> 1;
68}
69
70/**
71 * xa_is_value() - Determine if an entry is a value.
72 * @entry: XArray entry.
73 *
74 * Context: Any context.
75 * Return: True if the entry is a value, false if it is a pointer.
76 */
77static inline bool xa_is_value(const void *entry)
78{
79 return (unsigned long)entry & 1;
80}
81
82/**
83 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
84 * @p: Plain pointer.
85 * @tag: Tag value (0, 1 or 3).
86 *
87 * If the user of the XArray prefers, they can tag their pointers instead
88 * of storing value entries. Three tags are available (0, 1 and 3).
89 * These are distinct from the xa_mark_t as they are not replicated up
90 * through the array and cannot be searched for.
91 *
92 * Context: Any context.
93 * Return: An XArray entry.
94 */
95static inline void *xa_tag_pointer(void *p, unsigned long tag)
96{
97 return (void *)((unsigned long)p | tag);
98}
99
100/**
101 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
102 * @entry: XArray entry.
103 *
104 * If you have stored a tagged pointer in the XArray, call this function
105 * to get the untagged version of the pointer.
106 *
107 * Context: Any context.
108 * Return: A pointer.
109 */
110static inline void *xa_untag_pointer(void *entry)
111{
112 return (void *)((unsigned long)entry & ~3UL);
113}
114
115/**
116 * xa_pointer_tag() - Get the tag stored in an XArray entry.
117 * @entry: XArray entry.
118 *
119 * If you have stored a tagged pointer in the XArray, call this function
120 * to get the tag of that pointer.
121 *
122 * Context: Any context.
123 * Return: A tag.
124 */
125static inline unsigned int xa_pointer_tag(void *entry)
126{
127 return (unsigned long)entry & 3UL;
128}
129
130/*
131 * xa_mk_internal() - Create an internal entry.
132 * @v: Value to turn into an internal entry.
133 *
134 * Context: Any context.
135 * Return: An XArray internal entry corresponding to this value.
136 */
137static inline void *xa_mk_internal(unsigned long v)
138{
139 return (void *)((v << 2) | 2);
140}
141
142/*
143 * xa_to_internal() - Extract the value from an internal entry.
144 * @entry: XArray entry.
145 *
146 * Context: Any context.
147 * Return: The value which was stored in the internal entry.
148 */
149static inline unsigned long xa_to_internal(const void *entry)
150{
151 return (unsigned long)entry >> 2;
152}
153
154/*
155 * xa_is_internal() - Is the entry an internal entry?
156 * @entry: XArray entry.
157 *
158 * Context: Any context.
159 * Return: %true if the entry is an internal entry.
160 */
161static inline bool xa_is_internal(const void *entry)
162{
163 return ((unsigned long)entry & 3) == 2;
164}
165
166/**
167 * xa_is_err() - Report whether an XArray operation returned an error
168 * @entry: Result from calling an XArray function
169 *
170 * If an XArray operation cannot complete an operation, it will return
171 * a special value indicating an error. This function tells you
172 * whether an error occurred; xa_err() tells you which error occurred.
173 *
174 * Context: Any context.
175 * Return: %true if the entry indicates an error.
176 */
177static inline bool xa_is_err(const void *entry)
178{
179 return unlikely(xa_is_internal(entry));
180}
181
182/**
183 * xa_err() - Turn an XArray result into an errno.
184 * @entry: Result from calling an XArray function.
185 *
186 * If an XArray operation cannot complete an operation, it will return
187 * a special pointer value which encodes an errno. This function extracts
188 * the errno from the pointer value, or returns 0 if the pointer does not
189 * represent an errno.
190 *
191 * Context: Any context.
192 * Return: A negative errno or 0.
193 */
194static inline int xa_err(void *entry)
195{
196 /* xa_to_internal() would not do sign extension. */
197 if (xa_is_err(entry))
198 return (long)entry >> 2;
199 return 0;
200}
201
202typedef unsigned __bitwise xa_mark_t;
203#define XA_MARK_0 ((__force xa_mark_t)0U)
204#define XA_MARK_1 ((__force xa_mark_t)1U)
205#define XA_MARK_2 ((__force xa_mark_t)2U)
206#define XA_PRESENT ((__force xa_mark_t)8U)
207#define XA_MARK_MAX XA_MARK_2
208#define XA_FREE_MARK XA_MARK_0
209
210enum xa_lock_type {
211 XA_LOCK_IRQ = 1,
212 XA_LOCK_BH = 2,
213};
214
215/*
216 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
217 * and we remain compatible with that.
218 */
219#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
220#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
221#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
222#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
223 (__force unsigned)(mark)))
224
225#define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
226
227/**
228 * struct xarray - The anchor of the XArray.
229 * @xa_lock: Lock that protects the contents of the XArray.
230 *
231 * To use the xarray, define it statically or embed it in your data structure.
232 * It is a very small data structure, so it does not usually make sense to
233 * allocate it separately and keep a pointer to it in your data structure.
234 *
235 * You may use the xa_lock to protect your own data structures as well.
236 */
237/*
238 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
239 * If the only non-NULL entry in the array is at index 0, @xa_head is that
240 * entry. If any other entry in the array is non-NULL, @xa_head points
241 * to an @xa_node.
242 */
243struct xarray {
244 spinlock_t xa_lock;
245/* private: The rest of the data structure is not to be used directly. */
246 gfp_t xa_flags;
247 void __rcu * xa_head;
248};
249
250#define XARRAY_INIT(name, flags) { \
251 .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
252 .xa_flags = flags, \
253 .xa_head = NULL, \
254}
255
256/**
257 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
258 * @name: A string that names your XArray.
259 * @flags: XA_FLAG values.
260 *
261 * This is intended for file scope definitions of XArrays. It declares
262 * and initialises an empty XArray with the chosen name and flags. It is
263 * equivalent to calling xa_init_flags() on the array, but it does the
264 * initialisation at compiletime instead of runtime.
265 */
266#define DEFINE_XARRAY_FLAGS(name, flags) \
267 struct xarray name = XARRAY_INIT(name, flags)
268
269/**
270 * DEFINE_XARRAY() - Define an XArray.
271 * @name: A string that names your XArray.
272 *
273 * This is intended for file scope definitions of XArrays. It declares
274 * and initialises an empty XArray with the chosen name. It is equivalent
275 * to calling xa_init() on the array, but it does the initialisation at
276 * compiletime instead of runtime.
277 */
278#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
279
280/**
281 * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs.
282 * @name: A string that names your XArray.
283 *
284 * This is intended for file scope definitions of allocating XArrays.
285 * See also DEFINE_XARRAY().
286 */
287#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
288
289void xa_init_flags(struct xarray *, gfp_t flags);
290void *xa_load(struct xarray *, unsigned long index);
291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
292void *xa_cmpxchg(struct xarray *, unsigned long index,
293 void *old, void *entry, gfp_t);
294int xa_reserve(struct xarray *, unsigned long index, gfp_t);
295void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
296 void *entry, gfp_t);
297bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
298void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
299void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
300void *xa_find(struct xarray *xa, unsigned long *index,
301 unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
302void *xa_find_after(struct xarray *xa, unsigned long *index,
303 unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
304unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
305 unsigned long max, unsigned int n, xa_mark_t);
306void xa_destroy(struct xarray *);
307
/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.  This is the runtime
 * equivalent of DEFINE_XARRAY().
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}
320
321/**
322 * xa_empty() - Determine if an array has any present entries.
323 * @xa: XArray.
324 *
325 * Context: Any context.
326 * Return: %true if the array contains only NULL pointers.
327 */
328static inline bool xa_empty(const struct xarray *xa)
329{
330 return xa->xa_head == NULL;
331}
332
333/**
334 * xa_marked() - Inquire whether any entry in this array has a mark set
335 * @xa: Array
336 * @mark: Mark value
337 *
338 * Context: Any context.
339 * Return: %true if any entry has this mark set.
340 */
341static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
342{
343 return xa->xa_flags & XA_FLAGS_MARK(mark);
344}
345
/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument. The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags (0 is passed below).
 *
 * Context: Process context. Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase(struct xarray *xa, unsigned long index)
{
	return xa_store(xa, index, NULL, 0);
}
362
363/**
364 * xa_insert() - Store this entry in the XArray unless another entry is
365 * already present.
366 * @xa: XArray.
367 * @index: Index into array.
368 * @entry: New entry.
369 * @gfp: Memory allocation flags.
370 *
371 * If you would rather see the existing entry in the array, use xa_cmpxchg().
372 * This function is for users who don't care what the entry is, only that
373 * one is present.
374 *
375 * Context: Process context. Takes and releases the xa_lock.
376 * May sleep if the @gfp flags permit.
377 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
378 * -ENOMEM if memory could not be allocated.
379 */
380static inline int xa_insert(struct xarray *xa, unsigned long index,
381 void *entry, gfp_t gfp)
382{
383 void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
384 if (!curr)
385 return 0;
386 if (xa_is_err(curr))
387 return xa_err(curr);
388 return -EEXIST;
389}
390
/**
 * xa_release() - Release a reserved entry.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After calling xa_reserve(), you can call this function to release the
 * reservation. If the entry at @index has been stored to, this function
 * will do nothing.
 */
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	/*
	 * cmpxchg(NULL -> NULL) clears the reservation only if nothing
	 * has been stored at @index in the meantime; no allocation is
	 * needed, hence GFP flags of 0.
	 */
	xa_cmpxchg(xa, index, NULL, NULL, 0);
}
404
/**
 * xa_for_each() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @entry: Entry retrieved from array.
 * @index: Index of @entry.  Must be an unsigned long lvalue; its address
 * is passed to xa_find()/xa_find_after().
 * @max: Maximum index to retrieve from array.
 * @filter: Selection criterion.
 *
 * Initialise @index to the lowest index you want to retrieve from the
 * array. During the iteration, @entry will have the value of the entry
 * stored in @xa at @index. The iteration will skip all entries in the
 * array which do not match @filter. You may modify @index during the
 * iteration if you want to skip or reprocess indices. It is safe to modify
 * the array during the iteration. At the end of the iteration, @entry will
 * be set to NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead. The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each(xa, entry, index, max, filter) \
	for (entry = xa_find(xa, &index, max, filter); entry; \
	     entry = xa_find_after(xa, &index, max, filter))
11 433
12#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) 434#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
13#define xa_lock(xa) spin_lock(&(xa)->xa_lock) 435#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
@@ -21,4 +443,873 @@
21#define xa_unlock_irqrestore(xa, flags) \ 443#define xa_unlock_irqrestore(xa, flags) \
22 spin_unlock_irqrestore(&(xa)->xa_lock, flags) 444 spin_unlock_irqrestore(&(xa)->xa_lock, flags)
23 445
/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock. If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards. These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 *
 * Because the lock may be dropped and re-taken, the caller must be
 * prepared for the array to have changed across any of these calls.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
460
461/**
462 * __xa_insert() - Store this entry in the XArray unless another entry is
463 * already present.
464 * @xa: XArray.
465 * @index: Index into array.
466 * @entry: New entry.
467 * @gfp: Memory allocation flags.
468 *
469 * If you would rather see the existing entry in the array, use __xa_cmpxchg().
470 * This function is for users who don't care what the entry is, only that
471 * one is present.
472 *
473 * Context: Any context. Expects xa_lock to be held on entry. May
474 * release and reacquire xa_lock if the @gfp flags permit.
475 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
476 * -ENOMEM if memory could not be allocated.
477 */
478static inline int __xa_insert(struct xarray *xa, unsigned long index,
479 void *entry, gfp_t gfp)
480{
481 void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
482 if (!curr)
483 return 0;
484 if (xa_is_err(curr))
485 return xa_err(curr);
486 return -EEXIST;
487}
488
/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * Equivalent to xa_store() of %NULL at @index: no memory is allocated,
 * so no GFP flags are needed.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *old;

	xa_lock_bh(xa);
	old = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return old;
}
512
/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * Equivalent to xa_store() of %NULL at @index: no memory is allocated,
 * so no GFP flags are needed.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *old;

	xa_lock_irq(xa);
	old = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return old;
}
536
537/**
538 * xa_alloc() - Find somewhere to store this entry in the XArray.
539 * @xa: XArray.
540 * @id: Pointer to ID.
541 * @max: Maximum ID to allocate (inclusive).
542 * @entry: New entry.
543 * @gfp: Memory allocation flags.
544 *
545 * Allocates an unused ID in the range specified by @id and @max.
546 * Updates the @id pointer with the index, then stores the entry at that
547 * index. A concurrent lookup will not see an uninitialised @id.
548 *
549 * Context: Process context. Takes and releases the xa_lock. May sleep if
550 * the @gfp flags permit.
551 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
552 * there is no more space in the XArray.
553 */
554static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
555 gfp_t gfp)
556{
557 int err;
558
559 xa_lock(xa);
560 err = __xa_alloc(xa, id, max, entry, gfp);
561 xa_unlock(xa);
562
563 return err;
564}
565
566/**
567 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
568 * @xa: XArray.
569 * @id: Pointer to ID.
570 * @max: Maximum ID to allocate (inclusive).
571 * @entry: New entry.
572 * @gfp: Memory allocation flags.
573 *
574 * Allocates an unused ID in the range specified by @id and @max.
575 * Updates the @id pointer with the index, then stores the entry at that
576 * index. A concurrent lookup will not see an uninitialised @id.
577 *
578 * Context: Process context. Takes and releases the xa_lock while
579 * disabling softirqs. May sleep if the @gfp flags permit.
580 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
581 * there is no more space in the XArray.
582 */
583static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry,
584 gfp_t gfp)
585{
586 int err;
587
588 xa_lock_bh(xa);
589 err = __xa_alloc(xa, id, max, entry, gfp);
590 xa_unlock_bh(xa);
591
592 return err;
593}
594
595/**
596 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
597 * @xa: XArray.
598 * @id: Pointer to ID.
599 * @max: Maximum ID to allocate (inclusive).
600 * @entry: New entry.
601 * @gfp: Memory allocation flags.
602 *
603 * Allocates an unused ID in the range specified by @id and @max.
604 * Updates the @id pointer with the index, then stores the entry at that
605 * index. A concurrent lookup will not see an uninitialised @id.
606 *
607 * Context: Process context. Takes and releases the xa_lock while
608 * disabling interrupts. May sleep if the @gfp flags permit.
609 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
610 * there is no more space in the XArray.
611 */
612static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
613 gfp_t gfp)
614{
615 int err;
616
617 xa_lock_irq(xa);
618 err = __xa_alloc(xa, id, max, entry, gfp);
619 xa_unlock_irq(xa);
620
621 return err;
622}
623
/* Everything below here is the Advanced API. Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
 * the best chunk size requires some tradeoffs. A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be. But that needs to be
 * balanced against the memory consumption of each node. On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
/* 16 slots per node on CONFIG_BASE_SMALL, 64 otherwise. */
#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
/* Number of longs needed for one per-chunk mark bitmap. */
#define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
643
/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		/* 'tags' is presumably the legacy radix-tree name for
		 * 'marks' -- both alias the same storage. */
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};
668
/* Debug helpers; only meaningful when XA_DEBUG is defined. */
void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
/* Dump the array, then BUG, if the condition holds. */
#define XA_BUG_ON(xa, x) do {					\
		if (x) {					\
			xa_dump(xa);				\
			BUG();					\
		}						\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {				\
		if (x) {					\
			if (node) xa_dump_node(node);		\
			BUG();					\
		}						\
	} while (0)
#else
/* Compile to nothing in non-debug builds. */
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif
689
/* Private: dereference the head slot; caller holds RCU or the xa_lock */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private: as xa_head(), but the caller must hold the xa_lock */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private: dereference slot @offset of @node; caller holds RCU or xa_lock */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private: as xa_entry(), but the caller must hold the xa_lock */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private: dereference @node's parent; caller holds RCU or the xa_lock */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private: as xa_parent(), but the caller must hold the xa_lock */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}
737
/* Private: tag a node pointer as an internal entry (low bits = 10b) */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private: strip the internal-entry tag applied by xa_mk_node() */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private: internal entries above 4096 are node pointers; smaller
 * values are presumably reserved for sibling/zero/retry entries. */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private: encode a sibling slot's offset as an internal entry */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private: recover the offset encoded by xa_mk_sibling() */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}
767
768/**
769 * xa_is_sibling() - Is the entry a sibling entry?
770 * @entry: Entry retrieved from the XArray
771 *
772 * Return: %true if the entry is a sibling entry.
773 */
774static inline bool xa_is_sibling(const void *entry)
775{
776 return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
777 (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
778}
779
/* Reserved internal entries: 256 marks a reserved (zero) slot,
 * 257 tells a lockless walker to restart from the head. */
#define XA_ZERO_ENTRY		xa_mk_internal(256)
#define XA_RETRY_ENTRY		xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a zero entry.
 */
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}
804
/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node. It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 * Implementations should not drop the xa_lock, nor re-enable
 * interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);
818
/*
 * The xa_state is opaque to its users. It contains various different pieces
 * of state involved in the current operation on the XArray. It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions. The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array). If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node. @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation. If an error occurs
 * during an operation, it is set to an %XAS_ERROR value. If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;		/* Array being operated on */
	unsigned long xa_index;		/* Target index */
	unsigned char xa_shift;		/* Shift of the entry's level */
	unsigned char xa_sibs;		/* Sibling slots for multi-index */
	unsigned char xa_offset;	/* Slot offset within xa_node */
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;	/* Node, or XAS_* sentinel/error */
	struct xa_node *xa_alloc;	/* Preallocated node from xas_nomem() */
	xa_update_node_t xa_update;	/* Node-update callback, may be NULL */
};
847
/*
 * We encode errnos in the xas->xa_node. If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 *
 * The low two bits distinguish the sentinels: errors are (errno << 2) | 2,
 * XAS_BOUNDS is 1 and XAS_RESTART is 3 -- none can collide with a real
 * node pointer.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

/* Initialiser shared by XA_STATE() and XA_STATE_ORDER(). */
#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL				\
}
867
/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.  The state starts
 * unwalked (%XAS_RESTART).
 */
#define XA_STATE(name, array, index)				\
	struct xa_state name = __XA_STATE(array, index, 0, 0)
878
/**
 * XA_STATE_ORDER() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 * @order: Order of entry.
 *
 * Declare and initialise an xa_state on the stack. This variant of
 * XA_STATE() allows you to specify the 'order' of the element you
 * want to operate on.
 */
#define XA_STATE_ORDER(name, array, index, order)		\
	struct xa_state name = __XA_STATE(array,		\
			(index >> order) << order,		\
			order - (order % XA_CHUNK_SHIFT),	\
			(1U << (order % XA_CHUNK_SHIFT)) - 1)
895
/* Convenience wrappers operating on the array underlying an xa_state. */
#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)
908
/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted. A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	/* xa_err() decodes the XA_ERROR() encoding in xa_node. */
	return xa_err(xas->xa_node);
}
919
/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should. If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	/* Overwrites any walk position; the state must be reset to reuse. */
	xas->xa_node = XA_ERROR(err);
}
933
/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Real node pointers have their low two bits clear; the XAS_*
 * sentinels and XA_ERROR() values do not.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}
955
/**
 * xas_is_node() - Does the xas point to a node?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas currently references a node.
 */
static inline bool xas_is_node(const struct xa_state *xas)
{
	return xas_valid(xas) && xas->xa_node;
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	/* NULL, the XAS_* sentinels, or an XA_ERROR() value. */
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
	/* Bit 1 is set by both XAS_RESTART (3) and XA_ERROR() (...10b). */
	return (unsigned long)node & 2;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	/* NULL (head), XAS_BOUNDS (1) and XAS_RESTART (3) are all <= 3. */
	return node <= XAS_RESTART;
}
984
/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}
999
1000/**
1001 * xas_retry() - Retry the operation if appropriate.
1002 * @xas: XArray operation state.
1003 * @entry: Entry from xarray.
1004 *
1005 * The advanced functions may sometimes return an internal entry, such as
1006 * a retry entry or a zero entry. This function sets up the @xas to restart
1007 * the walk from the head of the array if needed.
1008 *
1009 * Context: Any context.
1010 * Return: true if the operation needs to be retried.
1011 */
1012static inline bool xas_retry(struct xa_state *xas, const void *entry)
1013{
1014 if (xa_is_zero(entry))
1015 return true;
1016 if (!xa_is_retry(entry))
1017 return false;
1018 xas_reset(xas);
1019 return true;
1020}
1021
/* Out-of-line advanced API; documented at the definitions. */
void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);

/* Mark operations on the entry referenced by the xa_state. */
bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

/* Memory preallocation and iteration pause/resume helpers. */
bool xas_nomem(struct xa_state *, gfp_t);
void xas_pause(struct xa_state *);

void xas_create_range(struct xa_state *);
1037
1038/**
1039 * xas_reload() - Refetch an entry from the xarray.
1040 * @xas: XArray operation state.
1041 *
1042 * Use this function to check that a previously loaded entry still has
1043 * the same value. This is useful for the lockless pagecache lookup where
1044 * we walk the array with only the RCU lock to protect us, lock the page,
1045 * then check that the page hasn't moved since we looked it up.
1046 *
1047 * The caller guarantees that @xas is still valid. If it may be in an
1048 * error or restart state, call xas_load() instead.
1049 *
1050 * Return: The entry at this location in the xarray.
1051 */
1052static inline void *xas_reload(struct xa_state *xas)
1053{
1054 struct xa_node *node = xas->xa_node;
1055
1056 if (node)
1057 return xa_entry(xas->xa, node, xas->xa_offset);
1058 return xa_head(xas->xa);
1059}
1060
/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}
1075
/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 *
 * Without CONFIG_XARRAY_MULTI, only order-0 entries are supported and
 * this degenerates to xas_set().
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	/* Round @index down to the start of the 2^@order aligned range. */
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}
1095
/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}
1108
/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
	struct xa_node *node = xas->xa_node;
	void *entry;

	/*
	 * Fast path only applies when positioned on a slot of a
	 * bottom-level node (shift == 0) and offset agrees with index.
	 */
	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
		return xas_find(xas, max);

	do {
		if (unlikely(xas->xa_index >= max))
			return xas_find(xas, max);
		/* Crossing a chunk boundary needs a tree walk. */
		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
			return xas_find(xas, max);
		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
		/* Internal entries (nodes, siblings, retries) go slow path. */
		if (unlikely(xa_is_internal(entry)))
			return xas_find(xas, max);
		xas->xa_offset++;
		xas->xa_index++;
	} while (!entry);

	return entry;
}
1143
/* Private: find the next set mark bit in the current node's bitmap,
 * starting at (or just after, if @advance) the current offset.
 * Returns XA_CHUNK_SIZE if no bit is set at or beyond the offset. */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		/* Whole bitmap fits in one word: mask and find-first-set. */
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}
1164
1165/**
1166 * xas_next_marked() - Advance iterator to next marked entry.
1167 * @xas: XArray operation state.
1168 * @max: Highest index to return.
1169 * @mark: Mark to search for.
1170 *
1171 * xas_next_marked() is an inline function to optimise xarray traversal for
1172 * speed. It is equivalent to calling xas_find_marked(), and will call
1173 * xas_find_marked() for all the hard cases.
1174 *
1175 * Return: The next marked entry after the one currently referred to by @xas.
1176 */
1177static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
1178 xa_mark_t mark)
1179{
1180 struct xa_node *node = xas->xa_node;
1181 unsigned int offset;
1182
1183 if (unlikely(xas_not_node(node) || node->shift))
1184 return xas_find_marked(xas, max, mark);
1185 offset = xas_find_chunk(xas, true, mark);
1186 xas->xa_offset = offset;
1187 xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
1188 if (xas->xa_index > max)
1189 return NULL;
1190 if (offset == XA_CHUNK_SIZE)
1191 return xas_find_marked(xas, max, mark);
1192 return xa_entry(xas->xa, node, offset);
1193}
1194
/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
	XA_CHECK_SCHED = 4096,
};
1202
/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
	for (entry = xas_find(xas, max); entry; \
	     entry = xas_next_entry(xas, max))
1219
/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
	for (entry = xas_find_marked(xas, max, mark); entry; \
	     entry = xas_next_marked(xas, max, mark))
1237
/**
 * xas_for_each_conflict() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
 * The loop body will be executed for each entry in the XArray that lies
 * within the range specified by @xas. If the loop completes successfully,
 * any entries that lie in this range will be replaced by @entry. The caller
 * may break out of the loop; if they do so, the contents of the XArray will
 * be unchanged. The operation may fail due to an out of memory condition.
 * The caller may also call xa_set_err() to exit the loop while setting an
 * error to record the reason.
 */
#define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))
1253
/* Slow paths for xas_next()/xas_prev(); documented at their definitions. */
void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);

/**
 * xas_prev() - Move iterator to previous index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * subtracted from the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index 0, this function wraps
 * around to %ULONG_MAX.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_prev(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	/* Fast path only within a bottom-level node, not at its edge. */
	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == 0))
		return __xas_prev(xas);

	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
}
1285
1286/**
1287 * xas_next() - Move state to next index.
1288 * @xas: XArray operation state.
1289 *
1290 * If the @xas was in an error state, it will remain in an error state
1291 * and this function will return %NULL. If the @xas has never been walked,
1292 * it will have the effect of calling xas_load(). Otherwise one will be
1293 * added to the index and the state will be walked to the correct
1294 * location in the array for the next operation.
1295 *
1296 * If the iterator was referencing index %ULONG_MAX, this function wraps
1297 * around to 0.
1298 *
1299 * Return: The entry at the new index. This may be %NULL or an internal
1300 * entry.
1301 */
1302static inline void *xas_next(struct xa_state *xas)
1303{
1304 struct xa_node *node = xas->xa_node;
1305
1306 if (unlikely(xas_not_node(node) || node->shift ||
1307 xas->xa_offset == XA_CHUNK_MASK))
1308 return __xas_next(xas);
1309
1310 xas->xa_index++;
1311 xas->xa_offset++;
1312 return xa_entry(xas->xa, node, xas->xa_offset);
1313}
1314
#endif /* _LINUX_XARRAY_H */