Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/ghes.h4
-rw-r--r--include/acpi/processor.h3
-rw-r--r--include/asm-generic/atomic-instrumented.h197
-rw-r--r--include/asm-generic/atomic.h33
-rw-r--r--include/asm-generic/atomic64.h15
-rw-r--r--include/asm-generic/bitops/atomic.h188
-rw-r--r--include/asm-generic/bitops/lock.h68
-rw-r--r--include/asm-generic/pgtable.h20
-rw-r--r--include/asm-generic/qspinlock_types.h2
-rw-r--r--include/asm-generic/tlb.h18
-rw-r--r--include/crypto/dh.h4
-rw-r--r--include/crypto/drbg.h3
-rw-r--r--include/crypto/if_alg.h3
-rw-r--r--include/crypto/scatterwalk.h15
-rw-r--r--include/crypto/sha.h4
-rw-r--r--include/crypto/vmac.h63
-rw-r--r--include/drm/drmP.h18
-rw-r--r--include/drm/drm_atomic.h14
-rw-r--r--include/drm/drm_atomic_helper.h1
-rw-r--r--include/drm/drm_audio_component.h118
-rw-r--r--include/drm/drm_bridge.h48
-rw-r--r--include/drm/drm_client.h139
-rw-r--r--include/drm/drm_connector.h279
-rw-r--r--include/drm/drm_crtc.h276
-rw-r--r--include/drm/drm_debugfs_crc.h3
-rw-r--r--include/drm/drm_device.h21
-rw-r--r--include/drm/drm_dp_helper.h56
-rw-r--r--include/drm/drm_drv.h29
-rw-r--r--include/drm/drm_encoder.h16
-rw-r--r--include/drm/drm_fb_cma_helper.h6
-rw-r--r--include/drm/drm_fb_helper.h38
-rw-r--r--include/drm/drm_file.h7
-rw-r--r--include/drm/drm_fourcc.h2
-rw-r--r--include/drm/drm_mm.h34
-rw-r--r--include/drm/drm_mode_config.h36
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_modeset_helper_vtables.h17
-rw-r--r--include/drm/drm_of.h8
-rw-r--r--include/drm/drm_panel.h3
-rw-r--r--include/drm/drm_pci.h7
-rw-r--r--include/drm/drm_plane.h197
-rw-r--r--include/drm/drm_plane_helper.h6
-rw-r--r--include/drm/drm_prime.h6
-rw-r--r--include/drm/drm_print.h77
-rw-r--r--include/drm/drm_property.h4
-rw-r--r--include/drm/drm_vma_manager.h1
-rw-r--r--include/drm/drm_writeback.h136
-rw-r--r--include/drm/gpu_scheduler.h174
-rw-r--r--include/drm/i915_component.h85
-rw-r--r--include/drm/i915_drm.h4
-rw-r--r--include/drm/i915_pciids.h37
-rw-r--r--include/drm/tinydrm/tinydrm.h23
-rw-r--r--include/drm/ttm/ttm_bo_api.h25
-rw-r--r--include/drm/ttm/ttm_set_memory.h150
-rw-r--r--include/dt-bindings/clock/actions,s700-cmu.h118
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h2
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h94
-rw-r--r--include/dt-bindings/clock/axg-clkc.h4
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h1
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h9
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h46
-rw-r--r--include/dt-bindings/clock/maxim,max9485.h18
-rw-r--r--include/dt-bindings/clock/px30-cru.h389
-rw-r--r--include/dt-bindings/clock/pxa-clock.h3
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sdm845.h45
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h2
-rw-r--r--include/dt-bindings/clock/r9a06g032-sysctrl.h148
-rw-r--r--include/dt-bindings/clock/rk3399-ddr.h56
-rw-r--r--include/dt-bindings/clock/sun8i-r40-ccu.h4
-rw-r--r--include/dt-bindings/clock/sun8i-tcon-top.h11
-rw-r--r--include/dt-bindings/pinctrl/at91.h4
-rw-r--r--include/dt-bindings/pinctrl/samsung.h7
-rw-r--r--include/dt-bindings/regulator/maxim,max77802.h5
-rw-r--r--include/dt-bindings/regulator/qcom,rpmh-regulator.h36
-rw-r--r--include/dt-bindings/soc/qcom,rpmh-rsc.h14
-rw-r--r--include/linux/acpi.h28
-rw-r--r--include/linux/ascii85.h38
-rw-r--r--include/linux/atmdev.h15
-rw-r--r--include/linux/atomic.h453
-rw-r--r--include/linux/audit.h5
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/bio.h19
-rw-r--r--include/linux/bitfield.h7
-rw-r--r--include/linux/bitops.h22
-rw-r--r--include/linux/bits.h26
-rw-r--r--include/linux/blk-cgroup.h146
-rw-r--r--include/linux/blk-mq.h18
-rw-r--r--include/linux/blk_types.h27
-rw-r--r--include/linux/blkdev.h70
-rw-r--r--include/linux/bootmem.h17
-rw-r--r--include/linux/bpf-cgroup.h81
-rw-r--r--include/linux/bpf.h99
-rw-r--r--include/linux/bpf_lirc.h5
-rw-r--r--include/linux/bpf_types.h9
-rw-r--r--include/linux/bpfilter.h6
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/build-salt.h20
-rw-r--r--include/linux/can/dev.h7
-rw-r--r--include/linux/cdrom.h3
-rw-r--r--include/linux/cgroup-defs.h3
-rw-r--r--include/linux/cgroup.h30
-rw-r--r--include/linux/clk-provider.h26
-rw-r--r--include/linux/clk.h33
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h28
-rw-r--r--include/linux/compat_time.h9
-rw-r--r--include/linux/compiler-gcc.h54
-rw-r--r--include/linux/compiler_types.h18
-rw-r--r--include/linux/console.h5
-rw-r--r--include/linux/cpu.h25
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpumask.h18
-rw-r--r--include/linux/crc32poly.h20
-rw-r--r--include/linux/cred.h15
-rw-r--r--include/linux/crypto.h5
-rw-r--r--include/linux/dax.h2
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/delayacct.h2
-rw-r--r--include/linux/device.h26
-rw-r--r--include/linux/dma-buf.h21
-rw-r--r--include/linux/dma-contiguous.h2
-rw-r--r--include/linux/dma-fence.h32
-rw-r--r--include/linux/dma-mapping.h9
-rw-r--r--include/linux/dma-noncoherent.h8
-rw-r--r--include/linux/dma/pxa-dma.h9
-rw-r--r--include/linux/efi.h15
-rw-r--r--include/linux/etherdevice.h3
-rw-r--r--include/linux/eventfd.h1
-rw-r--r--include/linux/file.h8
-rw-r--r--include/linux/filter.h112
-rw-r--r--include/linux/fs.h42
-rw-r--r--include/linux/fsl/guts.h1
-rw-r--r--include/linux/fsl/ptp_qoriq.h44
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/fwnode.h2
-rw-r--r--include/linux/genhd.h14
-rw-r--r--include/linux/gpio.h2
-rw-r--r--include/linux/gpio/aspeed.h15
-rw-r--r--include/linux/gpio/consumer.h14
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/hwmon.h32
-rw-r--r--include/linux/i2c.h11
-rw-r--r--include/linux/idle_inject.h29
-rw-r--r--include/linux/ieee80211.h437
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/if_team.h18
-rw-r--r--include/linux/igmp.h2
-rw-r--r--include/linux/iio/buffer-dma.h2
-rw-r--r--include/linux/ima.h11
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/input/mt.h2
-rw-r--r--include/linux/integrity.h13
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/iomap.h47
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/irqchip/arm-gic-v3.h3
-rw-r--r--include/linux/irqdesc.h5
-rw-r--r--include/linux/jump_label.h6
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kernfs.h28
-rw-r--r--include/linux/kobject.h4
-rw-r--r--include/linux/kprobes.h53
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/leds.h36
-rw-r--r--include/linux/libata.h26
-rw-r--r--include/linux/list.h30
-rw-r--r--include/linux/lsm_hooks.h8
-rw-r--r--include/linux/marvell_phy.h2
-rw-r--r--include/linux/memblock.h76
-rw-r--r--include/linux/memcontrol.h13
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/mlx4/device.h8
-rw-r--r--include/linux/mlx5/device.h24
-rw-r--r--include/linux/mlx5/driver.h30
-rw-r--r--include/linux/mlx5/eswitch.h2
-rw-r--r--include/linux/mlx5/fs.h6
-rw-r--r--include/linux/mlx5/mlx5_ifc.h188
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h1
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h23
-rw-r--r--include/linux/mm_types.h241
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/mroute_base.h3
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/mtd/rawnand.h126
-rw-r--r--include/linux/mtd/spi-nor.h1
-rw-r--r--include/linux/mtd/spinand.h421
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h235
-rw-r--r--include/linux/netfilter.h37
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h (renamed from include/linux/netfilter/nf_osf.h)23
-rw-r--r--include/linux/netfilter_bridge.h11
-rw-r--r--include/linux/netfilter_ipv4.h11
-rw-r--r--include/linux/netfilter_ipv6.h5
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nmi.h10
-rw-r--r--include/linux/nvme.h72
-rw-r--r--include/linux/openvswitch.h5
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/percpu_ida.h83
-rw-r--r--include/linux/perf/arm_pmu.h11
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/phy.h12
-rw-r--r--include/linux/phylink.h1
-rw-r--r--include/linux/pinctrl/pinconf.h3
-rw-r--r--include/linux/platform_data/bt-nokia-h4p.h38
-rw-r--r--include/linux/platform_data/gpio-davinci.h3
-rw-r--r--include/linux/platform_data/jz4740/jz4740_nand.h34
-rw-r--r--include/linux/platform_data/media/sii9234.h24
-rw-r--r--include/linux/platform_data/mmp_dma.h4
-rw-r--r--include/linux/platform_data/mtd-orion_nand.h1
-rw-r--r--include/linux/platform_data/txx9/ndfmc.h30
-rw-r--r--include/linux/pm_domain.h21
-rw-r--r--include/linux/poll.h12
-rw-r--r--include/linux/posix-timers.h4
-rw-r--r--include/linux/printk.h10
-rw-r--r--include/linux/pti.h1
-rw-r--r--include/linux/pxa2xx_ssp.h10
-rw-r--r--include/linux/qcom_scm.h4
-rw-r--r--include/linux/qed/qed_eth_if.h6
-rw-r--r--include/linux/qed/qed_if.h15
-rw-r--r--include/linux/random.h3
-rw-r--r--include/linux/rculist.h19
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/reciprocal_div.h68
-rw-r--r--include/linux/refcount.h38
-rw-r--r--include/linux/regmap.h54
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/regulator/pfuze100.h11
-rw-r--r--include/linux/rfkill.h20
-rw-r--r--include/linux/rhashtable-types.h137
-rw-r--r--include/linux/rhashtable.h164
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rmi.h2
-rw-r--r--include/linux/rtmutex.h7
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/scatterlist.h18
-rw-r--r--include/linux/sched.h38
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/sched_clock.h5
-rw-r--r--include/linux/sctp.h7
-rw-r--r--include/linux/security.h32
-rw-r--r--include/linux/sfp.h72
-rw-r--r--include/linux/skbuff.h32
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/smpboot.h15
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h180
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h8
-rw-r--r--include/linux/spi/adi_spi3.h254
-rw-r--r--include/linux/spi/spi-mem.h18
-rw-r--r--include/linux/spi/spi_bitbang.h5
-rw-r--r--include/linux/spinlock.h58
-rw-r--r--include/linux/srcu.h17
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swait.h36
-rw-r--r--include/linux/swap.h11
-rw-r--r--include/linux/swapfile.h2
-rw-r--r--include/linux/syscalls.h26
-rw-r--r--include/linux/sysfs.h14
-rw-r--r--include/linux/t10-pi.h24
-rw-r--r--include/linux/tcp.h18
-rw-r--r--include/linux/time.h4
-rw-r--r--include/linux/time64.h1
-rw-r--r--include/linux/timekeeping.h20
-rw-r--r--include/linux/torture.h4
-rw-r--r--include/linux/tpm.h7
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/udp.h4
-rw-r--r--include/linux/uio_driver.h2
-rw-r--r--include/linux/usb/audio-v3.h19
-rw-r--r--include/linux/vga_switcheroo.h8
-rw-r--r--include/linux/virtio_config.h7
-rw-r--r--include/linux/ww_mutex.h45
-rw-r--r--include/media/cec-pin.h4
-rw-r--r--include/media/cec.h12
-rw-r--r--include/media/dvb_frontend.h49
-rw-r--r--include/media/i2c/lm3560.h1
-rw-r--r--include/media/v4l2-common.h2
-rw-r--r--include/media/v4l2-ctrls.h4
-rw-r--r--include/media/v4l2-ioctl.h15
-rw-r--r--include/media/v4l2-mediabus.h2
-rw-r--r--include/media/v4l2-mem2mem.h56
-rw-r--r--include/media/vsp1.h2
-rw-r--r--include/net/act_api.h31
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/af_ieee802154.h1
-rw-r--r--include/net/af_rxrpc.h2
-rw-r--r--include/net/af_vsock.h4
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h224
-rw-r--r--include/net/bluetooth/hci_core.h34
-rw-r--r--include/net/bluetooth/mgmt.h55
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bonding.h13
-rw-r--r--include/net/busy_poll.h16
-rw-r--r--include/net/cfg80211.h118
-rw-r--r--include/net/dcbnl.h13
-rw-r--r--include/net/devlink.h195
-rw-r--r--include/net/dsa.h3
-rw-r--r--include/net/dst.h14
-rw-r--r--include/net/flow_dissector.h21
-rw-r--r--include/net/gen_stats.h4
-rw-r--r--include/net/ieee80211_radiotap.h123
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/inet_frag.h11
-rw-r--r--include/net/inet_sock.h9
-rw-r--r--include/net/ip.h27
-rw-r--r--include/net/ip6_fib.h10
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip_tunnels.h8
-rw-r--r--include/net/ip_vs.h18
-rw-r--r--include/net/ipv6.h76
-rw-r--r--include/net/ipv6_frag.h104
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/lag.h17
-rw-r--r--include/net/llc.h5
-rw-r--r--include/net/mac80211.h64
-rw-r--r--include/net/net_namespace.h11
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h7
-rw-r--r--include/net/netfilter/nf_conntrack_core.h15
-rw-r--r--include/net/netfilter/nf_conntrack_count.h37
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h84
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h16
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h39
-rw-r--r--include/net/netfilter/nf_flow_table.h2
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_tables.h5
-rw-r--r--include/net/netfilter/nf_tables_core.h13
-rw-r--r--include/net/netfilter/nf_tproxy.h12
-rw-r--r--include/net/netns/hash.h7
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/pkt_cls.h30
-rw-r--r--include/net/pkt_sched.h7
-rw-r--r--include/net/sch_generic.h70
-rw-r--r--include/net/sctp/sctp.h3
-rw-r--r--include/net/sctp/structs.h52
-rw-r--r--include/net/seg6.h2
-rw-r--r--include/net/seg6_hmac.h2
-rw-r--r--include/net/seg6_local.h4
-rw-r--r--include/net/smc.h65
-rw-r--r--include/net/sock.h81
-rw-r--r--include/net/sock_reuseport.h19
-rw-r--r--include/net/tc_act/tc_csum.h1
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_skbedit.h37
-rw-r--r--include/net/tc_act/tc_tunnel_key.h1
-rw-r--r--include/net/tcp.h74
-rw-r--r--include/net/tls.h92
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/udp.h6
-rw-r--r--include/net/udp_tunnel.h6
-rw-r--r--include/net/xdp.h20
-rw-r--r--include/net/xdp_sock.h4
-rw-r--r--include/net/xfrm.h61
-rw-r--r--include/rdma/ib_verbs.h13
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/scsi/scsi_cmnd.h13
-rw-r--r--include/scsi/scsi_device.h14
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/soc/qcom/rpmh.h51
-rw-r--r--include/soc/qcom/tcs.h56
-rw-r--r--include/sound/ac97/codec.h8
-rw-r--r--include/sound/ac97/compat.h9
-rw-r--r--include/sound/ac97/controller.h8
-rw-r--r--include/sound/ac97/regs.h20
-rw-r--r--include/sound/ac97_codec.h25
-rw-r--r--include/sound/compress_driver.h21
-rw-r--r--include/sound/dmaengine_pcm.h14
-rw-r--r--include/sound/hda_component.h61
-rw-r--r--include/sound/hda_i915.h37
-rw-r--r--include/sound/hdaudio.h65
-rw-r--r--include/sound/hdaudio_ext.h123
-rw-r--r--include/sound/memalloc.h18
-rw-r--r--include/sound/pcm.h7
-rw-r--r--include/sound/pcm_params.h10
-rw-r--r--include/sound/pxa2xx-lib.h13
-rw-r--r--include/sound/rt5682.h40
-rw-r--r--include/sound/sb16_csp.h2
-rw-r--r--include/sound/seq_midi_event.h6
-rw-r--r--include/sound/seq_virmidi.h3
-rw-r--r--include/sound/sh_fsi.h13
-rw-r--r--include/sound/simple_card.h7
-rw-r--r--include/sound/simple_card_utils.h23
-rw-r--r--include/sound/soc-acpi-intel-match.h19
-rw-r--r--include/sound/soc-acpi.h13
-rw-r--r--include/sound/soc-dai.h15
-rw-r--r--include/sound/soc-dapm.h11
-rw-r--r--include/sound/soc-dpcm.h7
-rw-r--r--include/sound/soc-topology.h37
-rw-r--r--include/sound/soc.h31
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/target/target_core_backend.h6
-rw-r--r--include/target/target_core_base.h16
-rw-r--r--include/target/target_core_fabric.h10
-rw-r--r--include/trace/events/btrfs.h3
-rw-r--r--include/trace/events/clk.h36
-rw-r--r--include/trace/events/fib.h2
-rw-r--r--include/trace/events/filelock.h5
-rw-r--r--include/trace/events/net.h7
-rw-r--r--include/trace/events/power.h25
-rw-r--r--include/trace/events/rcu.h112
-rw-r--r--include/trace/events/rxrpc.h129
-rw-r--r--include/trace/events/sock.h30
-rw-r--r--include/uapi/asm-generic/socket.h3
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h27
-rw-r--r--include/uapi/drm/drm.h9
-rw-r--r--include/uapi/drm/drm_fourcc.h176
-rw-r--r--include/uapi/drm/drm_mode.h8
-rw-r--r--include/uapi/drm/vmwgfx_drm.h166
-rw-r--r--include/uapi/linux/aio_abi.h6
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/bcache.h4
-rw-r--r--include/uapi/linux/blkzoned.h2
-rw-r--r--include/uapi/linux/bpf.h130
-rw-r--r--include/uapi/linux/btf.h2
-rw-r--r--include/uapi/linux/can.h2
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/dcbnl.h3
-rw-r--r--include/uapi/linux/devlink.h42
-rw-r--r--include/uapi/linux/dvb/audio.h37
-rw-r--r--include/uapi/linux/dvb/video.h58
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/errqueue.h4
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/if_link.h17
-rw-r--r--include/uapi/linux/ila.h1
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/kfd_ioctl.h33
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/l2tp.h15
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/media.h46
-rw-r--r--include/uapi/linux/mii.h1
-rw-r--r--include/uapi/linux/mroute.h2
-rw-r--r--include/uapi/linux/nbd.h3
-rw-r--r--include/uapi/linux/net_tstamp.h18
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h124
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_osf.h (renamed from include/uapi/linux/netfilter/nf_osf.h)32
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h22
-rw-r--r--include/uapi/linux/netfilter_bridge.h11
-rw-r--r--include/uapi/linux/nl80211.h102
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/pkt_cls.h41
-rw-r--r--include/uapi/linux/pkt_sched.h150
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/rds.h69
-rw-r--r--include/uapi/linux/rseq.h102
-rw-r--r--include/uapi/linux/rtnetlink.h7
-rw-r--r--include/uapi/linux/sctp.h5
-rw-r--r--include/uapi/linux/smc_diag.h25
-rw-r--r--include/uapi/linux/snmp.h3
-rw-r--r--include/uapi/linux/sysctl.h3
-rw-r--r--include/uapi/linux/target_core_user.h4
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h9
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h28
-rw-r--r--include/uapi/linux/tcp.h14
-rw-r--r--include/uapi/linux/time.h7
-rw-r--r--include/uapi/linux/tipc_netlink.h14
-rw-r--r--include/uapi/linux/types_32_64.h50
-rw-r--r--include/uapi/linux/usb/audio.h49
-rw-r--r--include/uapi/linux/uvcvideo.h2
-rw-r--r--include/uapi/linux/v4l2-controls.h20
-rw-r--r--include/uapi/linux/v4l2-subdev.h4
-rw-r--r--include/uapi/linux/vhost.h18
-rw-r--r--include/uapi/linux/videodev2.h8
-rw-r--r--include/uapi/linux/xfrm.h5
-rw-r--r--include/uapi/xen/gntdev.h106
-rw-r--r--include/video/mipi_display.h3
-rw-r--r--include/xen/grant_table.h21
-rw-r--r--include/xen/mem-reservation.h59
-rw-r--r--include/xen/xen.h6
493 files changed, 11827 insertions, 3541 deletions
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 48d84f0d9547..88072c92ace2 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20180531
+#define ACPI_CA_VERSION                 0x20180629
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 1624e2be485c..82cb4eb225a4 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -118,6 +118,10 @@ static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata)
 	(void *)section - (void *)(estatus + 1) < estatus->data_length;	\
 	section = acpi_hest_get_next(section))
 
+#ifdef CONFIG_ACPI_APEI_SEA
 int ghes_notify_sea(void);
+#else
+static inline int ghes_notify_sea(void) { return -ENOENT; }
+#endif
 
 #endif /* GHES_H */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 40a916efd7c0..1194a4c78d55 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
 {
 	return;
 }
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
 								int event_flag)
 {
 	static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
 		       "Consider compiling CPUfreq support into your kernel.\n");
 		printout = 0;
 	}
-	return 0;
 }
 static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 {
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index ec07f23678ea..0d4b1d3dbc1e 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
84} 84}
85#endif 85#endif
86 86
87static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) 87#ifdef arch_atomic_fetch_add_unless
88#define atomic_fetch_add_unless atomic_fetch_add_unless
89static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
88{ 90{
89 kasan_check_write(v, sizeof(*v)); 91 kasan_check_write(v, sizeof(*v));
90 return __arch_atomic_add_unless(v, a, u); 92 return arch_atomic_fetch_add_unless(v, a, u);
91} 93}
94#endif
92 95
93 96#ifdef arch_atomic64_fetch_add_unless
94static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) 97#define atomic64_fetch_add_unless atomic64_fetch_add_unless
98static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
95{ 99{
96 kasan_check_write(v, sizeof(*v)); 100 kasan_check_write(v, sizeof(*v));
97 return arch_atomic64_add_unless(v, a, u); 101 return arch_atomic64_fetch_add_unless(v, a, u);
98} 102}
103#endif
99 104
105#ifdef arch_atomic_inc
106#define atomic_inc atomic_inc
100static __always_inline void atomic_inc(atomic_t *v) 107static __always_inline void atomic_inc(atomic_t *v)
101{ 108{
102 kasan_check_write(v, sizeof(*v)); 109 kasan_check_write(v, sizeof(*v));
103 arch_atomic_inc(v); 110 arch_atomic_inc(v);
104} 111}
112#endif
105 113
114#ifdef arch_atomic64_inc
115#define atomic64_inc atomic64_inc
106static __always_inline void atomic64_inc(atomic64_t *v) 116static __always_inline void atomic64_inc(atomic64_t *v)
107{ 117{
108 kasan_check_write(v, sizeof(*v)); 118 kasan_check_write(v, sizeof(*v));
109 arch_atomic64_inc(v); 119 arch_atomic64_inc(v);
110} 120}
121#endif
111 122
123#ifdef arch_atomic_dec
124#define atomic_dec atomic_dec
112static __always_inline void atomic_dec(atomic_t *v) 125static __always_inline void atomic_dec(atomic_t *v)
113{ 126{
114 kasan_check_write(v, sizeof(*v)); 127 kasan_check_write(v, sizeof(*v));
115 arch_atomic_dec(v); 128 arch_atomic_dec(v);
116} 129}
130#endif
117 131
132#ifdef atch_atomic64_dec
133#define atomic64_dec
118static __always_inline void atomic64_dec(atomic64_t *v) 134static __always_inline void atomic64_dec(atomic64_t *v)
119{ 135{
120 kasan_check_write(v, sizeof(*v)); 136 kasan_check_write(v, sizeof(*v));
121 arch_atomic64_dec(v); 137 arch_atomic64_dec(v);
122} 138}
139#endif
123 140
124static __always_inline void atomic_add(int i, atomic_t *v) 141static __always_inline void atomic_add(int i, atomic_t *v)
125{ 142{
@@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
181 arch_atomic64_xor(i, v); 198 arch_atomic64_xor(i, v);
182} 199}
183 200
201#ifdef arch_atomic_inc_return
202#define atomic_inc_return atomic_inc_return
184static __always_inline int atomic_inc_return(atomic_t *v) 203static __always_inline int atomic_inc_return(atomic_t *v)
185{ 204{
186 kasan_check_write(v, sizeof(*v)); 205 kasan_check_write(v, sizeof(*v));
187 return arch_atomic_inc_return(v); 206 return arch_atomic_inc_return(v);
188} 207}
208#endif
189 209
210#ifdef arch_atomic64_in_return
211#define atomic64_inc_return atomic64_inc_return
190static __always_inline s64 atomic64_inc_return(atomic64_t *v) 212static __always_inline s64 atomic64_inc_return(atomic64_t *v)
191{ 213{
192 kasan_check_write(v, sizeof(*v)); 214 kasan_check_write(v, sizeof(*v));
193 return arch_atomic64_inc_return(v); 215 return arch_atomic64_inc_return(v);
194} 216}
217#endif
195 218
219#ifdef arch_atomic_dec_return
220#define atomic_dec_return atomic_dec_return
196static __always_inline int atomic_dec_return(atomic_t *v) 221static __always_inline int atomic_dec_return(atomic_t *v)
197{ 222{
198 kasan_check_write(v, sizeof(*v)); 223 kasan_check_write(v, sizeof(*v));
199 return arch_atomic_dec_return(v); 224 return arch_atomic_dec_return(v);
200} 225}
226#endif
201 227
228#ifdef arch_atomic64_dec_return
229#define atomic64_dec_return atomic64_dec_return
202static __always_inline s64 atomic64_dec_return(atomic64_t *v) 230static __always_inline s64 atomic64_dec_return(atomic64_t *v)
203{ 231{
204 kasan_check_write(v, sizeof(*v)); 232 kasan_check_write(v, sizeof(*v));
205 return arch_atomic64_dec_return(v); 233 return arch_atomic64_dec_return(v);
206} 234}
235#endif
207 236
208static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v) 237#ifdef arch_atomic64_inc_not_zero
238#define atomic64_inc_not_zero atomic64_inc_not_zero
239static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
209{ 240{
210 kasan_check_write(v, sizeof(*v)); 241 kasan_check_write(v, sizeof(*v));
211 return arch_atomic64_inc_not_zero(v); 242 return arch_atomic64_inc_not_zero(v);
212} 243}
244#endif
213 245
246#ifdef arch_atomic64_dec_if_positive
247#define atomic64_dec_if_positive atomic64_dec_if_positive
214static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) 248static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
215{ 249{
216 kasan_check_write(v, sizeof(*v)); 250 kasan_check_write(v, sizeof(*v));
217 return arch_atomic64_dec_if_positive(v); 251 return arch_atomic64_dec_if_positive(v);
218} 252}
253#endif
219 254
255#ifdef arch_atomic_dec_and_test
256#define atomic_dec_and_test atomic_dec_and_test
220static __always_inline bool atomic_dec_and_test(atomic_t *v) 257static __always_inline bool atomic_dec_and_test(atomic_t *v)
221{ 258{
222 kasan_check_write(v, sizeof(*v)); 259 kasan_check_write(v, sizeof(*v));
223 return arch_atomic_dec_and_test(v); 260 return arch_atomic_dec_and_test(v);
224} 261}
262#endif
225 263
264#ifdef arch_atomic64_dec_and_test
265#define atomic64_dec_and_test atomic64_dec_and_test
226static __always_inline bool atomic64_dec_and_test(atomic64_t *v) 266static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
227{ 267{
228 kasan_check_write(v, sizeof(*v)); 268 kasan_check_write(v, sizeof(*v));
229 return arch_atomic64_dec_and_test(v); 269 return arch_atomic64_dec_and_test(v);
230} 270}
271#endif
231 272
273#ifdef arch_atomic_inc_and_test
274#define atomic_inc_and_test atomic_inc_and_test
232static __always_inline bool atomic_inc_and_test(atomic_t *v) 275static __always_inline bool atomic_inc_and_test(atomic_t *v)
233{ 276{
234 kasan_check_write(v, sizeof(*v)); 277 kasan_check_write(v, sizeof(*v));
235 return arch_atomic_inc_and_test(v); 278 return arch_atomic_inc_and_test(v);
236} 279}
280#endif
237 281
282#ifdef arch_atomic64_inc_and_test
283#define atomic64_inc_and_test atomic64_inc_and_test
238static __always_inline bool atomic64_inc_and_test(atomic64_t *v) 284static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
239{ 285{
240 kasan_check_write(v, sizeof(*v)); 286 kasan_check_write(v, sizeof(*v));
241 return arch_atomic64_inc_and_test(v); 287 return arch_atomic64_inc_and_test(v);
242} 288}
289#endif
243 290
244static __always_inline int atomic_add_return(int i, atomic_t *v) 291static __always_inline int atomic_add_return(int i, atomic_t *v)
245{ 292{
@@ -325,152 +372,96 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
325 return arch_atomic64_fetch_xor(i, v); 372 return arch_atomic64_fetch_xor(i, v);
326} 373}
327 374
375#ifdef arch_atomic_sub_and_test
376#define atomic_sub_and_test atomic_sub_and_test
328static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) 377static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
329{ 378{
330 kasan_check_write(v, sizeof(*v)); 379 kasan_check_write(v, sizeof(*v));
331 return arch_atomic_sub_and_test(i, v); 380 return arch_atomic_sub_and_test(i, v);
332} 381}
382#endif
333 383
384#ifdef arch_atomic64_sub_and_test
385#define atomic64_sub_and_test atomic64_sub_and_test
334static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) 386static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
335{ 387{
336 kasan_check_write(v, sizeof(*v)); 388 kasan_check_write(v, sizeof(*v));
337 return arch_atomic64_sub_and_test(i, v); 389 return arch_atomic64_sub_and_test(i, v);
338} 390}
391#endif
339 392
393#ifdef arch_atomic_add_negative
394#define atomic_add_negative atomic_add_negative
340static __always_inline bool atomic_add_negative(int i, atomic_t *v) 395static __always_inline bool atomic_add_negative(int i, atomic_t *v)
341{ 396{
342 kasan_check_write(v, sizeof(*v)); 397 kasan_check_write(v, sizeof(*v));
343 return arch_atomic_add_negative(i, v); 398 return arch_atomic_add_negative(i, v);
344} 399}
400#endif
345 401
402#ifdef arch_atomic64_add_negative
403#define atomic64_add_negative atomic64_add_negative
346static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) 404static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
347{ 405{
348 kasan_check_write(v, sizeof(*v)); 406 kasan_check_write(v, sizeof(*v));
349 return arch_atomic64_add_negative(i, v); 407 return arch_atomic64_add_negative(i, v);
350} 408}
409#endif
351 410
352static __always_inline unsigned long 411#define xchg(ptr, new) \
353cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size) 412({ \
354{ 413 typeof(ptr) __ai_ptr = (ptr); \
355 kasan_check_write(ptr, size); 414 kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
356 switch (size) { 415 arch_xchg(__ai_ptr, (new)); \
357 case 1: 416})
358 return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
359 case 2:
360 return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
361 case 4:
362 return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
363 case 8:
364 BUILD_BUG_ON(sizeof(unsigned long) != 8);
365 return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
366 }
367 BUILD_BUG();
368 return 0;
369}
370 417
371#define cmpxchg(ptr, old, new) \ 418#define cmpxchg(ptr, old, new) \
372({ \ 419({ \
373 ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \ 420 typeof(ptr) __ai_ptr = (ptr); \
374 (unsigned long)(new), sizeof(*(ptr)))); \ 421 kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
422 arch_cmpxchg(__ai_ptr, (old), (new)); \
375}) 423})
376 424
377static __always_inline unsigned long
378sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
379 int size)
380{
381 kasan_check_write(ptr, size);
382 switch (size) {
383 case 1:
384 return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
385 case 2:
386 return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
387 case 4:
388 return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
389 case 8:
390 BUILD_BUG_ON(sizeof(unsigned long) != 8);
391 return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
392 }
393 BUILD_BUG();
394 return 0;
395}
396
397#define sync_cmpxchg(ptr, old, new) \ 425#define sync_cmpxchg(ptr, old, new) \
398({ \ 426({ \
399 ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \ 427 typeof(ptr) __ai_ptr = (ptr); \
400 (unsigned long)(old), (unsigned long)(new), \ 428 kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
401 sizeof(*(ptr)))); \ 429 arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
402}) 430})
403 431
404static __always_inline unsigned long
405cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
406 int size)
407{
408 kasan_check_write(ptr, size);
409 switch (size) {
410 case 1:
411 return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
412 case 2:
413 return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
414 case 4:
415 return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
416 case 8:
417 BUILD_BUG_ON(sizeof(unsigned long) != 8);
418 return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
419 }
420 BUILD_BUG();
421 return 0;
422}
423
424#define cmpxchg_local(ptr, old, new) \ 432#define cmpxchg_local(ptr, old, new) \
425({ \ 433({ \
426 ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \ 434 typeof(ptr) __ai_ptr = (ptr); \
427 (unsigned long)(old), (unsigned long)(new), \ 435 kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
428 sizeof(*(ptr)))); \ 436 arch_cmpxchg_local(__ai_ptr, (old), (new)); \
429}) 437})
430 438
431static __always_inline u64
432cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
433{
434 kasan_check_write(ptr, sizeof(*ptr));
435 return arch_cmpxchg64(ptr, old, new);
436}
437
438#define cmpxchg64(ptr, old, new) \ 439#define cmpxchg64(ptr, old, new) \
439({ \ 440({ \
440 ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \ 441 typeof(ptr) __ai_ptr = (ptr); \
441 (u64)(new))); \ 442 kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
443 arch_cmpxchg64(__ai_ptr, (old), (new)); \
442}) 444})
443 445
444static __always_inline u64
445cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
446{
447 kasan_check_write(ptr, sizeof(*ptr));
448 return arch_cmpxchg64_local(ptr, old, new);
449}
450
451#define cmpxchg64_local(ptr, old, new) \ 446#define cmpxchg64_local(ptr, old, new) \
452({ \ 447({ \
453 ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \ 448 typeof(ptr) __ai_ptr = (ptr); \
454 (u64)(new))); \ 449 kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
450 arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
455}) 451})
456 452
457/*
458 * Originally we had the following code here:
459 * __typeof__(p1) ____p1 = (p1);
460 * kasan_check_write(____p1, 2 * sizeof(*____p1));
461 * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
462 * But it leads to compilation failures (see gcc issue 72873).
463 * So for now it's left non-instrumented.
464 * There are few callers of cmpxchg_double(), so it's not critical.
465 */
466#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ 453#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
467({ \ 454({ \
468 arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \ 455 typeof(p1) __ai_p1 = (p1); \
456 kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
457 arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
469}) 458})
470 459
471#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ 460#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
472({ \ 461({ \
473 arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \ 462 typeof(p1) __ai_p1 = (p1); \
463 kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
464 arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
474}) 465})
475 466
476#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ 467#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index abe6dd9ca2a8..13324aa828eb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
 
 #include <linux/irqflags.h>
 
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-	return atomic_add_return(i, v) < 0;
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	atomic_add_return(i, v);
@@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 	atomic_sub_return(i, v);
 }
 
-static inline void atomic_inc(atomic_t *v)
-{
-	atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
-	atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
-
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
-		c = old;
-	return c;
-}
-#endif
-
 #endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 8d28eb010d0d..97b28b7f1f29 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
  */
 #ifndef _ASM_GENERIC_ATOMIC64_H
 #define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
 
 typedef struct {
 	long long counter;
@@ -50,18 +51,10 @@ ATOMIC64_OPS(xor)
 #undef ATOMIC64_OP
 
 extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int	 atomic64_add_unless(atomic64_t *v, long long a, long long u);
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 04deffaf5f7d..dd90c9792909 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,189 +2,67 @@
2#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ 2#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
3#define _ASM_GENERIC_BITOPS_ATOMIC_H_ 3#define _ASM_GENERIC_BITOPS_ATOMIC_H_
4 4
5#include <asm/types.h> 5#include <linux/atomic.h>
6#include <linux/irqflags.h> 6#include <linux/compiler.h>
7 7#include <asm/barrier.h>
8#ifdef CONFIG_SMP
9#include <asm/spinlock.h>
10#include <asm/cache.h> /* we use L1_CACHE_BYTES */
11
12/* Use an array of spinlocks for our atomic_ts.
13 * Hash function to index into a different SPINLOCK.
14 * Since "a" is usually an address, use one spinlock per cacheline.
15 */
16# define ATOMIC_HASH_SIZE 4
17# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
18
19extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
20
21/* Can't use raw_spin_lock_irq because of #include problems, so
22 * this is the substitute */
23#define _atomic_spin_lock_irqsave(l,f) do { \
24 arch_spinlock_t *s = ATOMIC_HASH(l); \
25 local_irq_save(f); \
26 arch_spin_lock(s); \
27} while(0)
28
29#define _atomic_spin_unlock_irqrestore(l,f) do { \
30 arch_spinlock_t *s = ATOMIC_HASH(l); \
31 arch_spin_unlock(s); \
32 local_irq_restore(f); \
33} while(0)
34
35
36#else
37# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
38# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
39#endif
40 8
41/* 9/*
42 * NMI events can occur at any time, including when interrupts have been 10 * Implementation of atomic bitops using atomic-fetch ops.
43 * disabled by *_irqsave(). So you can get NMI events occurring while a 11 * See Documentation/atomic_bitops.txt for details.
44 * *_bit function is holding a spin lock. If the NMI handler also wants
45 * to do bit manipulation (and they do) then you can get a deadlock
46 * between the original caller of *_bit() and the NMI handler.
47 *
48 * by Keith Owens
49 */ 12 */
50 13
51/** 14static inline void set_bit(unsigned int nr, volatile unsigned long *p)
52 * set_bit - Atomically set a bit in memory
53 * @nr: the bit to set
54 * @addr: the address to start counting from
55 *
56 * This function is atomic and may not be reordered. See __set_bit()
57 * if you do not require the atomic guarantees.
58 *
59 * Note: there are no guarantees that this function will not be reordered
60 * on non x86 architectures, so if you are writing portable code,
61 * make sure not to rely on its reordering guarantees.
62 *
63 * Note that @nr may be almost arbitrarily large; this function is not
64 * restricted to acting on a single-word quantity.
65 */
66static inline void set_bit(int nr, volatile unsigned long *addr)
67{ 15{
68 unsigned long mask = BIT_MASK(nr); 16 p += BIT_WORD(nr);
69 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); 17 atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
70 unsigned long flags;
71
72 _atomic_spin_lock_irqsave(p, flags);
73 *p |= mask;
74 _atomic_spin_unlock_irqrestore(p, flags);
75} 18}
76 19
77/** 20static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
78 * clear_bit - Clears a bit in memory
79 * @nr: Bit to clear
80 * @addr: Address to start counting from
81 *
82 * clear_bit() is atomic and may not be reordered. However, it does
83 * not contain a memory barrier, so if it is used for locking purposes,
84 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
85 * in order to ensure changes are visible on other processors.
86 */
87static inline void clear_bit(int nr, volatile unsigned long *addr)
88{ 21{
89 unsigned long mask = BIT_MASK(nr); 22 p += BIT_WORD(nr);
90 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); 23 atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
91 unsigned long flags;
92
93 _atomic_spin_lock_irqsave(p, flags);
94 *p &= ~mask;
95 _atomic_spin_unlock_irqrestore(p, flags);
96} 24}
97 25
98/** 26static inline void change_bit(unsigned int nr, volatile unsigned long *p)
99 * change_bit - Toggle a bit in memory
100 * @nr: Bit to change
101 * @addr: Address to start counting from
102 *
103 * change_bit() is atomic and may not be reordered. It may be
104 * reordered on other architectures than x86.
105 * Note that @nr may be almost arbitrarily large; this function is not
106 * restricted to acting on a single-word quantity.
107 */
108static inline void change_bit(int nr, volatile unsigned long *addr)
109{ 27{
110 unsigned long mask = BIT_MASK(nr); 28 p += BIT_WORD(nr);
111 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); 29 atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
112 unsigned long flags;
113
114 _atomic_spin_lock_irqsave(p, flags);
115 *p ^= mask;
116 _atomic_spin_unlock_irqrestore(p, flags);
117} 30}
118 31
119/** 32static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
120 * test_and_set_bit - Set a bit and return its old value
121 * @nr: Bit to set
122 * @addr: Address to count from
123 *
124 * This operation is atomic and cannot be reordered.
125 * It may be reordered on other architectures than x86.
126 * It also implies a memory barrier.
127 */
128static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
129{ 33{
34 long old;
130 unsigned long mask = BIT_MASK(nr); 35 unsigned long mask = BIT_MASK(nr);
131 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
132 unsigned long old;
133 unsigned long flags;
134 36
135 _atomic_spin_lock_irqsave(p, flags); 37 p += BIT_WORD(nr);
136 old = *p; 38 if (READ_ONCE(*p) & mask)
137 *p = old | mask; 39 return 1;
138 _atomic_spin_unlock_irqrestore(p, flags);
139 40
140 return (old & mask) != 0; 41 old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
42 return !!(old & mask);
141} 43}
142 44
143/** 45static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
144 * test_and_clear_bit - Clear a bit and return its old value
145 * @nr: Bit to clear
146 * @addr: Address to count from
147 *
148 * This operation is atomic and cannot be reordered.
149 * It can be reorderdered on other architectures other than x86.
150 * It also implies a memory barrier.
151 */
152static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
153{ 46{
47 long old;
154 unsigned long mask = BIT_MASK(nr); 48 unsigned long mask = BIT_MASK(nr);
155 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
156 unsigned long old;
157 unsigned long flags;
158 49
159 _atomic_spin_lock_irqsave(p, flags); 50 p += BIT_WORD(nr);
160 old = *p; 51 if (!(READ_ONCE(*p) & mask))
161 *p = old & ~mask; 52 return 0;
162 _atomic_spin_unlock_irqrestore(p, flags);
163 53
164 return (old & mask) != 0; 54 old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
55 return !!(old & mask);
165} 56}
166 57
167/** 58static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
168 * test_and_change_bit - Change a bit and return its old value
169 * @nr: Bit to change
170 * @addr: Address to count from
171 *
172 * This operation is atomic and cannot be reordered.
173 * It also implies a memory barrier.
174 */
175static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
176{ 59{
60 long old;
177 unsigned long mask = BIT_MASK(nr); 61 unsigned long mask = BIT_MASK(nr);
178 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
179 unsigned long old;
180 unsigned long flags;
181
182 _atomic_spin_lock_irqsave(p, flags);
183 old = *p;
184 *p = old ^ mask;
185 _atomic_spin_unlock_irqrestore(p, flags);
186 62
187 return (old & mask) != 0; 63 p += BIT_WORD(nr);
64 old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
65 return !!(old & mask);
188} 66}
189 67
190#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ 68#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 67ab280ad134..3ae021368f48 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -2,6 +2,10 @@
 #ifndef _ASM_GENERIC_BITOPS_LOCK_H_
 #define _ASM_GENERIC_BITOPS_LOCK_H_
 
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
 /**
  * test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
@@ -11,7 +15,20 @@
  * the returned value is 0.
  * It can be used to implement bit locks.
  */
-#define test_and_set_bit_lock(nr, addr)	test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+					volatile unsigned long *p)
+{
+	long old;
+	unsigned long mask = BIT_MASK(nr);
+
+	p += BIT_WORD(nr);
+	if (READ_ONCE(*p) & mask)
+		return 1;
+
+	old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+	return !!(old & mask);
+}
+
 
 /**
  * clear_bit_unlock - Clear a bit in memory, for unlock
@@ -20,11 +37,11 @@
  *
  * This operation is atomic and provides release barrier semantics.
  */
-#define clear_bit_unlock(nr, addr)	\
-do {					\
-	smp_mb__before_atomic();	\
-	clear_bit(nr, addr);		\
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+	p += BIT_WORD(nr);
+	atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
 
 /**
  * __clear_bit_unlock - Clear a bit in memory, for unlock
@@ -37,11 +54,38 @@ do { \
  *
  * See for example x86's implementation.
  */
-#define __clear_bit_unlock(nr, addr)	\
-do {					\
-	smp_mb__before_atomic();	\
-	clear_bit(nr, addr);		\
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+				      volatile unsigned long *p)
+{
+	unsigned long old;
 
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+	p += BIT_WORD(nr);
+	old = READ_ONCE(*p);
+	old &= ~BIT_MASK(nr);
+	atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters,
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+					    volatile unsigned long *p)
+{
+	long old;
+	unsigned long mask = BIT_MASK(nr);
+
+	p += BIT_WORD(nr);
+	old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+	return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
 
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f59639afaa39..a75cb371cd19 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
 int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
 #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
 {
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 {
 	return 0;
 }
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
 	return 0;
 }
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	return 0;
 }
@@ -1083,6 +1083,18 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 static inline void init_espfix_bsp(void) { }
 #endif
 
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+	return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+	return false;
+}
+#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 0763f065b975..d10f1e7d6ba8 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -63,7 +63,7 @@ typedef struct qspinlock {
 /*
  * Initializier
  */
-#define	__ARCH_SPIN_LOCK_UNLOCKED	{ .val = ATOMIC_INIT(0) }
+#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }
 
 /*
  * Bitfields in the atomic value:
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde44de8c..e811ef7b8350 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -265,34 +265,52 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
  * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
  */
 
+#ifndef pte_free_tlb
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
+#endif
 
+#ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
+#endif
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
+#endif
 
 #ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
 		__p4d_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
+#endif
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
+/*
+ * Used to flush the TLB when page tables are removed, when lazy
+ * TLB mode may cause a CPU to retain intermediate translations
+ * pointing to about-to-be-freed page table memory.
+ */
+#ifndef HAVE_TLB_FLUSH_REMOVE_TABLES
+#define tlb_flush_remove_tables(mm) do {} while (0)
+#define tlb_flush_remove_tables_local(mm) do {} while (0)
+#endif
+
 #endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 71e1bb24d79f..7e0dad94cb2b 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -29,17 +29,21 @@
29 * 29 *
30 * @key: Private DH key 30 * @key: Private DH key
31 * @p: Diffie-Hellman parameter P 31 * @p: Diffie-Hellman parameter P
32 * @q: Diffie-Hellman parameter Q
32 * @g: Diffie-Hellman generator G 33 * @g: Diffie-Hellman generator G
33 * @key_size: Size of the private DH key 34 * @key_size: Size of the private DH key
34 * @p_size: Size of DH parameter P 35 * @p_size: Size of DH parameter P
36 * @q_size: Size of DH parameter Q
35 * @g_size: Size of DH generator G 37 * @g_size: Size of DH generator G
36 */ 38 */
37struct dh { 39struct dh {
38 void *key; 40 void *key;
39 void *p; 41 void *p;
42 void *q;
40 void *g; 43 void *g;
41 unsigned int key_size; 44 unsigned int key_size;
42 unsigned int p_size; 45 unsigned int p_size;
46 unsigned int q_size;
43 unsigned int g_size; 47 unsigned int g_size;
44}; 48};
45 49
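A small sketch of filling the extended structure; leaving @q/@q_size at zero is assumed to preserve the previous P/G-only behaviour when no subgroup order is available:

	#include <crypto/dh.h>

	/* Sketch: pack DH parameters before encoding them with
	 * crypto_dh_encode_key() for a kpp setkey call. */
	static void foo_fill_dh(struct dh *params,
				void *key, unsigned int key_size,
				void *p, unsigned int p_size,
				void *q, unsigned int q_size,
				void *g, unsigned int g_size)
	{
		params->key = key;
		params->key_size = key_size;
		params->p = p;
		params->p_size = p_size;
		params->q = q;		/* may be NULL when no Q is supplied */
		params->q_size = q_size;
		params->g = g;
		params->g_size = g_size;
	}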
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8f941102af36..3fb581bf3b87 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -122,11 +122,10 @@ struct drbg_state {
122 122
123 struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ 123 struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */
124 struct skcipher_request *ctr_req; /* CTR mode request handle */ 124 struct skcipher_request *ctr_req; /* CTR mode request handle */
125 __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */
126 __u8 *ctr_null_value; /* CTR mode aligned zero buf */
127 __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ 125 __u8 *outscratchpadbuf; /* CTR mode output scratchpad */
128 __u8 *outscratchpad; /* CTR mode aligned outbuf */ 126 __u8 *outscratchpad; /* CTR mode aligned outbuf */
129 struct crypto_wait ctr_wait; /* CTR mode async wait obj */ 127 struct crypto_wait ctr_wait; /* CTR mode async wait obj */
128 struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
130 129
131 bool seeded; /* DRBG fully seeded? */ 130 bool seeded; /* DRBG fully seeded? */
132 bool pr; /* Prediction resistance enabled? */ 131 bool pr; /* Prediction resistance enabled? */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index cc414db9da0a..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
245 int offset, size_t size, int flags); 245 int offset, size_t size, int flags);
246void af_alg_free_resources(struct af_alg_async_req *areq); 246void af_alg_free_resources(struct af_alg_async_req *areq);
247void af_alg_async_cb(struct crypto_async_request *_req, int err); 247void af_alg_async_cb(struct crypto_async_request *_req, int err);
248__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events); 248__poll_t af_alg_poll(struct file *file, struct socket *sock,
249 poll_table *wait);
249struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, 250struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
250 unsigned int areqlen); 251 unsigned int areqlen);
251int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, 252int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 880e6be9e95e..a66c127a20ed 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -22,27 +22,14 @@
22#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
23 23
24static inline void scatterwalk_crypto_chain(struct scatterlist *head, 24static inline void scatterwalk_crypto_chain(struct scatterlist *head,
25 struct scatterlist *sg, 25 struct scatterlist *sg, int num)
26 int chain, int num)
27{ 26{
28 if (chain) {
29 head->length += sg->length;
30 sg = sg_next(sg);
31 }
32
33 if (sg) 27 if (sg)
34 sg_chain(head, num, sg); 28 sg_chain(head, num, sg);
35 else 29 else
36 sg_mark_end(head); 30 sg_mark_end(head);
37} 31}
38 32
39static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
40 struct scatter_walk *walk_out)
41{
42 return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
43 (int)(walk_in->offset - walk_out->offset));
44}
45
46static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) 33static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
47{ 34{
48 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; 35 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
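Call sites that previously passed the now-removed chain flag simply drop it; a sketch of an adjusted caller (names are illustrative):

	#include <crypto/scatterwalk.h>

	static void foo_link_dst(struct scatterlist dst[2], struct scatterlist *next)
	{
		/* before: scatterwalk_crypto_chain(dst, next, 0, 2); */
		scatterwalk_crypto_chain(dst, next, 2);
	}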
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 0555b571dd34..8a46202b1857 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -71,6 +71,10 @@ extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
71 71
72extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; 72extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
73 73
74extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
75
76extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
77
74struct sha1_state { 78struct sha1_state {
75 u32 state[SHA1_DIGEST_SIZE / 4]; 79 u32 state[SHA1_DIGEST_SIZE / 4];
76 u64 count; 80 u64 count;
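These constants let zero-length messages be answered without running the hash, in the same way drivers already do for the smaller SHA variants; a minimal sketch:

	#include <crypto/sha.h>
	#include <linux/string.h>

	/* Sketch: return the precomputed digest of the empty message. */
	static void foo_sha384_empty_digest(u8 *out)
	{
		memcpy(out, sha384_zero_message_hash, SHA384_DIGEST_SIZE);
	}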
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7b2fe1..000000000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Modified to interface to the Linux kernel
3 * Copyright (c) 2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */
18
19#ifndef __CRYPTO_VMAC_H
20#define __CRYPTO_VMAC_H
21
22/* --------------------------------------------------------------------------
23 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
24 * This implementation is herby placed in the public domain.
25 * The authors offers no warranty. Use at your own risk.
26 * Please send bug reports to the authors.
27 * Last modified: 17 APR 08, 1700 PDT
28 * ----------------------------------------------------------------------- */
29
30/*
31 * User definable settings.
32 */
33#define VMAC_TAG_LEN 64
34#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
35#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
36#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
37
38/*
39 * This implementation uses u32 and u64 as names for unsigned 32-
40 * and 64-bit integer types. These are defined in C99 stdint.h. The
41 * following may need adaptation if you are not running a C99 or
42 * Microsoft C environment.
43 */
44struct vmac_ctx {
45 u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
46 u64 polykey[2*VMAC_TAG_LEN/64];
47 u64 l3key[2*VMAC_TAG_LEN/64];
48 u64 polytmp[2*VMAC_TAG_LEN/64];
49 u64 cached_nonce[2];
50 u64 cached_aes[2];
51 int first_block_processed;
52};
53
54typedef u64 vmac_t;
55
56struct vmac_ctx_t {
57 struct crypto_cipher *child;
58 struct vmac_ctx __vmac_ctx;
59 u8 partial[VMAC_NHBYTES]; /* partial block */
60 int partial_size; /* size of the partial block */
61};
62
63#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f5099c12c6a6..f7a19c2a7a80 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -97,29 +97,11 @@ struct pci_controller;
97 97
98#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 98#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
99 99
100/**
101 * drm_drv_uses_atomic_modeset - check if the driver implements
102 * atomic_commit()
103 * @dev: DRM device
104 *
105 * This check is useful if drivers do not have DRIVER_ATOMIC set but
106 * have atomic modesetting internally implemented.
107 */
108static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
109{
110 return dev->mode_config.funcs->atomic_commit != NULL;
111}
112
113#define DRM_SWITCH_POWER_ON 0 100#define DRM_SWITCH_POWER_ON 0
114#define DRM_SWITCH_POWER_OFF 1 101#define DRM_SWITCH_POWER_OFF 1
115#define DRM_SWITCH_POWER_CHANGING 2 102#define DRM_SWITCH_POWER_CHANGING 2
116#define DRM_SWITCH_POWER_DYNAMIC_OFF 3 103#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
117 104
118static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
119{
120 return dev->driver->driver_features & feature;
121}
122
123/* returns true if currently okay to sleep */ 105/* returns true if currently okay to sleep */
124static inline bool drm_can_sleep(void) 106static inline bool drm_can_sleep(void)
125{ 107{
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index a57a8aa90ffb..da9d95a19580 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -160,6 +160,14 @@ struct __drm_crtcs_state {
160struct __drm_connnectors_state { 160struct __drm_connnectors_state {
161 struct drm_connector *ptr; 161 struct drm_connector *ptr;
162 struct drm_connector_state *state, *old_state, *new_state; 162 struct drm_connector_state *state, *old_state, *new_state;
163 /**
164 * @out_fence_ptr:
165 *
166 * User-provided pointer which the kernel uses to return a sync_file
167 * file descriptor. Used by writeback connectors to signal completion of
168 * the writeback.
169 */
170 s32 __user *out_fence_ptr;
163}; 171};
164 172
165struct drm_private_obj; 173struct drm_private_obj;
@@ -594,6 +602,9 @@ void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
594int __must_check 602int __must_check
595drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, 603drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
596 struct drm_crtc *crtc); 604 struct drm_crtc *crtc);
605int drm_atomic_set_writeback_fb_for_connector(
606 struct drm_connector_state *conn_state,
607 struct drm_framebuffer *fb);
597int __must_check 608int __must_check
598drm_atomic_add_affected_connectors(struct drm_atomic_state *state, 609drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
599 struct drm_crtc *crtc); 610 struct drm_crtc *crtc);
@@ -601,9 +612,6 @@ int __must_check
601drm_atomic_add_affected_planes(struct drm_atomic_state *state, 612drm_atomic_add_affected_planes(struct drm_atomic_state *state,
602 struct drm_crtc *crtc); 613 struct drm_crtc *crtc);
603 614
604void
605drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
606
607int __must_check drm_atomic_check_only(struct drm_atomic_state *state); 615int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
608int __must_check drm_atomic_commit(struct drm_atomic_state *state); 616int __must_check drm_atomic_commit(struct drm_atomic_state *state);
609int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); 617int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
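A hedged sketch of attaching a framebuffer to a writeback connector's state with the new helper, e.g. from in-kernel code that builds an atomic state by hand (locking and the commit step are elided):

	#include <linux/err.h>
	#include <drm/drm_atomic.h>

	static int foo_queue_writeback_fb(struct drm_atomic_state *state,
					  struct drm_connector *wb_conn,
					  struct drm_framebuffer *fb)
	{
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, wb_conn);
		if (IS_ERR(conn_state))
			return PTR_ERR(conn_state);

		/* Stores @fb in the connector state's writeback job. */
		return drm_atomic_set_writeback_fb_for_connector(conn_state, fb);
	}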
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 26aaba58d6ce..99e2a5297c69 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -100,6 +100,7 @@ int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state,
100int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, 100int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
101 bool nonblock); 101 bool nonblock);
102void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); 102void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
103void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state);
103void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); 104void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
104void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); 105void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
105 106
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
new file mode 100644
index 000000000000..4923b00328c1
--- /dev/null
+++ b/include/drm/drm_audio_component.h
@@ -0,0 +1,118 @@
1// SPDX-License-Identifier: MIT
2// Copyright © 2014 Intel Corporation
3
4#ifndef _DRM_AUDIO_COMPONENT_H_
5#define _DRM_AUDIO_COMPONENT_H_
6
7struct drm_audio_component;
8
9/**
10 * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
11 */
12struct drm_audio_component_ops {
13 /**
14 * @owner: drm module to pin down
15 */
16 struct module *owner;
17 /**
18 * @get_power: get the POWER_DOMAIN_AUDIO power well
19 *
20 * Request the power well to be turned on.
21 */
22 void (*get_power)(struct device *);
23 /**
24 * @put_power: put the POWER_DOMAIN_AUDIO power well
25 *
26 * Allow the power well to be turned off.
27 */
28 void (*put_power)(struct device *);
29 /**
30 * @codec_wake_override: Enable/disable codec wake signal
31 */
32 void (*codec_wake_override)(struct device *, bool enable);
33 /**
34 * @get_cdclk_freq: Get the Core Display Clock in kHz
35 */
36 int (*get_cdclk_freq)(struct device *);
37 /**
38 * @sync_audio_rate: set n/cts based on the sample rate
39 *
 40 * Called from the audio driver. After the audio driver sets the
 41 * sample rate, it calls this function to set n/cts
42 */
43 int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
44 /**
45 * @get_eld: fill the audio state and ELD bytes for the given port
46 *
47 * Called from audio driver to get the HDMI/DP audio state of the given
48 * digital port, and also fetch ELD bytes to the given pointer.
49 *
50 * It returns the byte size of the original ELD (not the actually
51 * copied size), zero for an invalid ELD, or a negative error code.
52 *
 53 * Note that the returned size may exceed @max_bytes; in that case
 54 * only part of the ELD has been copied to the buffer.
55 */
56 int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
57 unsigned char *buf, int max_bytes);
58};
59
60/**
61 * struct drm_audio_component_audio_ops - Ops implemented by hda driver, called by DRM driver
62 */
63struct drm_audio_component_audio_ops {
64 /**
65 * @audio_ptr: Pointer to be used in call to pin_eld_notify
66 */
67 void *audio_ptr;
68 /**
69 * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
70 *
71 * Called when the DRM driver has set up audio pipeline or has just
72 * begun to tear it down. This allows the HDA driver to update its
73 * status accordingly (even when the HDA controller is in power save
74 * mode).
75 */
76 void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
77 /**
78 * @pin2port: Check and convert from pin node to port number
79 *
80 * Called by HDA driver to check and convert from the pin widget node
81 * number to a port number in the graphics side.
82 */
83 int (*pin2port)(void *audio_ptr, int pin);
84 /**
85 * @master_bind: (Optional) component master bind callback
86 *
87 * Called at binding master component, for HDA codec-specific
88 * handling of dynamic binding.
89 */
90 int (*master_bind)(struct device *dev, struct drm_audio_component *);
91 /**
92 * @master_unbind: (Optional) component master unbind callback
93 *
94 * Called at unbinding master component, for HDA codec-specific
95 * handling of dynamic unbinding.
96 */
97 void (*master_unbind)(struct device *dev, struct drm_audio_component *);
98};
99
100/**
101 * struct drm_audio_component - Used for direct communication between DRM and hda drivers
102 */
103struct drm_audio_component {
104 /**
105 * @dev: DRM device, used as parameter for ops
106 */
107 struct device *dev;
108 /**
109 * @ops: Ops implemented by DRM driver, called by hda driver
110 */
111 const struct drm_audio_component_ops *ops;
112 /**
113 * @audio_ops: Ops implemented by hda driver, called by DRM driver
114 */
115 const struct drm_audio_component_audio_ops *audio_ops;
116};
117
118#endif /* _DRM_AUDIO_COMPONENT_H_ */
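A rough sketch of the DRM driver's side of this contract; how the component is matched and bound (component framework, i915/HDA glue) is outside this header, and the foo_* callbacks are placeholders:

	#include <linux/module.h>
	#include <drm/drm_audio_component.h>

	static void foo_get_power(struct device *dev)
	{
		/* turn the audio power well on */
	}

	static void foo_put_power(struct device *dev)
	{
		/* allow the audio power well to turn off again */
	}

	static const struct drm_audio_component_ops foo_audio_ops = {
		.owner	   = THIS_MODULE,
		.get_power = foo_get_power,
		.put_power = foo_put_power,
	};

	static void foo_audio_bind(struct drm_audio_component *acomp, struct device *dev)
	{
		acomp->dev = dev;	/* passed back as the first argument of the ops */
		acomp->ops = &foo_audio_ops;
	}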
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 3270fec46979..bd850747ce54 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -97,7 +97,7 @@ struct drm_bridge_funcs {
97 /** 97 /**
98 * @mode_fixup: 98 * @mode_fixup:
99 * 99 *
100 * This callback is used to validate and adjust a mode. The paramater 100 * This callback is used to validate and adjust a mode. The parameter
101 * mode is the display mode that should be fed to the next element in 101 * mode is the display mode that should be fed to the next element in
102 * the display chain, either the final &drm_connector or the next 102 * the display chain, either the final &drm_connector or the next
103 * &drm_bridge. The parameter adjusted_mode is the input mode the bridge 103 * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
@@ -178,6 +178,22 @@ struct drm_bridge_funcs {
178 * then this would be &drm_encoder_helper_funcs.mode_set. The display 178 * then this would be &drm_encoder_helper_funcs.mode_set. The display
179 * pipe (i.e. clocks and timing signals) is off when this function is 179 * pipe (i.e. clocks and timing signals) is off when this function is
180 * called. 180 * called.
181 *
182 * The adjusted_mode parameter is the mode output by the CRTC for the
183 * first bridge in the chain. It can be different from the mode
184 * parameter that contains the desired mode for the connector at the end
185 * of the bridges chain, for instance when the first bridge in the chain
186 * performs scaling. The adjusted mode is mostly useful for the first
187 * bridge in the chain and is likely irrelevant for the other bridges.
188 *
189 * For atomic drivers the adjusted_mode is the mode stored in
190 * &drm_crtc_state.adjusted_mode.
191 *
192 * NOTE:
193 *
194 * If a need arises to store and access modes adjusted for other
195 * locations than the connection between the CRTC and the first bridge,
196 * the DRM framework will have to be extended with DRM bridge states.
181 */ 197 */
182 void (*mode_set)(struct drm_bridge *bridge, 198 void (*mode_set)(struct drm_bridge *bridge,
183 struct drm_display_mode *mode, 199 struct drm_display_mode *mode,
@@ -254,27 +270,29 @@ struct drm_bridge_timings {
254 270
255/** 271/**
256 * struct drm_bridge - central DRM bridge control structure 272 * struct drm_bridge - central DRM bridge control structure
257 * @dev: DRM device this bridge belongs to
258 * @encoder: encoder to which this bridge is connected
259 * @next: the next bridge in the encoder chain
260 * @of_node: device node pointer to the bridge
261 * @list: to keep track of all added bridges
262 * @timings: the timing specification for the bridge, if any (may
263 * be NULL)
264 * @funcs: control functions
265 * @driver_private: pointer to the bridge driver's internal context
266 */ 273 */
267struct drm_bridge { 274struct drm_bridge {
275 /** @dev: DRM device this bridge belongs to */
268 struct drm_device *dev; 276 struct drm_device *dev;
277 /** @encoder: encoder to which this bridge is connected */
269 struct drm_encoder *encoder; 278 struct drm_encoder *encoder;
279 /** @next: the next bridge in the encoder chain */
270 struct drm_bridge *next; 280 struct drm_bridge *next;
271#ifdef CONFIG_OF 281#ifdef CONFIG_OF
282 /** @of_node: device node pointer to the bridge */
272 struct device_node *of_node; 283 struct device_node *of_node;
273#endif 284#endif
285 /** @list: to keep track of all added bridges */
274 struct list_head list; 286 struct list_head list;
287 /**
288 * @timings:
289 *
290 * the timing specification for the bridge, if any (may be NULL)
291 */
275 const struct drm_bridge_timings *timings; 292 const struct drm_bridge_timings *timings;
276 293 /** @funcs: control functions */
277 const struct drm_bridge_funcs *funcs; 294 const struct drm_bridge_funcs *funcs;
295 /** @driver_private: pointer to the bridge driver's internal context */
278 void *driver_private; 296 void *driver_private;
279}; 297};
280 298
@@ -285,15 +303,15 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
285 struct drm_bridge *previous); 303 struct drm_bridge *previous);
286 304
287bool drm_bridge_mode_fixup(struct drm_bridge *bridge, 305bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
288 const struct drm_display_mode *mode, 306 const struct drm_display_mode *mode,
289 struct drm_display_mode *adjusted_mode); 307 struct drm_display_mode *adjusted_mode);
290enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge, 308enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
291 const struct drm_display_mode *mode); 309 const struct drm_display_mode *mode);
292void drm_bridge_disable(struct drm_bridge *bridge); 310void drm_bridge_disable(struct drm_bridge *bridge);
293void drm_bridge_post_disable(struct drm_bridge *bridge); 311void drm_bridge_post_disable(struct drm_bridge *bridge);
294void drm_bridge_mode_set(struct drm_bridge *bridge, 312void drm_bridge_mode_set(struct drm_bridge *bridge,
295 struct drm_display_mode *mode, 313 struct drm_display_mode *mode,
296 struct drm_display_mode *adjusted_mode); 314 struct drm_display_mode *adjusted_mode);
297void drm_bridge_pre_enable(struct drm_bridge *bridge); 315void drm_bridge_pre_enable(struct drm_bridge *bridge);
298void drm_bridge_enable(struct drm_bridge *bridge); 316void drm_bridge_enable(struct drm_bridge *bridge);
299 317
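For the first bridge in a chain the documented rule means programming the hardware from adjusted_mode rather than from the requested connector mode; a hypothetical driver hook (all foo_* names are placeholders):

	#include <linux/kernel.h>
	#include <drm/drm_bridge.h>
	#include <drm/drm_modes.h>

	struct foo_bridge {
		struct drm_bridge bridge;
		void __iomem *regs;
	};

	static void foo_hw_set_timings(struct foo_bridge *foo,
				       const struct drm_display_mode *m)
	{
		/* write m->crtc_htotal, m->crtc_vtotal, ... into foo->regs */
	}

	static void foo_bridge_mode_set(struct drm_bridge *bridge,
					struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
	{
		struct foo_bridge *foo = container_of(bridge, struct foo_bridge, bridge);

		/* CRTC-side timings, possibly rescaled relative to *mode */
		foo_hw_set_timings(foo, adjusted_mode);
	}

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		.mode_set = foo_bridge_mode_set,
	};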
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
new file mode 100644
index 000000000000..989f8e52864d
--- /dev/null
+++ b/include/drm/drm_client.h
@@ -0,0 +1,139 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _DRM_CLIENT_H_
4#define _DRM_CLIENT_H_
5
6#include <linux/types.h>
7
8struct drm_client_dev;
9struct drm_device;
10struct drm_file;
11struct drm_framebuffer;
12struct drm_gem_object;
13struct drm_minor;
14struct module;
15
16/**
17 * struct drm_client_funcs - DRM client callbacks
18 */
19struct drm_client_funcs {
20 /**
21 * @owner: The module owner
22 */
23 struct module *owner;
24
25 /**
26 * @unregister:
27 *
28 * Called when &drm_device is unregistered. The client should respond by
 29 * releasing its resources using drm_client_release().
30 *
31 * This callback is optional.
32 */
33 void (*unregister)(struct drm_client_dev *client);
34
35 /**
36 * @restore:
37 *
38 * Called on drm_lastclose(). The first client instance in the list that
39 * returns zero gets the privilege to restore and no more clients are
40 * called. This callback is not called after @unregister has been called.
41 *
42 * This callback is optional.
43 */
44 int (*restore)(struct drm_client_dev *client);
45
46 /**
47 * @hotplug:
48 *
49 * Called on drm_kms_helper_hotplug_event().
50 * This callback is not called after @unregister has been called.
51 *
52 * This callback is optional.
53 */
54 int (*hotplug)(struct drm_client_dev *client);
55};
56
57/**
58 * struct drm_client_dev - DRM client instance
59 */
60struct drm_client_dev {
61 /**
62 * @dev: DRM device
63 */
64 struct drm_device *dev;
65
66 /**
67 * @name: Name of the client.
68 */
69 const char *name;
70
71 /**
72 * @list:
73 *
74 * List of all clients of a DRM device, linked into
75 * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex.
76 */
77 struct list_head list;
78
79 /**
80 * @funcs: DRM client functions (optional)
81 */
82 const struct drm_client_funcs *funcs;
83
84 /**
85 * @file: DRM file
86 */
87 struct drm_file *file;
88};
89
90int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
91 const char *name, const struct drm_client_funcs *funcs);
92void drm_client_release(struct drm_client_dev *client);
93
94void drm_client_dev_unregister(struct drm_device *dev);
95void drm_client_dev_hotplug(struct drm_device *dev);
96void drm_client_dev_restore(struct drm_device *dev);
97
98/**
99 * struct drm_client_buffer - DRM client buffer
100 */
101struct drm_client_buffer {
102 /**
103 * @client: DRM client
104 */
105 struct drm_client_dev *client;
106
107 /**
108 * @handle: Buffer handle
109 */
110 u32 handle;
111
112 /**
113 * @pitch: Buffer pitch
114 */
115 u32 pitch;
116
117 /**
118 * @gem: GEM object backing this buffer
119 */
120 struct drm_gem_object *gem;
121
122 /**
123 * @vaddr: Virtual address for the buffer
124 */
125 void *vaddr;
126
127 /**
128 * @fb: DRM framebuffer
129 */
130 struct drm_framebuffer *fb;
131};
132
133struct drm_client_buffer *
134drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
135void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
136
137int drm_client_debugfs_init(struct drm_minor *minor);
138
139#endif
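A minimal sketch of an in-kernel client hooking into this API (the hotplug body and the foo_* names are placeholders):

	#include <linux/module.h>
	#include <drm/drm_client.h>

	static int foo_client_hotplug(struct drm_client_dev *client)
	{
		/* re-probe outputs, reallocate buffers, etc. */
		return 0;
	}

	static const struct drm_client_funcs foo_client_funcs = {
		.owner	 = THIS_MODULE,
		.hotplug = foo_client_hotplug,
	};

	static int foo_client_register(struct drm_device *dev,
				       struct drm_client_dev *client)
	{
		/* Register the client with @dev under the name "foo-client". */
		return drm_client_new(dev, client, "foo-client", &foo_client_funcs);
	}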
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 675cc3f8cf85..97ea41dc678f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -290,6 +290,10 @@ struct drm_display_info {
290#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4) 290#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4)
291/* data is transmitted LSB to MSB on the bus */ 291/* data is transmitted LSB to MSB on the bus */
292#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5) 292#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5)
293/* drive sync on pos. edge */
294#define DRM_BUS_FLAG_SYNC_POSEDGE (1<<6)
295/* drive sync on neg. edge */
296#define DRM_BUS_FLAG_SYNC_NEGEDGE (1<<7)
293 297
294 /** 298 /**
295 * @bus_flags: Additional information (like pixel signal polarity) for 299 * @bus_flags: Additional information (like pixel signal polarity) for
@@ -374,12 +378,9 @@ struct drm_tv_connector_state {
374 378
375/** 379/**
376 * struct drm_connector_state - mutable connector state 380 * struct drm_connector_state - mutable connector state
377 * @connector: backpointer to the connector
378 * @best_encoder: can be used by helpers and drivers to select the encoder
379 * @state: backpointer to global drm_atomic_state
380 * @tv: TV connector state
381 */ 381 */
382struct drm_connector_state { 382struct drm_connector_state {
383 /** @connector: backpointer to the connector */
383 struct drm_connector *connector; 384 struct drm_connector *connector;
384 385
385 /** 386 /**
@@ -390,6 +391,13 @@ struct drm_connector_state {
390 */ 391 */
391 struct drm_crtc *crtc; 392 struct drm_crtc *crtc;
392 393
394 /**
395 * @best_encoder:
396 *
397 * Used by the atomic helpers to select the encoder, through the
398 * &drm_connector_helper_funcs.atomic_best_encoder or
399 * &drm_connector_helper_funcs.best_encoder callbacks.
400 */
393 struct drm_encoder *best_encoder; 401 struct drm_encoder *best_encoder;
394 402
395 /** 403 /**
@@ -398,6 +406,7 @@ struct drm_connector_state {
398 */ 406 */
399 enum drm_link_status link_status; 407 enum drm_link_status link_status;
400 408
409 /** @state: backpointer to global drm_atomic_state */
401 struct drm_atomic_state *state; 410 struct drm_atomic_state *state;
402 411
403 /** 412 /**
@@ -407,6 +416,7 @@ struct drm_connector_state {
407 */ 416 */
408 struct drm_crtc_commit *commit; 417 struct drm_crtc_commit *commit;
409 418
419 /** @tv: TV connector state */
410 struct drm_tv_connector_state tv; 420 struct drm_tv_connector_state tv;
411 421
412 /** 422 /**
@@ -419,6 +429,14 @@ struct drm_connector_state {
419 enum hdmi_picture_aspect picture_aspect_ratio; 429 enum hdmi_picture_aspect picture_aspect_ratio;
420 430
421 /** 431 /**
432 * @content_type: Connector property to control the
433 * HDMI infoframe content type setting.
 434 * The %DRM_MODE_CONTENT_TYPE_\* values must
435 * match the values.
436 */
437 unsigned int content_type;
438
439 /**
422 * @scaling_mode: Connector property to control the 440 * @scaling_mode: Connector property to control the
423 * upscaling, mostly used for built-in panels. 441 * upscaling, mostly used for built-in panels.
424 */ 442 */
@@ -429,6 +447,19 @@ struct drm_connector_state {
429 * protection. This is most commonly used for HDCP. 447 * protection. This is most commonly used for HDCP.
430 */ 448 */
431 unsigned int content_protection; 449 unsigned int content_protection;
450
451 /**
452 * @writeback_job: Writeback job for writeback connectors
453 *
454 * Holds the framebuffer and out-fence for a writeback connector. As
455 * the writeback completion may be asynchronous to the normal commit
456 * cycle, the writeback job lifetime is managed separately from the
457 * normal atomic state by this object.
458 *
459 * See also: drm_writeback_queue_job() and
460 * drm_writeback_signal_completion()
461 */
462 struct drm_writeback_job *writeback_job;
432}; 463};
433 464
434/** 465/**
@@ -530,8 +561,7 @@ struct drm_connector_funcs {
530 * received for this output connector->edid must be NULL. 561 * received for this output connector->edid must be NULL.
531 * 562 *
532 * Drivers using the probe helpers should use 563 * Drivers using the probe helpers should use
533 * drm_helper_probe_single_connector_modes() or 564 * drm_helper_probe_single_connector_modes() to implement this
534 * drm_helper_probe_single_connector_modes_nomerge() to implement this
535 * function. 565 * function.
536 * 566 *
537 * RETURNS: 567 * RETURNS:
@@ -608,6 +638,8 @@ struct drm_connector_funcs {
608 * cleaned up by calling the @atomic_destroy_state hook in this 638 * cleaned up by calling the @atomic_destroy_state hook in this
609 * structure. 639 * structure.
610 * 640 *
641 * This callback is mandatory for atomic drivers.
642 *
611 * Atomic drivers which don't subclass &struct drm_connector_state should use 643 * Atomic drivers which don't subclass &struct drm_connector_state should use
612 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the 644 * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
613 * state structure to extend it with driver-private state should use 645 * state structure to extend it with driver-private state should use
@@ -634,6 +666,8 @@ struct drm_connector_funcs {
634 * 666 *
635 * Destroy a state duplicated with @atomic_duplicate_state and release 667 * Destroy a state duplicated with @atomic_duplicate_state and release
636 * or unreference all resources it references 668 * or unreference all resources it references
669 *
670 * This callback is mandatory for atomic drivers.
637 */ 671 */
638 void (*atomic_destroy_state)(struct drm_connector *connector, 672 void (*atomic_destroy_state)(struct drm_connector *connector,
639 struct drm_connector_state *state); 673 struct drm_connector_state *state);
@@ -738,45 +772,6 @@ struct drm_cmdline_mode {
738 772
739/** 773/**
740 * struct drm_connector - central DRM connector control structure 774 * struct drm_connector - central DRM connector control structure
741 * @dev: parent DRM device
742 * @kdev: kernel device for sysfs attributes
743 * @attr: sysfs attributes
744 * @head: list management
745 * @base: base KMS object
746 * @name: human readable name, can be overwritten by the driver
747 * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
748 * @connector_type_id: index into connector type enum
749 * @interlace_allowed: can this connector handle interlaced modes?
750 * @doublescan_allowed: can this connector handle doublescan?
751 * @stereo_allowed: can this connector handle stereo modes?
752 * @funcs: connector control functions
753 * @edid_blob_ptr: DRM property containing EDID if present
754 * @properties: property tracking for this connector
755 * @dpms: current dpms state
756 * @helper_private: mid-layer private data
757 * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
758 * @force: a DRM_FORCE_<foo> state for forced mode sets
759 * @override_edid: has the EDID been overwritten through debugfs for testing?
760 * @encoder_ids: valid encoders for this connector
761 * @eld: EDID-like data, if present
762 * @latency_present: AV delay info from ELD, if found
763 * @video_latency: video latency info from ELD, if found
764 * @audio_latency: audio latency info from ELD, if found
765 * @null_edid_counter: track sinks that give us all zeros for the EDID
766 * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
767 * @edid_corrupt: indicates whether the last read EDID was corrupt
768 * @debugfs_entry: debugfs directory for this connector
769 * @has_tile: is this connector connected to a tiled monitor
770 * @tile_group: tile group for the connected monitor
771 * @tile_is_single_monitor: whether the tile is one monitor housing
772 * @num_h_tile: number of horizontal tiles in the tile group
773 * @num_v_tile: number of vertical tiles in the tile group
774 * @tile_h_loc: horizontal location of this tile
775 * @tile_v_loc: vertical location of this tile
776 * @tile_h_size: horizontal size of this tile.
777 * @tile_v_size: vertical size of this tile.
778 * @scaling_mode_property: Optional atomic property to control the upscaling.
779 * @content_protection_property: Optional property to control content protection
780 * 775 *
781 * Each connector may be connected to one or more CRTCs, or may be clonable by 776 * Each connector may be connected to one or more CRTCs, or may be clonable by
782 * another connector if they can share a CRTC. Each connector also has a specific 777 * another connector if they can share a CRTC. Each connector also has a specific
@@ -784,13 +779,27 @@ struct drm_cmdline_mode {
784 * span multiple monitors). 779 * span multiple monitors).
785 */ 780 */
786struct drm_connector { 781struct drm_connector {
782 /** @dev: parent DRM device */
787 struct drm_device *dev; 783 struct drm_device *dev;
784 /** @kdev: kernel device for sysfs attributes */
788 struct device *kdev; 785 struct device *kdev;
786 /** @attr: sysfs attributes */
789 struct device_attribute *attr; 787 struct device_attribute *attr;
788
789 /**
790 * @head:
791 *
792 * List of all connectors on a @dev, linked from
793 * &drm_mode_config.connector_list. Protected by
794 * &drm_mode_config.connector_list_lock, but please only use
795 * &drm_connector_list_iter to walk this list.
796 */
790 struct list_head head; 797 struct list_head head;
791 798
799 /** @base: base KMS object */
792 struct drm_mode_object base; 800 struct drm_mode_object base;
793 801
802 /** @name: human readable name, can be overwritten by the driver */
794 char *name; 803 char *name;
795 804
796 /** 805 /**
@@ -808,10 +817,30 @@ struct drm_connector {
808 */ 817 */
809 unsigned index; 818 unsigned index;
810 819
820 /**
821 * @connector_type:
822 * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
823 */
811 int connector_type; 824 int connector_type;
825 /** @connector_type_id: index into connector type enum */
812 int connector_type_id; 826 int connector_type_id;
827 /**
828 * @interlace_allowed:
829 * Can this connector handle interlaced modes? Only used by
830 * drm_helper_probe_single_connector_modes() for mode filtering.
831 */
813 bool interlace_allowed; 832 bool interlace_allowed;
833 /**
834 * @doublescan_allowed:
835 * Can this connector handle doublescan? Only used by
836 * drm_helper_probe_single_connector_modes() for mode filtering.
837 */
814 bool doublescan_allowed; 838 bool doublescan_allowed;
839 /**
840 * @stereo_allowed:
841 * Can this connector handle stereo modes? Only used by
842 * drm_helper_probe_single_connector_modes() for mode filtering.
843 */
815 bool stereo_allowed; 844 bool stereo_allowed;
816 845
817 /** 846 /**
@@ -860,45 +889,42 @@ struct drm_connector {
860 * Protected by &drm_mode_config.mutex. 889 * Protected by &drm_mode_config.mutex.
861 */ 890 */
862 struct drm_display_info display_info; 891 struct drm_display_info display_info;
892
893 /** @funcs: connector control functions */
863 const struct drm_connector_funcs *funcs; 894 const struct drm_connector_funcs *funcs;
864 895
896 /**
897 * @edid_blob_ptr: DRM property containing EDID if present. Protected by
898 * &drm_mode_config.mutex. This should be updated only by calling
899 * drm_connector_update_edid_property().
900 */
865 struct drm_property_blob *edid_blob_ptr; 901 struct drm_property_blob *edid_blob_ptr;
902
903 /** @properties: property tracking for this connector */
866 struct drm_object_properties properties; 904 struct drm_object_properties properties;
867 905
906 /**
907 * @scaling_mode_property: Optional atomic property to control the
 908 * upscaling. See drm_connector_attach_scaling_mode_property().
909 */
868 struct drm_property *scaling_mode_property; 910 struct drm_property *scaling_mode_property;
869 911
870 /** 912 /**
871 * @content_protection_property: DRM ENUM property for content 913 * @content_protection_property: DRM ENUM property for content
872 * protection 914 * protection. See drm_connector_attach_content_protection_property().
873 */ 915 */
874 struct drm_property *content_protection_property; 916 struct drm_property *content_protection_property;
875 917
876 /** 918 /**
877 * @path_blob_ptr: 919 * @path_blob_ptr:
878 * 920 *
879 * DRM blob property data for the DP MST path property. 921 * DRM blob property data for the DP MST path property. This should only
922 * be updated by calling drm_connector_set_path_property().
880 */ 923 */
881 struct drm_property_blob *path_blob_ptr; 924 struct drm_property_blob *path_blob_ptr;
882 925
883 /**
884 * @tile_blob_ptr:
885 *
886 * DRM blob property data for the tile property (used mostly by DP MST).
887 * This is meant for screens which are driven through separate display
888 * pipelines represented by &drm_crtc, which might not be running with
889 * genlocked clocks. For tiled panels which are genlocked, like
890 * dual-link LVDS or dual-link DSI, the driver should try to not expose
891 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
892 */
893 struct drm_property_blob *tile_blob_ptr;
894
895/* should we poll this connector for connects and disconnects */
896/* hot plug detectable */
897#define DRM_CONNECTOR_POLL_HPD (1 << 0) 926#define DRM_CONNECTOR_POLL_HPD (1 << 0)
898/* poll for connections */
899#define DRM_CONNECTOR_POLL_CONNECT (1 << 1) 927#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
900/* can cleanly poll for disconnections without flickering the screen */
901/* DACs should rarely do this without a lot of testing */
902#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) 928#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
903 929
904 /** 930 /**
@@ -915,25 +941,40 @@ struct drm_connector {
915 * Periodically poll the connector for connection. 941 * Periodically poll the connector for connection.
916 * 942 *
917 * DRM_CONNECTOR_POLL_DISCONNECT 943 * DRM_CONNECTOR_POLL_DISCONNECT
918 * Periodically poll the connector for disconnection. 944 * Periodically poll the connector for disconnection, without
945 * causing flickering even when the connector is in use. DACs should
946 * rarely do this without a lot of testing.
919 * 947 *
920 * Set to 0 for connectors that don't support connection status 948 * Set to 0 for connectors that don't support connection status
921 * discovery. 949 * discovery.
922 */ 950 */
923 uint8_t polled; 951 uint8_t polled;
924 952
925 /* requested DPMS state */ 953 /**
954 * @dpms: Current dpms state. For legacy drivers the
955 * &drm_connector_funcs.dpms callback must update this. For atomic
956 * drivers, this is handled by the core atomic code, and drivers must
957 * only take &drm_crtc_state.active into account.
958 */
926 int dpms; 959 int dpms;
927 960
961 /** @helper_private: mid-layer private data */
928 const struct drm_connector_helper_funcs *helper_private; 962 const struct drm_connector_helper_funcs *helper_private;
929 963
930 /* forced on connector */ 964 /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */
931 struct drm_cmdline_mode cmdline_mode; 965 struct drm_cmdline_mode cmdline_mode;
966 /** @force: a DRM_FORCE_<foo> state for forced mode sets */
932 enum drm_connector_force force; 967 enum drm_connector_force force;
968 /** @override_edid: has the EDID been overwritten through debugfs for testing? */
933 bool override_edid; 969 bool override_edid;
934 970
935#define DRM_CONNECTOR_MAX_ENCODER 3 971#define DRM_CONNECTOR_MAX_ENCODER 3
972 /**
973 * @encoder_ids: Valid encoders for this connector. Please only use
974 * drm_connector_for_each_possible_encoder() to enumerate these.
975 */
936 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 976 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
977
937 /** 978 /**
938 * @encoder: Currently bound encoder driving this connector, if any. 979 * @encoder: Currently bound encoder driving this connector, if any.
939 * Only really meaningful for non-atomic drivers. Atomic drivers should 980 * Only really meaningful for non-atomic drivers. Atomic drivers should
@@ -943,19 +984,37 @@ struct drm_connector {
943 struct drm_encoder *encoder; 984 struct drm_encoder *encoder;
944 985
945#define MAX_ELD_BYTES 128 986#define MAX_ELD_BYTES 128
946 /* EDID bits */ 987 /** @eld: EDID-like data, if present */
947 uint8_t eld[MAX_ELD_BYTES]; 988 uint8_t eld[MAX_ELD_BYTES];
989 /** @latency_present: AV delay info from ELD, if found */
948 bool latency_present[2]; 990 bool latency_present[2];
949 int video_latency[2]; /* [0]: progressive, [1]: interlaced */ 991 /**
992 * @video_latency: Video latency info from ELD, if found.
993 * [0]: progressive, [1]: interlaced
994 */
995 int video_latency[2];
996 /**
997 * @audio_latency: audio latency info from ELD, if found
998 * [0]: progressive, [1]: interlaced
999 */
950 int audio_latency[2]; 1000 int audio_latency[2];
951 int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ 1001 /**
1002 * @null_edid_counter: track sinks that give us all zeros for the EDID.
1003 * Needed to workaround some HW bugs where we get all 0s
1004 */
1005 int null_edid_counter;
1006
1007 /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */
952 unsigned bad_edid_counter; 1008 unsigned bad_edid_counter;
953 1009
954 /* Flag for raw EDID header corruption - used in Displayport 1010 /**
955 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6 1011 * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used
1012 * in Displayport compliance testing - Displayport Link CTS Core 1.2
1013 * rev1.1 4.2.2.6
956 */ 1014 */
957 bool edid_corrupt; 1015 bool edid_corrupt;
958 1016
1017 /** @debugfs_entry: debugfs directory for this connector */
959 struct dentry *debugfs_entry; 1018 struct dentry *debugfs_entry;
960 1019
961 /** 1020 /**
@@ -963,7 +1022,7 @@ struct drm_connector {
963 * 1022 *
964 * Current atomic state for this connector. 1023 * Current atomic state for this connector.
965 * 1024 *
966 * This is protected by @drm_mode_config.connection_mutex. Note that 1025 * This is protected by &drm_mode_config.connection_mutex. Note that
967 * nonblocking atomic commits access the current connector state without 1026 * nonblocking atomic commits access the current connector state without
968 * taking locks. Either by going through the &struct drm_atomic_state 1027 * taking locks. Either by going through the &struct drm_atomic_state
969 * pointers, see for_each_oldnew_connector_in_state(), 1028 * pointers, see for_each_oldnew_connector_in_state(),
@@ -974,19 +1033,44 @@ struct drm_connector {
974 */ 1033 */
975 struct drm_connector_state *state; 1034 struct drm_connector_state *state;
976 1035
977 /* DisplayID bits */ 1036 /* DisplayID bits. FIXME: Extract into a substruct? */
1037
1038 /**
1039 * @tile_blob_ptr:
1040 *
1041 * DRM blob property data for the tile property (used mostly by DP MST).
1042 * This is meant for screens which are driven through separate display
1043 * pipelines represented by &drm_crtc, which might not be running with
1044 * genlocked clocks. For tiled panels which are genlocked, like
1045 * dual-link LVDS or dual-link DSI, the driver should try to not expose
1046 * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
1047 *
1048 * This should only be updated by calling
1049 * drm_connector_set_tile_property().
1050 */
1051 struct drm_property_blob *tile_blob_ptr;
1052
1053 /** @has_tile: is this connector connected to a tiled monitor */
978 bool has_tile; 1054 bool has_tile;
1055 /** @tile_group: tile group for the connected monitor */
979 struct drm_tile_group *tile_group; 1056 struct drm_tile_group *tile_group;
1057 /** @tile_is_single_monitor: whether the tile is one monitor housing */
980 bool tile_is_single_monitor; 1058 bool tile_is_single_monitor;
981 1059
1060 /** @num_h_tile: number of horizontal tiles in the tile group */
1061 /** @num_v_tile: number of vertical tiles in the tile group */
982 uint8_t num_h_tile, num_v_tile; 1062 uint8_t num_h_tile, num_v_tile;
1063 /** @tile_h_loc: horizontal location of this tile */
1064 /** @tile_v_loc: vertical location of this tile */
983 uint8_t tile_h_loc, tile_v_loc; 1065 uint8_t tile_h_loc, tile_v_loc;
1066 /** @tile_h_size: horizontal size of this tile. */
1067 /** @tile_v_size: vertical size of this tile. */
984 uint16_t tile_h_size, tile_v_size; 1068 uint16_t tile_h_size, tile_v_size;
985 1069
986 /** 1070 /**
987 * @free_node: 1071 * @free_node:
988 * 1072 *
989 * List used only by &drm_connector_iter to be able to clean up a 1073 * List used only by &drm_connector_list_iter to be able to clean up a
990 * connector from any context, in conjunction with 1074 * connector from any context, in conjunction with
991 * &drm_mode_config.connector_free_work. 1075 * &drm_mode_config.connector_free_work.
992 */ 1076 */
@@ -1001,15 +1085,21 @@ int drm_connector_init(struct drm_device *dev,
1001 int connector_type); 1085 int connector_type);
1002int drm_connector_register(struct drm_connector *connector); 1086int drm_connector_register(struct drm_connector *connector);
1003void drm_connector_unregister(struct drm_connector *connector); 1087void drm_connector_unregister(struct drm_connector *connector);
1004int drm_mode_connector_attach_encoder(struct drm_connector *connector, 1088int drm_connector_attach_encoder(struct drm_connector *connector,
1005 struct drm_encoder *encoder); 1089 struct drm_encoder *encoder);
1006 1090
1007void drm_connector_cleanup(struct drm_connector *connector); 1091void drm_connector_cleanup(struct drm_connector *connector);
1008static inline unsigned drm_connector_index(struct drm_connector *connector) 1092
1093static inline unsigned int drm_connector_index(const struct drm_connector *connector)
1009{ 1094{
1010 return connector->index; 1095 return connector->index;
1011} 1096}
1012 1097
1098static inline u32 drm_connector_mask(const struct drm_connector *connector)
1099{
1100 return 1 << connector->index;
1101}
1102
1013/** 1103/**
1014 * drm_connector_lookup - lookup connector object 1104 * drm_connector_lookup - lookup connector object
1015 * @dev: DRM device 1105 * @dev: DRM device
@@ -1089,20 +1179,25 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
1089 unsigned int num_modes, 1179 unsigned int num_modes,
1090 const char * const modes[]); 1180 const char * const modes[]);
1091int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1181int drm_mode_create_scaling_mode_property(struct drm_device *dev);
1182int drm_connector_attach_content_type_property(struct drm_connector *dev);
1092int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, 1183int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
1093 u32 scaling_mode_mask); 1184 u32 scaling_mode_mask);
1094int drm_connector_attach_content_protection_property( 1185int drm_connector_attach_content_protection_property(
1095 struct drm_connector *connector); 1186 struct drm_connector *connector);
1096int drm_mode_create_aspect_ratio_property(struct drm_device *dev); 1187int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
1188int drm_mode_create_content_type_property(struct drm_device *dev);
1189void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
1190 const struct drm_connector_state *conn_state);
1191
1097int drm_mode_create_suggested_offset_properties(struct drm_device *dev); 1192int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
1098 1193
1099int drm_mode_connector_set_path_property(struct drm_connector *connector, 1194int drm_connector_set_path_property(struct drm_connector *connector,
1100 const char *path); 1195 const char *path);
1101int drm_mode_connector_set_tile_property(struct drm_connector *connector); 1196int drm_connector_set_tile_property(struct drm_connector *connector);
1102int drm_mode_connector_update_edid_property(struct drm_connector *connector, 1197int drm_connector_update_edid_property(struct drm_connector *connector,
1103 const struct edid *edid); 1198 const struct edid *edid);
1104void drm_mode_connector_set_link_status_property(struct drm_connector *connector, 1199void drm_connector_set_link_status_property(struct drm_connector *connector,
1105 uint64_t link_status); 1200 uint64_t link_status);
1106int drm_connector_init_panel_orientation_property( 1201int drm_connector_init_panel_orientation_property(
1107 struct drm_connector *connector, int width, int height); 1202 struct drm_connector *connector, int width, int height);
1108 1203
@@ -1151,6 +1246,9 @@ struct drm_connector *
1151drm_connector_list_iter_next(struct drm_connector_list_iter *iter); 1246drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
1152void drm_connector_list_iter_end(struct drm_connector_list_iter *iter); 1247void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
1153 1248
1249bool drm_connector_has_possible_encoder(struct drm_connector *connector,
1250 struct drm_encoder *encoder);
1251
1154/** 1252/**
1155 * drm_for_each_connector_iter - connector_list iterator macro 1253 * drm_for_each_connector_iter - connector_list iterator macro
1156 * @connector: &struct drm_connector pointer used as cursor 1254 * @connector: &struct drm_connector pointer used as cursor
@@ -1163,4 +1261,17 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
1163#define drm_for_each_connector_iter(connector, iter) \ 1261#define drm_for_each_connector_iter(connector, iter) \
1164 while ((connector = drm_connector_list_iter_next(iter))) 1262 while ((connector = drm_connector_list_iter_next(iter)))
1165 1263
1264/**
1265 * drm_connector_for_each_possible_encoder - iterate connector's possible encoders
1266 * @connector: &struct drm_connector pointer
1267 * @encoder: &struct drm_encoder pointer used as cursor
1268 * @__i: int iteration cursor, for macro-internal use
1269 */
1270#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \
1271 for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
1272 (connector)->encoder_ids[(__i)] != 0; (__i)++) \
1273 for_each_if((encoder) = \
1274 drm_encoder_find((connector)->dev, NULL, \
1275 (connector)->encoder_ids[(__i)])) \
1276
1166#endif 1277#endif
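Two of the new helpers in use, sketched: walking the candidate encoders with drm_connector_for_each_possible_encoder() instead of open-coding encoder_ids[], and building bitmasks with drm_connector_mask():

	#include <drm/drm_connector.h>
	#include <drm/drm_encoder.h>

	/* Sketch: report the possible CRTCs of the first candidate encoder. */
	static u32 foo_first_encoder_crtcs(struct drm_connector *connector)
	{
		struct drm_encoder *encoder;
		int i;

		drm_connector_for_each_possible_encoder(connector, encoder, i)
			return encoder->possible_crtcs;

		return 0;
	}

	/* Sketch: accumulate connectors into a connector_mask-style bitmask. */
	static u32 foo_add_connector(u32 mask, struct drm_connector *connector)
	{
		return mask | drm_connector_mask(connector);	/* 1 << connector->index */
	}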
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index a2d81d2907a9..92e7fc7f05a4 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,21 +77,6 @@ struct drm_plane_helper_funcs;
77 77
78/** 78/**
79 * struct drm_crtc_state - mutable CRTC state 79 * struct drm_crtc_state - mutable CRTC state
80 * @crtc: backpointer to the CRTC
81 * @enable: whether the CRTC should be enabled, gates all other state
82 * @active: whether the CRTC is actively displaying (used for DPMS)
83 * @planes_changed: planes on this crtc are updated
84 * @mode_changed: @mode or @enable has been changed
85 * @active_changed: @active has been toggled.
86 * @connectors_changed: connectors to this crtc have been updated
87 * @zpos_changed: zpos values of planes on this crtc have been updated
88 * @color_mgmt_changed: color management properties have changed (degamma or
89 * gamma LUT or CSC matrix)
90 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
91 * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
92 * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
93 * @mode_blob: &drm_property_blob for @mode
94 * @state: backpointer to global drm_atomic_state
95 * 80 *
96 * Note that the distinction between @enable and @active is rather subtile: 81 * Note that the distinction between @enable and @active is rather subtile:
97 * Flipping @active while @enable is set without changing anything else may 82 * Flipping @active while @enable is set without changing anything else may
@@ -102,31 +87,127 @@ struct drm_plane_helper_funcs;
102 * 87 *
103 * The three booleans active_changed, connectors_changed and mode_changed are 88 * The three booleans active_changed, connectors_changed and mode_changed are
104 * intended to indicate whether a full modeset is needed, rather than strictly 89 * intended to indicate whether a full modeset is needed, rather than strictly
105 * describing what has changed in a commit. 90 * describing what has changed in a commit. See also:
106 * See also: drm_atomic_crtc_needs_modeset() 91 * drm_atomic_crtc_needs_modeset()
92 *
93 * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
94 * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
95 * state like @plane_mask so drivers not converted over to atomic helpers should
96 * not rely on these being accurate!
107 */ 97 */
108struct drm_crtc_state { 98struct drm_crtc_state {
99 /** @crtc: backpointer to the CRTC */
109 struct drm_crtc *crtc; 100 struct drm_crtc *crtc;
110 101
102 /**
103 * @enable: Whether the CRTC should be enabled, gates all other state.
104 * This controls reservations of shared resources. Actual hardware state
105 * is controlled by @active.
106 */
111 bool enable; 107 bool enable;
108
109 /**
110 * @active: Whether the CRTC is actively displaying (used for DPMS).
111 * Implies that @enable is set. The driver must not release any shared
112 * resources if @active is set to false but @enable still true, because
113 * userspace expects that a DPMS ON always succeeds.
114 *
115 * Hence drivers must not consult @active in their various
116 * &drm_mode_config_funcs.atomic_check callback to reject an atomic
117 * commit. They can consult it to aid in the computation of derived
118 * hardware state, since even in the DPMS OFF state the display hardware
119 * should be as much powered down as when the CRTC is completely
120 * disabled through setting @enable to false.
121 */
112 bool active; 122 bool active;
113 123
114 /* computed state bits used by helpers and drivers */ 124 /**
125 * @planes_changed: Planes on this crtc are updated. Used by the atomic
126 * helpers and drivers to steer the atomic commit control flow.
127 */
115 bool planes_changed : 1; 128 bool planes_changed : 1;
129
130 /**
131 * @mode_changed: @mode or @enable has been changed. Used by the atomic
132 * helpers and drivers to steer the atomic commit control flow. See also
133 * drm_atomic_crtc_needs_modeset().
134 *
135 * Drivers are supposed to set this for any CRTC state changes that
136 * require a full modeset. They can also reset it to false if e.g. a
137 * @mode change can be done without a full modeset by only changing
138 * scaler settings.
139 */
116 bool mode_changed : 1; 140 bool mode_changed : 1;
141
142 /**
143 * @active_changed: @active has been toggled. Used by the atomic
144 * helpers and drivers to steer the atomic commit control flow. See also
145 * drm_atomic_crtc_needs_modeset().
146 */
117 bool active_changed : 1; 147 bool active_changed : 1;
148
149 /**
150 * @connectors_changed: Connectors to this crtc have been updated,
151 * either in their state or routing. Used by the atomic
152 * helpers and drivers to steer the atomic commit control flow. See also
153 * drm_atomic_crtc_needs_modeset().
154 *
155 * Drivers are supposed to set this as-needed from their own atomic
156 * check code, e.g. from &drm_encoder_helper_funcs.atomic_check
157 */
118 bool connectors_changed : 1; 158 bool connectors_changed : 1;
159 /**
160 * @zpos_changed: zpos values of planes on this crtc have been updated.
161 * Used by the atomic helpers and drivers to steer the atomic commit
162 * control flow.
163 */
119 bool zpos_changed : 1; 164 bool zpos_changed : 1;
165 /**
166 * @color_mgmt_changed: Color management properties have changed
167 * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and
168 * drivers to steer the atomic commit control flow.
169 */
120 bool color_mgmt_changed : 1; 170 bool color_mgmt_changed : 1;
121 171
122 /* attached planes bitmask: 172 /**
123 * WARNING: transitional helpers do not maintain plane_mask so 173 * @no_vblank:
124 * drivers not converted over to atomic helpers should not rely 174 *
125 * on plane_mask being accurate! 175 * Reflects the ability of a CRTC to send VBLANK events. This state
176 * usually depends on the pipeline configuration, and the main usage
177 * is CRTCs feeding a writeback connector operating in oneshot mode.
178 * In this case the VBLANK event is only generated when a job is queued
179 * to the writeback connector, and we want the core to fake VBLANK
180 * events when this part of the pipeline hasn't changed but others have,
181 * or when the CRTC and connectors are being disabled.
182 *
183 * __drm_atomic_helper_crtc_duplicate_state() will not reset the value
184 * from the current state; the CRTC driver is then responsible for
185 * updating this field when needed.
186 *
187 * Note that the combination of &drm_crtc_state.event == NULL and
188 * &drm_crtc_state.no_vblank == true is valid and usually used when the
189 * writeback connector attached to the CRTC has a new job queued. In
190 * this case the driver will send the VBLANK event on its own when the
191 * writeback job is complete.
192 */
193 bool no_vblank : 1;
194
195 /**
196 * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
197 * this CRTC.
126 */ 198 */
127 u32 plane_mask; 199 u32 plane_mask;
128 200
201 /**
202 * @connector_mask: Bitmask of drm_connector_mask(connector) of
203 * connectors attached to this CRTC.
204 */
129 u32 connector_mask; 205 u32 connector_mask;
206
207 /**
208 * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders
209 * attached to this CRTC.
210 */
130 u32 encoder_mask; 211 u32 encoder_mask;
131 212
132 /** 213 /**
@@ -134,10 +215,13 @@ struct drm_crtc_state {
134 * 215 *
135 * Internal display timings which can be used by the driver to handle 216 * Internal display timings which can be used by the driver to handle
136 * differences between the mode requested by userspace in @mode and what 217 * differences between the mode requested by userspace in @mode and what
137 * is actually programmed into the hardware. It is purely driver 218 * is actually programmed into the hardware.
138 * implementation defined what exactly this adjusted mode means. Usually 219 *
139 * it is used to store the hardware display timings used between the 220 * For drivers using &drm_bridge, this stores hardware display timings
140 * CRTC and encoder blocks. 221 * used between the CRTC and the first bridge. For other drivers, the
222 * meaning of the adjusted_mode field is purely driver implementation
223 * defined information, and will usually be used to store the hardware
224 * display timings used between the CRTC and encoder blocks.
141 */ 225 */
142 struct drm_display_mode adjusted_mode; 226 struct drm_display_mode adjusted_mode;
143 227
@@ -158,7 +242,10 @@ struct drm_crtc_state {
158 */ 242 */
159 struct drm_display_mode mode; 243 struct drm_display_mode mode;
160 244
161 /* blob property to expose current mode to atomic userspace */ 245 /**
246 * @mode_blob: &drm_property_blob for @mode, for exposing the mode to
247 * atomic userspace.
248 */
162 struct drm_property_blob *mode_blob; 249 struct drm_property_blob *mode_blob;
163 250
164 /** 251 /**
@@ -262,6 +349,7 @@ struct drm_crtc_state {
262 */ 349 */
263 struct drm_crtc_commit *commit; 350 struct drm_crtc_commit *commit;
264 351
352 /** @state: backpointer to global drm_atomic_state */
265 struct drm_atomic_state *state; 353 struct drm_atomic_state *state;
266}; 354};
267 355
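As a rough illustration of the new @no_vblank handling documented above (a hedged sketch, not part of this patch: my_crtc_duplicate_state() and my_crtc_uses_oneshot_writeback() are invented driver helpers), a CRTC driver could refresh the flag while duplicating its state::

    #include <linux/slab.h>
    #include <drm/drm_atomic_helper.h>

    static struct drm_crtc_state *my_crtc_duplicate_state(struct drm_crtc *crtc)
    {
            struct drm_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

            if (!state)
                    return NULL;

            /* copies the current state, including the old no_vblank value */
            __drm_atomic_helper_crtc_duplicate_state(crtc, state);

            /* the helper does not reset it, so recompute it from the pipeline */
            state->no_vblank = my_crtc_uses_oneshot_writeback(crtc);

            return state;
    }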
@@ -503,6 +591,8 @@ struct drm_crtc_funcs {
503 * cleaned up by calling the @atomic_destroy_state hook in this 591 * cleaned up by calling the @atomic_destroy_state hook in this
504 * structure. 592 * structure.
505 * 593 *
594 * This callback is mandatory for atomic drivers.
595 *
506 * Atomic drivers which don't subclass &struct drm_crtc_state should use 596 * Atomic drivers which don't subclass &struct drm_crtc_state should use
507 * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the 597 * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
508 * state structure to extend it with driver-private state should use 598 * state structure to extend it with driver-private state should use
@@ -529,6 +619,8 @@ struct drm_crtc_funcs {
529 * 619 *
530 * Destroy a state duplicated with @atomic_duplicate_state and release 620 * Destroy a state duplicated with @atomic_duplicate_state and release
531 * or unreference all resources it references 621 * or unreference all resources it references
622 *
623 * This callback is mandatory for atomic drivers.
532 */ 624 */
533 void (*atomic_destroy_state)(struct drm_crtc *crtc, 625 void (*atomic_destroy_state)(struct drm_crtc *crtc,
534 struct drm_crtc_state *state); 626 struct drm_crtc_state *state);
@@ -717,35 +809,25 @@ struct drm_crtc_funcs {
717 809
718/** 810/**
719 * struct drm_crtc - central CRTC control structure 811 * struct drm_crtc - central CRTC control structure
720 * @dev: parent DRM device
721 * @port: OF node used by drm_of_find_possible_crtcs()
722 * @head: list management
723 * @name: human readable name, can be overwritten by the driver
724 * @mutex: per-CRTC locking
725 * @base: base KMS object for ID tracking etc.
726 * @primary: primary plane for this CRTC
727 * @cursor: cursor plane for this CRTC
728 * @cursor_x: current x position of the cursor, used for universal cursor planes
729 * @cursor_y: current y position of the cursor, used for universal cursor planes
730 * @enabled: is this CRTC enabled?
731 * @mode: current mode timings
732 * @hwmode: mode timings as programmed to hw regs
733 * @x: x position on screen
734 * @y: y position on screen
735 * @funcs: CRTC control functions
736 * @gamma_size: size of gamma ramp
737 * @gamma_store: gamma ramp values
738 * @helper_private: mid-layer private data
739 * @properties: property tracking for this CRTC
740 * 812 *
741 * Each CRTC may have one or more connectors associated with it. This structure 813 * Each CRTC may have one or more connectors associated with it. This structure
742 * allows the CRTC to be controlled. 814 * allows the CRTC to be controlled.
743 */ 815 */
744struct drm_crtc { 816struct drm_crtc {
817 /** @dev: parent DRM device */
745 struct drm_device *dev; 818 struct drm_device *dev;
819 /** @port: OF node used by drm_of_find_possible_crtcs(). */
746 struct device_node *port; 820 struct device_node *port;
821 /**
822 * @head:
823 *
824 * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list.
825 * Invariant over the lifetime of @dev and therefore does not need
826 * locking.
827 */
747 struct list_head head; 828 struct list_head head;
748 829
830 /** @name: human readable name, can be overwritten by the driver */
749 char *name; 831 char *name;
750 832
751 /** 833 /**
@@ -760,10 +842,25 @@ struct drm_crtc {
760 */ 842 */
761 struct drm_modeset_lock mutex; 843 struct drm_modeset_lock mutex;
762 844
845 /** @base: base KMS object for ID tracking etc. */
763 struct drm_mode_object base; 846 struct drm_mode_object base;
764 847
765 /* primary and cursor planes for CRTC */ 848 /**
849 * @primary:
850 * Primary plane for this CRTC. Note that this is only
851 * relevant for legacy IOCTLs: it specifies the plane implicitly used by
852 * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance
853 * beyond that.
854 */
766 struct drm_plane *primary; 855 struct drm_plane *primary;
856
857 /**
858 * @cursor:
859 * Cursor plane for this CRTC. Note that this is only relevant for
860 * legacy IOCTLs: it specifies the plane implicitly used by the SETCURSOR
861 * and SETCURSOR2 IOCTLs. It does not have any significance
862 * beyond that.
863 */
767 struct drm_plane *cursor; 864 struct drm_plane *cursor;
768 865
769 /** 866 /**
@@ -772,30 +869,94 @@ struct drm_crtc {
772 */ 869 */
773 unsigned index; 870 unsigned index;
774 871
775 /* position of cursor plane on crtc */ 872 /**
873 * @cursor_x: Current x position of the cursor, used for universal
874 * cursor planes because the SETCURSOR IOCTL can only update the
875 * framebuffer without supplying the coordinates. Drivers should not use
876 * this directly, atomic drivers should look at &drm_plane_state.crtc_x
877 * of the cursor plane instead.
878 */
776 int cursor_x; 879 int cursor_x;
880 /**
881 * @cursor_y: Current y position of the cursor, used for universal
882 * cursor planes because the SETCURSOR IOCTL can only update the
883 * framebuffer without supplying the coordinates. Drivers should not use
884 * this directly, atomic drivers should look at &drm_plane_state.crtc_y
885 * of the cursor plane instead.
886 */
777 int cursor_y; 887 int cursor_y;
778 888
889 /**
890 * @enabled:
891 *
892 * Is this CRTC enabled? Should only be used by legacy drivers, atomic
893 * drivers should instead consult &drm_crtc_state.enable and
894 * &drm_crtc_state.active. Atomic drivers can update this by calling
895 * drm_atomic_helper_update_legacy_modeset_state().
896 */
779 bool enabled; 897 bool enabled;
780 898
781 /* Requested mode from modesetting. */ 899 /**
900 * @mode:
901 *
902 * Current mode timings. Should only be used by legacy drivers, atomic
903 * drivers should instead consult &drm_crtc_state.mode. Atomic drivers
904 * can update this by calling
905 * drm_atomic_helper_update_legacy_modeset_state().
906 */
782 struct drm_display_mode mode; 907 struct drm_display_mode mode;
783 908
784 /* Programmed mode in hw, after adjustments for encoders, 909 /**
785 * crtc, panel scaling etc. Needed for timestamping etc. 910 * @hwmode:
911 *
912 * Programmed mode in hw, after adjustments for encoders, crtc, panel
913 * scaling etc. Should only be used by legacy drivers, for high
914 * precision vblank timestamps in
915 * drm_calc_vbltimestamp_from_scanoutpos().
916 *
917 * Note that atomic drivers should not use this, but instead use
918 * &drm_crtc_state.adjusted_mode. And for high-precision timestamps
919 * drm_calc_vbltimestamp_from_scanoutpos() uses &drm_vblank_crtc.hwmode,
920 * which is filled out by calling drm_calc_timestamping_constants().
786 */ 921 */
787 struct drm_display_mode hwmode; 922 struct drm_display_mode hwmode;
788 923
789 int x, y; 924 /**
925 * @x:
926 * x position on screen. Should only be used by legacy drivers, atomic
927 * drivers should look at &drm_plane_state.crtc_x of the primary plane
928 * instead. Updated by calling
929 * drm_atomic_helper_update_legacy_modeset_state().
930 */
931 int x;
932 /**
933 * @y:
934 * y position on screen. Should only be used by legacy drivers, atomic
935 * drivers should look at &drm_plane_state.crtc_y of the primary plane
936 * instead. Updated by calling
937 * drm_atomic_helper_update_legacy_modeset_state().
938 */
939 int y;
940
941 /** @funcs: CRTC control functions */
790 const struct drm_crtc_funcs *funcs; 942 const struct drm_crtc_funcs *funcs;
791 943
792 /* Legacy FB CRTC gamma size for reporting to userspace */ 944 /**
945 * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
946 * by calling drm_mode_crtc_set_gamma_size().
947 */
793 uint32_t gamma_size; 948 uint32_t gamma_size;
949
950 /**
951 * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
952 * GETGAMMA IOCTLs. Set up by calling drm_mode_crtc_set_gamma_size().
953 */
794 uint16_t *gamma_store; 954 uint16_t *gamma_store;
795 955
796 /* if you are using the helper */ 956 /** @helper_private: mid-layer private data */
797 const struct drm_crtc_helper_funcs *helper_private; 957 const struct drm_crtc_helper_funcs *helper_private;
798 958
959 /** @properties: property tracking for this CRTC */
799 struct drm_object_properties properties; 960 struct drm_object_properties properties;
800 961
801 /** 962 /**
@@ -865,7 +1026,6 @@ struct drm_crtc {
865 * 1026 *
866 * spinlock to protect the fences in the fence_context. 1027 * spinlock to protect the fences in the fence_context.
867 */ 1028 */
868
869 spinlock_t fence_lock; 1029 spinlock_t fence_lock;
870 /** 1030 /**
871 * @fence_seqno: 1031 * @fence_seqno:
@@ -935,8 +1095,8 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
935 * drm_crtc_mask - find the mask of a registered CRTC 1095 * drm_crtc_mask - find the mask of a registered CRTC
936 * @crtc: CRTC to find mask for 1096 * @crtc: CRTC to find mask for
937 * 1097 *
938 * Given a registered CRTC, return the mask bit of that CRTC for an 1098 * Given a registered CRTC, return the mask bit of that CRTC for the
939 * encoder's possible_crtcs field. 1099 * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields.
940 */ 1100 */
941static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc) 1101static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
942{ 1102{
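For illustration, the updated drm_crtc_mask() kerneldoc matches how the mask is typically consumed when building possible_crtcs (hedged sketch, not from this patch; my_crtc, my_plane, my_plane_funcs and my_formats are placeholders)::

    /* restrict a newly initialised overlay plane to a single CRTC */
    ret = drm_universal_plane_init(dev, my_plane, drm_crtc_mask(my_crtc),
                                   &my_plane_funcs,
                                   my_formats, ARRAY_SIZE(my_formats),
                                   NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
    if (ret)
            return ret;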
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
index 7d63b1d4adb9..b225eeb30d05 100644
--- a/include/drm/drm_debugfs_crc.h
+++ b/include/drm/drm_debugfs_crc.h
@@ -43,6 +43,7 @@ struct drm_crtc_crc_entry {
43 * @lock: protects the fields in this struct 43 * @lock: protects the fields in this struct
44 * @source: name of the currently configured source of CRCs 44 * @source: name of the currently configured source of CRCs
45 * @opened: whether userspace has opened the data file for reading 45 * @opened: whether userspace has opened the data file for reading
46 * @overflow: whether an overflow occurred.
46 * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR 47 * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
47 * @head: head of circular queue 48 * @head: head of circular queue
48 * @tail: tail of circular queue 49 * @tail: tail of circular queue
@@ -52,7 +53,7 @@ struct drm_crtc_crc_entry {
52struct drm_crtc_crc { 53struct drm_crtc_crc {
53 spinlock_t lock; 54 spinlock_t lock;
54 const char *source; 55 const char *source;
55 bool opened; 56 bool opened, overflow;
56 struct drm_crtc_crc_entry *entries; 57 struct drm_crtc_crc_entry *entries;
57 int head, tail; 58 int head, tail;
58 size_t values_cnt; 59 size_t values_cnt;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 858ba19a3e29..f9c6e0e3aec7 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -74,6 +74,27 @@ struct drm_device {
74 struct mutex filelist_mutex; 74 struct mutex filelist_mutex;
75 struct list_head filelist; 75 struct list_head filelist;
76 76
77 /**
78 * @filelist_internal:
79 *
80 * List of open DRM files for in-kernel clients. Protected by @filelist_mutex.
81 */
82 struct list_head filelist_internal;
83
84 /**
85 * @clientlist_mutex:
86 *
87 * Protects @clientlist access.
88 */
89 struct mutex clientlist_mutex;
90
91 /**
92 * @clientlist:
93 *
94 * List of in-kernel clients. Protected by @clientlist_mutex.
95 */
96 struct list_head clientlist;
97
77 /** \name Memory management */ 98 /** \name Memory management */
78 /*@{ */ 99 /*@{ */
79 struct list_head maplist; /**< Linked list of regions */ 100 struct list_head maplist; /**< Linked list of regions */
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index c01564991a9f..05cc31b5db16 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1078,6 +1078,25 @@ struct drm_dp_aux_msg {
1078 size_t size; 1078 size_t size;
1079}; 1079};
1080 1080
1081struct cec_adapter;
1082struct edid;
1083
1084/**
1085 * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
1086 * @lock: mutex protecting this struct
1087 * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
1088 * @name: name of the CEC adapter
1089 * @parent: parent device of the CEC adapter
1090 * @unregister_work: unregister the CEC adapter
1091 */
1092struct drm_dp_aux_cec {
1093 struct mutex lock;
1094 struct cec_adapter *adap;
1095 const char *name;
1096 struct device *parent;
1097 struct delayed_work unregister_work;
1098};
1099
1081/** 1100/**
1082 * struct drm_dp_aux - DisplayPort AUX channel 1101 * struct drm_dp_aux - DisplayPort AUX channel
1083 * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter 1102 * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
@@ -1136,6 +1155,10 @@ struct drm_dp_aux {
1136 * @i2c_defer_count: Counts I2C DEFERs, used for DP validation. 1155 * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
1137 */ 1156 */
1138 unsigned i2c_defer_count; 1157 unsigned i2c_defer_count;
1158 /**
1159 * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
1160 */
1161 struct drm_dp_aux_cec cec;
1139}; 1162};
1140 1163
1141ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, 1164ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -1258,4 +1281,37 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
1258 return desc->quirks & BIT(quirk); 1281 return desc->quirks & BIT(quirk);
1259} 1282}
1260 1283
1284#ifdef CONFIG_DRM_DP_CEC
1285void drm_dp_cec_irq(struct drm_dp_aux *aux);
1286void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
1287 struct device *parent);
1288void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
1289void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
1290void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
1291#else
1292static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
1293{
1294}
1295
1296static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
1297 const char *name,
1298 struct device *parent)
1299{
1300}
1301
1302static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
1303{
1304}
1305
1306static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
1307 const struct edid *edid)
1308{
1309}
1310
1311static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
1312{
1313}
1314
1315#endif
1316
1261#endif /* _DRM_DP_HELPER_H_ */ 1317#endif /* _DRM_DP_HELPER_H_ */
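A hedged usage sketch for the CEC-Tunneling-over-AUX helpers added above (not part of this patch; aux, connector and edid stand in for the driver's own objects)::

    /* once the connector is registered */
    drm_dp_cec_register_connector(aux, connector->name, connector->dev->dev);

    /* whenever a new EDID has been read for the sink */
    drm_dp_cec_set_edid(aux, edid);

    /* from the HPD IRQ handler when a CEC IRQ is signalled */
    drm_dp_cec_irq(aux);

    /* on hotplug-off or before tearing the connector down */
    drm_dp_cec_unset_edid(aux);
    drm_dp_cec_unregister_connector(aux);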
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 7e545f5f94d3..46a8009784df 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -649,6 +649,35 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
649 return true; 649 return true;
650} 650}
651 651
652/**
653 * drm_core_check_feature - check driver feature flags
654 * @dev: DRM device to check
655 * @feature: feature flag
656 *
657 * This checks @dev for driver features, see &drm_driver.driver_features and the
658 * various DRIVER_\* flags.
659 *
660 * Returns true if the @feature is supported, false otherwise.
661 */
662static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
663{
664 return dev->driver->driver_features & feature;
665}
666
667/**
668 * drm_drv_uses_atomic_modeset - check if the driver implements
669 * atomic_commit()
670 * @dev: DRM device
671 *
672 * This check is useful if drivers do not have DRIVER_ATOMIC set but
673 * have atomic modesetting internally implemented.
674 */
675static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
676{
677 return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
678 dev->mode_config.funcs->atomic_commit != NULL;
679}
680
652 681
653int drm_dev_set_unique(struct drm_device *dev, const char *name); 682int drm_dev_set_unique(struct drm_device *dev, const char *name);
654 683
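For illustration, the two helpers above are typically used as simple guards (hedged sketch, not from this patch)::

    /* reject a KMS-only request on a render-only or legacy device */
    if (!drm_core_check_feature(dev, DRIVER_MODESET))
            return -EINVAL;

    /* true even for drivers that keep DRIVER_ATOMIC unset internally */
    bool use_atomic = drm_drv_uses_atomic_modeset(dev);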
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index fb299696c7c4..4f597c0730b4 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -191,12 +191,24 @@ int drm_encoder_init(struct drm_device *dev,
191 * Given a registered encoder, return the index of that encoder within a DRM 191 * Given a registered encoder, return the index of that encoder within a DRM
192 * device's list of encoders. 192 * device's list of encoders.
193 */ 193 */
194static inline unsigned int drm_encoder_index(struct drm_encoder *encoder) 194static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
195{ 195{
196 return encoder->index; 196 return encoder->index;
197} 197}
198 198
199/** 199/**
200 * drm_encoder_mask - find the mask of a registered encoder
201 * @encoder: encoder to find mask for
202 *
203 * Given a registered encoder, return the mask bit of that encoder for an
204 * encoder's possible_clones field.
205 */
206static inline u32 drm_encoder_mask(const struct drm_encoder *encoder)
207{
208 return 1 << drm_encoder_index(encoder);
209}
210
211/**
200 * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 212 * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
201 * @encoder: encoder to test 213 * @encoder: encoder to test
202 * @crtc: crtc to test 214 * @crtc: crtc to test
@@ -241,7 +253,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder);
241 */ 253 */
242#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \ 254#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
243 list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ 255 list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
244 for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder))) 256 for_each_if ((encoder_mask) & drm_encoder_mask(encoder))
245 257
246/** 258/**
247 * drm_for_each_encoder - iterate over all encoders 259 * drm_for_each_encoder - iterate over all encoders
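A minimal sketch of the new drm_encoder_mask() helper (illustrative only, not from this patch), mirroring how drm_crtc_mask() already feeds possible_crtcs::

    /* an encoder that can only be "cloned" with itself */
    encoder->possible_clones = drm_encoder_mask(encoder);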
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index d532f88a8d55..96e26e3b9a0c 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,16 +16,10 @@ struct drm_mode_fb_cmd2;
16struct drm_plane; 16struct drm_plane;
17struct drm_plane_state; 17struct drm_plane_state;
18 18
19int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
20 unsigned int preferred_bpp, unsigned int max_conn_count,
21 const struct drm_framebuffer_funcs *funcs);
22int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, 19int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
23 unsigned int max_conn_count); 20 unsigned int max_conn_count);
24void drm_fb_cma_fbdev_fini(struct drm_device *dev); 21void drm_fb_cma_fbdev_fini(struct drm_device *dev);
25 22
26struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
27 unsigned int preferred_bpp, unsigned int max_conn_count,
28 const struct drm_framebuffer_funcs *funcs);
29struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, 23struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
30 unsigned int preferred_bpp, unsigned int max_conn_count); 24 unsigned int preferred_bpp, unsigned int max_conn_count);
31void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); 25void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index b069433e7fc1..5db08c8f1d25 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -32,6 +32,7 @@
32 32
33struct drm_fb_helper; 33struct drm_fb_helper;
34 34
35#include <drm/drm_client.h>
35#include <drm/drm_crtc.h> 36#include <drm/drm_crtc.h>
36#include <drm/drm_device.h> 37#include <drm/drm_device.h>
37#include <linux/kgdb.h> 38#include <linux/kgdb.h>
@@ -154,6 +155,20 @@ struct drm_fb_helper_connector {
154 * operations. 155 * operations.
155 */ 156 */
156struct drm_fb_helper { 157struct drm_fb_helper {
158 /**
159 * @client:
160 *
161 * DRM client used by the generic fbdev emulation.
162 */
163 struct drm_client_dev client;
164
165 /**
166 * @buffer:
167 *
168 * Framebuffer used by the generic fbdev emulation.
169 */
170 struct drm_client_buffer *buffer;
171
157 struct drm_framebuffer *fb; 172 struct drm_framebuffer *fb;
158 struct drm_device *dev; 173 struct drm_device *dev;
159 int crtc_count; 174 int crtc_count;
@@ -234,6 +249,12 @@ struct drm_fb_helper {
234 int preferred_bpp; 249 int preferred_bpp;
235}; 250};
236 251
252static inline struct drm_fb_helper *
253drm_fb_helper_from_client(struct drm_client_dev *client)
254{
255 return container_of(client, struct drm_fb_helper, client);
256}
257
237/** 258/**
238 * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers 259 * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers
239 * 260 *
@@ -330,6 +351,10 @@ void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
330 351
331void drm_fb_helper_lastclose(struct drm_device *dev); 352void drm_fb_helper_lastclose(struct drm_device *dev);
332void drm_fb_helper_output_poll_changed(struct drm_device *dev); 353void drm_fb_helper_output_poll_changed(struct drm_device *dev);
354
355int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
356 struct drm_fb_helper_surface_size *sizes);
357int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
333#else 358#else
334static inline void drm_fb_helper_prepare(struct drm_device *dev, 359static inline void drm_fb_helper_prepare(struct drm_device *dev,
335 struct drm_fb_helper *helper, 360 struct drm_fb_helper *helper,
@@ -564,6 +589,19 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
564{ 589{
565} 590}
566 591
592static inline int
593drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
594 struct drm_fb_helper_surface_size *sizes)
595{
596 return 0;
597}
598
599static inline int
600drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
601{
602 return 0;
603}
604
567#endif 605#endif
568 606
569static inline int 607static inline int
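A hedged probe-path sketch for the new generic fbdev client (not part of this patch; drm and the surrounding error handling are assumptions)::

    ret = drm_dev_register(drm, 0);
    if (ret)
            return ret;

    /* spawns the generic fbdev emulation; 32 is the preferred bpp */
    drm_fbdev_generic_setup(drm, 32);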
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 027ac16da3d1..26485acc51d7 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -193,6 +193,13 @@ struct drm_file {
193 unsigned aspect_ratio_allowed:1; 193 unsigned aspect_ratio_allowed:1;
194 194
195 /** 195 /**
196 * @writeback_connectors:
197 *
198 * True if client understands writeback connectors
199 */
200 unsigned writeback_connectors:1;
201
202 /**
196 * @is_master: 203 * @is_master:
197 * 204 *
198 * This client is the creator of @master. Protected by struct 205 * This client is the creator of @master. Protected by struct
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 3e86408dac9f..f9c15845f465 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -39,6 +39,7 @@ struct drm_mode_fb_cmd2;
39 * @hsub: Horizontal chroma subsampling factor 39 * @hsub: Horizontal chroma subsampling factor
40 * @vsub: Vertical chroma subsampling factor 40 * @vsub: Vertical chroma subsampling factor
41 * @has_alpha: Does the format embeds an alpha component? 41 * @has_alpha: Does the format embeds an alpha component?
42 * @is_yuv: Is it a YUV format?
42 */ 43 */
43struct drm_format_info { 44struct drm_format_info {
44 u32 format; 45 u32 format;
@@ -48,6 +49,7 @@ struct drm_format_info {
48 u8 hsub; 49 u8 hsub;
49 u8 vsub; 50 u8 vsub;
50 bool has_alpha; 51 bool has_alpha;
52 bool is_yuv;
51}; 53};
52 54
53/** 55/**
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 101f566ae43d..2c3bbb43c7d1 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -109,6 +109,38 @@ enum drm_mm_insert_mode {
109 * Allocates the node from the bottom of the found hole. 109 * Allocates the node from the bottom of the found hole.
110 */ 110 */
111 DRM_MM_INSERT_EVICT, 111 DRM_MM_INSERT_EVICT,
112
113 /**
114 * @DRM_MM_INSERT_ONCE:
115 *
116 * Only check the first hole for suitability and report -ENOSPC
117 * immediately otherwise, rather than check every hole until a
118 * suitable one is found. Can only be used in conjunction with another
119 * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
120 */
121 DRM_MM_INSERT_ONCE = BIT(31),
122
123 /**
124 * @DRM_MM_INSERT_HIGHEST:
125 *
126 * Only check the highest hole (the hole with the largest address) and
127 * insert the node at the top of the hole or report -ENOSPC if
128 * unsuitable.
129 *
130 * Does not search all holes.
131 */
132 DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,
133
134 /**
135 * @DRM_MM_INSERT_LOWEST:
136 *
137 * Only check the lowest hole (the hole with the smallest address) and
138 * insert the node at the bottom of the hole or report -ENOSPC if
139 * unsuitable.
140 *
141 * Does not search all holes.
142 */
143 DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
112}; 144};
113 145
114/** 146/**
@@ -173,7 +205,7 @@ struct drm_mm {
173 struct drm_mm_node head_node; 205 struct drm_mm_node head_node;
174 /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ 206 /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
175 struct rb_root_cached interval_tree; 207 struct rb_root_cached interval_tree;
176 struct rb_root holes_size; 208 struct rb_root_cached holes_size;
177 struct rb_root holes_addr; 209 struct rb_root holes_addr;
178 210
179 unsigned long scan_active; 211 unsigned long scan_active;
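To illustrate the new insert modes (hedged sketch, not from this patch; mm, node and size come from the surrounding driver code)::

    /* try the topmost hole only, then fall back to a full top-down search */
    err = drm_mm_insert_node_in_range(&mm, &node, size, 0, 0,
                                      0, U64_MAX, DRM_MM_INSERT_HIGHEST);
    if (err == -ENOSPC)
            err = drm_mm_insert_node_in_range(&mm, &node, size, 0, 0,
                                              0, U64_MAX, DRM_MM_INSERT_HIGH);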
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 33b3a96d66d0..a0b202e1d69a 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -329,10 +329,10 @@ struct drm_mode_config_funcs {
329 329
330/** 330/**
331 * struct drm_mode_config - Mode configuration control structure 331 * struct drm_mode_config - Mode configuration control structure
332 * @min_width: minimum pixel width on this device 332 * @min_width: minimum fb pixel width on this device
333 * @min_height: minimum pixel height on this device 333 * @min_height: minimum fb pixel height on this device
334 * @max_width: maximum pixel width on this device 334 * @max_width: maximum fb pixel width on this device
335 * @max_height: maximum pixel height on this device 335 * @max_height: maximum fb pixel height on this device
336 * @funcs: core driver provided mode setting functions 336 * @funcs: core driver provided mode setting functions
337 * @fb_base: base address of the framebuffer 337 * @fb_base: base address of the framebuffer
338 * @poll_enabled: track polling support for this device 338 * @poll_enabled: track polling support for this device
@@ -727,6 +727,11 @@ struct drm_mode_config {
727 */ 727 */
728 struct drm_property *aspect_ratio_property; 728 struct drm_property *aspect_ratio_property;
729 /** 729 /**
730 * @content_type_property: Optional connector property to control the
731 * HDMI infoframe content type setting.
732 */
733 struct drm_property *content_type_property;
734 /**
730 * @degamma_lut_property: Optional CRTC property to set the LUT used to 735 * @degamma_lut_property: Optional CRTC property to set the LUT used to
731 * convert the framebuffer's colors to linear gamma. 736 * convert the framebuffer's colors to linear gamma.
732 */ 737 */
@@ -779,6 +784,29 @@ struct drm_mode_config {
779 */ 784 */
780 struct drm_property *panel_orientation_property; 785 struct drm_property *panel_orientation_property;
781 786
787 /**
788 * @writeback_fb_id_property: Property for writeback connectors, storing
789 * the ID of the output framebuffer.
790 * See also: drm_writeback_connector_init()
791 */
792 struct drm_property *writeback_fb_id_property;
793
794 /**
795 * @writeback_pixel_formats_property: Property for writeback connectors,
796 * storing an array of the supported pixel formats for the writeback
797 * engine (read-only).
798 * See also: drm_writeback_connector_init()
799 */
800 struct drm_property *writeback_pixel_formats_property;
801 /**
802 * @writeback_out_fence_ptr_property: Property for writeback connectors,
803 * fd pointer representing the outgoing fences for a writeback
804 * connector. Userspace should provide a pointer to a value of type s32,
805 * and then cast that pointer to u64.
806 * See also: drm_writeback_connector_init()
807 */
808 struct drm_property *writeback_out_fence_ptr_property;
809
782 /* dumb ioctl parameters */ 810 /* dumb ioctl parameters */
783 uint32_t preferred_depth, prefer_shadow; 811 uint32_t preferred_depth, prefer_shadow;
784 812
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index b159fe07fcf9..baded6514456 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -530,7 +530,7 @@ drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
530void drm_mode_prune_invalid(struct drm_device *dev, 530void drm_mode_prune_invalid(struct drm_device *dev,
531 struct list_head *mode_list, bool verbose); 531 struct list_head *mode_list, bool verbose);
532void drm_mode_sort(struct list_head *mode_list); 532void drm_mode_sort(struct list_head *mode_list);
533void drm_mode_connector_list_update(struct drm_connector *connector); 533void drm_connector_list_update(struct drm_connector *connector);
534 534
535/* parsing cmdline modes */ 535/* parsing cmdline modes */
536bool 536bool
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 35e2a3a79fc5..61142aa0ab23 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -785,7 +785,7 @@ struct drm_connector_helper_funcs {
785 * 785 *
786 * This function should fill in all modes currently valid for the sink 786 * This function should fill in all modes currently valid for the sink
787 * into the &drm_connector.probed_modes list. It should also update the 787 * into the &drm_connector.probed_modes list. It should also update the
788 * EDID property by calling drm_mode_connector_update_edid_property(). 788 * EDID property by calling drm_connector_update_edid_property().
789 * 789 *
790 * The usual way to implement this is to cache the EDID retrieved in the 790 * The usual way to implement this is to cache the EDID retrieved in the
791 * probe callback somewhere in the driver-private connector structure. 791 * probe callback somewhere in the driver-private connector structure.
@@ -974,6 +974,21 @@ struct drm_connector_helper_funcs {
974 */ 974 */
975 int (*atomic_check)(struct drm_connector *connector, 975 int (*atomic_check)(struct drm_connector *connector,
976 struct drm_connector_state *state); 976 struct drm_connector_state *state);
977
978 /**
979 * @atomic_commit:
980 *
981 * This hook is to be used by drivers implementing writeback connectors
982 * that need a point at which to commit the writeback job to the hardware.
983 * The writeback_job to commit is available in
984 * &drm_connector_state.writeback_job.
985 *
986 * This hook is optional.
987 *
988 * This callback is used by the atomic modeset helpers.
989 */
990 void (*atomic_commit)(struct drm_connector *connector,
991 struct drm_connector_state *state);
977}; 992};
978 993
979/** 994/**
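A hedged sketch of how a writeback driver might implement the new hook (not part of this patch; to_my_wb() and my_hw_start_writeback() are invented driver helpers)::

    static void my_wb_atomic_commit(struct drm_connector *connector,
                                    struct drm_connector_state *state)
    {
            struct my_wb *wb = to_my_wb(connector);

            /* only kick the hardware when userspace actually queued a job */
            if (!state->writeback_job || !state->writeback_job->fb)
                    return;

            my_hw_start_writeback(wb, state->writeback_job->fb);
    }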
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index b93c239afb60..ead34ab5ca4e 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -17,6 +17,8 @@ struct drm_bridge;
17struct device_node; 17struct device_node;
18 18
19#ifdef CONFIG_OF 19#ifdef CONFIG_OF
20uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
21 struct device_node *port);
20uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, 22uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
21 struct device_node *port); 23 struct device_node *port);
22void drm_of_component_match_add(struct device *master, 24void drm_of_component_match_add(struct device *master,
@@ -34,6 +36,12 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
34 struct drm_panel **panel, 36 struct drm_panel **panel,
35 struct drm_bridge **bridge); 37 struct drm_bridge **bridge);
36#else 38#else
39static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
40 struct device_node *port)
41{
42 return 0;
43}
44
37static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, 45static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
38 struct device_node *port) 46 struct device_node *port)
39{ 47{
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 14ac240a1f64..582a0ec0aa70 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -89,6 +89,7 @@ struct drm_panel {
89 struct drm_device *drm; 89 struct drm_device *drm;
90 struct drm_connector *connector; 90 struct drm_connector *connector;
91 struct device *dev; 91 struct device *dev;
92 struct device_link *link;
92 93
93 const struct drm_panel_funcs *funcs; 94 const struct drm_panel_funcs *funcs;
94 95
@@ -199,7 +200,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np);
199#else 200#else
200static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) 201static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
201{ 202{
202 return NULL; 203 return ERR_PTR(-ENODEV);
203} 204}
204#endif 205#endif
205 206
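Since of_drm_find_panel() now reports errors via ERR_PTR(), callers switch from a NULL check to the usual pattern (illustrative sketch, not from this patch)::

    panel = of_drm_find_panel(np);
    if (IS_ERR(panel))
            return PTR_ERR(panel);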
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 674599025d7d..8181e9e7cf1d 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -58,11 +58,4 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev,
58} 58}
59#endif 59#endif
60 60
61#define DRM_PCIE_SPEED_25 1
62#define DRM_PCIE_SPEED_50 2
63#define DRM_PCIE_SPEED_80 4
64
65int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
66int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
67
68#endif /* _DRM_PCI_H_ */ 61#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 26fa50c2a50e..8a152dc16ea5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -34,31 +34,15 @@ struct drm_modeset_acquire_ctx;
34 34
35/** 35/**
36 * struct drm_plane_state - mutable plane state 36 * struct drm_plane_state - mutable plane state
37 * @plane: backpointer to the plane 37 *
38 * @crtc_w: width of visible portion of plane on crtc 38 * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
39 * @crtc_h: height of visible portion of plane on crtc 39 * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
40 * @src_x: left position of visible portion of plane within 40 * raw coordinates provided by userspace. Drivers should use
41 * plane (in 16.16) 41 * drm_atomic_helper_check_plane_state() and only use the derived rectangles in
42 * @src_y: upper position of visible portion of plane within 42 * @src and @dst to program the hardware.
43 * plane (in 16.16)
44 * @src_w: width of visible portion of plane (in 16.16)
45 * @src_h: height of visible portion of plane (in 16.16)
46 * @alpha: opacity of the plane
47 * @rotation: rotation of the plane
48 * @zpos: priority of the given plane on crtc (optional)
49 * Note that multiple active planes on the same crtc can have an identical
50 * zpos value. The rule to solving the conflict is to compare the plane
51 * object IDs; the plane with a higher ID must be stacked on top of a
52 * plane with a lower ID.
53 * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
54 * where N is the number of active planes for given crtc. Note that
55 * the driver must set drm_mode_config.normalize_zpos or call
56 * drm_atomic_normalize_zpos() to update this before it can be trusted.
57 * @src: clipped source coordinates of the plane (in 16.16)
58 * @dst: clipped destination coordinates of the plane
59 * @state: backpointer to global drm_atomic_state
60 */ 43 */
61struct drm_plane_state { 44struct drm_plane_state {
45 /** @plane: backpointer to the plane */
62 struct drm_plane *plane; 46 struct drm_plane *plane;
63 47
64 /** 48 /**
@@ -87,7 +71,7 @@ struct drm_plane_state {
87 * preserved. 71 * preserved.
88 * 72 *
89 * Drivers should store any implicit fence in this from their 73 * Drivers should store any implicit fence in this from their
90 * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb() 74 * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
91 * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers. 75 * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
92 */ 76 */
93 struct dma_fence *fence; 77 struct dma_fence *fence;
@@ -108,20 +92,60 @@ struct drm_plane_state {
108 */ 92 */
109 int32_t crtc_y; 93 int32_t crtc_y;
110 94
95 /** @crtc_w: width of visible portion of plane on crtc */
96 /** @crtc_h: height of visible portion of plane on crtc */
111 uint32_t crtc_w, crtc_h; 97 uint32_t crtc_w, crtc_h;
112 98
113 /* Source values are 16.16 fixed point */ 99 /**
114 uint32_t src_x, src_y; 100 * @src_x: left position of visible portion of plane within plane (in
101 * 16.16 fixed point).
102 */
103 uint32_t src_x;
104 /**
105 * @src_y: upper position of visible portion of plane within plane (in
106 * 16.16 fixed point).
107 */
108 uint32_t src_y;
109 /** @src_w: width of visible portion of plane (in 16.16) */
110 /** @src_h: height of visible portion of plane (in 16.16) */
115 uint32_t src_h, src_w; 111 uint32_t src_h, src_w;
116 112
117 /* Plane opacity */ 113 /**
114 * @alpha:
115 * Opacity of the plane with 0 as completely transparent and 0xffff as
116 * completely opaque. See drm_plane_create_alpha_property() for more
117 * details.
118 */
118 u16 alpha; 119 u16 alpha;
119 120
120 /* Plane rotation */ 121 /**
122 * @rotation:
123 * Rotation of the plane. See drm_plane_create_rotation_property() for
124 * more details.
125 */
121 unsigned int rotation; 126 unsigned int rotation;
122 127
123 /* Plane zpos */ 128 /**
129 * @zpos:
130 * Priority of the given plane on crtc (optional).
131 *
132 * Note that multiple active planes on the same crtc can have an
133 * identical zpos value. The rule for solving the conflict is to compare
134 * the plane object IDs; the plane with a higher ID must be stacked on
135 * top of a plane with a lower ID.
136 *
137 * See drm_plane_create_zpos_property() and
138 * drm_plane_create_zpos_immutable_property() for more details.
139 */
124 unsigned int zpos; 140 unsigned int zpos;
141
142 /**
143 * @normalized_zpos:
144 * Normalized value of zpos: unique, range from 0 to N-1 where N is the
145 * number of active planes for given crtc. Note that the driver must set
146 * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to
147 * update this before it can be trusted.
148 */
125 unsigned int normalized_zpos; 149 unsigned int normalized_zpos;
126 150
127 /** 151 /**
@@ -138,7 +162,8 @@ struct drm_plane_state {
138 */ 162 */
139 enum drm_color_range color_range; 163 enum drm_color_range color_range;
140 164
141 /* Clipped coordinates */ 165 /** @src: clipped source coordinates of the plane (in 16.16) */
166 /** @dst: clipped destination coordinates of the plane */
142 struct drm_rect src, dst; 167 struct drm_rect src, dst;
143 168
144 /** 169 /**
@@ -157,6 +182,7 @@ struct drm_plane_state {
157 */ 182 */
158 struct drm_crtc_commit *commit; 183 struct drm_crtc_commit *commit;
159 184
185 /** @state: backpointer to global drm_atomic_state */
160 struct drm_atomic_state *state; 186 struct drm_atomic_state *state;
161}; 187};
162 188
@@ -288,6 +314,8 @@ struct drm_plane_funcs {
288 * cleaned up by calling the @atomic_destroy_state hook in this 314 * cleaned up by calling the @atomic_destroy_state hook in this
289 * structure. 315 * structure.
290 * 316 *
317 * This callback is mandatory for atomic drivers.
318 *
291 * Atomic drivers which don't subclass &struct drm_plane_state should use 319 * Atomic drivers which don't subclass &struct drm_plane_state should use
292 * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the 320 * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
293 * state structure to extend it with driver-private state should use 321 * state structure to extend it with driver-private state should use
@@ -314,6 +342,8 @@ struct drm_plane_funcs {
314 * 342 *
315 * Destroy a state duplicated with @atomic_duplicate_state and release 343 * Destroy a state duplicated with @atomic_duplicate_state and release
316 * or unreference all resources it references 344 * or unreference all resources it references
345 *
346 * This callback is mandatory for atomic drivers.
317 */ 347 */
318 void (*atomic_destroy_state)(struct drm_plane *plane, 348 void (*atomic_destroy_state)(struct drm_plane *plane,
319 struct drm_plane_state *state); 349 struct drm_plane_state *state);
@@ -431,7 +461,10 @@ struct drm_plane_funcs {
431 * This optional hook is used for the DRM to determine if the given 461 * This optional hook is used for the DRM to determine if the given
432 * format/modifier combination is valid for the plane. This allows the 462 * format/modifier combination is valid for the plane. This allows the
433 * DRM to generate the correct format bitmask (which formats apply to 463 * DRM to generate the correct format bitmask (which formats apply to
434 * which modifier). 464 * which modifier), and to validate modifiers at atomic_check time.
465 *
466 * If not present, then any modifier in the plane's modifier
467 * list is allowed with any of the plane's formats.
435 * 468 *
436 * Returns: 469 * Returns:
437 * 470 *
@@ -492,30 +525,27 @@ enum drm_plane_type {
492 525
493/** 526/**
494 * struct drm_plane - central DRM plane control structure 527 * struct drm_plane - central DRM plane control structure
495 * @dev: DRM device this plane belongs to 528 *
496 * @head: for list management 529 * Planes represent the scanout hardware of a display block. They receive their
497 * @name: human readable name, can be overwritten by the driver 530 * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control
498 * @base: base mode object 531 * the color conversion, see `Plane Composition Properties`_ for more details,
499 * @possible_crtcs: pipes this plane can be bound to 532 * and are also involved in the color conversion of input pixels, see `Color
500 * @format_types: array of formats supported by this plane 533 * Management Properties`_ for details on that.
501 * @format_count: number of formats supported
502 * @format_default: driver hasn't supplied supported formats for the plane
503 * @modifiers: array of modifiers supported by this plane
504 * @modifier_count: number of modifiers supported
505 * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
506 * drm_mode_set_config_internal() to implement correct refcounting.
507 * @funcs: helper functions
508 * @properties: property tracking for this plane
509 * @type: type of plane (overlay, primary, cursor)
510 * @alpha_property: alpha property for this plane
511 * @zpos_property: zpos property for this plane
512 * @rotation_property: rotation property for this plane
513 * @helper_private: mid-layer private data
514 */ 534 */
515struct drm_plane { 535struct drm_plane {
536 /** @dev: DRM device this plane belongs to */
516 struct drm_device *dev; 537 struct drm_device *dev;
538
539 /**
540 * @head:
541 *
542 * List of all planes on @dev, linked from &drm_mode_config.plane_list.
543 * Invariant over the lifetime of @dev and therefore does not need
544 * locking.
545 */
517 struct list_head head; 546 struct list_head head;
518 547
548 /** @name: human readable name, can be overwritten by the driver */
519 char *name; 549 char *name;
520 550
521 /** 551 /**
@@ -529,35 +559,62 @@ struct drm_plane {
529 */ 559 */
530 struct drm_modeset_lock mutex; 560 struct drm_modeset_lock mutex;
531 561
562 /** @base: base mode object */
532 struct drm_mode_object base; 563 struct drm_mode_object base;
533 564
565 /**
566 * @possible_crtcs: pipes this plane can be bound to, constructed from
567 * drm_crtc_mask()
568 */
534 uint32_t possible_crtcs; 569 uint32_t possible_crtcs;
570 /** @format_types: array of formats supported by this plane */
535 uint32_t *format_types; 571 uint32_t *format_types;
572 /** @format_count: Size of the array pointed at by @format_types. */
536 unsigned int format_count; 573 unsigned int format_count;
574 /**
575 * @format_default: driver hasn't supplied supported formats for the
576 * plane. Used by the drm_plane_init compatibility wrapper only.
577 */
537 bool format_default; 578 bool format_default;
538 579
580 /** @modifiers: array of modifiers supported by this plane */
539 uint64_t *modifiers; 581 uint64_t *modifiers;
582 /** @modifier_count: Size of the array pointed at by @modifiers. */
540 unsigned int modifier_count; 583 unsigned int modifier_count;
541 584
542 /** 585 /**
543 * @crtc: Currently bound CRTC, only really meaningful for non-atomic 586 * @crtc:
544 * drivers. Atomic drivers should instead check &drm_plane_state.crtc. 587 *
588 * Currently bound CRTC, only meaningful for non-atomic drivers. For
589 * atomic drivers this is forced to be NULL, atomic drivers should
590 * instead check &drm_plane_state.crtc.
545 */ 591 */
546 struct drm_crtc *crtc; 592 struct drm_crtc *crtc;
547 593
548 /** 594 /**
549 * @fb: Currently bound framebuffer, only really meaningful for 595 * @fb:
550 * non-atomic drivers. Atomic drivers should instead check 596 *
551 * &drm_plane_state.fb. 597 * Currently bound framebuffer, only meaningful for non-atomic drivers.
598 * For atomic drivers this is forced to be NULL, atomic drivers should
599 * instead check &drm_plane_state.fb.
552 */ 600 */
553 struct drm_framebuffer *fb; 601 struct drm_framebuffer *fb;
554 602
603 /**
604 * @old_fb:
605 *
606 * Temporary tracking of the old fb while a modeset is ongoing. Only
607 * used by non-atomic drivers, forced to be NULL for atomic drivers.
608 */
555 struct drm_framebuffer *old_fb; 609 struct drm_framebuffer *old_fb;
556 610
611 /** @funcs: plane control functions */
557 const struct drm_plane_funcs *funcs; 612 const struct drm_plane_funcs *funcs;
558 613
614 /** @properties: property tracking for this plane */
559 struct drm_object_properties properties; 615 struct drm_object_properties properties;
560 616
617 /** @type: Type of plane, see &enum drm_plane_type for details. */
561 enum drm_plane_type type; 618 enum drm_plane_type type;
562 619
563 /** 620 /**
@@ -566,6 +623,7 @@ struct drm_plane {
566 */ 623 */
567 unsigned index; 624 unsigned index;
568 625
626 /** @helper_private: mid-layer private data */
569 const struct drm_plane_helper_funcs *helper_private; 627 const struct drm_plane_helper_funcs *helper_private;
570 628
571 /** 629 /**
@@ -583,8 +641,23 @@ struct drm_plane {
583 */ 641 */
584 struct drm_plane_state *state; 642 struct drm_plane_state *state;
585 643
644 /**
645 * @alpha_property:
646 * Optional alpha property for this plane. See
647 * drm_plane_create_alpha_property().
648 */
586 struct drm_property *alpha_property; 649 struct drm_property *alpha_property;
650 /**
651 * @zpos_property:
652 * Optional zpos property for this plane. See
653 * drm_plane_create_zpos_property().
654 */
587 struct drm_property *zpos_property; 655 struct drm_property *zpos_property;
656 /**
657 * @rotation_property:
658 * Optional rotation property for this plane. See
659 * drm_plane_create_rotation_property().
660 */
588 struct drm_property *rotation_property; 661 struct drm_property *rotation_property;
589 662
590 /** 663 /**
@@ -632,10 +705,20 @@ void drm_plane_cleanup(struct drm_plane *plane);
632 * Given a registered plane, return the index of that plane within a DRM 705 * Given a registered plane, return the index of that plane within a DRM
633 * device's list of planes. 706 * device's list of planes.
634 */ 707 */
635static inline unsigned int drm_plane_index(struct drm_plane *plane) 708static inline unsigned int drm_plane_index(const struct drm_plane *plane)
636{ 709{
637 return plane->index; 710 return plane->index;
638} 711}
712
713/**
714 * drm_plane_mask - find the mask of a registered plane
715 * @plane: plane to find mask for
716 */
717static inline u32 drm_plane_mask(const struct drm_plane *plane)
718{
719 return 1 << drm_plane_index(plane);
720}
721
639struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); 722struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
640void drm_plane_force_disable(struct drm_plane *plane); 723void drm_plane_force_disable(struct drm_plane *plane);
641 724
@@ -671,7 +754,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
671 */ 754 */
672#define drm_for_each_plane_mask(plane, dev, plane_mask) \ 755#define drm_for_each_plane_mask(plane, dev, plane_mask) \
673 list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ 756 list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
674 for_each_if ((plane_mask) & (1 << drm_plane_index(plane))) 757 for_each_if ((plane_mask) & drm_plane_mask(plane))
675 758
676/** 759/**
677 * drm_for_each_legacy_plane - iterate over all planes for legacy userspace 760 * drm_for_each_legacy_plane - iterate over all planes for legacy userspace
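For illustration, drm_plane_mask() pairs with the existing iterator exactly like the CRTC and encoder variants (hedged sketch, not from this patch)::

    /* walk every plane currently attached to a CRTC state */
    drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask)
            DRM_DEBUG_KMS("plane %s feeds this CRTC\n", plane->name);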
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 28d7ce620729..26cee2934781 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -67,8 +67,10 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
67 int crtc_x, int crtc_y, 67 int crtc_x, int crtc_y,
68 unsigned int crtc_w, unsigned int crtc_h, 68 unsigned int crtc_w, unsigned int crtc_h,
69 uint32_t src_x, uint32_t src_y, 69 uint32_t src_x, uint32_t src_y,
70 uint32_t src_w, uint32_t src_h); 70 uint32_t src_w, uint32_t src_h,
71int drm_plane_helper_disable(struct drm_plane *plane); 71 struct drm_modeset_acquire_ctx *ctx);
72int drm_plane_helper_disable(struct drm_plane *plane,
73 struct drm_modeset_acquire_ctx *ctx);
72 74
73/* For use by drm_crtc_helper.c */ 75/* For use by drm_crtc_helper.c */
74int drm_plane_helper_commit(struct drm_plane *plane, 76int drm_plane_helper_commit(struct drm_plane *plane,
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 4d5f5d6cf6a6..d716d653b096 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -82,7 +82,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
82struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, 82struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
83 struct dma_buf_export_info *exp_info); 83 struct dma_buf_export_info *exp_info);
84void drm_gem_dmabuf_release(struct dma_buf *dma_buf); 84void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
85int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev, 85int drm_gem_map_attach(struct dma_buf *dma_buf,
86 struct dma_buf_attachment *attach); 86 struct dma_buf_attachment *attach);
87void drm_gem_map_detach(struct dma_buf *dma_buf, 87void drm_gem_map_detach(struct dma_buf *dma_buf,
88 struct dma_buf_attachment *attach); 88 struct dma_buf_attachment *attach);
@@ -93,10 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
93 enum dma_data_direction dir); 93 enum dma_data_direction dir);
94void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); 94void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
95void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); 95void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
96void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
97 unsigned long page_num);
98void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
99 unsigned long page_num, void *addr);
100void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num); 96void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
101void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, 97void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
102 void *addr); 98 void *addr);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index e1a46e9991cc..f3e6eed3e79c 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -69,16 +69,21 @@
69struct drm_printer { 69struct drm_printer {
70 /* private: */ 70 /* private: */
71 void (*printfn)(struct drm_printer *p, struct va_format *vaf); 71 void (*printfn)(struct drm_printer *p, struct va_format *vaf);
72 void (*puts)(struct drm_printer *p, const char *str);
72 void *arg; 73 void *arg;
73 const char *prefix; 74 const char *prefix;
74}; 75};
75 76
77void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
78void __drm_puts_coredump(struct drm_printer *p, const char *str);
76void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); 79void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
80void __drm_puts_seq_file(struct drm_printer *p, const char *str);
77void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); 81void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
78void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); 82void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
79 83
80__printf(2, 3) 84__printf(2, 3)
81void drm_printf(struct drm_printer *p, const char *f, ...); 85void drm_printf(struct drm_printer *p, const char *f, ...);
86void drm_puts(struct drm_printer *p, const char *str);
82 87
83__printf(2, 0) 88__printf(2, 0)
84/** 89/**
@@ -105,6 +110,71 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
105 drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) 110 drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__)
106 111
107/** 112/**
113 * struct drm_print_iterator - local struct used with drm_printer_coredump
114 * @data: Pointer to the devcoredump output buffer
115 * @start: The offset within the buffer to start writing
116 * @remain: The number of bytes to write for this iteration
117 */
118struct drm_print_iterator {
119 void *data;
120 ssize_t start;
121 ssize_t remain;
122 /* private: */
123 ssize_t offset;
124};
125
126/**
127 * drm_coredump_printer - construct a &drm_printer that can output to a buffer
128 * from the read function for devcoredump
129 * @iter: A pointer to a struct drm_print_iterator for the read instance
130 *
131 * This wrapper extends drm_printf() to work with a dev_coredumpm() callback
132 * function. The passed in drm_print_iterator struct contains the buffer
133 * pointer, size and offset as passed in from devcoredump.
134 *
135 * For example::
136 *
137 * void coredump_read(char *buffer, loff_t offset, size_t count,
138 * void *data, size_t datalen)
139 * {
140 * struct drm_print_iterator iter;
141 * struct drm_printer p;
142 *
143 * iter.data = buffer;
144 * iter.start = offset;
145 * iter.remain = count;
146 *
147 * p = drm_coredump_printer(&iter);
148 *
 149 *		drm_printf(&p, "foo=%d\n", foo);
150 * }
151 *
152 * void makecoredump(...)
153 * {
154 * ...
155 * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL,
156 * coredump_read, ...)
157 * }
158 *
159 * RETURNS:
160 * The &drm_printer object
161 */
162static inline struct drm_printer
163drm_coredump_printer(struct drm_print_iterator *iter)
164{
165 struct drm_printer p = {
166 .printfn = __drm_printfn_coredump,
167 .puts = __drm_puts_coredump,
168 .arg = iter,
169 };
170
171 /* Set the internal offset of the iterator to zero */
172 iter->offset = 0;
173
174 return p;
175}
176
177/**
108 * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file 178 * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
109 * @f: the &struct seq_file to output to 179 * @f: the &struct seq_file to output to
110 * 180 *
@@ -115,6 +185,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
115{ 185{
116 struct drm_printer p = { 186 struct drm_printer p = {
117 .printfn = __drm_printfn_seq_file, 187 .printfn = __drm_printfn_seq_file,
188 .puts = __drm_puts_seq_file,
118 .arg = f, 189 .arg = f,
119 }; 190 };
120 return p; 191 return p;
@@ -195,6 +266,7 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
195#define DRM_UT_VBL 0x20 266#define DRM_UT_VBL 0x20
196#define DRM_UT_STATE 0x40 267#define DRM_UT_STATE 0x40
197#define DRM_UT_LEASE 0x80 268#define DRM_UT_LEASE 0x80
269#define DRM_UT_DP 0x100
198 270
199__printf(3, 4) 271__printf(3, 4)
200void drm_dev_printk(const struct device *dev, const char *level, 272void drm_dev_printk(const struct device *dev, const char *level,
@@ -307,6 +379,11 @@ void drm_err(const char *format, ...);
307#define DRM_DEBUG_LEASE(fmt, ...) \ 379#define DRM_DEBUG_LEASE(fmt, ...) \
308 drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__) 380 drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
309 381
382#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
383 drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
384#define DRM_DEBUG_DP(dev, fmt, ...) \
385 drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
386
310#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \ 387#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
311({ \ 388({ \
312 static DEFINE_RATELIMIT_STATE(_rs, \ 389 static DEFINE_RATELIMIT_STATE(_rs, \
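
A rough sketch of how the new drm_puts() and the DRM_UT_DP category might be used from a driver; the my_dump_state name and the prefix string are assumptions:

static void my_dump_state(struct drm_device *drm)
{
        struct drm_printer p = drm_debug_printer("my_driver");

        drm_puts(&p, "mode config state:\n");
        drm_printf(&p, "num_crtc=%d\n", drm->mode_config.num_crtc);

        /* DisplayPort-specific debug output gated by the new DRM_UT_DP bit */
        DRM_DEBUG_DP(drm->dev, "dumped %d CRTCs\n", drm->mode_config.num_crtc);
}
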
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 1d5c0b2a8956..c030f6ccab99 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -147,10 +147,10 @@ struct drm_property {
147 * properties are not exposed to legacy userspace. 147 * properties are not exposed to legacy userspace.
148 * 148 *
149 * DRM_MODE_PROP_IMMUTABLE 149 * DRM_MODE_PROP_IMMUTABLE
150 * Set for properties where userspace cannot be changed by 150 * Set for properties whose values cannot be changed by
151 * userspace. The kernel is allowed to update the value of these 151 * userspace. The kernel is allowed to update the value of these
152 * properties. This is generally used to expose probe state to 152 * properties. This is generally used to expose probe state to
153 * usersapce, e.g. the EDID, or the connector path property on DP 153 * userspace, e.g. the EDID, or the connector path property on DP
154 * MST sinks. 154 * MST sinks.
155 */ 155 */
156 uint32_t flags; 156 uint32_t flags;
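
For illustration, a property carrying DRM_MODE_PROP_IMMUTABLE is normally created with the flag at creation time; the property name, range, and my_attach_probe_prop are made up for this sketch:

static void my_attach_probe_prop(struct drm_device *dev,
                                 struct drm_connector *connector)
{
        struct drm_property *prop;

        prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
                                         "hypothetical probe state", 0, 0xff);
        if (prop)
                drm_object_attach_property(&connector->base, prop, 0);
}
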
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 8758df94e9a0..c7987daeaed0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
41 rwlock_t vm_lock; 41 rwlock_t vm_lock;
42 struct drm_mm_node vm_node; 42 struct drm_mm_node vm_node;
43 struct rb_root vm_files; 43 struct rb_root vm_files;
44 bool readonly:1;
44}; 45};
45 46
46struct drm_vma_offset_manager { 47struct drm_vma_offset_manager {
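
A hedged sketch of how an mmap path might honour the new readonly bit; the exact check is an assumption for illustration, not taken from this patch:

static int my_gem_mmap_check(struct drm_vma_offset_node *node,
                             struct vm_area_struct *vma)
{
        if (node->readonly) {
                if (vma->vm_flags & VM_WRITE)
                        return -EINVAL;
                /* forbid mprotect() from adding write permission later */
                vma->vm_flags &= ~VM_MAYWRITE;
        }
        return 0;
}
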
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
new file mode 100644
index 000000000000..23df9d463003
--- /dev/null
+++ b/include/drm/drm_writeback.h
@@ -0,0 +1,136 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
4 * Author: Brian Starkey <brian.starkey@arm.com>
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU licence.
10 */
11
12#ifndef __DRM_WRITEBACK_H__
13#define __DRM_WRITEBACK_H__
14#include <drm/drm_connector.h>
15#include <drm/drm_encoder.h>
16#include <linux/workqueue.h>
17
18struct drm_writeback_connector {
19 struct drm_connector base;
20
21 /**
22 * @encoder: Internal encoder used by the connector to fulfill
23 * the DRM framework requirements. The users of the
24 * @drm_writeback_connector control the behaviour of the @encoder
 25	 * by passing the @enc_helper_funcs parameter to drm_writeback_connector_init()
26 * function.
27 */
28 struct drm_encoder encoder;
29
30 /**
31 * @pixel_formats_blob_ptr:
32 *
33 * DRM blob property data for the pixel formats list on writeback
 34	 * connectors.
35 * See also drm_writeback_connector_init()
36 */
37 struct drm_property_blob *pixel_formats_blob_ptr;
38
39 /** @job_lock: Protects job_queue */
40 spinlock_t job_lock;
41
42 /**
43 * @job_queue:
44 *
45 * Holds a list of a connector's writeback jobs; the last item is the
46 * most recent. The first item may be either waiting for the hardware
47 * to begin writing, or currently being written.
48 *
49 * See also: drm_writeback_queue_job() and
50 * drm_writeback_signal_completion()
51 */
52 struct list_head job_queue;
53
54 /**
55 * @fence_context:
56 *
57 * timeline context used for fence operations.
58 */
59 unsigned int fence_context;
60 /**
61 * @fence_lock:
62 *
63 * spinlock to protect the fences in the fence_context.
64 */
65 spinlock_t fence_lock;
66 /**
67 * @fence_seqno:
68 *
69 * Seqno variable used as monotonic counter for the fences
70 * created on the connector's timeline.
71 */
72 unsigned long fence_seqno;
73 /**
74 * @timeline_name:
75 *
76 * The name of the connector's fence timeline.
77 */
78 char timeline_name[32];
79};
80
81struct drm_writeback_job {
82 /**
83 * @cleanup_work:
84 *
85 * Used to allow drm_writeback_signal_completion to defer dropping the
86 * framebuffer reference to a workqueue
87 */
88 struct work_struct cleanup_work;
89
90 /**
91 * @list_entry:
92 *
93 * List item for the writeback connector's @job_queue
94 */
95 struct list_head list_entry;
96
97 /**
98 * @fb:
99 *
100 * Framebuffer to be written to by the writeback connector. Do not set
101 * directly, use drm_atomic_set_writeback_fb_for_connector()
102 */
103 struct drm_framebuffer *fb;
104
105 /**
106 * @out_fence:
107 *
108 * Fence which will signal once the writeback has completed
109 */
110 struct dma_fence *out_fence;
111};
112
113static inline struct drm_writeback_connector *
114drm_connector_to_writeback(struct drm_connector *connector)
115{
116 return container_of(connector, struct drm_writeback_connector, base);
117}
118
119int drm_writeback_connector_init(struct drm_device *dev,
120 struct drm_writeback_connector *wb_connector,
121 const struct drm_connector_funcs *con_funcs,
122 const struct drm_encoder_helper_funcs *enc_helper_funcs,
123 const u32 *formats, int n_formats);
124
125void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
126 struct drm_writeback_job *job);
127
128void drm_writeback_cleanup_job(struct drm_writeback_job *job);
129
130void
131drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
132 int status);
133
134struct dma_fence *
135drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector);
136#endif
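
A minimal sketch of the intended flow, assuming hypothetical my_* driver structures and funcs tables (my_pipe is presumed to embed a struct drm_writeback_connector):

static const u32 my_wb_formats[] = { DRM_FORMAT_XRGB8888 };

static int my_wb_setup(struct drm_device *drm, struct my_pipe *pipe)
{
        return drm_writeback_connector_init(drm, &pipe->wb_connector,
                                            &my_wb_connector_funcs,
                                            &my_wb_encoder_helper_funcs,
                                            my_wb_formats,
                                            ARRAY_SIZE(my_wb_formats));
}

/* from the atomic commit path, once the hardware has been pointed at the job */
static void my_wb_start(struct my_pipe *pipe, struct drm_writeback_job *job)
{
        drm_writeback_queue_job(&pipe->wb_connector, job);
}

/* from the "writeback done" interrupt handler */
static void my_wb_done(struct my_pipe *pipe)
{
        drm_writeback_signal_completion(&pipe->wb_connector, 0);
}
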
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dec655894d08..21c648b0b2a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,6 +27,8 @@
27#include <drm/spsc_queue.h> 27#include <drm/spsc_queue.h>
28#include <linux/dma-fence.h> 28#include <linux/dma-fence.h>
29 29
30#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
31
30struct drm_gpu_scheduler; 32struct drm_gpu_scheduler;
31struct drm_sched_rq; 33struct drm_sched_rq;
32 34
@@ -43,18 +45,37 @@ enum drm_sched_priority {
43}; 45};
44 46
45/** 47/**
46 * drm_sched_entity - A wrapper around a job queue (typically attached 48 * struct drm_sched_entity - A wrapper around a job queue (typically
47 * to the DRM file_priv). 49 * attached to the DRM file_priv).
50 *
51 * @list: used to append this struct to the list of entities in the
52 * runqueue.
53 * @rq: runqueue to which this entity belongs.
54 * @rq_lock: lock to modify the runqueue to which this entity belongs.
55 * @job_queue: the list of jobs of this entity.
56 * @fence_seq: a linearly increasing seqno incremented with each
57 * new &drm_sched_fence which is part of the entity.
58 * @fence_context: a unique context for all the fences which belong
59 * to this entity.
60 * The &drm_sched_fence.scheduled uses the
61 * fence_context but &drm_sched_fence.finished uses
62 * fence_context + 1.
63 * @dependency: the dependency fence of the job which is on the top
64 * of the job queue.
65 * @cb: callback for the dependency fence above.
66 * @guilty: points to ctx's guilty.
67 * @fini_status: contains the exit status in case the process was signalled.
68 * @last_scheduled: points to the finished fence of the last scheduled job.
69 * @last_user: last group leader pushing a job into the entity.
48 * 70 *
49 * Entities will emit jobs in order to their corresponding hardware 71 * Entities will emit jobs in order to their corresponding hardware
50 * ring, and the scheduler will alternate between entities based on 72 * ring, and the scheduler will alternate between entities based on
51 * scheduling policy. 73 * scheduling policy.
52*/ 74 */
53struct drm_sched_entity { 75struct drm_sched_entity {
54 struct list_head list; 76 struct list_head list;
55 struct drm_sched_rq *rq; 77 struct drm_sched_rq *rq;
56 spinlock_t rq_lock; 78 spinlock_t rq_lock;
57 struct drm_gpu_scheduler *sched;
58 79
59 struct spsc_queue job_queue; 80 struct spsc_queue job_queue;
60 81
@@ -63,47 +84,98 @@ struct drm_sched_entity {
63 84
64 struct dma_fence *dependency; 85 struct dma_fence *dependency;
65 struct dma_fence_cb cb; 86 struct dma_fence_cb cb;
66 atomic_t *guilty; /* points to ctx's guilty */ 87 atomic_t *guilty;
67 int fini_status; 88 struct dma_fence *last_scheduled;
68 struct dma_fence *last_scheduled; 89 struct task_struct *last_user;
69}; 90};
70 91
71/** 92/**
93 * struct drm_sched_rq - queue of entities to be scheduled.
94 *
95 * @lock: to modify the entities list.
96 * @sched: the scheduler to which this rq belongs to.
97 * @entities: list of the entities to be scheduled.
98 * @current_entity: the entity which is to be scheduled.
99 *
72 * Run queue is a set of entities scheduling command submissions for 100 * Run queue is a set of entities scheduling command submissions for
73 * one specific ring. It implements the scheduling policy that selects 101 * one specific ring. It implements the scheduling policy that selects
74 * the next entity to emit commands from. 102 * the next entity to emit commands from.
75*/ 103 */
76struct drm_sched_rq { 104struct drm_sched_rq {
77 spinlock_t lock; 105 spinlock_t lock;
106 struct drm_gpu_scheduler *sched;
78 struct list_head entities; 107 struct list_head entities;
79 struct drm_sched_entity *current_entity; 108 struct drm_sched_entity *current_entity;
80}; 109};
81 110
111/**
112 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
113 */
82struct drm_sched_fence { 114struct drm_sched_fence {
115 /**
116 * @scheduled: this fence is what will be signaled by the scheduler
117 * when the job is scheduled.
118 */
83 struct dma_fence scheduled; 119 struct dma_fence scheduled;
84 120
85 /* This fence is what will be signaled by the scheduler when 121 /**
86 * the job is completed. 122 * @finished: this fence is what will be signaled by the scheduler
87 * 123 * when the job is completed.
88 * When setting up an out fence for the job, you should use 124 *
89 * this, since it's available immediately upon 125 * When setting up an out fence for the job, you should use
90 * drm_sched_job_init(), and the fence returned by the driver 126 * this, since it's available immediately upon
91 * from run_job() won't be created until the dependencies have 127 * drm_sched_job_init(), and the fence returned by the driver
92 * resolved. 128 * from run_job() won't be created until the dependencies have
93 */ 129 * resolved.
130 */
94 struct dma_fence finished; 131 struct dma_fence finished;
95 132
133 /**
134 * @cb: the callback for the parent fence below.
135 */
96 struct dma_fence_cb cb; 136 struct dma_fence_cb cb;
137 /**
138 * @parent: the fence returned by &drm_sched_backend_ops.run_job
139 * when scheduling the job on hardware. We signal the
140 * &drm_sched_fence.finished fence once parent is signalled.
141 */
97 struct dma_fence *parent; 142 struct dma_fence *parent;
143 /**
144 * @sched: the scheduler instance to which the job having this struct
145 * belongs to.
146 */
98 struct drm_gpu_scheduler *sched; 147 struct drm_gpu_scheduler *sched;
148 /**
149 * @lock: the lock used by the scheduled and the finished fences.
150 */
99 spinlock_t lock; 151 spinlock_t lock;
152 /**
153 * @owner: job owner for debugging
154 */
100 void *owner; 155 void *owner;
101}; 156};
102 157
103struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); 158struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
104 159
105/** 160/**
106 * drm_sched_job - A job to be run by an entity. 161 * struct drm_sched_job - A job to be run by an entity.
162 *
163 * @queue_node: used to append this struct to the queue of jobs in an entity.
164 * @sched: the scheduler instance on which this job is scheduled.
165 * @s_fence: contains the fences for the scheduling of job.
166 * @finish_cb: the callback for the finished fence.
167 * @finish_work: schedules the function @drm_sched_job_finish once the job has
168 * finished to remove the job from the
169 * @drm_gpu_scheduler.ring_mirror_list.
170 * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
171 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
172 * interval is over.
173 * @id: a unique id assigned to each job scheduled on the scheduler.
174 * @karma: increment on every hang caused by this job. If this exceeds the hang
175 * limit of the scheduler then the job is marked guilty and will not
176 * be scheduled further.
177 * @s_priority: the priority of the job.
178 * @entity: the entity to which this job belongs.
107 * 179 *
108 * A job is created by the driver using drm_sched_job_init(), and 180 * A job is created by the driver using drm_sched_job_init(), and
109 * should call drm_sched_entity_push_job() once it wants the scheduler 181 * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -130,38 +202,64 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
130} 202}
131 203
132/** 204/**
205 * struct drm_sched_backend_ops
206 *
133 * Define the backend operations called by the scheduler, 207 * Define the backend operations called by the scheduler,
134 * these functions should be implemented in driver side 208 * these functions should be implemented in driver side.
135*/ 209 */
136struct drm_sched_backend_ops { 210struct drm_sched_backend_ops {
137 /* Called when the scheduler is considering scheduling this 211 /**
138 * job next, to get another struct dma_fence for this job to 212 * @dependency: Called when the scheduler is considering scheduling
213 * this job next, to get another struct dma_fence for this job to
139 * block on. Once it returns NULL, run_job() may be called. 214 * block on. Once it returns NULL, run_job() may be called.
140 */ 215 */
141 struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, 216 struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
142 struct drm_sched_entity *s_entity); 217 struct drm_sched_entity *s_entity);
143 218
144 /* Called to execute the job once all of the dependencies have 219 /**
145 * been resolved. This may be called multiple times, if 220 * @run_job: Called to execute the job once all of the dependencies
221 * have been resolved. This may be called multiple times, if
146 * timedout_job() has happened and drm_sched_job_recovery() 222 * timedout_job() has happened and drm_sched_job_recovery()
147 * decides to try it again. 223 * decides to try it again.
148 */ 224 */
149 struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); 225 struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
150 226
151 /* Called when a job has taken too long to execute, to trigger 227 /**
152 * GPU recovery. 228 * @timedout_job: Called when a job has taken too long to execute,
229 * to trigger GPU recovery.
153 */ 230 */
154 void (*timedout_job)(struct drm_sched_job *sched_job); 231 void (*timedout_job)(struct drm_sched_job *sched_job);
155 232
156 /* Called once the job's finished fence has been signaled and 233 /**
157 * it's time to clean it up. 234 * @free_job: Called once the job's finished fence has been signaled
235 * and it's time to clean it up.
158 */ 236 */
159 void (*free_job)(struct drm_sched_job *sched_job); 237 void (*free_job)(struct drm_sched_job *sched_job);
160}; 238};
161 239
162/** 240/**
163 * One scheduler is implemented for each hardware ring 241 * struct drm_gpu_scheduler
164*/ 242 *
243 * @ops: backend operations provided by the driver.
244 * @hw_submission_limit: the max size of the hardware queue.
245 * @timeout: the time after which a job is removed from the scheduler.
246 * @name: name of the ring for which this scheduler is being used.
247 * @sched_rq: priority wise array of run queues.
248 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
249 * is ready to be scheduled.
250 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
251 * waits on this wait queue until all the scheduled jobs are
252 * finished.
253 * @hw_rq_count: the number of jobs currently in the hardware queue.
 254 * @job_id_count: used to assign unique id to each job.
 255 * @thread: the kthread on which the scheduler runs.
256 * @ring_mirror_list: the list of jobs which are currently in the job queue.
257 * @job_list_lock: lock to protect the ring_mirror_list.
258 * @hang_limit: once the hangs by a job crosses this limit then it is marked
 259 *              guilty and it will not be considered for scheduling further.
260 *
261 * One scheduler is implemented for each hardware ring.
262 */
165struct drm_gpu_scheduler { 263struct drm_gpu_scheduler {
166 const struct drm_sched_backend_ops *ops; 264 const struct drm_sched_backend_ops *ops;
167 uint32_t hw_submission_limit; 265 uint32_t hw_submission_limit;
@@ -184,16 +282,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
184 const char *name); 282 const char *name);
185void drm_sched_fini(struct drm_gpu_scheduler *sched); 283void drm_sched_fini(struct drm_gpu_scheduler *sched);
186 284
187int drm_sched_entity_init(struct drm_gpu_scheduler *sched, 285int drm_sched_entity_init(struct drm_sched_entity *entity,
188 struct drm_sched_entity *entity, 286 struct drm_sched_rq **rq_list,
189 struct drm_sched_rq *rq, 287 unsigned int num_rq_list,
190 atomic_t *guilty); 288 atomic_t *guilty);
191void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched, 289long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
192 struct drm_sched_entity *entity); 290void drm_sched_entity_fini(struct drm_sched_entity *entity);
193void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched, 291void drm_sched_entity_destroy(struct drm_sched_entity *entity);
194 struct drm_sched_entity *entity);
195void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
196 struct drm_sched_entity *entity);
197void drm_sched_entity_push_job(struct drm_sched_job *sched_job, 292void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
198 struct drm_sched_entity *entity); 293 struct drm_sched_entity *entity);
199void drm_sched_entity_set_rq(struct drm_sched_entity *entity, 294void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -204,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
204void drm_sched_fence_scheduled(struct drm_sched_fence *fence); 299void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
205void drm_sched_fence_finished(struct drm_sched_fence *fence); 300void drm_sched_fence_finished(struct drm_sched_fence *fence);
206int drm_sched_job_init(struct drm_sched_job *job, 301int drm_sched_job_init(struct drm_sched_job *job,
207 struct drm_gpu_scheduler *sched,
208 struct drm_sched_entity *entity, 302 struct drm_sched_entity *entity,
209 void *owner); 303 void *owner);
210void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, 304void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
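
A rough sketch of the updated entry points (entity-first init taking an rq list, and job init without an explicit scheduler argument); the my_ctx/my_job types and the .base embedding are assumptions:

int my_ctx_init(struct drm_gpu_scheduler *sched, struct my_ctx *ctx)
{
        struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

        /* a single run queue, no guilty tracking */
        return drm_sched_entity_init(&ctx->entity, &rq, 1, NULL);
}

int my_job_submit(struct my_ctx *ctx, struct my_job *job)
{
        int ret;

        ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
        if (ret)
                return ret;

        drm_sched_entity_push_job(&job->base, &ctx->entity);
        return 0;
}
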
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 346b1f5cb180..fca22d463e1b 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -24,101 +24,26 @@
24#ifndef _I915_COMPONENT_H_ 24#ifndef _I915_COMPONENT_H_
25#define _I915_COMPONENT_H_ 25#define _I915_COMPONENT_H_
26 26
27#include "drm_audio_component.h"
28
27/* MAX_PORT is the number of port 29/* MAX_PORT is the number of port
28 * It must be sync with I915_MAX_PORTS defined i915_drv.h 30 * It must be sync with I915_MAX_PORTS defined i915_drv.h
29 */ 31 */
30#define MAX_PORTS 6 32#define MAX_PORTS 6
31 33
32/** 34/**
33 * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
34 */
35struct i915_audio_component_ops {
36 /**
37 * @owner: i915 module
38 */
39 struct module *owner;
40 /**
41 * @get_power: get the POWER_DOMAIN_AUDIO power well
42 *
43 * Request the power well to be turned on.
44 */
45 void (*get_power)(struct device *);
46 /**
47 * @put_power: put the POWER_DOMAIN_AUDIO power well
48 *
49 * Allow the power well to be turned off.
50 */
51 void (*put_power)(struct device *);
52 /**
53 * @codec_wake_override: Enable/disable codec wake signal
54 */
55 void (*codec_wake_override)(struct device *, bool enable);
56 /**
57 * @get_cdclk_freq: Get the Core Display Clock in kHz
58 */
59 int (*get_cdclk_freq)(struct device *);
60 /**
61 * @sync_audio_rate: set n/cts based on the sample rate
62 *
63 * Called from audio driver. After audio driver sets the
64 * sample rate, it will call this function to set n/cts
65 */
66 int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
67 /**
68 * @get_eld: fill the audio state and ELD bytes for the given port
69 *
70 * Called from audio driver to get the HDMI/DP audio state of the given
71 * digital port, and also fetch ELD bytes to the given pointer.
72 *
73 * It returns the byte size of the original ELD (not the actually
74 * copied size), zero for an invalid ELD, or a negative error code.
75 *
76 * Note that the returned size may be over @max_bytes. Then it
77 * implies that only a part of ELD has been copied to the buffer.
78 */
79 int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
80 unsigned char *buf, int max_bytes);
81};
82
83/**
84 * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
85 */
86struct i915_audio_component_audio_ops {
87 /**
88 * @audio_ptr: Pointer to be used in call to pin_eld_notify
89 */
90 void *audio_ptr;
91 /**
92 * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
93 *
94 * Called when the i915 driver has set up audio pipeline or has just
95 * begun to tear it down. This allows the HDA driver to update its
96 * status accordingly (even when the HDA controller is in power save
97 * mode).
98 */
99 void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
100};
101
102/**
103 * struct i915_audio_component - Used for direct communication between i915 and hda drivers 35 * struct i915_audio_component - Used for direct communication between i915 and hda drivers
104 */ 36 */
105struct i915_audio_component { 37struct i915_audio_component {
106 /** 38 /**
107 * @dev: i915 device, used as parameter for ops 39 * @base: the drm_audio_component base class
108 */ 40 */
109 struct device *dev; 41 struct drm_audio_component base;
42
110 /** 43 /**
111 * @aud_sample_rate: the array of audio sample rate per port 44 * @aud_sample_rate: the array of audio sample rate per port
112 */ 45 */
113 int aud_sample_rate[MAX_PORTS]; 46 int aud_sample_rate[MAX_PORTS];
114 /**
115 * @ops: Ops implemented by i915 driver, called by hda driver
116 */
117 const struct i915_audio_component_ops *ops;
118 /**
119 * @audio_ops: Ops implemented by hda driver, called by i915 driver
120 */
121 const struct i915_audio_component_audio_ops *audio_ops;
122}; 47};
123 48
124#endif /* _I915_COMPONENT_H_ */ 49#endif /* _I915_COMPONENT_H_ */
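
A hedged illustration of how a consumer now reaches the shared ops through the embedded base component; the field names of struct drm_audio_component are assumed from the new drm_audio_component.h:

static int my_get_cdclk_khz(struct i915_audio_component *acomp)
{
        const struct drm_audio_component_ops *ops = acomp->base.ops;

        if (!ops || !ops->get_cdclk_freq)
                return -ENODEV;
        return ops->get_cdclk_freq(acomp->base.dev);
}
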
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c9e5a6621b95..c44703f471b3 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res;
95#define I845_TSEG_SIZE_512K (2 << 1) 95#define I845_TSEG_SIZE_512K (2 << 1)
96#define I845_TSEG_SIZE_1M (3 << 1) 96#define I845_TSEG_SIZE_1M (3 << 1)
97 97
98#define INTEL_BSM 0x5c 98#define INTEL_BSM 0x5c
99#define INTEL_GEN11_BSM_DW0 0xc0
100#define INTEL_GEN11_BSM_DW1 0xc4
99#define INTEL_BSM_MASK (-(1u << 20)) 101#define INTEL_BSM_MASK (-(1u << 20))
100 102
101#endif /* _I915_DRM_H_ */ 103#endif /* _I915_DRM_H_ */
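
A sketch of how the two new config dwords might be combined into a stolen-memory base; the 64-bit layout and the reuse of INTEL_BSM_MASK on the low dword are assumptions made for illustration:

static u64 my_read_gen11_stolen_base(struct pci_dev *pdev)
{
        u32 lo, hi;

        pci_read_config_dword(pdev, INTEL_GEN11_BSM_DW0, &lo);
        pci_read_config_dword(pdev, INTEL_GEN11_BSM_DW1, &hi);

        return ((u64)hi << 32) | (lo & INTEL_BSM_MASK);
}
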
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index bab70ff6e78b..fbf5cfc9b352 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -349,7 +349,6 @@
349#define INTEL_KBL_GT2_IDS(info) \ 349#define INTEL_KBL_GT2_IDS(info) \
350 INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ 350 INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
351 INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \ 351 INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
352 INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
353 INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ 352 INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
354 INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ 353 INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
355 INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ 354 INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
@@ -365,11 +364,17 @@
365#define INTEL_KBL_GT4_IDS(info) \ 364#define INTEL_KBL_GT4_IDS(info) \
366 INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ 365 INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
367 366
367/* AML/KBL Y GT2 */
368#define INTEL_AML_GT2_IDS(info) \
369 INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
370 INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
371
368#define INTEL_KBL_IDS(info) \ 372#define INTEL_KBL_IDS(info) \
369 INTEL_KBL_GT1_IDS(info), \ 373 INTEL_KBL_GT1_IDS(info), \
370 INTEL_KBL_GT2_IDS(info), \ 374 INTEL_KBL_GT2_IDS(info), \
371 INTEL_KBL_GT3_IDS(info), \ 375 INTEL_KBL_GT3_IDS(info), \
372 INTEL_KBL_GT4_IDS(info) 376 INTEL_KBL_GT4_IDS(info), \
377 INTEL_AML_GT2_IDS(info)
373 378
374/* CFL S */ 379/* CFL S */
375#define INTEL_CFL_S_GT1_IDS(info) \ 380#define INTEL_CFL_S_GT1_IDS(info) \
@@ -388,32 +393,40 @@
388 INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ 393 INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
389 INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ 394 INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
390 395
391/* CFL U GT1 */
392#define INTEL_CFL_U_GT1_IDS(info) \
393 INTEL_VGA_DEVICE(0x3EA1, info), \
394 INTEL_VGA_DEVICE(0x3EA4, info)
395
396/* CFL U GT2 */ 396/* CFL U GT2 */
397#define INTEL_CFL_U_GT2_IDS(info) \ 397#define INTEL_CFL_U_GT2_IDS(info) \
398 INTEL_VGA_DEVICE(0x3EA0, info), \
399 INTEL_VGA_DEVICE(0x3EA3, info), \
400 INTEL_VGA_DEVICE(0x3EA9, info) 398 INTEL_VGA_DEVICE(0x3EA9, info)
401 399
402/* CFL U GT3 */ 400/* CFL U GT3 */
403#define INTEL_CFL_U_GT3_IDS(info) \ 401#define INTEL_CFL_U_GT3_IDS(info) \
404 INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
405 INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ 402 INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
406 INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ 403 INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
407 INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ 404 INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
408 INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ 405 INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
409 406
407/* WHL/CFL U GT1 */
408#define INTEL_WHL_U_GT1_IDS(info) \
409 INTEL_VGA_DEVICE(0x3EA1, info)
410
411/* WHL/CFL U GT2 */
412#define INTEL_WHL_U_GT2_IDS(info) \
413 INTEL_VGA_DEVICE(0x3EA0, info)
414
415/* WHL/CFL U GT3 */
416#define INTEL_WHL_U_GT3_IDS(info) \
417 INTEL_VGA_DEVICE(0x3EA2, info), \
418 INTEL_VGA_DEVICE(0x3EA3, info), \
419 INTEL_VGA_DEVICE(0x3EA4, info)
420
410#define INTEL_CFL_IDS(info) \ 421#define INTEL_CFL_IDS(info) \
411 INTEL_CFL_S_GT1_IDS(info), \ 422 INTEL_CFL_S_GT1_IDS(info), \
412 INTEL_CFL_S_GT2_IDS(info), \ 423 INTEL_CFL_S_GT2_IDS(info), \
413 INTEL_CFL_H_GT2_IDS(info), \ 424 INTEL_CFL_H_GT2_IDS(info), \
414 INTEL_CFL_U_GT1_IDS(info), \
415 INTEL_CFL_U_GT2_IDS(info), \ 425 INTEL_CFL_U_GT2_IDS(info), \
416 INTEL_CFL_U_GT3_IDS(info) 426 INTEL_CFL_U_GT3_IDS(info), \
427 INTEL_WHL_U_GT1_IDS(info), \
428 INTEL_WHL_U_GT2_IDS(info), \
429 INTEL_WHL_U_GT3_IDS(info)
417 430
418/* CNL */ 431/* CNL */
419#define INTEL_CNL_IDS(info) \ 432#define INTEL_CNL_IDS(info) \
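
For context, these *_IDS() macros are meant to expand inside a PCI ID table; the device-info pointers below are placeholders:

static const struct pci_device_id my_pciidlist[] = {
        INTEL_KBL_IDS(&my_kabylake_info),       /* now pulls in INTEL_AML_GT2_IDS */
        INTEL_CFL_IDS(&my_coffeelake_info),     /* now pulls in the WHL U entries */
        { }
};
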
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 56e4a916b5e8..fe9827d0ca8a 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -16,16 +16,31 @@
16 16
17/** 17/**
18 * struct tinydrm_device - tinydrm device 18 * struct tinydrm_device - tinydrm device
19 * @drm: DRM device
20 * @pipe: Display pipe structure
21 * @dirty_lock: Serializes framebuffer flushing
22 * @fb_funcs: Framebuffer functions used when creating framebuffers
23 */ 19 */
24struct tinydrm_device { 20struct tinydrm_device {
21 /**
22 * @drm: DRM device
23 */
25 struct drm_device *drm; 24 struct drm_device *drm;
25
26 /**
27 * @pipe: Display pipe structure
28 */
26 struct drm_simple_display_pipe pipe; 29 struct drm_simple_display_pipe pipe;
30
31 /**
32 * @dirty_lock: Serializes framebuffer flushing
33 */
27 struct mutex dirty_lock; 34 struct mutex dirty_lock;
35
36 /**
37 * @fb_funcs: Framebuffer functions used when creating framebuffers
38 */
28 const struct drm_framebuffer_funcs *fb_funcs; 39 const struct drm_framebuffer_funcs *fb_funcs;
40
41 /**
42 * @fb_dirty: Framebuffer dirty callback
43 */
29 int (*fb_dirty)(struct drm_framebuffer *framebuffer, 44 int (*fb_dirty)(struct drm_framebuffer *framebuffer,
30 struct drm_file *file_priv, unsigned flags, 45 struct drm_file *file_priv, unsigned flags,
31 unsigned color, struct drm_clip_rect *clips, 46 unsigned color, struct drm_clip_rect *clips,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c67977aa1a0e..a01ba2032f0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -284,17 +284,29 @@ struct ttm_operation_ctx {
284#define TTM_OPT_FLAG_FORCE_ALLOC 0x2 284#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
285 285
286/** 286/**
287 * ttm_bo_get - reference a struct ttm_buffer_object
288 *
289 * @bo: The buffer object.
290 */
291static inline void ttm_bo_get(struct ttm_buffer_object *bo)
292{
293 kref_get(&bo->kref);
294}
295
296/**
287 * ttm_bo_reference - reference a struct ttm_buffer_object 297 * ttm_bo_reference - reference a struct ttm_buffer_object
288 * 298 *
289 * @bo: The buffer object. 299 * @bo: The buffer object.
290 * 300 *
291 * Returns a refcounted pointer to a buffer object. 301 * Returns a refcounted pointer to a buffer object.
302 *
303 * This function is deprecated. Use @ttm_bo_get instead.
292 */ 304 */
293 305
294static inline struct ttm_buffer_object * 306static inline struct ttm_buffer_object *
295ttm_bo_reference(struct ttm_buffer_object *bo) 307ttm_bo_reference(struct ttm_buffer_object *bo)
296{ 308{
297 kref_get(&bo->kref); 309 ttm_bo_get(bo);
298 return bo; 310 return bo;
299} 311}
300 312
@@ -346,11 +358,22 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
346 struct ttm_operation_ctx *ctx); 358 struct ttm_operation_ctx *ctx);
347 359
348/** 360/**
361 * ttm_bo_put
362 *
363 * @bo: The buffer object.
364 *
365 * Unreference a buffer object.
366 */
367void ttm_bo_put(struct ttm_buffer_object *bo);
368
369/**
349 * ttm_bo_unref 370 * ttm_bo_unref
350 * 371 *
351 * @bo: The buffer object. 372 * @bo: The buffer object.
352 * 373 *
353 * Unreference and clear a pointer to a buffer object. 374 * Unreference and clear a pointer to a buffer object.
375 *
376 * This function is deprecated. Use @ttm_bo_put instead.
354 */ 377 */
355void ttm_bo_unref(struct ttm_buffer_object **bo); 378void ttm_bo_unref(struct ttm_buffer_object **bo);
356 379
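
A short sketch of the reference-counting switch the comments above describe; the my_* wrappers are illustrative only:

static void my_take_ref(struct ttm_buffer_object *bo)
{
        ttm_bo_get(bo);         /* preferred over ttm_bo_reference(bo) */
}

static void my_drop_ref(struct ttm_buffer_object **bo)
{
        struct ttm_buffer_object *tmp = *bo;

        *bo = NULL;
        ttm_bo_put(tmp);        /* preferred over ttm_bo_unref(bo) */
}
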
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
new file mode 100644
index 000000000000..7c492b49e38c
--- /dev/null
+++ b/include/drm/ttm/ttm_set_memory.h
@@ -0,0 +1,150 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2018 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Huang Rui <ray.huang@amd.com>
29 */
30
31#ifndef TTM_SET_MEMORY
32#define TTM_SET_MEMORY
33
34#include <linux/mm.h>
35
36#ifdef CONFIG_X86
37
38#include <asm/set_memory.h>
39
40static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
41{
42 return set_pages_array_wb(pages, addrinarray);
43}
44
45static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
46{
47 return set_pages_array_wc(pages, addrinarray);
48}
49
50static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
51{
52 return set_pages_array_uc(pages, addrinarray);
53}
54
55static inline int ttm_set_pages_wb(struct page *page, int numpages)
56{
57 return set_pages_wb(page, numpages);
58}
59
60static inline int ttm_set_pages_wc(struct page *page, int numpages)
61{
62 unsigned long addr = (unsigned long)page_address(page);
63
64 return set_memory_wc(addr, numpages);
65}
66
67static inline int ttm_set_pages_uc(struct page *page, int numpages)
68{
69 return set_pages_uc(page, numpages);
70}
71
72#else /* for CONFIG_X86 */
73
74#if IS_ENABLED(CONFIG_AGP)
75
76#include <asm/agp.h>
77
78static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
79{
80 int i;
81
82 for (i = 0; i < addrinarray; i++)
83 unmap_page_from_agp(pages[i]);
84 return 0;
85}
86
87static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
88{
89 int i;
90
91 for (i = 0; i < addrinarray; i++)
92 map_page_into_agp(pages[i]);
93 return 0;
94}
95
96static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
97{
98 int i;
99
100 for (i = 0; i < addrinarray; i++)
101 map_page_into_agp(pages[i]);
102 return 0;
103}
104
105static inline int ttm_set_pages_wb(struct page *page, int numpages)
106{
107 int i;
108
109 for (i = 0; i < numpages; i++)
110 unmap_page_from_agp(page++);
111 return 0;
112}
113
114#else /* for CONFIG_AGP */
115
116static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
117{
118 return 0;
119}
120
121static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
122{
123 return 0;
124}
125
126static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
127{
128 return 0;
129}
130
131static inline int ttm_set_pages_wb(struct page *page, int numpages)
132{
133 return 0;
134}
135
136#endif /* for CONFIG_AGP */
137
138static inline int ttm_set_pages_wc(struct page *page, int numpages)
139{
140 return 0;
141}
142
143static inline int ttm_set_pages_uc(struct page *page, int numpages)
144{
145 return 0;
146}
147
148#endif /* for CONFIG_X86 */
149
150#endif
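
A minimal sketch of the intended use from a page allocator, under the assumption that a failed attribute change should be unwound by switching the pages back to write-back:

static int my_make_pages_wc(struct page **pages, int npages)
{
        int ret = ttm_set_pages_array_wc(pages, npages);

        if (ret)
                /* restore the default caching attribute on failure */
                ttm_set_pages_array_wb(pages, npages);
        return ret;
}
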
diff --git a/include/dt-bindings/clock/actions,s700-cmu.h b/include/dt-bindings/clock/actions,s700-cmu.h
new file mode 100644
index 000000000000..3e1942996724
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s700-cmu.h
@@ -0,0 +1,118 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Device Tree binding constants for Actions Semi S700 Clock Management Unit
4 *
5 * Copyright (c) 2014 Actions Semi Inc.
6 * Author: David Liu <liuwei@actions-semi.com>
7 *
8 * Author: Pathiban Nallathambi <pn@denx.de>
9 * Author: Saravanan Sekar <sravanhome@gmail.com>
10 */
11
12#ifndef __DT_BINDINGS_CLOCK_S700_H
13#define __DT_BINDINGS_CLOCK_S700_H
14
15#define CLK_NONE 0
16
17/* pll clocks */
18#define CLK_CORE_PLL 1
19#define CLK_DEV_PLL 2
20#define CLK_DDR_PLL 3
21#define CLK_NAND_PLL 4
22#define CLK_DISPLAY_PLL 5
23#define CLK_TVOUT_PLL 6
24#define CLK_CVBS_PLL 7
25#define CLK_AUDIO_PLL 8
26#define CLK_ETHERNET_PLL 9
27
28/* system clock */
29#define CLK_CPU 10
30#define CLK_DEV 11
31#define CLK_AHB 12
32#define CLK_APB 13
33#define CLK_DMAC 14
34#define CLK_NOC0_CLK_MUX 15
35#define CLK_NOC1_CLK_MUX 16
36#define CLK_HP_CLK_MUX 17
37#define CLK_HP_CLK_DIV 18
38#define CLK_NOC1_CLK_DIV 19
39#define CLK_NOC0 20
40#define CLK_NOC1 21
41#define CLK_SENOR_SRC 22
42
43/* peripheral device clock */
44#define CLK_GPIO 23
45#define CLK_TIMER 24
46#define CLK_DSI 25
47#define CLK_CSI 26
48#define CLK_SI 27
49#define CLK_DE 28
50#define CLK_HDE 29
51#define CLK_VDE 30
52#define CLK_VCE 31
53#define CLK_NAND 32
54#define CLK_SD0 33
55#define CLK_SD1 34
56#define CLK_SD2 35
57
58#define CLK_UART0 36
59#define CLK_UART1 37
60#define CLK_UART2 38
61#define CLK_UART3 39
62#define CLK_UART4 40
63#define CLK_UART5 41
64#define CLK_UART6 42
65
66#define CLK_PWM0 43
67#define CLK_PWM1 44
68#define CLK_PWM2 45
69#define CLK_PWM3 46
70#define CLK_PWM4 47
71#define CLK_PWM5 48
72#define CLK_GPU3D 49
73
74#define CLK_I2C0 50
75#define CLK_I2C1 51
76#define CLK_I2C2 52
77#define CLK_I2C3 53
78
79#define CLK_SPI0 54
80#define CLK_SPI1 55
81#define CLK_SPI2 56
82#define CLK_SPI3 57
83
84#define CLK_USB3_480MPLL0 58
85#define CLK_USB3_480MPHY0 59
86#define CLK_USB3_5GPHY 60
87#define CLK_USB3_CCE 61
88#define CLK_USB3_MAC 62
89
90#define CLK_LCD 63
91#define CLK_HDMI_AUDIO 64
92#define CLK_I2SRX 65
93#define CLK_I2STX 66
94
95#define CLK_SENSOR0 67
96#define CLK_SENSOR1 68
97
98#define CLK_HDMI_DEV 69
99
100#define CLK_ETHERNET 70
101#define CLK_RMII_REF 71
102
103#define CLK_USB2H0_PLLEN 72
104#define CLK_USB2H0_PHY 73
105#define CLK_USB2H0_CCE 74
106#define CLK_USB2H1_PLLEN 75
107#define CLK_USB2H1_PHY 76
108#define CLK_USB2H1_CCE 77
109
110#define CLK_TVOUT 78
111
112#define CLK_THERMAL_SENSOR 79
113
114#define CLK_IRC_SWITCH 80
115#define CLK_PCM1 81
116#define CLK_NR_CLKS (CLK_PCM1 + 1)
117
118#endif /* __DT_BINDINGS_CLOCK_S700_H */
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 44761849fcbe..f43738607d77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -25,7 +25,7 @@
25#define ASPEED_CLK_GATE_RSACLK 19 25#define ASPEED_CLK_GATE_RSACLK 19
26#define ASPEED_CLK_GATE_UART3CLK 20 26#define ASPEED_CLK_GATE_UART3CLK 20
27#define ASPEED_CLK_GATE_UART4CLK 21 27#define ASPEED_CLK_GATE_UART4CLK 21
28#define ASPEED_CLK_GATE_SDCLKCLK 22 28#define ASPEED_CLK_GATE_SDCLK 22
29#define ASPEED_CLK_GATE_LHCCLK 23 29#define ASPEED_CLK_GATE_LHCCLK 23
30#define ASPEED_CLK_HPLL 24 30#define ASPEED_CLK_HPLL 24
31#define ASPEED_CLK_AHB 25 31#define ASPEED_CLK_AHB 25
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
new file mode 100644
index 000000000000..fd9c362099d9
--- /dev/null
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -0,0 +1,94 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
2/*
3 * Copyright (c) 2018 Baylibre SAS.
4 * Author: Jerome Brunet <jbrunet@baylibre.com>
5 */
6
7#ifndef __AXG_AUDIO_CLKC_BINDINGS_H
8#define __AXG_AUDIO_CLKC_BINDINGS_H
9
10#define AUD_CLKID_SLV_SCLK0 9
11#define AUD_CLKID_SLV_SCLK1 10
12#define AUD_CLKID_SLV_SCLK2 11
13#define AUD_CLKID_SLV_SCLK3 12
14#define AUD_CLKID_SLV_SCLK4 13
15#define AUD_CLKID_SLV_SCLK5 14
16#define AUD_CLKID_SLV_SCLK6 15
17#define AUD_CLKID_SLV_SCLK7 16
18#define AUD_CLKID_SLV_SCLK8 17
19#define AUD_CLKID_SLV_SCLK9 18
20#define AUD_CLKID_SLV_LRCLK0 19
21#define AUD_CLKID_SLV_LRCLK1 20
22#define AUD_CLKID_SLV_LRCLK2 21
23#define AUD_CLKID_SLV_LRCLK3 22
24#define AUD_CLKID_SLV_LRCLK4 23
25#define AUD_CLKID_SLV_LRCLK5 24
26#define AUD_CLKID_SLV_LRCLK6 25
27#define AUD_CLKID_SLV_LRCLK7 26
28#define AUD_CLKID_SLV_LRCLK8 27
29#define AUD_CLKID_SLV_LRCLK9 28
30#define AUD_CLKID_DDR_ARB 29
31#define AUD_CLKID_PDM 30
32#define AUD_CLKID_TDMIN_A 31
33#define AUD_CLKID_TDMIN_B 32
34#define AUD_CLKID_TDMIN_C 33
35#define AUD_CLKID_TDMIN_LB 34
36#define AUD_CLKID_TDMOUT_A 35
37#define AUD_CLKID_TDMOUT_B 36
38#define AUD_CLKID_TDMOUT_C 37
39#define AUD_CLKID_FRDDR_A 38
40#define AUD_CLKID_FRDDR_B 39
41#define AUD_CLKID_FRDDR_C 40
42#define AUD_CLKID_TODDR_A 41
43#define AUD_CLKID_TODDR_B 42
44#define AUD_CLKID_TODDR_C 43
45#define AUD_CLKID_LOOPBACK 44
46#define AUD_CLKID_SPDIFIN 45
47#define AUD_CLKID_SPDIFOUT 46
48#define AUD_CLKID_RESAMPLE 47
49#define AUD_CLKID_POWER_DETECT 48
50#define AUD_CLKID_MST_A_MCLK 49
51#define AUD_CLKID_MST_B_MCLK 50
52#define AUD_CLKID_MST_C_MCLK 51
53#define AUD_CLKID_MST_D_MCLK 52
54#define AUD_CLKID_MST_E_MCLK 53
55#define AUD_CLKID_MST_F_MCLK 54
56#define AUD_CLKID_SPDIFOUT_CLK 55
57#define AUD_CLKID_SPDIFIN_CLK 56
58#define AUD_CLKID_PDM_DCLK 57
59#define AUD_CLKID_PDM_SYSCLK 58
60#define AUD_CLKID_MST_A_SCLK 79
61#define AUD_CLKID_MST_B_SCLK 80
62#define AUD_CLKID_MST_C_SCLK 81
63#define AUD_CLKID_MST_D_SCLK 82
64#define AUD_CLKID_MST_E_SCLK 83
65#define AUD_CLKID_MST_F_SCLK 84
66#define AUD_CLKID_MST_A_LRCLK 86
67#define AUD_CLKID_MST_B_LRCLK 87
68#define AUD_CLKID_MST_C_LRCLK 88
69#define AUD_CLKID_MST_D_LRCLK 89
70#define AUD_CLKID_MST_E_LRCLK 90
71#define AUD_CLKID_MST_F_LRCLK 91
72#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
73#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
74#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
75#define AUD_CLKID_TDMIN_LB_SCLK_SEL 119
76#define AUD_CLKID_TDMOUT_A_SCLK_SEL 120
77#define AUD_CLKID_TDMOUT_B_SCLK_SEL 121
78#define AUD_CLKID_TDMOUT_C_SCLK_SEL 122
79#define AUD_CLKID_TDMIN_A_SCLK 123
80#define AUD_CLKID_TDMIN_B_SCLK 124
81#define AUD_CLKID_TDMIN_C_SCLK 125
82#define AUD_CLKID_TDMIN_LB_SCLK 126
83#define AUD_CLKID_TDMOUT_A_SCLK 127
84#define AUD_CLKID_TDMOUT_B_SCLK 128
85#define AUD_CLKID_TDMOUT_C_SCLK 129
86#define AUD_CLKID_TDMIN_A_LRCLK 130
87#define AUD_CLKID_TDMIN_B_LRCLK 131
88#define AUD_CLKID_TDMIN_C_LRCLK 132
89#define AUD_CLKID_TDMIN_LB_LRCLK 133
90#define AUD_CLKID_TDMOUT_A_LRCLK 134
91#define AUD_CLKID_TDMOUT_B_LRCLK 135
92#define AUD_CLKID_TDMOUT_C_LRCLK 136
93
94#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 555937a25504..fd1f938c38d1 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -68,5 +68,9 @@
68#define CLKID_SD_EMMC_B_CLK0 59 68#define CLKID_SD_EMMC_B_CLK0 59
69#define CLKID_SD_EMMC_C_CLK0 60 69#define CLKID_SD_EMMC_C_CLK0 60
70#define CLKID_HIFI_PLL 69 70#define CLKID_HIFI_PLL 69
71#define CLKID_PCIE_CML_EN0 79
72#define CLKID_PCIE_CML_EN1 80
73#define CLKID_MIPI_ENABLE 81
74#define CLKID_GEN_CLK 84
71 75
72#endif /* __AXG_CLKC_H */ 76#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 7a892be90549..3979d48c025f 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -127,5 +127,6 @@
127#define CLKID_VAPB 140 127#define CLKID_VAPB 140
128#define CLKID_VDEC_1 153 128#define CLKID_VDEC_1 153
129#define CLKID_VDEC_HEVC 156 129#define CLKID_VDEC_HEVC 156
130#define CLKID_GEN_CLK 159
130 131
131#endif /* __GXBB_CLKC_H */ 132#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index 151111e68f4f..1036475f997d 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -197,6 +197,13 @@
197#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171 197#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171
198#define IMX6SLL_CLK_EXTERN_AUDIO 172 198#define IMX6SLL_CLK_EXTERN_AUDIO 172
199 199
200#define IMX6SLL_CLK_END 173 200#define IMX6SLL_CLK_GPIO1 173
201#define IMX6SLL_CLK_GPIO2 174
202#define IMX6SLL_CLK_GPIO3 175
203#define IMX6SLL_CLK_GPIO4 176
204#define IMX6SLL_CLK_GPIO5 177
205#define IMX6SLL_CLK_GPIO6 178
206
207#define IMX6SLL_CLK_END 179
201 208
202#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */ 209#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 9564597cbfac..f8e0476a3a0e 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -235,27 +235,31 @@
235#define IMX6UL_CLK_CSI_PODF 222 235#define IMX6UL_CLK_CSI_PODF 222
236#define IMX6UL_CLK_PLL3_120M 223 236#define IMX6UL_CLK_PLL3_120M 223
237#define IMX6UL_CLK_KPP 224 237#define IMX6UL_CLK_KPP 224
238#define IMX6UL_CLK_CKO1_SEL 225 238#define IMX6ULL_CLK_ESAI_PRED 225
239#define IMX6UL_CLK_CKO1_PODF 226 239#define IMX6ULL_CLK_ESAI_PODF 226
240#define IMX6UL_CLK_CKO1 227 240#define IMX6ULL_CLK_ESAI_EXTAL 227
241#define IMX6UL_CLK_CKO2_SEL 228 241#define IMX6ULL_CLK_ESAI_MEM 228
242#define IMX6UL_CLK_CKO2_PODF 229 242#define IMX6ULL_CLK_ESAI_IPG 229
243#define IMX6UL_CLK_CKO2 230 243#define IMX6ULL_CLK_DCP_CLK 230
244#define IMX6UL_CLK_CKO 231 244#define IMX6ULL_CLK_EPDC_PRE_SEL 231
245#define IMX6ULL_CLK_EPDC_SEL 232
246#define IMX6ULL_CLK_EPDC_PODF 233
247#define IMX6ULL_CLK_EPDC_ACLK 234
248#define IMX6ULL_CLK_EPDC_PIX 235
249#define IMX6ULL_CLK_ESAI_SEL 236
250#define IMX6UL_CLK_CKO1_SEL 237
251#define IMX6UL_CLK_CKO1_PODF 238
252#define IMX6UL_CLK_CKO1 239
253#define IMX6UL_CLK_CKO2_SEL 240
254#define IMX6UL_CLK_CKO2_PODF 241
255#define IMX6UL_CLK_CKO2 242
256#define IMX6UL_CLK_CKO 243
257#define IMX6UL_CLK_GPIO1 244
258#define IMX6UL_CLK_GPIO2 245
259#define IMX6UL_CLK_GPIO3 246
260#define IMX6UL_CLK_GPIO4 247
261#define IMX6UL_CLK_GPIO5 248
245 262
246/* For i.MX6ULL */ 263#define IMX6UL_CLK_END 249
247#define IMX6ULL_CLK_ESAI_PRED 232
248#define IMX6ULL_CLK_ESAI_PODF 233
249#define IMX6ULL_CLK_ESAI_EXTAL 234
250#define IMX6ULL_CLK_ESAI_MEM 235
251#define IMX6ULL_CLK_ESAI_IPG 236
252#define IMX6ULL_CLK_DCP_CLK 237
253#define IMX6ULL_CLK_EPDC_PRE_SEL 238
254#define IMX6ULL_CLK_EPDC_SEL 239
255#define IMX6ULL_CLK_EPDC_PODF 240
256#define IMX6ULL_CLK_EPDC_ACLK 241
257#define IMX6ULL_CLK_EPDC_PIX 242
258#define IMX6ULL_CLK_ESAI_SEL 243
259#define IMX6UL_CLK_END 244
260 264
261#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */ 265#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/maxim,max9485.h b/include/dt-bindings/clock/maxim,max9485.h
new file mode 100644
index 000000000000..185b09ce1869
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max9485.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2018 Daniel Mack
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#ifndef __DT_BINDINGS_MAX9485_CLK_H
11#define __DT_BINDINGS_MAX9485_CLK_H
12
13#define MAX9485_MCLKOUT 0
14#define MAX9485_CLKOUT 1
15#define MAX9485_CLKOUT1 2
16#define MAX9485_CLKOUT2 3
17
18#endif /* __DT_BINDINGS_MAX9485_CLK_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
new file mode 100644
index 000000000000..00101479f7c4
--- /dev/null
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -0,0 +1,389 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
4#define _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
5
6/* core clocks */
7#define PLL_APLL 1
8#define PLL_DPLL 2
9#define PLL_CPLL 3
10#define PLL_NPLL 4
11#define APLL_BOOST_H 5
12#define APLL_BOOST_L 6
13#define ARMCLK 7
14
15/* sclk gates (special clocks) */
16#define USB480M 14
17#define SCLK_PDM 15
18#define SCLK_I2S0_TX 16
19#define SCLK_I2S0_TX_OUT 17
20#define SCLK_I2S0_RX 18
21#define SCLK_I2S0_RX_OUT 19
22#define SCLK_I2S1 20
23#define SCLK_I2S1_OUT 21
24#define SCLK_I2S2 22
25#define SCLK_I2S2_OUT 23
26#define SCLK_UART1 24
27#define SCLK_UART2 25
28#define SCLK_UART3 26
29#define SCLK_UART4 27
30#define SCLK_UART5 28
31#define SCLK_I2C0 29
32#define SCLK_I2C1 30
33#define SCLK_I2C2 31
34#define SCLK_I2C3 32
35#define SCLK_I2C4 33
36#define SCLK_PWM0 34
37#define SCLK_PWM1 35
38#define SCLK_SPI0 36
39#define SCLK_SPI1 37
40#define SCLK_TIMER0 38
41#define SCLK_TIMER1 39
42#define SCLK_TIMER2 40
43#define SCLK_TIMER3 41
44#define SCLK_TIMER4 42
45#define SCLK_TIMER5 43
46#define SCLK_TSADC 44
47#define SCLK_SARADC 45
48#define SCLK_OTP 46
49#define SCLK_OTP_USR 47
50#define SCLK_CRYPTO 48
51#define SCLK_CRYPTO_APK 49
52#define SCLK_DDRC 50
53#define SCLK_ISP 51
54#define SCLK_CIF_OUT 52
55#define SCLK_RGA_CORE 53
56#define SCLK_VOPB_PWM 54
57#define SCLK_NANDC 55
58#define SCLK_SDIO 56
59#define SCLK_EMMC 57
60#define SCLK_SFC 58
61#define SCLK_SDMMC 59
62#define SCLK_OTG_ADP 60
63#define SCLK_GMAC_SRC 61
64#define SCLK_GMAC 62
65#define SCLK_GMAC_RX_TX 63
66#define SCLK_MAC_REF 64
67#define SCLK_MAC_REFOUT 65
68#define SCLK_MAC_OUT 66
69#define SCLK_SDMMC_DRV 67
70#define SCLK_SDMMC_SAMPLE 68
71#define SCLK_SDIO_DRV 69
72#define SCLK_SDIO_SAMPLE 70
73#define SCLK_EMMC_DRV 71
74#define SCLK_EMMC_SAMPLE 72
75#define SCLK_GPU 73
76#define SCLK_PVTM 74
77#define SCLK_CORE_VPU 75
78#define SCLK_GMAC_RMII 76
79#define SCLK_UART2_SRC 77
80#define SCLK_NANDC_DIV 78
81#define SCLK_NANDC_DIV50 79
82#define SCLK_SDIO_DIV 80
83#define SCLK_SDIO_DIV50 81
84#define SCLK_EMMC_DIV 82
85#define SCLK_EMMC_DIV50 83
86#define SCLK_DDRCLK 84
87#define SCLK_UART1_SRC 85
88
89/* dclk gates */
90#define DCLK_VOPB 150
91#define DCLK_VOPL 151
92
93/* aclk gates */
94#define ACLK_GPU 170
95#define ACLK_BUS_PRE 171
96#define ACLK_CRYPTO 172
97#define ACLK_VI_PRE 173
98#define ACLK_VO_PRE 174
99#define ACLK_VPU 175
100#define ACLK_PERI_PRE 176
101#define ACLK_GMAC 178
102#define ACLK_CIF 179
103#define ACLK_ISP 180
104#define ACLK_VOPB 181
105#define ACLK_VOPL 182
106#define ACLK_RGA 183
107#define ACLK_GIC 184
108#define ACLK_DCF 186
109#define ACLK_DMAC 187
110#define ACLK_BUS_SRC 188
111#define ACLK_PERI_SRC 189
112
113/* hclk gates */
114#define HCLK_BUS_PRE 240
115#define HCLK_CRYPTO 241
116#define HCLK_VI_PRE 242
117#define HCLK_VO_PRE 243
118#define HCLK_VPU 244
119#define HCLK_PERI_PRE 245
120#define HCLK_MMC_NAND 246
121#define HCLK_SDMMC 247
122#define HCLK_USB 248
123#define HCLK_CIF 249
124#define HCLK_ISP 250
125#define HCLK_VOPB 251
126#define HCLK_VOPL 252
127#define HCLK_RGA 253
128#define HCLK_NANDC 254
129#define HCLK_SDIO 255
130#define HCLK_EMMC 256
131#define HCLK_SFC 257
132#define HCLK_OTG 258
133#define HCLK_HOST 259
134#define HCLK_HOST_ARB 260
135#define HCLK_PDM 261
136#define HCLK_I2S0 262
137#define HCLK_I2S1 263
138#define HCLK_I2S2 264
139
140/* pclk gates */
141#define PCLK_BUS_PRE 320
142#define PCLK_DDR 321
143#define PCLK_VO_PRE 322
144#define PCLK_GMAC 323
145#define PCLK_MIPI_DSI 324
146#define PCLK_MIPIDSIPHY 325
147#define PCLK_MIPICSIPHY 326
148#define PCLK_USB_GRF 327
149#define PCLK_DCF 328
150#define PCLK_UART1 329
151#define PCLK_UART2 330
152#define PCLK_UART3 331
153#define PCLK_UART4 332
154#define PCLK_UART5 333
155#define PCLK_I2C0 334
156#define PCLK_I2C1 335
157#define PCLK_I2C2 336
158#define PCLK_I2C3 337
159#define PCLK_I2C4 338
160#define PCLK_PWM0 339
161#define PCLK_PWM1 340
162#define PCLK_SPI0 341
163#define PCLK_SPI1 342
164#define PCLK_SARADC 343
165#define PCLK_TSADC 344
166#define PCLK_TIMER 345
167#define PCLK_OTP_NS 346
168#define PCLK_WDT_NS 347
169#define PCLK_GPIO1 348
170#define PCLK_GPIO2 349
171#define PCLK_GPIO3 350
172#define PCLK_ISP 351
173#define PCLK_CIF 352
174#define PCLK_OTP_PHY 353
175
176#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
177
178/* pmu-clocks indices */
179
180#define PLL_GPLL 1
181
182#define SCLK_RTC32K_PMU 4
183#define SCLK_WIFI_PMU 5
184#define SCLK_UART0_PMU 6
185#define SCLK_PVTM_PMU 7
186#define PCLK_PMU_PRE 8
187#define SCLK_REF24M_PMU 9
188#define SCLK_USBPHY_REF 10
189#define SCLK_MIPIDSIPHY_REF 11
190
191#define XIN24M_DIV 12
192
193#define PCLK_GPIO0_PMU 20
194#define PCLK_UART0_PMU 21
195
196#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
197
198/* soft-reset indices */
199#define SRST_CORE0_PO 0
200#define SRST_CORE1_PO 1
201#define SRST_CORE2_PO 2
202#define SRST_CORE3_PO 3
203#define SRST_CORE0 4
204#define SRST_CORE1 5
205#define SRST_CORE2 6
206#define SRST_CORE3 7
207#define SRST_CORE0_DBG 8
208#define SRST_CORE1_DBG 9
209#define SRST_CORE2_DBG 10
210#define SRST_CORE3_DBG 11
211#define SRST_TOPDBG 12
212#define SRST_CORE_NOC 13
213#define SRST_STRC_A 14
214#define SRST_L2C 15
215
216#define SRST_DAP 16
217#define SRST_CORE_PVTM 17
218#define SRST_GPU 18
219#define SRST_GPU_NIU 19
220#define SRST_UPCTL2 20
221#define SRST_UPCTL2_A 21
222#define SRST_UPCTL2_P 22
223#define SRST_MSCH 23
224#define SRST_MSCH_P 24
225#define SRST_DDRMON_P 25
226#define SRST_DDRSTDBY_P 26
227#define SRST_DDRSTDBY 27
228#define SRST_DDRGRF_p 28
229#define SRST_AXI_SPLIT_A 29
230#define SRST_AXI_CMD_A 30
231#define SRST_AXI_CMD_P 31
232
233#define SRST_DDRPHY 32
234#define SRST_DDRPHYDIV 33
235#define SRST_DDRPHY_P 34
236#define SRST_VPU_A 36
237#define SRST_VPU_NIU_A 37
238#define SRST_VPU_H 38
239#define SRST_VPU_NIU_H 39
240#define SRST_VI_NIU_A 40
241#define SRST_VI_NIU_H 41
242#define SRST_ISP_H 42
243#define SRST_ISP 43
244#define SRST_CIF_A 44
245#define SRST_CIF_H 45
246#define SRST_CIF_PCLKIN 46
247#define SRST_MIPICSIPHY_P 47
248
249#define SRST_VO_NIU_A 48
250#define SRST_VO_NIU_H 49
251#define SRST_VO_NIU_P 50
252#define SRST_VOPB_A 51
253#define SRST_VOPB_H 52
254#define SRST_VOPB 53
255#define SRST_PWM_VOPB 54
256#define SRST_VOPL_A 55
257#define SRST_VOPL_H 56
258#define SRST_VOPL 57
259#define SRST_RGA_A 58
260#define SRST_RGA_H 59
261#define SRST_RGA 60
262#define SRST_MIPIDSI_HOST_P 61
263#define SRST_MIPIDSIPHY_P 62
264#define SRST_VPU_CORE 63
265
266#define SRST_PERI_NIU_A 64
267#define SRST_USB_NIU_H 65
268#define SRST_USB2OTG_H 66
269#define SRST_USB2OTG 67
270#define SRST_USB2OTG_ADP 68
271#define SRST_USB2HOST_H 69
272#define SRST_USB2HOST_ARB_H 70
273#define SRST_USB2HOST_AUX_H 71
274#define SRST_USB2HOST_EHCI 72
275#define SRST_USB2HOST 73
276#define SRST_USBPHYPOR 74
277#define SRST_USBPHY_OTG_PORT 75
278#define SRST_USBPHY_HOST_PORT 76
279#define SRST_USBPHY_GRF 77
280#define SRST_CPU_BOOST_P 78
281#define SRST_CPU_BOOST 79
282
283#define SRST_MMC_NAND_NIU_H 80
284#define SRST_SDIO_H 81
285#define SRST_EMMC_H 82
286#define SRST_SFC_H 83
287#define SRST_SFC 84
288#define SRST_SDCARD_NIU_H 85
289#define SRST_SDMMC_H 86
290#define SRST_NANDC_H 89
291#define SRST_NANDC 90
292#define SRST_GMAC_NIU_A 92
293#define SRST_GMAC_NIU_P 93
294#define SRST_GMAC_A 94
295
296#define SRST_PMU_NIU_P 96
297#define SRST_PMU_SGRF_P 97
298#define SRST_PMU_GRF_P 98
299#define SRST_PMU 99
300#define SRST_PMU_MEM_P 100
301#define SRST_PMU_GPIO0_P 101
302#define SRST_PMU_UART0_P 102
303#define SRST_PMU_CRU_P 103
304#define SRST_PMU_PVTM 104
305#define SRST_PMU_UART 105
306#define SRST_PMU_NIU_H 106
307#define SRST_PMU_DDR_FAIL_SAVE 107
308#define SRST_PMU_CORE_PERF_A 108
309#define SRST_PMU_CORE_GRF_P 109
310#define SRST_PMU_GPU_PERF_A 110
311#define SRST_PMU_GPU_GRF_P 111
312
313#define SRST_CRYPTO_NIU_A 112
314#define SRST_CRYPTO_NIU_H 113
315#define SRST_CRYPTO_A 114
316#define SRST_CRYPTO_H 115
317#define SRST_CRYPTO 116
318#define SRST_CRYPTO_APK 117
319#define SRST_BUS_NIU_H 120
320#define SRST_USB_NIU_P 121
321#define SRST_BUS_TOP_NIU_P 122
322#define SRST_INTMEM_A 123
323#define SRST_GIC_A 124
324#define SRST_ROM_H 126
325#define SRST_DCF_A 127
326
327#define SRST_DCF_P 128
328#define SRST_PDM_H 129
329#define SRST_PDM 130
330#define SRST_I2S0_H 131
331#define SRST_I2S0_TX 132
332#define SRST_I2S1_H 133
333#define SRST_I2S1 134
334#define SRST_I2S2_H 135
335#define SRST_I2S2 136
336#define SRST_UART1_P 137
337#define SRST_UART1 138
338#define SRST_UART2_P 139
339#define SRST_UART2 140
340#define SRST_UART3_P 141
341#define SRST_UART3 142
342#define SRST_UART4_P 143
343
344#define SRST_UART4 144
345#define SRST_UART5_P 145
346#define SRST_UART5 146
347#define SRST_I2C0_P 147
348#define SRST_I2C0 148
349#define SRST_I2C1_P 149
350#define SRST_I2C1 150
351#define SRST_I2C2_P 151
352#define SRST_I2C2 152
353#define SRST_I2C3_P 153
354#define SRST_I2C3 154
355#define SRST_PWM0_P 157
356#define SRST_PWM0 158
357#define SRST_PWM1_P 159
358
359#define SRST_PWM1 160
360#define SRST_SPI0_P 161
361#define SRST_SPI0 162
362#define SRST_SPI1_P 163
363#define SRST_SPI1 164
364#define SRST_SARADC_P 165
365#define SRST_SARADC 166
366#define SRST_TSADC_P 167
367#define SRST_TSADC 168
368#define SRST_TIMER_P 169
369#define SRST_TIMER0 170
370#define SRST_TIMER1 171
371#define SRST_TIMER2 172
372#define SRST_TIMER3 173
373#define SRST_TIMER4 174
374#define SRST_TIMER5 175
375
376#define SRST_OTP_NS_P 176
377#define SRST_OTP_NS_SBPI 177
378#define SRST_OTP_NS_USR 178
379#define SRST_OTP_PHY_P 179
380#define SRST_OTP_PHY 180
381#define SRST_WDT_NS_P 181
382#define SRST_GPIO1_P 182
383#define SRST_GPIO2_P 183
384#define SRST_GPIO3_P 184
385#define SRST_SGRF_P 185
386#define SRST_GRF_P 186
387#define SRST_I2S0_RX 191
388
389#endif
diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h
index e65803b1dc7e..0b0fd2b01538 100644
--- a/include/dt-bindings/clock/pxa-clock.h
+++ b/include/dt-bindings/clock/pxa-clock.h
@@ -72,6 +72,7 @@
72#define CLK_USIM 58 72#define CLK_USIM 58
73#define CLK_USIM1 59 73#define CLK_USIM1 59
74#define CLK_USMI0 60 74#define CLK_USMI0 60
75#define CLK_MAX 61 75#define CLK_OSC32k768 61
76#define CLK_MAX 62
76 77
77#endif 78#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
new file mode 100644
index 000000000000..11eed4bc9646
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -0,0 +1,45 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
4 */
5
6#ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
7#define _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
8
9/* DISP_CC clock registers */
10#define DISP_CC_MDSS_AHB_CLK 0
11#define DISP_CC_MDSS_AXI_CLK 1
12#define DISP_CC_MDSS_BYTE0_CLK 2
13#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
14#define DISP_CC_MDSS_BYTE0_INTF_CLK 4
15#define DISP_CC_MDSS_BYTE1_CLK 5
16#define DISP_CC_MDSS_BYTE1_CLK_SRC 6
17#define DISP_CC_MDSS_BYTE1_INTF_CLK 7
18#define DISP_CC_MDSS_ESC0_CLK 8
19#define DISP_CC_MDSS_ESC0_CLK_SRC 9
20#define DISP_CC_MDSS_ESC1_CLK 10
21#define DISP_CC_MDSS_ESC1_CLK_SRC 11
22#define DISP_CC_MDSS_MDP_CLK 12
23#define DISP_CC_MDSS_MDP_CLK_SRC 13
24#define DISP_CC_MDSS_MDP_LUT_CLK 14
25#define DISP_CC_MDSS_PCLK0_CLK 15
26#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
27#define DISP_CC_MDSS_PCLK1_CLK 17
28#define DISP_CC_MDSS_PCLK1_CLK_SRC 18
29#define DISP_CC_MDSS_ROT_CLK 19
30#define DISP_CC_MDSS_ROT_CLK_SRC 20
31#define DISP_CC_MDSS_RSCC_AHB_CLK 21
32#define DISP_CC_MDSS_RSCC_VSYNC_CLK 22
33#define DISP_CC_MDSS_VSYNC_CLK 23
34#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
35#define DISP_CC_PLL0 25
36#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 26
37#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 27
38
39/* DISP_CC Reset */
40#define DISP_CC_MDSS_RSCC_BCR 0
41
42/* DISP_CC GDSCR */
43#define MDSS_GDSC 0
44
45#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index aca61264f12c..f96fc2dbf60e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -192,6 +192,8 @@
192#define GCC_VS_CTRL_CLK_SRC 182 192#define GCC_VS_CTRL_CLK_SRC 182
193#define GCC_VSENSOR_CLK_SRC 183 193#define GCC_VSENSOR_CLK_SRC 183
194#define GPLL4 184 194#define GPLL4 184
195#define GCC_CPUSS_DVM_BUS_CLK 185
196#define GCC_CPUSS_GNOC_CLK 186
195 197
196/* GCC Resets */ 198/* GCC Resets */
197#define GCC_MMSS_BCR 0 199#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..90c0f3dc1ba1
--- /dev/null
+++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h
@@ -0,0 +1,148 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * R9A06G032 sysctrl IDs
4 *
5 * Copyright (C) 2018 Renesas Electronics Europe Limited
6 *
7 * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
8 */
9
10#ifndef __DT_BINDINGS_R9A06G032_SYSCTRL_H__
11#define __DT_BINDINGS_R9A06G032_SYSCTRL_H__
12
13#define R9A06G032_CLK_PLL_USB 1
14#define R9A06G032_CLK_48 1 /* AKA CLK_PLL_USB */
15#define R9A06G032_MSEBIS_CLK 3 /* AKA CLKOUT_D16 */
16#define R9A06G032_MSEBIM_CLK 3 /* AKA CLKOUT_D16 */
17#define R9A06G032_CLK_DDRPHY_PLLCLK 5 /* AKA CLKOUT_D1OR2 */
18#define R9A06G032_CLK50 6 /* AKA CLKOUT_D20 */
19#define R9A06G032_CLK25 7 /* AKA CLKOUT_D40 */
20#define R9A06G032_CLK125 9 /* AKA CLKOUT_D8 */
21#define R9A06G032_CLK_P5_PG1 17 /* AKA DIV_P5_PG */
22#define R9A06G032_CLK_REF_SYNC 21 /* AKA DIV_REF_SYNC */
23#define R9A06G032_CLK_25_PG4 26
24#define R9A06G032_CLK_25_PG5 27
25#define R9A06G032_CLK_25_PG6 28
26#define R9A06G032_CLK_25_PG7 29
27#define R9A06G032_CLK_25_PG8 30
28#define R9A06G032_CLK_ADC 31
29#define R9A06G032_CLK_ECAT100 32
30#define R9A06G032_CLK_HSR100 33
31#define R9A06G032_CLK_I2C0 34
32#define R9A06G032_CLK_I2C1 35
33#define R9A06G032_CLK_MII_REF 36
34#define R9A06G032_CLK_NAND 37
35#define R9A06G032_CLK_NOUSBP2_PG6 38
36#define R9A06G032_CLK_P1_PG2 39
37#define R9A06G032_CLK_P1_PG3 40
38#define R9A06G032_CLK_P1_PG4 41
39#define R9A06G032_CLK_P4_PG3 42
40#define R9A06G032_CLK_P4_PG4 43
41#define R9A06G032_CLK_P6_PG1 44
42#define R9A06G032_CLK_P6_PG2 45
43#define R9A06G032_CLK_P6_PG3 46
44#define R9A06G032_CLK_P6_PG4 47
45#define R9A06G032_CLK_PCI_USB 48
46#define R9A06G032_CLK_QSPI0 49
47#define R9A06G032_CLK_QSPI1 50
48#define R9A06G032_CLK_RGMII_REF 51
49#define R9A06G032_CLK_RMII_REF 52
50#define R9A06G032_CLK_SDIO0 53
51#define R9A06G032_CLK_SDIO1 54
52#define R9A06G032_CLK_SERCOS100 55
53#define R9A06G032_CLK_SLCD 56
54#define R9A06G032_CLK_SPI0 57
55#define R9A06G032_CLK_SPI1 58
56#define R9A06G032_CLK_SPI2 59
57#define R9A06G032_CLK_SPI3 60
58#define R9A06G032_CLK_SPI4 61
59#define R9A06G032_CLK_SPI5 62
60#define R9A06G032_CLK_SWITCH 63
61#define R9A06G032_HCLK_ECAT125 65
62#define R9A06G032_HCLK_PINCONFIG 66
63#define R9A06G032_HCLK_SERCOS 67
64#define R9A06G032_HCLK_SGPIO2 68
65#define R9A06G032_HCLK_SGPIO3 69
66#define R9A06G032_HCLK_SGPIO4 70
67#define R9A06G032_HCLK_TIMER0 71
68#define R9A06G032_HCLK_TIMER1 72
69#define R9A06G032_HCLK_USBF 73
70#define R9A06G032_HCLK_USBH 74
71#define R9A06G032_HCLK_USBPM 75
72#define R9A06G032_CLK_48_PG_F 76
73#define R9A06G032_CLK_48_PG4 77
74#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
75#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
76#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
77#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
78#define R9A06G032_HCLK_CAN0 85
79#define R9A06G032_HCLK_CAN1 86
80#define R9A06G032_HCLK_DELTASIGMA 87
81#define R9A06G032_HCLK_PWMPTO 88
82#define R9A06G032_HCLK_RSV 89
83#define R9A06G032_HCLK_SGPIO0 90
84#define R9A06G032_HCLK_SGPIO1 91
85#define R9A06G032_RTOS_MDC 92
86#define R9A06G032_CLK_CM3 93
87#define R9A06G032_CLK_DDRC 94
88#define R9A06G032_CLK_ECAT25 95
89#define R9A06G032_CLK_HSR50 96
90#define R9A06G032_CLK_HW_RTOS 97
91#define R9A06G032_CLK_SERCOS50 98
92#define R9A06G032_HCLK_ADC 99
93#define R9A06G032_HCLK_CM3 100
94#define R9A06G032_HCLK_CRYPTO_EIP150 101
95#define R9A06G032_HCLK_CRYPTO_EIP93 102
96#define R9A06G032_HCLK_DDRC 103
97#define R9A06G032_HCLK_DMA0 104
98#define R9A06G032_HCLK_DMA1 105
99#define R9A06G032_HCLK_GMAC0 106
100#define R9A06G032_HCLK_GMAC1 107
101#define R9A06G032_HCLK_GPIO0 108
102#define R9A06G032_HCLK_GPIO1 109
103#define R9A06G032_HCLK_GPIO2 110
104#define R9A06G032_HCLK_HSR 111
105#define R9A06G032_HCLK_I2C0 112
106#define R9A06G032_HCLK_I2C1 113
107#define R9A06G032_HCLK_LCD 114
108#define R9A06G032_HCLK_MSEBI_M 115
109#define R9A06G032_HCLK_MSEBI_S 116
110#define R9A06G032_HCLK_NAND 117
111#define R9A06G032_HCLK_PG_I 118
112#define R9A06G032_HCLK_PG19 119
113#define R9A06G032_HCLK_PG20 120
114#define R9A06G032_HCLK_PG3 121
115#define R9A06G032_HCLK_PG4 122
116#define R9A06G032_HCLK_QSPI0 123
117#define R9A06G032_HCLK_QSPI1 124
118#define R9A06G032_HCLK_ROM 125
119#define R9A06G032_HCLK_RTC 126
120#define R9A06G032_HCLK_SDIO0 127
121#define R9A06G032_HCLK_SDIO1 128
122#define R9A06G032_HCLK_SEMAP 129
123#define R9A06G032_HCLK_SPI0 130
124#define R9A06G032_HCLK_SPI1 131
125#define R9A06G032_HCLK_SPI2 132
126#define R9A06G032_HCLK_SPI3 133
127#define R9A06G032_HCLK_SPI4 134
128#define R9A06G032_HCLK_SPI5 135
129#define R9A06G032_HCLK_SWITCH 136
130#define R9A06G032_HCLK_SWITCH_RG 137
131#define R9A06G032_HCLK_UART0 138
132#define R9A06G032_HCLK_UART1 139
133#define R9A06G032_HCLK_UART2 140
134#define R9A06G032_HCLK_UART3 141
135#define R9A06G032_HCLK_UART4 142
136#define R9A06G032_HCLK_UART5 143
137#define R9A06G032_HCLK_UART6 144
138#define R9A06G032_HCLK_UART7 145
139#define R9A06G032_CLK_UART0 146
140#define R9A06G032_CLK_UART1 147
141#define R9A06G032_CLK_UART2 148
142#define R9A06G032_CLK_UART3 149
143#define R9A06G032_CLK_UART4 150
144#define R9A06G032_CLK_UART5 151
145#define R9A06G032_CLK_UART6 152
146#define R9A06G032_CLK_UART7 153
147
148#endif /* __DT_BINDINGS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/dt-bindings/clock/rk3399-ddr.h b/include/dt-bindings/clock/rk3399-ddr.h
new file mode 100644
index 000000000000..ed2280844963
--- /dev/null
+++ b/include/dt-bindings/clock/rk3399-ddr.h
@@ -0,0 +1,56 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2
3#ifndef DT_BINDINGS_DDR_H
4#define DT_BINDINGS_DDR_H
5
6/*
7 * DDR3 SDRAM Standard Speed Bins include tCK, tRCD, tRP, tRAS and tRC for
8 * each corresponding bin.
9 */
10
11/* DDR3-800 (5-5-5) */
12#define DDR3_800D 0
13/* DDR3-800 (6-6-6) */
14#define DDR3_800E 1
15/* DDR3-1066 (6-6-6) */
16#define DDR3_1066E 2
17/* DDR3-1066 (7-7-7) */
18#define DDR3_1066F 3
19/* DDR3-1066 (8-8-8) */
20#define DDR3_1066G 4
21/* DDR3-1333 (7-7-7) */
22#define DDR3_1333F 5
23/* DDR3-1333 (8-8-8) */
24#define DDR3_1333G 6
25/* DDR3-1333 (9-9-9) */
26#define DDR3_1333H 7
27/* DDR3-1333 (10-10-10) */
28#define DDR3_1333J 8
29/* DDR3-1600 (8-8-8) */
30#define DDR3_1600G 9
31/* DDR3-1600 (9-9-9) */
32#define DDR3_1600H 10
33/* DDR3-1600 (10-10-10) */
34#define DDR3_1600J 11
35/* DDR3-1600 (11-11-11) */
36#define DDR3_1600K 12
37/* DDR3-1866 (10-10-10) */
38#define DDR3_1866J 13
39/* DDR3-1866 (11-11-11) */
40#define DDR3_1866K 14
41/* DDR3-1866 (12-12-12) */
42#define DDR3_1866L 15
43/* DDR3-1866 (13-13-13) */
44#define DDR3_1866M 16
45/* DDR3-2133 (11-11-11) */
46#define DDR3_2133K 17
47/* DDR3-2133 (12-12-12) */
48#define DDR3_2133L 18
49/* DDR3-2133 (13-13-13) */
50#define DDR3_2133M 19
51/* DDR3-2133 (14-14-14) */
52#define DDR3_2133N 20
53/* DDR3 ATF default */
54#define DDR3_DEFAULT 21
55
56#endif
diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h
index 4fa5f69fc297..f9e15a235626 100644
--- a/include/dt-bindings/clock/sun8i-r40-ccu.h
+++ b/include/dt-bindings/clock/sun8i-r40-ccu.h
@@ -43,6 +43,10 @@
43#ifndef _DT_BINDINGS_CLK_SUN8I_R40_H_ 43#ifndef _DT_BINDINGS_CLK_SUN8I_R40_H_
44#define _DT_BINDINGS_CLK_SUN8I_R40_H_ 44#define _DT_BINDINGS_CLK_SUN8I_R40_H_
45 45
46#define CLK_PLL_VIDEO0 7
47
48#define CLK_PLL_VIDEO1 16
49
46#define CLK_CPU 24 50#define CLK_CPU 24
47 51
48#define CLK_BUS_MIPI_DSI 29 52#define CLK_BUS_MIPI_DSI 29
diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h
new file mode 100644
index 000000000000..25164d767835
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-tcon-top.h
@@ -0,0 +1,11 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2/* Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
3
4#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
5#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
6
7#define CLK_TCON_TOP_TV0 0
8#define CLK_TCON_TOP_TV1 1
9#define CLK_TCON_TOP_DSI 2
10
11#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 2732d6c0fb39..eb81867eac77 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -39,4 +39,8 @@
39#define AT91_PERIPH_C 3 39#define AT91_PERIPH_C 3
40#define AT91_PERIPH_D 4 40#define AT91_PERIPH_D 4
41 41
42#define ATMEL_PIO_DRVSTR_LO 1
43#define ATMEL_PIO_DRVSTR_ME 2
44#define ATMEL_PIO_DRVSTR_HI 3
45
42#endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */ 46#endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index ceb672305f59..b1832506b923 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -1,14 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Samsung's Exynos pinctrl bindings 3 * Samsung's Exynos pinctrl bindings
3 * 4 *
4 * Copyright (c) 2016 Samsung Electronics Co., Ltd. 5 * Copyright (c) 2016 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com 6 * http://www.samsung.com
6 * Author: Krzysztof Kozlowski <krzk@kernel.org> 7 * Author: Krzysztof Kozlowski <krzk@kernel.org>
7 * 8 */
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12 9
13#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__ 10#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
14#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__ 11#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h
index cf28631d7109..d0baba1973d4 100644
--- a/include/dt-bindings/regulator/maxim,max77802.h
+++ b/include/dt-bindings/regulator/maxim,max77802.h
@@ -1,10 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (C) 2014 Google, Inc 3 * Copyright (C) 2014 Google, Inc
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Device Tree binding constants for the Maxim 77802 PMIC regulators 5 * Device Tree binding constants for the Maxim 77802 PMIC regulators
9 */ 6 */
10 7
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
new file mode 100644
index 000000000000..86713dcf9e02
--- /dev/null
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
@@ -0,0 +1,36 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
3
4#ifndef __QCOM_RPMH_REGULATOR_H
5#define __QCOM_RPMH_REGULATOR_H
6
7/*
8 * These mode constants may be used to specify modes for various RPMh regulator
9 * device tree properties (e.g. regulator-initial-mode). Each type of regulator
10 * supports a subset of the possible modes.
11 *
12 * %RPMH_REGULATOR_MODE_RET: Retention mode in which only an extremely small
13 * load current is allowed. This mode is supported
14 * by LDO and SMPS type regulators.
15 * %RPMH_REGULATOR_MODE_LPM: Low power mode in which a small load current is
16 * allowed. This mode corresponds to PFM for SMPS
17 * and BOB type regulators. This mode is supported
18 * by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type
19 * regulators.
20 * %RPMH_REGULATOR_MODE_AUTO: Auto mode in which the regulator hardware
21 * automatically switches between LPM and HPM based
22 * upon the real-time load current. This mode is
23 * supported by HFSMPS, BOB, and PMIC4 FTSMPS type
24 * regulators.
25 * %RPMH_REGULATOR_MODE_HPM: High power mode in which the full rated current
26 * of the regulator is allowed. This mode
27 * corresponds to PWM for SMPS and BOB type
28 * regulators. This mode is supported by all types
29 * of regulators.
30 */
31#define RPMH_REGULATOR_MODE_RET 0
32#define RPMH_REGULATOR_MODE_LPM 1
33#define RPMH_REGULATOR_MODE_AUTO 2
34#define RPMH_REGULATOR_MODE_HPM 3
35
36#endif
diff --git a/include/dt-bindings/soc/qcom,rpmh-rsc.h b/include/dt-bindings/soc/qcom,rpmh-rsc.h
new file mode 100644
index 000000000000..868f998ea998
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,rpmh-rsc.h
@@ -0,0 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#ifndef __DT_QCOM_RPMH_RSC_H__
7#define __DT_QCOM_RPMH_RSC_H__
8
9#define SLEEP_TCS 0
10#define WAKE_TCS 1
11#define ACTIVE_TCS 2
12#define CONTROL_TCS 3
13
14#endif /* __DT_QCOM_RPMH_RSC_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4b35a66383f9..de8d3d3fa651 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
443int acpi_check_region(resource_size_t start, resource_size_t n, 443int acpi_check_region(resource_size_t start, resource_size_t n,
444 const char *name); 444 const char *name);
445 445
446acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
447 u32 level);
448
446int acpi_resources_are_enforced(void); 449int acpi_resources_are_enforced(void);
447 450
448#ifdef CONFIG_HIBERNATION 451#ifdef CONFIG_HIBERNATION
@@ -1055,27 +1058,20 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
1055 1058
1056/* Device properties */ 1059/* Device properties */
1057 1060
1058#define MAX_ACPI_REFERENCE_ARGS 8
1059struct acpi_reference_args {
1060 struct acpi_device *adev;
1061 size_t nargs;
1062 u64 args[MAX_ACPI_REFERENCE_ARGS];
1063};
1064
1065#ifdef CONFIG_ACPI 1061#ifdef CONFIG_ACPI
1066int acpi_dev_get_property(const struct acpi_device *adev, const char *name, 1062int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
1067 acpi_object_type type, const union acpi_object **obj); 1063 acpi_object_type type, const union acpi_object **obj);
1068int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, 1064int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
1069 const char *name, size_t index, size_t num_args, 1065 const char *name, size_t index, size_t num_args,
1070 struct acpi_reference_args *args); 1066 struct fwnode_reference_args *args);
1071 1067
1072static inline int acpi_node_get_property_reference( 1068static inline int acpi_node_get_property_reference(
1073 const struct fwnode_handle *fwnode, 1069 const struct fwnode_handle *fwnode,
1074 const char *name, size_t index, 1070 const char *name, size_t index,
1075 struct acpi_reference_args *args) 1071 struct fwnode_reference_args *args)
1076{ 1072{
1077 return __acpi_node_get_property_reference(fwnode, name, index, 1073 return __acpi_node_get_property_reference(fwnode, name, index,
1078 MAX_ACPI_REFERENCE_ARGS, args); 1074 NR_FWNODE_REFERENCE_ARGS, args);
1079} 1075}
1080 1076
1081int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, 1077int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
@@ -1093,14 +1089,6 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
1093 struct fwnode_handle *child); 1089 struct fwnode_handle *child);
1094struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode); 1090struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
1095 1091
1096struct fwnode_handle *
1097acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
1098 struct fwnode_handle *prev);
1099int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
1100 struct fwnode_handle **remote,
1101 struct fwnode_handle **port,
1102 struct fwnode_handle **endpoint);
1103
1104struct acpi_probe_entry; 1092struct acpi_probe_entry;
1105typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, 1093typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
1106 struct acpi_probe_entry *); 1094 struct acpi_probe_entry *);
@@ -1166,7 +1154,7 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
1166static inline int 1154static inline int
1167__acpi_node_get_property_reference(const struct fwnode_handle *fwnode, 1155__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
1168 const char *name, size_t index, size_t num_args, 1156 const char *name, size_t index, size_t num_args,
1169 struct acpi_reference_args *args) 1157 struct fwnode_reference_args *args)
1170{ 1158{
1171 return -ENXIO; 1159 return -ENXIO;
1172} 1160}
@@ -1174,7 +1162,7 @@ __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
1174static inline int 1162static inline int
1175acpi_node_get_property_reference(const struct fwnode_handle *fwnode, 1163acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
1176 const char *name, size_t index, 1164 const char *name, size_t index,
1177 struct acpi_reference_args *args) 1165 struct fwnode_reference_args *args)
1178{ 1166{
1179 return -ENXIO; 1167 return -ENXIO;
1180} 1168}
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
new file mode 100644
index 000000000000..4cc40201273e
--- /dev/null
+++ b/include/linux/ascii85.h
@@ -0,0 +1,38 @@
1/*
2 * SPDX-License-Identifier: GPL-2.0
3 *
4 * Copyright (c) 2008 Intel Corporation
5 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
6 */
7
8#ifndef _ASCII85_H_
9#define _ASCII85_H_
10
11#include <linux/kernel.h>
12
13#define ASCII85_BUFSZ 6
14
15static inline long
16ascii85_encode_len(long len)
17{
18 return DIV_ROUND_UP(len, 4);
19}
20
21static inline const char *
22ascii85_encode(u32 in, char *out)
23{
24 int i;
25
26 if (in == 0)
27 return "z";
28
29 out[5] = '\0';
30 for (i = 5; i--; ) {
31 out[i] = '!' + in % 85;
32 in /= 85;
33 }
34
35 return out;
36}
37
38#endif
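The helpers above encode one 32-bit word per call into a caller-supplied buffer of ASCII85_BUFSZ bytes, returning "z" for an all-zero word. A minimal usage sketch, not taken from this patch (the function name and the pr_info()/pr_cont() output path are illustrative assumptions):

#include <linux/ascii85.h>
#include <linux/printk.h>

/* Illustrative only: dump a buffer of words as ascii85, one group per u32. */
static void example_dump_ascii85(const u32 *buf, long nwords)
{
	char out[ASCII85_BUFSZ];
	long i;

	/* ascii85_encode_len() reports how many output groups a byte count needs. */
	pr_info("groups needed: %ld\n", ascii85_encode_len(nwords * 4));

	for (i = 0; i < nwords; i++)
		pr_cont("%s", ascii85_encode(buf[i], out));
	pr_cont("\n");
}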
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0c27515d2cf6..8124815eb121 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -214,6 +214,7 @@ struct atmphy_ops {
214struct atm_skb_data { 214struct atm_skb_data {
215 struct atm_vcc *vcc; /* ATM VCC */ 215 struct atm_vcc *vcc; /* ATM VCC */
216 unsigned long atm_options; /* ATM layer options */ 216 unsigned long atm_options; /* ATM layer options */
217 unsigned int acct_truesize; /* truesize accounted to vcc */
217}; 218};
218 219
219#define VCC_HTABLE_SIZE 32 220#define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
241 242
242void atm_dev_release_vccs(struct atm_dev *dev); 243void atm_dev_release_vccs(struct atm_dev *dev);
243 244
245static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
246{
247 /*
248 * Because ATM skbs may not belong to a sock (and we don't
249 * necessarily want to), skb->truesize may be adjusted,
250 * escaping the hack in pskb_expand_head() which avoids
251 * doing so for some cases. So stash the value of truesize
252 * at the time we accounted it, and atm_pop_raw() can use
253 * that value later, in case it changes.
254 */
255 refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
256 ATM_SKB(skb)->acct_truesize = skb->truesize;
257 ATM_SKB(skb)->atm_options = vcc->atm_options;
258}
244 259
245static inline void atm_force_charge(struct atm_vcc *vcc,int truesize) 260static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
246{ 261{
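The stashed acct_truesize is intended for the transmit-completion (pop) path, which should release exactly what was accounted here rather than the then-current skb->truesize. A hedged sketch of such a pop helper, not taken from this patch (the function name and the dev_kfree_skb_any() release path are illustrative assumptions):

#include <linux/atmdev.h>
#include <linux/skbuff.h>

/* Illustrative pop helper: undo the accounting done in atm_account_tx(). */
static void example_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* Use the stashed value, not skb->truesize, which may have changed. */
	WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
				      &sk_atm(vcc)->sk_wmem_alloc));
	dev_kfree_skb_any(skb);
}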
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 01ce3997cb42..1e8e88bdaf09 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
2/* Atomic operations usable in machine independent code */ 2/* Atomic operations usable in machine independent code */
3#ifndef _LINUX_ATOMIC_H 3#ifndef _LINUX_ATOMIC_H
4#define _LINUX_ATOMIC_H 4#define _LINUX_ATOMIC_H
5#include <linux/types.h>
6
5#include <asm/atomic.h> 7#include <asm/atomic.h>
6#include <asm/barrier.h> 8#include <asm/barrier.h>
7 9
@@ -36,40 +38,46 @@
36 * barriers on top of the relaxed variant. In the case where the relaxed 38 * barriers on top of the relaxed variant. In the case where the relaxed
37 * variant is already fully ordered, no additional barriers are needed. 39 * variant is already fully ordered, no additional barriers are needed.
38 * 40 *
39 * Besides, if an arch has a special barrier for acquire/release, it could 41 * If an architecture overrides __atomic_acquire_fence() it will probably
40 * implement its own __atomic_op_* and use the same framework for building 42 * want to define smp_mb__after_spinlock().
41 * variants
42 *
43 * If an architecture overrides __atomic_op_acquire() it will probably want
44 * to define smp_mb__after_spinlock().
45 */ 43 */
46#ifndef __atomic_op_acquire 44#ifndef __atomic_acquire_fence
45#define __atomic_acquire_fence smp_mb__after_atomic
46#endif
47
48#ifndef __atomic_release_fence
49#define __atomic_release_fence smp_mb__before_atomic
50#endif
51
52#ifndef __atomic_pre_full_fence
53#define __atomic_pre_full_fence smp_mb__before_atomic
54#endif
55
56#ifndef __atomic_post_full_fence
57#define __atomic_post_full_fence smp_mb__after_atomic
58#endif
59
47#define __atomic_op_acquire(op, args...) \ 60#define __atomic_op_acquire(op, args...) \
48({ \ 61({ \
49 typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ 62 typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
50 smp_mb__after_atomic(); \ 63 __atomic_acquire_fence(); \
51 __ret; \ 64 __ret; \
52}) 65})
53#endif
54 66
55#ifndef __atomic_op_release
56#define __atomic_op_release(op, args...) \ 67#define __atomic_op_release(op, args...) \
57({ \ 68({ \
58 smp_mb__before_atomic(); \ 69 __atomic_release_fence(); \
59 op##_relaxed(args); \ 70 op##_relaxed(args); \
60}) 71})
61#endif
62 72
63#ifndef __atomic_op_fence
64#define __atomic_op_fence(op, args...) \ 73#define __atomic_op_fence(op, args...) \
65({ \ 74({ \
66 typeof(op##_relaxed(args)) __ret; \ 75 typeof(op##_relaxed(args)) __ret; \
67 smp_mb__before_atomic(); \ 76 __atomic_pre_full_fence(); \
68 __ret = op##_relaxed(args); \ 77 __ret = op##_relaxed(args); \
69 smp_mb__after_atomic(); \ 78 __atomic_post_full_fence(); \
70 __ret; \ 79 __ret; \
71}) 80})
72#endif
73 81
74/* atomic_add_return_relaxed */ 82/* atomic_add_return_relaxed */
75#ifndef atomic_add_return_relaxed 83#ifndef atomic_add_return_relaxed
@@ -95,11 +103,23 @@
95#endif 103#endif
96#endif /* atomic_add_return_relaxed */ 104#endif /* atomic_add_return_relaxed */
97 105
106#ifndef atomic_inc
107#define atomic_inc(v) atomic_add(1, (v))
108#endif
109
98/* atomic_inc_return_relaxed */ 110/* atomic_inc_return_relaxed */
99#ifndef atomic_inc_return_relaxed 111#ifndef atomic_inc_return_relaxed
112
113#ifndef atomic_inc_return
114#define atomic_inc_return(v) atomic_add_return(1, (v))
115#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
116#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
117#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
118#else /* atomic_inc_return */
100#define atomic_inc_return_relaxed atomic_inc_return 119#define atomic_inc_return_relaxed atomic_inc_return
101#define atomic_inc_return_acquire atomic_inc_return 120#define atomic_inc_return_acquire atomic_inc_return
102#define atomic_inc_return_release atomic_inc_return 121#define atomic_inc_return_release atomic_inc_return
122#endif /* atomic_inc_return */
103 123
104#else /* atomic_inc_return_relaxed */ 124#else /* atomic_inc_return_relaxed */
105 125
@@ -143,11 +163,23 @@
143#endif 163#endif
144#endif /* atomic_sub_return_relaxed */ 164#endif /* atomic_sub_return_relaxed */
145 165
166#ifndef atomic_dec
167#define atomic_dec(v) atomic_sub(1, (v))
168#endif
169
146/* atomic_dec_return_relaxed */ 170/* atomic_dec_return_relaxed */
147#ifndef atomic_dec_return_relaxed 171#ifndef atomic_dec_return_relaxed
172
173#ifndef atomic_dec_return
174#define atomic_dec_return(v) atomic_sub_return(1, (v))
175#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
176#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
177#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
178#else /* atomic_dec_return */
148#define atomic_dec_return_relaxed atomic_dec_return 179#define atomic_dec_return_relaxed atomic_dec_return
149#define atomic_dec_return_acquire atomic_dec_return 180#define atomic_dec_return_acquire atomic_dec_return
150#define atomic_dec_return_release atomic_dec_return 181#define atomic_dec_return_release atomic_dec_return
182#endif /* atomic_dec_return */
151 183
152#else /* atomic_dec_return_relaxed */ 184#else /* atomic_dec_return_relaxed */
153 185
@@ -328,12 +360,22 @@
328#endif 360#endif
329#endif /* atomic_fetch_and_relaxed */ 361#endif /* atomic_fetch_and_relaxed */
330 362
331#ifdef atomic_andnot 363#ifndef atomic_andnot
332/* atomic_fetch_andnot_relaxed */ 364#define atomic_andnot(i, v) atomic_and(~(int)(i), (v))
365#endif
366
333#ifndef atomic_fetch_andnot_relaxed 367#ifndef atomic_fetch_andnot_relaxed
334#define atomic_fetch_andnot_relaxed atomic_fetch_andnot 368
335#define atomic_fetch_andnot_acquire atomic_fetch_andnot 369#ifndef atomic_fetch_andnot
336#define atomic_fetch_andnot_release atomic_fetch_andnot 370#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v))
371#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v))
372#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v))
373#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v))
374#else /* atomic_fetch_andnot */
375#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
376#define atomic_fetch_andnot_acquire atomic_fetch_andnot
377#define atomic_fetch_andnot_release atomic_fetch_andnot
378#endif /* atomic_fetch_andnot */
337 379
338#else /* atomic_fetch_andnot_relaxed */ 380#else /* atomic_fetch_andnot_relaxed */
339 381
@@ -352,7 +394,6 @@
352 __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) 394 __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
353#endif 395#endif
354#endif /* atomic_fetch_andnot_relaxed */ 396#endif /* atomic_fetch_andnot_relaxed */
355#endif /* atomic_andnot */
356 397
357/* atomic_fetch_xor_relaxed */ 398/* atomic_fetch_xor_relaxed */
358#ifndef atomic_fetch_xor_relaxed 399#ifndef atomic_fetch_xor_relaxed
@@ -520,112 +561,140 @@
520#endif /* xchg_relaxed */ 561#endif /* xchg_relaxed */
521 562
522/** 563/**
564 * atomic_fetch_add_unless - add unless the number is already a given value
565 * @v: pointer of type atomic_t
566 * @a: the amount to add to v...
567 * @u: ...unless v is equal to u.
568 *
569 * Atomically adds @a to @v, if @v was not already @u.
570 * Returns the original value of @v.
571 */
572#ifndef atomic_fetch_add_unless
573static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
574{
575 int c = atomic_read(v);
576
577 do {
578 if (unlikely(c == u))
579 break;
580 } while (!atomic_try_cmpxchg(v, &c, c + a));
581
582 return c;
583}
584#endif
585
586/**
523 * atomic_add_unless - add unless the number is already a given value 587 * atomic_add_unless - add unless the number is already a given value
524 * @v: pointer of type atomic_t 588 * @v: pointer of type atomic_t
525 * @a: the amount to add to v... 589 * @a: the amount to add to v...
526 * @u: ...unless v is equal to u. 590 * @u: ...unless v is equal to u.
527 * 591 *
528 * Atomically adds @a to @v, so long as @v was not already @u. 592 * Atomically adds @a to @v, if @v was not already @u.
529 * Returns non-zero if @v was not @u, and zero otherwise. 593 * Returns true if the addition was done.
530 */ 594 */
531static inline int atomic_add_unless(atomic_t *v, int a, int u) 595static inline bool atomic_add_unless(atomic_t *v, int a, int u)
532{ 596{
533 return __atomic_add_unless(v, a, u) != u; 597 return atomic_fetch_add_unless(v, a, u) != u;
534} 598}
535 599
536/** 600/**
537 * atomic_inc_not_zero - increment unless the number is zero 601 * atomic_inc_not_zero - increment unless the number is zero
538 * @v: pointer of type atomic_t 602 * @v: pointer of type atomic_t
539 * 603 *
540 * Atomically increments @v by 1, so long as @v is non-zero. 604 * Atomically increments @v by 1, if @v is non-zero.
541 * Returns non-zero if @v was non-zero, and zero otherwise. 605 * Returns true if the increment was done.
542 */ 606 */
543#ifndef atomic_inc_not_zero 607#ifndef atomic_inc_not_zero
544#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 608#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
545#endif 609#endif
546 610
547#ifndef atomic_andnot 611/**
548static inline void atomic_andnot(int i, atomic_t *v) 612 * atomic_inc_and_test - increment and test
549{ 613 * @v: pointer of type atomic_t
550 atomic_and(~i, v); 614 *
551} 615 * Atomically increments @v by 1
552 616 * and returns true if the result is zero, or false for all
553static inline int atomic_fetch_andnot(int i, atomic_t *v) 617 * other cases.
554{ 618 */
555 return atomic_fetch_and(~i, v); 619#ifndef atomic_inc_and_test
556} 620static inline bool atomic_inc_and_test(atomic_t *v)
557
558static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
559{ 621{
560 return atomic_fetch_and_relaxed(~i, v); 622 return atomic_inc_return(v) == 0;
561} 623}
624#endif
562 625
563static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) 626/**
627 * atomic_dec_and_test - decrement and test
628 * @v: pointer of type atomic_t
629 *
630 * Atomically decrements @v by 1 and
631 * returns true if the result is 0, or false for all other
632 * cases.
633 */
634#ifndef atomic_dec_and_test
635static inline bool atomic_dec_and_test(atomic_t *v)
564{ 636{
565 return atomic_fetch_and_acquire(~i, v); 637 return atomic_dec_return(v) == 0;
566} 638}
639#endif
567 640
568static inline int atomic_fetch_andnot_release(int i, atomic_t *v) 641/**
642 * atomic_sub_and_test - subtract value from variable and test result
643 * @i: integer value to subtract
644 * @v: pointer of type atomic_t
645 *
646 * Atomically subtracts @i from @v and returns
647 * true if the result is zero, or false for all
648 * other cases.
649 */
650#ifndef atomic_sub_and_test
651static inline bool atomic_sub_and_test(int i, atomic_t *v)
569{ 652{
570 return atomic_fetch_and_release(~i, v); 653 return atomic_sub_return(i, v) == 0;
571} 654}
572#endif 655#endif
573 656
574/** 657/**
575 * atomic_inc_not_zero_hint - increment if not null 658 * atomic_add_negative - add and test if negative
659 * @i: integer value to add
576 * @v: pointer of type atomic_t 660 * @v: pointer of type atomic_t
577 * @hint: probable value of the atomic before the increment
578 *
579 * This version of atomic_inc_not_zero() gives a hint of probable
580 * value of the atomic. This helps processor to not read the memory
581 * before doing the atomic read/modify/write cycle, lowering
582 * number of bus transactions on some arches.
583 * 661 *
584 * Returns: 0 if increment was not done, 1 otherwise. 662 * Atomically adds @i to @v and returns true
663 * if the result is negative, or false when
664 * result is greater than or equal to zero.
585 */ 665 */
586#ifndef atomic_inc_not_zero_hint 666#ifndef atomic_add_negative
587static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) 667static inline bool atomic_add_negative(int i, atomic_t *v)
588{ 668{
589 int val, c = hint; 669 return atomic_add_return(i, v) < 0;
590
591 /* sanity test, should be removed by compiler if hint is a constant */
592 if (!hint)
593 return atomic_inc_not_zero(v);
594
595 do {
596 val = atomic_cmpxchg(v, c, c + 1);
597 if (val == c)
598 return 1;
599 c = val;
600 } while (c);
601
602 return 0;
603} 670}
604#endif 671#endif
605 672
606#ifndef atomic_inc_unless_negative 673#ifndef atomic_inc_unless_negative
607static inline int atomic_inc_unless_negative(atomic_t *p) 674static inline bool atomic_inc_unless_negative(atomic_t *v)
608{ 675{
609 int v, v1; 676 int c = atomic_read(v);
610 for (v = 0; v >= 0; v = v1) { 677
611 v1 = atomic_cmpxchg(p, v, v + 1); 678 do {
612 if (likely(v1 == v)) 679 if (unlikely(c < 0))
613 return 1; 680 return false;
614 } 681 } while (!atomic_try_cmpxchg(v, &c, c + 1));
615 return 0; 682
683 return true;
616} 684}
617#endif 685#endif
618 686
619#ifndef atomic_dec_unless_positive 687#ifndef atomic_dec_unless_positive
620static inline int atomic_dec_unless_positive(atomic_t *p) 688static inline bool atomic_dec_unless_positive(atomic_t *v)
621{ 689{
622 int v, v1; 690 int c = atomic_read(v);
623 for (v = 0; v <= 0; v = v1) { 691
624 v1 = atomic_cmpxchg(p, v, v - 1); 692 do {
625 if (likely(v1 == v)) 693 if (unlikely(c > 0))
626 return 1; 694 return false;
627 } 695 } while (!atomic_try_cmpxchg(v, &c, c - 1));
628 return 0; 696
697 return true;
629} 698}
630#endif 699#endif
631 700
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
639#ifndef atomic_dec_if_positive 708#ifndef atomic_dec_if_positive
640static inline int atomic_dec_if_positive(atomic_t *v) 709static inline int atomic_dec_if_positive(atomic_t *v)
641{ 710{
642 int c, old, dec; 711 int dec, c = atomic_read(v);
643 c = atomic_read(v); 712
644 for (;;) { 713 do {
645 dec = c - 1; 714 dec = c - 1;
646 if (unlikely(dec < 0)) 715 if (unlikely(dec < 0))
647 break; 716 break;
648 old = atomic_cmpxchg((v), c, dec); 717 } while (!atomic_try_cmpxchg(v, &c, dec));
649 if (likely(old == c)) 718
650 break;
651 c = old;
652 }
653 return dec; 719 return dec;
654} 720}
655#endif 721#endif
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
693#endif 759#endif
694#endif /* atomic64_add_return_relaxed */ 760#endif /* atomic64_add_return_relaxed */
695 761
762#ifndef atomic64_inc
763#define atomic64_inc(v) atomic64_add(1, (v))
764#endif
765
696/* atomic64_inc_return_relaxed */ 766/* atomic64_inc_return_relaxed */
697#ifndef atomic64_inc_return_relaxed 767#ifndef atomic64_inc_return_relaxed
768
769#ifndef atomic64_inc_return
770#define atomic64_inc_return(v) atomic64_add_return(1, (v))
771#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
772#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
773#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
774#else /* atomic64_inc_return */
698#define atomic64_inc_return_relaxed atomic64_inc_return 775#define atomic64_inc_return_relaxed atomic64_inc_return
699#define atomic64_inc_return_acquire atomic64_inc_return 776#define atomic64_inc_return_acquire atomic64_inc_return
700#define atomic64_inc_return_release atomic64_inc_return 777#define atomic64_inc_return_release atomic64_inc_return
778#endif /* atomic64_inc_return */
701 779
702#else /* atomic64_inc_return_relaxed */ 780#else /* atomic64_inc_return_relaxed */
703 781
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
742#endif 820#endif
743#endif /* atomic64_sub_return_relaxed */ 821#endif /* atomic64_sub_return_relaxed */
744 822
823#ifndef atomic64_dec
824#define atomic64_dec(v) atomic64_sub(1, (v))
825#endif
826
745/* atomic64_dec_return_relaxed */ 827/* atomic64_dec_return_relaxed */
746#ifndef atomic64_dec_return_relaxed 828#ifndef atomic64_dec_return_relaxed
829
830#ifndef atomic64_dec_return
831#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
832#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
833#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
834#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
835#else /* atomic64_dec_return */
747#define atomic64_dec_return_relaxed atomic64_dec_return 836#define atomic64_dec_return_relaxed atomic64_dec_return
748#define atomic64_dec_return_acquire atomic64_dec_return 837#define atomic64_dec_return_acquire atomic64_dec_return
749#define atomic64_dec_return_release atomic64_dec_return 838#define atomic64_dec_return_release atomic64_dec_return
839#endif /* atomic64_dec_return */
750 840
751#else /* atomic64_dec_return_relaxed */ 841#else /* atomic64_dec_return_relaxed */
752 842
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
927#endif 1017#endif
928#endif /* atomic64_fetch_and_relaxed */ 1018#endif /* atomic64_fetch_and_relaxed */
929 1019
930#ifdef atomic64_andnot 1020#ifndef atomic64_andnot
931/* atomic64_fetch_andnot_relaxed */ 1021#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v))
1022#endif
1023
932#ifndef atomic64_fetch_andnot_relaxed 1024#ifndef atomic64_fetch_andnot_relaxed
933#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot 1025
934#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot 1026#ifndef atomic64_fetch_andnot
935#define atomic64_fetch_andnot_release atomic64_fetch_andnot 1027#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v))
1028#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v))
1029#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v))
1030#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v))
1031#else /* atomic64_fetch_andnot */
1032#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
1033#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
1034#define atomic64_fetch_andnot_release atomic64_fetch_andnot
1035#endif /* atomic64_fetch_andnot */
936 1036
937#else /* atomic64_fetch_andnot_relaxed */ 1037#else /* atomic64_fetch_andnot_relaxed */
938 1038
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
951 __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) 1051 __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
952#endif 1052#endif
953#endif /* atomic64_fetch_andnot_relaxed */ 1053#endif /* atomic64_fetch_andnot_relaxed */
954#endif /* atomic64_andnot */
955 1054
956/* atomic64_fetch_xor_relaxed */ 1055/* atomic64_fetch_xor_relaxed */
957#ifndef atomic64_fetch_xor_relaxed 1056#ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
1049#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg 1148#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
1050#endif /* atomic64_try_cmpxchg */ 1149#endif /* atomic64_try_cmpxchg */
1051 1150
1052#ifndef atomic64_andnot 1151/**
1053static inline void atomic64_andnot(long long i, atomic64_t *v) 1152 * atomic64_fetch_add_unless - add unless the number is already a given value
1153 * @v: pointer of type atomic64_t
1154 * @a: the amount to add to v...
1155 * @u: ...unless v is equal to u.
1156 *
1157 * Atomically adds @a to @v, if @v was not already @u.
1158 * Returns the original value of @v.
1159 */
1160#ifndef atomic64_fetch_add_unless
1161static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
1162 long long u)
1054{ 1163{
1055 atomic64_and(~i, v); 1164 long long c = atomic64_read(v);
1165
1166 do {
1167 if (unlikely(c == u))
1168 break;
1169 } while (!atomic64_try_cmpxchg(v, &c, c + a));
1170
1171 return c;
1056} 1172}
1173#endif
1057 1174
1058static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) 1175/**
1176 * atomic64_add_unless - add unless the number is already a given value
1177 * @v: pointer of type atomic_t
1178 * @a: the amount to add to v...
1179 * @u: ...unless v is equal to u.
1180 *
1181 * Atomically adds @a to @v, if @v was not already @u.
1182 * Returns true if the addition was done.
1183 */
1184static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
1059{ 1185{
1060 return atomic64_fetch_and(~i, v); 1186 return atomic64_fetch_add_unless(v, a, u) != u;
1061} 1187}
1062 1188
1063static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) 1189/**
1190 * atomic64_inc_not_zero - increment unless the number is zero
1191 * @v: pointer of type atomic64_t
1192 *
1193 * Atomically increments @v by 1, if @v is non-zero.
1194 * Returns true if the increment was done.
1195 */
1196#ifndef atomic64_inc_not_zero
1197#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
1198#endif
1199
1200/**
1201 * atomic64_inc_and_test - increment and test
1202 * @v: pointer of type atomic64_t
1203 *
1204 * Atomically increments @v by 1
1205 * and returns true if the result is zero, or false for all
1206 * other cases.
1207 */
1208#ifndef atomic64_inc_and_test
1209static inline bool atomic64_inc_and_test(atomic64_t *v)
1064{ 1210{
1065 return atomic64_fetch_and_relaxed(~i, v); 1211 return atomic64_inc_return(v) == 0;
1066} 1212}
1213#endif
1067 1214
1068static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) 1215/**
1216 * atomic64_dec_and_test - decrement and test
1217 * @v: pointer of type atomic64_t
1218 *
1219 * Atomically decrements @v by 1 and
1220 * returns true if the result is 0, or false for all other
1221 * cases.
1222 */
1223#ifndef atomic64_dec_and_test
1224static inline bool atomic64_dec_and_test(atomic64_t *v)
1069{ 1225{
1070 return atomic64_fetch_and_acquire(~i, v); 1226 return atomic64_dec_return(v) == 0;
1071} 1227}
1228#endif
1072 1229
1073static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) 1230/**
1231 * atomic64_sub_and_test - subtract value from variable and test result
1232 * @i: integer value to subtract
1233 * @v: pointer of type atomic64_t
1234 *
1235 * Atomically subtracts @i from @v and returns
1236 * true if the result is zero, or false for all
1237 * other cases.
1238 */
1239#ifndef atomic64_sub_and_test
1240static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
1241{
1242 return atomic64_sub_return(i, v) == 0;
1243}
1244#endif
1245
1246/**
1247 * atomic64_add_negative - add and test if negative
1248 * @i: integer value to add
1249 * @v: pointer of type atomic64_t
1250 *
1251 * Atomically adds @i to @v and returns true
1252 * if the result is negative, or false when
1253 * result is greater than or equal to zero.
1254 */
1255#ifndef atomic64_add_negative
1256static inline bool atomic64_add_negative(long long i, atomic64_t *v)
1074{ 1257{
1075 return atomic64_fetch_and_release(~i, v); 1258 return atomic64_add_return(i, v) < 0;
1259}
1260#endif
1261
1262#ifndef atomic64_inc_unless_negative
1263static inline bool atomic64_inc_unless_negative(atomic64_t *v)
1264{
1265 long long c = atomic64_read(v);
1266
1267 do {
1268 if (unlikely(c < 0))
1269 return false;
1270 } while (!atomic64_try_cmpxchg(v, &c, c + 1));
1271
1272 return true;
1273}
1274#endif
1275
1276#ifndef atomic64_dec_unless_positive
1277static inline bool atomic64_dec_unless_positive(atomic64_t *v)
1278{
1279 long long c = atomic64_read(v);
1280
1281 do {
1282 if (unlikely(c > 0))
1283 return false;
1284 } while (!atomic64_try_cmpxchg(v, &c, c - 1));
1285
1286 return true;
1287}
1288#endif
1289
1290/*
1291 * atomic64_dec_if_positive - decrement by 1 if old value positive
1292 * @v: pointer of type atomic64_t
1293 *
1294 * The function returns the old value of *v minus 1, even if
1295 * the atomic64 variable, v, was not decremented.
1296 */
1297#ifndef atomic64_dec_if_positive
1298static inline long long atomic64_dec_if_positive(atomic64_t *v)
1299{
1300 long long dec, c = atomic64_read(v);
1301
1302 do {
1303 dec = c - 1;
1304 if (unlikely(dec < 0))
1305 break;
1306 } while (!atomic64_try_cmpxchg(v, &c, dec));
1307
1308 return dec;
1076} 1309}
1077#endif 1310#endif
1078 1311
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 69c78477590b..9334fbef7bae 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -117,6 +117,9 @@ struct filename;
117 117
118extern void audit_log_session_info(struct audit_buffer *ab); 118extern void audit_log_session_info(struct audit_buffer *ab);
119 119
120#define AUDIT_OFF 0
121#define AUDIT_ON 1
122#define AUDIT_LOCKED 2
120#ifdef CONFIG_AUDIT 123#ifdef CONFIG_AUDIT
121/* These are defined in audit.c */ 124/* These are defined in audit.c */
122 /* Public API */ 125 /* Public API */
@@ -202,7 +205,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
202static inline void audit_log_task_info(struct audit_buffer *ab, 205static inline void audit_log_task_info(struct audit_buffer *ab,
203 struct task_struct *tsk) 206 struct task_struct *tsk)
204{ } 207{ }
205#define audit_enabled 0 208#define audit_enabled AUDIT_OFF
206#endif /* CONFIG_AUDIT */ 209#endif /* CONFIG_AUDIT */
207 210
208#ifdef CONFIG_AUDIT_COMPAT_GENERIC 211#ifdef CONFIG_AUDIT_COMPAT_GENERIC
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0bd432a4d7bd..24251762c20c 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -22,7 +22,6 @@ struct dentry;
22 */ 22 */
23enum wb_state { 23enum wb_state {
24 WB_registered, /* bdi_register() was done */ 24 WB_registered, /* bdi_register() was done */
25 WB_shutting_down, /* wb_shutdown() in progress */
26 WB_writeback_running, /* Writeback is in progress */ 25 WB_writeback_running, /* Writeback is in progress */
27 WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */ 26 WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
28 WB_start_all, /* nr_pages == 0 (all) work pending */ 27 WB_start_all, /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
189#ifdef CONFIG_CGROUP_WRITEBACK 188#ifdef CONFIG_CGROUP_WRITEBACK
190 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ 189 struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
191 struct rb_root cgwb_congested_tree; /* their congested states */ 190 struct rb_root cgwb_congested_tree; /* their congested states */
191 struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
192#else 192#else
193 struct bdi_writeback_congested *wb_congested; 193 struct bdi_writeback_congested *wb_congested;
194#endif 194#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f08f5fe7bd08..51371740d2a8 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -429,7 +429,6 @@ extern void bio_put(struct bio *);
429 429
430extern void __bio_clone_fast(struct bio *, struct bio *); 430extern void __bio_clone_fast(struct bio *, struct bio *);
431extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); 431extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
432extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
433 432
434extern struct bio_set fs_bio_set; 433extern struct bio_set fs_bio_set;
435 434
@@ -443,12 +442,6 @@ static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
443 return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); 442 return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
444} 443}
445 444
446static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
447{
448 return bio_clone_bioset(bio, gfp_mask, NULL);
449
450}
451
452extern blk_qc_t submit_bio(struct bio *); 445extern blk_qc_t submit_bio(struct bio *);
453 446
454extern void bio_endio(struct bio *); 447extern void bio_endio(struct bio *);
@@ -496,9 +489,9 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
496extern void bio_set_pages_dirty(struct bio *bio); 489extern void bio_set_pages_dirty(struct bio *bio);
497extern void bio_check_pages_dirty(struct bio *bio); 490extern void bio_check_pages_dirty(struct bio *bio);
498 491
499void generic_start_io_acct(struct request_queue *q, int rw, 492void generic_start_io_acct(struct request_queue *q, int op,
500 unsigned long sectors, struct hd_struct *part); 493 unsigned long sectors, struct hd_struct *part);
501void generic_end_io_acct(struct request_queue *q, int rw, 494void generic_end_io_acct(struct request_queue *q, int op,
502 struct hd_struct *part, 495 struct hd_struct *part,
503 unsigned long start_time); 496 unsigned long start_time);
504 497
@@ -553,8 +546,16 @@ do { \
553#define bio_dev(bio) \ 546#define bio_dev(bio) \
554 disk_devt((bio)->bi_disk) 547 disk_devt((bio)->bi_disk)
555 548
549#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
550int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
551#else
552static inline int bio_associate_blkcg_from_page(struct bio *bio,
553 struct page *page) { return 0; }
554#endif
555
556#ifdef CONFIG_BLK_CGROUP 556#ifdef CONFIG_BLK_CGROUP
557int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); 557int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
558int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
558void bio_disassociate_task(struct bio *bio); 559void bio_disassociate_task(struct bio *bio);
559void bio_clone_blkcg_association(struct bio *dst, struct bio *src); 560void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
560#else /* CONFIG_BLK_CGROUP */ 561#else /* CONFIG_BLK_CGROUP */
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index cf2588d81148..65a6981eef7b 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -104,7 +104,7 @@
104 (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ 104 (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
105 }) 105 })
106 106
107extern void __compiletime_warning("value doesn't fit into mask") 107extern void __compiletime_error("value doesn't fit into mask")
108__field_overflow(void); 108__field_overflow(void);
109extern void __compiletime_error("bad bitfield mask") 109extern void __compiletime_error("bad bitfield mask")
110__bad_mask(void); 110__bad_mask(void);
@@ -121,8 +121,8 @@ static __always_inline u64 field_mask(u64 field)
121#define ____MAKE_OP(type,base,to,from) \ 121#define ____MAKE_OP(type,base,to,from) \
122static __always_inline __##type type##_encode_bits(base v, base field) \ 122static __always_inline __##type type##_encode_bits(base v, base field) \
123{ \ 123{ \
124 if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \ 124 if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
125 __field_overflow(); \ 125 __field_overflow(); \
126 return to((v & field_mask(field)) * field_multiplier(field)); \ 126 return to((v & field_mask(field)) * field_multiplier(field)); \
127} \ 127} \
128static __always_inline __##type type##_replace_bits(__##type old, \ 128static __always_inline __##type type##_replace_bits(__##type old, \
@@ -143,6 +143,7 @@ static __always_inline base type##_get_bits(__##type v, base field) \
143 ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ 143 ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
144 ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ 144 ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
145 ____MAKE_OP(u##size,u##size,,) 145 ____MAKE_OP(u##size,u##size,,)
146____MAKE_OP(u8,u8,,)
146__MAKE_OP(16) 147__MAKE_OP(16)
147__MAKE_OP(32) 148__MAKE_OP(32)
148__MAKE_OP(64) 149__MAKE_OP(64)
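
For reference, the helpers generated by ____MAKE_OP() are plain mask-and-shift operations, and the overflow check above now compares the value against field_mask() (the mask shifted down into value space) rather than field_multiplier(). A minimal userspace sketch of that behaviour, doing at run time what the kernel checks at compile time via __builtin_constant_p(); all names below are illustrative, and __builtin_ctz() assumes GCC/Clang:

#include <stdint.h>
#include <stdio.h>

/* lowest set bit of the field mask gives the shift (the "multiplier" exponent) */
static uint32_t field_shift(uint32_t field) { return __builtin_ctz(field); }

static uint32_t encode_bits(uint32_t v, uint32_t field)
{
	uint32_t shift = field_shift(field);
	uint32_t mask = field >> shift;         /* field_mask(): mask in value space */

	if (v & ~mask)                          /* "value doesn't fit into mask" */
		fprintf(stderr, "overflow: 0x%x does not fit mask 0x%x\n", v, mask);
	return (v & mask) << shift;
}

static uint32_t get_bits(uint32_t reg, uint32_t field)
{
	return (reg & field) >> field_shift(field);
}

int main(void)
{
	uint32_t reg = encode_bits(0x5, 0x00f0);   /* place 0x5 into bits 7:4 */

	printf("reg=0x%04x decoded=0x%x\n", reg, get_bits(reg, 0x00f0));
	return 0;
}

Checking against the multiplier (the shifted-up low bit of the field) would both flag valid values and miss real overflows; the shifted-down mask is the correct bound, which is what the hunk above changes.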
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..af419012d77d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -2,29 +2,9 @@
2#ifndef _LINUX_BITOPS_H 2#ifndef _LINUX_BITOPS_H
3#define _LINUX_BITOPS_H 3#define _LINUX_BITOPS_H
4#include <asm/types.h> 4#include <asm/types.h>
5#include <linux/bits.h>
5 6
6#ifdef __KERNEL__
7#define BIT(nr) (1UL << (nr))
8#define BIT_ULL(nr) (1ULL << (nr))
9#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
10#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
11#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
12#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
13#define BITS_PER_BYTE 8
14#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) 7#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
15#endif
16
17/*
18 * Create a contiguous bitmask starting at bit position @l and ending at
19 * position @h. For example
20 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
21 */
22#define GENMASK(h, l) \
23 (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
24
25#define GENMASK_ULL(h, l) \
26 (((~0ULL) - (1ULL << (l)) + 1) & \
27 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
28 8
29extern unsigned int __sw_hweight8(unsigned int w); 9extern unsigned int __sw_hweight8(unsigned int w);
30extern unsigned int __sw_hweight16(unsigned int w); 10extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __LINUX_BITS_H
3#define __LINUX_BITS_H
4#include <asm/bitsperlong.h>
5
6#define BIT(nr) (1UL << (nr))
7#define BIT_ULL(nr) (1ULL << (nr))
8#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
9#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
10#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
11#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
12#define BITS_PER_BYTE 8
13
14/*
15 * Create a contiguous bitmask starting at bit position @l and ending at
16 * position @h. For example
17 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
18 */
19#define GENMASK(h, l) \
20 (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
21
22#define GENMASK_ULL(h, l) \
23 (((~0ULL) - (1ULL << (l)) + 1) & \
24 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
25
26#endif /* __LINUX_BITS_H */
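
The GENMASK()/BIT() definitions moved into the new header can be exercised directly. A small standalone program with arbitrarily chosen bit positions, redefining BITS_PER_LONG locally since asm/bitsperlong.h is not available in userspace:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define BIT(nr)        (1UL << (nr))
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* bits 7..4 set: 0xf0 */
	printf("GENMASK(7, 4) = 0x%lx\n", GENMASK(7, 4));
	/* a single bit is just BIT(): 0x100 */
	printf("BIT(8)        = 0x%lx\n", BIT(8));
	return 0;
}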
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6c666fd7de3c..34aec30e06c7 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -35,6 +35,7 @@ enum blkg_rwstat_type {
35 BLKG_RWSTAT_WRITE, 35 BLKG_RWSTAT_WRITE,
36 BLKG_RWSTAT_SYNC, 36 BLKG_RWSTAT_SYNC,
37 BLKG_RWSTAT_ASYNC, 37 BLKG_RWSTAT_ASYNC,
38 BLKG_RWSTAT_DISCARD,
38 39
39 BLKG_RWSTAT_NR, 40 BLKG_RWSTAT_NR,
40 BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR, 41 BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
@@ -136,6 +137,12 @@ struct blkcg_gq {
136 struct blkg_policy_data *pd[BLKCG_MAX_POLS]; 137 struct blkg_policy_data *pd[BLKCG_MAX_POLS];
137 138
138 struct rcu_head rcu_head; 139 struct rcu_head rcu_head;
140
141 atomic_t use_delay;
142 atomic64_t delay_nsec;
143 atomic64_t delay_start;
144 u64 last_delay;
145 int last_use;
139}; 146};
140 147
141typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); 148typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
@@ -148,6 +155,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
148typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); 155typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
149typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); 156typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
150typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); 157typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
158typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
159 size_t size);
151 160
152struct blkcg_policy { 161struct blkcg_policy {
153 int plid; 162 int plid;
@@ -167,6 +176,7 @@ struct blkcg_policy {
167 blkcg_pol_offline_pd_fn *pd_offline_fn; 176 blkcg_pol_offline_pd_fn *pd_offline_fn;
168 blkcg_pol_free_pd_fn *pd_free_fn; 177 blkcg_pol_free_pd_fn *pd_free_fn;
169 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; 178 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
179 blkcg_pol_stat_pd_fn *pd_stat_fn;
170}; 180};
171 181
172extern struct blkcg blkcg_root; 182extern struct blkcg blkcg_root;
@@ -238,6 +248,42 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
238 return css_to_blkcg(task_css(current, io_cgrp_id)); 248 return css_to_blkcg(task_css(current, io_cgrp_id));
239} 249}
240 250
251static inline bool blk_cgroup_congested(void)
252{
253 struct cgroup_subsys_state *css;
254 bool ret = false;
255
256 rcu_read_lock();
257 css = kthread_blkcg();
258 if (!css)
259 css = task_css(current, io_cgrp_id);
260 while (css) {
261 if (atomic_read(&css->cgroup->congestion_count)) {
262 ret = true;
263 break;
264 }
265 css = css->parent;
266 }
267 rcu_read_unlock();
268 return ret;
269}
270
271/**
272 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
273 * @return: true if this bio needs to be submitted with the root blkg context.
274 *
275 * In order to avoid priority inversions we sometimes need to issue a bio as if
276 * it were attached to the root blkg, and then backcharge to the actual owning
277 * blkg. The idea is we do bio_blkcg() to look up the actual context for the
278 * bio and attach the appropriate blkg to the bio. Then we call this helper and
279 * if it is true run with the root blkg for that queue and then do any
280 * backcharging to the originating cgroup once the io is complete.
281 */
282static inline bool bio_issue_as_root_blkg(struct bio *bio)
283{
284 return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
285}
286
241/** 287/**
242 * blkcg_parent - get the parent of a blkcg 288 * blkcg_parent - get the parent of a blkcg
243 * @blkcg: blkcg of interest 289 * @blkcg: blkcg of interest
@@ -296,6 +342,17 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
296} 342}
297 343
298/** 344/**
345 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
346 * @q: request_queue of interest
347 *
348 * Lookup blkg for @q at the root level. See also blkg_lookup().
349 */
350static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
351{
352 return q->root_blkg;
353}
354
355/**
299 * blkg_to_pdata - get policy private data 356 * blkg_to_pdata - get policy private data
300 * @blkg: blkg of interest 357 * @blkg: blkg of interest
301 * @pol: policy of interest 358 * @pol: policy of interest
@@ -355,6 +412,21 @@ static inline void blkg_get(struct blkcg_gq *blkg)
355 atomic_inc(&blkg->refcnt); 412 atomic_inc(&blkg->refcnt);
356} 413}
357 414
415/**
416 * blkg_try_get - try and get a blkg reference
417 * @blkg: blkg to get
418 *
419 * This is for use when doing an RCU lookup of the blkg. We may be in the midst
420 * of freeing this blkg, so we can only use it if the refcnt is not zero.
421 */
422static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
423{
424 if (atomic_inc_not_zero(&blkg->refcnt))
425 return blkg;
426 return NULL;
427}
428
429
358void __blkg_release_rcu(struct rcu_head *rcu); 430void __blkg_release_rcu(struct rcu_head *rcu);
359 431
360/** 432/**
@@ -589,7 +661,9 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
589{ 661{
590 struct percpu_counter *cnt; 662 struct percpu_counter *cnt;
591 663
592 if (op_is_write(op)) 664 if (op_is_discard(op))
665 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
666 else if (op_is_write(op))
593 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE]; 667 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
594 else 668 else
595 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; 669 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
@@ -706,8 +780,14 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
706 780
707 if (!throtl) { 781 if (!throtl) {
708 blkg = blkg ?: q->root_blkg; 782 blkg = blkg ?: q->root_blkg;
709 blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, 783 /*
710 bio->bi_iter.bi_size); 784 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
785 * is a split bio and we would have already accounted for the
786 * size of the bio.
787 */
788 if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
789 blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
790 bio->bi_iter.bi_size);
711 blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); 791 blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
712 } 792 }
713 793
@@ -715,6 +795,59 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
715 return !throtl; 795 return !throtl;
716} 796}
717 797
798static inline void blkcg_use_delay(struct blkcg_gq *blkg)
799{
800 if (atomic_add_return(1, &blkg->use_delay) == 1)
801 atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
802}
803
804static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
805{
806 int old = atomic_read(&blkg->use_delay);
807
808 if (old == 0)
809 return 0;
810
811 /*
812 * We do this song and dance because we can race with somebody else
813 * adding or removing delay. If we just did an atomic_dec we'd end up
814 * negative and we'd already be in trouble. We need to subtract 1 and
815 * then check to see if we were the last delay so we can drop the
816 * congestion count on the cgroup.
817 */
818 while (old) {
819 int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
820 if (cur == old)
821 break;
822 old = cur;
823 }
824
825 if (old == 0)
826 return 0;
827 if (old == 1)
828 atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
829 return 1;
830}
831
832static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
833{
834 int old = atomic_read(&blkg->use_delay);
835 if (!old)
836 return;
837 /* We only want 1 person clearing the congestion count for this blkg. */
838 while (old) {
839 int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
840 if (cur == old) {
841 atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
842 break;
843 }
844 old = cur;
845 }
846}
847
848void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
849void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
850void blkcg_maybe_throttle_current(void);
718#else /* CONFIG_BLK_CGROUP */ 851#else /* CONFIG_BLK_CGROUP */
719 852
720struct blkcg { 853struct blkcg {
@@ -734,9 +867,16 @@ struct blkcg_policy {
734 867
735#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL)) 868#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
736 869
870static inline void blkcg_maybe_throttle_current(void) { }
871static inline bool blk_cgroup_congested(void) { return false; }
872
737#ifdef CONFIG_BLOCK 873#ifdef CONFIG_BLOCK
738 874
875static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
876
739static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; } 877static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
878static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
879{ return NULL; }
740static inline int blkcg_init_queue(struct request_queue *q) { return 0; } 880static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
741static inline void blkcg_drain_queue(struct request_queue *q) { } 881static inline void blkcg_drain_queue(struct request_queue *q) { }
742static inline void blkcg_exit_queue(struct request_queue *q) { } 882static inline void blkcg_exit_queue(struct request_queue *q) { }
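
The comment in blkcg_unuse_delay() describes a decrement that must never drive use_delay negative and that drops the cgroup congestion count only when the last user goes away. A self-contained userspace sketch of that compare-and-swap loop, using C11 atomics in place of the kernel primitives (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int use_delay;

/* Returns 1 if a delay reference was dropped, 0 if there was none to drop. */
static int unuse_delay(void)
{
	int old = atomic_load(&use_delay);

	while (old) {
		/* retry until we observe a stable value and swap in old - 1 */
		if (atomic_compare_exchange_weak(&use_delay, &old, old - 1))
			break;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		printf("last user gone: drop the congestion count here\n");
	return 1;
}

int main(void)
{
	atomic_store(&use_delay, 2);
	printf("first:  %d\n", unuse_delay());  /* 1 */
	printf("second: %d\n", unuse_delay());  /* 1, congestion count dropped */
	printf("third:  %d\n", unuse_delay());  /* 0, nothing left to drop */
	return 0;
}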
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb74222..1da59c16f637 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -35,10 +35,12 @@ struct blk_mq_hw_ctx {
35 struct sbitmap ctx_map; 35 struct sbitmap ctx_map;
36 36
37 struct blk_mq_ctx *dispatch_from; 37 struct blk_mq_ctx *dispatch_from;
38 unsigned int dispatch_busy;
38 39
39 struct blk_mq_ctx **ctxs;
40 unsigned int nr_ctx; 40 unsigned int nr_ctx;
41 struct blk_mq_ctx **ctxs;
41 42
43 spinlock_t dispatch_wait_lock;
42 wait_queue_entry_t dispatch_wait; 44 wait_queue_entry_t dispatch_wait;
43 atomic_t wait_index; 45 atomic_t wait_index;
44 46
@@ -287,6 +289,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
287 289
288void blk_mq_quiesce_queue_nowait(struct request_queue *q); 290void blk_mq_quiesce_queue_nowait(struct request_queue *q);
289 291
292/**
293 * blk_mq_mark_complete() - Set request state to complete
294 * @rq: request to set to complete state
295 *
296 * Returns true if request state was successfully set to complete. If
297 * successful, the caller is responsible for seeing this request is ended, as
298 * blk_mq_complete_request will not work again.
299 */
300static inline bool blk_mq_mark_complete(struct request *rq)
301{
302 return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
303 MQ_RQ_IN_FLIGHT;
304}
305
290/* 306/*
291 * Driver command data is immediately after the request. So subtract request 307 * Driver command data is immediately after the request. So subtract request
292 * size to get back to the original request, add request size to get the PDU. 308 * size to get back to the original request, add request size to get the PDU.
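
blk_mq_mark_complete() is documented as a single-winner state transition: whichever path flips the request from in-flight to complete owns ending it. A userspace sketch of that semantic, with stand-in state values rather than the kernel's MQ_RQ_* enum:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum rq_state { RQ_IDLE, RQ_IN_FLIGHT, RQ_COMPLETE };

static bool mark_complete(atomic_int *state)
{
	int expected = RQ_IN_FLIGHT;

	/* succeeds for exactly one caller; the loser sees COMPLETE already */
	return atomic_compare_exchange_strong(state, &expected, RQ_COMPLETE);
}

int main(void)
{
	atomic_int state = RQ_IN_FLIGHT;

	printf("completion path wins: %d\n", mark_complete(&state)); /* 1 */
	printf("timeout path loses:   %d\n", mark_complete(&state)); /* 0 */
	return 0;
}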
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3c4f390aea4b..f6dfb30737d8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -179,11 +179,9 @@ struct bio {
179 */ 179 */
180 struct io_context *bi_ioc; 180 struct io_context *bi_ioc;
181 struct cgroup_subsys_state *bi_css; 181 struct cgroup_subsys_state *bi_css;
182#ifdef CONFIG_BLK_DEV_THROTTLING_LOW 182 struct blkcg_gq *bi_blkg;
183 void *bi_cg_private;
184 struct bio_issue bi_issue; 183 struct bio_issue bi_issue;
185#endif 184#endif
186#endif
187 union { 185 union {
188#if defined(CONFIG_BLK_DEV_INTEGRITY) 186#if defined(CONFIG_BLK_DEV_INTEGRITY)
189 struct bio_integrity_payload *bi_integrity; /* data integrity */ 187 struct bio_integrity_payload *bi_integrity; /* data integrity */
@@ -329,7 +327,7 @@ enum req_flag_bits {
329 327
330 /* for driver use */ 328 /* for driver use */
331 __REQ_DRV, 329 __REQ_DRV,
332 330 __REQ_SWAP, /* swapping request. */
333 __REQ_NR_BITS, /* stops here */ 331 __REQ_NR_BITS, /* stops here */
334}; 332};
335 333
@@ -351,6 +349,7 @@ enum req_flag_bits {
351#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) 349#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
352 350
353#define REQ_DRV (1ULL << __REQ_DRV) 351#define REQ_DRV (1ULL << __REQ_DRV)
352#define REQ_SWAP (1ULL << __REQ_SWAP)
354 353
355#define REQ_FAILFAST_MASK \ 354#define REQ_FAILFAST_MASK \
356 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 355 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -358,6 +357,14 @@ enum req_flag_bits {
358#define REQ_NOMERGE_FLAGS \ 357#define REQ_NOMERGE_FLAGS \
359 (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) 358 (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
360 359
360enum stat_group {
361 STAT_READ,
362 STAT_WRITE,
363 STAT_DISCARD,
364
365 NR_STAT_GROUPS
366};
367
361#define bio_op(bio) \ 368#define bio_op(bio) \
362 ((bio)->bi_opf & REQ_OP_MASK) 369 ((bio)->bi_opf & REQ_OP_MASK)
363#define req_op(req) \ 370#define req_op(req) \
@@ -395,6 +402,18 @@ static inline bool op_is_sync(unsigned int op)
395 (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH)); 402 (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
396} 403}
397 404
405static inline bool op_is_discard(unsigned int op)
406{
407 return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
408}
409
410static inline int op_stat_group(unsigned int op)
411{
412 if (op_is_discard(op))
413 return STAT_DISCARD;
414 return op_is_write(op);
415}
416
398typedef unsigned int blk_qc_t; 417typedef unsigned int blk_qc_t;
399#define BLK_QC_T_NONE -1U 418#define BLK_QC_T_NONE -1U
400#define BLK_QC_T_SHIFT 16 419#define BLK_QC_T_SHIFT 16
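
op_stat_group() routes discards into their own accounting bucket and otherwise falls back to the read/write distinction. A standalone sketch of that mapping, with stand-in REQ_OP_* values:

#include <stdio.h>

enum stat_group { STAT_READ, STAT_WRITE, STAT_DISCARD, NR_STAT_GROUPS };
enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3 };

#define REQ_OP_MASK 0xff

static int op_is_write(unsigned int op)   { return (op & 1) != 0; }
static int op_is_discard(unsigned int op) { return (op & REQ_OP_MASK) == REQ_OP_DISCARD; }

static int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op); /* STAT_READ == 0, STAT_WRITE == 1 */
}

int main(void)
{
	printf("read    -> %d\n", op_stat_group(REQ_OP_READ));    /* 0 */
	printf("write   -> %d\n", op_stat_group(REQ_OP_WRITE));   /* 1 */
	printf("discard -> %d\n", op_stat_group(REQ_OP_DISCARD)); /* 2 */
	return 0;
}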
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9154570edf29..d6869e0e2b64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,8 +27,6 @@
27#include <linux/percpu-refcount.h> 27#include <linux/percpu-refcount.h>
28#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
29#include <linux/blkzoned.h> 29#include <linux/blkzoned.h>
30#include <linux/seqlock.h>
31#include <linux/u64_stats_sync.h>
32 30
33struct module; 31struct module;
34struct scsi_ioctl_command; 32struct scsi_ioctl_command;
@@ -42,7 +40,7 @@ struct bsg_job;
42struct blkcg_gq; 40struct blkcg_gq;
43struct blk_flush_queue; 41struct blk_flush_queue;
44struct pr_ops; 42struct pr_ops;
45struct rq_wb; 43struct rq_qos;
46struct blk_queue_stats; 44struct blk_queue_stats;
47struct blk_stat_callback; 45struct blk_stat_callback;
48 46
@@ -442,10 +440,8 @@ struct request_queue {
442 int nr_rqs[2]; /* # allocated [a]sync rqs */ 440 int nr_rqs[2]; /* # allocated [a]sync rqs */
443 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ 441 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
444 442
445 atomic_t shared_hctx_restart;
446
447 struct blk_queue_stats *stats; 443 struct blk_queue_stats *stats;
448 struct rq_wb *rq_wb; 444 struct rq_qos *rq_qos;
449 445
450 /* 446 /*
451 * If blkcg is not used, @q->root_rl serves all requests. If blkcg 447 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
@@ -592,6 +588,7 @@ struct request_queue {
592 588
593 struct queue_limits limits; 589 struct queue_limits limits;
594 590
591#ifdef CONFIG_BLK_DEV_ZONED
595 /* 592 /*
596 * Zoned block device information for request dispatch control. 593 * Zoned block device information for request dispatch control.
597 * nr_zones is the total number of zones of the device. This is always 594 * nr_zones is the total number of zones of the device. This is always
@@ -612,6 +609,7 @@ struct request_queue {
612 unsigned int nr_zones; 609 unsigned int nr_zones;
613 unsigned long *seq_zones_bitmap; 610 unsigned long *seq_zones_bitmap;
614 unsigned long *seq_zones_wlock; 611 unsigned long *seq_zones_wlock;
612#endif /* CONFIG_BLK_DEV_ZONED */
615 613
616 /* 614 /*
617 * sg stuff 615 * sg stuff
@@ -800,11 +798,7 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
800 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; 798 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
801} 799}
802 800
803static inline unsigned int blk_queue_nr_zones(struct request_queue *q) 801#ifdef CONFIG_BLK_DEV_ZONED
804{
805 return q->nr_zones;
806}
807
808static inline unsigned int blk_queue_zone_no(struct request_queue *q, 802static inline unsigned int blk_queue_zone_no(struct request_queue *q,
809 sector_t sector) 803 sector_t sector)
810{ 804{
@@ -820,6 +814,7 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
820 return false; 814 return false;
821 return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); 815 return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
822} 816}
817#endif /* CONFIG_BLK_DEV_ZONED */
823 818
824static inline bool rq_is_sync(struct request *rq) 819static inline bool rq_is_sync(struct request *rq)
825{ 820{
@@ -1070,6 +1065,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1070 return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; 1065 return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1071} 1066}
1072 1067
1068#ifdef CONFIG_BLK_DEV_ZONED
1073static inline unsigned int blk_rq_zone_no(struct request *rq) 1069static inline unsigned int blk_rq_zone_no(struct request *rq)
1074{ 1070{
1075 return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); 1071 return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
@@ -1079,6 +1075,7 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1079{ 1075{
1080 return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); 1076 return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
1081} 1077}
1078#endif /* CONFIG_BLK_DEV_ZONED */
1082 1079
1083/* 1080/*
1084 * Some commands like WRITE SAME have a payload or data transfer size which 1081 * Some commands like WRITE SAME have a payload or data transfer size which
@@ -1119,8 +1116,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
1119 if (!q->limits.chunk_sectors) 1116 if (!q->limits.chunk_sectors)
1120 return q->limits.max_sectors; 1117 return q->limits.max_sectors;
1121 1118
1122 return q->limits.chunk_sectors - 1119 return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
1123 (offset & (q->limits.chunk_sectors - 1)); 1120 (offset & (q->limits.chunk_sectors - 1))));
1124} 1121}
1125 1122
1126static inline unsigned int blk_rq_get_max_sectors(struct request *rq, 1123static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
@@ -1437,8 +1434,6 @@ enum blk_default_limits {
1437 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1434 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1438}; 1435};
1439 1436
1440#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1441
1442static inline unsigned long queue_segment_boundary(struct request_queue *q) 1437static inline unsigned long queue_segment_boundary(struct request_queue *q)
1443{ 1438{
1444 return q->limits.seg_boundary_mask; 1439 return q->limits.seg_boundary_mask;
@@ -1639,15 +1634,6 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
1639 return 0; 1634 return 0;
1640} 1635}
1641 1636
1642static inline unsigned int bdev_nr_zones(struct block_device *bdev)
1643{
1644 struct request_queue *q = bdev_get_queue(bdev);
1645
1646 if (q)
1647 return blk_queue_nr_zones(q);
1648 return 0;
1649}
1650
1651static inline int queue_dma_alignment(struct request_queue *q) 1637static inline int queue_dma_alignment(struct request_queue *q)
1652{ 1638{
1653 return q ? q->dma_alignment : 511; 1639 return q ? q->dma_alignment : 511;
@@ -1877,6 +1863,28 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
1877 bip_next->bip_vec[0].bv_offset); 1863 bip_next->bip_vec[0].bv_offset);
1878} 1864}
1879 1865
1866/**
1867 * bio_integrity_intervals - Return number of integrity intervals for a bio
1868 * @bi: blk_integrity profile for device
1869 * @sectors: Size of the bio in 512-byte sectors
1870 *
1871 * Description: The block layer calculates everything in 512 byte
1872 * sectors but integrity metadata is done in terms of the data integrity
1873 * interval size of the storage device. Convert the block layer sectors
1874 * to the appropriate number of integrity intervals.
1875 */
1876static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1877 unsigned int sectors)
1878{
1879 return sectors >> (bi->interval_exp - 9);
1880}
1881
1882static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1883 unsigned int sectors)
1884{
1885 return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
1886}
1887
1880#else /* CONFIG_BLK_DEV_INTEGRITY */ 1888#else /* CONFIG_BLK_DEV_INTEGRITY */
1881 1889
1882struct bio; 1890struct bio;
@@ -1950,12 +1958,24 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
1950 return false; 1958 return false;
1951} 1959}
1952 1960
1961static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
1962 unsigned int sectors)
1963{
1964 return 0;
1965}
1966
1967static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
1968 unsigned int sectors)
1969{
1970 return 0;
1971}
1972
1953#endif /* CONFIG_BLK_DEV_INTEGRITY */ 1973#endif /* CONFIG_BLK_DEV_INTEGRITY */
1954 1974
1955struct block_device_operations { 1975struct block_device_operations {
1956 int (*open) (struct block_device *, fmode_t); 1976 int (*open) (struct block_device *, fmode_t);
1957 void (*release) (struct gendisk *, fmode_t); 1977 void (*release) (struct gendisk *, fmode_t);
1958 int (*rw_page)(struct block_device *, sector_t, struct page *, bool); 1978 int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
1959 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1979 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1960 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1980 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1961 unsigned int (*check_events) (struct gendisk *disk, 1981 unsigned int (*check_events) (struct gendisk *disk,
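
bio_integrity_intervals() converts 512-byte sectors into device integrity intervals, and bio_integrity_bytes() scales that count by the tuple size. A worked example with hypothetical device parameters:

#include <stdio.h>

struct blk_integrity { unsigned char interval_exp; unsigned char tuple_size; };

static unsigned int bio_integrity_intervals(const struct blk_integrity *bi,
					    unsigned int sectors)
{
	return sectors >> (bi->interval_exp - 9); /* one sector is 1 << 9 bytes */
}

static unsigned int bio_integrity_bytes(const struct blk_integrity *bi,
					unsigned int sectors)
{
	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
}

int main(void)
{
	/* 4096-byte integrity interval (2^12), 8-byte PI tuple per interval */
	struct blk_integrity bi = { .interval_exp = 12, .tuple_size = 8 };

	/* a 16 KiB bio spans 32 sectors -> 4 intervals -> 32 bytes of PI */
	printf("intervals=%u bytes=%u\n",
	       bio_integrity_intervals(&bi, 32), bio_integrity_bytes(&bi, 32));
	return 0;
}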
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7942a96b1a9d..42515195d7d8 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -27,9 +27,20 @@ extern unsigned long max_pfn;
27extern unsigned long long max_possible_pfn; 27extern unsigned long long max_possible_pfn;
28 28
29#ifndef CONFIG_NO_BOOTMEM 29#ifndef CONFIG_NO_BOOTMEM
30/* 30/**
31 * node_bootmem_map is a map pointer - the bits represent all physical 31 * struct bootmem_data - per-node information used by the bootmem allocator
32 * memory pages (including holes) on the node. 32 * @node_min_pfn: the starting physical address of the node's memory
33 * @node_low_pfn: the end physical address of the directly addressable memory
34 * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
35 * memory pages (including holes) on the node.
36 * @last_end_off: the offset within the page of the end of the last allocation;
37 * if 0, the page used is full
38 * @hint_idx: the PFN of the page used with the last allocation;
39 * together with @last_end_off, this lets a test be made to
40 * see whether a new allocation can be merged with the page
41 * used for the last allocation rather than
42 * using up a full new page.
43 * @list: list entry in the linked list ordered by the memory addresses
33 */ 44 */
34typedef struct bootmem_data { 45typedef struct bootmem_data {
35 unsigned long node_min_pfn; 46 unsigned long node_min_pfn;
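
The new kernel-doc for struct bootmem_data describes how @hint_idx and @last_end_off let the allocator fold a request into the partially used page left by the previous allocation. A toy sketch of that test; the field names are reused for illustration only and this is not the bootmem allocator's actual code path:

#include <stdio.h>

struct bootmem_hint {
	unsigned long hint_idx;     /* PFN of the page last allocated from */
	unsigned long last_end_off; /* end offset of the last allocation;
				     * 0 means that page is completely used */
};

static int can_merge(const struct bootmem_hint *h, unsigned long start_pfn)
{
	return h->last_end_off != 0 && start_pfn == h->hint_idx;
}

int main(void)
{
	struct bootmem_hint h = { .hint_idx = 100, .last_end_off = 512 };

	printf("merge at pfn 100: %d\n", can_merge(&h, 100)); /* 1 */
	printf("merge at pfn 101: %d\n", can_merge(&h, 101)); /* 0 */
	return 0;
}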
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 975fb4cf1bb7..f91b0f8ff3a9 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,23 +2,48 @@
2#ifndef _BPF_CGROUP_H 2#ifndef _BPF_CGROUP_H
3#define _BPF_CGROUP_H 3#define _BPF_CGROUP_H
4 4
5#include <linux/errno.h>
5#include <linux/jump_label.h> 6#include <linux/jump_label.h>
7#include <linux/percpu.h>
8#include <linux/rbtree.h>
6#include <uapi/linux/bpf.h> 9#include <uapi/linux/bpf.h>
7 10
8struct sock; 11struct sock;
9struct sockaddr; 12struct sockaddr;
10struct cgroup; 13struct cgroup;
11struct sk_buff; 14struct sk_buff;
15struct bpf_map;
16struct bpf_prog;
12struct bpf_sock_ops_kern; 17struct bpf_sock_ops_kern;
18struct bpf_cgroup_storage;
13 19
14#ifdef CONFIG_CGROUP_BPF 20#ifdef CONFIG_CGROUP_BPF
15 21
16extern struct static_key_false cgroup_bpf_enabled_key; 22extern struct static_key_false cgroup_bpf_enabled_key;
17#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) 23#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
18 24
25DECLARE_PER_CPU(void*, bpf_cgroup_storage);
26
27struct bpf_cgroup_storage_map;
28
29struct bpf_storage_buffer {
30 struct rcu_head rcu;
31 char data[0];
32};
33
34struct bpf_cgroup_storage {
35 struct bpf_storage_buffer *buf;
36 struct bpf_cgroup_storage_map *map;
37 struct bpf_cgroup_storage_key key;
38 struct list_head list;
39 struct rb_node node;
40 struct rcu_head rcu;
41};
42
19struct bpf_prog_list { 43struct bpf_prog_list {
20 struct list_head node; 44 struct list_head node;
21 struct bpf_prog *prog; 45 struct bpf_prog *prog;
46 struct bpf_cgroup_storage *storage;
22}; 47};
23 48
24struct bpf_prog_array; 49struct bpf_prog_array;
@@ -76,6 +101,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
76int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, 101int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
77 short access, enum bpf_attach_type type); 102 short access, enum bpf_attach_type type);
78 103
104static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
105{
106 struct bpf_storage_buffer *buf;
107
108 if (!storage)
109 return;
110
111 buf = READ_ONCE(storage->buf);
112 this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
113}
114
115struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
116void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
117void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
118 struct cgroup *cgroup,
119 enum bpf_attach_type type);
120void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
121int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
122void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
123
79/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ 124/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
80#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ 125#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
81({ \ 126({ \
@@ -188,12 +233,48 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
188 \ 233 \
189 __ret; \ 234 __ret; \
190}) 235})
236int cgroup_bpf_prog_attach(const union bpf_attr *attr,
237 enum bpf_prog_type ptype, struct bpf_prog *prog);
238int cgroup_bpf_prog_detach(const union bpf_attr *attr,
239 enum bpf_prog_type ptype);
240int cgroup_bpf_prog_query(const union bpf_attr *attr,
241 union bpf_attr __user *uattr);
191#else 242#else
192 243
244struct bpf_prog;
193struct cgroup_bpf {}; 245struct cgroup_bpf {};
194static inline void cgroup_bpf_put(struct cgroup *cgrp) {} 246static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
195static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } 247static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
196 248
249static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
250 enum bpf_prog_type ptype,
251 struct bpf_prog *prog)
252{
253 return -EINVAL;
254}
255
256static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
257 enum bpf_prog_type ptype)
258{
259 return -EINVAL;
260}
261
262static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
263 union bpf_attr __user *uattr)
264{
265 return -EINVAL;
266}
267
268static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
269static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
270 struct bpf_map *map) { return 0; }
271static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
272 struct bpf_map *map) {}
273static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
274 struct bpf_prog *prog) { return 0; }
275static inline void bpf_cgroup_storage_free(
276 struct bpf_cgroup_storage *storage) {}
277
197#define cgroup_bpf_enabled (0) 278#define cgroup_bpf_enabled (0)
198#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) 279#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
199#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) 280#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 995c3b1e59bf..523481a3471b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,7 +23,7 @@ struct bpf_prog;
23struct bpf_map; 23struct bpf_map;
24struct sock; 24struct sock;
25struct seq_file; 25struct seq_file;
26struct btf; 26struct btf_type;
27 27
28/* map is generic key/value storage optionally accessible by eBPF programs */ 28/* map is generic key/value storage optionally accessible by eBPF programs */
29struct bpf_map_ops { 29struct bpf_map_ops {
@@ -48,8 +48,9 @@ struct bpf_map_ops {
48 u32 (*map_fd_sys_lookup_elem)(void *ptr); 48 u32 (*map_fd_sys_lookup_elem)(void *ptr);
49 void (*map_seq_show_elem)(struct bpf_map *map, void *key, 49 void (*map_seq_show_elem)(struct bpf_map *map, void *key,
50 struct seq_file *m); 50 struct seq_file *m);
51 int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf, 51 int (*map_check_btf)(const struct bpf_map *map,
52 u32 key_type_id, u32 value_type_id); 52 const struct btf_type *key_type,
53 const struct btf_type *value_type);
53}; 54};
54 55
55struct bpf_map { 56struct bpf_map {
@@ -85,6 +86,7 @@ struct bpf_map {
85 char name[BPF_OBJ_NAME_LEN]; 86 char name[BPF_OBJ_NAME_LEN];
86}; 87};
87 88
89struct bpf_offload_dev;
88struct bpf_offloaded_map; 90struct bpf_offloaded_map;
89 91
90struct bpf_map_dev_ops { 92struct bpf_map_dev_ops {
@@ -117,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
117 119
118static inline bool bpf_map_support_seq_show(const struct bpf_map *map) 120static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
119{ 121{
120 return map->ops->map_seq_show_elem && map->ops->map_check_btf; 122 return map->btf && map->ops->map_seq_show_elem;
121} 123}
122 124
125int map_check_no_btf(const struct bpf_map *map,
126 const struct btf_type *key_type,
127 const struct btf_type *value_type);
128
123extern const struct bpf_map_ops bpf_map_offload_ops; 129extern const struct bpf_map_ops bpf_map_offload_ops;
124 130
125/* function argument constraints */ 131/* function argument constraints */
@@ -154,6 +160,7 @@ enum bpf_arg_type {
154enum bpf_return_type { 160enum bpf_return_type {
155 RET_INTEGER, /* function returns integer */ 161 RET_INTEGER, /* function returns integer */
156 RET_VOID, /* function doesn't return anything */ 162 RET_VOID, /* function doesn't return anything */
163 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
157 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ 164 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
158}; 165};
159 166
@@ -281,6 +288,7 @@ struct bpf_prog_aux {
281 struct bpf_prog *prog; 288 struct bpf_prog *prog;
282 struct user_struct *user; 289 struct user_struct *user;
283 u64 load_time; /* ns since boottime */ 290 u64 load_time; /* ns since boottime */
291 struct bpf_map *cgroup_storage;
284 char name[BPF_OBJ_NAME_LEN]; 292 char name[BPF_OBJ_NAME_LEN];
285#ifdef CONFIG_SECURITY 293#ifdef CONFIG_SECURITY
286 void *security; 294 void *security;
@@ -347,12 +355,17 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
347 * The 'struct bpf_prog_array *' should only be replaced with xchg() 355 * The 'struct bpf_prog_array *' should only be replaced with xchg()
348 * since other cpus are walking the array of pointers in parallel. 356 * since other cpus are walking the array of pointers in parallel.
349 */ 357 */
358struct bpf_prog_array_item {
359 struct bpf_prog *prog;
360 struct bpf_cgroup_storage *cgroup_storage;
361};
362
350struct bpf_prog_array { 363struct bpf_prog_array {
351 struct rcu_head rcu; 364 struct rcu_head rcu;
352 struct bpf_prog *progs[0]; 365 struct bpf_prog_array_item items[0];
353}; 366};
354 367
355struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); 368struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
356void bpf_prog_array_free(struct bpf_prog_array __rcu *progs); 369void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
357int bpf_prog_array_length(struct bpf_prog_array __rcu *progs); 370int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
358int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, 371int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
@@ -370,7 +383,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
370 383
371#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ 384#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
372 ({ \ 385 ({ \
373 struct bpf_prog **_prog, *__prog; \ 386 struct bpf_prog_array_item *_item; \
387 struct bpf_prog *_prog; \
374 struct bpf_prog_array *_array; \ 388 struct bpf_prog_array *_array; \
375 u32 _ret = 1; \ 389 u32 _ret = 1; \
376 preempt_disable(); \ 390 preempt_disable(); \
@@ -378,10 +392,11 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
378 _array = rcu_dereference(array); \ 392 _array = rcu_dereference(array); \
379 if (unlikely(check_non_null && !_array))\ 393 if (unlikely(check_non_null && !_array))\
380 goto _out; \ 394 goto _out; \
381 _prog = _array->progs; \ 395 _item = &_array->items[0]; \
382 while ((__prog = READ_ONCE(*_prog))) { \ 396 while ((_prog = READ_ONCE(_item->prog))) { \
383 _ret &= func(__prog, ctx); \ 397 bpf_cgroup_storage_set(_item->cgroup_storage); \
384 _prog++; \ 398 _ret &= func(_prog, ctx); \
399 _item++; \
385 } \ 400 } \
386_out: \ 401_out: \
387 rcu_read_unlock(); \ 402 rcu_read_unlock(); \
@@ -434,6 +449,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
434void bpf_map_put_with_uref(struct bpf_map *map); 449void bpf_map_put_with_uref(struct bpf_map *map);
435void bpf_map_put(struct bpf_map *map); 450void bpf_map_put(struct bpf_map *map);
436int bpf_map_precharge_memlock(u32 pages); 451int bpf_map_precharge_memlock(u32 pages);
452int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
453void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
437void *bpf_map_area_alloc(size_t size, int numa_node); 454void *bpf_map_area_alloc(size_t size, int numa_node);
438void bpf_map_area_free(void *base); 455void bpf_map_area_free(void *base);
439void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 456void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
@@ -488,12 +505,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
488 505
489/* Map specifics */ 506/* Map specifics */
490struct xdp_buff; 507struct xdp_buff;
508struct sk_buff;
491 509
492struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); 510struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
493void __dev_map_insert_ctx(struct bpf_map *map, u32 index); 511void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
494void __dev_map_flush(struct bpf_map *map); 512void __dev_map_flush(struct bpf_map *map);
495int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, 513int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
496 struct net_device *dev_rx); 514 struct net_device *dev_rx);
515int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
516 struct bpf_prog *xdp_prog);
497 517
498struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); 518struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
499void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); 519void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -509,6 +529,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
509} 529}
510 530
511struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 531struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
532int array_map_alloc_check(union bpf_attr *attr);
512 533
513#else /* !CONFIG_BPF_SYSCALL */ 534#else /* !CONFIG_BPF_SYSCALL */
514static inline struct bpf_prog *bpf_prog_get(u32 ufd) 535static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -586,6 +607,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
586 return 0; 607 return 0;
587} 608}
588 609
610struct sk_buff;
611
612static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
613 struct sk_buff *skb,
614 struct bpf_prog *xdp_prog)
615{
616 return 0;
617}
618
589static inline 619static inline
590struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) 620struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
591{ 621{
@@ -636,7 +666,15 @@ int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
636int bpf_map_offload_get_next_key(struct bpf_map *map, 666int bpf_map_offload_get_next_key(struct bpf_map *map,
637 void *key, void *next_key); 667 void *key, void *next_key);
638 668
639bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map); 669bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
670
671struct bpf_offload_dev *bpf_offload_dev_create(void);
672void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
673int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
674 struct net_device *netdev);
675void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
676 struct net_device *netdev);
677bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
640 678
641#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 679#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
642int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); 680int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -684,6 +722,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
684struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); 722struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
685struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); 723struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
686int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); 724int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
725int sockmap_get_from_fd(const union bpf_attr *attr, int type,
726 struct bpf_prog *prog);
687#else 727#else
688static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) 728static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
689{ 729{
@@ -702,6 +742,12 @@ static inline int sock_map_prog(struct bpf_map *map,
702{ 742{
703 return -EOPNOTSUPP; 743 return -EOPNOTSUPP;
704} 744}
745
746static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
747 struct bpf_prog *prog)
748{
749 return -EINVAL;
750}
705#endif 751#endif
706 752
707#if defined(CONFIG_XDP_SOCKETS) 753#if defined(CONFIG_XDP_SOCKETS)
@@ -729,6 +775,33 @@ static inline void __xsk_map_flush(struct bpf_map *map)
729} 775}
730#endif 776#endif
731 777
778#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
779void bpf_sk_reuseport_detach(struct sock *sk);
780int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
781 void *value);
782int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
783 void *value, u64 map_flags);
784#else
785static inline void bpf_sk_reuseport_detach(struct sock *sk)
786{
787}
788
789#ifdef CONFIG_BPF_SYSCALL
790static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
791 void *key, void *value)
792{
793 return -EOPNOTSUPP;
794}
795
796static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
797 void *key, void *value,
798 u64 map_flags)
799{
800 return -EOPNOTSUPP;
801}
802#endif /* CONFIG_BPF_SYSCALL */
803#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
804
732/* verifier prototypes for helper functions called from eBPF programs */ 805/* verifier prototypes for helper functions called from eBPF programs */
733extern const struct bpf_func_proto bpf_map_lookup_elem_proto; 806extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
734extern const struct bpf_func_proto bpf_map_update_elem_proto; 807extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -748,6 +821,8 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
748extern const struct bpf_func_proto bpf_sock_hash_update_proto; 821extern const struct bpf_func_proto bpf_sock_hash_update_proto;
749extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; 822extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
750 823
824extern const struct bpf_func_proto bpf_get_local_storage_proto;
825
751/* Shared helpers among cBPF and eBPF. */ 826/* Shared helpers among cBPF and eBPF. */
752void bpf_user_rnd_init_once(void); 827void bpf_user_rnd_init_once(void);
753u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 828u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
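
The reworked __BPF_PROG_RUN_ARRAY() iterates an array of (prog, cgroup_storage) pairs, publishes each program's storage pointer before running it, and ANDs the return codes together. A standalone sketch of that loop with stand-in types and trivial "programs":

#include <stdio.h>

struct storage { int data; };
typedef unsigned int (*bpf_prog_fn)(void *ctx);

struct prog_array_item {
	bpf_prog_fn prog;
	struct storage *cgroup_storage;
};

static struct storage *current_storage; /* stands in for the per-cpu slot */

static unsigned int allow(void *ctx) { (void)ctx; return 1; }
static unsigned int deny(void *ctx)  { (void)ctx; return 0; }

static unsigned int run_array(const struct prog_array_item *items, void *ctx)
{
	unsigned int ret = 1;

	for (; items->prog; items++) {
		current_storage = items->cgroup_storage; /* bpf_cgroup_storage_set() */
		ret &= items->prog(ctx);
	}
	return ret;
}

int main(void)
{
	struct storage st = { 42 };
	struct prog_array_item items[] = {
		{ allow, &st }, { deny, &st }, { NULL, NULL },
	};

	printf("verdict=%u\n", run_array(items, NULL)); /* 0: one prog denied */
	return 0;
}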
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
index 5f8a4283092d..9d9ff755ec29 100644
--- a/include/linux/bpf_lirc.h
+++ b/include/linux/bpf_lirc.h
@@ -5,11 +5,12 @@
5#include <uapi/linux/bpf.h> 5#include <uapi/linux/bpf.h>
6 6
7#ifdef CONFIG_BPF_LIRC_MODE2 7#ifdef CONFIG_BPF_LIRC_MODE2
8int lirc_prog_attach(const union bpf_attr *attr); 8int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
9int lirc_prog_detach(const union bpf_attr *attr); 9int lirc_prog_detach(const union bpf_attr *attr);
10int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); 10int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
11#else 11#else
12static inline int lirc_prog_attach(const union bpf_attr *attr) 12static inline int lirc_prog_attach(const union bpf_attr *attr,
13 struct bpf_prog *prog)
13{ 14{
14 return -EINVAL; 15 return -EINVAL;
15} 16}
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index c5700c2d5549..cd26c090e7c0 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -29,6 +29,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
29#ifdef CONFIG_BPF_LIRC_MODE2 29#ifdef CONFIG_BPF_LIRC_MODE2
30BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) 30BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
31#endif 31#endif
32#ifdef CONFIG_INET
33BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
34#endif
32 35
33BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) 36BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
34BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) 37BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -37,6 +40,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
37#ifdef CONFIG_CGROUPS 40#ifdef CONFIG_CGROUPS
38BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) 41BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
39#endif 42#endif
43#ifdef CONFIG_CGROUP_BPF
44BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
45#endif
40BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) 46BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
41BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) 47BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
42BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) 48BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
@@ -57,4 +63,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
57#if defined(CONFIG_XDP_SOCKETS) 63#if defined(CONFIG_XDP_SOCKETS)
58BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) 64BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
59#endif 65#endif
66#ifdef CONFIG_INET
67BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
68#endif
60#endif 69#endif
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index 687b1760bb9f..f02cee0225d4 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -5,10 +5,10 @@
5#include <uapi/linux/bpfilter.h> 5#include <uapi/linux/bpfilter.h>
6 6
7struct sock; 7struct sock;
8int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval, 8int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
9 unsigned int optlen); 9 unsigned int optlen);
10int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval, 10int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
11 int *optlen); 11 int __user *optlen);
12extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 12extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
13 char __user *optval, 13 char __user *optval,
14 unsigned int optlen, bool is_set); 14 unsigned int optlen, bool is_set);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index daa9234a9baf..949e9af8d9d6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -45,6 +45,7 @@
45#define PHY_ID_BCM7445 0x600d8510 45#define PHY_ID_BCM7445 0x600d8510
46 46
47#define PHY_ID_BCM_CYGNUS 0xae025200 47#define PHY_ID_BCM_CYGNUS 0xae025200
48#define PHY_ID_BCM_OMEGA 0xae025100
48 49
49#define PHY_BCM_OUI_MASK 0xfffffc00 50#define PHY_BCM_OUI_MASK 0xfffffc00
50#define PHY_BCM_OUI_1 0x00206000 51#define PHY_BCM_OUI_1 0x00206000
diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h
new file mode 100644
index 000000000000..bb007bd05e7a
--- /dev/null
+++ b/include/linux/build-salt.h
@@ -0,0 +1,20 @@
1#ifndef __BUILD_SALT_H
2#define __BUILD_SALT_H
3
4#include <linux/elfnote.h>
5
6#define LINUX_ELFNOTE_BUILD_SALT 0x100
7
8#ifdef __ASSEMBLER__
9
10#define BUILD_SALT \
11 ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT)
12
13#else
14
15#define BUILD_SALT \
16 ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT)
17
18#endif
19
20#endif /* __BUILD_SALT_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 055aaf5ed9af..a83e1f632eb7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -143,7 +143,12 @@ u8 can_dlc2len(u8 can_dlc);
143/* map the sanitized data length to an appropriate data length code */ 143/* map the sanitized data length to an appropriate data length code */
144u8 can_len2dlc(u8 len); 144u8 can_len2dlc(u8 len);
145 145
146struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); 146struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
147 unsigned int txqs, unsigned int rxqs);
148#define alloc_candev(sizeof_priv, echo_skb_max) \
149 alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)
150#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \
151 alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count)
147void free_candev(struct net_device *dev); 152void free_candev(struct net_device *dev);
148 153
149/* a candev safe wrapper around netdev_priv */ 154/* a candev safe wrapper around netdev_priv */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index e75dfd1f1dec..528271c60018 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/fs.h> /* not really needed, later.. */ 14#include <linux/fs.h> /* not really needed, later.. */
15#include <linux/list.h> 15#include <linux/list.h>
16#include <scsi/scsi_common.h>
16#include <uapi/linux/cdrom.h> 17#include <uapi/linux/cdrom.h>
17 18
18struct packet_command 19struct packet_command
@@ -21,7 +22,7 @@ struct packet_command
21 unsigned char *buffer; 22 unsigned char *buffer;
22 unsigned int buflen; 23 unsigned int buflen;
23 int stat; 24 int stat;
24 struct request_sense *sense; 25 struct scsi_sense_hdr *sshdr;
25 unsigned char data_direction; 26 unsigned char data_direction;
26 int quiet; 27 int quiet;
27 int timeout; 28 int timeout;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index c0e68f903011..ff20b677fb9f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -438,6 +438,9 @@ struct cgroup {
438 /* used to store eBPF programs */ 438 /* used to store eBPF programs */
439 struct cgroup_bpf bpf; 439 struct cgroup_bpf bpf;
440 440
441 /* If there is block congestion on this cgroup. */
442 atomic_t congestion_count;
443
441 /* ids of the ancestors at each level including self */ 444 /* ids of the ancestors at each level including self */
442 int ancestor_ids[]; 445 int ancestor_ids[];
443}; 446};
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c9fdf6f57913..32c553556bbd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -554,6 +554,36 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
554} 554}
555 555
556/** 556/**
557 * cgroup_ancestor - find ancestor of cgroup
558 * @cgrp: cgroup to find ancestor of
559 * @ancestor_level: level of ancestor to find starting from root
560 *
561 * Find the ancestor of @cgrp at the specified level, counting from the root,
562 * and return a pointer to it. Return NULL if @cgrp has no ancestor at
563 * @ancestor_level.
564 *
565 * This function is safe to call as long as @cgrp is accessible.
566 */
567static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
568 int ancestor_level)
569{
570 struct cgroup *ptr;
571
572 if (cgrp->level < ancestor_level)
573 return NULL;
574
575 for (ptr = cgrp;
576 ptr && ptr->level > ancestor_level;
577 ptr = cgroup_parent(ptr))
578 ;
579
580 if (ptr && ptr->level == ancestor_level)
581 return ptr;
582
583 return NULL;
584}
585
586/**
557 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry 587 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
558 * @task: the task to be tested 588 * @task: the task to be tested
559 * @ancestor: possible ancestor of @task's cgroup 589 * @ancestor: possible ancestor of @task's cgroup
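
cgroup_ancestor() walks from a cgroup toward the root until it reaches the requested level, or returns NULL if the starting cgroup is already shallower than that level. A standalone illustration with a toy stand-in for struct cgroup:

#include <stdio.h>
#include <stddef.h>

struct cg {
	const char *name;
	int level;          /* root is level 0 */
	struct cg *parent;
};

static struct cg *cgroup_ancestor(struct cg *cgrp, int ancestor_level)
{
	struct cg *ptr;

	if (cgrp->level < ancestor_level)
		return NULL;

	for (ptr = cgrp; ptr && ptr->level > ancestor_level; ptr = ptr->parent)
		;

	return (ptr && ptr->level == ancestor_level) ? ptr : NULL;
}

int main(void)
{
	struct cg root = { "/",    0, NULL };
	struct cg mid  = { "/a",   1, &root };
	struct cg leaf = { "/a/b", 2, &mid };
	struct cg *anc = cgroup_ancestor(&leaf, 1);

	printf("%s\n", anc ? anc->name : "(none)");          /* "/a" */
	anc = cgroup_ancestor(&mid, 2);                       /* deeper than @mid */
	printf("%s\n", anc ? anc->name : "(none)");          /* "(none)" */
	return 0;
}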
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index b7cfa037e593..08b1aa70a38d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -38,6 +38,8 @@
38#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ 38#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
39/* parents need enable during gate/ungate, set rate and re-parent */ 39/* parents need enable during gate/ungate, set rate and re-parent */
40#define CLK_OPS_PARENT_ENABLE BIT(12) 40#define CLK_OPS_PARENT_ENABLE BIT(12)
41/* duty cycle call may be forwarded to the parent clock */
42#define CLK_DUTY_CYCLE_PARENT BIT(13)
41 43
42struct clk; 44struct clk;
43struct clk_hw; 45struct clk_hw;
@@ -67,6 +69,17 @@ struct clk_rate_request {
67}; 69};
68 70
69/** 71/**
 72 * struct clk_duty - Structure encoding the duty cycle ratio of a clock
73 *
74 * @num: Numerator of the duty cycle ratio
75 * @den: Denominator of the duty cycle ratio
76 */
77struct clk_duty {
78 unsigned int num;
79 unsigned int den;
80};
81
82/**
70 * struct clk_ops - Callback operations for hardware clocks; these are to 83 * struct clk_ops - Callback operations for hardware clocks; these are to
71 * be provided by the clock implementation, and will be called by drivers 84 * be provided by the clock implementation, and will be called by drivers
72 * through the clk_* api. 85 * through the clk_* api.
@@ -169,6 +182,15 @@ struct clk_rate_request {
169 * by the second argument. Valid values for degrees are 182 * by the second argument. Valid values for degrees are
170 * 0-359. Return 0 on success, otherwise -EERROR. 183 * 0-359. Return 0 on success, otherwise -EERROR.
171 * 184 *
185 * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
 186 * of a clock. The returned denominator cannot be 0 and must be
 187 * greater than or equal to the numerator.
188 *
189 * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
 190 * the numerator (2nd argument) and denominator (3rd argument).
 191 * The arguments must form a valid ratio (denominator > 0
 192 * and >= numerator). Return 0 on success, otherwise -EERROR.
193 *
172 * @init: Perform platform-specific initialization magic. 194 * @init: Perform platform-specific initialization magic.
173 * This is not not used by any of the basic clock types. 195 * This is not not used by any of the basic clock types.
174 * Please consider other ways of solving initialization problems 196 * Please consider other ways of solving initialization problems
@@ -218,6 +240,10 @@ struct clk_ops {
218 unsigned long parent_accuracy); 240 unsigned long parent_accuracy);
219 int (*get_phase)(struct clk_hw *hw); 241 int (*get_phase)(struct clk_hw *hw);
220 int (*set_phase)(struct clk_hw *hw, int degrees); 242 int (*set_phase)(struct clk_hw *hw, int degrees);
243 int (*get_duty_cycle)(struct clk_hw *hw,
244 struct clk_duty *duty);
245 int (*set_duty_cycle)(struct clk_hw *hw,
246 struct clk_duty *duty);
221 void (*init)(struct clk_hw *hw); 247 void (*init)(struct clk_hw *hw);
222 void (*debug_init)(struct clk_hw *hw, struct dentry *dentry); 248 void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
223}; 249};
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0dbd0885b2c2..4f750c481b82 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -142,6 +142,27 @@ int clk_set_phase(struct clk *clk, int degrees);
 int clk_get_phase(struct clk *clk);
 
 /**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
+ * success, -EERROR otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio multiplied by the scale provided, otherwise
+ * returns -EERROR.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+
+/**
  * clk_is_match - check if two clk's point to the same hardware clock
  * @p: clk compared against q
  * @q: clk compared against p
@@ -183,6 +204,18 @@ static inline long clk_get_phase(struct clk *clk)
 	return -ENOTSUPP;
 }
 
+static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
+				     unsigned int den)
+{
+	return -ENOTSUPP;
+}
+
+static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
+						     unsigned int scale)
+{
+	return 0;
+}
+
 static inline bool clk_is_match(const struct clk *p, const struct clk *q)
 {
 	return p == q;
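
And a consumer-side sketch under the same caveat: the function below is illustrative, not part of the patch.

/* Hypothetical consumer of the new duty-cycle API. */
static int example_set_quarter_duty(struct clk *clk)
{
	int ret;

	ret = clk_set_duty_cycle(clk, 1, 4);	/* request a 25% duty cycle */
	if (ret)
		return ret;

	/* scale = 100 reports the ratio as a percentage, here ideally 25 */
	return clk_get_scaled_duty_cycle(clk, 100);
}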
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 7dff1963c185..308918928767 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
 extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
 
 extern u64
 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b1a5562b3215..1a3c4f37e908 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -72,6 +72,9 @@
  */
 #ifndef COMPAT_SYSCALL_DEFINEx
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)					\
+	__diag_push();								\
+	__diag_ignore(GCC, 8, "-Wattribute-alias",				\
+		      "Type aliasing is used to sanitize syscall arguments");\
 	asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
 	asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))	\
 		__attribute__((alias(__stringify(__se_compat_sys##name))));	\
@@ -80,8 +83,11 @@
 	asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));	\
 	asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))	\
 	{									\
-		return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+		long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+		__MAP(x,__SC_TEST,__VA_ARGS__);					\
+		return ret;							\
 	}									\
+	__diag_pop();								\
 	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* COMPAT_SYSCALL_DEFINEx */
 
@@ -109,11 +115,6 @@ typedef compat_ulong_t compat_aio_context_t;
 struct compat_sel_arg_struct;
 struct rusage;
 
-struct compat_itimerspec {
-	struct compat_timespec it_interval;
-	struct compat_timespec it_value;
-};
-
 struct compat_utimbuf {
 	compat_time_t		actime;
 	compat_time_t		modtime;
@@ -294,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
 extern int compat_put_timespec(const struct timespec *, void __user *);
 extern int compat_get_timeval(struct timeval *, const void __user *);
 extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
-			const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
-			struct compat_itimerspec __user *uits);
 
 struct compat_iovec {
 	compat_uptr_t	iov_base;
@@ -1022,6 +1019,17 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
 	return ctv;
 }
 
+/*
+ * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
+ * directly. Instead, use one of the functions which work equivalently, such
+ * as the kcompat_sys_xyzyyz() functions prototyped below.
+ */
+
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
+		     struct compat_statfs64 __user * buf);
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+			  struct compat_statfs64 __user * buf);
+
 #else /* !CONFIG_COMPAT */
 
 #define is_compat_task() (0)
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
index 31f2774f1994..e70bfd1d2c3f 100644
--- a/include/linux/compat_time.h
+++ b/include/linux/compat_time.h
@@ -17,7 +17,16 @@ struct compat_timeval {
 	s32		tv_usec;
 };
 
+struct compat_itimerspec {
+	struct compat_timespec it_interval;
+	struct compat_timespec it_value;
+};
+
 extern int compat_get_timespec64(struct timespec64 *, const void __user *);
 extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+extern int get_compat_itimerspec64(struct itimerspec64 *its,
+			const struct compat_itimerspec __user *uits);
+extern int put_compat_itimerspec64(const struct itimerspec64 *its,
+			struct compat_itimerspec __user *uits);
 
 #endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index f1a7492a5cc8..573f5a7d42d4 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -66,25 +66,40 @@
 #endif
 
 /*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline	__attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
+/*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old.
  * GCC does not warn about unused static inline functions for
  * -Wunused-function.  This turns out to avoid the need for complex #ifdef
  * directives.  Suppress the warning in clang as well by using "unused"
  * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
 	!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline		__attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__	__attribute__((always_inline,unused)) notrace
-#define __inline __inline	__attribute__((always_inline,unused)) notrace
+#define inline \
+	inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline		__attribute__((unused)) notrace
-#define __inline__ __inline__	__attribute__((unused)) notrace
-#define __inline __inline	__attribute__((unused)) notrace
+#define inline inline		__attribute__((unused)) notrace __gnu_inline
 #endif
 
+#define __inline__ inline
+#define __inline inline
 #define __always_inline	inline __attribute__((always_inline))
 #define noinline	__attribute__((noinline))
 
@@ -347,3 +362,28 @@
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+	__diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore	ignored
+#define __diag_GCC_warn		warning
+#define __diag_GCC_error	error
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s)		#s
+#define __diag_str(s)		__diag_str1(s)
+#define __diag(s)		_Pragma(__diag_str(GCC diagnostic s))
+#endif
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s)		__diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6b79a9bba9a7..a8ba6b04152c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -271,4 +271,22 @@ struct ftrace_likely_data {
 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push()	__diag(push)
+#define __diag_pop()	__diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+	__diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+	__diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+	__diag_ ## compiler(version, error, option)
+
 #endif /* __LINUX_COMPILER_TYPES_H */
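
As an aside (not part of the patch), the helper used in COMPAT_SYSCALL_DEFINEx above works out roughly as follows on gcc >= 8:

/*
 *   __diag_ignore(GCC, 8, "-Wattribute-alias", "comment")
 * expands to
 *   __diag_GCC(8, ignore, "-Wattribute-alias")
 * expands to
 *   __diag_GCC_8(__diag_GCC_ignore "-Wattribute-alias")
 * i.e.
 *   __diag_GCC_8(ignored "-Wattribute-alias")
 * which, for GCC_VERSION >= 80000, becomes
 *   __diag(ignored "-Wattribute-alias")
 * and finally
 *   _Pragma("GCC diagnostic ignored \"-Wattribute-alias\"")
 *
 * On older gcc the __diag_GCC_8() stub swallows its argument, and on
 * non-gcc compilers the fallbacks above make __diag()/__diag_GCC() empty.
 */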
diff --git a/include/linux/console.h b/include/linux/console.h
index dfd6b0e97855..f59f3dbca65c 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -21,6 +21,7 @@ struct console_font_op;
 struct console_font;
 struct module;
 struct tty_struct;
+struct notifier_block;
 
 /*
  * this is what the terminal answers to a ESC-Z or csi0c query.
@@ -220,4 +221,8 @@ static inline bool vgacon_text_force(void) { return false; }
 
 extern void console_init(void);
 
+/* For deferred console takeover */
+void dummycon_register_output_notifier(struct notifier_block *nb);
+void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
 #endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a97a63eef59f..218df7f4d3e1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -30,7 +30,7 @@ struct cpu {
 };
 
 extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
 extern void cpu_init(void);
 extern void trap_init(void);
 
@@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
 				   struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
 					  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -103,6 +105,7 @@ extern void cpus_write_lock(void);
 extern void cpus_write_unlock(void);
 extern void cpus_read_lock(void);
 extern void cpus_read_unlock(void);
+extern int cpus_read_trylock(void);
 extern void lockdep_assert_cpus_held(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
@@ -115,6 +118,7 @@ static inline void cpus_write_lock(void) { }
 static inline void cpus_write_unlock(void) { }
 static inline void cpus_read_lock(void) { }
 static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
 static inline void lockdep_assert_cpus_held(void) { }
 static inline void cpu_hotplug_disable(void) { }
 static inline void cpu_hotplug_enable(void) { }
@@ -166,4 +170,23 @@ void cpuhp_report_idle_dead(void);
 static inline void cpuhp_report_idle_dead(void) { }
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
+enum cpuhp_smt_control {
+	CPU_SMT_ENABLED,
+	CPU_SMT_DISABLED,
+	CPU_SMT_FORCE_DISABLED,
+	CPU_SMT_NOT_SUPPORTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_check_topology_early(void);
+extern void cpu_smt_check_topology(void);
+#else
+# define cpu_smt_control		(CPU_SMT_ENABLED)
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_check_topology_early(void) { }
+static inline void cpu_smt_check_topology(void) { }
+#endif
+
 #endif /* _LINUX_CPU_H_ */
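
A hedged sketch (not from the patch) of how the new trylock variant might be used when blocking on the hotplug lock is undesirable:

static void example_report_online_cpus(void)
{
	unsigned int cpu;

	/* Skip the report entirely if a hotplug operation is in flight. */
	if (!cpus_read_trylock())
		return;
	for_each_online_cpu(cpu)
		pr_info("cpu%u is online\n", cpu);
	cpus_read_unlock();
}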
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8796ba387152..4cf06a64bc02 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -164,6 +164,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+	CPUHP_AP_WATCHDOG_ONLINE,
 	CPUHP_AP_WORKQUEUE_ONLINE,
 	CPUHP_AP_RCUTREE_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bf53d893ad02..147bdec42215 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask;
 #define cpu_active(cpu)		((cpu) == 0)
 #endif
 
-/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
 {
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
-	WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+	WARN_ON_ONCE(cpu >= bits);
 #endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+}
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+	cpu_max_bits_warn(cpu, nr_cpumask_bits);
 	return cpu;
 }
 
@@ -154,6 +159,13 @@ static inline unsigned int cpumask_next_and(int n,
 	return n+1;
 }
 
+static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
+					     int start, bool wrap)
+{
+	/* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
+	return (wrap && n == 0);
+}
+
 /* cpu must be a valid cpu, ie 0, so there's no other choice. */
 static inline unsigned int cpumask_any_but(const struct cpumask *mask,
 					   unsigned int cpu)
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 000000000000..62c4b7790a28
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRC32_POLY_LE 0xedb88320
+#define CRC32_POLY_BE 0x04c11db7
+
+/*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+ * x^8+x^6+x^0
+ */
+#define CRC32C_POLY_LE 0x82F63B78
+
+#endif /* _LINUX_CRC32_POLY_H */
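
For reference, a minimal sketch (not part of the patch) of the bitwise, reflected CRC-32 these constants describe; the kernel's table-driven crc32_le() computes the same function given the same seed.

static u32 example_crc32_le_bitwise(u32 crc, const u8 *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
	}
	return crc;	/* callers typically seed with ~0 and invert the result */
}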
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 631286535d0f..7eed6101c791 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -65,6 +65,12 @@ extern void groups_free(struct group_info *);
 
 extern int in_group_p(kgid_t);
 extern int in_egroup_p(kgid_t);
+extern int groups_search(const struct group_info *, kgid_t);
+
+extern int set_current_groups(struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
+extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
 #else
 static inline void groups_free(struct group_info *group_info)
 {
@@ -78,12 +84,11 @@ static inline int in_egroup_p(kgid_t grp)
 {
 	return 1;
 }
+static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+{
+	return 1;
+}
 #endif
-extern int set_current_groups(struct group_info *);
-extern void set_groups(struct cred *, struct group_info *);
-extern int groups_search(const struct group_info *, kgid_t);
-extern bool may_setgroups(void);
-extern void groups_sort(struct group_info *);
 
 /*
  * The security context of a task
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6eb06101089f..e8839d3a7559 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -113,6 +113,11 @@
 #define CRYPTO_ALG_OPTIONAL_KEY		0x00004000
 
 /*
+ * Don't trigger module loading
+ */
+#define CRYPTO_NOLOAD			0x00008000
+
+/*
  * Transform masks and values (for crt_flags).
  */
 #define CRYPTO_TFM_NEED_KEY		0x00000001
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 3855e3800f48..deb0f663252f 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size, pfn_t pfn);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 66c6e17e61e5..d32957b423d5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -227,7 +227,6 @@ extern void d_instantiate(struct dentry *, struct inode *);
 extern void d_instantiate_new(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
 extern void __d_drop(struct dentry *dentry);
 extern void d_drop(struct dentry *dentry);
 extern void d_delete(struct dentry *);
@@ -271,8 +270,6 @@ extern void d_rehash(struct dentry *);
 
 extern void d_add(struct dentry *, struct inode *);
 
-extern void dentry_update_name_case(struct dentry *, const struct qstr *);
-
 /* used for rename() and baskets */
 extern void d_move(struct dentry *, struct dentry *);
 extern void d_exchange(struct dentry *, struct dentry *);
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index e6c0448ebcc7..31c865d1842e 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
 
 static inline void delayacct_blkio_end(struct task_struct *p)
 {
-	if (current->delays)
+	if (p->delays)
 		__delayacct_blkio_end(p);
 	delayacct_clear_flag(DELAYACCT_PF_BLKIO);
 }
diff --git a/include/linux/device.h b/include/linux/device.h
index 055a69dbcd18..2a562f4ded07 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -90,7 +90,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  * @num_vf:	Called to find out how many virtual functions a device on this
  *		bus supports.
  * @dma_configure:	Called to setup DMA configuration on a device on
-			this bus.
+ *			this bus.
  * @pm:		Power management operations of this bus, callback the specific
  *		device driver's pm-ops.
  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
@@ -384,6 +384,9 @@ int subsys_virtual_register(struct bus_type *subsys,
  * @shutdown_pre: Called at shut-down time before driver shutdown.
  * @ns_type:	Callbacks so sysfs can detemine namespaces.
  * @namespace:	Namespace of the device belongs to this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ *		for the devices belonging to the class. Usually tied to
+ *		device's namespace.
  * @pm:		The default device power management operations of this class.
  * @p:		The private data of the driver core, no one other than the
  *		driver core can touch this.
@@ -413,6 +416,8 @@ struct class {
 	const struct kobj_ns_type_operations *ns_type;
 	const void *(*namespace)(struct device *dev);
 
+	void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+
 	const struct dev_pm_ops *pm;
 
 	struct subsys_private *p;
@@ -784,14 +789,16 @@ enum device_link_state {
  * Device link flags.
  *
  * STATELESS: The core won't track the presence of supplier/consumer drivers.
- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
  * PM_RUNTIME: If set, the runtime PM framework will use this link.
 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
  */
 #define DL_FLAG_STATELESS		BIT(0)
-#define DL_FLAG_AUTOREMOVE		BIT(1)
+#define DL_FLAG_AUTOREMOVE_CONSUMER	BIT(1)
 #define DL_FLAG_PM_RUNTIME		BIT(2)
 #define DL_FLAG_RPM_ACTIVE		BIT(3)
+#define DL_FLAG_AUTOREMOVE_SUPPLIER	BIT(4)
 
 /**
  * struct device_link - Device link representation.
@@ -886,6 +893,8 @@ struct dev_links_info {
  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
  *		hardware supports 64-bit addresses for consistent allocations
  *		such descriptors.
+ * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
+ *		limit than the device itself supports.
  * @dma_pfn_offset: offset of DMA memory range relatively of RAM
 * @dma_parms:	A low level driver may set these to teach IOMMU code about
 *		segment limitations.
@@ -912,8 +921,6 @@ struct dev_links_info {
 * @offline:	Set after successful invocation of bus type's .offline().
 * @of_node_reused: Set if the device-tree node is shared with an ancestor
 *              device.
- * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself
- *		indicates support for a higher limit in the dma_mask field.
 *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device. The device structure contains the information
@@ -967,6 +974,7 @@ struct device {
 					     not all hardware supports
 					     64 bit addresses for consistent
 					     allocations such descriptors. */
+	u64		bus_dma_mask;	/* upstream dma_mask constraint */
 	unsigned long	dma_pfn_offset;
 
 	struct device_dma_parameters *dma_parms;
@@ -1002,7 +1010,6 @@ struct device {
 	bool			offline_disabled:1;
 	bool			offline:1;
 	bool			of_node_reused:1;
-	bool			dma_32bit_limit:1;
 };
 
 static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -1316,6 +1323,7 @@ extern const char *dev_driver_string(const struct device *dev);
 struct device_link *device_link_add(struct device *consumer,
 				    struct device *supplier, u32 flags);
 void device_link_del(struct device_link *link);
+void device_link_remove(void *consumer, struct device *supplier);
 
 #ifdef CONFIG_PRINTK
 
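
A brief usage sketch (not from the patch) of the renamed consumer-side flag; a consumer probe path might create a managed link like this:

static int example_link_to_supplier(struct device *consumer,
				    struct device *supplier)
{
	struct device_link *link;

	/* Link is torn down automatically when the consumer driver unbinds. */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
	return link ? 0 : -EINVAL;
}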
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 085db2fee2d7..58725f890b5b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,12 +39,12 @@ struct dma_buf_attachment;
 
 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: [optional] maps a page from the buffer into kernel address
  *		space, users may not block until the subsequent unmap call.
  *		This callback must not sleep.
  * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
  *		  This Callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
+ * @map: [optional] maps a page from the buffer into kernel address space.
  * @unmap: [optional] unmaps a page from the buffer.
  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
  *	  address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
 	 * @attach:
 	 *
 	 * This is called from dma_buf_attach() to make sure that a given
-	 * &device can access the provided &dma_buf. Exporters which support
-	 * buffer objects in special locations like VRAM or device-specific
-	 * carveout areas should check whether the buffer could be move to
-	 * system memory (or directly accessed by the provided device), and
-	 * otherwise need to fail the attach operation.
+	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+	 * which support buffer objects in special locations like VRAM or
+	 * device-specific carveout areas should check whether the buffer could
+	 * be move to system memory (or directly accessed by the provided
+	 * device), and otherwise need to fail the attach operation.
 	 *
 	 * The exporter should also in general check whether the current
 	 * allocation fullfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
 	 * to signal that backing storage is already allocated and incompatible
 	 * with the requirements of requesting device.
 	 */
-	int (*attach)(struct dma_buf *, struct device *,
-		      struct dma_buf_attachment *);
+	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
 
 	/**
 	 * @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
 	 * to be restarted.
 	 */
 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
-	void *(*map_atomic)(struct dma_buf *, unsigned long);
-	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*map)(struct dma_buf *, unsigned long);
 	void (*unmap)(struct dma_buf *, unsigned long, void *);
 
@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			   enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index b67bf6ac907d..3c5a4cb3eb95 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -48,7 +48,7 @@
  * CMA should not be used by the device drivers directly. It is
  * only a helper framework for dma-mapping subsystem.
  *
- * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ * For more information, see kernel-docs in kernel/dma/contiguous.c
  */
 
 #ifdef __KERNEL__
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index eb9b05aa5aea..02dba8cd033d 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -166,7 +166,8 @@ struct dma_fence_ops {
 	 * released when the fence is signalled (through e.g. the interrupt
 	 * handler).
 	 *
-	 * This callback is mandatory.
+	 * This callback is optional. If this callback is not present, then the
+	 * driver must always have signaling enabled.
 	 */
 	bool (*enable_signaling)(struct dma_fence *fence);
 
@@ -190,11 +191,14 @@ struct dma_fence_ops {
 	/**
 	 * @wait:
 	 *
-	 * Custom wait implementation, or dma_fence_default_wait.
+	 * Custom wait implementation, defaults to dma_fence_default_wait() if
+	 * not set.
 	 *
-	 * Must not be NULL, set to dma_fence_default_wait for default implementation.
-	 * the dma_fence_default_wait implementation should work for any fence, as long
-	 * as enable_signaling works correctly.
+	 * The dma_fence_default_wait implementation should work for any fence, as long
+	 * as @enable_signaling works correctly. This hook allows drivers to
+	 * have an optimized version for the case where a process context is
+	 * already available, e.g. if @enable_signaling for the general case
+	 * needs to set up a worker thread.
 	 *
 	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
 	 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -202,7 +206,7 @@ struct dma_fence_ops {
 	 * which should be treated as if the fence is signaled. For example a hardware
 	 * lockup could be reported like that.
 	 *
-	 * This callback is mandatory.
+	 * This callback is optional.
 	 */
 	signed long (*wait)(struct dma_fence *fence,
 			    bool intr, signed long timeout);
@@ -218,17 +222,6 @@ struct dma_fence_ops {
 	void (*release)(struct dma_fence *fence);
 
 	/**
-	 * @fill_driver_data:
-	 *
-	 * Callback to fill in free-form debug info.
-	 *
-	 * Returns amount of bytes filled, or negative error on failure.
-	 *
-	 * This callback is optional.
-	 */
-	int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
-
-	/**
 	 * @fence_value_str:
 	 *
 	 * Callback to fill in free-form debug info specific to this fence, like
@@ -242,8 +235,9 @@ struct dma_fence_ops {
 	 * @timeline_value_str:
 	 *
 	 * Fills in the current value of the timeline as a string, like the
-	 * sequence number. This should match what @fill_driver_data prints for
-	 * the most recently signalled fence (assuming no delayed signalling).
+	 * sequence number. Note that the specific fence passed to this function
+	 * should not matter, drivers should only use it to look up the
+	 * corresponding timeline structures.
 	 */
 	void (*timeline_value_str)(struct dma_fence *fence,
 				   char *str, int size);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f9cc309507d9..1db6a6b46d0d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -538,10 +538,17 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!ops);
-	WARN_ON(irqs_disabled());
 
 	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 		return;
+	/*
+	 * On non-coherent platforms which implement DMA-coherent buffers via
+	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
+	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
+	 * sleep on some machines, and b) an indication that the driver is
+	 * probably misusing the coherent API anyway.
	 */
+	WARN_ON(irqs_disabled());
 
 	if (!ops->free || !cpu_addr)
 		return;
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 10b2654d549b..a0aa00cc909d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -44,4 +44,12 @@ static inline void arch_sync_dma_for_cpu(struct device *dev,
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(struct device *dev);
+#else
+static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
 #endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
index e56ec7af4fd7..9fc594f69eff 100644
--- a/include/linux/dma/pxa-dma.h
+++ b/include/linux/dma/pxa-dma.h
@@ -9,6 +9,15 @@ enum pxad_chan_prio {
 	PXAD_PRIO_LOWEST,
 };
 
+/**
+ * struct pxad_param - dma channel request parameters
+ * @drcmr: requestor line number
+ * @prio: minimal mandatory priority of the channel
+ *
+ * If a requested channel is granted, its priority will be at least @prio,
+ * ie. if PXAD_PRIO_LOW is required, the requested channel will be either
+ * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST.
+ */
 struct pxad_param {
 	unsigned int drcmr;
 	enum pxad_chan_prio prio;
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 56add823f190..401e4b254e30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
 	void *flush;
 } efi_file_handle_t;
 
+typedef struct {
+	u64 revision;
+	u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+	u64 revision;
+	u64 open_volume;
+} efi_file_io_interface_64_t;
+
 typedef struct _efi_file_io_interface {
 	u64 revision;
 	int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
 extern void efi_gettimeofday (struct timespec64 *ts);
 extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if possible */
 #ifdef CONFIG_X86
-extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
 extern efi_status_t efi_query_variable_store(u32 attributes,
 					     unsigned long size,
 					     bool nonblocking);
 extern void efi_find_mirror(void);
 #else
-static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
 
 static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
 
 extern int efi_tpm_eventlog_init(void);
 
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
 #endif /* _LINUX_EFI_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 79563840c295..572e11bb8696 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
 				    unsigned int rxqs);
 #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
 
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
-				 struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
 int eth_gro_complete(struct sk_buff *skb, int nhoff);
 
 /* Reserved Ethernet Addresses per IEEE 802.1Q */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 7094718b653b..ffcc7724ca21 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -11,6 +11,7 @@
 
 #include <linux/fcntl.h>
 #include <linux/wait.h>
+#include <linux/err.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
diff --git a/include/linux/file.h b/include/linux/file.h
index 279720db984a..6b2fb032416c 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -17,9 +17,12 @@ extern void fput(struct file *);
 struct file_operations;
 struct vfsmount;
 struct dentry;
+struct inode;
 struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
-	const struct file_operations *fop);
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+	const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+	const struct file_operations *);
 
 static inline void fput_light(struct file *file, int fput_needed)
 {
@@ -78,7 +81,6 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
 extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
 extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
 extern int get_unused_fd_flags(unsigned flags);
 extern void put_unused_fd(unsigned int fd);
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 45fc0f5000d8..5d565c50bcb2 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,6 +19,7 @@
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
 
 #include <net/sch_generic.h>
 
@@ -31,6 +32,7 @@ struct seccomp_data;
 struct bpf_prog_aux;
 struct xdp_rxq_info;
 struct xdp_buff;
+struct sock_reuseport;
 
 /* ArgX, context and stack frame pointer register positions. Note,
  * Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -469,15 +471,16 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-	unsigned int pages;
-	u8 image[];
+	u32 pages;
+	/* Some arches need word alignment for their instructions */
+	u8 image[] __aligned(4);
 };
 
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
 				jit_requested:1,/* archs need to JIT the prog */
-				locked:1,	/* Program image locked? */
+				undo_set_mem:1,	/* Passed set_memory_ro() checkpoint */
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
 				dst_needed:1,	/* Do we need dst entry? */
@@ -535,6 +538,20 @@ struct sk_msg_buff {
 	struct list_head list;
 };
 
+struct bpf_redirect_info {
+	u32 ifindex;
+	u32 flags;
+	struct bpf_map *map;
+	struct bpf_map *map_to_flush;
+	unsigned long map_owner;
+	u32 kern_flags;
+};
+
+DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
+
 /* Compute the linear packet data range [data, data_end) which
  * will be accessed by various program types (cls_bpf, act_bpf,
  * lwt, ...). Subsystems allowing direct data access must (!)
@@ -671,51 +688,28 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-	fp->locked = 1;
-	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+	fp->undo_set_mem = 1;
+	set_memory_ro((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
-	if (fp->locked) {
-		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
-		/* In case set_memory_rw() fails, we want to be the first
-		 * to crash here instead of some random place later on.
-		 */
-		fp->locked = 0;
-	}
+	if (fp->undo_set_mem)
+		set_memory_rw((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-	WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
+	set_memory_ro((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
-	WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
+	set_memory_rw((unsigned long)hdr, hdr->pages);
 }
 
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
-
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
 {
@@ -759,6 +753,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+void sk_reuseport_prog_free(struct bpf_prog *prog);
 int sk_detach_filter(struct sock *sk);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 		  unsigned int len);
@@ -786,6 +781,42 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
 
+static inline bool xdp_return_frame_no_direct(void)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_set_return_frame_no_direct(void)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_clear_return_frame_no_direct(void)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+				 unsigned int pktlen)
+{
+	unsigned int len;
+
+	if (unlikely(!(fwd->flags & IFF_UP)))
+		return -ENETDOWN;
+
+	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+	if (pktlen > len)
+		return -EMSGSIZE;
+
+	return 0;
+}
+
 /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
  * same cpu context. Further for best results no more than a single map
  * for the do_redirect/do_flush pair should be used. This limitation is
@@ -804,6 +835,20 @@ void bpf_warn_invalid_xdp_action(u32 act);
 struct sock *do_sk_redirect_map(struct sk_buff *skb);
 struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
 
+#ifdef CONFIG_INET
+struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+				  struct bpf_prog *prog, struct sk_buff *skb,
+				  u32 hash);
+#else
+static inline struct sock *
+bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+		     struct bpf_prog *prog, struct sk_buff *skb,
+		     u32 hash)
+{
+	return NULL;
+}
+#endif
+
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
@@ -961,6 +1006,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 }
 #endif /* CONFIG_BPF_JIT */
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
 #define BPF_ANC		BIT(15)
 
 static inline bool bpf_needs_clear_a(const struct sock_filter *first)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5c91108846db..1ec33fd0423f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -148,6 +148,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
148/* Has write method(s) */ 148/* Has write method(s) */
149#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) 149#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
150 150
151#define FMODE_OPENED ((__force fmode_t)0x80000)
152#define FMODE_CREATED ((__force fmode_t)0x100000)
153
151/* File was opened by fanotify and shouldn't generate fanotify events */ 154/* File was opened by fanotify and shouldn't generate fanotify events */
152#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) 155#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
153 156
@@ -275,6 +278,7 @@ struct writeback_control;
275 278
276/* 279/*
277 * Write life time hint values. 280 * Write life time hint values.
281 * Stored in struct inode as u8.
278 */ 282 */
279enum rw_hint { 283enum rw_hint {
280 WRITE_LIFE_NOT_SET = 0, 284 WRITE_LIFE_NOT_SET = 0,
@@ -609,8 +613,8 @@ struct inode {
609 struct timespec64 i_ctime; 613 struct timespec64 i_ctime;
610 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 614 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
611 unsigned short i_bytes; 615 unsigned short i_bytes;
612 unsigned int i_blkbits; 616 u8 i_blkbits;
613 enum rw_hint i_write_hint; 617 u8 i_write_hint;
614 blkcnt_t i_blocks; 618 blkcnt_t i_blocks;
615 619
616#ifdef __NEED_I_SIZE_ORDERED 620#ifdef __NEED_I_SIZE_ORDERED
@@ -685,6 +689,17 @@ static inline int inode_unhashed(struct inode *inode)
685} 689}
686 690
687/* 691/*
692 * __mark_inode_dirty expects inodes to be hashed. Since we don't
693 * want special inodes in the fileset inode space, we make them
694 * appear hashed, but do not put on any lists. hlist_del()
695 * will work fine and require no locking.
696 */
697static inline void inode_fake_hash(struct inode *inode)
698{
699 hlist_add_fake(&inode->i_hash);
700}
701
702/*
688 * inode->i_mutex nesting subclasses for the lock validator: 703 * inode->i_mutex nesting subclasses for the lock validator:
689 * 704 *
690 * 0: the object of the current VFS operation 705 * 0: the object of the current VFS operation
@@ -1720,8 +1735,6 @@ struct file_operations {
1720 int (*iterate) (struct file *, struct dir_context *); 1735 int (*iterate) (struct file *, struct dir_context *);
1721 int (*iterate_shared) (struct file *, struct dir_context *); 1736 int (*iterate_shared) (struct file *, struct dir_context *);
1722 __poll_t (*poll) (struct file *, struct poll_table_struct *); 1737 __poll_t (*poll) (struct file *, struct poll_table_struct *);
1723 struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
1724 __poll_t (*poll_mask) (struct file *, __poll_t);
1725 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 1738 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1726 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 1739 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1727 int (*mmap) (struct file *, struct vm_area_struct *); 1740 int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1778,7 +1791,7 @@ struct inode_operations {
1778 int (*update_time)(struct inode *, struct timespec64 *, int); 1791 int (*update_time)(struct inode *, struct timespec64 *, int);
1779 int (*atomic_open)(struct inode *, struct dentry *, 1792 int (*atomic_open)(struct inode *, struct dentry *,
1780 struct file *, unsigned open_flag, 1793 struct file *, unsigned open_flag,
1781 umode_t create_mode, int *opened); 1794 umode_t create_mode);
1782 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 1795 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
1783 int (*set_acl)(struct inode *, struct posix_acl *, int); 1796 int (*set_acl)(struct inode *, struct posix_acl *, int);
1784} ____cacheline_aligned; 1797} ____cacheline_aligned;
@@ -2016,6 +2029,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
2016 * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper 2029 * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
2017 * and work dirs among overlayfs mounts. 2030 * and work dirs among overlayfs mounts.
2018 * 2031 *
2032 * I_CREATING New object's inode in the middle of setting up.
2033 *
2019 * Q: What is the difference between I_WILL_FREE and I_FREEING? 2034 * Q: What is the difference between I_WILL_FREE and I_FREEING?
2020 */ 2035 */
2021#define I_DIRTY_SYNC (1 << 0) 2036#define I_DIRTY_SYNC (1 << 0)
@@ -2036,7 +2051,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
2036#define __I_DIRTY_TIME_EXPIRED 12 2051#define __I_DIRTY_TIME_EXPIRED 12
2037#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) 2052#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
2038#define I_WB_SWITCH (1 << 13) 2053#define I_WB_SWITCH (1 << 13)
2039#define I_OVL_INUSE (1 << 14) 2054#define I_OVL_INUSE (1 << 14)
2055#define I_CREATING (1 << 15)
2040 2056
2041#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) 2057#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
2042#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) 2058#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2422,6 +2438,10 @@ extern struct file *filp_open(const char *, int, umode_t);
2422extern struct file *file_open_root(struct dentry *, struct vfsmount *, 2438extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2423 const char *, int, umode_t); 2439 const char *, int, umode_t);
2424extern struct file * dentry_open(const struct path *, int, const struct cred *); 2440extern struct file * dentry_open(const struct path *, int, const struct cred *);
2441static inline struct file *file_clone_open(struct file *file)
2442{
2443 return dentry_open(&file->f_path, file->f_flags, file->f_cred);
2444}
2425extern int filp_close(struct file *, fl_owner_t id); 2445extern int filp_close(struct file *, fl_owner_t id);
2426 2446
2427extern struct filename *getname_flags(const char __user *, int, int *); 2447extern struct filename *getname_flags(const char __user *, int, int *);
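file_clone_open() is a thin wrapper over dentry_open() that reopens the same path with the original file's flags and credentials. A hedged usage sketch; the helper name is made up.

#include <linux/err.h>
#include <linux/fs.h>

/* Illustrative only: obtain a private struct file referring to the same
 * path, flags and credentials as an existing open file. */
static struct file *example_dup_open(struct file *file)
{
	struct file *copy = file_clone_open(file);

	if (IS_ERR(copy))
		return copy;	/* dentry_open() error is propagated */

	/* ... use 'copy' independently, fput() it when done ... */
	return copy;
}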
@@ -2429,13 +2449,8 @@ extern struct filename *getname(const char __user *);
2429extern struct filename *getname_kernel(const char *); 2449extern struct filename *getname_kernel(const char *);
2430extern void putname(struct filename *name); 2450extern void putname(struct filename *name);
2431 2451
2432enum {
2433 FILE_CREATED = 1,
2434 FILE_OPENED = 2
2435};
2436extern int finish_open(struct file *file, struct dentry *dentry, 2452extern int finish_open(struct file *file, struct dentry *dentry,
2437 int (*open)(struct inode *, struct file *), 2453 int (*open)(struct inode *, struct file *));
2438 int *opened);
2439extern int finish_no_open(struct file *file, struct dentry *dentry); 2454extern int finish_no_open(struct file *file, struct dentry *dentry);
2440 2455
2441/* fs/ioctl.c */ 2456/* fs/ioctl.c */
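Under the reworked API, creation is reported through file->f_mode (FMODE_CREATED) instead of the removed FILE_CREATED/*opened mechanism, and finish_open() no longer takes an 'opened' argument. A rough ->atomic_open() skeleton under those assumptions; the lookup logic is elided and the function name is illustrative.

#include <linux/fs.h>

/* Illustrative only: report creation via f_mode, then finish the open
 * (a NULL callback falls back to the default ->open). */
static int example_atomic_open(struct inode *dir, struct dentry *dentry,
			       struct file *file, unsigned int open_flag,
			       umode_t create_mode)
{
	bool created = false;

	/* ... look up or create the object, setting 'created' ... */

	if (created)
		file->f_mode |= FMODE_CREATED;

	return finish_open(file, dentry, NULL);
}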
@@ -2623,8 +2638,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
2623 2638
2624extern bool filemap_range_has_page(struct address_space *, loff_t lstart, 2639extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
2625 loff_t lend); 2640 loff_t lend);
2626extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
2627 loff_t lend);
2628extern int filemap_write_and_wait(struct address_space *mapping); 2641extern int filemap_write_and_wait(struct address_space *mapping);
2629extern int filemap_write_and_wait_range(struct address_space *mapping, 2642extern int filemap_write_and_wait_range(struct address_space *mapping,
2630 loff_t lstart, loff_t lend); 2643 loff_t lstart, loff_t lend);
@@ -2919,6 +2932,7 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
2919static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; 2932static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
2920#endif 2933#endif
2921extern void unlock_new_inode(struct inode *); 2934extern void unlock_new_inode(struct inode *);
2935extern void discard_new_inode(struct inode *);
2922extern unsigned int get_next_ino(void); 2936extern unsigned int get_next_ino(void);
2923extern void evict_inodes(struct super_block *sb); 2937extern void evict_inodes(struct super_block *sb);
2924 2938
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 3efa3b861d44..941b11811f85 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -16,6 +16,7 @@
16#define __FSL_GUTS_H__ 16#define __FSL_GUTS_H__
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/io.h>
19 20
20/** 21/**
21 * Global Utility Registers. 22 * Global Utility Registers.
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b462d9ea8007..c1f003aadcce 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -11,9 +11,8 @@
11 11
12/* 12/*
13 * qoriq ptp registers 13 * qoriq ptp registers
14 * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
15 */ 14 */
16struct qoriq_ptp_registers { 15struct ctrl_regs {
17 u32 tmr_ctrl; /* Timer control register */ 16 u32 tmr_ctrl; /* Timer control register */
18 u32 tmr_tevent; /* Timestamp event register */ 17 u32 tmr_tevent; /* Timestamp event register */
19 u32 tmr_temask; /* Timer event mask register */ 18 u32 tmr_temask; /* Timer event mask register */
@@ -28,22 +27,47 @@ struct qoriq_ptp_registers {
28 u8 res1[4]; 27 u8 res1[4];
29 u32 tmroff_h; /* Timer offset high */ 28 u32 tmroff_h; /* Timer offset high */
30 u32 tmroff_l; /* Timer offset low */ 29 u32 tmroff_l; /* Timer offset low */
31 u8 res2[8]; 30};
31
32struct alarm_regs {
32 u32 tmr_alarm1_h; /* Timer alarm 1 high register */ 33 u32 tmr_alarm1_h; /* Timer alarm 1 high register */
33 u32 tmr_alarm1_l; /* Timer alarm 1 high register */ 34 u32 tmr_alarm1_l; /* Timer alarm 1 high register */
34 u32 tmr_alarm2_h; /* Timer alarm 2 high register */ 35 u32 tmr_alarm2_h; /* Timer alarm 2 high register */
35 u32 tmr_alarm2_l; /* Timer alarm 2 high register */ 36 u32 tmr_alarm2_l; /* Timer alarm 2 high register */
36 u8 res3[48]; 37};
38
39struct fiper_regs {
37 u32 tmr_fiper1; /* Timer fixed period interval */ 40 u32 tmr_fiper1; /* Timer fixed period interval */
38 u32 tmr_fiper2; /* Timer fixed period interval */ 41 u32 tmr_fiper2; /* Timer fixed period interval */
39 u32 tmr_fiper3; /* Timer fixed period interval */ 42 u32 tmr_fiper3; /* Timer fixed period interval */
40 u8 res4[20]; 43};
44
45struct etts_regs {
41 u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ 46 u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
42 u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ 47 u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
43 u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ 48 u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
44 u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ 49 u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
45}; 50};
46 51
52struct qoriq_ptp_registers {
53 struct ctrl_regs __iomem *ctrl_regs;
54 struct alarm_regs __iomem *alarm_regs;
55 struct fiper_regs __iomem *fiper_regs;
56 struct etts_regs __iomem *etts_regs;
57};
58
59/* Offset definitions for the four register groups */
60#define CTRL_REGS_OFFSET 0x0
61#define ALARM_REGS_OFFSET 0x40
62#define FIPER_REGS_OFFSET 0x80
63#define ETTS_REGS_OFFSET 0xa0
64
65#define FMAN_CTRL_REGS_OFFSET 0x80
66#define FMAN_ALARM_REGS_OFFSET 0xb8
67#define FMAN_FIPER_REGS_OFFSET 0xd0
68#define FMAN_ETTS_REGS_OFFSET 0xe0
69
70
47/* Bit definitions for the TMR_CTRL register */ 71/* Bit definitions for the TMR_CTRL register */
48#define ALM1P (1<<31) /* Alarm1 output polarity */ 72#define ALM1P (1<<31) /* Alarm1 output polarity */
49#define ALM2P (1<<30) /* Alarm2 output polarity */ 73#define ALM2P (1<<30) /* Alarm2 output polarity */
@@ -103,12 +127,16 @@ struct qoriq_ptp_registers {
103 127
104 128
105#define DRIVER "ptp_qoriq" 129#define DRIVER "ptp_qoriq"
106#define DEFAULT_CKSEL 1
107#define N_EXT_TS 2 130#define N_EXT_TS 2
108#define REG_SIZE sizeof(struct qoriq_ptp_registers) 131
132#define DEFAULT_CKSEL 1
133#define DEFAULT_TMR_PRSC 2
134#define DEFAULT_FIPER1_PERIOD 1000000000
135#define DEFAULT_FIPER2_PERIOD 100000
109 136
110struct qoriq_ptp { 137struct qoriq_ptp {
111 struct qoriq_ptp_registers __iomem *regs; 138 void __iomem *base;
139 struct qoriq_ptp_registers regs;
112 spinlock_t lock; /* protects regs */ 140 spinlock_t lock; /* protects regs */
113 struct ptp_clock *clock; 141 struct ptp_clock *clock;
114 struct ptp_clock_info caps; 142 struct ptp_clock_info caps;
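With the flat register block split into four independently located groups, a single driver can cover both the (e)TSEC and FMAN layouts by picking the matching offset set at probe time. A hedged sketch of that mapping, assuming qoriq_ptp->base has already been ioremapped; the function name is illustrative.

#include <linux/fsl/ptp_qoriq.h>

/* Illustrative only: point each register group at its offset within the
 * mapped block, selecting the FMAN layout when requested. */
static void example_map_ptp_regs(struct qoriq_ptp *qoriq_ptp, bool fman)
{
	void __iomem *base = qoriq_ptp->base;

	qoriq_ptp->regs.ctrl_regs  = base + (fman ? FMAN_CTRL_REGS_OFFSET  : CTRL_REGS_OFFSET);
	qoriq_ptp->regs.alarm_regs = base + (fman ? FMAN_ALARM_REGS_OFFSET : ALARM_REGS_OFFSET);
	qoriq_ptp->regs.fiper_regs = base + (fman ? FMAN_FIPER_REGS_OFFSET : FIPER_REGS_OFFSET);
	qoriq_ptp->regs.etts_regs  = base + (fman ? FMAN_ETTS_REGS_OFFSET  : ETTS_REGS_OFFSET);
}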
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8154f4920fcb..ebb77674be90 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
223 */ 223 */
224int register_ftrace_function(struct ftrace_ops *ops); 224int register_ftrace_function(struct ftrace_ops *ops);
225int unregister_ftrace_function(struct ftrace_ops *ops); 225int unregister_ftrace_function(struct ftrace_ops *ops);
226void clear_ftrace_function(void);
227 226
228extern void ftrace_stub(unsigned long a0, unsigned long a1, 227extern void ftrace_stub(unsigned long a0, unsigned long a1,
229 struct ftrace_ops *op, struct pt_regs *regs); 228 struct ftrace_ops *op, struct pt_regs *regs);
@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void)
239{ 238{
240 return 0; 239 return 0;
241} 240}
242static inline void clear_ftrace_function(void) { }
243static inline void ftrace_kill(void) { } 241static inline void ftrace_kill(void) { }
244static inline void ftrace_free_init_mem(void) { } 242static inline void ftrace_free_init_mem(void) { }
245static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } 243static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 4fe8f289b3f6..faebf0ca0686 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -45,7 +45,7 @@ struct fwnode_endpoint {
45struct fwnode_reference_args { 45struct fwnode_reference_args {
46 struct fwnode_handle *fwnode; 46 struct fwnode_handle *fwnode;
47 unsigned int nargs; 47 unsigned int nargs;
48 unsigned int args[NR_FWNODE_REFERENCE_ARGS]; 48 u64 args[NR_FWNODE_REFERENCE_ARGS];
49}; 49};
50 50
51/** 51/**
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6cb8a5789668..57864422a2c8 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -16,6 +16,7 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/percpu-refcount.h> 17#include <linux/percpu-refcount.h>
18#include <linux/uuid.h> 18#include <linux/uuid.h>
19#include <linux/blk_types.h>
19 20
20#ifdef CONFIG_BLOCK 21#ifdef CONFIG_BLOCK
21 22
@@ -82,10 +83,10 @@ struct partition {
82} __attribute__((packed)); 83} __attribute__((packed));
83 84
84struct disk_stats { 85struct disk_stats {
85 unsigned long sectors[2]; /* READs and WRITEs */ 86 unsigned long sectors[NR_STAT_GROUPS];
86 unsigned long ios[2]; 87 unsigned long ios[NR_STAT_GROUPS];
87 unsigned long merges[2]; 88 unsigned long merges[NR_STAT_GROUPS];
88 unsigned long ticks[2]; 89 unsigned long ticks[NR_STAT_GROUPS];
89 unsigned long io_ticks; 90 unsigned long io_ticks;
90 unsigned long time_in_queue; 91 unsigned long time_in_queue;
91}; 92};
@@ -353,6 +354,11 @@ static inline void free_part_stats(struct hd_struct *part)
353 354
354#endif /* CONFIG_SMP */ 355#endif /* CONFIG_SMP */
355 356
357#define part_stat_read_accum(part, field) \
358 (part_stat_read(part, field[STAT_READ]) + \
359 part_stat_read(part, field[STAT_WRITE]) + \
360 part_stat_read(part, field[STAT_DISCARD]))
361
356#define part_stat_add(cpu, part, field, addnd) do { \ 362#define part_stat_add(cpu, part, field, addnd) do { \
357 __part_stat_add((cpu), (part), field, addnd); \ 363 __part_stat_add((cpu), (part), field, addnd); \
358 if ((part)->partno) \ 364 if ((part)->partno) \
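With disk_stats indexed by NR_STAT_GROUPS, part_stat_read_accum() gives the sum over the read, write and discard groups in one step. A small usage sketch; the function name is made up.

#include <linux/genhd.h>

/* Illustrative only: total sectors across all stat groups, replacing the
 * old open-coded READ+WRITE sum. */
static unsigned long example_part_total_sectors(struct hd_struct *part)
{
	return part_stat_read_accum(part, sectors);
}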
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 91ed23468530..39745b8bdd65 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -14,7 +14,7 @@
14 14
15#include <linux/errno.h> 15#include <linux/errno.h>
16 16
17/* see Documentation/gpio/gpio-legacy.txt */ 17/* see Documentation/driver-api/gpio/legacy.rst */
18 18
19/* make these flag values available regardless of GPIO kconfig options */ 19/* make these flag values available regardless of GPIO kconfig options */
20#define GPIOF_DIR_OUT (0 << 0) 20#define GPIOF_DIR_OUT (0 << 0)
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
new file mode 100644
index 000000000000..1bfb3cdc86d0
--- /dev/null
+++ b/include/linux/gpio/aspeed.h
@@ -0,0 +1,15 @@
1#ifndef __GPIO_ASPEED_H
2#define __GPIO_ASPEED_H
3
4struct aspeed_gpio_copro_ops {
5 int (*request_access)(void *data);
6 int (*release_access)(void *data);
7};
8
9int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
10 u16 *vreg_offset, u16 *dreg_offset, u8 *bit);
11int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc);
12int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data);
13
14
15#endif /* __GPIO_ASPEED_H */
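A hedged sketch of how a coprocessor driver might use the new Aspeed hand-over interface: install the arbitration callbacks, then grab the line and learn the register coordinates the coprocessor must poke. The caller name is illustrative and error unwinding is elided.

#include <linux/gpio/consumer.h>
#include <linux/gpio/aspeed.h>

/* Illustrative only: register arbitration ops, then hand one line over
 * to the coprocessor and report its register/bit location. */
static int example_copro_handover(struct gpio_desc *desc,
				  const struct aspeed_gpio_copro_ops *ops,
				  void *data)
{
	u16 vreg, dreg;
	u8 bit;
	int ret;

	ret = aspeed_gpio_copro_set_ops(ops, data);
	if (ret)
		return ret;

	return aspeed_gpio_copro_grab_gpio(desc, &vreg, &dreg, &bit);
}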
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 243112c7fa7d..21ddbe440030 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -41,11 +41,8 @@ enum gpiod_flags {
41 GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT, 41 GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
42 GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | 42 GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
43 GPIOD_FLAGS_BIT_DIR_VAL, 43 GPIOD_FLAGS_BIT_DIR_VAL,
44 GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET | 44 GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN,
45 GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_OPEN_DRAIN, 45 GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN,
46 GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
47 GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL |
48 GPIOD_FLAGS_BIT_OPEN_DRAIN,
49}; 46};
50 47
51#ifdef CONFIG_GPIOLIB 48#ifdef CONFIG_GPIOLIB
@@ -145,6 +142,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
145int gpiod_cansleep(const struct gpio_desc *desc); 142int gpiod_cansleep(const struct gpio_desc *desc);
146 143
147int gpiod_to_irq(const struct gpio_desc *desc); 144int gpiod_to_irq(const struct gpio_desc *desc);
145void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
148 146
149/* Convert between the old gpio_ and new gpiod_ interfaces */ 147/* Convert between the old gpio_ and new gpiod_ interfaces */
150struct gpio_desc *gpio_to_desc(unsigned gpio); 148struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -467,6 +465,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
467 return -EINVAL; 465 return -EINVAL;
468} 466}
469 467
468static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
469{
470 /* GPIO can never have been requested */
471 WARN_ON(1);
472}
473
470static inline struct gpio_desc *gpio_to_desc(unsigned gpio) 474static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
471{ 475{
472 return ERR_PTR(-EINVAL); 476 return ERR_PTR(-EINVAL);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 5382b5183b7e..0ea328e71ec9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -201,6 +201,8 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
201 * @reg_set: output set register (out=high) for generic GPIO 201 * @reg_set: output set register (out=high) for generic GPIO
202 * @reg_clr: output clear register (out=low) for generic GPIO 202 * @reg_clr: output clear register (out=low) for generic GPIO
203 * @reg_dir: direction setting register for generic GPIO 203 * @reg_dir: direction setting register for generic GPIO
204 * @bgpio_dir_inverted: indicates that the direction register is inverted
205 * (gpiolib private state variable)
204 * @bgpio_bits: number of register bits used for a generic GPIO i.e. 206 * @bgpio_bits: number of register bits used for a generic GPIO i.e.
205 * <register width> * 8 207 * <register width> * 8
206 * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep 208 * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
@@ -267,6 +269,7 @@ struct gpio_chip {
267 void __iomem *reg_set; 269 void __iomem *reg_set;
268 void __iomem *reg_clr; 270 void __iomem *reg_clr;
269 void __iomem *reg_dir; 271 void __iomem *reg_dir;
272 bool bgpio_dir_inverted;
270 int bgpio_bits; 273 int bgpio_bits;
271 spinlock_t bgpio_lock; 274 spinlock_t bgpio_lock;
272 unsigned long bgpio_data; 275 unsigned long bgpio_data;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 41a3d5775394..773bcb1d4044 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -511,6 +511,7 @@ struct hid_output_fifo {
511#define HID_STAT_ADDED BIT(0) 511#define HID_STAT_ADDED BIT(0)
512#define HID_STAT_PARSED BIT(1) 512#define HID_STAT_PARSED BIT(1)
513#define HID_STAT_DUP_DETECTED BIT(2) 513#define HID_STAT_DUP_DETECTED BIT(2)
514#define HID_STAT_REPROBED BIT(3)
514 515
515struct hid_input { 516struct hid_input {
516 struct list_head list; 517 struct list_head list;
@@ -579,7 +580,7 @@ struct hid_device { /* device report descriptor */
579 bool battery_avoid_query; 580 bool battery_avoid_query;
580#endif 581#endif
581 582
582 unsigned int status; /* see STAT flags above */ 583 unsigned long status; /* see STAT flags above */
583 unsigned claimed; /* Claimed by hidinput, hiddev? */ 584 unsigned claimed; /* Claimed by hidinput, hiddev? */
584 unsigned quirks; /* Various quirks the device can pull on us */ 585 unsigned quirks; /* Various quirks the device can pull on us */
585 bool io_started; /* If IO has started */ 586 bool io_started; /* If IO has started */
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index e5fd2707b6df..9493d4a388db 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -93,6 +93,7 @@ enum hwmon_temp_attributes {
93#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) 93#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
94#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) 94#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
95#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) 95#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
96#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm)
96#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm) 97#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
97#define HWMON_T_FAULT BIT(hwmon_temp_fault) 98#define HWMON_T_FAULT BIT(hwmon_temp_fault)
98#define HWMON_T_OFFSET BIT(hwmon_temp_offset) 99#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
@@ -187,12 +188,16 @@ enum hwmon_power_attributes {
187 hwmon_power_cap_hyst, 188 hwmon_power_cap_hyst,
188 hwmon_power_cap_max, 189 hwmon_power_cap_max,
189 hwmon_power_cap_min, 190 hwmon_power_cap_min,
191 hwmon_power_min,
190 hwmon_power_max, 192 hwmon_power_max,
191 hwmon_power_crit, 193 hwmon_power_crit,
194 hwmon_power_lcrit,
192 hwmon_power_label, 195 hwmon_power_label,
193 hwmon_power_alarm, 196 hwmon_power_alarm,
194 hwmon_power_cap_alarm, 197 hwmon_power_cap_alarm,
198 hwmon_power_min_alarm,
195 hwmon_power_max_alarm, 199 hwmon_power_max_alarm,
200 hwmon_power_lcrit_alarm,
196 hwmon_power_crit_alarm, 201 hwmon_power_crit_alarm,
197}; 202};
198 203
@@ -213,12 +218,16 @@ enum hwmon_power_attributes {
213#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst) 218#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
214#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max) 219#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
215#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min) 220#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
221#define HWMON_P_MIN BIT(hwmon_power_min)
216#define HWMON_P_MAX BIT(hwmon_power_max) 222#define HWMON_P_MAX BIT(hwmon_power_max)
223#define HWMON_P_LCRIT BIT(hwmon_power_lcrit)
217#define HWMON_P_CRIT BIT(hwmon_power_crit) 224#define HWMON_P_CRIT BIT(hwmon_power_crit)
218#define HWMON_P_LABEL BIT(hwmon_power_label) 225#define HWMON_P_LABEL BIT(hwmon_power_label)
219#define HWMON_P_ALARM BIT(hwmon_power_alarm) 226#define HWMON_P_ALARM BIT(hwmon_power_alarm)
220#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm) 227#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
 228#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
221#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm) 229#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
230#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
222#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) 231#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
223 232
224enum hwmon_energy_attributes { 233enum hwmon_energy_attributes {
@@ -389,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev,
389void hwmon_device_unregister(struct device *dev); 398void hwmon_device_unregister(struct device *dev);
390void devm_hwmon_device_unregister(struct device *dev); 399void devm_hwmon_device_unregister(struct device *dev);
391 400
401/**
402 * hwmon_is_bad_char - Is the char invalid in a hwmon name
403 * @ch: the char to be considered
404 *
405 * hwmon_is_bad_char() can be used to determine if the given character
406 * may not be used in a hwmon name.
407 *
408 * Returns true if the char is invalid, false otherwise.
409 */
410static inline bool hwmon_is_bad_char(const char ch)
411{
412 switch (ch) {
413 case '-':
414 case '*':
415 case ' ':
416 case '\t':
417 case '\n':
418 return true;
419 default:
420 return false;
421 }
422}
423
392#endif 424#endif
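hwmon_is_bad_char() lets callers scrub a device name before registration instead of open-coding the character list. A short sketch under that assumption; the function name is made up.

#include <linux/hwmon.h>

/* Illustrative only: replace characters hwmon rejects with '_' so the
 * name can be passed to hwmon registration. */
static void example_fixup_hwmon_name(char *name)
{
	char *p;

	for (p = name; *p; p++)
		if (hwmon_is_bad_char(*p))
			*p = '_';
}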
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 254cd34eeae2..465afb092fa7 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -140,9 +140,14 @@ extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
140 and probably just as fast. 140 and probably just as fast.
141 Note that we use i2c_adapter here, because you do not need a specific 141 Note that we use i2c_adapter here, because you do not need a specific
142 smbus adapter to call this function. */ 142 smbus adapter to call this function. */
143extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, 143s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
144 unsigned short flags, char read_write, u8 command, 144 unsigned short flags, char read_write, u8 command,
145 int size, union i2c_smbus_data *data); 145 int protocol, union i2c_smbus_data *data);
146
147/* Unlocked flavor */
148s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
149 unsigned short flags, char read_write, u8 command,
150 int protocol, union i2c_smbus_data *data);
146 151
147/* Now follow the 'nice' access routines. These also document the calling 152/* Now follow the 'nice' access routines. These also document the calling
148 conventions of i2c_smbus_xfer. */ 153 conventions of i2c_smbus_xfer. */
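The unlocked __i2c_smbus_xfer() lets a caller hold the adapter lock across several SMBus accesses so nothing can interleave between them. A hedged sketch of a paired read under that assumption; the register addresses 0x00/0x01 and the function name are made up.

#include <linux/i2c.h>

/* Illustrative only: read two registers atomically with respect to other
 * users of the same adapter segment. */
static s32 example_paired_read(struct i2c_client *client)
{
	union i2c_smbus_data lo, hi;
	s32 ret;

	i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
	ret = __i2c_smbus_xfer(client->adapter, client->addr, client->flags,
			       I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &lo);
	if (ret >= 0)
		ret = __i2c_smbus_xfer(client->adapter, client->addr,
				       client->flags, I2C_SMBUS_READ, 0x01,
				       I2C_SMBUS_BYTE_DATA, &hi);
	i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);

	return ret < 0 ? ret : (hi.byte << 8) | lo.byte;
}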
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
new file mode 100644
index 000000000000..bdc0293fb6cb
--- /dev/null
+++ b/include/linux/idle_inject.h
@@ -0,0 +1,29 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2018 Linaro Ltd
4 *
5 * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
6 *
7 */
8#ifndef __IDLE_INJECT_H__
9#define __IDLE_INJECT_H__
10
11/* private idle injection device structure */
12struct idle_inject_device;
13
14struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
15
16void idle_inject_unregister(struct idle_inject_device *ii_dev);
17
18int idle_inject_start(struct idle_inject_device *ii_dev);
19
20void idle_inject_stop(struct idle_inject_device *ii_dev);
21
22void idle_inject_set_duration(struct idle_inject_device *ii_dev,
23 unsigned int run_duration_ms,
24 unsigned int idle_duration_ms);
25
26void idle_inject_get_duration(struct idle_inject_device *ii_dev,
27 unsigned int *run_duration_ms,
28 unsigned int *idle_duration_ms);
29#endif /* __IDLE_INJECT_H__ */
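A hedged consumer sketch for the new idle-injection interface: register a device for a set of CPUs, configure a run/idle window, and start injection. The durations and function name are illustrative only.

#include <linux/cpumask.h>
#include <linux/idle_inject.h>

/* Illustrative only: inject roughly 25% idle time (10 ms idle in every
 * 40 ms window) on the given CPUs. */
static struct idle_inject_device *example_start_injection(struct cpumask *cpus)
{
	struct idle_inject_device *ii_dev;

	ii_dev = idle_inject_register(cpus);
	if (!ii_dev)
		return NULL;

	idle_inject_set_duration(ii_dev, 30, 10);	/* run_ms, idle_ms */
	if (idle_inject_start(ii_dev)) {
		idle_inject_unregister(ii_dev);
		return NULL;
	}

	return ii_dev;
}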
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 8fe7e4306816..9c03a7d5e400 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation {
1433#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 1433#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
1434 1434
1435/* 1435/*
1436 * A-PMDU buffer sizes 1436 * A-MPDU buffer sizes
1437 * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) 1437 * According to HT, size varies from 8 to 64 frames
1438 * HE adds the ability to have up to 256 frames.
1438 */ 1439 */
1439#define IEEE80211_MIN_AMPDU_BUF 0x8 1440#define IEEE80211_MIN_AMPDU_BUF 0x8
1440#define IEEE80211_MAX_AMPDU_BUF 0x40 1441#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
1442#define IEEE80211_MAX_AMPDU_BUF 0x100
1441 1443
1442 1444
1443/* Spatial Multiplexing Power Save Modes (for capability) */ 1445/* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1539,6 +1541,106 @@ struct ieee80211_vht_operation {
1539 __le16 basic_mcs_set; 1541 __le16 basic_mcs_set;
1540} __packed; 1542} __packed;
1541 1543
1544/**
1545 * struct ieee80211_he_cap_elem - HE capabilities element
1546 *
1547 * This structure is the "HE capabilities element" fixed fields as
1548 * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3
1549 */
1550struct ieee80211_he_cap_elem {
1551 u8 mac_cap_info[5];
1552 u8 phy_cap_info[9];
1553} __packed;
1554
1555#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
1556
1557/**
1558 * enum ieee80211_he_mcs_support - HE MCS support definitions
1559 * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
1560 * number of streams
1561 * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
1562 * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
1563 * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
1564 *
1565 * These definitions are used in each 2-bit subfield of the rx_mcs_*
1566 * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
1567 * both split into 8 subfields by number of streams. These values indicate
1568 * which MCSes are supported for the number of streams the value appears
1569 * for.
1570 */
1571enum ieee80211_he_mcs_support {
1572 IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
1573 IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
1574 IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
1575 IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
1576};
1577
1578/**
1579 * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
1580 *
1581 * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
1582 * described in P802.11ax_D2.0 section 9.4.2.237.4
1583 *
1584 * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
1585 * widths less than 80MHz.
1586 * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
1587 * widths less than 80MHz.
1588 * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
1589 * width 160MHz.
1590 * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
1591 * width 160MHz.
1592 * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
1593 * channel width 80p80MHz.
1594 * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
1595 * channel width 80p80MHz.
1596 */
1597struct ieee80211_he_mcs_nss_supp {
1598 __le16 rx_mcs_80;
1599 __le16 tx_mcs_80;
1600 __le16 rx_mcs_160;
1601 __le16 tx_mcs_160;
1602 __le16 rx_mcs_80p80;
1603 __le16 tx_mcs_80p80;
1604} __packed;
1605
1606/**
 1607 * struct ieee80211_he_operation - HE operation element
1608 *
1609 * This structure is the "HE operation element" fields as
1610 * described in P802.11ax_D2.0 section 9.4.2.238
1611 */
1612struct ieee80211_he_operation {
1613 __le32 he_oper_params;
1614 __le16 he_mcs_nss_set;
1615 /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */
1616 u8 optional[0];
1617} __packed;
1618
1619/**
1620 * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
1621 *
1622 * This structure is the "MU AC Parameter Record" fields as
1623 * described in P802.11ax_D2.0 section 9.4.2.240
1624 */
1625struct ieee80211_he_mu_edca_param_ac_rec {
1626 u8 aifsn;
1627 u8 ecw_min_max;
1628 u8 mu_edca_timer;
1629} __packed;
1630
1631/**
1632 * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
1633 *
1634 * This structure is the "MU EDCA Parameter Set element" fields as
1635 * described in P802.11ax_D2.0 section 9.4.2.240
1636 */
1637struct ieee80211_mu_edca_param_set {
1638 u8 mu_qos_info;
1639 struct ieee80211_he_mu_edca_param_ac_rec ac_be;
1640 struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
1641 struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
1642 struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
1643} __packed;
1542 1644
1543/* 802.11ac VHT Capabilities */ 1645/* 802.11ac VHT Capabilities */
1544#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 1646#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
@@ -1577,6 +1679,328 @@ struct ieee80211_vht_operation {
1577#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 1679#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
1578#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 1680#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
1579 1681
1682/* 802.11ax HE MAC capabilities */
1683#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
1684#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
1685#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
1686#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
1687#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
1688#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
1689#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
1690#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
1691#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
1692#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
1693#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
1694#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
1695#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
1696#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
1697#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
1698#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
1699#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
1700
1701#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
1702#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
1703#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
1704#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
1705#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
1706#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
1707#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
1708#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
1709#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
1710#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00
1711#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10
1712#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20
1713#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30
1714#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40
1715#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50
1716#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60
1717#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70
1718#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70
1719
1720/* Link adaptation is split between byte HE_MAC_CAP1 and
1721 * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
 1722 * is set, in which case the following values apply:
1723 * 0 = No feedback.
1724 * 1 = reserved.
1725 * 2 = Unsolicited feedback.
1726 * 3 = both
1727 */
1728#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
1729
1730#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
1731#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
1732#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04
1733#define IEEE80211_HE_MAC_CAP2_BSR 0x08
1734#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
1735#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
1736#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
1737#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
1738
1739#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
1740#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
1741#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
1742
 1743/* The maximum length of an A-MPDU is defined by the combination of the Maximum
 1744 * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
1745 * same field in the HE capabilities.
1746 */
1747#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00
1748#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08
1749#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10
1750#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18
1751#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18
1752#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20
1753#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
1754#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
1755
1756#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
1757#define IEEE80211_HE_MAC_CAP4_QTP 0x02
1758#define IEEE80211_HE_MAC_CAP4_BQR 0x04
1759#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08
1760#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
1761#define IEEE80211_HE_MAC_CAP4_OPS 0x20
1762#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
1763
1764/* 802.11ax HE PHY capabilities */
1765#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
1766#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
1767#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
1768#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
1769#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
1770#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
1771#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
1772#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
1773
1774#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
1775#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
1776#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
1777#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
1778#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
1779#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
1780#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
1781#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
1782/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
1783#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80
1784
1785#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01
1786#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
1787#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
1788#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
1789#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
1790#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
1791
1792/* Note that the meaning of UL MU below is different between an AP and a non-AP
1793 * sta, where in the AP case it indicates support for Rx and in the non-AP sta
1794 * case it indicates support for Tx.
1795 */
1796#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
1797#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
1798
1799#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
1800#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
1801#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
1802#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
1803#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
1804#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
1805#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
1806#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
1807#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
1808#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
1809#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
1810#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
1811#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
1812#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
1813#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40
1814#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
1815
1816#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
1817#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
1818
1819/* Minimal allowed value of Max STS under 80MHz is 3 */
1820#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
1821#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
1822#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
1823#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
1824#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
1825#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
1826
1827/* Minimal allowed value of Max STS above 80MHz is 3 */
1828#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
1829#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
1830#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
1831#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
1832#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
1833#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
1834
1835#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
1836#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
1837#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
1838#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
1839#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
1840#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
1841#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
1842#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
1843#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
1844
1845#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
1846#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
1847#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
1848#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
1849#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
1850#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
1851#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
1852#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
1853#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
1854
1855#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
1856#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
1857
1858#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
1859#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
1860#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04
1861#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08
1862#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
1863#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
1864#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
1865#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
1866
1867#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01
1868#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02
1869#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
1870#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
1871#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
1872#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
1873#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
1874#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
1875#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
1876#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
1877#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
1878#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
1879#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
1880
1881#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
1882#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
1883#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
1884#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
1885#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
1886#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20
1887
1888/* 802.11ax HE TX/RX MCS NSS Support */
1889#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
1890#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
1891#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
1892#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
1893#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
1894
1895/* TX/RX HE MCS Support field Highest MCS subfield encoding */
1896enum ieee80211_he_highest_mcs_supported_subfield_enc {
1897 HIGHEST_MCS_SUPPORTED_MCS7 = 0,
1898 HIGHEST_MCS_SUPPORTED_MCS8,
1899 HIGHEST_MCS_SUPPORTED_MCS9,
1900 HIGHEST_MCS_SUPPORTED_MCS10,
1901 HIGHEST_MCS_SUPPORTED_MCS11,
1902};
1903
1904/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
1905static inline u8
1906ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
1907{
1908 u8 count = 4;
1909
1910 if (he_cap->phy_cap_info[0] &
1911 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
1912 count += 4;
1913
1914 if (he_cap->phy_cap_info[0] &
1915 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
1916 count += 4;
1917
1918 return count;
1919}
1920
1921/* 802.11ax HE PPE Thresholds */
1922#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
1923#define IEEE80211_PPE_THRES_NSS_POS (0)
1924#define IEEE80211_PPE_THRES_NSS_MASK (7)
1925#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
1926 (BIT(5) | BIT(6))
1927#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
1928#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
1929#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
1930
1931/*
1932 * Calculate 802.11ax HE capabilities IE PPE field size
1933 * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
1934 */
1935static inline u8
1936ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
1937{
1938 u8 n;
1939
1940 if ((phy_cap_info[6] &
1941 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
1942 return 0;
1943
1944 n = hweight8(ppe_thres_hdr &
1945 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
1946 n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
1947 IEEE80211_PPE_THRES_NSS_POS));
1948
1949 /*
1950 * Each pair is 6 bits, and we need to add the 7 "header" bits to the
1951 * total size.
1952 */
1953 n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
1954 n = DIV_ROUND_UP(n, 8);
1955
1956 return n;
1957}
1958
1959/* HE Operation defines */
1960#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
1961#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
1962#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
1963#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
1964#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
1965#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
1966#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000
1967#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000
1968#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
1969#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
1970#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
1971
1972/*
1973 * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
 1974 * @he_oper_ie: byte data of the HE Operations IE, starting from the byte
1975 * after the ext ID byte. It is assumed that he_oper_ie has at least
1976 * sizeof(struct ieee80211_he_operation) bytes, checked already in
1977 * ieee802_11_parse_elems_crc()
1978 * @return the actual size of the IE data (not including header), or 0 on error
1979 */
1980static inline u8
1981ieee80211_he_oper_size(const u8 *he_oper_ie)
1982{
1983 struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
1984 u8 oper_len = sizeof(struct ieee80211_he_operation);
1985 u32 he_oper_params;
1986
1987 /* Make sure the input is not NULL */
1988 if (!he_oper_ie)
1989 return 0;
1990
1991 /* Calc required length */
1992 he_oper_params = le32_to_cpu(he_oper->he_oper_params);
1993 if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
1994 oper_len += 3;
1995 if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
1996 oper_len++;
1997
1998 /* Add the first byte (extension ID) to the total length */
1999 oper_len++;
2000
2001 return oper_len;
2002}
2003
1580/* Authentication algorithms */ 2004/* Authentication algorithms */
1581#define WLAN_AUTH_OPEN 0 2005#define WLAN_AUTH_OPEN 0
1582#define WLAN_AUTH_SHARED_KEY 1 2006#define WLAN_AUTH_SHARED_KEY 1
@@ -1992,6 +2416,11 @@ enum ieee80211_eid_ext {
1992 WLAN_EID_EXT_FILS_WRAPPED_DATA = 8, 2416 WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
1993 WLAN_EID_EXT_FILS_PUBLIC_KEY = 12, 2417 WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
1994 WLAN_EID_EXT_FILS_NONCE = 13, 2418 WLAN_EID_EXT_FILS_NONCE = 13,
2419 WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
2420 WLAN_EID_EXT_HE_CAPABILITY = 35,
2421 WLAN_EID_EXT_HE_OPERATION = 36,
2422 WLAN_EID_EXT_UORA = 37,
2423 WLAN_EID_EXT_HE_MU_EDCA = 38,
1995}; 2424};
1996 2425
1997/* Action category code */ 2426/* Action category code */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 7843b98e1c6e..c20c7e197d07 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
105 105
106static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) 106static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
107{ 107{
108 return -1; 108 return -EINVAL;
109} 109}
110 110
111static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, 111static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
112 struct bridge_vlan_info *p_vinfo) 112 struct bridge_vlan_info *p_vinfo)
113{ 113{
114 return -1; 114 return -EINVAL;
115} 115}
116#endif 116#endif
117 117
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index d95cae09dea0..ac42da56f7a2 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -74,6 +74,11 @@ struct team_port {
74 long mode_priv[0]; 74 long mode_priv[0];
75}; 75};
76 76
77static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
78{
79 return rcu_dereference(dev->rx_handler_data);
80}
81
77static inline bool team_port_enabled(struct team_port *port) 82static inline bool team_port_enabled(struct team_port *port)
78{ 83{
79 return port->index != -1; 84 return port->index != -1;
@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port)
84 return port->linkup && team_port_enabled(port); 89 return port->linkup && team_port_enabled(port);
85} 90}
86 91
92static inline bool team_port_dev_txable(const struct net_device *port_dev)
93{
94 struct team_port *port;
95 bool txable;
96
97 rcu_read_lock();
98 port = team_port_get_rcu(port_dev);
99 txable = port ? team_port_txable(port) : false;
100 rcu_read_unlock();
101
102 return txable;
103}
104
87#ifdef CONFIG_NET_POLL_CONTROLLER 105#ifdef CONFIG_NET_POLL_CONTROLLER
88static inline void team_netpoll_send_skb(struct team_port *port, 106static inline void team_netpoll_send_skb(struct team_port *port,
89 struct sk_buff *skb) 107 struct sk_buff *skb)
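team_port_dev_txable() lets code that only holds the slave netdev ask whether team would transmit on it, without reaching into struct team_port. A hedged sketch of a caller under that assumption; the function name is made up.

#include <linux/if_team.h>
#include <linux/netdevice.h>

/* Illustrative only: treat non-team ports as transmit-capable, and defer
 * to team's own enabled/linkup state otherwise. */
static bool example_slave_usable(struct net_device *slave)
{
	if (netif_is_team_port(slave))
		return team_port_dev_txable(slave);

	return true;
}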
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f8231854b5d6..119f53941c12 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -109,6 +109,8 @@ struct ip_mc_list {
109extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); 109extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
110extern int igmp_rcv(struct sk_buff *); 110extern int igmp_rcv(struct sk_buff *);
111extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); 111extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
112extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
113 unsigned int mode);
112extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); 114extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
113extern void ip_mc_drop_socket(struct sock *sk); 115extern void ip_mc_drop_socket(struct sock *sk);
114extern int ip_mc_source(int add, int omode, struct sock *sk, 116extern int ip_mc_source(int add, int omode, struct sock *sk,
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 767467d886de..67c75372b691 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
141 char __user *user_buffer); 141 char __user *user_buffer);
142size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); 142size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
143int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); 143int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
144int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); 144int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
145int iio_dma_buffer_request_update(struct iio_buffer *buffer); 145int iio_dma_buffer_request_update(struct iio_buffer *buffer);
146 146
147int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, 147int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e4647e0eb60..97914a2833d1 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,14 +11,16 @@
11#define _LINUX_IMA_H 11#define _LINUX_IMA_H
12 12
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/security.h>
14#include <linux/kexec.h> 15#include <linux/kexec.h>
15struct linux_binprm; 16struct linux_binprm;
16 17
17#ifdef CONFIG_IMA 18#ifdef CONFIG_IMA
18extern int ima_bprm_check(struct linux_binprm *bprm); 19extern int ima_bprm_check(struct linux_binprm *bprm);
19extern int ima_file_check(struct file *file, int mask, int opened); 20extern int ima_file_check(struct file *file, int mask);
20extern void ima_file_free(struct file *file); 21extern void ima_file_free(struct file *file);
21extern int ima_file_mmap(struct file *file, unsigned long prot); 22extern int ima_file_mmap(struct file *file, unsigned long prot);
23extern int ima_load_data(enum kernel_load_data_id id);
22extern int ima_read_file(struct file *file, enum kernel_read_file_id id); 24extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
23extern int ima_post_read_file(struct file *file, void *buf, loff_t size, 25extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
24 enum kernel_read_file_id id); 26 enum kernel_read_file_id id);
@@ -34,7 +36,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
34 return 0; 36 return 0;
35} 37}
36 38
37static inline int ima_file_check(struct file *file, int mask, int opened) 39static inline int ima_file_check(struct file *file, int mask)
38{ 40{
39 return 0; 41 return 0;
40} 42}
@@ -49,6 +51,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
49 return 0; 51 return 0;
50} 52}
51 53
54static inline int ima_load_data(enum kernel_load_data_id id)
55{
56 return 0;
57}
58
52static inline int ima_read_file(struct file *file, enum kernel_read_file_id id) 59static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
53{ 60{
54 return 0; 61 return 0;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 27650f1bff3d..c759d1cbcedd 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -93,6 +93,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 
 #define IN_DEV_FORWARD(in_dev)		IN_DEV_CONF_GET((in_dev), FORWARDING)
 #define IN_DEV_MFORWARD(in_dev)	IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
+#define IN_DEV_BFORWARD(in_dev)	IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
 #define IN_DEV_RPFILTER(in_dev)	IN_DEV_MAXCONF((in_dev), RP_FILTER)
 #define IN_DEV_SRC_VMARK(in_dev)	IN_DEV_ORCONF((in_dev), SRC_VMARK)
 #define IN_DEV_SOURCE_ROUTE(in_dev)	IN_DEV_ANDCONF((in_dev), \
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index d7188de4db96..3f4bf60b0bb5 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
 	return axis == ABS_MT_SLOT || input_is_mt_value(axis);
 }
 
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
 				unsigned int tool_type, bool active);
 
 void input_mt_report_finger_count(struct input_dev *dev, int count);
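input_mt_report_slot_state() now reports whether the slot carries a live contact, so a driver can skip per-contact coordinates for released slots. A minimal sketch (the reporting helper name is illustrative; the MT calls are the existing API):

static void example_report_contact(struct input_dev *dev, int slot,
				   bool active, int x, int y)
{
	input_mt_slot(dev, slot);
	/* Returns true only when the contact is active in this slot. */
	if (input_mt_report_slot_state(dev, MT_TOOL_FINGER, active)) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
}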
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 858d3f4a2241..54c853ec2fd1 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -44,4 +44,17 @@ static inline void integrity_load_keys(void)
 }
 #endif /* CONFIG_INTEGRITY */
 
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+
+extern int integrity_kernel_module_request(char *kmod_name);
+
+#else
+
+static inline int integrity_kernel_module_request(char *kmod_name)
+{
+	return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+
 #endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1df940196ab2..ef169d67df92 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -121,6 +121,7 @@
 #define ecap_srs(e)		((e >> 31) & 0x1)
 #define ecap_ers(e)		((e >> 30) & 0x1)
 #define ecap_prs(e)		((e >> 29) & 0x1)
+#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
 #define ecap_dis(e)		((e >> 27) & 0x1)
 #define ecap_nest(e)		((e >> 26) & 0x1)
 #define ecap_mts(e)		((e >> 25) & 0x1)
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index a044a824da85..3555d54bf79a 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -2,6 +2,9 @@
 #ifndef LINUX_IOMAP_H
 #define LINUX_IOMAP_H 1
 
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/mm.h>
 #include <linux/types.h>
 
 struct address_space;
@@ -9,6 +12,7 @@ struct fiemap_extent_info;
 struct inode;
 struct iov_iter;
 struct kiocb;
+struct page;
 struct vm_area_struct;
 struct vm_fault;
 
@@ -29,6 +33,7 @@ struct vm_fault;
  */
 #define IOMAP_F_NEW		0x01	/* blocks have been newly allocated */
 #define IOMAP_F_DIRTY		0x02	/* uncommitted metadata */
+#define IOMAP_F_BUFFER_HEAD	0x04	/* file system requires buffer heads */
 
 /*
  * Flags that only need to be reported for IOMAP_REPORT requests:
@@ -55,6 +60,16 @@ struct iomap {
 	u16			flags;	/* flags for mapping */
 	struct block_device	*bdev;	/* block device for I/O */
 	struct dax_device	*dax_dev; /* dax_dev for dax operations */
+	void			*inline_data;
+	void			*private; /* filesystem private */
+
+	/*
+	 * Called when finished processing a page in the mapping returned in
+	 * this iomap.  At least for now this is only supported in the buffered
+	 * write path.
+	 */
+	void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+			struct page *page, struct iomap *iomap);
 };
 
 /*
@@ -86,8 +101,40 @@ struct iomap_ops {
 		ssize_t written, unsigned flags, struct iomap *iomap);
 };
 
+/*
+ * Structure allocate for each page when block size < PAGE_SIZE to track
+ * sub-page uptodate status and I/O completions.
+ */
+struct iomap_page {
+	atomic_t		read_count;
+	atomic_t		write_count;
+	DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+};
+
+static inline struct iomap_page *to_iomap_page(struct page *page)
+{
+	if (page_has_private(page))
+		return (struct iomap_page *)page_private(page);
+	return NULL;
+}
+
 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
 		const struct iomap_ops *ops);
+int iomap_readpage(struct page *page, const struct iomap_ops *ops);
+int iomap_readpages(struct address_space *mapping, struct list_head *pages,
+		unsigned nr_pages, const struct iomap_ops *ops);
+int iomap_set_page_dirty(struct page *page);
+int iomap_is_partially_uptodate(struct page *page, unsigned long from,
+		unsigned long count);
+int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+void iomap_invalidatepage(struct page *page, unsigned int offset,
+		unsigned int len);
+#ifdef CONFIG_MIGRATION
+int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+		struct page *page, enum migrate_mode mode);
+#else
+#define iomap_migrate_page NULL
+#endif
 int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
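With the per-page tracking above, a filesystem can ask whether a sub-block of a page is already uptodate. A minimal sketch assuming block-index-based access into the uptodate bitmap (the helper name is illustrative):

static bool example_block_uptodate(struct page *page, unsigned int block)
{
	struct iomap_page *iop = to_iomap_page(page);

	/* Without an attached iomap_page the page is tracked as one unit. */
	if (!iop)
		return PageUptodate(page);
	return test_bit(block, iop->uptodate);
}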
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 6cc2df7f7ac9..e1c9eea6015b 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -4,7 +4,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/uidgid.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 #include <uapi/linux/ipc.h>
 #include <linux/refcount.h>
 
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index b5630c8eb2f3..6cea726612b7 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -9,7 +9,7 @@
 #include <linux/nsproxy.h>
 #include <linux/ns_common.h>
 #include <linux/refcount.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
 
 struct user_namespace;
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4bd2f34947f4..201de12a9957 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -503,6 +503,7 @@ struct irq_chip {
  * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:	Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI	Chip can provide two doorbells for Level MSIs
  */
 enum {
 	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index cbb872c1b607..9d2ea3e907d0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,6 +73,7 @@
 #define GICD_TYPER_MBIS			(1U << 16)
 
 #define GICD_TYPER_ID_BITS(typer)	((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer)	((((typer) >> 11) & 0x1f) + 1)
 #define GICD_TYPER_IRQS(typer)		((((typer) & 0x1f) + 1) * 32)
 
 #define GICD_IROUTER_SPI_MODE_ONE	(0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
 		phys_addr_t	phys_base;
 	} __percpu		*rdist;
 	struct page		*prop_page;
-	int			id_bits;
 	u64			flags;
+	u32			gicd_typer;
 	bool			has_vlpis;
 	bool			has_direct_lpi;
 };
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 25b33b664537..dd1e40ddac7d 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
 	return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-	return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b46b541c67c4..1a0b6f17a5d6 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -299,12 +299,18 @@ struct static_key_false {
 #define DEFINE_STATIC_KEY_TRUE(name)	\
 	struct static_key_true name = STATIC_KEY_TRUE_INIT
 
+#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
+	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
+
 #define DECLARE_STATIC_KEY_TRUE(name)	\
 	extern struct static_key_true name
 
 #define DEFINE_STATIC_KEY_FALSE(name)	\
 	struct static_key_false name = STATIC_KEY_FALSE_INIT
 
+#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
+	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
+
 #define DECLARE_STATIC_KEY_FALSE(name)	\
 	extern struct static_key_false name
 
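The _RO variants place the key in __ro_after_init, so it can only be flipped during early boot before rodata is sealed. A sketch of the intended pattern (the feature probe is hypothetical):

DEFINE_STATIC_KEY_FALSE_RO(example_fast_path_key);

static int __init example_setup(void)
{
	if (example_hw_supports_fast_path())	/* hypothetical probe */
		static_branch_enable(&example_fast_path_key);
	return 0;
}
early_initcall(example_setup);

static void example_hot_path(void)
{
	if (static_branch_unlikely(&example_fast_path_key)) {
		/* rarely enabled fast path */
	}
}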
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d23123238534..941dc0a5a877 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -666,7 +666,7 @@ do { \
  * your code. (Extra memory is used for special buffers that are
  * allocated when trace_printk() is used.)
  *
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
  * The trace_puts() will suffice. But how can we take advantage of
  * using trace_puts() when trace_printk() has only one argument?
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index ab25c8b6d9e3..814643f7ee52 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -15,6 +15,7 @@
15#include <linux/lockdep.h> 15#include <linux/lockdep.h>
16#include <linux/rbtree.h> 16#include <linux/rbtree.h>
17#include <linux/atomic.h> 17#include <linux/atomic.h>
18#include <linux/uidgid.h>
18#include <linux/wait.h> 19#include <linux/wait.h>
19 20
20struct file; 21struct file;
@@ -325,12 +326,14 @@ void kernfs_destroy_root(struct kernfs_root *root);
325 326
326struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, 327struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
327 const char *name, umode_t mode, 328 const char *name, umode_t mode,
329 kuid_t uid, kgid_t gid,
328 void *priv, const void *ns); 330 void *priv, const void *ns);
329struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, 331struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
330 const char *name); 332 const char *name);
331struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, 333struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
332 const char *name, 334 const char *name, umode_t mode,
333 umode_t mode, loff_t size, 335 kuid_t uid, kgid_t gid,
336 loff_t size,
334 const struct kernfs_ops *ops, 337 const struct kernfs_ops *ops,
335 void *priv, const void *ns, 338 void *priv, const void *ns,
336 struct lock_class_key *key); 339 struct lock_class_key *key);
@@ -415,12 +418,14 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { }
415 418
416static inline struct kernfs_node * 419static inline struct kernfs_node *
417kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, 420kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
418 umode_t mode, void *priv, const void *ns) 421 umode_t mode, kuid_t uid, kgid_t gid,
422 void *priv, const void *ns)
419{ return ERR_PTR(-ENOSYS); } 423{ return ERR_PTR(-ENOSYS); }
420 424
421static inline struct kernfs_node * 425static inline struct kernfs_node *
422__kernfs_create_file(struct kernfs_node *parent, const char *name, 426__kernfs_create_file(struct kernfs_node *parent, const char *name,
423 umode_t mode, loff_t size, const struct kernfs_ops *ops, 427 umode_t mode, kuid_t uid, kgid_t gid,
428 loff_t size, const struct kernfs_ops *ops,
424 void *priv, const void *ns, struct lock_class_key *key) 429 void *priv, const void *ns, struct lock_class_key *key)
425{ return ERR_PTR(-ENOSYS); } 430{ return ERR_PTR(-ENOSYS); }
426 431
@@ -498,12 +503,15 @@ static inline struct kernfs_node *
498kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode, 503kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
499 void *priv) 504 void *priv)
500{ 505{
501 return kernfs_create_dir_ns(parent, name, mode, priv, NULL); 506 return kernfs_create_dir_ns(parent, name, mode,
507 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
508 priv, NULL);
502} 509}
503 510
504static inline struct kernfs_node * 511static inline struct kernfs_node *
505kernfs_create_file_ns(struct kernfs_node *parent, const char *name, 512kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
506 umode_t mode, loff_t size, const struct kernfs_ops *ops, 513 umode_t mode, kuid_t uid, kgid_t gid,
514 loff_t size, const struct kernfs_ops *ops,
507 void *priv, const void *ns) 515 void *priv, const void *ns)
508{ 516{
509 struct lock_class_key *key = NULL; 517 struct lock_class_key *key = NULL;
@@ -511,15 +519,17 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
511#ifdef CONFIG_DEBUG_LOCK_ALLOC 519#ifdef CONFIG_DEBUG_LOCK_ALLOC
512 key = (struct lock_class_key *)&ops->lockdep_key; 520 key = (struct lock_class_key *)&ops->lockdep_key;
513#endif 521#endif
514 return __kernfs_create_file(parent, name, mode, size, ops, priv, ns, 522 return __kernfs_create_file(parent, name, mode, uid, gid,
515 key); 523 size, ops, priv, ns, key);
516} 524}
517 525
518static inline struct kernfs_node * 526static inline struct kernfs_node *
519kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode, 527kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
520 loff_t size, const struct kernfs_ops *ops, void *priv) 528 loff_t size, const struct kernfs_ops *ops, void *priv)
521{ 529{
522 return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL); 530 return kernfs_create_file_ns(parent, name, mode,
531 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
532 size, ops, priv, NULL);
523} 533}
524 534
525static inline int kernfs_remove_by_name(struct kernfs_node *parent, 535static inline int kernfs_remove_by_name(struct kernfs_node *parent,
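Callers that want non-root ownership now pass it at creation time rather than chowning the node afterwards. A minimal sketch (the ops structure and attribute name are illustrative only):

static const struct kernfs_ops example_kf_ops;	/* hypothetical file ops */

static struct kernfs_node *example_create_attr(struct kernfs_node *parent,
					       kuid_t uid, kgid_t gid)
{
	return kernfs_create_file_ns(parent, "example_attr", 0644,
				     uid, gid, 0, &example_kf_ops,
				     NULL, NULL);
}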
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 7f6f93c3df9c..b49ff230beba 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -26,6 +26,7 @@
 #include <linux/wait.h>
 #include <linux/atomic.h>
 #include <linux/workqueue.h>
+#include <linux/uidgid.h>
 
 #define UEVENT_HELPER_PATH_LEN		256
 #define UEVENT_NUM_ENVP			32	/* number of env pointers */
@@ -114,6 +115,8 @@ extern struct kobject * __must_check kobject_get_unless_zero(
 extern void kobject_put(struct kobject *kobj);
 
 extern const void *kobject_namespace(struct kobject *kobj);
+extern void kobject_get_ownership(struct kobject *kobj,
+				  kuid_t *uid, kgid_t *gid);
 extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
 
 struct kobj_type {
@@ -122,6 +125,7 @@ struct kobj_type {
 	struct attribute **default_attrs;
 	const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
 	const void *(*namespace)(struct kobject *kobj);
+	void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
 };
 
 struct kobj_uevent_env {
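A kobj_type can now report who should own its sysfs entries; a common choice is to inherit ownership from the parent object. A minimal sketch (names are illustrative):

static void example_get_ownership(struct kobject *kobj,
				  kuid_t *uid, kgid_t *gid)
{
	/* Default to the parent's owner when one exists. */
	if (kobj->parent)
		kobject_get_ownership(kobj->parent, uid, gid);
}

static struct kobj_type example_ktype = {
	.get_ownership	= example_get_ownership,
};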
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9440a2fc8893..e909413e4e38 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -63,7 +63,6 @@ struct pt_regs;
63struct kretprobe; 63struct kretprobe;
64struct kretprobe_instance; 64struct kretprobe_instance;
65typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *); 65typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
66typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
67typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *, 66typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
68 unsigned long flags); 67 unsigned long flags);
69typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *, 68typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
@@ -101,12 +100,6 @@ struct kprobe {
101 */ 100 */
102 kprobe_fault_handler_t fault_handler; 101 kprobe_fault_handler_t fault_handler;
103 102
104 /*
105 * ... called if breakpoint trap occurs in probe handler.
106 * Return 1 if it handled break, otherwise kernel will see it.
107 */
108 kprobe_break_handler_t break_handler;
109
110 /* Saved opcode (which has been replaced with breakpoint) */ 103 /* Saved opcode (which has been replaced with breakpoint) */
111 kprobe_opcode_t opcode; 104 kprobe_opcode_t opcode;
112 105
@@ -155,24 +148,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
155} 148}
156 149
157/* 150/*
158 * Special probe type that uses setjmp-longjmp type tricks to resume
159 * execution at a specified entry with a matching prototype corresponding
160 * to the probed function - a trick to enable arguments to become
161 * accessible seamlessly by probe handling logic.
162 * Note:
163 * Because of the way compilers allocate stack space for local variables
164 * etc upfront, regardless of sub-scopes within a function, this mirroring
165 * principle currently works only for probes placed on function entry points.
166 */
167struct jprobe {
168 struct kprobe kp;
169 void *entry; /* probe handling code to jump to */
170};
171
172/* For backward compatibility with old code using JPROBE_ENTRY() */
173#define JPROBE_ENTRY(handler) (handler)
174
175/*
176 * Function-return probe - 151 * Function-return probe -
177 * Note: 152 * Note:
178 * User needs to provide a handler function, and initialize maxactive. 153 * User needs to provide a handler function, and initialize maxactive.
@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
389void unregister_kprobe(struct kprobe *p); 364void unregister_kprobe(struct kprobe *p);
390int register_kprobes(struct kprobe **kps, int num); 365int register_kprobes(struct kprobe **kps, int num);
391void unregister_kprobes(struct kprobe **kps, int num); 366void unregister_kprobes(struct kprobe **kps, int num);
392int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
393int longjmp_break_handler(struct kprobe *, struct pt_regs *);
394void jprobe_return(void);
395unsigned long arch_deref_entry_point(void *); 367unsigned long arch_deref_entry_point(void *);
396 368
397int register_kretprobe(struct kretprobe *rp); 369int register_kretprobe(struct kretprobe *rp);
@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
439static inline void unregister_kprobes(struct kprobe **kps, int num) 411static inline void unregister_kprobes(struct kprobe **kps, int num)
440{ 412{
441} 413}
442static inline void jprobe_return(void)
443{
444}
445static inline int register_kretprobe(struct kretprobe *rp) 414static inline int register_kretprobe(struct kretprobe *rp)
446{ 415{
447 return -ENOSYS; 416 return -ENOSYS;
@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
468 return -ENOSYS; 437 return -ENOSYS;
469} 438}
470#endif /* CONFIG_KPROBES */ 439#endif /* CONFIG_KPROBES */
471static inline int register_jprobe(struct jprobe *p)
472{
473 return -ENOSYS;
474}
475static inline int register_jprobes(struct jprobe **jps, int num)
476{
477 return -ENOSYS;
478}
479static inline void unregister_jprobe(struct jprobe *p)
480{
481}
482static inline void unregister_jprobes(struct jprobe **jps, int num)
483{
484}
485static inline int disable_kretprobe(struct kretprobe *rp) 440static inline int disable_kretprobe(struct kretprobe *rp)
486{ 441{
487 return disable_kprobe(&rp->kp); 442 return disable_kprobe(&rp->kp);
@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
490{ 445{
491 return enable_kprobe(&rp->kp); 446 return enable_kprobe(&rp->kp);
492} 447}
493static inline int disable_jprobe(struct jprobe *jp)
494{
495 return -ENOSYS;
496}
497static inline int enable_jprobe(struct jprobe *jp)
498{
499 return -ENOSYS;
500}
501 448
502#ifndef CONFIG_KPROBES 449#ifndef CONFIG_KPROBES
503static inline bool is_kprobe_insn_slot(unsigned long addr) 450static inline bool is_kprobe_insn_slot(unsigned long addr)
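With jprobes gone, argument inspection is done from a plain kprobe pre-handler using the saved register state instead of a mirrored function prototype. A minimal sketch (the probed symbol and the register mentioned are illustrative and architecture-specific):

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* e.g. on x86-64 the first integer argument sits in regs->di */
	return 0;	/* continue normal execution */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_sys_open",	/* illustrative target */
	.pre_handler	= example_pre_handler,
};
/* register_kprobe(&example_kp) arms the probe; unregister_kprobe() removes it. */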
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 2803264c512f..c1961761311d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
-void kthread_park_complete(struct task_struct *k);
 
 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5b9fddbaac41..b2bb44f87f5a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 /* Map the ktime_t to timeval conversion to ns_to_timeval function */
 #define ktime_to_timeval(kt)		ns_to_timeval((kt))
 
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt)			(kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+	return kt;
+}
 
 /**
  * ktime_compare - Compares two ktime_t variables for less, greater or equal
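Turning ktime_to_ns() into an inline keeps the scalar no-op semantics while making it a real function: it evaluates its argument once and can no longer be misused as an lvalue the way the old macro could. A minimal sketch:

static s64 example_elapsed_ns(ktime_t start)
{
	/* Same value as before, just through a typed inline helper. */
	return ktime_to_ns(ktime_sub(ktime_get(), start));
}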
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b7e82550e655..834683d603f9 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -253,7 +253,7 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
 struct led_trigger {
 	/* Trigger Properties */
 	const char	 *name;
-	void		(*activate)(struct led_classdev *led_cdev);
+	int		(*activate)(struct led_classdev *led_cdev);
 	void		(*deactivate)(struct led_classdev *led_cdev);
 
 	/* LEDs under control by this trigger (for simple triggers) */
@@ -262,8 +262,19 @@ struct led_trigger {
 
 	/* Link to next registered trigger */
 	struct list_head  next_trig;
+
+	const struct attribute_group **groups;
 };
 
+/*
+ * Currently the attributes in struct led_trigger::groups are added directly to
+ * the LED device. As this might change in the future, the following
+ * macros abstract getting the LED device and its trigger_data from the dev
+ * parameter passed to the attribute accessor functions.
+ */
+#define led_trigger_get_led(dev)	((struct led_classdev *)dev_get_drvdata((dev)))
+#define led_trigger_get_drvdata(dev)	(led_get_trigger_data(led_trigger_get_led(dev)))
+
 ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count);
 ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
@@ -288,10 +299,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
 				       unsigned long *delay_off,
 				       int invert);
 extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern void led_trigger_set(struct led_classdev *led_cdev,
+extern int led_trigger_set(struct led_classdev *led_cdev,
 			    struct led_trigger *trigger);
 extern void led_trigger_remove(struct led_classdev *led_cdev);
 
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+					void *trigger_data)
+{
+	led_cdev->trigger_data = trigger_data;
+}
+
 static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
 {
 	return led_cdev->trigger_data;
@@ -315,6 +332,10 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
 extern void led_trigger_rename_static(const char *name,
 		struct led_trigger *trig);
 
+#define module_led_trigger(__led_trigger) \
+	module_driver(__led_trigger, led_trigger_register, \
+		      led_trigger_unregister)
+
 #else
 
 /* Trigger has no members */
@@ -334,9 +355,14 @@ static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
 					      unsigned long *delay_off,
 					      int invert) {}
 static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
-static inline void led_trigger_set(struct led_classdev *led_cdev,
-				   struct led_trigger *trigger) {}
+static inline int led_trigger_set(struct led_classdev *led_cdev,
+				  struct led_trigger *trigger)
+{
+	return 0;
+}
+
 static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void led_set_trigger_data(struct led_classdev *led_cdev) {}
 static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
 {
 	return NULL;
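Taken together, the int-returning activate(), the trigger_data helpers and module_led_trigger() let a simple trigger allocate per-LED state and propagate allocation failures. A minimal sketch (all names are illustrative):

struct example_trig_data {
	unsigned long counter;
};

static int example_trig_activate(struct led_classdev *led_cdev)
{
	struct example_trig_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	led_set_trigger_data(led_cdev, data);
	return 0;
}

static void example_trig_deactivate(struct led_classdev *led_cdev)
{
	kfree(led_get_trigger_data(led_cdev));
}

static struct led_trigger example_trigger = {
	.name		= "example",
	.activate	= example_trig_activate,
	.deactivate	= example_trig_deactivate,
};
module_led_trigger(example_trigger);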
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 8b8946dd63b9..bc4f87cbe7f4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
 					    /* (doesn't imply presence) */
 	ATA_FLAG_SATA		= (1 << 1),
+	ATA_FLAG_NO_LPM		= (1 << 2), /* host not happy with LPM */
 	ATA_FLAG_NO_LOG_PAGE	= (1 << 5), /* do not issue log page read */
 	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
 	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
@@ -1110,6 +1111,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 			const struct ata_port_info * const * ppi, int n_ports);
 extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_host_get(struct ata_host *host);
+extern void ata_host_put(struct ata_host *host);
 extern int ata_host_start(struct ata_host *host);
 extern int ata_host_register(struct ata_host *host,
 			     struct scsi_host_template *sht);
@@ -1495,6 +1498,29 @@ static inline bool ata_tag_valid(unsigned int tag)
 	return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
 }
 
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn)		\
+	for ((tag) = 0; (tag) < (max_tag) &&			\
+	     ({ qc = fn((ap), (tag)); 1; }); (tag)++)		\
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag)			\
+	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag)				\
+	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag)		\
+	__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
 /*
  * device helpers
  */
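The iterators hide the tag bookkeeping; ata_qc_from_tag() may return NULL for unused tags, so the loop body still checks qc. A minimal sketch (the error-handling action is illustrative):

static void example_fail_all_commands(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag;

	ata_qc_for_each(ap, qc, tag) {
		if (qc)
			qc->flags |= ATA_QCFLAG_FAILED;
	}
}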
diff --git a/include/linux/list.h b/include/linux/list.h
index 4b129df4d46b..de04cc5ed536 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
 	__list_cut_position(list, head, entry);
 }
 
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list.  You should pass
+ * in @entry an element you know is on @head.  @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+				   struct list_head *head,
+				   struct list_head *entry)
+{
+	if (head->next == entry) {
+		INIT_LIST_HEAD(list);
+		return;
+	}
+	list->next = head->next;
+	list->next->prev = list;
+	list->prev = entry->prev;
+	list->prev->next = list;
+	head->next = entry;
+	entry->prev = head;
+}
+
 static inline void __list_splice(const struct list_head *list,
 				 struct list_head *prev,
 				 struct list_head *next)
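list_cut_before() complements list_cut_position(): everything before @entry moves to the new list, while @head keeps @entry and its successors. A minimal usage sketch:

static void example_flush_older_entries(struct list_head *queue,
					struct list_head *cursor)
{
	LIST_HEAD(batch);

	/* 'batch' receives the entries that preceded 'cursor';
	 * 'queue' now begins with 'cursor'. */
	list_cut_before(&batch, queue, cursor);

	/* ... process 'batch' without touching new additions to 'queue' ... */
}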
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f1131c8dd54..97a020c616ad 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -576,6 +576,10 @@
  *	userspace to load a kernel module with the given name.
  *	@kmod_name name of the module requested by the kernel
  *	Return 0 if successful.
+ * @kernel_load_data:
+ *	Load data provided by userspace.
+ *	@id kernel load data identifier
+ *	Return 0 if permission is granted.
  * @kernel_read_file:
  *	Read a file specified by userspace.
  *	@file contains the file structure pointing to the file being read
@@ -1569,7 +1573,7 @@ union security_list_options {
 	int (*file_send_sigiotask)(struct task_struct *tsk,
 				   struct fown_struct *fown, int sig);
 	int (*file_receive)(struct file *file);
-	int (*file_open)(struct file *file, const struct cred *cred);
+	int (*file_open)(struct file *file);
 
 	int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
 	void (*task_free)(struct task_struct *task);
@@ -1582,6 +1586,7 @@ union security_list_options {
 	int (*kernel_act_as)(struct cred *new, u32 secid);
 	int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
 	int (*kernel_module_request)(char *kmod_name);
+	int (*kernel_load_data)(enum kernel_load_data_id id);
 	int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
 	int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
 				     enum kernel_read_file_id id);
@@ -1872,6 +1877,7 @@ struct security_hook_heads {
 	struct hlist_head cred_getsecid;
 	struct hlist_head kernel_act_as;
 	struct hlist_head kernel_create_files_as;
+	struct hlist_head kernel_load_data;
 	struct hlist_head kernel_read_file;
 	struct hlist_head kernel_post_read_file;
 	struct hlist_head kernel_module_request;
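An LSM opts into the new hook with the usual LSM_HOOK_INIT() registration. A minimal sketch (the policy check and the module name are illustrative):

static int example_kernel_load_data(enum kernel_load_data_id id)
{
	/* e.g. deny firmware or kexec image loads under a lockdown policy */
	return 0;	/* 0 grants permission */
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(kernel_load_data, example_kernel_load_data),
};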
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 4f5f8c21e283..1eb6f244588d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -27,6 +27,8 @@
  */
 #define MARVELL_PHY_ID_88E6390		0x01410f90
 
+#define MARVELL_PHY_FAMILY_ID(id)	((id) >> 4)
+
 /* struct phy_device dev_flags definitions */
 #define MARVELL_PHY_M1145_FLAGS_RESISTANCE	0x00000001
 #define MARVELL_PHY_M1118_DNS323_LEDS	0x00000002
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ca59883c8364..516920549378 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -20,31 +20,60 @@
20#define INIT_MEMBLOCK_REGIONS 128 20#define INIT_MEMBLOCK_REGIONS 128
21#define INIT_PHYSMEM_REGIONS 4 21#define INIT_PHYSMEM_REGIONS 4
22 22
23/* Definition of memblock flags. */ 23/**
24enum { 24 * enum memblock_flags - definition of memory region attributes
25 * @MEMBLOCK_NONE: no special request
26 * @MEMBLOCK_HOTPLUG: hotpluggable region
27 * @MEMBLOCK_MIRROR: mirrored region
28 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
29 */
30enum memblock_flags {
25 MEMBLOCK_NONE = 0x0, /* No special request */ 31 MEMBLOCK_NONE = 0x0, /* No special request */
26 MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ 32 MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
27 MEMBLOCK_MIRROR = 0x2, /* mirrored region */ 33 MEMBLOCK_MIRROR = 0x2, /* mirrored region */
28 MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ 34 MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
29}; 35};
30 36
37/**
38 * struct memblock_region - represents a memory region
39 * @base: physical address of the region
40 * @size: size of the region
41 * @flags: memory region attributes
42 * @nid: NUMA node id
43 */
31struct memblock_region { 44struct memblock_region {
32 phys_addr_t base; 45 phys_addr_t base;
33 phys_addr_t size; 46 phys_addr_t size;
34 unsigned long flags; 47 enum memblock_flags flags;
35#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 48#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
36 int nid; 49 int nid;
37#endif 50#endif
38}; 51};
39 52
53/**
54 * struct memblock_type - collection of memory regions of certain type
55 * @cnt: number of regions
56 * @max: size of the allocated array
57 * @total_size: size of all regions
58 * @regions: array of regions
59 * @name: the memory type symbolic name
60 */
40struct memblock_type { 61struct memblock_type {
41 unsigned long cnt; /* number of regions */ 62 unsigned long cnt;
42 unsigned long max; /* size of the allocated array */ 63 unsigned long max;
43 phys_addr_t total_size; /* size of all regions */ 64 phys_addr_t total_size;
44 struct memblock_region *regions; 65 struct memblock_region *regions;
45 char *name; 66 char *name;
46}; 67};
47 68
69/**
70 * struct memblock - memblock allocator metadata
71 * @bottom_up: is bottom up direction?
72 * @current_limit: physical address of the current allocation limit
73 * @memory: usabe memory regions
74 * @reserved: reserved memory regions
75 * @physmem: all physical memory
76 */
48struct memblock { 77struct memblock {
49 bool bottom_up; /* is bottom up direction? */ 78 bool bottom_up; /* is bottom up direction? */
50 phys_addr_t current_limit; 79 phys_addr_t current_limit;
@@ -72,7 +101,7 @@ void memblock_discard(void);
72 101
73phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, 102phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
74 phys_addr_t start, phys_addr_t end, 103 phys_addr_t start, phys_addr_t end,
75 int nid, ulong flags); 104 int nid, enum memblock_flags flags);
76phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, 105phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
77 phys_addr_t size, phys_addr_t align); 106 phys_addr_t size, phys_addr_t align);
78void memblock_allow_resize(void); 107void memblock_allow_resize(void);
@@ -89,19 +118,19 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
89int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); 118int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
90int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); 119int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
91int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); 120int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
92ulong choose_memblock_flags(void); 121enum memblock_flags choose_memblock_flags(void);
93 122
94/* Low level functions */ 123/* Low level functions */
95int memblock_add_range(struct memblock_type *type, 124int memblock_add_range(struct memblock_type *type,
96 phys_addr_t base, phys_addr_t size, 125 phys_addr_t base, phys_addr_t size,
97 int nid, unsigned long flags); 126 int nid, enum memblock_flags flags);
98 127
99void __next_mem_range(u64 *idx, int nid, ulong flags, 128void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
100 struct memblock_type *type_a, 129 struct memblock_type *type_a,
101 struct memblock_type *type_b, phys_addr_t *out_start, 130 struct memblock_type *type_b, phys_addr_t *out_start,
102 phys_addr_t *out_end, int *out_nid); 131 phys_addr_t *out_end, int *out_nid);
103 132
104void __next_mem_range_rev(u64 *idx, int nid, ulong flags, 133void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
105 struct memblock_type *type_a, 134 struct memblock_type *type_a,
106 struct memblock_type *type_b, phys_addr_t *out_start, 135 struct memblock_type *type_b, phys_addr_t *out_start,
107 phys_addr_t *out_end, int *out_nid); 136 phys_addr_t *out_end, int *out_nid);
@@ -239,7 +268,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
239/** 268/**
240 * for_each_resv_unavail_range - iterate through reserved and unavailable memory 269 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
241 * @i: u64 used as loop variable 270 * @i: u64 used as loop variable
242 * @flags: pick from blocks based on memory attributes
243 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 271 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
244 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 272 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
245 * 273 *
@@ -253,13 +281,13 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
253 NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL) 281 NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
254 282
255static inline void memblock_set_region_flags(struct memblock_region *r, 283static inline void memblock_set_region_flags(struct memblock_region *r,
256 unsigned long flags) 284 enum memblock_flags flags)
257{ 285{
258 r->flags |= flags; 286 r->flags |= flags;
259} 287}
260 288
261static inline void memblock_clear_region_flags(struct memblock_region *r, 289static inline void memblock_clear_region_flags(struct memblock_region *r,
262 unsigned long flags) 290 enum memblock_flags flags)
263{ 291{
264 r->flags &= ~flags; 292 r->flags &= ~flags;
265} 293}
@@ -317,10 +345,10 @@ static inline bool memblock_bottom_up(void)
317 345
318phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, 346phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
319 phys_addr_t start, phys_addr_t end, 347 phys_addr_t start, phys_addr_t end,
320 ulong flags); 348 enum memblock_flags flags);
321phys_addr_t memblock_alloc_base_nid(phys_addr_t size, 349phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
322 phys_addr_t align, phys_addr_t max_addr, 350 phys_addr_t align, phys_addr_t max_addr,
323 int nid, ulong flags); 351 int nid, enum memblock_flags flags);
324phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, 352phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
325 phys_addr_t max_addr); 353 phys_addr_t max_addr);
326phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, 354phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -367,8 +395,10 @@ phys_addr_t memblock_get_current_limit(void);
367 */ 395 */
368 396
369/** 397/**
370 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region 398 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
371 * @reg: memblock_region structure 399 * @reg: memblock_region structure
400 *
401 * Return: the lowest pfn intersecting with the memory region
372 */ 402 */
373static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) 403static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
374{ 404{
@@ -376,8 +406,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
376} 406}
377 407
378/** 408/**
379 * memblock_region_memory_end_pfn - Return the end_pfn this region 409 * memblock_region_memory_end_pfn - get the end pfn of the memory region
380 * @reg: memblock_region structure 410 * @reg: memblock_region structure
411 *
412 * Return: the end_pfn of the reserved region
381 */ 413 */
382static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) 414static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
383{ 415{
@@ -385,8 +417,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
385} 417}
386 418
387/** 419/**
388 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region 420 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
389 * @reg: memblock_region structure 421 * @reg: memblock_region structure
422 *
423 * Return: the lowest pfn intersecting with the reserved region
390 */ 424 */
391static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) 425static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
392{ 426{
@@ -394,8 +428,10 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl
394} 428}
395 429
396/** 430/**
397 * memblock_region_reserved_end_pfn - Return the end_pfn this region 431 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
398 * @reg: memblock_region structure 432 * @reg: memblock_region structure
433 *
434 * Return: the end_pfn of the reserved region
399 */ 435 */
400static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) 436static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
401{ 437{
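The typed flags flow straight from choose_memblock_flags() into the range search. A minimal sketch of the usual mirror-then-fallback pattern (illustrative only):

static phys_addr_t __init example_find_range(phys_addr_t size,
					     phys_addr_t align)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	found = memblock_find_in_range_node(size, align, 0,
					    MEMBLOCK_ALLOC_ACCESSIBLE,
					    NUMA_NO_NODE, flags);
	if (!found && (flags & MEMBLOCK_MIRROR)) {
		/* Retry without requiring mirrored memory. */
		flags &= ~MEMBLOCK_MIRROR;
		found = memblock_find_in_range_node(size, align, 0,
						    MEMBLOCK_ALLOC_ACCESSIBLE,
						    NUMA_NO_NODE, flags);
	}
	return found;
}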
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c6fb116e925..680d3395fc83 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -317,6 +317,9 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
 			  bool compound);
+int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
+			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
+			  bool compound);
 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 			      bool lrucare, bool compound);
 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
@@ -789,6 +792,16 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 	return 0;
 }
 
+static inline int mem_cgroup_try_charge_delay(struct page *page,
+					      struct mm_struct *mm,
+					      gfp_t gfp_mask,
+					      struct mem_cgroup **memcgp,
+					      bool compound)
+{
+	*memcgp = NULL;
+	return 0;
+}
+
 static inline void mem_cgroup_commit_charge(struct page *page,
 					    struct mem_cgroup *memcg,
 					    bool lrucare, bool compound)
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 31ca3e28b0eb..a6ddefc60517 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -38,6 +38,7 @@ struct memory_block {
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
 unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
 
 /* These states are exposed to userspace as text strings in sysfs */
 #define	MEM_ONLINE		(1<<0) /* exposed to userspace */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 122e7e9d3091..dca6ab4eaa99 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -630,6 +630,7 @@ struct mlx4_caps {
 	u32 vf_caps;
 	bool wol_port[MLX4_MAX_PORTS + 1];
 	struct mlx4_rate_limit_caps rl_caps;
+	u32 health_buffer_addrs;
 };
 
 struct mlx4_buf_list {
@@ -851,6 +852,12 @@ struct mlx4_vf_dev {
 	u8			n_ports;
 };
 
+struct mlx4_fw_crdump {
+	bool snapshot_enable;
+	struct devlink_region *region_crspace;
+	struct devlink_region *region_fw_health;
+};
+
 enum mlx4_pci_status {
 	MLX4_PCI_STATUS_DISABLED,
 	MLX4_PCI_STATUS_ENABLED,
@@ -871,6 +878,7 @@ struct mlx4_dev_persistent {
 	u8	interface_state;
 	struct mutex		pci_status_mutex; /* sync pci state */
 	enum mlx4_pci_status	pci_status;
+	struct mlx4_fw_crdump	crdump;
 };
 
 struct mlx4_dev {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 02f72ebf31a7..11fa4e66afc5 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -332,6 +332,13 @@ enum mlx5_event {
332 332
333 MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, 333 MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
334 MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, 334 MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
335
336 MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
337};
338
339enum {
340 MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
341 MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
335}; 342};
336 343
337enum { 344enum {
@@ -750,7 +757,7 @@ enum {
750 757
751#define MLX5_MINI_CQE_ARRAY_SIZE 8 758#define MLX5_MINI_CQE_ARRAY_SIZE 8
752 759
753static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) 760static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
754{ 761{
755 return (cqe->op_own >> 2) & 0x3; 762 return (cqe->op_own >> 2) & 0x3;
756} 763}
@@ -770,14 +777,14 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
770 return (cqe->l4_l3_hdr_type >> 2) & 0x3; 777 return (cqe->l4_l3_hdr_type >> 2) & 0x3;
771} 778}
772 779
773static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) 780static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
774{ 781{
775 return cqe->outer_l3_tunneled & 0x1; 782 return cqe->outer_l3_tunneled & 0x1;
776} 783}
777 784
778static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) 785static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
779{ 786{
780 return !!(cqe->l4_l3_hdr_type & 0x1); 787 return cqe->l4_l3_hdr_type & 0x1;
781} 788}
782 789
783static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) 790static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -939,9 +946,9 @@ enum {
939}; 946};
940 947
941enum { 948enum {
942 MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, 949 MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
943 MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, 950 MLX5_VPORT_ADMIN_STATE_UP = 0x1,
944 MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, 951 MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
945}; 952};
946 953
947enum { 954enum {
@@ -1071,6 +1078,9 @@ enum mlx5_qcam_feature_groups {
1071#define MLX5_CAP_GEN(mdev, cap) \ 1078#define MLX5_CAP_GEN(mdev, cap) \
1072 MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) 1079 MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
1073 1080
1081#define MLX5_CAP_GEN_64(mdev, cap) \
1082 MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
1083
1074#define MLX5_CAP_GEN_MAX(mdev, cap) \ 1084#define MLX5_CAP_GEN_MAX(mdev, cap) \
1075 MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) 1085 MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
1076 1086
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80cbb7fdce4a..54f385cc8811 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -138,9 +138,14 @@ enum {
138 MLX5_REG_HOST_ENDIANNESS = 0x7004, 138 MLX5_REG_HOST_ENDIANNESS = 0x7004,
139 MLX5_REG_MCIA = 0x9014, 139 MLX5_REG_MCIA = 0x9014,
140 MLX5_REG_MLCR = 0x902b, 140 MLX5_REG_MLCR = 0x902b,
141 MLX5_REG_MTRC_CAP = 0x9040,
142 MLX5_REG_MTRC_CONF = 0x9041,
143 MLX5_REG_MTRC_STDB = 0x9042,
144 MLX5_REG_MTRC_CTRL = 0x9043,
141 MLX5_REG_MPCNT = 0x9051, 145 MLX5_REG_MPCNT = 0x9051,
142 MLX5_REG_MTPPS = 0x9053, 146 MLX5_REG_MTPPS = 0x9053,
143 MLX5_REG_MTPPSE = 0x9054, 147 MLX5_REG_MTPPSE = 0x9054,
148 MLX5_REG_MPEGC = 0x9056,
144 MLX5_REG_MCQI = 0x9061, 149 MLX5_REG_MCQI = 0x9061,
145 MLX5_REG_MCC = 0x9062, 150 MLX5_REG_MCC = 0x9062,
146 MLX5_REG_MCDA = 0x9063, 151 MLX5_REG_MCDA = 0x9063,
@@ -358,6 +363,7 @@ struct mlx5_frag_buf_ctrl {
358 struct mlx5_frag_buf frag_buf; 363 struct mlx5_frag_buf frag_buf;
359 u32 sz_m1; 364 u32 sz_m1;
360 u32 frag_sz_m1; 365 u32 frag_sz_m1;
366 u32 strides_offset;
361 u8 log_sz; 367 u8 log_sz;
362 u8 log_stride; 368 u8 log_stride;
363 u8 log_frag_strides; 369 u8 log_frag_strides;
@@ -811,6 +817,9 @@ struct mlx5_clock {
811 struct mlx5_pps pps_info; 817 struct mlx5_pps pps_info;
812}; 818};
813 819
820struct mlx5_fw_tracer;
821struct mlx5_vxlan;
822
814struct mlx5_core_dev { 823struct mlx5_core_dev {
815 struct pci_dev *pdev; 824 struct pci_dev *pdev;
816 /* sync pci state */ 825 /* sync pci state */
@@ -842,6 +851,7 @@ struct mlx5_core_dev {
842 atomic_t num_qps; 851 atomic_t num_qps;
843 u32 issi; 852 u32 issi;
844 struct mlx5e_resources mlx5e_res; 853 struct mlx5e_resources mlx5e_res;
854 struct mlx5_vxlan *vxlan;
845 struct { 855 struct {
846 struct mlx5_rsvd_gids reserved_gids; 856 struct mlx5_rsvd_gids reserved_gids;
847 u32 roce_en; 857 u32 roce_en;
@@ -855,6 +865,7 @@ struct mlx5_core_dev {
855 struct mlx5_clock clock; 865 struct mlx5_clock clock;
856 struct mlx5_ib_clock_info *clock_info; 866 struct mlx5_ib_clock_info *clock_info;
857 struct page *clock_info_page; 867 struct page *clock_info_page;
868 struct mlx5_fw_tracer *tracer;
858}; 869};
859 870
860struct mlx5_db { 871struct mlx5_db {
@@ -983,14 +994,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
983 return key & 0xffffff00u; 994 return key & 0xffffff00u;
984} 995}
985 996
986static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, 997static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
987 struct mlx5_frag_buf_ctrl *fbc) 998 u32 strides_offset,
999 struct mlx5_frag_buf_ctrl *fbc)
988{ 1000{
989 fbc->log_stride = log_stride; 1001 fbc->log_stride = log_stride;
990 fbc->log_sz = log_sz; 1002 fbc->log_sz = log_sz;
991 fbc->sz_m1 = (1 << fbc->log_sz) - 1; 1003 fbc->sz_m1 = (1 << fbc->log_sz) - 1;
992 fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; 1004 fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
993 fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; 1005 fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
1006 fbc->strides_offset = strides_offset;
1007}
1008
1009static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
1010 struct mlx5_frag_buf_ctrl *fbc)
1011{
1012 mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
994} 1013}
995 1014
996static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, 1015static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1023,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
1004static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, 1023static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
1005 u32 ix) 1024 u32 ix)
1006{ 1025{
1007 unsigned int frag = (ix >> fbc->log_frag_strides); 1026 unsigned int frag;
1027
1028 ix += fbc->strides_offset;
1029 frag = ix >> fbc->log_frag_strides;
1008 1030
1009 return fbc->frag_buf.frags[frag].buf + 1031 return fbc->frag_buf.frags[frag].buf +
1010 ((fbc->frag_sz_m1 & ix) << fbc->log_stride); 1032 ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
@@ -1067,8 +1089,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
1067 struct mlx5_core_mkey *mkey); 1089 struct mlx5_core_mkey *mkey);
1068int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, 1090int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
1069 u32 *out, int outlen); 1091 u32 *out, int outlen);
1070int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
1071 u32 *mkey);
1072int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); 1092int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
1073int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); 1093int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
1074int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, 1094int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
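Note: mlx5_fill_fbc_offset() stores a starting stride offset in the buffer control, and mlx5_frag_buf_get_wqe() now folds that offset into the index before resolving the fragment. A sketch under assumed log sizes (the function name and numbers are illustrative):

/* Sketch: element 0 of this control resolves rq_strides entries into the
 * buffer; the frag_buf pages are assumed to have been allocated already.
 */
static void *sketch_first_wqe(struct mlx5_frag_buf_ctrl *fbc, u32 rq_strides)
{
	mlx5_fill_fbc_offset(6 /* log_stride */, 10 /* log_sz */, rq_strides, fbc);

	return mlx5_frag_buf_get_wqe(fbc, 0);
}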
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index d3c9db492b30..fab5121ffb8f 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -8,6 +8,8 @@
8 8
9#include <linux/mlx5/driver.h> 9#include <linux/mlx5/driver.h>
10 10
11#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
12
11enum { 13enum {
12 SRIOV_NONE, 14 SRIOV_NONE,
13 SRIOV_LEGACY, 15 SRIOV_LEGACY,
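Note: MLX5_ESWITCH_MANAGER() wraps the eswitch_manager HCA capability bit (the rename from eswitch_flow_table appears further down in mlx5_ifc.h), so callers stop open-coding the MLX5_CAP_GEN() lookup. A hedged sketch of a call site (the function is illustrative):

/* Sketch: skip eswitch setup on functions that are not the eswitch manager. */
static int sketch_eswitch_setup(struct mlx5_core_dev *dev)
{
	if (!MLX5_ESWITCH_MANAGER(dev))
		return 0;

	/* ... representor / offload setup would go here ... */
	return 0;
}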
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 757b4a30281e..71fb503b2b52 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -152,6 +152,8 @@ struct mlx5_fs_vlan {
152 u8 prio; 152 u8 prio;
153}; 153};
154 154
155#define MLX5_FS_VLAN_DEPTH 2
156
155struct mlx5_flow_act { 157struct mlx5_flow_act {
156 u32 action; 158 u32 action;
157 bool has_flow_tag; 159 bool has_flow_tag;
@@ -159,7 +161,7 @@ struct mlx5_flow_act {
159 u32 encap_id; 161 u32 encap_id;
160 u32 modify_id; 162 u32 modify_id;
161 uintptr_t esp_id; 163 uintptr_t esp_id;
162 struct mlx5_fs_vlan vlan; 164 struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
163 struct ib_counters *counters; 165 struct ib_counters *counters;
164}; 166};
165 167
@@ -175,7 +177,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
175 struct mlx5_flow_spec *spec, 177 struct mlx5_flow_spec *spec,
176 struct mlx5_flow_act *flow_act, 178 struct mlx5_flow_act *flow_act,
177 struct mlx5_flow_destination *dest, 179 struct mlx5_flow_destination *dest,
178 int dest_num); 180 int num_dest);
179void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); 181void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
180 182
181int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, 183int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
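Note: struct mlx5_flow_act now holds MLX5_FS_VLAN_DEPTH (two) VLAN descriptors, pairing with the new VLAN_PUSH_2/VLAN_POP_2 actions and the pop_vlan_2/push_vlan_2 capability bits added below. A sketch of a double-push action, assuming the ethtype/vid members of mlx5_fs_vlan; the TPID/VID values are illustrative and FWD_DEST is the pre-existing forward action:

/* Sketch: push an outer S-tag and an inner C-tag on matching packets. */
static const struct mlx5_flow_act sketch_flow_act = {
	.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		  MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
		  MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2,
	.vlan = {
		{ .ethtype = 0x88a8, .vid = 100 },	/* vlan[0]: outer tag */
		{ .ethtype = 0x8100, .vid = 200 },	/* vlan[1]: inner tag */
	},
};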
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 27134c4fcb76..6ead9c1a5396 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -76,6 +76,15 @@ enum {
76}; 76};
77 77
78enum { 78enum {
79 MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4),
80 MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5),
81};
82
83enum {
84 MLX5_OBJ_TYPE_UCTX = 0x0004,
85};
86
87enum {
79 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, 88 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
80 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 89 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
81 MLX5_CMD_OP_INIT_HCA = 0x102, 90 MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -242,6 +251,8 @@ enum {
242 MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, 251 MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
243 MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, 252 MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963,
244 MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, 253 MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964,
254 MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
255 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03,
245 MLX5_CMD_OP_MAX 256 MLX5_CMD_OP_MAX
246}; 257};
247 258
@@ -326,7 +337,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
326 u8 reserved_at_9[0x1]; 337 u8 reserved_at_9[0x1];
327 u8 pop_vlan[0x1]; 338 u8 pop_vlan[0x1];
328 u8 push_vlan[0x1]; 339 u8 push_vlan[0x1];
329 u8 reserved_at_c[0x14]; 340 u8 reserved_at_c[0x1];
341 u8 pop_vlan_2[0x1];
342 u8 push_vlan_2[0x1];
343 u8 reserved_at_f[0x11];
330 344
331 u8 reserved_at_20[0x2]; 345 u8 reserved_at_20[0x2];
332 u8 log_max_ft_size[0x6]; 346 u8 log_max_ft_size[0x6];
@@ -654,7 +668,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
654 u8 swp[0x1]; 668 u8 swp[0x1];
655 u8 swp_csum[0x1]; 669 u8 swp_csum[0x1];
656 u8 swp_lso[0x1]; 670 u8 swp_lso[0x1];
657 u8 reserved_at_23[0x1b]; 671 u8 reserved_at_23[0xd];
672 u8 max_vxlan_udp_ports[0x8];
673 u8 reserved_at_38[0x6];
658 u8 max_geneve_opt_len[0x1]; 674 u8 max_geneve_opt_len[0x1];
659 u8 tunnel_stateless_geneve_rx[0x1]; 675 u8 tunnel_stateless_geneve_rx[0x1];
660 676
@@ -874,7 +890,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
874 u8 log_max_eq_sz[0x8]; 890 u8 log_max_eq_sz[0x8];
875 u8 reserved_at_e8[0x2]; 891 u8 reserved_at_e8[0x2];
876 u8 log_max_mkey[0x6]; 892 u8 log_max_mkey[0x6];
877 u8 reserved_at_f0[0xc]; 893 u8 reserved_at_f0[0x8];
894 u8 dump_fill_mkey[0x1];
895 u8 reserved_at_f9[0x3];
878 u8 log_max_eq[0x4]; 896 u8 log_max_eq[0x4];
879 897
880 u8 max_indirection[0x8]; 898 u8 max_indirection[0x8];
@@ -922,7 +940,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
922 u8 vnic_env_queue_counters[0x1]; 940 u8 vnic_env_queue_counters[0x1];
923 u8 ets[0x1]; 941 u8 ets[0x1];
924 u8 nic_flow_table[0x1]; 942 u8 nic_flow_table[0x1];
925 u8 eswitch_flow_table[0x1]; 943 u8 eswitch_manager[0x1];
926 u8 device_memory[0x1]; 944 u8 device_memory[0x1];
927 u8 mcam_reg[0x1]; 945 u8 mcam_reg[0x1];
928 u8 pcam_reg[0x1]; 946 u8 pcam_reg[0x1];
@@ -1113,7 +1131,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
1113 u8 reserved_at_3f8[0x3]; 1131 u8 reserved_at_3f8[0x3];
1114 u8 log_max_current_uc_list[0x5]; 1132 u8 log_max_current_uc_list[0x5];
1115 1133
1116 u8 reserved_at_400[0x80]; 1134 u8 general_obj_types[0x40];
1135
1136 u8 reserved_at_440[0x20];
1137
1138 u8 reserved_at_460[0x10];
1139 u8 max_num_eqs[0x10];
1117 1140
1118 u8 reserved_at_480[0x3]; 1141 u8 reserved_at_480[0x3];
1119 u8 log_max_l2_table[0x5]; 1142 u8 log_max_l2_table[0x5];
@@ -1668,7 +1691,11 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
1668 1691
1669 u8 rx_buffer_full_low[0x20]; 1692 u8 rx_buffer_full_low[0x20];
1670 1693
1671 u8 reserved_at_1c0[0x600]; 1694 u8 rx_icrc_encapsulated_high[0x20];
1695
1696 u8 rx_icrc_encapsulated_low[0x20];
1697
1698 u8 reserved_at_200[0x5c0];
1672}; 1699};
1673 1700
1674struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { 1701struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -2367,6 +2394,8 @@ enum {
2367 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, 2394 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
2368 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, 2395 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
2369 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, 2396 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
2397 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
2398 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
2370}; 2399};
2371 2400
2372struct mlx5_ifc_vlan_bits { 2401struct mlx5_ifc_vlan_bits {
@@ -2397,7 +2426,9 @@ struct mlx5_ifc_flow_context_bits {
2397 2426
2398 u8 modify_header_id[0x20]; 2427 u8 modify_header_id[0x20];
2399 2428
2400 u8 reserved_at_100[0x100]; 2429 struct mlx5_ifc_vlan_bits push_vlan_2;
2430
2431 u8 reserved_at_120[0xe0];
2401 2432
2402 struct mlx5_ifc_fte_match_param_bits match_value; 2433 struct mlx5_ifc_fte_match_param_bits match_value;
2403 2434
@@ -3733,8 +3764,8 @@ struct mlx5_ifc_query_vport_state_out_bits {
3733}; 3764};
3734 3765
3735enum { 3766enum {
3736 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, 3767 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
3737 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, 3768 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
3738}; 3769};
3739 3770
3740struct mlx5_ifc_query_vport_state_in_bits { 3771struct mlx5_ifc_query_vport_state_in_bits {
@@ -8030,9 +8061,23 @@ struct mlx5_ifc_peir_reg_bits {
8030 u8 error_type[0x8]; 8061 u8 error_type[0x8];
8031}; 8062};
8032 8063
8033struct mlx5_ifc_pcam_enhanced_features_bits { 8064struct mlx5_ifc_mpegc_reg_bits {
8034 u8 reserved_at_0[0x76]; 8065 u8 reserved_at_0[0x30];
8066 u8 field_select[0x10];
8035 8067
8068 u8 tx_overflow_sense[0x1];
8069 u8 mark_cqe[0x1];
8070 u8 mark_cnp[0x1];
8071 u8 reserved_at_43[0x1b];
8072 u8 tx_lossy_overflow_oper[0x2];
8073
8074 u8 reserved_at_60[0x100];
8075};
8076
8077struct mlx5_ifc_pcam_enhanced_features_bits {
8078 u8 reserved_at_0[0x6d];
8079 u8 rx_icrc_encapsulated_counter[0x1];
8080 u8 reserved_at_6e[0x8];
8036 u8 pfcc_mask[0x1]; 8081 u8 pfcc_mask[0x1];
8037 u8 reserved_at_77[0x4]; 8082 u8 reserved_at_77[0x4];
8038 u8 rx_buffer_fullness_counters[0x1]; 8083 u8 rx_buffer_fullness_counters[0x1];
@@ -8077,7 +8122,11 @@ struct mlx5_ifc_pcam_reg_bits {
8077}; 8122};
8078 8123
8079struct mlx5_ifc_mcam_enhanced_features_bits { 8124struct mlx5_ifc_mcam_enhanced_features_bits {
8080 u8 reserved_at_0[0x7b]; 8125 u8 reserved_at_0[0x74];
8126 u8 mark_tx_action_cnp[0x1];
8127 u8 mark_tx_action_cqe[0x1];
8128 u8 dynamic_tx_overflow[0x1];
8129 u8 reserved_at_77[0x4];
8081 u8 pcie_outbound_stalled[0x1]; 8130 u8 pcie_outbound_stalled[0x1];
8082 u8 tx_overflow_buffer_pkt[0x1]; 8131 u8 tx_overflow_buffer_pkt[0x1];
8083 u8 mtpps_enh_out_per_adj[0x1]; 8132 u8 mtpps_enh_out_per_adj[0x1];
@@ -8092,7 +8141,11 @@ struct mlx5_ifc_mcam_access_reg_bits {
8092 u8 mcqi[0x1]; 8141 u8 mcqi[0x1];
8093 u8 reserved_at_1f[0x1]; 8142 u8 reserved_at_1f[0x1];
8094 8143
8095 u8 regs_95_to_64[0x20]; 8144 u8 regs_95_to_87[0x9];
8145 u8 mpegc[0x1];
8146 u8 regs_85_to_68[0x12];
8147 u8 tracer_registers[0x4];
8148
8096 u8 regs_63_to_32[0x20]; 8149 u8 regs_63_to_32[0x20];
8097 u8 regs_31_to_0[0x20]; 8150 u8 regs_31_to_0[0x20];
8098}; 8151};
@@ -9115,4 +9168,113 @@ struct mlx5_ifc_dealloc_memic_out_bits {
9115 u8 reserved_at_40[0x40]; 9168 u8 reserved_at_40[0x40];
9116}; 9169};
9117 9170
9171struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
9172 u8 opcode[0x10];
9173 u8 uid[0x10];
9174
9175 u8 reserved_at_20[0x10];
9176 u8 obj_type[0x10];
9177
9178 u8 obj_id[0x20];
9179
9180 u8 reserved_at_60[0x20];
9181};
9182
9183struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
9184 u8 status[0x8];
9185 u8 reserved_at_8[0x18];
9186
9187 u8 syndrome[0x20];
9188
9189 u8 obj_id[0x20];
9190
9191 u8 reserved_at_60[0x20];
9192};
9193
9194struct mlx5_ifc_umem_bits {
9195 u8 modify_field_select[0x40];
9196
9197 u8 reserved_at_40[0x5b];
9198 u8 log_page_size[0x5];
9199
9200 u8 page_offset[0x20];
9201
9202 u8 num_of_mtt[0x40];
9203
9204 struct mlx5_ifc_mtt_bits mtt[0];
9205};
9206
9207struct mlx5_ifc_uctx_bits {
9208 u8 modify_field_select[0x40];
9209
9210 u8 reserved_at_40[0x1c0];
9211};
9212
9213struct mlx5_ifc_create_umem_in_bits {
9214 struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
9215 struct mlx5_ifc_umem_bits umem;
9216};
9217
9218struct mlx5_ifc_create_uctx_in_bits {
9219 struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
9220 struct mlx5_ifc_uctx_bits uctx;
9221};
9222
9223struct mlx5_ifc_mtrc_string_db_param_bits {
9224 u8 string_db_base_address[0x20];
9225
9226 u8 reserved_at_20[0x8];
9227 u8 string_db_size[0x18];
9228};
9229
9230struct mlx5_ifc_mtrc_cap_bits {
9231 u8 trace_owner[0x1];
9232 u8 trace_to_memory[0x1];
9233 u8 reserved_at_2[0x4];
9234 u8 trc_ver[0x2];
9235 u8 reserved_at_8[0x14];
9236 u8 num_string_db[0x4];
9237
9238 u8 first_string_trace[0x8];
9239 u8 num_string_trace[0x8];
9240 u8 reserved_at_30[0x28];
9241
9242 u8 log_max_trace_buffer_size[0x8];
9243
9244 u8 reserved_at_60[0x20];
9245
9246 struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8];
9247
9248 u8 reserved_at_280[0x180];
9249};
9250
9251struct mlx5_ifc_mtrc_conf_bits {
9252 u8 reserved_at_0[0x1c];
9253 u8 trace_mode[0x4];
9254 u8 reserved_at_20[0x18];
9255 u8 log_trace_buffer_size[0x8];
9256 u8 trace_mkey[0x20];
9257 u8 reserved_at_60[0x3a0];
9258};
9259
9260struct mlx5_ifc_mtrc_stdb_bits {
9261 u8 string_db_index[0x4];
9262 u8 reserved_at_4[0x4];
9263 u8 read_size[0x18];
9264 u8 start_offset[0x20];
9265 u8 string_db_data[0];
9266};
9267
9268struct mlx5_ifc_mtrc_ctrl_bits {
9269 u8 trace_status[0x2];
9270 u8 reserved_at_2[0x2];
9271 u8 arm_event[0x1];
9272 u8 reserved_at_5[0xb];
9273 u8 modify_field_select[0x10];
9274 u8 reserved_at_20[0x2b];
9275 u8 current_timestamp52_32[0x15];
9276 u8 current_timestamp31_0[0x20];
9277 u8 reserved_at_80[0x180];
9278};
9279
9118#endif /* MLX5_IFC_H */ 9280#endif /* MLX5_IFC_H */
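Note: the new CREATE/DESTROY_GENERAL_OBJECT opcodes are driven through the general_obj_in/out_cmd_hdr layouts added at the end of this file. A sketch of creating a UCTX object with the existing MLX5_SET/MLX5_GET/mlx5_cmd_exec() helpers (function name illustrative, error handling trimmed):

/* Sketch: issue CREATE_GENERAL_OBJECT for a UCTX and return its object id. */
static int sketch_create_uctx(struct mlx5_core_dev *dev, u32 *obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}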
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 64d0f40d4cc3..37e065a80a43 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa {
576enum fpga_tls_cmds { 576enum fpga_tls_cmds {
577 CMD_SETUP_STREAM = 0x1001, 577 CMD_SETUP_STREAM = 0x1001,
578 CMD_TEARDOWN_STREAM = 0x1002, 578 CMD_TEARDOWN_STREAM = 0x1002,
579 CMD_RESYNC_RX = 0x1003,
579}; 580};
580 581
581#define MLX5_TLS_1_2 (0) 582#define MLX5_TLS_1_2 (0)
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 9208cb8809ac..7e7c6dfcfb09 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -43,8 +43,6 @@ enum {
43}; 43};
44 44
45u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); 45u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
46u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
47 u16 vport);
48int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, 46int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
49 u16 vport, u8 state); 47 u16 vport, u8 state);
50int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, 48int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a0fbb9ffe380..68a5121694ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
155 * mmap() functions). 155 * mmap() functions).
156 */ 156 */
157 157
158extern struct kmem_cache *vm_area_cachep; 158struct vm_area_struct *vm_area_alloc(struct mm_struct *);
159struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
160void vm_area_free(struct vm_area_struct *);
159 161
160#ifndef CONFIG_MMU 162#ifndef CONFIG_MMU
161extern struct rb_root nommu_region_tree; 163extern struct rb_root nommu_region_tree;
@@ -450,6 +452,23 @@ struct vm_operations_struct {
450 unsigned long addr); 452 unsigned long addr);
451}; 453};
452 454
455static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
456{
457 static const struct vm_operations_struct dummy_vm_ops = {};
458
459 vma->vm_mm = mm;
460 vma->vm_ops = &dummy_vm_ops;
461 INIT_LIST_HEAD(&vma->anon_vma_chain);
462}
463
464static inline void vma_set_anonymous(struct vm_area_struct *vma)
465{
466 vma->vm_ops = NULL;
467}
468
469/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
470#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
471
453struct mmu_gather; 472struct mmu_gather;
454struct inode; 473struct inode;
455 474
@@ -2132,7 +2151,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2132 struct mminit_pfnnid_cache *state); 2151 struct mminit_pfnnid_cache *state);
2133#endif 2152#endif
2134 2153
2135#ifdef CONFIG_HAVE_MEMBLOCK 2154#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
2136void zero_resv_unavail(void); 2155void zero_resv_unavail(void);
2137#else 2156#else
2138static inline void zero_resv_unavail(void) {} 2157static inline void zero_resv_unavail(void) {}
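Note: with vm_area_cachep gone from the header, VMAs come from vm_area_alloc()/vm_area_dup() and go back through vm_area_free(), while vma_init() and vma_set_anonymous() cover common initialisation. A lifecycle sketch, assuming vm_area_alloc() performs the vma_init() step as in the matching kernel/fork.c change (function name illustrative):

/* Sketch: allocate an anonymous VMA with the new helpers and drop it again. */
static int sketch_make_anon_vma(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (!vma)
		return -ENOMEM;

	vma_set_anonymous(vma);		/* vma->vm_ops = NULL */
	/* ... fill vm_start/vm_end/vm_flags and insert it ... */

	vm_area_free(vma);
	return 0;
}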
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 99ce070e7dcb..efdc24dd9e97 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -335,176 +335,183 @@ struct core_state {
335 335
336struct kioctx_table; 336struct kioctx_table;
337struct mm_struct { 337struct mm_struct {
338 struct vm_area_struct *mmap; /* list of VMAs */ 338 struct {
339 struct rb_root mm_rb; 339 struct vm_area_struct *mmap; /* list of VMAs */
340 u32 vmacache_seqnum; /* per-thread vmacache */ 340 struct rb_root mm_rb;
341 u32 vmacache_seqnum; /* per-thread vmacache */
341#ifdef CONFIG_MMU 342#ifdef CONFIG_MMU
342 unsigned long (*get_unmapped_area) (struct file *filp, 343 unsigned long (*get_unmapped_area) (struct file *filp,
343 unsigned long addr, unsigned long len, 344 unsigned long addr, unsigned long len,
344 unsigned long pgoff, unsigned long flags); 345 unsigned long pgoff, unsigned long flags);
345#endif 346#endif
346 unsigned long mmap_base; /* base of mmap area */ 347 unsigned long mmap_base; /* base of mmap area */
347 unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ 348 unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
348#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES 349#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
349 /* Base adresses for compatible mmap() */ 350 /* Base adresses for compatible mmap() */
350 unsigned long mmap_compat_base; 351 unsigned long mmap_compat_base;
351 unsigned long mmap_compat_legacy_base; 352 unsigned long mmap_compat_legacy_base;
352#endif 353#endif
353 unsigned long task_size; /* size of task vm space */ 354 unsigned long task_size; /* size of task vm space */
354 unsigned long highest_vm_end; /* highest vma end address */ 355 unsigned long highest_vm_end; /* highest vma end address */
355 pgd_t * pgd; 356 pgd_t * pgd;
356 357
357 /** 358 /**
358 * @mm_users: The number of users including userspace. 359 * @mm_users: The number of users including userspace.
359 * 360 *
360 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops 361 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
361 * to 0 (i.e. when the task exits and there are no other temporary 362 * drops to 0 (i.e. when the task exits and there are no other
362 * reference holders), we also release a reference on @mm_count 363 * temporary reference holders), we also release a reference on
363 * (which may then free the &struct mm_struct if @mm_count also 364 * @mm_count (which may then free the &struct mm_struct if
364 * drops to 0). 365 * @mm_count also drops to 0).
365 */ 366 */
366 atomic_t mm_users; 367 atomic_t mm_users;
367 368
368 /** 369 /**
369 * @mm_count: The number of references to &struct mm_struct 370 * @mm_count: The number of references to &struct mm_struct
370 * (@mm_users count as 1). 371 * (@mm_users count as 1).
371 * 372 *
372 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the 373 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
373 * &struct mm_struct is freed. 374 * &struct mm_struct is freed.
374 */ 375 */
375 atomic_t mm_count; 376 atomic_t mm_count;
376 377
377#ifdef CONFIG_MMU 378#ifdef CONFIG_MMU
378 atomic_long_t pgtables_bytes; /* PTE page table pages */ 379 atomic_long_t pgtables_bytes; /* PTE page table pages */
379#endif 380#endif
380 int map_count; /* number of VMAs */ 381 int map_count; /* number of VMAs */
381 382
382 spinlock_t page_table_lock; /* Protects page tables and some counters */ 383 spinlock_t page_table_lock; /* Protects page tables and some
383 struct rw_semaphore mmap_sem; 384 * counters
385 */
386 struct rw_semaphore mmap_sem;
384 387
385 struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung 388 struct list_head mmlist; /* List of maybe swapped mm's. These
386 * together off init_mm.mmlist, and are protected 389 * are globally strung together off
387 * by mmlist_lock 390 * init_mm.mmlist, and are protected
388 */ 391 * by mmlist_lock
392 */
389 393
390 394
391 unsigned long hiwater_rss; /* High-watermark of RSS usage */ 395 unsigned long hiwater_rss; /* High-watermark of RSS usage */
392 unsigned long hiwater_vm; /* High-water virtual memory usage */ 396 unsigned long hiwater_vm; /* High-water virtual memory usage */
393 397
394 unsigned long total_vm; /* Total pages mapped */ 398 unsigned long total_vm; /* Total pages mapped */
395 unsigned long locked_vm; /* Pages that have PG_mlocked set */ 399 unsigned long locked_vm; /* Pages that have PG_mlocked set */
396 unsigned long pinned_vm; /* Refcount permanently increased */ 400 unsigned long pinned_vm; /* Refcount permanently increased */
397 unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ 401 unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
398 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ 402 unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
399 unsigned long stack_vm; /* VM_STACK */ 403 unsigned long stack_vm; /* VM_STACK */
400 unsigned long def_flags; 404 unsigned long def_flags;
401 405
402 spinlock_t arg_lock; /* protect the below fields */ 406 spinlock_t arg_lock; /* protect the below fields */
403 unsigned long start_code, end_code, start_data, end_data; 407 unsigned long start_code, end_code, start_data, end_data;
404 unsigned long start_brk, brk, start_stack; 408 unsigned long start_brk, brk, start_stack;
405 unsigned long arg_start, arg_end, env_start, env_end; 409 unsigned long arg_start, arg_end, env_start, env_end;
406 410
407 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ 411 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
408 412
409 /* 413 /*
410 * Special counters, in some configurations protected by the 414 * Special counters, in some configurations protected by the
411 * page_table_lock, in other configurations by being atomic. 415 * page_table_lock, in other configurations by being atomic.
412 */ 416 */
413 struct mm_rss_stat rss_stat; 417 struct mm_rss_stat rss_stat;
414
415 struct linux_binfmt *binfmt;
416 418
417 cpumask_var_t cpu_vm_mask_var; 419 struct linux_binfmt *binfmt;
418 420
419 /* Architecture-specific MM context */ 421 /* Architecture-specific MM context */
420 mm_context_t context; 422 mm_context_t context;
421 423
422 unsigned long flags; /* Must use atomic bitops to access the bits */ 424 unsigned long flags; /* Must use atomic bitops to access */
423 425
424 struct core_state *core_state; /* coredumping support */ 426 struct core_state *core_state; /* coredumping support */
425#ifdef CONFIG_MEMBARRIER 427#ifdef CONFIG_MEMBARRIER
426 atomic_t membarrier_state; 428 atomic_t membarrier_state;
427#endif 429#endif
428#ifdef CONFIG_AIO 430#ifdef CONFIG_AIO
429 spinlock_t ioctx_lock; 431 spinlock_t ioctx_lock;
430 struct kioctx_table __rcu *ioctx_table; 432 struct kioctx_table __rcu *ioctx_table;
431#endif 433#endif
432#ifdef CONFIG_MEMCG 434#ifdef CONFIG_MEMCG
433 /* 435 /*
434 * "owner" points to a task that is regarded as the canonical 436 * "owner" points to a task that is regarded as the canonical
435 * user/owner of this mm. All of the following must be true in 437 * user/owner of this mm. All of the following must be true in
436 * order for it to be changed: 438 * order for it to be changed:
437 * 439 *
438 * current == mm->owner 440 * current == mm->owner
439 * current->mm != mm 441 * current->mm != mm
440 * new_owner->mm == mm 442 * new_owner->mm == mm
441 * new_owner->alloc_lock is held 443 * new_owner->alloc_lock is held
442 */ 444 */
443 struct task_struct __rcu *owner; 445 struct task_struct __rcu *owner;
444#endif 446#endif
445 struct user_namespace *user_ns; 447 struct user_namespace *user_ns;
446 448
447 /* store ref to file /proc/<pid>/exe symlink points to */ 449 /* store ref to file /proc/<pid>/exe symlink points to */
448 struct file __rcu *exe_file; 450 struct file __rcu *exe_file;
449#ifdef CONFIG_MMU_NOTIFIER 451#ifdef CONFIG_MMU_NOTIFIER
450 struct mmu_notifier_mm *mmu_notifier_mm; 452 struct mmu_notifier_mm *mmu_notifier_mm;
451#endif 453#endif
452#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 454#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
453 pgtable_t pmd_huge_pte; /* protected by page_table_lock */ 455 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
454#endif
455#ifdef CONFIG_CPUMASK_OFFSTACK
456 struct cpumask cpumask_allocation;
457#endif 456#endif
458#ifdef CONFIG_NUMA_BALANCING 457#ifdef CONFIG_NUMA_BALANCING
459 /* 458 /*
460 * numa_next_scan is the next time that the PTEs will be marked 459 * numa_next_scan is the next time that the PTEs will be marked
461 * pte_numa. NUMA hinting faults will gather statistics and migrate 460 * pte_numa. NUMA hinting faults will gather statistics and
462 * pages to new nodes if necessary. 461 * migrate pages to new nodes if necessary.
463 */ 462 */
464 unsigned long numa_next_scan; 463 unsigned long numa_next_scan;
465 464
466 /* Restart point for scanning and setting pte_numa */ 465 /* Restart point for scanning and setting pte_numa */
467 unsigned long numa_scan_offset; 466 unsigned long numa_scan_offset;
468 467
469 /* numa_scan_seq prevents two threads setting pte_numa */ 468 /* numa_scan_seq prevents two threads setting pte_numa */
470 int numa_scan_seq; 469 int numa_scan_seq;
471#endif 470#endif
472 /* 471 /*
473 * An operation with batched TLB flushing is going on. Anything that 472 * An operation with batched TLB flushing is going on. Anything
474 * can move process memory needs to flush the TLB when moving a 473 * that can move process memory needs to flush the TLB when
475 * PROT_NONE or PROT_NUMA mapped page. 474 * moving a PROT_NONE or PROT_NUMA mapped page.
476 */ 475 */
477 atomic_t tlb_flush_pending; 476 atomic_t tlb_flush_pending;
478#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 477#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
479 /* See flush_tlb_batched_pending() */ 478 /* See flush_tlb_batched_pending() */
480 bool tlb_flush_batched; 479 bool tlb_flush_batched;
481#endif 480#endif
482 struct uprobes_state uprobes_state; 481 struct uprobes_state uprobes_state;
483#ifdef CONFIG_HUGETLB_PAGE 482#ifdef CONFIG_HUGETLB_PAGE
484 atomic_long_t hugetlb_usage; 483 atomic_long_t hugetlb_usage;
485#endif 484#endif
486 struct work_struct async_put_work; 485 struct work_struct async_put_work;
487 486
488#if IS_ENABLED(CONFIG_HMM) 487#if IS_ENABLED(CONFIG_HMM)
489 /* HMM needs to track a few things per mm */ 488 /* HMM needs to track a few things per mm */
490 struct hmm *hmm; 489 struct hmm *hmm;
491#endif 490#endif
492} __randomize_layout; 491 } __randomize_layout;
492
493 /*
494 * The mm_cpumask needs to be at the end of mm_struct, because it
495 * is dynamically sized based on nr_cpu_ids.
496 */
497 unsigned long cpu_bitmap[];
498};
493 499
494extern struct mm_struct init_mm; 500extern struct mm_struct init_mm;
495 501
502/* Pointer magic because the dynamic array size confuses some compilers. */
496static inline void mm_init_cpumask(struct mm_struct *mm) 503static inline void mm_init_cpumask(struct mm_struct *mm)
497{ 504{
498#ifdef CONFIG_CPUMASK_OFFSTACK 505 unsigned long cpu_bitmap = (unsigned long)mm;
499 mm->cpu_vm_mask_var = &mm->cpumask_allocation; 506
500#endif 507 cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
501 cpumask_clear(mm->cpu_vm_mask_var); 508 cpumask_clear((struct cpumask *)cpu_bitmap);
502} 509}
503 510
504/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ 511/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
505static inline cpumask_t *mm_cpumask(struct mm_struct *mm) 512static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
506{ 513{
507 return mm->cpu_vm_mask_var; 514 return (struct cpumask *)&mm->cpu_bitmap;
508} 515}
509 516
510struct mmu_gather; 517struct mmu_gather;
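Note: because cpu_bitmap[] is now a flexible array at the tail of mm_struct, the allocation side must reserve cpumask_size() extra bytes before mm_init_cpumask() clears them. A sketch of how the slab cache is expected to be sized, mirroring the kernel/fork.c side of this change (cache name and function are illustrative):

/* Sketch: size the mm_struct cache for nr_cpu_ids bits of trailing bitmap. */
static struct kmem_cache *sketch_mm_cachep;

static void sketch_mm_cache_init(void)
{
	unsigned int mm_size = sizeof(struct mm_struct) + cpumask_size();

	sketch_mm_cachep = kmem_cache_create("mm_struct_sketch", mm_size, 0,
					     SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					     NULL);
}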
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 2014bd19f28e..96a71a648eed 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -501,6 +501,7 @@ enum dmi_field {
501 DMI_PRODUCT_VERSION, 501 DMI_PRODUCT_VERSION,
502 DMI_PRODUCT_SERIAL, 502 DMI_PRODUCT_SERIAL,
503 DMI_PRODUCT_UUID, 503 DMI_PRODUCT_UUID,
504 DMI_PRODUCT_SKU,
504 DMI_PRODUCT_FAMILY, 505 DMI_PRODUCT_FAMILY,
505 DMI_BOARD_VENDOR, 506 DMI_BOARD_VENDOR,
506 DMI_BOARD_NAME, 507 DMI_BOARD_NAME,
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index d633f737b3c6..6675b9f81979 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -2,7 +2,7 @@
2#define __LINUX_MROUTE_BASE_H 2#define __LINUX_MROUTE_BASE_H
3 3
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/rhashtable.h> 5#include <linux/rhashtable-types.h>
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <net/net_namespace.h> 7#include <net/net_namespace.h>
8#include <net/sock.h> 8#include <net/sock.h>
@@ -254,6 +254,7 @@ struct mr_table {
254 atomic_t cache_resolve_queue_len; 254 atomic_t cache_resolve_queue_len;
255 bool mroute_do_assert; 255 bool mroute_do_assert;
256 bool mroute_do_pim; 256 bool mroute_do_pim;
257 bool mroute_do_wrvifwhole;
257 int mroute_reg_vif_num; 258 int mroute_reg_vif_num;
258}; 259};
259 260
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a86c4fa93115..cd0be91bdefa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -67,9 +67,11 @@ struct mtd_erase_region_info {
67 * @datbuf: data buffer - if NULL only oob data are read/written 67 * @datbuf: data buffer - if NULL only oob data are read/written
68 * @oobbuf: oob data buffer 68 * @oobbuf: oob data buffer
69 * 69 *
70 * Note, it is allowed to read more than one OOB area at one go, but not write. 70 * Note, some MTD drivers do not allow you to write more than one OOB area at
71 * The interface assumes that the OOB write requests program only one page's 71 * one go. If you try to do that on such an MTD device, -EINVAL will be
 72 * OOB area. 72 * returned. If you want to make your implementation portable on all kinds of MTD
 73 * devices you should split the write request into several sub-requests when the
74 * request crosses a page boundary.
73 */ 75 */
74struct mtd_oob_ops { 76struct mtd_oob_ops {
75 unsigned int mode; 77 unsigned int mode;
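Note: the reworded mtd_oob_ops comment asks portable callers to split OOB writes so each request stays within one page. A sketch of such a split loop (the helper is illustrative; it assumes one mtd->oobsize chunk per page and trims edge cases):

/* Sketch: issue one mtd_write_oob() call per page worth of OOB data. */
static int sketch_write_oob_split(struct mtd_info *mtd, loff_t to,
				  const u8 *oob, size_t len)
{
	int err;

	while (len) {
		struct mtd_oob_ops ops = {
			.mode	= MTD_OPS_PLACE_OOB,
			.ooblen	= min(len, (size_t)mtd->oobsize),
			.oobbuf	= (u8 *)oob,
		};

		err = mtd_write_oob(mtd, to, &ops);
		if (err)
			return err;

		oob += ops.oobretlen;
		len -= ops.oobretlen;
		to += mtd->writesize;		/* next page */
	}

	return 0;
}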
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 3e8ec3b8a39c..efb2345359bb 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,11 +21,10 @@
21#include <linux/mtd/mtd.h> 21#include <linux/mtd/mtd.h>
22#include <linux/mtd/flashchip.h> 22#include <linux/mtd/flashchip.h>
23#include <linux/mtd/bbm.h> 23#include <linux/mtd/bbm.h>
24#include <linux/of.h>
24#include <linux/types.h> 25#include <linux/types.h>
25 26
26struct mtd_info;
27struct nand_flash_dev; 27struct nand_flash_dev;
28struct device_node;
29 28
30/* Scan and identify a NAND device */ 29/* Scan and identify a NAND device */
31int nand_scan_with_ids(struct mtd_info *mtd, int max_chips, 30int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
@@ -36,17 +35,6 @@ static inline int nand_scan(struct mtd_info *mtd, int max_chips)
36 return nand_scan_with_ids(mtd, max_chips, NULL); 35 return nand_scan_with_ids(mtd, max_chips, NULL);
37} 36}
38 37
39/*
40 * Separate phases of nand_scan(), allowing board driver to intervene
41 * and override command or ECC setup according to flash type.
42 */
43int nand_scan_ident(struct mtd_info *mtd, int max_chips,
44 struct nand_flash_dev *table);
45int nand_scan_tail(struct mtd_info *mtd);
46
47/* Unregister the MTD device and free resources held by the NAND device */
48void nand_release(struct mtd_info *mtd);
49
50/* Internal helper for board drivers which need to override command function */ 38/* Internal helper for board drivers which need to override command function */
51void nand_wait_ready(struct mtd_info *mtd); 39void nand_wait_ready(struct mtd_info *mtd);
52 40
@@ -121,6 +109,7 @@ enum nand_ecc_algo {
121 NAND_ECC_UNKNOWN, 109 NAND_ECC_UNKNOWN,
122 NAND_ECC_HAMMING, 110 NAND_ECC_HAMMING,
123 NAND_ECC_BCH, 111 NAND_ECC_BCH,
112 NAND_ECC_RS,
124}; 113};
125 114
126/* 115/*
@@ -218,6 +207,12 @@ enum nand_ecc_algo {
218 */ 207 */
219#define NAND_WAIT_TCCS 0x00200000 208#define NAND_WAIT_TCCS 0x00200000
220 209
210/*
211 * Whether the NAND chip is a boot medium. Drivers might use this information
212 * to select ECC algorithms supported by the boot ROM or similar restrictions.
213 */
214#define NAND_IS_BOOT_MEDIUM 0x00400000
215
221/* Options set by nand scan */ 216/* Options set by nand scan */
222/* Nand scan has allocated controller struct */ 217/* Nand scan has allocated controller struct */
223#define NAND_CONTROLLER_ALLOC 0x80000000 218#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -230,6 +225,17 @@ enum nand_ecc_algo {
230/* Keep gcc happy */ 225/* Keep gcc happy */
231struct nand_chip; 226struct nand_chip;
232 227
228/* ONFI version bits */
229#define ONFI_VERSION_1_0 BIT(1)
230#define ONFI_VERSION_2_0 BIT(2)
231#define ONFI_VERSION_2_1 BIT(3)
232#define ONFI_VERSION_2_2 BIT(4)
233#define ONFI_VERSION_2_3 BIT(5)
234#define ONFI_VERSION_3_0 BIT(6)
235#define ONFI_VERSION_3_1 BIT(7)
236#define ONFI_VERSION_3_2 BIT(8)
237#define ONFI_VERSION_4_0 BIT(9)
238
233/* ONFI features */ 239/* ONFI features */
234#define ONFI_FEATURE_16_BIT_BUS (1 << 0) 240#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
235#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) 241#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
@@ -470,13 +476,13 @@ struct onfi_params {
470 */ 476 */
471struct nand_parameters { 477struct nand_parameters {
472 /* Generic parameters */ 478 /* Generic parameters */
473 char model[100]; 479 const char *model;
474 bool supports_set_get_features; 480 bool supports_set_get_features;
475 DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER); 481 DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
476 DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER); 482 DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
477 483
478 /* ONFI parameters */ 484 /* ONFI parameters */
479 struct onfi_params onfi; 485 struct onfi_params *onfi;
480}; 486};
481 487
482/* The maximum expected count of bytes in the NAND ID sequence */ 488/* The maximum expected count of bytes in the NAND ID sequence */
@@ -493,20 +499,42 @@ struct nand_id {
493}; 499};
494 500
495/** 501/**
496 * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices 502 * struct nand_controller_ops - Controller operations
503 *
 504 * @attach_chip: this method is called after the NAND detection phase, once the
505 * flash ID and MTD fields such as erase size, page size and OOB
506 * size have been set up. ECC requirements are available if
507 * provided by the NAND chip or device tree. Typically used to
508 * choose the appropriate ECC configuration and allocate
509 * associated resources.
510 * This hook is optional.
511 * @detach_chip: free all resources allocated/claimed in
512 * nand_controller_ops->attach_chip().
513 * This hook is optional.
514 */
515struct nand_controller_ops {
516 int (*attach_chip)(struct nand_chip *chip);
517 void (*detach_chip)(struct nand_chip *chip);
518};
519
520/**
521 * struct nand_controller - Structure used to describe a NAND controller
522 *
497 * @lock: protection lock 523 * @lock: protection lock
498 * @active: the mtd device which holds the controller currently 524 * @active: the mtd device which holds the controller currently
499 * @wq: wait queue to sleep on if a NAND operation is in 525 * @wq: wait queue to sleep on if a NAND operation is in
500 * progress used instead of the per chip wait queue 526 * progress used instead of the per chip wait queue
501 * when a hw controller is available. 527 * when a hw controller is available.
528 * @ops: NAND controller operations.
502 */ 529 */
503struct nand_hw_control { 530struct nand_controller {
504 spinlock_t lock; 531 spinlock_t lock;
505 struct nand_chip *active; 532 struct nand_chip *active;
506 wait_queue_head_t wq; 533 wait_queue_head_t wq;
534 const struct nand_controller_ops *ops;
507}; 535};
508 536
509static inline void nand_hw_control_init(struct nand_hw_control *nfc) 537static inline void nand_controller_init(struct nand_controller *nfc)
510{ 538{
511 nfc->active = NULL; 539 nfc->active = NULL;
512 spin_lock_init(&nfc->lock); 540 spin_lock_init(&nfc->lock);
@@ -778,11 +806,15 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
778 * implementation) if any. 806 * implementation) if any.
779 * @cleanup: the ->init() function may have allocated resources, ->cleanup() 807 * @cleanup: the ->init() function may have allocated resources, ->cleanup()
780 * is here to let vendor specific code release those resources. 808 * is here to let vendor specific code release those resources.
809 * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
810 * page. This is called after the checksum is verified.
781 */ 811 */
782struct nand_manufacturer_ops { 812struct nand_manufacturer_ops {
783 void (*detect)(struct nand_chip *chip); 813 void (*detect)(struct nand_chip *chip);
784 int (*init)(struct nand_chip *chip); 814 int (*init)(struct nand_chip *chip);
785 void (*cleanup)(struct nand_chip *chip); 815 void (*cleanup)(struct nand_chip *chip);
816 void (*fixup_onfi_param_page)(struct nand_chip *chip,
817 struct nand_onfi_params *p);
786}; 818};
787 819
788/** 820/**
@@ -986,14 +1018,14 @@ struct nand_subop {
986 unsigned int last_instr_end_off; 1018 unsigned int last_instr_end_off;
987}; 1019};
988 1020
989int nand_subop_get_addr_start_off(const struct nand_subop *subop, 1021unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
990 unsigned int op_id); 1022 unsigned int op_id);
991int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, 1023unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
992 unsigned int op_id); 1024 unsigned int op_id);
993int nand_subop_get_data_start_off(const struct nand_subop *subop, 1025unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
994 unsigned int op_id); 1026 unsigned int op_id);
995int nand_subop_get_data_len(const struct nand_subop *subop, 1027unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
996 unsigned int op_id); 1028 unsigned int op_id);
997 1029
998/** 1030/**
999 * struct nand_op_parser_addr_constraints - Constraints for address instructions 1031 * struct nand_op_parser_addr_constraints - Constraints for address instructions
@@ -1176,9 +1208,9 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
1176 * setting the read-retry mode. Mostly needed for MLC NAND. 1208 * setting the read-retry mode. Mostly needed for MLC NAND.
1177 * @ecc: [BOARDSPECIFIC] ECC control structure 1209 * @ecc: [BOARDSPECIFIC] ECC control structure
1178 * @buf_align: minimum buffer alignment required by a platform 1210 * @buf_align: minimum buffer alignment required by a platform
1179 * @hwcontrol: platform-specific hardware control structure 1211 * @dummy_controller: dummy controller implementation for drivers that can
1212 * only control a single chip
1180 * @erase: [REPLACEABLE] erase function 1213 * @erase: [REPLACEABLE] erase function
1181 * @scan_bbt: [REPLACEABLE] function to scan bad block table
1182 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring 1214 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
1183 * data from array to read regs (tR). 1215 * data from array to read regs (tR).
1184 * @state: [INTERN] the current state of the NAND device 1216 * @state: [INTERN] the current state of the NAND device
@@ -1271,7 +1303,6 @@ struct nand_chip {
1271 const struct nand_operation *op, 1303 const struct nand_operation *op,
1272 bool check_only); 1304 bool check_only);
1273 int (*erase)(struct mtd_info *mtd, int page); 1305 int (*erase)(struct mtd_info *mtd, int page);
1274 int (*scan_bbt)(struct mtd_info *mtd);
1275 int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip, 1306 int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
1276 int feature_addr, uint8_t *subfeature_para); 1307 int feature_addr, uint8_t *subfeature_para);
1277 int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip, 1308 int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1314,11 +1345,11 @@ struct nand_chip {
1314 flstate_t state; 1345 flstate_t state;
1315 1346
1316 uint8_t *oob_poi; 1347 uint8_t *oob_poi;
1317 struct nand_hw_control *controller; 1348 struct nand_controller *controller;
1318 1349
1319 struct nand_ecc_ctrl ecc; 1350 struct nand_ecc_ctrl ecc;
1320 unsigned long buf_align; 1351 unsigned long buf_align;
1321 struct nand_hw_control hwcontrol; 1352 struct nand_controller dummy_controller;
1322 1353
1323 uint8_t *bbt; 1354 uint8_t *bbt;
1324 struct nand_bbt_descr *bbt_td; 1355 struct nand_bbt_descr *bbt_td;
@@ -1517,14 +1548,12 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
1517extern const struct nand_manufacturer_ops amd_nand_manuf_ops; 1548extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
1518extern const struct nand_manufacturer_ops macronix_nand_manuf_ops; 1549extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
1519 1550
1520int nand_default_bbt(struct mtd_info *mtd); 1551int nand_create_bbt(struct nand_chip *chip);
1521int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); 1552int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
1522int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); 1553int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
1523int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt); 1554int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
1524int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, 1555int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
1525 int allowbbt); 1556 int allowbbt);
1526int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
1527 size_t *retlen, uint8_t *buf);
1528 1557
1529/** 1558/**
1530 * struct platform_nand_chip - chip level device structure 1559 * struct platform_nand_chip - chip level device structure
@@ -1555,14 +1584,12 @@ struct platform_device;
1555 * struct platform_nand_ctrl - controller level device structure 1584 * struct platform_nand_ctrl - controller level device structure
1556 * @probe: platform specific function to probe/setup hardware 1585 * @probe: platform specific function to probe/setup hardware
1557 * @remove: platform specific function to remove/teardown hardware 1586 * @remove: platform specific function to remove/teardown hardware
1558 * @hwcontrol: platform specific hardware control structure
1559 * @dev_ready: platform specific function to read ready/busy pin 1587 * @dev_ready: platform specific function to read ready/busy pin
1560 * @select_chip: platform specific chip select function 1588 * @select_chip: platform specific chip select function
1561 * @cmd_ctrl: platform specific function for controlling 1589 * @cmd_ctrl: platform specific function for controlling
1562 * ALE/CLE/nCE. Also used to write command and address 1590 * ALE/CLE/nCE. Also used to write command and address
1563 * @write_buf: platform specific function for write buffer 1591 * @write_buf: platform specific function for write buffer
1564 * @read_buf: platform specific function for read buffer 1592 * @read_buf: platform specific function for read buffer
1565 * @read_byte: platform specific function to read one byte from chip
1566 * @priv: private data to transport driver specific settings 1593 * @priv: private data to transport driver specific settings
1567 * 1594 *
1568 * All fields are optional and depend on the hardware driver requirements 1595 * All fields are optional and depend on the hardware driver requirements
@@ -1570,13 +1597,11 @@ struct platform_device;
1570struct platform_nand_ctrl { 1597struct platform_nand_ctrl {
1571 int (*probe)(struct platform_device *pdev); 1598 int (*probe)(struct platform_device *pdev);
1572 void (*remove)(struct platform_device *pdev); 1599 void (*remove)(struct platform_device *pdev);
1573 void (*hwcontrol)(struct mtd_info *mtd, int cmd);
1574 int (*dev_ready)(struct mtd_info *mtd); 1600 int (*dev_ready)(struct mtd_info *mtd);
1575 void (*select_chip)(struct mtd_info *mtd, int chip); 1601 void (*select_chip)(struct mtd_info *mtd, int chip);
1576 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); 1602 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
1577 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); 1603 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
1578 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); 1604 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
1579 unsigned char (*read_byte)(struct mtd_info *mtd);
1580 void *priv; 1605 void *priv;
1581}; 1606};
1582 1607
@@ -1593,10 +1618,10 @@ struct platform_nand_data {
1593/* return the supported asynchronous timing mode. */ 1618/* return the supported asynchronous timing mode. */
1594static inline int onfi_get_async_timing_mode(struct nand_chip *chip) 1619static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
1595{ 1620{
1596 if (!chip->parameters.onfi.version) 1621 if (!chip->parameters.onfi)
1597 return ONFI_TIMING_MODE_UNKNOWN; 1622 return ONFI_TIMING_MODE_UNKNOWN;
1598 1623
1599 return chip->parameters.onfi.async_timing_mode; 1624 return chip->parameters.onfi->async_timing_mode;
1600} 1625}
1601 1626
1602int onfi_fill_data_interface(struct nand_chip *chip, 1627int onfi_fill_data_interface(struct nand_chip *chip,
@@ -1641,14 +1666,8 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
1641 void *extraoob, int extraooblen, 1666 void *extraoob, int extraooblen,
1642 int threshold); 1667 int threshold);
1643 1668
1644int nand_check_ecc_caps(struct nand_chip *chip, 1669int nand_ecc_choose_conf(struct nand_chip *chip,
1645 const struct nand_ecc_caps *caps, int oobavail); 1670 const struct nand_ecc_caps *caps, int oobavail);
1646
1647int nand_match_ecc_req(struct nand_chip *chip,
1648 const struct nand_ecc_caps *caps, int oobavail);
1649
1650int nand_maximize_ecc(struct nand_chip *chip,
1651 const struct nand_ecc_caps *caps, int oobavail);
1652 1671
1653/* Default write_oob implementation */ 1672/* Default write_oob implementation */
1654int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); 1673int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
@@ -1674,10 +1693,14 @@ int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
1674/* Default read_page_raw implementation */ 1693/* Default read_page_raw implementation */
1675int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1694int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1676 uint8_t *buf, int oob_required, int page); 1695 uint8_t *buf, int oob_required, int page);
1696int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
1697 u8 *buf, int oob_required, int page);
1677 1698
1678/* Default write_page_raw implementation */ 1699/* Default write_page_raw implementation */
1679int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1700int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1680 const uint8_t *buf, int oob_required, int page); 1701 const uint8_t *buf, int oob_required, int page);
1702int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
1703 const u8 *buf, int oob_required, int page);
1681 1704
1682/* Reset and initialize a NAND device */ 1705/* Reset and initialize a NAND device */
1683int nand_reset(struct nand_chip *chip, int chipnr); 1706int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1711,8 +1734,13 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1711int nand_write_data_op(struct nand_chip *chip, const void *buf, 1734int nand_write_data_op(struct nand_chip *chip, const void *buf,
1712 unsigned int len, bool force_8bit); 1735 unsigned int len, bool force_8bit);
1713 1736
1714/* Free resources held by the NAND device */ 1737/*
1738 * Free resources held by the NAND device, must be called on error after a
 1739 * successful nand_scan().
1740 */
1715void nand_cleanup(struct nand_chip *chip); 1741void nand_cleanup(struct nand_chip *chip);
 1742/* Unregisters the MTD device and calls nand_cleanup() */
1743void nand_release(struct mtd_info *mtd);
1716 1744
1717/* Default extended ID decoding function */ 1745/* Default extended ID decoding function */
1718void nand_decode_ext_id(struct nand_chip *chip); 1746void nand_decode_ext_id(struct nand_chip *chip);
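Note: with nand_hw_control renamed to nand_controller and gaining an ops pointer, ECC configuration moves out of the probe path into ->attach_chip(), which nand_scan() invokes once the flash has been identified. A sketch of the driver-side wiring (names are illustrative):

/* Sketch: hook up the new controller ops and let attach_chip() pick ECC. */
static int sketch_attach_chip(struct nand_chip *chip)
{
	chip->ecc.mode = NAND_ECC_HW;
	/* ... choose strength/step size, allocate ECC buffers ... */
	return 0;
}

static const struct nand_controller_ops sketch_ctrl_ops = {
	.attach_chip = sketch_attach_chip,
};

static void sketch_controller_setup(struct nand_controller *ctrl,
				    struct nand_chip *chip)
{
	nand_controller_init(ctrl);
	ctrl->ops = &sketch_ctrl_ops;
	chip->controller = ctrl;
}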
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index e60da0d34cc1..c922e97f205a 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -235,6 +235,7 @@ enum spi_nor_option_flags {
235 SNOR_F_S3AN_ADDR_DEFAULT = BIT(3), 235 SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
236 SNOR_F_READY_XSR_RDY = BIT(4), 236 SNOR_F_READY_XSR_RDY = BIT(4),
237 SNOR_F_USE_CLSR = BIT(5), 237 SNOR_F_USE_CLSR = BIT(5),
238 SNOR_F_BROKEN_RESET = BIT(6),
238}; 239};
239 240
240/** 241/**
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 000000000000..088ff96c3eb6
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,421 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2017 Micron Technology, Inc.
4 *
5 * Authors:
6 * Peter Pan <peterpandong@micron.com>
7 */
8#ifndef __LINUX_MTD_SPINAND_H
9#define __LINUX_MTD_SPINAND_H
10
11#include <linux/mutex.h>
12#include <linux/bitops.h>
13#include <linux/device.h>
14#include <linux/mtd/mtd.h>
15#include <linux/mtd/nand.h>
16#include <linux/spi/spi.h>
17#include <linux/spi/spi-mem.h>
18
19/**
20 * Standard SPI NAND flash operations
21 */
22
23#define SPINAND_RESET_OP \
24 SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
25 SPI_MEM_OP_NO_ADDR, \
26 SPI_MEM_OP_NO_DUMMY, \
27 SPI_MEM_OP_NO_DATA)
28
29#define SPINAND_WR_EN_DIS_OP(enable) \
30 SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
31 SPI_MEM_OP_NO_ADDR, \
32 SPI_MEM_OP_NO_DUMMY, \
33 SPI_MEM_OP_NO_DATA)
34
35#define SPINAND_READID_OP(ndummy, buf, len) \
36 SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
37 SPI_MEM_OP_NO_ADDR, \
38 SPI_MEM_OP_DUMMY(ndummy, 1), \
39 SPI_MEM_OP_DATA_IN(len, buf, 1))
40
41#define SPINAND_SET_FEATURE_OP(reg, valptr) \
42 SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
43 SPI_MEM_OP_ADDR(1, reg, 1), \
44 SPI_MEM_OP_NO_DUMMY, \
45 SPI_MEM_OP_DATA_OUT(1, valptr, 1))
46
47#define SPINAND_GET_FEATURE_OP(reg, valptr) \
48 SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
49 SPI_MEM_OP_ADDR(1, reg, 1), \
50 SPI_MEM_OP_NO_DUMMY, \
51 SPI_MEM_OP_DATA_IN(1, valptr, 1))
52
53#define SPINAND_BLK_ERASE_OP(addr) \
54 SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
55 SPI_MEM_OP_ADDR(3, addr, 1), \
56 SPI_MEM_OP_NO_DUMMY, \
57 SPI_MEM_OP_NO_DATA)
58
59#define SPINAND_PAGE_READ_OP(addr) \
60 SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
61 SPI_MEM_OP_ADDR(3, addr, 1), \
62 SPI_MEM_OP_NO_DUMMY, \
63 SPI_MEM_OP_NO_DATA)
64
65#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
66 SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
67 SPI_MEM_OP_ADDR(2, addr, 1), \
68 SPI_MEM_OP_DUMMY(ndummy, 1), \
69 SPI_MEM_OP_DATA_IN(len, buf, 1))
70
71#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
72 SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
73 SPI_MEM_OP_ADDR(2, addr, 1), \
74 SPI_MEM_OP_DUMMY(ndummy, 1), \
75 SPI_MEM_OP_DATA_IN(len, buf, 2))
76
77#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
78 SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
79 SPI_MEM_OP_ADDR(2, addr, 1), \
80 SPI_MEM_OP_DUMMY(ndummy, 1), \
81 SPI_MEM_OP_DATA_IN(len, buf, 4))
82
83#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
84 SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
85 SPI_MEM_OP_ADDR(2, addr, 2), \
86 SPI_MEM_OP_DUMMY(ndummy, 2), \
87 SPI_MEM_OP_DATA_IN(len, buf, 2))
88
89#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
90 SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
91 SPI_MEM_OP_ADDR(2, addr, 4), \
92 SPI_MEM_OP_DUMMY(ndummy, 4), \
93 SPI_MEM_OP_DATA_IN(len, buf, 4))
94
95#define SPINAND_PROG_EXEC_OP(addr) \
96 SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
97 SPI_MEM_OP_ADDR(3, addr, 1), \
98 SPI_MEM_OP_NO_DUMMY, \
99 SPI_MEM_OP_NO_DATA)
100
101#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
102 SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
103 SPI_MEM_OP_ADDR(2, addr, 1), \
104 SPI_MEM_OP_NO_DUMMY, \
105 SPI_MEM_OP_DATA_OUT(len, buf, 1))
106
107#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
108 SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
109 SPI_MEM_OP_ADDR(2, addr, 1), \
110 SPI_MEM_OP_NO_DUMMY, \
111 SPI_MEM_OP_DATA_OUT(len, buf, 4))
112
113/**
114 * Standard SPI NAND flash commands
115 */
116#define SPINAND_CMD_PROG_LOAD_X4 0x32
117#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
118
119/* feature register */
120#define REG_BLOCK_LOCK 0xa0
121#define BL_ALL_UNLOCKED 0x00
122
123/* configuration register */
124#define REG_CFG 0xb0
125#define CFG_OTP_ENABLE BIT(6)
126#define CFG_ECC_ENABLE BIT(4)
127#define CFG_QUAD_ENABLE BIT(0)
128
129/* status register */
130#define REG_STATUS 0xc0
131#define STATUS_BUSY BIT(0)
132#define STATUS_ERASE_FAILED BIT(2)
133#define STATUS_PROG_FAILED BIT(3)
134#define STATUS_ECC_MASK GENMASK(5, 4)
135#define STATUS_ECC_NO_BITFLIPS (0 << 4)
136#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
137#define STATUS_ECC_UNCOR_ERROR (2 << 4)
138
139struct spinand_op;
140struct spinand_device;
141
142#define SPINAND_MAX_ID_LEN 4
143
144/**
145 * struct spinand_id - SPI NAND id structure
146 * @data: buffer containing the id bytes. Currently 4 bytes large, but can
147 * be extended if required
148 * @len: ID length
149 *
150 * struct_spinand_id->data contains all bytes returned after a READ_ID command,
151 * including dummy bytes if the chip does not emit ID bytes right after the
152 * READ_ID command. The responsibility to extract real ID bytes is left to
 153 * struct_spinand_manufacturer_ops->detect().
154 */
155struct spinand_id {
156 u8 data[SPINAND_MAX_ID_LEN];
157 int len;
158};
159
160/**
 161 * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
 162 * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
 163 * the core calls the struct_spinand_manufacturer_ops->detect() hook of each
 164 * registered manufacturer until one of them returns 1. Note that
165 * the first thing to check in this hook is that the manufacturer ID
166 * in struct_spinand_device->id matches the manufacturer whose
167 * ->detect() hook has been called. Should return 1 if there's a
168 * match, 0 if the manufacturer ID does not match and a negative
169 * error code otherwise. When true is returned, the core assumes
170 * that properties of the NAND chip (spinand->base.memorg and
171 * spinand->base.eccreq) have been filled
172 * @init: initialize a SPI NAND device
173 * @cleanup: cleanup a SPI NAND device
174 *
175 * Each SPI NAND manufacturer driver should implement this interface so that
176 * NAND chips coming from this vendor can be detected and initialized properly.
177 */
178struct spinand_manufacturer_ops {
179 int (*detect)(struct spinand_device *spinand);
180 int (*init)(struct spinand_device *spinand);
181 void (*cleanup)(struct spinand_device *spinand);
182};
183
184/**
185 * struct spinand_manufacturer - SPI NAND manufacturer instance
186 * @id: manufacturer ID
187 * @name: manufacturer name
188 * @ops: manufacturer operations
189 */
190struct spinand_manufacturer {
191 u8 id;
192 char *name;
193 const struct spinand_manufacturer_ops *ops;
194};
195
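/*
 * Illustrative sketch, not part of this header: a vendor driver exposes one
 * spinand_manufacturer object, pointing at its manufacturer_ops, which the
 * core walks through at probe time. The 0xab ID and all "example_*" names
 * are hypothetical; the detect() hook itself is sketched near the end of
 * this file.
 */
static int example_spinand_detect(struct spinand_device *spinand);

static const struct spinand_manufacturer_ops example_spinand_manuf_ops = {
	.detect = example_spinand_detect,
};

const struct spinand_manufacturer example_spinand_manufacturer = {
	.id = 0xab,
	.name = "Example",
	.ops = &example_spinand_manuf_ops,
};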
196/* SPI NAND manufacturers */
197extern const struct spinand_manufacturer macronix_spinand_manufacturer;
198extern const struct spinand_manufacturer micron_spinand_manufacturer;
199extern const struct spinand_manufacturer winbond_spinand_manufacturer;
200
201/**
202 * struct spinand_op_variants - SPI NAND operation variants
203 * @ops: the list of variants for a given operation
204 * @nops: the number of variants
205 *
206 * Some operations like read-from-cache/write-to-cache have several variants
207 * depending on the number of IO lines you use to transfer data or address
208 * cycles. This structure is a way to describe the different variants supported
209 * by a chip and let the core pick the best one based on the SPI mem controller
210 * capabilities.
211 */
212struct spinand_op_variants {
213 const struct spi_mem_op *ops;
214 unsigned int nops;
215};
216
217#define SPINAND_OP_VARIANTS(name, ...) \
218 const struct spinand_op_variants name = { \
219 .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
220 .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
221 sizeof(struct spi_mem_op), \
222 }
223
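/*
 * Illustrative sketch, not part of this header: a chip driver lists the op
 * templates it supports for each cached operation, and the core then picks
 * the fastest variant the SPI controller can execute. The "example_*" names
 * are hypothetical; the op macros are the ones defined earlier in this file.
 */
static SPINAND_OP_VARIANTS(example_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0));

static SPINAND_OP_VARIANTS(example_write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(example_update_cache_variants,
		SPINAND_PROG_LOAD(false, 0, NULL, 0));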
224/**
 225 * struct spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
226 * chip
227 * @get_status: get the ECC status. Should return a positive number encoding
228 * the number of corrected bitflips if correction was possible or
 229 * -EBADMSG if there are uncorrectable errors. It can also return
230 * other negative error codes if the error is not caused by
231 * uncorrectable bitflips
232 * @ooblayout: the OOB layout used by the on-die ECC implementation
233 */
234struct spinand_ecc_info {
235 int (*get_status)(struct spinand_device *spinand, u8 status);
236 const struct mtd_ooblayout_ops *ooblayout;
237};
238
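/*
 * Illustrative sketch, not part of this header: a typical get_status()
 * implementation decodes the STATUS_ECC_* field defined above. The value 4
 * returned for the "has bitflips" case is a hypothetical, datasheet-dependent
 * maximum ECC strength.
 */
static int example_get_ecc_status(struct spinand_device *spinand, u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/* The chip only reports "corrected", so be pessimistic. */
		return 4;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}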
239#define SPINAND_HAS_QE_BIT BIT(0)
240
241/**
242 * struct spinand_info - Structure used to describe SPI NAND chips
243 * @model: model name
244 * @devid: device ID
245 * @flags: OR-ing of the SPINAND_XXX flags
246 * @memorg: memory organization
247 * @eccreq: ECC requirements
248 * @eccinfo: on-die ECC info
249 * @op_variants: operations variants
250 * @op_variants.read_cache: variants of the read-cache operation
251 * @op_variants.write_cache: variants of the write-cache operation
252 * @op_variants.update_cache: variants of the update-cache operation
253 * @select_target: function used to select a target/die. Required only for
254 * multi-die chips
255 *
256 * Each SPI NAND manufacturer driver should have a spinand_info table
257 * describing all the chips supported by the driver.
258 */
259struct spinand_info {
260 const char *model;
261 u8 devid;
262 u32 flags;
263 struct nand_memory_organization memorg;
264 struct nand_ecc_req eccreq;
265 struct spinand_ecc_info eccinfo;
266 struct {
267 const struct spinand_op_variants *read_cache;
268 const struct spinand_op_variants *write_cache;
269 const struct spinand_op_variants *update_cache;
270 } op_variants;
271 int (*select_target)(struct spinand_device *spinand,
272 unsigned int target);
273};
274
275#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
276 { \
277 .read_cache = __read, \
278 .write_cache = __write, \
279 .update_cache = __update, \
280 }
281
282#define SPINAND_ECCINFO(__ooblayout, __get_status) \
283 .eccinfo = { \
284 .ooblayout = __ooblayout, \
285 .get_status = __get_status, \
286 }
287
288#define SPINAND_SELECT_TARGET(__func) \
289 .select_target = __func,
290
291#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
292 __flags, ...) \
293 { \
294 .model = __model, \
295 .devid = __id, \
296 .memorg = __memorg, \
297 .eccreq = __eccreq, \
298 .op_variants = __op_variants, \
299 .flags = __flags, \
300 __VA_ARGS__ \
301 }
302
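/*
 * Illustrative sketch, not part of this header: each supported chip gets one
 * SPINAND_INFO() entry. NAND_MEMORG() and NAND_ECCREQ() come from the generic
 * NAND core header; the device ID 0x12, the geometry values, and all
 * "example_*" identifiers (including example_ooblayout, a struct
 * mtd_ooblayout_ops instance) are hypothetical.
 */
static const struct spinand_info example_spinand_table[] = {
	SPINAND_INFO("EXAMPLE1G", 0x12,
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&example_read_cache_variants,
					      &example_write_cache_variants,
					      &example_update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&example_ooblayout,
				     example_get_ecc_status)),
};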
303/**
304 * struct spinand_device - SPI NAND device instance
305 * @base: NAND device instance
306 * @spimem: pointer to the SPI mem object
307 * @lock: lock used to serialize accesses to the NAND
308 * @id: NAND ID as returned by READ_ID
309 * @flags: NAND flags
310 * @op_templates: various SPI mem op templates
311 * @op_templates.read_cache: read cache op template
312 * @op_templates.write_cache: write cache op template
313 * @op_templates.update_cache: update cache op template
314 * @select_target: select a specific target/die. Usually called before sending
315 * a command addressing a page or an eraseblock embedded in
316 * this die. Only required if your chip exposes several dies
317 * @cur_target: currently selected target/die
318 * @eccinfo: on-die ECC information
319 * @cfg_cache: config register cache. One entry per die
320 * @databuf: bounce buffer for data
321 * @oobbuf: bounce buffer for OOB data
322 * @scratchbuf: buffer used for everything but page accesses. This is needed
323 * because the spi-mem interface explicitly requests that buffers
 324 * passed in spi_mem_op be DMA-able, so we can't put the buffers on
325 * the stack
326 * @manufacturer: SPI NAND manufacturer information
327 * @priv: manufacturer private data
328 */
329struct spinand_device {
330 struct nand_device base;
331 struct spi_mem *spimem;
332 struct mutex lock;
333 struct spinand_id id;
334 u32 flags;
335
336 struct {
337 const struct spi_mem_op *read_cache;
338 const struct spi_mem_op *write_cache;
339 const struct spi_mem_op *update_cache;
340 } op_templates;
341
342 int (*select_target)(struct spinand_device *spinand,
343 unsigned int target);
344 unsigned int cur_target;
345
346 struct spinand_ecc_info eccinfo;
347
348 u8 *cfg_cache;
349 u8 *databuf;
350 u8 *oobbuf;
351 u8 *scratchbuf;
352 const struct spinand_manufacturer *manufacturer;
353 void *priv;
354};
355
356/**
357 * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
358 * @mtd: MTD instance
359 *
360 * Return: the SPI NAND device attached to @mtd.
361 */
362static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
363{
364 return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
365}
366
367/**
368 * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
369 * @spinand: SPI NAND device
370 *
371 * Return: the MTD device embedded in @spinand.
372 */
373static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
374{
375 return nanddev_to_mtd(&spinand->base);
376}
377
378/**
 379 * nand_to_spinand() - Get the SPI NAND device embedding a NAND object
380 * @nand: NAND object
381 *
382 * Return: the SPI NAND device embedding @nand.
383 */
384static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
385{
386 return container_of(nand, struct spinand_device, base);
387}
388
389/**
390 * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
391 * @spinand: SPI NAND device
392 *
393 * Return: the NAND device embedded in @spinand.
394 */
395static inline struct nand_device *
396spinand_to_nand(struct spinand_device *spinand)
397{
398 return &spinand->base;
399}
400
401/**
 402 * spinand_set_of_node() - Attach a DT node to a SPI NAND device
403 * @spinand: SPI NAND device
404 * @np: DT node
405 *
406 * Attach a DT node to a SPI NAND device.
407 */
408static inline void spinand_set_of_node(struct spinand_device *spinand,
409 struct device_node *np)
410{
411 nanddev_set_of_node(&spinand->base, np);
412}
413
414int spinand_match_and_init(struct spinand_device *dev,
415 const struct spinand_info *table,
416 unsigned int table_size, u8 devid);
417
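/*
 * Illustrative sketch, not part of this header: a manufacturer detect() hook
 * first checks the manufacturer ID byte (0xab here, hypothetical, assuming
 * the chip returns it in the first ID byte), then lets the core match the
 * device ID against the driver's spinand_info table.
 */
static int example_spinand_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	if (id[0] != 0xab)
		return 0;

	ret = spinand_match_and_init(spinand, example_spinand_table,
				     ARRAY_SIZE(example_spinand_table),
				     id[1]);
	if (ret)
		return ret;

	return 1;
}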
418int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
419int spinand_select_target(struct spinand_device *spinand, unsigned int target);
420
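/*
 * Illustrative sketch, not part of this header: spinand_upd_cfg() updates
 * only the masked bits of the (cached) configuration register, so toggling a
 * single feature bit such as CFG_OTP_ENABLE looks like this ("enable" is a
 * hypothetical parameter).
 */
static int example_spinand_otp_access(struct spinand_device *spinand,
				      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_OTP_ENABLE,
			       enable ? CFG_OTP_ENABLE : 0);
}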
421#endif /* __LINUX_MTD_SPINAND_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index 08b6eb964dd6..e0930678c8bf 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -114,7 +114,7 @@ struct socket {
114 114
115 unsigned long flags; 115 unsigned long flags;
116 116
117 struct socket_wq __rcu *wq; 117 struct socket_wq *wq;
118 118
119 struct file *file; 119 struct file *file;
120 struct sock *sk; 120 struct sock *sk;
@@ -147,7 +147,6 @@ struct proto_ops {
147 int (*getname) (struct socket *sock, 147 int (*getname) (struct socket *sock,
148 struct sockaddr *addr, 148 struct sockaddr *addr,
149 int peer); 149 int peer);
150 __poll_t (*poll_mask) (struct socket *sock, __poll_t events);
151 __poll_t (*poll) (struct file *file, struct socket *sock, 150 __poll_t (*poll) (struct file *file, struct socket *sock,
152 struct poll_table_struct *wait); 151 struct poll_table_struct *wait);
153 int (*ioctl) (struct socket *sock, unsigned int cmd, 152 int (*ioctl) (struct socket *sock, unsigned int cmd,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 623bb8ced060..2b2a6dce1630 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -79,6 +79,7 @@ enum {
79 NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ 79 NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
80 NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ 80 NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
81 NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */ 81 NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
82 NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
82 83
83 NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ 84 NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
84 NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ 85 NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
@@ -151,6 +152,7 @@ enum {
151#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) 152#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
152#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) 153#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
153#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) 154#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
155#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
154 156
155#define for_each_netdev_feature(mask_addr, bit) \ 157#define for_each_netdev_feature(mask_addr, bit) \
156 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) 158 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec9850c7936..ca5ab98053c8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
302 302
303int __init netdev_boot_setup(char *str); 303int __init netdev_boot_setup(char *str);
304 304
305struct gro_list {
306 struct list_head list;
307 int count;
308};
309
310/*
 311 * size of gro hash buckets, must be less than bit number of
312 * napi_struct::gro_bitmask
313 */
314#define GRO_HASH_BUCKETS 8
315
305/* 316/*
306 * Structure for NAPI scheduling similar to tasklet but with weighting 317 * Structure for NAPI scheduling similar to tasklet but with weighting
307 */ 318 */
@@ -316,13 +327,13 @@ struct napi_struct {
316 327
317 unsigned long state; 328 unsigned long state;
318 int weight; 329 int weight;
319 unsigned int gro_count; 330 unsigned long gro_bitmask;
320 int (*poll)(struct napi_struct *, int); 331 int (*poll)(struct napi_struct *, int);
321#ifdef CONFIG_NETPOLL 332#ifdef CONFIG_NETPOLL
322 int poll_owner; 333 int poll_owner;
323#endif 334#endif
324 struct net_device *dev; 335 struct net_device *dev;
325 struct sk_buff *gro_list; 336 struct gro_list gro_hash[GRO_HASH_BUCKETS];
326 struct sk_buff *skb; 337 struct sk_buff *skb;
327 struct hrtimer timer; 338 struct hrtimer timer;
328 struct list_head dev_list; 339 struct list_head dev_list;
@@ -569,6 +580,9 @@ struct netdev_queue {
569 * (/sys/class/net/DEV/Q/trans_timeout) 580 * (/sys/class/net/DEV/Q/trans_timeout)
570 */ 581 */
571 unsigned long trans_timeout; 582 unsigned long trans_timeout;
583
584 /* Subordinate device that the queue has been assigned to */
585 struct net_device *sb_dev;
572/* 586/*
573 * write-mostly part 587 * write-mostly part
574 */ 588 */
@@ -730,10 +744,15 @@ struct xps_map {
730 */ 744 */
731struct xps_dev_maps { 745struct xps_dev_maps {
732 struct rcu_head rcu; 746 struct rcu_head rcu;
733 struct xps_map __rcu *cpu_map[0]; 747 struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
734}; 748};
735#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ 749
750#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
736 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) 751 (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
752
753#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
754 (_rxqs * (_tcs) * sizeof(struct xps_map *)))
755
737#endif /* CONFIG_XPS */ 756#endif /* CONFIG_XPS */
738 757
739#define TC_MAX_QUEUE 16 758#define TC_MAX_QUEUE 16
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
779} 798}
780 799
781typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 800typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
782 struct sk_buff *skb); 801 struct sk_buff *skb,
802 struct net_device *sb_dev);
783 803
784enum tc_setup_type { 804enum tc_setup_type {
785 TC_SETUP_QDISC_MQPRIO, 805 TC_SETUP_QDISC_MQPRIO,
@@ -792,6 +812,7 @@ enum tc_setup_type {
792 TC_SETUP_QDISC_RED, 812 TC_SETUP_QDISC_RED,
793 TC_SETUP_QDISC_PRIO, 813 TC_SETUP_QDISC_PRIO,
794 TC_SETUP_QDISC_MQ, 814 TC_SETUP_QDISC_MQ,
815 TC_SETUP_QDISC_ETF,
795}; 816};
796 817
797/* These structures hold the attributes of bpf state that are being passed 818/* These structures hold the attributes of bpf state that are being passed
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
807 */ 828 */
808 XDP_SETUP_PROG, 829 XDP_SETUP_PROG,
809 XDP_SETUP_PROG_HW, 830 XDP_SETUP_PROG_HW,
810 /* Check if a bpf program is set on the device. The callee should
811 * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
812 * is equivalent to XDP_ATTACHED_DRV.
813 */
814 XDP_QUERY_PROG, 831 XDP_QUERY_PROG,
832 XDP_QUERY_PROG_HW,
815 /* BPF program for offload callbacks, invoked at program load time. */ 833 /* BPF program for offload callbacks, invoked at program load time. */
816 BPF_OFFLOAD_VERIFIER_PREP, 834 BPF_OFFLOAD_VERIFIER_PREP,
817 BPF_OFFLOAD_TRANSLATE, 835 BPF_OFFLOAD_TRANSLATE,
@@ -835,9 +853,8 @@ struct netdev_bpf {
835 struct bpf_prog *prog; 853 struct bpf_prog *prog;
836 struct netlink_ext_ack *extack; 854 struct netlink_ext_ack *extack;
837 }; 855 };
838 /* XDP_QUERY_PROG */ 856 /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
839 struct { 857 struct {
840 u8 prog_attached;
841 u32 prog_id; 858 u32 prog_id;
842 /* flags with which program was installed */ 859 /* flags with which program was installed */
843 u32 prog_flags; 860 u32 prog_flags;
@@ -855,10 +872,10 @@ struct netdev_bpf {
855 struct { 872 struct {
856 struct bpf_offloaded_map *offmap; 873 struct bpf_offloaded_map *offmap;
857 }; 874 };
858 /* XDP_SETUP_XSK_UMEM */ 875 /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
859 struct { 876 struct {
860 struct xdp_umem *umem; 877 struct xdp_umem *umem; /* out for query*/
861 u16 queue_id; 878 u16 queue_id; /* in for query */
862 } xsk; 879 } xsk;
863 }; 880 };
864}; 881};
@@ -891,6 +908,8 @@ struct tlsdev_ops {
891 void (*tls_dev_del)(struct net_device *netdev, 908 void (*tls_dev_del)(struct net_device *netdev,
892 struct tls_context *ctx, 909 struct tls_context *ctx,
893 enum tls_offload_ctx_dir direction); 910 enum tls_offload_ctx_dir direction);
911 void (*tls_dev_resync_rx)(struct net_device *netdev,
912 struct sock *sk, u32 seq, u64 rcd_sn);
894}; 913};
895#endif 914#endif
896 915
@@ -942,7 +961,8 @@ struct dev_ifalias {
942 * those the driver believes to be appropriate. 961 * those the driver believes to be appropriate.
943 * 962 *
944 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 963 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
945 * void *accel_priv, select_queue_fallback_t fallback); 964 * struct net_device *sb_dev,
965 * select_queue_fallback_t fallback);
946 * Called to decide which queue to use when device supports multiple 966 * Called to decide which queue to use when device supports multiple
947 * transmit queues. 967 * transmit queues.
948 * 968 *
@@ -1214,7 +1234,7 @@ struct net_device_ops {
1214 netdev_features_t features); 1234 netdev_features_t features);
1215 u16 (*ndo_select_queue)(struct net_device *dev, 1235 u16 (*ndo_select_queue)(struct net_device *dev,
1216 struct sk_buff *skb, 1236 struct sk_buff *skb,
1217 void *accel_priv, 1237 struct net_device *sb_dev,
1218 select_queue_fallback_t fallback); 1238 select_queue_fallback_t fallback);
1219 void (*ndo_change_rx_flags)(struct net_device *dev, 1239 void (*ndo_change_rx_flags)(struct net_device *dev,
1220 int flags); 1240 int flags);
@@ -1909,7 +1929,8 @@ struct net_device {
1909 int watchdog_timeo; 1929 int watchdog_timeo;
1910 1930
1911#ifdef CONFIG_XPS 1931#ifdef CONFIG_XPS
1912 struct xps_dev_maps __rcu *xps_maps; 1932 struct xps_dev_maps __rcu *xps_cpus_map;
1933 struct xps_dev_maps __rcu *xps_rxqs_map;
1913#endif 1934#endif
1914#ifdef CONFIG_NET_CLS_ACT 1935#ifdef CONFIG_NET_CLS_ACT
1915 struct mini_Qdisc __rcu *miniq_egress; 1936 struct mini_Qdisc __rcu *miniq_egress;
@@ -1978,7 +1999,7 @@ struct net_device {
1978#ifdef CONFIG_DCB 1999#ifdef CONFIG_DCB
1979 const struct dcbnl_rtnl_ops *dcbnl_ops; 2000 const struct dcbnl_rtnl_ops *dcbnl_ops;
1980#endif 2001#endif
1981 u8 num_tc; 2002 s16 num_tc;
1982 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 2003 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1983 u8 prio_tc_map[TC_BITMASK + 1]; 2004 u8 prio_tc_map[TC_BITMASK + 1];
1984 2005
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
2032 return dev->num_tc; 2053 return dev->num_tc;
2033} 2054}
2034 2055
2056void netdev_unbind_sb_channel(struct net_device *dev,
2057 struct net_device *sb_dev);
2058int netdev_bind_sb_channel_queue(struct net_device *dev,
2059 struct net_device *sb_dev,
2060 u8 tc, u16 count, u16 offset);
2061int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2062static inline int netdev_get_sb_channel(struct net_device *dev)
2063{
2064 return max_t(int, -dev->num_tc, 0);
2065}
2066
2035static inline 2067static inline
2036struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 2068struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2037 unsigned int index) 2069 unsigned int index)
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
2076 2108
2077struct netdev_queue *netdev_pick_tx(struct net_device *dev, 2109struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2078 struct sk_buff *skb, 2110 struct sk_buff *skb,
2079 void *accel_priv); 2111 struct net_device *sb_dev);
2080 2112
2081/* returns the headroom that the master device needs to take in account 2113/* returns the headroom that the master device needs to take in account
2082 * when forwarding to this dev 2114 * when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
2255 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; 2287 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2256} 2288}
2257 2289
2258typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *); 2290typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2259static inline struct sk_buff **call_gro_receive(gro_receive_t cb, 2291static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2260 struct sk_buff **head, 2292 struct list_head *head,
2261 struct sk_buff *skb) 2293 struct sk_buff *skb)
2262{ 2294{
2263 if (unlikely(gro_recursion_inc_test(skb))) { 2295 if (unlikely(gro_recursion_inc_test(skb))) {
2264 NAPI_GRO_CB(skb)->flush |= 1; 2296 NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
2268 return cb(head, skb); 2300 return cb(head, skb);
2269} 2301}
2270 2302
2271typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **, 2303typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2272 struct sk_buff *); 2304 struct sk_buff *);
2273static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb, 2305static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2274 struct sock *sk, 2306 struct sock *sk,
2275 struct sk_buff **head, 2307 struct list_head *head,
2276 struct sk_buff *skb) 2308 struct sk_buff *skb)
2277{ 2309{
2278 if (unlikely(gro_recursion_inc_test(skb))) { 2310 if (unlikely(gro_recursion_inc_test(skb))) {
2279 NAPI_GRO_CB(skb)->flush |= 1; 2311 NAPI_GRO_CB(skb)->flush |= 1;
@@ -2290,6 +2322,9 @@ struct packet_type {
2290 struct net_device *, 2322 struct net_device *,
2291 struct packet_type *, 2323 struct packet_type *,
2292 struct net_device *); 2324 struct net_device *);
2325 void (*list_func) (struct list_head *,
2326 struct packet_type *,
2327 struct net_device *);
2293 bool (*id_match)(struct packet_type *ptype, 2328 bool (*id_match)(struct packet_type *ptype,
2294 struct sock *sk); 2329 struct sock *sk);
2295 void *af_packet_priv; 2330 void *af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
2299struct offload_callbacks { 2334struct offload_callbacks {
2300 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 2335 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2301 netdev_features_t features); 2336 netdev_features_t features);
2302 struct sk_buff **(*gro_receive)(struct sk_buff **head, 2337 struct sk_buff *(*gro_receive)(struct list_head *head,
2303 struct sk_buff *skb); 2338 struct sk_buff *skb);
2304 int (*gro_complete)(struct sk_buff *skb, int nhoff); 2339 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2305}; 2340};
2306 2341
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
2537void dev_close_many(struct list_head *head, bool unlink); 2572void dev_close_many(struct list_head *head, bool unlink);
2538void dev_disable_lro(struct net_device *dev); 2573void dev_disable_lro(struct net_device *dev);
2539int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); 2574int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2575u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2576 struct net_device *sb_dev,
2577 select_queue_fallback_t fallback);
2578u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2579 struct net_device *sb_dev,
2580 select_queue_fallback_t fallback);
2540int dev_queue_xmit(struct sk_buff *skb); 2581int dev_queue_xmit(struct sk_buff *skb);
2541int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); 2582int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2542int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); 2583int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2543int register_netdevice(struct net_device *dev); 2584int register_netdevice(struct net_device *dev);
2544void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 2585void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2568struct net_device *dev_get_by_napi_id(unsigned int napi_id); 2609struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2569int netdev_get_name(struct net *net, char *name, int ifindex); 2610int netdev_get_name(struct net *net, char *name, int ifindex);
2570int dev_restart(struct net_device *dev); 2611int dev_restart(struct net_device *dev);
2571int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); 2612int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2572 2613
2573static inline unsigned int skb_gro_offset(const struct sk_buff *skb) 2614static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2574{ 2615{
@@ -2784,15 +2825,35 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2784} 2825}
2785 2826
2786#ifdef CONFIG_XFRM_OFFLOAD 2827#ifdef CONFIG_XFRM_OFFLOAD
2787static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) 2828static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
2788{ 2829{
2789 if (PTR_ERR(pp) != -EINPROGRESS) 2830 if (PTR_ERR(pp) != -EINPROGRESS)
2790 NAPI_GRO_CB(skb)->flush |= flush; 2831 NAPI_GRO_CB(skb)->flush |= flush;
2791} 2832}
2833static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
2834 struct sk_buff *pp,
2835 int flush,
2836 struct gro_remcsum *grc)
2837{
2838 if (PTR_ERR(pp) != -EINPROGRESS) {
2839 NAPI_GRO_CB(skb)->flush |= flush;
2840 skb_gro_remcsum_cleanup(skb, grc);
2841 skb->remcsum_offload = 0;
2842 }
2843}
2792#else 2844#else
2793static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) 2845static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
2846{
2847 NAPI_GRO_CB(skb)->flush |= flush;
2848}
2849static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
2850 struct sk_buff *pp,
2851 int flush,
2852 struct gro_remcsum *grc)
2794{ 2853{
2795 NAPI_GRO_CB(skb)->flush |= flush; 2854 NAPI_GRO_CB(skb)->flush |= flush;
2855 skb_gro_remcsum_cleanup(skb, grc);
2856 skb->remcsum_offload = 0;
2796} 2857}
2797#endif 2858#endif
2798 2859
@@ -3258,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3258#ifdef CONFIG_XPS 3319#ifdef CONFIG_XPS
3259int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 3320int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3260 u16 index); 3321 u16 index);
3322int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3323 u16 index, bool is_rxqs_map);
3324
3325/**
3326 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3327 * @j: CPU/Rx queue index
3328 * @mask: bitmask of all cpus/rx queues
3329 * @nr_bits: number of bits in the bitmask
3330 *
3331 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3332 */
3333static inline bool netif_attr_test_mask(unsigned long j,
3334 const unsigned long *mask,
3335 unsigned int nr_bits)
3336{
3337 cpu_max_bits_warn(j, nr_bits);
3338 return test_bit(j, mask);
3339}
3340
3341/**
3342 * netif_attr_test_online - Test for online CPU/Rx queue
3343 * @j: CPU/Rx queue index
3344 * @online_mask: bitmask for CPUs/Rx queues that are online
3345 * @nr_bits: number of bits in the bitmask
3346 *
3347 * Returns true if a CPU/Rx queue is online.
3348 */
3349static inline bool netif_attr_test_online(unsigned long j,
3350 const unsigned long *online_mask,
3351 unsigned int nr_bits)
3352{
3353 cpu_max_bits_warn(j, nr_bits);
3354
3355 if (online_mask)
3356 return test_bit(j, online_mask);
3357
3358 return (j < nr_bits);
3359}
3360
3361/**
3362 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3363 * @n: CPU/Rx queue index
3364 * @srcp: the cpumask/Rx queue mask pointer
3365 * @nr_bits: number of bits in the bitmask
3366 *
3367 * Returns >= nr_bits if no further CPUs/Rx queues set.
3368 */
3369static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3370 unsigned int nr_bits)
3371{
3372 /* -1 is a legal arg here. */
3373 if (n != -1)
3374 cpu_max_bits_warn(n, nr_bits);
3375
3376 if (srcp)
3377 return find_next_bit(srcp, nr_bits, n + 1);
3378
3379 return n + 1;
3380}
3381
3382/**
3383 * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
3384 * @n: CPU/Rx queue index
3385 * @src1p: the first CPUs/Rx queues mask pointer
3386 * @src2p: the second CPUs/Rx queues mask pointer
3387 * @nr_bits: number of bits in the bitmask
3388 *
3389 * Returns >= nr_bits if no further CPUs/Rx queues set in both.
3390 */
3391static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3392 const unsigned long *src2p,
3393 unsigned int nr_bits)
3394{
3395 /* -1 is a legal arg here. */
3396 if (n != -1)
3397 cpu_max_bits_warn(n, nr_bits);
3398
3399 if (src1p && src2p)
3400 return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3401 else if (src1p)
3402 return find_next_bit(src1p, nr_bits, n + 1);
3403 else if (src2p)
3404 return find_next_bit(src2p, nr_bits, n + 1);
3405
3406 return n + 1;
3407}
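/*
 * Illustrative sketch (editor's addition, not part of this patch): the
 * netif_attrmask_next*() helpers are cpumask-style iterators; a caller walks
 * the indices set in both masks until the return value reaches nr_bits.
 * "mask1", "mask2", "nr_bits" and use_queue() are hypothetical.
 *
 *	int j = -1;
 *
 *	while ((j = netif_attrmask_next_and(j, mask1, mask2, nr_bits)) < nr_bits)
 *		use_queue(j);
 */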
3261#else 3408#else
3262static inline int netif_set_xps_queue(struct net_device *dev, 3409static inline int netif_set_xps_queue(struct net_device *dev,
3263 const struct cpumask *mask, 3410 const struct cpumask *mask,
@@ -3265,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev,
3265{ 3412{
3266 return 0; 3413 return 0;
3267} 3414}
3415
3416static inline int __netif_set_xps_queue(struct net_device *dev,
3417 const unsigned long *mask,
3418 u16 index, bool is_rxqs_map)
3419{
3420 return 0;
3421}
3268#endif 3422#endif
3269 3423
3270/** 3424/**
@@ -3284,8 +3438,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3284int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); 3438int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3285#else 3439#else
3286static inline int netif_set_real_num_rx_queues(struct net_device *dev, 3440static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3287 unsigned int rxq) 3441 unsigned int rxqs)
3288{ 3442{
3443 dev->real_num_rx_queues = rxqs;
3289 return 0; 3444 return 0;
3290} 3445}
3291#endif 3446#endif
@@ -3364,6 +3519,7 @@ int netif_rx(struct sk_buff *skb);
3364int netif_rx_ni(struct sk_buff *skb); 3519int netif_rx_ni(struct sk_buff *skb);
3365int netif_receive_skb(struct sk_buff *skb); 3520int netif_receive_skb(struct sk_buff *skb);
3366int netif_receive_skb_core(struct sk_buff *skb); 3521int netif_receive_skb_core(struct sk_buff *skb);
3522void netif_receive_skb_list(struct list_head *head);
3367gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); 3523gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3368void napi_gro_flush(struct napi_struct *napi, bool flush_old); 3524void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3369struct sk_buff *napi_get_frags(struct napi_struct *napi); 3525struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -3398,6 +3554,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
3398int dev_get_alias(const struct net_device *, char *, size_t); 3554int dev_get_alias(const struct net_device *, char *, size_t);
3399int dev_change_net_namespace(struct net_device *, struct net *, const char *); 3555int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3400int __dev_set_mtu(struct net_device *, int); 3556int __dev_set_mtu(struct net_device *, int);
3557int dev_set_mtu_ext(struct net_device *dev, int mtu,
3558 struct netlink_ext_ack *extack);
3401int dev_set_mtu(struct net_device *, int); 3559int dev_set_mtu(struct net_device *, int);
3402int dev_change_tx_queue_len(struct net_device *, unsigned long); 3560int dev_change_tx_queue_len(struct net_device *, unsigned long);
3403void dev_set_group(struct net_device *, int); 3561void dev_set_group(struct net_device *, int);
@@ -3415,8 +3573,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3415typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); 3573typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3416int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 3574int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3417 int fd, u32 flags); 3575 int fd, u32 flags);
3418void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, 3576u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3419 struct netdev_bpf *xdp); 3577 enum bpf_netdev_command cmd);
3578int xdp_umem_query(struct net_device *dev, u16 queue_id);
3420 3579
3421int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3580int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3422int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3581int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index dd2052f0efb7..07efffd0c759 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -288,6 +288,24 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
288 return ret; 288 return ret;
289} 289}
290 290
291static inline void
292NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
293 struct list_head *head, struct net_device *in, struct net_device *out,
294 int (*okfn)(struct net *, struct sock *, struct sk_buff *))
295{
296 struct sk_buff *skb, *next;
297 struct list_head sublist;
298
299 INIT_LIST_HEAD(&sublist);
300 list_for_each_entry_safe(skb, next, head, list) {
301 list_del(&skb->list);
302 if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
303 list_add_tail(&skb->list, &sublist);
304 }
305 /* Put passed packets back on main list */
306 list_splice(&sublist, head);
307}
308
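/*
 * Illustrative sketch (editor's addition, not part of this patch): a
 * list-based receive path hands a whole list of skbs to the hook chain in a
 * single call, for example (the IPv4 input path is believed to use a call of
 * this shape; treat the exact arguments as an assumption):
 *
 *	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
 *		     head, dev, NULL, ip_rcv_finish);
 */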
291/* Call setsockopt() */ 309/* Call setsockopt() */
292int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, 310int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
293 unsigned int len); 311 unsigned int len);
@@ -369,6 +387,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
369 return okfn(net, sk, skb); 387 return okfn(net, sk, skb);
370} 388}
371 389
390static inline void
391NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
392 struct list_head *head, struct net_device *in, struct net_device *out,
393 int (*okfn)(struct net *, struct sock *, struct sk_buff *))
394{
395 /* nothing to do */
396}
397
372static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, 398static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
373 struct sock *sk, struct sk_buff *skb, 399 struct sock *sk, struct sk_buff *skb,
374 struct net_device *indev, struct net_device *outdev, 400 struct net_device *indev, struct net_device *outdev,
@@ -388,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
388 414
389extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; 415extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
390void nf_ct_attach(struct sk_buff *, const struct sk_buff *); 416void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
417struct nf_conntrack_tuple;
418bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
419 const struct sk_buff *skb);
391#else 420#else
392static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} 421static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
422struct nf_conntrack_tuple;
423static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
424 const struct sk_buff *skb)
425{
426 return false;
427}
393#endif 428#endif
394 429
395struct nf_conn; 430struct nf_conn;
@@ -398,6 +433,8 @@ enum ip_conntrack_info;
398struct nf_ct_hook { 433struct nf_ct_hook {
399 int (*update)(struct net *net, struct sk_buff *skb); 434 int (*update)(struct net *net, struct sk_buff *skb);
400 void (*destroy)(struct nf_conntrack *); 435 void (*destroy)(struct nf_conntrack *);
436 bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
437 const struct sk_buff *);
401}; 438};
402extern struct nf_ct_hook __rcu *nf_ct_hook; 439extern struct nf_ct_hook __rcu *nf_ct_hook;
403 440
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 3ecc3050be0e..4a520d3304a2 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -29,6 +29,7 @@ struct nfnetlink_subsystem {
29 __u8 subsys_id; /* nfnetlink subsystem ID */ 29 __u8 subsys_id; /* nfnetlink subsystem ID */
30 __u8 cb_count; /* number of callbacks */ 30 __u8 cb_count; /* number of callbacks */
31 const struct nfnl_callback *cb; /* callback for individual types */ 31 const struct nfnl_callback *cb; /* callback for individual types */
32 struct module *owner;
32 int (*commit)(struct net *net, struct sk_buff *skb); 33 int (*commit)(struct net *net, struct sk_buff *skb);
33 int (*abort)(struct net *net, struct sk_buff *skb); 34 int (*abort)(struct net *net, struct sk_buff *skb);
34 void (*cleanup)(struct net *net); 35 void (*cleanup)(struct net *net);
diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index 0e114c492fb8..ecf7dab81e9e 100644
--- a/include/linux/netfilter/nf_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -1,16 +1,8 @@
1#include <uapi/linux/netfilter/nf_osf.h> 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _NFOSF_H
3#define _NFOSF_H
2 4
3/* Initial window size option state machine: multiple of mss, mtu or 5#include <uapi/linux/netfilter/nfnetlink_osf.h>
4 * plain numeric value. Can also be made as plain numeric value which
5 * is not a multiple of specified value.
6 */
7enum nf_osf_window_size_options {
8 OSF_WSS_PLAIN = 0,
9 OSF_WSS_MSS,
10 OSF_WSS_MTU,
11 OSF_WSS_MODULO,
12 OSF_WSS_MAX,
13};
14 6
15enum osf_fmatch_states { 7enum osf_fmatch_states {
16 /* Packet does not match the fingerprint */ 8 /* Packet does not match the fingerprint */
@@ -21,6 +13,8 @@ enum osf_fmatch_states {
21 FMATCH_OPT_WRONG, 13 FMATCH_OPT_WRONG,
22}; 14};
23 15
16extern struct list_head nf_osf_fingers[2];
17
24struct nf_osf_finger { 18struct nf_osf_finger {
25 struct rcu_head rcu_head; 19 struct rcu_head rcu_head;
26 struct list_head finger_entry; 20 struct list_head finger_entry;
@@ -31,3 +25,8 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
31 int hooknum, struct net_device *in, struct net_device *out, 25 int hooknum, struct net_device *in, struct net_device *out,
32 const struct nf_osf_info *info, struct net *net, 26 const struct nf_osf_info *info, struct net *net,
33 const struct list_head *nf_osf_fingers); 27 const struct list_head *nf_osf_fingers);
28
29const char *nf_osf_find(const struct sk_buff *skb,
30 const struct list_head *nf_osf_fingers);
31
32#endif /* _NFOSF_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index b671fdfd212b..fa0686500970 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -5,17 +5,6 @@
5#include <uapi/linux/netfilter_bridge.h> 5#include <uapi/linux/netfilter_bridge.h>
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7 7
8enum nf_br_hook_priorities {
9 NF_BR_PRI_FIRST = INT_MIN,
10 NF_BR_PRI_NAT_DST_BRIDGED = -300,
11 NF_BR_PRI_FILTER_BRIDGED = -200,
12 NF_BR_PRI_BRNF = 0,
13 NF_BR_PRI_NAT_DST_OTHER = 100,
14 NF_BR_PRI_FILTER_OTHER = 200,
15 NF_BR_PRI_NAT_SRC = 300,
16 NF_BR_PRI_LAST = INT_MAX,
17};
18
19#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 8#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
20 9
21int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); 10int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index b31dabfdb453..95ab5cc64422 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -23,9 +23,6 @@ struct nf_queue_entry;
23#ifdef CONFIG_INET 23#ifdef CONFIG_INET
24__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, 24__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
25 unsigned int dataoff, u_int8_t protocol); 25 unsigned int dataoff, u_int8_t protocol);
26__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
27 unsigned int dataoff, unsigned int len,
28 u_int8_t protocol);
29int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, 26int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
30 bool strict); 27 bool strict);
31int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); 28int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
@@ -35,14 +32,6 @@ static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
35{ 32{
36 return 0; 33 return 0;
37} 34}
38static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
39 unsigned int hook,
40 unsigned int dataoff,
41 unsigned int len,
42 u_int8_t protocol)
43{
44 return 0;
45}
46static inline int nf_ip_route(struct net *net, struct dst_entry **dst, 35static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
47 struct flowi *fl, bool strict) 36 struct flowi *fl, bool strict)
48{ 37{
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 288c597e75b3..c0dc4dd78887 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -30,11 +30,6 @@ struct nf_ipv6_ops {
30 void (*route_input)(struct sk_buff *skb); 30 void (*route_input)(struct sk_buff *skb);
31 int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, 31 int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
32 int (*output)(struct net *, struct sock *, struct sk_buff *)); 32 int (*output)(struct net *, struct sock *, struct sk_buff *));
33 __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
34 unsigned int dataoff, u_int8_t protocol);
35 __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
36 unsigned int dataoff, unsigned int len,
37 u_int8_t protocol);
38 int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, 33 int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
39 bool strict); 34 bool strict);
40 int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); 35 int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index f3075d6c7e82..71f121b66ca8 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -170,7 +170,6 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
170struct netlink_callback { 170struct netlink_callback {
171 struct sk_buff *skb; 171 struct sk_buff *skb;
172 const struct nlmsghdr *nlh; 172 const struct nlmsghdr *nlh;
173 int (*start)(struct netlink_callback *);
174 int (*dump)(struct sk_buff * skb, 173 int (*dump)(struct sk_buff * skb,
175 struct netlink_callback *cb); 174 struct netlink_callback *cb);
176 int (*done)(struct netlink_callback *cb); 175 int (*done)(struct netlink_callback *cb);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9dee3c23895d..712eed156d09 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1438,6 +1438,8 @@ enum {
1438 NFS_IOHDR_EOF, 1438 NFS_IOHDR_EOF,
1439 NFS_IOHDR_REDO, 1439 NFS_IOHDR_REDO,
1440 NFS_IOHDR_STAT, 1440 NFS_IOHDR_STAT,
1441 NFS_IOHDR_RESEND_PNFS,
1442 NFS_IOHDR_RESEND_MDS,
1441}; 1443};
1442 1444
1443struct nfs_io_completion; 1445struct nfs_io_completion;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b8d868d23e79..08f9247e9827 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void);
45extern void touch_softlockup_watchdog_sync(void); 45extern void touch_softlockup_watchdog_sync(void);
46extern void touch_all_softlockup_watchdogs(void); 46extern void touch_all_softlockup_watchdogs(void);
47extern unsigned int softlockup_panic; 47extern unsigned int softlockup_panic;
48#else 48
49extern int lockup_detector_online_cpu(unsigned int cpu);
50extern int lockup_detector_offline_cpu(unsigned int cpu);
51#else /* CONFIG_SOFTLOCKUP_DETECTOR */
49static inline void touch_softlockup_watchdog_sched(void) { } 52static inline void touch_softlockup_watchdog_sched(void) { }
50static inline void touch_softlockup_watchdog(void) { } 53static inline void touch_softlockup_watchdog(void) { }
51static inline void touch_softlockup_watchdog_sync(void) { } 54static inline void touch_softlockup_watchdog_sync(void) { }
52static inline void touch_all_softlockup_watchdogs(void) { } 55static inline void touch_all_softlockup_watchdogs(void) { }
53#endif 56
57#define lockup_detector_online_cpu NULL
58#define lockup_detector_offline_cpu NULL
59#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
54 60
55#ifdef CONFIG_DETECT_HUNG_TASK 61#ifdef CONFIG_DETECT_HUNG_TASK
56void reset_hung_task_detector(void); 62void reset_hung_task_detector(void);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2950ce957656..68e91ef5494c 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -242,7 +242,12 @@ struct nvme_id_ctrl {
242 __le32 sanicap; 242 __le32 sanicap;
243 __le32 hmminds; 243 __le32 hmminds;
244 __le16 hmmaxd; 244 __le16 hmmaxd;
245 __u8 rsvd338[174]; 245 __u8 rsvd338[4];
246 __u8 anatt;
247 __u8 anacap;
248 __le32 anagrpmax;
249 __le32 nanagrpid;
250 __u8 rsvd352[160];
246 __u8 sqes; 251 __u8 sqes;
247 __u8 cqes; 252 __u8 cqes;
248 __le16 maxcmd; 253 __le16 maxcmd;
@@ -254,11 +259,12 @@ struct nvme_id_ctrl {
254 __le16 awun; 259 __le16 awun;
255 __le16 awupf; 260 __le16 awupf;
256 __u8 nvscc; 261 __u8 nvscc;
257 __u8 rsvd531; 262 __u8 nwpc;
258 __le16 acwu; 263 __le16 acwu;
259 __u8 rsvd534[2]; 264 __u8 rsvd534[2];
260 __le32 sgls; 265 __le32 sgls;
261 __u8 rsvd540[228]; 266 __le32 mnan;
267 __u8 rsvd544[224];
262 char subnqn[256]; 268 char subnqn[256];
263 __u8 rsvd1024[768]; 269 __u8 rsvd1024[768];
264 __le32 ioccsz; 270 __le32 ioccsz;
@@ -312,7 +318,11 @@ struct nvme_id_ns {
312 __le16 nabspf; 318 __le16 nabspf;
313 __le16 noiob; 319 __le16 noiob;
314 __u8 nvmcap[16]; 320 __u8 nvmcap[16];
315 __u8 rsvd64[40]; 321 __u8 rsvd64[28];
322 __le32 anagrpid;
323 __u8 rsvd96[3];
324 __u8 nsattr;
325 __u8 rsvd100[4];
316 __u8 nguid[16]; 326 __u8 nguid[16];
317 __u8 eui64[8]; 327 __u8 eui64[8];
318 struct nvme_lbaf lbaf[16]; 328 struct nvme_lbaf lbaf[16];
@@ -425,6 +435,32 @@ struct nvme_effects_log {
425 __u8 resv[2048]; 435 __u8 resv[2048];
426}; 436};
427 437
438enum nvme_ana_state {
439 NVME_ANA_OPTIMIZED = 0x01,
440 NVME_ANA_NONOPTIMIZED = 0x02,
441 NVME_ANA_INACCESSIBLE = 0x03,
442 NVME_ANA_PERSISTENT_LOSS = 0x04,
443 NVME_ANA_CHANGE = 0x0f,
444};
445
446struct nvme_ana_group_desc {
447 __le32 grpid;
448 __le32 nnsids;
449 __le64 chgcnt;
450 __u8 state;
451 __u8 rsvd17[15];
452 __le32 nsids[];
453};
454
455/* flag for the log specific field of the ANA log */
456#define NVME_ANA_LOG_RGO (1 << 0)
457
458struct nvme_ana_rsp_hdr {
459 __le64 chgcnt;
460 __le16 ngrps;
461 __le16 rsvd10[3];
462};
463
428enum { 464enum {
429 NVME_SMART_CRIT_SPARE = 1 << 0, 465 NVME_SMART_CRIT_SPARE = 1 << 0,
430 NVME_SMART_CRIT_TEMPERATURE = 1 << 1, 466 NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -444,11 +480,13 @@ enum {
444enum { 480enum {
445 NVME_AER_NOTICE_NS_CHANGED = 0x00, 481 NVME_AER_NOTICE_NS_CHANGED = 0x00,
446 NVME_AER_NOTICE_FW_ACT_STARTING = 0x01, 482 NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
483 NVME_AER_NOTICE_ANA = 0x03,
447}; 484};
448 485
449enum { 486enum {
450 NVME_AEN_CFG_NS_ATTR = 1 << 8, 487 NVME_AEN_CFG_NS_ATTR = 1 << 8,
451 NVME_AEN_CFG_FW_ACT = 1 << 9, 488 NVME_AEN_CFG_FW_ACT = 1 << 9,
489 NVME_AEN_CFG_ANA_CHANGE = 1 << 11,
452}; 490};
453 491
454struct nvme_lba_range_type { 492struct nvme_lba_range_type {
@@ -749,15 +787,22 @@ enum {
749 NVME_FEAT_HOST_MEM_BUF = 0x0d, 787 NVME_FEAT_HOST_MEM_BUF = 0x0d,
750 NVME_FEAT_TIMESTAMP = 0x0e, 788 NVME_FEAT_TIMESTAMP = 0x0e,
751 NVME_FEAT_KATO = 0x0f, 789 NVME_FEAT_KATO = 0x0f,
790 NVME_FEAT_HCTM = 0x10,
791 NVME_FEAT_NOPSC = 0x11,
792 NVME_FEAT_RRL = 0x12,
793 NVME_FEAT_PLM_CONFIG = 0x13,
794 NVME_FEAT_PLM_WINDOW = 0x14,
752 NVME_FEAT_SW_PROGRESS = 0x80, 795 NVME_FEAT_SW_PROGRESS = 0x80,
753 NVME_FEAT_HOST_ID = 0x81, 796 NVME_FEAT_HOST_ID = 0x81,
754 NVME_FEAT_RESV_MASK = 0x82, 797 NVME_FEAT_RESV_MASK = 0x82,
755 NVME_FEAT_RESV_PERSIST = 0x83, 798 NVME_FEAT_RESV_PERSIST = 0x83,
799 NVME_FEAT_WRITE_PROTECT = 0x84,
756 NVME_LOG_ERROR = 0x01, 800 NVME_LOG_ERROR = 0x01,
757 NVME_LOG_SMART = 0x02, 801 NVME_LOG_SMART = 0x02,
758 NVME_LOG_FW_SLOT = 0x03, 802 NVME_LOG_FW_SLOT = 0x03,
759 NVME_LOG_CHANGED_NS = 0x04, 803 NVME_LOG_CHANGED_NS = 0x04,
760 NVME_LOG_CMD_EFFECTS = 0x05, 804 NVME_LOG_CMD_EFFECTS = 0x05,
805 NVME_LOG_ANA = 0x0c,
761 NVME_LOG_DISC = 0x70, 806 NVME_LOG_DISC = 0x70,
762 NVME_LOG_RESERVATION = 0x80, 807 NVME_LOG_RESERVATION = 0x80,
763 NVME_FWACT_REPL = (0 << 3), 808 NVME_FWACT_REPL = (0 << 3),
@@ -765,6 +810,14 @@ enum {
765 NVME_FWACT_ACTV = (2 << 3), 810 NVME_FWACT_ACTV = (2 << 3),
766}; 811};
767 812
813/* NVMe Namespace Write Protect State */
814enum {
815 NVME_NS_NO_WRITE_PROTECT = 0,
816 NVME_NS_WRITE_PROTECT,
817 NVME_NS_WRITE_PROTECT_POWER_CYCLE,
818 NVME_NS_WRITE_PROTECT_PERMANENT,
819};
820
768#define NVME_MAX_CHANGED_NAMESPACES 1024 821#define NVME_MAX_CHANGED_NAMESPACES 1024
769 822
770struct nvme_identify { 823struct nvme_identify {
@@ -880,7 +933,7 @@ struct nvme_get_log_page_command {
880 __u64 rsvd2[2]; 933 __u64 rsvd2[2];
881 union nvme_data_ptr dptr; 934 union nvme_data_ptr dptr;
882 __u8 lid; 935 __u8 lid;
883 __u8 rsvd10; 936 __u8 lsp; /* upper 4 bits reserved */
884 __le16 numdl; 937 __le16 numdl;
885 __le16 numdu; 938 __le16 numdu;
886 __u16 rsvd11; 939 __u16 rsvd11;
@@ -1111,6 +1164,8 @@ enum {
1111 NVME_SC_SGL_INVALID_OFFSET = 0x16, 1164 NVME_SC_SGL_INVALID_OFFSET = 0x16,
1112 NVME_SC_SGL_INVALID_SUBTYPE = 0x17, 1165 NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
1113 1166
1167 NVME_SC_NS_WRITE_PROTECTED = 0x20,
1168
1114 NVME_SC_LBA_RANGE = 0x80, 1169 NVME_SC_LBA_RANGE = 0x80,
1115 NVME_SC_CAP_EXCEEDED = 0x81, 1170 NVME_SC_CAP_EXCEEDED = 0x81,
1116 NVME_SC_NS_NOT_READY = 0x82, 1171 NVME_SC_NS_NOT_READY = 0x82,
@@ -1180,6 +1235,13 @@ enum {
1180 NVME_SC_ACCESS_DENIED = 0x286, 1235 NVME_SC_ACCESS_DENIED = 0x286,
1181 NVME_SC_UNWRITTEN_BLOCK = 0x287, 1236 NVME_SC_UNWRITTEN_BLOCK = 0x287,
1182 1237
1238 /*
1239 * Path-related Errors:
1240 */
1241 NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
1242 NVME_SC_ANA_INACCESSIBLE = 0x302,
1243 NVME_SC_ANA_TRANSITION = 0x303,
1244
1183 NVME_SC_DNR = 0x4000, 1245 NVME_SC_DNR = 0x4000,
1184}; 1246};
1185 1247
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
index e6b240b6196c..379affc63e24 100644
--- a/include/linux/openvswitch.h
+++ b/include/linux/openvswitch.h
@@ -21,4 +21,9 @@
21 21
22#include <uapi/linux/openvswitch.h> 22#include <uapi/linux/openvswitch.h>
23 23
 24#define OVS_CLONE_ATTR_EXEC 0 /* Specify a u32 value. When nonzero,
25 * actions in clone will not change flow
26 * keys. False otherwise.
27 */
28
24#endif /* _LINUX_OPENVSWITCH_H */ 29#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0543800ec565..9b87f1936906 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -261,6 +261,9 @@ enum pci_bus_speed {
261 PCI_SPEED_UNKNOWN = 0xff, 261 PCI_SPEED_UNKNOWN = 0xff,
262}; 262};
263 263
264enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
265enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
266
264struct pci_cap_saved_data { 267struct pci_cap_saved_data {
265 u16 cap_nr; 268 u16 cap_nr;
266 bool cap_extended; 269 bool cap_extended;
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
deleted file mode 100644
index 07d78e4653bc..000000000000
--- a/include/linux/percpu_ida.h
+++ /dev/null
@@ -1,83 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __PERCPU_IDA_H__
3#define __PERCPU_IDA_H__
4
5#include <linux/types.h>
6#include <linux/bitops.h>
7#include <linux/init.h>
8#include <linux/sched.h>
9#include <linux/spinlock_types.h>
10#include <linux/wait.h>
11#include <linux/cpumask.h>
12
13struct percpu_ida_cpu;
14
15struct percpu_ida {
16 /*
17 * number of tags available to be allocated, as passed to
18 * percpu_ida_init()
19 */
20 unsigned nr_tags;
21 unsigned percpu_max_size;
22 unsigned percpu_batch_size;
23
24 struct percpu_ida_cpu __percpu *tag_cpu;
25
26 /*
27 * Bitmap of cpus that (may) have tags on their percpu freelists:
28 * steal_tags() uses this to decide when to steal tags, and which cpus
29 * to try stealing from.
30 *
31 * It's ok for a freelist to be empty when its bit is set - steal_tags()
32 * will just keep looking - but the bitmap _must_ be set whenever a
33 * percpu freelist does have tags.
34 */
35 cpumask_t cpus_have_tags;
36
37 struct {
38 spinlock_t lock;
39 /*
40 * When we go to steal tags from another cpu (see steal_tags()),
41 * we want to pick a cpu at random. Cycling through them every
42 * time we steal is a bit easier and more or less equivalent:
43 */
44 unsigned cpu_last_stolen;
45
46 /* For sleeping on allocation failure */
47 wait_queue_head_t wait;
48
49 /*
50 * Global freelist - it's a stack where nr_free points to the
51 * top
52 */
53 unsigned nr_free;
54 unsigned *freelist;
55 } ____cacheline_aligned_in_smp;
56};
57
58/*
59 * Number of tags we move between the percpu freelist and the global freelist at
60 * a time
61 */
62#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
63/* Max size of percpu freelist, */
64#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
65
66int percpu_ida_alloc(struct percpu_ida *pool, int state);
67void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
68
69void percpu_ida_destroy(struct percpu_ida *pool);
70int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
71 unsigned long max_size, unsigned long batch_size);
72static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
73{
74 return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
75 IDA_DEFAULT_PCPU_BATCH_MOVE);
76}
77
78typedef int (*percpu_ida_cb)(unsigned, void *);
79int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
80 void *data);
81
82unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
83#endif /* __PERCPU_IDA_H__ */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index ad5444491975..10f92e1d8e7b 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -25,6 +25,12 @@
25 */ 25 */
26#define ARMPMU_MAX_HWEVENTS 32 26#define ARMPMU_MAX_HWEVENTS 32
27 27
28/*
29 * ARM PMU hw_event flags
30 */
31/* Event uses a 64bit counter */
32#define ARMPMU_EVT_64BIT 1
33
28#define HW_OP_UNSUPPORTED 0xFFFF 34#define HW_OP_UNSUPPORTED 0xFFFF
29#define C(_x) PERF_COUNT_HW_CACHE_##_x 35#define C(_x) PERF_COUNT_HW_CACHE_##_x
30#define CACHE_OP_UNSUPPORTED 0xFFFF 36#define CACHE_OP_UNSUPPORTED 0xFFFF
@@ -87,14 +93,13 @@ struct arm_pmu {
87 struct perf_event *event); 93 struct perf_event *event);
88 int (*set_event_filter)(struct hw_perf_event *evt, 94 int (*set_event_filter)(struct hw_perf_event *evt,
89 struct perf_event_attr *attr); 95 struct perf_event_attr *attr);
90 u32 (*read_counter)(struct perf_event *event); 96 u64 (*read_counter)(struct perf_event *event);
91 void (*write_counter)(struct perf_event *event, u32 val); 97 void (*write_counter)(struct perf_event *event, u64 val);
92 void (*start)(struct arm_pmu *); 98 void (*start)(struct arm_pmu *);
93 void (*stop)(struct arm_pmu *); 99 void (*stop)(struct arm_pmu *);
94 void (*reset)(void *); 100 void (*reset)(void *);
95 int (*map_event)(struct perf_event *event); 101 int (*map_event)(struct perf_event *event);
96 int num_events; 102 int num_events;
97 u64 max_period;
98 bool secure_access; /* 32-bit ARM only */ 103 bool secure_access; /* 32-bit ARM only */
99#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 104#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
100 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); 105 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1fa12887ec02..53c500f0ca79 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -490,7 +490,7 @@ struct perf_addr_filters_head {
490}; 490};
491 491
492/** 492/**
493 * enum perf_event_state - the states of a event 493 * enum perf_event_state - the states of an event:
494 */ 494 */
495enum perf_event_state { 495enum perf_event_state {
496 PERF_EVENT_STATE_DEAD = -4, 496 PERF_EVENT_STATE_DEAD = -4,
@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
1130extern struct perf_callchain_entry * 1130extern struct perf_callchain_entry *
1131get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, 1131get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1132 u32 max_stack, bool crosstask, bool add_mark); 1132 u32 max_stack, bool crosstask, bool add_mark);
1133extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1133extern int get_callchain_buffers(int max_stack); 1134extern int get_callchain_buffers(int max_stack);
1134extern void put_callchain_buffers(void); 1135extern void put_callchain_buffers(void);
1135 1136
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6cd09098427c..cd6f637cbbfb 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -825,6 +825,16 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
825} 825}
826 826
827/** 827/**
828 * phy_polling_mode - Convenience function for testing whether polling is
829 * used to detect PHY status changes
830 * @phydev: the phy_device struct
831 */
832static inline bool phy_polling_mode(struct phy_device *phydev)
833{
834 return phydev->irq == PHY_POLL;
835}
836
837/**
828 * phy_is_internal - Convenience function for testing if a PHY is internal 838 * phy_is_internal - Convenience function for testing if a PHY is internal
829 * @phydev: the phy_device struct 839 * @phydev: the phy_device struct
830 */ 840 */
@@ -942,6 +952,8 @@ void phy_start(struct phy_device *phydev);
942void phy_stop(struct phy_device *phydev); 952void phy_stop(struct phy_device *phydev);
943int phy_start_aneg(struct phy_device *phydev); 953int phy_start_aneg(struct phy_device *phydev);
944int phy_aneg_done(struct phy_device *phydev); 954int phy_aneg_done(struct phy_device *phydev);
955int phy_speed_down(struct phy_device *phydev, bool sync);
956int phy_speed_up(struct phy_device *phydev);
945 957
946int phy_stop_interrupts(struct phy_device *phydev); 958int phy_stop_interrupts(struct phy_device *phydev);
947int phy_restart_aneg(struct phy_device *phydev); 959int phy_restart_aneg(struct phy_device *phydev);
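A sketch (not from the patch; the MAC driver callbacks and wiring are assumptions) of how the new phy_speed_down()/phy_speed_up() pair might be used around suspend and resume when only wake-on-LAN traffic is expected. The new phy_polling_mode() helper similarly lets callers test phydev->irq == PHY_POLL without open-coding the comparison.

#include <linux/phy.h>
#include <linux/pm_wakeup.h>

static int example_mac_suspend(struct device *dev, struct phy_device *phydev)
{
        if (device_may_wakeup(dev))
                return phy_speed_down(phydev, false);   /* don't wait for renegotiation */

        phy_stop(phydev);
        return 0;
}

static int example_mac_resume(struct device *dev, struct phy_device *phydev)
{
        if (device_may_wakeup(dev))
                return phy_speed_up(phydev);

        phy_start(phydev);
        return 0;
}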
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 50eeae025f1e..021fc6595856 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -234,5 +234,6 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
234#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) 234#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
235 235
236void phylink_set_port_modes(unsigned long *bits); 236void phylink_set_port_modes(unsigned long *bits);
237void phylink_helper_basex_speed(struct phylink_link_state *state);
237 238
238#endif 239#endif
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index 09eb80f2574a..8dd85d302b90 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -28,7 +28,8 @@ struct seq_file;
28 * is not available on this controller this should return -ENOTSUPP 28 * is not available on this controller this should return -ENOTSUPP
29 * and if it is available but disabled it should return -EINVAL 29 * and if it is available but disabled it should return -EINVAL
30 * @pin_config_set: configure an individual pin 30 * @pin_config_set: configure an individual pin
31 * @pin_config_group_get: get configurations for an entire pin group 31 * @pin_config_group_get: get configurations for an entire pin group; should
32 * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get.
32 * @pin_config_group_set: configure all pins in a group 33 * @pin_config_group_set: configure all pins in a group
33 * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration 34 * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration
34 * @pin_config_dbg_show: optional debugfs display hook that will provide 35 * @pin_config_dbg_show: optional debugfs display hook that will provide
diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h
deleted file mode 100644
index 30d169dfadf3..000000000000
--- a/include/linux/platform_data/bt-nokia-h4p.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/*
2 * This file is part of Nokia H4P bluetooth driver
3 *
4 * Copyright (C) 2010 Nokia Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22
23/**
24 * struct hci_h4p_platform data - hci_h4p Platform data structure
25 */
26struct hci_h4p_platform_data {
27 int chip_type;
28 int bt_sysclk;
29 unsigned int bt_wakeup_gpio;
30 unsigned int host_wakeup_gpio;
31 unsigned int reset_gpio;
32 int reset_gpio_shared;
33 unsigned int uart_irq;
34 phys_addr_t uart_base;
35 const char *uart_iclk;
36 const char *uart_fclk;
37 void (*set_pm_limits)(struct device *dev, bool set);
38};
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 90ae19ca828f..57a5a35e0073 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -22,6 +22,7 @@
22#include <asm-generic/gpio.h> 22#include <asm-generic/gpio.h>
23 23
24#define MAX_REGS_BANKS 5 24#define MAX_REGS_BANKS 5
25#define MAX_INT_PER_BANK 32
25 26
26struct davinci_gpio_platform_data { 27struct davinci_gpio_platform_data {
27 u32 ngpio; 28 u32 ngpio;
@@ -41,7 +42,7 @@ struct davinci_gpio_controller {
41 spinlock_t lock; 42 spinlock_t lock;
42 void __iomem *regs[MAX_REGS_BANKS]; 43 void __iomem *regs[MAX_REGS_BANKS];
43 int gpio_unbanked; 44 int gpio_unbanked;
44 unsigned int base_irq; 45 int irqs[MAX_INT_PER_BANK];
45 unsigned int base; 46 unsigned int base;
46}; 47};
47 48
diff --git a/include/linux/platform_data/jz4740/jz4740_nand.h b/include/linux/platform_data/jz4740/jz4740_nand.h
new file mode 100644
index 000000000000..bc571f6d5ced
--- /dev/null
+++ b/include/linux/platform_data/jz4740/jz4740_nand.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
3 * JZ4740 SoC NAND controller driver
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */
15
16#ifndef __JZ4740_NAND_H__
17#define __JZ4740_NAND_H__
18
19#include <linux/mtd/rawnand.h>
20#include <linux/mtd/partitions.h>
21
22#define JZ_NAND_NUM_BANKS 4
23
24struct jz_nand_platform_data {
25 int num_partitions;
26 struct mtd_partition *partitions;
27
28 unsigned char banks[JZ_NAND_NUM_BANKS];
29
30 void (*ident_callback)(struct platform_device *, struct mtd_info *,
31 struct mtd_partition **, int *num_partitions);
32};
33
34#endif
diff --git a/include/linux/platform_data/media/sii9234.h b/include/linux/platform_data/media/sii9234.h
deleted file mode 100644
index 6a4a809fe9a3..000000000000
--- a/include/linux/platform_data/media/sii9234.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Driver header for SII9234 MHL converter chip.
3 *
4 * Copyright (c) 2011 Samsung Electronics, Co. Ltd
5 * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef SII9234_H
14#define SII9234_H
15
16/**
17 * @gpio_n_reset: GPIO driving nRESET pin
18 */
19
20struct sii9234_platform_data {
21 int gpio_n_reset;
22};
23
24#endif /* SII9234_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index d1397c8ed94e..6397b9c8149a 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -12,9 +12,13 @@
12#ifndef MMP_DMA_H 12#ifndef MMP_DMA_H
13#define MMP_DMA_H 13#define MMP_DMA_H
14 14
15struct dma_slave_map;
16
15struct mmp_dma_platdata { 17struct mmp_dma_platdata {
16 int dma_channels; 18 int dma_channels;
17 int nb_requestors; 19 int nb_requestors;
20 int slave_map_cnt;
21 const struct dma_slave_map *slave_map;
18}; 22};
19 23
20#endif /* MMP_DMA_H */ 24#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h
index a7ce77c7c1a8..34828eb85982 100644
--- a/include/linux/platform_data/mtd-orion_nand.h
+++ b/include/linux/platform_data/mtd-orion_nand.h
@@ -12,7 +12,6 @@
12 */ 12 */
13struct orion_nand_data { 13struct orion_nand_data {
14 struct mtd_partition *parts; 14 struct mtd_partition *parts;
15 int (*dev_ready)(struct mtd_info *mtd);
16 u32 nr_parts; 15 u32 nr_parts;
17 u8 ale; /* address line number connected to ALE */ 16 u8 ale; /* address line number connected to ALE */
18 u8 cle; /* address line number connected to CLE */ 17 u8 cle; /* address line number connected to CLE */
diff --git a/include/linux/platform_data/txx9/ndfmc.h b/include/linux/platform_data/txx9/ndfmc.h
new file mode 100644
index 000000000000..fc172627d54e
--- /dev/null
+++ b/include/linux/platform_data/txx9/ndfmc.h
@@ -0,0 +1,30 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * (C) Copyright TOSHIBA CORPORATION 2007
7 */
8#ifndef __TXX9_NDFMC_H
9#define __TXX9_NDFMC_H
10
11#define NDFMC_PLAT_FLAG_USE_BSPRT 0x01
12#define NDFMC_PLAT_FLAG_NO_RSTR 0x02
13#define NDFMC_PLAT_FLAG_HOLDADD 0x04
14#define NDFMC_PLAT_FLAG_DUMMYWRITE 0x08
15
16struct txx9ndfmc_platform_data {
17 unsigned int shift;
18 unsigned int gbus_clock;
19 unsigned int hold; /* hold time in nanosecond */
20 unsigned int spw; /* strobe pulse width in nanosecond */
21 unsigned int flags;
22 unsigned char ch_mask; /* available channel bitmask */
23 unsigned char wp_mask; /* write-protect bitmask */
24 unsigned char wide_mask; /* 16bit-nand bitmask */
25};
26
27void txx9_ndfmc_init(unsigned long baseaddr,
28 const struct txx9ndfmc_platform_data *plat_data);
29
30#endif /* __TXX9_NDFMC_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9206a4fef9ac..776c546d581a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -234,11 +234,13 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
234int of_genpd_parse_idle_states(struct device_node *dn, 234int of_genpd_parse_idle_states(struct device_node *dn,
235 struct genpd_power_state **states, int *n); 235 struct genpd_power_state **states, int *n);
236unsigned int of_genpd_opp_to_performance_state(struct device *dev, 236unsigned int of_genpd_opp_to_performance_state(struct device *dev,
237 struct device_node *opp_node); 237 struct device_node *np);
238 238
239int genpd_dev_pm_attach(struct device *dev); 239int genpd_dev_pm_attach(struct device *dev);
240struct device *genpd_dev_pm_attach_by_id(struct device *dev, 240struct device *genpd_dev_pm_attach_by_id(struct device *dev,
241 unsigned int index); 241 unsigned int index);
242struct device *genpd_dev_pm_attach_by_name(struct device *dev,
243 char *name);
242#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ 244#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
243static inline int of_genpd_add_provider_simple(struct device_node *np, 245static inline int of_genpd_add_provider_simple(struct device_node *np,
244 struct generic_pm_domain *genpd) 246 struct generic_pm_domain *genpd)
@@ -274,9 +276,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
274 276
275static inline unsigned int 277static inline unsigned int
276of_genpd_opp_to_performance_state(struct device *dev, 278of_genpd_opp_to_performance_state(struct device *dev,
277 struct device_node *opp_node) 279 struct device_node *np)
278{ 280{
279 return -ENODEV; 281 return 0;
280} 282}
281 283
282static inline int genpd_dev_pm_attach(struct device *dev) 284static inline int genpd_dev_pm_attach(struct device *dev)
@@ -290,6 +292,12 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
290 return NULL; 292 return NULL;
291} 293}
292 294
295static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
296 char *name)
297{
298 return NULL;
299}
300
293static inline 301static inline
294struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) 302struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
295{ 303{
@@ -301,6 +309,8 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
301int dev_pm_domain_attach(struct device *dev, bool power_on); 309int dev_pm_domain_attach(struct device *dev, bool power_on);
302struct device *dev_pm_domain_attach_by_id(struct device *dev, 310struct device *dev_pm_domain_attach_by_id(struct device *dev,
303 unsigned int index); 311 unsigned int index);
312struct device *dev_pm_domain_attach_by_name(struct device *dev,
313 char *name);
304void dev_pm_domain_detach(struct device *dev, bool power_off); 314void dev_pm_domain_detach(struct device *dev, bool power_off);
305void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); 315void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
306#else 316#else
@@ -313,6 +323,11 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
313{ 323{
314 return NULL; 324 return NULL;
315} 325}
326static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
327 char *name)
328{
329 return NULL;
330}
316static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} 331static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
317static inline void dev_pm_domain_set(struct device *dev, 332static inline void dev_pm_domain_set(struct device *dev,
318 struct dev_pm_domain *pd) {} 333 struct dev_pm_domain *pd) {}
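A sketch of how a consumer might use the new *_attach_by_name() helpers. The domain name "perf" and the exact error handling are assumptions for illustration, not taken from this patch.

#include <linux/pm_domain.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_attach_domain(struct device *dev)
{
        struct device *pd_dev;

        /* Attach the PM domain listed under the assumed name "perf" in the
         * device's power-domains / power-domain-names properties. */
        pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
        if (IS_ERR_OR_NULL(pd_dev))
                return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;

        /* ... use pd_dev, e.g. as a device-link supplier ... */

        dev_pm_domain_detach(pd_dev, true);
        return 0;
}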
diff --git a/include/linux/poll.h b/include/linux/poll.h
index fdf86b4cbc71..7e0fdcf905d2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
74 pt->_key = ~(__poll_t)0; /* all events enabled */ 74 pt->_key = ~(__poll_t)0; /* all events enabled */
75} 75}
76 76
77static inline bool file_has_poll_mask(struct file *file) 77static inline bool file_can_poll(struct file *file)
78{ 78{
79 return file->f_op->get_poll_head && file->f_op->poll_mask; 79 return file->f_op->poll;
80} 80}
81 81
82static inline bool file_can_poll(struct file *file) 82static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
83{ 83{
84 return file->f_op->poll || file_has_poll_mask(file); 84 if (unlikely(!file->f_op->poll))
85 return DEFAULT_POLLMASK;
86 return file->f_op->poll(file, pt);
85} 87}
86 88
87__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
88
89struct poll_table_entry { 89struct poll_table_entry {
90 struct file *filp; 90 struct file *filp;
91 __poll_t key; 91 __poll_t key;
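For illustration (function name invented): after this change vfs_poll() is an inline that falls back to DEFAULT_POLLMASK when the file has no ->poll method, so guarding with file_can_poll() becomes an optional fast path rather than a requirement.

#include <linux/poll.h>
#include <linux/fs.h>

static __poll_t example_poll_other_file(struct file *file, poll_table *pt)
{
        /* Optional shortcut: files without ->poll always report
         * DEFAULT_POLLMASK, which vfs_poll() now handles on its own. */
        if (!file_can_poll(file))
                return DEFAULT_POLLMASK;

        return vfs_poll(file, pt);
}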
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index c85704fcdbd2..ee7e987ea1b4 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -95,8 +95,8 @@ struct k_itimer {
95 clockid_t it_clock; 95 clockid_t it_clock;
96 timer_t it_id; 96 timer_t it_id;
97 int it_active; 97 int it_active;
98 int it_overrun; 98 s64 it_overrun;
99 int it_overrun_last; 99 s64 it_overrun_last;
100 int it_requeue_pending; 100 int it_requeue_pending;
101 int it_sigev_notify; 101 int it_sigev_notify;
102 ktime_t it_interval; 102 ktime_t it_interval;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 6d7e800affd8..cf3eccfe1543 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -50,15 +50,15 @@ static inline const char *printk_skip_headers(const char *buffer)
50/* We show everything that is MORE important than this.. */ 50/* We show everything that is MORE important than this.. */
51#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ 51#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
52#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ 52#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
53#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
54#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ 53#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
55#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ 54#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
56 55
57/* 56/*
58 * Default used to be hard-coded at 7, we're now allowing it to be set from 57 * Default used to be hard-coded at 7, quiet used to be hardcoded at 4,
59 * kernel config. 58 * we're now allowing both to be set from kernel config.
60 */ 59 */
61#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT 60#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
61#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
62 62
63extern int console_printk[]; 63extern int console_printk[];
64 64
@@ -148,9 +148,13 @@ void early_printk(const char *s, ...) { }
148#ifdef CONFIG_PRINTK_NMI 148#ifdef CONFIG_PRINTK_NMI
149extern void printk_nmi_enter(void); 149extern void printk_nmi_enter(void);
150extern void printk_nmi_exit(void); 150extern void printk_nmi_exit(void);
151extern void printk_nmi_direct_enter(void);
152extern void printk_nmi_direct_exit(void);
151#else 153#else
152static inline void printk_nmi_enter(void) { } 154static inline void printk_nmi_enter(void) { }
153static inline void printk_nmi_exit(void) { } 155static inline void printk_nmi_exit(void) { }
156static inline void printk_nmi_direct_enter(void) { }
157static inline void printk_nmi_direct_exit(void) { }
154#endif /* PRINTK_NMI */ 158#endif /* PRINTK_NMI */
155 159
156#ifdef CONFIG_PRINTK 160#ifdef CONFIG_PRINTK
diff --git a/include/linux/pti.h b/include/linux/pti.h
index 0174883a935a..1a941efcaa62 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -6,6 +6,7 @@
6#include <asm/pti.h> 6#include <asm/pti.h>
7#else 7#else
8static inline void pti_init(void) { } 8static inline void pti_init(void) { }
9static inline void pti_finalize(void) { }
9#endif 10#endif
10 11
11#endif 12#endif
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 8461b18e4608..13b4244d44c1 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -171,6 +171,14 @@
171#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */ 171#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
172#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */ 172#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
173#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */ 173#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
174#define SSACD_ACDS_1 (0)
175#define SSACD_ACDS_2 (1)
176#define SSACD_ACDS_4 (2)
177#define SSACD_ACDS_8 (3)
178#define SSACD_ACDS_16 (4)
179#define SSACD_ACDS_32 (5)
180#define SSACD_SCDB_4X (0)
181#define SSACD_SCDB_1X (1)
174#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */ 182#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
175 183
176/* LPSS SSP */ 184/* LPSS SSP */
@@ -212,8 +220,6 @@ struct ssp_device {
212 int type; 220 int type;
213 int use_count; 221 int use_count;
214 int irq; 222 int irq;
215 int drcmr_rx;
216 int drcmr_tx;
217 223
218 struct device_node *of_node; 224 struct device_node *of_node;
219}; 225};
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index b401b962afff..5d65521260b3 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -87,6 +87,10 @@ static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
87static inline int 87static inline int
88qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } 88qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
89static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } 89static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
90static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
91 unsigned int *src,
92 struct qcom_scm_vmperm *newvm,
93 int dest_cnt) { return -ENODEV; }
90static inline void qcom_scm_cpu_power_down(u32 flags) {} 94static inline void qcom_scm_cpu_power_down(u32 flags) {}
91static inline u32 qcom_scm_get_version(void) { return 0; } 95static inline u32 qcom_scm_get_version(void) { return 0; }
92static inline u32 96static inline u32
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 2978fa4add42..a1310482c4ed 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -39,6 +39,10 @@
39#include <linux/qed/qed_if.h> 39#include <linux/qed/qed_if.h>
40#include <linux/qed/qed_iov_if.h> 40#include <linux/qed/qed_iov_if.h>
41 41
42/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
43#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
44#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
45
42struct qed_queue_start_common_params { 46struct qed_queue_start_common_params {
43 /* Should always be relative to entity sending this. */ 47 /* Should always be relative to entity sending this. */
44 u8 vport_id; 48 u8 vport_id;
@@ -49,6 +53,8 @@ struct qed_queue_start_common_params {
49 53
50 struct qed_sb_info *p_sb; 54 struct qed_sb_info *p_sb;
51 u8 sb_idx; 55 u8 sb_idx;
56
57 u8 tc;
52}; 58};
53 59
54struct qed_rxq_start_ret_params { 60struct qed_rxq_start_ret_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b4040023cbfb..8cd34645e892 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -759,6 +759,9 @@ struct qed_generic_tlvs {
759 u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN]; 759 u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
760}; 760};
761 761
762#define QED_I2C_DEV_ADDR_A0 0xA0
763#define QED_I2C_DEV_ADDR_A2 0xA2
764
762#define QED_NVM_SIGNATURE 0x12435687 765#define QED_NVM_SIGNATURE 0x12435687
763 766
764enum qed_nvm_flash_cmd { 767enum qed_nvm_flash_cmd {
@@ -1026,6 +1029,18 @@ struct qed_common_ops {
1026 * @param enabled - true iff WoL should be enabled. 1029 * @param enabled - true iff WoL should be enabled.
1027 */ 1030 */
1028 int (*update_wol) (struct qed_dev *cdev, bool enabled); 1031 int (*update_wol) (struct qed_dev *cdev, bool enabled);
1032
1033/**
1034 * @brief read_module_eeprom
1035 *
1036 * @param cdev
1037 * @param buf - buffer
1038 * @param dev_addr - PHY device memory region
1039 * @param offset - offset into eeprom contents to be read
1040 * @param len - buffer length, i.e., max bytes to be read
1041 */
1042 int (*read_module_eeprom)(struct qed_dev *cdev,
1043 char *buf, u8 dev_addr, u32 offset, u32 len);
1029}; 1044};
1030 1045
1031#define MASK_FIELD(_name, _value) \ 1046#define MASK_FIELD(_name, _value) \
diff --git a/include/linux/random.h b/include/linux/random.h
index 2ddf13b4281e..445a0ea4ff49 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -36,9 +36,10 @@ extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
36 36
37extern void get_random_bytes(void *buf, int nbytes); 37extern void get_random_bytes(void *buf, int nbytes);
38extern int wait_for_random_bytes(void); 38extern int wait_for_random_bytes(void);
39extern bool rng_is_initialized(void);
39extern int add_random_ready_callback(struct random_ready_callback *rdy); 40extern int add_random_ready_callback(struct random_ready_callback *rdy);
40extern void del_random_ready_callback(struct random_ready_callback *rdy); 41extern void del_random_ready_callback(struct random_ready_callback *rdy);
41extern void get_random_bytes_arch(void *buf, int nbytes); 42extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
42 43
43#ifndef MODULE 44#ifndef MODULE
44extern const struct file_operations random_fops, urandom_fops; 45extern const struct file_operations random_fops, urandom_fops;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 36df6ccbc874..4786c2235b98 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
396 * @member: the name of the list_head within the struct. 396 * @member: the name of the list_head within the struct.
397 * 397 *
398 * Continue to iterate over list of given type, continuing after 398 * Continue to iterate over list of given type, continuing after
399 * the current position. 399 * the current position which must have been in the list when the RCU read
400 * lock was taken.
401 * This would typically require either that you obtained the node from a
402 * previous walk of the list in the same RCU read-side critical section, or
403 * that you held some sort of non-RCU reference (such as a reference count)
404 * to keep the node alive *and* in the list.
405 *
406 * This iterator is similar to list_for_each_entry_from_rcu() except
407 * this starts after the given position and that one starts at the given
408 * position.
400 */ 409 */
401#define list_for_each_entry_continue_rcu(pos, head, member) \ 410#define list_for_each_entry_continue_rcu(pos, head, member) \
402 for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ 411 for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
411 * 420 *
412 * Iterate over the tail of a list starting from a given position, 421 * Iterate over the tail of a list starting from a given position,
413 * which must have been in the list when the RCU read lock was taken. 422 * which must have been in the list when the RCU read lock was taken.
423 * This would typically require either that you obtained the node from a
424 * previous walk of the list in the same RCU read-side critical section, or
425 * that you held some sort of non-RCU reference (such as a reference count)
426 * to keep the node alive *and* in the list.
427 *
428 * This iterator is similar to list_for_each_entry_continue_rcu() except
429 * this starts from the given position and that one starts from the position
430 * after the given position.
414 */ 431 */
415#define list_for_each_entry_from_rcu(pos, head, member) \ 432#define list_for_each_entry_from_rcu(pos, head, member) \
416 for (; &(pos)->member != (head); \ 433 for (; &(pos)->member != (head); \
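A sketch of the distinction the new comments draw (the item type is hypothetical): both walks must run under rcu_read_lock(), and `pos` must be known to still be on the list, e.g. via a held reference.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_item {
        int value;
        struct list_head link;
};

/* Resume a walk *from* a node we already hold: the node itself is visited
 * again, unlike list_for_each_entry_continue_rcu(), which starts at the
 * node *after* it. */
static int example_sum_from(struct list_head *head, struct example_item *pos)
{
        int sum = 0;

        rcu_read_lock();
        list_for_each_entry_from_rcu(pos, head, link)
                sum += pos->value;
        rcu_read_unlock();

        return sum;
}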
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 65163aa0bb04..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
64 64
65void __rcu_read_lock(void); 65void __rcu_read_lock(void);
66void __rcu_read_unlock(void); 66void __rcu_read_unlock(void);
67void rcu_read_unlock_special(struct task_struct *t);
68void synchronize_rcu(void); 67void synchronize_rcu(void);
69 68
70/* 69/*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
159 } while (0) 158 } while (0)
160 159
161/* 160/*
162 * Note a voluntary context switch for RCU-tasks benefit. This is a 161 * Note a quasi-voluntary context switch for RCU-tasks's benefit.
163 * macro rather than an inline function to avoid #include hell. 162 * This is a macro rather than an inline function to avoid #include hell.
164 */ 163 */
165#ifdef CONFIG_TASKS_RCU 164#ifdef CONFIG_TASKS_RCU
166#define rcu_note_voluntary_context_switch_lite(t) \ 165#define rcu_tasks_qs(t) \
167 do { \ 166 do { \
168 if (READ_ONCE((t)->rcu_tasks_holdout)) \ 167 if (READ_ONCE((t)->rcu_tasks_holdout)) \
169 WRITE_ONCE((t)->rcu_tasks_holdout, false); \ 168 WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
171#define rcu_note_voluntary_context_switch(t) \ 170#define rcu_note_voluntary_context_switch(t) \
172 do { \ 171 do { \
173 rcu_all_qs(); \ 172 rcu_all_qs(); \
174 rcu_note_voluntary_context_switch_lite(t); \ 173 rcu_tasks_qs(t); \
175 } while (0) 174 } while (0)
176void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); 175void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
177void synchronize_rcu_tasks(void); 176void synchronize_rcu_tasks(void);
178void exit_tasks_rcu_start(void); 177void exit_tasks_rcu_start(void);
179void exit_tasks_rcu_finish(void); 178void exit_tasks_rcu_finish(void);
180#else /* #ifdef CONFIG_TASKS_RCU */ 179#else /* #ifdef CONFIG_TASKS_RCU */
181#define rcu_note_voluntary_context_switch_lite(t) do { } while (0) 180#define rcu_tasks_qs(t) do { } while (0)
182#define rcu_note_voluntary_context_switch(t) rcu_all_qs() 181#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
183#define call_rcu_tasks call_rcu_sched 182#define call_rcu_tasks call_rcu_sched
184#define synchronize_rcu_tasks synchronize_sched 183#define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
195 */ 194 */
196#define cond_resched_tasks_rcu_qs() \ 195#define cond_resched_tasks_rcu_qs() \
197do { \ 196do { \
198 if (!cond_resched()) \ 197 rcu_tasks_qs(current); \
199 rcu_note_voluntary_context_switch_lite(current); \ 198 cond_resched(); \
200} while (0) 199} while (0)
201 200
202/* 201/*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
567 * This is simply an identity function, but it documents where a pointer 566 * This is simply an identity function, but it documents where a pointer
568 * is handed off from RCU to some other synchronization mechanism, for 567 * is handed off from RCU to some other synchronization mechanism, for
569 * example, reference counting or locking. In C11, it would map to 568 * example, reference counting or locking. In C11, it would map to
570 * kill_dependency(). It could be used as follows: 569 * kill_dependency(). It could be used as follows::
571 * `` 570 *
572 * rcu_read_lock(); 571 * rcu_read_lock();
573 * p = rcu_dereference(gp); 572 * p = rcu_dereference(gp);
574 * long_lived = is_long_lived(p); 573 * long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
579 * p = rcu_pointer_handoff(p); 578 * p = rcu_pointer_handoff(p);
580 * } 579 * }
581 * rcu_read_unlock(); 580 * rcu_read_unlock();
582 *``
583 */ 581 */
584#define rcu_pointer_handoff(p) (p) 582#define rcu_pointer_handoff(p) (p)
585 583
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
93#define rcu_note_context_switch(preempt) \ 93#define rcu_note_context_switch(preempt) \
94 do { \ 94 do { \
95 rcu_sched_qs(); \ 95 rcu_sched_qs(); \
96 rcu_note_voluntary_context_switch_lite(current); \ 96 rcu_tasks_qs(current); \
97 } while (0) 97 } while (0)
98 98
99static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) 99static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
index e031e9f2f9d8..585ce89c0f33 100644
--- a/include/linux/reciprocal_div.h
+++ b/include/linux/reciprocal_div.h
@@ -25,6 +25,9 @@ struct reciprocal_value {
25 u8 sh1, sh2; 25 u8 sh1, sh2;
26}; 26};
27 27
28/* "reciprocal_value" and "reciprocal_divide" together implement the basic
29 * version of the algorithm described in Figure 4.1 of the paper.
30 */
28struct reciprocal_value reciprocal_value(u32 d); 31struct reciprocal_value reciprocal_value(u32 d);
29 32
30static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R) 33static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
@@ -33,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
33 return (t + ((a - t) >> R.sh1)) >> R.sh2; 36 return (t + ((a - t) >> R.sh1)) >> R.sh2;
34} 37}
35 38
39struct reciprocal_value_adv {
40 u32 m;
41 u8 sh, exp;
42 bool is_wide_m;
43};
44
45/* "reciprocal_value_adv" implements the advanced version of the algorithm
46 * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose
47 * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The
48 * exception case could be easily handled before calling "reciprocal_value_adv".
49 *
50 * The advanced version requires more complex calculation to get the reciprocal
51 * multiplier and other control variables, but then could reduce the required
52 * emulation operations.
53 *
 54 * It makes no sense to use this advanced version for host divide emulation;
 55 * the extra complexity of calculating the multiplier and control variables
 56 * could completely cancel out the savings on emulation operations.
57 *
58 * However, it makes sense to use it for JIT divide code generation for which
59 * we are willing to trade performance of JITed code with that of host. As shown
60 * by the following pseudo code, the required emulation operations could go down
61 * from 6 (the basic version) to 3 or 4.
62 *
63 * To use the result of "reciprocal_value_adv", suppose we want to calculate
64 * n/d, the pseudo C code will be:
65 *
66 * struct reciprocal_value_adv rvalue;
67 * u8 pre_shift, exp;
68 *
69 * // handle exception case.
70 * if (d >= (1U << 31)) {
71 * result = n >= d;
72 * return;
73 * }
74 *
75 * rvalue = reciprocal_value_adv(d, 32)
76 * exp = rvalue.exp;
77 * if (rvalue.is_wide_m && !(d & 1)) {
78 * // floor(log2(d & (2^32 -d)))
79 * pre_shift = fls(d & -d) - 1;
80 * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
81 * } else {
82 * pre_shift = 0;
83 * }
84 *
85 * // code generation starts.
86 * if (imm == 1U << exp) {
87 * result = n >> exp;
88 * } else if (rvalue.is_wide_m) {
89 * // pre_shift must be zero when reached here.
90 * t = (n * rvalue.m) >> 32;
91 * result = n - t;
92 * result >>= 1;
93 * result += t;
94 * result >>= rvalue.sh - 1;
95 * } else {
96 * if (pre_shift)
97 * result = n >> pre_shift;
98 * result = ((u64)result * rvalue.m) >> 32;
99 * result >>= rvalue.sh;
100 * }
101 */
102struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec);
103
36#endif /* _LINUX_RECIPROCAL_DIV_H */ 104#endif /* _LINUX_RECIPROCAL_DIV_H */
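For context, a sketch of the basic (Figure 4.1) interface the new comment contrasts with the advanced one: reciprocal_value() is computed once per runtime divisor, and reciprocal_divide() then replaces each division with a multiply and shifts. The helper name below is made up.

#include <linux/reciprocal_div.h>

/* Hypothetical hot path dividing many values by the same divisor:
 * compute R once (struct reciprocal_value R = reciprocal_value(nbuckets);)
 * and reuse it for every divide. */
static u32 example_modulo(u32 hash, u32 nbuckets,
                          const struct reciprocal_value *R)
{
        u32 q = reciprocal_divide(hash, *R);    /* == hash / nbuckets */

        return hash - q * nbuckets;             /* == hash % nbuckets */
}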
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 4193c41e383a..e28cce21bad6 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -3,9 +3,10 @@
3#define _LINUX_REFCOUNT_H 3#define _LINUX_REFCOUNT_H
4 4
5#include <linux/atomic.h> 5#include <linux/atomic.h>
6#include <linux/mutex.h> 6#include <linux/compiler.h>
7#include <linux/spinlock.h> 7#include <linux/spinlock_types.h>
8#include <linux/kernel.h> 8
9struct mutex;
9 10
10/** 11/**
11 * struct refcount_t - variant of atomic_t specialized for reference counts 12 * struct refcount_t - variant of atomic_t specialized for reference counts
@@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r)
42 return atomic_read(&r->refs); 43 return atomic_read(&r->refs);
43} 44}
44 45
46extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
47extern void refcount_add_checked(unsigned int i, refcount_t *r);
48
49extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
50extern void refcount_inc_checked(refcount_t *r);
51
52extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
53
54extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
55extern void refcount_dec_checked(refcount_t *r);
56
45#ifdef CONFIG_REFCOUNT_FULL 57#ifdef CONFIG_REFCOUNT_FULL
46extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
47extern void refcount_add(unsigned int i, refcount_t *r);
48 58
49extern __must_check bool refcount_inc_not_zero(refcount_t *r); 59#define refcount_add_not_zero refcount_add_not_zero_checked
50extern void refcount_inc(refcount_t *r); 60#define refcount_add refcount_add_checked
61
62#define refcount_inc_not_zero refcount_inc_not_zero_checked
63#define refcount_inc refcount_inc_checked
51 64
52extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r); 65#define refcount_sub_and_test refcount_sub_and_test_checked
66
67#define refcount_dec_and_test refcount_dec_and_test_checked
68#define refcount_dec refcount_dec_checked
53 69
54extern __must_check bool refcount_dec_and_test(refcount_t *r);
55extern void refcount_dec(refcount_t *r);
56#else 70#else
57# ifdef CONFIG_ARCH_HAS_REFCOUNT 71# ifdef CONFIG_ARCH_HAS_REFCOUNT
58# include <asm/refcount.h> 72# include <asm/refcount.h>
@@ -98,5 +112,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
98extern __must_check bool refcount_dec_not_one(refcount_t *r); 112extern __must_check bool refcount_dec_not_one(refcount_t *r);
99extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock); 113extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
100extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock); 114extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
101 115extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
116 spinlock_t *lock,
117 unsigned long *flags);
102#endif /* _LINUX_REFCOUNT_H */ 118#endif /* _LINUX_REFCOUNT_H */
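A sketch of the new refcount_dec_and_lock_irqsave() helper (the object layout and kfree()-based teardown are assumptions): the lock is taken with interrupts saved only when the count actually drops to zero.

#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_obj {
        refcount_t      ref;
        spinlock_t      lock;
};

static void example_put(struct example_obj *obj)
{
        unsigned long flags;

        if (!refcount_dec_and_lock_irqsave(&obj->ref, &obj->lock, &flags))
                return;         /* not the last reference */

        /* Last reference: the lock is held here with IRQs saved, so any
         * state protected by obj->lock can be torn down before freeing. */
        spin_unlock_irqrestore(&obj->lock, flags);
        kfree(obj);
}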
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4f38068ffb71..379505a53722 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *);
268 * field is NULL but precious_table (see below) is not, the 268 * field is NULL but precious_table (see below) is not, the
269 * check is performed on such table (a register is precious if 269 * check is performed on such table (a register is precious if
270 * it belongs to one of the ranges specified by precious_table). 270 * it belongs to one of the ranges specified by precious_table).
271 * @readable_noinc_reg: Optional callback returning true if the register
272 * supports multiple read operations without incrementing
273 * the register number. If this field is NULL but
274 * rd_noinc_table (see below) is not, the check is
275 * performed on such table (a register is no increment
276 * readable if it belongs to one of the ranges specified
277 * by rd_noinc_table).
271 * @disable_locking: This regmap is either protected by external means or 278 * @disable_locking: This regmap is either protected by external means or
 272 * is guaranteed not to be accessed from multiple threads. 279 * is guaranteed not to be accessed from multiple threads.
273 * Don't use any locking mechanisms. 280 * Don't use any locking mechanisms.
@@ -295,6 +302,7 @@ typedef void (*regmap_unlock)(void *);
295 * @rd_table: As above, for read access. 302 * @rd_table: As above, for read access.
296 * @volatile_table: As above, for volatile registers. 303 * @volatile_table: As above, for volatile registers.
297 * @precious_table: As above, for precious registers. 304 * @precious_table: As above, for precious registers.
305 * @rd_noinc_table: As above, for no increment readable registers.
298 * @reg_defaults: Power on reset values for registers (for use with 306 * @reg_defaults: Power on reset values for registers (for use with
299 * register cache support). 307 * register cache support).
300 * @num_reg_defaults: Number of elements in reg_defaults. 308 * @num_reg_defaults: Number of elements in reg_defaults.
@@ -344,6 +352,7 @@ struct regmap_config {
344 bool (*readable_reg)(struct device *dev, unsigned int reg); 352 bool (*readable_reg)(struct device *dev, unsigned int reg);
345 bool (*volatile_reg)(struct device *dev, unsigned int reg); 353 bool (*volatile_reg)(struct device *dev, unsigned int reg);
346 bool (*precious_reg)(struct device *dev, unsigned int reg); 354 bool (*precious_reg)(struct device *dev, unsigned int reg);
355 bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
347 356
348 bool disable_locking; 357 bool disable_locking;
349 regmap_lock lock; 358 regmap_lock lock;
@@ -360,6 +369,7 @@ struct regmap_config {
360 const struct regmap_access_table *rd_table; 369 const struct regmap_access_table *rd_table;
361 const struct regmap_access_table *volatile_table; 370 const struct regmap_access_table *volatile_table;
362 const struct regmap_access_table *precious_table; 371 const struct regmap_access_table *precious_table;
372 const struct regmap_access_table *rd_noinc_table;
363 const struct reg_default *reg_defaults; 373 const struct reg_default *reg_defaults;
364 unsigned int num_reg_defaults; 374 unsigned int num_reg_defaults;
365 enum regcache_type cache_type; 375 enum regcache_type cache_type;
@@ -514,6 +524,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
514 const struct regmap_config *config, 524 const struct regmap_config *config,
515 struct lock_class_key *lock_key, 525 struct lock_class_key *lock_key,
516 const char *lock_name); 526 const char *lock_name);
527struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
528 const struct regmap_config *config,
529 struct lock_class_key *lock_key,
530 const char *lock_name);
517struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, 531struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
518 const struct regmap_config *config, 532 const struct regmap_config *config,
519 struct lock_class_key *lock_key, 533 struct lock_class_key *lock_key,
@@ -558,6 +572,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
558 const struct regmap_config *config, 572 const struct regmap_config *config,
559 struct lock_class_key *lock_key, 573 struct lock_class_key *lock_key,
560 const char *lock_name); 574 const char *lock_name);
575struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
576 const struct regmap_config *config,
577 struct lock_class_key *lock_key,
578 const char *lock_name);
561struct regmap *__devm_regmap_init_spi(struct spi_device *dev, 579struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
562 const struct regmap_config *config, 580 const struct regmap_config *config,
563 struct lock_class_key *lock_key, 581 struct lock_class_key *lock_key,
@@ -646,6 +664,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
646 i2c, config) 664 i2c, config)
647 665
648/** 666/**
667 * regmap_init_sccb() - Initialise register map
668 *
669 * @i2c: Device that will be interacted with
670 * @config: Configuration for register map
671 *
672 * The return value will be an ERR_PTR() on error or a valid pointer to
673 * a struct regmap.
674 */
675#define regmap_init_sccb(i2c, config) \
676 __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \
677 i2c, config)
678
679/**
649 * regmap_init_slimbus() - Initialise register map 680 * regmap_init_slimbus() - Initialise register map
650 * 681 *
651 * @slimbus: Device that will be interacted with 682 * @slimbus: Device that will be interacted with
@@ -798,6 +829,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
798 i2c, config) 829 i2c, config)
799 830
800/** 831/**
832 * devm_regmap_init_sccb() - Initialise managed register map
833 *
834 * @i2c: Device that will be interacted with
835 * @config: Configuration for register map
836 *
837 * The return value will be an ERR_PTR() on error or a valid pointer
838 * to a struct regmap. The regmap will be automatically freed by the
839 * device management code.
840 */
841#define devm_regmap_init_sccb(i2c, config) \
842 __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \
843 i2c, config)
844
845/**
801 * devm_regmap_init_spi() - Initialise register map 846 * devm_regmap_init_spi() - Initialise register map
802 * 847 *
803 * @dev: Device that will be interacted with 848 * @dev: Device that will be interacted with
@@ -946,6 +991,8 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
946int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); 991int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
947int regmap_raw_read(struct regmap *map, unsigned int reg, 992int regmap_raw_read(struct regmap *map, unsigned int reg,
948 void *val, size_t val_len); 993 void *val, size_t val_len);
994int regmap_noinc_read(struct regmap *map, unsigned int reg,
995 void *val, size_t val_len);
949int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 996int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
950 size_t val_count); 997 size_t val_count);
951int regmap_update_bits_base(struct regmap *map, unsigned int reg, 998int regmap_update_bits_base(struct regmap *map, unsigned int reg,
@@ -1196,6 +1243,13 @@ static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
1196 return -EINVAL; 1243 return -EINVAL;
1197} 1244}
1198 1245
1246static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
1247 void *val, size_t val_len)
1248{
1249 WARN_ONCE(1, "regmap API is disabled");
1250 return -EINVAL;
1251}
1252
1199static inline int regmap_bulk_read(struct regmap *map, unsigned int reg, 1253static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
1200 void *val, size_t val_count) 1254 void *val, size_t val_count)
1201{ 1255{
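A sketch of the new no-increment read support (register addresses, widths and names are invented): the FIFO data register is listed in rd_noinc_table, and regmap_noinc_read() then reads it repeatedly without advancing the register address.

#include <linux/regmap.h>

static const struct regmap_range example_noinc_ranges[] = {
        regmap_reg_range(0x20, 0x20),           /* assumed FIFO data port */
};

static const struct regmap_access_table example_noinc_table = {
        .yes_ranges     = example_noinc_ranges,
        .n_yes_ranges   = ARRAY_SIZE(example_noinc_ranges),
};

static const struct regmap_config example_regmap_config = {
        .reg_bits       = 8,
        .val_bits       = 8,
        .rd_noinc_table = &example_noinc_table,
};

static int example_drain_fifo(struct regmap *map, u8 *buf, size_t len)
{
        /* Reads len bytes from register 0x20 without incrementing it. */
        return regmap_noinc_read(map, 0x20, buf, len);
}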
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fc2dc8df476f..0fd8fbb74763 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -46,7 +46,7 @@ enum regulator_status {
46/** 46/**
47 * struct regulator_linear_range - specify linear voltage ranges 47 * struct regulator_linear_range - specify linear voltage ranges
48 * 48 *
49 * Specify a range of voltages for regulator_map_linar_range() and 49 * Specify a range of voltages for regulator_map_linear_range() and
50 * regulator_list_linear_range(). 50 * regulator_list_linear_range().
51 * 51 *
52 * @min_uV: Lowest voltage in range 52 * @min_uV: Lowest voltage in range
@@ -220,7 +220,7 @@ struct regulator_ops {
220 /* set regulator suspend operating mode (defined in consumer.h) */ 220 /* set regulator suspend operating mode (defined in consumer.h) */
221 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); 221 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
222 222
223 int (*resume_early)(struct regulator_dev *rdev); 223 int (*resume)(struct regulator_dev *rdev);
224 224
225 int (*set_pull_down) (struct regulator_dev *); 225 int (*set_pull_down) (struct regulator_dev *);
226}; 226};
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index e0ccf46f66cf..cb5aecd40f07 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -64,6 +64,17 @@
64#define PFUZE3000_VLDO3 11 64#define PFUZE3000_VLDO3 11
65#define PFUZE3000_VLDO4 12 65#define PFUZE3000_VLDO4 12
66 66
67#define PFUZE3001_SW1 0
68#define PFUZE3001_SW2 1
69#define PFUZE3001_SW3 2
70#define PFUZE3001_VSNVS 3
71#define PFUZE3001_VLDO1 4
72#define PFUZE3001_VLDO2 5
73#define PFUZE3001_VCCSD 6
74#define PFUZE3001_V33 7
75#define PFUZE3001_VLDO3 8
76#define PFUZE3001_VLDO4 9
77
67struct regulator_init_data; 78struct regulator_init_data;
68 79
69struct pfuze_regulator_platform_data { 80struct pfuze_regulator_platform_data {
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e6a0031d1b1f..8ad2487a86d5 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -66,7 +66,7 @@ struct rfkill_ops {
66 66
67#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 67#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68/** 68/**
69 * rfkill_alloc - allocate rfkill structure 69 * rfkill_alloc - Allocate rfkill structure
70 * @name: name of the struct -- the string is not copied internally 70 * @name: name of the struct -- the string is not copied internally
71 * @parent: device that has rf switch on it 71 * @parent: device that has rf switch on it
72 * @type: type of the switch (RFKILL_TYPE_*) 72 * @type: type of the switch (RFKILL_TYPE_*)
@@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill);
112/** 112/**
113 * rfkill_resume_polling(struct rfkill *rfkill) 113 * rfkill_resume_polling(struct rfkill *rfkill)
114 * 114 *
115 * Pause polling -- say transmitter is off for other reasons. 115 * Resume polling
116 * NOTE: not necessary for suspend/resume -- in that case the 116 * NOTE: not necessary for suspend/resume -- in that case the
117 * core stops polling anyway 117 * core stops polling anyway
118 */ 118 */
@@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill);
130void rfkill_unregister(struct rfkill *rfkill); 130void rfkill_unregister(struct rfkill *rfkill);
131 131
132/** 132/**
133 * rfkill_destroy - free rfkill structure 133 * rfkill_destroy - Free rfkill structure
134 * @rfkill: rfkill structure to be destroyed 134 * @rfkill: rfkill structure to be destroyed
135 * 135 *
136 * Destroys the rfkill structure. 136 * Destroys the rfkill structure.
@@ -140,7 +140,7 @@ void rfkill_destroy(struct rfkill *rfkill);
140/** 140/**
141 * rfkill_set_hw_state - Set the internal rfkill hardware block state 141 * rfkill_set_hw_state - Set the internal rfkill hardware block state
142 * @rfkill: pointer to the rfkill class to modify. 142 * @rfkill: pointer to the rfkill class to modify.
143 * @state: the current hardware block state to set 143 * @blocked: the current hardware block state to set
144 * 144 *
145 * rfkill drivers that get events when the hard-blocked state changes 145 * rfkill drivers that get events when the hard-blocked state changes
146 * use this function to notify the rfkill core (and through that also 146 * use this function to notify the rfkill core (and through that also
@@ -161,7 +161,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
161/** 161/**
162 * rfkill_set_sw_state - Set the internal rfkill software block state 162 * rfkill_set_sw_state - Set the internal rfkill software block state
163 * @rfkill: pointer to the rfkill class to modify. 163 * @rfkill: pointer to the rfkill class to modify.
164 * @state: the current software block state to set 164 * @blocked: the current software block state to set
165 * 165 *
166 * rfkill drivers that get events when the soft-blocked state changes 166 * rfkill drivers that get events when the soft-blocked state changes
167 * (yes, some platforms directly act on input but allow changing again) 167 * (yes, some platforms directly act on input but allow changing again)
@@ -183,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
183/** 183/**
184 * rfkill_init_sw_state - Initialize persistent software block state 184 * rfkill_init_sw_state - Initialize persistent software block state
185 * @rfkill: pointer to the rfkill class to modify. 185 * @rfkill: pointer to the rfkill class to modify.
186 * @state: the current software block state to set 186 * @blocked: the current software block state to set
187 * 187 *
188 * rfkill drivers that preserve their software block state over power off 188 * rfkill drivers that preserve their software block state over power off
189 * use this function to notify the rfkill core (and through that also 189 * use this function to notify the rfkill core (and through that also
@@ -208,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
208void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); 208void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
209 209
210/** 210/**
211 * rfkill_blocked - query rfkill block 211 * rfkill_blocked - Query rfkill block state
212 * 212 *
213 * @rfkill: rfkill struct to query 213 * @rfkill: rfkill struct to query
214 */ 214 */
215bool rfkill_blocked(struct rfkill *rfkill); 215bool rfkill_blocked(struct rfkill *rfkill);
216 216
217/** 217/**
218 * rfkill_find_type - Helpper for finding rfkill type by name 218 * rfkill_find_type - Helper for finding rfkill type by name
219 * @name: the name of the type 219 * @name: the name of the type
220 * 220 *
221 * Returns enum rfkill_type that conrresponds the name. 221 * Returns enum rfkill_type that corresponds to the name.
222 */ 222 */
223enum rfkill_type rfkill_find_type(const char *name); 223enum rfkill_type rfkill_find_type(const char *name);
224 224
@@ -296,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name)
296const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); 296const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
297 297
298/** 298/**
299 * rfkill_set_led_trigger_name -- set the LED trigger name 299 * rfkill_set_led_trigger_name - Set the LED trigger name
300 * @rfkill: rfkill struct 300 * @rfkill: rfkill struct
301 * @name: LED trigger name 301 * @name: LED trigger name
302 * 302 *
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
new file mode 100644
index 000000000000..763d613ce2c2
--- /dev/null
+++ b/include/linux/rhashtable-types.h
@@ -0,0 +1,137 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Resizable, Scalable, Concurrent Hash Table
4 *
5 * Simple structures that might be needed in include
6 * files.
7 */
8
9#ifndef _LINUX_RHASHTABLE_TYPES_H
10#define _LINUX_RHASHTABLE_TYPES_H
11
12#include <linux/atomic.h>
13#include <linux/compiler.h>
14#include <linux/mutex.h>
15#include <linux/workqueue.h>
16
17struct rhash_head {
18 struct rhash_head __rcu *next;
19};
20
21struct rhlist_head {
22 struct rhash_head rhead;
23 struct rhlist_head __rcu *next;
24};
25
26struct bucket_table;
27
28/**
29 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
30 * @ht: Hash table
31 * @key: Key to compare against
32 */
33struct rhashtable_compare_arg {
34 struct rhashtable *ht;
35 const void *key;
36};
37
38typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
39typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
40typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
41 const void *obj);
42
43/**
44 * struct rhashtable_params - Hash table construction parameters
45 * @nelem_hint: Hint on number of elements, should be 75% of desired size
46 * @key_len: Length of key
47 * @key_offset: Offset of key in struct to be hashed
48 * @head_offset: Offset of rhash_head in struct to be hashed
49 * @max_size: Maximum size while expanding
50 * @min_size: Minimum size while shrinking
51 * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
52 * @automatic_shrinking: Enable automatic shrinking of tables
53 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
54 * @obj_hashfn: Function to hash object
55 * @obj_cmpfn: Function to compare key with object
56 */
57struct rhashtable_params {
58 u16 nelem_hint;
59 u16 key_len;
60 u16 key_offset;
61 u16 head_offset;
62 unsigned int max_size;
63 u16 min_size;
64 bool automatic_shrinking;
65 u8 locks_mul;
66 rht_hashfn_t hashfn;
67 rht_obj_hashfn_t obj_hashfn;
68 rht_obj_cmpfn_t obj_cmpfn;
69};
70
71/**
72 * struct rhashtable - Hash table handle
73 * @tbl: Bucket table
74 * @key_len: Key length for hashfn
75 * @max_elems: Maximum number of elements in table
76 * @p: Configuration parameters
77 * @rhlist: True if this is an rhltable
78 * @run_work: Deferred worker to expand/shrink asynchronously
79 * @mutex: Mutex to protect current/future table swapping
80 * @lock: Spin lock to protect walker list
81 * @nelems: Number of elements in table
82 */
83struct rhashtable {
84 struct bucket_table __rcu *tbl;
85 unsigned int key_len;
86 unsigned int max_elems;
87 struct rhashtable_params p;
88 bool rhlist;
89 struct work_struct run_work;
90 struct mutex mutex;
91 spinlock_t lock;
92 atomic_t nelems;
93};
94
95/**
96 * struct rhltable - Hash table with duplicate objects in a list
97 * @ht: Underlying rhtable
98 */
99struct rhltable {
100 struct rhashtable ht;
101};
102
103/**
104 * struct rhashtable_walker - Hash table walker
105 * @list: List entry on list of walkers
106 * @tbl: The table that we were walking over
107 */
108struct rhashtable_walker {
109 struct list_head list;
110 struct bucket_table *tbl;
111};
112
113/**
114 * struct rhashtable_iter - Hash table iterator
115 * @ht: Table to iterate through
116 * @p: Current pointer
117 * @list: Current hash list pointer
118 * @walker: Associated rhashtable walker
119 * @slot: Current slot
120 * @skip: Number of entries to skip in slot
121 */
122struct rhashtable_iter {
123 struct rhashtable *ht;
124 struct rhash_head *p;
125 struct rhlist_head *list;
126 struct rhashtable_walker walker;
127 unsigned int slot;
128 unsigned int skip;
129 bool end_of_table;
130};
131
132int rhashtable_init(struct rhashtable *ht,
133 const struct rhashtable_params *params);
134int rhltable_init(struct rhltable *hlt,
135 const struct rhashtable_params *params);
136
137#endif /* _LINUX_RHASHTABLE_TYPES_H */
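For orientation, a minimal sketch of a client of the split-out header: a hypothetical object keyed by a u32 id, with the parameters filled in the way the kernel-doc above describes (struct and function names are illustrative, not part of the patch).

#include <linux/rhashtable-types.h>
#include <linux/rhashtable.h>

struct test_obj {
        u32 id;                         /* hypothetical key */
        struct rhash_head node;         /* linkage used by the table */
};

static const struct rhashtable_params test_params = {
        .key_len             = sizeof(u32),
        .key_offset          = offsetof(struct test_obj, id),
        .head_offset         = offsetof(struct test_obj, node),
        .automatic_shrinking = true,
};

static int test_table_setup(struct rhashtable *ht, struct test_obj *obj)
{
        int err = rhashtable_init(ht, &test_params);    /* params are copied into ht->p */

        if (err)
                return err;
        return rhashtable_insert_fast(ht, &obj->node, test_params);
}

Only the type and rhashtable_init() declaration come from rhashtable-types.h; the insert helper still needs the full rhashtable.h.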
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 4e1f535c2034..eb7111039247 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Resizable, Scalable, Concurrent Hash Table 3 * Resizable, Scalable, Concurrent Hash Table
3 * 4 *
@@ -17,37 +18,18 @@
17#ifndef _LINUX_RHASHTABLE_H 18#ifndef _LINUX_RHASHTABLE_H
18#define _LINUX_RHASHTABLE_H 19#define _LINUX_RHASHTABLE_H
19 20
20#include <linux/atomic.h>
21#include <linux/compiler.h>
22#include <linux/err.h> 21#include <linux/err.h>
23#include <linux/errno.h> 22#include <linux/errno.h>
24#include <linux/jhash.h> 23#include <linux/jhash.h>
25#include <linux/list_nulls.h> 24#include <linux/list_nulls.h>
26#include <linux/workqueue.h> 25#include <linux/workqueue.h>
27#include <linux/mutex.h>
28#include <linux/rculist.h> 26#include <linux/rculist.h>
29 27
28#include <linux/rhashtable-types.h>
30/* 29/*
31 * The end of the chain is marked with a special nulls marks which has 30 * The end of the chain is marked with a special nulls marks which has
32 * the following format: 31 * the least significant bit set.
33 *
34 * +-------+-----------------------------------------------------+-+
35 * | Base | Hash |1|
36 * +-------+-----------------------------------------------------+-+
37 *
38 * Base (4 bits) : Reserved to distinguish between multiple tables.
39 * Specified via &struct rhashtable_params.nulls_base.
40 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
41 * 1 (1 bit) : Nulls marker (always set)
42 *
43 * The remaining bits of the next pointer remain unused for now.
44 */ 32 */
45#define RHT_BASE_BITS 4
46#define RHT_HASH_BITS 27
47#define RHT_BASE_SHIFT RHT_HASH_BITS
48
49/* Base bits plus 1 bit for nulls marker */
50#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
51 33
52/* Maximum chain length before rehash 34/* Maximum chain length before rehash
53 * 35 *
@@ -64,15 +46,6 @@
64 */ 46 */
65#define RHT_ELASTICITY 16u 47#define RHT_ELASTICITY 16u
66 48
67struct rhash_head {
68 struct rhash_head __rcu *next;
69};
70
71struct rhlist_head {
72 struct rhash_head rhead;
73 struct rhlist_head __rcu *next;
74};
75
76/** 49/**
77 * struct bucket_table - Table of hash buckets 50 * struct bucket_table - Table of hash buckets
78 * @size: Number of hash buckets 51 * @size: Number of hash buckets
@@ -102,132 +75,14 @@ struct bucket_table {
102 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; 75 struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
103}; 76};
104 77
105/** 78#define INIT_RHT_NULLS_HEAD(ptr) \
106 * struct rhashtable_compare_arg - Key for the function rhashtable_compare 79 ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
107 * @ht: Hash table
108 * @key: Key to compare against
109 */
110struct rhashtable_compare_arg {
111 struct rhashtable *ht;
112 const void *key;
113};
114
115typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
116typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
117typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
118 const void *obj);
119
120struct rhashtable;
121
122/**
123 * struct rhashtable_params - Hash table construction parameters
124 * @nelem_hint: Hint on number of elements, should be 75% of desired size
125 * @key_len: Length of key
126 * @key_offset: Offset of key in struct to be hashed
127 * @head_offset: Offset of rhash_head in struct to be hashed
128 * @max_size: Maximum size while expanding
129 * @min_size: Minimum size while shrinking
130 * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
131 * @automatic_shrinking: Enable automatic shrinking of tables
132 * @nulls_base: Base value to generate nulls marker
133 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
134 * @obj_hashfn: Function to hash object
135 * @obj_cmpfn: Function to compare key with object
136 */
137struct rhashtable_params {
138 u16 nelem_hint;
139 u16 key_len;
140 u16 key_offset;
141 u16 head_offset;
142 unsigned int max_size;
143 u16 min_size;
144 bool automatic_shrinking;
145 u8 locks_mul;
146 u32 nulls_base;
147 rht_hashfn_t hashfn;
148 rht_obj_hashfn_t obj_hashfn;
149 rht_obj_cmpfn_t obj_cmpfn;
150};
151
152/**
153 * struct rhashtable - Hash table handle
154 * @tbl: Bucket table
155 * @key_len: Key length for hashfn
156 * @max_elems: Maximum number of elements in table
157 * @p: Configuration parameters
158 * @rhlist: True if this is an rhltable
159 * @run_work: Deferred worker to expand/shrink asynchronously
160 * @mutex: Mutex to protect current/future table swapping
161 * @lock: Spin lock to protect walker list
162 * @nelems: Number of elements in table
163 */
164struct rhashtable {
165 struct bucket_table __rcu *tbl;
166 unsigned int key_len;
167 unsigned int max_elems;
168 struct rhashtable_params p;
169 bool rhlist;
170 struct work_struct run_work;
171 struct mutex mutex;
172 spinlock_t lock;
173 atomic_t nelems;
174};
175
176/**
177 * struct rhltable - Hash table with duplicate objects in a list
178 * @ht: Underlying rhtable
179 */
180struct rhltable {
181 struct rhashtable ht;
182};
183
184/**
185 * struct rhashtable_walker - Hash table walker
186 * @list: List entry on list of walkers
187 * @tbl: The table that we were walking over
188 */
189struct rhashtable_walker {
190 struct list_head list;
191 struct bucket_table *tbl;
192};
193
194/**
195 * struct rhashtable_iter - Hash table iterator
196 * @ht: Table to iterate through
197 * @p: Current pointer
198 * @list: Current hash list pointer
199 * @walker: Associated rhashtable walker
200 * @slot: Current slot
201 * @skip: Number of entries to skip in slot
202 */
203struct rhashtable_iter {
204 struct rhashtable *ht;
205 struct rhash_head *p;
206 struct rhlist_head *list;
207 struct rhashtable_walker walker;
208 unsigned int slot;
209 unsigned int skip;
210 bool end_of_table;
211};
212
213static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
214{
215 return NULLS_MARKER(ht->p.nulls_base + hash);
216}
217
218#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
219 ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
220 80
221static inline bool rht_is_a_nulls(const struct rhash_head *ptr) 81static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
222{ 82{
223 return ((unsigned long) ptr & 1); 83 return ((unsigned long) ptr & 1);
224} 84}
225 85
226static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
227{
228 return ((unsigned long) ptr) >> 1;
229}
230
231static inline void *rht_obj(const struct rhashtable *ht, 86static inline void *rht_obj(const struct rhashtable *ht,
232 const struct rhash_head *he) 87 const struct rhash_head *he)
233{ 88{
@@ -237,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht,
237static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, 92static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
238 unsigned int hash) 93 unsigned int hash)
239{ 94{
240 return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); 95 return hash & (tbl->size - 1);
241} 96}
242 97
243static inline unsigned int rht_key_get_hash(struct rhashtable *ht, 98static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
@@ -376,11 +231,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
376} 231}
377#endif /* CONFIG_PROVE_LOCKING */ 232#endif /* CONFIG_PROVE_LOCKING */
378 233
379int rhashtable_init(struct rhashtable *ht,
380 const struct rhashtable_params *params);
381int rhltable_init(struct rhltable *hlt,
382 const struct rhashtable_params *params);
383
384void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, 234void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
385 struct rhash_head *obj); 235 struct rhash_head *obj);
386 236
@@ -745,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
745 lock = rht_bucket_lock(tbl, hash); 595 lock = rht_bucket_lock(tbl, hash);
746 spin_lock_bh(lock); 596 spin_lock_bh(lock);
747 597
748 if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) { 598 if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
749slow_path: 599slow_path:
750 spin_unlock_bh(lock); 600 spin_unlock_bh(lock);
751 rcu_read_unlock(); 601 rcu_read_unlock();
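With the per-table nulls base gone, the end-of-chain marker is a constant, so initialising an empty bucket no longer needs the table or hash. A small sanity sketch of the new one-argument macro (the helper name is hypothetical):

static void init_empty_bucket(struct rhash_head **bucket)
{
        INIT_RHT_NULLS_HEAD(*bucket);           /* was INIT_RHT_NULLS_HEAD(ptr, ht, hash) */
        WARN_ON(!rht_is_a_nulls(*bucket));      /* NULLS_MARKER(0) == 1UL, so bit 0 is set */
}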
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b72ebdff0b77..003d09ab308d 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
165void ring_buffer_record_off(struct ring_buffer *buffer); 165void ring_buffer_record_off(struct ring_buffer *buffer);
166void ring_buffer_record_on(struct ring_buffer *buffer); 166void ring_buffer_record_on(struct ring_buffer *buffer);
167int ring_buffer_record_is_on(struct ring_buffer *buffer); 167int ring_buffer_record_is_on(struct ring_buffer *buffer);
168int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
168void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); 169void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
169void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); 170void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
170 171
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 64125443f8a6..5ef5c7c412a7 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -354,6 +354,8 @@ struct rmi_driver_data {
354 struct mutex irq_mutex; 354 struct mutex irq_mutex;
355 struct input_dev *input; 355 struct input_dev *input;
356 356
357 struct irq_domain *irqdomain;
358
357 u8 pdt_props; 359 u8 pdt_props;
358 360
359 u8 num_rx_electrodes; 361 u8 num_rx_electrodes;
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1b92a28dd672..6fd615a0eea9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
106extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); 106extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
107extern void rt_mutex_destroy(struct rt_mutex *lock); 107extern void rt_mutex_destroy(struct rt_mutex *lock);
108 108
109#ifdef CONFIG_DEBUG_LOCK_ALLOC
110extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
111#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
112#else
109extern void rt_mutex_lock(struct rt_mutex *lock); 113extern void rt_mutex_lock(struct rt_mutex *lock);
114#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
115#endif
116
110extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); 117extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
111extern int rt_mutex_timed_lock(struct rt_mutex *lock, 118extern int rt_mutex_timed_lock(struct rt_mutex *lock,
112 struct hrtimer_sleeper *timeout); 119 struct hrtimer_sleeper *timeout);
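The new annotation is for callers that legitimately nest two rt_mutexes of the same lock class; without CONFIG_DEBUG_LOCK_ALLOC it falls back to plain rt_mutex_lock(). A hypothetical two-instance sketch:

#include <linux/lockdep.h>
#include <linux/rtmutex.h>

struct node {                           /* hypothetical structure embedding an rt_mutex */
        struct rt_mutex lock;
};

static void lock_two_nodes(struct node *a, struct node *b)
{
        rt_mutex_lock(&a->lock);
        /* same lock class: tell lockdep this nesting is intentional */
        rt_mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);

        /* ... work on both nodes ... */

        rt_mutex_unlock(&b->lock);
        rt_mutex_unlock(&a->lock);
}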
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index e6539536dea9..804a50983ec5 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -23,6 +23,8 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26struct seq_file;
27
26/** 28/**
27 * struct sbitmap_word - Word in a &struct sbitmap. 29 * struct sbitmap_word - Word in a &struct sbitmap.
28 */ 30 */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 51f52020ad5f..093aa57120b0 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -9,9 +9,6 @@
9#include <asm/io.h> 9#include <asm/io.h>
10 10
11struct scatterlist { 11struct scatterlist {
12#ifdef CONFIG_DEBUG_SG
13 unsigned long sg_magic;
14#endif
15 unsigned long page_link; 12 unsigned long page_link;
16 unsigned int offset; 13 unsigned int offset;
17 unsigned int length; 14 unsigned int length;
@@ -64,7 +61,6 @@ struct sg_table {
64 * 61 *
65 */ 62 */
66 63
67#define SG_MAGIC 0x87654321
68#define SG_CHAIN 0x01UL 64#define SG_CHAIN 0x01UL
69#define SG_END 0x02UL 65#define SG_END 0x02UL
70 66
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
98 */ 94 */
99 BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); 95 BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
100#ifdef CONFIG_DEBUG_SG 96#ifdef CONFIG_DEBUG_SG
101 BUG_ON(sg->sg_magic != SG_MAGIC);
102 BUG_ON(sg_is_chain(sg)); 97 BUG_ON(sg_is_chain(sg));
103#endif 98#endif
104 sg->page_link = page_link | (unsigned long) page; 99 sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
129static inline struct page *sg_page(struct scatterlist *sg) 124static inline struct page *sg_page(struct scatterlist *sg)
130{ 125{
131#ifdef CONFIG_DEBUG_SG 126#ifdef CONFIG_DEBUG_SG
132 BUG_ON(sg->sg_magic != SG_MAGIC);
133 BUG_ON(sg_is_chain(sg)); 127 BUG_ON(sg_is_chain(sg));
134#endif 128#endif
135 return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); 129 return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
195 **/ 189 **/
196static inline void sg_mark_end(struct scatterlist *sg) 190static inline void sg_mark_end(struct scatterlist *sg)
197{ 191{
198#ifdef CONFIG_DEBUG_SG
199 BUG_ON(sg->sg_magic != SG_MAGIC);
200#endif
201 /* 192 /*
202 * Set termination bit, clear potential chain bit 193 * Set termination bit, clear potential chain bit
203 */ 194 */
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
215 **/ 206 **/
216static inline void sg_unmark_end(struct scatterlist *sg) 207static inline void sg_unmark_end(struct scatterlist *sg)
217{ 208{
218#ifdef CONFIG_DEBUG_SG
219 BUG_ON(sg->sg_magic != SG_MAGIC);
220#endif
221 sg->page_link &= ~SG_END; 209 sg->page_link &= ~SG_END;
222} 210}
223 211
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
260static inline void sg_init_marker(struct scatterlist *sgl, 248static inline void sg_init_marker(struct scatterlist *sgl,
261 unsigned int nents) 249 unsigned int nents)
262{ 250{
263#ifdef CONFIG_DEBUG_SG
264 unsigned int i;
265
266 for (i = 0; i < nents; i++)
267 sgl[i].sg_magic = SG_MAGIC;
268#endif
269 sg_mark_end(&sgl[nents - 1]); 251 sg_mark_end(&sgl[nents - 1]);
270} 252}
271 253
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 87bf02d93a27..95a5018c338e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,7 +118,7 @@ struct task_group;
118 * the comment with set_special_state(). 118 * the comment with set_special_state().
119 */ 119 */
120#define is_special_task_state(state) \ 120#define is_special_task_state(state) \
121 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD)) 121 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
122 122
123#define __set_current_state(state_value) \ 123#define __set_current_state(state_value) \
124 do { \ 124 do { \
@@ -167,8 +167,8 @@ struct task_group;
167 * need_sleep = false; 167 * need_sleep = false;
168 * wake_up_state(p, TASK_UNINTERRUPTIBLE); 168 * wake_up_state(p, TASK_UNINTERRUPTIBLE);
169 * 169 *
170 * Where wake_up_state() (and all other wakeup primitives) imply enough 170 * where wake_up_state() executes a full memory barrier before accessing the
171 * barriers to order the store of the variable against wakeup. 171 * task state.
172 * 172 *
173 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, 173 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
174 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a 174 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -734,6 +734,10 @@ struct task_struct {
734 /* disallow userland-initiated cgroup migration */ 734 /* disallow userland-initiated cgroup migration */
735 unsigned no_cgroup_migration:1; 735 unsigned no_cgroup_migration:1;
736#endif 736#endif
737#ifdef CONFIG_BLK_CGROUP
738 /* to be used once the psi infrastructure lands upstream. */
739 unsigned use_memdelay:1;
740#endif
737 741
738 unsigned long atomic_flags; /* Flags requiring atomic access. */ 742 unsigned long atomic_flags; /* Flags requiring atomic access. */
739 743
@@ -1017,7 +1021,6 @@ struct task_struct {
1017 u64 last_sum_exec_runtime; 1021 u64 last_sum_exec_runtime;
1018 struct callback_head numa_work; 1022 struct callback_head numa_work;
1019 1023
1020 struct list_head numa_entry;
1021 struct numa_group *numa_group; 1024 struct numa_group *numa_group;
1022 1025
1023 /* 1026 /*
@@ -1151,6 +1154,10 @@ struct task_struct {
1151 unsigned int memcg_nr_pages_over_high; 1154 unsigned int memcg_nr_pages_over_high;
1152#endif 1155#endif
1153 1156
1157#ifdef CONFIG_BLK_CGROUP
1158 struct request_queue *throttle_queue;
1159#endif
1160
1154#ifdef CONFIG_UPROBES 1161#ifdef CONFIG_UPROBES
1155 struct uprobe_task *utask; 1162 struct uprobe_task *utask;
1156#endif 1163#endif
@@ -1799,20 +1806,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
1799 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); 1806 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1800} 1807}
1801 1808
1802void __rseq_handle_notify_resume(struct pt_regs *regs); 1809void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
1803 1810
1804static inline void rseq_handle_notify_resume(struct pt_regs *regs) 1811static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1812 struct pt_regs *regs)
1805{ 1813{
1806 if (current->rseq) 1814 if (current->rseq)
1807 __rseq_handle_notify_resume(regs); 1815 __rseq_handle_notify_resume(ksig, regs);
1808} 1816}
1809 1817
1810static inline void rseq_signal_deliver(struct pt_regs *regs) 1818static inline void rseq_signal_deliver(struct ksignal *ksig,
1819 struct pt_regs *regs)
1811{ 1820{
1812 preempt_disable(); 1821 preempt_disable();
1813 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); 1822 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
1814 preempt_enable(); 1823 preempt_enable();
1815 rseq_handle_notify_resume(regs); 1824 rseq_handle_notify_resume(ksig, regs);
1816} 1825}
1817 1826
1818/* rseq_preempt() requires preemption to be disabled. */ 1827/* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1840,7 @@ static inline void rseq_migrate(struct task_struct *t)
1831 1840
1832/* 1841/*
1833 * If parent process has a registered restartable sequences area, the 1842 * If parent process has a registered restartable sequences area, the
1834 * child inherits. Only applies when forking a process, not a thread. In 1843 * child inherits. Only applies when forking a process, not a thread.
1835 * case a parent fork() in the middle of a restartable sequence, set the
1836 * resume notifier to force the child to retry.
1837 */ 1844 */
1838static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 1845static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1839{ 1846{
@@ -1847,7 +1854,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1847 t->rseq_len = current->rseq_len; 1854 t->rseq_len = current->rseq_len;
1848 t->rseq_sig = current->rseq_sig; 1855 t->rseq_sig = current->rseq_sig;
1849 t->rseq_event_mask = current->rseq_event_mask; 1856 t->rseq_event_mask = current->rseq_event_mask;
1850 rseq_preempt(t);
1851 } 1857 }
1852} 1858}
1853 1859
@@ -1864,10 +1870,12 @@ static inline void rseq_execve(struct task_struct *t)
1864static inline void rseq_set_notify_resume(struct task_struct *t) 1870static inline void rseq_set_notify_resume(struct task_struct *t)
1865{ 1871{
1866} 1872}
1867static inline void rseq_handle_notify_resume(struct pt_regs *regs) 1873static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1874 struct pt_regs *regs)
1868{ 1875{
1869} 1876}
1870static inline void rseq_signal_deliver(struct pt_regs *regs) 1877static inline void rseq_signal_deliver(struct ksignal *ksig,
1878 struct pt_regs *regs)
1871{ 1879{
1872} 1880}
1873static inline void rseq_preempt(struct task_struct *t) 1881static inline void rseq_preempt(struct task_struct *t)
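The rseq signature change means each architecture's signal delivery path now passes the ksignal through before building the signal frame; roughly, as a sketch of an arch-side handle_signal() (not code from this patch):

static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        /* let rseq fix up the critical section for the signal being delivered */
        rseq_signal_deliver(ksig, regs);

        /* ... architecture-specific signal frame setup follows ... */
}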
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1c1a1512ec55..913488d828cb 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
40#ifdef CONFIG_SCHED_DEBUG 40#ifdef CONFIG_SCHED_DEBUG
41extern __read_mostly unsigned int sysctl_sched_migration_cost; 41extern __read_mostly unsigned int sysctl_sched_migration_cost;
42extern __read_mostly unsigned int sysctl_sched_nr_migrate; 42extern __read_mostly unsigned int sysctl_sched_nr_migrate;
43extern __read_mostly unsigned int sysctl_sched_time_avg;
44 43
45int sched_proc_update_handler(struct ctl_table *table, int write, 44int sched_proc_update_handler(struct ctl_table *table, int write,
46 void __user *buffer, size_t *length, 45 void __user *buffer, size_t *length,
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 5be31eb7b266..108ede99e533 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
75extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 75extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
76struct task_struct *fork_idle(int); 76struct task_struct *fork_idle(int);
77extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 77extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
78extern long kernel_wait4(pid_t, int *, int, struct rusage *); 78extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
79 79
80extern void free_task(struct task_struct *tsk); 80extern void free_task(struct task_struct *tsk);
81 81
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 411b52e424e1..abe28d5cb3f4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -9,17 +9,16 @@
9#define LINUX_SCHED_CLOCK 9#define LINUX_SCHED_CLOCK
10 10
11#ifdef CONFIG_GENERIC_SCHED_CLOCK 11#ifdef CONFIG_GENERIC_SCHED_CLOCK
12extern void sched_clock_postinit(void); 12extern void generic_sched_clock_init(void);
13 13
14extern void sched_clock_register(u64 (*read)(void), int bits, 14extern void sched_clock_register(u64 (*read)(void), int bits,
15 unsigned long rate); 15 unsigned long rate);
16#else 16#else
17static inline void sched_clock_postinit(void) { } 17static inline void generic_sched_clock_init(void) { }
18 18
19static inline void sched_clock_register(u64 (*read)(void), int bits, 19static inline void sched_clock_register(u64 (*read)(void), int bits,
20 unsigned long rate) 20 unsigned long rate)
21{ 21{
22 ;
23} 22}
24#endif 23#endif
25 24
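Only the core time-init call is renamed; drivers that feed the generic sched_clock are unchanged. For reference, a registration sketch (the MMIO base and register offset are hypothetical):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *timer_base;        /* hypothetical, mapped during probe */
#define TIMER_COUNT     0x04            /* hypothetical free-running counter register */

static u64 notrace my_timer_read(void)
{
        return readl_relaxed(timer_base + TIMER_COUNT);
}

static void __init my_timer_init(void)
{
        /* 32-bit counter ticking at 24 MHz */
        sched_clock_register(my_timer_read, 32, 24000000);
}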
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index b36c76635f18..83d94341e003 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -801,4 +801,11 @@ struct sctp_strreset_resptsn {
801 __be32 receivers_next_tsn; 801 __be32 receivers_next_tsn;
802}; 802};
803 803
804enum {
805 SCTP_DSCP_SET_MASK = 0x1,
806 SCTP_DSCP_VAL_MASK = 0xfc,
807 SCTP_FLOWLABEL_SET_MASK = 0x100000,
808 SCTP_FLOWLABEL_VAL_MASK = 0xfffff
809};
810
804#endif /* __LINUX_SCTP_H__ */ 811#endif /* __LINUX_SCTP_H__ */
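A short illustration of how the new masks combine: the SET mask says "a value was configured", the VAL mask holds the value itself. The helper below is hypothetical; only the mask names come from the header.

#include <linux/sctp.h>
#include <net/flow.h>

static void apply_flowlabel(struct flowi6 *fl6, __u32 flowlabel)
{
        /* SCTP_FLOWLABEL_SET_MASK marks "value present"; the low 20 bits carry the label */
        if (flowlabel & SCTP_FLOWLABEL_SET_MASK)
                fl6->flowlabel = htonl(flowlabel & SCTP_FLOWLABEL_VAL_MASK);
}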
diff --git a/include/linux/security.h b/include/linux/security.h
index 63030c85ee19..75f4156c84d7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -159,6 +159,27 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
159typedef int (*initxattrs) (struct inode *inode, 159typedef int (*initxattrs) (struct inode *inode,
160 const struct xattr *xattr_array, void *fs_data); 160 const struct xattr *xattr_array, void *fs_data);
161 161
162
163/* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */
164#define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM,
165#define __data_id_stringify(dummy, str) #str,
166
167enum kernel_load_data_id {
168 __kernel_read_file_id(__data_id_enumify)
169};
170
171static const char * const kernel_load_data_str[] = {
172 __kernel_read_file_id(__data_id_stringify)
173};
174
175static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
176{
177 if ((unsigned)id >= LOADING_MAX_ID)
178 return kernel_load_data_str[LOADING_UNKNOWN];
179
180 return kernel_load_data_str[id];
181}
182
162#ifdef CONFIG_SECURITY 183#ifdef CONFIG_SECURITY
163 184
164struct security_mnt_opts { 185struct security_mnt_opts {
@@ -309,7 +330,7 @@ void security_file_set_fowner(struct file *file);
309int security_file_send_sigiotask(struct task_struct *tsk, 330int security_file_send_sigiotask(struct task_struct *tsk,
310 struct fown_struct *fown, int sig); 331 struct fown_struct *fown, int sig);
311int security_file_receive(struct file *file); 332int security_file_receive(struct file *file);
312int security_file_open(struct file *file, const struct cred *cred); 333int security_file_open(struct file *file);
313int security_task_alloc(struct task_struct *task, unsigned long clone_flags); 334int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
314void security_task_free(struct task_struct *task); 335void security_task_free(struct task_struct *task);
315int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); 336int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -320,6 +341,7 @@ void security_cred_getsecid(const struct cred *c, u32 *secid);
320int security_kernel_act_as(struct cred *new, u32 secid); 341int security_kernel_act_as(struct cred *new, u32 secid);
321int security_kernel_create_files_as(struct cred *new, struct inode *inode); 342int security_kernel_create_files_as(struct cred *new, struct inode *inode);
322int security_kernel_module_request(char *kmod_name); 343int security_kernel_module_request(char *kmod_name);
344int security_kernel_load_data(enum kernel_load_data_id id);
323int security_kernel_read_file(struct file *file, enum kernel_read_file_id id); 345int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
324int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, 346int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
325 enum kernel_read_file_id id); 347 enum kernel_read_file_id id);
@@ -858,8 +880,7 @@ static inline int security_file_receive(struct file *file)
858 return 0; 880 return 0;
859} 881}
860 882
861static inline int security_file_open(struct file *file, 883static inline int security_file_open(struct file *file)
862 const struct cred *cred)
863{ 884{
864 return 0; 885 return 0;
865} 886}
@@ -909,6 +930,11 @@ static inline int security_kernel_module_request(char *kmod_name)
909 return 0; 930 return 0;
910} 931}
911 932
933static inline int security_kernel_load_data(enum kernel_load_data_id id)
934{
935 return 0;
936}
937
912static inline int security_kernel_read_file(struct file *file, 938static inline int security_kernel_read_file(struct file *file,
913 enum kernel_read_file_id id) 939 enum kernel_read_file_id id)
914{ 940{
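A sketch of how a loading path is expected to consult the new hook before accepting a blob (the surrounding function is hypothetical; LOADING_FIRMWARE is one of the ids generated from __kernel_read_file_id above):

static int demo_load_blob(void)
{
        int ret = security_kernel_load_data(LOADING_FIRMWARE);

        if (ret)
                pr_warn("%s: load denied by LSM (%d)\n",
                        kernel_load_data_id_str(LOADING_FIRMWARE), ret);
        return ret;
}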
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index ebce9e24906a..d37518e89db2 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -231,6 +231,50 @@ struct sfp_eeprom_id {
231 struct sfp_eeprom_ext ext; 231 struct sfp_eeprom_ext ext;
232} __packed; 232} __packed;
233 233
234struct sfp_diag {
235 __be16 temp_high_alarm;
236 __be16 temp_low_alarm;
237 __be16 temp_high_warn;
238 __be16 temp_low_warn;
239 __be16 volt_high_alarm;
240 __be16 volt_low_alarm;
241 __be16 volt_high_warn;
242 __be16 volt_low_warn;
243 __be16 bias_high_alarm;
244 __be16 bias_low_alarm;
245 __be16 bias_high_warn;
246 __be16 bias_low_warn;
247 __be16 txpwr_high_alarm;
248 __be16 txpwr_low_alarm;
249 __be16 txpwr_high_warn;
250 __be16 txpwr_low_warn;
251 __be16 rxpwr_high_alarm;
252 __be16 rxpwr_low_alarm;
253 __be16 rxpwr_high_warn;
254 __be16 rxpwr_low_warn;
255 __be16 laser_temp_high_alarm;
256 __be16 laser_temp_low_alarm;
257 __be16 laser_temp_high_warn;
258 __be16 laser_temp_low_warn;
259 __be16 tec_cur_high_alarm;
260 __be16 tec_cur_low_alarm;
261 __be16 tec_cur_high_warn;
262 __be16 tec_cur_low_warn;
263 __be32 cal_rxpwr4;
264 __be32 cal_rxpwr3;
265 __be32 cal_rxpwr2;
266 __be32 cal_rxpwr1;
267 __be32 cal_rxpwr0;
268 __be16 cal_txi_slope;
269 __be16 cal_txi_offset;
270 __be16 cal_txpwr_slope;
271 __be16 cal_txpwr_offset;
272 __be16 cal_t_slope;
273 __be16 cal_t_offset;
274 __be16 cal_v_slope;
275 __be16 cal_v_offset;
276} __packed;
277
234/* SFP EEPROM registers */ 278/* SFP EEPROM registers */
235enum { 279enum {
236 SFP_PHYS_ID = 0x00, 280 SFP_PHYS_ID = 0x00,
@@ -384,7 +428,33 @@ enum {
384 SFP_TEC_CUR = 0x6c, 428 SFP_TEC_CUR = 0x6c,
385 429
386 SFP_STATUS = 0x6e, 430 SFP_STATUS = 0x6e,
387 SFP_ALARM = 0x70, 431 SFP_ALARM0 = 0x70,
432 SFP_ALARM0_TEMP_HIGH = BIT(7),
433 SFP_ALARM0_TEMP_LOW = BIT(6),
434 SFP_ALARM0_VCC_HIGH = BIT(5),
435 SFP_ALARM0_VCC_LOW = BIT(4),
436 SFP_ALARM0_TX_BIAS_HIGH = BIT(3),
437 SFP_ALARM0_TX_BIAS_LOW = BIT(2),
438 SFP_ALARM0_TXPWR_HIGH = BIT(1),
439 SFP_ALARM0_TXPWR_LOW = BIT(0),
440
441 SFP_ALARM1 = 0x71,
442 SFP_ALARM1_RXPWR_HIGH = BIT(7),
443 SFP_ALARM1_RXPWR_LOW = BIT(6),
444
445 SFP_WARN0 = 0x74,
446 SFP_WARN0_TEMP_HIGH = BIT(7),
447 SFP_WARN0_TEMP_LOW = BIT(6),
448 SFP_WARN0_VCC_HIGH = BIT(5),
449 SFP_WARN0_VCC_LOW = BIT(4),
450 SFP_WARN0_TX_BIAS_HIGH = BIT(3),
451 SFP_WARN0_TX_BIAS_LOW = BIT(2),
452 SFP_WARN0_TXPWR_HIGH = BIT(1),
453 SFP_WARN0_TXPWR_LOW = BIT(0),
454
455 SFP_WARN1 = 0x75,
456 SFP_WARN1_RXPWR_HIGH = BIT(7),
457 SFP_WARN1_RXPWR_LOW = BIT(6),
388 458
389 SFP_EXT_STATUS = 0x76, 459 SFP_EXT_STATUS = 0x76,
390 SFP_VSL = 0x78, 460 SFP_VSL = 0x78,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c86885954994..17a13e4785fc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
630 * @hash: the packet hash 630 * @hash: the packet hash
631 * @queue_mapping: Queue mapping for multiqueue devices 631 * @queue_mapping: Queue mapping for multiqueue devices
632 * @xmit_more: More SKBs are pending for this queue 632 * @xmit_more: More SKBs are pending for this queue
633 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
633 * @ndisc_nodetype: router type (from link layer) 634 * @ndisc_nodetype: router type (from link layer)
634 * @ooo_okay: allow the mapping of a socket to a queue to be changed 635 * @ooo_okay: allow the mapping of a socket to a queue to be changed
635 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport 636 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -640,6 +641,7 @@ typedef unsigned char *sk_buff_data_t;
640 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 641 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
641 * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL 642 * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
642 * @dst_pending_confirm: need to confirm neighbour 643 * @dst_pending_confirm: need to confirm neighbour
644 * @decrypted: Decrypted SKB
643 * @napi_id: id of the NAPI struct this skb came from 645 * @napi_id: id of the NAPI struct this skb came from
644 * @secmark: security marking 646 * @secmark: security marking
645 * @mark: Generic packet mark 647 * @mark: Generic packet mark
@@ -674,12 +676,16 @@ struct sk_buff {
674 * UDP receive path is one user. 676 * UDP receive path is one user.
675 */ 677 */
676 unsigned long dev_scratch; 678 unsigned long dev_scratch;
677 int ip_defrag_offset;
678 }; 679 };
679 }; 680 };
680 struct rb_node rbnode; /* used in netem & tcp stack */ 681 struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
682 struct list_head list;
683 };
684
685 union {
686 struct sock *sk;
687 int ip_defrag_offset;
681 }; 688 };
682 struct sock *sk;
683 689
684 union { 690 union {
685 ktime_t tstamp; 691 ktime_t tstamp;
@@ -735,7 +741,7 @@ struct sk_buff {
735 peeked:1, 741 peeked:1,
736 head_frag:1, 742 head_frag:1,
737 xmit_more:1, 743 xmit_more:1,
738 __unused:1; /* one bit hole */ 744 pfmemalloc:1;
739 745
740 /* fields enclosed in headers_start/headers_end are copied 746 /* fields enclosed in headers_start/headers_end are copied
741 * using a single memcpy() in __copy_skb_header() 747 * using a single memcpy() in __copy_skb_header()
@@ -754,31 +760,30 @@ struct sk_buff {
754 760
755 __u8 __pkt_type_offset[0]; 761 __u8 __pkt_type_offset[0];
756 __u8 pkt_type:3; 762 __u8 pkt_type:3;
757 __u8 pfmemalloc:1;
758 __u8 ignore_df:1; 763 __u8 ignore_df:1;
759
760 __u8 nf_trace:1; 764 __u8 nf_trace:1;
761 __u8 ip_summed:2; 765 __u8 ip_summed:2;
762 __u8 ooo_okay:1; 766 __u8 ooo_okay:1;
767
763 __u8 l4_hash:1; 768 __u8 l4_hash:1;
764 __u8 sw_hash:1; 769 __u8 sw_hash:1;
765 __u8 wifi_acked_valid:1; 770 __u8 wifi_acked_valid:1;
766 __u8 wifi_acked:1; 771 __u8 wifi_acked:1;
767
768 __u8 no_fcs:1; 772 __u8 no_fcs:1;
769 /* Indicates the inner headers are valid in the skbuff. */ 773 /* Indicates the inner headers are valid in the skbuff. */
770 __u8 encapsulation:1; 774 __u8 encapsulation:1;
771 __u8 encap_hdr_csum:1; 775 __u8 encap_hdr_csum:1;
772 __u8 csum_valid:1; 776 __u8 csum_valid:1;
777
773 __u8 csum_complete_sw:1; 778 __u8 csum_complete_sw:1;
774 __u8 csum_level:2; 779 __u8 csum_level:2;
775 __u8 csum_not_inet:1; 780 __u8 csum_not_inet:1;
776
777 __u8 dst_pending_confirm:1; 781 __u8 dst_pending_confirm:1;
778#ifdef CONFIG_IPV6_NDISC_NODETYPE 782#ifdef CONFIG_IPV6_NDISC_NODETYPE
779 __u8 ndisc_nodetype:2; 783 __u8 ndisc_nodetype:2;
780#endif 784#endif
781 __u8 ipvs_property:1; 785 __u8 ipvs_property:1;
786
782 __u8 inner_protocol_type:1; 787 __u8 inner_protocol_type:1;
783 __u8 remcsum_offload:1; 788 __u8 remcsum_offload:1;
784#ifdef CONFIG_NET_SWITCHDEV 789#ifdef CONFIG_NET_SWITCHDEV
@@ -791,6 +796,9 @@ struct sk_buff {
791 __u8 tc_redirected:1; 796 __u8 tc_redirected:1;
792 __u8 tc_from_ingress:1; 797 __u8 tc_from_ingress:1;
793#endif 798#endif
799#ifdef CONFIG_TLS_DEVICE
800 __u8 decrypted:1;
801#endif
794 802
795#ifdef CONFIG_NET_SCHED 803#ifdef CONFIG_NET_SCHED
796 __u16 tc_index; /* traffic control index */ 804 __u16 tc_index; /* traffic control index */
@@ -1030,6 +1038,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1030} 1038}
1031 1039
1032struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 1040struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1041void skb_headers_offset_update(struct sk_buff *skb, int off);
1033int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 1042int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1034struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); 1043struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1035void skb_copy_header(struct sk_buff *new, const struct sk_buff *old); 1044void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
@@ -2354,7 +2363,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
2354 if (skb_transport_header_was_set(skb)) 2363 if (skb_transport_header_was_set(skb))
2355 return; 2364 return;
2356 2365
2357 if (skb_flow_dissect_flow_keys_basic(skb, &keys, 0, 0, 0, 0, 0)) 2366 if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
2358 skb_set_transport_header(skb, keys.control.thoff); 2367 skb_set_transport_header(skb, keys.control.thoff);
2359 else 2368 else
2360 skb_set_transport_header(skb, offset_hint); 2369 skb_set_transport_header(skb, offset_hint);
@@ -2580,7 +2589,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2580 kfree_skb(skb); 2589 kfree_skb(skb);
2581} 2590}
2582 2591
2583void skb_rbtree_purge(struct rb_root *root); 2592unsigned int skb_rbtree_purge(struct rb_root *root);
2584 2593
2585void *netdev_alloc_frag(unsigned int fragsz); 2594void *netdev_alloc_frag(unsigned int fragsz);
2586 2595
@@ -3252,7 +3261,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3252 int *peeked, int *off, int *err); 3261 int *peeked, int *off, int *err);
3253struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, 3262struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3254 int *err); 3263 int *err);
3255__poll_t datagram_poll_mask(struct socket *sock, __poll_t events); 3264__poll_t datagram_poll(struct file *file, struct socket *sock,
3265 struct poll_table_struct *wait);
3256int skb_copy_datagram_iter(const struct sk_buff *from, int offset, 3266int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3257 struct iov_iter *to, int size); 3267 struct iov_iter *to, int size);
3258static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, 3268static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 09fa2c6f0e68..3a1a1dbc6f49 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -155,8 +155,12 @@ struct kmem_cache {
155 155
156#ifdef CONFIG_SYSFS 156#ifdef CONFIG_SYSFS
157#define SLAB_SUPPORTS_SYSFS 157#define SLAB_SUPPORTS_SYSFS
158void sysfs_slab_unlink(struct kmem_cache *);
158void sysfs_slab_release(struct kmem_cache *); 159void sysfs_slab_release(struct kmem_cache *);
159#else 160#else
161static inline void sysfs_slab_unlink(struct kmem_cache *s)
162{
163}
160static inline void sysfs_slab_release(struct kmem_cache *s) 164static inline void sysfs_slab_release(struct kmem_cache *s)
161{ 165{
162} 166}
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index c174844cf663..d0884b525001 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -25,8 +25,6 @@ struct smpboot_thread_data;
25 * parked (cpu offline) 25 * parked (cpu offline)
26 * @unpark: Optional unpark function, called when the thread is 26 * @unpark: Optional unpark function, called when the thread is
27 * unparked (cpu online) 27 * unparked (cpu online)
28 * @cpumask: Internal state. To update which threads are unparked,
29 * call smpboot_update_cpumask_percpu_thread().
30 * @selfparking: Thread is not parked by the park function. 28 * @selfparking: Thread is not parked by the park function.
31 * @thread_comm: The base name of the thread 29 * @thread_comm: The base name of the thread
32 */ 30 */
@@ -40,23 +38,12 @@ struct smp_hotplug_thread {
40 void (*cleanup)(unsigned int cpu, bool online); 38 void (*cleanup)(unsigned int cpu, bool online);
41 void (*park)(unsigned int cpu); 39 void (*park)(unsigned int cpu);
42 void (*unpark)(unsigned int cpu); 40 void (*unpark)(unsigned int cpu);
43 cpumask_var_t cpumask;
44 bool selfparking; 41 bool selfparking;
45 const char *thread_comm; 42 const char *thread_comm;
46}; 43};
47 44
48int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread, 45int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
49 const struct cpumask *cpumask);
50
51static inline int
52smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
53{
54 return smpboot_register_percpu_thread_cpumask(plug_thread,
55 cpu_possible_mask);
56}
57 46
58void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); 47void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
59void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
60 const struct cpumask *);
61 48
62#endif 49#endif
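Registration is now a single call with no cpumask argument: a thread is created for every possible CPU and parked/unparked with hotplug. A hypothetical client sketch with stub callbacks:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);

static int demo_should_run(unsigned int cpu)
{
        return 0;                       /* nothing to do in this stub */
}

static void demo_thread_fn(unsigned int cpu)
{
}

static struct smp_hotplug_thread demo_threads = {
        .store             = &demo_task,
        .thread_should_run = demo_should_run,
        .thread_fn         = demo_thread_fn,
        .thread_comm       = "demo/%u",
};

static int __init demo_init(void)
{
        return smpboot_register_percpu_thread(&demo_threads);
}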
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
new file mode 100644
index 000000000000..7e3b9c605ab2
--- /dev/null
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -0,0 +1,180 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
4 *
5 */
6
7#include <linux/platform_device.h>
8#ifndef __LLCC_QCOM__
9#define __LLCC_QCOM__
10
11#define LLCC_CPUSS 1
12#define LLCC_VIDSC0 2
13#define LLCC_VIDSC1 3
14#define LLCC_ROTATOR 4
15#define LLCC_VOICE 5
16#define LLCC_AUDIO 6
17#define LLCC_MDMHPGRW 7
18#define LLCC_MDM 8
19#define LLCC_CMPT 10
20#define LLCC_GPUHTW 11
21#define LLCC_GPU 12
22#define LLCC_MMUHWT 13
23#define LLCC_CMPTDMA 15
24#define LLCC_DISP 16
25#define LLCC_VIDFW 17
26#define LLCC_MDMHPFX 20
27#define LLCC_MDMPNG 21
28#define LLCC_AUDHW 22
29
30/**
31 * llcc_slice_desc - Cache slice descriptor
32 * @slice_id: llcc slice id
33 * @slice_size: Size allocated for the llcc slice
34 */
35struct llcc_slice_desc {
36 u32 slice_id;
37 size_t slice_size;
38};
39
40/**
41 * llcc_slice_config - Data associated with the llcc slice
42 * @usecase_id: usecase id for which the llcc slice is used
43 * @slice_id: llcc slice id assigned to each slice
44 * @max_cap: maximum capacity of the llcc slice
45 * @priority: priority of the llcc slice
46 * @fixed_size: when true, the llcc slice cannot grow beyond its size
47 * @bonus_ways: bonus ways associated with llcc slice
48 * @res_ways: reserved ways associated with llcc slice
49 * @cache_mode: mode of the llcc slice
50 * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
51 * @dis_cap_alloc: Disable capacity based allocation
52 * @retain_on_pc: Retain through power collapse
53 * @activate_on_init: activate the slice on init
54 */
55struct llcc_slice_config {
56 u32 usecase_id;
57 u32 slice_id;
58 u32 max_cap;
59 u32 priority;
60 bool fixed_size;
61 u32 bonus_ways;
62 u32 res_ways;
63 u32 cache_mode;
64 u32 probe_target_ways;
65 bool dis_cap_alloc;
66 bool retain_on_pc;
67 bool activate_on_init;
68};
69
70/**
71 * llcc_drv_data - Data associated with the llcc driver
72 * @regmap: regmap associated with the llcc device
73 * @cfg: pointer to the data structure for slice configuration
74 * @lock: mutex associated with each slice
75 * @cfg_size: size of the config data table
76 * @max_slices: max slices as read from device tree
77 * @bcast_off: Offset of the broadcast bank
78 * @num_banks: Number of llcc banks
79 * @bitmap: Bit map to track the active slice ids
80 * @offsets: Pointer to the bank offsets array
81 */
82struct llcc_drv_data {
83 struct regmap *regmap;
84 const struct llcc_slice_config *cfg;
85 struct mutex lock;
86 u32 cfg_size;
87 u32 max_slices;
88 u32 bcast_off;
89 u32 num_banks;
90 unsigned long *bitmap;
91 u32 *offsets;
92};
93
94#if IS_ENABLED(CONFIG_QCOM_LLCC)
95/**
96 * llcc_slice_getd - get llcc slice descriptor
97 * @uid: usecase_id of the client
98 */
99struct llcc_slice_desc *llcc_slice_getd(u32 uid);
100
101/**
102 * llcc_slice_putd - put llcc slice descriptor
103 * @desc: Pointer to llcc slice descriptor
104 */
105void llcc_slice_putd(struct llcc_slice_desc *desc);
106
107/**
108 * llcc_get_slice_id - get slice id
109 * @desc: Pointer to llcc slice descriptor
110 */
111int llcc_get_slice_id(struct llcc_slice_desc *desc);
112
113/**
114 * llcc_get_slice_size - llcc slice size
115 * @desc: Pointer to llcc slice descriptor
116 */
117size_t llcc_get_slice_size(struct llcc_slice_desc *desc);
118
119/**
120 * llcc_slice_activate - Activate the llcc slice
121 * @desc: Pointer to llcc slice descriptor
122 */
123int llcc_slice_activate(struct llcc_slice_desc *desc);
124
125/**
126 * llcc_slice_deactivate - Deactivate the llcc slice
127 * @desc: Pointer to llcc slice descriptor
128 */
129int llcc_slice_deactivate(struct llcc_slice_desc *desc);
130
131/**
132 * qcom_llcc_probe - program the sct table
133 * @pdev: platform device pointer
134 * @table: soc sct table
135 * @sz: Size of the config table
136 */
137int qcom_llcc_probe(struct platform_device *pdev,
138 const struct llcc_slice_config *table, u32 sz);
139#else
140static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
141{
142 return NULL;
143}
144
145static inline void llcc_slice_putd(struct llcc_slice_desc *desc)
146{
147}
148
149
150static inline int llcc_get_slice_id(struct llcc_slice_desc *desc)
151{
152 return -EINVAL;
153}
154
155static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
156{
157 return 0;
158}
159static inline int llcc_slice_activate(struct llcc_slice_desc *desc)
160{
161 return -EINVAL;
162}
163
164static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
165{
166 return -EINVAL;
167}
168static inline int qcom_llcc_probe(struct platform_device *pdev,
169 const struct llcc_slice_config *table, u32 sz)
170{
171 return -ENODEV;
172}
173
174static inline int qcom_llcc_remove(struct platform_device *pdev)
175{
176 return -ENODEV;
177}
178#endif
179
180#endif
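The client half of the new interface, following the kernel-doc above: get a descriptor for a use-case id, activate it, and release it again (the GPU id is from the defines at the top; the helpers and error handling are illustrative).

#include <linux/err.h>
#include <linux/soc/qcom/llcc-qcom.h>

static struct llcc_slice_desc *gpu_slice;

static int demo_llcc_attach(void)
{
        gpu_slice = llcc_slice_getd(LLCC_GPU);
        if (IS_ERR_OR_NULL(gpu_slice))
                return -ENODEV;

        /* slice id and size can now be queried for SCID programming */
        return llcc_slice_activate(gpu_slice);
}

static void demo_llcc_detach(void)
{
        llcc_slice_deactivate(gpu_slice);
        llcc_slice_putd(gpu_slice);
}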
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 66dcb9ec273a..5addaf5ccbce 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -42,7 +42,9 @@
42#define EXYNOS_SWRESET 0x0400 42#define EXYNOS_SWRESET 0x0400
43 43
44#define S5P_WAKEUP_STAT 0x0600 44#define S5P_WAKEUP_STAT 0x0600
45#define S5P_EINT_WAKEUP_MASK 0x0604 45/* Value for EXYNOS_EINT_WAKEUP_MASK disabling all external wakeup interrupts */
46#define EXYNOS_EINT_WAKEUP_MASK_DISABLED 0xffffffff
47#define EXYNOS_EINT_WAKEUP_MASK 0x0604
46#define S5P_WAKEUP_MASK 0x0608 48#define S5P_WAKEUP_MASK 0x0608
47#define S5P_WAKEUP_MASK2 0x0614 49#define S5P_WAKEUP_MASK2 0x0614
48 50
@@ -180,6 +182,9 @@
180#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8) 182#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
181#define S5P_CORE_AUTOWAKEUP_EN (1 << 31) 183#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
182 184
185/* Only for S5Pv210 */
186#define S5PV210_EINT_WAKEUP_MASK 0xC004
187
183/* Only for EXYNOS4210 */ 188/* Only for EXYNOS4210 */
184#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154 189#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
185#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174 190#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
@@ -641,6 +646,7 @@
641 | EXYNOS5420_KFC_USE_STANDBY_WFI3) 646 | EXYNOS5420_KFC_USE_STANDBY_WFI3)
642 647
643/* For EXYNOS5433 */ 648/* For EXYNOS5433 */
649#define EXYNOS5433_EINT_WAKEUP_MASK (0x060C)
644#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728) 650#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
645#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028) 651#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
646#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8) 652#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8)
diff --git a/include/linux/spi/adi_spi3.h b/include/linux/spi/adi_spi3.h
deleted file mode 100644
index c84123aa1d06..000000000000
--- a/include/linux/spi/adi_spi3.h
+++ /dev/null
@@ -1,254 +0,0 @@
1/*
2 * Analog Devices SPI3 controller driver
3 *
4 * Copyright (c) 2014 Analog Devices Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef _ADI_SPI3_H_
17#define _ADI_SPI3_H_
18
19#include <linux/types.h>
20
21/* SPI_CONTROL */
22#define SPI_CTL_EN 0x00000001 /* Enable */
23#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */
24#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */
25#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */
26#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */
27#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */
28#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */
29#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */
30#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */
31#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */
32#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */
33#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */
34#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */
35#define SPI_CTL_LSBF 0x00001000 /* LSB First */
36#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */
37#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */
38#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */
39#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */
40#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */
41#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */
42#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */
43#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */
44#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */
45#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */
46#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */
47#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */
48#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */
49/* SPI_RX_CONTROL */
50#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */
51#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer Initiate */
52#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */
53#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */
54#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */
55#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */
56#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */
57#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */
58#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */
59#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */
60#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */
61#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */
62#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */
63#define SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */
64#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */
65#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */
66#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */
67#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */
68#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */
69#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */
70#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */
71#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */
72/* SPI_TX_CONTROL */
73#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel Enable */
74#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */
75#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */
76#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */
77#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */
78#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */
79#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */
80#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */
81#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */
82#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */
83#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */
84#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */
85#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */
86#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */
87#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */
88#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */
89#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */
90#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */
91#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */
92#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */
93#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */
94#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */
95/* SPI_CLOCK */
96#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */
97/* SPI_DELAY */
98#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */
99#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */
100#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */
101/* SPI_SSEL */
102#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */
103#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */
104#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */
105#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */
106#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */
107#define SPI_SLVSEL_SSE6 0x00000040 /* SPISSEL6 Enable */
108#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */
109#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */
110#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */
111#define SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */
112#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */
113#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */
114#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */
115#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */
116/* SPI_RWC */
117#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */
118/* SPI_RWCR */
119#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */
120/* SPI_TWC */
121#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */
122/* SPI_TWCR */
123#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */
124/* SPI_IMASK */
125#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
126#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
127#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
128#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
129#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
130#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
131#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */
132#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */
133#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */
134#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
135/* SPI_IMASKCL */
136#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
137#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
138#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
139#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
140#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
141#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
142#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */
143#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */
144#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */
145#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
146/* SPI_IMASKST */
147#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
148#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
149#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
150#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
151#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
152#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
153#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */
154#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */
155#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */
156#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
157/* SPI_STATUS */
158#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */
159#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */
160#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */
161#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */
162#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */
163#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */
164#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */
165#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */
166#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */
167#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */
168#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */
169#define SPI_STAT_RFS 0x00007000 /* SPI_RFIFO status */
170#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */
171#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */
172#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */
173#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */
174#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */
175#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */
176#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */
177#define SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */
178#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */
179#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */
180#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */
181#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */
182#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */
183#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */
184/* SPI_ILAT */
185#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
186#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
187#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */
188#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */
189#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */
190#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */
191#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */
192#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */
193#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */
194#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */
195/* SPI_ILATCL */
196#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
197#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
198#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */
199#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */
200#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit Collision Error Indication */
201#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */
202#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */
203#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */
204#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */
205#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */
206
207/*
208 * adi spi3 registers layout
209 */
210struct adi_spi_regs {
211 u32 revid;
212 u32 control;
213 u32 rx_control;
214 u32 tx_control;
215 u32 clock;
216 u32 delay;
217 u32 ssel;
218 u32 rwc;
219 u32 rwcr;
220 u32 twc;
221 u32 twcr;
222 u32 reserved0;
223 u32 emask;
224 u32 emaskcl;
225 u32 emaskst;
226 u32 reserved1;
227 u32 status;
228 u32 elat;
229 u32 elatcl;
230 u32 reserved2;
231 u32 rfifo;
232 u32 reserved3;
233 u32 tfifo;
234};
235
236#define MAX_CTRL_CS 8 /* cs in spi controller */
237
238/* device.platform_data for SSP controller devices */
239struct adi_spi3_master {
240 u16 num_chipselect;
241 u16 pin_req[7];
242};
243
244/* spi_board_info.controller_data for SPI slave devices,
245 * copied to spi_device.platform_data ... mostly for dma tuning
246 */
247struct adi_spi3_chip {
248 u32 control;
249 u16 cs_chg_udelay; /* Some devices require 16-bit delays */
250 u32 tx_dummy_val; /* tx value for rx only transfer */
251 bool enable_dma;
252};
253
254#endif /* _ADI_SPI3_H_ */
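
For context on how this platform data is consumed, a board file would typically register the controller's adi_spi3_master data and point each slave's controller_data at an adi_spi3_chip. The sketch below is purely illustrative; the device name, bus number, speed and control bits are made-up placeholders, not values taken from this header.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/adi_spi3.h>         /* assumed install path of the header above */

static struct adi_spi3_master board_spi0_info = {
        .num_chipselect = MAX_CTRL_CS,
        .pin_req        = { 0 },        /* board-specific peripheral pin IDs */
};

static struct adi_spi3_chip board_flash_chip = {
        .control        = 0,            /* board-specific SPI_CTL_* bits */
        .cs_chg_udelay  = 1,
        .tx_dummy_val   = 0xffff,       /* clocked out on rx-only transfers */
        .enable_dma     = true,
};

static struct spi_board_info board_spi_devices[] __initdata = {
        {
                .modalias        = "m25p80",    /* hypothetical SPI flash */
                .max_speed_hz    = 25000000,
                .bus_num         = 0,
                .chip_select     = 1,
                .controller_data = &board_flash_chip,
        },
};

/* Registered from board init code with:
 *      spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */
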
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index bb4bd15ae1f6..b2bd4b4127c4 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -3,7 +3,9 @@
3 * Copyright (C) 2018 Exceet Electronics GmbH 3 * Copyright (C) 2018 Exceet Electronics GmbH
4 * Copyright (C) 2018 Bootlin 4 * Copyright (C) 2018 Bootlin
5 * 5 *
6 * Author: Boris Brezillon <boris.brezillon@bootlin.com> 6 * Author:
7 * Peter Pan <peterpandong@micron.com>
8 * Boris Brezillon <boris.brezillon@bootlin.com>
7 */ 9 */
8 10
9#ifndef __LINUX_SPI_MEM_H 11#ifndef __LINUX_SPI_MEM_H
@@ -122,7 +124,8 @@ struct spi_mem_op {
122/** 124/**
123 * struct spi_mem - describes a SPI memory device 125 * struct spi_mem - describes a SPI memory device
124 * @spi: the underlying SPI device 126 * @spi: the underlying SPI device
125 * @drvpriv: spi_mem_drviver private data 127 * @drvpriv: spi_mem_driver private data
128 * @name: name of the SPI memory device
126 * 129 *
127 * Extra information that describe the SPI memory device and may be needed by 130 * Extra information that describe the SPI memory device and may be needed by
128 * the controller to properly handle this device should be placed here. 131 * the controller to properly handle this device should be placed here.
@@ -133,6 +136,7 @@ struct spi_mem_op {
133struct spi_mem { 136struct spi_mem {
134 struct spi_device *spi; 137 struct spi_device *spi;
135 void *drvpriv; 138 void *drvpriv;
139 const char *name;
136}; 140};
137 141
138/** 142/**
@@ -165,6 +169,13 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
165 * limitations) 169 * limitations)
166 * @supports_op: check if an operation is supported by the controller 170 * @supports_op: check if an operation is supported by the controller
167 * @exec_op: execute a SPI memory operation 171 * @exec_op: execute a SPI memory operation
172 * @get_name: get a custom name for the SPI mem device from the controller.
173 * This might be needed if the controller driver has been ported
174 * to use the SPI mem layer and a custom name is used to keep
175 * mtdparts compatible.
176 * Note that if the implementation of this function allocates memory
177 * dynamically, then it should do so with devm_xxx(), as we don't
178 * have a ->free_name() function.
168 * 179 *
169 * This interface should be implemented by SPI controllers providing an 180 * This interface should be implemented by SPI controllers providing an
170 * high-level interface to execute SPI memory operation, which is usually the 181 * high-level interface to execute SPI memory operation, which is usually the
@@ -176,6 +187,7 @@ struct spi_controller_mem_ops {
176 const struct spi_mem_op *op); 187 const struct spi_mem_op *op);
177 int (*exec_op)(struct spi_mem *mem, 188 int (*exec_op)(struct spi_mem *mem,
178 const struct spi_mem_op *op); 189 const struct spi_mem_op *op);
190 const char *(*get_name)(struct spi_mem *mem);
179}; 191};
180 192
181/** 193/**
@@ -234,6 +246,8 @@ bool spi_mem_supports_op(struct spi_mem *mem,
234int spi_mem_exec_op(struct spi_mem *mem, 246int spi_mem_exec_op(struct spi_mem *mem,
235 const struct spi_mem_op *op); 247 const struct spi_mem_op *op);
236 248
249const char *spi_mem_get_name(struct spi_mem *mem);
250
237int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, 251int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
238 struct module *owner); 252 struct module *owner);
239 253
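
As an illustration of the new ->get_name() hook (a hedged sketch, not code from the patch; the driver name, format string and ops structure are hypothetical), a controller that wants stable legacy names for mtdparts could do roughly the following. Per the note above, the string is devm-allocated so no ->free_name() is needed.

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static const char *foo_qspi_get_name(struct spi_mem *mem)
{
        struct spi_device *spi = mem->spi;
        struct device *dev = &spi->controller->dev;

        return devm_kasprintf(dev, GFP_KERNEL, "foo-qspi.%u", spi->chip_select);
}

static const struct spi_controller_mem_ops foo_qspi_mem_ops = {
        /* .supports_op and .exec_op omitted from this sketch */
        .get_name = foo_qspi_get_name,
};

Users of the device then obtain the resulting string via spi_mem_get_name(mem).
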
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 51d8c060e513..b7e021b274dc 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -8,7 +8,7 @@ struct spi_bitbang {
8 struct mutex lock; 8 struct mutex lock;
9 u8 busy; 9 u8 busy;
10 u8 use_dma; 10 u8 use_dma;
11 u8 flags; /* extra spi->mode support */ 11 u16 flags; /* extra spi->mode support */
12 12
13 struct spi_master *master; 13 struct spi_master *master;
14 14
@@ -30,7 +30,8 @@ struct spi_bitbang {
30 /* txrx_word[SPI_MODE_*]() just looks like a shift register */ 30 /* txrx_word[SPI_MODE_*]() just looks like a shift register */
31 u32 (*txrx_word[4])(struct spi_device *spi, 31 u32 (*txrx_word[4])(struct spi_device *spi,
32 unsigned nsecs, 32 unsigned nsecs,
33 u32 word, u8 bits); 33 u32 word, u8 bits, unsigned flags);
34 int (*set_line_direction)(struct spi_device *spi, bool output);
34}; 35};
35 36
36/* you can call these default bitbang->master methods from your custom 37/* you can call these default bitbang->master methods from your custom
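
To show the widened hooks in use, here is a minimal, hypothetical skeleton (the actual bit shifting is elided): txrx_word() now receives an extra unsigned flags argument, and set_line_direction() lets the core flip a shared data line, e.g. for three-wire setups.

#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

static u32 foo_txrx_word_mode0(struct spi_device *spi, unsigned nsecs,
                               u32 word, u8 bits, unsigned flags)
{
        /* shift 'bits' bits out/in here; 'flags' can request tx-only or
         * rx-only behaviour when MOSI and MISO share one line */
        return word;                    /* looped back in this stub */
}

static int foo_set_line_direction(struct spi_device *spi, bool output)
{
        /* reconfigure the shared data GPIO as output or input */
        return 0;
}

static struct spi_bitbang foo_bitbang = {
        .txrx_word          = { [SPI_MODE_0] = foo_txrx_word_mode0 },
        .set_line_direction = foo_set_line_direction,
};
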
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 1e8a46435838..3190997df9ca 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -114,29 +114,48 @@ do { \
114#endif /*arch_spin_is_contended*/ 114#endif /*arch_spin_is_contended*/
115 115
116/* 116/*
117 * This barrier must provide two things: 117 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
118 * between program-order earlier lock acquisitions and program-order later
119 * memory accesses.
118 * 120 *
119 * - it must guarantee a STORE before the spin_lock() is ordered against a 121 * This guarantees that the following two properties hold:
120 * LOAD after it, see the comments at its two usage sites.
121 * 122 *
122 * - it must ensure the critical section is RCsc. 123 * 1) Given the snippet:
123 * 124 *
124 * The latter is important for cases where we observe values written by other 125 * { X = 0; Y = 0; }
125 * CPUs in spin-loops, without barriers, while being subject to scheduling.
126 * 126 *
127 * CPU0 CPU1 CPU2 127 * CPU0 CPU1
128 * 128 *
129 * for (;;) { 129 * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1);
130 * if (READ_ONCE(X)) 130 * spin_lock(S); smp_mb();
131 * break; 131 * smp_mb__after_spinlock(); r1 = READ_ONCE(X);
132 * } 132 * r0 = READ_ONCE(Y);
133 * X=1 133 * spin_unlock(S);
134 * <sched-out>
135 * <sched-in>
136 * r = X;
137 * 134 *
138 * without transitivity it could be that CPU1 observes X!=0 breaks the loop, 135 * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
139 * we get migrated and CPU2 sees X==0. 136 * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
137 * preceding the call to smp_mb__after_spinlock() in __schedule() and in
138 * try_to_wake_up().
139 *
140 * 2) Given the snippet:
141 *
142 * { X = 0; Y = 0; }
143 *
144 * CPU0 CPU1 CPU2
145 *
146 * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y);
147 * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb();
148 * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X);
149 * WRITE_ONCE(Y, 1);
150 * spin_unlock(S);
151 *
152 * it is forbidden that CPU0's critical section executes before CPU1's
153 * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
154 * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
155 * preceding the calls to smp_rmb() in try_to_wake_up() for similar
156 * snippets but "projected" onto two CPUs.
157 *
158 * Property (2) upgrades the lock to an RCsc lock.
140 * 159 *
141 * Since most load-store architectures implement ACQUIRE with an smp_mb() after 160 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
142 * the LL/SC loop, they need no further barriers. Similarly all our TSO 161 * the LL/SC loop, they need no further barriers. Similarly all our TSO
@@ -427,6 +446,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
427#define atomic_dec_and_lock(atomic, lock) \ 446#define atomic_dec_and_lock(atomic, lock) \
428 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) 447 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
429 448
449extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
450 unsigned long *flags);
451#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
452 __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
453
430int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, 454int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
431 size_t max_size, unsigned int cpu_mult, 455 size_t max_size, unsigned int cpu_mult,
432 gfp_t gfp); 456 gfp_t gfp);
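
The newly added atomic_dec_and_lock_irqsave() follows the familiar "drop the last reference under a lock" pattern, but with interrupts disabled while the lock is held. A minimal sketch, assuming a hypothetical object kept on a spinlock-protected list:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_obj {
        atomic_t refcount;
        struct list_head node;          /* on a list protected by foo_list_lock */
};

static DEFINE_SPINLOCK(foo_list_lock);

static void foo_obj_put(struct foo_obj *obj)
{
        unsigned long flags;

        /* Nonzero only when the count hit zero and the lock was acquired. */
        if (!atomic_dec_and_lock_irqsave(&obj->refcount, &foo_list_lock, flags))
                return;

        list_del(&obj->node);
        spin_unlock_irqrestore(&foo_list_lock, flags);
        kfree(obj);
}
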
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 91494d7e8e41..3e72a291c401 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
195 return retval; 195 return retval;
196} 196}
197 197
198/* Used by tracing, cannot be traced and cannot invoke lockdep. */
199static inline notrace int
200srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
201{
202 int retval;
203
204 retval = __srcu_read_lock(sp);
205 return retval;
206}
207
198/** 208/**
199 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. 209 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
200 * @sp: srcu_struct in which to unregister the old reader. 210 * @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
209 __srcu_read_unlock(sp, idx); 219 __srcu_read_unlock(sp, idx);
210} 220}
211 221
222/* Used by tracing, cannot be traced and cannot call lockdep. */
223static inline notrace void
224srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
225{
226 __srcu_read_unlock(sp, idx);
227}
228
212/** 229/**
213 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock 230 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
214 * 231 *
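
A rough sketch of how the _notrace variants are meant to be used from code that must not itself be traced or trigger lockdep (the srcu_struct and hook names are hypothetical):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(foo_trace_srcu);

static notrace void foo_trace_hook(unsigned long ip)
{
        int idx;

        idx = srcu_read_lock_notrace(&foo_trace_srcu);
        /* walk an SRCU-protected probe list and invoke probes; nothing in
         * here may recurse into the tracer or take lockdep annotations */
        srcu_read_unlock_notrace(&foo_trace_srcu, idx);
}
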
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 3b43655cabe6..0d5a2691e7e9 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -499,11 +499,9 @@ struct ssb_bus {
499 499
500 /* Internal-only stuff follows. Do not touch. */ 500 /* Internal-only stuff follows. Do not touch. */
501 struct list_head list; 501 struct list_head list;
502#ifdef CONFIG_SSB_DEBUG
503 /* Is the bus already powered up? */ 502 /* Is the bus already powered up? */
504 bool powered_up; 503 bool powered_up;
505 int power_warn_count; 504 int power_warn_count;
506#endif /* DEBUG */
507}; 505};
508 506
509enum ssb_quirks { 507enum ssb_quirks {
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 32feac5bbd75..c43e9a01b892 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -190,5 +190,6 @@ struct plat_stmmacenet_data {
190 bool tso_en; 190 bool tso_en;
191 int mac_port_sel_speed; 191 int mac_port_sel_speed;
192 bool en_tx_lpi_clockgating; 192 bool en_tx_lpi_clockgating;
193 int has_xgmac;
193}; 194};
194#endif 195#endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 440b62f7502e..5a28ac9284f0 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -414,7 +414,7 @@ static inline bool hibernation_available(void) { return false; }
414#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ 414#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
415#define PM_POST_RESTORE 0x0006 /* Restore failed */ 415#define PM_POST_RESTORE 0x0006 /* Restore failed */
416 416
417extern struct mutex pm_mutex; 417extern struct mutex system_transition_mutex;
418 418
419#ifdef CONFIG_PM_SLEEP 419#ifdef CONFIG_PM_SLEEP
420void save_processor_state(void); 420void save_processor_state(void);
diff --git a/include/linux/swait.h b/include/linux/swait.h
index bf8cb0dee23c..73e06e9986d4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
16 * wait-queues, but the semantics are actually completely different, and 16 * wait-queues, but the semantics are actually completely different, and
17 * every single user we have ever had has been buggy (or pointless). 17 * every single user we have ever had has been buggy (or pointless).
18 * 18 *
19 * A "swake_up()" only wakes up _one_ waiter, which is not at all what 19 * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
20 * "wake_up()" does, and has led to problems. In other cases, it has 20 * "wake_up()" does, and has led to problems. In other cases, it has
21 * been fine, because there's only ever one waiter (kvm), but in that 21 * been fine, because there's only ever one waiter (kvm), but in that
 22 * case the whole "simple" wait-queue is just pointless to begin with, 22 * case the whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
38 * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right 38 * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
39 * sleeper state. 39 * sleeper state.
40 * 40 *
41 * - the exclusive mode; because this requires preserving the list order 41 * - the !exclusive mode; because that leads to O(n) wakeups, everything is
42 * and this is hard. 42 * exclusive.
43 * 43 *
44 * - custom wake callback functions; because you cannot give any guarantees 44 * - custom wake callback functions; because you cannot give any guarantees
45 * about random code. This also allows swait to be used in RT, such that 45 * about random code. This also allows swait to be used in RT, such that
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
115 * CPU0 - waker CPU1 - waiter 115 * CPU0 - waker CPU1 - waiter
116 * 116 *
117 * for (;;) { 117 * for (;;) {
118 * @cond = true; prepare_to_swait(&wq_head, &wait, state); 118 * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state);
119 * smp_mb(); // smp_mb() from set_current_state() 119 * smp_mb(); // smp_mb() from set_current_state()
120 * if (swait_active(wq_head)) if (@cond) 120 * if (swait_active(wq_head)) if (@cond)
121 * wake_up(wq_head); break; 121 * wake_up(wq_head); break;
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
157 return swait_active(wq); 157 return swait_active(wq);
158} 158}
159 159
160extern void swake_up(struct swait_queue_head *q); 160extern void swake_up_one(struct swait_queue_head *q);
161extern void swake_up_all(struct swait_queue_head *q); 161extern void swake_up_all(struct swait_queue_head *q);
162extern void swake_up_locked(struct swait_queue_head *q); 162extern void swake_up_locked(struct swait_queue_head *q);
163 163
164extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); 164extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
165extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
166extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); 165extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
167 166
168extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); 167extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
169extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); 168extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
170 169
171/* as per ___wait_event() but for swait, therefore "exclusive == 0" */ 170/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
172#define ___swait_event(wq, condition, state, ret, cmd) \ 171#define ___swait_event(wq, condition, state, ret, cmd) \
173({ \ 172({ \
173 __label__ __out; \
174 struct swait_queue __wait; \ 174 struct swait_queue __wait; \
175 long __ret = ret; \ 175 long __ret = ret; \
176 \ 176 \
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
183 \ 183 \
184 if (___wait_is_interruptible(state) && __int) { \ 184 if (___wait_is_interruptible(state) && __int) { \
185 __ret = __int; \ 185 __ret = __int; \
186 break; \ 186 goto __out; \
187 } \ 187 } \
188 \ 188 \
189 cmd; \ 189 cmd; \
190 } \ 190 } \
191 finish_swait(&wq, &__wait); \ 191 finish_swait(&wq, &__wait); \
192 __ret; \ 192__out: __ret; \
193}) 193})
194 194
195#define __swait_event(wq, condition) \ 195#define __swait_event(wq, condition) \
196 (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ 196 (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
197 schedule()) 197 schedule())
198 198
199#define swait_event(wq, condition) \ 199#define swait_event_exclusive(wq, condition) \
200do { \ 200do { \
201 if (condition) \ 201 if (condition) \
202 break; \ 202 break; \
@@ -208,7 +208,7 @@ do { \
208 TASK_UNINTERRUPTIBLE, timeout, \ 208 TASK_UNINTERRUPTIBLE, timeout, \
209 __ret = schedule_timeout(__ret)) 209 __ret = schedule_timeout(__ret))
210 210
211#define swait_event_timeout(wq, condition, timeout) \ 211#define swait_event_timeout_exclusive(wq, condition, timeout) \
212({ \ 212({ \
213 long __ret = timeout; \ 213 long __ret = timeout; \
214 if (!___wait_cond_timeout(condition)) \ 214 if (!___wait_cond_timeout(condition)) \
@@ -220,7 +220,7 @@ do { \
220 ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ 220 ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
221 schedule()) 221 schedule())
222 222
223#define swait_event_interruptible(wq, condition) \ 223#define swait_event_interruptible_exclusive(wq, condition) \
224({ \ 224({ \
225 int __ret = 0; \ 225 int __ret = 0; \
226 if (!(condition)) \ 226 if (!(condition)) \
@@ -233,7 +233,7 @@ do { \
233 TASK_INTERRUPTIBLE, timeout, \ 233 TASK_INTERRUPTIBLE, timeout, \
234 __ret = schedule_timeout(__ret)) 234 __ret = schedule_timeout(__ret))
235 235
236#define swait_event_interruptible_timeout(wq, condition, timeout) \ 236#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
237({ \ 237({ \
238 long __ret = timeout; \ 238 long __ret = timeout; \
239 if (!___wait_cond_timeout(condition)) \ 239 if (!___wait_cond_timeout(condition)) \
@@ -246,7 +246,7 @@ do { \
246 (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) 246 (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
247 247
248/** 248/**
249 * swait_event_idle - wait without system load contribution 249 * swait_event_idle_exclusive - wait without system load contribution
250 * @wq: the waitqueue to wait on 250 * @wq: the waitqueue to wait on
251 * @condition: a C expression for the event to wait for 251 * @condition: a C expression for the event to wait for
252 * 252 *
@@ -257,7 +257,7 @@ do { \
257 * condition and doesn't want to contribute to system load. Signals are 257 * condition and doesn't want to contribute to system load. Signals are
258 * ignored. 258 * ignored.
259 */ 259 */
260#define swait_event_idle(wq, condition) \ 260#define swait_event_idle_exclusive(wq, condition) \
261do { \ 261do { \
262 if (condition) \ 262 if (condition) \
263 break; \ 263 break; \
@@ -270,7 +270,7 @@ do { \
270 __ret = schedule_timeout(__ret)) 270 __ret = schedule_timeout(__ret))
271 271
272/** 272/**
273 * swait_event_idle_timeout - wait up to timeout without load contribution 273 * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
274 * @wq: the waitqueue to wait on 274 * @wq: the waitqueue to wait on
275 * @condition: a C expression for the event to wait for 275 * @condition: a C expression for the event to wait for
276 * @timeout: timeout at which we'll give up in jiffies 276 * @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do { \
288 * or the remaining jiffies (at least 1) if the @condition evaluated 288 * or the remaining jiffies (at least 1) if the @condition evaluated
289 * to %true before the @timeout elapsed. 289 * to %true before the @timeout elapsed.
290 */ 290 */
291#define swait_event_idle_timeout(wq, condition, timeout) \ 291#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \
292({ \ 292({ \
293 long __ret = timeout; \ 293 long __ret = timeout; \
294 if (!___wait_cond_timeout(condition)) \ 294 if (!___wait_cond_timeout(condition)) \
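
With the renames above, a minimal waiter/waker pairing looks like the sketch below (names are hypothetical). All swait waiters are exclusive, and swake_up_one() wakes exactly one of them.

#include <linux/compiler.h>
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(foo_wq);
static bool foo_ready;

static int foo_wait_for_event(void)
{
        /* 0 on success, -ERESTARTSYS if a signal arrived first */
        return swait_event_interruptible_exclusive(foo_wq, READ_ONCE(foo_ready));
}

static void foo_signal_event(void)
{
        WRITE_ONCE(foo_ready, true);
        swake_up_one(&foo_wq);          /* wake a single exclusive waiter */
}
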
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c063443d8638..1a8bd05a335e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -629,7 +629,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
629 629
630 return memcg->swappiness; 630 return memcg->swappiness;
631} 631}
632
633#else 632#else
634static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) 633static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
635{ 634{
@@ -637,6 +636,16 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
637} 636}
638#endif 637#endif
639 638
639#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
640extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
641 gfp_t gfp_mask);
642#else
643static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
644 int node, gfp_t gfp_mask)
645{
646}
647#endif
648
640#ifdef CONFIG_MEMCG_SWAP 649#ifdef CONFIG_MEMCG_SWAP
641extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry); 650extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
642extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); 651extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 06bd7b096167..e06febf62978 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -10,5 +10,7 @@ extern spinlock_t swap_lock;
10extern struct plist_head swap_active_head; 10extern struct plist_head swap_active_head;
11extern struct swap_info_struct *swap_info[]; 11extern struct swap_info_struct *swap_info[];
12extern int try_to_unuse(unsigned int, bool, unsigned long); 12extern int try_to_unuse(unsigned int, bool, unsigned long);
13extern unsigned long generic_max_swapfile_size(void);
14extern unsigned long max_swapfile_size(void);
13 15
14#endif /* _LINUX_SWAPFILE_H */ 16#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 73810808cdf2..2ff814c92f7f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -11,6 +11,7 @@
11#ifndef _LINUX_SYSCALLS_H 11#ifndef _LINUX_SYSCALLS_H
12#define _LINUX_SYSCALLS_H 12#define _LINUX_SYSCALLS_H
13 13
14struct __aio_sigset;
14struct epoll_event; 15struct epoll_event;
15struct iattr; 16struct iattr;
16struct inode; 17struct inode;
@@ -80,6 +81,7 @@ union bpf_attr;
80#include <linux/unistd.h> 81#include <linux/unistd.h>
81#include <linux/quota.h> 82#include <linux/quota.h>
82#include <linux/key.h> 83#include <linux/key.h>
84#include <linux/personality.h>
83#include <trace/syscall.h> 85#include <trace/syscall.h>
84 86
85#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER 87#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
@@ -231,6 +233,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
231 */ 233 */
232#ifndef __SYSCALL_DEFINEx 234#ifndef __SYSCALL_DEFINEx
233#define __SYSCALL_DEFINEx(x, name, ...) \ 235#define __SYSCALL_DEFINEx(x, name, ...) \
236 __diag_push(); \
237 __diag_ignore(GCC, 8, "-Wattribute-alias", \
238 "Type aliasing is used to sanitize syscall arguments");\
234 asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ 239 asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
235 __attribute__((alias(__stringify(__se_sys##name)))); \ 240 __attribute__((alias(__stringify(__se_sys##name)))); \
236 ALLOW_ERROR_INJECTION(sys##name, ERRNO); \ 241 ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
@@ -243,6 +248,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
243 __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ 248 __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
244 return ret; \ 249 return ret; \
245 } \ 250 } \
251 __diag_pop(); \
246 static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) 252 static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
247#endif /* __SYSCALL_DEFINEx */ 253#endif /* __SYSCALL_DEFINEx */
248 254
@@ -501,9 +507,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
501/* fs/timerfd.c */ 507/* fs/timerfd.c */
502asmlinkage long sys_timerfd_create(int clockid, int flags); 508asmlinkage long sys_timerfd_create(int clockid, int flags);
503asmlinkage long sys_timerfd_settime(int ufd, int flags, 509asmlinkage long sys_timerfd_settime(int ufd, int flags,
504 const struct itimerspec __user *utmr, 510 const struct __kernel_itimerspec __user *utmr,
505 struct itimerspec __user *otmr); 511 struct __kernel_itimerspec __user *otmr);
506asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); 512asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
507 513
508/* fs/utimes.c */ 514/* fs/utimes.c */
509asmlinkage long sys_utimensat(int dfd, const char __user *filename, 515asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -568,10 +574,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock,
568 struct sigevent __user *timer_event_spec, 574 struct sigevent __user *timer_event_spec,
569 timer_t __user * created_timer_id); 575 timer_t __user * created_timer_id);
570asmlinkage long sys_timer_gettime(timer_t timer_id, 576asmlinkage long sys_timer_gettime(timer_t timer_id,
571 struct itimerspec __user *setting); 577 struct __kernel_itimerspec __user *setting);
572asmlinkage long sys_timer_getoverrun(timer_t timer_id); 578asmlinkage long sys_timer_getoverrun(timer_t timer_id);
573asmlinkage long sys_timer_settime(timer_t timer_id, int flags, 579asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
574 const struct itimerspec __user *new_setting, 580 const struct __kernel_itimerspec __user *new_setting,
575 struct itimerspec __user *old_setting); 581 struct itimerspec __user *old_setting);
576asmlinkage long sys_timer_delete(timer_t timer_id); 582asmlinkage long sys_timer_delete(timer_t timer_id);
577asmlinkage long sys_clock_settime(clockid_t which_clock, 583asmlinkage long sys_clock_settime(clockid_t which_clock,
@@ -1277,4 +1283,14 @@ static inline long ksys_truncate(const char __user *pathname, loff_t length)
1277 return do_sys_truncate(pathname, length); 1283 return do_sys_truncate(pathname, length);
1278} 1284}
1279 1285
1286static inline unsigned int ksys_personality(unsigned int personality)
1287{
1288 unsigned int old = current->personality;
1289
1290 if (personality != 0xffffffff)
1291 set_personality(personality);
1292
1293 return old;
1294}
1295
1280#endif 1296#endif
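
The new ksys_personality() helper exists so that architecture-specific syscall wrappers can avoid calling the syscall entry point directly. Purely as an illustration (this is not the in-tree definition), a syscall body built on it would look like:

#include <linux/syscalls.h>

SYSCALL_DEFINE1(personality, unsigned int, personality)
{
        return ksys_personality(personality);
}
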
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index b8bfdc173ec0..3c12198c0103 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -237,6 +237,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
237 const struct attribute **attr); 237 const struct attribute **attr);
238int __must_check sysfs_chmod_file(struct kobject *kobj, 238int __must_check sysfs_chmod_file(struct kobject *kobj,
239 const struct attribute *attr, umode_t mode); 239 const struct attribute *attr, umode_t mode);
240struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
241 const struct attribute *attr);
242void sysfs_unbreak_active_protection(struct kernfs_node *kn);
240void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, 243void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
241 const void *ns); 244 const void *ns);
242bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); 245bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -350,6 +353,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
350 return 0; 353 return 0;
351} 354}
352 355
356static inline struct kernfs_node *
357sysfs_break_active_protection(struct kobject *kobj,
358 const struct attribute *attr)
359{
360 return NULL;
361}
362
363static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
364{
365}
366
353static inline void sysfs_remove_file_ns(struct kobject *kobj, 367static inline void sysfs_remove_file_ns(struct kobject *kobj,
354 const struct attribute *attr, 368 const struct attribute *attr,
355 const void *ns) 369 const void *ns)
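
These helpers are meant for attributes whose store() method ends up deleting the object the attribute hangs off, which would otherwise deadlock on the attribute's own active reference. A hedged sketch of the intended pattern, with a hypothetical teardown helper:

#include <linux/device.h>
#include <linux/sysfs.h>

static void foo_remove_device(struct device *dev)
{
        /* hypothetical: unregister and release the device */
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        struct kernfs_node *kn;

        /* Pin our own node and drop active protection so removing the parent
         * below cannot block forever waiting for this store() to finish. */
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        foo_remove_device(dev);
        if (kn)
                sysfs_unbreak_active_protection(kn);
        return count;
}
static DEVICE_ATTR_WO(delete);
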
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index c6aa8a3c42ed..b9626aa7e90c 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -37,9 +37,33 @@ struct t10_pi_tuple {
37#define T10_PI_APP_ESCAPE cpu_to_be16(0xffff) 37#define T10_PI_APP_ESCAPE cpu_to_be16(0xffff)
38#define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff) 38#define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff)
39 39
40static inline u32 t10_pi_ref_tag(struct request *rq)
41{
42#ifdef CONFIG_BLK_DEV_INTEGRITY
43 return blk_rq_pos(rq) >>
44 (rq->q->integrity.interval_exp - 9) & 0xffffffff;
45#else
46 return -1U;
47#endif
48}
49
40extern const struct blk_integrity_profile t10_pi_type1_crc; 50extern const struct blk_integrity_profile t10_pi_type1_crc;
41extern const struct blk_integrity_profile t10_pi_type1_ip; 51extern const struct blk_integrity_profile t10_pi_type1_ip;
42extern const struct blk_integrity_profile t10_pi_type3_crc; 52extern const struct blk_integrity_profile t10_pi_type3_crc;
43extern const struct blk_integrity_profile t10_pi_type3_ip; 53extern const struct blk_integrity_profile t10_pi_type3_ip;
44 54
55#ifdef CONFIG_BLK_DEV_INTEGRITY
56extern void t10_pi_prepare(struct request *rq, u8 protection_type);
57extern void t10_pi_complete(struct request *rq, u8 protection_type,
58 unsigned int intervals);
59#else
60static inline void t10_pi_complete(struct request *rq, u8 protection_type,
61 unsigned int intervals)
62{
63}
64static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
65{
66}
67#endif
68
45#endif 69#endif
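
As a small usage sketch of the helper whose body is shown above (everything except t10_pi_ref_tag() is hypothetical), a block driver programming protection information into a hardware command would seed the initial reference tag from the request:

#include <linux/blkdev.h>
#include <linux/t10-pi.h>

struct foo_hw_cmd {
        u32 ref_tag;
        /* ... */
};

static void foo_setup_pi(struct request *rq, struct foo_hw_cmd *cmd)
{
        /* start LBA converted to integrity intervals, truncated to 32 bits */
        cmd->ref_tag = t10_pi_ref_tag(rq);
}
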
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 72705eaf4b84..263e37271afd 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -89,7 +89,7 @@ struct tcp_sack_block {
89 89
90struct tcp_options_received { 90struct tcp_options_received {
91/* PAWS/RTTM data */ 91/* PAWS/RTTM data */
92 long ts_recent_stamp;/* Time we stored ts_recent (for aging) */ 92 int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
93 u32 ts_recent; /* Time stamp to echo next */ 93 u32 ts_recent; /* Time stamp to echo next */
94 u32 rcv_tsval; /* Time stamp value */ 94 u32 rcv_tsval; /* Time stamp value */
95 u32 rcv_tsecr; /* Time stamp echo reply */ 95 u32 rcv_tsecr; /* Time stamp echo reply */
@@ -181,10 +181,16 @@ struct tcp_sock {
181 u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut 181 u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
182 * total number of data segments sent. 182 * total number of data segments sent.
183 */ 183 */
184 u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
185 * total number of data bytes sent.
186 */
184 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked 187 u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
185 * sum(delta(snd_una)), or how many bytes 188 * sum(delta(snd_una)), or how many bytes
186 * were acked. 189 * were acked.
187 */ 190 */
191 u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
192 * total number of DSACK blocks received
193 */
188 u32 snd_una; /* First byte we want an ack for */ 194 u32 snd_una; /* First byte we want an ack for */
189 u32 snd_sml; /* Last byte of the most recently transmitted small packet */ 195 u32 snd_sml; /* Last byte of the most recently transmitted small packet */
190 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 196 u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
@@ -214,8 +220,7 @@ struct tcp_sock {
214#define TCP_RACK_RECOVERY_THRESH 16 220#define TCP_RACK_RECOVERY_THRESH 16
215 u8 reo_wnd_persist:5, /* No. of recovery since last adj */ 221 u8 reo_wnd_persist:5, /* No. of recovery since last adj */
216 dsack_seen:1, /* Whether DSACK seen after last adj */ 222 dsack_seen:1, /* Whether DSACK seen after last adj */
217 advanced:1, /* mstamp advanced since last lost marking */ 223 advanced:1; /* mstamp advanced since last lost marking */
218 reord:1; /* reordering detected */
219 } rack; 224 } rack;
220 u16 advmss; /* Advertised MSS */ 225 u16 advmss; /* Advertised MSS */
221 u8 compressed_ack; 226 u8 compressed_ack;
@@ -261,6 +266,7 @@ struct tcp_sock {
261 u8 ecn_flags; /* ECN status bits. */ 266 u8 ecn_flags; /* ECN status bits. */
262 u8 keepalive_probes; /* num of allowed keep alive probes */ 267 u8 keepalive_probes; /* num of allowed keep alive probes */
263 u32 reordering; /* Packet reordering metric. */ 268 u32 reordering; /* Packet reordering metric. */
269 u32 reord_seen; /* number of data packet reordering events */
264 u32 snd_up; /* Urgent pointer */ 270 u32 snd_up; /* Urgent pointer */
265 271
266/* 272/*
@@ -330,6 +336,9 @@ struct tcp_sock {
330 * the first SYN. */ 336 * the first SYN. */
331 u32 undo_marker; /* snd_una upon a new recovery episode. */ 337 u32 undo_marker; /* snd_una upon a new recovery episode. */
332 int undo_retrans; /* number of undoable retransmissions. */ 338 int undo_retrans; /* number of undoable retransmissions. */
339 u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans
340 * Total data bytes retransmitted
341 */
333 u32 total_retrans; /* Total retransmits for entire connection */ 342 u32 total_retrans; /* Total retransmits for entire connection */
334 343
335 u32 urg_seq; /* Seq of received urgent pointer */ 344 u32 urg_seq; /* Seq of received urgent pointer */
@@ -350,6 +359,7 @@ struct tcp_sock {
350#endif 359#endif
351 360
352/* Receiver side RTT estimation */ 361/* Receiver side RTT estimation */
362 u32 rcv_rtt_last_tsecr;
353 struct { 363 struct {
354 u32 rtt_us; 364 u32 rtt_us;
355 u32 seq; 365 u32 seq;
@@ -425,7 +435,7 @@ struct tcp_timewait_sock {
425 /* The time we sent the last out-of-window ACK: */ 435 /* The time we sent the last out-of-window ACK: */
426 u32 tw_last_oow_ack_time; 436 u32 tw_last_oow_ack_time;
427 437
428 long tw_ts_recent_stamp; 438 int tw_ts_recent_stamp;
429#ifdef CONFIG_TCP_MD5SIG 439#ifdef CONFIG_TCP_MD5SIG
430 struct tcp_md5sig_key *tw_md5_key; 440 struct tcp_md5sig_key *tw_md5_key;
431#endif 441#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index aed74463592d..27d83fd2ae61 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts,
14int put_timespec64(const struct timespec64 *ts, 14int put_timespec64(const struct timespec64 *ts,
15 struct __kernel_timespec __user *uts); 15 struct __kernel_timespec __user *uts);
16int get_itimerspec64(struct itimerspec64 *it, 16int get_itimerspec64(struct itimerspec64 *it,
17 const struct itimerspec __user *uit); 17 const struct __kernel_itimerspec __user *uit);
18int put_itimerspec64(const struct itimerspec64 *it, 18int put_itimerspec64(const struct itimerspec64 *it,
19 struct itimerspec __user *uit); 19 struct __kernel_itimerspec __user *uit);
20 20
21extern time64_t mktime64(const unsigned int year, const unsigned int mon, 21extern time64_t mktime64(const unsigned int year, const unsigned int mon,
22 const unsigned int day, const unsigned int hour, 22 const unsigned int day, const unsigned int hour,
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 0a7b2f79cec7..05634afba0db 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,6 +12,7 @@ typedef __u64 timeu64_t;
12 */ 12 */
13#ifndef CONFIG_64BIT_TIME 13#ifndef CONFIG_64BIT_TIME
14#define __kernel_timespec timespec 14#define __kernel_timespec timespec
15#define __kernel_itimerspec itimerspec
15#endif 16#endif
16 17
17#include <uapi/linux/time.h> 18#include <uapi/linux/time.h>
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 86bc2026efce..5d738804e3d6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,21 @@ extern int do_sys_settimeofday64(const struct timespec64 *tv,
21 const struct timezone *tz); 21 const struct timezone *tz);
22 22
23/* 23/*
24 * ktime_get() family: read the current time in a multitude of ways,
25 *
26 * The default time reference is CLOCK_MONOTONIC, starting at
27 * boot time but not counting the time spent in suspend.
28 * For other references, use the functions with "real", "clocktai",
29 * "boottime" and "raw" suffixes.
30 *
 31 * To get the time in a different format, use the ones with
32 * "ns", "ts64" and "seconds" suffix.
33 *
34 * See Documentation/core-api/timekeeping.rst for more details.
35 */
36
37
38/*
24 * timespec64 based interfaces 39 * timespec64 based interfaces
25 */ 40 */
26extern void ktime_get_raw_ts64(struct timespec64 *ts); 41extern void ktime_get_raw_ts64(struct timespec64 *ts);
@@ -177,7 +192,7 @@ static inline time64_t ktime_get_clocktai_seconds(void)
177extern bool timekeeping_rtc_skipsuspend(void); 192extern bool timekeeping_rtc_skipsuspend(void);
178extern bool timekeeping_rtc_skipresume(void); 193extern bool timekeeping_rtc_skipresume(void);
179 194
180extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); 195extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
181 196
182/* 197/*
183 * struct system_time_snapshot - simultaneous raw/real time capture with 198 * struct system_time_snapshot - simultaneous raw/real time capture with
@@ -243,7 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
243extern int persistent_clock_is_local; 258extern int persistent_clock_is_local;
244 259
245extern void read_persistent_clock64(struct timespec64 *ts); 260extern void read_persistent_clock64(struct timespec64 *ts);
246extern void read_boot_clock64(struct timespec64 *ts); 261void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
262 struct timespec64 *boot_offset);
247extern int update_persistent_clock64(struct timespec64 now); 263extern int update_persistent_clock64(struct timespec64 now);
248 264
249/* 265/*
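
A few concrete members of the ktime_get() family, just to illustrate the naming rules described in the new comment (these accessors already exist; the function below is only an example):

#include <linux/kernel.h>
#include <linux/timekeeping.h>

static void foo_show_clocks(void)
{
        struct timespec64 ts;
        ktime_t mono    = ktime_get();                  /* CLOCK_MONOTONIC, ktime_t */
        u64 mono_ns     = ktime_get_ns();               /* CLOCK_MONOTONIC, ns */
        u64 raw_ns      = ktime_get_raw_ns();           /* CLOCK_MONOTONIC_RAW, ns */
        time64_t real_s = ktime_get_real_seconds();     /* CLOCK_REALTIME, seconds */

        ktime_get_real_ts64(&ts);                       /* CLOCK_REALTIME, timespec64 */
        pr_info("mono=%lld mono_ns=%llu raw_ns=%llu real_s=%lld wall_s=%lld\n",
                ktime_to_ns(mono), mono_ns, raw_ns,
                (long long)real_s, (long long)ts.tv_sec);
}
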
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 66272862070b..61dfd93b6ee4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -64,6 +64,8 @@ struct torture_random_state {
64 long trs_count; 64 long trs_count;
65}; 65};
66#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 } 66#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
67#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
68 DEFINE_PER_CPU(struct torture_random_state, name)
67unsigned long torture_random(struct torture_random_state *trsp); 69unsigned long torture_random(struct torture_random_state *trsp);
68 70
69/* Task shuffler, which causes CPUs to occasionally go idle. */ 71/* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
79int torture_stutter_init(int s); 81int torture_stutter_init(int s);
80 82
81/* Initialization and cleanup. */ 83/* Initialization and cleanup. */
82bool torture_init_begin(char *ttype, bool v); 84bool torture_init_begin(char *ttype, int v);
83void torture_init_end(void); 85void torture_init_end(void);
84bool torture_cleanup_begin(void); 86bool torture_cleanup_begin(void);
85void torture_cleanup_end(void); 87void torture_cleanup_end(void);
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 06639fb6ab85..4609b94142d4 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -43,6 +43,8 @@ struct tpm_class_ops {
43 u8 (*status) (struct tpm_chip *chip); 43 u8 (*status) (struct tpm_chip *chip);
44 bool (*update_timeouts)(struct tpm_chip *chip, 44 bool (*update_timeouts)(struct tpm_chip *chip,
45 unsigned long *timeout_cap); 45 unsigned long *timeout_cap);
46 int (*go_idle)(struct tpm_chip *chip);
47 int (*cmd_ready)(struct tpm_chip *chip);
46 int (*request_locality)(struct tpm_chip *chip, int loc); 48 int (*request_locality)(struct tpm_chip *chip, int loc);
47 int (*relinquish_locality)(struct tpm_chip *chip, int loc); 49 int (*relinquish_locality)(struct tpm_chip *chip, int loc);
48 void (*clk_enable)(struct tpm_chip *chip, bool value); 50 void (*clk_enable)(struct tpm_chip *chip, bool value);
@@ -61,6 +63,7 @@ extern int tpm_seal_trusted(struct tpm_chip *chip,
61extern int tpm_unseal_trusted(struct tpm_chip *chip, 63extern int tpm_unseal_trusted(struct tpm_chip *chip,
62 struct trusted_key_payload *payload, 64 struct trusted_key_payload *payload,
63 struct trusted_key_options *options); 65 struct trusted_key_options *options);
66extern struct tpm_chip *tpm_default_chip(void);
64#else 67#else
65static inline int tpm_is_tpm2(struct tpm_chip *chip) 68static inline int tpm_is_tpm2(struct tpm_chip *chip)
66{ 69{
@@ -96,5 +99,9 @@ static inline int tpm_unseal_trusted(struct tpm_chip *chip,
96{ 99{
97 return -ENODEV; 100 return -ENODEV;
98} 101}
102static inline struct tpm_chip *tpm_default_chip(void)
103{
104 return NULL;
105}
99#endif 106#endif
100#endif 107#endif
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 4a8841963c2e..05589a3e37f4 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,6 +51,7 @@
51#include <linux/security.h> 51#include <linux/security.h>
52#include <linux/task_work.h> 52#include <linux/task_work.h>
53#include <linux/memcontrol.h> 53#include <linux/memcontrol.h>
54#include <linux/blk-cgroup.h>
54struct linux_binprm; 55struct linux_binprm;
55 56
56/* 57/*
@@ -192,6 +193,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
192 task_work_run(); 193 task_work_run();
193 194
194 mem_cgroup_handle_over_high(); 195 mem_cgroup_handle_over_high();
196 blkcg_maybe_throttle_current();
195} 197}
196 198
197#endif /* <linux/tracehook.h> */ 199#endif /* <linux/tracehook.h> */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ca840345571b..320d49d85484 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -74,8 +74,8 @@ struct udp_sock {
74 void (*encap_destroy)(struct sock *sk); 74 void (*encap_destroy)(struct sock *sk);
75 75
76 /* GRO functions for UDP socket */ 76 /* GRO functions for UDP socket */
77 struct sk_buff ** (*gro_receive)(struct sock *sk, 77 struct sk_buff * (*gro_receive)(struct sock *sk,
78 struct sk_buff **head, 78 struct list_head *head,
79 struct sk_buff *skb); 79 struct sk_buff *skb);
80 int (*gro_complete)(struct sock *sk, 80 int (*gro_complete)(struct sock *sk,
81 struct sk_buff *skb, 81 struct sk_buff *skb,
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6c5f2074e14f..6f8b68cd460f 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -75,7 +75,7 @@ struct uio_device {
75 struct fasync_struct *async_queue; 75 struct fasync_struct *async_queue;
76 wait_queue_head_t wait; 76 wait_queue_head_t wait;
77 struct uio_info *info; 77 struct uio_info *info;
78 spinlock_t info_lock; 78 struct mutex info_lock;
79 struct kobject *map_dir; 79 struct kobject *map_dir;
80 struct kobject *portio_dir; 80 struct kobject *portio_dir;
81}; 81};
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index a710e28b5215..6b708434b7f9 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -387,6 +387,12 @@ struct uac3_interrupt_data_msg {
387#define UAC3_CONNECTORS 0x0f 387#define UAC3_CONNECTORS 0x0f
388#define UAC3_POWER_DOMAIN 0x10 388#define UAC3_POWER_DOMAIN 0x10
389 389
390/* A.20 PROCESSING UNIT PROCESS TYPES */
391#define UAC3_PROCESS_UNDEFINED 0x00
392#define UAC3_PROCESS_UP_DOWNMIX 0x01
393#define UAC3_PROCESS_STEREO_EXTENDER 0x02
394#define UAC3_PROCESS_MULTI_FUNCTION 0x03
395
390/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */ 396/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */
391/* see audio-v2.h for the rest, which is identical to v2 */ 397/* see audio-v2.h for the rest, which is identical to v2 */
392#define UAC3_CS_REQ_INTEN 0x04 398#define UAC3_CS_REQ_INTEN 0x04
@@ -406,6 +412,15 @@ struct uac3_interrupt_data_msg {
406#define UAC3_TE_OVERFLOW 0x04 412#define UAC3_TE_OVERFLOW 0x04
407#define UAC3_TE_LATENCY 0x05 413#define UAC3_TE_LATENCY 0x05
408 414
415/* A.23.10 PROCESSING UNITS CONTROL SELECTORS */
416
417/* Up/Down Mixer */
418#define UAC3_UD_MODE_SELECT 0x01
419
420/* Stereo Extender */
421#define UAC3_EXT_WIDTH_CONTROL 0x01
422
423
409/* BADD predefined Unit/Terminal values */ 424/* BADD predefined Unit/Terminal values */
410#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */ 425#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */
411#define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */ 426#define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */
@@ -432,4 +447,8 @@ struct uac3_interrupt_data_msg {
432/* BADD sample rate is always fixed to 48kHz */ 447/* BADD sample rate is always fixed to 48kHz */
433#define UAC3_BADD_SAMPLING_RATE 48000 448#define UAC3_BADD_SAMPLING_RATE 48000
434 449
450/* BADD power domains recovery times in 50us increments */
451#define UAC3_BADD_PD_RECOVER_D1D0 0x0258 /* 30ms */
452#define UAC3_BADD_PD_RECOVER_D2D0 0x1770 /* 300ms */
453
435#endif /* __LINUX_USB_AUDIO_V3_H */ 454#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 77f0f0af3a71..a34539b7f750 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -84,8 +84,8 @@ enum vga_switcheroo_state {
84 * Client identifier. Audio clients use the same identifier & 0x100. 84 * Client identifier. Audio clients use the same identifier & 0x100.
85 */ 85 */
86enum vga_switcheroo_client_id { 86enum vga_switcheroo_client_id {
87 VGA_SWITCHEROO_UNKNOWN_ID = -1, 87 VGA_SWITCHEROO_UNKNOWN_ID = 0x1000,
88 VGA_SWITCHEROO_IGD, 88 VGA_SWITCHEROO_IGD = 0,
89 VGA_SWITCHEROO_DIS, 89 VGA_SWITCHEROO_DIS,
90 VGA_SWITCHEROO_MAX_CLIENTS, 90 VGA_SWITCHEROO_MAX_CLIENTS,
91}; 91};
@@ -151,7 +151,7 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
151 bool driver_power_control); 151 bool driver_power_control);
152int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 152int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
153 const struct vga_switcheroo_client_ops *ops, 153 const struct vga_switcheroo_client_ops *ops,
154 enum vga_switcheroo_client_id id); 154 struct pci_dev *vga_dev);
155 155
156void vga_switcheroo_client_fb_set(struct pci_dev *dev, 156void vga_switcheroo_client_fb_set(struct pci_dev *dev,
157 struct fb_info *info); 157 struct fb_info *info);
@@ -180,7 +180,7 @@ static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_ha
180 enum vga_switcheroo_handler_flags_t handler_flags) { return 0; } 180 enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
181static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, 181static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
182 const struct vga_switcheroo_client_ops *ops, 182 const struct vga_switcheroo_client_ops *ops,
183 enum vga_switcheroo_client_id id) { return 0; } 183 struct pci_dev *vga_dev) { return 0; }
184static inline void vga_switcheroo_unregister_handler(void) {} 184static inline void vga_switcheroo_unregister_handler(void) {}
185static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; } 185static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
186static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; } 186static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 5559a2d31c46..32baf8e26735 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -79,7 +79,8 @@ struct virtio_config_ops {
79 u64 (*get_features)(struct virtio_device *vdev); 79 u64 (*get_features)(struct virtio_device *vdev);
80 int (*finalize_features)(struct virtio_device *vdev); 80 int (*finalize_features)(struct virtio_device *vdev);
81 const char *(*bus_name)(struct virtio_device *vdev); 81 const char *(*bus_name)(struct virtio_device *vdev);
82 int (*set_vq_affinity)(struct virtqueue *vq, int cpu); 82 int (*set_vq_affinity)(struct virtqueue *vq,
83 const struct cpumask *cpu_mask);
83 const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev, 84 const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
84 int index); 85 int index);
85}; 86};
@@ -236,11 +237,11 @@ const char *virtio_bus_name(struct virtio_device *vdev)
236 * 237 *
237 */ 238 */
238static inline 239static inline
239int virtqueue_set_affinity(struct virtqueue *vq, int cpu) 240int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
240{ 241{
241 struct virtio_device *vdev = vq->vdev; 242 struct virtio_device *vdev = vq->vdev;
242 if (vdev->config->set_vq_affinity) 243 if (vdev->config->set_vq_affinity)
243 return vdev->config->set_vq_affinity(vq, cpu); 244 return vdev->config->set_vq_affinity(vq, cpu_mask);
244 return 0; 245 return 0;
245} 246}
246 247
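
With the cpumask-based signature, a driver spreading its virtqueues across the online CPUs might do something like the sketch below (queue bookkeeping elided; names hypothetical):

#include <linux/cpumask.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void foo_set_vq_affinities(struct virtqueue *vqs[], unsigned int nvqs)
{
        unsigned int i, cpu;

        cpu = cpumask_first(cpu_online_mask);
        for (i = 0; i < nvqs; i++) {
                virtqueue_set_affinity(vqs[i], cpumask_of(cpu));
                cpu = cpumask_next(cpu, cpu_online_mask);
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(cpu_online_mask);
        }
}
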
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 39fda195bf78..3af7c0e03be5 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -6,8 +6,10 @@
6 * 6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * 8 *
9 * Wound/wait implementation: 9 * Wait/Die implementation:
10 * Copyright (C) 2013 Canonical Ltd. 10 * Copyright (C) 2013 Canonical Ltd.
11 * Choice of algorithm:
12 * Copyright (C) 2018 WMWare Inc.
11 * 13 *
12 * This file contains the main data structure and API definitions. 14 * This file contains the main data structure and API definitions.
13 */ 15 */
@@ -23,14 +25,17 @@ struct ww_class {
23 struct lock_class_key mutex_key; 25 struct lock_class_key mutex_key;
24 const char *acquire_name; 26 const char *acquire_name;
25 const char *mutex_name; 27 const char *mutex_name;
28 unsigned int is_wait_die;
26}; 29};
27 30
28struct ww_acquire_ctx { 31struct ww_acquire_ctx {
29 struct task_struct *task; 32 struct task_struct *task;
30 unsigned long stamp; 33 unsigned long stamp;
31 unsigned acquired; 34 unsigned int acquired;
35 unsigned short wounded;
36 unsigned short is_wait_die;
32#ifdef CONFIG_DEBUG_MUTEXES 37#ifdef CONFIG_DEBUG_MUTEXES
33 unsigned done_acquire; 38 unsigned int done_acquire;
34 struct ww_class *ww_class; 39 struct ww_class *ww_class;
35 struct ww_mutex *contending_lock; 40 struct ww_mutex *contending_lock;
36#endif 41#endif
@@ -38,8 +43,8 @@ struct ww_acquire_ctx {
38 struct lockdep_map dep_map; 43 struct lockdep_map dep_map;
39#endif 44#endif
40#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 45#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
41 unsigned deadlock_inject_interval; 46 unsigned int deadlock_inject_interval;
42 unsigned deadlock_inject_countdown; 47 unsigned int deadlock_inject_countdown;
43#endif 48#endif
44}; 49};
45 50
@@ -58,17 +63,21 @@ struct ww_mutex {
58# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) 63# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
59#endif 64#endif
60 65
61#define __WW_CLASS_INITIALIZER(ww_class) \ 66#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
62 { .stamp = ATOMIC_LONG_INIT(0) \ 67 { .stamp = ATOMIC_LONG_INIT(0) \
63 , .acquire_name = #ww_class "_acquire" \ 68 , .acquire_name = #ww_class "_acquire" \
64 , .mutex_name = #ww_class "_mutex" } 69 , .mutex_name = #ww_class "_mutex" \
70 , .is_wait_die = _is_wait_die }
65 71
66#define __WW_MUTEX_INITIALIZER(lockname, class) \ 72#define __WW_MUTEX_INITIALIZER(lockname, class) \
67 { .base = __MUTEX_INITIALIZER(lockname.base) \ 73 { .base = __MUTEX_INITIALIZER(lockname.base) \
68 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } 74 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
69 75
76#define DEFINE_WD_CLASS(classname) \
77 struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
78
70#define DEFINE_WW_CLASS(classname) \ 79#define DEFINE_WW_CLASS(classname) \
71 struct ww_class classname = __WW_CLASS_INITIALIZER(classname) 80 struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
72 81
73#define DEFINE_WW_MUTEX(mutexname, ww_class) \ 82#define DEFINE_WW_MUTEX(mutexname, ww_class) \
74 struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) 83 struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
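
The wait/die (and wound/wait) backoff protocol documented in this file usually takes the shape of the loop below. The buffer type and function names are hypothetical, and the caller is assumed to bracket this with ww_acquire_init()/ww_acquire_done(), unlock both mutexes when done, and finish with ww_acquire_fini().

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(foo_ww_class);   /* wound/wait; DEFINE_WD_CLASS() gives wait/die */

struct foo_buf {
        struct ww_mutex lock;
        /* ... */
};

static int foo_lock_pair(struct foo_buf *a, struct foo_buf *b,
                         struct ww_acquire_ctx *ctx)
{
        struct foo_buf *first = a, *second = b;
        int ret;

        ret = ww_mutex_lock(&first->lock, ctx);
        if (ret)
                return ret;

        for (;;) {
                ret = ww_mutex_lock(&second->lock, ctx);
                if (!ret)
                        return 0;

                ww_mutex_unlock(&first->lock);
                if (ret != -EDEADLK)
                        return ret;

                /* We were told to back off: wait for the contended lock
                 * without holding anything, then retry in the new order. */
                ww_mutex_lock_slow(&second->lock, ctx);
                swap(first, second);
        }
}
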
@@ -102,7 +111,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
102 * 111 *
103 * Context-based w/w mutex acquiring can be done in any order whatsoever within 112 * Context-based w/w mutex acquiring can be done in any order whatsoever within
104 * a given lock class. Deadlocks will be detected and handled with the 113 * a given lock class. Deadlocks will be detected and handled with the
105 * wait/wound logic. 114 * wait/die logic.
106 * 115 *
107 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can 116 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
108 * result in undetected deadlocks and is so forbidden. Mixing different contexts 117 * result in undetected deadlocks and is so forbidden. Mixing different contexts
@@ -123,6 +132,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
123 ctx->task = current; 132 ctx->task = current;
124 ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp); 133 ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
125 ctx->acquired = 0; 134 ctx->acquired = 0;
135 ctx->wounded = false;
136 ctx->is_wait_die = ww_class->is_wait_die;
126#ifdef CONFIG_DEBUG_MUTEXES 137#ifdef CONFIG_DEBUG_MUTEXES
127 ctx->ww_class = ww_class; 138 ctx->ww_class = ww_class;
128 ctx->done_acquire = 0; 139 ctx->done_acquire = 0;
@@ -195,13 +206,13 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
195 * Lock the w/w mutex exclusively for this task. 206 * Lock the w/w mutex exclusively for this task.
196 * 207 *
197 * Deadlocks within a given w/w class of locks are detected and handled with the 208 * Deadlocks within a given w/w class of locks are detected and handled with the
198 * wait/wound algorithm. If the lock isn't immediately avaiable this function 209 * wait/die algorithm. If the lock isn't immediately available this function
199 * will either sleep until it is (wait case). Or it selects the current context 210 * will either sleep until it is (wait case). Or it selects the current context
200 * for backing off by returning -EDEADLK (wound case). Trying to acquire the 211 * for backing off by returning -EDEADLK (die case). Trying to acquire the
201 * same lock with the same context twice is also detected and signalled by 212 * same lock with the same context twice is also detected and signalled by
202 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. 213 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
203 * 214 *
204 * In the wound case the caller must release all currently held w/w mutexes for 215 * In the die case the caller must release all currently held w/w mutexes for
205 * the given context and then wait for this contending lock to be available by 216 * the given context and then wait for this contending lock to be available by
206 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this 217 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
207 * lock and proceed with trying to acquire further w/w mutexes (e.g. when 218 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
@@ -226,14 +237,14 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
226 * Lock the w/w mutex exclusively for this task. 237 * Lock the w/w mutex exclusively for this task.
227 * 238 *
228 * Deadlocks within a given w/w class of locks are detected and handled with the 239 * Deadlocks within a given w/w class of locks are detected and handled with the
229 * wait/wound algorithm. If the lock isn't immediately avaiable this function 240 * wait/die algorithm. If the lock isn't immediately available this function
230 * will either sleep until it is (wait case). Or it selects the current context 241 * will either sleep until it is (wait case). Or it selects the current context
231 * for backing off by returning -EDEADLK (wound case). Trying to acquire the 242 * for backing off by returning -EDEADLK (die case). Trying to acquire the
232 * same lock with the same context twice is also detected and signalled by 243 * same lock with the same context twice is also detected and signalled by
233 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a 244 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
234 * signal arrives while waiting for the lock then this function returns -EINTR. 245 * signal arrives while waiting for the lock then this function returns -EINTR.
235 * 246 *
236 * In the wound case the caller must release all currently held w/w mutexes for 247 * In the die case the caller must release all currently held w/w mutexes for
237 * the given context and then wait for this contending lock to be available by 248 * the given context and then wait for this contending lock to be available by
238 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to 249 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
239 * not acquire this lock and proceed with trying to acquire further w/w mutexes 250 * not acquire this lock and proceed with trying to acquire further w/w mutexes
@@ -256,7 +267,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
256 * @lock: the mutex to be acquired 267 * @lock: the mutex to be acquired
257 * @ctx: w/w acquire context 268 * @ctx: w/w acquire context
258 * 269 *
259 * Acquires a w/w mutex with the given context after a wound case. This function 270 * Acquires a w/w mutex with the given context after a die case. This function
260 * will sleep until the lock becomes available. 271 * will sleep until the lock becomes available.
261 * 272 *
262 * The caller must have released all w/w mutexes already acquired with the 273 * The caller must have released all w/w mutexes already acquired with the
@@ -290,7 +301,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
290 * @lock: the mutex to be acquired 301 * @lock: the mutex to be acquired
291 * @ctx: w/w acquire context 302 * @ctx: w/w acquire context
292 * 303 *
293 * Acquires a w/w mutex with the given context after a wound case. This function 304 * Acquires a w/w mutex with the given context after a die case. This function
294 * will sleep until the lock becomes available and returns 0 when the lock has 305 * will sleep until the lock becomes available and returns 0 when the lock has
295 * been acquired. If a signal arrives while waiting for the lock then this 306 * been acquired. If a signal arrives while waiting for the lock then this
296 * function returns -EINTR. 307 * function returns -EINTR.
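The wait/die backoff described in the comment blocks above is easier to follow in code. Below is a minimal sketch, not taken from this patch: lock_all(), the locks[] array and nlocks are hypothetical, and real callers usually walk a list of objects instead of an array.

#include <linux/ww_mutex.h>

static int lock_all(struct ww_mutex **locks, int nlocks,
                    struct ww_acquire_ctx *ctx, struct ww_class *class)
{
        int slow = -1;  /* index already taken via ww_mutex_lock_slow() */
        int i, j, ret;

        ww_acquire_init(ctx, class);
retry:
        for (i = 0; i < nlocks; i++) {
                if (i == slow)
                        continue;       /* already held from the slow path */
                ret = ww_mutex_lock(locks[i], ctx);
                if (ret)
                        goto err;
        }
        ww_acquire_done(ctx);
        return 0;       /* caller unlocks all and calls ww_acquire_fini() */

err:
        /* back off: drop every w/w mutex held in this context */
        for (j = 0; j < i; j++)
                ww_mutex_unlock(locks[j]);
        if (slow > i)
                ww_mutex_unlock(locks[slow]);

        if (ret == -EDEADLK) {
                /* die case: sleep on the contended lock, then start over */
                ww_mutex_lock_slow(locks[i], ctx);
                slow = i;
                goto retry;
        }

        ww_acquire_fini(ctx);
        return ret;
}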
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index ed16c6dde0ba..604e79cb6cbf 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -25,6 +25,9 @@
25 * @read_hpd: read the HPD pin. Return true if high, false if low or 25 * @read_hpd: read the HPD pin. Return true if high, false if low or
26 * an error if negative. If NULL or -ENOTTY is returned, 26 * an error if negative. If NULL or -ENOTTY is returned,
27 * then this is not supported. 27 * then this is not supported.
28 * @read_5v: read the 5V pin. Return true if high, false if low or
29 * an error if negative. If NULL or -ENOTTY is returned,
30 * then this is not supported.
28 * 31 *
29 * These operations are used by the cec pin framework to manipulate 32 * These operations are used by the cec pin framework to manipulate
30 * the CEC pin. 33 * the CEC pin.
@@ -38,6 +41,7 @@ struct cec_pin_ops {
38 void (*free)(struct cec_adapter *adap); 41 void (*free)(struct cec_adapter *adap);
39 void (*status)(struct cec_adapter *adap, struct seq_file *file); 42 void (*status)(struct cec_adapter *adap, struct seq_file *file);
40 int (*read_hpd)(struct cec_adapter *adap); 43 int (*read_hpd)(struct cec_adapter *adap);
44 int (*read_5v)(struct cec_adapter *adap);
41}; 45};
42 46
43/** 47/**
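A sketch of what a pin driver's new read_5v hook might look like; the driver structure, GPIO handle and the remaining ops are hypothetical, only the return-value contract comes from the documentation above.

#include <linux/gpio/consumer.h>
#include <media/cec.h>
#include <media/cec-pin.h>

struct my_cec_pin {
        struct gpio_desc *v5_gpio;      /* optional 5V sense line */
};

static int my_cec_read_5v(struct cec_adapter *adap)
{
        struct my_cec_pin *pin = cec_get_drvdata(adap);

        if (!pin->v5_gpio)
                return -ENOTTY;         /* not wired up: report "unsupported" */

        return gpiod_get_value(pin->v5_gpio);  /* 1 = high, 0 = low, <0 = error */
}

static const struct cec_pin_ops my_cec_pin_ops = {
        /* .read, .low, .high, .read_hpd, ... */
        .read_5v = my_cec_read_5v,
};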
diff --git a/include/media/cec.h b/include/media/cec.h
index 580ab1042898..ff9847f7f99d 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -79,7 +79,7 @@ struct cec_event_entry {
79}; 79};
80 80
81#define CEC_NUM_CORE_EVENTS 2 81#define CEC_NUM_CORE_EVENTS 2
82#define CEC_NUM_EVENTS CEC_EVENT_PIN_HPD_HIGH 82#define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH
83 83
84struct cec_fh { 84struct cec_fh {
85 struct list_head list; 85 struct list_head list;
@@ -309,6 +309,16 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
309void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts); 309void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
310 310
311/** 311/**
312 * cec_queue_pin_5v_event() - queue a pin event with a given timestamp.
313 *
314 * @adap: pointer to the cec adapter
315 * @is_high: when true the 5V pin is high, otherwise it is low
316 * @ts: the timestamp for this event
317 *
318 */
319void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
320
321/**
312 * cec_get_edid_phys_addr() - find and return the physical address 322 * cec_get_edid_phys_addr() - find and return the physical address
313 * 323 *
314 * @edid: pointer to the EDID data 324 * @edid: pointer to the EDID data
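A short hedged sketch of how a pin driver would feed the new 5V event; the GPIO handle is hypothetical, and in a real driver the timestamp would be taken as close to the transition (e.g. in the interrupt handler) as possible.

static void my_report_5v(struct cec_adapter *adap, struct gpio_desc *v5_gpio)
{
        bool is_high = gpiod_get_value(v5_gpio) > 0;

        /* queue the transition so userspace sees CEC_EVENT_PIN_5V_* */
        cec_queue_pin_5v_event(adap, is_high, ktime_get());
}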
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
index 331c8269c00e..6f7a85ab3541 100644
--- a/include/media/dvb_frontend.h
+++ b/include/media/dvb_frontend.h
@@ -52,6 +52,10 @@
52 */ 52 */
53#define MAX_DELSYS 8 53#define MAX_DELSYS 8
54 54
55/* Helper definitions to be used by frontend drivers */
56#define kHz 1000UL
57#define MHz 1000000UL
58
55/** 59/**
56 * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning 60 * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
57 * 61 *
@@ -73,22 +77,19 @@ struct dvb_frontend;
73 * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths 77 * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
74 * 78 *
75 * @name: name of the Frontend 79 * @name: name of the Frontend
76 * @frequency_min: minimal frequency supported 80 * @frequency_min_hz: minimal frequency supported in Hz
77 * @frequency_max: maximum frequency supported 81 * @frequency_max_hz: maximum frequency supported in Hz
78 * @frequency_step: frequency step 82 * @frequency_step_hz: frequency step in Hz
79 * @bandwidth_min: minimal frontend bandwidth supported 83 * @bandwidth_min: minimal frontend bandwidth supported
80 * @bandwidth_max: maximum frontend bandwidth supported 84 * @bandwidth_max: maximum frontend bandwidth supported
81 * @bandwidth_step: frontend bandwidth step 85 * @bandwidth_step: frontend bandwidth step
82 *
83 * NOTE: frequency parameters are in Hz, for terrestrial/cable or kHz for
84 * satellite.
85 */ 86 */
86struct dvb_tuner_info { 87struct dvb_tuner_info {
87 char name[128]; 88 char name[128];
88 89
89 u32 frequency_min; 90 u32 frequency_min_hz;
90 u32 frequency_max; 91 u32 frequency_max_hz;
91 u32 frequency_step; 92 u32 frequency_step_hz;
92 93
93 u32 bandwidth_min; 94 u32 bandwidth_min;
94 u32 bandwidth_max; 95 u32 bandwidth_max;
@@ -316,6 +317,34 @@ struct analog_demod_ops {
316 317
317struct dtv_frontend_properties; 318struct dtv_frontend_properties;
318 319
320/**
321 * struct dvb_frontend_internal_info - Frontend properties and capabilities
322 *
323 * @name: Name of the frontend
324 * @frequency_min_hz: Minimal frequency supported by the frontend.
325 * @frequency_max_hz: Maximal frequency supported by the frontend.
326 * @frequency_stepsize_hz: All frequencies are a multiple of this value.
327 * @frequency_tolerance_hz: Frequency tolerance.
328 * @symbol_rate_min: Minimal symbol rate, in bauds
329 * (for Cable/Satellite systems).
330 * @symbol_rate_max: Maximal symbol rate, in bauds
331 * (for Cable/Satellite systems).
332 * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm
333 * (for Cable/Satellite systems).
334 * @caps: Capabilities supported by the frontend,
335 * as specified in &enum fe_caps.
336 */
337struct dvb_frontend_internal_info {
338 char name[128];
339 u32 frequency_min_hz;
340 u32 frequency_max_hz;
341 u32 frequency_stepsize_hz;
342 u32 frequency_tolerance_hz;
343 u32 symbol_rate_min;
344 u32 symbol_rate_max;
345 u32 symbol_rate_tolerance;
346 enum fe_caps caps;
347};
319 348
320/** 349/**
321 * struct dvb_frontend_ops - Demodulation information and callbacks for 350 * struct dvb_frontend_ops - Demodulation information and callbacks for
@@ -403,7 +432,7 @@ struct dtv_frontend_properties;
403 * @analog_ops: pointer to &struct analog_demod_ops 432 * @analog_ops: pointer to &struct analog_demod_ops
404 */ 433 */
405struct dvb_frontend_ops { 434struct dvb_frontend_ops {
406 struct dvb_frontend_info info; 435 struct dvb_frontend_internal_info info;
407 436
408 u8 delsys[MAX_DELSYS]; 437 u8 delsys[MAX_DELSYS];
409 438
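With the info struct now carrying explicit _hz fields, a demod driver's ops table could look roughly like the sketch below; every value is made up and only the shape and the use of the kHz/MHz helpers matter.

static const struct dvb_frontend_ops my_fe_ops = {
        .delsys = { SYS_DVBT, SYS_DVBT2 },
        .info = {
                .name                   = "Example DVB-T/T2 demod",
                .frequency_min_hz       = 174 * MHz,
                .frequency_max_hz       = 862 * MHz,
                .frequency_stepsize_hz  = 250 * kHz,
                .caps                   = FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO,
        },
        /* .read_status, .set_frontend, .tune, ... */
};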
diff --git a/include/media/i2c/lm3560.h b/include/media/i2c/lm3560.h
index a5bd310c9e1e..0e2b1c751a5d 100644
--- a/include/media/i2c/lm3560.h
+++ b/include/media/i2c/lm3560.h
@@ -22,6 +22,7 @@
22 22
23#include <media/v4l2-subdev.h> 23#include <media/v4l2-subdev.h>
24 24
25#define LM3559_NAME "lm3559"
25#define LM3560_NAME "lm3560" 26#define LM3560_NAME "lm3560"
26#define LM3560_I2C_ADDR (0x53) 27#define LM3560_I2C_ADDR (0x53)
27 28
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 160bca96d524..cdc87ec61e54 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -338,7 +338,7 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
338 ({ \ 338 ({ \
339 BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \ 339 BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \
340 sizeof((array)->height_field) != sizeof(u32)); \ 340 sizeof((array)->height_field) != sizeof(u32)); \
341 (typeof(&(*(array))))__v4l2_find_nearest_size( \ 341 (typeof(&(array)[0]))__v4l2_find_nearest_size( \
342 (array), array_size, sizeof(*(array)), \ 342 (array), array_size, sizeof(*(array)), \
343 offsetof(typeof(*(array)), width_field), \ 343 offsetof(typeof(*(array)), width_field), \
344 offsetof(typeof(*(array)), height_field), \ 344 offsetof(typeof(*(array)), height_field), \
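For context, v4l2_find_nearest_size() is typically used as in the sketch below from a driver's try_fmt path; the size table and handler are hypothetical.

static const struct v4l2_frmsize_discrete my_sizes[] = {
        {  640,  480 },
        { 1280,  720 },
        { 1920, 1080 },
};

static void my_clamp_size(struct v4l2_pix_format *pix)
{
        const struct v4l2_frmsize_discrete *sz;

        /* pick the table entry closest to the requested width/height */
        sz = v4l2_find_nearest_size(my_sizes, ARRAY_SIZE(my_sizes),
                                    width, height,
                                    pix->width, pix->height);
        pix->width  = sz->width;
        pix->height = sz->height;
}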
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 5b445b5654f7..f615ba1b29dd 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -181,10 +181,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
181 * not freed when the control is deleted. Should this be needed 181 * not freed when the control is deleted. Should this be needed
182 * then a new internal bitfield can be added to tell the framework 182 * then a new internal bitfield can be added to tell the framework
183 * to free this pointer. 183 * to free this pointer.
184 * @p_cur: The control's current value represented via a union with 184 * @p_cur: The control's current value represented via a union which
185 * provides a standard way of accessing control types 185 * provides a standard way of accessing control types
186 * through a pointer. 186 * through a pointer.
187 * @p_new: The control's new value represented via a union with provides 187 * @p_new: The control's new value represented via a union which provides
188 * a standard way of accessing control types 188 * a standard way of accessing control types
189 * through a pointer. 189 * through a pointer.
190 */ 190 */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index a8dbf5b54b5c..5848d92c30da 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -621,7 +621,7 @@ const char *v4l2_norm_to_name(v4l2_std_id id);
621 * v4l2_video_std_frame_period - Ancillary routine that fills a 621 * v4l2_video_std_frame_period - Ancillary routine that fills a
622 * struct &v4l2_fract pointer with the default framerate fraction. 622 * struct &v4l2_fract pointer with the default framerate fraction.
623 * 623 *
624 * @id: analog TV sdandard ID. 624 * @id: analog TV standard ID.
625 * @frameperiod: struct &v4l2_fract pointer to be filled 625 * @frameperiod: struct &v4l2_fract pointer to be filled
626 * 626 *
627 */ 627 */
@@ -632,7 +632,7 @@ void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
632 * a &v4l2_standard structure according to the @id parameter. 632 * a &v4l2_standard structure according to the @id parameter.
633 * 633 *
634 * @vs: struct &v4l2_standard pointer to be filled 634 * @vs: struct &v4l2_standard pointer to be filled
635 * @id: analog TV sdandard ID. 635 * @id: analog TV standard ID.
636 * @name: name of the standard to be used 636 * @name: name of the standard to be used
637 * 637 *
638 * .. note:: 638 * .. note::
@@ -643,6 +643,17 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
643 int id, const char *name); 643 int id, const char *name);
644 644
645/** 645/**
646 * v4l_video_std_enumstd - Ancillary routine that fills in the fields of
647 * a &v4l2_standard structure according to the @id and @vs->index
648 * parameters.
649 *
650 * @vs: struct &v4l2_standard pointer to be filled.
651 * @id: analog TV standard ID.
652 *
653 */
654int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id);
655
656/**
646 * v4l_printk_ioctl - Ancillary routine that prints the ioctl in a 657 * v4l_printk_ioctl - Ancillary routine that prints the ioctl in a
647 * human-readable format. 658 * human-readable format.
648 * 659 *
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 4d8626c468bc..4bbb5f3d2b02 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -45,6 +45,8 @@
45/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */ 45/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
46#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12) 46#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12)
47#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13) 47#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13)
48#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(14)
49#define V4L2_MBUS_DATA_ENABLE_LOW BIT(15)
48 50
49/* Serial flags */ 51/* Serial flags */
50/* How many lanes the client can use */ 52/* How many lanes the client can use */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3d07ba3a8262..d655720e16a1 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -32,7 +32,7 @@
32 * assumed that one source and one destination buffer are all 32 * assumed that one source and one destination buffer are all
33 * that is required for the driver to perform one full transaction. 33 * that is required for the driver to perform one full transaction.
34 * This method may not sleep. 34 * This method may not sleep.
35 * @job_abort: required. Informs the driver that it has to abort the currently 35 * @job_abort: optional. Informs the driver that it has to abort the currently
36 * running transaction as soon as possible (i.e. as soon as it can 36 * running transaction as soon as possible (i.e. as soon as it can
37 * stop the device safely; e.g. in the next interrupt handler), 37 * stop the device safely; e.g. in the next interrupt handler),
38 * even if the transaction would not have been finished by then. 38 * even if the transaction would not have been finished by then.
@@ -40,19 +40,14 @@
40 * v4l2_m2m_job_finish() (as if the transaction ended normally). 40 * v4l2_m2m_job_finish() (as if the transaction ended normally).
41 * This function does not have to (and will usually not) wait 41 * This function does not have to (and will usually not) wait
42 * until the device enters a state when it can be stopped. 42 * until the device enters a state when it can be stopped.
43 * @lock: optional. Define a driver's own lock callback, instead of using
44 * &v4l2_m2m_ctx->q_lock.
45 * @unlock: optional. Define a driver's own unlock callback, instead of
46 * using &v4l2_m2m_ctx->q_lock.
47 */ 43 */
48struct v4l2_m2m_ops { 44struct v4l2_m2m_ops {
49 void (*device_run)(void *priv); 45 void (*device_run)(void *priv);
50 int (*job_ready)(void *priv); 46 int (*job_ready)(void *priv);
51 void (*job_abort)(void *priv); 47 void (*job_abort)(void *priv);
52 void (*lock)(void *priv);
53 void (*unlock)(void *priv);
54}; 48};
55 49
50struct video_device;
56struct v4l2_m2m_dev; 51struct v4l2_m2m_dev;
57 52
58/** 53/**
@@ -328,6 +323,24 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
328 */ 323 */
329struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops); 324struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
330 325
326#if defined(CONFIG_MEDIA_CONTROLLER)
327void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
328int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
329 struct video_device *vdev, int function);
330#else
331static inline void
332v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
333{
334}
335
336static inline int
337v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
338 struct video_device *vdev, int function)
339{
340 return 0;
341}
342#endif
343
331/** 344/**
332 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure 345 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
333 * 346 *
@@ -437,6 +450,35 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
437} 450}
438 451
439/** 452/**
453 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
454 *
455 * @q_ctx: pointer to &struct v4l2_m2m_queue_ctx
456 */
457void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
458
459/**
460 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
461 * ready buffers
462 *
463 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
464 */
465static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
466{
467 return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
468}
469
470/**
471 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
472 * ready buffers
473 *
474 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
475 */
476static inline void *v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
477{
478 return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
479}
480
481/**
440 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready 482 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
441 * buffers 483 * buffers
442 * 484 *
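Taken together with the removal of the lock/unlock hooks above, a mem2mem driver now registers roughly as sketched below; the my_* names and the chosen media entity function are hypothetical.

static const struct v4l2_m2m_ops my_m2m_ops = {
        .device_run     = my_device_run,
        .job_ready      = my_job_ready,         /* optional */
        /* .job_abort is optional now as well */
};

static int my_register_m2m(struct my_dev *dev)
{
        dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
        if (IS_ERR(dev->m2m_dev))
                return PTR_ERR(dev->m2m_dev);

        /* resolves to no-op stubs when CONFIG_MEDIA_CONTROLLER is off */
        return v4l2_m2m_register_media_controller(dev->m2m_dev, &dev->vfd,
                                        MEDIA_ENT_F_PROC_VIDEO_SCALER);
}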
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 678c24de1ac6..3093b9cb9067 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -25,6 +25,7 @@ int vsp1_du_init(struct device *dev);
25 * struct vsp1_du_lif_config - VSP LIF configuration 25 * struct vsp1_du_lif_config - VSP LIF configuration
26 * @width: output frame width 26 * @width: output frame width
27 * @height: output frame height 27 * @height: output frame height
28 * @interlaced: true for interlaced pipelines
28 * @callback: frame completion callback function (optional). When a callback 29 * @callback: frame completion callback function (optional). When a callback
29 * is provided, the VSP driver guarantees that it will be called once 30 * is provided, the VSP driver guarantees that it will be called once
30 * and only once for each vsp1_du_atomic_flush() call. 31 * and only once for each vsp1_du_atomic_flush() call.
@@ -33,6 +34,7 @@ int vsp1_du_init(struct device *dev);
33struct vsp1_du_lif_config { 34struct vsp1_du_lif_config {
34 unsigned int width; 35 unsigned int width;
35 unsigned int height; 36 unsigned int height;
37 bool interlaced;
36 38
37 void (*callback)(void *data, bool completed, u32 crc); 39 void (*callback)(void *data, bool completed, u32 crc);
38 void *callback_data; 40 void *callback_data;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9e59ebfded62..1ad5b19e83a9 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -6,6 +6,7 @@
6 * Public action API for classifiers/qdiscs 6 * Public action API for classifiers/qdiscs
7*/ 7*/
8 8
9#include <linux/refcount.h>
9#include <net/sch_generic.h> 10#include <net/sch_generic.h>
10#include <net/pkt_sched.h> 11#include <net/pkt_sched.h>
11#include <net/net_namespace.h> 12#include <net/net_namespace.h>
@@ -26,8 +27,8 @@ struct tc_action {
26 struct tcf_idrinfo *idrinfo; 27 struct tcf_idrinfo *idrinfo;
27 28
28 u32 tcfa_index; 29 u32 tcfa_index;
29 int tcfa_refcnt; 30 refcount_t tcfa_refcnt;
30 int tcfa_bindcnt; 31 atomic_t tcfa_bindcnt;
31 u32 tcfa_capab; 32 u32 tcfa_capab;
32 int tcfa_action; 33 int tcfa_action;
33 struct tcf_t tcfa_tm; 34 struct tcf_t tcfa_tm;
@@ -37,7 +38,7 @@ struct tc_action {
37 spinlock_t tcfa_lock; 38 spinlock_t tcfa_lock;
38 struct gnet_stats_basic_cpu __percpu *cpu_bstats; 39 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
39 struct gnet_stats_queue __percpu *cpu_qstats; 40 struct gnet_stats_queue __percpu *cpu_qstats;
40 struct tc_cookie *act_cookie; 41 struct tc_cookie __rcu *act_cookie;
41 struct tcf_chain *goto_chain; 42 struct tcf_chain *goto_chain;
42}; 43};
43#define tcf_index common.tcfa_index 44#define tcf_index common.tcfa_index
@@ -84,14 +85,15 @@ struct tc_action_ops {
84 size_t size; 85 size_t size;
85 struct module *owner; 86 struct module *owner;
86 int (*act)(struct sk_buff *, const struct tc_action *, 87 int (*act)(struct sk_buff *, const struct tc_action *,
87 struct tcf_result *); 88 struct tcf_result *); /* called under RCU BH lock*/
88 int (*dump)(struct sk_buff *, struct tc_action *, int, int); 89 int (*dump)(struct sk_buff *, struct tc_action *, int, int);
89 void (*cleanup)(struct tc_action *); 90 void (*cleanup)(struct tc_action *);
90 int (*lookup)(struct net *net, struct tc_action **a, u32 index, 91 int (*lookup)(struct net *net, struct tc_action **a, u32 index,
91 struct netlink_ext_ack *extack); 92 struct netlink_ext_ack *extack);
92 int (*init)(struct net *net, struct nlattr *nla, 93 int (*init)(struct net *net, struct nlattr *nla,
93 struct nlattr *est, struct tc_action **act, int ovr, 94 struct nlattr *est, struct tc_action **act, int ovr,
94 int bind, struct netlink_ext_ack *extack); 95 int bind, bool rtnl_held,
96 struct netlink_ext_ack *extack);
95 int (*walk)(struct net *, struct sk_buff *, 97 int (*walk)(struct net *, struct sk_buff *,
96 struct netlink_callback *, int, 98 struct netlink_callback *, int,
97 const struct tc_action_ops *, 99 const struct tc_action_ops *,
@@ -99,6 +101,8 @@ struct tc_action_ops {
99 void (*stats_update)(struct tc_action *, u64, u32, u64); 101 void (*stats_update)(struct tc_action *, u64, u32, u64);
100 size_t (*get_fill_size)(const struct tc_action *act); 102 size_t (*get_fill_size)(const struct tc_action *act);
101 struct net_device *(*get_dev)(const struct tc_action *a); 103 struct net_device *(*get_dev)(const struct tc_action *a);
104 void (*put_dev)(struct net_device *dev);
105 int (*delete)(struct net *net, u32 index);
102}; 106};
103 107
104struct tc_action_net { 108struct tc_action_net {
@@ -151,6 +155,10 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
151 int bind, bool cpustats); 155 int bind, bool cpustats);
152void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a); 156void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
153 157
158void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
159int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
160 struct tc_action **a, int bind);
161int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
154int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); 162int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
155 163
156static inline int tcf_idr_release(struct tc_action *a, bool bind) 164static inline int tcf_idr_release(struct tc_action *a, bool bind)
@@ -161,18 +169,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind)
161int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); 169int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
162int tcf_unregister_action(struct tc_action_ops *a, 170int tcf_unregister_action(struct tc_action_ops *a,
163 struct pernet_operations *ops); 171 struct pernet_operations *ops);
164int tcf_action_destroy(struct list_head *actions, int bind); 172int tcf_action_destroy(struct tc_action *actions[], int bind);
165int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, 173int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
166 int nr_actions, struct tcf_result *res); 174 int nr_actions, struct tcf_result *res);
167int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, 175int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
168 struct nlattr *est, char *name, int ovr, int bind, 176 struct nlattr *est, char *name, int ovr, int bind,
169 struct list_head *actions, size_t *attr_size, 177 struct tc_action *actions[], size_t *attr_size,
170 struct netlink_ext_ack *extack); 178 bool rtnl_held, struct netlink_ext_ack *extack);
171struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, 179struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
172 struct nlattr *nla, struct nlattr *est, 180 struct nlattr *nla, struct nlattr *est,
173 char *name, int ovr, int bind, 181 char *name, int ovr, int bind,
182 bool rtnl_held,
174 struct netlink_ext_ack *extack); 183 struct netlink_ext_ack *extack);
175int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int); 184int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
185 int ref);
176int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); 186int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
177int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); 187int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
178int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); 188int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
@@ -190,9 +200,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
190#endif 200#endif
191} 201}
192 202
193typedef int tc_setup_cb_t(enum tc_setup_type type,
194 void *type_data, void *cb_priv);
195
196#ifdef CONFIG_NET_CLS_ACT 203#ifdef CONFIG_NET_CLS_ACT
197int tc_setup_cb_egdev_register(const struct net_device *dev, 204int tc_setup_cb_egdev_register(const struct net_device *dev,
198 tc_setup_cb_t *cb, void *cb_priv); 205 tc_setup_cb_t *cb, void *cb_priv);
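For action modules, the visible effect of these changes is the extra rtnl_held argument to ->init() and the new ->delete() hook. A rough sketch under the assumption of a per-netns action (the tcf_foo_* names and foo_net_id are hypothetical):

static int tcf_foo_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind, bool rtnl_held,
                        struct netlink_ext_ack *extack)
{
        /* rtnl_held tells the action whether the RTNL lock is taken;
         * allocate/bind the action via tcf_idr_check_alloc() etc. here.
         */
        return 0;
}

static int tcf_foo_delete(struct net *net, u32 index)
{
        struct tc_action_net *tn = net_generic(net, foo_net_id);

        return tcf_idr_delete_index(tn, index);
}

static struct tc_action_ops act_foo_ops = {
        .kind   = "foo",
        .owner  = THIS_MODULE,
        .init   = tcf_foo_init,
        .delete = tcf_foo_delete,
        /* .act (called under RCU BH lock), .dump, .lookup, .walk, .size, ... */
};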
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 5f43f7a70fe6..6def0351bcc3 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -108,6 +108,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
108 u32 banned_flags); 108 u32 banned_flags);
109bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2, 109bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
110 bool match_wildcard); 110 bool match_wildcard);
111bool inet_rcv_saddr_any(const struct sock *sk);
111void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); 112void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
112void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); 113void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
113 114
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index a5563d27a3eb..8003a9f6eb43 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -56,6 +56,7 @@ struct sockaddr_ieee802154 {
56#define WPAN_WANTACK 0 56#define WPAN_WANTACK 0
57#define WPAN_SECURITY 1 57#define WPAN_SECURITY 1
58#define WPAN_SECURITY_LEVEL 2 58#define WPAN_SECURITY_LEVEL 2
59#define WPAN_WANTLQI 3
59 60
60#define WPAN_SECURITY_DEFAULT 0 61#define WPAN_SECURITY_DEFAULT 0
61#define WPAN_SECURITY_OFF 1 62#define WPAN_SECURITY_OFF 1
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 8ae8ee004258..f53edb3754bc 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -61,7 +61,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
61 struct msghdr *, size_t, 61 struct msghdr *, size_t,
62 rxrpc_notify_end_tx_t); 62 rxrpc_notify_end_tx_t);
63int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *, 63int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
64 void *, size_t, size_t *, bool, u32 *, u16 *); 64 struct iov_iter *, bool, u32 *, u16 *);
65bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, 65bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
66 u32, int, const char *); 66 u32, int, const char *);
67void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); 67void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 9324ac2d9ff2..43913ae79f64 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -64,7 +64,8 @@ struct vsock_sock {
64 struct list_head pending_links; 64 struct list_head pending_links;
65 struct list_head accept_queue; 65 struct list_head accept_queue;
66 bool rejected; 66 bool rejected;
67 struct delayed_work dwork; 67 struct delayed_work connect_work;
68 struct delayed_work pending_work;
68 struct delayed_work close_work; 69 struct delayed_work close_work;
69 bool close_work_scheduled; 70 bool close_work_scheduled;
70 u32 peer_shutdown; 71 u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
77 78
78s64 vsock_stream_has_data(struct vsock_sock *vsk); 79s64 vsock_stream_has_data(struct vsock_sock *vsk);
79s64 vsock_stream_has_space(struct vsock_sock *vsk); 80s64 vsock_stream_has_space(struct vsock_sock *vsk);
80void vsock_pending_work(struct work_struct *work);
81struct sock *__vsock_create(struct net *net, 81struct sock *__vsock_create(struct net *net,
82 struct socket *sock, 82 struct socket *sock,
83 struct sock *parent, 83 struct sock *parent,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 53ce8176c313..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
271 int flags); 271 int flags);
272int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, 272int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
273 size_t len, int flags); 273 size_t len, int flags);
274__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events); 274__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
275int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 275int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
276int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); 276int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
277int bt_sock_wait_ready(struct sock *sk, unsigned long flags); 277int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 1668211297a9..cdd9f1fe7cfa 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -183,6 +183,15 @@ enum {
183 * during the hdev->setup vendor callback. 183 * during the hdev->setup vendor callback.
184 */ 184 */
185 HCI_QUIRK_NON_PERSISTENT_DIAG, 185 HCI_QUIRK_NON_PERSISTENT_DIAG,
186
187 /* When this quirk is set, setup() is run after every
188 * open() and not just after the first open().
189 *
190 * This quirk can be set before hci_register_dev is called or
191 * during the hdev->setup vendor callback.
192 *
193 */
194 HCI_QUIRK_NON_PERSISTENT_SETUP,
186}; 195};
187 196
188/* HCI device flags */ 197/* HCI device flags */
@@ -260,6 +269,7 @@ enum {
260 HCI_VENDOR_DIAG, 269 HCI_VENDOR_DIAG,
261 HCI_FORCE_BREDR_SMP, 270 HCI_FORCE_BREDR_SMP,
262 HCI_FORCE_STATIC_ADDR, 271 HCI_FORCE_STATIC_ADDR,
272 HCI_LL_RPA_RESOLUTION,
263 273
264 __HCI_NUM_FLAGS, 274 __HCI_NUM_FLAGS,
265}; 275};
@@ -291,6 +301,14 @@ enum {
291#define HCI_DH3 0x0800 301#define HCI_DH3 0x0800
292#define HCI_DH5 0x8000 302#define HCI_DH5 0x8000
293 303
304/* HCI packet types inverted masks */
305#define HCI_2DH1 0x0002
306#define HCI_3DH1 0x0004
307#define HCI_2DH3 0x0100
308#define HCI_3DH3 0x0200
309#define HCI_2DH5 0x1000
310#define HCI_3DH5 0x2000
311
294#define HCI_HV1 0x0020 312#define HCI_HV1 0x0020
295#define HCI_HV2 0x0040 313#define HCI_HV2 0x0040
296#define HCI_HV3 0x0080 314#define HCI_HV3 0x0080
@@ -354,6 +372,8 @@ enum {
354#define LMP_PCONTROL 0x04 372#define LMP_PCONTROL 0x04
355#define LMP_TRANSPARENT 0x08 373#define LMP_TRANSPARENT 0x08
356 374
375#define LMP_EDR_2M 0x02
376#define LMP_EDR_3M 0x04
357#define LMP_RSSI_INQ 0x40 377#define LMP_RSSI_INQ 0x40
358#define LMP_ESCO 0x80 378#define LMP_ESCO 0x80
359 379
@@ -361,7 +381,9 @@ enum {
361#define LMP_EV5 0x02 381#define LMP_EV5 0x02
362#define LMP_NO_BREDR 0x20 382#define LMP_NO_BREDR 0x20
363#define LMP_LE 0x40 383#define LMP_LE 0x40
384#define LMP_EDR_3SLOT 0x80
364 385
386#define LMP_EDR_5SLOT 0x01
365#define LMP_SNIFF_SUBR 0x02 387#define LMP_SNIFF_SUBR 0x02
366#define LMP_PAUSE_ENC 0x04 388#define LMP_PAUSE_ENC 0x04
367#define LMP_EDR_ESCO_2M 0x20 389#define LMP_EDR_ESCO_2M 0x20
@@ -398,7 +420,12 @@ enum {
398#define HCI_LE_SLAVE_FEATURES 0x08 420#define HCI_LE_SLAVE_FEATURES 0x08
399#define HCI_LE_PING 0x10 421#define HCI_LE_PING 0x10
400#define HCI_LE_DATA_LEN_EXT 0x20 422#define HCI_LE_DATA_LEN_EXT 0x20
423#define HCI_LE_PHY_2M 0x01
424#define HCI_LE_PHY_CODED 0x08
425#define HCI_LE_EXT_ADV 0x10
401#define HCI_LE_EXT_SCAN_POLICY 0x80 426#define HCI_LE_EXT_SCAN_POLICY 0x80
427#define HCI_LE_PHY_2M 0x01
428#define HCI_LE_PHY_CODED 0x08
402#define HCI_LE_CHAN_SEL_ALG2 0x40 429#define HCI_LE_CHAN_SEL_ALG2 0x40
403 430
404/* Connection modes */ 431/* Connection modes */
@@ -1490,6 +1517,16 @@ struct hci_cp_le_write_def_data_len {
1490 __le16 tx_time; 1517 __le16 tx_time;
1491} __packed; 1518} __packed;
1492 1519
1520#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
1521
1522#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
1523struct hci_rp_le_read_resolv_list_size {
1524 __u8 status;
1525 __u8 size;
1526} __packed;
1527
1528#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
1529
1493#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f 1530#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
1494struct hci_rp_le_read_max_data_len { 1531struct hci_rp_le_read_max_data_len {
1495 __u8 status; 1532 __u8 status;
@@ -1506,6 +1543,134 @@ struct hci_cp_le_set_default_phy {
1506 __u8 rx_phys; 1543 __u8 rx_phys;
1507} __packed; 1544} __packed;
1508 1545
1546#define HCI_LE_SET_PHY_1M 0x01
1547#define HCI_LE_SET_PHY_2M 0x02
1548#define HCI_LE_SET_PHY_CODED 0x04
1549
1550#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
1551struct hci_cp_le_set_ext_scan_params {
1552 __u8 own_addr_type;
1553 __u8 filter_policy;
1554 __u8 scanning_phys;
1555 __u8 data[0];
1556} __packed;
1557
1558#define LE_SCAN_PHY_1M 0x01
1559#define LE_SCAN_PHY_2M 0x02
1560#define LE_SCAN_PHY_CODED 0x04
1561
1562struct hci_cp_le_scan_phy_params {
1563 __u8 type;
1564 __le16 interval;
1565 __le16 window;
1566} __packed;
1567
1568#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
1569struct hci_cp_le_set_ext_scan_enable {
1570 __u8 enable;
1571 __u8 filter_dup;
1572 __le16 duration;
1573 __le16 period;
1574} __packed;
1575
1576#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
1577struct hci_cp_le_ext_create_conn {
1578 __u8 filter_policy;
1579 __u8 own_addr_type;
1580 __u8 peer_addr_type;
1581 bdaddr_t peer_addr;
1582 __u8 phys;
1583 __u8 data[0];
1584} __packed;
1585
1586struct hci_cp_le_ext_conn_param {
1587 __le16 scan_interval;
1588 __le16 scan_window;
1589 __le16 conn_interval_min;
1590 __le16 conn_interval_max;
1591 __le16 conn_latency;
1592 __le16 supervision_timeout;
1593 __le16 min_ce_len;
1594 __le16 max_ce_len;
1595} __packed;
1596
1597#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
1598struct hci_rp_le_read_num_supported_adv_sets {
1599 __u8 status;
1600 __u8 num_of_sets;
1601} __packed;
1602
1603#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
1604struct hci_cp_le_set_ext_adv_params {
1605 __u8 handle;
1606 __le16 evt_properties;
1607 __u8 min_interval[3];
1608 __u8 max_interval[3];
1609 __u8 channel_map;
1610 __u8 own_addr_type;
1611 __u8 peer_addr_type;
1612 bdaddr_t peer_addr;
1613 __u8 filter_policy;
1614 __u8 tx_power;
1615 __u8 primary_phy;
1616 __u8 secondary_max_skip;
1617 __u8 secondary_phy;
1618 __u8 sid;
1619 __u8 notif_enable;
1620} __packed;
1621
1622#define HCI_ADV_PHY_1M 0X01
1623#define HCI_ADV_PHY_2M 0x02
1624#define HCI_ADV_PHY_CODED 0x03
1625
1626struct hci_rp_le_set_ext_adv_params {
1627 __u8 status;
1628 __u8 tx_power;
1629} __packed;
1630
1631#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
1632struct hci_cp_le_set_ext_adv_enable {
1633 __u8 enable;
1634 __u8 num_of_sets;
1635 __u8 data[0];
1636} __packed;
1637
1638struct hci_cp_ext_adv_set {
1639 __u8 handle;
1640 __le16 duration;
1641 __u8 max_events;
1642} __packed;
1643
1644#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
1645struct hci_cp_le_set_ext_adv_data {
1646 __u8 handle;
1647 __u8 operation;
1648 __u8 frag_pref;
1649 __u8 length;
1650 __u8 data[HCI_MAX_AD_LENGTH];
1651} __packed;
1652
1653#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
1654struct hci_cp_le_set_ext_scan_rsp_data {
1655 __u8 handle;
1656 __u8 operation;
1657 __u8 frag_pref;
1658 __u8 length;
1659 __u8 data[HCI_MAX_AD_LENGTH];
1660} __packed;
1661
1662#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
1663
1664#define LE_SET_ADV_DATA_NO_FRAG 0x01
1665
1666#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
1667
1668#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
1669struct hci_cp_le_set_adv_set_rand_addr {
1670 __u8 handle;
1671 bdaddr_t bdaddr;
1672} __packed;
1673
1509/* ---- HCI Events ---- */ 1674/* ---- HCI Events ---- */
1510#define HCI_EV_INQUIRY_COMPLETE 0x01 1675#define HCI_EV_INQUIRY_COMPLETE 0x01
1511 1676
@@ -1893,6 +2058,23 @@ struct hci_ev_le_conn_complete {
1893#define LE_ADV_SCAN_IND 0x02 2058#define LE_ADV_SCAN_IND 0x02
1894#define LE_ADV_NONCONN_IND 0x03 2059#define LE_ADV_NONCONN_IND 0x03
1895#define LE_ADV_SCAN_RSP 0x04 2060#define LE_ADV_SCAN_RSP 0x04
2061#define LE_ADV_INVALID 0x05
2062
2063/* Legacy event types in extended adv report */
2064#define LE_LEGACY_ADV_IND 0x0013
2065#define LE_LEGACY_ADV_DIRECT_IND 0x0015
2066#define LE_LEGACY_ADV_SCAN_IND 0x0012
2067#define LE_LEGACY_NONCONN_IND 0x0010
2068#define LE_LEGACY_SCAN_RSP_ADV 0x001b
2069#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
2070
2071/* Extended Advertising event types */
2072#define LE_EXT_ADV_NON_CONN_IND 0x0000
2073#define LE_EXT_ADV_CONN_IND 0x0001
2074#define LE_EXT_ADV_SCAN_IND 0x0002
2075#define LE_EXT_ADV_DIRECT_IND 0x0004
2076#define LE_EXT_ADV_SCAN_RSP 0x0008
2077#define LE_EXT_ADV_LEGACY_PDU 0x0010
1896 2078
1897#define ADDR_LE_DEV_PUBLIC 0x00 2079#define ADDR_LE_DEV_PUBLIC 0x00
1898#define ADDR_LE_DEV_RANDOM 0x01 2080#define ADDR_LE_DEV_RANDOM 0x01
@@ -1957,6 +2139,48 @@ struct hci_ev_le_direct_adv_info {
1957 __s8 rssi; 2139 __s8 rssi;
1958} __packed; 2140} __packed;
1959 2141
2142#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
2143struct hci_ev_le_ext_adv_report {
2144 __le16 evt_type;
2145 __u8 bdaddr_type;
2146 bdaddr_t bdaddr;
2147 __u8 primary_phy;
2148 __u8 secondary_phy;
2149 __u8 sid;
2150 __u8 tx_power;
2151 __s8 rssi;
2152 __le16 interval;
2153 __u8 direct_addr_type;
2154 bdaddr_t direct_addr;
2155 __u8 length;
2156 __u8 data[0];
2157} __packed;
2158
2159#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
2160struct hci_ev_le_enh_conn_complete {
2161 __u8 status;
2162 __le16 handle;
2163 __u8 role;
2164 __u8 bdaddr_type;
2165 bdaddr_t bdaddr;
2166 bdaddr_t local_rpa;
2167 bdaddr_t peer_rpa;
2168 __le16 interval;
2169 __le16 latency;
2170 __le16 supervision_timeout;
2171 __u8 clk_accurancy;
2172} __packed;
2173
2174#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
2175struct hci_evt_le_ext_adv_set_term {
2176 __u8 status;
2177 __u8 handle;
2178 __le16 conn_handle;
2179 __u8 num_evts;
2180} __packed;
2181
2182#define HCI_EV_VENDOR 0xff
2183
1960/* Internal events generated by Bluetooth stack */ 2184/* Internal events generated by Bluetooth stack */
1961#define HCI_EV_STACK_INTERNAL 0xfd 2185#define HCI_EV_STACK_INTERNAL 0xfd
1962struct hci_ev_stack_internal { 2186struct hci_ev_stack_internal {
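A sketch of how the core might queue the new extended scan enable command; this assumes the usual hci_request helpers from net/bluetooth/hci_request.h, which are not part of this header.

static void start_ext_scan(struct hci_request *req)
{
        struct hci_cp_le_set_ext_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        cp.duration = 0;        /* scan until explicitly disabled */
        cp.period = 0;

        /* the per-PHY scan params (HCI_OP_LE_SET_EXT_SCAN_PARAMS) go first */
        hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), &cp);
}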
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 893bbbb5d2fa..0db1b9b428b7 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -171,6 +171,10 @@ struct adv_info {
171 __u8 adv_data[HCI_MAX_AD_LENGTH]; 171 __u8 adv_data[HCI_MAX_AD_LENGTH];
172 __u16 scan_rsp_len; 172 __u16 scan_rsp_len;
173 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; 173 __u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
174 __s8 tx_power;
175 bdaddr_t random_addr;
176 bool rpa_expired;
177 struct delayed_work rpa_expired_cb;
174}; 178};
175 179
176#define HCI_MAX_ADV_INSTANCES 5 180#define HCI_MAX_ADV_INSTANCES 5
@@ -221,6 +225,8 @@ struct hci_dev {
221 __u8 features[HCI_MAX_PAGES][8]; 225 __u8 features[HCI_MAX_PAGES][8];
222 __u8 le_features[8]; 226 __u8 le_features[8];
223 __u8 le_white_list_size; 227 __u8 le_white_list_size;
228 __u8 le_resolv_list_size;
229 __u8 le_num_of_adv_sets;
224 __u8 le_states[8]; 230 __u8 le_states[8];
225 __u8 commands[64]; 231 __u8 commands[64];
226 __u8 hci_ver; 232 __u8 hci_ver;
@@ -314,6 +320,9 @@ struct hci_dev {
314 unsigned long sco_last_tx; 320 unsigned long sco_last_tx;
315 unsigned long le_last_tx; 321 unsigned long le_last_tx;
316 322
323 __u8 le_tx_def_phys;
324 __u8 le_rx_def_phys;
325
317 struct workqueue_struct *workqueue; 326 struct workqueue_struct *workqueue;
318 struct workqueue_struct *req_workqueue; 327 struct workqueue_struct *req_workqueue;
319 328
@@ -367,6 +376,7 @@ struct hci_dev {
367 struct list_head identity_resolving_keys; 376 struct list_head identity_resolving_keys;
368 struct list_head remote_oob_data; 377 struct list_head remote_oob_data;
369 struct list_head le_white_list; 378 struct list_head le_white_list;
379 struct list_head le_resolv_list;
370 struct list_head le_conn_params; 380 struct list_head le_conn_params;
371 struct list_head pend_le_conns; 381 struct list_head pend_le_conns;
372 struct list_head pend_le_reports; 382 struct list_head pend_le_reports;
@@ -1106,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1106 u16 scan_rsp_len, u8 *scan_rsp_data, 1116 u16 scan_rsp_len, u8 *scan_rsp_data,
1107 u16 timeout, u16 duration); 1117 u16 timeout, u16 duration);
1108int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); 1118int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
1119void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
1109 1120
1110void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 1121void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
1111 1122
@@ -1136,6 +1147,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
1136#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) 1147#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
1137#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) 1148#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
1138#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) 1149#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
1150#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
1151#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
1152#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
1153#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
1139 1154
1140/* ----- Extended LMP capabilities ----- */ 1155/* ----- Extended LMP capabilities ----- */
1141#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER) 1156#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
@@ -1156,6 +1171,24 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
1156#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ 1171#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
1157 hci_dev_test_flag(dev, HCI_SC_ENABLED)) 1172 hci_dev_test_flag(dev, HCI_SC_ENABLED))
1158 1173
1174#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
1175 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
1176
1177#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
1178 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
1179
1180#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
1181 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
1182
1183/* Use ext scanning if both the ext scan param and ext scan enable commands are supported */
1184#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
1185 ((dev)->commands[37] & 0x40))
1186/* Use ext create connection if command is supported */
1187#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
1188
1189/* Extended advertising support */
1190#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
1191
1159/* ----- HCI protocols ----- */ 1192/* ----- HCI protocols ----- */
1160#define HCI_PROTO_DEFER 0x01 1193#define HCI_PROTO_DEFER 0x01
1161 1194
@@ -1529,6 +1562,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
1529 u8 instance); 1562 u8 instance);
1530void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, 1563void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1531 u8 instance); 1564 u8 instance);
1565int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
1532 1566
1533u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, 1567u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
1534 u16 to_multiplier); 1568 u16 to_multiplier);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index e7303eee65cd..9cee7ddc6741 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list {
101#define MGMT_SETTING_PRIVACY 0x00002000 101#define MGMT_SETTING_PRIVACY 0x00002000
102#define MGMT_SETTING_CONFIGURATION 0x00004000 102#define MGMT_SETTING_CONFIGURATION 0x00004000
103#define MGMT_SETTING_STATIC_ADDRESS 0x00008000 103#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
104#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
104 105
105#define MGMT_OP_READ_INFO 0x0004 106#define MGMT_OP_READ_INFO 0x0004
106#define MGMT_READ_INFO_SIZE 0 107#define MGMT_READ_INFO_SIZE 0
@@ -561,6 +562,12 @@ struct mgmt_rp_add_advertising {
561#define MGMT_ADV_FLAG_TX_POWER BIT(4) 562#define MGMT_ADV_FLAG_TX_POWER BIT(4)
562#define MGMT_ADV_FLAG_APPEARANCE BIT(5) 563#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
563#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6) 564#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
565#define MGMT_ADV_FLAG_SEC_1M BIT(7)
566#define MGMT_ADV_FLAG_SEC_2M BIT(8)
567#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
568
569#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
570 MGMT_ADV_FLAG_SEC_CODED)
564 571
565#define MGMT_OP_REMOVE_ADVERTISING 0x003F 572#define MGMT_OP_REMOVE_ADVERTISING 0x003F
566struct mgmt_cp_remove_advertising { 573struct mgmt_cp_remove_advertising {
@@ -604,6 +611,49 @@ struct mgmt_cp_set_appearance {
604} __packed; 611} __packed;
605#define MGMT_SET_APPEARANCE_SIZE 2 612#define MGMT_SET_APPEARANCE_SIZE 2
606 613
614#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
615struct mgmt_rp_get_phy_confguration {
616 __le32 supported_phys;
617 __le32 configurable_phys;
618 __le32 selected_phys;
619} __packed;
620#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
621
622#define MGMT_PHY_BR_1M_1SLOT 0x00000001
623#define MGMT_PHY_BR_1M_3SLOT 0x00000002
624#define MGMT_PHY_BR_1M_5SLOT 0x00000004
625#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
626#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
627#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
628#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
629#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
630#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
631#define MGMT_PHY_LE_1M_TX 0x00000200
632#define MGMT_PHY_LE_1M_RX 0x00000400
633#define MGMT_PHY_LE_2M_TX 0x00000800
634#define MGMT_PHY_LE_2M_RX 0x00001000
635#define MGMT_PHY_LE_CODED_TX 0x00002000
636#define MGMT_PHY_LE_CODED_RX 0x00004000
637
638#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
639 MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
640 MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
641 MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
642 MGMT_PHY_EDR_3M_5SLOT)
643#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
644 MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
645 MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
646#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
647 MGMT_PHY_LE_CODED_TX)
648#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
649 MGMT_PHY_LE_CODED_RX)
650
651#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
652struct mgmt_cp_set_phy_confguration {
653 __le32 selected_phys;
654} __packed;
655#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
656
607#define MGMT_EV_CMD_COMPLETE 0x0001 657#define MGMT_EV_CMD_COMPLETE 0x0001
608struct mgmt_ev_cmd_complete { 658struct mgmt_ev_cmd_complete {
609 __le16 opcode; 659 __le16 opcode;
@@ -824,3 +874,8 @@ struct mgmt_ev_ext_info_changed {
824 __le16 eir_len; 874 __le16 eir_len;
825 __u8 eir[0]; 875 __u8 eir[0];
826} __packed; 876} __packed;
877
878#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
879struct mgmt_ev_phy_configuration_changed {
880 __le32 selected_phys;
881} __packed;
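For reference, a hypothetical management client asking the kernel to use LE 2M in both directions on top of 1M might fill the new command payload as below; how the mgmt packet is framed and written to the control socket is outside this header.

static void fill_set_phy(struct mgmt_cp_set_phy_confguration *cp)
{
        /* the full set of PHYs the client wants selected */
        cp->selected_phys = cpu_to_le32(MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX |
                                        MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX);
}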
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index f358ad5e4214..fc3111515f5c 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -283,7 +283,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
283 "none", 283 "none",
284 "unknown" 284 "unknown"
285 }; 285 };
286 int max_size = sizeof(churn_description) / sizeof(churn_description[0]); 286 int max_size = ARRAY_SIZE(churn_description);
287 287
288 if (state >= max_size) 288 if (state >= max_size)
289 state = max_size - 1; 289 state = max_size - 1;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 808f1d167349..a2d058170ea3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave)
411 bond_is_active_slave(slave); 411 bond_is_active_slave(slave);
412} 412}
413 413
414static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
415{
416 struct slave *slave;
417 bool active;
418
419 rcu_read_lock();
420 slave = bond_slave_get_rcu(slave_dev);
421 active = bond_is_active_slave(slave);
422 rcu_read_unlock();
423
424 return active;
425}
426
414static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len) 427static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
415{ 428{
416 if (len == ETH_ALEN) { 429 if (len == ETH_ALEN) {
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c5187438af38..ba61cdd09eaa 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
121#endif 121#endif
122} 122}
123 123
124static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
125{
126 if (sk_can_busy_loop(sock->sk) &&
127 events && (events & POLL_BUSY_LOOP)) {
128 /* once, only if requested by syscall */
129 sk_busy_loop(sock->sk, 1);
130 }
131}
132
133/* if this socket can poll_ll, tell the system call */
134static inline __poll_t sock_poll_busy_flag(struct socket *sock)
135{
136 return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
137}
138
139/* used in the NIC receive handler to mark the skb */ 124/* used in the NIC receive handler to mark the skb */
140static inline void skb_mark_napi_id(struct sk_buff *skb, 125static inline void skb_mark_napi_id(struct sk_buff *skb,
141 struct napi_struct *napi) 126 struct napi_struct *napi)
@@ -151,6 +136,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
151#ifdef CONFIG_NET_RX_BUSY_POLL 136#ifdef CONFIG_NET_RX_BUSY_POLL
152 sk->sk_napi_id = skb->napi_id; 137 sk->sk_napi_id = skb->napi_id;
153#endif 138#endif
139 sk_rx_queue_set(sk, skb);
154} 140}
155 141
156/* variant used for unconnected sockets */ 142/* variant used for unconnected sockets */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5fbfe61f41c6..9a850973e09a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap {
285 struct ieee80211_vht_mcs_info vht_mcs; 285 struct ieee80211_vht_mcs_info vht_mcs;
286}; 286};
287 287
288#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
289
290/**
291 * struct ieee80211_sta_he_cap - STA's HE capabilities
292 *
293 * This structure describes most essential parameters needed
294 * to describe 802.11ax HE capabilities for a STA.
295 *
296 * @has_he: true iff HE data is valid.
297 * @he_cap_elem: Fixed portion of the HE capabilities element.
298 * @he_mcs_nss_supp: The supported NSS/MCS combinations.
299 * @ppe_thres: Holds the PPE Thresholds data.
300 */
301struct ieee80211_sta_he_cap {
302 bool has_he;
303 struct ieee80211_he_cap_elem he_cap_elem;
304 struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
305 u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
306};
307
308/**
309 * struct ieee80211_sband_iftype_data
310 *
311 * This structure encapsulates sband data that is relevant for the
312 * interface types defined in @types_mask. Each type in the
313 * @types_mask must be unique across all instances of iftype_data.
314 *
315 * @types_mask: interface types mask
316 * @he_cap: holds the HE capabilities
317 */
318struct ieee80211_sband_iftype_data {
319 u16 types_mask;
320 struct ieee80211_sta_he_cap he_cap;
321};
322
288/** 323/**
289 * struct ieee80211_supported_band - frequency band definition 324 * struct ieee80211_supported_band - frequency band definition
290 * 325 *
@@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap {
301 * @n_bitrates: Number of bitrates in @bitrates 336 * @n_bitrates: Number of bitrates in @bitrates
302 * @ht_cap: HT capabilities in this band 337 * @ht_cap: HT capabilities in this band
303 * @vht_cap: VHT capabilities in this band 338 * @vht_cap: VHT capabilities in this band
339 * @n_iftype_data: number of iftype data entries
340 * @iftype_data: interface type data entries. Note that the bits in
341 * @types_mask inside this structure cannot overlap (i.e. only
342 * one occurrence of each type is allowed across all instances of
343 * iftype_data).
304 */ 344 */
305struct ieee80211_supported_band { 345struct ieee80211_supported_band {
306 struct ieee80211_channel *channels; 346 struct ieee80211_channel *channels;
@@ -310,9 +350,56 @@ struct ieee80211_supported_band {
310 int n_bitrates; 350 int n_bitrates;
311 struct ieee80211_sta_ht_cap ht_cap; 351 struct ieee80211_sta_ht_cap ht_cap;
312 struct ieee80211_sta_vht_cap vht_cap; 352 struct ieee80211_sta_vht_cap vht_cap;
353 u16 n_iftype_data;
354 const struct ieee80211_sband_iftype_data *iftype_data;
313}; 355};
314 356
315/** 357/**
358 * ieee80211_get_sband_iftype_data - return sband data for a given iftype
359 * @sband: the sband to search for the STA on
360 * @iftype: enum nl80211_iftype
361 *
 362 * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
363 */
364static inline const struct ieee80211_sband_iftype_data *
365ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
366 u8 iftype)
367{
368 int i;
369
370 if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
371 return NULL;
372
373 for (i = 0; i < sband->n_iftype_data; i++) {
374 const struct ieee80211_sband_iftype_data *data =
375 &sband->iftype_data[i];
376
377 if (data->types_mask & BIT(iftype))
378 return data;
379 }
380
381 return NULL;
382}
383
384/**
385 * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
386 * @sband: the sband to search for the STA on
387 *
 388 * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
389 */
390static inline const struct ieee80211_sta_he_cap *
391ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
392{
393 const struct ieee80211_sband_iftype_data *data =
394 ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION);
395
396 if (data && data->he_cap.has_he)
397 return &data->he_cap;
398
399 return NULL;
400}
401
402/**
316 * wiphy_read_of_freq_limits - read frequency limits from device tree 403 * wiphy_read_of_freq_limits - read frequency limits from device tree
317 * 404 *
318 * @wiphy: the wireless device to get extra limits for 405 * @wiphy: the wireless device to get extra limits for
@@ -899,6 +986,8 @@ enum station_parameters_apply_mask {
899 * @opmode_notif: operating mode field from Operating Mode Notification 986 * @opmode_notif: operating mode field from Operating Mode Notification
900 * @opmode_notif_used: information if operating mode field is used 987 * @opmode_notif_used: information if operating mode field is used
901 * @support_p2p_ps: information if station supports P2P PS mechanism 988 * @support_p2p_ps: information if station supports P2P PS mechanism
989 * @he_capa: HE capabilities of station
990 * @he_capa_len: the length of the HE capabilities
902 */ 991 */
903struct station_parameters { 992struct station_parameters {
904 const u8 *supported_rates; 993 const u8 *supported_rates;
@@ -926,6 +1015,8 @@ struct station_parameters {
926 u8 opmode_notif; 1015 u8 opmode_notif;
927 bool opmode_notif_used; 1016 bool opmode_notif_used;
928 int support_p2p_ps; 1017 int support_p2p_ps;
1018 const struct ieee80211_he_cap_elem *he_capa;
1019 u8 he_capa_len;
929}; 1020};
930 1021
931/** 1022/**
@@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
1000 * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS 1091 * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
1001 * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval 1092 * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
1002 * @RATE_INFO_FLAGS_60G: 60GHz MCS 1093 * @RATE_INFO_FLAGS_60G: 60GHz MCS
1094 * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
1003 */ 1095 */
1004enum rate_info_flags { 1096enum rate_info_flags {
1005 RATE_INFO_FLAGS_MCS = BIT(0), 1097 RATE_INFO_FLAGS_MCS = BIT(0),
1006 RATE_INFO_FLAGS_VHT_MCS = BIT(1), 1098 RATE_INFO_FLAGS_VHT_MCS = BIT(1),
1007 RATE_INFO_FLAGS_SHORT_GI = BIT(2), 1099 RATE_INFO_FLAGS_SHORT_GI = BIT(2),
1008 RATE_INFO_FLAGS_60G = BIT(3), 1100 RATE_INFO_FLAGS_60G = BIT(3),
1101 RATE_INFO_FLAGS_HE_MCS = BIT(4),
1009}; 1102};
1010 1103
1011/** 1104/**
@@ -1019,6 +1112,7 @@ enum rate_info_flags {
1019 * @RATE_INFO_BW_40: 40 MHz bandwidth 1112 * @RATE_INFO_BW_40: 40 MHz bandwidth
1020 * @RATE_INFO_BW_80: 80 MHz bandwidth 1113 * @RATE_INFO_BW_80: 80 MHz bandwidth
1021 * @RATE_INFO_BW_160: 160 MHz bandwidth 1114 * @RATE_INFO_BW_160: 160 MHz bandwidth
1115 * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
1022 */ 1116 */
1023enum rate_info_bw { 1117enum rate_info_bw {
1024 RATE_INFO_BW_20 = 0, 1118 RATE_INFO_BW_20 = 0,
@@ -1027,6 +1121,7 @@ enum rate_info_bw {
1027 RATE_INFO_BW_40, 1121 RATE_INFO_BW_40,
1028 RATE_INFO_BW_80, 1122 RATE_INFO_BW_80,
1029 RATE_INFO_BW_160, 1123 RATE_INFO_BW_160,
1124 RATE_INFO_BW_HE_RU,
1030}; 1125};
1031 1126
1032/** 1127/**
@@ -1035,10 +1130,14 @@ enum rate_info_bw {
1035 * Information about a receiving or transmitting bitrate 1130 * Information about a receiving or transmitting bitrate
1036 * 1131 *
1037 * @flags: bitflag of flags from &enum rate_info_flags 1132 * @flags: bitflag of flags from &enum rate_info_flags
1038 * @mcs: mcs index if struct describes a 802.11n bitrate 1133 * @mcs: mcs index if struct describes an HT/VHT/HE rate
1039 * @legacy: bitrate in 100kbit/s for 802.11abg 1134 * @legacy: bitrate in 100kbit/s for 802.11abg
1040 * @nss: number of streams (VHT only) 1135 * @nss: number of streams (VHT & HE only)
1041 * @bw: bandwidth (from &enum rate_info_bw) 1136 * @bw: bandwidth (from &enum rate_info_bw)
1137 * @he_gi: HE guard interval (from &enum nl80211_he_gi)
1138 * @he_dcm: HE DCM value
1139 * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
1140 * only valid if bw is %RATE_INFO_BW_HE_RU)
1042 */ 1141 */
1043struct rate_info { 1142struct rate_info {
1044 u8 flags; 1143 u8 flags;
@@ -1046,6 +1145,9 @@ struct rate_info {
1046 u16 legacy; 1145 u16 legacy;
1047 u8 nss; 1146 u8 nss;
1048 u8 bw; 1147 u8 bw;
1148 u8 he_gi;
1149 u8 he_dcm;
1150 u8 he_ru_alloc;
1049}; 1151};
1050 1152
1051/** 1153/**
@@ -5835,10 +5937,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
5835/** 5937/**
5836 * cfg80211_rx_control_port - notification about a received control port frame 5938 * cfg80211_rx_control_port - notification about a received control port frame
5837 * @dev: The device the frame matched to 5939 * @dev: The device the frame matched to
 5838 * @buf: control port frame 5940 * @skb: The skb with the control port frame. It is assumed that the skb
5839 * @len: length of the frame data 5941 * is 802.3 formatted (with 802.3 header). The skb can be non-linear.
5840 * @addr: The peer from which the frame was received 5942 * This function does not take ownership of the skb, so the caller is
5841 * @proto: frame protocol, typically PAE or Pre-authentication 5943 * responsible for any cleanup. The caller must also ensure that
5944 * skb->protocol is set appropriately.
5842 * @unencrypted: Whether the frame was received unencrypted 5945 * @unencrypted: Whether the frame was received unencrypted
5843 * 5946 *
5844 * This function is used to inform userspace about a received control port 5947 * This function is used to inform userspace about a received control port
@@ -5851,8 +5954,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
5851 * Return: %true if the frame was passed to userspace 5954 * Return: %true if the frame was passed to userspace
5852 */ 5955 */
5853bool cfg80211_rx_control_port(struct net_device *dev, 5956bool cfg80211_rx_control_port(struct net_device *dev,
5854 const u8 *buf, size_t len, 5957 struct sk_buff *skb, bool unencrypted);
5855 const u8 *addr, u16 proto, bool unencrypted);
5856 5958
5857/** 5959/**
5858 * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event 5960 * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
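The sband iftype_data plumbing added above is easiest to read from the consumer side. A minimal sketch of how a driver might probe for HE STA support, using only the helpers declared in this hunk; the surrounding function and its name are hypothetical:

/* Hypothetical driver snippet: check whether an sband advertises HE STA
 * capabilities before enabling 802.11ax features.
 */
static bool drv_sband_has_he(const struct ieee80211_supported_band *sband)
{
	const struct ieee80211_sta_he_cap *he_cap;

	/* NULL when no iftype_data entry covers NL80211_IFTYPE_STATION,
	 * or when that entry carries no valid HE data (has_he == false).
	 */
	he_cap = ieee80211_get_he_sta_cap(sband);

	return he_cap != NULL;
}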
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 0e5e91be2d30..e22a8a3c089b 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -34,6 +34,19 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
34int dcb_ieee_delapp(struct net_device *, struct dcb_app *); 34int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
35u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *); 35u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
36 36
37struct dcb_ieee_app_prio_map {
38 u64 map[IEEE_8021QAZ_MAX_TCS];
39};
40void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
41 struct dcb_ieee_app_prio_map *p_map);
42
43struct dcb_ieee_app_dscp_map {
44 u8 map[64];
45};
46void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
47 struct dcb_ieee_app_dscp_map *p_map);
48u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev);
49
37int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, 50int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
38 u32 seq, u32 pid); 51 u32 seq, u32 pid);
39int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, 52int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
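For the DSCP map getters added here, a hedged usage sketch; the driver-side function is made up, but the map layout follows the structs declared above (one u8 priority mask per DSCP value):

/* Hypothetical consumer: walk the DSCP-to-priority-mask map that the dcbnl
 * core derives from the IEEE APP table of this netdev.
 */
static void drv_apply_dscp_prio_map(struct net_device *dev)
{
	struct dcb_ieee_app_dscp_map dscp_map;
	int dscp;

	dcb_ieee_getapp_dscp_prio_mask_map(dev, &dscp_map);

	for (dscp = 0; dscp < 64; dscp++) {
		u8 prio_mask = dscp_map.map[dscp];

		if (prio_mask)
			pr_debug("DSCP %d maps to priority mask 0x%02x\n",
				 dscp, prio_mask);
	}
}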
diff --git a/include/net/devlink.h b/include/net/devlink.h
index e336ea9c73df..b9b89d6604d4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -27,6 +27,9 @@ struct devlink {
27 struct list_head sb_list; 27 struct list_head sb_list;
28 struct list_head dpipe_table_list; 28 struct list_head dpipe_table_list;
29 struct list_head resource_list; 29 struct list_head resource_list;
30 struct list_head param_list;
31 struct list_head region_list;
32 u32 snapshot_id;
30 struct devlink_dpipe_headers *dpipe_headers; 33 struct devlink_dpipe_headers *dpipe_headers;
31 const struct devlink_ops *ops; 34 const struct devlink_ops *ops;
32 struct device *dev; 35 struct device *dev;
@@ -295,6 +298,115 @@ struct devlink_resource {
295 298
296#define DEVLINK_RESOURCE_ID_PARENT_TOP 0 299#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
297 300
301#define DEVLINK_PARAM_MAX_STRING_VALUE 32
302enum devlink_param_type {
303 DEVLINK_PARAM_TYPE_U8,
304 DEVLINK_PARAM_TYPE_U16,
305 DEVLINK_PARAM_TYPE_U32,
306 DEVLINK_PARAM_TYPE_STRING,
307 DEVLINK_PARAM_TYPE_BOOL,
308};
309
310union devlink_param_value {
311 u8 vu8;
312 u16 vu16;
313 u32 vu32;
314 const char *vstr;
315 bool vbool;
316};
317
318struct devlink_param_gset_ctx {
319 union devlink_param_value val;
320 enum devlink_param_cmode cmode;
321};
322
323/**
324 * struct devlink_param - devlink configuration parameter data
325 * @name: name of the parameter
326 * @generic: indicates if the parameter is generic or driver specific
327 * @type: parameter type
328 * @supported_cmodes: bitmap of supported configuration modes
329 * @get: get parameter value, used for runtime and permanent
330 * configuration modes
331 * @set: set parameter value, used for runtime and permanent
332 * configuration modes
333 * @validate: validate input value is applicable (within value range, etc.)
334 *
335 * This struct should be used by the driver to fill the data for
336 * a parameter it registers.
337 */
338struct devlink_param {
339 u32 id;
340 const char *name;
341 bool generic;
342 enum devlink_param_type type;
343 unsigned long supported_cmodes;
344 int (*get)(struct devlink *devlink, u32 id,
345 struct devlink_param_gset_ctx *ctx);
346 int (*set)(struct devlink *devlink, u32 id,
347 struct devlink_param_gset_ctx *ctx);
348 int (*validate)(struct devlink *devlink, u32 id,
349 union devlink_param_value val,
350 struct netlink_ext_ack *extack);
351};
352
353struct devlink_param_item {
354 struct list_head list;
355 const struct devlink_param *param;
356 union devlink_param_value driverinit_value;
357 bool driverinit_value_valid;
358};
359
360enum devlink_param_generic_id {
361 DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
362 DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
363 DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
364 DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
365
 366 /* add new param generic ids above here */
367 __DEVLINK_PARAM_GENERIC_ID_MAX,
368 DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1,
369};
370
371#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset"
372#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
373
374#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs"
375#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32
376
377#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov"
378#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL
379
380#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
381#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
382
383#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
384{ \
385 .id = DEVLINK_PARAM_GENERIC_ID_##_id, \
386 .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
387 .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
388 .generic = true, \
389 .supported_cmodes = _cmodes, \
390 .get = _get, \
391 .set = _set, \
392 .validate = _validate, \
393}
394
395#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \
396{ \
397 .id = _id, \
398 .name = _name, \
399 .type = _type, \
400 .supported_cmodes = _cmodes, \
401 .get = _get, \
402 .set = _set, \
403 .validate = _validate, \
404}
405
406struct devlink_region;
407
408typedef void devlink_snapshot_data_dest_t(const void *data);
409
298struct devlink_ops { 410struct devlink_ops {
299 int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); 411 int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
300 int (*port_type_set)(struct devlink_port *devlink_port, 412 int (*port_type_set)(struct devlink_port *devlink_port,
@@ -430,6 +542,26 @@ void devlink_resource_occ_get_register(struct devlink *devlink,
430 void *occ_get_priv); 542 void *occ_get_priv);
431void devlink_resource_occ_get_unregister(struct devlink *devlink, 543void devlink_resource_occ_get_unregister(struct devlink *devlink,
432 u64 resource_id); 544 u64 resource_id);
545int devlink_params_register(struct devlink *devlink,
546 const struct devlink_param *params,
547 size_t params_count);
548void devlink_params_unregister(struct devlink *devlink,
549 const struct devlink_param *params,
550 size_t params_count);
551int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
552 union devlink_param_value *init_val);
553int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
554 union devlink_param_value init_val);
555void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
556struct devlink_region *devlink_region_create(struct devlink *devlink,
557 const char *region_name,
558 u32 region_max_snapshots,
559 u64 region_size);
560void devlink_region_destroy(struct devlink_region *region);
561u32 devlink_region_shapshot_id_get(struct devlink *devlink);
562int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
563 u8 *data, u32 snapshot_id,
564 devlink_snapshot_data_dest_t *data_destructor);
433 565
434#else 566#else
435 567
@@ -622,6 +754,69 @@ devlink_resource_occ_get_unregister(struct devlink *devlink,
622{ 754{
623} 755}
624 756
757static inline int
758devlink_params_register(struct devlink *devlink,
759 const struct devlink_param *params,
760 size_t params_count)
761{
762 return 0;
763}
764
765static inline void
766devlink_params_unregister(struct devlink *devlink,
767 const struct devlink_param *params,
768 size_t params_count)
769{
770
771}
772
773static inline int
774devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
775 union devlink_param_value *init_val)
776{
777 return -EOPNOTSUPP;
778}
779
780static inline int
781devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
782 union devlink_param_value init_val)
783{
784 return -EOPNOTSUPP;
785}
786
787static inline void
788devlink_param_value_changed(struct devlink *devlink, u32 param_id)
789{
790}
791
792static inline struct devlink_region *
793devlink_region_create(struct devlink *devlink,
794 const char *region_name,
795 u32 region_max_snapshots,
796 u64 region_size)
797{
798 return NULL;
799}
800
801static inline void
802devlink_region_destroy(struct devlink_region *region)
803{
804}
805
806static inline u32
807devlink_region_shapshot_id_get(struct devlink *devlink)
808{
809 return 0;
810}
811
812static inline int
813devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
814 u8 *data, u32 snapshot_id,
815 devlink_snapshot_data_dest_t *data_destructor)
816{
817 return 0;
818}
819
625#endif 820#endif
626 821
627#endif /* _NET_DEVLINK_H_ */ 822#endif /* _NET_DEVLINK_H_ */
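The parameter API above is driven from a table of struct devlink_param entries. A rough sketch of how a driver could register one generic parameter; the callbacks are hypothetical, and DEVLINK_PARAM_CMODE_RUNTIME is assumed to come from the (not shown here) enum devlink_param_cmode:

/* Hypothetical driver callbacks; ctx->val carries the typed value. */
static int drv_max_macs_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	ctx->val.vu32 = 128;	/* made-up current value */
	return 0;
}

static int drv_max_macs_set(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	/* apply ctx->val.vu32 to hardware/firmware here */
	return 0;
}

static const struct devlink_param drv_params[] = {
	DEVLINK_PARAM_GENERIC(MAX_MACS,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      drv_max_macs_get, drv_max_macs_set, NULL),
};

/* probe:  devlink_params_register(devlink, drv_params, ARRAY_SIZE(drv_params));
 * remove: devlink_params_unregister(devlink, drv_params, ARRAY_SIZE(drv_params));
 */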
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fdbd6082945d..461e8a7661b7 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -259,6 +259,9 @@ struct dsa_switch {
259 /* Number of switch port queues */ 259 /* Number of switch port queues */
260 unsigned int num_tx_queues; 260 unsigned int num_tx_queues;
261 261
262 unsigned long *bitmap;
263 unsigned long _bitmap;
264
262 /* Dynamically allocated ports, keep last */ 265 /* Dynamically allocated ports, keep last */
263 size_t num_ports; 266 size_t num_ports;
264 struct dsa_port ports[]; 267 struct dsa_port ports[];
diff --git a/include/net/dst.h b/include/net/dst.h
index b3219cd8a5a1..7f735e76ca73 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -475,6 +475,14 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
475 return dst_orig; 475 return dst_orig;
476} 476}
477 477
478static inline struct dst_entry *
479xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
480 const struct flowi *fl, const struct sock *sk,
481 int flags, u32 if_id)
482{
483 return dst_orig;
484}
485
478static inline struct dst_entry *xfrm_lookup_route(struct net *net, 486static inline struct dst_entry *xfrm_lookup_route(struct net *net,
479 struct dst_entry *dst_orig, 487 struct dst_entry *dst_orig,
480 const struct flowi *fl, 488 const struct flowi *fl,
@@ -494,6 +502,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
494 const struct flowi *fl, const struct sock *sk, 502 const struct flowi *fl, const struct sock *sk,
495 int flags); 503 int flags);
496 504
505struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
506 struct dst_entry *dst_orig,
507 const struct flowi *fl,
508 const struct sock *sk, int flags,
509 u32 if_id);
510
497struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, 511struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
498 const struct flowi *fl, const struct sock *sk, 512 const struct flowi *fl, const struct sock *sk,
499 int flags); 513 int flags);
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index adc24df56b90..6a4586dcdede 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -47,7 +47,7 @@ struct flow_dissector_key_tags {
47struct flow_dissector_key_vlan { 47struct flow_dissector_key_vlan {
48 u16 vlan_id:12, 48 u16 vlan_id:12,
49 vlan_priority:3; 49 vlan_priority:3;
50 u16 padding; 50 __be16 vlan_tpid;
51}; 51};
52 52
53struct flow_dissector_key_mpls { 53struct flow_dissector_key_mpls {
@@ -57,6 +57,21 @@ struct flow_dissector_key_mpls {
57 mpls_label:20; 57 mpls_label:20;
58}; 58};
59 59
60#define FLOW_DIS_TUN_OPTS_MAX 255
61/**
62 * struct flow_dissector_key_enc_opts:
63 * @data: tunnel option data
64 * @len: length of tunnel option data
65 * @dst_opt_type: tunnel option type
66 */
67struct flow_dissector_key_enc_opts {
68 u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired
69 * here but seems difficult to #include
70 */
71 u8 len;
72 __be16 dst_opt_type;
73};
74
60struct flow_dissector_key_keyid { 75struct flow_dissector_key_keyid {
61 __be32 keyid; 76 __be32 keyid;
62}; 77};
@@ -206,6 +221,9 @@ enum flow_dissector_key_id {
206 FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ 221 FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
207 FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ 222 FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
208 FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ 223 FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
 224 FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
225 FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
226 FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
209 227
210 FLOW_DISSECTOR_KEY_MAX, 228 FLOW_DISSECTOR_KEY_MAX,
211}; 229};
@@ -237,6 +255,7 @@ struct flow_keys {
237 struct flow_dissector_key_basic basic; 255 struct flow_dissector_key_basic basic;
238 struct flow_dissector_key_tags tags; 256 struct flow_dissector_key_tags tags;
239 struct flow_dissector_key_vlan vlan; 257 struct flow_dissector_key_vlan vlan;
258 struct flow_dissector_key_vlan cvlan;
240 struct flow_dissector_key_keyid keyid; 259 struct flow_dissector_key_keyid keyid;
241 struct flow_dissector_key_ports ports; 260 struct flow_dissector_key_ports ports;
242 struct flow_dissector_key_addrs addrs; 261 struct flow_dissector_key_addrs addrs;
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 0304ba2ae353..883bb9085f15 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -59,13 +59,13 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
59int gen_new_estimator(struct gnet_stats_basic_packed *bstats, 59int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
60 struct gnet_stats_basic_cpu __percpu *cpu_bstats, 60 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
61 struct net_rate_estimator __rcu **rate_est, 61 struct net_rate_estimator __rcu **rate_est,
62 spinlock_t *stats_lock, 62 spinlock_t *lock,
63 seqcount_t *running, struct nlattr *opt); 63 seqcount_t *running, struct nlattr *opt);
64void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); 64void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
65int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, 65int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
66 struct gnet_stats_basic_cpu __percpu *cpu_bstats, 66 struct gnet_stats_basic_cpu __percpu *cpu_bstats,
67 struct net_rate_estimator __rcu **ptr, 67 struct net_rate_estimator __rcu **ptr,
68 spinlock_t *stats_lock, 68 spinlock_t *lock,
69 seqcount_t *running, struct nlattr *opt); 69 seqcount_t *running, struct nlattr *opt);
70bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); 70bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
71bool gen_estimator_read(struct net_rate_estimator __rcu **ptr, 71bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 960236fb1681..feef706e1158 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2017 Intel Deutschland GmbH 2 * Copyright (c) 2017 Intel Deutschland GmbH
3 * Copyright (c) 2018 Intel Corporation
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence {
72 IEEE80211_RADIOTAP_AMPDU_STATUS = 20, 73 IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
73 IEEE80211_RADIOTAP_VHT = 21, 74 IEEE80211_RADIOTAP_VHT = 21,
74 IEEE80211_RADIOTAP_TIMESTAMP = 22, 75 IEEE80211_RADIOTAP_TIMESTAMP = 22,
76 IEEE80211_RADIOTAP_HE = 23,
77 IEEE80211_RADIOTAP_HE_MU = 24,
75 78
76 /* valid in every it_present bitmap, even vendor namespaces */ 79 /* valid in every it_present bitmap, even vendor namespaces */
77 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, 80 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags {
202 IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02, 205 IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
203}; 206};
204 207
208struct ieee80211_radiotap_he {
209 __le16 data1, data2, data3, data4, data5, data6;
210};
211
212enum ieee80211_radiotap_he_bits {
213 IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3,
214 IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0,
215 IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1,
216 IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2,
217 IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3,
218
219 IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004,
220 IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008,
221 IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010,
222 IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020,
223 IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040,
224 IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080,
225 IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100,
226 IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200,
227 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400,
228 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800,
229 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000,
230 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000,
231 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000,
232 IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000,
233
234 IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001,
235 IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002,
236 IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004,
237 IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008,
238 IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010,
239 IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020,
240 IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040,
241 IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080,
242 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00,
243 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000,
244 IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000,
245
246 IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f,
247 IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040,
248 IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080,
249 IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00,
250 IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000,
251 IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000,
252 IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000,
253 IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000,
254
255 IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f,
256 IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0,
257 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f,
258 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0,
259 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00,
260 IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000,
261
262 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f,
263 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0,
264 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1,
265 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2,
266 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3,
267 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4,
268 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5,
269 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6,
270 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7,
271 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8,
272 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9,
273 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10,
274
275 IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030,
276 IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0,
277 IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1,
278 IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2,
279
280 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0,
281 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0,
282 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1,
283 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2,
284 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3,
285 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700,
286 IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000,
287 IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000,
288 IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000,
289
290 IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
291 IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
292 IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
293 IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
294};
295
296struct ieee80211_radiotap_he_mu {
297 __le16 flags1, flags2;
298 u8 ru_ch1[4];
299 u8 ru_ch2[4];
300};
301
302enum ieee80211_radiotap_he_mu_bits {
303 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f,
304 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010,
305 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020,
306 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040,
307 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080,
308 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100,
309 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200,
310 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000,
311 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000,
312 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000,
313 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000,
314
315 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003,
316 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000,
317 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001,
318 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002,
319 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
320 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004,
321 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008,
322 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0,
323 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300,
324 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400,
325 IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
326};
327
205/** 328/**
206 * ieee80211_get_radiotap_len - get radiotap header length 329 * ieee80211_get_radiotap_len - get radiotap header length
207 */ 330 */
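The new HE radiotap fields are all "value plus known bit" pairs packed into little-endian u16 words. A hedged sketch of how an rx path might report the data MCS using the masks defined above; the helper and its call site are assumptions:

#include <linux/bitfield.h>

/* Hypothetical rx-status helper: store the data MCS in the HE radiotap
 * struct and flag that field as known.
 */
static void drv_fill_he_mcs(struct ieee80211_radiotap_he *he, u8 mcs)
{
	he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN);
	he->data3 |= cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS,
					    mcs));
}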
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 384b90c62c0b..3ca969cbd161 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
43int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, 43int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
44 int *addr_len); 44 int *addr_len);
45 45
46struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb); 46struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
47int inet_gro_complete(struct sk_buff *skb, int nhoff); 47int inet_gro_complete(struct sk_buff *skb, int nhoff);
48struct sk_buff *inet_gso_segment(struct sk_buff *skb, 48struct sk_buff *inet_gso_segment(struct sk_buff *skb,
49 netdev_features_t features); 49 netdev_features_t features);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0a6c9e0f2b5a..371b3b45fd5c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -19,6 +19,7 @@
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/poll.h> 21#include <linux/poll.h>
22#include <linux/kernel.h>
22 23
23#include <net/inet_sock.h> 24#include <net/inet_sock.h>
24#include <net/request_sock.h> 25#include <net/request_sock.h>
@@ -167,7 +168,8 @@ enum inet_csk_ack_state_t {
167 ICSK_ACK_SCHED = 1, 168 ICSK_ACK_SCHED = 1,
168 ICSK_ACK_TIMER = 2, 169 ICSK_ACK_TIMER = 2,
169 ICSK_ACK_PUSHED = 4, 170 ICSK_ACK_PUSHED = 4,
170 ICSK_ACK_PUSHED2 = 8 171 ICSK_ACK_PUSHED2 = 8,
172 ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
171}; 173};
172 174
173void inet_csk_init_xmit_timers(struct sock *sk, 175void inet_csk_init_xmit_timers(struct sock *sk,
@@ -224,7 +226,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
224 226
225 if (when > max_when) { 227 if (when > max_when) {
226 pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", 228 pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
227 sk, what, when, current_text_addr()); 229 sk, what, when, (void *)_THIS_IP_);
228 when = max_when; 230 when = max_when;
229 } 231 }
230 232
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index ed07e3786d98..1662cbc0b46b 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -2,7 +2,7 @@
2#ifndef __NET_FRAG_H__ 2#ifndef __NET_FRAG_H__
3#define __NET_FRAG_H__ 3#define __NET_FRAG_H__
4 4
5#include <linux/rhashtable.h> 5#include <linux/rhashtable-types.h>
6 6
7struct netns_frags { 7struct netns_frags {
8 /* sysctls */ 8 /* sysctls */
@@ -57,7 +57,9 @@ struct frag_v6_compare_key {
57 * @lock: spinlock protecting this frag 57 * @lock: spinlock protecting this frag
58 * @refcnt: reference count of the queue 58 * @refcnt: reference count of the queue
59 * @fragments: received fragments head 59 * @fragments: received fragments head
60 * @rb_fragments: received fragments rb-tree root
60 * @fragments_tail: received fragments tail 61 * @fragments_tail: received fragments tail
62 * @last_run_head: the head of the last "run". see ip_fragment.c
61 * @stamp: timestamp of the last received fragment 63 * @stamp: timestamp of the last received fragment
62 * @len: total length of the original datagram 64 * @len: total length of the original datagram
63 * @meat: length of received fragments so far 65 * @meat: length of received fragments so far
@@ -75,8 +77,10 @@ struct inet_frag_queue {
75 struct timer_list timer; 77 struct timer_list timer;
76 spinlock_t lock; 78 spinlock_t lock;
77 refcount_t refcnt; 79 refcount_t refcnt;
78 struct sk_buff *fragments; 80 struct sk_buff *fragments; /* Used in IPv6. */
81 struct rb_root rb_fragments; /* Used in IPv4. */
79 struct sk_buff *fragments_tail; 82 struct sk_buff *fragments_tail;
83 struct sk_buff *last_run_head;
80 ktime_t stamp; 84 ktime_t stamp;
81 int len; 85 int len;
82 int meat; 86 int meat;
@@ -112,6 +116,9 @@ void inet_frag_kill(struct inet_frag_queue *q);
112void inet_frag_destroy(struct inet_frag_queue *q); 116void inet_frag_destroy(struct inet_frag_queue *q);
113struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key); 117struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
114 118
119/* Free all skbs in the queue; return the sum of their truesizes. */
120unsigned int inet_frag_rbtree_purge(struct rb_root *root);
121
115static inline void inet_frag_put(struct inet_frag_queue *q) 122static inline void inet_frag_put(struct inet_frag_queue *q)
116{ 123{
117 if (refcount_dec_and_test(&q->refcnt)) 124 if (refcount_dec_and_test(&q->refcnt))
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83d5b3c2ac42..e03b93360f33 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -148,6 +148,7 @@ struct inet_cork {
148 __s16 tos; 148 __s16 tos;
149 char priority; 149 char priority;
150 __u16 gso_size; 150 __u16 gso_size;
151 u64 transmit_time;
151}; 152};
152 153
153struct inet_cork_full { 154struct inet_cork_full {
@@ -358,4 +359,12 @@ static inline bool inet_get_convert_csum(struct sock *sk)
358 return !!inet_sk(sk)->convert_csum; 359 return !!inet_sk(sk)->convert_csum;
359} 360}
360 361
362
363static inline bool inet_can_nonlocal_bind(struct net *net,
364 struct inet_sock *inet)
365{
366 return net->ipv4.sysctl_ip_nonlocal_bind ||
367 inet->freebind || inet->transparent;
368}
369
361#endif /* _INET_SOCK_H */ 370#endif /* _INET_SOCK_H */
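inet_can_nonlocal_bind() folds the three ways a socket may bind to a non-local address (sysctl, IP_FREEBIND, IP_TRANSPARENT) into one predicate. A hedged sketch of the kind of bind-time check it is meant to replace; the surrounding function is hypothetical:

/* Hypothetical bind-time validation: reject a non-local source address
 * unless the sysctl or a socket option explicitly allows it.
 */
static int drv_validate_bind_addr(struct net *net, struct sock *sk,
				  bool addr_is_local)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!addr_is_local && !inet_can_nonlocal_bind(net, inet))
		return -EADDRNOTAVAIL;

	return 0;
}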
diff --git a/include/net/ip.h b/include/net/ip.h
index 0d2281b4b27a..e44b1a44f67a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -72,13 +72,27 @@ struct ipcm_cookie {
72 __be32 addr; 72 __be32 addr;
73 int oif; 73 int oif;
74 struct ip_options_rcu *opt; 74 struct ip_options_rcu *opt;
75 __u8 tx_flags;
76 __u8 ttl; 75 __u8 ttl;
77 __s16 tos; 76 __s16 tos;
78 char priority; 77 char priority;
79 __u16 gso_size; 78 __u16 gso_size;
80}; 79};
81 80
81static inline void ipcm_init(struct ipcm_cookie *ipcm)
82{
83 *ipcm = (struct ipcm_cookie) { .tos = -1 };
84}
85
86static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
87 const struct inet_sock *inet)
88{
89 ipcm_init(ipcm);
90
91 ipcm->sockc.tsflags = inet->sk.sk_tsflags;
92 ipcm->oif = inet->sk.sk_bound_dev_if;
93 ipcm->addr = inet->inet_saddr;
94}
95
82#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) 96#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
83#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb)) 97#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
84 98
@@ -138,6 +152,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
138 struct ip_options_rcu *opt); 152 struct ip_options_rcu *opt);
139int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, 153int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
140 struct net_device *orig_dev); 154 struct net_device *orig_dev);
155void ip_list_rcv(struct list_head *head, struct packet_type *pt,
156 struct net_device *orig_dev);
141int ip_local_deliver(struct sk_buff *skb); 157int ip_local_deliver(struct sk_buff *skb);
142int ip_mr_input(struct sk_buff *skb); 158int ip_mr_input(struct sk_buff *skb);
143int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb); 159int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -148,7 +164,8 @@ void ip_send_check(struct iphdr *ip);
148int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); 164int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
149int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); 165int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
150 166
151int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); 167int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
168 __u8 tos);
152void ip_init(void); 169void ip_init(void);
153int ip_append_data(struct sock *sk, struct flowi4 *fl4, 170int ip_append_data(struct sock *sk, struct flowi4 *fl4,
154 int getfrag(void *from, char *to, int offset, int len, 171 int getfrag(void *from, char *to, int offset, int len,
@@ -174,6 +191,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
174 struct ipcm_cookie *ipc, struct rtable **rtp, 191 struct ipcm_cookie *ipc, struct rtable **rtp,
175 struct inet_cork *cork, unsigned int flags); 192 struct inet_cork *cork, unsigned int flags);
176 193
194static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
195 struct flowi *fl)
196{
197 return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
198}
199
177static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) 200static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
178{ 201{
179 return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); 202 return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
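The ipcm_init()/ipcm_init_sk() helpers centralise the cookie defaults that were previously open-coded at every IPv4 sendmsg-style call site. A minimal sketch of the intended call pattern, with a hypothetical surrounding function:

/* Hypothetical transmit path: seed the cookie from the socket, then let
 * control-message parsing override individual fields.
 */
static void drv_prepare_ipcm(struct sock *sk, struct ipcm_cookie *ipc)
{
	ipcm_init_sk(ipc, inet_sk(sk));	/* tos = -1; tsflags/oif/addr from sk */

	/* per-message overrides, e.g. ipc->oif = some_ifindex, go here */
}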
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5cba71d2dc44..3d4930528db0 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -170,6 +170,7 @@ struct fib6_info {
170 unused:3; 170 unused:3;
171 171
172 struct fib6_nh fib6_nh; 172 struct fib6_nh fib6_nh;
173 struct rcu_head rcu;
173}; 174};
174 175
175struct rt6_info { 176struct rt6_info {
@@ -273,17 +274,22 @@ static inline void ip6_rt_put(struct rt6_info *rt)
273} 274}
274 275
275struct fib6_info *fib6_info_alloc(gfp_t gfp_flags); 276struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
276void fib6_info_destroy(struct fib6_info *f6i); 277void fib6_info_destroy_rcu(struct rcu_head *head);
277 278
278static inline void fib6_info_hold(struct fib6_info *f6i) 279static inline void fib6_info_hold(struct fib6_info *f6i)
279{ 280{
280 atomic_inc(&f6i->fib6_ref); 281 atomic_inc(&f6i->fib6_ref);
281} 282}
282 283
284static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
285{
286 return atomic_inc_not_zero(&f6i->fib6_ref);
287}
288
283static inline void fib6_info_release(struct fib6_info *f6i) 289static inline void fib6_info_release(struct fib6_info *f6i)
284{ 290{
285 if (f6i && atomic_dec_and_test(&f6i->fib6_ref)) 291 if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
286 fib6_info_destroy(f6i); 292 call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
287} 293}
288 294
289enum fib6_walk_state { 295enum fib6_walk_state {
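With fib6_info freeing now deferred through call_rcu(), lockless readers should use fib6_info_hold_safe() rather than a plain hold. A hedged sketch of the take-a-reference-under-RCU pattern this enables; the helper name is made up:

/* Hypothetical reader, called under rcu_read_lock() with an f6i obtained
 * from an RCU-protected lookup: take a reference only if the entry is not
 * already being torn down.
 */
static struct fib6_info *drv_try_hold_route(struct fib6_info *f6i)
{
	if (f6i && !fib6_info_hold_safe(f6i))
		return NULL;	/* refcount already dropped to zero */

	return f6i;		/* caller releases with fib6_info_release() */
}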
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 59656fc580df..7b9c82de11cc 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
66 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 66 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
67} 67}
68 68
69static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
70{
71 return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
72 RTF_GATEWAY;
73}
74
69void ip6_route_input(struct sk_buff *skb); 75void ip6_route_input(struct sk_buff *skb);
70struct dst_entry *ip6_route_input_lookup(struct net *net, 76struct dst_entry *ip6_route_input_lookup(struct net *net,
71 struct net_device *dev, 77 struct net_device *dev,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 90ff430f5e9d..b0d022ff6ea1 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to,
466} 466}
467 467
468static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, 468static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
469 const void *from, int len) 469 const void *from, int len,
470 __be16 flags)
470{ 471{
471 memcpy(ip_tunnel_info_opts(info), from, len); 472 memcpy(ip_tunnel_info_opts(info), from, len);
472 info->options_len = len; 473 info->options_len = len;
474 info->key.tun_flags |= flags;
473} 475}
474 476
475static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate) 477static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to,
511} 513}
512 514
513static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, 515static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
514 const void *from, int len) 516 const void *from, int len,
517 __be16 flags)
515{ 518{
516 info->options_len = 0; 519 info->options_len = 0;
520 info->key.tun_flags |= flags;
517} 521}
518 522
519#endif /* CONFIG_INET */ 523#endif /* CONFIG_INET */
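ip_tunnel_info_opts_set() now also raises the matching tunnel flag, so callers no longer have to set tun_flags by hand. A hedged sketch of a caller; TUNNEL_GENEVE_OPT is assumed to be the flag a Geneve user would pass, and the wrapper itself is hypothetical:

/* Hypothetical metadata-mode transmit path: copy Geneve options into the
 * tunnel info and mark them present in one call.
 */
static void drv_set_geneve_opts(struct ip_tunnel_info *info,
				const void *opts, int opts_len)
{
	ip_tunnel_info_opts_set(info, opts, opts_len, TUNNEL_GENEVE_OPT);
}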
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a0bec23c6d5e..a0d2e0bb9a94 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -335,6 +335,11 @@ enum ip_vs_sctp_states {
335 IP_VS_SCTP_S_LAST 335 IP_VS_SCTP_S_LAST
336}; 336};
337 337
338/* Connection templates use bits from state */
339#define IP_VS_CTPL_S_NONE 0x0000
340#define IP_VS_CTPL_S_ASSURED 0x0001
341#define IP_VS_CTPL_S_LAST 0x0002
342
338/* Delta sequence info structure 343/* Delta sequence info structure
339 * Each ip_vs_conn has 2 (output AND input seq. changes). 344 * Each ip_vs_conn has 2 (output AND input seq. changes).
340 * Only used in the VS/NAT. 345 * Only used in the VS/NAT.
@@ -1221,7 +1226,7 @@ struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
1221 struct ip_vs_dest *dest, __u32 fwmark); 1226 struct ip_vs_dest *dest, __u32 fwmark);
1222void ip_vs_conn_expire_now(struct ip_vs_conn *cp); 1227void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
1223 1228
1224const char *ip_vs_state_name(__u16 proto, int state); 1229const char *ip_vs_state_name(const struct ip_vs_conn *cp);
1225 1230
1226void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); 1231void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
1227int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); 1232int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
@@ -1289,6 +1294,17 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
1289 atomic_inc(&ctl_cp->n_control); 1294 atomic_inc(&ctl_cp->n_control);
1290} 1295}
1291 1296
1297/* Mark our template as assured */
1298static inline void
1299ip_vs_control_assure_ct(struct ip_vs_conn *cp)
1300{
1301 struct ip_vs_conn *ct = cp->control;
1302
1303 if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
1304 (ct->flags & IP_VS_CONN_F_TEMPLATE))
1305 ct->state |= IP_VS_CTPL_S_ASSURED;
1306}
1307
1292/* IPVS netns init & cleanup functions */ 1308/* IPVS netns init & cleanup functions */
1293int ip_vs_estimator_net_init(struct netns_ipvs *ipvs); 1309int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
1294int ip_vs_control_net_init(struct netns_ipvs *ipvs); 1310int ip_vs_control_net_init(struct netns_ipvs *ipvs);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 16475c269749..ff33f498c137 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -294,6 +294,7 @@ struct ipv6_fl_socklist {
294}; 294};
295 295
296struct ipcm6_cookie { 296struct ipcm6_cookie {
297 struct sockcm_cookie sockc;
297 __s16 hlimit; 298 __s16 hlimit;
298 __s16 tclass; 299 __s16 tclass;
299 __s8 dontfrag; 300 __s8 dontfrag;
@@ -301,6 +302,25 @@ struct ipcm6_cookie {
301 __u16 gso_size; 302 __u16 gso_size;
302}; 303};
303 304
305static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
306{
307 *ipc6 = (struct ipcm6_cookie) {
308 .hlimit = -1,
309 .tclass = -1,
310 .dontfrag = -1,
311 };
312}
313
314static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
315 const struct ipv6_pinfo *np)
316{
317 *ipc6 = (struct ipcm6_cookie) {
318 .hlimit = -1,
319 .tclass = np->tclass,
320 .dontfrag = np->dontfrag,
321 };
322}
323
304static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) 324static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
305{ 325{
306 struct ipv6_txoptions *opt; 326 struct ipv6_txoptions *opt;
@@ -355,14 +375,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
355struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, 375struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
356 struct ipv6_txoptions *opt, 376 struct ipv6_txoptions *opt,
357 int newtype, 377 int newtype,
358 struct ipv6_opt_hdr __user *newopt, 378 struct ipv6_opt_hdr *newopt);
359 int newoptlen);
360struct ipv6_txoptions *
361ipv6_renew_options_kern(struct sock *sk,
362 struct ipv6_txoptions *opt,
363 int newtype,
364 struct ipv6_opt_hdr *newopt,
365 int newoptlen);
366struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, 379struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
367 struct ipv6_txoptions *opt); 380 struct ipv6_txoptions *opt);
368 381
@@ -561,34 +574,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
561} 574}
562#endif 575#endif
563 576
564struct inet_frag_queue;
565
566enum ip6_defrag_users {
567 IP6_DEFRAG_LOCAL_DELIVER,
568 IP6_DEFRAG_CONNTRACK_IN,
569 __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
570 IP6_DEFRAG_CONNTRACK_OUT,
571 __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
572 IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
573 __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
574};
575
576void ip6_frag_init(struct inet_frag_queue *q, const void *a);
577extern const struct rhashtable_params ip6_rhash_params;
578
579/*
580 * Equivalent of ipv4 struct ip
581 */
582struct frag_queue {
583 struct inet_frag_queue q;
584
585 int iif;
586 __u16 nhoffset;
587 u8 ecn;
588};
589
590void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
591
592static inline bool ipv6_addr_any(const struct in6_addr *a) 577static inline bool ipv6_addr_any(const struct in6_addr *a)
593{ 578{
594#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 579#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
@@ -797,6 +782,13 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
797 782
798#if IS_ENABLED(CONFIG_IPV6) 783#if IS_ENABLED(CONFIG_IPV6)
799 784
785static inline bool ipv6_can_nonlocal_bind(struct net *net,
786 struct inet_sock *inet)
787{
788 return net->ipv6.sysctl.ip_nonlocal_bind ||
789 inet->freebind || inet->transparent;
790}
791
800/* Sysctl settings for net ipv6.auto_flowlabels */ 792/* Sysctl settings for net ipv6.auto_flowlabels */
801#define IP6_AUTO_FLOW_LABEL_OFF 0 793#define IP6_AUTO_FLOW_LABEL_OFF 0
802#define IP6_AUTO_FLOW_LABEL_OPTOUT 1 794#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
@@ -830,7 +822,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 830 * to minimize possibility that any useful information to an 822 * to minimize possibility that any useful information to an
831 * attacker is leaked. Only lower 20 bits are relevant. 823 * attacker is leaked. Only lower 20 bits are relevant.
832 */ 824 */
833 rol32(hash, 16); 825 hash = rol32(hash, 16);
834 826
835 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; 827 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
836 828
@@ -922,6 +914,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
922 914
923int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, 915int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
924 struct packet_type *pt, struct net_device *orig_dev); 916 struct packet_type *pt, struct net_device *orig_dev);
917void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
918 struct net_device *orig_dev);
925 919
926int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); 920int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
927 921
@@ -938,8 +932,7 @@ int ip6_append_data(struct sock *sk,
938 int odd, struct sk_buff *skb), 932 int odd, struct sk_buff *skb),
939 void *from, int length, int transhdrlen, 933 void *from, int length, int transhdrlen,
940 struct ipcm6_cookie *ipc6, struct flowi6 *fl6, 934 struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
941 struct rt6_info *rt, unsigned int flags, 935 struct rt6_info *rt, unsigned int flags);
942 const struct sockcm_cookie *sockc);
943 936
944int ip6_push_pending_frames(struct sock *sk); 937int ip6_push_pending_frames(struct sock *sk);
945 938
@@ -956,8 +949,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
956 void *from, int length, int transhdrlen, 949 void *from, int length, int transhdrlen,
957 struct ipcm6_cookie *ipc6, struct flowi6 *fl6, 950 struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
958 struct rt6_info *rt, unsigned int flags, 951 struct rt6_info *rt, unsigned int flags,
959 struct inet_cork_full *cork, 952 struct inet_cork_full *cork);
960 const struct sockcm_cookie *sockc);
961 953
962static inline struct sk_buff *ip6_finish_skb(struct sock *sk) 954static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
963{ 955{
@@ -1107,6 +1099,8 @@ void ipv6_sysctl_unregister(void);
1107 1099
1108int ipv6_sock_mc_join(struct sock *sk, int ifindex, 1100int ipv6_sock_mc_join(struct sock *sk, int ifindex,
1109 const struct in6_addr *addr); 1101 const struct in6_addr *addr);
1102int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
1103 const struct in6_addr *addr, unsigned int mode);
1110int ipv6_sock_mc_drop(struct sock *sk, int ifindex, 1104int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
1111 const struct in6_addr *addr); 1105 const struct in6_addr *addr);
1112#endif /* _NET_IPV6_H */ 1106#endif /* _NET_IPV6_H */
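The IPv6 cookie mirrors the IPv4 change: ipcm6_init_sk() replaces per-call-site defaulting, and because the cookie now embeds the sockcm_cookie, the explicit sockcm arguments could be dropped from ip6_append_data()/ip6_make_skb(). A minimal sketch of the intended use, with a hypothetical surrounding function:

/* Hypothetical IPv6 transmit path: seed the cookie from the socket's
 * ipv6_pinfo, then let cmsg parsing override individual fields.
 */
static void drv_prepare_ipcm6(struct sock *sk, struct ipcm6_cookie *ipc6)
{
	ipcm6_init_sk(ipc6, inet6_sk(sk));	/* hlimit = -1; tclass/dontfrag from np */
}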
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
new file mode 100644
index 000000000000..6ced1e6899b6
--- /dev/null
+++ b/include/net/ipv6_frag.h
@@ -0,0 +1,104 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _IPV6_FRAG_H
3#define _IPV6_FRAG_H
4#include <linux/kernel.h>
5#include <net/addrconf.h>
6#include <net/ipv6.h>
7#include <net/inet_frag.h>
8
9enum ip6_defrag_users {
10 IP6_DEFRAG_LOCAL_DELIVER,
11 IP6_DEFRAG_CONNTRACK_IN,
12 __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
13 IP6_DEFRAG_CONNTRACK_OUT,
14 __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
15 IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
16 __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
17};
18
19/*
20 * Equivalent of ipv4 struct ip
21 */
22struct frag_queue {
23 struct inet_frag_queue q;
24
25 int iif;
26 __u16 nhoffset;
27 u8 ecn;
28};
29
30#if IS_ENABLED(CONFIG_IPV6)
31static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
32{
33 struct frag_queue *fq = container_of(q, struct frag_queue, q);
34 const struct frag_v6_compare_key *key = a;
35
36 q->key.v6 = *key;
37 fq->ecn = 0;
38}
39
40static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
41{
42 return jhash2(data,
43 sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
44}
45
46static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
47{
48 const struct inet_frag_queue *fq = data;
49
50 return jhash2((const u32 *)&fq->key.v6,
51 sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
52}
53
54static inline int
55ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
56{
57 const struct frag_v6_compare_key *key = arg->key;
58 const struct inet_frag_queue *fq = ptr;
59
60 return !!memcmp(&fq->key, key, sizeof(*key));
61}
62
63static inline void
64ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
65{
66 struct net_device *dev = NULL;
67 struct sk_buff *head;
68
69 rcu_read_lock();
70 spin_lock(&fq->q.lock);
71
72 if (fq->q.flags & INET_FRAG_COMPLETE)
73 goto out;
74
75 inet_frag_kill(&fq->q);
76
77 dev = dev_get_by_index_rcu(net, fq->iif);
78 if (!dev)
79 goto out;
80
81 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
82 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
83
84 /* Don't send error if the first segment did not arrive. */
85 head = fq->q.fragments;
86 if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
87 goto out;
88
89 head->dev = dev;
90 skb_get(head);
91 spin_unlock(&fq->q.lock);
92
93 icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
94 kfree_skb(head);
95 goto out_rcu_unlock;
96
97out:
98 spin_unlock(&fq->q.lock);
99out_rcu_unlock:
100 rcu_read_unlock();
101 inet_frag_put(&fq->q);
102}
103#endif
104#endif
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index b0eaeb02d46d..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,6 +153,8 @@ struct iucv_sock_list {
153 atomic_t autobind_name; 153 atomic_t autobind_name;
154}; 154};
155 155
156__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
157 poll_table *wait);
156void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); 158void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
157void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); 159void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
158void iucv_accept_enqueue(struct sock *parent, struct sock *sk); 160void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/include/net/lag.h b/include/net/lag.h
new file mode 100644
index 000000000000..95b880e6fdde
--- /dev/null
+++ b/include/net/lag.h
@@ -0,0 +1,17 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_IF_LAG_H
3#define _LINUX_IF_LAG_H
4
5#include <linux/netdevice.h>
6#include <linux/if_team.h>
7#include <net/bonding.h>
8
9static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
10{
11 if (netif_is_team_port(port_dev))
12 return team_port_dev_txable(port_dev);
13 else
14 return bond_is_active_slave_dev(port_dev);
15}
16
17#endif /* _LINUX_IF_LAG_H */
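
A hedged sketch of how a LAG-aware caller could consume the new helper when choosing an egress port; the iteration and the assumption that RTNL is held are mine, not from the patch:

/* Pick the first lower device of a LAG master that may transmit now. */
static struct net_device *
example_lag_pick_tx_port(struct net_device *lag_dev)
{
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (net_lag_port_dev_txable(lower))
			return lower;
	}
	return NULL;
}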
diff --git a/include/net/llc.h b/include/net/llc.h
index dc35f25eb679..890a87318014 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
116 refcount_inc(&sap->refcnt); 116 refcount_inc(&sap->refcnt);
117} 117}
118 118
119static inline bool llc_sap_hold_safe(struct llc_sap *sap)
120{
121 return refcount_inc_not_zero(&sap->refcnt);
122}
123
119void llc_sap_close(struct llc_sap *sap); 124void llc_sap_close(struct llc_sap *sap);
120 125
121static inline void llc_sap_put(struct llc_sap *sap) 126static inline void llc_sap_put(struct llc_sap *sap)
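
Illustrative use of the new _safe variant: a lookup helper that refuses to take a reference on a SAP whose refcount has already dropped to zero (sketch, not taken from the patch):

static struct llc_sap *example_sap_get(struct llc_sap *sap)
{
	/* return NULL instead of reviving a SAP that is being freed */
	if (sap && !llc_sap_hold_safe(sap))
		sap = NULL;
	return sap;
}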
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 851a5e19ae32..5790f55c241d 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -23,6 +23,7 @@
23#include <linux/ieee80211.h> 23#include <linux/ieee80211.h>
24#include <net/cfg80211.h> 24#include <net/cfg80211.h>
25#include <net/codel.h> 25#include <net/codel.h>
26#include <net/ieee80211_radiotap.h>
26#include <asm/unaligned.h> 27#include <asm/unaligned.h>
27 28
28/** 29/**
@@ -162,6 +163,8 @@ enum ieee80211_ac_numbers {
162 * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled 163 * @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
163 * @acm: is mandatory admission control required for the access category 164 * @acm: is mandatory admission control required for the access category
164 * @uapsd: is U-APSD mode enabled for the queue 165 * @uapsd: is U-APSD mode enabled for the queue
166 * @mu_edca: is the MU EDCA configured
167 * @mu_edca_param_rec: MU EDCA Parameter Record for HE
165 */ 168 */
166struct ieee80211_tx_queue_params { 169struct ieee80211_tx_queue_params {
167 u16 txop; 170 u16 txop;
@@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params {
170 u8 aifs; 173 u8 aifs;
171 bool acm; 174 bool acm;
172 bool uapsd; 175 bool uapsd;
176 bool mu_edca;
177 struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
173}; 178};
174 179
175struct ieee80211_low_level_stats { 180struct ieee80211_low_level_stats {
@@ -463,6 +468,15 @@ struct ieee80211_mu_group_data {
463 * This structure keeps information about a BSS (and an association 468 * This structure keeps information about a BSS (and an association
464 * to that BSS) that can change during the lifetime of the BSS. 469 * to that BSS) that can change during the lifetime of the BSS.
465 * 470 *
471 * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
472 * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
473 * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
474 * @uora_exists: is the UORA element advertised by AP
475 * @ack_enabled: indicates support to receive a multi-TID that solicits either
476 * ACK, BACK or both
477 * @uora_ocw_range: UORA element's OCW Range field
478 * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
479 * @he_support: does this BSS support HE
466 * @assoc: association status 480 * @assoc: association status
467 * @ibss_joined: indicates whether this station is part of an IBSS 481 * @ibss_joined: indicates whether this station is part of an IBSS
468 * or not 482 * or not
@@ -550,6 +564,14 @@ struct ieee80211_mu_group_data {
550 */ 564 */
551struct ieee80211_bss_conf { 565struct ieee80211_bss_conf {
552 const u8 *bssid; 566 const u8 *bssid;
567 u8 bss_color;
568 u8 htc_trig_based_pkt_ext;
569 bool multi_sta_back_32bit;
570 bool uora_exists;
571 bool ack_enabled;
572 u8 uora_ocw_range;
573 u16 frame_time_rts_th;
574 bool he_support;
553 /* association related data */ 575 /* association related data */
554 bool assoc, ibss_joined; 576 bool assoc, ibss_joined;
555 bool ibss_creator; 577 bool ibss_creator;
@@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
1106 * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this 1128 * @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
1107 * frame 1129 * frame
1108 * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known 1130 * @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
1131 * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
1132 * (&struct ieee80211_radiotap_he, mac80211 will fill in
1133 * - DATA3_DATA_MCS
1134 * - DATA3_DATA_DCM
1135 * - DATA3_CODING
1136 * - DATA5_GI
1137 * - DATA5_DATA_BW_RU_ALLOC
1138 * - DATA6_NSTS
1139 * - DATA3_STBC
1140 * from the RX info data, so leave those zeroed when building this data)
1141 * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
1142 * (&struct ieee80211_radiotap_he_mu)
1109 */ 1143 */
1110enum mac80211_rx_flags { 1144enum mac80211_rx_flags {
1111 RX_FLAG_MMIC_ERROR = BIT(0), 1145 RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1134,6 +1168,8 @@ enum mac80211_rx_flags {
1134 RX_FLAG_ICV_STRIPPED = BIT(23), 1168 RX_FLAG_ICV_STRIPPED = BIT(23),
1135 RX_FLAG_AMPDU_EOF_BIT = BIT(24), 1169 RX_FLAG_AMPDU_EOF_BIT = BIT(24),
1136 RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25), 1170 RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
1171 RX_FLAG_RADIOTAP_HE = BIT(26),
1172 RX_FLAG_RADIOTAP_HE_MU = BIT(27),
1137}; 1173};
1138 1174
1139/** 1175/**
@@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding {
1164 RX_ENC_LEGACY = 0, 1200 RX_ENC_LEGACY = 0,
1165 RX_ENC_HT, 1201 RX_ENC_HT,
1166 RX_ENC_VHT, 1202 RX_ENC_VHT,
1203 RX_ENC_HE,
1167}; 1204};
1168 1205
1169/** 1206/**
@@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding {
1198 * @encoding: &enum mac80211_rx_encoding 1235 * @encoding: &enum mac80211_rx_encoding
1199 * @bw: &enum rate_info_bw 1236 * @bw: &enum rate_info_bw
1200 * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags 1237 * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
1238 * @he_ru: HE RU, from &enum nl80211_he_ru_alloc
1239 * @he_gi: HE GI, from &enum nl80211_he_gi
1240 * @he_dcm: HE DCM value
1201 * @rx_flags: internal RX flags for mac80211 1241 * @rx_flags: internal RX flags for mac80211
1202 * @ampdu_reference: A-MPDU reference number, must be a different value for 1242 * @ampdu_reference: A-MPDU reference number, must be a different value for
1203 * each A-MPDU but the same for each subframe within one A-MPDU 1243 * each A-MPDU but the same for each subframe within one A-MPDU
@@ -1211,7 +1251,8 @@ struct ieee80211_rx_status {
1211 u32 flag; 1251 u32 flag;
1212 u16 freq; 1252 u16 freq;
1213 u8 enc_flags; 1253 u8 enc_flags;
1214 u8 encoding:2, bw:3; 1254 u8 encoding:2, bw:3, he_ru:3;
1255 u8 he_gi:2, he_dcm:1;
1215 u8 rate_idx; 1256 u8 rate_idx;
1216 u8 nss; 1257 u8 nss;
1217 u8 rx_flags; 1258 u8 rx_flags;
@@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates {
1770 * @supp_rates: Bitmap of supported rates (per band) 1811 * @supp_rates: Bitmap of supported rates (per band)
1771 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities 1812 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
1772 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities 1813 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
1814 * @he_cap: HE capabilities of this STA
1773 * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU 1815 * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
1774 * that this station is allowed to transmit to us. 1816 * that this station is allowed to transmit to us.
1775 * Can be modified by driver. 1817 * Can be modified by driver.
@@ -1805,7 +1847,8 @@ struct ieee80211_sta {
1805 u16 aid; 1847 u16 aid;
1806 struct ieee80211_sta_ht_cap ht_cap; 1848 struct ieee80211_sta_ht_cap ht_cap;
1807 struct ieee80211_sta_vht_cap vht_cap; 1849 struct ieee80211_sta_vht_cap vht_cap;
1808 u8 max_rx_aggregation_subframes; 1850 struct ieee80211_sta_he_cap he_cap;
1851 u16 max_rx_aggregation_subframes;
1809 bool wme; 1852 bool wme;
1810 u8 uapsd_queues; 1853 u8 uapsd_queues;
1811 u8 max_sp; 1854 u8 max_sp;
@@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags {
2196 * it shouldn't be set. 2239 * it shouldn't be set.
2197 * 2240 *
2198 * @max_tx_aggregation_subframes: maximum number of subframes in an 2241 * @max_tx_aggregation_subframes: maximum number of subframes in an
2199 * aggregate an HT driver will transmit. Though ADDBA will advertise 2242 * aggregate an HT/HE device will transmit. In HT AddBA we'll
2200 * a constant value of 64 as some older APs can crash if the window 2243 * advertise a constant value of 64 as some older APs crash if
2201 * size is smaller (an example is LinkSys WRT120N with FW v1.0.07 2244 * the window size is smaller (an example is LinkSys WRT120N
2202 * build 002 Jun 18 2012). 2245 * with FW v1.0.07 build 002 Jun 18 2012).
2246 * For AddBA to HE capable peers this value will be used.
2203 * 2247 *
2204 * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum 2248 * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
2205 * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list. 2249 * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
@@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags {
2216 * the default is _GI | _BANDWIDTH. 2260 * the default is _GI | _BANDWIDTH.
2217 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values. 2261 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
2218 * 2262 *
2263 * @radiotap_he: HE radiotap validity flags
2264 *
2219 * @radiotap_timestamp: Information for the radiotap timestamp field; if the 2265 * @radiotap_timestamp: Information for the radiotap timestamp field; if the
2220 * 'units_pos' member is set to a non-negative value it must be set to 2266 * 'units_pos' member is set to a non-negative value it must be set to
2221 * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a 2267 * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
@@ -2263,8 +2309,8 @@ struct ieee80211_hw {
2263 u8 max_rates; 2309 u8 max_rates;
2264 u8 max_report_rates; 2310 u8 max_report_rates;
2265 u8 max_rate_tries; 2311 u8 max_rate_tries;
2266 u8 max_rx_aggregation_subframes; 2312 u16 max_rx_aggregation_subframes;
2267 u8 max_tx_aggregation_subframes; 2313 u16 max_tx_aggregation_subframes;
2268 u8 max_tx_fragments; 2314 u8 max_tx_fragments;
2269 u8 offchannel_tx_hw_queue; 2315 u8 offchannel_tx_hw_queue;
2270 u8 radiotap_mcs_details; 2316 u8 radiotap_mcs_details;
@@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params {
2904 struct ieee80211_sta *sta; 2950 struct ieee80211_sta *sta;
2905 u16 tid; 2951 u16 tid;
2906 u16 ssn; 2952 u16 ssn;
2907 u8 buf_size; 2953 u16 buf_size;
2908 bool amsdu; 2954 bool amsdu;
2909 u16 timeout; 2955 u16 timeout;
2910}; 2956};
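
A rough sketch of the driver side implied by the HE additions above: mark the frame as HE in the RX status, fill in what the hardware reported, and prepend a (mostly zeroed) radiotap HE record for mac80211 to complete. The function and the chosen GI/RU values are assumptions for illustration only:

static void example_rx_he_frame(struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_he he = {};

	status->encoding = RX_ENC_HE;
	status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
	status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
	status->flag |= RX_FLAG_RADIOTAP_HE;

	/* mac80211 fills the DATA3/DATA5/DATA6 fields listed in the
	 * RX_FLAG_RADIOTAP_HE documentation from rx_status, so only
	 * driver-specific extras need to be set in @he here. */
	memcpy(skb_push(skb, sizeof(he)), &he, sizeof(he));
}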
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 47e35cce3b64..9b5fdc50519a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -10,6 +10,7 @@
10#include <linux/workqueue.h> 10#include <linux/workqueue.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/sysctl.h> 12#include <linux/sysctl.h>
13#include <linux/uidgid.h>
13 14
14#include <net/flow.h> 15#include <net/flow.h>
15#include <net/netns/core.h> 16#include <net/netns/core.h>
@@ -128,6 +129,7 @@ struct net {
128#endif 129#endif
129#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 130#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
130 struct netns_nf_frag nf_frag; 131 struct netns_nf_frag nf_frag;
132 struct ctl_table_header *nf_frag_frags_hdr;
131#endif 133#endif
132 struct sock *nfnl; 134 struct sock *nfnl;
133 struct sock *nfnl_stash; 135 struct sock *nfnl_stash;
@@ -169,6 +171,8 @@ extern struct net init_net;
169struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, 171struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
170 struct net *old_net); 172 struct net *old_net);
171 173
174void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
175
172void net_ns_barrier(void); 176void net_ns_barrier(void);
173#else /* CONFIG_NET_NS */ 177#else /* CONFIG_NET_NS */
174#include <linux/sched.h> 178#include <linux/sched.h>
@@ -181,6 +185,13 @@ static inline struct net *copy_net_ns(unsigned long flags,
181 return old_net; 185 return old_net;
182} 186}
183 187
188static inline void net_ns_get_ownership(const struct net *net,
189 kuid_t *uid, kgid_t *gid)
190{
191 *uid = GLOBAL_ROOT_UID;
192 *gid = GLOBAL_ROOT_GID;
193}
194
184static inline void net_ns_barrier(void) {} 195static inline void net_ns_barrier(void) {}
185#endif /* CONFIG_NET_NS */ 196#endif /* CONFIG_NET_NS */
186 197
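
A small sketch of what the new ownership helper yields: with CONFIG_NET_NS, the UID/GID of the user namespace that owns the netns, otherwise global root (the printing code is illustrative):

static void example_netns_owner(const struct net *net)
{
	kuid_t uid;
	kgid_t gid;

	net_ns_get_ownership(net, &uid, &gid);
	pr_info("netns owner: uid=%u gid=%u\n",
		from_kuid(&init_user_ns, uid),
		from_kgid(&init_user_ns, gid));
}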
diff --git a/include/net/netevent.h b/include/net/netevent.h
index d9918261701c..4107016c3bb4 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -28,6 +28,7 @@ enum netevent_notif_type {
28 NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */ 28 NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
29 NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */ 29 NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
30 NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */ 30 NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
31 NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */
31}; 32};
32 33
33int register_netevent_notifier(struct notifier_block *nb); 34int register_netevent_notifier(struct notifier_block *nb);
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 73f825732326..c84b51682f08 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -10,9 +10,6 @@
10#ifndef _NF_CONNTRACK_IPV4_H 10#ifndef _NF_CONNTRACK_IPV4_H
11#define _NF_CONNTRACK_IPV4_H 11#define _NF_CONNTRACK_IPV4_H
12 12
13
14const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
15
16extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; 13extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
17extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; 14extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
18extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; 15extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 062dc19b5840..7e012312cd61 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -41,6 +41,11 @@ union nf_conntrack_expect_proto {
41 /* insert expect proto private data here */ 41 /* insert expect proto private data here */
42}; 42};
43 43
44struct nf_conntrack_net {
45 unsigned int users4;
46 unsigned int users6;
47};
48
44#include <linux/types.h> 49#include <linux/types.h>
45#include <linux/skbuff.h> 50#include <linux/skbuff.h>
46 51
@@ -171,8 +176,6 @@ void nf_ct_netns_put(struct net *net, u8 nfproto);
171 */ 176 */
172void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls); 177void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
173 178
174void nf_ct_free_hashtable(void *hash, unsigned int size);
175
176int nf_conntrack_hash_check_insert(struct nf_conn *ct); 179int nf_conntrack_hash_check_insert(struct nf_conn *ct);
177bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report); 180bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
178 181
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 9b5e7634713e..2a3e0974a6af 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -14,7 +14,6 @@
14#define _NF_CONNTRACK_CORE_H 14#define _NF_CONNTRACK_CORE_H
15 15
16#include <linux/netfilter.h> 16#include <linux/netfilter.h>
17#include <net/netfilter/nf_conntrack_l3proto.h>
18#include <net/netfilter/nf_conntrack_l4proto.h> 17#include <net/netfilter/nf_conntrack_l4proto.h>
19#include <net/netfilter/nf_conntrack_ecache.h> 18#include <net/netfilter/nf_conntrack_ecache.h>
20 19
@@ -40,16 +39,8 @@ void nf_conntrack_cleanup_start(void);
40void nf_conntrack_init_end(void); 39void nf_conntrack_init_end(void);
41void nf_conntrack_cleanup_end(void); 40void nf_conntrack_cleanup_end(void);
42 41
43bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
44 unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
45 struct net *net,
46 struct nf_conntrack_tuple *tuple,
47 const struct nf_conntrack_l3proto *l3proto,
48 const struct nf_conntrack_l4proto *l4proto);
49
50bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, 42bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
51 const struct nf_conntrack_tuple *orig, 43 const struct nf_conntrack_tuple *orig,
52 const struct nf_conntrack_l3proto *l3proto,
53 const struct nf_conntrack_l4proto *l4proto); 44 const struct nf_conntrack_l4proto *l4proto);
54 45
55/* Find a connection corresponding to a tuple. */ 46/* Find a connection corresponding to a tuple. */
@@ -75,10 +66,8 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
75 return ret; 66 return ret;
76} 67}
77 68
78void 69void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
79print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, 70 const struct nf_conntrack_l4proto *proto);
80 const struct nf_conntrack_l3proto *l3proto,
81 const struct nf_conntrack_l4proto *proto);
82 71
83#define CONNTRACK_LOCKS 1024 72#define CONNTRACK_LOCKS 1024
84 73
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 3a188a0923a3..4b2b2baf8ab4 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -1,8 +1,23 @@
1#ifndef _NF_CONNTRACK_COUNT_H 1#ifndef _NF_CONNTRACK_COUNT_H
2#define _NF_CONNTRACK_COUNT_H 2#define _NF_CONNTRACK_COUNT_H
3 3
4#include <linux/list.h>
5
4struct nf_conncount_data; 6struct nf_conncount_data;
5 7
8enum nf_conncount_list_add {
9 NF_CONNCOUNT_ADDED, /* list add was ok */
10 NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
11 NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
12};
13
14struct nf_conncount_list {
15 spinlock_t list_lock;
16 struct list_head head; /* connections with the same filtering key */
17 unsigned int count; /* length of list */
18 bool dead;
19};
20
6struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, 21struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
7 unsigned int keylen); 22 unsigned int keylen);
8void nf_conncount_destroy(struct net *net, unsigned int family, 23void nf_conncount_destroy(struct net *net, unsigned int family,
@@ -14,15 +29,21 @@ unsigned int nf_conncount_count(struct net *net,
14 const struct nf_conntrack_tuple *tuple, 29 const struct nf_conntrack_tuple *tuple,
15 const struct nf_conntrack_zone *zone); 30 const struct nf_conntrack_zone *zone);
16 31
17unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head, 32void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
18 const struct nf_conntrack_tuple *tuple, 33 const struct nf_conntrack_tuple *tuple,
19 const struct nf_conntrack_zone *zone, 34 const struct nf_conntrack_zone *zone,
20 bool *addit); 35 bool *addit);
36
37void nf_conncount_list_init(struct nf_conncount_list *list);
38
39enum nf_conncount_list_add
40nf_conncount_add(struct nf_conncount_list *list,
41 const struct nf_conntrack_tuple *tuple,
42 const struct nf_conntrack_zone *zone);
21 43
22bool nf_conncount_add(struct hlist_head *head, 44bool nf_conncount_gc_list(struct net *net,
23 const struct nf_conntrack_tuple *tuple, 45 struct nf_conncount_list *list);
24 const struct nf_conntrack_zone *zone);
25 46
26void nf_conncount_cache_free(struct hlist_head *hhead); 47void nf_conncount_cache_free(struct nf_conncount_list *list);
27 48
28#endif 49#endif
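
The reworked conncount API revolves around a per-key nf_conncount_list rather than a bare hlist. A hedged sketch of the lookup-then-add pattern a caller might use; the function name and error handling are illustrative:

static int example_conncount_check(struct net *net,
				   struct nf_conncount_list *list,
				   const struct nf_conntrack_tuple *tuple,
				   const struct nf_conntrack_zone *zone)
{
	bool addit;

	/* prunes dead connections and reports whether to add this one */
	nf_conncount_lookup(net, list, tuple, zone, &addit);

	if (addit && nf_conncount_add(list, tuple, zone) == NF_CONNCOUNT_ERR)
		return -ENOMEM;	/* allocation failure: drop the skb */

	return list->count;
}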
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c2a94a219d..2492120b8097 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -103,9 +103,7 @@ int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int);
103void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *, 103void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *,
104 unsigned int); 104 unsigned int);
105 105
106struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, 106struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
107 struct nf_conntrack_helper *helper,
108 gfp_t gfp);
109 107
110int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, 108int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
111 gfp_t flags); 109 gfp_t flags);
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
deleted file mode 100644
index d5808f3e2715..000000000000
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ /dev/null
@@ -1,84 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C)2003,2004 USAGI/WIDE Project
4 *
5 * Header for use in defining a given L3 protocol for connection tracking.
6 *
7 * Author:
8 * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
9 *
10 * Derived from include/netfilter_ipv4/ip_conntrack_protocol.h
11 */
12
13#ifndef _NF_CONNTRACK_L3PROTO_H
14#define _NF_CONNTRACK_L3PROTO_H
15#include <linux/netlink.h>
16#include <net/netlink.h>
17#include <linux/seq_file.h>
18#include <net/netfilter/nf_conntrack.h>
19
20struct nf_conntrack_l3proto {
21 /* L3 Protocol Family number. ex) PF_INET */
22 u_int16_t l3proto;
23
24 /* size of tuple nlattr, fills a hole */
25 u16 nla_size;
26
27 /*
28 * Try to fill in the third arg: nhoff is offset of l3 proto
29 * hdr. Return true if possible.
30 */
31 bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
32 struct nf_conntrack_tuple *tuple);
33
34 /*
35 * Invert the per-proto part of the tuple: ie. turn xmit into reply.
36 * Some packets can't be inverted: return 0 in that case.
37 */
38 bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
39 const struct nf_conntrack_tuple *orig);
40
41 /*
42 * Called before tracking.
43 * *dataoff: offset of protocol header (TCP, UDP,...) in skb
44 * *protonum: protocol number
45 */
46 int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff,
47 unsigned int *dataoff, u_int8_t *protonum);
48
49#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
50 int (*tuple_to_nlattr)(struct sk_buff *skb,
51 const struct nf_conntrack_tuple *t);
52 int (*nlattr_to_tuple)(struct nlattr *tb[],
53 struct nf_conntrack_tuple *t);
54 const struct nla_policy *nla_policy;
55#endif
56
57 /* Called when netns wants to use connection tracking */
58 int (*net_ns_get)(struct net *);
59 void (*net_ns_put)(struct net *);
60
61 /* Module (if any) which this is connected to. */
62 struct module *me;
63};
64
65extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
66
67/* Protocol global registration. */
68int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto);
69void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto);
70
71const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
72
73/* Existing built-in protocols */
74extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
75
76static inline struct nf_conntrack_l3proto *
77__nf_ct_l3proto_find(u_int16_t l3proto)
78{
79 if (unlikely(l3proto >= NFPROTO_NUMPROTO))
80 return &nf_conntrack_l3proto_generic;
81 return rcu_dereference(nf_ct_l3protos[l3proto]);
82}
83
84#endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index a7220eef9aee..8465263b297d 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -36,7 +36,7 @@ struct nf_conntrack_l4proto {
36 struct net *net, struct nf_conntrack_tuple *tuple); 36 struct net *net, struct nf_conntrack_tuple *tuple);
37 37
38 /* Invert the per-proto part of the tuple: ie. turn xmit into reply. 38 /* Invert the per-proto part of the tuple: ie. turn xmit into reply.
 39 * Some packets can't be inverted: return 0 in that case. 39 * Only used by icmp; most protocols use a generic version.
40 */ 40 */
41 bool (*invert_tuple)(struct nf_conntrack_tuple *inverse, 41 bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
42 const struct nf_conntrack_tuple *orig); 42 const struct nf_conntrack_tuple *orig);
@@ -45,13 +45,12 @@ struct nf_conntrack_l4proto {
45 int (*packet)(struct nf_conn *ct, 45 int (*packet)(struct nf_conn *ct,
46 const struct sk_buff *skb, 46 const struct sk_buff *skb,
47 unsigned int dataoff, 47 unsigned int dataoff,
48 enum ip_conntrack_info ctinfo, 48 enum ip_conntrack_info ctinfo);
49 unsigned int *timeouts);
50 49
51 /* Called when a new connection for this protocol found; 50 /* Called when a new connection for this protocol found;
52 * returns TRUE if it's OK. If so, packet() called next. */ 51 * returns TRUE if it's OK. If so, packet() called next. */
53 bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, 52 bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
54 unsigned int dataoff, unsigned int *timeouts); 53 unsigned int dataoff);
55 54
56 /* Called when a conntrack entry is destroyed */ 55 /* Called when a conntrack entry is destroyed */
57 void (*destroy)(struct nf_conn *ct); 56 void (*destroy)(struct nf_conn *ct);
@@ -63,9 +62,6 @@ struct nf_conntrack_l4proto {
63 /* called by gc worker if table is full */ 62 /* called by gc worker if table is full */
64 bool (*can_early_drop)(const struct nf_conn *ct); 63 bool (*can_early_drop)(const struct nf_conn *ct);
65 64
66 /* Return the array of timeouts for this protocol. */
67 unsigned int *(*get_timeouts)(struct net *net);
68
69 /* convert protoinfo to nfnetink attributes */ 65 /* convert protoinfo to nfnetink attributes */
70 int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, 66 int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
71 struct nf_conn *ct); 67 struct nf_conn *ct);
@@ -81,7 +77,6 @@ struct nf_conntrack_l4proto {
81 struct nf_conntrack_tuple *t); 77 struct nf_conntrack_tuple *t);
82 const struct nla_policy *nla_policy; 78 const struct nla_policy *nla_policy;
83 79
84#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
85 struct { 80 struct {
86 int (*nlattr_to_obj)(struct nlattr *tb[], 81 int (*nlattr_to_obj)(struct nlattr *tb[],
87 struct net *net, void *data); 82 struct net *net, void *data);
@@ -91,7 +86,6 @@ struct nf_conntrack_l4proto {
91 u16 nlattr_max; 86 u16 nlattr_max;
92 const struct nla_policy *nla_policy; 87 const struct nla_policy *nla_policy;
93 } ctnl_timeout; 88 } ctnl_timeout;
94#endif
95#ifdef CONFIG_NF_CONNTRACK_PROCFS 89#ifdef CONFIG_NF_CONNTRACK_PROCFS
96 /* Print out the private part of the conntrack. */ 90 /* Print out the private part of the conntrack. */
97 void (*print_conntrack)(struct seq_file *s, struct nf_conn *); 91 void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
@@ -134,10 +128,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
134/* Protocol global registration. */ 128/* Protocol global registration. */
135int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto); 129int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
136void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto); 130void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
137int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
138 unsigned int num_proto);
139void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
140 unsigned int num_proto);
141 131
142/* Generic netlink helpers */ 132/* Generic netlink helpers */
143int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, 133int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 9468ab4ad12d..d5f62cc6c2ae 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -11,24 +11,28 @@
11 11
12#define CTNL_TIMEOUT_NAME_MAX 32 12#define CTNL_TIMEOUT_NAME_MAX 32
13 13
14struct nf_ct_timeout {
15 __u16 l3num;
16 const struct nf_conntrack_l4proto *l4proto;
17 char data[0];
18};
19
14struct ctnl_timeout { 20struct ctnl_timeout {
15 struct list_head head; 21 struct list_head head;
16 struct rcu_head rcu_head; 22 struct rcu_head rcu_head;
17 refcount_t refcnt; 23 refcount_t refcnt;
18 char name[CTNL_TIMEOUT_NAME_MAX]; 24 char name[CTNL_TIMEOUT_NAME_MAX];
19 __u16 l3num; 25 struct nf_ct_timeout timeout;
20 const struct nf_conntrack_l4proto *l4proto;
21 char data[0];
22}; 26};
23 27
24struct nf_conn_timeout { 28struct nf_conn_timeout {
25 struct ctnl_timeout __rcu *timeout; 29 struct nf_ct_timeout __rcu *timeout;
26}; 30};
27 31
28static inline unsigned int * 32static inline unsigned int *
29nf_ct_timeout_data(struct nf_conn_timeout *t) 33nf_ct_timeout_data(struct nf_conn_timeout *t)
30{ 34{
31 struct ctnl_timeout *timeout; 35 struct nf_ct_timeout *timeout;
32 36
33 timeout = rcu_dereference(t->timeout); 37 timeout = rcu_dereference(t->timeout);
34 if (timeout == NULL) 38 if (timeout == NULL)
@@ -49,7 +53,7 @@ struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
49 53
50static inline 54static inline
51struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, 55struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
52 struct ctnl_timeout *timeout, 56 struct nf_ct_timeout *timeout,
53 gfp_t gfp) 57 gfp_t gfp)
54{ 58{
55#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 59#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
@@ -67,32 +71,23 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
67#endif 71#endif
68}; 72};
69 73
70static inline unsigned int * 74static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
71nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
72 const struct nf_conntrack_l4proto *l4proto)
73{ 75{
76 unsigned int *timeouts = NULL;
74#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 77#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
75 struct nf_conn_timeout *timeout_ext; 78 struct nf_conn_timeout *timeout_ext;
76 unsigned int *timeouts;
77 79
78 timeout_ext = nf_ct_timeout_find(ct); 80 timeout_ext = nf_ct_timeout_find(ct);
79 if (timeout_ext) { 81 if (timeout_ext)
80 timeouts = nf_ct_timeout_data(timeout_ext); 82 timeouts = nf_ct_timeout_data(timeout_ext);
81 if (unlikely(!timeouts))
82 timeouts = l4proto->get_timeouts(net);
83 } else {
84 timeouts = l4proto->get_timeouts(net);
85 }
86
87 return timeouts;
88#else
89 return l4proto->get_timeouts(net);
90#endif 83#endif
84 return timeouts;
91} 85}
92 86
93#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 87#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
94int nf_conntrack_timeout_init(void); 88int nf_conntrack_timeout_init(void);
95void nf_conntrack_timeout_fini(void); 89void nf_conntrack_timeout_fini(void);
90void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
96#else 91#else
97static inline int nf_conntrack_timeout_init(void) 92static inline int nf_conntrack_timeout_init(void)
98{ 93{
@@ -106,8 +101,8 @@ static inline void nf_conntrack_timeout_fini(void)
106#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 101#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
107 102
108#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 103#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
109extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name); 104extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
110extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout); 105extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
111#endif 106#endif
112 107
113#endif /* _NF_CONNTRACK_TIMEOUT_H */ 108#endif /* _NF_CONNTRACK_TIMEOUT_H */
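
With the per-netns default moved out of nf_ct_timeout_lookup(), an L4 tracker's ->packet() hook is now expected to fall back to its own defaults when no timeout extension is attached. A sketch under that assumption (example_default_timeouts() is hypothetical):

static int example_packet(struct nf_conn *ct, const struct sk_buff *skb,
			  unsigned int dataoff, enum ip_conntrack_info ctinfo)
{
	unsigned int *timeouts = nf_ct_timeout_lookup(ct);

	if (!timeouts)
		timeouts = example_default_timeouts(nf_ct_net(ct));

	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[0]);
	return NF_ACCEPT;
}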
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index ba9fa4592f2b..0e355f4a3d76 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -4,7 +4,7 @@
4#include <linux/in.h> 4#include <linux/in.h>
5#include <linux/in6.h> 5#include <linux/in6.h>
6#include <linux/netdevice.h> 6#include <linux/netdevice.h>
7#include <linux/rhashtable.h> 7#include <linux/rhashtable-types.h>
8#include <linux/rcupdate.h> 8#include <linux/rcupdate.h>
9#include <linux/netfilter/nf_conntrack_tuple_common.h> 9#include <linux/netfilter/nf_conntrack_tuple_common.h>
10#include <net/dst.h> 10#include <net/dst.h>
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index e811ac07ea94..0d3920896d50 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
106int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, 106int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
107 u8 proto, int fragment, unsigned int offset, 107 u8 proto, int fragment, unsigned int offset,
108 unsigned int logflags); 108 unsigned int logflags);
109void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk); 109void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
110 struct sock *sk);
110void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, 111void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
111 unsigned int hooknum, const struct sk_buff *skb, 112 unsigned int hooknum, const struct sk_buff *skb,
112 const struct net_device *in, 113 const struct net_device *in,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 08c005ce56e9..dc417ef0a0c5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
150 * @portid: netlink portID of the original message 150 * @portid: netlink portID of the original message
151 * @seq: netlink sequence number 151 * @seq: netlink sequence number
152 * @family: protocol family 152 * @family: protocol family
153 * @level: depth of the chains
153 * @report: notify via unicast netlink message 154 * @report: notify via unicast netlink message
154 */ 155 */
155struct nft_ctx { 156struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
160 u32 portid; 161 u32 portid;
161 u32 seq; 162 u32 seq;
162 u8 family; 163 u8 family;
164 u8 level;
163 bool report; 165 bool report;
164}; 166};
165 167
@@ -865,7 +867,6 @@ enum nft_chain_flags {
865 * @table: table that this chain belongs to 867 * @table: table that this chain belongs to
866 * @handle: chain handle 868 * @handle: chain handle
867 * @use: number of jump references to this chain 869 * @use: number of jump references to this chain
868 * @level: length of longest path to this chain
869 * @flags: bitmask of enum nft_chain_flags 870 * @flags: bitmask of enum nft_chain_flags
870 * @name: name of the chain 871 * @name: name of the chain
871 */ 872 */
@@ -878,7 +879,6 @@ struct nft_chain {
878 struct nft_table *table; 879 struct nft_table *table;
879 u64 handle; 880 u64 handle;
880 u32 use; 881 u32 use;
881 u16 level;
882 u8 flags:6, 882 u8 flags:6,
883 genmask:2; 883 genmask:2;
884 char *name; 884 char *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
1124 u32 genmask:2, 1124 u32 genmask:2,
1125 use:30; 1125 use:30;
1126 u64 handle; 1126 u64 handle;
1127 char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
1128 /* runtime data below here */ 1127 /* runtime data below here */
1129 struct nf_hook_ops *ops ____cacheline_aligned; 1128 struct nf_hook_ops *ops ____cacheline_aligned;
1130 struct nf_flowtable data; 1129 struct nf_flowtable data;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index e0c0c2558ec4..8da837d2aaf9 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -65,4 +65,17 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
65extern struct static_key_false nft_counters_enabled; 65extern struct static_key_false nft_counters_enabled;
66extern struct static_key_false nft_trace_enabled; 66extern struct static_key_false nft_trace_enabled;
67 67
68extern struct nft_set_type nft_set_rhash_type;
69extern struct nft_set_type nft_set_hash_type;
70extern struct nft_set_type nft_set_hash_fast_type;
71extern struct nft_set_type nft_set_rbtree_type;
72extern struct nft_set_type nft_set_bitmap_type;
73
74struct nft_expr;
75struct nft_regs;
76struct nft_pktinfo;
77void nft_meta_get_eval(const struct nft_expr *expr,
78 struct nft_regs *regs, const struct nft_pktinfo *pkt);
79void nft_lookup_eval(const struct nft_expr *expr,
80 struct nft_regs *regs, const struct nft_pktinfo *pkt);
68#endif /* _NET_NF_TABLES_CORE_H */ 81#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 9754a50ecde9..82d0e41b76f2 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -17,6 +17,14 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
17 return false; 17 return false;
18} 18}
19 19
20/* assign a socket to the skb -- consumes sk */
21static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
22{
23 skb_orphan(skb);
24 skb->sk = sk;
25 skb->destructor = sock_edemux;
26}
27
20__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr); 28__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
21 29
22/** 30/**
@@ -64,7 +72,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
64 * belonging to established connections going through that one. 72 * belonging to established connections going through that one.
65 */ 73 */
66struct sock * 74struct sock *
67nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, 75nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
68 const u8 protocol, 76 const u8 protocol,
69 const __be32 saddr, const __be32 daddr, 77 const __be32 saddr, const __be32 daddr,
70 const __be16 sport, const __be16 dport, 78 const __be16 sport, const __be16 dport,
@@ -103,7 +111,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
103 struct sock *sk); 111 struct sock *sk);
104 112
105struct sock * 113struct sock *
106nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, 114nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
107 const u8 protocol, 115 const u8 protocol,
108 const struct in6_addr *saddr, const struct in6_addr *daddr, 116 const struct in6_addr *saddr, const struct in6_addr *daddr,
109 const __be16 sport, const __be16 dport, 117 const __be16 sport, const __be16 dport,
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 24c78183a4c2..16a842456189 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -9,12 +9,7 @@ struct net;
9static inline u32 net_hash_mix(const struct net *net) 9static inline u32 net_hash_mix(const struct net *net)
10{ 10{
11#ifdef CONFIG_NET_NS 11#ifdef CONFIG_NET_NS
12 /* 12 return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
13 * shift this right to eliminate bits, that are
14 * always zeroed
15 */
16
17 return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
18#else 13#else
19 return 0; 14 return 0;
20#endif 15#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 661348f23ea5..e47503b4e4d1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -98,6 +98,7 @@ struct netns_ipv4 {
98 int sysctl_ip_default_ttl; 98 int sysctl_ip_default_ttl;
99 int sysctl_ip_no_pmtu_disc; 99 int sysctl_ip_no_pmtu_disc;
100 int sysctl_ip_fwd_use_pmtu; 100 int sysctl_ip_fwd_use_pmtu;
101 int sysctl_ip_fwd_update_priority;
101 int sysctl_ip_nonlocal_bind; 102 int sysctl_ip_nonlocal_bind;
102 /* Shall we try to damage output packets if routing dev changes? */ 103 /* Shall we try to damage output packets if routing dev changes? */
103 int sysctl_ip_dynaddr; 104 int sysctl_ip_dynaddr;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index c978a31b0f84..f0e396ab9bec 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -32,6 +32,7 @@ struct netns_sysctl_ipv6 {
32 int flowlabel_consistency; 32 int flowlabel_consistency;
33 int auto_flowlabels; 33 int auto_flowlabels;
34 int icmpv6_time; 34 int icmpv6_time;
35 int icmpv6_echo_ignore_all;
35 int anycast_src_echo_reply; 36 int anycast_src_echo_reply;
36 int ip_nonlocal_bind; 37 int ip_nonlocal_bind;
37 int fwmark_reflect; 38 int fwmark_reflect;
@@ -109,7 +110,6 @@ struct netns_ipv6 {
109 110
110#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 111#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
111struct netns_nf_frag { 112struct netns_nf_frag {
112 struct netns_sysctl_ipv6 sysctl;
113 struct netns_frags frags; 113 struct netns_frags frags;
114}; 114};
115#endif 115#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 94767ea3a490..286fd960896f 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,6 +7,7 @@
7struct netns_nftables { 7struct netns_nftables {
8 struct list_head tables; 8 struct list_head tables;
9 struct list_head commit_list; 9 struct list_head commit_list;
10 struct mutex commit_mutex;
10 unsigned int base_seq; 11 unsigned int base_seq;
11 u8 gencursor; 12 u8 gencursor;
12 u8 validate_state; 13 u8 validate_state;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a3c1a2c47cd4..ef727f71336e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -7,12 +7,16 @@
7#include <net/sch_generic.h> 7#include <net/sch_generic.h>
8#include <net/act_api.h> 8#include <net/act_api.h>
9 9
10/* TC action not accessible from user space */
11#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
12
10/* Basic packet classifier frontend definitions. */ 13/* Basic packet classifier frontend definitions. */
11 14
12struct tcf_walker { 15struct tcf_walker {
13 int stop; 16 int stop;
14 int skip; 17 int skip;
15 int count; 18 int count;
19 unsigned long cookie;
16 int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *); 20 int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
17}; 21};
18 22
@@ -36,9 +40,9 @@ struct tcf_block_cb;
36bool tcf_queue_work(struct rcu_work *rwork, work_func_t func); 40bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
37 41
38#ifdef CONFIG_NET_CLS 42#ifdef CONFIG_NET_CLS
39struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 43struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
40 bool create); 44 u32 chain_index);
41void tcf_chain_put(struct tcf_chain *chain); 45void tcf_chain_put_by_act(struct tcf_chain *chain);
42void tcf_block_netif_keep_dst(struct tcf_block *block); 46void tcf_block_netif_keep_dst(struct tcf_block *block);
43int tcf_block_get(struct tcf_block **p_block, 47int tcf_block_get(struct tcf_block **p_block,
44 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, 48 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -73,11 +77,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
73unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb); 77unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
74struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, 78struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
75 tc_setup_cb_t *cb, void *cb_ident, 79 tc_setup_cb_t *cb, void *cb_ident,
76 void *cb_priv); 80 void *cb_priv,
81 struct netlink_ext_ack *extack);
77int tcf_block_cb_register(struct tcf_block *block, 82int tcf_block_cb_register(struct tcf_block *block,
78 tc_setup_cb_t *cb, void *cb_ident, 83 tc_setup_cb_t *cb, void *cb_ident,
79 void *cb_priv); 84 void *cb_priv, struct netlink_ext_ack *extack);
80void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb); 85void __tcf_block_cb_unregister(struct tcf_block *block,
86 struct tcf_block_cb *block_cb);
81void tcf_block_cb_unregister(struct tcf_block *block, 87void tcf_block_cb_unregister(struct tcf_block *block,
82 tc_setup_cb_t *cb, void *cb_ident); 88 tc_setup_cb_t *cb, void *cb_ident);
83 89
@@ -161,7 +167,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
161static inline 167static inline
162struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, 168struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
163 tc_setup_cb_t *cb, void *cb_ident, 169 tc_setup_cb_t *cb, void *cb_ident,
164 void *cb_priv) 170 void *cb_priv,
171 struct netlink_ext_ack *extack)
165{ 172{
166 return NULL; 173 return NULL;
167} 174}
@@ -169,13 +176,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
169static inline 176static inline
170int tcf_block_cb_register(struct tcf_block *block, 177int tcf_block_cb_register(struct tcf_block *block,
171 tc_setup_cb_t *cb, void *cb_ident, 178 tc_setup_cb_t *cb, void *cb_ident,
172 void *cb_priv) 179 void *cb_priv, struct netlink_ext_ack *extack)
173{ 180{
174 return 0; 181 return 0;
175} 182}
176 183
177static inline 184static inline
178void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb) 185void __tcf_block_cb_unregister(struct tcf_block *block,
186 struct tcf_block_cb *block_cb)
179{ 187{
180} 188}
181 189
@@ -596,6 +604,7 @@ struct tc_block_offload {
596 enum tc_block_command command; 604 enum tc_block_command command;
597 enum tcf_block_binder_type binder_type; 605 enum tcf_block_binder_type binder_type;
598 struct tcf_block *block; 606 struct tcf_block *block;
607 struct netlink_ext_ack *extack;
599}; 608};
600 609
601struct tc_cls_common_offload { 610struct tc_cls_common_offload {
@@ -715,6 +724,8 @@ enum tc_fl_command {
715 TC_CLSFLOWER_REPLACE, 724 TC_CLSFLOWER_REPLACE,
716 TC_CLSFLOWER_DESTROY, 725 TC_CLSFLOWER_DESTROY,
717 TC_CLSFLOWER_STATS, 726 TC_CLSFLOWER_STATS,
727 TC_CLSFLOWER_TMPLT_CREATE,
728 TC_CLSFLOWER_TMPLT_DESTROY,
718}; 729};
719 730
720struct tc_cls_flower_offload { 731struct tc_cls_flower_offload {
@@ -771,6 +782,7 @@ struct tc_mqprio_qopt_offload {
771struct tc_cookie { 782struct tc_cookie {
772 u8 *data; 783 u8 *data;
773 u32 len; 784 u32 len;
785 struct rcu_head rcu;
774}; 786};
775 787
776struct tc_qopt_offload_stats { 788struct tc_qopt_offload_stats {
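
Driver-facing effect of the extack plumbing above, sketched for a hypothetical block bind/unbind handler (the example_* names are not from the patch):

static int example_setup_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

static int example_setup_block(void *priv, struct tc_block_offload *f)
{
	switch (f->command) {
	case TC_BLOCK_BIND:
		/* registration can now report failures via f->extack */
		return tcf_block_cb_register(f->block, example_setup_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, example_setup_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}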
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 815b92a23936..7dc769e5452b 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -72,6 +72,8 @@ struct qdisc_watchdog {
72 struct Qdisc *qdisc; 72 struct Qdisc *qdisc;
73}; 73};
74 74
75void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
76 clockid_t clockid);
75void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); 77void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
76void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); 78void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
77 79
@@ -153,4 +155,9 @@ struct tc_cbs_qopt_offload {
153 s32 sendslope; 155 s32 sendslope;
154}; 156};
155 157
158struct tc_etf_qopt_offload {
159 u8 enable;
160 s32 queue;
161};
162
156#endif 163#endif
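
The clockid-aware watchdog init above lets a qdisc (the ETF offload case) arm its watchdog against a clock other than CLOCK_MONOTONIC. A minimal sketch, assuming a private data struct that embeds the watchdog:

struct example_sched_data {
	struct qdisc_watchdog watchdog;
};

static int example_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			      struct netlink_ext_ack *extack)
{
	struct example_sched_data *q = qdisc_priv(sch);

	/* watchdog timeouts will be programmed in CLOCK_TAI time */
	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
	return 0;
}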
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6488daa32f82..a6d00093f35e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -20,6 +20,9 @@ struct qdisc_walker;
20struct tcf_walker; 20struct tcf_walker;
21struct module; 21struct module;
22 22
23typedef int tc_setup_cb_t(enum tc_setup_type type,
24 void *type_data, void *cb_priv);
25
23struct qdisc_rate_table { 26struct qdisc_rate_table {
24 struct tc_ratespec rate; 27 struct tc_ratespec rate;
25 u32 data[256]; 28 u32 data[256];
@@ -232,9 +235,17 @@ struct tcf_result {
232 u32 classid; 235 u32 classid;
233 }; 236 };
234 const struct tcf_proto *goto_tp; 237 const struct tcf_proto *goto_tp;
238
239 /* used by the TC_ACT_REINSERT action */
240 struct {
241 bool ingress;
242 struct gnet_stats_queue *qstats;
243 };
235 }; 244 };
236}; 245};
237 246
247struct tcf_chain;
248
238struct tcf_proto_ops { 249struct tcf_proto_ops {
239 struct list_head head; 250 struct list_head head;
240 char kind[IFNAMSIZ]; 251 char kind[IFNAMSIZ];
@@ -256,11 +267,22 @@ struct tcf_proto_ops {
256 bool *last, 267 bool *last,
257 struct netlink_ext_ack *); 268 struct netlink_ext_ack *);
258 void (*walk)(struct tcf_proto*, struct tcf_walker *arg); 269 void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
270 int (*reoffload)(struct tcf_proto *tp, bool add,
271 tc_setup_cb_t *cb, void *cb_priv,
272 struct netlink_ext_ack *extack);
259 void (*bind_class)(void *, u32, unsigned long); 273 void (*bind_class)(void *, u32, unsigned long);
274 void * (*tmplt_create)(struct net *net,
275 struct tcf_chain *chain,
276 struct nlattr **tca,
277 struct netlink_ext_ack *extack);
278 void (*tmplt_destroy)(void *tmplt_priv);
260 279
261 /* rtnetlink specific */ 280 /* rtnetlink specific */
262 int (*dump)(struct net*, struct tcf_proto*, void *, 281 int (*dump)(struct net*, struct tcf_proto*, void *,
263 struct sk_buff *skb, struct tcmsg*); 282 struct sk_buff *skb, struct tcmsg*);
283 int (*tmplt_dump)(struct sk_buff *skb,
284 struct net *net,
285 void *tmplt_priv);
264 286
265 struct module *owner; 287 struct module *owner;
266}; 288};
@@ -269,6 +291,8 @@ struct tcf_proto {
269 /* Fast access part */ 291 /* Fast access part */
270 struct tcf_proto __rcu *next; 292 struct tcf_proto __rcu *next;
271 void __rcu *root; 293 void __rcu *root;
294
 295 /* called under RCU BH lock */
272 int (*classify)(struct sk_buff *, 296 int (*classify)(struct sk_buff *,
273 const struct tcf_proto *, 297 const struct tcf_proto *,
274 struct tcf_result *); 298 struct tcf_result *);
@@ -294,11 +318,14 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
294 318
295struct tcf_chain { 319struct tcf_chain {
296 struct tcf_proto __rcu *filter_chain; 320 struct tcf_proto __rcu *filter_chain;
297 struct list_head filter_chain_list;
298 struct list_head list; 321 struct list_head list;
299 struct tcf_block *block; 322 struct tcf_block *block;
300 u32 index; /* chain index */ 323 u32 index; /* chain index */
301 unsigned int refcnt; 324 unsigned int refcnt;
325 unsigned int action_refcnt;
326 bool explicitly_created;
327 const struct tcf_proto_ops *tmplt_ops;
328 void *tmplt_priv;
302}; 329};
303 330
304struct tcf_block { 331struct tcf_block {
@@ -312,6 +339,10 @@ struct tcf_block {
312 bool keep_dst; 339 bool keep_dst;
 313 unsigned int offloadcnt; /* Number of offloaded filters */ 340
314 unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ 341 unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
342 struct {
343 struct tcf_chain *chain;
344 struct list_head filter_chain_list;
345 } chain0;
315}; 346};
316 347
317static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) 348static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
@@ -330,6 +361,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
330 block->offloadcnt--; 361 block->offloadcnt--;
331} 362}
332 363
364static inline void
365tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
366 u32 *flags, bool add)
367{
368 if (add) {
369 if (!*cnt)
370 tcf_block_offload_inc(block, flags);
371 (*cnt)++;
372 } else {
373 (*cnt)--;
374 if (!*cnt)
375 tcf_block_offload_dec(block, flags);
376 }
377}
378
333static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 379static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
334{ 380{
335 struct qdisc_skb_cb *qcb; 381 struct qdisc_skb_cb *qcb;
@@ -529,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb)
529#endif 575#endif
530} 576}
531 577
578static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
579{
580#ifdef CONFIG_NET_CLS_ACT
581 return skb->tc_redirected;
582#else
583 return false;
584#endif
585}
586
532static inline bool skb_at_tc_ingress(const struct sk_buff *skb) 587static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
533{ 588{
534#ifdef CONFIG_NET_CLS_ACT 589#ifdef CONFIG_NET_CLS_ACT
@@ -1068,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1068void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, 1123void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1069 struct mini_Qdisc __rcu **p_miniq); 1124 struct mini_Qdisc __rcu **p_miniq);
1070 1125
1126static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
1127{
1128 struct gnet_stats_queue *stats = res->qstats;
1129 int ret;
1130
1131 if (res->ingress)
1132 ret = netif_receive_skb(skb);
1133 else
1134 ret = dev_queue_xmit(skb);
1135 if (ret && stats)
1136 qstats_overlimit_inc(res->qstats);
1137}
1138
1071#endif 1139#endif
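
tc_cls_offload_cnt_update() pairs a per-filter in-hardware count with the block-wide offload flags. A hedged sketch of how a classifier's reoffload path might use it (the struct and function names are illustrative):

struct example_filter {
	unsigned int in_hw_count;
	u32 flags;
};

static void example_filter_hw_update(struct tcf_block *block,
				     struct example_filter *f, bool add)
{
	/* bumps/drops block->offloadcnt only on the 0 <-> 1 transition */
	tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags, add);
}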
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 30b3e2fe240a..8c2caa370e0f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
109int sctp_inet_listen(struct socket *sock, int backlog); 109int sctp_inet_listen(struct socket *sock, int backlog);
110void sctp_write_space(struct sock *sk); 110void sctp_write_space(struct sock *sk);
111void sctp_data_ready(struct sock *sk); 111void sctp_data_ready(struct sock *sk);
112__poll_t sctp_poll_mask(struct socket *sock, __poll_t events); 112__poll_t sctp_poll(struct file *file, struct socket *sock,
113 poll_table *wait);
113void sctp_sock_rfree(struct sk_buff *skb); 114void sctp_sock_rfree(struct sk_buff *skb);
114void sctp_copy_sock(struct sock *newsk, struct sock *sk, 115void sctp_copy_sock(struct sock *newsk, struct sock *sk,
115 struct sctp_association *asoc); 116 struct sctp_association *asoc);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index dbe1b911a24d..28a7c8e44636 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -48,7 +48,7 @@
48#define __sctp_structs_h__ 48#define __sctp_structs_h__
49 49
50#include <linux/ktime.h> 50#include <linux/ktime.h>
51#include <linux/rhashtable.h> 51#include <linux/rhashtable-types.h>
52#include <linux/socket.h> /* linux/in.h needs this!! */ 52#include <linux/socket.h> /* linux/in.h needs this!! */
53#include <linux/in.h> /* We get struct sockaddr_in. */ 53#include <linux/in.h> /* We get struct sockaddr_in. */
54#include <linux/in6.h> /* We get struct in6_addr */ 54#include <linux/in6.h> /* We get struct in6_addr */
@@ -57,6 +57,7 @@
57#include <linux/atomic.h> /* This gets us atomic counters. */ 57#include <linux/atomic.h> /* This gets us atomic counters. */
58#include <linux/skbuff.h> /* We need sk_buff_head. */ 58#include <linux/skbuff.h> /* We need sk_buff_head. */
59#include <linux/workqueue.h> /* We need tq_struct. */ 59#include <linux/workqueue.h> /* We need tq_struct. */
60#include <linux/flex_array.h> /* We need flex_array. */
60#include <linux/sctp.h> /* We need sctp* header structs. */ 61#include <linux/sctp.h> /* We need sctp* header structs. */
61#include <net/sctp/auth.h> /* We need auth specific structs */ 62#include <net/sctp/auth.h> /* We need auth specific structs */
62#include <net/ip.h> /* For inet_skb_parm */ 63#include <net/ip.h> /* For inet_skb_parm */
@@ -193,6 +194,9 @@ struct sctp_sock {
193 /* This is the max_retrans value for new associations. */ 194 /* This is the max_retrans value for new associations. */
194 __u16 pathmaxrxt; 195 __u16 pathmaxrxt;
195 196
197 __u32 flowlabel;
198 __u8 dscp;
199
196 /* The initial Path MTU to use for new associations. */ 200 /* The initial Path MTU to use for new associations. */
197 __u32 pathmtu; 201 __u32 pathmtu;
198 202
@@ -220,6 +224,7 @@ struct sctp_sock {
220 __u32 adaptation_ind; 224 __u32 adaptation_ind;
221 __u32 pd_point; 225 __u32 pd_point;
222 __u16 nodelay:1, 226 __u16 nodelay:1,
227 reuse:1,
223 disable_fragments:1, 228 disable_fragments:1,
224 v4mapped:1, 229 v4mapped:1,
225 frag_interleave:1, 230 frag_interleave:1,
@@ -394,37 +399,35 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
394 399
395/* What is the current SSN number for this stream? */ 400/* What is the current SSN number for this stream? */
396#define sctp_ssn_peek(stream, type, sid) \ 401#define sctp_ssn_peek(stream, type, sid) \
397 ((stream)->type[sid].ssn) 402 (sctp_stream_##type((stream), (sid))->ssn)
398 403
399/* Return the next SSN number for this stream. */ 404/* Return the next SSN number for this stream. */
400#define sctp_ssn_next(stream, type, sid) \ 405#define sctp_ssn_next(stream, type, sid) \
401 ((stream)->type[sid].ssn++) 406 (sctp_stream_##type((stream), (sid))->ssn++)
402 407
403/* Skip over this ssn and all below. */ 408/* Skip over this ssn and all below. */
404#define sctp_ssn_skip(stream, type, sid, ssn) \ 409#define sctp_ssn_skip(stream, type, sid, ssn) \
405 ((stream)->type[sid].ssn = ssn + 1) 410 (sctp_stream_##type((stream), (sid))->ssn = ssn + 1)
406 411
407/* What is the current MID number for this stream? */ 412/* What is the current MID number for this stream? */
408#define sctp_mid_peek(stream, type, sid) \ 413#define sctp_mid_peek(stream, type, sid) \
409 ((stream)->type[sid].mid) 414 (sctp_stream_##type((stream), (sid))->mid)
410 415
411/* Return the next MID number for this stream. */ 416/* Return the next MID number for this stream. */
412#define sctp_mid_next(stream, type, sid) \ 417#define sctp_mid_next(stream, type, sid) \
413 ((stream)->type[sid].mid++) 418 (sctp_stream_##type((stream), (sid))->mid++)
414 419
415/* Skip over this mid and all below. */ 420/* Skip over this mid and all below. */
416#define sctp_mid_skip(stream, type, sid, mid) \ 421#define sctp_mid_skip(stream, type, sid, mid) \
417 ((stream)->type[sid].mid = mid + 1) 422 (sctp_stream_##type((stream), (sid))->mid = mid + 1)
418
419#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
420 423
421/* What is the current MID_uo number for this stream? */ 424/* What is the current MID_uo number for this stream? */
422#define sctp_mid_uo_peek(stream, type, sid) \ 425#define sctp_mid_uo_peek(stream, type, sid) \
423 ((stream)->type[sid].mid_uo) 426 (sctp_stream_##type((stream), (sid))->mid_uo)
424 427
425/* Return the next MID_uo number for this stream. */ 428/* Return the next MID_uo number for this stream. */
426#define sctp_mid_uo_next(stream, type, sid) \ 429#define sctp_mid_uo_next(stream, type, sid) \
427 ((stream)->type[sid].mid_uo++) 430 (sctp_stream_##type((stream), (sid))->mid_uo++)
428 431
429/* 432/*
430 * Pointers to address related SCTP functions. 433 * Pointers to address related SCTP functions.
@@ -894,6 +897,9 @@ struct sctp_transport {
894 */ 897 */
895 __u16 pathmaxrxt; 898 __u16 pathmaxrxt;
896 899
900 __u32 flowlabel;
901 __u8 dscp;
902
897 /* This is the partially failed retrans value for the transport 903 /* This is the partially failed retrans value for the transport
898 * and will be initialized from the assocs value. This can be changed 904 * and will be initialized from the assocs value. This can be changed
899 * using the SCTP_PEER_ADDR_THLDS socket option 905 * using the SCTP_PEER_ADDR_THLDS socket option
@@ -1433,8 +1439,8 @@ struct sctp_stream_in {
1433}; 1439};
1434 1440
1435struct sctp_stream { 1441struct sctp_stream {
1436 struct sctp_stream_out *out; 1442 struct flex_array *out;
1437 struct sctp_stream_in *in; 1443 struct flex_array *in;
1438 __u16 outcnt; 1444 __u16 outcnt;
1439 __u16 incnt; 1445 __u16 incnt;
1440 /* Current stream being sent, if any */ 1446 /* Current stream being sent, if any */
@@ -1456,6 +1462,23 @@ struct sctp_stream {
1456 struct sctp_stream_interleave *si; 1462 struct sctp_stream_interleave *si;
1457}; 1463};
1458 1464
1465static inline struct sctp_stream_out *sctp_stream_out(
1466 const struct sctp_stream *stream,
1467 __u16 sid)
1468{
1469 return flex_array_get(stream->out, sid);
1470}
1471
1472static inline struct sctp_stream_in *sctp_stream_in(
1473 const struct sctp_stream *stream,
1474 __u16 sid)
1475{
1476 return flex_array_get(stream->in, sid);
1477}
1478
1479#define SCTP_SO(s, i) sctp_stream_out((s), (i))
1480#define SCTP_SI(s, i) sctp_stream_in((s), (i))
1481
1459#define SCTP_STREAM_CLOSED 0x00 1482#define SCTP_STREAM_CLOSED 0x00
1460#define SCTP_STREAM_OPEN 0x01 1483#define SCTP_STREAM_OPEN 0x01
1461 1484
@@ -1771,6 +1794,9 @@ struct sctp_association {
1771 */ 1794 */
1772 __u16 pathmaxrxt; 1795 __u16 pathmaxrxt;
1773 1796
1797 __u32 flowlabel;
1798 __u8 dscp;
1799
1774 /* Flag that path mtu update is pending */ 1800 /* Flag that path mtu update is pending */
1775 __u8 pmtu_pending; 1801 __u8 pmtu_pending;
1776 1802
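
With the per-stream arrays now held in flex_arrays, direct indexing such as stream->out[sid] no longer works; lookups go through sctp_stream_out()/sctp_stream_in() (or the SCTP_SO()/SCTP_SI() shorthands), and the sctp_ssn_*/sctp_mid_* macros paste the accessor name from their type argument. A small before/after sketch, assuming a valid sid:

static __u16 example_next_ssn(struct sctp_stream *stream, __u16 sid)
{
        /* before: return stream->out[sid].ssn++;   (plain array indexing)    */
        /* after:  the macro expands to sctp_stream_out(stream, sid)->ssn++   */
        return sctp_ssn_next(stream, out, sid);
}

static struct sctp_stream_in *example_in(struct sctp_stream *stream, __u16 sid)
{
        return SCTP_SI(stream, sid);    /* flex_array_get(stream->in, sid) */
}
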
diff --git a/include/net/seg6.h b/include/net/seg6.h
index e029e301faa5..2567941a2f32 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -18,7 +18,7 @@
18#include <linux/ipv6.h> 18#include <linux/ipv6.h>
19#include <net/lwtunnel.h> 19#include <net/lwtunnel.h>
20#include <linux/seg6.h> 20#include <linux/seg6.h>
21#include <linux/rhashtable.h> 21#include <linux/rhashtable-types.h>
22 22
23static inline void update_csum_diff4(struct sk_buff *skb, __be32 from, 23static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
24 __be32 to) 24 __be32 to)
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 69c3a106056b..7fda469e2758 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -22,7 +22,7 @@
22#include <linux/route.h> 22#include <linux/route.h>
23#include <net/seg6.h> 23#include <net/seg6.h>
24#include <linux/seg6_hmac.h> 24#include <linux/seg6_hmac.h>
25#include <linux/rhashtable.h> 25#include <linux/rhashtable-types.h>
26 26
27#define SEG6_HMAC_MAX_DIGESTSIZE 160 27#define SEG6_HMAC_MAX_DIGESTSIZE 160
28#define SEG6_HMAC_RING_SIZE 256 28#define SEG6_HMAC_RING_SIZE 256
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
index 661fd5b4d3e0..08359e2d8b35 100644
--- a/include/net/seg6_local.h
+++ b/include/net/seg6_local.h
@@ -21,10 +21,12 @@
21 21
22extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, 22extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
23 u32 tbl_id); 23 u32 tbl_id);
24extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
24 25
25struct seg6_bpf_srh_state { 26struct seg6_bpf_srh_state {
26 bool valid; 27 struct ipv6_sr_hdr *srh;
27 u16 hdrlen; 28 u16 hdrlen;
29 bool valid;
28}; 30};
29 31
30DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states); 32DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
diff --git a/include/net/smc.h b/include/net/smc.h
index 8381d163fefa..9ef49f8b1002 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -11,6 +11,8 @@
11#ifndef _SMC_H 11#ifndef _SMC_H
12#define _SMC_H 12#define _SMC_H
13 13
14#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
15
14struct smc_hashinfo { 16struct smc_hashinfo {
15 rwlock_t lock; 17 rwlock_t lock;
16 struct hlist_head ht; 18 struct hlist_head ht;
@@ -18,4 +20,67 @@ struct smc_hashinfo {
18 20
19int smc_hash_sk(struct sock *sk); 21int smc_hash_sk(struct sock *sk);
20void smc_unhash_sk(struct sock *sk); 22void smc_unhash_sk(struct sock *sk);
23
24/* SMCD/ISM device driver interface */
25struct smcd_dmb {
26 u64 dmb_tok;
27 u64 rgid;
28 u32 dmb_len;
29 u32 sba_idx;
30 u32 vlan_valid;
31 u32 vlan_id;
32 void *cpu_addr;
33 dma_addr_t dma_addr;
34};
35
36#define ISM_EVENT_DMB 0
37#define ISM_EVENT_GID 1
38#define ISM_EVENT_SWR 2
39
40struct smcd_event {
41 u32 type;
42 u32 code;
43 u64 tok;
44 u64 time;
45 u64 info;
46};
47
48struct smcd_dev;
49
50struct smcd_ops {
51 int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
52 u32 vid);
53 int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
54 int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
55 int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
56 int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
57 int (*set_vlan_required)(struct smcd_dev *dev);
58 int (*reset_vlan_required)(struct smcd_dev *dev);
59 int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
60 u32 event_code, u64 info);
61 int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
62 bool sf, unsigned int offset, void *data,
63 unsigned int size);
64};
65
66struct smcd_dev {
67 const struct smcd_ops *ops;
68 struct device dev;
69 void *priv;
70 u64 local_gid;
71 struct list_head list;
72 spinlock_t lock;
73 struct smc_connection **conn;
74 struct list_head vlan;
75 struct workqueue_struct *event_wq;
76 u8 pnetid[SMC_MAX_PNETID_LEN];
77};
78
79struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
80 const struct smcd_ops *ops, int max_dmbs);
81int smcd_register_dev(struct smcd_dev *smcd);
82void smcd_unregister_dev(struct smcd_dev *smcd);
83void smcd_free_dev(struct smcd_dev *smcd);
84void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
85void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
21#endif /* _SMC_H */ 86#endif /* _SMC_H */
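
The new block is the SMC-D/ISM device driver interface: a platform driver implements struct smcd_ops, allocates a device with smcd_alloc_dev(), registers it, and forwards hardware events through smcd_handle_event()/smcd_handle_irq(). A hedged sketch of the expected registration flow; every my_ism_* name and MY_ISM_MAX_DMBS are hypothetical placeholders:

static const struct smcd_ops my_ism_ops = {
        .query_remote_gid       = my_ism_query_rgid,
        .register_dmb           = my_ism_register_dmb,
        .unregister_dmb         = my_ism_unregister_dmb,
        .add_vlan_id            = my_ism_add_vlan_id,
        .del_vlan_id            = my_ism_del_vlan_id,
        .set_vlan_required      = my_ism_set_vlan_required,
        .reset_vlan_required    = my_ism_reset_vlan_required,
        .signal_event           = my_ism_signal_event,
        .move_data              = my_ism_move_data,
};

static int my_ism_probe(struct device *parent)
{
        struct smcd_dev *smcd;
        int rc;

        smcd = smcd_alloc_dev(parent, "my_ism", &my_ism_ops, MY_ISM_MAX_DMBS);
        if (!smcd)
                return -ENOMEM;

        smcd->local_gid = my_ism_read_local_gid();      /* hypothetical helper */
        rc = smcd_register_dev(smcd);
        if (rc)
                smcd_free_dev(smcd);
        return rc;
        /* teardown later: smcd_unregister_dev(smcd); smcd_free_dev(smcd); */
}
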
diff --git a/include/net/sock.h b/include/net/sock.h
index b3b75419eafe..433f45fc2d68 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair;
139 * @skc_node: main hash linkage for various protocol lookup tables 139 * @skc_node: main hash linkage for various protocol lookup tables
140 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol 140 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
141 * @skc_tx_queue_mapping: tx queue number for this connection 141 * @skc_tx_queue_mapping: tx queue number for this connection
142 * @skc_rx_queue_mapping: rx queue number for this connection
142 * @skc_flags: place holder for sk_flags 143 * @skc_flags: place holder for sk_flags
143 * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, 144 * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
144 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings 145 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -214,7 +215,10 @@ struct sock_common {
214 struct hlist_node skc_node; 215 struct hlist_node skc_node;
215 struct hlist_nulls_node skc_nulls_node; 216 struct hlist_nulls_node skc_nulls_node;
216 }; 217 };
217 int skc_tx_queue_mapping; 218 unsigned short skc_tx_queue_mapping;
219#ifdef CONFIG_XPS
220 unsigned short skc_rx_queue_mapping;
221#endif
218 union { 222 union {
219 int skc_incoming_cpu; 223 int skc_incoming_cpu;
220 u32 skc_rcv_wnd; 224 u32 skc_rcv_wnd;
@@ -315,6 +319,9 @@ struct sock_common {
315 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0 319 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
316 * @sk_reuseport_cb: reuseport group container 320 * @sk_reuseport_cb: reuseport group container
317 * @sk_rcu: used during RCU grace period 321 * @sk_rcu: used during RCU grace period
322 * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
323 * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
324 * @sk_txtime_unused: unused txtime flags
318 */ 325 */
319struct sock { 326struct sock {
320 /* 327 /*
@@ -326,6 +333,9 @@ struct sock {
326#define sk_nulls_node __sk_common.skc_nulls_node 333#define sk_nulls_node __sk_common.skc_nulls_node
327#define sk_refcnt __sk_common.skc_refcnt 334#define sk_refcnt __sk_common.skc_refcnt
328#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping 335#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
336#ifdef CONFIG_XPS
337#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
338#endif
329 339
330#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin 340#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
331#define sk_dontcopy_end __sk_common.skc_dontcopy_end 341#define sk_dontcopy_end __sk_common.skc_dontcopy_end
@@ -468,6 +478,12 @@ struct sock {
468 u8 sk_shutdown; 478 u8 sk_shutdown;
469 u32 sk_tskey; 479 u32 sk_tskey;
470 atomic_t sk_zckey; 480 atomic_t sk_zckey;
481
482 u8 sk_clockid;
483 u8 sk_txtime_deadline_mode : 1,
484 sk_txtime_report_errors : 1,
485 sk_txtime_unused : 6;
486
471 struct socket *sk_socket; 487 struct socket *sk_socket;
472 void *sk_user_data; 488 void *sk_user_data;
473#ifdef CONFIG_SECURITY 489#ifdef CONFIG_SECURITY
@@ -783,6 +799,7 @@ enum sock_flags {
783 SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ 799 SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
784 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ 800 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
785 SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ 801 SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
802 SOCK_TXTIME,
786}; 803};
787 804
788#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) 805#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1578,10 +1595,17 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size);
1578void sk_send_sigurg(struct sock *sk); 1595void sk_send_sigurg(struct sock *sk);
1579 1596
1580struct sockcm_cookie { 1597struct sockcm_cookie {
1598 u64 transmit_time;
1581 u32 mark; 1599 u32 mark;
1582 u16 tsflags; 1600 u16 tsflags;
1583}; 1601};
1584 1602
1603static inline void sockcm_init(struct sockcm_cookie *sockc,
1604 const struct sock *sk)
1605{
1606 *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
1607}
1608
1585int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, 1609int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1586 struct sockcm_cookie *sockc); 1610 struct sockcm_cookie *sockc);
1587int sock_cmsg_send(struct sock *sk, struct msghdr *msg, 1611int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
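
struct sockcm_cookie grows a transmit_time field for SO_TXTIME, and the new sockcm_init() seeds a cookie from the socket's defaults before any cmsgs are parsed. Roughly how a protocol sendmsg path is expected to use it (illustrative, error handling trimmed):

static int example_collect_cmsgs(struct sock *sk, struct msghdr *msg,
                                 struct sockcm_cookie *sockc)
{
        sockcm_init(sockc, sk);         /* preloads tsflags from sk->sk_tsflags */

        if (!msg->msg_controllen)
                return 0;

        /* may fill sockc->mark and sockc->transmit_time (SCM_TXTIME) */
        return sock_cmsg_send(sk, msg, sockc);
}
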
@@ -1681,19 +1705,58 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1681 1705
1682static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) 1706static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1683{ 1707{
1708 /* sk_tx_queue_mapping accept only upto a 16-bit value */
1709 if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
1710 return;
1684 sk->sk_tx_queue_mapping = tx_queue; 1711 sk->sk_tx_queue_mapping = tx_queue;
1685} 1712}
1686 1713
1714#define NO_QUEUE_MAPPING USHRT_MAX
1715
1687static inline void sk_tx_queue_clear(struct sock *sk) 1716static inline void sk_tx_queue_clear(struct sock *sk)
1688{ 1717{
1689 sk->sk_tx_queue_mapping = -1; 1718 sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
1690} 1719}
1691 1720
1692static inline int sk_tx_queue_get(const struct sock *sk) 1721static inline int sk_tx_queue_get(const struct sock *sk)
1693{ 1722{
1694 return sk ? sk->sk_tx_queue_mapping : -1; 1723 if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
1724 return sk->sk_tx_queue_mapping;
1725
1726 return -1;
1695} 1727}
1696 1728
1729static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
1730{
1731#ifdef CONFIG_XPS
1732 if (skb_rx_queue_recorded(skb)) {
1733 u16 rx_queue = skb_get_rx_queue(skb);
1734
1735 if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
1736 return;
1737
1738 sk->sk_rx_queue_mapping = rx_queue;
1739 }
1740#endif
1741}
1742
1743static inline void sk_rx_queue_clear(struct sock *sk)
1744{
1745#ifdef CONFIG_XPS
1746 sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
1747#endif
1748}
1749
1750#ifdef CONFIG_XPS
1751static inline int sk_rx_queue_get(const struct sock *sk)
1752{
1753 if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
1754 return sk->sk_rx_queue_mapping;
1755
1756 return -1;
1757}
1758#endif
1759
1697static inline void sk_set_socket(struct sock *sk, struct socket *sock) 1760static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1698{ 1761{
1699 sk_tx_queue_clear(sk); 1762 sk_tx_queue_clear(sk);
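
The queue mappings shrink to unsigned short with USHRT_MAX (NO_QUEUE_MAPPING) as the "unset" value, and under CONFIG_XPS the receive path can now record the RX queue on the socket so later transmits can be steered toward the paired queue. A small illustrative helper:

#ifdef CONFIG_XPS
static void example_note_rx_queue(struct sock *sk, const struct sk_buff *skb)
{
        sk_rx_queue_set(sk, skb);               /* only if skb_rx_queue_recorded(skb) */

        if (sk_rx_queue_get(sk) >= 0)           /* -1 means nothing recorded yet */
                pr_debug("flow bound to rx queue %d\n", sk_rx_queue_get(sk));
}
#endif
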
@@ -1725,7 +1788,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
1725{ 1788{
1726 WARN_ON(parent->sk); 1789 WARN_ON(parent->sk);
1727 write_lock_bh(&sk->sk_callback_lock); 1790 write_lock_bh(&sk->sk_callback_lock);
1728 sk->sk_wq = parent->wq; 1791 rcu_assign_pointer(sk->sk_wq, parent->wq);
1729 parent->sk = sk; 1792 parent->sk = sk;
1730 sk_set_socket(sk, parent); 1793 sk_set_socket(sk, parent);
1731 sk->sk_uid = SOCK_INODE(parent)->i_uid; 1794 sk->sk_uid = SOCK_INODE(parent)->i_uid;
@@ -1994,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
1994/** 2057/**
1995 * sock_poll_wait - place memory barrier behind the poll_wait call. 2058 * sock_poll_wait - place memory barrier behind the poll_wait call.
1996 * @filp: file 2059 * @filp: file
1997 * @wait_address: socket wait queue
1998 * @p: poll_table 2060 * @p: poll_table
1999 * 2061 *
2000 * See the comments in the wq_has_sleeper function. 2062 * See the comments in the wq_has_sleeper function.
2001 */ 2063 */
2002static inline void sock_poll_wait(struct file *filp, 2064static inline void sock_poll_wait(struct file *filp, poll_table *p)
2003 wait_queue_head_t *wait_address, poll_table *p)
2004{ 2065{
2005 if (!poll_does_not_wait(p) && wait_address) { 2066 struct socket *sock = filp->private_data;
2006 poll_wait(filp, wait_address, p); 2067
2068 if (!poll_does_not_wait(p)) {
2069 poll_wait(filp, &sock->wq->wait, p);
2007 /* We need to be sure we are in sync with the 2070 /* We need to be sure we are in sync with the
2008 * socket flags modification. 2071 * socket flags modification.
2009 * 2072 *
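
sock_poll_wait() also loses its wait_address parameter: the helper now pulls the socket out of file->private_data and waits on sock->wq->wait itself, so callers only pass the file and the poll_table. Illustrative before/after for a caller:

static void example_register_poll(struct file *file, poll_table *wait)
{
        /* before: sock_poll_wait(file, sk_sleep(sock->sk), wait); */
        sock_poll_wait(file, wait);     /* socket derived from file->private_data */
}
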
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 0054b3a9b923..8a5f70c7cdf2 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -5,25 +5,36 @@
5#include <linux/filter.h> 5#include <linux/filter.h>
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/spinlock.h>
8#include <net/sock.h> 9#include <net/sock.h>
9 10
11extern spinlock_t reuseport_lock;
12
10struct sock_reuseport { 13struct sock_reuseport {
11 struct rcu_head rcu; 14 struct rcu_head rcu;
12 15
13 u16 max_socks; /* length of socks */ 16 u16 max_socks; /* length of socks */
14 u16 num_socks; /* elements in socks */ 17 u16 num_socks; /* elements in socks */
18 /* The last synq overflow event timestamp of this
19 * reuse->socks[] group.
20 */
21 unsigned int synq_overflow_ts;
22 /* ID stays the same even after the size of socks[] grows. */
23 unsigned int reuseport_id;
24 bool bind_inany;
15 struct bpf_prog __rcu *prog; /* optional BPF sock selector */ 25 struct bpf_prog __rcu *prog; /* optional BPF sock selector */
16 struct sock *socks[0]; /* array of sock pointers */ 26 struct sock *socks[0]; /* array of sock pointers */
17}; 27};
18 28
19extern int reuseport_alloc(struct sock *sk); 29extern int reuseport_alloc(struct sock *sk, bool bind_inany);
20extern int reuseport_add_sock(struct sock *sk, struct sock *sk2); 30extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
31 bool bind_inany);
21extern void reuseport_detach_sock(struct sock *sk); 32extern void reuseport_detach_sock(struct sock *sk);
22extern struct sock *reuseport_select_sock(struct sock *sk, 33extern struct sock *reuseport_select_sock(struct sock *sk,
23 u32 hash, 34 u32 hash,
24 struct sk_buff *skb, 35 struct sk_buff *skb,
25 int hdr_len); 36 int hdr_len);
26extern struct bpf_prog *reuseport_attach_prog(struct sock *sk, 37extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
27 struct bpf_prog *prog); 38int reuseport_get_id(struct sock_reuseport *reuse);
28 39
29#endif /* _SOCK_REUSEPORT_H */ 40#endif /* _SOCK_REUSEPORT_H */
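
reuseport_alloc()/reuseport_add_sock() now take a bind_inany flag, and reuseport_attach_prog() returns an error code instead of the replaced program pointer, which changes the caller-side pattern. A minimal hedged sketch:

static int example_attach_reuseport_prog(struct sock *sk, struct bpf_prog *prog)
{
        /* before: returned the replaced struct bpf_prog *
         * after:  plain 0 / -errno
         */
        return reuseport_attach_prog(sk, prog);
}
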
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9470fd7e4350..32d2454c0479 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -7,7 +7,6 @@
7#include <linux/tc_act/tc_csum.h> 7#include <linux/tc_act/tc_csum.h>
8 8
9struct tcf_csum_params { 9struct tcf_csum_params {
10 int action;
11 u32 update_flags; 10 u32 update_flags;
12 struct rcu_head rcu; 11 struct rcu_head rcu;
13}; 12};
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 227a6f1d02f4..fac3ad4a86de 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -17,6 +17,7 @@ struct tcf_pedit {
17 struct tc_pedit_key *tcfp_keys; 17 struct tc_pedit_key *tcfp_keys;
18 struct tcf_pedit_key_ex *tcfp_keys_ex; 18 struct tcf_pedit_key_ex *tcfp_keys_ex;
19}; 19};
20
20#define to_pedit(a) ((struct tcf_pedit *)a) 21#define to_pedit(a) ((struct tcf_pedit *)a)
21 22
22static inline bool is_tcf_pedit(const struct tc_action *a) 23static inline bool is_tcf_pedit(const struct tc_action *a)
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 19cd3d345804..911bbac838a2 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -22,14 +22,19 @@
22#include <net/act_api.h> 22#include <net/act_api.h>
23#include <linux/tc_act/tc_skbedit.h> 23#include <linux/tc_act/tc_skbedit.h>
24 24
25struct tcf_skbedit_params {
26 u32 flags;
27 u32 priority;
28 u32 mark;
29 u32 mask;
30 u16 queue_mapping;
31 u16 ptype;
32 struct rcu_head rcu;
33};
34
25struct tcf_skbedit { 35struct tcf_skbedit {
26 struct tc_action common; 36 struct tc_action common;
27 u32 flags; 37 struct tcf_skbedit_params __rcu *params;
28 u32 priority;
29 u32 mark;
30 u32 mask;
31 u16 queue_mapping;
32 u16 ptype;
33}; 38};
34#define to_skbedit(a) ((struct tcf_skbedit *)a) 39#define to_skbedit(a) ((struct tcf_skbedit *)a)
35 40
@@ -37,15 +42,27 @@ struct tcf_skbedit {
37static inline bool is_tcf_skbedit_mark(const struct tc_action *a) 42static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
38{ 43{
39#ifdef CONFIG_NET_CLS_ACT 44#ifdef CONFIG_NET_CLS_ACT
40 if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) 45 u32 flags;
41 return to_skbedit(a)->flags == SKBEDIT_F_MARK; 46
47 if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
48 rcu_read_lock();
49 flags = rcu_dereference(to_skbedit(a)->params)->flags;
50 rcu_read_unlock();
51 return flags == SKBEDIT_F_MARK;
52 }
42#endif 53#endif
43 return false; 54 return false;
44} 55}
45 56
46static inline u32 tcf_skbedit_mark(const struct tc_action *a) 57static inline u32 tcf_skbedit_mark(const struct tc_action *a)
47{ 58{
48 return to_skbedit(a)->mark; 59 u32 mark;
60
61 rcu_read_lock();
62 mark = rcu_dereference(to_skbedit(a)->params)->mark;
63 rcu_read_unlock();
64
65 return mark;
49} 66}
50 67
51#endif /* __NET_TC_SKBEDIT_H */ 68#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index efef0b4b1b2b..46b8c7f1c8d5 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -18,7 +18,6 @@
18struct tcf_tunnel_key_params { 18struct tcf_tunnel_key_params {
19 struct rcu_head rcu; 19 struct rcu_head rcu;
20 int tcft_action; 20 int tcft_action;
21 int action;
22 struct metadata_dst *tcft_enc_metadata; 21 struct metadata_dst *tcft_enc_metadata;
23}; 22};
24 23
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0448e7c5d2b4..d196901c9dba 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -36,6 +36,7 @@
36#include <net/inet_hashtables.h> 36#include <net/inet_hashtables.h>
37#include <net/checksum.h> 37#include <net/checksum.h>
38#include <net/request_sock.h> 38#include <net/request_sock.h>
39#include <net/sock_reuseport.h>
39#include <net/sock.h> 40#include <net/sock.h>
40#include <net/snmp.h> 41#include <net/snmp.h>
41#include <net/ip.h> 42#include <net/ip.h>
@@ -342,6 +343,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
342 struct pipe_inode_info *pipe, size_t len, 343 struct pipe_inode_info *pipe, size_t len,
343 unsigned int flags); 344 unsigned int flags);
344 345
346void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
345static inline void tcp_dec_quickack_mode(struct sock *sk, 347static inline void tcp_dec_quickack_mode(struct sock *sk,
346 const unsigned int pkts) 348 const unsigned int pkts)
347{ 349{
@@ -388,7 +390,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
388void tcp_close(struct sock *sk, long timeout); 390void tcp_close(struct sock *sk, long timeout);
389void tcp_init_sock(struct sock *sk); 391void tcp_init_sock(struct sock *sk);
390void tcp_init_transfer(struct sock *sk, int bpf_op); 392void tcp_init_transfer(struct sock *sk, int bpf_op);
391__poll_t tcp_poll_mask(struct socket *sock, __poll_t events); 393__poll_t tcp_poll(struct file *file, struct socket *sock,
394 struct poll_table_struct *wait);
392int tcp_getsockopt(struct sock *sk, int level, int optname, 395int tcp_getsockopt(struct sock *sk, int level, int optname,
393 char __user *optval, int __user *optlen); 396 char __user *optval, int __user *optlen);
394int tcp_setsockopt(struct sock *sk, int level, int optname, 397int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -471,19 +474,45 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
471 */ 474 */
472static inline void tcp_synq_overflow(const struct sock *sk) 475static inline void tcp_synq_overflow(const struct sock *sk)
473{ 476{
474 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; 477 unsigned int last_overflow;
475 unsigned long now = jiffies; 478 unsigned int now = jiffies;
476 479
477 if (time_after(now, last_overflow + HZ)) 480 if (sk->sk_reuseport) {
481 struct sock_reuseport *reuse;
482
483 reuse = rcu_dereference(sk->sk_reuseport_cb);
484 if (likely(reuse)) {
485 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
486 if (time_after32(now, last_overflow + HZ))
487 WRITE_ONCE(reuse->synq_overflow_ts, now);
488 return;
489 }
490 }
491
492 last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
493 if (time_after32(now, last_overflow + HZ))
478 tcp_sk(sk)->rx_opt.ts_recent_stamp = now; 494 tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
479} 495}
480 496
481/* syncookies: no recent synqueue overflow on this listening socket? */ 497/* syncookies: no recent synqueue overflow on this listening socket? */
482static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) 498static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
483{ 499{
484 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; 500 unsigned int last_overflow;
501 unsigned int now = jiffies;
502
503 if (sk->sk_reuseport) {
504 struct sock_reuseport *reuse;
485 505
486 return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID); 506 reuse = rcu_dereference(sk->sk_reuseport_cb);
507 if (likely(reuse)) {
508 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
509 return time_after32(now, last_overflow +
510 TCP_SYNCOOKIE_VALID);
511 }
512 }
513
514 last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
515 return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
487} 516}
488 517
489static inline u32 tcp_cookie_time(void) 518static inline u32 tcp_cookie_time(void)
@@ -538,6 +567,7 @@ void tcp_send_fin(struct sock *sk);
538void tcp_send_active_reset(struct sock *sk, gfp_t priority); 567void tcp_send_active_reset(struct sock *sk, gfp_t priority);
539int tcp_send_synack(struct sock *); 568int tcp_send_synack(struct sock *);
540void tcp_push_one(struct sock *, unsigned int mss_now); 569void tcp_push_one(struct sock *, unsigned int mss_now);
570void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
541void tcp_send_ack(struct sock *sk); 571void tcp_send_ack(struct sock *sk);
542void tcp_send_delayed_ack(struct sock *sk); 572void tcp_send_delayed_ack(struct sock *sk);
543void tcp_send_loss_probe(struct sock *sk); 573void tcp_send_loss_probe(struct sock *sk);
@@ -827,6 +857,10 @@ struct tcp_skb_cb {
827 857
828#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 858#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
829 859
860static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
861{
862 TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
863}
830 864
831#if IS_ENABLED(CONFIG_IPV6) 865#if IS_ENABLED(CONFIG_IPV6)
832/* This is the variant of inet6_iif() that must be used by TCP, 866/* This is the variant of inet6_iif() that must be used by TCP,
@@ -834,6 +868,11 @@ struct tcp_skb_cb {
834 */ 868 */
835static inline int tcp_v6_iif(const struct sk_buff *skb) 869static inline int tcp_v6_iif(const struct sk_buff *skb)
836{ 870{
871 return TCP_SKB_CB(skb)->header.h6.iif;
872}
873
874static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
875{
837 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); 876 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
838 877
839 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; 878 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
@@ -907,8 +946,6 @@ enum tcp_ca_event {
907 CA_EVENT_LOSS, /* loss timeout */ 946 CA_EVENT_LOSS, /* loss timeout */
908 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ 947 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
909 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ 948 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
910 CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
911 CA_EVENT_NON_DELAYED_ACK,
912}; 949};
913 950
914/* Information about inbound ACK, passed to cong_ops->in_ack_event() */ 951/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
@@ -953,6 +990,8 @@ struct rate_sample {
953 u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ 990 u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
954 s32 delivered; /* number of packets delivered over interval */ 991 s32 delivered; /* number of packets delivered over interval */
955 long interval_us; /* time for tp->delivered to incr "delivered" */ 992 long interval_us; /* time for tp->delivered to incr "delivered" */
993 u32 snd_interval_us; /* snd interval for delivered packets */
994 u32 rcv_interval_us; /* rcv interval for delivered packets */
956 long rtt_us; /* RTT of last (S)ACKed packet (or -1) */ 995 long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
957 int losses; /* number of packets marked lost upon ACK */ 996 int losses; /* number of packets marked lost upon ACK */
958 u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */ 997 u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
@@ -1184,6 +1223,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1184 return tp->is_cwnd_limited; 1223 return tp->is_cwnd_limited;
1185} 1224}
1186 1225
1226/* BBR congestion control needs pacing.
1227 * Same remark for SO_MAX_PACING_RATE.
1228 * sch_fq packet scheduler is efficiently handling pacing,
1229 * but is not always installed/used.
1230 * Return true if TCP stack should pace packets itself.
1231 */
1232static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1233{
1234 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1235}
1236
1187/* Something is really bad, we could not queue an additional packet, 1237/* Something is really bad, we could not queue an additional packet,
1188 * because qdisc is full or receiver sent a 0 window. 1238 * because qdisc is full or receiver sent a 0 window.
1189 * We do not want to add fuel to the fire, or abort too early, 1239 * We do not want to add fuel to the fire, or abort too early,
@@ -1361,7 +1411,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1361{ 1411{
1362 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) 1412 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1363 return true; 1413 return true;
1364 if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) 1414 if (unlikely(!time_before32(ktime_get_seconds(),
1415 rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
1365 return true; 1416 return true;
1366 /* 1417 /*
1367 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, 1418 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1391,7 +1442,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1391 1442
1392 However, we can relax time bounds for RST segments to MSL. 1443 However, we can relax time bounds for RST segments to MSL.
1393 */ 1444 */
1394 if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) 1445 if (rst && !time_before32(ktime_get_seconds(),
1446 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1395 return false; 1447 return false;
1396 return true; 1448 return true;
1397} 1449}
@@ -1777,7 +1829,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
1777 1829
1778struct sk_buff *tcp_gso_segment(struct sk_buff *skb, 1830struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1779 netdev_features_t features); 1831 netdev_features_t features);
1780struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); 1832struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
1781int tcp_gro_complete(struct sk_buff *skb); 1833int tcp_gro_complete(struct sk_buff *skb);
1782 1834
1783void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); 1835void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
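
The PAWS helpers above switch from get_seconds() plus an open-coded >= to ktime_get_seconds() with time_before32()/time_after32(), so the stored 32-bit ts_recent_stamp keeps working across wraparound. The equivalent standalone check, kept for illustration:

static bool example_ts_recent_expired(const struct tcp_options_received *rx_opt)
{
        /* true once ts_recent_stamp is older than TCP_PAWS_24DAYS, even if
         * the low 32 bits of ktime_get_seconds() have wrapped in the meantime
         */
        return !time_before32(ktime_get_seconds(),
                              rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS);
}
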
diff --git a/include/net/tls.h b/include/net/tls.h
index 7f84ea3e217c..d5c683e8bb22 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,6 +83,16 @@ struct tls_device {
83 void (*unhash)(struct tls_device *device, struct sock *sk); 83 void (*unhash)(struct tls_device *device, struct sock *sk);
84}; 84};
85 85
86enum {
87 TLS_BASE,
88 TLS_SW,
89#ifdef CONFIG_TLS_DEVICE
90 TLS_HW,
91#endif
92 TLS_HW_RECORD,
93 TLS_NUM_CONFIG,
94};
95
86struct tls_sw_context_tx { 96struct tls_sw_context_tx {
87 struct crypto_aead *aead_send; 97 struct crypto_aead *aead_send;
88 struct crypto_wait async_wait; 98 struct crypto_wait async_wait;
@@ -109,14 +119,11 @@ struct tls_sw_context_rx {
109 119
110 struct strparser strp; 120 struct strparser strp;
111 void (*saved_data_ready)(struct sock *sk); 121 void (*saved_data_ready)(struct sock *sk);
112 __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events); 122 unsigned int (*sk_poll)(struct file *file, struct socket *sock,
123 struct poll_table_struct *wait);
113 struct sk_buff *recv_pkt; 124 struct sk_buff *recv_pkt;
114 u8 control; 125 u8 control;
115 bool decrypted; 126 bool decrypted;
116
117 char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
118 char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
119
120}; 127};
121 128
122struct tls_record_info { 129struct tls_record_info {
@@ -127,7 +134,7 @@ struct tls_record_info {
127 skb_frag_t frags[MAX_SKB_FRAGS]; 134 skb_frag_t frags[MAX_SKB_FRAGS];
128}; 135};
129 136
130struct tls_offload_context { 137struct tls_offload_context_tx {
131 struct crypto_aead *aead_send; 138 struct crypto_aead *aead_send;
132 spinlock_t lock; /* protects records list */ 139 spinlock_t lock; /* protects records list */
133 struct list_head records_list; 140 struct list_head records_list;
@@ -146,8 +153,8 @@ struct tls_offload_context {
146#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *))) 153#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
147}; 154};
148 155
149#define TLS_OFFLOAD_CONTEXT_SIZE \ 156#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
150 (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \ 157 (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
151 TLS_DRIVER_STATE_SIZE) 158 TLS_DRIVER_STATE_SIZE)
152 159
153enum { 160enum {
@@ -196,6 +203,7 @@ struct tls_context {
196 int (*push_pending_record)(struct sock *sk, int flags); 203 int (*push_pending_record)(struct sock *sk, int flags);
197 204
198 void (*sk_write_space)(struct sock *sk); 205 void (*sk_write_space)(struct sock *sk);
206 void (*sk_destruct)(struct sock *sk);
199 void (*sk_proto_close)(struct sock *sk, long timeout); 207 void (*sk_proto_close)(struct sock *sk, long timeout);
200 208
201 int (*setsockopt)(struct sock *sk, int level, 209 int (*setsockopt)(struct sock *sk, int level,
@@ -208,13 +216,27 @@ struct tls_context {
208 void (*unhash)(struct sock *sk); 216 void (*unhash)(struct sock *sk);
209}; 217};
210 218
219struct tls_offload_context_rx {
220 /* sw must be the first member of tls_offload_context_rx */
221 struct tls_sw_context_rx sw;
222 atomic64_t resync_req;
223 u8 driver_state[];
224 /* The TLS layer reserves room for driver specific state
225 * Currently the belief is that there is not enough
226 * driver specific state to justify another layer of indirection
227 */
228};
229
230#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
231 (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
232 TLS_DRIVER_STATE_SIZE)
233
211int wait_on_pending_writer(struct sock *sk, long *timeo); 234int wait_on_pending_writer(struct sock *sk, long *timeo);
212int tls_sk_query(struct sock *sk, int optname, char __user *optval, 235int tls_sk_query(struct sock *sk, int optname, char __user *optval,
213 int __user *optlen); 236 int __user *optlen);
214int tls_sk_attach(struct sock *sk, int optname, char __user *optval, 237int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
215 unsigned int optlen); 238 unsigned int optlen);
216 239
217
218int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); 240int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
219int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); 241int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
220int tls_sw_sendpage(struct sock *sk, struct page *page, 242int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -222,9 +244,11 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
222void tls_sw_close(struct sock *sk, long timeout); 244void tls_sw_close(struct sock *sk, long timeout);
223void tls_sw_free_resources_tx(struct sock *sk); 245void tls_sw_free_resources_tx(struct sock *sk);
224void tls_sw_free_resources_rx(struct sock *sk); 246void tls_sw_free_resources_rx(struct sock *sk);
247void tls_sw_release_resources_rx(struct sock *sk);
225int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 248int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
226 int nonblock, int flags, int *addr_len); 249 int nonblock, int flags, int *addr_len);
227__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events); 250unsigned int tls_sw_poll(struct file *file, struct socket *sock,
251 struct poll_table_struct *wait);
228ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, 252ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
229 struct pipe_inode_info *pipe, 253 struct pipe_inode_info *pipe,
230 size_t len, unsigned int flags); 254 size_t len, unsigned int flags);
@@ -237,7 +261,7 @@ void tls_device_sk_destruct(struct sock *sk);
237void tls_device_init(void); 261void tls_device_init(void);
238void tls_device_cleanup(void); 262void tls_device_cleanup(void);
239 263
240struct tls_record_info *tls_get_record(struct tls_offload_context *context, 264struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
241 u32 seq, u64 *p_record_sn); 265 u32 seq, u64 *p_record_sn);
242 266
243static inline bool tls_record_is_start_marker(struct tls_record_info *rec) 267static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -287,11 +311,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
287 return tls_ctx->pending_open_record_frags; 311 return tls_ctx->pending_open_record_frags;
288} 312}
289 313
314struct sk_buff *
315tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
316 struct sk_buff *skb);
317
290static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) 318static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
291{ 319{
292 return sk_fullsock(sk) && 320#ifdef CONFIG_SOCK_VALIDATE_XMIT
293 /* matches smp_store_release in tls_set_device_offload */ 321 return sk_fullsock(sk) &
294 smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct; 322 (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
323 &tls_validate_xmit_skb);
324#else
325 return false;
326#endif
295} 327}
296 328
297static inline void tls_err_abort(struct sock *sk, int err) 329static inline void tls_err_abort(struct sock *sk, int err)
@@ -378,23 +410,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
378 return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx; 410 return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
379} 411}
380 412
381static inline struct tls_offload_context *tls_offload_ctx( 413static inline struct tls_offload_context_tx *
382 const struct tls_context *tls_ctx) 414tls_offload_ctx_tx(const struct tls_context *tls_ctx)
415{
416 return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
417}
418
419static inline struct tls_offload_context_rx *
420tls_offload_ctx_rx(const struct tls_context *tls_ctx)
383{ 421{
384 return (struct tls_offload_context *)tls_ctx->priv_ctx_tx; 422 return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
385} 423}
386 424
425/* The TLS context is valid until sk_destruct is called */
426static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
427{
428 struct tls_context *tls_ctx = tls_get_ctx(sk);
429 struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
430
431 atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
432}
433
434
387int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, 435int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
388 unsigned char *record_type); 436 unsigned char *record_type);
389void tls_register_device(struct tls_device *device); 437void tls_register_device(struct tls_device *device);
390void tls_unregister_device(struct tls_device *device); 438void tls_unregister_device(struct tls_device *device);
439int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
440int decrypt_skb(struct sock *sk, struct sk_buff *skb,
441 struct scatterlist *sgout);
391 442
392struct sk_buff *tls_validate_xmit_skb(struct sock *sk, 443struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
393 struct net_device *dev, 444 struct net_device *dev,
394 struct sk_buff *skb); 445 struct sk_buff *skb);
395 446
396int tls_sw_fallback_init(struct sock *sk, 447int tls_sw_fallback_init(struct sock *sk,
397 struct tls_offload_context *offload_ctx, 448 struct tls_offload_context_tx *offload_ctx,
398 struct tls_crypto_info *crypto_info); 449 struct tls_crypto_info *crypto_info);
399 450
451int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
452
453void tls_device_offload_cleanup_rx(struct sock *sk);
454void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
455
400#endif /* _TLS_OFFLOAD_H */ 456#endif /* _TLS_OFFLOAD_H */
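
For the new RX inline-crypto offload, drivers interact with two small hooks: tls_is_sk_tx_device_offloaded() on the transmit side and tls_offload_rx_resync_request() when the receive-side crypto state is lost. A hedged sketch of both call sites; the surrounding driver context is hypothetical:

static bool example_skb_uses_tls_offload(const struct sk_buff *skb)
{
        /* TX path: true when the socket's validate-xmit hook is the TLS one */
        return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
}

static void example_lost_rx_crypto_state(struct sock *sk, __be32 tcp_seq)
{
        /* RX path: packs (seq << 32) | 1 into resync_req for the TLS core */
        tls_offload_rx_resync_request(sk, tcp_seq);
}
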
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index f6a3543e5247..a8f6020f1196 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
42 struct sk_buff *skb); 42 struct sk_buff *skb);
43 43
44int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, 44int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
45 struct flowi6 *fl6, struct ipcm6_cookie *ipc6, 45 struct flowi6 *fl6, struct ipcm6_cookie *ipc6);
46 struct sockcm_cookie *sockc);
47 46
48void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, 47void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
49 __u16 srcp, __u16 destp, int rqueue, int bucket); 48 __u16 srcp, __u16 destp, int rqueue, int bucket);
diff --git a/include/net/udp.h b/include/net/udp.h
index b1ea8b0f5e6a..8482a990b0bb 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
170typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, 170typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
171 __be16 dport); 171 __be16 dport);
172 172
173struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, 173struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
174 struct udphdr *uh, udp_lookup_t lookup); 174 struct udphdr *uh, udp_lookup_t lookup);
175int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); 175int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
176 176
177struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, 177struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
285int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 285int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
286int __udp_disconnect(struct sock *sk, int flags); 286int __udp_disconnect(struct sock *sk, int flags);
287int udp_disconnect(struct sock *sk, int flags); 287int udp_disconnect(struct sock *sk, int flags);
288__poll_t udp_poll_mask(struct socket *sock, __poll_t events); 288__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
289struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, 289struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
290 netdev_features_t features, 290 netdev_features_t features,
291 bool is_ipv6); 291 bool is_ipv6);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b95a6927c718..fe680ab6b15a 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net,
65 65
66typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); 66typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
67typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); 67typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
68typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk, 68typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
69 struct sk_buff **head, 69 struct list_head *head,
70 struct sk_buff *skb); 70 struct sk_buff *skb);
71typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb, 71typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
72 int nhoff); 72 int nhoff);
73 73
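
tcp_gro_receive(), udp_gro_receive() and the udp_tunnel_gro_receive_t callback above all move from skb-chain (struct sk_buff **) to list_head based GRO. A handler now walks the per-bucket list roughly like this (illustrative skeleton, header comparison omitted):

static struct sk_buff *example_gro_receive(struct list_head *head,
                                           struct sk_buff *skb)
{
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;
                /* compare protocol headers of 'p' and 'skb' here and either
                 * clear same_flow or let the lower layer merge them
                 */
        }

        return NULL;    /* non-NULL would ask the core to flush that skb */
}
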
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 2deea7166a34..76b95256c266 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -84,6 +84,13 @@ struct xdp_frame {
84 struct net_device *dev_rx; /* used by cpumap */ 84 struct net_device *dev_rx; /* used by cpumap */
85}; 85};
86 86
87/* Clear kernel pointers in xdp_frame */
88static inline void xdp_scrub_frame(struct xdp_frame *frame)
89{
90 frame->data = NULL;
91 frame->dev_rx = NULL;
92}
93
87/* Convert xdp_buff to xdp_frame */ 94/* Convert xdp_buff to xdp_frame */
88static inline 95static inline
89struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) 96struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
@@ -144,4 +151,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
144 return unlikely(xdp->data_meta > xdp->data); 151 return unlikely(xdp->data_meta > xdp->data);
145} 152}
146 153
154struct xdp_attachment_info {
155 struct bpf_prog *prog;
156 u32 flags;
157};
158
159struct netdev_bpf;
160int xdp_attachment_query(struct xdp_attachment_info *info,
161 struct netdev_bpf *bpf);
162bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
163 struct netdev_bpf *bpf);
164void xdp_attachment_setup(struct xdp_attachment_info *info,
165 struct netdev_bpf *bpf);
166
147#endif /* __LINUX_NET_XDP_H__ */ 167#endif /* __LINUX_NET_XDP_H__ */
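
The new xdp_attachment_* helpers factor out the prog/flags bookkeeping that every driver's ndo_bpf used to open-code. A hedged sketch of the expected usage; struct example_priv and the hardware-setup step are hypothetical:

struct example_priv {
        struct xdp_attachment_info xdp;         /* hypothetical driver private data */
};

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        struct example_priv *priv = netdev_priv(dev);

        switch (bpf->command) {
        case XDP_SETUP_PROG:
                if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
                        return -EBUSY;          /* flags conflict with the installed prog */
                /* ... point the hardware/datapath at bpf->prog here ... */
                xdp_attachment_setup(&priv->xdp, bpf);  /* records the new prog and flags */
                return 0;
        case XDP_QUERY_PROG:
                return xdp_attachment_query(&priv->xdp, bpf);
        default:
                return -EINVAL;
        }
}
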
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 9fe472f2ac95..7161856bcf9c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -60,6 +60,10 @@ struct xdp_sock {
60 bool zc; 60 bool zc;
61 /* Protects multiple processes in the control path */ 61 /* Protects multiple processes in the control path */
62 struct mutex mutex; 62 struct mutex mutex;
63 /* Mutual exclusion of NAPI TX thread and sendmsg error paths
64 * in the SKB destructor callback.
65 */
66 spinlock_t tx_completion_lock;
63 u64 rx_dropped; 67 u64 rx_dropped;
64}; 68};
65 69
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 557122846e0e..0eb390c205af 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -23,6 +23,7 @@
23#include <net/ipv6.h> 23#include <net/ipv6.h>
24#include <net/ip6_fib.h> 24#include <net/ip6_fib.h>
25#include <net/flow.h> 25#include <net/flow.h>
26#include <net/gro_cells.h>
26 27
27#include <linux/interrupt.h> 28#include <linux/interrupt.h>
28 29
@@ -147,6 +148,7 @@ struct xfrm_state {
147 struct xfrm_id id; 148 struct xfrm_id id;
148 struct xfrm_selector sel; 149 struct xfrm_selector sel;
149 struct xfrm_mark mark; 150 struct xfrm_mark mark;
151 u32 if_id;
150 u32 tfcpad; 152 u32 tfcpad;
151 153
152 u32 genid; 154 u32 genid;
@@ -166,7 +168,7 @@ struct xfrm_state {
166 int header_len; 168 int header_len;
167 int trailer_len; 169 int trailer_len;
168 u32 extra_flags; 170 u32 extra_flags;
169 u32 output_mark; 171 struct xfrm_mark smark;
170 } props; 172 } props;
171 173
172 struct xfrm_lifetime_cfg lft; 174 struct xfrm_lifetime_cfg lft;
@@ -225,7 +227,7 @@ struct xfrm_state {
225 long saved_tmo; 227 long saved_tmo;
226 228
227 /* Last used time */ 229 /* Last used time */
228 unsigned long lastused; 230 time64_t lastused;
229 231
230 struct page_frag xfrag; 232 struct page_frag xfrag;
231 233
@@ -292,6 +294,13 @@ struct xfrm_replay {
292 int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); 294 int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
293}; 295};
294 296
297struct xfrm_if_cb {
298 struct xfrm_if *(*decode_session)(struct sk_buff *skb);
299};
300
301void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
302void xfrm_if_unregister_cb(void);
303
295struct net_device; 304struct net_device;
296struct xfrm_type; 305struct xfrm_type;
297struct xfrm_dst; 306struct xfrm_dst;
@@ -323,7 +332,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
323void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo); 332void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
324void km_policy_notify(struct xfrm_policy *xp, int dir, 333void km_policy_notify(struct xfrm_policy *xp, int dir,
325 const struct km_event *c); 334 const struct km_event *c);
326void xfrm_policy_cache_flush(void);
327void km_state_notify(struct xfrm_state *x, const struct km_event *c); 335void km_state_notify(struct xfrm_state *x, const struct km_event *c);
328 336
329struct xfrm_tmpl; 337struct xfrm_tmpl;
@@ -574,6 +582,7 @@ struct xfrm_policy {
574 atomic_t genid; 582 atomic_t genid;
575 u32 priority; 583 u32 priority;
576 u32 index; 584 u32 index;
585 u32 if_id;
577 struct xfrm_mark mark; 586 struct xfrm_mark mark;
578 struct xfrm_selector selector; 587 struct xfrm_selector selector;
579 struct xfrm_lifetime_cfg lft; 588 struct xfrm_lifetime_cfg lft;
@@ -735,7 +744,7 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
735{ 744{
736 struct audit_buffer *audit_buf = NULL; 745 struct audit_buffer *audit_buf = NULL;
737 746
738 if (audit_enabled == 0) 747 if (audit_enabled == AUDIT_OFF)
739 return NULL; 748 return NULL;
740 audit_buf = audit_log_start(audit_context(), GFP_ATOMIC, 749 audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
741 AUDIT_MAC_IPSEC_EVENT); 750 AUDIT_MAC_IPSEC_EVENT);
@@ -1037,6 +1046,22 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1037 1046
1038void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); 1047void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1039 1048
1049struct xfrm_if_parms {
1050 char name[IFNAMSIZ]; /* name of XFRM device */
1051 int link; /* ifindex of underlying L2 interface */
1052 u32 if_id; /* interface identifyer */
1053};
1054
1055struct xfrm_if {
1056 struct xfrm_if __rcu *next; /* next interface in list */
1057 struct net_device *dev; /* virtual device associated with interface */
1058 struct net_device *phydev; /* physical device */
1059 struct net *net; /* netns for packet i/o */
1060 struct xfrm_if_parms p; /* interface parms */
1061
1062 struct gro_cells gro_cells;
1063};
1064
1040struct xfrm_offload { 1065struct xfrm_offload {
1041 /* Output sequence number for replay protection on offloading. */ 1066 /* Output sequence number for replay protection on offloading. */
1042 struct { 1067 struct {
@@ -1532,8 +1557,8 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1532 const struct flowi *fl, 1557 const struct flowi *fl,
1533 struct xfrm_tmpl *tmpl, 1558 struct xfrm_tmpl *tmpl,
1534 struct xfrm_policy *pol, int *err, 1559 struct xfrm_policy *pol, int *err,
1535 unsigned short family); 1560 unsigned short family, u32 if_id);
1536struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, 1561struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1537 xfrm_address_t *daddr, 1562 xfrm_address_t *daddr,
1538 xfrm_address_t *saddr, 1563 xfrm_address_t *saddr,
1539 unsigned short family, 1564 unsigned short family,
@@ -1690,20 +1715,20 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1690 void *); 1715 void *);
1691void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); 1716void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1692int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 1717int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1693struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, 1718struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1694 u8 type, int dir, 1719 u8 type, int dir,
1695 struct xfrm_selector *sel, 1720 struct xfrm_selector *sel,
1696 struct xfrm_sec_ctx *ctx, int delete, 1721 struct xfrm_sec_ctx *ctx, int delete,
1697 int *err); 1722 int *err);
1698struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, 1723struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
1699 u32 id, int delete, int *err); 1724 int dir, u32 id, int delete, int *err);
1700int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); 1725int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1701void xfrm_policy_hash_rebuild(struct net *net); 1726void xfrm_policy_hash_rebuild(struct net *net);
1702u32 xfrm_get_acqseq(void); 1727u32 xfrm_get_acqseq(void);
1703int verify_spi_info(u8 proto, u32 min, u32 max); 1728int verify_spi_info(u8 proto, u32 min, u32 max);
1704int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); 1729int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1705struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, 1730struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1706 u8 mode, u32 reqid, u8 proto, 1731 u8 mode, u32 reqid, u32 if_id, u8 proto,
1707 const xfrm_address_t *daddr, 1732 const xfrm_address_t *daddr,
1708 const xfrm_address_t *saddr, int create, 1733 const xfrm_address_t *saddr, int create,
1709 unsigned short family); 1734 unsigned short family);
@@ -2012,6 +2037,22 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2012 return ret; 2037 return ret;
2013} 2038}
2014 2039
2040static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
2041{
2042 struct xfrm_mark *m = &x->props.smark;
2043
2044 return (m->v & m->m) | (mark & ~m->m);
2045}
2046
2047static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
2048{
2049 int ret = 0;
2050
2051 if (if_id)
2052 ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
2053 return ret;
2054}
2055
2015static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x, 2056static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2016 unsigned int family) 2057 unsigned int family)
2017{ 2058{
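
props.output_mark (a bare u32) becomes props.smark, a value/mask pair, and xfrm_smark_get() merges it into the flow mark so a state can rewrite only selected bits. Worked example: with smark.v = 0x0000beef and smark.m = 0x0000ffff, a mark of 0xabcd1234 becomes (0xbeef & 0xffff) | (0xabcd1234 & ~0xffff) = 0xabcdbeef. Illustrative caller:

static u32 example_output_mark(u32 flow_mark, struct xfrm_state *x)
{
        /* (smark.v & smark.m) | (flow_mark & ~smark.m) */
        return xfrm_smark_get(flow_mark, x);
}
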
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4c6241bc2039..6c003995347a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
3391 * 3391 *
3392 * Users can examine the cq structure to determine the actual CQ size. 3392 * Users can examine the cq structure to determine the actual CQ size.
3393 */ 3393 */
3394struct ib_cq *ib_create_cq(struct ib_device *device, 3394struct ib_cq *__ib_create_cq(struct ib_device *device,
3395 ib_comp_handler comp_handler, 3395 ib_comp_handler comp_handler,
3396 void (*event_handler)(struct ib_event *, void *), 3396 void (*event_handler)(struct ib_event *, void *),
3397 void *cq_context, 3397 void *cq_context,
3398 const struct ib_cq_init_attr *cq_attr); 3398 const struct ib_cq_init_attr *cq_attr,
3399 const char *caller);
3400#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3401 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3399 3402
3400/** 3403/**
3401 * ib_resize_cq - Modifies the capacity of the CQ. 3404 * ib_resize_cq - Modifies the capacity of the CQ.
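
ib_create_cq() becomes a macro around __ib_create_cq() so the creating module (KBUILD_MODNAME) is recorded with every CQ for resource tracking; existing callers keep compiling unchanged. Illustrative caller, with the handler arguments left NULL for brevity:

static struct ib_cq *example_alloc_cq(struct ib_device *device, void *cq_context)
{
        struct ib_cq_init_attr attr = { .cqe = 128, .comp_vector = 0 };

        /* expands to __ib_create_cq(device, NULL, NULL, cq_context, &attr,
         * KBUILD_MODNAME)
         */
        return ib_create_cq(device, NULL, NULL, cq_context, &attr);
}
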
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 225ab7783dfd..3de3b10da19a 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,7 +161,7 @@ struct sata_device {
161 u8 port_no; /* port number, if this is a PM (Port) */ 161 u8 port_no; /* port number, if this is a PM (Port) */
162 162
163 struct ata_port *ap; 163 struct ata_port *ap;
164 struct ata_host ata_host; 164 struct ata_host *ata_host;
165 struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */ 165 struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
166 u8 fis[ATA_RESP_FIS_SIZE]; 166 u8 fis[ATA_RESP_FIS_SIZE];
167}; 167};
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index aaf1e971c6a3..c891ada3c5c2 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6#include <linux/blkdev.h> 6#include <linux/blkdev.h>
7#include <linux/t10-pi.h>
7#include <linux/list.h> 8#include <linux/list.h>
8#include <linux/types.h> 9#include <linux/types.h>
9#include <linux/timer.h> 10#include <linux/timer.h>
@@ -14,8 +15,6 @@
14struct Scsi_Host; 15struct Scsi_Host;
15struct scsi_driver; 16struct scsi_driver;
16 17
17#include <scsi/scsi_device.h>
18
19/* 18/*
20 * MAX_COMMAND_SIZE is: 19 * MAX_COMMAND_SIZE is:
21 * The longest fixed-length SCSI CDB as per the SCSI standard. 20 * The longest fixed-length SCSI CDB as per the SCSI standard.
@@ -120,11 +119,11 @@ struct scsi_cmnd {
120 struct request *request; /* The command we are 119 struct request *request; /* The command we are
121 working on */ 120 working on */
122 121
123#define SCSI_SENSE_BUFFERSIZE 96
124 unsigned char *sense_buffer; 122 unsigned char *sense_buffer;
125 /* obtained by REQUEST SENSE when 123 /* obtained by REQUEST SENSE when
126 * CHECK CONDITION is received on original 124 * CHECK CONDITION is received on original
127 * command (auto-sense) */ 125 * command (auto-sense). Length must be
126 * SCSI_SENSE_BUFFERSIZE bytes. */
128 127
129 /* Low-level done function - can be used by low-level driver to point 128 /* Low-level done function - can be used by low-level driver to point
130 * to completion function. Not used by mid/upper level code. */ 129 * to completion function. Not used by mid/upper level code. */
@@ -313,12 +312,6 @@ static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
313 return scmd->device->sector_size; 312 return scmd->device->sector_size;
314} 313}
315 314
316static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
317{
318 return blk_rq_pos(scmd->request) >>
319 (ilog2(scsi_prot_interval(scmd)) - 9) & 0xffffffff;
320}
321
322static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd) 315static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
323{ 316{
324 return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0; 317 return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 4c36af6edd79..202f4d6a4342 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -17,6 +17,8 @@ struct scsi_sense_hdr;
17 17
18typedef __u64 __bitwise blist_flags_t; 18typedef __u64 __bitwise blist_flags_t;
19 19
20#define SCSI_SENSE_BUFFERSIZE 96
21
20struct scsi_mode_data { 22struct scsi_mode_data {
21 __u32 length; 23 __u32 length;
22 __u16 block_descriptor_length; 24 __u16 block_descriptor_length;
@@ -426,11 +428,21 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
426extern int scsi_is_sdev_device(const struct device *); 428extern int scsi_is_sdev_device(const struct device *);
427extern int scsi_is_target_device(const struct device *); 429extern int scsi_is_target_device(const struct device *);
428extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); 430extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
429extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 431extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
430 int data_direction, void *buffer, unsigned bufflen, 432 int data_direction, void *buffer, unsigned bufflen,
431 unsigned char *sense, struct scsi_sense_hdr *sshdr, 433 unsigned char *sense, struct scsi_sense_hdr *sshdr,
432 int timeout, int retries, u64 flags, 434 int timeout, int retries, u64 flags,
433 req_flags_t rq_flags, int *resid); 435 req_flags_t rq_flags, int *resid);
436/* Make sure any sense buffer is the correct size. */
437#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
438 sshdr, timeout, retries, flags, rq_flags, resid) \
439({ \
440 BUILD_BUG_ON((sense) != NULL && \
441 sizeof(sense) != SCSI_SENSE_BUFFERSIZE); \
442 __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, \
443 sense, sshdr, timeout, retries, flags, rq_flags, \
444 resid); \
445})
434static inline int scsi_execute_req(struct scsi_device *sdev, 446static inline int scsi_execute_req(struct scsi_device *sdev,
435 const unsigned char *cmd, int data_direction, void *buffer, 447 const unsigned char *cmd, int data_direction, void *buffer,
436 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, 448 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
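
The BUILD_BUG_ON in the scsi_execute() wrapper only catches mismatches when the sense argument is an actual array, so that sizeof() resolves to the buffer size at compile time. A minimal sketch of a conforming caller; the TEST UNIT READY command, timeout and retry values are illustrative, not part of this change:

static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	/* must be an array of SCSI_SENSE_BUFFERSIZE bytes, not a pointer,
	 * or the compile-time size check cannot do its job */
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	struct scsi_sense_hdr sshdr;

	return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense, &sshdr,
			    10 * HZ, 3, 0, 0, NULL);
}
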
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 53b485fe9b67..5ea06d310a25 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -758,6 +758,7 @@ extern void scsi_scan_host(struct Scsi_Host *);
758extern void scsi_rescan_device(struct device *); 758extern void scsi_rescan_device(struct device *);
759extern void scsi_remove_host(struct Scsi_Host *); 759extern void scsi_remove_host(struct Scsi_Host *);
760extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); 760extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
761extern int scsi_host_busy(struct Scsi_Host *shost);
761extern void scsi_host_put(struct Scsi_Host *t); 762extern void scsi_host_put(struct Scsi_Host *t);
762extern struct Scsi_Host *scsi_host_lookup(unsigned short); 763extern struct Scsi_Host *scsi_host_lookup(unsigned short);
763extern const char *scsi_host_state_name(enum scsi_host_state); 764extern const char *scsi_host_state_name(enum scsi_host_state);
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
new file mode 100644
index 000000000000..619e07c75da9
--- /dev/null
+++ b/include/soc/qcom/rpmh.h
@@ -0,0 +1,51 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#ifndef __SOC_QCOM_RPMH_H__
7#define __SOC_QCOM_RPMH_H__
8
9#include <soc/qcom/tcs.h>
10#include <linux/platform_device.h>
11
12
13#if IS_ENABLED(CONFIG_QCOM_RPMH)
14int rpmh_write(const struct device *dev, enum rpmh_state state,
15 const struct tcs_cmd *cmd, u32 n);
16
17int rpmh_write_async(const struct device *dev, enum rpmh_state state,
18 const struct tcs_cmd *cmd, u32 n);
19
20int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
21 const struct tcs_cmd *cmd, u32 *n);
22
23int rpmh_flush(const struct device *dev);
24
25int rpmh_invalidate(const struct device *dev);
26
27#else
28
29static inline int rpmh_write(const struct device *dev, enum rpmh_state state,
30 const struct tcs_cmd *cmd, u32 n)
31{ return -ENODEV; }
32
33static inline int rpmh_write_async(const struct device *dev,
34 enum rpmh_state state,
35 const struct tcs_cmd *cmd, u32 n)
36{ return -ENODEV; }
37
38static inline int rpmh_write_batch(const struct device *dev,
39 enum rpmh_state state,
40 const struct tcs_cmd *cmd, u32 *n)
41{ return -ENODEV; }
42
43static inline int rpmh_flush(const struct device *dev)
44{ return -ENODEV; }
45
46static inline int rpmh_invalidate(const struct device *dev)
47{ return -ENODEV; }
48
49#endif /* CONFIG_QCOM_RPMH */
50
51#endif /* __SOC_QCOM_RPMH_H__ */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
new file mode 100644
index 000000000000..262876a59e86
--- /dev/null
+++ b/include/soc/qcom/tcs.h
@@ -0,0 +1,56 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#ifndef __SOC_QCOM_TCS_H__
7#define __SOC_QCOM_TCS_H__
8
9#define MAX_RPMH_PAYLOAD 16
10
11/**
12 * rpmh_state: state for the request
13 *
14 * RPMH_SLEEP_STATE: State of the resource when the processor subsystem
15 * is powered down. There is no client using the
16 * resource actively.
17 * RPMH_WAKE_ONLY_STATE: Resume resource state to the value previously
18 * requested before the processor was powered down.
19 * RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state
20 * is aggregated immediately.
21 */
22enum rpmh_state {
23 RPMH_SLEEP_STATE,
24 RPMH_WAKE_ONLY_STATE,
25 RPMH_ACTIVE_ONLY_STATE,
26};
27
28/**
29 * struct tcs_cmd: an individual request to RPMH.
30 *
31 * @addr: the address of the resource slv_id:18:16 | offset:0:15
32 * @data: the resource state request
33 * @wait: wait for this request to be complete before sending the next
34 */
35struct tcs_cmd {
36 u32 addr;
37 u32 data;
38 u32 wait;
39};
40
41/**
42 * struct tcs_request: A set of tcs_cmds sent together in a TCS
43 *
44 * @state: state for the request.
45 * @wait_for_compl: wait until we get a response from the h/w accelerator
46 * @num_cmds: the number of @cmds in this request
47 * @cmds: an array of tcs_cmds
48 */
49struct tcs_request {
50 enum rpmh_state state;
51 u32 wait_for_compl;
52 u32 num_cmds;
53 struct tcs_cmd *cmds;
54};
55
56#endif /* __SOC_QCOM_TCS_H__ */
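
A minimal sketch of how a consumer driver could combine the two new headers; the resource address, data value, and the assumption that dev is a child of the RPMH controller are placeholders, not part of this patch:

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

/* Illustrative only: vote one resource value for the active state. */
static int example_rpmh_vote(const struct device *dev)
{
	struct tcs_cmd cmd = {
		.addr = 0x30000,	/* placeholder slv_id/offset encoding */
		.data = 0x1,		/* placeholder resource state */
		.wait = 1,		/* complete before sending the next command */
	};

	return rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
}
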
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index ec04be9ab119..9792d25fa369 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -1,10 +1,8 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
3 * 2 *
4 * This program is free software; you can redistribute it and/or modify 3 * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */ 4 */
5
8#ifndef __SOUND_AC97_CODEC2_H 6#ifndef __SOUND_AC97_CODEC2_H
9#define __SOUND_AC97_CODEC2_H 7#define __SOUND_AC97_CODEC2_H
10 8
diff --git a/include/sound/ac97/compat.h b/include/sound/ac97/compat.h
index 1351cba40048..57e19afa31ab 100644
--- a/include/sound/ac97/compat.h
+++ b/include/sound/ac97/compat.h
@@ -1,14 +1,11 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
3 * 2 *
4 * This program is free software; you can redistribute it and/or modify 3 * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 * 4 *
8 * This file is for backward compatibility with snd_ac97 structure and its 5 * This file is for backward compatibility with snd_ac97 structure and its
9 * multiple usages, such as the snd_ac97_bus and snd_ac97_build_ops. 6 * multiple usages, such as the snd_ac97_bus and snd_ac97_build_ops.
10 *
11 */ 7 */
8
12#ifndef AC97_COMPAT_H 9#ifndef AC97_COMPAT_H
13#define AC97_COMPAT_H 10#define AC97_COMPAT_H
14 11
diff --git a/include/sound/ac97/controller.h b/include/sound/ac97/controller.h
index b36ecdd64f14..06b5afb7fa6b 100644
--- a/include/sound/ac97/controller.h
+++ b/include/sound/ac97/controller.h
@@ -1,10 +1,8 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
3 * 2 *
4 * This program is free software; you can redistribute it and/or modify 3 * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */ 4 */
5
8#ifndef AC97_CONTROLLER_H 6#ifndef AC97_CONTROLLER_H
9#define AC97_CONTROLLER_H 7#define AC97_CONTROLLER_H
10 8
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
index 9a4fa0c3264a..843f73f3705a 100644
--- a/include/sound/ac97/regs.h
+++ b/include/sound/ac97/regs.h
@@ -1,27 +1,11 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0+
2 *
2 * Copyright (c) by Jaroslav Kysela <perex@perex.cz> 3 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
3 * Universal interface for Audio Codec '97 4 * Universal interface for Audio Codec '97
4 * 5 *
5 * For more details look to AC '97 component specification revision 2.1 6 * For more details look to AC '97 component specification revision 2.1
6 * by Intel Corporation (http://developer.intel.com). 7 * by Intel Corporation (http://developer.intel.com).
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */ 8 */
24
25/* 9/*
26 * AC'97 codec registers 10 * AC'97 codec registers
27 */ 11 */
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 89d311a503d3..cc383991c0fe 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -1,30 +1,15 @@
1#ifndef __SOUND_AC97_CODEC_H 1/* SPDX-License-Identifier: GPL-2.0+
2#define __SOUND_AC97_CODEC_H 2 *
3
4/*
5 * Copyright (c) by Jaroslav Kysela <perex@perex.cz> 3 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
6 * Universal interface for Audio Codec '97 4 * Universal interface for Audio Codec '97
7 * 5 *
8 * For more details look to AC '97 component specification revision 2.1 6 * For more details look to AC '97 component specification revision 2.1
9 * by Intel Corporation (http://developer.intel.com). 7 * by Intel Corporation (http://developer.intel.com).
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */ 8 */
27 9
10#ifndef __SOUND_AC97_CODEC_H
11#define __SOUND_AC97_CODEC_H
12
28#include <linux/bitops.h> 13#include <linux/bitops.h>
29#include <linux/device.h> 14#include <linux/device.h>
30#include <linux/workqueue.h> 15#include <linux/workqueue.h>
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 9924bc9cbc7c..ea8c93bbb0e0 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -1,27 +1,12 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * compress_driver.h - compress offload driver definations 3 * compress_driver.h - compress offload driver definations
3 * 4 *
4 * Copyright (C) 2011 Intel Corporation 5 * Copyright (C) 2011 Intel Corporation
5 * Authors: Vinod Koul <vinod.koul@linux.intel.com> 6 * Authors: Vinod Koul <vinod.koul@linux.intel.com>
6 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com> 7 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 *
24 */ 8 */
9
25#ifndef __COMPRESS_DRIVER_H 10#ifndef __COMPRESS_DRIVER_H
26#define __COMPRESS_DRIVER_H 11#define __COMPRESS_DRIVER_H
27 12
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index e3481eebdd98..2c4cfaa135a6 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -1,17 +1,9 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0+
2 *
2 * Copyright (C) 2012, Analog Devices Inc. 3 * Copyright (C) 2012, Analog Devices Inc.
3 * Author: Lars-Peter Clausen <lars@metafoo.de> 4 * Author: Lars-Peter Clausen <lars@metafoo.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */ 5 */
6
15#ifndef __SOUND_DMAENGINE_PCM_H__ 7#ifndef __SOUND_DMAENGINE_PCM_H__
16#define __SOUND_DMAENGINE_PCM_H__ 8#define __SOUND_DMAENGINE_PCM_H__
17 9
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
new file mode 100644
index 000000000000..78626cde7081
--- /dev/null
+++ b/include/sound/hda_component.h
@@ -0,0 +1,61 @@
1// SPDX-License-Identifier: GPL-2.0
2// HD-Audio helpers to sync with DRM driver
3
4#ifndef __SOUND_HDA_COMPONENT_H
5#define __SOUND_HDA_COMPONENT_H
6
7#include <drm/drm_audio_component.h>
8
9#ifdef CONFIG_SND_HDA_COMPONENT
10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
11int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
12int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
13 int dev_id, int rate);
14int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
15 bool *audio_enabled, char *buffer, int max_bytes);
16int snd_hdac_acomp_init(struct hdac_bus *bus,
17 const struct drm_audio_component_audio_ops *aops,
18 int (*match_master)(struct device *, void *),
19 size_t extra_size);
20int snd_hdac_acomp_exit(struct hdac_bus *bus);
21int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
22 const struct drm_audio_component_audio_ops *ops);
23#else
24static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
25{
26 return 0;
27}
28static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
29{
30 return 0;
31}
32static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
33 hda_nid_t nid, int dev_id, int rate)
34{
35 return 0;
36}
37static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
38 int dev_id, bool *audio_enabled,
39 char *buffer, int max_bytes)
40{
41 return -ENODEV;
42}
43static inline int snd_hdac_acomp_init(struct hdac_bus *bus,
44 const struct drm_audio_component_audio_ops *aops,
45 int (*match_master)(struct device *, void *),
46 size_t extra_size)
47{
48 return -ENODEV;
49}
50static inline int snd_hdac_acomp_exit(struct hdac_bus *bus)
51{
52 return 0;
53}
54static inline int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
55 const struct drm_audio_component_audio_ops *ops)
56{
57 return -ENODEV;
58}
59#endif
60
61#endif /* __SOUND_HDA_COMPONENT_H */
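
A minimal sketch of wiring the new generic component interface up from a bus driver; the PCI match rule and the aops pointer are assumptions for illustration, the header itself does not dictate them:

/* Illustrative only: accept the graphics device as the component master. */
static int example_match_master(struct device *dev, void *data)
{
	return dev_is_pci(dev);
}

static int example_bind_acomp(struct hdac_bus *bus,
			      const struct drm_audio_component_audio_ops *aops)
{
	/* no extra private data beyond struct drm_audio_component */
	return snd_hdac_acomp_init(bus, aops, example_match_master, 0);
}
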
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index a94f5b6f92ac..6b79614a893b 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -5,54 +5,23 @@
5#ifndef __SOUND_HDA_I915_H 5#ifndef __SOUND_HDA_I915_H
6#define __SOUND_HDA_I915_H 6#define __SOUND_HDA_I915_H
7 7
8#include <drm/i915_component.h> 8#include "hda_component.h"
9 9
10#ifdef CONFIG_SND_HDA_I915 10#ifdef CONFIG_SND_HDA_I915
11int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
12int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
13void snd_hdac_i915_set_bclk(struct hdac_bus *bus); 11void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
14int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
15 int dev_id, int rate);
16int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
17 bool *audio_enabled, char *buffer, int max_bytes);
18int snd_hdac_i915_init(struct hdac_bus *bus); 12int snd_hdac_i915_init(struct hdac_bus *bus);
19int snd_hdac_i915_exit(struct hdac_bus *bus);
20int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
21#else 13#else
22static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
23{
24 return 0;
25}
26static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
27{
28 return 0;
29}
30static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus) 14static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
31{ 15{
32} 16}
33static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
34 hda_nid_t nid, int dev_id, int rate)
35{
36 return 0;
37}
38static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
39 int dev_id, bool *audio_enabled,
40 char *buffer, int max_bytes)
41{
42 return -ENODEV;
43}
44static inline int snd_hdac_i915_init(struct hdac_bus *bus) 17static inline int snd_hdac_i915_init(struct hdac_bus *bus)
45{ 18{
46 return -ENODEV; 19 return -ENODEV;
47} 20}
21#endif
48static inline int snd_hdac_i915_exit(struct hdac_bus *bus) 22static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
49{ 23{
50 return 0; 24 return snd_hdac_acomp_exit(bus);
51} 25}
52static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
53{
54 return -ENODEV;
55}
56#endif
57 26
58#endif /* __SOUND_HDA_I915_H */ 27#endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index c052afc27547..6f1e1f3b3063 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -8,8 +8,10 @@
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/pm_runtime.h>
11#include <linux/timecounter.h> 12#include <linux/timecounter.h>
12#include <sound/core.h> 13#include <sound/core.h>
14#include <sound/pcm.h>
13#include <sound/memalloc.h> 15#include <sound/memalloc.h>
14#include <sound/hda_verbs.h> 16#include <sound/hda_verbs.h>
15#include <drm/i915_component.h> 17#include <drm/i915_component.h>
@@ -132,7 +134,7 @@ int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
132 hda_nid_t *start_id); 134 hda_nid_t *start_id);
133unsigned int snd_hdac_calc_stream_format(unsigned int rate, 135unsigned int snd_hdac_calc_stream_format(unsigned int rate,
134 unsigned int channels, 136 unsigned int channels,
135 unsigned int format, 137 snd_pcm_format_t format,
136 unsigned int maxbps, 138 unsigned int maxbps,
137 unsigned short spdif_ctls); 139 unsigned short spdif_ctls);
138int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid, 140int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
@@ -171,12 +173,38 @@ int snd_hdac_power_down(struct hdac_device *codec);
171int snd_hdac_power_up_pm(struct hdac_device *codec); 173int snd_hdac_power_up_pm(struct hdac_device *codec);
172int snd_hdac_power_down_pm(struct hdac_device *codec); 174int snd_hdac_power_down_pm(struct hdac_device *codec);
173int snd_hdac_keep_power_up(struct hdac_device *codec); 175int snd_hdac_keep_power_up(struct hdac_device *codec);
176
177/* call this at entering into suspend/resume callbacks in codec driver */
178static inline void snd_hdac_enter_pm(struct hdac_device *codec)
179{
180 atomic_inc(&codec->in_pm);
181}
182
183/* call this at leaving from suspend/resume callbacks in codec driver */
184static inline void snd_hdac_leave_pm(struct hdac_device *codec)
185{
186 atomic_dec(&codec->in_pm);
187}
188
189static inline bool snd_hdac_is_in_pm(struct hdac_device *codec)
190{
191 return atomic_read(&codec->in_pm);
192}
193
194static inline bool snd_hdac_is_power_on(struct hdac_device *codec)
195{
196 return !pm_runtime_suspended(&codec->dev);
197}
174#else 198#else
175static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; } 199static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
176static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; } 200static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
177static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; } 201static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
178static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; } 202static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
179static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; } 203static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
204static inline void snd_hdac_enter_pm(struct hdac_device *codec) {}
205static inline void snd_hdac_leave_pm(struct hdac_device *codec) {}
206static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return 0; }
207static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return 1; }
180#endif 208#endif
181 209
182/* 210/*
@@ -188,6 +216,11 @@ struct hdac_driver {
188 const struct hda_device_id *id_table; 216 const struct hda_device_id *id_table;
189 int (*match)(struct hdac_device *dev, struct hdac_driver *drv); 217 int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
190 void (*unsol_event)(struct hdac_device *dev, unsigned int event); 218 void (*unsol_event)(struct hdac_device *dev, unsigned int event);
219
220 /* fields used by ext bus APIs */
221 int (*probe)(struct hdac_device *dev);
222 int (*remove)(struct hdac_device *dev);
223 void (*shutdown)(struct hdac_device *dev);
191}; 224};
192 225
193#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver) 226#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
@@ -209,6 +242,14 @@ struct hdac_bus_ops {
209}; 242};
210 243
211/* 244/*
245 * ops used for ASoC HDA codec drivers
246 */
247struct hdac_ext_bus_ops {
248 int (*hdev_attach)(struct hdac_device *hdev);
249 int (*hdev_detach)(struct hdac_device *hdev);
250};
251
252/*
212 * Lowlevel I/O operators 253 * Lowlevel I/O operators
213 */ 254 */
214struct hdac_io_ops { 255struct hdac_io_ops {
@@ -250,11 +291,17 @@ struct hdac_rb {
250 * @mlcap: MultiLink capabilities pointer 291 * @mlcap: MultiLink capabilities pointer
251 * @gtscap: gts capabilities pointer 292 * @gtscap: gts capabilities pointer
252 * @drsmcap: dma resume capabilities pointer 293 * @drsmcap: dma resume capabilities pointer
294 * @num_streams: streams supported
295 * @idx: HDA link index
296 * @hlink_list: link list of HDA links
297 * @lock: lock for link mgmt
298 * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
253 */ 299 */
254struct hdac_bus { 300struct hdac_bus {
255 struct device *dev; 301 struct device *dev;
256 const struct hdac_bus_ops *ops; 302 const struct hdac_bus_ops *ops;
257 const struct hdac_io_ops *io_ops; 303 const struct hdac_io_ops *io_ops;
304 const struct hdac_ext_bus_ops *ext_ops;
258 305
259 /* h/w resources */ 306 /* h/w resources */
260 unsigned long addr; 307 unsigned long addr;
@@ -314,9 +361,19 @@ struct hdac_bus {
314 spinlock_t reg_lock; 361 spinlock_t reg_lock;
315 struct mutex cmd_mutex; 362 struct mutex cmd_mutex;
316 363
317 /* i915 component interface */ 364 /* DRM component interface */
318 struct i915_audio_component *audio_component; 365 struct drm_audio_component *audio_component;
319 int i915_power_refcount; 366 int drm_power_refcount;
367
368 /* parameters required for enhanced capabilities */
369 int num_streams;
370 int idx;
371
372 struct list_head hlink_list;
373
374 struct mutex lock;
375 bool cmd_dma_state;
376
320}; 377};
321 378
322int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev, 379int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
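
The new snd_hdac_enter_pm()/snd_hdac_leave_pm() pair brackets a codec driver's suspend and resume callbacks so the in_pm counter reflects that a PM transition is in flight. A minimal sketch, with the driver-specific body left as a placeholder:

/* Illustrative only: bracket a suspend callback with the new PM helpers. */
static int example_codec_suspend(struct hdac_device *codec)
{
	snd_hdac_enter_pm(codec);	/* increments codec->in_pm */
	/* ... driver-specific register save and power-down would go here ... */
	snd_hdac_leave_pm(codec);

	return 0;
}
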
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 9c14e21dda85..f34aced69ca8 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -4,38 +4,16 @@
4 4
5#include <sound/hdaudio.h> 5#include <sound/hdaudio.h>
6 6
7/** 7int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
8 * hdac_ext_bus: HDAC extended bus for extended HDA caps
9 *
10 * @bus: hdac bus
11 * @num_streams: streams supported
12 * @hlink_list: link list of HDA links
13 * @lock: lock for link mgmt
14 * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
15 */
16struct hdac_ext_bus {
17 struct hdac_bus bus;
18 int num_streams;
19 int idx;
20
21 struct list_head hlink_list;
22
23 struct mutex lock;
24 bool cmd_dma_state;
25};
26
27int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
28 const struct hdac_bus_ops *ops, 8 const struct hdac_bus_ops *ops,
29 const struct hdac_io_ops *io_ops); 9 const struct hdac_io_ops *io_ops,
10 const struct hdac_ext_bus_ops *ext_ops);
30 11
31void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus); 12void snd_hdac_ext_bus_exit(struct hdac_bus *bus);
32int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr); 13int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
14 struct hdac_device *hdev);
33void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev); 15void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
34void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus); 16void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
35
36#define ebus_to_hbus(ebus) (&(ebus)->bus)
37#define hbus_to_ebus(_bus) \
38 container_of(_bus, struct hdac_ext_bus, bus)
39 17
40#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \ 18#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \
41 { .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \ 19 { .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \
@@ -44,14 +22,14 @@ void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
44#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \ 22#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \
45 HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data) 23 HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data)
46 24
47void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable); 25void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *chip, bool enable);
48void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable); 26void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *chip, bool enable);
49 27
50void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *chip, 28void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *chip,
51 bool enable, int index); 29 bool enable, int index);
52 30
53int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *bus); 31int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
54struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *bus, 32struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
55 const char *codec_name); 33 const char *codec_name);
56 34
57enum hdac_ext_stream_type { 35enum hdac_ext_stream_type {
@@ -100,28 +78,28 @@ struct hdac_ext_stream {
100#define stream_to_hdac_ext_stream(s) \ 78#define stream_to_hdac_ext_stream(s) \
101 container_of(s, struct hdac_ext_stream, hstream) 79 container_of(s, struct hdac_ext_stream, hstream)
102 80
103void snd_hdac_ext_stream_init(struct hdac_ext_bus *bus, 81void snd_hdac_ext_stream_init(struct hdac_bus *bus,
104 struct hdac_ext_stream *stream, int idx, 82 struct hdac_ext_stream *stream, int idx,
105 int direction, int tag); 83 int direction, int tag);
106int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx, 84int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
107 int num_stream, int dir); 85 int num_stream, int dir);
108void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus); 86void snd_hdac_stream_free_all(struct hdac_bus *bus);
109void snd_hdac_link_free_all(struct hdac_ext_bus *ebus); 87void snd_hdac_link_free_all(struct hdac_bus *bus);
110struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_ext_bus *bus, 88struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
111 struct snd_pcm_substream *substream, 89 struct snd_pcm_substream *substream,
112 int type); 90 int type);
113void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type); 91void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
114void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus, 92void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
115 struct hdac_ext_stream *azx_dev, bool decouple); 93 struct hdac_ext_stream *azx_dev, bool decouple);
116void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus); 94void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
117 95
118int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus, 96int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
119 struct hdac_ext_stream *stream, u32 value); 97 struct hdac_ext_stream *stream, u32 value);
120int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus, 98int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
121 struct hdac_ext_stream *stream); 99 struct hdac_ext_stream *stream);
122void snd_hdac_ext_stream_drsm_enable(struct hdac_ext_bus *ebus, 100void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
123 bool enable, int index); 101 bool enable, int index);
124int snd_hdac_ext_stream_set_dpibr(struct hdac_ext_bus *ebus, 102int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
125 struct hdac_ext_stream *stream, u32 value); 103 struct hdac_ext_stream *stream, u32 value);
126int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value); 104int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
127 105
@@ -144,17 +122,15 @@ struct hdac_ext_link {
144 122
145int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link); 123int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
146int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link); 124int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
147int snd_hdac_ext_bus_link_power_up_all(struct hdac_ext_bus *ebus); 125int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus);
148int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus); 126int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus);
149void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link, 127void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
150 int stream); 128 int stream);
151void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, 129void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
152 int stream); 130 int stream);
153 131
154int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus, 132int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link);
155 struct hdac_ext_link *link); 133int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link);
156int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
157 struct hdac_ext_link *link);
158 134
159/* update register macro */ 135/* update register macro */
160#define snd_hdac_updatel(addr, reg, mask, val) \ 136#define snd_hdac_updatel(addr, reg, mask, val) \
@@ -181,53 +157,12 @@ struct hda_dai_map {
181 u32 maxbps; 157 u32 maxbps;
182}; 158};
183 159
184#define HDA_MAX_NIDS 16
185
186/**
187 * struct hdac_ext_device - HDAC Ext device
188 *
189 * @hdac: hdac core device
190 * @nid_list - the dai map which matches the dai-name with the nid
191 * @map_cur_idx - the idx in use in dai_map
192 * @ops - the hda codec ops common to all codec drivers
193 * @pvt_data - private data, for asoc contains asoc codec object
194 */
195struct hdac_ext_device {
196 struct hdac_device hdev;
197 struct hdac_ext_bus *ebus;
198
199 /* soc-dai to nid map */
200 struct hda_dai_map nid_list[HDA_MAX_NIDS];
201 unsigned int map_cur_idx;
202
203 /* codec ops */
204 struct hdac_ext_codec_ops ops;
205
206 struct snd_card *card;
207 void *scodec;
208 void *private_data;
209};
210
211struct hdac_ext_dma_params { 160struct hdac_ext_dma_params {
212 u32 format; 161 u32 format;
213 u8 stream_tag; 162 u8 stream_tag;
214}; 163};
215#define to_ehdac_device(dev) (container_of((dev), \
216 struct hdac_ext_device, hdev))
217/*
218 * HD-audio codec base driver
219 */
220struct hdac_ext_driver {
221 struct hdac_driver hdac;
222
223 int (*probe)(struct hdac_ext_device *dev);
224 int (*remove)(struct hdac_ext_device *dev);
225 void (*shutdown)(struct hdac_ext_device *dev);
226};
227
228int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
229void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
230 164
231#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac) 165int snd_hda_ext_driver_register(struct hdac_driver *drv);
166void snd_hda_ext_driver_unregister(struct hdac_driver *drv);
232 167
233#endif /* __SOUND_HDAUDIO_EXT_H */ 168#endif /* __SOUND_HDAUDIO_EXT_H */
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 9c3db3dce32b..67561b997915 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -24,6 +24,8 @@
24#ifndef __SOUND_MEMALLOC_H 24#ifndef __SOUND_MEMALLOC_H
25#define __SOUND_MEMALLOC_H 25#define __SOUND_MEMALLOC_H
26 26
27#include <asm/page.h>
28
27struct device; 29struct device;
28 30
29/* 31/*
@@ -67,6 +69,14 @@ struct snd_dma_buffer {
67 void *private_data; /* private for allocator; don't touch */ 69 void *private_data; /* private for allocator; don't touch */
68}; 70};
69 71
72/*
73 * return the pages matching with the given byte size
74 */
75static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
76{
77 return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
78}
79
70#ifdef CONFIG_SND_DMA_SGBUF 80#ifdef CONFIG_SND_DMA_SGBUF
71/* 81/*
72 * Scatter-Gather generic device pages 82 * Scatter-Gather generic device pages
@@ -91,14 +101,6 @@ struct snd_sg_buf {
91}; 101};
92 102
93/* 103/*
94 * return the pages matching with the given byte size
95 */
96static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
97{
98 return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
99}
100
101/*
102 * return the physical address at the corresponding offset 104 * return the physical address at the corresponding offset
103 */ 105 */
104static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, 106static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
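
snd_sgbuf_aligned_pages(), now available regardless of CONFIG_SND_DMA_SGBUF, is a plain round-up to whole pages; with a 4 KiB PAGE_SIZE a 10000-byte buffer yields 3 pages. A one-line sketch for completeness:

/* Illustrative only: pages needed to hold a byte-sized buffer. */
static unsigned int example_pages_for(size_t bytes)
{
	return snd_sgbuf_aligned_pages(bytes);	/* (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT */
}
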
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e054c583d3b3..d6bd3caf6878 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -462,6 +462,7 @@ struct snd_pcm_substream {
462 /* -- timer section -- */ 462 /* -- timer section -- */
463 struct snd_timer *timer; /* timer */ 463 struct snd_timer *timer; /* timer */
464 unsigned timer_running: 1; /* time is running */ 464 unsigned timer_running: 1; /* time is running */
465 long wait_time; /* time in ms for R/W to wait for avail */
465 /* -- next substream -- */ 466 /* -- next substream -- */
466 struct snd_pcm_substream *next; 467 struct snd_pcm_substream *next;
467 /* -- linked substreams -- */ 468 /* -- linked substreams -- */
@@ -1089,14 +1090,14 @@ static inline snd_pcm_sframes_t
1089snd_pcm_lib_write(struct snd_pcm_substream *substream, 1090snd_pcm_lib_write(struct snd_pcm_substream *substream,
1090 const void __user *buf, snd_pcm_uframes_t frames) 1091 const void __user *buf, snd_pcm_uframes_t frames)
1091{ 1092{
1092 return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false); 1093 return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
1093} 1094}
1094 1095
1095static inline snd_pcm_sframes_t 1096static inline snd_pcm_sframes_t
1096snd_pcm_lib_read(struct snd_pcm_substream *substream, 1097snd_pcm_lib_read(struct snd_pcm_substream *substream,
1097 void __user *buf, snd_pcm_uframes_t frames) 1098 void __user *buf, snd_pcm_uframes_t frames)
1098{ 1099{
1099 return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false); 1100 return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
1100} 1101}
1101 1102
1102static inline snd_pcm_sframes_t 1103static inline snd_pcm_sframes_t
@@ -1341,8 +1342,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
1341#define snd_pcm_lib_mmap_iomem NULL 1342#define snd_pcm_lib_mmap_iomem NULL
1342#endif 1343#endif
1343 1344
1344#define snd_pcm_lib_mmap_vmalloc NULL
1345
1346/** 1345/**
1347 * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer 1346 * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
1348 * @dma: DMA number 1347 * @dma: DMA number
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index c704357775fc..2dd37cada7c0 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -87,6 +87,13 @@ static inline void snd_mask_set(struct snd_mask *mask, unsigned int val)
87 mask->bits[MASK_OFS(val)] |= MASK_BIT(val); 87 mask->bits[MASK_OFS(val)] |= MASK_BIT(val);
88} 88}
89 89
90/* Most of drivers need only this one */
91static inline void snd_mask_set_format(struct snd_mask *mask,
92 snd_pcm_format_t format)
93{
94 snd_mask_set(mask, (__force unsigned int)format);
95}
96
90static inline void snd_mask_reset(struct snd_mask *mask, unsigned int val) 97static inline void snd_mask_reset(struct snd_mask *mask, unsigned int val)
91{ 98{
92 mask->bits[MASK_OFS(val)] &= ~MASK_BIT(val); 99 mask->bits[MASK_OFS(val)] &= ~MASK_BIT(val);
@@ -369,8 +376,7 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
369static inline void 376static inline void
370params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt) 377params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
371{ 378{
372 snd_mask_set(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), 379 snd_mask_set_format(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), fmt);
373 (__force int)fmt);
374} 380}
375 381
376#endif /* __SOUND_PCM_PARAMS_H */ 382#endif /* __SOUND_PCM_PARAMS_H */
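
snd_mask_set_format() lets drivers set the format mask with a typed snd_pcm_format_t instead of open-coding the __force cast, which is exactly what params_set_format() now does. A minimal sketch of constraining hw_params to one format; the choice of S16_LE is illustrative:

/* Illustrative only: restrict the format mask to S16_LE with the typed helper. */
static void example_force_s16(struct snd_pcm_hw_params *params)
{
	struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

	snd_mask_none(fmt);			/* clear every format bit first */
	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
}
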
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 63f75450d3db..6758fc12fa84 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -8,20 +8,23 @@
8/* PCM */ 8/* PCM */
9struct snd_pcm_substream; 9struct snd_pcm_substream;
10struct snd_pcm_hw_params; 10struct snd_pcm_hw_params;
11struct snd_soc_pcm_runtime;
11struct snd_pcm; 12struct snd_pcm;
12 13
13extern int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream, 14extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
14 struct snd_pcm_hw_params *params); 15 struct snd_pcm_hw_params *params);
15extern int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream); 16extern int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
16extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd); 17extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
17extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream); 18extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream);
18extern int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream); 19extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
19extern int __pxa2xx_pcm_open(struct snd_pcm_substream *substream); 20extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream);
20extern int __pxa2xx_pcm_close(struct snd_pcm_substream *substream); 21extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream);
21extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream, 22extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
22 struct vm_area_struct *vma); 23 struct vm_area_struct *vma);
23extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream); 24extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
24extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm); 25extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
26extern int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd);
27extern const struct snd_pcm_ops pxa2xx_pcm_ops;
25 28
26/* AC97 */ 29/* AC97 */
27 30
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
new file mode 100644
index 000000000000..0251797ab438
--- /dev/null
+++ b/include/sound/rt5682.h
@@ -0,0 +1,40 @@
1/*
2 * linux/sound/rt5682.h -- Platform data for RT5682
3 *
4 * Copyright 2018 Realtek Microelectronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __LINUX_SND_RT5682_H
12#define __LINUX_SND_RT5682_H
13
14enum rt5682_dmic1_data_pin {
15 RT5682_DMIC1_NULL,
16 RT5682_DMIC1_DATA_GPIO2,
17 RT5682_DMIC1_DATA_GPIO5,
18};
19
20enum rt5682_dmic1_clk_pin {
21 RT5682_DMIC1_CLK_GPIO1,
22 RT5682_DMIC1_CLK_GPIO3,
23};
24
25enum rt5682_jd_src {
26 RT5682_JD_NULL,
27 RT5682_JD1,
28};
29
30struct rt5682_platform_data {
31
32 int ldo1_en; /* GPIO for LDO1_EN */
33
34 enum rt5682_dmic1_data_pin dmic1_data_pin;
35 enum rt5682_dmic1_clk_pin dmic1_clk_pin;
36 enum rt5682_jd_src jd_src;
37};
38
39#endif
40
diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
index c7c7788005e4..7817e88bd08d 100644
--- a/include/sound/sb16_csp.h
+++ b/include/sound/sb16_csp.h
@@ -46,7 +46,7 @@ enum {
46struct snd_sb_csp_ops { 46struct snd_sb_csp_ops {
47 int (*csp_use) (struct snd_sb_csp * p); 47 int (*csp_use) (struct snd_sb_csp * p);
48 int (*csp_unuse) (struct snd_sb_csp * p); 48 int (*csp_unuse) (struct snd_sb_csp * p);
49 int (*csp_autoload) (struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode); 49 int (*csp_autoload) (struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
50 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels); 50 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
51 int (*csp_stop) (struct snd_sb_csp * p); 51 int (*csp_stop) (struct snd_sb_csp * p);
52 int (*csp_qsound_transfer) (struct snd_sb_csp * p); 52 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
diff --git a/include/sound/seq_midi_event.h b/include/sound/seq_midi_event.h
index e40f43e6fc7b..2f135bccf457 100644
--- a/include/sound/seq_midi_event.h
+++ b/include/sound/seq_midi_event.h
@@ -43,10 +43,8 @@ void snd_midi_event_free(struct snd_midi_event *dev);
43void snd_midi_event_reset_encode(struct snd_midi_event *dev); 43void snd_midi_event_reset_encode(struct snd_midi_event *dev);
44void snd_midi_event_reset_decode(struct snd_midi_event *dev); 44void snd_midi_event_reset_decode(struct snd_midi_event *dev);
45void snd_midi_event_no_status(struct snd_midi_event *dev, int on); 45void snd_midi_event_no_status(struct snd_midi_event *dev, int on);
46/* encode from byte stream - return number of written bytes if success */ 46bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c,
47long snd_midi_event_encode(struct snd_midi_event *dev, unsigned char *buf, long count, 47 struct snd_seq_event *ev);
48 struct snd_seq_event *ev);
49int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c, struct snd_seq_event *ev);
50/* decode from event to bytes - return number of written bytes if success */ 48/* decode from event to bytes - return number of written bytes if success */
51long snd_midi_event_decode(struct snd_midi_event *dev, unsigned char *buf, long count, 49long snd_midi_event_decode(struct snd_midi_event *dev, unsigned char *buf, long count,
52 struct snd_seq_event *ev); 50 struct snd_seq_event *ev);
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index 695257ae64ac..796ce7772213 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -36,11 +36,12 @@ struct snd_virmidi {
36 int seq_mode; 36 int seq_mode;
37 int client; 37 int client;
38 int port; 38 int port;
39 unsigned int trigger: 1; 39 bool trigger;
40 struct snd_midi_event *parser; 40 struct snd_midi_event *parser;
41 struct snd_seq_event event; 41 struct snd_seq_event event;
42 struct snd_virmidi_dev *rdev; 42 struct snd_virmidi_dev *rdev;
43 struct snd_rawmidi_substream *substream; 43 struct snd_rawmidi_substream *substream;
44 struct work_struct output_work;
44}; 45};
45 46
46#define SNDRV_VIRMIDI_SUBSCRIBE (1<<0) 47#define SNDRV_VIRMIDI_SUBSCRIBE (1<<0)
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
index 7a9710b4b799..89eafe23ef88 100644
--- a/include/sound/sh_fsi.h
+++ b/include/sound/sh_fsi.h
@@ -1,16 +1,13 @@
1#ifndef __SOUND_FSI_H 1/* SPDX-License-Identifier: GPL-2.0
2#define __SOUND_FSI_H 2 *
3
4/*
5 * Fifo-attached Serial Interface (FSI) support for SH7724 3 * Fifo-attached Serial Interface (FSI) support for SH7724
6 * 4 *
7 * Copyright (C) 2009 Renesas Solutions Corp. 5 * Copyright (C) 2009 Renesas Solutions Corp.
8 * Kuninori Morimoto <morimoto.kuninori@renesas.com> 6 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */ 7 */
8#ifndef __SOUND_FSI_H
9#define __SOUND_FSI_H
10
14#include <linux/clk.h> 11#include <linux/clk.h>
15#include <sound/soc.h> 12#include <sound/soc.h>
16 13
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index a6a2e1547092..d264e5463f22 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -1,12 +1,9 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * ASoC simple sound card support 3 * ASoC simple sound card support
3 * 4 *
4 * Copyright (C) 2012 Renesas Solutions Corp. 5 * Copyright (C) 2012 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 6 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */ 7 */
11 8
12#ifndef __SIMPLE_CARD_H 9#ifndef __SIMPLE_CARD_H
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 7e25afce6566..8bc5e2d8b13c 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -1,17 +1,20 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * simple_card_utils.h 3 * simple_card_utils.h
3 * 4 *
4 * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 5 * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */ 6 */
7
10#ifndef __SIMPLE_CARD_UTILS_H 8#ifndef __SIMPLE_CARD_UTILS_H
11#define __SIMPLE_CARD_UTILS_H 9#define __SIMPLE_CARD_UTILS_H
12 10
13#include <sound/soc.h> 11#include <sound/soc.h>
14 12
13#define asoc_simple_card_init_hp(card, sjack, prefix) \
14 asoc_simple_card_init_jack(card, sjack, 1, prefix)
15#define asoc_simple_card_init_mic(card, sjack, prefix) \
16 asoc_simple_card_init_jack(card, sjack, 0, prefix)
17
15struct asoc_simple_dai { 18struct asoc_simple_dai {
16 const char *name; 19 const char *name;
17 unsigned int sysclk; 20 unsigned int sysclk;
@@ -28,6 +31,12 @@ struct asoc_simple_card_data {
28 u32 convert_channels; 31 u32 convert_channels;
29}; 32};
30 33
34struct asoc_simple_jack {
35 struct snd_soc_jack jack;
36 struct snd_soc_jack_pin pin;
37 struct snd_soc_jack_gpio gpio;
38};
39
31int asoc_simple_card_parse_daifmt(struct device *dev, 40int asoc_simple_card_parse_daifmt(struct device *dev,
32 struct device_node *node, 41 struct device_node *node,
33 struct device_node *codec, 42 struct device_node *codec,
@@ -107,4 +116,8 @@ int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
107int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card, 116int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
108 char *prefix); 117 char *prefix);
109 118
119int asoc_simple_card_init_jack(struct snd_soc_card *card,
120 struct asoc_simple_jack *sjack,
121 int is_hp, char *prefix);
122
110#endif /* __SIMPLE_CARD_UTILS_H */ 123#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 9da6388c20a1..bb1d24b703fb 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -1,16 +1,6 @@
1 1/* SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2017, Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version
7 * 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 * 2 *
3 * Copyright (C) 2017, Intel Corporation. All rights reserved.
14 */ 4 */
15 5
16#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H 6#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
@@ -29,5 +19,10 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
29extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[]; 19extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[];
30extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[]; 20extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
31extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[]; 21extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
22extern struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[];
23extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
24extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
25extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
26extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
32 27
33#endif 28#endif
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 082224275f52..e45b2330d16a 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -1,15 +1,6 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version
6 * 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 * 2 *
3 * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
13 */ 4 */
14 5
15#ifndef __LINUX_SND_SOC_ACPI_H 6#ifndef __LINUX_SND_SOC_ACPI_H
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e6f8c40ed43c..f5d70041108f 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -1,12 +1,9 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * linux/sound/soc-dai.h -- ALSA SoC Layer 3 * linux/sound/soc-dai.h -- ALSA SoC Layer
3 * 4 *
4 * Copyright: 2005-2008 Wolfson Microelectronics. PLC. 5 * Copyright: 2005-2008 Wolfson Microelectronics. PLC.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Digital Audio Interface (DAI) API. 7 * Digital Audio Interface (DAI) API.
11 */ 8 */
12 9
@@ -141,6 +138,11 @@ int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
141int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute, 138int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
142 int direction); 139 int direction);
143 140
141
142int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
143 unsigned int *tx_num, unsigned int *tx_slot,
144 unsigned int *rx_num, unsigned int *rx_slot);
145
144int snd_soc_dai_is_dummy(struct snd_soc_dai *dai); 146int snd_soc_dai_is_dummy(struct snd_soc_dai *dai);
145 147
146struct snd_soc_dai_ops { 148struct snd_soc_dai_ops {
@@ -168,6 +170,9 @@ struct snd_soc_dai_ops {
168 int (*set_channel_map)(struct snd_soc_dai *dai, 170 int (*set_channel_map)(struct snd_soc_dai *dai,
169 unsigned int tx_num, unsigned int *tx_slot, 171 unsigned int tx_num, unsigned int *tx_slot,
170 unsigned int rx_num, unsigned int *rx_slot); 172 unsigned int rx_num, unsigned int *rx_slot);
173 int (*get_channel_map)(struct snd_soc_dai *dai,
174 unsigned int *tx_num, unsigned int *tx_slot,
175 unsigned int *rx_num, unsigned int *rx_slot);
171 int (*set_tristate)(struct snd_soc_dai *dai, int tristate); 176 int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
172 177
173 int (*set_sdw_stream)(struct snd_soc_dai *dai, 178 int (*set_sdw_stream)(struct snd_soc_dai *dai,
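The new get_channel_map callback mirrors the existing set_channel_map: a DAI driver reports its current slot routing and callers read it through snd_soc_dai_get_channel_map(). A minimal sketch of a driver wiring this up; the driver name and the fixed two-slot mapping are illustrative assumptions, not part of this patch.

    static int my_dai_get_channel_map(struct snd_soc_dai *dai,
                                      unsigned int *tx_num, unsigned int *tx_slot,
                                      unsigned int *rx_num, unsigned int *rx_slot)
    {
            /* Report a fixed two-slot TDM mapping; a real driver would read
             * back whatever its set_channel_map() programmed into hardware. */
            *tx_num = 2;
            tx_slot[0] = 0;
            tx_slot[1] = 1;
            *rx_num = 2;
            rx_slot[0] = 0;
            rx_slot[1] = 1;
            return 0;
    }

    static const struct snd_soc_dai_ops my_dai_ops = {
            .get_channel_map = my_dai_get_channel_map,
    };

A machine driver could then call snd_soc_dai_get_channel_map(codec_dai, &tx_num, tx_slot, &rx_num, rx_slot) and mirror the codec's mapping onto the CPU DAI.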
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index a6ce2de4e20a..af9ef16cc34d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -1,13 +1,10 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * linux/sound/soc-dapm.h -- ALSA SoC Dynamic Audio Power Management 3 * linux/sound/soc-dapm.h -- ALSA SoC Dynamic Audio Power Management
3 * 4 *
4 * Author: Liam Girdwood 5 * Author: Liam Girdwood
5 * Created: Aug 11th 2005 6 * Created: Aug 11th 2005
6 * Copyright: Wolfson Microelectronics. PLC. 7 * Copyright: Wolfson Microelectronics. PLC.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#ifndef __LINUX_SND_SOC_DAPM_H 10#ifndef __LINUX_SND_SOC_DAPM_H
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 806059052bfc..9bb92f187af8 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -1,11 +1,8 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * linux/sound/soc-dpcm.h -- ALSA SoC Dynamic PCM Support 3 * linux/sound/soc-dpcm.h -- ALSA SoC Dynamic PCM Support
3 * 4 *
4 * Author: Liam Girdwood <lrg@ti.com> 5 * Author: Liam Girdwood <lrg@ti.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */ 6 */
10 7
11#ifndef __LINUX_SND_SOC_DPCM_H 8#ifndef __LINUX_SND_SOC_DPCM_H
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index f552c3f56368..fa4b8413d2e2 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -1,13 +1,10 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * linux/sound/soc-topology.h -- ALSA SoC Firmware Controls and DAPM 3 * linux/sound/soc-topology.h -- ALSA SoC Firmware Controls and DAPM
3 * 4 *
4 * Copyright (C) 2012 Texas Instruments Inc. 5 * Copyright (C) 2012 Texas Instruments Inc.
5 * Copyright (C) 2015 Intel Corporation. 6 * Copyright (C) 2015 Intel Corporation.
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Simple file API to load FW that includes mixers, coefficients, DAPM graphs, 8 * Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
12 * algorithms, equalisers, DAIs, widgets, FE caps, BE caps, codec link caps etc. 9 * algorithms, equalisers, DAIs, widgets, FE caps, BE caps, codec link caps etc.
13 */ 10 */
@@ -30,6 +27,9 @@ struct snd_soc_dapm_context;
30struct snd_soc_card; 27struct snd_soc_card;
31struct snd_kcontrol_new; 28struct snd_kcontrol_new;
32struct snd_soc_dai_link; 29struct snd_soc_dai_link;
30struct snd_soc_dai_driver;
31struct snd_soc_dai;
32struct snd_soc_dapm_route;
33 33
 34/* objects can be loaded and unloaded in groups with identifying indexes */ 34/* objects can be loaded and unloaded in groups with identifying indexes */
35#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */ 35#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
@@ -109,35 +109,44 @@ struct snd_soc_tplg_widget_events {
109struct snd_soc_tplg_ops { 109struct snd_soc_tplg_ops {
110 110
111 /* external kcontrol init - used for any driver specific init */ 111 /* external kcontrol init - used for any driver specific init */
112 int (*control_load)(struct snd_soc_component *, 112 int (*control_load)(struct snd_soc_component *, int index,
113 struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *); 113 struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *);
114 int (*control_unload)(struct snd_soc_component *, 114 int (*control_unload)(struct snd_soc_component *,
115 struct snd_soc_dobj *); 115 struct snd_soc_dobj *);
116 116
117 /* DAPM graph route element loading and unloading */
118 int (*dapm_route_load)(struct snd_soc_component *, int index,
119 struct snd_soc_dapm_route *route);
120 int (*dapm_route_unload)(struct snd_soc_component *,
121 struct snd_soc_dobj *);
122
117 /* external widget init - used for any driver specific init */ 123 /* external widget init - used for any driver specific init */
118 int (*widget_load)(struct snd_soc_component *, 124 int (*widget_load)(struct snd_soc_component *, int index,
119 struct snd_soc_dapm_widget *, 125 struct snd_soc_dapm_widget *,
120 struct snd_soc_tplg_dapm_widget *); 126 struct snd_soc_tplg_dapm_widget *);
121 int (*widget_ready)(struct snd_soc_component *, 127 int (*widget_ready)(struct snd_soc_component *, int index,
122 struct snd_soc_dapm_widget *, 128 struct snd_soc_dapm_widget *,
123 struct snd_soc_tplg_dapm_widget *); 129 struct snd_soc_tplg_dapm_widget *);
124 int (*widget_unload)(struct snd_soc_component *, 130 int (*widget_unload)(struct snd_soc_component *,
125 struct snd_soc_dobj *); 131 struct snd_soc_dobj *);
126 132
127 /* FE DAI - used for any driver specific init */ 133 /* FE DAI - used for any driver specific init */
128 int (*dai_load)(struct snd_soc_component *, 134 int (*dai_load)(struct snd_soc_component *, int index,
129 struct snd_soc_dai_driver *dai_drv); 135 struct snd_soc_dai_driver *dai_drv,
136 struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai);
137
130 int (*dai_unload)(struct snd_soc_component *, 138 int (*dai_unload)(struct snd_soc_component *,
131 struct snd_soc_dobj *); 139 struct snd_soc_dobj *);
132 140
133 /* DAI link - used for any driver specific init */ 141 /* DAI link - used for any driver specific init */
134 int (*link_load)(struct snd_soc_component *, 142 int (*link_load)(struct snd_soc_component *, int index,
135 struct snd_soc_dai_link *link); 143 struct snd_soc_dai_link *link,
144 struct snd_soc_tplg_link_config *cfg);
136 int (*link_unload)(struct snd_soc_component *, 145 int (*link_unload)(struct snd_soc_component *,
137 struct snd_soc_dobj *); 146 struct snd_soc_dobj *);
138 147
139 /* callback to handle vendor bespoke data */ 148 /* callback to handle vendor bespoke data */
140 int (*vendor_load)(struct snd_soc_component *, 149 int (*vendor_load)(struct snd_soc_component *, int index,
141 struct snd_soc_tplg_hdr *); 150 struct snd_soc_tplg_hdr *);
142 int (*vendor_unload)(struct snd_soc_component *, 151 int (*vendor_unload)(struct snd_soc_component *,
143 struct snd_soc_tplg_hdr *); 152 struct snd_soc_tplg_hdr *);
@@ -146,7 +155,7 @@ struct snd_soc_tplg_ops {
146 void (*complete)(struct snd_soc_component *); 155 void (*complete)(struct snd_soc_component *);
147 156
148 /* manifest - optional to inform component of manifest */ 157 /* manifest - optional to inform component of manifest */
149 int (*manifest)(struct snd_soc_component *, 158 int (*manifest)(struct snd_soc_component *, int index,
150 struct snd_soc_tplg_manifest *); 159 struct snd_soc_tplg_manifest *);
151 160
152 /* vendor specific kcontrol handlers available for binding */ 161 /* vendor specific kcontrol handlers available for binding */
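Every topology loader callback now receives the index of the header block being parsed, and components gain hooks for DAPM route creation. A hedged sketch of a component's snd_soc_tplg_ops under the new prototypes; the callback names and bodies are assumptions for illustration.

    static int my_route_load(struct snd_soc_component *comp, int index,
                             struct snd_soc_dapm_route *route)
    {
            /* Inspect or adjust topology-provided routes before they are added. */
            dev_dbg(comp->dev, "route %s -> %s (index %d)\n",
                    route->source, route->sink, index);
            return 0;
    }

    static int my_widget_load(struct snd_soc_component *comp, int index,
                              struct snd_soc_dapm_widget *w,
                              struct snd_soc_tplg_dapm_widget *tplg_w)
    {
            /* Driver-specific widget fixups, keyed by the topology index. */
            return 0;
    }

    static struct snd_soc_tplg_ops my_tplg_ops = {
            .dapm_route_load = my_route_load,
            .widget_load     = my_widget_load,
    };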
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1378dcd2128a..41cec42fb456 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1,13 +1,10 @@
1/* 1/* SPDX-License-Identifier: GPL-2.0
2 *
2 * linux/sound/soc.h -- ALSA SoC Layer 3 * linux/sound/soc.h -- ALSA SoC Layer
3 * 4 *
4 * Author: Liam Girdwood 5 * Author: Liam Girdwood
5 * Created: Aug 11th 2005 6 * Created: Aug 11th 2005
6 * Copyright: Wolfson Microelectronics. PLC. 7 * Copyright: Wolfson Microelectronics. PLC.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#ifndef __LINUX_SND_SOC_H 10#ifndef __LINUX_SND_SOC_H
@@ -806,6 +803,14 @@ struct snd_soc_component_driver {
806 unsigned int use_pmdown_time:1; /* care pmdown_time at stop */ 803 unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
807 unsigned int endianness:1; 804 unsigned int endianness:1;
808 unsigned int non_legacy_dai_naming:1; 805 unsigned int non_legacy_dai_naming:1;
806
 807 /* this component uses topology and ignores machine driver FEs */
808 const char *ignore_machine;
809 const char *topology_name_prefix;
810 int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
811 struct snd_pcm_hw_params *params);
812 bool use_dai_pcm_id; /* use the DAI link PCM ID as PCM device number */
813 int be_pcm_base; /* base device ID for all BE PCMs */
809}; 814};
810 815
811struct snd_soc_component { 816struct snd_soc_component {
@@ -957,10 +962,17 @@ struct snd_soc_dai_link {
957 962
958 /* DPCM used FE & BE merged format */ 963 /* DPCM used FE & BE merged format */
959 unsigned int dpcm_merged_format:1; 964 unsigned int dpcm_merged_format:1;
965 /* DPCM used FE & BE merged channel */
966 unsigned int dpcm_merged_chan:1;
967 /* DPCM used FE & BE merged rate */
968 unsigned int dpcm_merged_rate:1;
960 969
961 /* pmdown_time is ignored at stop */ 970 /* pmdown_time is ignored at stop */
962 unsigned int ignore_pmdown_time:1; 971 unsigned int ignore_pmdown_time:1;
963 972
973 /* Do not create a PCM for this DAI link (Backend link) */
974 unsigned int ignore:1;
975
964 struct list_head list; /* DAI link list of the soc card */ 976 struct list_head list; /* DAI link list of the soc card */
965 struct snd_soc_dobj dobj; /* For topology */ 977 struct snd_soc_dobj dobj; /* For topology */
966}; 978};
@@ -1000,6 +1012,7 @@ struct snd_soc_card {
1000 const char *long_name; 1012 const char *long_name;
1001 const char *driver_name; 1013 const char *driver_name;
1002 char dmi_longname[80]; 1014 char dmi_longname[80];
1015 char topology_shortname[32];
1003 1016
1004 struct device *dev; 1017 struct device *dev;
1005 struct snd_card *snd_card; 1018 struct snd_card *snd_card;
@@ -1009,6 +1022,7 @@ struct snd_soc_card {
1009 struct mutex dapm_mutex; 1022 struct mutex dapm_mutex;
1010 1023
1011 bool instantiated; 1024 bool instantiated;
1025 bool topology_shortname_created;
1012 1026
1013 int (*probe)(struct snd_soc_card *card); 1027 int (*probe)(struct snd_soc_card *card);
1014 int (*late_probe)(struct snd_soc_card *card); 1028 int (*late_probe)(struct snd_soc_card *card);
@@ -1412,6 +1426,9 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
1412 const char *propname); 1426 const char *propname);
1413int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card, 1427int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
1414 const char *propname); 1428 const char *propname);
1429int snd_soc_of_get_slot_mask(struct device_node *np,
1430 const char *prop_name,
1431 unsigned int *mask);
1415int snd_soc_of_parse_tdm_slot(struct device_node *np, 1432int snd_soc_of_parse_tdm_slot(struct device_node *np,
1416 unsigned int *tx_mask, 1433 unsigned int *tx_mask,
1417 unsigned int *rx_mask, 1434 unsigned int *rx_mask,
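The new snd_soc_component_driver fields let a topology-based platform component replace the front-end DAI links that a listed machine driver would otherwise register. A minimal sketch, with every name and value assumed for illustration rather than taken from an existing driver.

    static int my_be_fixup(struct snd_soc_pcm_runtime *rtd,
                           struct snd_pcm_hw_params *params)
    {
            /* Constrain back-end links to what the firmware pipeline expects. */
            return 0;
    }

    static const struct snd_soc_component_driver my_platform_component = {
            .name                   = "my-platform",
            /* FE links from this machine driver are dropped; the topology
             * file loaded by the component creates the front ends instead. */
            .ignore_machine         = "my_machine_driver",
            .topology_name_prefix   = "vendor/dsp",
            .be_hw_params_fixup     = my_be_fixup,
            .use_dai_pcm_id         = true,
            .be_pcm_base            = 16,
    };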
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index cf5f3fff1f1a..f2e6abea8490 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/dma-direction.h> /* enum dma_data_direction */ 5#include <linux/dma-direction.h> /* enum dma_data_direction */
6#include <linux/list.h> /* struct list_head */ 6#include <linux/list.h> /* struct list_head */
7#include <linux/sched.h>
7#include <linux/socket.h> /* struct sockaddr_storage */ 8#include <linux/socket.h> /* struct sockaddr_storage */
8#include <linux/types.h> /* u8 */ 9#include <linux/types.h> /* u8 */
9#include <scsi/iscsi_proto.h> /* itt_t */ 10#include <scsi/iscsi_proto.h> /* itt_t */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 34a15d59ed88..51b6f50eabee 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -106,13 +106,15 @@ bool target_lun_is_rdonly(struct se_cmd *);
106sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, 106sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
107 sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); 107 sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
108 108
109struct se_device *target_find_device(int id, bool do_depend);
110
111bool target_sense_desc_format(struct se_device *dev); 109bool target_sense_desc_format(struct se_device *dev);
112sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); 110sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
113bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, 111bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
114 struct request_queue *q); 112 struct request_queue *q);
115 113
114static inline bool target_dev_configured(struct se_device *se_dev)
115{
116 return !!(se_dev->dev_flags & DF_CONFIGURED);
117}
116 118
117/* Only use get_unaligned_be24() if reading p - 1 is allowed. */ 119/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
118static inline uint32_t get_unaligned_be24(const uint8_t *const p) 120static inline uint32_t get_unaligned_be24(const uint8_t *const p)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 922a39f45abc..7a4ee7852ca4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -4,7 +4,7 @@
4 4
5#include <linux/configfs.h> /* struct config_group */ 5#include <linux/configfs.h> /* struct config_group */
6#include <linux/dma-direction.h> /* enum dma_data_direction */ 6#include <linux/dma-direction.h> /* enum dma_data_direction */
7#include <linux/percpu_ida.h> /* struct percpu_ida */ 7#include <linux/sbitmap.h>
8#include <linux/percpu-refcount.h> 8#include <linux/percpu-refcount.h>
9#include <linux/semaphore.h> /* struct semaphore */ 9#include <linux/semaphore.h> /* struct semaphore */
10#include <linux/completion.h> 10#include <linux/completion.h>
@@ -443,7 +443,6 @@ struct se_cmd {
443 u8 scsi_asc; 443 u8 scsi_asc;
444 u8 scsi_ascq; 444 u8 scsi_ascq;
445 u16 scsi_sense_length; 445 u16 scsi_sense_length;
446 unsigned cmd_wait_set:1;
447 unsigned unknown_data_length:1; 446 unsigned unknown_data_length:1;
448 bool state_active:1; 447 bool state_active:1;
449 u64 tag; /* SAM command identifier aka task tag */ 448 u64 tag; /* SAM command identifier aka task tag */
@@ -455,6 +454,7 @@ struct se_cmd {
455 int sam_task_attr; 454 int sam_task_attr;
456 /* Used for se_sess->sess_tag_pool */ 455 /* Used for se_sess->sess_tag_pool */
457 unsigned int map_tag; 456 unsigned int map_tag;
457 int map_cpu;
458 /* Transport protocol dependent state, see transport_state_table */ 458 /* Transport protocol dependent state, see transport_state_table */
459 enum transport_state_table t_state; 459 enum transport_state_table t_state;
460 /* See se_cmd_flags_table */ 460 /* See se_cmd_flags_table */
@@ -475,7 +475,7 @@ struct se_cmd {
475 struct se_session *se_sess; 475 struct se_session *se_sess;
476 struct se_tmr_req *se_tmr_req; 476 struct se_tmr_req *se_tmr_req;
477 struct list_head se_cmd_list; 477 struct list_head se_cmd_list;
478 struct completion cmd_wait_comp; 478 struct completion *compl;
479 const struct target_core_fabric_ops *se_tfo; 479 const struct target_core_fabric_ops *se_tfo;
480 sense_reason_t (*execute_cmd)(struct se_cmd *); 480 sense_reason_t (*execute_cmd)(struct se_cmd *);
481 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); 481 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -605,10 +605,10 @@ struct se_session {
605 struct list_head sess_list; 605 struct list_head sess_list;
606 struct list_head sess_acl_list; 606 struct list_head sess_acl_list;
607 struct list_head sess_cmd_list; 607 struct list_head sess_cmd_list;
608 struct list_head sess_wait_list;
609 spinlock_t sess_cmd_lock; 608 spinlock_t sess_cmd_lock;
609 wait_queue_head_t cmd_list_wq;
610 void *sess_cmd_map; 610 void *sess_cmd_map;
611 struct percpu_ida sess_tag_pool; 611 struct sbitmap_queue sess_tag_pool;
612}; 612};
613 613
614struct se_device; 614struct se_device;
@@ -638,7 +638,6 @@ struct se_dev_entry {
638 atomic_long_t total_cmds; 638 atomic_long_t total_cmds;
639 atomic_long_t read_bytes; 639 atomic_long_t read_bytes;
640 atomic_long_t write_bytes; 640 atomic_long_t write_bytes;
641 atomic_t ua_count;
642 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 641 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
643 struct kref pr_kref; 642 struct kref pr_kref;
644 struct completion pr_comp; 643 struct completion pr_comp;
@@ -934,4 +933,9 @@ static inline void atomic_dec_mb(atomic_t *v)
934 smp_mb__after_atomic(); 933 smp_mb__after_atomic();
935} 934}
936 935
936static inline void target_free_tag(struct se_session *sess, struct se_cmd *cmd)
937{
938 sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu);
939}
940
937#endif /* TARGET_CORE_BASE_H */ 941#endif /* TARGET_CORE_BASE_H */
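The session tag pool switches from percpu_ida to sbitmap_queue, so a command now records both its tag and the CPU the tag was taken on, and releases it through target_free_tag(). A rough sketch of the allocation side as a fabric driver might write it; the helper name is an assumption, and real fabrics store their own command structures in sess_cmd_map rather than bare se_cmd entries.

    static struct se_cmd *my_fabric_get_cmd(struct se_session *se_sess)
    {
            struct se_cmd *cmd;
            unsigned int cpu;
            int tag;

            tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
            if (tag < 0)
                    return NULL;

            /* sess_cmd_map holds one pre-allocated entry per tag */
            cmd = (struct se_cmd *)(se_sess->sess_cmd_map + tag * sizeof(*cmd));
            cmd->map_tag = tag;
            cmd->map_cpu = cpu;     /* needed later by target_free_tag() */
            return cmd;
    }

Releasing the tag is then just target_free_tag(se_sess, cmd), which clears the bit using the CPU recorded in map_cpu.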
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index b297aa0d9651..f4147b398431 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -79,7 +79,7 @@ struct target_core_fabric_ops {
79 void (*fabric_drop_wwn)(struct se_wwn *); 79 void (*fabric_drop_wwn)(struct se_wwn *);
80 void (*add_wwn_groups)(struct se_wwn *); 80 void (*add_wwn_groups)(struct se_wwn *);
81 struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, 81 struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
82 struct config_group *, const char *); 82 const char *);
83 void (*fabric_drop_tpg)(struct se_portal_group *); 83 void (*fabric_drop_tpg)(struct se_portal_group *);
84 int (*fabric_post_link)(struct se_portal_group *, 84 int (*fabric_post_link)(struct se_portal_group *,
85 struct se_lun *); 85 struct se_lun *);
@@ -109,17 +109,17 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
109int target_depend_item(struct config_item *item); 109int target_depend_item(struct config_item *item);
110void target_undepend_item(struct config_item *item); 110void target_undepend_item(struct config_item *item);
111 111
112struct se_session *target_alloc_session(struct se_portal_group *, 112struct se_session *target_setup_session(struct se_portal_group *,
113 unsigned int, unsigned int, enum target_prot_op prot_op, 113 unsigned int, unsigned int, enum target_prot_op prot_op,
114 const char *, void *, 114 const char *, void *,
115 int (*callback)(struct se_portal_group *, 115 int (*callback)(struct se_portal_group *,
116 struct se_session *, void *)); 116 struct se_session *, void *));
117void target_remove_session(struct se_session *);
117 118
118struct se_session *transport_init_session(enum target_prot_op); 119void transport_init_session(struct se_session *);
120struct se_session *transport_alloc_session(enum target_prot_op);
119int transport_alloc_session_tags(struct se_session *, unsigned int, 121int transport_alloc_session_tags(struct se_session *, unsigned int,
120 unsigned int); 122 unsigned int);
121struct se_session *transport_init_session_tags(unsigned int, unsigned int,
122 enum target_prot_op);
123void __transport_register_session(struct se_portal_group *, 123void __transport_register_session(struct se_portal_group *,
124 struct se_node_acl *, struct se_session *, void *); 124 struct se_node_acl *, struct se_session *, void *);
125void transport_register_session(struct se_portal_group *, 125void transport_register_session(struct se_portal_group *,
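target_alloc_session() becomes target_setup_session() and gains a symmetric target_remove_session() for teardown, while transport_init_session() now initializes a caller-provided session and transport_alloc_session() does the allocation. A hedged sketch of a fabric login/logout pair under the new names; the tag count, command size and function names are placeholders.

    static struct se_session *my_fabric_login(struct se_portal_group *tpg,
                                              const char *initiatorname,
                                              void *fabric_priv)
    {
            /* 128 tags, one se_cmd-sized slot per tag; values are examples */
            return target_setup_session(tpg, 128, sizeof(struct se_cmd),
                                        TARGET_PROT_NORMAL, initiatorname,
                                        fabric_priv, NULL);
    }

    static void my_fabric_logout(struct se_session *sess)
    {
            /* replaces the old deregister-then-free sequence */
            target_remove_session(sess);
    }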
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 39b94ec965be..b401c4e36394 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(
374 __entry->extent_type = btrfs_file_extent_type(l, fi); 374 __entry->extent_type = btrfs_file_extent_type(l, fi);
375 __entry->compression = btrfs_file_extent_compression(l, fi); 375 __entry->compression = btrfs_file_extent_compression(l, fi);
376 __entry->extent_start = start; 376 __entry->extent_start = start;
377 __entry->extent_end = (start + btrfs_file_extent_inline_len(l, slot, fi)); 377 __entry->extent_end = (start + btrfs_file_extent_ram_bytes(l, fi));
378 ), 378 ),
379 379
380 TP_printk_btrfs( 380 TP_printk_btrfs(
@@ -433,7 +433,6 @@ DEFINE_EVENT(
433 { (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \ 433 { (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
434 { (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \ 434 { (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
435 { (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \ 435 { (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
436 { (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \
437 { (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" }) 436 { (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
438 437
439 438
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 2cd449328aee..9004ffff7f32 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -192,6 +192,42 @@ DEFINE_EVENT(clk_phase, clk_set_phase_complete,
192 TP_ARGS(core, phase) 192 TP_ARGS(core, phase)
193); 193);
194 194
195DECLARE_EVENT_CLASS(clk_duty_cycle,
196
197 TP_PROTO(struct clk_core *core, struct clk_duty *duty),
198
199 TP_ARGS(core, duty),
200
201 TP_STRUCT__entry(
202 __string( name, core->name )
203 __field( unsigned int, num )
204 __field( unsigned int, den )
205 ),
206
207 TP_fast_assign(
208 __assign_str(name, core->name);
209 __entry->num = duty->num;
210 __entry->den = duty->den;
211 ),
212
213 TP_printk("%s %u/%u", __get_str(name), (unsigned int)__entry->num,
214 (unsigned int)__entry->den)
215);
216
217DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle,
218
219 TP_PROTO(struct clk_core *core, struct clk_duty *duty),
220
221 TP_ARGS(core, duty)
222);
223
224DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete,
225
226 TP_PROTO(struct clk_core *core, struct clk_duty *duty),
227
228 TP_ARGS(core, duty)
229);
230
195#endif /* _TRACE_CLK_H */ 231#endif /* _TRACE_CLK_H */
196 232
197/* This part must be outside protection */ 233/* This part must be outside protection */
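The clk_duty_cycle class brackets duty-cycle updates the same way the existing rate and phase events do. A sketch of how the clk core would be expected to emit the pair around the provider's set_duty_cycle hook; the surrounding function is paraphrased, not copied from the clk framework.

    static int clk_core_apply_duty_cycle(struct clk_core *core,
                                         struct clk_duty *duty)
    {
            int ret;

            trace_clk_set_duty_cycle(core, duty);
            ret = core->ops->set_duty_cycle(core->hw, duty);
            trace_clk_set_duty_cycle_complete(core, duty);

            return ret;
    }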
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 9763cddd0594..6271bab63bfb 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -22,6 +22,7 @@ TRACE_EVENT(fib_table_lookup,
22 __field( int, err ) 22 __field( int, err )
23 __field( int, oif ) 23 __field( int, oif )
24 __field( int, iif ) 24 __field( int, iif )
25 __field( u8, proto )
25 __field( __u8, tos ) 26 __field( __u8, tos )
26 __field( __u8, scope ) 27 __field( __u8, scope )
27 __field( __u8, flags ) 28 __field( __u8, flags )
@@ -31,7 +32,6 @@ TRACE_EVENT(fib_table_lookup,
31 __array( __u8, saddr, 4 ) 32 __array( __u8, saddr, 4 )
32 __field( u16, sport ) 33 __field( u16, sport )
33 __field( u16, dport ) 34 __field( u16, dport )
34 __field( u8, proto )
35 __dynamic_array(char, name, IFNAMSIZ ) 35 __dynamic_array(char, name, IFNAMSIZ )
36 ), 36 ),
37 37
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index d1faf3597b9d..68b17c116907 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -112,8 +112,11 @@ DEFINE_EVENT(filelock_lock, locks_remove_posix,
112 TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), 112 TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
113 TP_ARGS(inode, fl, ret)); 113 TP_ARGS(inode, fl, ret));
114 114
115DECLARE_EVENT_CLASS(filelock_lease, 115DEFINE_EVENT(filelock_lock, flock_lock_inode,
116 TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
117 TP_ARGS(inode, fl, ret));
116 118
119DECLARE_EVENT_CLASS(filelock_lease,
117 TP_PROTO(struct inode *inode, struct file_lock *fl), 120 TP_PROTO(struct inode *inode, struct file_lock *fl),
118 121
119 TP_ARGS(inode, fl), 122 TP_ARGS(inode, fl),
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 9c886739246a..00aa72ce0e7c 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -223,6 +223,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
223 TP_ARGS(skb) 223 TP_ARGS(skb)
224); 224);
225 225
226DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry,
227
228 TP_PROTO(const struct sk_buff *skb),
229
230 TP_ARGS(skb)
231);
232
226DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry, 233DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
227 234
228 TP_PROTO(const struct sk_buff *skb), 235 TP_PROTO(const struct sk_buff *skb),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 908977d69783..f7aece721aed 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -5,6 +5,7 @@
5#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ) 5#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_POWER_H 6#define _TRACE_POWER_H
7 7
8#include <linux/cpufreq.h>
8#include <linux/ktime.h> 9#include <linux/ktime.h>
9#include <linux/pm_qos.h> 10#include <linux/pm_qos.h>
10#include <linux/tracepoint.h> 11#include <linux/tracepoint.h>
@@ -148,6 +149,30 @@ DEFINE_EVENT(cpu, cpu_frequency,
148 TP_ARGS(frequency, cpu_id) 149 TP_ARGS(frequency, cpu_id)
149); 150);
150 151
152TRACE_EVENT(cpu_frequency_limits,
153
154 TP_PROTO(struct cpufreq_policy *policy),
155
156 TP_ARGS(policy),
157
158 TP_STRUCT__entry(
159 __field(u32, min_freq)
160 __field(u32, max_freq)
161 __field(u32, cpu_id)
162 ),
163
164 TP_fast_assign(
165 __entry->min_freq = policy->min;
166 __entry->max_freq = policy->max;
167 __entry->cpu_id = policy->cpu;
168 ),
169
170 TP_printk("min=%lu max=%lu cpu_id=%lu",
171 (unsigned long)__entry->min_freq,
172 (unsigned long)__entry->max_freq,
173 (unsigned long)__entry->cpu_id)
174);
175
151TRACE_EVENT(device_pm_callback_start, 176TRACE_EVENT(device_pm_callback_start,
152 177
153 TP_PROTO(struct device *dev, const char *pm_ops, int event), 178 TP_PROTO(struct device *dev, const char *pm_ops, int event),
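cpu_frequency_limits records a policy's current min/max whenever they are re-evaluated, complementing the per-transition cpu_frequency event. A plausible call site in policy-update code, shown only as an illustration of the tracepoint's arguments.

    static void my_policy_limits_changed(struct cpufreq_policy *policy)
    {
            /* one event per policy update, tagged with the owning CPU */
            trace_cpu_frequency_limits(policy);
            pr_debug("cpu%u limits: %u-%u kHz\n",
                     policy->cpu, policy->min, policy->max);
    }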
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5936aac357ab..a8d07feff6a0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
52 * "cpuqs": CPU passes through a quiescent state. 52 * "cpuqs": CPU passes through a quiescent state.
53 * "cpuonl": CPU comes online. 53 * "cpuonl": CPU comes online.
54 * "cpuofl": CPU goes offline. 54 * "cpuofl": CPU goes offline.
55 * "cpuofl-bgp": CPU goes offline while blocking a grace period.
55 * "reqwait": GP kthread sleeps waiting for grace-period request. 56 * "reqwait": GP kthread sleeps waiting for grace-period request.
56 * "reqwaitsig": GP kthread awakened by signal from reqwait state. 57 * "reqwaitsig": GP kthread awakened by signal from reqwait state.
57 * "fqswait": GP kthread waiting until time to force quiescent states. 58 * "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
63 */ 64 */
64TRACE_EVENT(rcu_grace_period, 65TRACE_EVENT(rcu_grace_period,
65 66
66 TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent), 67 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
67 68
68 TP_ARGS(rcuname, gpnum, gpevent), 69 TP_ARGS(rcuname, gp_seq, gpevent),
69 70
70 TP_STRUCT__entry( 71 TP_STRUCT__entry(
71 __field(const char *, rcuname) 72 __field(const char *, rcuname)
72 __field(unsigned long, gpnum) 73 __field(unsigned long, gp_seq)
73 __field(const char *, gpevent) 74 __field(const char *, gpevent)
74 ), 75 ),
75 76
76 TP_fast_assign( 77 TP_fast_assign(
77 __entry->rcuname = rcuname; 78 __entry->rcuname = rcuname;
78 __entry->gpnum = gpnum; 79 __entry->gp_seq = gp_seq;
79 __entry->gpevent = gpevent; 80 __entry->gpevent = gpevent;
80 ), 81 ),
81 82
82 TP_printk("%s %lu %s", 83 TP_printk("%s %lu %s",
83 __entry->rcuname, __entry->gpnum, __entry->gpevent) 84 __entry->rcuname, __entry->gp_seq, __entry->gpevent)
84); 85);
85 86
86/* 87/*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
90 * 91 *
91 * "Startleaf": Request a grace period based on leaf-node data. 92 * "Startleaf": Request a grace period based on leaf-node data.
92 * "Prestarted": Someone beat us to the request 93 * "Prestarted": Someone beat us to the request
93 * "Startedleaf": Leaf-node start proved sufficient. 94 * "Startedleaf": Leaf node marked for future GP.
94 * "Startedleafroot": Leaf-node start proved sufficient after checking root. 95 * "Startedleafroot": All nodes from leaf to root marked for future GP.
95 * "Startedroot": Requested a nocb grace period based on root-node data. 96 * "Startedroot": Requested a nocb grace period based on root-node data.
96 * "NoGPkthread": The RCU grace-period kthread has not yet started. 97 * "NoGPkthread": The RCU grace-period kthread has not yet started.
97 * "StartWait": Start waiting for the requested grace period. 98 * "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
102 */ 103 */
103TRACE_EVENT(rcu_future_grace_period, 104TRACE_EVENT(rcu_future_grace_period,
104 105
105 TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed, 106 TP_PROTO(const char *rcuname, unsigned long gp_seq,
106 unsigned long c, u8 level, int grplo, int grphi, 107 unsigned long gp_seq_req, u8 level, int grplo, int grphi,
107 const char *gpevent), 108 const char *gpevent),
108 109
109 TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent), 110 TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
110 111
111 TP_STRUCT__entry( 112 TP_STRUCT__entry(
112 __field(const char *, rcuname) 113 __field(const char *, rcuname)
113 __field(unsigned long, gpnum) 114 __field(unsigned long, gp_seq)
114 __field(unsigned long, completed) 115 __field(unsigned long, gp_seq_req)
115 __field(unsigned long, c)
116 __field(u8, level) 116 __field(u8, level)
117 __field(int, grplo) 117 __field(int, grplo)
118 __field(int, grphi) 118 __field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
121 121
122 TP_fast_assign( 122 TP_fast_assign(
123 __entry->rcuname = rcuname; 123 __entry->rcuname = rcuname;
124 __entry->gpnum = gpnum; 124 __entry->gp_seq = gp_seq;
125 __entry->completed = completed; 125 __entry->gp_seq_req = gp_seq_req;
126 __entry->c = c;
127 __entry->level = level; 126 __entry->level = level;
128 __entry->grplo = grplo; 127 __entry->grplo = grplo;
129 __entry->grphi = grphi; 128 __entry->grphi = grphi;
130 __entry->gpevent = gpevent; 129 __entry->gpevent = gpevent;
131 ), 130 ),
132 131
133 TP_printk("%s %lu %lu %lu %u %d %d %s", 132 TP_printk("%s %lu %lu %u %d %d %s",
134 __entry->rcuname, __entry->gpnum, __entry->completed, 133 __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
135 __entry->c, __entry->level, __entry->grplo, __entry->grphi, 134 __entry->grplo, __entry->grphi, __entry->gpevent)
136 __entry->gpevent)
137); 135);
138 136
139/* 137/*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
145 */ 143 */
146TRACE_EVENT(rcu_grace_period_init, 144TRACE_EVENT(rcu_grace_period_init,
147 145
148 TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level, 146 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
149 int grplo, int grphi, unsigned long qsmask), 147 int grplo, int grphi, unsigned long qsmask),
150 148
151 TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), 149 TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
152 150
153 TP_STRUCT__entry( 151 TP_STRUCT__entry(
154 __field(const char *, rcuname) 152 __field(const char *, rcuname)
155 __field(unsigned long, gpnum) 153 __field(unsigned long, gp_seq)
156 __field(u8, level) 154 __field(u8, level)
157 __field(int, grplo) 155 __field(int, grplo)
158 __field(int, grphi) 156 __field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
161 159
162 TP_fast_assign( 160 TP_fast_assign(
163 __entry->rcuname = rcuname; 161 __entry->rcuname = rcuname;
164 __entry->gpnum = gpnum; 162 __entry->gp_seq = gp_seq;
165 __entry->level = level; 163 __entry->level = level;
166 __entry->grplo = grplo; 164 __entry->grplo = grplo;
167 __entry->grphi = grphi; 165 __entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
169 ), 167 ),
170 168
171 TP_printk("%s %lu %u %d %d %lx", 169 TP_printk("%s %lu %u %d %d %lx",
172 __entry->rcuname, __entry->gpnum, __entry->level, 170 __entry->rcuname, __entry->gp_seq, __entry->level,
173 __entry->grplo, __entry->grphi, __entry->qsmask) 171 __entry->grplo, __entry->grphi, __entry->qsmask)
174); 172);
175 173
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
301 */ 299 */
302TRACE_EVENT(rcu_preempt_task, 300TRACE_EVENT(rcu_preempt_task,
303 301
304 TP_PROTO(const char *rcuname, int pid, unsigned long gpnum), 302 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
305 303
306 TP_ARGS(rcuname, pid, gpnum), 304 TP_ARGS(rcuname, pid, gp_seq),
307 305
308 TP_STRUCT__entry( 306 TP_STRUCT__entry(
309 __field(const char *, rcuname) 307 __field(const char *, rcuname)
310 __field(unsigned long, gpnum) 308 __field(unsigned long, gp_seq)
311 __field(int, pid) 309 __field(int, pid)
312 ), 310 ),
313 311
314 TP_fast_assign( 312 TP_fast_assign(
315 __entry->rcuname = rcuname; 313 __entry->rcuname = rcuname;
316 __entry->gpnum = gpnum; 314 __entry->gp_seq = gp_seq;
317 __entry->pid = pid; 315 __entry->pid = pid;
318 ), 316 ),
319 317
320 TP_printk("%s %lu %d", 318 TP_printk("%s %lu %d",
321 __entry->rcuname, __entry->gpnum, __entry->pid) 319 __entry->rcuname, __entry->gp_seq, __entry->pid)
322); 320);
323 321
324/* 322/*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
328 */ 326 */
329TRACE_EVENT(rcu_unlock_preempted_task, 327TRACE_EVENT(rcu_unlock_preempted_task,
330 328
331 TP_PROTO(const char *rcuname, unsigned long gpnum, int pid), 329 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
332 330
333 TP_ARGS(rcuname, gpnum, pid), 331 TP_ARGS(rcuname, gp_seq, pid),
334 332
335 TP_STRUCT__entry( 333 TP_STRUCT__entry(
336 __field(const char *, rcuname) 334 __field(const char *, rcuname)
337 __field(unsigned long, gpnum) 335 __field(unsigned long, gp_seq)
338 __field(int, pid) 336 __field(int, pid)
339 ), 337 ),
340 338
341 TP_fast_assign( 339 TP_fast_assign(
342 __entry->rcuname = rcuname; 340 __entry->rcuname = rcuname;
343 __entry->gpnum = gpnum; 341 __entry->gp_seq = gp_seq;
344 __entry->pid = pid; 342 __entry->pid = pid;
345 ), 343 ),
346 344
347 TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid) 345 TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
348); 346);
349 347
350/* 348/*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
357 */ 355 */
358TRACE_EVENT(rcu_quiescent_state_report, 356TRACE_EVENT(rcu_quiescent_state_report,
359 357
360 TP_PROTO(const char *rcuname, unsigned long gpnum, 358 TP_PROTO(const char *rcuname, unsigned long gp_seq,
361 unsigned long mask, unsigned long qsmask, 359 unsigned long mask, unsigned long qsmask,
362 u8 level, int grplo, int grphi, int gp_tasks), 360 u8 level, int grplo, int grphi, int gp_tasks),
363 361
364 TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), 362 TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
365 363
366 TP_STRUCT__entry( 364 TP_STRUCT__entry(
367 __field(const char *, rcuname) 365 __field(const char *, rcuname)
368 __field(unsigned long, gpnum) 366 __field(unsigned long, gp_seq)
369 __field(unsigned long, mask) 367 __field(unsigned long, mask)
370 __field(unsigned long, qsmask) 368 __field(unsigned long, qsmask)
371 __field(u8, level) 369 __field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
376 374
377 TP_fast_assign( 375 TP_fast_assign(
378 __entry->rcuname = rcuname; 376 __entry->rcuname = rcuname;
379 __entry->gpnum = gpnum; 377 __entry->gp_seq = gp_seq;
380 __entry->mask = mask; 378 __entry->mask = mask;
381 __entry->qsmask = qsmask; 379 __entry->qsmask = qsmask;
382 __entry->level = level; 380 __entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
386 ), 384 ),
387 385
388 TP_printk("%s %lu %lx>%lx %u %d %d %u", 386 TP_printk("%s %lu %lx>%lx %u %d %d %u",
389 __entry->rcuname, __entry->gpnum, 387 __entry->rcuname, __entry->gp_seq,
390 __entry->mask, __entry->qsmask, __entry->level, 388 __entry->mask, __entry->qsmask, __entry->level,
391 __entry->grplo, __entry->grphi, __entry->gp_tasks) 389 __entry->grplo, __entry->grphi, __entry->gp_tasks)
392); 390);
393 391
394/* 392/*
395 * Tracepoint for quiescent states detected by force_quiescent_state(). 393 * Tracepoint for quiescent states detected by force_quiescent_state().
396 * These trace events include the type of RCU, the grace-period number that 394 * These trace events include the type of RCU, the grace-period number
397 * was blocked by the CPU, the CPU itself, and the type of quiescent state, 395 * that was blocked by the CPU, the CPU itself, and the type of quiescent
398 * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick" 396 * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
399 * when kicking a CPU that has been in dyntick-idle mode for too long, or 397 * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
400 * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr. 398 * CPU got a quiescent state via its rcu_qs_ctr.
401 */ 399 */
402TRACE_EVENT(rcu_fqs, 400TRACE_EVENT(rcu_fqs,
403 401
404 TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent), 402 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
405 403
406 TP_ARGS(rcuname, gpnum, cpu, qsevent), 404 TP_ARGS(rcuname, gp_seq, cpu, qsevent),
407 405
408 TP_STRUCT__entry( 406 TP_STRUCT__entry(
409 __field(const char *, rcuname) 407 __field(const char *, rcuname)
410 __field(unsigned long, gpnum) 408 __field(unsigned long, gp_seq)
411 __field(int, cpu) 409 __field(int, cpu)
412 __field(const char *, qsevent) 410 __field(const char *, qsevent)
413 ), 411 ),
414 412
415 TP_fast_assign( 413 TP_fast_assign(
416 __entry->rcuname = rcuname; 414 __entry->rcuname = rcuname;
417 __entry->gpnum = gpnum; 415 __entry->gp_seq = gp_seq;
418 __entry->cpu = cpu; 416 __entry->cpu = cpu;
419 __entry->qsevent = qsevent; 417 __entry->qsevent = qsevent;
420 ), 418 ),
421 419
422 TP_printk("%s %lu %d %s", 420 TP_printk("%s %lu %d %s",
423 __entry->rcuname, __entry->gpnum, 421 __entry->rcuname, __entry->gp_seq,
424 __entry->cpu, __entry->qsevent) 422 __entry->cpu, __entry->qsevent)
425); 423);
426 424
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
753 751
754#else /* #ifdef CONFIG_RCU_TRACE */ 752#else /* #ifdef CONFIG_RCU_TRACE */
755 753
756#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) 754#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
757#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ 755#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
758 level, grplo, grphi, event) \ 756 level, grplo, grphi, event) \
759 do { } while (0) 757 do { } while (0)
760#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \ 758#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
761 qsmask) do { } while (0) 759 qsmask) do { } while (0)
762#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \ 760#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
763 do { } while (0) 761 do { } while (0)
764#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \ 762#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
765 do { } while (0) 763 do { } while (0)
766#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) 764#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
767#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) 765#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
768#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) 766#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
769#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ 767#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
770 grplo, grphi, gp_tasks) do { } \ 768 grplo, grphi, gp_tasks) do { } \
771 while (0) 769 while (0)
772#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) 770#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
773#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0) 771#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
774#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) 772#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
775#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ 773#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4fff00e9da8a..196587b8f204 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -211,18 +211,18 @@ enum rxrpc_congest_change {
211 rxrpc_cong_saw_nack, 211 rxrpc_cong_saw_nack,
212}; 212};
213 213
214enum rxrpc_tx_fail_trace { 214enum rxrpc_tx_point {
215 rxrpc_tx_fail_call_abort, 215 rxrpc_tx_point_call_abort,
216 rxrpc_tx_fail_call_ack, 216 rxrpc_tx_point_call_ack,
217 rxrpc_tx_fail_call_data_frag, 217 rxrpc_tx_point_call_data_frag,
218 rxrpc_tx_fail_call_data_nofrag, 218 rxrpc_tx_point_call_data_nofrag,
219 rxrpc_tx_fail_call_final_resend, 219 rxrpc_tx_point_call_final_resend,
220 rxrpc_tx_fail_conn_abort, 220 rxrpc_tx_point_conn_abort,
221 rxrpc_tx_fail_conn_challenge, 221 rxrpc_tx_point_rxkad_challenge,
222 rxrpc_tx_fail_conn_response, 222 rxrpc_tx_point_rxkad_response,
223 rxrpc_tx_fail_reject, 223 rxrpc_tx_point_reject,
224 rxrpc_tx_fail_version_keepalive, 224 rxrpc_tx_point_version_keepalive,
225 rxrpc_tx_fail_version_reply, 225 rxrpc_tx_point_version_reply,
226}; 226};
227 227
228#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */ 228#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
@@ -396,7 +396,7 @@ enum rxrpc_tx_fail_trace {
396#define rxrpc_propose_ack_outcomes \ 396#define rxrpc_propose_ack_outcomes \
397 EM(rxrpc_propose_ack_subsume, " Subsume") \ 397 EM(rxrpc_propose_ack_subsume, " Subsume") \
398 EM(rxrpc_propose_ack_update, " Update") \ 398 EM(rxrpc_propose_ack_update, " Update") \
399 E_(rxrpc_propose_ack_use, "") 399 E_(rxrpc_propose_ack_use, " New")
400 400
401#define rxrpc_congest_modes \ 401#define rxrpc_congest_modes \
402 EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \ 402 EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
@@ -452,18 +452,18 @@ enum rxrpc_tx_fail_trace {
452 EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \ 452 EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
453 E_(RXRPC_CALL_NETWORK_ERROR, "NetError") 453 E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
454 454
455#define rxrpc_tx_fail_traces \ 455#define rxrpc_tx_points \
456 EM(rxrpc_tx_fail_call_abort, "CallAbort") \ 456 EM(rxrpc_tx_point_call_abort, "CallAbort") \
457 EM(rxrpc_tx_fail_call_ack, "CallAck") \ 457 EM(rxrpc_tx_point_call_ack, "CallAck") \
458 EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \ 458 EM(rxrpc_tx_point_call_data_frag, "CallDataFrag") \
459 EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \ 459 EM(rxrpc_tx_point_call_data_nofrag, "CallDataNofrag") \
460 EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \ 460 EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \
461 EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \ 461 EM(rxrpc_tx_point_conn_abort, "ConnAbort") \
462 EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \ 462 EM(rxrpc_tx_point_reject, "Reject") \
463 EM(rxrpc_tx_fail_conn_response, "ConnResp") \ 463 EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \
464 EM(rxrpc_tx_fail_reject, "Reject") \ 464 EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \
465 EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \ 465 EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
466 E_(rxrpc_tx_fail_version_reply, "VerReply") 466 E_(rxrpc_tx_point_version_reply, "VerReply")
467 467
468/* 468/*
469 * Export enum symbols via userspace. 469 * Export enum symbols via userspace.
@@ -488,7 +488,7 @@ rxrpc_propose_ack_traces;
488rxrpc_propose_ack_outcomes; 488rxrpc_propose_ack_outcomes;
489rxrpc_congest_modes; 489rxrpc_congest_modes;
490rxrpc_congest_changes; 490rxrpc_congest_changes;
491rxrpc_tx_fail_traces; 491rxrpc_tx_points;
492 492
493/* 493/*
494 * Now redefine the EM() and E_() macros to map the enums to the strings that 494 * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -801,7 +801,7 @@ TRACE_EVENT(rxrpc_transmit,
801 ); 801 );
802 802
803TRACE_EVENT(rxrpc_rx_data, 803TRACE_EVENT(rxrpc_rx_data,
804 TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, 804 TP_PROTO(unsigned int call, rxrpc_seq_t seq,
805 rxrpc_serial_t serial, u8 flags, u8 anno), 805 rxrpc_serial_t serial, u8 flags, u8 anno),
806 806
807 TP_ARGS(call, seq, serial, flags, anno), 807 TP_ARGS(call, seq, serial, flags, anno),
@@ -815,7 +815,7 @@ TRACE_EVENT(rxrpc_rx_data,
815 ), 815 ),
816 816
817 TP_fast_assign( 817 TP_fast_assign(
818 __entry->call = call->debug_id; 818 __entry->call = call;
819 __entry->seq = seq; 819 __entry->seq = seq;
820 __entry->serial = serial; 820 __entry->serial = serial;
821 __entry->flags = flags; 821 __entry->flags = flags;
@@ -918,6 +918,37 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
918 __entry->wake ? " wake" : "") 918 __entry->wake ? " wake" : "")
919 ); 919 );
920 920
921TRACE_EVENT(rxrpc_tx_packet,
922 TP_PROTO(unsigned int call_id, struct rxrpc_wire_header *whdr,
923 enum rxrpc_tx_point where),
924
925 TP_ARGS(call_id, whdr, where),
926
927 TP_STRUCT__entry(
928 __field(unsigned int, call )
929 __field(enum rxrpc_tx_point, where )
930 __field_struct(struct rxrpc_wire_header, whdr )
931 ),
932
933 TP_fast_assign(
934 __entry->call = call_id;
 935 memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
 936 __entry->where = where;
 937 ),
937
938 TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
939 __entry->call,
940 ntohl(__entry->whdr.epoch),
941 ntohl(__entry->whdr.cid),
942 ntohl(__entry->whdr.callNumber),
943 ntohs(__entry->whdr.serviceId),
944 ntohl(__entry->whdr.serial),
945 ntohl(__entry->whdr.seq),
946 __entry->whdr.type, __entry->whdr.flags,
947 __entry->whdr.type <= 15 ?
948 __print_symbolic(__entry->whdr.type, rxrpc_pkts) : "?UNK",
949 __print_symbolic(__entry->where, rxrpc_tx_points))
950 );
951
921TRACE_EVENT(rxrpc_tx_data, 952TRACE_EVENT(rxrpc_tx_data,
922 TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, 953 TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
923 rxrpc_serial_t serial, u8 flags, bool retrans, bool lose), 954 rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
@@ -928,6 +959,8 @@ TRACE_EVENT(rxrpc_tx_data,
928 __field(unsigned int, call ) 959 __field(unsigned int, call )
929 __field(rxrpc_seq_t, seq ) 960 __field(rxrpc_seq_t, seq )
930 __field(rxrpc_serial_t, serial ) 961 __field(rxrpc_serial_t, serial )
962 __field(u32, cid )
963 __field(u32, call_id )
931 __field(u8, flags ) 964 __field(u8, flags )
932 __field(bool, retrans ) 965 __field(bool, retrans )
933 __field(bool, lose ) 966 __field(bool, lose )
@@ -935,6 +968,8 @@ TRACE_EVENT(rxrpc_tx_data,
935 968
936 TP_fast_assign( 969 TP_fast_assign(
937 __entry->call = call->debug_id; 970 __entry->call = call->debug_id;
971 __entry->cid = call->cid;
972 __entry->call_id = call->call_id;
938 __entry->seq = seq; 973 __entry->seq = seq;
939 __entry->serial = serial; 974 __entry->serial = serial;
940 __entry->flags = flags; 975 __entry->flags = flags;
@@ -942,8 +977,10 @@ TRACE_EVENT(rxrpc_tx_data,
942 __entry->lose = lose; 977 __entry->lose = lose;
943 ), 978 ),
944 979
945 TP_printk("c=%08x DATA %08x q=%08x fl=%02x%s%s", 980 TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s",
946 __entry->call, 981 __entry->call,
982 __entry->cid,
983 __entry->call_id,
947 __entry->serial, 984 __entry->serial,
948 __entry->seq, 985 __entry->seq,
949 __entry->flags, 986 __entry->flags,
@@ -952,7 +989,7 @@ TRACE_EVENT(rxrpc_tx_data,
952 ); 989 );
953 990
954TRACE_EVENT(rxrpc_tx_ack, 991TRACE_EVENT(rxrpc_tx_ack,
955 TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, 992 TP_PROTO(unsigned int call, rxrpc_serial_t serial,
956 rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial, 993 rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
957 u8 reason, u8 n_acks), 994 u8 reason, u8 n_acks),
958 995
@@ -968,7 +1005,7 @@ TRACE_EVENT(rxrpc_tx_ack,
968 ), 1005 ),
969 1006
970 TP_fast_assign( 1007 TP_fast_assign(
971 __entry->call = call ? call->debug_id : 0; 1008 __entry->call = call;
972 __entry->serial = serial; 1009 __entry->serial = serial;
973 __entry->ack_first = ack_first; 1010 __entry->ack_first = ack_first;
974 __entry->ack_serial = ack_serial; 1011 __entry->ack_serial = ack_serial;
@@ -1434,29 +1471,29 @@ TRACE_EVENT(rxrpc_rx_icmp,
1434 1471
1435TRACE_EVENT(rxrpc_tx_fail, 1472TRACE_EVENT(rxrpc_tx_fail,
1436 TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret, 1473 TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret,
1437 enum rxrpc_tx_fail_trace what), 1474 enum rxrpc_tx_point where),
1438 1475
1439 TP_ARGS(debug_id, serial, ret, what), 1476 TP_ARGS(debug_id, serial, ret, where),
1440 1477
1441 TP_STRUCT__entry( 1478 TP_STRUCT__entry(
1442 __field(unsigned int, debug_id ) 1479 __field(unsigned int, debug_id )
1443 __field(rxrpc_serial_t, serial ) 1480 __field(rxrpc_serial_t, serial )
1444 __field(int, ret ) 1481 __field(int, ret )
1445 __field(enum rxrpc_tx_fail_trace, what ) 1482 __field(enum rxrpc_tx_point, where )
1446 ), 1483 ),
1447 1484
1448 TP_fast_assign( 1485 TP_fast_assign(
1449 __entry->debug_id = debug_id; 1486 __entry->debug_id = debug_id;
1450 __entry->serial = serial; 1487 __entry->serial = serial;
1451 __entry->ret = ret; 1488 __entry->ret = ret;
1452 __entry->what = what; 1489 __entry->where = where;
1453 ), 1490 ),
1454 1491
1455 TP_printk("c=%08x r=%x ret=%d %s", 1492 TP_printk("c=%08x r=%x ret=%d %s",
1456 __entry->debug_id, 1493 __entry->debug_id,
1457 __entry->serial, 1494 __entry->serial,
1458 __entry->ret, 1495 __entry->ret,
1459 __print_symbolic(__entry->what, rxrpc_tx_fail_traces)) 1496 __print_symbolic(__entry->where, rxrpc_tx_points))
1460 ); 1497 );
1461 1498
1462TRACE_EVENT(rxrpc_call_reset, 1499TRACE_EVENT(rxrpc_call_reset,
@@ -1491,6 +1528,26 @@ TRACE_EVENT(rxrpc_call_reset,
1491 __entry->tx_seq, __entry->rx_seq) 1528 __entry->tx_seq, __entry->rx_seq)
1492 ); 1529 );
1493 1530
1531TRACE_EVENT(rxrpc_notify_socket,
1532 TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial),
1533
1534 TP_ARGS(debug_id, serial),
1535
1536 TP_STRUCT__entry(
1537 __field(unsigned int, debug_id )
1538 __field(rxrpc_serial_t, serial )
1539 ),
1540
1541 TP_fast_assign(
1542 __entry->debug_id = debug_id;
1543 __entry->serial = serial;
1544 ),
1545
1546 TP_printk("c=%08x r=%08x",
1547 __entry->debug_id,
1548 __entry->serial)
1549 );
1550
1494#endif /* _TRACE_RXRPC_H */ 1551#endif /* _TRACE_RXRPC_H */
1495 1552
1496/* This part must be outside protection */ 1553/* This part must be outside protection */
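The rxrpc_tx_fail_* values become a general rxrpc_tx_point enum shared by the new rxrpc_tx_packet event and the existing rxrpc_tx_fail event, so each transmission site is labelled the same way whether the send succeeds or fails. A hedged sketch of a send wrapper using both; the wrapper itself and its parameters are assumptions.

    static int my_send_packet(struct socket *sock, struct msghdr *msg,
                              struct kvec *iov, size_t len,
                              unsigned int debug_id,
                              struct rxrpc_wire_header *whdr,
                              rxrpc_serial_t serial)
    {
            int ret = kernel_sendmsg(sock, msg, iov, 1, len);

            if (ret < 0)
                    trace_rxrpc_tx_fail(debug_id, serial, ret,
                                        rxrpc_tx_point_call_data_frag);
            else
                    trace_rxrpc_tx_packet(debug_id, whdr,
                                          rxrpc_tx_point_call_data_frag);
            return ret;
    }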
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 3176a3931107..a0c4b8a30966 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -35,6 +35,10 @@
35 EM(TCP_CLOSING) \ 35 EM(TCP_CLOSING) \
36 EMe(TCP_NEW_SYN_RECV) 36 EMe(TCP_NEW_SYN_RECV)
37 37
38#define skmem_kind_names \
39 EM(SK_MEM_SEND) \
40 EMe(SK_MEM_RECV)
41
38/* enums need to be exported to user space */ 42/* enums need to be exported to user space */
39#undef EM 43#undef EM
40#undef EMe 44#undef EMe
@@ -44,6 +48,7 @@
44family_names 48family_names
45inet_protocol_names 49inet_protocol_names
46tcp_state_names 50tcp_state_names
51skmem_kind_names
47 52
48#undef EM 53#undef EM
49#undef EMe 54#undef EMe
@@ -59,6 +64,9 @@ tcp_state_names
59#define show_tcp_state_name(val) \ 64#define show_tcp_state_name(val) \
60 __print_symbolic(val, tcp_state_names) 65 __print_symbolic(val, tcp_state_names)
61 66
67#define show_skmem_kind_names(val) \
68 __print_symbolic(val, skmem_kind_names)
69
62TRACE_EVENT(sock_rcvqueue_full, 70TRACE_EVENT(sock_rcvqueue_full,
63 71
64 TP_PROTO(struct sock *sk, struct sk_buff *skb), 72 TP_PROTO(struct sock *sk, struct sk_buff *skb),
@@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full,
83 91
84TRACE_EVENT(sock_exceed_buf_limit, 92TRACE_EVENT(sock_exceed_buf_limit,
85 93
86 TP_PROTO(struct sock *sk, struct proto *prot, long allocated), 94 TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
87 95
88 TP_ARGS(sk, prot, allocated), 96 TP_ARGS(sk, prot, allocated, kind),
89 97
90 TP_STRUCT__entry( 98 TP_STRUCT__entry(
91 __array(char, name, 32) 99 __array(char, name, 32)
@@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit,
93 __field(long, allocated) 101 __field(long, allocated)
94 __field(int, sysctl_rmem) 102 __field(int, sysctl_rmem)
95 __field(int, rmem_alloc) 103 __field(int, rmem_alloc)
104 __field(int, sysctl_wmem)
105 __field(int, wmem_alloc)
106 __field(int, wmem_queued)
107 __field(int, kind)
96 ), 108 ),
97 109
98 TP_fast_assign( 110 TP_fast_assign(
@@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit,
101 __entry->allocated = allocated; 113 __entry->allocated = allocated;
102 __entry->sysctl_rmem = sk_get_rmem0(sk, prot); 114 __entry->sysctl_rmem = sk_get_rmem0(sk, prot);
103 __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); 115 __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
116 __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
117 __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
118 __entry->wmem_queued = sk->sk_wmem_queued;
119 __entry->kind = kind;
104 ), 120 ),
105 121
106 TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld " 122 TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s",
107 "sysctl_rmem=%d rmem_alloc=%d",
108 __entry->name, 123 __entry->name,
109 __entry->sysctl_mem[0], 124 __entry->sysctl_mem[0],
110 __entry->sysctl_mem[1], 125 __entry->sysctl_mem[1],
111 __entry->sysctl_mem[2], 126 __entry->sysctl_mem[2],
112 __entry->allocated, 127 __entry->allocated,
113 __entry->sysctl_rmem, 128 __entry->sysctl_rmem,
114 __entry->rmem_alloc) 129 __entry->rmem_alloc,
130 __entry->sysctl_wmem,
131 __entry->wmem_alloc,
132 __entry->wmem_queued,
133 show_skmem_kind_names(__entry->kind)
134 )
115); 135);
116 136
117TRACE_EVENT(inet_sock_set_state, 137TRACE_EVENT(inet_sock_set_state,
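sock_exceed_buf_limit now reports write-side state alongside the read-side fields and tags each event with SK_MEM_SEND or SK_MEM_RECV. A simplified sketch of the kind of accounting check that would emit it; the surrounding logic is reduced to the tracepoint call.

    static bool my_charge_over_soft_limit(struct sock *sk, struct proto *prot,
                                          long allocated, int kind)
    {
            if (allocated <= sk_prot_mem_limits(sk, 0))
                    return false;

            /* over the soft limit: record which direction triggered it */
            trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
            return true;
    }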
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 0ae758c90e54..a12692e5f7a8 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -107,4 +107,7 @@
107 107
108#define SO_ZEROCOPY 60 108#define SO_ZEROCOPY 60
109 109
110#define SO_TXTIME 61
111#define SCM_TXTIME SO_TXTIME
112
110#endif /* __ASM_GENERIC_SOCKET_H */ 113#endif /* __ASM_GENERIC_SOCKET_H */
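SO_TXTIME (with SCM_TXTIME for per-packet cmsgs) lets an application schedule transmit times. A userspace sketch of enabling it on a socket, assuming the struct sock_txtime layout (clockid plus flags) from linux/net_tstamp.h, which is outside this hunk.

    #include <linux/net_tstamp.h>
    #include <sys/socket.h>
    #include <time.h>

    static int enable_txtime(int fd)
    {
            struct sock_txtime cfg = {
                    .clockid = CLOCK_TAI,   /* clock the per-packet timestamps use */
                    .flags   = 0,
            };

            return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
    }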
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 42990676a55e..df4bedb9b01c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
734__SYSCALL(__NR_statx, sys_statx) 734__SYSCALL(__NR_statx, sys_statx)
735#define __NR_io_pgetevents 292 735#define __NR_io_pgetevents 292
736__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) 736__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
737#define __NR_rseq 293
738__SYSCALL(__NR_rseq, sys_rseq)
737 739
738#undef __NR_syscalls 740#undef __NR_syscalls
739#define __NR_syscalls 293 741#define __NR_syscalls 294
740 742
741/* 743/*
742 * 32 bit systems traditionally used different 744 * 32 bit systems traditionally used different
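The new __NR_rseq entry wires restartable sequences into this syscall table. A hedged sketch of registering a per-thread struct rseq, assuming the four-argument sys_rseq() signature (rseq, rseq_len, flags, sig) and struct rseq from <linux/rseq.h>:

	#include <linux/rseq.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define MY_RSEQ_SIG 0x53053053	/* signature chosen by the caller */

	/* The area must stay valid for the thread's lifetime and be 32-byte aligned. */
	static __thread struct rseq rseq_area __attribute__((aligned(32)));

	static int register_rseq(void)
	{
		return syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, MY_RSEQ_SIG);
	}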
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 78b4dd89fcb4..1ceec56de015 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -72,6 +72,29 @@ extern "C" {
72#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle) 72#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
73#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched) 73#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
74 74
75/**
76 * DOC: memory domains
77 *
78 * %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
79 * Memory in this pool could be swapped out to disk if there is pressure.
80 *
81 * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
 82 * GPU's virtual address space via GART. GART memory linearizes non-contiguous
 83 * pages of system memory, allowing the GPU to access system memory in a
 84 * linearized fashion.
85 *
86 * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
87 * carved out by the BIOS.
88 *
89 * %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
90 * across shader threads.
91 *
92 * %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
93 * execution of all the waves on a device.
94 *
95 * %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
96 * for appending data.
97 */
75#define AMDGPU_GEM_DOMAIN_CPU 0x1 98#define AMDGPU_GEM_DOMAIN_CPU 0x1
76#define AMDGPU_GEM_DOMAIN_GTT 0x2 99#define AMDGPU_GEM_DOMAIN_GTT 0x2
77#define AMDGPU_GEM_DOMAIN_VRAM 0x4 100#define AMDGPU_GEM_DOMAIN_VRAM 0x4
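A hedged sketch of how the domain bits above are typically combined when requesting a buffer; the surrounding GEM create ioctl and its struct live elsewhere in amdgpu_drm.h and are not shown here:

	#include <stdint.h>
	/* assumes <drm/amdgpu_drm.h> (this header) is included for the defines */

	static inline uint64_t amdgpu_pick_domains(int needs_cpu_access)
	{
		if (needs_cpu_access)
			return AMDGPU_GEM_DOMAIN_GTT;	/* system memory, GPU-visible via GART */

		/* Prefer VRAM but allow eviction/fallback to GTT under pressure. */
		return AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	}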
@@ -483,7 +506,8 @@ struct drm_amdgpu_gem_va {
483#define AMDGPU_HW_IP_UVD_ENC 5 506#define AMDGPU_HW_IP_UVD_ENC 5
484#define AMDGPU_HW_IP_VCN_DEC 6 507#define AMDGPU_HW_IP_VCN_DEC 6
485#define AMDGPU_HW_IP_VCN_ENC 7 508#define AMDGPU_HW_IP_VCN_ENC 7
486#define AMDGPU_HW_IP_NUM 8 509#define AMDGPU_HW_IP_VCN_JPEG 8
510#define AMDGPU_HW_IP_NUM 9
487 511
488#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 512#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
489 513
@@ -492,6 +516,7 @@ struct drm_amdgpu_gem_va {
492#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 516#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
493#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04 517#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
494#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05 518#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
519#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
495 520
496struct drm_amdgpu_cs_chunk { 521struct drm_amdgpu_cs_chunk {
497 __u32 chunk_id; 522 __u32 chunk_id;
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 9c660e1688ab..300f336633f2 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -687,6 +687,15 @@ struct drm_get_cap {
687 */ 687 */
688#define DRM_CLIENT_CAP_ASPECT_RATIO 4 688#define DRM_CLIENT_CAP_ASPECT_RATIO 4
689 689
690/**
691 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
692 *
693 * If set to 1, the DRM core will expose special connectors to be used for
 694 * writing the scene set up in the commit back to memory. Depends on the
 695 * client also supporting DRM_CLIENT_CAP_ATOMIC.
696 */
697#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
698
690/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ 699/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
691struct drm_set_client_cap { 700struct drm_set_client_cap {
692 __u64 capability; 701 __u64 capability;
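A hedged sketch of a client opting in to writeback connectors, using libdrm's drmSetClientCap() wrapper around DRM_IOCTL_SET_CLIENT_CAP (the libdrm call is an assumption about the userspace side, not part of this header):

	#include <xf86drm.h>

	static int enable_writeback(int drm_fd)
	{
		/* Writeback connectors are only exposed to atomic clients. */
		if (drmSetClientCap(drm_fd, DRM_CLIENT_CAP_ATOMIC, 1))
			return -1;

		return drmSetClientCap(drm_fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
	}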
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e04613d30a13..721ab7e54d96 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -183,6 +183,7 @@ extern "C" {
183#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 183#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
184#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06 184#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
185#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07 185#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
186#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
186/* add more to the end as needed */ 187/* add more to the end as needed */
187 188
188#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) 189#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
@@ -298,6 +299,19 @@ extern "C" {
298 */ 299 */
299#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) 300#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
300 301
302/*
303 * Qualcomm Compressed Format
304 *
 305 * Refers to a compressed variant of the base format.
306 * Implementation may be platform and base-format specific.
307 *
308 * Each macrotile consists of m x n (mostly 4 x 4) tiles.
309 * Pixel data pitch/stride is aligned with macrotile width.
310 * Pixel data height is aligned with macrotile height.
311 * Entire pixel data buffer is aligned with 4k(bytes).
312 */
313#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
314
301/* Vivante framebuffer modifiers */ 315/* Vivante framebuffer modifiers */
302 316
303/* 317/*
@@ -385,6 +399,23 @@ extern "C" {
385 fourcc_mod_code(NVIDIA, 0x15) 399 fourcc_mod_code(NVIDIA, 0x15)
386 400
387/* 401/*
402 * Some Broadcom modifiers take parameters, for example the number of
403 * vertical lines in the image. Reserve the lower 32 bits for modifier
404 * type, and the next 24 bits for parameters. Top 8 bits are the
405 * vendor code.
406 */
407#define __fourcc_mod_broadcom_param_shift 8
408#define __fourcc_mod_broadcom_param_bits 48
409#define fourcc_mod_broadcom_code(val, params) \
410 fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
411#define fourcc_mod_broadcom_param(m) \
412 ((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \
413 ((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
414#define fourcc_mod_broadcom_mod(m) \
415 ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \
416 __fourcc_mod_broadcom_param_shift))
417
418/*
388 * Broadcom VC4 "T" format 419 * Broadcom VC4 "T" format
389 * 420 *
390 * This is the primary layout that the V3D GPU can texture from (it 421 * This is the primary layout that the V3D GPU can texture from (it
@@ -405,6 +436,151 @@ extern "C" {
405 */ 436 */
406#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1) 437#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
407 438
439/*
440 * Broadcom SAND format
441 *
442 * This is the native format that the H.264 codec block uses. For VC4
443 * HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
444 *
445 * The image can be considered to be split into columns, and the
446 * columns are placed consecutively into memory. The width of those
447 * columns can be either 32, 64, 128, or 256 pixels, but in practice
448 * only 128 pixel columns are used.
449 *
450 * The pitch between the start of each column is set to optimally
451 * switch between SDRAM banks. This is passed as the number of lines
452 * of column width in the modifier (we can't use the stride value due
453 * to various core checks that look at it , so you should set the
454 * stride to width*cpp).
455 *
456 * Note that the column height for this format modifier is the same
457 * for all of the planes, assuming that each column contains both Y
458 * and UV. Some SAND-using hardware stores UV in a separate tiled
459 * image from Y to reduce the column height, which is not supported
460 * with these modifiers.
461 */
462
463#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
464 fourcc_mod_broadcom_code(2, v)
465#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
466 fourcc_mod_broadcom_code(3, v)
467#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
468 fourcc_mod_broadcom_code(4, v)
469#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
470 fourcc_mod_broadcom_code(5, v)
471
472#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
473 DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
474#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
475 DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
476#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
477 DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
478#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
479 DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
480
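A hedged sketch of packing and unpacking a parameterised Broadcom modifier with the macros introduced above; for SAND the parameter is the column height in lines:

	#include <stdint.h>
	/* assumes <drm/drm_fourcc.h> (this header) is included */

	static inline uint64_t sand128_modifier(uint32_t col_height)
	{
		return DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(col_height);
	}

	static inline int sand_col_height(uint64_t modifier)
	{
		return fourcc_mod_broadcom_param(modifier);
	}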
481/* Broadcom UIF format
482 *
483 * This is the common format for the current Broadcom multimedia
484 * blocks, including V3D 3.x and newer, newer video codecs, and
485 * displays.
486 *
487 * The image consists of utiles (64b blocks), UIF blocks (2x2 utiles),
488 * and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are
489 * stored in columns, with padding between the columns to ensure that
490 * moving from one column to the next doesn't hit the same SDRAM page
491 * bank.
492 *
493 * To calculate the padding, it is assumed that each hardware block
494 * and the software driving it knows the platform's SDRAM page size,
495 * number of banks, and XOR address, and that it's identical between
496 * all blocks using the format. This tiling modifier will use XOR as
497 * necessary to reduce the padding. If a hardware block can't do XOR,
498 * the assumption is that a no-XOR tiling modifier will be created.
499 */
500#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
501
502/*
503 * Arm Framebuffer Compression (AFBC) modifiers
504 *
505 * AFBC is a proprietary lossless image compression protocol and format.
506 * It provides fine-grained random access and minimizes the amount of data
507 * transferred between IP blocks.
508 *
509 * AFBC has several features which may be supported and/or used, which are
510 * represented using bits in the modifier. Not all combinations are valid,
511 * and different devices or use-cases may support different combinations.
512 */
513#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
514
515/*
516 * AFBC superblock size
517 *
518 * Indicates the superblock size(s) used for the AFBC buffer. The buffer
519 * size (in pixels) must be aligned to a multiple of the superblock size.
 520 * The four least significant bits (LSBs) are reserved for the block size.
521 */
522#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
523#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
524#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
525
526/*
527 * AFBC lossless colorspace transform
528 *
529 * Indicates that the buffer makes use of the AFBC lossless colorspace
530 * transform.
531 */
532#define AFBC_FORMAT_MOD_YTR (1ULL << 4)
533
534/*
535 * AFBC block-split
536 *
537 * Indicates that the payload of each superblock is split. The second
538 * half of the payload is positioned at a predefined offset from the start
539 * of the superblock payload.
540 */
541#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5)
542
543/*
544 * AFBC sparse layout
545 *
546 * This flag indicates that the payload of each superblock must be stored at a
547 * predefined position relative to the other superblocks in the same AFBC
548 * buffer. This order is the same order used by the header buffer. In this mode
549 * each superblock is given the same amount of space as an uncompressed
550 * superblock of the particular format would require, rounding up to the next
551 * multiple of 128 bytes in size.
552 */
553#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6)
554
555/*
556 * AFBC copy-block restrict
557 *
558 * Buffers with this flag must obey the copy-block restriction. The restriction
559 * is such that there are no copy-blocks referring across the border of 8x8
560 * blocks. For the subsampled data the 8x8 limitation is also subsampled.
561 */
562#define AFBC_FORMAT_MOD_CBR (1ULL << 7)
563
564/*
565 * AFBC tiled layout
566 *
567 * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
568 * superblocks inside a tile are stored together in memory. 8x8 tiles are used
569 * for pixel formats up to and including 32 bpp while 4x4 tiles are used for
 570 * larger bpp formats. Tiles are ordered in scanline order.
571 * When the tiled layout is used, the buffer size (in pixels) must be aligned
572 * to the tile size.
573 */
574#define AFBC_FORMAT_MOD_TILED (1ULL << 8)
575
576/*
577 * AFBC solid color blocks
578 *
579 * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
580 * can be reduced if a whole superblock is a single color.
581 */
582#define AFBC_FORMAT_MOD_SC (1ULL << 9)
583
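A hedged sketch of composing an AFBC modifier from the feature bits above; which combinations a given device actually accepts is implementation specific:

	#include <stdint.h>
	/* assumes <drm/drm_fourcc.h> (this header) is included */

	static inline uint64_t afbc_16x16_ytr_sparse(void)
	{
		return DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
					       AFBC_FORMAT_MOD_YTR |
					       AFBC_FORMAT_MOD_SPARSE);
	}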
408#if defined(__cplusplus) 584#if defined(__cplusplus)
409} 585}
410#endif 586#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 4b3a1bb58e68..8d67243952f4 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -96,6 +96,13 @@ extern "C" {
96#define DRM_MODE_PICTURE_ASPECT_64_27 3 96#define DRM_MODE_PICTURE_ASPECT_64_27 3
97#define DRM_MODE_PICTURE_ASPECT_256_135 4 97#define DRM_MODE_PICTURE_ASPECT_256_135 4
98 98
99/* Content type options */
100#define DRM_MODE_CONTENT_TYPE_NO_DATA 0
101#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1
102#define DRM_MODE_CONTENT_TYPE_PHOTO 2
103#define DRM_MODE_CONTENT_TYPE_CINEMA 3
104#define DRM_MODE_CONTENT_TYPE_GAME 4
105
99/* Aspect ratio flag bitmask (4 bits 22:19) */ 106/* Aspect ratio flag bitmask (4 bits 22:19) */
100#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19) 107#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
101#define DRM_MODE_FLAG_PIC_AR_NONE \ 108#define DRM_MODE_FLAG_PIC_AR_NONE \
@@ -344,6 +351,7 @@ enum drm_mode_subconnector {
344#define DRM_MODE_CONNECTOR_VIRTUAL 15 351#define DRM_MODE_CONNECTOR_VIRTUAL 15
345#define DRM_MODE_CONNECTOR_DSI 16 352#define DRM_MODE_CONNECTOR_DSI 16
346#define DRM_MODE_CONNECTOR_DPI 17 353#define DRM_MODE_CONNECTOR_DPI 17
354#define DRM_MODE_CONNECTOR_WRITEBACK 18
347 355
348struct drm_mode_get_connector { 356struct drm_mode_get_connector {
349 357
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 0bc784f5e0db..399f58317cff 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -40,6 +40,7 @@ extern "C" {
40 40
41#define DRM_VMW_GET_PARAM 0 41#define DRM_VMW_GET_PARAM 0
42#define DRM_VMW_ALLOC_DMABUF 1 42#define DRM_VMW_ALLOC_DMABUF 1
43#define DRM_VMW_ALLOC_BO 1
43#define DRM_VMW_UNREF_DMABUF 2 44#define DRM_VMW_UNREF_DMABUF 2
44#define DRM_VMW_HANDLE_CLOSE 2 45#define DRM_VMW_HANDLE_CLOSE 2
45#define DRM_VMW_CURSOR_BYPASS 3 46#define DRM_VMW_CURSOR_BYPASS 3
@@ -68,6 +69,8 @@ extern "C" {
68#define DRM_VMW_GB_SURFACE_REF 24 69#define DRM_VMW_GB_SURFACE_REF 24
69#define DRM_VMW_SYNCCPU 25 70#define DRM_VMW_SYNCCPU 25
70#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26 71#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
72#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
73#define DRM_VMW_GB_SURFACE_REF_EXT 28
71 74
72/*************************************************************************/ 75/*************************************************************************/
73/** 76/**
@@ -79,6 +82,9 @@ extern "C" {
79 * 82 *
80 * DRM_VMW_PARAM_OVERLAY_IOCTL: 83 * DRM_VMW_PARAM_OVERLAY_IOCTL:
81 * Does the driver support the overlay ioctl. 84 * Does the driver support the overlay ioctl.
85 *
86 * DRM_VMW_PARAM_SM4_1
87 * SM4_1 support is enabled.
82 */ 88 */
83 89
84#define DRM_VMW_PARAM_NUM_STREAMS 0 90#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -94,6 +100,8 @@ extern "C" {
94#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 100#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
95#define DRM_VMW_PARAM_SCREEN_TARGET 11 101#define DRM_VMW_PARAM_SCREEN_TARGET 11
96#define DRM_VMW_PARAM_DX 12 102#define DRM_VMW_PARAM_DX 12
103#define DRM_VMW_PARAM_HW_CAPS2 13
104#define DRM_VMW_PARAM_SM4_1 14
97 105
98/** 106/**
99 * enum drm_vmw_handle_type - handle type for ref ioctls 107 * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -356,9 +364,9 @@ struct drm_vmw_fence_rep {
356 364
357/*************************************************************************/ 365/*************************************************************************/
358/** 366/**
359 * DRM_VMW_ALLOC_DMABUF 367 * DRM_VMW_ALLOC_BO
360 * 368 *
361 * Allocate a DMA buffer that is visible also to the host. 369 * Allocate a buffer object that is visible also to the host.
362 * NOTE: The buffer is 370 * NOTE: The buffer is
363 * identified by a handle and an offset, which are private to the guest, but 371 * identified by a handle and an offset, which are private to the guest, but
364 * useable in the command stream. The guest kernel may translate these 372 * useable in the command stream. The guest kernel may translate these
@@ -366,27 +374,28 @@ struct drm_vmw_fence_rep {
366 * be zero at all times, or it may disappear from the interface before it is 374 * be zero at all times, or it may disappear from the interface before it is
367 * fixed. 375 * fixed.
368 * 376 *
369 * The DMA buffer may stay user-space mapped in the guest at all times, 377 * The buffer object may stay user-space mapped in the guest at all times,
370 * and is thus suitable for sub-allocation. 378 * and is thus suitable for sub-allocation.
371 * 379 *
372 * DMA buffers are mapped using the mmap() syscall on the drm device. 380 * Buffer objects are mapped using the mmap() syscall on the drm device.
373 */ 381 */
374 382
375/** 383/**
376 * struct drm_vmw_alloc_dmabuf_req 384 * struct drm_vmw_alloc_bo_req
377 * 385 *
378 * @size: Required minimum size of the buffer. 386 * @size: Required minimum size of the buffer.
379 * 387 *
380 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. 388 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
381 */ 389 */
382 390
383struct drm_vmw_alloc_dmabuf_req { 391struct drm_vmw_alloc_bo_req {
384 __u32 size; 392 __u32 size;
385 __u32 pad64; 393 __u32 pad64;
386}; 394};
395#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
387 396
388/** 397/**
389 * struct drm_vmw_dmabuf_rep 398 * struct drm_vmw_bo_rep
390 * 399 *
391 * @map_handle: Offset to use in the mmap() call used to map the buffer. 400 * @map_handle: Offset to use in the mmap() call used to map the buffer.
392 * @handle: Handle unique to this buffer. Used for unreferencing. 401 * @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +404,32 @@ struct drm_vmw_alloc_dmabuf_req {
395 * @cur_gmr_offset: Offset to use in the command stream when this buffer is 404 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
396 * referenced. See note above. 405 * referenced. See note above.
397 * 406 *
398 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. 407 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
399 */ 408 */
400 409
401struct drm_vmw_dmabuf_rep { 410struct drm_vmw_bo_rep {
402 __u64 map_handle; 411 __u64 map_handle;
403 __u32 handle; 412 __u32 handle;
404 __u32 cur_gmr_id; 413 __u32 cur_gmr_id;
405 __u32 cur_gmr_offset; 414 __u32 cur_gmr_offset;
406 __u32 pad64; 415 __u32 pad64;
407}; 416};
417#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
408 418
409/** 419/**
410 * union drm_vmw_dmabuf_arg 420 * union drm_vmw_alloc_bo_arg
411 * 421 *
412 * @req: Input data as described above. 422 * @req: Input data as described above.
413 * @rep: Output data as described above. 423 * @rep: Output data as described above.
414 * 424 *
415 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. 425 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
416 */ 426 */
417 427
418union drm_vmw_alloc_dmabuf_arg { 428union drm_vmw_alloc_bo_arg {
419 struct drm_vmw_alloc_dmabuf_req req; 429 struct drm_vmw_alloc_bo_req req;
420 struct drm_vmw_dmabuf_rep rep; 430 struct drm_vmw_bo_rep rep;
421};
422
423/*************************************************************************/
424/**
425 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
426 *
427 */
428
429/**
430 * struct drm_vmw_unref_dmabuf_arg
431 *
432 * @handle: Handle indicating what buffer to free. Obtained from the
433 * DRM_VMW_ALLOC_DMABUF Ioctl.
434 *
435 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
436 */
437
438struct drm_vmw_unref_dmabuf_arg {
439 __u32 handle;
440 __u32 pad64;
441}; 431};
432#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
442 433
443/*************************************************************************/ 434/*************************************************************************/
444/** 435/**
@@ -1103,9 +1094,8 @@ union drm_vmw_extended_context_arg {
1103 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its 1094 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
1104 * underlying resource. 1095 * underlying resource.
1105 * 1096 *
1106 * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl. 1097 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
1107 * The ioctl arguments therefore need to be identical in layout. 1098 * Ioctl.
1108 *
1109 */ 1099 */
1110 1100
1111/** 1101/**
@@ -1119,7 +1109,107 @@ struct drm_vmw_handle_close_arg {
1119 __u32 handle; 1109 __u32 handle;
1120 __u32 pad64; 1110 __u32 pad64;
1121}; 1111};
1112#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
1113
1114/*************************************************************************/
1115/**
1116 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
1117 *
1118 * Allocates a surface handle and queues a create surface command
1119 * for the host on the first use of the surface. The surface ID can
1120 * be used as the surface ID in commands referencing the surface.
1121 *
 1122 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 1123 * parameter and 64-bit svga3d flags.
1124 */
1125
1126/**
1127 * enum drm_vmw_surface_version
1128 *
 1129 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
1130 * svga3d surface flags split into 2, upper half and lower half.
1131 */
1132enum drm_vmw_surface_version {
1133 drm_vmw_gb_surface_v1
1134};
1135
1136/**
1137 * struct drm_vmw_gb_surface_create_ext_req
1138 *
1139 * @base: Surface create parameters.
1140 * @version: Version of surface create ioctl.
1141 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
1142 * @multisample_pattern: Multisampling pattern when msaa is supported.
1143 * @quality_level: Precision settings for each sample.
1144 * @must_be_zero: Reserved for future usage.
1145 *
1146 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
1147 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
1148 */
1149struct drm_vmw_gb_surface_create_ext_req {
1150 struct drm_vmw_gb_surface_create_req base;
1151 enum drm_vmw_surface_version version;
1152 uint32_t svga3d_flags_upper_32_bits;
1153 SVGA3dMSPattern multisample_pattern;
1154 SVGA3dMSQualityLevel quality_level;
1155 uint64_t must_be_zero;
1156};
1157
1158/**
1159 * union drm_vmw_gb_surface_create_ext_arg
1160 *
1161 * @req: Input argument as described above.
1162 * @rep: Output argument as described above.
1163 *
1164 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
1165 */
1166union drm_vmw_gb_surface_create_ext_arg {
1167 struct drm_vmw_gb_surface_create_rep rep;
1168 struct drm_vmw_gb_surface_create_ext_req req;
1169};
1170
1171/*************************************************************************/
1172/**
1173 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
1174 *
1175 * Puts a reference on a host surface with a given handle, as previously
1176 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
1177 * A reference will make sure the surface isn't destroyed while we hold
1178 * it and will allow the calling client to use the surface handle in
1179 * the command stream.
1180 *
1181 * On successful return, the Ioctl returns the surface information given
1182 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
1183 */
1122 1184
1185/**
1186 * struct drm_vmw_gb_surface_ref_ext_rep
1187 *
1188 * @creq: The data used as input when the surface was created, as described
1189 * above at "struct drm_vmw_gb_surface_create_ext_req"
1190 * @crep: Additional data output when the surface was created, as described
1191 * above at "struct drm_vmw_gb_surface_create_rep"
1192 *
1193 * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
1194 */
1195struct drm_vmw_gb_surface_ref_ext_rep {
1196 struct drm_vmw_gb_surface_create_ext_req creq;
1197 struct drm_vmw_gb_surface_create_rep crep;
1198};
1199
1200/**
1201 * union drm_vmw_gb_surface_reference_ext_arg
1202 *
1203 * @req: Input data as described above at "struct drm_vmw_surface_arg"
1204 * @rep: Output data as described above at
1205 * "struct drm_vmw_gb_surface_ref_ext_rep"
1206 *
 1207 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
1208 */
1209union drm_vmw_gb_surface_reference_ext_arg {
1210 struct drm_vmw_gb_surface_ref_ext_rep rep;
1211 struct drm_vmw_surface_arg req;
1212};
1123 1213
1124#if defined(__cplusplus) 1214#if defined(__cplusplus)
1125} 1215}
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index d00221345c19..ce43d340f010 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -29,7 +29,6 @@
29 29
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/signal.h>
33#include <asm/byteorder.h> 32#include <asm/byteorder.h>
34 33
35typedef __kernel_ulong_t aio_context_t; 34typedef __kernel_ulong_t aio_context_t;
@@ -108,10 +107,5 @@ struct iocb {
108#undef IFBIG 107#undef IFBIG
109#undef IFLITTLE 108#undef IFLITTLE
110 109
111struct __aio_sigset {
112 const sigset_t __user *sigmask;
113 size_t sigsetsize;
114};
115
116#endif /* __LINUX__AIO_ABI_H */ 110#endif /* __LINUX__AIO_ABI_H */
117 111
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index c35aee9ad4a6..818ae690ab79 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -148,6 +148,7 @@
148#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */ 148#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
149#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */ 149#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
150#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */ 150#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
151#define AUDIT_INTEGRITY_POLICY_RULE 1807 /* IMA policy rules */
151 152
152#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ 153#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
153 154
@@ -157,7 +158,8 @@
157#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */ 158#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */
158#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */ 159#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */
159#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */ 160#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */
160#define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */ 161#define AUDIT_FILTER_EXCLUDE 0x05 /* Apply rule before record creation */
162#define AUDIT_FILTER_TYPE AUDIT_FILTER_EXCLUDE /* obsolete misleading naming */
161#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */ 163#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
162 164
163#define AUDIT_NR_FILTERS 7 165#define AUDIT_NR_FILTERS 7
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 821f71a2e48f..8d19e02d752a 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -195,7 +195,7 @@ struct cache_sb {
195 }; 195 };
196 }; 196 };
197 197
198 __u32 last_mount; /* time_t */ 198 __u32 last_mount; /* time overflow in y2106 */
199 199
200 __u16 first_bucket; 200 __u16 first_bucket;
201 union { 201 union {
@@ -318,7 +318,7 @@ struct uuid_entry {
318 struct { 318 struct {
319 __u8 uuid[16]; 319 __u8 uuid[16];
320 __u8 label[32]; 320 __u8 label[32];
321 __u32 first_reg; 321 __u32 first_reg; /* time overflow in y2106 */
322 __u32 last_reg; 322 __u32 last_reg;
323 __u32 invalidated; 323 __u32 invalidated;
324 324
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index e3c70fe6bf0f..ff5a5db8906a 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -117,7 +117,7 @@ struct blk_zone_report {
117 __u32 nr_zones; 117 __u32 nr_zones;
118 __u8 reserved[4]; 118 __u8 reserved[4];
119 struct blk_zone zones[0]; 119 struct blk_zone zones[0];
120} __packed; 120};
121 121
122/** 122/**
123 * struct blk_zone_range - BLKRESETZONE ioctl request 123 * struct blk_zone_range - BLKRESETZONE ioctl request
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 59b19b6a40d7..66917a4eba27 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -75,6 +75,11 @@ struct bpf_lpm_trie_key {
75 __u8 data[0]; /* Arbitrary size */ 75 __u8 data[0]; /* Arbitrary size */
76}; 76};
77 77
78struct bpf_cgroup_storage_key {
79 __u64 cgroup_inode_id; /* cgroup inode id */
80 __u32 attach_type; /* program attach type */
81};
82
78/* BPF syscall commands, see bpf(2) man-page for details. */ 83/* BPF syscall commands, see bpf(2) man-page for details. */
79enum bpf_cmd { 84enum bpf_cmd {
80 BPF_MAP_CREATE, 85 BPF_MAP_CREATE,
@@ -120,6 +125,8 @@ enum bpf_map_type {
120 BPF_MAP_TYPE_CPUMAP, 125 BPF_MAP_TYPE_CPUMAP,
121 BPF_MAP_TYPE_XSKMAP, 126 BPF_MAP_TYPE_XSKMAP,
122 BPF_MAP_TYPE_SOCKHASH, 127 BPF_MAP_TYPE_SOCKHASH,
128 BPF_MAP_TYPE_CGROUP_STORAGE,
129 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
123}; 130};
124 131
125enum bpf_prog_type { 132enum bpf_prog_type {
@@ -144,6 +151,7 @@ enum bpf_prog_type {
144 BPF_PROG_TYPE_CGROUP_SOCK_ADDR, 151 BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
145 BPF_PROG_TYPE_LWT_SEG6LOCAL, 152 BPF_PROG_TYPE_LWT_SEG6LOCAL,
146 BPF_PROG_TYPE_LIRC_MODE2, 153 BPF_PROG_TYPE_LIRC_MODE2,
154 BPF_PROG_TYPE_SK_REUSEPORT,
147}; 155};
148 156
149enum bpf_attach_type { 157enum bpf_attach_type {
@@ -1371,6 +1379,20 @@ union bpf_attr {
1371 * An 8-byte long non-decreasing number on success, or 0 if the 1379 * An 8-byte long non-decreasing number on success, or 0 if the
1372 * socket field is missing inside *skb*. 1380 * socket field is missing inside *skb*.
1373 * 1381 *
1382 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1383 * Description
1384 * Equivalent to bpf_get_socket_cookie() helper that accepts
 1385 * *skb*, but gets socket from **struct bpf_sock_addr** context.
1386 * Return
 1387 * An 8-byte long non-decreasing number.
1388 *
1389 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1390 * Description
1391 * Equivalent to bpf_get_socket_cookie() helper that accepts
 1392 * *skb*, but gets socket from **struct bpf_sock_ops** context.
1393 * Return
 1394 * An 8-byte long non-decreasing number.
1395 *
1374 * u32 bpf_get_socket_uid(struct sk_buff *skb) 1396 * u32 bpf_get_socket_uid(struct sk_buff *skb)
1375 * Return 1397 * Return
1376 * The owner UID of the socket associated to *skb*. If the socket 1398 * The owner UID of the socket associated to *skb*. If the socket
@@ -1826,7 +1848,7 @@ union bpf_attr {
1826 * A non-negative value equal to or less than *size* on success, 1848 * A non-negative value equal to or less than *size* on success,
1827 * or a negative error in case of failure. 1849 * or a negative error in case of failure.
1828 * 1850 *
1829 * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) 1851 * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
1830 * Description 1852 * Description
1831 * This helper is similar to **bpf_skb_load_bytes**\ () in that 1853 * This helper is similar to **bpf_skb_load_bytes**\ () in that
1832 * it provides an easy way to load *len* bytes from *offset* 1854 * it provides an easy way to load *len* bytes from *offset*
@@ -1857,7 +1879,8 @@ union bpf_attr {
1857 * is resolved), the nexthop address is returned in ipv4_dst 1879 * is resolved), the nexthop address is returned in ipv4_dst
1858 * or ipv6_dst based on family, smac is set to mac address of 1880 * or ipv6_dst based on family, smac is set to mac address of
1859 * egress device, dmac is set to nexthop mac address, rt_metric 1881 * egress device, dmac is set to nexthop mac address, rt_metric
1860 * is set to metric from route (IPv4/IPv6 only). 1882 * is set to metric from route (IPv4/IPv6 only), and ifindex
1883 * is set to the device index of the nexthop from the FIB lookup.
1861 * 1884 *
1862 * *plen* argument is the size of the passed in struct. 1885 * *plen* argument is the size of the passed in struct.
1863 * *flags* argument can be a combination of one or more of the 1886 * *flags* argument can be a combination of one or more of the
@@ -1873,9 +1896,10 @@ union bpf_attr {
1873 * *ctx* is either **struct xdp_md** for XDP programs or 1896 * *ctx* is either **struct xdp_md** for XDP programs or
1874 * **struct sk_buff** tc cls_act programs. 1897 * **struct sk_buff** tc cls_act programs.
1875 * Return 1898 * Return
1876 * Egress device index on success, 0 if packet needs to continue 1899 * * < 0 if any input argument is invalid
1877 * up the stack for further processing or a negative error in case 1900 * * 0 on success (packet is forwarded, nexthop neighbor exists)
1878 * of failure. 1901 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
1902 * packet is not forwarded or needs assist from full stack
1879 * 1903 *
1880 * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) 1904 * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
1881 * Description 1905 * Description
@@ -2031,7 +2055,6 @@ union bpf_attr {
2031 * This helper is only available if the kernel was compiled with 2055 * This helper is only available if the kernel was compiled with
2032 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2056 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2033 * "**y**". 2057 * "**y**".
2034 *
2035 * Return 2058 * Return
2036 * 0 2059 * 0
2037 * 2060 *
@@ -2051,7 +2074,6 @@ union bpf_attr {
2051 * This helper is only available if the kernel was compiled with 2074 * This helper is only available if the kernel was compiled with
2052 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2075 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2053 * "**y**". 2076 * "**y**".
2054 *
2055 * Return 2077 * Return
2056 * 0 2078 * 0
2057 * 2079 *
@@ -2071,10 +2093,54 @@ union bpf_attr {
2071 * Return 2093 * Return
2072 * The id is returned or 0 in case the id could not be retrieved. 2094 * The id is returned or 0 in case the id could not be retrieved.
2073 * 2095 *
2096 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2097 * Description
 2098 * Return the id of the cgroup v2 that is an ancestor of the cgroup
 2099 * associated with the *skb* at the *ancestor_level*. The root cgroup is at
2100 * *ancestor_level* zero and each step down the hierarchy
2101 * increments the level. If *ancestor_level* == level of cgroup
2102 * associated with *skb*, then return value will be same as that
2103 * of **bpf_skb_cgroup_id**\ ().
2104 *
2105 * The helper is useful to implement policies based on cgroups
 2106 * that are higher in the hierarchy than the immediate cgroup associated
2107 * with *skb*.
2108 *
2109 * The format of returned id and helper limitations are same as in
2110 * **bpf_skb_cgroup_id**\ ().
2111 * Return
2112 * The id is returned or 0 in case the id could not be retrieved.
2113 *
2074 * u64 bpf_get_current_cgroup_id(void) 2114 * u64 bpf_get_current_cgroup_id(void)
2075 * Return 2115 * Return
2076 * A 64-bit integer containing the current cgroup id based 2116 * A 64-bit integer containing the current cgroup id based
2077 * on the cgroup within which the current task is running. 2117 * on the cgroup within which the current task is running.
2118 *
 2119 * void *bpf_get_local_storage(void *map, u64 flags)
2120 * Description
2121 * Get the pointer to the local storage area.
2122 * The type and the size of the local storage is defined
2123 * by the *map* argument.
2124 * The *flags* meaning is specific for each map type,
2125 * and has to be 0 for cgroup local storage.
2126 *
2127 * Depending on the bpf program type, a local storage area
2128 * can be shared between multiple instances of the bpf program,
2129 * running simultaneously.
2130 *
 2131 * Users must handle the synchronization themselves, for example
 2132 * by using the BPF_STX_XADD instruction to alter
 2133 * the shared data.
2134 * Return
2135 * Pointer to the local storage area.
2136 *
2137 * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
2138 * Description
 2139 * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map.
 2140 * It checks that the selected sk matches the incoming
 2141 * request in the skb.
2142 * Return
2143 * 0 on success, or a negative error in case of failure.
2078 */ 2144 */
2079#define __BPF_FUNC_MAPPER(FN) \ 2145#define __BPF_FUNC_MAPPER(FN) \
2080 FN(unspec), \ 2146 FN(unspec), \
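A hedged sketch of the cgroup local storage flow documented above: a BPF_MAP_TYPE_CGROUP_STORAGE map plus bpf_get_local_storage(), written in the kernel-selftests style (SEC(), struct bpf_map_def and the helper declarations are assumed to come from the selftests' bpf_helpers.h):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* assumed: SEC(), bpf_map_def, helper decls */

	struct bpf_map_def SEC("maps") pkt_counter = {
		.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
		.key_size	= sizeof(struct bpf_cgroup_storage_key),
		.value_size	= sizeof(__u64),
		.max_entries	= 0,	/* the kernel sizes cgroup storage itself */
	};

	SEC("cgroup/skb")
	int count_pkts(struct __sk_buff *skb)
	{
		__u64 *val = bpf_get_local_storage(&pkt_counter, 0);

		/* Storage may be shared between program instances, so use an
		 * atomic add (compiled to BPF_STX_XADD). */
		__sync_fetch_and_add(val, 1);
		return 1;	/* allow the packet */
	}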
@@ -2157,7 +2223,10 @@ union bpf_attr {
2157 FN(rc_repeat), \ 2223 FN(rc_repeat), \
2158 FN(rc_keydown), \ 2224 FN(rc_keydown), \
2159 FN(skb_cgroup_id), \ 2225 FN(skb_cgroup_id), \
2160 FN(get_current_cgroup_id), 2226 FN(get_current_cgroup_id), \
2227 FN(get_local_storage), \
2228 FN(sk_select_reuseport), \
2229 FN(skb_ancestor_cgroup_id),
2161 2230
2162/* integer value in 'imm' field of BPF_CALL instruction selects which helper 2231/* integer value in 'imm' field of BPF_CALL instruction selects which helper
2163 * function eBPF program intends to call 2232 * function eBPF program intends to call
@@ -2374,6 +2443,30 @@ struct sk_msg_md {
2374 __u32 local_port; /* stored in host byte order */ 2443 __u32 local_port; /* stored in host byte order */
2375}; 2444};
2376 2445
2446struct sk_reuseport_md {
2447 /*
2448 * Start of directly accessible data. It begins from
2449 * the tcp/udp header.
2450 */
2451 void *data;
2452 void *data_end; /* End of directly accessible data */
2453 /*
2454 * Total length of packet (starting from the tcp/udp header).
2455 * Note that the directly accessible bytes (data_end - data)
2456 * could be less than this "len". Those bytes could be
2457 * indirectly read by a helper "bpf_skb_load_bytes()".
2458 */
2459 __u32 len;
2460 /*
2461 * Eth protocol in the mac header (network byte order). e.g.
2462 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
2463 */
2464 __u32 eth_protocol;
2465 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
2466 __u32 bind_inany; /* Is sock bound to an INANY address? */
2467 __u32 hash; /* A hash of the packet 4 tuples */
2468};
2469
2377#define BPF_TAG_SIZE 8 2470#define BPF_TAG_SIZE 8
2378 2471
2379struct bpf_prog_info { 2472struct bpf_prog_info {
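A hedged sketch of an SK_REUSEPORT program built on the new context and helper: it steers incoming requests by the precomputed 4-tuple hash into a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY. The map definition style and the bpf_helpers.h path are assumptions in the kernel-selftests style:

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* assumed: SEC(), bpf_map_def, helper decls */

	struct bpf_map_def SEC("maps") sockarray = {
		.type		= BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		.key_size	= sizeof(__u32),
		.value_size	= sizeof(__u32),	/* socket fd stored at update time */
		.max_entries	= 16,
	};

	SEC("sk_reuseport")
	int select_by_hash(struct sk_reuseport_md *reuse)
	{
		__u32 index = reuse->hash % 16;

		/* On success the kernel delivers to the selected socket; if the
		 * lookup fails, SK_PASS falls back to the normal reuseport pick. */
		bpf_sk_select_reuseport(reuse, &sockarray, &index, 0);
		return SK_PASS;
	}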
@@ -2555,6 +2648,9 @@ enum {
2555 * Arg1: old_state 2648 * Arg1: old_state
2556 * Arg2: new_state 2649 * Arg2: new_state
2557 */ 2650 */
2651 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
2652 * socket transition to LISTEN state.
2653 */
2558}; 2654};
2559 2655
2560/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect 2656/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -2612,6 +2708,18 @@ struct bpf_raw_tracepoint_args {
2612#define BPF_FIB_LOOKUP_DIRECT BIT(0) 2708#define BPF_FIB_LOOKUP_DIRECT BIT(0)
2613#define BPF_FIB_LOOKUP_OUTPUT BIT(1) 2709#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
2614 2710
2711enum {
2712 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
2713 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
2714 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
2715 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
2716 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
2717 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
2718 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
2719 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
2720 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
2721};
2722
2615struct bpf_fib_lookup { 2723struct bpf_fib_lookup {
2616 /* input: network family for lookup (AF_INET, AF_INET6) 2724 /* input: network family for lookup (AF_INET, AF_INET6)
2617 * output: network family of egress nexthop 2725 * output: network family of egress nexthop
@@ -2625,7 +2733,11 @@ struct bpf_fib_lookup {
2625 2733
2626 /* total length of packet from network header - used for MTU check */ 2734 /* total length of packet from network header - used for MTU check */
2627 __u16 tot_len; 2735 __u16 tot_len;
2628 __u32 ifindex; /* L3 device index for lookup */ 2736
2737 /* input: L3 device index for lookup
2738 * output: device index from FIB lookup
2739 */
2740 __u32 ifindex;
2629 2741
2630 union { 2742 union {
2631 /* inputs to lookup */ 2743 /* inputs to lookup */
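A hedged sketch of how an XDP program can react to the bpf_fib_lookup() return codes above; packet parsing, MAC rewriting and the bpf_helpers.h path are elided or assumed:

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* assumed: SEC() and helper declarations */

	SEC("xdp")
	int xdp_fwd(struct xdp_md *ctx)
	{
		struct bpf_fib_lookup fib = {};
		int rc;

		fib.ifindex = ctx->ingress_ifindex;
		/* ... fill fib.family, addresses and tot_len from the parsed packet ... */

		rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
		if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
			/* A real forwarder would also rewrite the MAC header
			 * using fib.smac/fib.dmac before redirecting. */
			return bpf_redirect(fib.ifindex, 0);
		}
		if (rc < 0)
			return XDP_ABORTED;	/* invalid arguments */

		return XDP_PASS;	/* other BPF_FIB_LKUP_RET_*: let the stack handle it */
	}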
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 0b5ddbe135a4..972265f32871 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -76,7 +76,7 @@ struct btf_type {
76 */ 76 */
77#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 77#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
78#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 78#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
79#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff) 79#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
80 80
81/* Attributes stored in the BTF_INT_ENCODING */ 81/* Attributes stored in the BTF_INT_ENCODING */
82#define BTF_INT_SIGNED (1 << 0) 82#define BTF_INT_SIGNED (1 << 0)
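A hedged sketch of decoding the u32 that follows a BTF_KIND_INT type, using the accessors above (note BTF_INT_BITS() now masks only the low 8 bits):

	#include <stdint.h>
	/* assumes <linux/btf.h> (this header) is included */

	static inline void btf_int_decode(uint32_t val, uint32_t *encoding,
					  uint32_t *offset, uint32_t *bits)
	{
		*encoding = BTF_INT_ENCODING(val);
		*offset   = BTF_INT_OFFSET(val);
		*bits     = BTF_INT_BITS(val);	/* fits in the low 8 bits */
	}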
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index d7f97ac197a9..0afb7d8e867f 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -77,7 +77,7 @@ typedef __u32 canid_t;
77/* 77/*
78 * Controller Area Network Error Message Frame Mask structure 78 * Controller Area Network Error Message Frame Mask structure
79 * 79 *
80 * bit 0-28 : error class mask (see include/linux/can/error.h) 80 * bit 0-28 : error class mask (see include/uapi/linux/can/error.h)
81 * bit 29-31 : set to zero 81 * bit 29-31 : set to zero
82 */ 82 */
83typedef __u32 can_err_mask_t; 83typedef __u32 can_err_mask_t;
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 20fe091b7e96..097fcd812471 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -384,6 +384,8 @@ struct cec_log_addrs {
384#define CEC_EVENT_PIN_CEC_HIGH 4 384#define CEC_EVENT_PIN_CEC_HIGH 4
385#define CEC_EVENT_PIN_HPD_LOW 5 385#define CEC_EVENT_PIN_HPD_LOW 5
386#define CEC_EVENT_PIN_HPD_HIGH 6 386#define CEC_EVENT_PIN_HPD_HIGH 6
387#define CEC_EVENT_PIN_5V_LOW 7
388#define CEC_EVENT_PIN_5V_HIGH 8
387 389
388#define CEC_EVENT_FL_INITIAL_STATE (1 << 0) 390#define CEC_EVENT_FL_INITIAL_STATE (1 << 0)
389#define CEC_EVENT_FL_DROPPED_EVENTS (1 << 1) 391#define CEC_EVENT_FL_DROPPED_EVENTS (1 << 1)
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 60aa2e446698..69df19aa8e72 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -233,7 +233,8 @@ struct cee_pfc {
233 * 2 Well known port number over TCP or SCTP 233 * 2 Well known port number over TCP or SCTP
234 * 3 Well known port number over UDP or DCCP 234 * 3 Well known port number over UDP or DCCP
235 * 4 Well known port number over TCP, SCTP, UDP, or DCCP 235 * 4 Well known port number over TCP, SCTP, UDP, or DCCP
236 * 5-7 Reserved 236 * 5 Differentiated Services Code Point (DSCP) value
237 * 6-7 Reserved
237 * 238 *
238 * Selector field values for CEE 239 * Selector field values for CEE
239 * 0 Ethertype 240 * 0 Ethertype
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 75cb5450c851..79407bbd296d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -78,6 +78,17 @@ enum devlink_command {
78 */ 78 */
79 DEVLINK_CMD_RELOAD, 79 DEVLINK_CMD_RELOAD,
80 80
81 DEVLINK_CMD_PARAM_GET, /* can dump */
82 DEVLINK_CMD_PARAM_SET,
83 DEVLINK_CMD_PARAM_NEW,
84 DEVLINK_CMD_PARAM_DEL,
85
86 DEVLINK_CMD_REGION_GET,
87 DEVLINK_CMD_REGION_SET,
88 DEVLINK_CMD_REGION_NEW,
89 DEVLINK_CMD_REGION_DEL,
90 DEVLINK_CMD_REGION_READ,
91
81 /* add new commands above here */ 92 /* add new commands above here */
82 __DEVLINK_CMD_MAX, 93 __DEVLINK_CMD_MAX,
83 DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 94 DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -142,6 +153,16 @@ enum devlink_port_flavour {
142 */ 153 */
143}; 154};
144 155
156enum devlink_param_cmode {
157 DEVLINK_PARAM_CMODE_RUNTIME,
158 DEVLINK_PARAM_CMODE_DRIVERINIT,
159 DEVLINK_PARAM_CMODE_PERMANENT,
160
161 /* Add new configuration modes above */
162 __DEVLINK_PARAM_CMODE_MAX,
163 DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
164};
165
145enum devlink_attr { 166enum devlink_attr {
146 /* don't change the order or add anything between, this is ABI! */ 167 /* don't change the order or add anything between, this is ABI! */
147 DEVLINK_ATTR_UNSPEC, 168 DEVLINK_ATTR_UNSPEC,
@@ -238,6 +259,27 @@ enum devlink_attr {
238 DEVLINK_ATTR_PORT_NUMBER, /* u32 */ 259 DEVLINK_ATTR_PORT_NUMBER, /* u32 */
239 DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */ 260 DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */
240 261
262 DEVLINK_ATTR_PARAM, /* nested */
263 DEVLINK_ATTR_PARAM_NAME, /* string */
264 DEVLINK_ATTR_PARAM_GENERIC, /* flag */
265 DEVLINK_ATTR_PARAM_TYPE, /* u8 */
266 DEVLINK_ATTR_PARAM_VALUES_LIST, /* nested */
267 DEVLINK_ATTR_PARAM_VALUE, /* nested */
268 DEVLINK_ATTR_PARAM_VALUE_DATA, /* dynamic */
269 DEVLINK_ATTR_PARAM_VALUE_CMODE, /* u8 */
270
271 DEVLINK_ATTR_REGION_NAME, /* string */
272 DEVLINK_ATTR_REGION_SIZE, /* u64 */
273 DEVLINK_ATTR_REGION_SNAPSHOTS, /* nested */
274 DEVLINK_ATTR_REGION_SNAPSHOT, /* nested */
275 DEVLINK_ATTR_REGION_SNAPSHOT_ID, /* u32 */
276
277 DEVLINK_ATTR_REGION_CHUNKS, /* nested */
278 DEVLINK_ATTR_REGION_CHUNK, /* nested */
279 DEVLINK_ATTR_REGION_CHUNK_DATA, /* binary */
280 DEVLINK_ATTR_REGION_CHUNK_ADDR, /* u64 */
281 DEVLINK_ATTR_REGION_CHUNK_LEN, /* u64 */
282
241 /* add new attributes above here, update the policy in devlink.c */ 283 /* add new attributes above here, update the policy in devlink.c */
242 284
243 __DEVLINK_ATTR_MAX, 285 __DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h
index 69f7a85d81b1..afeae063e640 100644
--- a/include/uapi/linux/dvb/audio.h
+++ b/include/uapi/linux/dvb/audio.h
@@ -67,27 +67,6 @@ typedef struct audio_status {
67} audio_status_t; /* separate decoder hardware */ 67} audio_status_t; /* separate decoder hardware */
68 68
69 69
70typedef
71struct audio_karaoke { /* if Vocal1 or Vocal2 are non-zero, they get mixed */
72 int vocal1; /* into left and right t at 70% each */
73 int vocal2; /* if both, Vocal1 and Vocal2 are non-zero, Vocal1 gets*/
74 int melody; /* mixed into the left channel and */
75 /* Vocal2 into the right channel at 100% each. */
76 /* if Melody is non-zero, the melody channel gets mixed*/
77} audio_karaoke_t; /* into left and right */
78
79
80typedef __u16 audio_attributes_t;
81/* bits: descr. */
82/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
83/* 12 multichannel extension */
84/* 11-10 audio type (0=not spec, 1=language included) */
85/* 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) */
86/* 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, */
87/* 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) */
88/* 2- 0 number of audio channels (n+1 channels) */
89
90
91/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */ 70/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
92#define AUDIO_CAP_DTS 1 71#define AUDIO_CAP_DTS 1
93#define AUDIO_CAP_LPCM 2 72#define AUDIO_CAP_LPCM 2
@@ -115,22 +94,6 @@ typedef __u16 audio_attributes_t;
115#define AUDIO_SET_ID _IO('o', 13) 94#define AUDIO_SET_ID _IO('o', 13)
116#define AUDIO_SET_MIXER _IOW('o', 14, audio_mixer_t) 95#define AUDIO_SET_MIXER _IOW('o', 14, audio_mixer_t)
117#define AUDIO_SET_STREAMTYPE _IO('o', 15) 96#define AUDIO_SET_STREAMTYPE _IO('o', 15)
118#define AUDIO_SET_EXT_ID _IO('o', 16)
119#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t)
120#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t)
121
122/**
123 * AUDIO_GET_PTS
124 *
125 * Read the 33 bit presentation time stamp as defined
126 * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
127 *
128 * The PTS should belong to the currently played
129 * frame if possible, but may also be a value close to it
130 * like the PTS of the last decoded frame or the last PTS
131 * extracted by the PES parser.
132 */
133#define AUDIO_GET_PTS _IOR('o', 19, __u64)
134#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20) 97#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
135 98
136#endif /* _DVBAUDIO_H_ */ 99#endif /* _DVBAUDIO_H_ */
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index df3d7028c807..43ba8b0a3d14 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -38,18 +38,6 @@ typedef enum {
38 38
39 39
40typedef enum { 40typedef enum {
41 VIDEO_SYSTEM_PAL,
42 VIDEO_SYSTEM_NTSC,
43 VIDEO_SYSTEM_PALN,
44 VIDEO_SYSTEM_PALNc,
45 VIDEO_SYSTEM_PALM,
46 VIDEO_SYSTEM_NTSC60,
47 VIDEO_SYSTEM_PAL60,
48 VIDEO_SYSTEM_PALM60
49} video_system_t;
50
51
52typedef enum {
53 VIDEO_PAN_SCAN, /* use pan and scan format */ 41 VIDEO_PAN_SCAN, /* use pan and scan format */
54 VIDEO_LETTER_BOX, /* use letterbox format */ 42 VIDEO_LETTER_BOX, /* use letterbox format */
55 VIDEO_CENTER_CUT_OUT /* use center cut out format */ 43 VIDEO_CENTER_CUT_OUT /* use center cut out format */
@@ -160,44 +148,6 @@ struct video_still_picture {
160}; 148};
161 149
162 150
163typedef
164struct video_highlight {
165 int active; /* 1=show highlight, 0=hide highlight */
166 __u8 contrast1; /* 7- 4 Pattern pixel contrast */
167 /* 3- 0 Background pixel contrast */
168 __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
169 /* 3- 0 Emphasis pixel-1 contrast */
170 __u8 color1; /* 7- 4 Pattern pixel color */
171 /* 3- 0 Background pixel color */
172 __u8 color2; /* 7- 4 Emphasis pixel-2 color */
173 /* 3- 0 Emphasis pixel-1 color */
174 __u32 ypos; /* 23-22 auto action mode */
175 /* 21-12 start y */
176 /* 9- 0 end y */
177 __u32 xpos; /* 23-22 button color number */
178 /* 21-12 start x */
179 /* 9- 0 end x */
180} video_highlight_t;
181
182
183typedef struct video_spu {
184 int active;
185 int stream_id;
186} video_spu_t;
187
188
189typedef struct video_spu_palette { /* SPU Palette information */
190 int length;
191 __u8 __user *palette;
192} video_spu_palette_t;
193
194
195typedef struct video_navi_pack {
196 int length; /* 0 ... 1024 */
197 __u8 data[1024];
198} video_navi_pack_t;
199
200
201typedef __u16 video_attributes_t; 151typedef __u16 video_attributes_t;
202/* bits: descr. */ 152/* bits: descr. */
203/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */ 153/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
@@ -242,17 +192,9 @@ typedef __u16 video_attributes_t;
242#define VIDEO_SLOWMOTION _IO('o', 32) 192#define VIDEO_SLOWMOTION _IO('o', 32)
243#define VIDEO_GET_CAPABILITIES _IOR('o', 33, unsigned int) 193#define VIDEO_GET_CAPABILITIES _IOR('o', 33, unsigned int)
244#define VIDEO_CLEAR_BUFFER _IO('o', 34) 194#define VIDEO_CLEAR_BUFFER _IO('o', 34)
245#define VIDEO_SET_ID _IO('o', 35)
246#define VIDEO_SET_STREAMTYPE _IO('o', 36) 195#define VIDEO_SET_STREAMTYPE _IO('o', 36)
247#define VIDEO_SET_FORMAT _IO('o', 37) 196#define VIDEO_SET_FORMAT _IO('o', 37)
248#define VIDEO_SET_SYSTEM _IO('o', 38)
249#define VIDEO_SET_HIGHLIGHT _IOW('o', 39, video_highlight_t)
250#define VIDEO_SET_SPU _IOW('o', 50, video_spu_t)
251#define VIDEO_SET_SPU_PALETTE _IOW('o', 51, video_spu_palette_t)
252#define VIDEO_GET_NAVI _IOR('o', 52, video_navi_pack_t)
253#define VIDEO_SET_ATTRIBUTES _IO('o', 53)
254#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t) 197#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t)
255#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int)
256 198
257/** 199/**
258 * VIDEO_GET_PTS 200 * VIDEO_GET_PTS
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 4e12c423b9fe..c5358e0ae7c5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -422,6 +422,8 @@ typedef struct elf64_shdr {
422#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */ 422#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
423#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ 423#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
424#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ 424#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
425#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
426#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
425 427
426/* Note header in a PT_NOTE section */ 428/* Note header in a PT_NOTE section */
427typedef struct elf32_note { 429typedef struct elf32_note {
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index dc64cfaf13da..c0151200f7d1 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -20,12 +20,16 @@ struct sock_extended_err {
20#define SO_EE_ORIGIN_ICMP6 3 20#define SO_EE_ORIGIN_ICMP6 3
21#define SO_EE_ORIGIN_TXSTATUS 4 21#define SO_EE_ORIGIN_TXSTATUS 4
22#define SO_EE_ORIGIN_ZEROCOPY 5 22#define SO_EE_ORIGIN_ZEROCOPY 5
23#define SO_EE_ORIGIN_TXTIME 6
23#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS 24#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
24 25
25#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) 26#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
26 27
27#define SO_EE_CODE_ZEROCOPY_COPIED 1 28#define SO_EE_CODE_ZEROCOPY_COPIED 1
28 29
30#define SO_EE_CODE_TXTIME_INVALID_PARAM 1
31#define SO_EE_CODE_TXTIME_MISSED 2
32
29/** 33/**
30 * struct scm_timestamping - timestamps exposed through cmsg 34 * struct scm_timestamping - timestamps exposed through cmsg
31 * 35 *
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4ca65b56084f..dc69391d2bba 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -226,7 +226,7 @@ enum tunable_id {
226 ETHTOOL_TX_COPYBREAK, 226 ETHTOOL_TX_COPYBREAK,
227 ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */ 227 ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
228 /* 228 /*
229 * Add your fresh new tubale attribute above and remember to update 229 * Add your fresh new tunable attribute above and remember to update
230 * tunable_strings[] in net/core/ethtool.c 230 * tunable_strings[] in net/core/ethtool.c
231 */ 231 */
232 __ETHTOOL_TUNABLE_COUNT, 232 __ETHTOOL_TUNABLE_COUNT,
@@ -870,7 +870,8 @@ struct ethtool_flow_ext {
870 * includes the %FLOW_EXT or %FLOW_MAC_EXT flag 870 * includes the %FLOW_EXT or %FLOW_MAC_EXT flag
871 * (see &struct ethtool_flow_ext description). 871 * (see &struct ethtool_flow_ext description).
872 * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC 872 * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC
873 * if packets should be discarded 873 * if packets should be discarded, or %RX_CLS_FLOW_WAKE if the
874 * packets should be used for Wake-on-LAN with %WAKE_FILTER
874 * @location: Location of rule in the table. Locations must be 875 * @location: Location of rule in the table. Locations must be
875 * numbered such that a flow matching multiple rules will be 876 * numbered such that a flow matching multiple rules will be
876 * classified according to the first (lowest numbered) rule. 877 * classified according to the first (lowest numbered) rule.
@@ -902,13 +903,13 @@ struct ethtool_rx_flow_spec {
902static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) 903static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
903{ 904{
904 return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; 905 return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
905}; 906}
906 907
907static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) 908static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
908{ 909{
909 return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> 910 return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
910 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 911 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
911}; 912}
912 913
913/** 914/**
914 * struct ethtool_rxnfc - command to get or set RX flow classification rules 915 * struct ethtool_rxnfc - command to get or set RX flow classification rules
@@ -1634,6 +1635,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
1634#define WAKE_ARP (1 << 4) 1635#define WAKE_ARP (1 << 4)
1635#define WAKE_MAGIC (1 << 5) 1636#define WAKE_MAGIC (1 << 5)
1636#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ 1637#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
1638#define WAKE_FILTER (1 << 7)
1637 1639
1638/* L2-L4 network traffic flow types */ 1640/* L2-L4 network traffic flow types */
1639#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ 1641#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
@@ -1671,6 +1673,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
1671#define RXH_DISCARD (1 << 31) 1673#define RXH_DISCARD (1 << 31)
1672 1674
1673#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL 1675#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
1676#define RX_CLS_FLOW_WAKE 0xfffffffffffffffeULL
1674 1677
1675/* Special RX classification rule insert location values */ 1678/* Special RX classification rule insert location values */
1676#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */ 1679#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */
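
WAKE_FILTER pairs with the new RX_CLS_FLOW_WAKE ring_cookie value: a classification rule whose cookie is RX_CLS_FLOW_WAKE marks matching packets as wake-up traffic rather than steering them to a queue. A rough, untested sketch of arming such a rule; the interface, matched port and rule slot are arbitrary, and the driver must advertise WAKE_FILTER for the final ETHTOOL_SWOL call to succeed:

#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int arm_wake_filter(int fd, const char *dev)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_FILTER };
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(9);	/* match destination port 9 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;		/* exact match on the port bits */
	nfc.fs.ring_cookie = RX_CLS_FLOW_WAKE;		/* wake up instead of delivering */
	nfc.fs.location = 0;				/* arbitrary rule slot */

	ifr.ifr_data = (void *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	ifr.ifr_data = (void *)&wol;
	return ioctl(fd, SIOCETHTOOL, &ifr);		/* enable filter-based wake-up */
}
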
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index cf01b6824244..43391e2d1153 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -164,6 +164,8 @@ enum {
164 IFLA_CARRIER_UP_COUNT, 164 IFLA_CARRIER_UP_COUNT,
165 IFLA_CARRIER_DOWN_COUNT, 165 IFLA_CARRIER_DOWN_COUNT,
166 IFLA_NEW_IFINDEX, 166 IFLA_NEW_IFINDEX,
167 IFLA_MIN_MTU,
168 IFLA_MAX_MTU,
167 __IFLA_MAX 169 __IFLA_MAX
168}; 170};
169 171
@@ -334,6 +336,7 @@ enum {
334 IFLA_BRPORT_GROUP_FWD_MASK, 336 IFLA_BRPORT_GROUP_FWD_MASK,
335 IFLA_BRPORT_NEIGH_SUPPRESS, 337 IFLA_BRPORT_NEIGH_SUPPRESS,
336 IFLA_BRPORT_ISOLATED, 338 IFLA_BRPORT_ISOLATED,
339 IFLA_BRPORT_BACKUP_PORT,
337 __IFLA_BRPORT_MAX 340 __IFLA_BRPORT_MAX
338}; 341};
339#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) 342#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
459 462
460#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) 463#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
461 464
465/* XFRM section */
466enum {
467 IFLA_XFRM_UNSPEC,
468 IFLA_XFRM_LINK,
469 IFLA_XFRM_IF_ID,
470 __IFLA_XFRM_MAX
471};
472
473#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
474
462enum macsec_validation_type { 475enum macsec_validation_type {
463 MACSEC_VALIDATE_DISABLED = 0, 476 MACSEC_VALIDATE_DISABLED = 0,
464 MACSEC_VALIDATE_CHECK = 1, 477 MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
920 XDP_ATTACHED_DRV, 933 XDP_ATTACHED_DRV,
921 XDP_ATTACHED_SKB, 934 XDP_ATTACHED_SKB,
922 XDP_ATTACHED_HW, 935 XDP_ATTACHED_HW,
936 XDP_ATTACHED_MULTI,
923}; 937};
924 938
925enum { 939enum {
@@ -928,6 +942,9 @@ enum {
928 IFLA_XDP_ATTACHED, 942 IFLA_XDP_ATTACHED,
929 IFLA_XDP_FLAGS, 943 IFLA_XDP_FLAGS,
930 IFLA_XDP_PROG_ID, 944 IFLA_XDP_PROG_ID,
945 IFLA_XDP_DRV_PROG_ID,
946 IFLA_XDP_SKB_PROG_ID,
947 IFLA_XDP_HW_PROG_ID,
931 __IFLA_XDP_MAX, 948 __IFLA_XDP_MAX,
932}; 949};
933 950
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
index 483b77af4eb8..db45d3e49a12 100644
--- a/include/uapi/linux/ila.h
+++ b/include/uapi/linux/ila.h
@@ -30,6 +30,7 @@ enum {
30 ILA_CMD_ADD, 30 ILA_CMD_ADD,
31 ILA_CMD_DEL, 31 ILA_CMD_DEL,
32 ILA_CMD_GET, 32 ILA_CMD_GET,
33 ILA_CMD_FLUSH,
33 34
34 __ILA_CMD_MAX, 35 __ILA_CMD_MAX,
35}; 36};
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index b24a742beae5..e42d13b55cf3 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -168,6 +168,7 @@ enum
168 IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, 168 IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
169 IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, 169 IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
170 IPV4_DEVCONF_DROP_GRATUITOUS_ARP, 170 IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
171 IPV4_DEVCONF_BC_FORWARDING,
171 __IPV4_DEVCONF_MAX 172 __IPV4_DEVCONF_MAX
172}; 173};
173 174
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index b4f5073dbac2..01674b56e14f 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -76,6 +76,12 @@ struct kfd_ioctl_update_queue_args {
76 __u32 queue_priority; /* to KFD */ 76 __u32 queue_priority; /* to KFD */
77}; 77};
78 78
79struct kfd_ioctl_set_cu_mask_args {
80 __u32 queue_id; /* to KFD */
81 __u32 num_cu_mask; /* to KFD */
82 __u64 cu_mask_ptr; /* to KFD */
83};
84
79/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ 85/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
80#define KFD_IOC_CACHE_POLICY_COHERENT 0 86#define KFD_IOC_CACHE_POLICY_COHERENT 0
81#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 87#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -189,6 +195,15 @@ struct kfd_ioctl_dbg_wave_control_args {
189 195
190#define KFD_SIGNAL_EVENT_LIMIT 4096 196#define KFD_SIGNAL_EVENT_LIMIT 4096
191 197
198/* For kfd_event_data.hw_exception_data.reset_type. */
199#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
200#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
201
202/* For kfd_event_data.hw_exception_data.reset_cause. */
203#define KFD_HW_EXCEPTION_GPU_HANG 0
204#define KFD_HW_EXCEPTION_ECC 1
205
206
192struct kfd_ioctl_create_event_args { 207struct kfd_ioctl_create_event_args {
193 __u64 event_page_offset; /* from KFD */ 208 __u64 event_page_offset; /* from KFD */
194 __u32 event_trigger_data; /* from KFD - signal events only */ 209 __u32 event_trigger_data; /* from KFD - signal events only */
@@ -219,7 +234,7 @@ struct kfd_memory_exception_failure {
219 __u32 NotPresent; /* Page not present or supervisor privilege */ 234 __u32 NotPresent; /* Page not present or supervisor privilege */
220 __u32 ReadOnly; /* Write access to a read-only page */ 235 __u32 ReadOnly; /* Write access to a read-only page */
221 __u32 NoExecute; /* Execute access to a page marked NX */ 236 __u32 NoExecute; /* Execute access to a page marked NX */
222 __u32 pad; 237 __u32 imprecise; /* Can't determine the exact fault address */
223}; 238};
224 239
225/* memory exception data*/ 240/* memory exception data*/
@@ -230,10 +245,19 @@ struct kfd_hsa_memory_exception_data {
230 __u32 pad; 245 __u32 pad;
231}; 246};
232 247
233/* Event data*/ 248/* hw exception data */
249struct kfd_hsa_hw_exception_data {
250 uint32_t reset_type;
251 uint32_t reset_cause;
252 uint32_t memory_lost;
253 uint32_t gpu_id;
254};
255
256/* Event data */
234struct kfd_event_data { 257struct kfd_event_data {
235 union { 258 union {
236 struct kfd_hsa_memory_exception_data memory_exception_data; 259 struct kfd_hsa_memory_exception_data memory_exception_data;
260 struct kfd_hsa_hw_exception_data hw_exception_data;
237 }; /* From KFD */ 261 }; /* From KFD */
238 __u64 kfd_event_data_ext; /* pointer to an extension structure 262 __u64 kfd_event_data_ext; /* pointer to an extension structure
239 for future exception types */ 263 for future exception types */
@@ -448,7 +472,10 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
448#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \ 472#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
449 AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args) 473 AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
450 474
475#define AMDKFD_IOC_SET_CU_MASK \
476 AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
477
451#define AMDKFD_COMMAND_START 0x01 478#define AMDKFD_COMMAND_START 0x01
452#define AMDKFD_COMMAND_END 0x1A 479#define AMDKFD_COMMAND_END 0x1B
453 480
454#endif 481#endif
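
AMDKFD_IOC_SET_CU_MASK takes a user pointer to a bitmap with one bit per compute unit; num_cu_mask is the number of bits and, per the driver, must be a multiple of 32. A hypothetical helper, assuming /dev/kfd is already open and the queue has been created:

#include <linux/kfd_ioctl.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int set_cu_mask(int kfd_fd, uint32_t queue_id,
		       const uint32_t *mask, uint32_t num_bits)
{
	struct kfd_ioctl_set_cu_mask_args args = {
		.queue_id    = queue_id,
		.num_cu_mask = num_bits,		/* bit count, multiple of 32 */
		.cu_mask_ptr = (uintptr_t)mask,		/* user pointer, one bit per CU */
	};

	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
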
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b6270a3b38e9..b955b986b341 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -949,6 +949,7 @@ struct kvm_ppc_resize_hpt {
949#define KVM_CAP_GET_MSR_FEATURES 153 949#define KVM_CAP_GET_MSR_FEATURES 153
950#define KVM_CAP_HYPERV_EVENTFD 154 950#define KVM_CAP_HYPERV_EVENTFD 154
951#define KVM_CAP_HYPERV_TLBFLUSH 155 951#define KVM_CAP_HYPERV_TLBFLUSH 155
952#define KVM_CAP_S390_HPAGE_1M 156
952 953
953#ifdef KVM_CAP_IRQ_ROUTING 954#ifdef KVM_CAP_IRQ_ROUTING
954 955
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 7d570c7bd117..61158f5a1a5b 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -60,14 +60,14 @@ struct sockaddr_l2tpip6 {
60/* 60/*
61 * Commands. 61 * Commands.
62 * Valid TLVs of each command are:- 62 * Valid TLVs of each command are:-
63 * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid 63 * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum
64 * TUNNEL_DELETE - CONN_ID 64 * TUNNEL_DELETE - CONN_ID
65 * TUNNEL_MODIFY - CONN_ID, udpcsum 65 * TUNNEL_MODIFY - CONN_ID, udpcsum
66 * TUNNEL_GETSTATS - CONN_ID, (stats) 66 * TUNNEL_GETSTATS - CONN_ID, (stats)
67 * TUNNEL_GET - CONN_ID, (...) 67 * TUNNEL_GET - CONN_ID, (...)
68 * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec 68 * SESSION_CREATE - SESSION_ID, PW_TYPE, cookie, peer_cookie, l2spec
69 * SESSION_DELETE - SESSION_ID 69 * SESSION_DELETE - SESSION_ID
70 * SESSION_MODIFY - SESSION_ID, data_seq 70 * SESSION_MODIFY - SESSION_ID
71 * SESSION_GET - SESSION_ID, (...) 71 * SESSION_GET - SESSION_ID, (...)
72 * SESSION_GETSTATS - SESSION_ID, (stats) 72 * SESSION_GETSTATS - SESSION_ID, (stats)
73 * 73 *
@@ -95,7 +95,7 @@ enum {
95 L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */ 95 L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
96 L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */ 96 L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
97 L2TP_ATTR_OFFSET, /* u16 (not used) */ 97 L2TP_ATTR_OFFSET, /* u16 (not used) */
98 L2TP_ATTR_DATA_SEQ, /* u16 */ 98 L2TP_ATTR_DATA_SEQ, /* u16 (not used) */
99 L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */ 99 L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
100 L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */ 100 L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */
101 L2TP_ATTR_PROTO_VERSION, /* u8 */ 101 L2TP_ATTR_PROTO_VERSION, /* u8 */
@@ -105,7 +105,7 @@ enum {
105 L2TP_ATTR_SESSION_ID, /* u32 */ 105 L2TP_ATTR_SESSION_ID, /* u32 */
106 L2TP_ATTR_PEER_SESSION_ID, /* u32 */ 106 L2TP_ATTR_PEER_SESSION_ID, /* u32 */
107 L2TP_ATTR_UDP_CSUM, /* u8 */ 107 L2TP_ATTR_UDP_CSUM, /* u8 */
108 L2TP_ATTR_VLAN_ID, /* u16 */ 108 L2TP_ATTR_VLAN_ID, /* u16 (not used) */
109 L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */ 109 L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
110 L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */ 110 L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
111 L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */ 111 L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
@@ -119,8 +119,8 @@ enum {
119 L2TP_ATTR_IP_DADDR, /* u32 */ 119 L2TP_ATTR_IP_DADDR, /* u32 */
120 L2TP_ATTR_UDP_SPORT, /* u16 */ 120 L2TP_ATTR_UDP_SPORT, /* u16 */
121 L2TP_ATTR_UDP_DPORT, /* u16 */ 121 L2TP_ATTR_UDP_DPORT, /* u16 */
122 L2TP_ATTR_MTU, /* u16 */ 122 L2TP_ATTR_MTU, /* u16 (not used) */
123 L2TP_ATTR_MRU, /* u16 */ 123 L2TP_ATTR_MRU, /* u16 (not used) */
124 L2TP_ATTR_STATS, /* nested */ 124 L2TP_ATTR_STATS, /* nested */
125 L2TP_ATTR_IP6_SADDR, /* struct in6_addr */ 125 L2TP_ATTR_IP6_SADDR, /* struct in6_addr */
126 L2TP_ATTR_IP6_DADDR, /* struct in6_addr */ 126 L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
@@ -169,6 +169,7 @@ enum l2tp_encap_type {
169 L2TP_ENCAPTYPE_IP, 169 L2TP_ENCAPTYPE_IP,
170}; 170};
171 171
172/* For L2TP_ATTR_DATA_SEQ. Unused. */
172enum l2tp_seqmode { 173enum l2tp_seqmode {
173 L2TP_SEQ_NONE = 0, 174 L2TP_SEQ_NONE = 0,
174 L2TP_SEQ_IP = 1, 175 L2TP_SEQ_IP = 1,
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 9e3511742fdc..d6a5a3bfe6c4 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -62,7 +62,7 @@
62#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019 62#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
63#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a 63#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
64 64
65/* YUV (including grey) - next is 0x202c */ 65/* YUV (including grey) - next is 0x202d */
66#define MEDIA_BUS_FMT_Y8_1X8 0x2001 66#define MEDIA_BUS_FMT_Y8_1X8 0x2001
67#define MEDIA_BUS_FMT_UV8_1X8 0x2015 67#define MEDIA_BUS_FMT_UV8_1X8 0x2015
68#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002 68#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -74,6 +74,7 @@
74#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008 74#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
75#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009 75#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
76#define MEDIA_BUS_FMT_Y10_1X10 0x200a 76#define MEDIA_BUS_FMT_Y10_1X10 0x200a
77#define MEDIA_BUS_FMT_Y10_2X8_PADHI_LE 0x202c
77#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018 78#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
78#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019 79#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
79#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b 80#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index c7e9a5cba24e..36f76e777ef9 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -25,7 +25,6 @@
25#endif 25#endif
26#include <linux/ioctl.h> 26#include <linux/ioctl.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/version.h>
29 28
30struct media_device_info { 29struct media_device_info {
31 char driver[16]; 30 char driver[16];
@@ -90,12 +89,6 @@ struct media_device_info {
90#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3) 89#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
91 90
92/* 91/*
93 * Video decoder functions
94 */
95#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
96#define MEDIA_ENT_F_DTV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
97
98/*
99 * Digital TV, analog TV, radio and/or software defined radio tuner functions. 92 * Digital TV, analog TV, radio and/or software defined radio tuner functions.
100 * 93 *
101 * It is a responsibility of the master/bridge drivers to add connectors 94 * It is a responsibility of the master/bridge drivers to add connectors
@@ -132,6 +125,8 @@ struct media_device_info {
132#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004) 125#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004)
133#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005) 126#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005)
134#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006) 127#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
128#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007)
129#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008)
135 130
136/* 131/*
137 * Switch and bridge entity functions 132 * Switch and bridge entity functions
@@ -139,6 +134,13 @@ struct media_device_info {
139#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001) 134#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
140#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002) 135#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
141 136
137/*
138 * Video decoder/encoder functions
139 */
140#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
141#define MEDIA_ENT_F_DV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
142#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
143
142/* Entity flags */ 144/* Entity flags */
143#define MEDIA_ENT_FL_DEFAULT (1 << 0) 145#define MEDIA_ENT_FL_DEFAULT (1 << 0)
144#define MEDIA_ENT_FL_CONNECTOR (1 << 1) 146#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
@@ -280,11 +282,21 @@ struct media_links_enum {
280 * MC next gen API definitions 282 * MC next gen API definitions
281 */ 283 */
282 284
285/*
286 * Appeared in 4.19.0.
287 *
288 * The media_version argument comes from the media_version field in
289 * struct media_device_info.
290 */
291#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
292 ((media_version) >= ((4 << 16) | (19 << 8) | 0))
293
283struct media_v2_entity { 294struct media_v2_entity {
284 __u32 id; 295 __u32 id;
285 char name[64]; 296 char name[64];
286 __u32 function; /* Main function of the entity */ 297 __u32 function; /* Main function of the entity */
287 __u32 reserved[6]; 298 __u32 flags;
299 __u32 reserved[5];
288} __attribute__ ((packed)); 300} __attribute__ ((packed));
289 301
290/* Should match the specific fields at media_intf_devnode */ 302/* Should match the specific fields at media_intf_devnode */
@@ -305,11 +317,21 @@ struct media_v2_interface {
305 }; 317 };
306} __attribute__ ((packed)); 318} __attribute__ ((packed));
307 319
320/*
321 * Appeared in 4.19.0.
322 *
323 * The media_version argument comes from the media_version field in
324 * struct media_device_info.
325 */
326#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
327 ((media_version) >= ((4 << 16) | (19 << 8) | 0))
328
308struct media_v2_pad { 329struct media_v2_pad {
309 __u32 id; 330 __u32 id;
310 __u32 entity_id; 331 __u32 entity_id;
311 __u32 flags; 332 __u32 flags;
312 __u32 reserved[5]; 333 __u32 index;
334 __u32 reserved[4];
313} __attribute__ ((packed)); 335} __attribute__ ((packed));
314 336
315struct media_v2_link { 337struct media_v2_link {
@@ -348,7 +370,7 @@ struct media_v2_topology {
348#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc) 370#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
349#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology) 371#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
350 372
351#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API) 373#ifndef __KERNEL__
352 374
353/* 375/*
354 * Legacy symbols used to avoid userspace compilation breakages. 376 * Legacy symbols used to avoid userspace compilation breakages.
@@ -380,6 +402,8 @@ struct media_v2_topology {
380#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER 402#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER
381#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER 403#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER
382 404
405#define MEDIA_ENT_F_DTV_DECODER MEDIA_ENT_F_DV_DECODER
406
383/* 407/*
384 * There is still no ALSA support in the media controller. These 408 * There is still no ALSA support in the media controller. These
385 * defines should not have been added and we leave them here only 409 * defines should not have been added and we leave them here only
@@ -396,7 +420,7 @@ struct media_v2_topology {
396#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7) 420#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
397 421
398/* Obsolete symbol for media_version, no longer used in the kernel */ 422/* Obsolete symbol for media_version, no longer used in the kernel */
399#define MEDIA_API_VERSION KERNEL_VERSION(0, 1, 0) 423#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
400 424
401#endif 425#endif
402 426
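
The MEDIA_V2_ENTITY_HAS_FLAGS and MEDIA_V2_PAD_HAS_INDEX macros gate the reuse of former reserved words: userspace should only trust the new flags/index fields when media_version reports at least 4.19. A small sketch, assuming the media device fd is already open:

#include <linux/media.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void print_entity(int media_fd, const struct media_v2_entity *ent)
{
	struct media_device_info info = { 0 };

	if (ioctl(media_fd, MEDIA_IOC_DEVICE_INFO, &info) < 0)
		return;

	if (MEDIA_V2_ENTITY_HAS_FLAGS(info.media_version))
		printf("%s: function 0x%x flags 0x%x\n",
		       ent->name, ent->function, ent->flags);
	else	/* older kernel: the field is still reserved/zero */
		printf("%s: function 0x%x\n", ent->name, ent->function);
}
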
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index b5c2fdcf23fd..a506216591d6 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -136,6 +136,7 @@
136#define CTL1000_ENABLE_MASTER 0x1000 136#define CTL1000_ENABLE_MASTER 0x1000
137 137
138/* 1000BASE-T Status register */ 138/* 1000BASE-T Status register */
139#define LPA_1000MSFAIL 0x8000 /* Master/Slave resolution failure */
139#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ 140#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
140#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ 141#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
141#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */ 142#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
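
LPA_1000MSFAIL reports a 1000BASE-T master/slave resolution failure in MII_STAT1000; a PHY driver would typically restart autonegotiation when it sees the bit. A kernel-side sketch of that check (hypothetical helper name):

#include <linux/mii.h>
#include <linux/phy.h>

static int check_ms_resolution(struct phy_device *phydev)
{
	int stat1000 = phy_read(phydev, MII_STAT1000);

	if (stat1000 < 0)
		return stat1000;
	if (stat1000 & LPA_1000MSFAIL)		/* resolution failed, try again */
		return genphy_restart_aneg(phydev);
	return 0;
}
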
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 10f9ff9426a2..5d37a9ccce63 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -120,6 +120,7 @@ enum {
120 IPMRA_TABLE_MROUTE_DO_ASSERT, 120 IPMRA_TABLE_MROUTE_DO_ASSERT,
121 IPMRA_TABLE_MROUTE_DO_PIM, 121 IPMRA_TABLE_MROUTE_DO_PIM,
122 IPMRA_TABLE_VIFS, 122 IPMRA_TABLE_VIFS,
123 IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
123 __IPMRA_TABLE_MAX 124 __IPMRA_TABLE_MAX
124}; 125};
125#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1) 126#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
@@ -173,5 +174,6 @@ enum {
173#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */ 174#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */
174#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */ 175#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */
175#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */ 176#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
177#define IGMPMSG_WRVIFWHOLE 4 /* For PIM Register and assert processing */
176 178
177#endif /* _UAPI__LINUX_MROUTE_H */ 179#endif /* _UAPI__LINUX_MROUTE_H */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 85a3fb65e40a..20d6cc91435d 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -53,6 +53,9 @@ enum {
53/* These are client behavior specific flags. */ 53/* These are client behavior specific flags. */
54#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on 54#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
55 disconnect. */ 55 disconnect. */
56#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
57 * close by last opener.
58 */
56 59
57/* userspace doesn't need the nbd_device structure */ 60/* userspace doesn't need the nbd_device structure */
58 61
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 4fe104b2411f..97ff3c17ec4d 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -141,4 +141,22 @@ struct scm_ts_pktinfo {
141 __u32 reserved[2]; 141 __u32 reserved[2];
142}; 142};
143 143
144/*
145 * SO_TXTIME gets a struct sock_txtime with flags being an integer bit
146 * field comprised of these values.
147 */
148enum txtime_flags {
149 SOF_TXTIME_DEADLINE_MODE = (1 << 0),
150 SOF_TXTIME_REPORT_ERRORS = (1 << 1),
151
152 SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS,
153 SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) |
154 SOF_TXTIME_FLAGS_LAST
155};
156
157struct sock_txtime {
158 clockid_t clockid; /* reference clockid */
159 __u32 flags; /* as defined by enum txtime_flags */
160};
161
144#endif /* _NET_TIMESTAMPING_H */ 162#endif /* _NET_TIMESTAMPING_H */
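
struct sock_txtime is the argument to the SO_TXTIME socket option (the option number itself lives in asm-generic/socket.h, not this header); once enabled, each datagram carries its transmit time as an SCM_TXTIME control message holding a u64 in nanoseconds of the configured clock. A minimal, untested sketch for a connected UDP socket using CLOCK_TAI:

#include <linux/net_tstamp.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>

static int send_at(int fd, const void *buf, size_t len, uint64_t txtime_ns)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,
		.flags   = SOF_TXTIME_REPORT_ERRORS,	/* drops show up on the error queue */
	};
	char ctrl[CMSG_SPACE(sizeof(txtime_ns))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	if (setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg)) < 0)
		return -1;

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type  = SCM_TXTIME;
	cm->cmsg_len   = CMSG_LEN(sizeof(txtime_ns));
	memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));

	return sendmsg(fd, &msg, 0);	/* socket assumed to be connect()ed */
}
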
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index c84fcdfca862..fac4edd55379 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -18,6 +18,7 @@ enum {
18 NETCONFA_PROXY_NEIGH, 18 NETCONFA_PROXY_NEIGH,
19 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 19 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
20 NETCONFA_INPUT, 20 NETCONFA_INPUT,
21 NETCONFA_BC_FORWARDING,
21 __NETCONFA_MAX 22 __NETCONFA_MAX
22}; 23};
23#define NETCONFA_MAX (__NETCONFA_MAX - 1) 24#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 89438e68dc03..e23290ffdc77 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -8,6 +8,7 @@
8#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN 8#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN
9#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN 9#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN
10#define NFT_USERDATA_MAXLEN 256 10#define NFT_USERDATA_MAXLEN 256
11#define NFT_OSF_MAXGENRELEN 16
11 12
12/** 13/**
13 * enum nft_registers - nf_tables registers 14 * enum nft_registers - nf_tables registers
@@ -921,10 +922,12 @@ enum nft_socket_attributes {
921/* 922/*
922 * enum nft_socket_keys - nf_tables socket expression keys 923 * enum nft_socket_keys - nf_tables socket expression keys
923 * 924 *
924 * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option_ 925 * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
926 * @NFT_SOCKET_MARK: Value of the socket mark
925 */ 927 */
926enum nft_socket_keys { 928enum nft_socket_keys {
927 NFT_SOCKET_TRANSPARENT, 929 NFT_SOCKET_TRANSPARENT,
930 NFT_SOCKET_MARK,
928 __NFT_SOCKET_MAX 931 __NFT_SOCKET_MAX
929}; 932};
930#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1) 933#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
@@ -955,6 +958,7 @@ enum nft_socket_keys {
955 * @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address) 958 * @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
956 * @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address) 959 * @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
957 * @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address) 960 * @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
961 * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack
958 */ 962 */
959enum nft_ct_keys { 963enum nft_ct_keys {
960 NFT_CT_STATE, 964 NFT_CT_STATE,
@@ -980,6 +984,7 @@ enum nft_ct_keys {
980 NFT_CT_DST_IP, 984 NFT_CT_DST_IP,
981 NFT_CT_SRC_IP6, 985 NFT_CT_SRC_IP6,
982 NFT_CT_DST_IP6, 986 NFT_CT_DST_IP6,
987 NFT_CT_TIMEOUT,
983 __NFT_CT_MAX 988 __NFT_CT_MAX
984}; 989};
985#define NFT_CT_MAX (__NFT_CT_MAX - 1) 990#define NFT_CT_MAX (__NFT_CT_MAX - 1)
@@ -1251,6 +1256,22 @@ enum nft_nat_attributes {
1251#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1) 1256#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
1252 1257
1253/** 1258/**
1259 * enum nft_tproxy_attributes - nf_tables tproxy expression netlink attributes
1260 *
1261 * NFTA_TPROXY_FAMILY: Target address family (NLA_U32: nft_registers)
1262 * NFTA_TPROXY_REG_ADDR: Target address register (NLA_U32: nft_registers)
1263 * NFTA_TPROXY_REG_PORT: Target port register (NLA_U32: nft_registers)
1264 */
1265enum nft_tproxy_attributes {
1266 NFTA_TPROXY_UNSPEC,
1267 NFTA_TPROXY_FAMILY,
1268 NFTA_TPROXY_REG_ADDR,
1269 NFTA_TPROXY_REG_PORT,
1270 __NFTA_TPROXY_MAX
1271};
1272#define NFTA_TPROXY_MAX (__NFTA_TPROXY_MAX - 1)
1273
1274/**
1254 * enum nft_masq_attributes - nf_tables masquerade expression attributes 1275 * enum nft_masq_attributes - nf_tables masquerade expression attributes
1255 * 1276 *
1256 * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) 1277 * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
@@ -1392,13 +1413,24 @@ enum nft_ct_helper_attributes {
1392}; 1413};
1393#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1) 1414#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1)
1394 1415
1416enum nft_ct_timeout_timeout_attributes {
1417 NFTA_CT_TIMEOUT_UNSPEC,
1418 NFTA_CT_TIMEOUT_L3PROTO,
1419 NFTA_CT_TIMEOUT_L4PROTO,
1420 NFTA_CT_TIMEOUT_DATA,
1421 __NFTA_CT_TIMEOUT_MAX,
1422};
1423#define NFTA_CT_TIMEOUT_MAX (__NFTA_CT_TIMEOUT_MAX - 1)
1424
1395#define NFT_OBJECT_UNSPEC 0 1425#define NFT_OBJECT_UNSPEC 0
1396#define NFT_OBJECT_COUNTER 1 1426#define NFT_OBJECT_COUNTER 1
1397#define NFT_OBJECT_QUOTA 2 1427#define NFT_OBJECT_QUOTA 2
1398#define NFT_OBJECT_CT_HELPER 3 1428#define NFT_OBJECT_CT_HELPER 3
1399#define NFT_OBJECT_LIMIT 4 1429#define NFT_OBJECT_LIMIT 4
1400#define NFT_OBJECT_CONNLIMIT 5 1430#define NFT_OBJECT_CONNLIMIT 5
1401#define __NFT_OBJECT_MAX 6 1431#define NFT_OBJECT_TUNNEL 6
1432#define NFT_OBJECT_CT_TIMEOUT 7
1433#define __NFT_OBJECT_MAX 8
1402#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) 1434#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
1403 1435
1404/** 1436/**
@@ -1461,6 +1493,13 @@ enum nft_flowtable_hook_attributes {
1461}; 1493};
1462#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1) 1494#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
1463 1495
1496enum nft_osf_attributes {
1497 NFTA_OSF_UNSPEC,
1498 NFTA_OSF_DREG,
1499 __NFTA_OSF_MAX,
1500};
1501#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
1502
1464/** 1503/**
1465 * enum nft_device_attributes - nf_tables device netlink attributes 1504 * enum nft_device_attributes - nf_tables device netlink attributes
1466 * 1505 *
@@ -1555,4 +1594,85 @@ enum nft_ng_types {
1555}; 1594};
1556#define NFT_NG_MAX (__NFT_NG_MAX - 1) 1595#define NFT_NG_MAX (__NFT_NG_MAX - 1)
1557 1596
1597enum nft_tunnel_key_ip_attributes {
1598 NFTA_TUNNEL_KEY_IP_UNSPEC,
1599 NFTA_TUNNEL_KEY_IP_SRC,
1600 NFTA_TUNNEL_KEY_IP_DST,
1601 __NFTA_TUNNEL_KEY_IP_MAX
1602};
1603#define NFTA_TUNNEL_KEY_IP_MAX (__NFTA_TUNNEL_KEY_IP_MAX - 1)
1604
1605enum nft_tunnel_ip6_attributes {
1606 NFTA_TUNNEL_KEY_IP6_UNSPEC,
1607 NFTA_TUNNEL_KEY_IP6_SRC,
1608 NFTA_TUNNEL_KEY_IP6_DST,
1609 NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
1610 __NFTA_TUNNEL_KEY_IP6_MAX
1611};
1612#define NFTA_TUNNEL_KEY_IP6_MAX (__NFTA_TUNNEL_KEY_IP6_MAX - 1)
1613
1614enum nft_tunnel_opts_attributes {
1615 NFTA_TUNNEL_KEY_OPTS_UNSPEC,
1616 NFTA_TUNNEL_KEY_OPTS_VXLAN,
1617 NFTA_TUNNEL_KEY_OPTS_ERSPAN,
1618 __NFTA_TUNNEL_KEY_OPTS_MAX
1619};
1620#define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1)
1621
1622enum nft_tunnel_opts_vxlan_attributes {
1623 NFTA_TUNNEL_KEY_VXLAN_UNSPEC,
1624 NFTA_TUNNEL_KEY_VXLAN_GBP,
1625 __NFTA_TUNNEL_KEY_VXLAN_MAX
1626};
1627#define NFTA_TUNNEL_KEY_VXLAN_MAX (__NFTA_TUNNEL_KEY_VXLAN_MAX - 1)
1628
1629enum nft_tunnel_opts_erspan_attributes {
1630 NFTA_TUNNEL_KEY_ERSPAN_UNSPEC,
1631 NFTA_TUNNEL_KEY_ERSPAN_VERSION,
1632 NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
1633 NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
1634 NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
1635 __NFTA_TUNNEL_KEY_ERSPAN_MAX
1636};
1637#define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1)
1638
1639enum nft_tunnel_flags {
1640 NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0),
1641 NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1),
1642 NFT_TUNNEL_F_SEQ_NUMBER = (1 << 2),
1643};
1644#define NFT_TUNNEL_F_MASK (NFT_TUNNEL_F_ZERO_CSUM_TX | \
1645 NFT_TUNNEL_F_DONT_FRAGMENT | \
1646 NFT_TUNNEL_F_SEQ_NUMBER)
1647
1648enum nft_tunnel_key_attributes {
1649 NFTA_TUNNEL_KEY_UNSPEC,
1650 NFTA_TUNNEL_KEY_ID,
1651 NFTA_TUNNEL_KEY_IP,
1652 NFTA_TUNNEL_KEY_IP6,
1653 NFTA_TUNNEL_KEY_FLAGS,
1654 NFTA_TUNNEL_KEY_TOS,
1655 NFTA_TUNNEL_KEY_TTL,
1656 NFTA_TUNNEL_KEY_SPORT,
1657 NFTA_TUNNEL_KEY_DPORT,
1658 NFTA_TUNNEL_KEY_OPTS,
1659 __NFTA_TUNNEL_KEY_MAX
1660};
1661#define NFTA_TUNNEL_KEY_MAX (__NFTA_TUNNEL_KEY_MAX - 1)
1662
1663enum nft_tunnel_keys {
1664 NFT_TUNNEL_PATH,
1665 NFT_TUNNEL_ID,
1666 __NFT_TUNNEL_MAX
1667};
1668#define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1)
1669
1670enum nft_tunnel_attributes {
1671 NFTA_TUNNEL_UNSPEC,
1672 NFTA_TUNNEL_KEY,
1673 NFTA_TUNNEL_DREG,
1674 __NFTA_TUNNEL_MAX
1675};
1676#define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1)
1677
1558#endif /* _LINUX_NF_TABLES_H */ 1678#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nfnetlink_osf.h
index 8f2f2f403183..76a3527df5dd 100644
--- a/include/uapi/linux/netfilter/nf_osf.h
+++ b/include/uapi/linux/netfilter/nfnetlink_osf.h
@@ -16,9 +16,14 @@
16 16
17#define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */ 17#define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
18 18
19/* Check if ip TTL is less than fingerprint one */
20#define NF_OSF_TTL_LESS 1
21
19/* Do not compare ip and fingerprint TTL at all */ 22/* Do not compare ip and fingerprint TTL at all */
20#define NF_OSF_TTL_NOCHECK 2 23#define NF_OSF_TTL_NOCHECK 2
21 24
25#define NF_OSF_FLAGMASK (NF_OSF_GENRE | NF_OSF_TTL | \
26 NF_OSF_LOG | NF_OSF_INVERT)
22/* Wildcard MSS (kind of). 27/* Wildcard MSS (kind of).
23 * It is used to implement a state machine for the different wildcard values 28 * It is used to implement a state machine for the different wildcard values
24 * of the MSS and window sizes. 29 * of the MSS and window sizes.
@@ -83,4 +88,31 @@ enum iana_options {
83 OSFOPT_EMPTY = 255, 88 OSFOPT_EMPTY = 255,
84}; 89};
85 90
91/* Initial window size option state machine: multiple of mss, mtu or
92 * plain numeric value. Can also be made as plain numeric value which
93 * is not a multiple of specified value.
94 */
95enum nf_osf_window_size_options {
96 OSF_WSS_PLAIN = 0,
97 OSF_WSS_MSS,
98 OSF_WSS_MTU,
99 OSF_WSS_MODULO,
100 OSF_WSS_MAX,
101};
102
103enum nf_osf_attr_type {
104 OSF_ATTR_UNSPEC,
105 OSF_ATTR_FINGER,
106 OSF_ATTR_MAX,
107};
108
109/*
110 * Add/remove fingerprint from the kernel.
111 */
112enum nf_osf_msg_types {
113 OSF_MSG_ADD,
114 OSF_MSG_REMOVE,
115 OSF_MSG_MAX,
116};
117
86#endif /* _NF_OSF_H */ 118#endif /* _NF_OSF_H */
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 72956eceeb09..24102b5286ec 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -23,7 +23,7 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/ip.h> 24#include <linux/ip.h>
25#include <linux/tcp.h> 25#include <linux/tcp.h>
26#include <linux/netfilter/nf_osf.h> 26#include <linux/netfilter/nfnetlink_osf.h>
27 27
28#define XT_OSF_GENRE NF_OSF_GENRE 28#define XT_OSF_GENRE NF_OSF_GENRE
29#define XT_OSF_INVERT NF_OSF_INVERT 29#define XT_OSF_INVERT NF_OSF_INVERT
@@ -37,8 +37,7 @@
37 37
38#define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE 38#define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE
39#define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK 39#define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK
40 40#define XT_OSF_TTL_LESS NF_OSF_TTL_LESS
41#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
42 41
43#define xt_osf_wc nf_osf_wc 42#define xt_osf_wc nf_osf_wc
44#define xt_osf_opt nf_osf_opt 43#define xt_osf_opt nf_osf_opt
@@ -47,19 +46,8 @@
47#define xt_osf_finger nf_osf_finger 46#define xt_osf_finger nf_osf_finger
48#define xt_osf_nlmsg nf_osf_nlmsg 47#define xt_osf_nlmsg nf_osf_nlmsg
49 48
50/* 49#define xt_osf_window_size_options nf_osf_window_size_options
51 * Add/remove fingerprint from the kernel. 50#define xt_osf_attr_type nf_osf_attr_type
52 */ 51#define xt_osf_msg_types nf_osf_msg_types
53enum xt_osf_msg_types {
54 OSF_MSG_ADD,
55 OSF_MSG_REMOVE,
56 OSF_MSG_MAX,
57};
58
59enum xt_osf_attr_type {
60 OSF_ATTR_UNSPEC,
61 OSF_ATTR_FINGER,
62 OSF_ATTR_MAX,
63};
64 52
65#endif /* _XT_OSF_H */ 53#endif /* _XT_OSF_H */
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 12fb77633f83..156ccd089df1 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -26,4 +26,15 @@
26#define NF_BR_BROUTING 5 26#define NF_BR_BROUTING 5
27#define NF_BR_NUMHOOKS 6 27#define NF_BR_NUMHOOKS 6
28 28
29enum nf_br_hook_priorities {
30 NF_BR_PRI_FIRST = INT_MIN,
31 NF_BR_PRI_NAT_DST_BRIDGED = -300,
32 NF_BR_PRI_FILTER_BRIDGED = -200,
33 NF_BR_PRI_BRNF = 0,
34 NF_BR_PRI_NAT_DST_OTHER = 100,
35 NF_BR_PRI_FILTER_OTHER = 200,
36 NF_BR_PRI_NAT_SRC = 300,
37 NF_BR_PRI_LAST = INT_MAX,
38};
39
29#endif /* _UAPI__LINUX_BRIDGE_NETFILTER_H */ 40#endif /* _UAPI__LINUX_BRIDGE_NETFILTER_H */
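
The nf_br_hook_priorities values previously lived in the kernel-internal netfilter_bridge.h; exposing them in the uapi lets bridge-family hook users order themselves explicitly against br_netfilter and the bridged filter/NAT stages. A kernel-side sketch of a hook registered at the bridged-filter priority (registration via nf_register_net_hook() is assumed to happen in module init):

#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>

static unsigned int br_demo_hook(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* observe only, never drop */
}

static const struct nf_hook_ops br_demo_ops = {
	.hook     = br_demo_hook,
	.pf       = NFPROTO_BRIDGE,
	.hooknum  = NF_BR_PRE_ROUTING,
	.priority = NF_BR_PRI_FILTER_BRIDGED,
};
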
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 27e4e441caac..7acc16f34942 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2237,6 +2237,9 @@ enum nl80211_commands {
2237 * enforced. 2237 * enforced.
2238 * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes 2238 * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
2239 * a flow is assigned on each round of the DRR scheduler. 2239 * a flow is assigned on each round of the DRR scheduler.
2240 * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from
2241 * association request when used with NL80211_CMD_NEW_STATION). Can be set
2242 * only if %NL80211_STA_FLAG_WME is set.
2240 * 2243 *
2241 * @NUM_NL80211_ATTR: total number of nl80211_attrs available 2244 * @NUM_NL80211_ATTR: total number of nl80211_attrs available
2242 * @NL80211_ATTR_MAX: highest attribute number currently defined 2245 * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2677,6 +2680,8 @@ enum nl80211_attrs {
2677 NL80211_ATTR_TXQ_MEMORY_LIMIT, 2680 NL80211_ATTR_TXQ_MEMORY_LIMIT,
2678 NL80211_ATTR_TXQ_QUANTUM, 2681 NL80211_ATTR_TXQ_QUANTUM,
2679 2682
2683 NL80211_ATTR_HE_CAPABILITY,
2684
2680 /* add attributes here, update the policy in nl80211.c */ 2685 /* add attributes here, update the policy in nl80211.c */
2681 2686
2682 __NL80211_ATTR_AFTER_LAST, 2687 __NL80211_ATTR_AFTER_LAST,
@@ -2726,7 +2731,8 @@ enum nl80211_attrs {
2726#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 2731#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
2727#define NL80211_HT_CAPABILITY_LEN 26 2732#define NL80211_HT_CAPABILITY_LEN 26
2728#define NL80211_VHT_CAPABILITY_LEN 12 2733#define NL80211_VHT_CAPABILITY_LEN 12
2729 2734#define NL80211_HE_MIN_CAPABILITY_LEN 16
2735#define NL80211_HE_MAX_CAPABILITY_LEN 51
2730#define NL80211_MAX_NR_CIPHER_SUITES 5 2736#define NL80211_MAX_NR_CIPHER_SUITES 5
2731#define NL80211_MAX_NR_AKM_SUITES 2 2737#define NL80211_MAX_NR_AKM_SUITES 2
2732 2738
@@ -2854,6 +2860,38 @@ struct nl80211_sta_flag_update {
2854} __attribute__((packed)); 2860} __attribute__((packed));
2855 2861
2856/** 2862/**
2863 * enum nl80211_he_gi - HE guard interval
2864 * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec
2865 * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec
2866 * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec
2867 */
2868enum nl80211_he_gi {
2869 NL80211_RATE_INFO_HE_GI_0_8,
2870 NL80211_RATE_INFO_HE_GI_1_6,
2871 NL80211_RATE_INFO_HE_GI_3_2,
2872};
2873
2874/**
2875 * enum nl80211_he_ru_alloc - HE RU allocation values
2876 * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
2877 * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
2878 * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation
2879 * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation
2880 * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation
2881 * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation
2882 * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation
2883 */
2884enum nl80211_he_ru_alloc {
2885 NL80211_RATE_INFO_HE_RU_ALLOC_26,
2886 NL80211_RATE_INFO_HE_RU_ALLOC_52,
2887 NL80211_RATE_INFO_HE_RU_ALLOC_106,
2888 NL80211_RATE_INFO_HE_RU_ALLOC_242,
2889 NL80211_RATE_INFO_HE_RU_ALLOC_484,
2890 NL80211_RATE_INFO_HE_RU_ALLOC_996,
2891 NL80211_RATE_INFO_HE_RU_ALLOC_2x996,
2892};
2893
2894/**
2857 * enum nl80211_rate_info - bitrate information 2895 * enum nl80211_rate_info - bitrate information
2858 * 2896 *
2859 * These attribute types are used with %NL80211_STA_INFO_TXRATE 2897 * These attribute types are used with %NL80211_STA_INFO_TXRATE
@@ -2885,6 +2923,13 @@ struct nl80211_sta_flag_update {
2885 * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is 2923 * @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is
2886 * a legacy rate and will be reported as the actual bitrate, i.e. 2924 * a legacy rate and will be reported as the actual bitrate, i.e.
2887 * a quarter of the base (20 MHz) rate 2925 * a quarter of the base (20 MHz) rate
2926 * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11)
2927 * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8)
2928 * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier
2929 * (u8, see &enum nl80211_he_gi)
2930 * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
2931 * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
2932 * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
2888 * @__NL80211_RATE_INFO_AFTER_LAST: internal use 2933 * @__NL80211_RATE_INFO_AFTER_LAST: internal use
2889 */ 2934 */
2890enum nl80211_rate_info { 2935enum nl80211_rate_info {
@@ -2901,6 +2946,11 @@ enum nl80211_rate_info {
2901 NL80211_RATE_INFO_160_MHZ_WIDTH, 2946 NL80211_RATE_INFO_160_MHZ_WIDTH,
2902 NL80211_RATE_INFO_10_MHZ_WIDTH, 2947 NL80211_RATE_INFO_10_MHZ_WIDTH,
2903 NL80211_RATE_INFO_5_MHZ_WIDTH, 2948 NL80211_RATE_INFO_5_MHZ_WIDTH,
2949 NL80211_RATE_INFO_HE_MCS,
2950 NL80211_RATE_INFO_HE_NSS,
2951 NL80211_RATE_INFO_HE_GI,
2952 NL80211_RATE_INFO_HE_DCM,
2953 NL80211_RATE_INFO_HE_RU_ALLOC,
2904 2954
2905 /* keep last */ 2955 /* keep last */
2906 __NL80211_RATE_INFO_AFTER_LAST, 2956 __NL80211_RATE_INFO_AFTER_LAST,
@@ -3167,6 +3217,38 @@ enum nl80211_mpath_info {
3167}; 3217};
3168 3218
3169/** 3219/**
3220 * enum nl80211_band_iftype_attr - Interface type data attributes
3221 *
3222 * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved
3223 * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute
3224 * for each interface type that supports the band data
3225 * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE
3226 * capabilities IE
3227 * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE
3228 * capabilities IE
3229 * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE
3230 * capabilities IE
3231 * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as
3232 * defined in HE capabilities IE
3233 * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently
3234 * defined
3235 * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
3236 */
3237enum nl80211_band_iftype_attr {
3238 __NL80211_BAND_IFTYPE_ATTR_INVALID,
3239
3240 NL80211_BAND_IFTYPE_ATTR_IFTYPES,
3241 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC,
3242 NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
3243 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
3244 NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
3245
3246 /* keep last */
3247 __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
3248 NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1
3249};
3250
3251/**
3170 * enum nl80211_band_attr - band attributes 3252 * enum nl80211_band_attr - band attributes
3171 * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved 3253 * @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved
3172 * @NL80211_BAND_ATTR_FREQS: supported frequencies in this band, 3254 * @NL80211_BAND_ATTR_FREQS: supported frequencies in this band,
@@ -3181,6 +3263,8 @@ enum nl80211_mpath_info {
3181 * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as 3263 * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as
3182 * defined in 802.11ac 3264 * defined in 802.11ac
3183 * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE 3265 * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
3266 * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
3267 * attributes from &enum nl80211_band_iftype_attr
3184 * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined 3268 * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
3185 * @__NL80211_BAND_ATTR_AFTER_LAST: internal use 3269 * @__NL80211_BAND_ATTR_AFTER_LAST: internal use
3186 */ 3270 */
@@ -3196,6 +3280,7 @@ enum nl80211_band_attr {
3196 3280
3197 NL80211_BAND_ATTR_VHT_MCS_SET, 3281 NL80211_BAND_ATTR_VHT_MCS_SET,
3198 NL80211_BAND_ATTR_VHT_CAPA, 3282 NL80211_BAND_ATTR_VHT_CAPA,
3283 NL80211_BAND_ATTR_IFTYPE_DATA,
3199 3284
3200 /* keep last */ 3285 /* keep last */
3201 __NL80211_BAND_ATTR_AFTER_LAST, 3286 __NL80211_BAND_ATTR_AFTER_LAST,
@@ -5133,6 +5218,11 @@ enum nl80211_feature_flags {
5133 * support to nl80211. 5218 * support to nl80211.
5134 * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate 5219 * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
5135 * TXQs. 5220 * TXQs.
5221 * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
5222 * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN.
5223 * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
5224 * except for supported rates from the probe request content if requested
5225 * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
5136 * 5226 *
5137 * @NUM_NL80211_EXT_FEATURES: number of extended features. 5227 * @NUM_NL80211_EXT_FEATURES: number of extended features.
5138 * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 5228 * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5167,6 +5257,8 @@ enum nl80211_ext_feature_index {
5167 NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211, 5257 NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
5168 NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT, 5258 NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
5169 NL80211_EXT_FEATURE_TXQS, 5259 NL80211_EXT_FEATURE_TXQS,
5260 NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
5261 NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
5170 5262
5171 /* add new features before the definition below */ 5263 /* add new features before the definition below */
5172 NUM_NL80211_EXT_FEATURES, 5264 NUM_NL80211_EXT_FEATURES,
@@ -5272,6 +5364,12 @@ enum nl80211_timeout_reason {
5272 * possible scan results. This flag hints the driver to use the best 5364 * possible scan results. This flag hints the driver to use the best
5273 * possible scan configuration to improve the accuracy in scanning. 5365 * possible scan configuration to improve the accuracy in scanning.
5274 * Latency and power use may get impacted with this flag. 5366 * Latency and power use may get impacted with this flag.
5367 * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe
5368 * request frames from this scan to avoid correlation/tracking being
5369 * possible.
5370 * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to
5371 * only have supported rates and no additional capabilities (unless
5372 * added by userspace explicitly.)
5275 */ 5373 */
5276enum nl80211_scan_flags { 5374enum nl80211_scan_flags {
5277 NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, 5375 NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -5285,6 +5383,8 @@ enum nl80211_scan_flags {
5285 NL80211_SCAN_FLAG_LOW_SPAN = 1<<8, 5383 NL80211_SCAN_FLAG_LOW_SPAN = 1<<8,
5286 NL80211_SCAN_FLAG_LOW_POWER = 1<<9, 5384 NL80211_SCAN_FLAG_LOW_POWER = 1<<9,
5287 NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10, 5385 NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10,
5386 NL80211_SCAN_FLAG_RANDOM_SN = 1<<11,
5387 NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12,
5288}; 5388};
5289 5389
5290/** 5390/**
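
The HE guard interval reported via NL80211_RATE_INFO_HE_GI is an identifier, not a time value; callers displaying station rates have to map it themselves. A trivial sketch of that mapping (hypothetical helper, returns tenths of a microsecond):

#include <linux/nl80211.h>

static int he_gi_tenths_usec(enum nl80211_he_gi gi)
{
	switch (gi) {
	case NL80211_RATE_INFO_HE_GI_0_8: return 8;
	case NL80211_RATE_INFO_HE_GI_1_6: return 16;
	case NL80211_RATE_INFO_HE_GI_3_2: return 32;
	}
	return -1;	/* unknown/future identifier */
}
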
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 863aabaa5cc9..dbe0cbe4f1b7 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -840,6 +840,8 @@ struct ovs_action_push_eth {
840 * @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet. 840 * @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet.
841 * @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the 841 * @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the
842 * packet, or modify the packet (e.g., change the DSCP field). 842 * packet, or modify the packet (e.g., change the DSCP field).
843 * @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of
844 * actions without affecting the original packet and key.
843 * 845 *
844 * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all 846 * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
845 * fields within a header are modifiable, e.g. the IPv4 protocol and fragment 847 * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -873,6 +875,7 @@ enum ovs_action_attr {
873 OVS_ACTION_ATTR_PUSH_NSH, /* Nested OVS_NSH_KEY_ATTR_*. */ 875 OVS_ACTION_ATTR_PUSH_NSH, /* Nested OVS_NSH_KEY_ATTR_*. */
874 OVS_ACTION_ATTR_POP_NSH, /* No argument. */ 876 OVS_ACTION_ATTR_POP_NSH, /* No argument. */
875 OVS_ACTION_ATTR_METER, /* u32 meter ID. */ 877 OVS_ACTION_ATTR_METER, /* u32 meter ID. */
878 OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
876 879
877 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted 880 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
878 * from userspace. */ 881 * from userspace. */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b8e288a1f740..eeb787b1c53c 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
143 PERF_SAMPLE_PHYS_ADDR = 1U << 19, 143 PERF_SAMPLE_PHYS_ADDR = 1U << 19,
144 144
145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ 145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
146
147 __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
146}; 148};
147 149
148/* 150/*
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 84e4c1d0f874..be382fb0592d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -45,6 +45,7 @@ enum {
45 * the skb and act like everything 45 * the skb and act like everything
46 * is alright. 46 * is alright.
47 */ 47 */
48#define TC_ACT_VALUE_MAX TC_ACT_TRAP
48 49
49/* There is a special kind of actions called "extended actions", 50/* There is a special kind of actions called "extended actions",
50 * which need a value parameter. These have a local opcode located in 51 * which need a value parameter. These have a local opcode located in
@@ -55,11 +56,12 @@ enum {
55#define __TC_ACT_EXT_SHIFT 28 56#define __TC_ACT_EXT_SHIFT 28
56#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT) 57#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
57#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1) 58#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
58#define TC_ACT_EXT_CMP(combined, opcode) \ 59#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
59 (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode) 60#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
60 61
61#define TC_ACT_JUMP __TC_ACT_EXT(1) 62#define TC_ACT_JUMP __TC_ACT_EXT(1)
62#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2) 63#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
64#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
63 65
64/* Action type identifiers*/ 66/* Action type identifiers*/
65enum { 67enum {
@@ -469,12 +471,47 @@ enum {
469 TCA_FLOWER_KEY_IP_TTL, /* u8 */ 471 TCA_FLOWER_KEY_IP_TTL, /* u8 */
470 TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */ 472 TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
471 473
474 TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
475 TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
476 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
477
478 TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
479 TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
480 TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
481 TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
482
483 TCA_FLOWER_KEY_ENC_OPTS,
484 TCA_FLOWER_KEY_ENC_OPTS_MASK,
485
472 __TCA_FLOWER_MAX, 486 __TCA_FLOWER_MAX,
473}; 487};
474 488
475#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1) 489#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
476 490
477enum { 491enum {
492 TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
493 TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
494 * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
495 * attributes
496 */
497 __TCA_FLOWER_KEY_ENC_OPTS_MAX,
498};
499
500#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
501
502enum {
503 TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
504 TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
505 TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
506 TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
507
508 __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
509};
510
511#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
512 (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
513
514enum {
478 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0), 515 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
479 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), 516 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
480}; 517};
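
TC_ACT_EXT_OPCODE factors the opcode extraction out of TC_ACT_EXT_CMP, and TC_ACT_VALUE_MAX / TC_ACT_EXT_OPCODE_MAX bound the plain and extended verdict spaces. Two small illustrative helpers built on the macros (hypothetical names, not part of the header):

#include <linux/pkt_cls.h>
#include <linux/types.h>
#include <stdbool.h>

static inline bool tc_act_is_goto_chain(int verdict)
{
	return TC_ACT_EXT_CMP(verdict, TC_ACT_GOTO_CHAIN);
}

static inline __u32 tc_act_goto_chain_index(int verdict)
{
	/* the low 28 bits carry the opcode's value parameter (the chain index) */
	return verdict & TC_ACT_EXT_VAL_MASK;
}
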
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..8975fd1a1421 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -124,6 +124,21 @@ struct tc_fifo_qopt {
124 __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */ 124 __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
125}; 125};
126 126
127/* SKBPRIO section */
128
129/*
130 * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
131 * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
132 * to map one to one the DS field of IPV4 and IPV6 headers.
133 * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
134 */
135
136#define SKBPRIO_MAX_PRIORITY 64
137
138struct tc_skbprio_qopt {
139 __u32 limit; /* Queue length in packets. */
140};
141
127/* PRIO section */ 142/* PRIO section */
128 143
129#define TCQ_PRIO_BANDS 16 144#define TCQ_PRIO_BANDS 16
@@ -539,6 +554,7 @@ enum {
539 TCA_NETEM_LATENCY64, 554 TCA_NETEM_LATENCY64,
540 TCA_NETEM_JITTER64, 555 TCA_NETEM_JITTER64,
541 TCA_NETEM_SLOT, 556 TCA_NETEM_SLOT,
557 TCA_NETEM_SLOT_DIST,
542 __TCA_NETEM_MAX, 558 __TCA_NETEM_MAX,
543}; 559};
544 560
@@ -581,6 +597,8 @@ struct tc_netem_slot {
581 __s64 max_delay; 597 __s64 max_delay;
582 __s32 max_packets; 598 __s32 max_packets;
583 __s32 max_bytes; 599 __s32 max_bytes;
600 __s64 dist_delay; /* nsec */
601 __s64 dist_jitter; /* nsec */
584}; 602};
585 603
586enum { 604enum {
@@ -934,4 +952,136 @@ enum {
934 952
935#define TCA_CBS_MAX (__TCA_CBS_MAX - 1) 953#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
936 954
955
956/* ETF */
957struct tc_etf_qopt {
958 __s32 delta;
959 __s32 clockid;
960 __u32 flags;
961#define TC_ETF_DEADLINE_MODE_ON BIT(0)
962#define TC_ETF_OFFLOAD_ON BIT(1)
963};
964
965enum {
966 TCA_ETF_UNSPEC,
967 TCA_ETF_PARMS,
968 __TCA_ETF_MAX,
969};
970
971#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
972
973
974/* CAKE */
975enum {
976 TCA_CAKE_UNSPEC,
977 TCA_CAKE_PAD,
978 TCA_CAKE_BASE_RATE64,
979 TCA_CAKE_DIFFSERV_MODE,
980 TCA_CAKE_ATM,
981 TCA_CAKE_FLOW_MODE,
982 TCA_CAKE_OVERHEAD,
983 TCA_CAKE_RTT,
984 TCA_CAKE_TARGET,
985 TCA_CAKE_AUTORATE,
986 TCA_CAKE_MEMORY,
987 TCA_CAKE_NAT,
988 TCA_CAKE_RAW,
989 TCA_CAKE_WASH,
990 TCA_CAKE_MPU,
991 TCA_CAKE_INGRESS,
992 TCA_CAKE_ACK_FILTER,
993 TCA_CAKE_SPLIT_GSO,
994 __TCA_CAKE_MAX
995};
996#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
997
998enum {
999 __TCA_CAKE_STATS_INVALID,
1000 TCA_CAKE_STATS_PAD,
1001 TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
1002 TCA_CAKE_STATS_MEMORY_LIMIT,
1003 TCA_CAKE_STATS_MEMORY_USED,
1004 TCA_CAKE_STATS_AVG_NETOFF,
1005 TCA_CAKE_STATS_MIN_NETLEN,
1006 TCA_CAKE_STATS_MAX_NETLEN,
1007 TCA_CAKE_STATS_MIN_ADJLEN,
1008 TCA_CAKE_STATS_MAX_ADJLEN,
1009 TCA_CAKE_STATS_TIN_STATS,
1010 TCA_CAKE_STATS_DEFICIT,
1011 TCA_CAKE_STATS_COBALT_COUNT,
1012 TCA_CAKE_STATS_DROPPING,
1013 TCA_CAKE_STATS_DROP_NEXT_US,
1014 TCA_CAKE_STATS_P_DROP,
1015 TCA_CAKE_STATS_BLUE_TIMER_US,
1016 __TCA_CAKE_STATS_MAX
1017};
1018#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
1019
1020enum {
1021 __TCA_CAKE_TIN_STATS_INVALID,
1022 TCA_CAKE_TIN_STATS_PAD,
1023 TCA_CAKE_TIN_STATS_SENT_PACKETS,
1024 TCA_CAKE_TIN_STATS_SENT_BYTES64,
1025 TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
1026 TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
1027 TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
1028 TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
1029 TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
1030 TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
1031 TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
1032 TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
1033 TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
1034 TCA_CAKE_TIN_STATS_TARGET_US,
1035 TCA_CAKE_TIN_STATS_INTERVAL_US,
1036 TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
1037 TCA_CAKE_TIN_STATS_WAY_MISSES,
1038 TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
1039 TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
1040 TCA_CAKE_TIN_STATS_AVG_DELAY_US,
1041 TCA_CAKE_TIN_STATS_BASE_DELAY_US,
1042 TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
1043 TCA_CAKE_TIN_STATS_BULK_FLOWS,
1044 TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
1045 TCA_CAKE_TIN_STATS_MAX_SKBLEN,
1046 TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
1047 __TCA_CAKE_TIN_STATS_MAX
1048};
1049#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
1050#define TC_CAKE_MAX_TINS (8)
1051
1052enum {
1053 CAKE_FLOW_NONE = 0,
1054 CAKE_FLOW_SRC_IP,
1055 CAKE_FLOW_DST_IP,
1056 CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
1057 CAKE_FLOW_FLOWS,
1058 CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
1059 CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
1060 CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
1061 CAKE_FLOW_MAX,
1062};
1063
1064enum {
1065 CAKE_DIFFSERV_DIFFSERV3 = 0,
1066 CAKE_DIFFSERV_DIFFSERV4,
1067 CAKE_DIFFSERV_DIFFSERV8,
1068 CAKE_DIFFSERV_BESTEFFORT,
1069 CAKE_DIFFSERV_PRECEDENCE,
1070 CAKE_DIFFSERV_MAX
1071};
1072
1073enum {
1074 CAKE_ACK_NONE = 0,
1075 CAKE_ACK_FILTER,
1076 CAKE_ACK_AGGRESSIVE,
1077 CAKE_ACK_MAX
1078};
1079
1080enum {
1081 CAKE_ATM_NONE = 0,
1082 CAKE_ATM_ATM,
1083 CAKE_ATM_PTM,
1084 CAKE_ATM_MAX
1085};
1086
937#endif 1087#endif
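
struct tc_etf_qopt is the TCA_ETF_PARMS payload of the new earliest-txtime-first qdisc; delta is the fudge window, in nanoseconds, by which the scheduler wakes up ahead of each packet's txtime. A sketch of the parameters a user might attach to an RTM_NEWQDISC request for deadline mode on CLOCK_TAI (netlink plumbing omitted; note the header uses BIT() without defining it for userspace, so a fallback is provided here):

#include <linux/pkt_sched.h>
#include <time.h>

#ifndef BIT			/* uapi header relies on BIT() being available */
#define BIT(nr) (1U << (nr))
#endif

static struct tc_etf_qopt etf_deadline_params(void)
{
	struct tc_etf_qopt opt = {
		.clockid = CLOCK_TAI,
		.delta   = 200000,			/* wake-up fudge ahead of txtime, ns */
		.flags   = TC_ETF_DEADLINE_MODE_ON,
	};

	return opt;
}
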
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 784c2e3e572e..88b5f9990320 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -68,7 +68,7 @@ struct ppp_option_data {
68struct pppol2tp_ioc_stats { 68struct pppol2tp_ioc_stats {
69 __u16 tunnel_id; /* redundant */ 69 __u16 tunnel_id; /* redundant */
70 __u16 session_id; /* if zero, get tunnel stats */ 70 __u16 session_id; /* if zero, get tunnel stats */
71 __u32 using_ipsec:1; /* valid only for session_id == 0 */ 71 __u32 using_ipsec:1;
72 __aligned_u64 tx_packets; 72 __aligned_u64 tx_packets;
73 __aligned_u64 tx_bytes; 73 __aligned_u64 tx_bytes;
74 __aligned_u64 tx_errors; 74 __aligned_u64 tx_errors;
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 20c6bd0b0007..dc520e1a4123 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ 1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
2/* 2/*
3 * Copyright (c) 2008 Oracle. All rights reserved. 3 * Copyright (c) 2008, 2018 Oracle and/or its affiliates. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
@@ -118,7 +118,17 @@
118#define RDS_INFO_IB_CONNECTIONS 10008 118#define RDS_INFO_IB_CONNECTIONS 10008
119#define RDS_INFO_CONNECTION_STATS 10009 119#define RDS_INFO_CONNECTION_STATS 10009
120#define RDS_INFO_IWARP_CONNECTIONS 10010 120#define RDS_INFO_IWARP_CONNECTIONS 10010
121#define RDS_INFO_LAST 10010 121
122/* PF_RDS6 options */
123#define RDS6_INFO_CONNECTIONS 10011
124#define RDS6_INFO_SEND_MESSAGES 10012
125#define RDS6_INFO_RETRANS_MESSAGES 10013
126#define RDS6_INFO_RECV_MESSAGES 10014
127#define RDS6_INFO_SOCKETS 10015
128#define RDS6_INFO_TCP_SOCKETS 10016
129#define RDS6_INFO_IB_CONNECTIONS 10017
130
131#define RDS_INFO_LAST 10017
122 132
123struct rds_info_counter { 133struct rds_info_counter {
124 __u8 name[32]; 134 __u8 name[32];
@@ -140,6 +150,15 @@ struct rds_info_connection {
140 __u8 flags; 150 __u8 flags;
141} __attribute__((packed)); 151} __attribute__((packed));
142 152
153struct rds6_info_connection {
154 __u64 next_tx_seq;
155 __u64 next_rx_seq;
156 struct in6_addr laddr;
157 struct in6_addr faddr;
158 __u8 transport[TRANSNAMSIZ]; /* null term ascii */
159 __u8 flags;
160} __attribute__((packed));
161
143#define RDS_INFO_MESSAGE_FLAG_ACK 0x01 162#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
144#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 163#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
145 164
@@ -153,6 +172,17 @@ struct rds_info_message {
153 __u8 flags; 172 __u8 flags;
154} __attribute__((packed)); 173} __attribute__((packed));
155 174
175struct rds6_info_message {
176 __u64 seq;
177 __u32 len;
178 struct in6_addr laddr;
179 struct in6_addr faddr;
180 __be16 lport;
181 __be16 fport;
182 __u8 flags;
183 __u8 tos;
184} __attribute__((packed));
185
156struct rds_info_socket { 186struct rds_info_socket {
157 __u32 sndbuf; 187 __u32 sndbuf;
158 __be32 bound_addr; 188 __be32 bound_addr;
@@ -163,6 +193,16 @@ struct rds_info_socket {
163 __u64 inum; 193 __u64 inum;
164} __attribute__((packed)); 194} __attribute__((packed));
165 195
196struct rds6_info_socket {
197 __u32 sndbuf;
198 struct in6_addr bound_addr;
199 struct in6_addr connected_addr;
200 __be16 bound_port;
201 __be16 connected_port;
202 __u32 rcvbuf;
203 __u64 inum;
204} __attribute__((packed));
205
166struct rds_info_tcp_socket { 206struct rds_info_tcp_socket {
167 __be32 local_addr; 207 __be32 local_addr;
168 __be16 local_port; 208 __be16 local_port;
@@ -175,6 +215,18 @@ struct rds_info_tcp_socket {
175 __u32 last_seen_una; 215 __u32 last_seen_una;
176} __attribute__((packed)); 216} __attribute__((packed));
177 217
218struct rds6_info_tcp_socket {
219 struct in6_addr local_addr;
220 __be16 local_port;
221 struct in6_addr peer_addr;
222 __be16 peer_port;
223 __u64 hdr_rem;
224 __u64 data_rem;
225 __u32 last_sent_nxt;
226 __u32 last_expected_una;
227 __u32 last_seen_una;
228} __attribute__((packed));
229
178#define RDS_IB_GID_LEN 16 230#define RDS_IB_GID_LEN 16
179struct rds_info_rdma_connection { 231struct rds_info_rdma_connection {
180 __be32 src_addr; 232 __be32 src_addr;
@@ -189,6 +241,19 @@ struct rds_info_rdma_connection {
189 __u32 rdma_mr_size; 241 __u32 rdma_mr_size;
190}; 242};
191 243
244struct rds6_info_rdma_connection {
245 struct in6_addr src_addr;
246 struct in6_addr dst_addr;
247 __u8 src_gid[RDS_IB_GID_LEN];
248 __u8 dst_gid[RDS_IB_GID_LEN];
249
250 __u32 max_send_wr;
251 __u32 max_recv_wr;
252 __u32 max_send_sge;
253 __u32 rdma_mr_max;
254 __u32 rdma_mr_size;
255};
256
192/* RDS message Receive Path Latency points */ 257/* RDS message Receive Path Latency points */
193enum rds_message_rxpath_latency { 258enum rds_message_rxpath_latency {
194 RDS_MSG_RX_HDR_TO_DGRAM_START = 0, 259 RDS_MSG_RX_HDR_TO_DGRAM_START = 0,
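The RDS6_INFO_* optnames added above extend the existing RDS "info" getsockopt interface to IPv6 addressing. A hedged user-space sketch that lists sockets via RDS6_INFO_SOCKETS follows; it assumes SOL_RDS is 276 (defined locally in case libc headers do not provide it), uses a fixed-size buffer instead of the grow-and-retry convention of rds-tools, and does only minimal error handling.

/* Hedged sketch: dump the new RDS6 socket list through the RDS info
 * getsockopt interface.  Assumes a kernel and headers that carry the
 * RDS6_INFO_* additions above. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* assumption: matches the kernel's socket level */
#endif

int main(void)
{
	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
	socklen_t len = 64 * sizeof(struct rds6_info_socket);
	struct rds6_info_socket *info = malloc(len);

	if (fd < 0 || !info)
		return 1;

	if (getsockopt(fd, SOL_RDS, RDS6_INFO_SOCKETS, info, &len) == 0) {
		unsigned int i, n = len / sizeof(*info);

		for (i = 0; i < n; i++)
			printf("socket inum %llu sndbuf %u rcvbuf %u\n",
			       (unsigned long long)info[i].inum,
			       info[i].sndbuf, info[i].rcvbuf);
	}

	free(info);
	close(fd);
	return 0;
}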
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index d620fa43756c..9a402fdb60e9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -10,13 +10,8 @@
10 * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> 10 * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
11 */ 11 */
12 12
13#ifdef __KERNEL__ 13#include <linux/types.h>
14# include <linux/types.h> 14#include <asm/byteorder.h>
15#else
16# include <stdint.h>
17#endif
18
19#include <linux/types_32_64.h>
20 15
21enum rseq_cpu_id_state { 16enum rseq_cpu_id_state {
22 RSEQ_CPU_ID_UNINITIALIZED = -1, 17 RSEQ_CPU_ID_UNINITIALIZED = -1,
@@ -52,10 +47,10 @@ struct rseq_cs {
52 __u32 version; 47 __u32 version;
53 /* enum rseq_cs_flags */ 48 /* enum rseq_cs_flags */
54 __u32 flags; 49 __u32 flags;
55 LINUX_FIELD_u32_u64(start_ip); 50 __u64 start_ip;
56 /* Offset from start_ip. */ 51 /* Offset from start_ip. */
57 LINUX_FIELD_u32_u64(post_commit_offset); 52 __u64 post_commit_offset;
58 LINUX_FIELD_u32_u64(abort_ip); 53 __u64 abort_ip;
59} __attribute__((aligned(4 * sizeof(__u64)))); 54} __attribute__((aligned(4 * sizeof(__u64))));
60 55
61/* 56/*
@@ -67,28 +62,30 @@ struct rseq_cs {
67struct rseq { 62struct rseq {
68 /* 63 /*
69 * Restartable sequences cpu_id_start field. Updated by the 64 * Restartable sequences cpu_id_start field. Updated by the
70 * kernel, and read by user-space with single-copy atomicity 65 * kernel. Read by user-space with single-copy atomicity
71 * semantics. Aligned on 32-bit. Always contains a value in the 66 * semantics. This field should only be read by the thread which
72 * range of possible CPUs, although the value may not be the 67 * registered this data structure. Aligned on 32-bit. Always
73 * actual current CPU (e.g. if rseq is not initialized). This 68 * contains a value in the range of possible CPUs, although the
74 * CPU number value should always be compared against the value 69 * value may not be the actual current CPU (e.g. if rseq is not
75 * of the cpu_id field before performing a rseq commit or 70 * initialized). This CPU number value should always be compared
76 * returning a value read from a data structure indexed using 71 * against the value of the cpu_id field before performing a rseq
77 * the cpu_id_start value. 72 * commit or returning a value read from a data structure indexed
73 * using the cpu_id_start value.
78 */ 74 */
79 __u32 cpu_id_start; 75 __u32 cpu_id_start;
80 /* 76 /*
81 * Restartable sequences cpu_id field. Updated by the kernel, 77 * Restartable sequences cpu_id field. Updated by the kernel.
82 * and read by user-space with single-copy atomicity semantics. 78 * Read by user-space with single-copy atomicity semantics. This
83 * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and 79 * field should only be read by the thread which registered this
84 * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the 80 * data structure. Aligned on 32-bit. Values
85 * former means "rseq uninitialized", and latter means "rseq 81 * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
86 * initialization failed". This value is meant to be read within 82 * have a special semantic: the former means "rseq uninitialized",
87 * rseq critical sections and compared with the cpu_id_start 83 * and latter means "rseq initialization failed". This value is
88 * value previously read, before performing the commit instruction, 84 * meant to be read within rseq critical sections and compared
89 * or read and compared with the cpu_id_start value before returning 85 * with the cpu_id_start value previously read, before performing
90 * a value loaded from a data structure indexed using the 86 * the commit instruction, or read and compared with the
91 * cpu_id_start value. 87 * cpu_id_start value before returning a value loaded from a data
88 * structure indexed using the cpu_id_start value.
92 */ 89 */
93 __u32 cpu_id; 90 __u32 cpu_id;
94 /* 91 /*
@@ -105,27 +102,44 @@ struct rseq {
105 * targeted by the rseq_cs. Also needs to be set to NULL by user-space 102 * targeted by the rseq_cs. Also needs to be set to NULL by user-space
106 * before reclaiming memory that contains the targeted struct rseq_cs. 103 * before reclaiming memory that contains the targeted struct rseq_cs.
107 * 104 *
108 * Read and set by the kernel with single-copy atomicity semantics. 105 * Read and set by the kernel. Set by user-space with single-copy
109 * Set by user-space with single-copy atomicity semantics. Aligned 106 * atomicity semantics. This field should only be updated by the
110 * on 64-bit. 107 * thread which registered this data structure. Aligned on 64-bit.
111 */ 108 */
112 LINUX_FIELD_u32_u64(rseq_cs); 109 union {
110 __u64 ptr64;
111#ifdef __LP64__
112 __u64 ptr;
113#else
114 struct {
115#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
116 __u32 padding; /* Initialized to zero. */
117 __u32 ptr32;
118#else /* LITTLE */
119 __u32 ptr32;
120 __u32 padding; /* Initialized to zero. */
121#endif /* ENDIAN */
122 } ptr;
123#endif
124 } rseq_cs;
125
113 /* 126 /*
114 * - RSEQ_DISABLE flag: 127 * Restartable sequences flags field.
128 *
129 * This field should only be updated by the thread which
130 * registered this data structure. Read by the kernel.
131 * Mainly used for single-stepping through rseq critical sections
132 * with debuggers.
115 * 133 *
116 * Fallback fast-track flag for single-stepping.
117 * Set by user-space if lack of progress is detected.
118 * Cleared by user-space after rseq finish.
119 * Read by the kernel.
120 * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT 134 * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
121 * Inhibit instruction sequence block restart and event 135 * Inhibit instruction sequence block restart on preemption
122 * counter increment on preemption for this thread. 136 * for this thread.
123 * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL 137 * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
124 * Inhibit instruction sequence block restart and event 138 * Inhibit instruction sequence block restart on signal
125 * counter increment on signal delivery for this thread. 139 * delivery for this thread.
126 * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE 140 * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
127 * Inhibit instruction sequence block restart and event 141 * Inhibit instruction sequence block restart on migration for
128 * counter increment on migration for this thread. 142 * this thread.
129 */ 143 */
130 __u32 flags; 144 __u32 flags;
131} __attribute__((aligned(4 * sizeof(__u64)))); 145} __attribute__((aligned(4 * sizeof(__u64))));
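With LINUX_FIELD_u32_u64() gone, user-space stores the rseq_cs descriptor pointer through the union shown above. A hedged sketch of such a store follows; rseq_store_cs() is a hypothetical helper, not part of the ABI, and a production implementation would use a single-copy atomic store (e.g. via volatile) and require the thread to have registered its struct rseq through the rseq() system call first.

#include <stdint.h>
#include <linux/rseq.h>

/* Hypothetical helper: publish a critical-section descriptor in the
 * rseq_cs union.  On LP64 the 64-bit member is written directly; on
 * 32-bit the pointer goes into ptr32 with the padding kept at zero. */
static inline void rseq_store_cs(struct rseq *rs, const struct rseq_cs *cs)
{
#ifdef __LP64__
	rs->rseq_cs.ptr = (uintptr_t)cs;
#else
	rs->rseq_cs.ptr.padding = 0;
	rs->rseq_cs.ptr.ptr32 = (uintptr_t)cs;
#endif
}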
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 7d8502313c99..46399367627f 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -150,6 +150,13 @@ enum {
150 RTM_NEWCACHEREPORT = 96, 150 RTM_NEWCACHEREPORT = 96,
151#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT 151#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
152 152
153 RTM_NEWCHAIN = 100,
154#define RTM_NEWCHAIN RTM_NEWCHAIN
155 RTM_DELCHAIN,
156#define RTM_DELCHAIN RTM_DELCHAIN
157 RTM_GETCHAIN,
158#define RTM_GETCHAIN RTM_GETCHAIN
159
153 __RTM_MAX, 160 __RTM_MAX,
154#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) 161#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
155}; 162};
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b64d583bf053..b479db5c71d9 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -100,6 +100,7 @@ typedef __s32 sctp_assoc_t;
100#define SCTP_RECVNXTINFO 33 100#define SCTP_RECVNXTINFO 33
101#define SCTP_DEFAULT_SNDINFO 34 101#define SCTP_DEFAULT_SNDINFO 34
102#define SCTP_AUTH_DEACTIVATE_KEY 35 102#define SCTP_AUTH_DEACTIVATE_KEY 35
103#define SCTP_REUSE_PORT 36
103 104
104/* Internal Socket Options. Some of the sctp library functions are 105/* Internal Socket Options. Some of the sctp library functions are
105 * implemented using these socket options. 106 * implemented using these socket options.
@@ -762,6 +763,8 @@ enum sctp_spp_flags {
762 SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/ 763 SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/
763 SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE, 764 SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE,
764 SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */ 765 SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */
766 SPP_IPV6_FLOWLABEL = 1<<8,
767 SPP_DSCP = 1<<9,
765}; 768};
766 769
767struct sctp_paddrparams { 770struct sctp_paddrparams {
@@ -772,6 +775,8 @@ struct sctp_paddrparams {
772 __u32 spp_pathmtu; 775 __u32 spp_pathmtu;
773 __u32 spp_sackdelay; 776 __u32 spp_sackdelay;
774 __u32 spp_flags; 777 __u32 spp_flags;
778 __u32 spp_ipv6_flowlabel;
779 __u8 spp_dscp;
775} __attribute__((packed, aligned(4))); 780} __attribute__((packed, aligned(4)));
776 781
777/* 782/*
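SPP_DSCP and SPP_IPV6_FLOWLABEL let the new spp_dscp and spp_ipv6_flowlabel fields be applied through the existing SCTP_PEER_ADDR_PARAMS option. A hedged sketch follows; it assumes an already established SCTP socket, leaves spp_address zeroed so the setting is intended to apply association-wide, and skips error handling.

/* Hedged sketch: request a DSCP value for an SCTP association using
 * the new SPP_DSCP flag.  The dscp argument is illustrative. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

static int sctp_set_dscp(int fd, unsigned char dscp)
{
	struct sctp_paddrparams params;

	memset(&params, 0, sizeof(params));	/* spp_address unset: association-wide */
	params.spp_flags = SPP_DSCP;
	params.spp_dscp = dscp;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &params, sizeof(params));
}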
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index 0ae5d4685ba3..ac9e8c96d9bd 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -20,7 +20,7 @@ struct smc_diag_req {
20struct smc_diag_msg { 20struct smc_diag_msg {
21 __u8 diag_family; 21 __u8 diag_family;
22 __u8 diag_state; 22 __u8 diag_state;
23 __u8 diag_fallback; 23 __u8 diag_mode;
24 __u8 diag_shutdown; 24 __u8 diag_shutdown;
25 struct inet_diag_sockid id; 25 struct inet_diag_sockid id;
26 26
@@ -28,6 +28,13 @@ struct smc_diag_msg {
28 __u64 diag_inode; 28 __u64 diag_inode;
29}; 29};
30 30
31/* Mode of a connection */
32enum {
33 SMC_DIAG_MODE_SMCR,
34 SMC_DIAG_MODE_FALLBACK_TCP,
35 SMC_DIAG_MODE_SMCD,
36};
37
31/* Extensions */ 38/* Extensions */
32 39
33enum { 40enum {
@@ -35,6 +42,8 @@ enum {
35 SMC_DIAG_CONNINFO, 42 SMC_DIAG_CONNINFO,
36 SMC_DIAG_LGRINFO, 43 SMC_DIAG_LGRINFO,
37 SMC_DIAG_SHUTDOWN, 44 SMC_DIAG_SHUTDOWN,
45 SMC_DIAG_DMBINFO,
46 SMC_DIAG_FALLBACK,
38 __SMC_DIAG_MAX, 47 __SMC_DIAG_MAX,
39}; 48};
40 49
@@ -83,4 +92,18 @@ struct smc_diag_lgrinfo {
83 struct smc_diag_linkinfo lnk[1]; 92 struct smc_diag_linkinfo lnk[1];
84 __u8 role; 93 __u8 role;
85}; 94};
95
96struct smc_diag_fallback {
97 __u32 reason;
98 __u32 peer_diagnosis;
99};
100
101struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
102 __u32 linkid; /* Link identifier */
103 __u64 peer_gid; /* Peer GID */
104 __u64 my_gid; /* My GID */
105 __u64 token; /* Token of DMB */
106 __u64 peer_token; /* Token of remote DMBE */
107};
108
86#endif /* _UAPI_SMC_DIAG_H_ */ 109#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 750d89120335..f80135e5feaa 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -56,6 +56,7 @@ enum
56 IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */ 56 IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
57 IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ 57 IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
58 IPSTATS_MIB_CEPKTS, /* InCEPkts */ 58 IPSTATS_MIB_CEPKTS, /* InCEPkts */
59 IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
59 __IPSTATS_MIB_MAX 60 __IPSTATS_MIB_MAX
60}; 61};
61 62
@@ -279,6 +280,8 @@ enum
279 LINUX_MIB_TCPDELIVERED, /* TCPDelivered */ 280 LINUX_MIB_TCPDELIVERED, /* TCPDelivered */
280 LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */ 281 LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */
281 LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */ 282 LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */
283 LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */
284 LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */
282 __LINUX_MIB_MAX 285 __LINUX_MIB_MAX
283}; 286};
284 287
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 6b58371b1f0d..d71013fffaf6 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -575,7 +575,8 @@ enum {
575 575
576/* /proc/sys/net/ipv6/icmp */ 576/* /proc/sys/net/ipv6/icmp */
577enum { 577enum {
578 NET_IPV6_ICMP_RATELIMIT=1 578 NET_IPV6_ICMP_RATELIMIT = 1,
579 NET_IPV6_ICMP_ECHO_IGNORE_ALL = 2
579}; 580};
580 581
581/* /proc/sys/net/<protocol>/neigh/<dev> */ 582/* /proc/sys/net/<protocol>/neigh/<dev> */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 6e299349b158..b7b57967d90f 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -44,6 +44,7 @@
44#define TCMU_MAILBOX_VERSION 2 44#define TCMU_MAILBOX_VERSION 2
45#define ALIGN_SIZE 64 /* Should be enough for most CPUs */ 45#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
46#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */ 46#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
47#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
47 48
48struct tcmu_mailbox { 49struct tcmu_mailbox {
49 __u16 version; 50 __u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
71 __u16 cmd_id; 72 __u16 cmd_id;
72 __u8 kflags; 73 __u8 kflags;
73#define TCMU_UFLAG_UNKNOWN_OP 0x1 74#define TCMU_UFLAG_UNKNOWN_OP 0x1
75#define TCMU_UFLAG_READ_LEN 0x2
74 __u8 uflags; 76 __u8 uflags;
75 77
76} __packed; 78} __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
119 __u8 scsi_status; 121 __u8 scsi_status;
120 __u8 __pad1; 122 __u8 __pad1;
121 __u16 __pad2; 123 __u16 __pad2;
122 __u32 __pad3; 124 __u32 read_len;
123 char sense_buffer[TCMU_SENSE_BUFFERSIZE]; 125 char sense_buffer[TCMU_SENSE_BUFFERSIZE];
124 } rsp; 126 } rsp;
125 }; 127 };
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 162d1094c41c..24ec792dacc1 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -17,13 +17,15 @@ enum {
17 TCA_PEDIT_KEY_EX, 17 TCA_PEDIT_KEY_EX,
18 __TCA_PEDIT_MAX 18 __TCA_PEDIT_MAX
19}; 19};
20
20#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1) 21#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
21 22
22enum { 23enum {
23 TCA_PEDIT_KEY_EX_HTYPE = 1, 24 TCA_PEDIT_KEY_EX_HTYPE = 1,
24 TCA_PEDIT_KEY_EX_CMD = 2, 25 TCA_PEDIT_KEY_EX_CMD = 2,
25 __TCA_PEDIT_KEY_EX_MAX 26 __TCA_PEDIT_KEY_EX_MAX
26}; 27};
28
27#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1) 29#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1)
28 30
29 /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It 31 /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It
@@ -38,6 +40,7 @@ enum pedit_header_type {
38 TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5, 40 TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
39 __PEDIT_HDR_TYPE_MAX, 41 __PEDIT_HDR_TYPE_MAX,
40}; 42};
43
41#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1) 44#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1)
42 45
43enum pedit_cmd { 46enum pedit_cmd {
@@ -45,6 +48,7 @@ enum pedit_cmd {
45 TCA_PEDIT_KEY_EX_CMD_ADD = 1, 48 TCA_PEDIT_KEY_EX_CMD_ADD = 1,
46 __PEDIT_CMD_MAX, 49 __PEDIT_CMD_MAX,
47}; 50};
51
48#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1) 52#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1)
49 53
50struct tc_pedit_key { 54struct tc_pedit_key {
@@ -55,13 +59,14 @@ struct tc_pedit_key {
55 __u32 offmask; 59 __u32 offmask;
56 __u32 shift; 60 __u32 shift;
57}; 61};
58 62
59struct tc_pedit_sel { 63struct tc_pedit_sel {
60 tc_gen; 64 tc_gen;
61 unsigned char nkeys; 65 unsigned char nkeys;
62 unsigned char flags; 66 unsigned char flags;
63 struct tc_pedit_key keys[0]; 67 struct tc_pedit_key keys[0];
64}; 68};
69
65#define tc_pedit tc_pedit_sel 70#define tc_pedit tc_pedit_sel
66 71
67#endif 72#endif
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index fbcfe27a4e6c..6de6071ebed6 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -30,6 +30,7 @@
30#define SKBEDIT_F_MARK 0x4 30#define SKBEDIT_F_MARK 0x4
31#define SKBEDIT_F_PTYPE 0x8 31#define SKBEDIT_F_PTYPE 0x8
32#define SKBEDIT_F_MASK 0x10 32#define SKBEDIT_F_MASK 0x10
33#define SKBEDIT_F_INHERITDSFIELD 0x20
33 34
34struct tc_skbedit { 35struct tc_skbedit {
35 tc_gen; 36 tc_gen;
@@ -45,6 +46,7 @@ enum {
45 TCA_SKBEDIT_PAD, 46 TCA_SKBEDIT_PAD,
46 TCA_SKBEDIT_PTYPE, 47 TCA_SKBEDIT_PTYPE,
47 TCA_SKBEDIT_MASK, 48 TCA_SKBEDIT_MASK,
49 TCA_SKBEDIT_FLAGS,
48 __TCA_SKBEDIT_MAX 50 __TCA_SKBEDIT_MAX
49}; 51};
50#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) 52#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 72bbefe5d1d1..be384d63e1b5 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -36,9 +36,37 @@ enum {
36 TCA_TUNNEL_KEY_PAD, 36 TCA_TUNNEL_KEY_PAD,
37 TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */ 37 TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */
38 TCA_TUNNEL_KEY_NO_CSUM, /* u8 */ 38 TCA_TUNNEL_KEY_NO_CSUM, /* u8 */
39 TCA_TUNNEL_KEY_ENC_OPTS, /* Nested TCA_TUNNEL_KEY_ENC_OPTS_
40 * attributes
41 */
42 TCA_TUNNEL_KEY_ENC_TOS, /* u8 */
43 TCA_TUNNEL_KEY_ENC_TTL, /* u8 */
39 __TCA_TUNNEL_KEY_MAX, 44 __TCA_TUNNEL_KEY_MAX,
40}; 45};
41 46
42#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1) 47#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
43 48
49enum {
50 TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC,
51 TCA_TUNNEL_KEY_ENC_OPTS_GENEVE, /* Nested
52 * TCA_TUNNEL_KEY_ENC_OPTS_
53 * attributes
54 */
55 __TCA_TUNNEL_KEY_ENC_OPTS_MAX,
56};
57
58#define TCA_TUNNEL_KEY_ENC_OPTS_MAX (__TCA_TUNNEL_KEY_ENC_OPTS_MAX - 1)
59
60enum {
61 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_UNSPEC,
62 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, /* be16 */
63 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
64 TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
65
66 __TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
67};
68
69#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \
70 (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1)
71
44#endif 72#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 29eb659aa77a..e02d31986ff9 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -127,6 +127,10 @@ enum {
127 127
128#define TCP_CM_INQ TCP_INQ 128#define TCP_CM_INQ TCP_INQ
129 129
130#define TCP_REPAIR_ON 1
131#define TCP_REPAIR_OFF 0
132#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
133
130struct tcp_repair_opt { 134struct tcp_repair_opt {
131 __u32 opt_code; 135 __u32 opt_code;
132 __u32 opt_val; 136 __u32 opt_val;
@@ -231,6 +235,11 @@ struct tcp_info {
231 235
232 __u32 tcpi_delivered; 236 __u32 tcpi_delivered;
233 __u32 tcpi_delivered_ce; 237 __u32 tcpi_delivered_ce;
238
239 __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */
240 __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */
241 __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */
242 __u32 tcpi_reord_seen; /* reordering events seen */
234}; 243};
235 244
236/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ 245/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -253,7 +262,10 @@ enum {
253 TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */ 262 TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */
254 TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */ 263 TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */
255 TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */ 264 TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */
256 265 TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */
266 TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
267 TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
268 TCP_NLA_REORD_SEEN, /* reordering events seen */
257}; 269};
258 270
259/* for TCP_MD5SIG socket option */ 271/* for TCP_MD5SIG socket option */
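TCP_REPAIR_OFF_NO_WP gives checkpoint/restore tools a way to leave repair mode without sending window probes. A hedged sketch of that final step follows; it assumes the socket was put into repair mode earlier by a CRIU-like tool running with CAP_NET_ADMIN.

/* Hedged sketch: turn TCP repair mode off without window probes. */
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <linux/tcp.h>		/* TCP_REPAIR, TCP_REPAIR_OFF_NO_WP */

static int tcp_repair_done(int fd)
{
	int val = TCP_REPAIR_OFF_NO_WP;

	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val));
}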
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index fcf936656493..6b56a2208be7 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -49,6 +49,13 @@ struct __kernel_timespec {
49}; 49};
50#endif 50#endif
51 51
52#ifndef __kernel_itimerspec
53struct __kernel_itimerspec {
54 struct __kernel_timespec it_interval; /* timer period */
55 struct __kernel_timespec it_value; /* timer expiration */
56};
57#endif
58
52/* 59/*
53 * legacy timeval structure, only embedded in structures that 60 * legacy timeval structure, only embedded in structures that
54 * traditionally used 'timeval' to pass time intervals (not absolute 61 * traditionally used 'timeval' to pass time intervals (not absolute
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 85c11982c89b..0ebe02ef1a86 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -121,6 +121,7 @@ enum {
121 TIPC_NLA_SOCK_TIPC_STATE, /* u32 */ 121 TIPC_NLA_SOCK_TIPC_STATE, /* u32 */
122 TIPC_NLA_SOCK_COOKIE, /* u64 */ 122 TIPC_NLA_SOCK_COOKIE, /* u64 */
123 TIPC_NLA_SOCK_PAD, /* flag */ 123 TIPC_NLA_SOCK_PAD, /* flag */
124 TIPC_NLA_SOCK_GROUP, /* nest */
124 125
125 __TIPC_NLA_SOCK_MAX, 126 __TIPC_NLA_SOCK_MAX,
126 TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1 127 TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
@@ -233,6 +234,19 @@ enum {
233 TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1 234 TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1
234}; 235};
235 236
237/* Nest, socket group info */
238enum {
239 TIPC_NLA_SOCK_GROUP_ID, /* u32 */
240 TIPC_NLA_SOCK_GROUP_OPEN, /* flag */
241 TIPC_NLA_SOCK_GROUP_NODE_SCOPE, /* flag */
242 TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE, /* flag */
243 TIPC_NLA_SOCK_GROUP_INSTANCE, /* u32 */
244 TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, /* u32 */
245
246 __TIPC_NLA_SOCK_GROUP_MAX,
247 TIPC_NLA_SOCK_GROUP_MAX = __TIPC_NLA_SOCK_GROUP_MAX - 1
248};
249
236/* Nest, connection info */ 250/* Nest, connection info */
237enum { 251enum {
238 TIPC_NLA_CON_UNSPEC, 252 TIPC_NLA_CON_UNSPEC,
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
deleted file mode 100644
index 0a87ace34a57..000000000000
--- a/include/uapi/linux/types_32_64.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2#ifndef _UAPI_LINUX_TYPES_32_64_H
3#define _UAPI_LINUX_TYPES_32_64_H
4
5/*
6 * linux/types_32_64.h
7 *
8 * Integer type declaration for pointers across 32-bit and 64-bit systems.
9 *
10 * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
11 */
12
13#ifdef __KERNEL__
14# include <linux/types.h>
15#else
16# include <stdint.h>
17#endif
18
19#include <asm/byteorder.h>
20
21#ifdef __BYTE_ORDER
22# if (__BYTE_ORDER == __BIG_ENDIAN)
23# define LINUX_BYTE_ORDER_BIG_ENDIAN
24# else
25# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
26# endif
27#else
28# ifdef __BIG_ENDIAN
29# define LINUX_BYTE_ORDER_BIG_ENDIAN
30# else
31# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
32# endif
33#endif
34
35#ifdef __LP64__
36# define LINUX_FIELD_u32_u64(field) __u64 field
37# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v
38#else
39# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
40# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field
41# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
42 field ## _padding = 0, field = (intptr_t)v
43# else
44# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding
45# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
46 field = (intptr_t)v, field ## _padding = 0
47# endif
48#endif
49
50#endif /* _UAPI_LINUX_TYPES_32_64_H */
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 74e520fb944f..ddc5396800aa 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -390,33 +390,64 @@ static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_
390static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc, 390static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc,
391 int protocol) 391 int protocol)
392{ 392{
393 return (protocol == UAC_VERSION_1) ? 393 switch (protocol) {
394 desc->baSourceID[desc->bNrInPins + 4] : 394 case UAC_VERSION_1:
395 2; /* in UAC2, this value is constant */ 395 return desc->baSourceID[desc->bNrInPins + 4];
396 case UAC_VERSION_2:
397 return 2; /* in UAC2, this value is constant */
398 case UAC_VERSION_3:
399 return 4; /* in UAC3, this value is constant */
400 default:
401 return 1;
402 }
396} 403}
397 404
398static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, 405static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
399 int protocol) 406 int protocol)
400{ 407{
401 return (protocol == UAC_VERSION_1) ? 408 switch (protocol) {
402 &desc->baSourceID[desc->bNrInPins + 5] : 409 case UAC_VERSION_1:
403 &desc->baSourceID[desc->bNrInPins + 6]; 410 return &desc->baSourceID[desc->bNrInPins + 5];
411 case UAC_VERSION_2:
412 return &desc->baSourceID[desc->bNrInPins + 6];
413 case UAC_VERSION_3:
414 return &desc->baSourceID[desc->bNrInPins + 2];
415 default:
416 return NULL;
417 }
404} 418}
405 419
406static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, 420static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
407 int protocol) 421 int protocol)
408{ 422{
409 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); 423 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
410 return *(uac_processing_unit_bmControls(desc, protocol) 424
411 + control_size); 425 switch (protocol) {
426 case UAC_VERSION_1:
427 case UAC_VERSION_2:
428 default:
429 return *(uac_processing_unit_bmControls(desc, protocol)
430 + control_size);
431 case UAC_VERSION_3:
432 return 0; /* UAC3 does not have this field */
433 }
412} 434}
413 435
414static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, 436static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
415 int protocol) 437 int protocol)
416{ 438{
417 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); 439 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
418 return uac_processing_unit_bmControls(desc, protocol) 440
441 switch (protocol) {
442 case UAC_VERSION_1:
443 case UAC_VERSION_2:
444 default:
445 return uac_processing_unit_bmControls(desc, protocol)
419 + control_size + 1; 446 + control_size + 1;
447 case UAC_VERSION_3:
448 return uac_processing_unit_bmControls(desc, protocol)
449 + control_size;
450 }
420} 451}
421 452
422/* 4.5.2 Class-Specific AS Interface Descriptor */ 453/* 4.5.2 Class-Specific AS Interface Descriptor */
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index 020714d2c5bd..f80f05b3c423 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -28,6 +28,8 @@
28#define UVC_CTRL_FLAG_RESTORE (1 << 6) 28#define UVC_CTRL_FLAG_RESTORE (1 << 6)
29/* Control can be updated by the camera. */ 29/* Control can be updated by the camera. */
30#define UVC_CTRL_FLAG_AUTO_UPDATE (1 << 7) 30#define UVC_CTRL_FLAG_AUTO_UPDATE (1 << 7)
31/* Control supports asynchronous reporting */
32#define UVC_CTRL_FLAG_ASYNCHRONOUS (1 << 8)
31 33
32#define UVC_CTRL_FLAG_GET_RANGE \ 34#define UVC_CTRL_FLAG_GET_RANGE \
33 (UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_MIN | \ 35 (UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_MIN | \
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 8d473c979b61..e4ee10ee917d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -188,7 +188,7 @@ enum v4l2_colorfx {
188 188
189/* The base for the imx driver controls. 189/* The base for the imx driver controls.
190 * We reserve 16 controls for this driver. */ 190 * We reserve 16 controls for this driver. */
191#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x1090) 191#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0)
192 192
193/* MPEG-class control IDs */ 193/* MPEG-class control IDs */
194/* The MPEG controls are applicable to all codec controls 194/* The MPEG controls are applicable to all codec controls
@@ -587,7 +587,23 @@ enum v4l2_vp8_golden_frame_sel {
587#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508) 587#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
588#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509) 588#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
589#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510) 589#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
590#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511) 590
591#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_MPEG_BASE+511)
592enum v4l2_mpeg_video_vp8_profile {
593 V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
594 V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
595 V4L2_MPEG_VIDEO_VP8_PROFILE_2 = 2,
596 V4L2_MPEG_VIDEO_VP8_PROFILE_3 = 3,
597};
598/* Deprecated alias for compatibility reasons. */
599#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
600#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_MPEG_BASE+512)
601enum v4l2_mpeg_video_vp9_profile {
602 V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
603 V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
604 V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
605 V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
606};
591 607
592/* CIDs for HEVC encoding. */ 608/* CIDs for HEVC encoding. */
593 609
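V4L2_CID_MPEG_VIDEO_VP9_PROFILE is an ordinary codec control ID, so it is set like any other control. A hedged sketch follows; it assumes fd is an open V4L2 encoder node that actually exposes the control, and uses the plain VIDIOC_S_CTRL path rather than the extended-control API.

/* Hedged sketch: select VP9 profile 2 with the new control ID. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>	/* pulls in linux/v4l2-controls.h */

static int set_vp9_profile(int fd)
{
	struct v4l2_control ctrl;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE;
	ctrl.value = V4L2_MPEG_VIDEO_VP9_PROFILE_2;

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}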
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index c95a53e6743c..03970ce30741 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -170,8 +170,12 @@ struct v4l2_subdev_selection {
170#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection) 170#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
171#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection) 171#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
172/* The following ioctls are identical to the ioctls in videodev2.h */ 172/* The following ioctls are identical to the ioctls in videodev2.h */
173#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
174#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
175#define VIDIOC_SUBDEV_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
173#define VIDIOC_SUBDEV_G_EDID _IOWR('V', 40, struct v4l2_edid) 176#define VIDIOC_SUBDEV_G_EDID _IOWR('V', 40, struct v4l2_edid)
174#define VIDIOC_SUBDEV_S_EDID _IOWR('V', 41, struct v4l2_edid) 177#define VIDIOC_SUBDEV_S_EDID _IOWR('V', 41, struct v4l2_edid)
178#define VIDIOC_SUBDEV_QUERYSTD _IOR('V', 63, v4l2_std_id)
175#define VIDIOC_SUBDEV_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) 179#define VIDIOC_SUBDEV_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
176#define VIDIOC_SUBDEV_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings) 180#define VIDIOC_SUBDEV_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
177#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings) 181#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index c51f8e5cc608..b1e22c40c4b6 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
65}; 65};
66 66
67#define VHOST_IOTLB_MSG 0x1 67#define VHOST_IOTLB_MSG 0x1
68#define VHOST_IOTLB_MSG_V2 0x2
68 69
69struct vhost_msg { 70struct vhost_msg {
70 int type; 71 int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
74 }; 75 };
75}; 76};
76 77
78struct vhost_msg_v2 {
79 __u32 type;
80 __u32 reserved;
81 union {
82 struct vhost_iotlb_msg iotlb;
83 __u8 padding[64];
84 };
85};
86
77struct vhost_memory_region { 87struct vhost_memory_region {
78 __u64 guest_phys_addr; 88 __u64 guest_phys_addr;
79 __u64 memory_size; /* bytes */ 89 __u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
160#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \ 170#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
161 struct vhost_vring_state) 171 struct vhost_vring_state)
162 172
173/* Set or get vhost backend capability */
174
175/* Use message type V2 */
176#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
177
178#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
179#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
180
163/* VHOST_NET specific defines */ 181/* VHOST_NET specific defines */
164 182
165/* Attach virtio net ring to a raw socket, or tap device. 183/* Attach virtio net ring to a raw socket, or tap device.
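VHOST_GET_BACKEND_FEATURES and VHOST_SET_BACKEND_FEATURES negotiate backend capabilities such as the V2 IOTLB message format. A hedged sketch of the negotiation follows; it assumes fd is an open vhost device (e.g. /dev/vhost-net) that already had VHOST_SET_OWNER called, and that VHOST_BACKEND_F_IOTLB_MSG_V2 is interpreted as a feature bit number, matching in-kernel usage.

/* Hedged sketch: enable the V2 IOTLB message format if offered. */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_enable_iotlb_v2(int fd)
{
	__u64 features = 0;

	if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
		return -1;

	if (!(features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)))
		return -1;	/* backend does not offer the V2 format */

	features = 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
	return ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features);
}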
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 600877be5c22..5d1a3685bea9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -522,6 +522,7 @@ struct v4l2_pix_format {
522 522
523/* Grey bit-packed formats */ 523/* Grey bit-packed formats */
524#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */ 524#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
525#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
525 526
526/* Palette formats */ 527/* Palette formats */
527#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ 528#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -609,6 +610,11 @@ struct v4l2_pix_format {
609#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C') 610#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
610#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C') 611#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
611#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C') 612#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
613 /* 14bit raw bayer packed, 7 bytes for every 4 pixels */
614#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
615#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
616#define V4L2_PIX_FMT_SGRBG14P v4l2_fourcc('p', 'g', 'E', 'E')
617#define V4L2_PIX_FMT_SRGGB14P v4l2_fourcc('p', 'R', 'E', 'E')
612#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */ 618#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
613#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */ 619#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
614#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */ 620#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
@@ -636,6 +642,7 @@ struct v4l2_pix_format {
636#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */ 642#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
637#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */ 643#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
638#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */ 644#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
645#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
639 646
640/* Vendor-specific formats */ 647/* Vendor-specific formats */
641#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */ 648#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -2310,7 +2317,6 @@ struct v4l2_create_buffers {
2310 * 2317 *
2311 */ 2318 */
2312#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability) 2319#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
2313#define VIDIOC_RESERVED _IO('V', 1)
2314#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc) 2320#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
2315#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format) 2321#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
2316#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format) 2322#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index e3af2859188b..5f3b9fec7b5f 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -305,9 +305,12 @@ enum xfrm_attr_type_t {
305 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ 305 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
306 XFRMA_PAD, 306 XFRMA_PAD,
307 XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ 307 XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
308 XFRMA_OUTPUT_MARK, /* __u32 */ 308 XFRMA_SET_MARK, /* __u32 */
309 XFRMA_SET_MARK_MASK, /* __u32 */
310 XFRMA_IF_ID, /* __u32 */
309 __XFRMA_MAX 311 __XFRMA_MAX
310 312
313#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
311#define XFRMA_MAX (__XFRMA_MAX - 1) 314#define XFRMA_MAX (__XFRMA_MAX - 1)
312}; 315};
313 316
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 6d1163456c03..fe4423e518c6 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -5,6 +5,7 @@
5 * Interface to /dev/xen/gntdev. 5 * Interface to /dev/xen/gntdev.
6 * 6 *
7 * Copyright (c) 2007, D G Murray 7 * Copyright (c) 2007, D G Murray
8 * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
8 * 9 *
9 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2 11 * modify it under the terms of the GNU General Public License version 2
@@ -200,4 +201,109 @@ struct ioctl_gntdev_grant_copy {
200/* Send an interrupt on the indicated event channel */ 201/* Send an interrupt on the indicated event channel */
201#define UNMAP_NOTIFY_SEND_EVENT 0x2 202#define UNMAP_NOTIFY_SEND_EVENT 0x2
202 203
204/*
205 * Flags to be used while requesting memory mapping's backing storage
206 * to be allocated with DMA API.
207 */
208
209/*
210 * The buffer is backed with memory allocated with dma_alloc_wc.
211 */
212#define GNTDEV_DMA_FLAG_WC (1 << 0)
213
214/*
215 * The buffer is backed with memory allocated with dma_alloc_coherent.
216 */
217#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
218
219/*
220 * Create a dma-buf [1] from grant references @refs of count @count provided
221 * by the foreign domain @domid with flags @flags.
222 *
223 * By default dma-buf is backed by system memory pages, but by providing
224 * one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as
225 * a DMA write-combine or coherent buffer, e.g. allocated with dma_alloc_wc/
226 * dma_alloc_coherent.
227 *
228 * Returns 0 if dma-buf was successfully created and the corresponding
229 * dma-buf's file descriptor is returned in @fd.
230 *
231 * [1] Documentation/driver-api/dma-buf.rst
232 */
233
234#define IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS \
235 _IOC(_IOC_NONE, 'G', 9, \
236 sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs))
237struct ioctl_gntdev_dmabuf_exp_from_refs {
238 /* IN parameters. */
239 /* Specific options for this dma-buf: see GNTDEV_DMA_FLAG_XXX. */
240 __u32 flags;
241 /* Number of grant references in @refs array. */
242 __u32 count;
243 /* OUT parameters. */
244 /* File descriptor of the dma-buf. */
245 __u32 fd;
246 /* The domain ID of the grant references to be mapped. */
247 __u32 domid;
248 /* Variable IN parameter. */
249 /* Array of grant references of size @count. */
250 __u32 refs[1];
251};
252
253/*
254 * This will block until the dma-buf with the file descriptor @fd is
255 * released. This is only valid for buffers created with
256 * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS.
257 *
258 * If within @wait_to_ms milliseconds the buffer is not released
259 * then -ETIMEDOUT error is returned.
260 * If the buffer with the file descriptor @fd does not exist or has already
261 * been released, then -ENOENT is returned. For valid file descriptors
262 * this must not be treated as error.
263 */
264#define IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED \
265 _IOC(_IOC_NONE, 'G', 10, \
266 sizeof(struct ioctl_gntdev_dmabuf_exp_wait_released))
267struct ioctl_gntdev_dmabuf_exp_wait_released {
268 /* IN parameters */
269 __u32 fd;
270 __u32 wait_to_ms;
271};
272
273/*
274 * Import a dma-buf with file descriptor @fd and export granted references
275 * to the pages of that dma-buf into array @refs of size @count.
276 */
277#define IOCTL_GNTDEV_DMABUF_IMP_TO_REFS \
278 _IOC(_IOC_NONE, 'G', 11, \
279 sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs))
280struct ioctl_gntdev_dmabuf_imp_to_refs {
281 /* IN parameters. */
282 /* File descriptor of the dma-buf. */
283 __u32 fd;
284 /* Number of grant references in @refs array. */
285 __u32 count;
286 /* The domain ID for which references to be granted. */
287 __u32 domid;
288 /* Reserved - must be zero. */
289 __u32 reserved;
290 /* OUT parameters. */
291 /* Array of grant references of size @count. */
292 __u32 refs[1];
293};
294
295/*
296 * This will close all references to the imported buffer with file descriptor
297 * @fd, so it can be released by the owner. This is only valid for buffers
298 * created with IOCTL_GNTDEV_DMABUF_IMP_TO_REFS.
299 */
300#define IOCTL_GNTDEV_DMABUF_IMP_RELEASE \
301 _IOC(_IOC_NONE, 'G', 12, \
302 sizeof(struct ioctl_gntdev_dmabuf_imp_release))
303struct ioctl_gntdev_dmabuf_imp_release {
304 /* IN parameters */
305 __u32 fd;
306 __u32 reserved;
307};
308
203#endif /* __LINUX_PUBLIC_GNTDEV_H__ */ 309#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
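The export ioctl takes a variable-length array of grant references appended to the fixed struct, so the caller sizes the allocation by hand. A hedged sketch follows; the header path <xen/gntdev.h> and the helper name are assumptions, fd is an open /dev/xen/gntdev, and on success the value reported back in op->fd is the dma-buf file descriptor.

/* Hedged sketch: export a dma-buf from grant references provided by a
 * foreign domain, using write-combine backing. */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>	/* assumption: installed uapi header path */

static int gntdev_export_dmabuf(int fd, __u32 domid,
				const __u32 *refs, __u32 count)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
	size_t sz = sizeof(*op) + (count - 1) * sizeof(__u32);
	int ret;

	op = calloc(1, sz);
	if (!op)
		return -1;

	op->flags = GNTDEV_DMA_FLAG_WC;
	op->count = count;
	op->domid = domid;
	memcpy(op->refs, refs, count * sizeof(__u32));

	ret = ioctl(fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
	if (!ret)
		ret = op->fd;	/* dma-buf file descriptor on success */

	free(op);
	return ret;
}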
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index 19aa65a35546..49a53ef8da96 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -38,6 +38,9 @@ enum {
38 38
39 MIPI_DSI_DCS_READ = 0x06, 39 MIPI_DSI_DCS_READ = 0x06,
40 40
41 MIPI_DSI_DCS_COMPRESSION_MODE = 0x07,
42 MIPI_DSI_PPS_LONG_WRITE = 0x0A,
43
41 MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37, 44 MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
42 45
43 MIPI_DSI_END_OF_TRANSMISSION = 0x08, 46 MIPI_DSI_END_OF_TRANSMISSION = 0x08,
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 2e37741f6b8d..9bc5bc07d4d3 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -198,6 +198,27 @@ void gnttab_free_auto_xlat_frames(void);
198int gnttab_alloc_pages(int nr_pages, struct page **pages); 198int gnttab_alloc_pages(int nr_pages, struct page **pages);
199void gnttab_free_pages(int nr_pages, struct page **pages); 199void gnttab_free_pages(int nr_pages, struct page **pages);
200 200
201#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
202struct gnttab_dma_alloc_args {
203 /* Device for which DMA memory will be/was allocated. */
204 struct device *dev;
205 /* If set then DMA buffer is coherent and write-combine otherwise. */
206 bool coherent;
207
208 int nr_pages;
209 struct page **pages;
210 xen_pfn_t *frames;
211 void *vaddr;
212 dma_addr_t dev_bus_addr;
213};
214
215int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
216int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
217#endif
218
219int gnttab_pages_set_private(int nr_pages, struct page **pages);
220void gnttab_pages_clear_private(int nr_pages, struct page **pages);
221
201int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 222int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
202 struct gnttab_map_grant_ref *kmap_ops, 223 struct gnttab_map_grant_ref *kmap_ops,
203 struct page **pages, unsigned int count); 224 struct page **pages, unsigned int count);
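gnttab_dma_alloc_pages() fills in vaddr and dev_bus_addr for a buffer backed by dma_alloc_wc or dma_alloc_coherent, depending on the coherent flag. A hedged in-kernel sketch follows; it assumes CONFIG_XEN_GRANT_DMA_ALLOC is enabled and that the caller keeps the args around for the matching gnttab_dma_free_pages().

/* Hedged sketch (kernel context): allocate DMA-able backing pages for a
 * grant-based buffer with the new helper.  Field values are
 * illustrative only. */
#include <linux/device.h>
#include <xen/grant_table.h>

static int example_alloc_dma_grant_pages(struct device *dev, int nr_pages,
					  struct page **pages,
					  xen_pfn_t *frames)
{
	struct gnttab_dma_alloc_args args = {
		.dev		= dev,
		.coherent	= true,		/* dma_alloc_coherent backing */
		.nr_pages	= nr_pages,
		.pages		= pages,
		.frames		= frames,
	};
	int ret;

	ret = gnttab_dma_alloc_pages(&args);
	if (ret)
		return ret;

	/* args.vaddr and args.dev_bus_addr now describe the buffer. */
	return 0;
}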
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
new file mode 100644
index 000000000000..80b52b4945e9
--- /dev/null
+++ b/include/xen/mem-reservation.h
@@ -0,0 +1,59 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3/*
4 * Xen memory reservation utilities.
5 *
6 * Copyright (c) 2003, B Dragovic
7 * Copyright (c) 2003-2004, M Williamson, K Fraser
8 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
9 * Copyright (c) 2010 Daniel Kiper
10 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
11 */
12
13#ifndef _XENMEM_RESERVATION_H
14#define _XENMEM_RESERVATION_H
15
16#include <linux/highmem.h>
17
18#include <xen/page.h>
19
20static inline void xenmem_reservation_scrub_page(struct page *page)
21{
22#ifdef CONFIG_XEN_SCRUB_PAGES
23 clear_highpage(page);
24#endif
25}
26
27#ifdef CONFIG_XEN_HAVE_PVMMU
28void __xenmem_reservation_va_mapping_update(unsigned long count,
29 struct page **pages,
30 xen_pfn_t *frames);
31
32void __xenmem_reservation_va_mapping_reset(unsigned long count,
33 struct page **pages);
34#endif
35
36static inline void xenmem_reservation_va_mapping_update(unsigned long count,
37 struct page **pages,
38 xen_pfn_t *frames)
39{
40#ifdef CONFIG_XEN_HAVE_PVMMU
41 if (!xen_feature(XENFEAT_auto_translated_physmap))
42 __xenmem_reservation_va_mapping_update(count, pages, frames);
43#endif
44}
45
46static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
47 struct page **pages)
48{
49#ifdef CONFIG_XEN_HAVE_PVMMU
50 if (!xen_feature(XENFEAT_auto_translated_physmap))
51 __xenmem_reservation_va_mapping_reset(count, pages);
52#endif
53}
54
55int xenmem_reservation_increase(int count, xen_pfn_t *frames);
56
57int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
58
59#endif
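The helpers above factor out the scrub / VA-mapping / reservation steps shared by balloon-like drivers. A hedged in-kernel sketch of returning pages to the hypervisor follows; the call order mirrors what such a driver would do, and the assumption that xenmem_reservation_decrease() returns the number of frames actually released is based on existing balloon-driver usage.

/* Hedged sketch (kernel context): hand a set of pages back to Xen.
 * Assumes the pages are no longer mapped or referenced elsewhere. */
#include <xen/mem-reservation.h>

static int example_return_pages_to_xen(struct page **pages, int count,
					xen_pfn_t *frames)
{
	int i;

	for (i = 0; i < count; i++) {
		xenmem_reservation_scrub_page(pages[i]);
		frames[i] = xen_page_to_gfn(pages[i]);
	}

	xenmem_reservation_va_mapping_reset(count, pages);

	/* Assumption: returns how many frames were actually released. */
	return xenmem_reservation_decrease(count, frames);
}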
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 9d4340c907d1..1e1d9bd0bd37 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -25,12 +25,16 @@ extern bool xen_pvh;
25#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) 25#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
26#define xen_pvh_domain() (xen_pvh) 26#define xen_pvh_domain() (xen_pvh)
27 27
28#include <linux/types.h>
29
30extern uint32_t xen_start_flags;
31
28#ifdef CONFIG_XEN_DOM0 32#ifdef CONFIG_XEN_DOM0
29#include <xen/interface/xen.h> 33#include <xen/interface/xen.h>
30#include <asm/xen/hypervisor.h> 34#include <asm/xen/hypervisor.h>
31 35
32#define xen_initial_domain() (xen_domain() && \ 36#define xen_initial_domain() (xen_domain() && \
33 xen_start_info && xen_start_info->flags & SIF_INITDOMAIN) 37 (xen_start_flags & SIF_INITDOMAIN))
34#else /* !CONFIG_XEN_DOM0 */ 38#else /* !CONFIG_XEN_DOM0 */
35#define xen_initial_domain() (0) 39#define xen_initial_domain() (0)
36#endif /* CONFIG_XEN_DOM0 */ 40#endif /* CONFIG_XEN_DOM0 */