Diffstat (limited to 'include')
-rw-r--r--include/asm-generic/audit_dir_write.h2
-rw-r--r--include/asm-generic/bitops/find.h20
-rw-r--r--include/asm-generic/clkdev.h30
-rw-r--r--include/asm-generic/dma-mapping.h10
-rw-r--r--include/asm-generic/error-injection.h35
-rw-r--r--include/asm-generic/pci_iomap.h6
-rw-r--r--include/asm-generic/pgtable.h25
-rw-r--r--include/asm-generic/qrwlock_types.h1
-rw-r--r--include/asm-generic/sections.h8
-rw-r--r--include/asm-generic/vmlinux.lds.h10
-rw-r--r--include/crypto/aead.h10
-rw-r--r--include/crypto/chacha20.h3
-rw-r--r--include/crypto/hash.h46
-rw-r--r--include/crypto/if_alg.h2
-rw-r--r--include/crypto/internal/hash.h2
-rw-r--r--include/crypto/internal/scompress.h11
-rw-r--r--include/crypto/null.h10
-rw-r--r--include/crypto/poly1305.h2
-rw-r--r--include/crypto/salsa20.h27
-rw-r--r--include/crypto/sha3.h6
-rw-r--r--include/crypto/skcipher.h11
-rw-r--r--include/drm/drmP.h197
-rw-r--r--include/drm/drm_atomic.h32
-rw-r--r--include/drm/drm_atomic_helper.h7
-rw-r--r--include/drm/drm_connector.h54
-rw-r--r--include/drm/drm_device.h9
-rw-r--r--include/drm/drm_dp_helper.h1
-rw-r--r--include/drm/drm_drv.h22
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_encoder.h6
-rw-r--r--include/drm/drm_fb_cma_helper.h13
-rw-r--r--include/drm/drm_fb_helper.h57
-rw-r--r--include/drm/drm_file.h2
-rw-r--r--include/drm/drm_framebuffer.h8
-rw-r--r--include/drm/drm_gem_cma_helper.h16
-rw-r--r--include/drm/drm_mm.h2
-rw-r--r--include/drm/drm_mode_config.h27
-rw-r--r--include/drm/drm_modeset_helper.h3
-rw-r--r--include/drm/drm_modeset_helper_vtables.h3
-rw-r--r--include/drm/drm_plane.h14
-rw-r--r--include/drm/drm_plane_helper.h5
-rw-r--r--include/drm/drm_print.h219
-rw-r--r--include/drm/drm_syncobj.h36
-rw-r--r--include/drm/drm_utils.h15
-rw-r--r--include/drm/drm_vma_manager.h2
-rw-r--r--include/drm/gpu_scheduler.h173
-rw-r--r--include/drm/gpu_scheduler_trace.h82
-rw-r--r--include/drm/i915_drm.h3
-rw-r--r--include/drm/i915_pciids.h32
-rw-r--r--include/drm/intel-gtt.h3
-rw-r--r--include/drm/spsc_queue.h122
-rw-r--r--include/drm/tinydrm/mipi-dbi.h4
-rw-r--r--include/drm/tinydrm/tinydrm.h8
-rw-r--r--include/drm/ttm/ttm_bo_api.h169
-rw-r--r--include/drm/ttm/ttm_bo_driver.h157
-rw-r--r--include/drm/ttm/ttm_memory.h75
-rw-r--r--include/drm/ttm/ttm_page_alloc.h11
-rw-r--r--include/dt-bindings/bus/ti-sysc.h22
-rw-r--r--include/dt-bindings/clock/am3.h108
-rw-r--r--include/dt-bindings/clock/am4.h113
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h52
-rw-r--r--include/dt-bindings/clock/axg-clkc.h71
-rw-r--r--include/dt-bindings/clock/dm814.h45
-rw-r--r--include/dt-bindings/clock/dm816.h53
-rw-r--r--include/dt-bindings/clock/dra7.h172
-rw-r--r--include/dt-bindings/clock/hi3660-clock.h7
-rw-r--r--include/dt-bindings/clock/jz4770-cgu.h58
-rw-r--r--include/dt-bindings/clock/omap5.h118
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h222
-rw-r--r--include/dt-bindings/clock/sprd,sc9860-clk.h404
-rw-r--r--include/dt-bindings/gpio/aspeed-gpio.h49
-rw-r--r--include/dt-bindings/gpio/gpio.h6
-rw-r--r--include/dt-bindings/gpio/meson-axg-gpio.h116
-rw-r--r--include/dt-bindings/memory/tegra186-mc.h111
-rw-r--r--include/dt-bindings/pinctrl/am43xx.h3
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h6
-rw-r--r--include/dt-bindings/power/mt2712-power.h26
-rw-r--r--include/dt-bindings/power/owl-s700-powergate.h19
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-reset.h124
-rw-r--r--include/kvm/arm_psci.h51
-rw-r--r--include/linux/acpi.h14
-rw-r--r--include/linux/arch_topology.h2
-rw-r--r--include/linux/arm-smccc.h165
-rw-r--r--include/linux/arm_sdei.h79
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/backing-dev.h2
-rw-r--r--include/linux/bio.h24
-rw-r--r--include/linux/bitfield.h46
-rw-r--r--include/linux/bitmap.h63
-rw-r--r--include/linux/blk-cgroup.h8
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h46
-rw-r--r--include/linux/blkdev.h172
-rw-r--r--include/linux/bpf.h94
-rw-r--r--include/linux/bpf_types.h2
-rw-r--r--include/linux/bpf_verifier.h63
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/buffer_head.h6
-rw-r--r--include/linux/build_bug.h2
-rw-r--r--include/linux/bvec.h9
-rw-r--r--include/linux/can/dev.h7
-rw-r--r--include/linux/cgroup-defs.h2
-rw-r--r--include/linux/clk-provider.h43
-rw-r--r--include/linux/clk.h62
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/compat.h100
-rw-r--r--include/linux/compiler-clang.h8
-rw-r--r--include/linux/compiler.h22
-rw-r--r--include/linux/cper.h48
-rw-r--r--include/linux/cpufreq.h125
-rw-r--r--include/linux/cpuhotplug.h3
-rw-r--r--include/linux/cpuidle.h40
-rw-r--r--include/linux/cpumask.h2
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/crash_dump.h12
-rw-r--r--include/linux/crc-ccitt.h7
-rw-r--r--include/linux/crypto.h10
-rw-r--r--include/linux/dax.h2
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/device-mapper.h56
-rw-r--r--include/linux/device.h10
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-direct.h47
-rw-r--r--include/linux/dma-fence-array.h3
-rw-r--r--include/linux/dma-fence.h2
-rw-r--r--include/linux/dma-mapping.h23
-rw-r--r--include/linux/dsa/lan9303.h3
-rw-r--r--include/linux/efi.h46
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/error-injection.h27
-rw-r--r--include/linux/errseq.h2
-rw-r--r--include/linux/eventfd.h14
-rw-r--r--include/linux/f2fs_fs.h14
-rw-r--r--include/linux/fb.h10
-rw-r--r--include/linux/fdtable.h5
-rw-r--r--include/linux/filter.h40
-rw-r--r--include/linux/fpga/fpga-bridge.h14
-rw-r--r--include/linux/fpga/fpga-mgr.h39
-rw-r--r--include/linux/fpga/fpga-region.h40
-rw-r--r--include/linux/fs.h34
-rw-r--r--include/linux/fscrypt.h174
-rw-r--r--include/linux/fscrypt_notsupp.h59
-rw-r--r--include/linux/fscrypt_supp.h68
-rw-r--r--include/linux/fwnode.h4
-rw-r--r--include/linux/genetlink.h3
-rw-r--r--include/linux/genhd.h5
-rw-r--r--include/linux/genl_magic_func.h12
-rw-r--r--include/linux/gpio.h10
-rw-r--r--include/linux/gpio/consumer.h25
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/gpio/machine.h4
-rw-r--r--include/linux/hid.h22
-rw-r--r--include/linux/hil_mlc.h6
-rw-r--r--include/linux/hp_sdc.h2
-rw-r--r--include/linux/hrtimer.h113
-rw-r--r--include/linux/hugetlb.h21
-rw-r--r--include/linux/hyperv.h22
-rw-r--r--include/linux/i2c.h112
-rw-r--r--include/linux/i7300_idle.h84
-rw-r--r--include/linux/idr.h174
-rw-r--r--include/linux/if_link.h2
-rw-r--r--include/linux/if_macvlan.h2
-rw-r--r--include/linux/if_tap.h6
-rw-r--r--include/linux/if_tun.h21
-rw-r--r--include/linux/iio/iio.h4
-rw-r--r--include/linux/iio/machine.h7
-rw-r--r--include/linux/iio/trigger.h3
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/init.h9
-rw-r--r--include/linux/input/gpio_tilt.h74
-rw-r--r--include/linux/integrity.h1
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/ioport.h2
-rw-r--r--include/linux/irq_work.h11
-rw-r--r--include/linux/iversion.h337
-rw-r--r--include/linux/jbd2.h431
-rw-r--r--include/linux/jump_label.h4
-rw-r--r--include/linux/kallsyms.h74
-rw-r--r--include/linux/kasan.h15
-rw-r--r--include/linux/kfifo.h3
-rw-r--r--include/linux/kobject.h3
-rw-r--r--include/linux/kobject_ns.h3
-rw-r--r--include/linux/led-class-flash.h4
-rw-r--r--include/linux/libfdt.h1
-rw-r--r--include/linux/libnvdimm.h11
-rw-r--r--include/linux/lightnvm.h125
-rw-r--r--include/linux/livepatch.h4
-rw-r--r--include/linux/lockd/lockd.h9
-rw-r--r--include/linux/lockdep.h4
-rw-r--r--include/linux/lockref.h2
-rw-r--r--include/linux/mdio.h8
-rw-r--r--include/linux/memblock.h4
-rw-r--r--include/linux/memcontrol.h165
-rw-r--r--include/linux/memory_hotplug.h29
-rw-r--r--include/linux/memremap.h77
-rw-r--r--include/linux/mfd/axp20x.h5
-rw-r--r--include/linux/mfd/cros_ec.h4
-rw-r--r--include/linux/mfd/cros_ec_commands.h25
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/mfd/rave-sp.h60
-rw-r--r--include/linux/mfd/stm32-lptimer.h6
-rw-r--r--include/linux/mfd/stm32-timers.h4
-rw-r--r--include/linux/mfd/tmio.h20
-rw-r--r--include/linux/mlx5/device.h16
-rw-r--r--include/linux/mlx5/driver.h45
-rw-r--r--include/linux/mlx5/fs.h4
-rw-r--r--include/linux/mlx5/mlx5_ifc.h73
-rw-r--r--include/linux/mlx5/qp.h12
-rw-r--r--include/linux/mlx5/transobj.h23
-rw-r--r--include/linux/mlx5/vport.h4
-rw-r--r--include/linux/mm.h52
-rw-r--r--include/linux/mm_types.h154
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/mmu_notifier.h30
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/mod_devicetable.h19
-rw-r--r--include/linux/module.h27
-rw-r--r--include/linux/mtd/map.h130
-rw-r--r--include/linux/mtd/mtd.h28
-rw-r--r--include/linux/mtd/rawnand.h443
-rw-r--r--include/linux/mtd/spi-nor.h12
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/mux/consumer.h5
-rw-r--r--include/linux/mux/driver.h5
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/net_dim.h380
-rw-r--r--include/linux/netdev_features.h3
-rw-r--r--include/linux/netdevice.h60
-rw-r--r--include/linux/netfilter.h116
-rw-r--r--include/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/linux/netfilter/ipset/ip_set_counter.h25
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/netfilter_defs.h12
-rw-r--r--include/linux/netfilter_ipv4.h46
-rw-r--r--include/linux/netfilter_ipv6.h19
-rw-r--r--include/linux/nfs4.h12
-rw-r--r--include/linux/nospec.h72
-rw-r--r--include/linux/ntb.h51
-rw-r--r--include/linux/nubus.h189
-rw-r--r--include/linux/nvme.h22
-rw-r--r--include/linux/of.h13
-rw-r--r--include/linux/of_dma.h5
-rw-r--r--include/linux/of_fdt.h18
-rw-r--r--include/linux/of_gpio.h8
-rw-r--r--include/linux/of_graph.h5
-rw-r--r--include/linux/of_iommu.h5
-rw-r--r--include/linux/of_pci.h8
-rw-r--r--include/linux/of_pdt.h6
-rw-r--r--include/linux/of_platform.h7
-rw-r--r--include/linux/omap-gpmc.h28
-rw-r--r--include/linux/page-flags.h5
-rw-r--r--include/linux/pagevec.h6
-rw-r--r--include/linux/pci-aspm.h35
-rw-r--r--include/linux/pci-dma-compat.h27
-rw-r--r--include/linux/pci-ecam.h13
-rw-r--r--include/linux/pci-ep-cfs.h5
-rw-r--r--include/linux/pci-epc.h48
-rw-r--r--include/linux/pci-epf.h5
-rw-r--r--include/linux/pci.h480
-rw-r--r--include/linux/pci_hotplug.h16
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/percpu-refcount.h6
-rw-r--r--include/linux/percpu_counter.h6
-rw-r--r--include/linux/pfn_t.h13
-rw-r--r--include/linux/phy.h145
-rw-r--r--include/linux/phy_fixed.h9
-rw-r--r--include/linux/phylink.h201
-rw-r--r--include/linux/pinctrl/devinfo.h2
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/pinctrl/pinctrl.h1
-rw-r--r--include/linux/pipe_fs_i.h5
-rw-r--r--include/linux/platform_data/at24.h2
-rw-r--r--include/linux/platform_data/i2c-davinci.h5
-rw-r--r--include/linux/platform_data/i2c-pxa.h (renamed from include/linux/i2c/pxa-i2c.h)11
-rw-r--r--include/linux/platform_data/mlxcpld-hotplug.h99
-rw-r--r--include/linux/platform_data/mlxreg.h144
-rw-r--r--include/linux/platform_data/mms114.h24
-rw-r--r--include/linux/platform_data/mtd-onenand-omap2.h34
-rw-r--r--include/linux/platform_data/si5351.h2
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h6
-rw-r--r--include/linux/platform_data/ti-sysc.h86
-rw-r--r--include/linux/poll.h10
-rw-r--r--include/linux/posix-clock.h2
-rw-r--r--include/linux/posix-timers.h25
-rw-r--r--include/linux/posix_acl.h7
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/proc_ns.h3
-rw-r--r--include/linux/property.h23
-rw-r--r--include/linux/psci.h17
-rw-r--r--include/linux/ptr_ring.h79
-rw-r--r--include/linux/qcom_scm.h3
-rw-r--r--include/linux/qed/common_hsi.h1264
-rw-r--r--include/linux/qed/eth_common.h396
-rw-r--r--include/linux/qed/fcoe_common.h940
-rw-r--r--include/linux/qed/iscsi_common.h1585
-rw-r--r--include/linux/qed/iwarp_common.h17
-rw-r--r--include/linux/qed/qed_eth_if.h38
-rw-r--r--include/linux/qed/qed_if.h36
-rw-r--r--include/linux/qed/qed_iscsi_if.h2
-rw-r--r--include/linux/qed/qed_ll2_if.h2
-rw-r--r--include/linux/qed/rdma_common.h25
-rw-r--r--include/linux/qed/roce_common.h15
-rw-r--r--include/linux/qed/storage_common.h91
-rw-r--r--include/linux/qed/tcp_common.h165
-rw-r--r--include/linux/radix-tree.h17
-rw-r--r--include/linux/rcupdate.h25
-rw-r--r--include/linux/rcutiny.h1
-rw-r--r--include/linux/rcutree.h1
-rw-r--r--include/linux/refcount.h2
-rw-r--r--include/linux/regmap.h68
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/regulator/machine.h37
-rw-r--r--include/linux/remoteproc.h21
-rw-r--r--include/linux/reservation.h23
-rw-r--r--include/linux/reset.h50
-rw-r--r--include/linux/rhashtable.h38
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rpmsg.h4
-rw-r--r--include/linux/rtc.h1
-rw-r--r--include/linux/rtnetlink.h18
-rw-r--r--include/linux/rtsx_common.h (renamed from include/linux/mfd/rtsx_common.h)0
-rw-r--r--include/linux/rtsx_pci.h (renamed from include/linux/mfd/rtsx_pci.h)236
-rw-r--r--include/linux/rtsx_usb.h (renamed from include/linux/mfd/rtsx_usb.h)0
-rw-r--r--include/linux/scatterlist.h11
-rw-r--r--include/linux/sched.h18
-rw-r--r--include/linux/sched/cpufreq.h2
-rw-r--r--include/linux/sched/mm.h59
-rw-r--r--include/linux/sched/signal.h28
-rw-r--r--include/linux/sched/task.h14
-rw-r--r--include/linux/sched/task_stack.h2
-rw-r--r--include/linux/sched/topology.h12
-rw-r--r--include/linux/scif.h4
-rw-r--r--include/linux/sctp.h37
-rw-r--r--include/linux/seccomp.h8
-rw-r--r--include/linux/seq_file.h14
-rw-r--r--include/linux/seqlock.h3
-rw-r--r--include/linux/serdev.h17
-rw-r--r--include/linux/serial_core.h8
-rw-r--r--include/linux/sfp.h94
-rw-r--r--include/linux/sh_eth.h3
-rw-r--r--include/linux/shmem_fs.h6
-rw-r--r--include/linux/signal.h15
-rw-r--r--include/linux/siox.h77
-rw-r--r--include/linux/skb_array.h7
-rw-r--r--include/linux/skbuff.h23
-rw-r--r--include/linux/slab.h41
-rw-r--r--include/linux/slab_def.h3
-rw-r--r--include/linux/slimbus.h164
-rw-r--r--include/linux/slub_def.h3
-rw-r--r--include/linux/soc/brcmstb/brcmstb.h6
-rw-r--r--include/linux/soc/mediatek/infracfg.h7
-rw-r--r--include/linux/soc/qcom/qmi.h271
-rw-r--r--include/linux/soundwire/sdw.h479
-rw-r--r--include/linux/soundwire/sdw_intel.h24
-rw-r--r--include/linux/soundwire/sdw_registers.h194
-rw-r--r--include/linux/soundwire/sdw_type.h19
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/srcu.h4
-rw-r--r--include/linux/srcutree.h8
-rw-r--r--include/linux/stddef.h10
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/svc_rdma.h2
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/swiotlb.h12
-rw-r--r--include/linux/switchtec.h26
-rw-r--r--include/linux/sync_core.h21
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/sysfs.h14
-rw-r--r--include/linux/tcp.h13
-rw-r--r--include/linux/tee_drv.h196
-rw-r--r--include/linux/ti-emif-sram.h69
-rw-r--r--include/linux/torture.h8
-rw-r--r--include/linux/tpm.h39
-rw-r--r--include/linux/tpm_eventlog.h124
-rw-r--r--include/linux/trace_events.h7
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/tty_ldisc.h2
-rw-r--r--include/linux/uaccess.h8
-rw-r--r--include/linux/usb.h7
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/of.h21
-rw-r--r--include/linux/usb/pd.h2
-rw-r--r--include/linux/usb/pd_vdo.h2
-rw-r--r--include/linux/usb/renesas_usbhs.h9
-rw-r--r--include/linux/usb/tcpm.h16
-rw-r--r--include/linux/uuid.h1
-rw-r--r--include/linux/vbox_utils.h79
-rw-r--r--include/linux/vfio.h3
-rw-r--r--include/linux/visorbus.h344
-rw-r--r--include/linux/vmstat.h17
-rw-r--r--include/linux/w1-gpio.h9
-rw-r--r--include/linux/wait.h10
-rw-r--r--include/linux/zpool.h2
-rw-r--r--include/media/cec.h18
-rw-r--r--include/media/demux.h589
-rw-r--r--include/media/dmxdev.h212
-rw-r--r--include/media/drv-intf/cx2341x.h144
-rw-r--r--include/media/drv-intf/exynos-fimc.h3
-rw-r--r--include/media/drv-intf/msp3400.h62
-rw-r--r--include/media/drv-intf/saa7146.h2
-rw-r--r--include/media/dvb-usb-ids.h424
-rw-r--r--include/media/dvb_ca_en50221.h142
-rw-r--r--include/media/dvb_demux.h350
-rw-r--r--include/media/dvb_frontend.h795
-rw-r--r--include/media/dvb_math.h66
-rw-r--r--include/media/dvb_net.h93
-rw-r--r--include/media/dvb_ringbuffer.h280
-rw-r--r--include/media/dvb_vb2.h266
-rw-r--r--include/media/dvbdev.h407
-rw-r--r--include/media/i2c-addr.h43
-rw-r--r--include/media/i2c/as3645a.h66
-rw-r--r--include/media/i2c/bt819.h4
-rw-r--r--include/media/i2c/ir-kbd-i2c.h6
-rw-r--r--include/media/i2c/m52790.h52
-rw-r--r--include/media/i2c/saa7115.h12
-rw-r--r--include/media/i2c/tvaudio.h17
-rw-r--r--include/media/i2c/upd64031a.h6
-rw-r--r--include/media/lirc.h1
-rw-r--r--include/media/lirc_dev.h192
-rw-r--r--include/media/media-devnode.h2
-rw-r--r--include/media/media-entity.h11
-rw-r--r--include/media/rc-core.h69
-rw-r--r--include/media/rc-map.h54
-rw-r--r--include/media/soc_camera.h2
-rw-r--r--include/media/tpg/v4l2-tpg.h (renamed from include/media/v4l2-tpg.h)45
-rw-r--r--include/media/tuner-types.h15
-rw-r--r--include/media/v4l2-async.h39
-rw-r--r--include/media/v4l2-common.h145
-rw-r--r--include/media/v4l2-ctrls.h13
-rw-r--r--include/media/v4l2-dev.h142
-rw-r--r--include/media/v4l2-device.h246
-rw-r--r--include/media/v4l2-dv-timings.h16
-rw-r--r--include/media/v4l2-event.h36
-rw-r--r--include/media/v4l2-flash-led-class.h12
-rw-r--r--include/media/v4l2-fwnode.h12
-rw-r--r--include/media/v4l2-mediabus.h80
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/v4l2-subdev.h150
-rw-r--r--include/media/v4l2-tpg-colors.h68
-rw-r--r--include/media/videobuf-core.h2
-rw-r--r--include/media/videobuf-dvb.h10
-rw-r--r--include/media/videobuf2-core.h521
-rw-r--r--include/media/videobuf2-dvb.h11
-rw-r--r--include/media/videobuf2-memops.h8
-rw-r--r--include/media/videobuf2-v4l2.h117
-rw-r--r--include/misc/cxl.h2
-rw-r--r--include/misc/ocxl-config.h45
-rw-r--r--include/misc/ocxl.h214
-rw-r--r--include/net/act_api.h15
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/caif/cfpkt.h27
-rw-r--r--include/net/cfg80211.h17
-rw-r--r--include/net/devlink.h115
-rw-r--r--include/net/dn_route.h1
-rw-r--r--include/net/dsa.h61
-rw-r--r--include/net/dst.h39
-rw-r--r--include/net/erspan.h240
-rw-r--r--include/net/gen_stats.h3
-rw-r--r--include/net/inet_connection_sock.h4
-rw-r--r--include/net/inet_hashtables.h29
-rw-r--r--include/net/inet_sock.h25
-rw-r--r--include/net/inet_timewait_sock.h4
-rw-r--r--include/net/ip.h9
-rw-r--r--include/net/ip6_fib.h20
-rw-r--r--include/net/ip6_route.h11
-rw-r--r--include/net/ip6_tunnel.h4
-rw-r--r--include/net/ip_tunnels.h5
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/ipv6.h19
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/mac80211.h10
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h12
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h12
-rw-r--r--include/net/netfilter/nf_conntrack_count.h17
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h19
-rw-r--r--include/net/netfilter/nf_flow_table.h122
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h129
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h27
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h29
-rw-r--r--include/net/netns/can.h4
-rw-r--r--include/net/netns/core.h5
-rw-r--r--include/net/netns/netfilter.h12
-rw-r--r--include/net/netns/nftables.h8
-rw-r--r--include/net/netns/sctp.h5
-rw-r--r--include/net/pkt_cls.h113
-rw-r--r--include/net/pkt_sched.h17
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h150
-rw-r--r--include/net/sctp/constants.h9
-rw-r--r--include/net/sctp/sctp.h8
-rw-r--r--include/net/sctp/sm.h18
-rw-r--r--include/net/sctp/stream_interleave.h61
-rw-r--r--include/net/sctp/structs.h77
-rw-r--r--include/net/sctp/ulpevent.h23
-rw-r--r--include/net/sctp/ulpqueue.h10
-rw-r--r--include/net/sock.h65
-rw-r--r--include/net/tc_act/tc_csum.h16
-rw-r--r--include/net/tc_act/tc_mirred.h6
-rw-r--r--include/net/tcp.h54
-rw-r--r--include/net/tls.h2
-rw-r--r--include/net/udp.h2
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/wext.h4
-rw-r--r--include/net/xdp.h48
-rw-r--r--include/net/xfrm.h79
-rw-r--r--include/rdma/ib_addr.h38
-rw-r--r--include/rdma/ib_hdrs.h19
-rw-r--r--include/rdma/ib_sa.h10
-rw-r--r--include/rdma/ib_verbs.h84
-rw-r--r--include/rdma/opa_addr.h16
-rw-r--r--include/rdma/rdma_cm.h19
-rw-r--r--include/rdma/rdma_cm_ib.h8
-rw-r--r--include/rdma/rdma_vt.h31
-rw-r--r--include/rdma/restrack.h157
-rw-r--r--include/scsi/libsas.h30
-rw-r--r--include/scsi/scsi_cmnd.h4
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_proto.h1
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/scsi/scsi_transport_sas.h1
-rw-r--r--include/scsi/srp.h17
-rw-r--r--include/soc/tegra/mc.h9
-rw-r--r--include/soc/tegra/pmc.h12
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/soc.h2
-rw-r--r--include/trace/events/bridge.h4
-rw-r--r--include/trace/events/btrfs.h1
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/net_probe_common.h44
-rw-r--r--include/trace/events/rcu.h75
-rw-r--r--include/trace/events/rdma.h129
-rw-r--r--include/trace/events/rpcrdma.h890
-rw-r--r--include/trace/events/sctp.h99
-rw-r--r--include/trace/events/siox.h66
-rw-r--r--include/trace/events/sock.h117
-rw-r--r--include/trace/events/sunrpc.h12
-rw-r--r--include/trace/events/tcp.h76
-rw-r--r--include/trace/events/timer.h37
-rw-r--r--include/trace/events/vmscan.h23
-rw-r--r--include/uapi/asm-generic/poll.h44
-rw-r--r--include/uapi/asm-generic/siginfo.h109
-rw-r--r--include/uapi/drm/amdgpu_drm.h12
-rw-r--r--include/uapi/drm/drm_fourcc.h38
-rw-r--r--include/uapi/drm/exynos_drm.h192
-rw-r--r--include/uapi/drm/i915_drm.h77
-rw-r--r--include/uapi/linux/arm_sdei.h73
-rw-r--r--include/uapi/linux/batadv_packet.h644
-rw-r--r--include/uapi/linux/batman_adv.h27
-rw-r--r--include/uapi/linux/bpf.h116
-rw-r--r--include/uapi/linux/bpf_common.h7
-rw-r--r--include/uapi/linux/btrfs.h11
-rw-r--r--include/uapi/linux/btrfs_tree.h2
-rw-r--r--include/uapi/linux/can/netlink.h1
-rw-r--r--include/uapi/linux/devlink.h25
-rw-r--r--include/uapi/linux/dvb/dmx.h63
-rw-r--r--include/uapi/linux/dvb/frontend.h40
-rw-r--r--include/uapi/linux/dvb/version.h2
-rw-r--r--include/uapi/linux/dvb/video.h20
-rw-r--r--include/uapi/linux/elf.h1
-rw-r--r--include/uapi/linux/erspan.h52
-rw-r--r--include/uapi/linux/ethtool.h1
-rw-r--r--include/uapi/linux/fs.h6
-rw-r--r--include/uapi/linux/gfs2_ondisk.h62
-rw-r--r--include/uapi/linux/i2c.h3
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h5
-rw-r--r--include/uapi/linux/if_macsec.h9
-rw-r--r--include/uapi/linux/if_tun.h2
-rw-r--r--include/uapi/linux/if_tunnel.h3
-rw-r--r--include/uapi/linux/inet_diag.h2
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/input.h11
-rw-r--r--include/uapi/linux/kfd_ioctl.h15
-rw-r--r--include/uapi/linux/l2tp.h6
-rw-r--r--include/uapi/linux/lightnvm.h9
-rw-r--r--include/uapi/linux/lirc.h82
-rw-r--r--include/uapi/linux/lp.h12
-rw-r--r--include/uapi/linux/membarrier.h74
-rw-r--r--include/uapi/linux/ndctl.h56
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h6
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h76
-rw-r--r--include/uapi/linux/netfilter/xt_connlimit.h2
-rw-r--r--include/uapi/linux/netfilter_arp.h3
-rw-r--r--include/uapi/linux/netfilter_decnet.h4
-rw-r--r--include/uapi/linux/netfilter_ipv4.h1
-rw-r--r--include/uapi/linux/netfilter_ipv6.h1
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_srh.h57
-rw-r--r--include/uapi/linux/nfs.h1
-rw-r--r--include/uapi/linux/nl80211.h4
-rw-r--r--include/uapi/linux/nubus.h23
-rw-r--r--include/uapi/linux/openvswitch.h1
-rw-r--r--include/uapi/linux/pci_regs.h30
-rw-r--r--include/uapi/linux/perf_event.h32
-rw-r--r--include/uapi/linux/psci.h3
-rw-r--r--include/uapi/linux/ptrace.h6
-rw-r--r--include/uapi/linux/rtnetlink.h12
-rw-r--r--include/uapi/linux/sched.h5
-rw-r--r--include/uapi/linux/sctp.h3
-rw-r--r--include/uapi/linux/switchtec_ioctl.h3
-rw-r--r--include/uapi/linux/tee.h37
-rw-r--r--include/uapi/linux/tipc.h7
-rw-r--r--include/uapi/linux/types.h6
-rw-r--r--include/uapi/linux/usb/ch9.h4
-rw-r--r--include/uapi/linux/usbdevice_fs.h2
-rw-r--r--include/uapi/linux/uuid.h1
-rw-r--r--include/uapi/linux/uvcvideo.h26
-rw-r--r--include/uapi/linux/v4l2-controls.h96
-rw-r--r--include/uapi/linux/vbox_err.h151
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h226
-rw-r--r--include/uapi/linux/vboxguest.h330
-rw-r--r--include/uapi/linux/vfio.h72
-rw-r--r--include/uapi/linux/videodev2.h63
-rw-r--r--include/uapi/linux/virtio_balloon.h3
-rw-r--r--include/uapi/linux/virtio_net.h13
-rw-r--r--include/uapi/misc/cxl.h10
-rw-r--r--include/uapi/misc/ocxl.h49
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h9
-rw-r--r--include/uapi/rdma/ib_user_verbs.h11
-rw-r--r--include/uapi/rdma/mlx4-abi.h7
-rw-r--r--include/uapi/rdma/mlx5-abi.h53
-rw-r--r--include/uapi/rdma/rdma_netlink.h61
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h12
-rw-r--r--include/video/exynos5433_decon.h209
-rw-r--r--include/video/exynos7_decon.h349
-rw-r--r--include/video/imx-ipu-v3.h2
-rw-r--r--include/video/udlfb.h3
637 files changed, 25834 insertions, 8232 deletions
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index da09fb986459..dd5a9dd7a102 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -27,7 +27,9 @@ __NR_mknod,
 __NR_mkdirat,
 __NR_mknodat,
 __NR_unlinkat,
+#ifdef __NR_renameat
 __NR_renameat,
+#endif
 __NR_linkat,
 __NR_symlinkat,
 #endif
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 1ba611e16fa0..8a1ee10014de 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
 		size, unsigned long offset);
 #endif
 
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_next_and_bit(const unsigned long *addr1,
+		const unsigned long *addr2, unsigned long size,
+		unsigned long offset);
+#endif
+
 #ifndef find_next_zero_bit
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
@@ -55,8 +71,12 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr,
 					 unsigned long size);
 #else /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
+#ifndef find_first_bit
 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#endif
+#ifndef find_first_zero_bit
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+#endif
 
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
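A minimal usage sketch of the new find_next_and_bit() helper (not part of the patch; the bitmap names and handle_cpu() are hypothetical):

#include <linux/bitops.h>

/* Hypothetical helper: visit every bit index set in both bitmaps. */
static void for_each_common_bit(const unsigned long *active,
				const unsigned long *online,
				unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_next_and_bit(active, online, nbits, 0);
	     bit < nbits;
	     bit = find_next_and_bit(active, online, nbits, bit + 1))
		handle_cpu(bit);	/* hypothetical per-bit work */
}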
diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h
deleted file mode 100644
index 4ff334749ed5..000000000000
--- a/include/asm-generic/clkdev.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * include/asm-generic/clkdev.h
- *
- * Based on the ARM clkdev.h:
- * Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#ifndef __ASM_CLKDEV_H
-#define __ASM_CLKDEV_H
-
-#include <linux/slab.h>
-
-#ifndef CONFIG_COMMON_CLK
-struct clk;
-
-static inline int __clk_get(struct clk *clk) { return 1; }
-static inline void __clk_put(struct clk *clk) { }
-#endif
-
-static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
-{
-	return kzalloc(size, GFP_KERNEL);
-}
-
-#endif
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000000..880a292d792f
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+	return &dma_direct_ops;
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
new file mode 100644
index 000000000000..296c65442f00
--- /dev/null
+++ b/include/asm-generic/error-injection.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_ERROR_INJECTION_H
+#define _ASM_GENERIC_ERROR_INJECTION_H
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+enum {
+	EI_ETYPE_NONE,		/* Dummy value for undefined case */
+	EI_ETYPE_NULL,		/* Return NULL if failure */
+	EI_ETYPE_ERRNO,		/* Return -ERRNO if failure */
+	EI_ETYPE_ERRNO_NULL,	/* Return -ERRNO or NULL if failure */
+};
+
+struct error_injection_entry {
+	unsigned long	addr;
+	int		etype;
+};
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Whitelist generating macro. Specify functions which can be
+ * error-injectable using this macro.
+ */
+#define ALLOW_ERROR_INJECTION(fname, _etype)				\
+static struct error_injection_entry __used				\
+	__attribute__((__section__("_error_injection_whitelist")))	\
+	_eil_addr_##fname = {						\
+		.addr = (unsigned long)fname,				\
+		.etype = EI_ETYPE_##_etype,				\
+	};
+#else
+#define ALLOW_ERROR_INJECTION(fname, _etype)
+#endif
+#endif
+
+#endif /* _ASM_GENERIC_ERROR_INJECTION_H */
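For context, a hedged sketch (not part of the patch; the function name is made up, and the include assumes the linux/error-injection.h wrapper listed in the diffstat above) of how a function gets whitelisted for error injection:

#include <linux/error-injection.h>

/* Hypothetical kernel function that may legitimately fail with an errno. */
static int example_alloc_resource(void)
{
	return 0;
}
/* Allow kprobe/BPF based error injection to force an -ERRNO return here. */
ALLOW_ERROR_INJECTION(example_alloc_resource, ERRNO);

The entry lands in the _error_injection_whitelist section that the vmlinux.lds.h change below collects.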
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index b1e17fcee2d0..854f96ad5ccb 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /* Generic I/O port emulation, based on MN10300 code
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 #ifndef __ASM_GENERIC_PCI_IOMAP_H
 #define __ASM_GENERIC_PCI_IOMAP_H
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 868e68561f91..2cfa3075d148 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -309,19 +309,26 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #endif
 
-#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-			    pmd_t *pmdp);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
-static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
-					   unsigned long address, pmd_t *pmdp)
-{
-
-}
-#endif
-
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with a CPU which sets these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+			     pmd_t *pmdp);
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 {
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 137ecdd16daa..c36f1d5a2572 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -3,6 +3,7 @@
 #define __ASM_GENERIC_QRWLOCK_TYPES_H
 
 #include <linux/types.h>
+#include <asm/byteorder.h>
 #include <asm/spinlock_types.h>
 
 /*
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 03cc5f9bba71..849cd8eb5ca0 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -30,6 +30,7 @@
  *             __ctors_start, __ctors_end
  *             __irqentry_text_start, __irqentry_text_end
  *             __softirqentry_text_start, __softirqentry_text_end
+ *             __start_opd, __end_opd
  */
 extern char _text[], _stext[], _etext[];
 extern char _data[], _sdata[], _edata[];
@@ -49,12 +50,15 @@ extern char __start_once[], __end_once[];
 /* Start and end of .ctors section - used for constructor calls. */
 extern char __ctors_start[], __ctors_end[];
 
+/* Start and end of .opd section - used for function descriptors. */
+extern char __start_opd[], __end_opd[];
+
 extern __visible const void __nosave_begin, __nosave_end;
 
-/* function descriptor handling (if any). Override
- * in asm/sections.h */
+/* Function descriptor handling (if any). Override in asm/sections.h */
 #ifndef dereference_function_descriptor
 #define dereference_function_descriptor(p) (p)
+#define dereference_kernel_function_descriptor(p) (p)
 #endif
 
 /* random extra sections (if any). Override
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a564b83bf013..1ab0e520d6fc 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -136,6 +136,15 @@
 #define KPROBE_BLACKLIST()
 #endif
 
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#define ERROR_INJECT_WHITELIST()	STRUCT_ALIGN();			      \
+			VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
+			KEEP(*(_error_injection_whitelist))		      \
+			VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
+#else
+#define ERROR_INJECT_WHITELIST()
+#endif
+
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	. = ALIGN(8);					\
 			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
@@ -568,6 +577,7 @@
 			FTRACE_EVENTS()					\
 			TRACE_SYSCALLS()				\
 			KPROBE_BLACKLIST()				\
+			ERROR_INJECT_WHITELIST()			\
 			MEM_DISCARD(init.rodata)			\
 			CLK_OF_TABLES()					\
 			RESERVEDMEM_OF_TABLES()				\
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 03b97629442c..1e26f790b03f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
  */
 static inline int crypto_aead_encrypt(struct aead_request *req)
 {
-	return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return crypto_aead_alg(aead)->encrypt(req);
 }
 
 /**
@@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 
+	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
 	if (req->cryptlen < crypto_aead_authsize(aead))
 		return -EINVAL;
 
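A minimal sketch of the behaviour these checks enforce (the algorithm name is illustrative and error handling is omitted, not taken from the patch): an AEAD request issued before any key has been set now fails cleanly with -ENOKEY instead of reaching the algorithm's encrypt path.

	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
	int err;

	/* No crypto_aead_setkey() yet: CRYPTO_TFM_NEED_KEY is still set on
	 * the tfm, so the request is rejected before ->encrypt() runs. */
	err = crypto_aead_encrypt(req);		/* err == -ENOKEY */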
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index caaa470389e0..b83d66073db0 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,12 +13,13 @@
 #define CHACHA20_IV_SIZE	16
 #define CHACHA20_KEY_SIZE	32
 #define CHACHA20_BLOCK_SIZE	64
+#define CHACHA20_BLOCK_WORDS	(CHACHA20_BLOCK_SIZE / sizeof(u32))
 
 struct chacha20_ctx {
 	u32 key[8];
 };
 
-void chacha20_block(u32 *state, void *stream);
+void chacha20_block(u32 *state, u32 *stream);
 void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
 int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			   unsigned int keysize);
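A small illustrative sketch (not from the patch) of what the typed prototype buys callers: the keystream buffer is now naturally declared in 32-bit words rather than cast from a byte buffer.

	u32 state[16];			/* assumed already set up, e.g. via crypto_chacha20_init() */
	u32 stream[CHACHA20_BLOCK_WORDS];	/* one 64-byte keystream block, held as u32 words */

	chacha20_block(state, stream);	/* produce one keystream block */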
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0ed31fd80242..2d1849dffb80 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -71,12 +71,11 @@ struct ahash_request {
 
 /**
  * struct ahash_alg - asynchronous message digest definition
- * @init: Initialize the transformation context. Intended only to initialize the
+ * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
  *	  state of the HASH transformation at the beginning. This shall fill in
  *	  the internal structures used during the entire duration of the whole
  *	  transformation. No data processing happens at this point.
- *	  Note: mandatory.
- * @update: Push a chunk of data into the driver for transformation. This
+ * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
  *	    function actually pushes blocks of data from upper layers into the
  *	    driver, which then passes those to the hardware as seen fit. This
  *	    function must not finalize the HASH transformation by calculating the
@@ -85,20 +84,17 @@ struct ahash_request {
  *	    context, as this function may be called in parallel with the same
  *	    transformation object. Data processing can happen synchronously
  *	    [SHASH] or asynchronously [AHASH] at this point.
- *	    Note: mandatory.
- * @final: Retrieve result from the driver. This function finalizes the
+ * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
  *	   transformation and retrieves the resulting hash from the driver and
  *	   pushes it back to upper layers. No data processing happens at this
  *	   point unless hardware requires it to finish the transformation
  *	   (then the data buffered by the device driver is processed).
- *	   Note: mandatory.
- * @finup: Combination of @update and @final. This function is effectively a
+ * @finup: **[optional]** Combination of @update and @final. This function is effectively a
  *	   combination of @update and @final calls issued in sequence. As some
  *	   hardware cannot do @update and @final separately, this callback was
  *	   added to allow such hardware to be used at least by IPsec. Data
  *	   processing can happen synchronously [SHASH] or asynchronously [AHASH]
  *	   at this point.
- *	   Note: optional.
  * @digest: Combination of @init and @update and @final. This function
  *	    effectively behaves as the entire chain of operations, @init,
  *	    @update and @final issued in sequence. Just like @finup, this was
@@ -210,7 +206,6 @@ struct crypto_ahash {
 			      unsigned int keylen);
 
 	unsigned int reqsize;
-	bool has_setkey;
 	struct crypto_tfm base;
 };
 
@@ -410,11 +405,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen);
 
-static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
-{
-	return tfm->has_setkey;
-}
-
 /**
  * crypto_ahash_finup() - update and finalize message digest
  * @req: reference to the ahash_request handle that holds all information
@@ -487,7 +477,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
  */
 static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
 {
-	return crypto_ahash_reqtfm(req)->import(req, in);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return tfm->import(req, in);
 }
 
 /**
@@ -503,7 +498,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
  */
 static inline int crypto_ahash_init(struct ahash_request *req)
 {
-	return crypto_ahash_reqtfm(req)->init(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return tfm->init(req);
 }
 
 /**
@@ -855,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
  */
 static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
 {
-	return crypto_shash_alg(desc->tfm)->import(desc, in);
+	struct crypto_shash *tfm = desc->tfm;
+
+	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return crypto_shash_alg(tfm)->import(desc, in);
 }
 
 /**
@@ -871,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
  */
 static inline int crypto_shash_init(struct shash_desc *desc)
 {
-	return crypto_shash_alg(desc->tfm)->init(desc);
+	struct crypto_shash *tfm = desc->tfm;
+
+	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
+	return crypto_shash_alg(tfm)->init(desc);
 }
 
 /**
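For context, a hedged sketch (algorithm choice, key buffer and error handling are illustrative, not from the patch) of the keyed-hash behaviour these checks give callers: a keyed shash such as HMAC refuses to start until a key has been set.

	static const u8 key[32];	/* placeholder key material */
	struct crypto_shash *tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_init(desc);			/* -ENOKEY: no key set yet */

	crypto_shash_setkey(tfm, key, sizeof(key));	/* clears CRYPTO_TFM_NEED_KEY */
	err = crypto_shash_init(desc);			/* now starts the digest normally */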
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index f38227a78eae..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
 			int offset, size_t size, int flags);
 void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
 			 poll_table *wait);
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
 					   unsigned int areqlen);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index c2bae8da642c..27040a46d50a 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
 	return alg->setkey != shash_no_setkey;
 }
 
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
 			    struct hash_alg_common *alg,
 			    struct crypto_instance *inst);
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index ccad9b2c9bd6..0f6ddac1acfc 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -28,17 +28,6 @@ struct crypto_scomp {
  * @free_ctx:	Function frees context allocated with alloc_ctx
  * @compress:	Function performs a compress operation
  * @decompress:	Function performs a de-compress operation
- * @init:	Initialize the cryptographic transformation object.
- *		This function is used to initialize the cryptographic
- *		transformation object. This function is called only once at
- *		the instantiation time, right after the transformation context
- *		was allocated. In case the cryptographic hardware has some
- *		special requirements which need to be handled by software, this
- *		function shall check for the precise requirement of the
- *		transformation and put any software fallbacks in place.
- * @exit:	Deinitialize the cryptographic transformation object. This is a
- *		counterpart to @init, used to remove various changes set in
- *		@init.
  * @base:	Common crypto API algorithm data structure
  */
 struct scomp_alg {
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 5757c0a4b321..15aeef6e30ef 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -12,14 +12,4 @@
 struct crypto_skcipher *crypto_get_default_null_skcipher(void);
 void crypto_put_default_null_skcipher(void);
 
-static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void)
-{
-	return crypto_get_default_null_skcipher();
-}
-
-static inline void crypto_put_default_null_skcipher2(void)
-{
-	crypto_put_default_null_skcipher();
-}
-
 #endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index c65567d01e8e..f718a19da82f 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -31,8 +31,6 @@ struct poly1305_desc_ctx {
 };
 
 int crypto_poly1305_init(struct shash_desc *desc);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
-			   const u8 *key, unsigned int keylen);
 unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
 					const u8 *src, unsigned int srclen);
 int crypto_poly1305_update(struct shash_desc *desc,
diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h
new file mode 100644
index 000000000000..19ed48aefc86
--- /dev/null
+++ b/include/crypto/salsa20.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Salsa20 algorithm
+ */
+
+#ifndef _CRYPTO_SALSA20_H
+#define _CRYPTO_SALSA20_H
+
+#include <linux/types.h>
+
+#define SALSA20_IV_SIZE		8
+#define SALSA20_MIN_KEY_SIZE	16
+#define SALSA20_MAX_KEY_SIZE	32
+#define SALSA20_BLOCK_SIZE	64
+
+struct crypto_skcipher;
+
+struct salsa20_ctx {
+	u32 initial_state[16];
+};
+
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+			 const u8 *iv);
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			  unsigned int keysize);
+
+#endif /* _CRYPTO_SALSA20_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index b9d9bd553b48..080f60c2e6b1 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -19,7 +19,6 @@
 
 struct sha3_state {
 	u64		st[25];
-	unsigned int	md_len;
 	unsigned int	rsiz;
 	unsigned int	rsizw;
 
@@ -27,4 +26,9 @@ struct sha3_state {
 	u8		buf[SHA3_224_BLOCK_SIZE];
 };
 
+int crypto_sha3_init(struct shash_desc *desc);
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+		       unsigned int len);
+int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+
 #endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 562001cb412b..2f327f090c3e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
 	return tfm->setkey(tfm, key, keylen);
 }
 
-static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
-{
-	return tfm->keysize;
-}
-
 static inline unsigned int crypto_skcipher_default_keysize(
 	struct crypto_skcipher *tfm)
 {
@@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
+	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
 	return tfm->encrypt(req);
 }
 
@@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
+	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+		return -ENOKEY;
+
 	return tfm->decrypt(req);
 }
 
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 59be1232d005..c6666cd09347 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -75,6 +75,7 @@
 #include <drm/drm_sarea.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_prime.h>
+#include <drm/drm_print.h>
 #include <drm/drm_pci.h>
 #include <drm/drm_file.h>
 #include <drm/drm_debugfs.h>
@@ -94,212 +95,16 @@ struct dma_buf_attachment;
 struct pci_dev;
 struct pci_controller;
 
-/*
- * The following categories are defined:
- *
- * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
- *	 This is the category used by the DRM_DEBUG() macro.
- *
- * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
- *	   This is the category used by the DRM_DEBUG_DRIVER() macro.
- *
- * KMS: used in the modesetting code.
- *	This is the category used by the DRM_DEBUG_KMS() macro.
- *
- * PRIME: used in the prime code.
- *	  This is the category used by the DRM_DEBUG_PRIME() macro.
- *
- * ATOMIC: used in the atomic code.
- *	   This is the category used by the DRM_DEBUG_ATOMIC() macro.
- *
- * VBL: used for verbose debug message in the vblank code
- *	This is the category used by the DRM_DEBUG_VBL() macro.
- *
- * Enabling verbose debug messages is done through the drm.debug parameter,
- * each category being enabled by a bit.
- *
- * drm.debug=0x1 will enable CORE messages
- * drm.debug=0x2 will enable DRIVER messages
- * drm.debug=0x3 will enable CORE and DRIVER messages
- * ...
- * drm.debug=0x3f will enable all messages
- *
- * An interesting feature is that it's possible to enable verbose logging at
- * run-time by echoing the debug value in its sysfs node:
- *   # echo 0xf > /sys/module/drm/parameters/debug
- */
-#define DRM_UT_NONE		0x00
-#define DRM_UT_CORE		0x01
-#define DRM_UT_DRIVER		0x02
-#define DRM_UT_KMS		0x04
-#define DRM_UT_PRIME		0x08
-#define DRM_UT_ATOMIC		0x10
-#define DRM_UT_VBL		0x20
-#define DRM_UT_STATE		0x40
-#define DRM_UT_LEASE		0x80
-
 /***********************************************************************/
 /** \name DRM template customization defaults */
 /*@{*/
 
 /***********************************************************************/
-/** \name Macros to make printk easier */
-/*@{*/
-
-#define _DRM_PRINTK(once, level, fmt, ...)				\
-	do {								\
-		printk##once(KERN_##level "[" DRM_NAME "] " fmt,	\
-			     ##__VA_ARGS__);				\
-	} while (0)
-
-#define DRM_INFO(fmt, ...)						\
-	_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE(fmt, ...)						\
-	_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN(fmt, ...)						\
-	_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
-
-#define DRM_INFO_ONCE(fmt, ...)						\
-	_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE_ONCE(fmt, ...)						\
-	_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN_ONCE(fmt, ...)						\
-	_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
-
-/**
- * Error output.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_ERROR(dev, fmt, ...)					\
-	drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
-		       fmt, ##__VA_ARGS__)
-#define DRM_ERROR(fmt, ...)						\
-	drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__)
-
-/**
- * Rate limited error output. Like DRM_ERROR() but won't flood the log.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...)			\
-({									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-									\
-	if (__ratelimit(&_rs))						\
-		DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__);			\
-})
-#define DRM_ERROR_RATELIMITED(fmt, ...)					\
-	DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_INFO(dev, fmt, ...)					\
-	drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt,	\
-		       ##__VA_ARGS__)
-
-#define DRM_DEV_INFO_ONCE(dev, fmt, ...)				\
-({									\
-	static bool __print_once __read_mostly;				\
-	if (!__print_once) {						\
-		__print_once = true;					\
-		DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__);			\
-	}								\
-})
-
-/**
- * Debug output.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_DEBUG(dev, fmt, args...)				\
-	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt,	\
-		       ##args)
-#define DRM_DEBUG(fmt, ...)						\
-	drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...)				\
-	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "",	\
-		       fmt, ##args)
-#define DRM_DEBUG_DRIVER(fmt, ...)					\
-	drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_KMS(dev, fmt, args...)				\
-	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt,	\
-		       ##args)
-#define DRM_DEBUG_KMS(fmt, ...)						\
-	drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...)				\
-	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "",	\
-		       fmt, ##args)
-#define DRM_DEBUG_PRIME(fmt, ...)					\
-	drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...)				\
-	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "",	\
-		       fmt, ##args)
-#define DRM_DEBUG_ATOMIC(fmt, ...)					\
-	drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-
-#define DRM_DEV_DEBUG_VBL(dev, fmt, args...)				\
-	drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt,	\
-		       ##args)
-#define DRM_DEBUG_VBL(fmt, ...)						\
-	drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-
-#define DRM_DEBUG_LEASE(fmt, ...)					\
-	drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
-
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...)	\
-({									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-	if (__ratelimit(&_rs))						\
-		drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level,	\
-			       __func__, "", fmt, ##args);		\
-})
-
-/**
- * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
- *
- * \param fmt printf() like format string.
- * \param arg arguments
- */
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...)			\
-	DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
-#define DRM_DEBUG_RATELIMITED(fmt, args...)				\
-	DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
277#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \
278 _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
279#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \
280 DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
281#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \
282 _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
283#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \
284 DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
285#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \
286 _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
287#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
288 DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
289
290/* Format strings and argument splitters to simplify printing
291 * various "complex" objects
292 */
293
294/*@}*/
295
296/***********************************************************************/
297/** \name Internal types and structures */ 103/** \name Internal types and structures */
298/*@{*/ 104/*@{*/
299 105
300#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 106#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
301 107
302
303/** 108/**
304 * drm_drv_uses_atomic_modeset - check if the driver implements 109 * drm_drv_uses_atomic_modeset - check if the driver implements
305 * atomic_commit() 110 * atomic_commit()
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 5afd6e364fb6..1c27526c499e 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -189,12 +189,40 @@ struct drm_private_state_funcs {
189 struct drm_private_state *state); 189 struct drm_private_state *state);
190}; 190};
191 191
192/**
193 * struct drm_private_obj - base struct for driver private atomic object
194 *
195 * A driver private object is initialized by calling
196 * drm_atomic_private_obj_init() and cleaned up by calling
197 * drm_atomic_private_obj_fini().
198 *
199 * Currently only tracks the state update functions and the opaque driver
200 * private state itself, but in the future might also track which
201 * &drm_modeset_lock is required to duplicate and update this object's state.
202 */
192struct drm_private_obj { 203struct drm_private_obj {
204 /**
205 * @state: Current atomic state for this driver private object.
206 */
193 struct drm_private_state *state; 207 struct drm_private_state *state;
194 208
209 /**
210 * @funcs:
211 *
212 * Functions to manipulate the state of this driver private object, see
213 * &drm_private_state_funcs.
214 */
195 const struct drm_private_state_funcs *funcs; 215 const struct drm_private_state_funcs *funcs;
196}; 216};
197 217
218/**
219 * struct drm_private_state - base struct for driver private object state
220 * @state: backpointer to global drm_atomic_state
221 *
222 * Currently only contains a backpointer to the overall atomic update, but in
223 * the future also might hold synchronization information similar to e.g.
224 * &drm_crtc.commit.
225 */
198struct drm_private_state { 226struct drm_private_state {
199 struct drm_atomic_state *state; 227 struct drm_atomic_state *state;
200}; 228};
@@ -218,6 +246,10 @@ struct __drm_private_objs_state {
218 * @num_private_objs: size of the @private_objs array 246 * @num_private_objs: size of the @private_objs array
219 * @private_objs: pointer to array of private object pointers 247 * @private_objs: pointer to array of private object pointers
220 * @acquire_ctx: acquire context for this atomic modeset state update 248 * @acquire_ctx: acquire context for this atomic modeset state update
249 *
250 * States are added to an atomic update by calling drm_atomic_get_crtc_state(),
251 * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for
252 * private state structures, drm_atomic_get_private_obj_state().
221 */ 253 */
222struct drm_atomic_state { 254struct drm_atomic_state {
223 struct kref ref; 255 struct kref ref;
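As a worked example of the subclassing pattern described in the new kernel-doc above, a driver-private object might look like the sketch below. The foo_* names are hypothetical and the duplicate/destroy hooks only illustrate the &drm_private_state_funcs contract; real drivers will carry more state.

#include <linux/slab.h>
#include <drm/drm_atomic.h>

struct foo_bus_state {
	struct drm_private_state base;
	unsigned int bandwidth_used;
};

static struct drm_private_state *
foo_bus_duplicate_state(struct drm_private_obj *obj)
{
	/* Start the new state from a copy of the current one. */
	struct foo_bus_state *state = kmemdup(obj->state, sizeof(*state),
					      GFP_KERNEL);

	return state ? &state->base : NULL;
}

static void foo_bus_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(container_of(state, struct foo_bus_state, base));
}

static const struct drm_private_state_funcs foo_bus_state_funcs = {
	.atomic_duplicate_state	= foo_bus_duplicate_state,
	.atomic_destroy_state	= foo_bus_destroy_state,
};

/* At init time, with an initial foo_bus_state already allocated:
 *	drm_atomic_private_obj_init(&foo->bus_obj, &initial->base,
 *				    &foo_bus_state_funcs);
 * and in atomic_check the per-update copy is obtained with
 *	drm_atomic_get_private_obj_state(state, &foo->bus_obj);
 */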
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d2b56cc657e9..4842ee9485ce 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -38,6 +38,13 @@ struct drm_private_state;
38 38
39int drm_atomic_helper_check_modeset(struct drm_device *dev, 39int drm_atomic_helper_check_modeset(struct drm_device *dev,
40 struct drm_atomic_state *state); 40 struct drm_atomic_state *state);
41int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
42 const struct drm_crtc_state *crtc_state,
43 const struct drm_rect *clip,
44 int min_scale,
45 int max_scale,
46 bool can_position,
47 bool can_update_disabled);
41int drm_atomic_helper_check_planes(struct drm_device *dev, 48int drm_atomic_helper_check_planes(struct drm_device *dev,
42 struct drm_atomic_state *state); 49 struct drm_atomic_state *state);
43int drm_atomic_helper_check(struct drm_device *dev, 50int drm_atomic_helper_check(struct drm_device *dev,
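The new helper takes over from drm_plane_helper_check_state(), which is removed from drm_plane_helper.h further down. A minimal plane ->atomic_check built on it could look like the following sketch; the clip rectangle, scaling limits and foo_ name are illustrative.

#include <linux/err.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_rect clip = { 0 };

	if (!state->crtc)
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Clip to the full CRTC area; no scaling, no off-screen positioning. */
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	return drm_atomic_helper_check_plane_state(state, crtc_state, &clip,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   false, true);
}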
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 5971577016a2..ed38df4ac204 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -177,6 +177,35 @@ enum drm_link_status {
177}; 177};
178 178
179/** 179/**
180 * enum drm_panel_orientation - panel_orientation info for &drm_display_info
181 *
182 * This enum is used to track the (LCD) panel orientation. There are no
183 * separate #defines for the uapi!
184 *
185 * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any
186 * panel orientation information (normal
187 * for non-panels); in this case the "panel
188 * orientation" connector prop will not be
189 * attached.
190 * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the
191 * top side of the device's casing.
192 * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the
193 * bottom side of the device's casing, i.e.
194 * the panel is mounted upside-down.
195 * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the
196 * top side of the device's casing.
197 * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the
198 * top side of the device's casing.
199 */
200enum drm_panel_orientation {
201 DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1,
202 DRM_MODE_PANEL_ORIENTATION_NORMAL = 0,
203 DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP,
204 DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
205 DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
206};
207
208/**
180 * struct drm_display_info - runtime data about the connected sink 209 * struct drm_display_info - runtime data about the connected sink
181 * 210 *
182 * Describes a given display (e.g. CRT or flat panel) and its limitations. For 211 * Describes a given display (e.g. CRT or flat panel) and its limitations. For
@@ -224,6 +253,15 @@ struct drm_display_info {
224#define DRM_COLOR_FORMAT_YCRCB420 (1<<3) 253#define DRM_COLOR_FORMAT_YCRCB420 (1<<3)
225 254
226 /** 255 /**
256 * @panel_orientation: Read-only connector property for built-in panels,
257 * indicating the orientation of the panel vs the device's casing.
258 * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN.
259 * When not UNKNOWN this is used by the fb helpers to rotate the fbdev
260 * framebuffer to compensate, and is exported to userspace as a property.
261 */
262 int panel_orientation;
263
264 /**
227 * @color_formats: HDMI Color formats, selects between RGB and YCrCb 265 * @color_formats: HDMI Color formats, selects between RGB and YCrCb
228 * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones 266 * modes. Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones
229 * as used to describe the pixel format in framebuffers, and also don't 267 * as used to describe the pixel format in framebuffers, and also don't
@@ -271,6 +309,11 @@ struct drm_display_info {
271 bool dvi_dual; 309 bool dvi_dual;
272 310
273 /** 311 /**
312 * @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
313 */
314 bool has_hdmi_infoframe;
315
316 /**
274 * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even 317 * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
275 * more stuff redundant with @bus_formats. 318 * more stuff redundant with @bus_formats.
276 */ 319 */
@@ -705,7 +748,6 @@ struct drm_cmdline_mode {
705 * @force: a DRM_FORCE_<foo> state for forced mode sets 748 * @force: a DRM_FORCE_<foo> state for forced mode sets
706 * @override_edid: has the EDID been overwritten through debugfs for testing? 749 * @override_edid: has the EDID been overwritten through debugfs for testing?
707 * @encoder_ids: valid encoders for this connector 750 * @encoder_ids: valid encoders for this connector
708 * @encoder: encoder driving this connector, if any
709 * @eld: EDID-like data, if present 751 * @eld: EDID-like data, if present
710 * @latency_present: AV delay info from ELD, if found 752 * @latency_present: AV delay info from ELD, if found
711 * @video_latency: video latency info from ELD, if found 753 * @video_latency: video latency info from ELD, if found
@@ -875,7 +917,13 @@ struct drm_connector {
875 917
876#define DRM_CONNECTOR_MAX_ENCODER 3 918#define DRM_CONNECTOR_MAX_ENCODER 3
877 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 919 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
878 struct drm_encoder *encoder; /* currently active encoder */ 920 /**
921 * @encoder: Currently bound encoder driving this connector, if any.
922 * Only really meaningful for non-atomic drivers. Atomic drivers should
923 * instead look at &drm_connector_state.best_encoder, and in case they
924 * need the CRTC driving this output, &drm_connector_state.crtc.
925 */
926 struct drm_encoder *encoder;
879 927
880#define MAX_ELD_BYTES 128 928#define MAX_ELD_BYTES 128
881 /* EDID bits */ 929 /* EDID bits */
@@ -1035,6 +1083,8 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
1035 const struct edid *edid); 1083 const struct edid *edid);
1036void drm_mode_connector_set_link_status_property(struct drm_connector *connector, 1084void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
1037 uint64_t link_status); 1085 uint64_t link_status);
1086int drm_connector_init_panel_orientation_property(
1087 struct drm_connector *connector, int width, int height);
1038 1088
1039/** 1089/**
1040 * struct drm_tile_group - Tile group metadata 1090 * struct drm_tile_group - Tile group metadata
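A driver that knows its LCD is mounted upside-down would typically fill in @panel_orientation before registering the connector and then attach the new property; a minimal sketch (hypothetical foo_ prefix, orientation value chosen for illustration):

#include <drm/drm_connector.h>

static int foo_connector_set_orientation(struct drm_connector *connector,
					 int width, int height)
{
	/* Report a panel mounted upside-down in the device's casing. */
	connector->display_info.panel_orientation =
		DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;

	/* Exposes the "panel orientation" property to userspace. */
	return drm_connector_init_panel_orientation_property(connector,
							     width, height);
}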
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index e21af87a2f3c..7c4fa32f3fc6 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -17,6 +17,7 @@ struct drm_vblank_crtc;
17struct drm_sg_mem; 17struct drm_sg_mem;
18struct drm_local_map; 18struct drm_local_map;
19struct drm_vma_offset_manager; 19struct drm_vma_offset_manager;
20struct drm_fb_helper;
20 21
21struct inode; 22struct inode;
22 23
@@ -185,6 +186,14 @@ struct drm_device {
185 struct drm_vma_offset_manager *vma_offset_manager; 186 struct drm_vma_offset_manager *vma_offset_manager;
186 /*@} */ 187 /*@} */
187 int switch_power_state; 188 int switch_power_state;
189
190 /**
191 * @fb_helper:
192 *
193 * Pointer to the fbdev emulation structure.
194 * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
195 */
196 struct drm_fb_helper *fb_helper;
188}; 197};
189 198
190#endif 199#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2623a1255481..da58a428c8d7 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -635,6 +635,7 @@
635# define DP_SET_POWER_D0 0x1 635# define DP_SET_POWER_D0 0x1
636# define DP_SET_POWER_D3 0x2 636# define DP_SET_POWER_D3 0x2
637# define DP_SET_POWER_MASK 0x3 637# define DP_SET_POWER_MASK 0x3
638# define DP_SET_POWER_D3_AUX_ON 0x5
638 639
639#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ 640#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
640# define DP_EDP_11 0x00 641# define DP_EDP_11 0x00
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 412e83a4d3db..d32b688eb346 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -39,6 +39,7 @@ struct drm_minor;
39struct dma_buf_attachment; 39struct dma_buf_attachment;
40struct drm_display_mode; 40struct drm_display_mode;
41struct drm_mode_create_dumb; 41struct drm_mode_create_dumb;
42struct drm_printer;
42 43
43/* driver capabilities and requirements mask */ 44/* driver capabilities and requirements mask */
44#define DRIVER_USE_AGP 0x1 45#define DRIVER_USE_AGP 0x1
@@ -429,6 +430,20 @@ struct drm_driver {
429 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); 430 void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
430 431
431 /** 432 /**
433 * @gem_print_info:
434 *
435 * If driver subclasses struct &drm_gem_object, it can implement this
436 * optional hook for printing additional driver specific info.
437 *
438 * drm_printf_indent() should be used in the callback passing it the
439 * indent argument.
440 *
441 * This callback is called from drm_gem_print_info().
442 */
443 void (*gem_print_info)(struct drm_printer *p, unsigned int indent,
444 const struct drm_gem_object *obj);
445
446 /**
432 * @gem_create_object: constructor for gem objects 447 * @gem_create_object: constructor for gem objects
433 * 448 *
434 * Hook for allocating the GEM object struct, for use by core 449 * Hook for allocating the GEM object struct, for use by core
@@ -592,13 +607,6 @@ struct drm_driver {
592 int dev_priv_size; 607 int dev_priv_size;
593}; 608};
594 609
595__printf(6, 7)
596void drm_dev_printk(const struct device *dev, const char *level,
597 unsigned int category, const char *function_name,
598 const char *prefix, const char *format, ...);
599__printf(3, 4)
600void drm_printk(const char *level, unsigned int category,
601 const char *format, ...);
602extern unsigned int drm_debug; 610extern unsigned int drm_debug;
603 611
604int drm_dev_init(struct drm_device *dev, 612int drm_dev_init(struct drm_device *dev,
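A driver that subclasses &drm_gem_object can wire the new @gem_print_info hook up roughly as follows; the foo_gem_object layout and the fields printed are hypothetical, only the hook signature and the drm_printf_indent() usage follow the headers.

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

struct foo_gem_object {
	struct drm_gem_object base;
	int pin_count;
	u32 domain;
};

static void foo_gem_print_info(struct drm_printer *p, unsigned int indent,
			       const struct drm_gem_object *obj)
{
	const struct foo_gem_object *bo =
		container_of(obj, struct foo_gem_object, base);

	/* Extra per-BO lines appended to the core GEM debugfs dump. */
	drm_printf_indent(p, indent, "pinned=%d\n", bo->pin_count);
	drm_printf_indent(p, indent, "domain=0x%x\n", bo->domain);
}

static struct drm_driver foo_driver = {
	/* ... other members ... */
	.gem_print_info = foo_gem_print_info,
};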
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index efe6d5a8e834..8d89a9c3748d 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -333,7 +333,6 @@ struct drm_encoder;
333struct drm_connector; 333struct drm_connector;
334struct drm_display_mode; 334struct drm_display_mode;
335 335
336void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
337int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); 336int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
338int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); 337int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
339int drm_av_sync_delay(struct drm_connector *connector, 338int drm_av_sync_delay(struct drm_connector *connector,
@@ -357,6 +356,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
357 bool is_hdmi2_sink); 356 bool is_hdmi2_sink);
358int 357int
359drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, 358drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
359 struct drm_connector *connector,
360 const struct drm_display_mode *mode); 360 const struct drm_display_mode *mode);
361void 361void
362drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, 362drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index ee4cfbe63c52..fb299696c7c4 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -88,7 +88,6 @@ struct drm_encoder_funcs {
88 * @head: list management 88 * @head: list management
89 * @base: base KMS object 89 * @base: base KMS object
90 * @name: human readable name, can be overwritten by the driver 90 * @name: human readable name, can be overwritten by the driver
91 * @crtc: currently bound CRTC
92 * @bridge: bridge associated to the encoder 91 * @bridge: bridge associated to the encoder
93 * @funcs: control functions 92 * @funcs: control functions
94 * @helper_private: mid-layer private data 93 * @helper_private: mid-layer private data
@@ -166,6 +165,11 @@ struct drm_encoder {
166 */ 165 */
167 uint32_t possible_clones; 166 uint32_t possible_clones;
168 167
168 /**
169 * @crtc: Currently bound CRTC, only really meaningful for non-atomic
170 * drivers. Atomic drivers should instead check
171 * &drm_connector_state.crtc.
172 */
169 struct drm_crtc *crtc; 173 struct drm_crtc *crtc;
170 struct drm_bridge *bridge; 174 struct drm_bridge *bridge;
171 const struct drm_encoder_funcs *funcs; 175 const struct drm_encoder_funcs *funcs;
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index faf56c53df28..d532f88a8d55 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,6 +16,13 @@ struct drm_mode_fb_cmd2;
16struct drm_plane; 16struct drm_plane;
17struct drm_plane_state; 17struct drm_plane_state;
18 18
19int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
20 unsigned int preferred_bpp, unsigned int max_conn_count,
21 const struct drm_framebuffer_funcs *funcs);
22int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
23 unsigned int max_conn_count);
24void drm_fb_cma_fbdev_fini(struct drm_device *dev);
25
19struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, 26struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
20 unsigned int preferred_bpp, unsigned int max_conn_count, 27 unsigned int preferred_bpp, unsigned int max_conn_count,
21 const struct drm_framebuffer_funcs *funcs); 28 const struct drm_framebuffer_funcs *funcs);
@@ -36,11 +43,5 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
36 struct drm_plane_state *state, 43 struct drm_plane_state *state,
37 unsigned int plane); 44 unsigned int plane);
38 45
39#ifdef CONFIG_DEBUG_FS
40struct seq_file;
41
42int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
43#endif
44
45#endif 46#endif
46 47
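With the new drm_fb_cma_fbdev_init()/drm_fb_cma_fbdev_fini() entry points the fbdev lifetime can be tied to a driver's bind/unbind path; a sketch with a hypothetical foo_ driver, where the bpp and connector-count values are only illustrative:

#include <linux/device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_print.h>

static int foo_drm_bind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* preferred_bpp = 32, max_conn_count = 0 (values illustrative) */
	ret = drm_fb_cma_fbdev_init(drm, 32, 0);
	if (ret)
		DRM_DEV_ERROR(dev, "fbdev init failed: %d\n", ret);

	return 0;
}

static void foo_drm_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_fb_cma_fbdev_fini(drm);
	drm_dev_unregister(drm);
}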
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 33fe95927742..b069433e7fc1 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -33,6 +33,7 @@
33struct drm_fb_helper; 33struct drm_fb_helper;
34 34
35#include <drm/drm_crtc.h> 35#include <drm/drm_crtc.h>
36#include <drm/drm_device.h>
36#include <linux/kgdb.h> 37#include <linux/kgdb.h>
37 38
38enum mode_set_atomic { 39enum mode_set_atomic {
@@ -48,6 +49,7 @@ struct drm_fb_helper_crtc {
48 struct drm_mode_set mode_set; 49 struct drm_mode_set mode_set;
49 struct drm_display_mode *desired_mode; 50 struct drm_display_mode *desired_mode;
50 int x, y; 51 int x, y;
52 int rotation;
51}; 53};
52 54
53/** 55/**
@@ -159,6 +161,13 @@ struct drm_fb_helper {
159 int connector_count; 161 int connector_count;
160 int connector_info_alloc_count; 162 int connector_info_alloc_count;
161 /** 163 /**
164 * @sw_rotations:
165 * Bitmask of all rotations requested for panel-orientation which
166 * could not be handled in hardware. If only one bit is set,
167 * fbdev->fbcon_rotate_hint gets set to the requested rotation.
168 */
169 int sw_rotations;
170 /**
162 * @connector_info: 171 * @connector_info:
163 * 172 *
164 * Array of per-connector information. Do not iterate directly, but use 173 * Array of per-connector information. Do not iterate directly, but use
@@ -267,6 +276,7 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
267 276
268void drm_fb_helper_deferred_io(struct fb_info *info, 277void drm_fb_helper_deferred_io(struct fb_info *info,
269 struct list_head *pagelist); 278 struct list_head *pagelist);
279int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper);
270 280
271ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 281ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
272 size_t count, loff_t *ppos); 282 size_t count, loff_t *ppos);
@@ -310,6 +320,16 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn);
310int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); 320int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
311int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 321int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
312 struct drm_connector *connector); 322 struct drm_connector *connector);
323
324int drm_fb_helper_fbdev_setup(struct drm_device *dev,
325 struct drm_fb_helper *fb_helper,
326 const struct drm_fb_helper_funcs *funcs,
327 unsigned int preferred_bpp,
328 unsigned int max_conn_count);
329void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
330
331void drm_fb_helper_lastclose(struct drm_device *dev);
332void drm_fb_helper_output_poll_changed(struct drm_device *dev);
313#else 333#else
314static inline void drm_fb_helper_prepare(struct drm_device *dev, 334static inline void drm_fb_helper_prepare(struct drm_device *dev,
315 struct drm_fb_helper *helper, 335 struct drm_fb_helper *helper,
@@ -321,11 +341,17 @@ static inline int drm_fb_helper_init(struct drm_device *dev,
321 struct drm_fb_helper *helper, 341 struct drm_fb_helper *helper,
322 int max_conn) 342 int max_conn)
323{ 343{
344 /* So drivers can use it to free the struct */
345 helper->dev = dev;
346 dev->fb_helper = helper;
347
324 return 0; 348 return 0;
325} 349}
326 350
327static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) 351static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
328{ 352{
353 if (helper && helper->dev)
354 helper->dev->fb_helper = NULL;
329} 355}
330 356
331static inline int drm_fb_helper_blank(int blank, struct fb_info *info) 357static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -398,6 +424,11 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info,
398{ 424{
399} 425}
400 426
427static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
428{
429 return -ENODEV;
430}
431
401static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, 432static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
402 char __user *buf, size_t count, 433 char __user *buf, size_t count,
403 loff_t *ppos) 434 loff_t *ppos)
@@ -507,6 +538,32 @@ drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
507 return 0; 538 return 0;
508} 539}
509 540
541static inline int
542drm_fb_helper_fbdev_setup(struct drm_device *dev,
543 struct drm_fb_helper *fb_helper,
544 const struct drm_fb_helper_funcs *funcs,
545 unsigned int preferred_bpp,
546 unsigned int max_conn_count)
547{
548 /* So drivers can use it to free the struct */
549 dev->fb_helper = fb_helper;
550
551 return 0;
552}
553
554static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
555{
556 dev->fb_helper = NULL;
557}
558
559static inline void drm_fb_helper_lastclose(struct drm_device *dev)
560{
561}
562
563static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
564{
565}
566
510#endif 567#endif
511 568
512static inline int 569static inline int
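Putting drm_fb_helper_fbdev_setup()/teardown() together with the new lastclose and output_poll_changed helpers, a non-CMA driver could set up generic fbdev emulation along these lines; a sketch with a hypothetical foo_ driver, where the fb_probe body is left as a placeholder:

#include <linux/slab.h>
#include <drm/drm_fb_helper.h>

static int foo_fbdev_probe(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	/* Allocate the framebuffer and fill in helper->fb here. */
	return -ENOSYS;	/* placeholder in this sketch */
}

static const struct drm_fb_helper_funcs foo_fb_helper_funcs = {
	.fb_probe = foo_fbdev_probe,
};

static int foo_fbdev_setup(struct drm_device *drm)
{
	struct drm_fb_helper *fb_helper;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return -ENOMEM;

	/* Registers fbdev emulation and stores the pointer in drm->fb_helper. */
	return drm_fb_helper_fbdev_setup(drm, fb_helper, &foo_fb_helper_funcs,
					 32, 0);
}

/* The driver would then point drm_driver.lastclose at
 * drm_fb_helper_lastclose(), drm_mode_config_funcs.output_poll_changed at
 * drm_fb_helper_output_poll_changed(), and tear down with
 * drm_fb_helper_fbdev_teardown(drm).
 */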
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 0e0c868451a5..5176c3797680 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -364,7 +364,7 @@ int drm_open(struct inode *inode, struct file *filp);
364ssize_t drm_read(struct file *filp, char __user *buffer, 364ssize_t drm_read(struct file *filp, char __user *buffer,
365 size_t count, loff_t *offset); 365 size_t count, loff_t *offset);
366int drm_release(struct inode *inode, struct file *filp); 366int drm_release(struct inode *inode, struct file *filp);
367unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 367__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
368int drm_event_reserve_init_locked(struct drm_device *dev, 368int drm_event_reserve_init_locked(struct drm_device *dev,
369 struct drm_file *file_priv, 369 struct drm_file *file_priv,
370 struct drm_pending_event *p, 370 struct drm_pending_event *p,
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 4c5ee4ae54df..c50502c656e5 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -121,6 +121,12 @@ struct drm_framebuffer {
121 * @base: base modeset object structure, contains the reference count. 121 * @base: base modeset object structure, contains the reference count.
122 */ 122 */
123 struct drm_mode_object base; 123 struct drm_mode_object base;
124
125 /**
126 * @comm: Name of the process allocating the fb, used for fb dumping.
127 */
128 char comm[TASK_COMM_LEN];
129
124 /** 130 /**
125 * @format: framebuffer format information 131 * @format: framebuffer format information
126 */ 132 */
@@ -264,7 +270,7 @@ static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
264 * 270 *
265 * This functions returns the framebuffer's reference count. 271 * This functions returns the framebuffer's reference count.
266 */ 272 */
267static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb) 273static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb)
268{ 274{
269 return kref_read(&fb->base.refcount); 275 return kref_read(&fb->base.refcount);
270} 276}
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 520e3feb502c..19777145cf8e 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -9,7 +9,9 @@
9 * struct drm_gem_cma_object - GEM object backed by CMA memory allocations 9 * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
10 * @base: base GEM object 10 * @base: base GEM object
11 * @paddr: physical address of the backing memory 11 * @paddr: physical address of the backing memory
12 * @sgt: scatter/gather table for imported PRIME buffers 12 * @sgt: scatter/gather table for imported PRIME buffers. The table can have
13 * more than one entry but they are guaranteed to have contiguous
14 * DMA addresses.
13 * @vaddr: kernel virtual address of the backing memory 15 * @vaddr: kernel virtual address of the backing memory
14 */ 16 */
15struct drm_gem_cma_object { 17struct drm_gem_cma_object {
@@ -21,11 +23,8 @@ struct drm_gem_cma_object {
21 void *vaddr; 23 void *vaddr;
22}; 24};
23 25
24static inline struct drm_gem_cma_object * 26#define to_drm_gem_cma_obj(gem_obj) \
25to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) 27 container_of(gem_obj, struct drm_gem_cma_object, base)
26{
27 return container_of(gem_obj, struct drm_gem_cma_object, base);
28}
29 28
30#ifndef CONFIG_MMU 29#ifndef CONFIG_MMU
31#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ 30#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
@@ -91,9 +90,8 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
91 unsigned long flags); 90 unsigned long flags);
92#endif 91#endif
93 92
94#ifdef CONFIG_DEBUG_FS 93void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
95void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); 94 const struct drm_gem_object *obj);
96#endif
97 95
98struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); 96struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
99struct drm_gem_object * 97struct drm_gem_object *
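For CMA-based drivers the removed drm_gem_cma_describe() debugfs path is replaced by plugging drm_gem_cma_print_info() into the generic @gem_print_info hook, and the macro form of to_drm_gem_cma_obj() is used exactly as the old inline was; a sketch with hypothetical foo_ names:

#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver foo_cma_driver = {
	/* ... other members ... */
	.gem_print_info	= drm_gem_cma_print_info,
};

static dma_addr_t foo_gem_paddr(struct drm_gem_object *obj)
{
	/* Same usage as before, despite the inline becoming a macro. */
	return to_drm_gem_cma_obj(obj)->paddr;
}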
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 8d10fc97801c..101f566ae43d 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -386,7 +386,7 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm,
386 * @color: opaque tag value to use for this node 386 * @color: opaque tag value to use for this node
387 * @mode: fine-tune the allocation search and placement 387 * @mode: fine-tune the allocation search and placement
388 * 388 *
389 * This is a simplified version of drm_mm_insert_node_in_range_generic() with no 389 * This is a simplified version of drm_mm_insert_node_in_range() with no
390 * range restrictions applied. 390 * range restrictions applied.
391 * 391 *
392 * The preallocated node must be cleared to 0. 392 * The preallocated node must be cleared to 0.
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index b0ce26d71296..2cb6f02df64a 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -269,6 +269,9 @@ struct drm_mode_config_funcs {
269 * state easily. If this hook is implemented, drivers must also 269 * state easily. If this hook is implemented, drivers must also
270 * implement @atomic_state_clear and @atomic_state_free. 270 * implement @atomic_state_clear and @atomic_state_free.
271 * 271 *
272 * Subclassing of &drm_atomic_state is deprecated in favour of using
273 * &drm_private_state and &drm_private_obj.
274 *
272 * RETURNS: 275 * RETURNS:
273 * 276 *
274 * A new &drm_atomic_state on success or NULL on failure. 277 * A new &drm_atomic_state on success or NULL on failure.
@@ -290,6 +293,9 @@ struct drm_mode_config_funcs {
290 * 293 *
291 * Drivers that implement this must call drm_atomic_state_default_clear() 294 * Drivers that implement this must call drm_atomic_state_default_clear()
292 * to clear common state. 295 * to clear common state.
296 *
297 * Subclassing of &drm_atomic_state is deprecated in favour of using
298 * &drm_private_state and &drm_private_obj.
293 */ 299 */
294 void (*atomic_state_clear)(struct drm_atomic_state *state); 300 void (*atomic_state_clear)(struct drm_atomic_state *state);
295 301
@@ -302,6 +308,9 @@ struct drm_mode_config_funcs {
302 * 308 *
303 * Drivers that implement this must call 309 * Drivers that implement this must call
304 * drm_atomic_state_default_release() to release common resources. 310 * drm_atomic_state_default_release() to release common resources.
311 *
312 * Subclassing of &drm_atomic_state is deprecated in favour of using
313 * &drm_private_state and &drm_private_obj.
305 */ 314 */
306 void (*atomic_state_free)(struct drm_atomic_state *state); 315 void (*atomic_state_free)(struct drm_atomic_state *state);
307}; 316};
@@ -751,6 +760,13 @@ struct drm_mode_config {
751 */ 760 */
752 struct drm_property *non_desktop_property; 761 struct drm_property *non_desktop_property;
753 762
763 /**
764 * @panel_orientation_property: Optional connector property indicating
765 * how the lcd-panel is mounted inside the casing (e.g. normal or
766 * upside-down).
767 */
768 struct drm_property *panel_orientation_property;
769
754 /* dumb ioctl parameters */ 770 /* dumb ioctl parameters */
755 uint32_t preferred_depth, prefer_shadow; 771 uint32_t preferred_depth, prefer_shadow;
756 772
@@ -768,7 +784,7 @@ struct drm_mode_config {
768 bool allow_fb_modifiers; 784 bool allow_fb_modifiers;
769 785
770 /** 786 /**
771 * @modifiers: Plane property to list support modifier/format 787 * @modifiers_property: Plane property to list support modifier/format
772 * combination. 788 * combination.
773 */ 789 */
774 struct drm_property *modifiers_property; 790 struct drm_property *modifiers_property;
@@ -776,6 +792,15 @@ struct drm_mode_config {
776 /* cursor size */ 792 /* cursor size */
777 uint32_t cursor_width, cursor_height; 793 uint32_t cursor_width, cursor_height;
778 794
795 /**
796 * @suspend_state:
797 *
798 * Atomic state when suspended.
799 * Set by drm_mode_config_helper_suspend() and cleared by
800 * drm_mode_config_helper_resume().
801 */
802 struct drm_atomic_state *suspend_state;
803
779 const struct drm_mode_config_helper_funcs *helper_private; 804 const struct drm_mode_config_helper_funcs *helper_private;
780}; 805};
781 806
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
index cb0ec92e11e6..efa337f03129 100644
--- a/include/drm/drm_modeset_helper.h
+++ b/include/drm/drm_modeset_helper.h
@@ -34,4 +34,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
34int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, 34int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
35 const struct drm_crtc_funcs *funcs); 35 const struct drm_crtc_funcs *funcs);
36 36
37int drm_mode_config_helper_suspend(struct drm_device *dev);
38int drm_mode_config_helper_resume(struct drm_device *dev);
39
37#endif 40#endif
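The new suspend/resume helpers pair naturally with dev_pm_ops; a sketch of how a driver might use them (hypothetical foo_ driver, assuming drvdata points at the &drm_device, with the suspended state stashed in drm_mode_config.suspend_state by the helpers):

#include <linux/device.h>
#include <linux/pm.h>
#include <drm/drm_modeset_helper.h>

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);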
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 16646c44b7df..3e76ca805b0f 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -801,9 +801,6 @@ struct drm_connector_helper_funcs {
801 * resolution can call drm_add_modes_noedid(), and mark the preferred 801 * resolution can call drm_add_modes_noedid(), and mark the preferred
802 * one using drm_set_preferred_mode(). 802 * one using drm_set_preferred_mode().
803 * 803 *
804 * Finally drivers that support audio probably want to update the ELD
805 * data, too, using drm_edid_to_eld().
806 *
807 * This function is only called after the @detect hook has indicated 804 * This function is only called after the @detect hook has indicated
808 * that a sink is connected and when the EDID isn't overridden through 805 * that a sink is connected and when the EDID isn't overridden through
809 * sysfs or the kernel commandline. 806 * sysfs or the kernel commandline.
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 571615079230..8185e3468a23 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -474,8 +474,8 @@ enum drm_plane_type {
474 * @format_types: array of formats supported by this plane 474 * @format_types: array of formats supported by this plane
475 * @format_count: number of formats supported 475 * @format_count: number of formats supported
476 * @format_default: driver hasn't supplied supported formats for the plane 476 * @format_default: driver hasn't supplied supported formats for the plane
477 * @crtc: currently bound CRTC 477 * @modifiers: array of modifiers supported by this plane
478 * @fb: currently bound fb 478 * @modifier_count: number of modifiers supported
479 * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by 479 * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
480 * drm_mode_set_config_internal() to implement correct refcounting. 480 * drm_mode_set_config_internal() to implement correct refcounting.
481 * @funcs: helper functions 481 * @funcs: helper functions
@@ -512,7 +512,17 @@ struct drm_plane {
512 uint64_t *modifiers; 512 uint64_t *modifiers;
513 unsigned int modifier_count; 513 unsigned int modifier_count;
514 514
515 /**
516 * @crtc: Currently bound CRTC, only really meaningful for non-atomic
517 * drivers. Atomic drivers should instead check &drm_plane_state.crtc.
518 */
515 struct drm_crtc *crtc; 519 struct drm_crtc *crtc;
520
521 /**
522 * @fb: Currently bound framebuffer, only really meaningful for
523 * non-atomic drivers. Atomic drivers should instead check
524 * &drm_plane_state.fb.
525 */
516 struct drm_framebuffer *fb; 526 struct drm_framebuffer *fb;
517 527
518 struct drm_framebuffer *old_fb; 528 struct drm_framebuffer *old_fb;
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 7c8a00ceadb7..8aa49c0ecd4d 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -38,11 +38,6 @@
38 */ 38 */
39#define DRM_PLANE_HELPER_NO_SCALING (1<<16) 39#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
40 40
41int drm_plane_helper_check_state(struct drm_plane_state *state,
42 const struct drm_rect *clip,
43 int min_scale, int max_scale,
44 bool can_position,
45 bool can_update_disabled);
46int drm_plane_helper_check_update(struct drm_plane *plane, 41int drm_plane_helper_check_update(struct drm_plane *plane,
47 struct drm_crtc *crtc, 42 struct drm_crtc *crtc,
48 struct drm_framebuffer *fb, 43 struct drm_framebuffer *fb,
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index ca4d7c6321f2..2a4a42e59a47 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -80,6 +80,29 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
80__printf(2, 3) 80__printf(2, 3)
81void drm_printf(struct drm_printer *p, const char *f, ...); 81void drm_printf(struct drm_printer *p, const char *f, ...);
82 82
83__printf(2, 0)
84/**
85 * drm_vprintf - print to a &drm_printer stream
86 * @p: the &drm_printer
87 * @fmt: format string
88 * @va: the va_list
89 */
90static inline void
91drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
92{
93 struct va_format vaf = { .fmt = fmt, .va = va };
94
95 p->printfn(p, &vaf);
96}
97
98/**
99 * drm_printf_indent - Print to a &drm_printer stream with indentation
100 * @printer: DRM printer
101 * @indent: Tab indentation level (max 5)
102 * @fmt: Format string
103 */
104#define drm_printf_indent(printer, indent, fmt, ...) \
105 drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__)
83 106
84/** 107/**
85 * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file 108 * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
@@ -128,4 +151,200 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
128 }; 151 };
129 return p; 152 return p;
130} 153}
154
155/*
156 * The following categories are defined:
157 *
158 * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
159 * This is the category used by the DRM_DEBUG() macro.
160 *
161 * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
162 * This is the category used by the DRM_DEBUG_DRIVER() macro.
163 *
164 * KMS: used in the modesetting code.
165 * This is the category used by the DRM_DEBUG_KMS() macro.
166 *
167 * PRIME: used in the prime code.
168 * This is the category used by the DRM_DEBUG_PRIME() macro.
169 *
170 * ATOMIC: used in the atomic code.
171 * This is the category used by the DRM_DEBUG_ATOMIC() macro.
172 *
173 * VBL: used for verbose debug message in the vblank code
174 * This is the category used by the DRM_DEBUG_VBL() macro.
175 *
176 * Enabling verbose debug messages is done through the drm.debug parameter,
177 * each category being enabled by a bit.
178 *
179 * drm.debug=0x1 will enable CORE messages
180 * drm.debug=0x2 will enable DRIVER messages
181 * drm.debug=0x3 will enable CORE and DRIVER messages
182 * ...
183 * drm.debug=0x3f will enable all messages
184 *
185 * An interesting feature is that it's possible to enable verbose logging at
186 * run-time by echoing the debug value in its sysfs node:
187 * # echo 0xf > /sys/module/drm/parameters/debug
188 */
189#define DRM_UT_NONE 0x00
190#define DRM_UT_CORE 0x01
191#define DRM_UT_DRIVER 0x02
192#define DRM_UT_KMS 0x04
193#define DRM_UT_PRIME 0x08
194#define DRM_UT_ATOMIC 0x10
195#define DRM_UT_VBL 0x20
196#define DRM_UT_STATE 0x40
197#define DRM_UT_LEASE 0x80
198
199__printf(6, 7)
200void drm_dev_printk(const struct device *dev, const char *level,
201 unsigned int category, const char *function_name,
202 const char *prefix, const char *format, ...);
203__printf(3, 4)
204void drm_printk(const char *level, unsigned int category,
205 const char *format, ...);
206
207/* Macros to make printk easier */
208
209#define _DRM_PRINTK(once, level, fmt, ...) \
210 do { \
211 printk##once(KERN_##level "[" DRM_NAME "] " fmt, \
212 ##__VA_ARGS__); \
213 } while (0)
214
215#define DRM_INFO(fmt, ...) \
216 _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
217#define DRM_NOTE(fmt, ...) \
218 _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
219#define DRM_WARN(fmt, ...) \
220 _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
221
222#define DRM_INFO_ONCE(fmt, ...) \
223 _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
224#define DRM_NOTE_ONCE(fmt, ...) \
225 _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
226#define DRM_WARN_ONCE(fmt, ...) \
227 _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
228
229/**
230 * Error output.
231 *
232 * @dev: device pointer
233 * @fmt: printf() like format string.
234 */
235#define DRM_DEV_ERROR(dev, fmt, ...) \
236 drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
237 fmt, ##__VA_ARGS__)
238#define DRM_ERROR(fmt, ...) \
239 drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__)
240
241/**
242 * Rate limited error output. Like DRM_ERROR() but won't flood the log.
243 *
244 * @dev: device pointer
245 * @fmt: printf() like format string.
246 */
247#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \
248({ \
249 static DEFINE_RATELIMIT_STATE(_rs, \
250 DEFAULT_RATELIMIT_INTERVAL, \
251 DEFAULT_RATELIMIT_BURST); \
252 \
253 if (__ratelimit(&_rs)) \
254 DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
255})
256#define DRM_ERROR_RATELIMITED(fmt, ...) \
257 DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
258
259#define DRM_DEV_INFO(dev, fmt, ...) \
260 drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \
261 ##__VA_ARGS__)
262
263#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
264({ \
265 static bool __print_once __read_mostly; \
266 if (!__print_once) { \
267 __print_once = true; \
268 DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \
269 } \
270})
271
272/**
273 * Debug output.
274 *
275 * @dev: device pointer
276 * @fmt: printf() like format string.
277 */
278#define DRM_DEV_DEBUG(dev, fmt, args...) \
279 drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \
280 ##args)
281#define DRM_DEBUG(fmt, ...) \
282 drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
283
284#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \
285 drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \
286 fmt, ##args)
287#define DRM_DEBUG_DRIVER(fmt, ...) \
288 drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
289
290#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \
291 drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \
292 ##args)
293#define DRM_DEBUG_KMS(fmt, ...) \
294 drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
295
296#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \
297 drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \
298 fmt, ##args)
299#define DRM_DEBUG_PRIME(fmt, ...) \
300 drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
301
302#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \
303 drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \
304 fmt, ##args)
305#define DRM_DEBUG_ATOMIC(fmt, ...) \
306 drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
307
308#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \
309 drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \
310 ##args)
311#define DRM_DEBUG_VBL(fmt, ...) \
312 drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
313
314#define DRM_DEBUG_LEASE(fmt, ...) \
315 drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
316
317#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \
318({ \
319 static DEFINE_RATELIMIT_STATE(_rs, \
320 DEFAULT_RATELIMIT_INTERVAL, \
321 DEFAULT_RATELIMIT_BURST); \
322 if (__ratelimit(&_rs)) \
323 drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \
324 __func__, "", fmt, ##args); \
325})
326
327/**
328 * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
329 *
330 * @dev: device pointer
331 * @fmt: printf() like format string.
332 */
333#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \
334	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
335#define DRM_DEBUG_RATELIMITED(fmt, args...) \
336 DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
337#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \
338 _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
339#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \
340 DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
341#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \
342 _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
343#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \
344 DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
345#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \
346 _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
347#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
348 DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
349
131#endif /* DRM_PRINT_H_ */ 350#endif /* DRM_PRINT_H_ */
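Usage of the relocated macros is unchanged; a short sketch (hypothetical foo_ function, message text illustrative) showing a plain KMS debug message next to its rate-limited per-device variant, both still gated by the drm.debug bitmask described above:

#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>

static void foo_report_underrun(struct drm_device *drm, struct drm_crtc *crtc)
{
	/* Only printed when the DRM_UT_KMS bit is set in drm.debug. */
	DRM_DEBUG_KMS("[CRTC:%d] FIFO underrun\n", crtc->base.id);

	/* Rate limited so a persistent underrun does not flood dmesg. */
	DRM_DEV_DEBUG_KMS_RATELIMITED(drm->dev, "underrun on CRTC %d\n",
				      crtc->base.id);
}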
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 43e2f382d2f0..3980602472c0 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -33,36 +33,31 @@ struct drm_syncobj_cb;
33/** 33/**
34 * struct drm_syncobj - sync object. 34 * struct drm_syncobj - sync object.
35 * 35 *
36 * This structure defines a generic sync object which wraps a dma fence. 36 * This structure defines a generic sync object which wraps a &dma_fence.
37 */ 37 */
38struct drm_syncobj { 38struct drm_syncobj {
39 /** 39 /**
40 * @refcount: 40 * @refcount: Reference count of this object.
41 *
42 * Reference count of this object.
43 */ 41 */
44 struct kref refcount; 42 struct kref refcount;
45 /** 43 /**
46 * @fence: 44 * @fence:
47 * NULL or a pointer to the fence bound to this object. 45 * NULL or a pointer to the fence bound to this object.
48 * 46 *
49 * This field should not be used directly. Use drm_syncobj_fence_get 47 * This field should not be used directly. Use drm_syncobj_fence_get()
50 * and drm_syncobj_replace_fence instead. 48 * and drm_syncobj_replace_fence() instead.
51 */ 49 */
52 struct dma_fence *fence; 50 struct dma_fence __rcu *fence;
53 /** 51 /**
54 * @cb_list: 52 * @cb_list: List of callbacks to call when the &fence gets replaced.
55 * List of callbacks to call when the fence gets replaced
56 */ 53 */
57 struct list_head cb_list; 54 struct list_head cb_list;
58 /** 55 /**
59 * @lock: 56 * @lock: Protects &cb_list and write-locks &fence.
60 * locks cb_list and write-locks fence.
61 */ 57 */
62 spinlock_t lock; 58 spinlock_t lock;
63 /** 59 /**
64 * @file: 60 * @file: A file backing for this syncobj.
65 * a file backing for this syncobj.
66 */ 61 */
67 struct file *file; 62 struct file *file;
68}; 63};
@@ -73,7 +68,7 @@ typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj,
73/** 68/**
74 * struct drm_syncobj_cb - callback for drm_syncobj_add_callback 69 * struct drm_syncobj_cb - callback for drm_syncobj_add_callback
75 * @node: used by drm_syncob_add_callback to append this struct to 70 * @node: used by drm_syncob_add_callback to append this struct to
76 * syncobj::cb_list 71 * &drm_syncobj.cb_list
77 * @func: drm_syncobj_func_t to call 72 * @func: drm_syncobj_func_t to call
78 * 73 *
79 * This struct will be initialized by drm_syncobj_add_callback, additional 74 * This struct will be initialized by drm_syncobj_add_callback, additional
@@ -92,7 +87,7 @@ void drm_syncobj_free(struct kref *kref);
92 * drm_syncobj_get - acquire a syncobj reference 87 * drm_syncobj_get - acquire a syncobj reference
93 * @obj: sync object 88 * @obj: sync object
94 * 89 *
95 * This acquires additional reference to @obj. It is illegal to call this 90 * This acquires an additional reference to @obj. It is illegal to call this
96 * without already holding a reference. No locks required. 91 * without already holding a reference. No locks required.
97 */ 92 */
98static inline void 93static inline void
@@ -111,6 +106,17 @@ drm_syncobj_put(struct drm_syncobj *obj)
111 kref_put(&obj->refcount, drm_syncobj_free); 106 kref_put(&obj->refcount, drm_syncobj_free);
112} 107}
113 108
109/**
110 * drm_syncobj_fence_get - get a reference to a fence in a sync object
111 * @syncobj: sync object.
112 *
113 * This acquires an additional reference to &drm_syncobj.fence contained in @syncobj,
114 * if not NULL. It is illegal to call this without already holding a reference.
115 * No locks required.
116 *
117 * Returns:
118 * Either the fence of @syncobj or NULL if there's none.
119 */
114static inline struct dma_fence * 120static inline struct dma_fence *
115drm_syncobj_fence_get(struct drm_syncobj *syncobj) 121drm_syncobj_fence_get(struct drm_syncobj *syncobj)
116{ 122{
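A typical consumer of drm_syncobj_fence_get() looks up the syncobj by handle, takes a fence reference through the accessor rather than dereferencing the now RCU-protected @fence field directly, and drops both references when done; a sketch (hypothetical foo_ function, error handling kept minimal):

#include <linux/dma-fence.h>
#include <drm/drm_syncobj.h>

static int foo_wait_syncobj(struct drm_file *file_priv, u32 handle)
{
	struct drm_syncobj *syncobj;
	struct dma_fence *fence;
	int ret = 0;

	syncobj = drm_syncobj_find(file_priv, handle);
	if (!syncobj)
		return -ENOENT;

	fence = drm_syncobj_fence_get(syncobj);
	if (fence) {
		ret = dma_fence_wait(fence, true);
		dma_fence_put(fence);
	}

	drm_syncobj_put(syncobj);
	return ret;
}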
diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
new file mode 100644
index 000000000000..a803988d8579
--- /dev/null
+++ b/include/drm/drm_utils.h
@@ -0,0 +1,15 @@
1/* SPDX-License-Identifier: MIT */
2/*
3 * Function prototypes for misc. drm utility functions.
4 * Specifically this file is for function prototypes for functions which
5 * may also be used outside of drm code (e.g. in fbdev drivers).
6 *
7 * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
8 */
9
10#ifndef __DRM_UTILS_H__
11#define __DRM_UTILS_H__
12
13int drm_get_panel_orientation_quirk(int width, int height);
14
15#endif
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index d84d52f6d2b1..8758df94e9a0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -152,7 +152,7 @@ static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
152 * Start address of @node for page-based addressing. 0 if the node does not 152 * Start address of @node for page-based addressing. 0 if the node does not
153 * have an offset allocated. 153 * have an offset allocated.
154 */ 154 */
155static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node) 155static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node)
156{ 156{
157 return node->vm_node.start; 157 return node->vm_node.start;
158} 158}
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
new file mode 100644
index 000000000000..dfd54fb94e10
--- /dev/null
+++ b/include/drm/gpu_scheduler.h
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef _DRM_GPU_SCHEDULER_H_
25#define _DRM_GPU_SCHEDULER_H_
26
27#include <drm/spsc_queue.h>
28#include <linux/dma-fence.h>
29
30struct drm_gpu_scheduler;
31struct drm_sched_rq;
32
33enum drm_sched_priority {
34 DRM_SCHED_PRIORITY_MIN,
35 DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
36 DRM_SCHED_PRIORITY_NORMAL,
37 DRM_SCHED_PRIORITY_HIGH_SW,
38 DRM_SCHED_PRIORITY_HIGH_HW,
39 DRM_SCHED_PRIORITY_KERNEL,
40 DRM_SCHED_PRIORITY_MAX,
41 DRM_SCHED_PRIORITY_INVALID = -1,
42 DRM_SCHED_PRIORITY_UNSET = -2
43};
44
45/**
46 * A scheduler entity is a wrapper around a job queue or a group
47 * of other entities. Entities take turns emitting jobs from their
48 * job queues to the corresponding hardware ring based on the scheduling
49 * policy.
50*/
51struct drm_sched_entity {
52 struct list_head list;
53 struct drm_sched_rq *rq;
54 spinlock_t rq_lock;
55 struct drm_gpu_scheduler *sched;
56
57 spinlock_t queue_lock;
58 struct spsc_queue job_queue;
59
60 atomic_t fence_seq;
61 uint64_t fence_context;
62
63 struct dma_fence *dependency;
64 struct dma_fence_cb cb;
65 atomic_t *guilty; /* points to ctx's guilty */
66};
67
68/**
69 * A run queue is a set of entities scheduling command submissions for
70 * one specific ring. It implements the scheduling policy that selects
71 * the next entity to emit commands from.
72*/
73struct drm_sched_rq {
74 spinlock_t lock;
75 struct list_head entities;
76 struct drm_sched_entity *current_entity;
77};
78
79struct drm_sched_fence {
80 struct dma_fence scheduled;
81 struct dma_fence finished;
82 struct dma_fence_cb cb;
83 struct dma_fence *parent;
84 struct drm_gpu_scheduler *sched;
85 spinlock_t lock;
86 void *owner;
87};
88
89struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
90
91struct drm_sched_job {
92 struct spsc_node queue_node;
93 struct drm_gpu_scheduler *sched;
94 struct drm_sched_fence *s_fence;
95 struct dma_fence_cb finish_cb;
96 struct work_struct finish_work;
97 struct list_head node;
98 struct delayed_work work_tdr;
99 uint64_t id;
100 atomic_t karma;
101 enum drm_sched_priority s_priority;
102};
103
104static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
105 int threshold)
106{
107 return (s_job && atomic_inc_return(&s_job->karma) > threshold);
108}
109
110/**
111 * Define the backend operations called by the scheduler;
112 * these functions should be implemented on the driver side.
113*/
114struct drm_sched_backend_ops {
115 struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
116 struct drm_sched_entity *s_entity);
117 struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
118 void (*timedout_job)(struct drm_sched_job *sched_job);
119 void (*free_job)(struct drm_sched_job *sched_job);
120};
121
122/**
123 * One scheduler is implemented for each hardware ring
124*/
125struct drm_gpu_scheduler {
126 const struct drm_sched_backend_ops *ops;
127 uint32_t hw_submission_limit;
128 long timeout;
129 const char *name;
130 struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
131 wait_queue_head_t wake_up_worker;
132 wait_queue_head_t job_scheduled;
133 atomic_t hw_rq_count;
134 atomic64_t job_id_count;
135 struct task_struct *thread;
136 struct list_head ring_mirror_list;
137 spinlock_t job_list_lock;
138 int hang_limit;
139};
140
141int drm_sched_init(struct drm_gpu_scheduler *sched,
142 const struct drm_sched_backend_ops *ops,
143 uint32_t hw_submission, unsigned hang_limit, long timeout,
144 const char *name);
145void drm_sched_fini(struct drm_gpu_scheduler *sched);
146
147int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
148 struct drm_sched_entity *entity,
149 struct drm_sched_rq *rq,
150 uint32_t jobs, atomic_t *guilty);
151void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
152 struct drm_sched_entity *entity);
153void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
154 struct drm_sched_entity *entity);
155void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
156 struct drm_sched_rq *rq);
157
158struct drm_sched_fence *drm_sched_fence_create(
159 struct drm_sched_entity *s_entity, void *owner);
160void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
161void drm_sched_fence_finished(struct drm_sched_fence *fence);
162int drm_sched_job_init(struct drm_sched_job *job,
163 struct drm_gpu_scheduler *sched,
164 struct drm_sched_entity *entity,
165 void *owner);
166void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
167 struct drm_sched_job *job);
168void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
169bool drm_sched_dependency_optimized(struct dma_fence* fence,
170 struct drm_sched_entity *entity);
171void drm_sched_job_kickout(struct drm_sched_job *s_job);
172
173#endif
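A minimal usage sketch of the API above, assuming a hypothetical driver with a struct my_ring (holding the drm_gpu_scheduler and a drm_sched_entity) and a struct my_job embedding a drm_sched_job; my_hw_submit() and all my_* names are placeholders, and only the drm_sched_*() calls and drm_sched_backend_ops come from this header:

static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
				       struct drm_sched_entity *s_entity)
{
	return NULL;	/* no dependencies beyond what the job already carries */
}

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* Push the job to the hardware ring; return its hardware fence. */
	return my_hw_submit(sched_job);
}

static void my_timedout_job(struct drm_sched_job *sched_job)
{
	/* Reset the hung ring, then recover with drm_sched_job_recovery(). */
}

static void my_free_job(struct drm_sched_job *sched_job)
{
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};

int my_ring_init(struct my_ring *ring)
{
	int r;

	/* One scheduler per hardware ring; submission depth, hang limit and
	 * timeout are driver policy. */
	r = drm_sched_init(&ring->sched, &my_sched_ops, 16, 2,
			   msecs_to_jiffies(10000), ring->name);
	if (r)
		return r;

	/* One entity per context/ring pair, attached to a priority run queue. */
	return drm_sched_entity_init(&ring->sched, &ring->entity,
				     &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				     32, NULL);
}

int my_ring_push(struct my_ring *ring, struct my_job *job, void *owner)
{
	int r = drm_sched_job_init(&job->base, &ring->sched, &ring->entity, owner);

	if (r)
		return r;
	drm_sched_entity_push_job(&job->base, &ring->entity);
	return 0;
}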
diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h
new file mode 100644
index 000000000000..0789e8d0a0e1
--- /dev/null
+++ b/include/drm/gpu_scheduler_trace.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
25#define _GPU_SCHED_TRACE_H_
26
27#include <linux/stringify.h>
28#include <linux/types.h>
29#include <linux/tracepoint.h>
30
31#include <drm/drmP.h>
32
33#undef TRACE_SYSTEM
34#define TRACE_SYSTEM gpu_scheduler
35#define TRACE_INCLUDE_FILE gpu_scheduler_trace
36
37TRACE_EVENT(drm_sched_job,
38 TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
39 TP_ARGS(sched_job, entity),
40 TP_STRUCT__entry(
41 __field(struct drm_sched_entity *, entity)
42 __field(struct dma_fence *, fence)
43 __field(const char *, name)
44 __field(uint64_t, id)
45 __field(u32, job_count)
46 __field(int, hw_job_count)
47 ),
48
49 TP_fast_assign(
50 __entry->entity = entity;
51 __entry->id = sched_job->id;
52 __entry->fence = &sched_job->s_fence->finished;
53 __entry->name = sched_job->sched->name;
54 __entry->job_count = spsc_queue_count(&entity->job_queue);
55 __entry->hw_job_count = atomic_read(
56 &sched_job->sched->hw_rq_count);
57 ),
58 TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
59 __entry->entity, __entry->id,
60 __entry->fence, __entry->name,
61 __entry->job_count, __entry->hw_job_count)
62);
63
64TRACE_EVENT(drm_sched_process_job,
65 TP_PROTO(struct drm_sched_fence *fence),
66 TP_ARGS(fence),
67 TP_STRUCT__entry(
68 __field(struct dma_fence *, fence)
69 ),
70
71 TP_fast_assign(
72 __entry->fence = &fence->finished;
73 ),
74 TP_printk("fence=%p signaled", __entry->fence)
75);
76
77#endif
78
79/* This part must be outside protection */
80#undef TRACE_INCLUDE_PATH
81#define TRACE_INCLUDE_PATH .
82#include <trace/define_trace.h>
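The two TRACE_EVENT() definitions above expand into trace_drm_sched_job() and trace_drm_sched_process_job() hooks. A minimal sketch of how scheduler code might emit them; the my_*() wrappers are illustrative, exactly one C file must define CREATE_TRACE_POINTS before including the header so the tracepoint bodies are instantiated, and the build plumbing implied by TRACE_INCLUDE_PATH is omitted here:

#define CREATE_TRACE_POINTS
#include <drm/gpu_scheduler_trace.h>

static void my_submit(struct drm_sched_job *sched_job,
		      struct drm_sched_entity *entity)
{
	trace_drm_sched_job(sched_job, entity);	/* job queued to the entity */
	drm_sched_entity_push_job(sched_job, entity);
}

static void my_job_done(struct drm_sched_fence *s_fence)
{
	trace_drm_sched_process_job(s_fence);	/* finished fence signaled */
}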
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 4e1b274e1164..c9e5a6621b95 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -36,6 +36,9 @@ extern bool i915_gpu_lower(void);
36extern bool i915_gpu_busy(void); 36extern bool i915_gpu_busy(void);
37extern bool i915_gpu_turbo_disable(void); 37extern bool i915_gpu_turbo_disable(void);
38 38
39/* Exported from arch/x86/kernel/early-quirks.c */
40extern struct resource intel_graphics_stolen_res;
41
39/* 42/*
40 * The Bridge device's PCI config space has information about the 43 * The Bridge device's PCI config space has information about the
41 * fb aperture size and the amount of pre-reserved memory. 44 * fb aperture size and the amount of pre-reserved memory.
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 972a25633525..5db0458dd832 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -373,24 +373,46 @@
373/* CFL S */ 373/* CFL S */
374#define INTEL_CFL_S_GT1_IDS(info) \ 374#define INTEL_CFL_S_GT1_IDS(info) \
375 INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ 375 INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
376 INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */ 376 INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
377 INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */
377 378
378#define INTEL_CFL_S_GT2_IDS(info) \ 379#define INTEL_CFL_S_GT2_IDS(info) \
379 INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ 380 INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
380 INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ 381 INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
381 INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */ 382 INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
383 INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
382 384
383/* CFL H */ 385/* CFL H */
384#define INTEL_CFL_H_GT2_IDS(info) \ 386#define INTEL_CFL_H_GT2_IDS(info) \
385 INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ 387 INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
386 INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ 388 INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
387 389
388/* CFL U */ 390/* CFL U GT1 */
391#define INTEL_CFL_U_GT1_IDS(info) \
392 INTEL_VGA_DEVICE(0x3EA1, info), \
393 INTEL_VGA_DEVICE(0x3EA4, info)
394
395/* CFL U GT2 */
396#define INTEL_CFL_U_GT2_IDS(info) \
397 INTEL_VGA_DEVICE(0x3EA0, info), \
398 INTEL_VGA_DEVICE(0x3EA3, info), \
399 INTEL_VGA_DEVICE(0x3EA9, info)
400
401/* CFL U GT3 */
389#define INTEL_CFL_U_GT3_IDS(info) \ 402#define INTEL_CFL_U_GT3_IDS(info) \
403 INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
404 INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
390 INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ 405 INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
391 INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ 406 INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
392 INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \ 407 INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
393 INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */ 408
409#define INTEL_CFL_IDS(info) \
410 INTEL_CFL_S_GT1_IDS(info), \
411 INTEL_CFL_S_GT2_IDS(info), \
412 INTEL_CFL_H_GT2_IDS(info), \
413 INTEL_CFL_U_GT1_IDS(info), \
414 INTEL_CFL_U_GT2_IDS(info), \
415 INTEL_CFL_U_GT3_IDS(info)
394 416
395/* CNL U 2+2 */ 417/* CNL U 2+2 */
396#define INTEL_CNL_U_GT2_IDS(info) \ 418#define INTEL_CNL_U_GT2_IDS(info) \
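The new INTEL_CFL_IDS() convenience macro gathers every Coffee Lake ID above into one list. A hedged sketch of how a driver's PCI ID table might consume it; pciidlist and intel_cfl_info are placeholders, and INTEL_VGA_DEVICE() supplies the Intel vendor ID and stores the info pointer as driver data:

static const struct pci_device_id pciidlist[] = {
	INTEL_CFL_IDS(&intel_cfl_info),	/* expands to INTEL_VGA_DEVICE() entries */
	{ 0, 0, 0 }
};
MODULE_DEVICE_TABLE(pci, pciidlist);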
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index c5db7975c640..2324c84a25c0 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -5,9 +5,8 @@
5#define _DRM_INTEL_GTT_H 5#define _DRM_INTEL_GTT_H
6 6
7void intel_gtt_get(u64 *gtt_total, 7void intel_gtt_get(u64 *gtt_total,
8 u32 *stolen_size,
9 phys_addr_t *mappable_base, 8 phys_addr_t *mappable_base,
10 u64 *mappable_end); 9 resource_size_t *mappable_end);
11 10
12int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, 11int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
13 struct agp_bridge_data *bridge); 12 struct agp_bridge_data *bridge);
diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
new file mode 100644
index 000000000000..125f096c88cb
--- /dev/null
+++ b/include/drm/spsc_queue.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
25#define DRM_SCHEDULER_SPSC_QUEUE_H_
26
27#include <linux/atomic.h>
28#include <linux/preempt.h>
29
30/** SPSC lockless queue */
31
32struct spsc_node {
33
34 /* Stores spsc_node* */
35 struct spsc_node *next;
36};
37
38struct spsc_queue {
39
40 struct spsc_node *head;
41
42 /* atomic pointer to struct spsc_node* */
43 atomic_long_t tail;
44
45 atomic_t job_count;
46};
47
48static inline void spsc_queue_init(struct spsc_queue *queue)
49{
50 queue->head = NULL;
51 atomic_long_set(&queue->tail, (long)&queue->head);
52 atomic_set(&queue->job_count, 0);
53}
54
55static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
56{
57 return queue->head;
58}
59
60static inline int spsc_queue_count(struct spsc_queue *queue)
61{
62 return atomic_read(&queue->job_count);
63}
64
65static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
66{
67 struct spsc_node **tail;
68
69 node->next = NULL;
70
71 preempt_disable();
72
73 tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
74 WRITE_ONCE(*tail, node);
75 atomic_inc(&queue->job_count);
76
77 /*
78	 * For the first element, make sure the new node is visible to the consumer
79	 * thread before we ping the kernel thread that there is new work to do.
80 */
81 smp_wmb();
82
83 preempt_enable();
84
85 return tail == &queue->head;
86}
87
88
89static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
90{
91 struct spsc_node *next, *node;
92
93	/* Make sure we read the head from memory, not from a stale cache line */
94 smp_rmb();
95
96 node = READ_ONCE(queue->head);
97
98 if (!node)
99 return NULL;
100
101 next = READ_ONCE(node->next);
102 WRITE_ONCE(queue->head, next);
103
104 if (unlikely(!next)) {
105 /* slowpath for the last element in the queue */
106
107 if (atomic_long_cmpxchg(&queue->tail,
108 (long)&node->next, (long) &queue->head) != (long)&node->next) {
109			/* Updating the tail failed; wait for the new next pointer to appear */
110 do {
111 smp_rmb();
112 } while (unlikely(!(queue->head = READ_ONCE(node->next))));
113 }
114 }
115
116 atomic_dec(&queue->job_count);
117 return node;
118}
119
120
121
122#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
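A minimal sketch of the single-producer/single-consumer contract, assuming a hypothetical struct my_item that embeds an spsc_node; wake_consumer() and process() are placeholders for driver code, and spsc_queue_init() is assumed to have run once before either side touches the queue:

struct my_item {
	struct spsc_node node;
	int payload;
};

static struct spsc_queue queue;		/* spsc_queue_init(&queue) at setup time */

/* Producer side: only one thread may push. */
static void producer(struct my_item *item)
{
	if (spsc_queue_push(&queue, &item->node))
		wake_consumer();	/* queue was empty: ping the consumer */
}

/* Consumer side: only one thread may pop. */
static void consumer(void)
{
	struct spsc_node *node;

	while ((node = spsc_queue_pop(&queue))) {
		struct my_item *item = container_of(node, struct my_item, node);

		process(item->payload);
	}
}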
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index 83346ddb9dba..5d0e82b36eaf 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -72,10 +72,12 @@ void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe,
72void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); 72void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
73void mipi_dbi_hw_reset(struct mipi_dbi *mipi); 73void mipi_dbi_hw_reset(struct mipi_dbi *mipi);
74bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); 74bool mipi_dbi_display_is_on(struct mipi_dbi *mipi);
75u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
75 76
76int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); 77int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
77int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); 78int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
78 79int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
80 struct drm_clip_rect *clip, bool swap);
79/** 81/**
80 * mipi_dbi_command - MIPI DCS command with optional parameter(s) 82 * mipi_dbi_command - MIPI DCS command with optional parameter(s)
81 * @mipi: MIPI structure 83 * @mipi: MIPI structure
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 4774fe3d4273..07a9a11fe19d 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -19,16 +19,12 @@
19 * @drm: DRM device 19 * @drm: DRM device
20 * @pipe: Display pipe structure 20 * @pipe: Display pipe structure
21 * @dirty_lock: Serializes framebuffer flushing 21 * @dirty_lock: Serializes framebuffer flushing
22 * @fbdev_cma: CMA fbdev structure
23 * @suspend_state: Atomic state when suspended
24 * @fb_funcs: Framebuffer functions used when creating framebuffers 22 * @fb_funcs: Framebuffer functions used when creating framebuffers
25 */ 23 */
26struct tinydrm_device { 24struct tinydrm_device {
27 struct drm_device *drm; 25 struct drm_device *drm;
28 struct drm_simple_display_pipe pipe; 26 struct drm_simple_display_pipe pipe;
29 struct mutex dirty_lock; 27 struct mutex dirty_lock;
30 struct drm_fbdev_cma *fbdev_cma;
31 struct drm_atomic_state *suspend_state;
32 const struct drm_framebuffer_funcs *fb_funcs; 28 const struct drm_framebuffer_funcs *fb_funcs;
33}; 29};
34 30
@@ -46,6 +42,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
46 */ 42 */
47#define TINYDRM_GEM_DRIVER_OPS \ 43#define TINYDRM_GEM_DRIVER_OPS \
48 .gem_free_object = tinydrm_gem_cma_free_object, \ 44 .gem_free_object = tinydrm_gem_cma_free_object, \
45 .gem_print_info = drm_gem_cma_print_info, \
49 .gem_vm_ops = &drm_gem_cma_vm_ops, \ 46 .gem_vm_ops = &drm_gem_cma_vm_ops, \
50 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ 47 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
51 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ 48 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
@@ -81,7 +78,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
81 .type = DRM_MODE_TYPE_DRIVER, \ 78 .type = DRM_MODE_TYPE_DRIVER, \
82 .clock = 1 /* pass validation */ 79 .clock = 1 /* pass validation */
83 80
84void tinydrm_lastclose(struct drm_device *drm);
85void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); 81void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
86struct drm_gem_object * 82struct drm_gem_object *
87tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, 83tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
@@ -92,8 +88,6 @@ int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
92 struct drm_driver *driver); 88 struct drm_driver *driver);
93int devm_tinydrm_register(struct tinydrm_device *tdev); 89int devm_tinydrm_register(struct tinydrm_device *tdev);
94void tinydrm_shutdown(struct tinydrm_device *tdev); 90void tinydrm_shutdown(struct tinydrm_device *tdev);
95int tinydrm_suspend(struct tinydrm_device *tdev);
96int tinydrm_resume(struct tinydrm_device *tdev);
97 91
98void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, 92void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe,
99 struct drm_plane_state *old_state); 93 struct drm_plane_state *old_state);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index fa07be197945..2cd025c2abe7 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -224,7 +224,6 @@ struct ttm_buffer_object {
224 */ 224 */
225 225
226 uint64_t offset; /* GPU address space is independent of CPU word size */ 226 uint64_t offset; /* GPU address space is independent of CPU word size */
227 uint32_t cur_placement;
228 227
229 struct sg_table *sg; 228 struct sg_table *sg;
230 229
@@ -260,6 +259,25 @@ struct ttm_bo_kmap_obj {
260}; 259};
261 260
262/** 261/**
262 * struct ttm_operation_ctx
263 *
264 * @interruptible: Sleep interruptible if sleeping.
265 * @no_wait_gpu: Return immediately if the GPU is busy.
266 * @allow_reserved_eviction: Allow eviction of reserved BOs.
267 * @resv: Reservation object to allow reserved evictions with.
268 *
269 * Context for TTM operations like changing buffer placement or general memory
270 * allocation.
271 */
272struct ttm_operation_ctx {
273 bool interruptible;
274 bool no_wait_gpu;
275 bool allow_reserved_eviction;
276 struct reservation_object *resv;
277 uint64_t bytes_moved;
278};
279
280/**
263 * ttm_bo_reference - reference a struct ttm_buffer_object 281 * ttm_bo_reference - reference a struct ttm_buffer_object
264 * 282 *
265 * @bo: The buffer object. 283 * @bo: The buffer object.
@@ -288,8 +306,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
288 * Returns -EBUSY if no_wait is true and the buffer is busy. 306 * Returns -EBUSY if no_wait is true and the buffer is busy.
289 * Returns -ERESTARTSYS if interrupted by a signal. 307 * Returns -ERESTARTSYS if interrupted by a signal.
290 */ 308 */
291extern int ttm_bo_wait(struct ttm_buffer_object *bo, 309int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
292 bool interruptible, bool no_wait);
293 310
294/** 311/**
295 * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo 312 * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
@@ -300,17 +317,15 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo,
300 * 317 *
301 * Returns true if the placement is compatible 318 * Returns true if the placement is compatible
302 */ 319 */
303extern bool ttm_bo_mem_compat(struct ttm_placement *placement, 320bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
304 struct ttm_mem_reg *mem, 321 uint32_t *new_flags);
305 uint32_t *new_flags);
306 322
307/** 323/**
308 * ttm_bo_validate 324 * ttm_bo_validate
309 * 325 *
310 * @bo: The buffer object. 326 * @bo: The buffer object.
311 * @placement: Proposed placement for the buffer object. 327 * @placement: Proposed placement for the buffer object.
312 * @interruptible: Sleep interruptible if sleeping. 328 * @ctx: validation parameters.
313 * @no_wait_gpu: Return immediately if the GPU is busy.
314 * 329 *
315 * Changes placement and caching policy of the buffer object 330 * Changes placement and caching policy of the buffer object
316 * according to the proposed placement. 331
@@ -320,10 +335,9 @@ extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
320 * -EBUSY if no_wait is true and buffer busy. 335 * -EBUSY if no_wait is true and buffer busy.
321 * -ERESTARTSYS if interrupted by a signal. 336 * -ERESTARTSYS if interrupted by a signal.
322 */ 337 */
323extern int ttm_bo_validate(struct ttm_buffer_object *bo, 338int ttm_bo_validate(struct ttm_buffer_object *bo,
324 struct ttm_placement *placement, 339 struct ttm_placement *placement,
325 bool interruptible, 340 struct ttm_operation_ctx *ctx);
326 bool no_wait_gpu);
327 341
328/** 342/**
329 * ttm_bo_unref 343 * ttm_bo_unref
@@ -332,7 +346,7 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
332 * 346 *
333 * Unreference and clear a pointer to a buffer object. 347 * Unreference and clear a pointer to a buffer object.
334 */ 348 */
335extern void ttm_bo_unref(struct ttm_buffer_object **bo); 349void ttm_bo_unref(struct ttm_buffer_object **bo);
336 350
337/** 351/**
338 * ttm_bo_add_to_lru 352 * ttm_bo_add_to_lru
@@ -344,7 +358,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
344 * This function must be called with struct ttm_bo_global::lru_lock held, and 358 * This function must be called with struct ttm_bo_global::lru_lock held, and
345 * is typically called immediately prior to unreserving a bo. 359 * is typically called immediately prior to unreserving a bo.
346 */ 360 */
347extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); 361void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
348 362
349/** 363/**
350 * ttm_bo_del_from_lru 364 * ttm_bo_del_from_lru
@@ -356,7 +370,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
356 * and is usually called just immediately after the bo has been reserved to 370 * and is usually called just immediately after the bo has been reserved to
357 * avoid recursive reservation from lru lists. 371 * avoid recursive reservation from lru lists.
358 */ 372 */
359extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); 373void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
360 374
361/** 375/**
362 * ttm_bo_move_to_lru_tail 376 * ttm_bo_move_to_lru_tail
@@ -367,7 +381,7 @@ extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
367 * object. This function must be called with struct ttm_bo_global::lru_lock 381 * object. This function must be called with struct ttm_bo_global::lru_lock
368 * held, and is used to make a BO less likely to be considered for eviction. 382 * held, and is used to make a BO less likely to be considered for eviction.
369 */ 383 */
370extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); 384void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
371 385
372/** 386/**
373 * ttm_bo_lock_delayed_workqueue 387 * ttm_bo_lock_delayed_workqueue
@@ -376,15 +390,14 @@ extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
376 * Returns 390 * Returns
377 * True if the workqueue was queued at the time 391 * True if the workqueue was queued at the time
378 */ 392 */
379extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); 393int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
380 394
381/** 395/**
382 * ttm_bo_unlock_delayed_workqueue 396 * ttm_bo_unlock_delayed_workqueue
383 * 397 *
384 * Allows the delayed workqueue to run. 398 * Allows the delayed workqueue to run.
385 */ 399 */
386extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, 400void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
387 int resched);
388 401
389/** 402/**
390 * ttm_bo_eviction_valuable 403 * ttm_bo_eviction_valuable
@@ -411,8 +424,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
411 * -EBUSY if the buffer is busy and no_wait is true. 424 * -EBUSY if the buffer is busy and no_wait is true.
412 * -ERESTARTSYS if interrupted by a signal. 425 * -ERESTARTSYS if interrupted by a signal.
413 */ 426 */
414extern int 427int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
415ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
416 428
417/** 429/**
418 * ttm_bo_synccpu_write_release: 430 * ttm_bo_synccpu_write_release:
@@ -421,7 +433,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
421 * 433 *
422 * Releases a synccpu lock. 434 * Releases a synccpu lock.
423 */ 435 */
424extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); 436void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
425 437
426/** 438/**
427 * ttm_bo_acc_size 439 * ttm_bo_acc_size
@@ -448,8 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
448 * @type: Requested type of buffer object. 460 * @type: Requested type of buffer object.
449 * @flags: Initial placement flags. 461 * @flags: Initial placement flags.
450 * @page_alignment: Data alignment in pages. 462 * @page_alignment: Data alignment in pages.
451 * @interruptible: If needing to sleep to wait for GPU resources, 463 * @ctx: TTM operation context for memory allocation.
452 * sleep interruptible.
453 * @persistent_swap_storage: Usually the swap storage is deleted for buffers 464 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
454 * pinned in physical memory. If this behaviour is not desired, this member 465 * pinned in physical memory. If this behaviour is not desired, this member
455 * holds a pointer to a persistent shmem object. Typically, this would 466 * holds a pointer to a persistent shmem object. Typically, this would
@@ -480,18 +491,18 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
480 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. 491 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
481 */ 492 */
482 493
483extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev, 494int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
484 struct ttm_buffer_object *bo, 495 struct ttm_buffer_object *bo,
485 unsigned long size, 496 unsigned long size,
486 enum ttm_bo_type type, 497 enum ttm_bo_type type,
487 struct ttm_placement *placement, 498 struct ttm_placement *placement,
488 uint32_t page_alignment, 499 uint32_t page_alignment,
489 bool interrubtible, 500 struct ttm_operation_ctx *ctx,
490 struct file *persistent_swap_storage, 501 struct file *persistent_swap_storage,
491 size_t acc_size, 502 size_t acc_size,
492 struct sg_table *sg, 503 struct sg_table *sg,
493 struct reservation_object *resv, 504 struct reservation_object *resv,
494 void (*destroy) (struct ttm_buffer_object *)); 505 void (*destroy) (struct ttm_buffer_object *));
495 506
496/** 507/**
497 * ttm_bo_init 508 * ttm_bo_init
@@ -531,19 +542,13 @@ extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
531 * -EINVAL: Invalid placement flags. 542 * -EINVAL: Invalid placement flags.
532 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. 543 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
533 */ 544 */
534 545int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
535extern int ttm_bo_init(struct ttm_bo_device *bdev, 546 unsigned long size, enum ttm_bo_type type,
536 struct ttm_buffer_object *bo, 547 struct ttm_placement *placement,
537 unsigned long size, 548 uint32_t page_alignment, bool interruptible,
538 enum ttm_bo_type type, 549 struct file *persistent_swap_storage, size_t acc_size,
539 struct ttm_placement *placement, 550 struct sg_table *sg, struct reservation_object *resv,
540 uint32_t page_alignment, 551 void (*destroy) (struct ttm_buffer_object *));
541 bool interrubtible,
542 struct file *persistent_swap_storage,
543 size_t acc_size,
544 struct sg_table *sg,
545 struct reservation_object *resv,
546 void (*destroy) (struct ttm_buffer_object *));
547 552
548/** 553/**
549 * ttm_bo_create 554 * ttm_bo_create
@@ -569,15 +574,11 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
569 * -EINVAL: Invalid placement flags. 574 * -EINVAL: Invalid placement flags.
570 * -ERESTARTSYS: Interrupted by signal while waiting for resources. 575 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
571 */ 576 */
572 577int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
573extern int ttm_bo_create(struct ttm_bo_device *bdev, 578 enum ttm_bo_type type, struct ttm_placement *placement,
574 unsigned long size, 579 uint32_t page_alignment, bool interruptible,
575 enum ttm_bo_type type, 580 struct file *persistent_swap_storage,
576 struct ttm_placement *placement, 581 struct ttm_buffer_object **p_bo);
577 uint32_t page_alignment,
578 bool interruptible,
579 struct file *persistent_swap_storage,
580 struct ttm_buffer_object **p_bo);
581 582
582/** 583/**
583 * ttm_bo_init_mm 584 * ttm_bo_init_mm
@@ -594,9 +595,9 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
594 * -ENOMEM: Not enough memory. 595 * -ENOMEM: Not enough memory.
595 * May also return driver-specified errors. 596 * May also return driver-specified errors.
596 */ 597 */
598int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
599 unsigned long p_size);
597 600
598extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
599 unsigned long p_size);
600/** 601/**
601 * ttm_bo_clean_mm 602 * ttm_bo_clean_mm
602 * 603 *
@@ -623,8 +624,7 @@ extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
623 * -EINVAL: invalid or uninitialized memory type. 624 * -EINVAL: invalid or uninitialized memory type.
624 * -EBUSY: There are still buffers left in this memory type. 625 * -EBUSY: There are still buffers left in this memory type.
625 */ 626 */
626 627int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
627extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
628 628
629/** 629/**
630 * ttm_bo_evict_mm 630 * ttm_bo_evict_mm
@@ -644,8 +644,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
644 * -ERESTARTSYS: The call was interrupted by a signal while waiting to 644 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
645 * evict a buffer. 645 * evict a buffer.
646 */ 646 */
647 647int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
648extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
649 648
650/** 649/**
651 * ttm_kmap_obj_virtual 650 * ttm_kmap_obj_virtual
@@ -658,7 +657,6 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
658 * If *is_iomem is 1 on return, the virtual address points to an io memory area, 657 * If *is_iomem is 1 on return, the virtual address points to an io memory area,
659 * that should strictly be accessed by the iowriteXX() and similar functions. 658 * that should strictly be accessed by the iowriteXX() and similar functions.
660 */ 659 */
661
662static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, 660static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
663 bool *is_iomem) 661 bool *is_iomem)
664{ 662{
@@ -682,9 +680,8 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
682 * -ENOMEM: Out of memory. 680 * -ENOMEM: Out of memory.
683 * -EINVAL: Invalid range. 681 * -EINVAL: Invalid range.
684 */ 682 */
685 683int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
686extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, 684 unsigned long num_pages, struct ttm_bo_kmap_obj *map);
687 unsigned long num_pages, struct ttm_bo_kmap_obj *map);
688 685
689/** 686/**
690 * ttm_bo_kunmap 687 * ttm_bo_kunmap
@@ -693,8 +690,7 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
693 * 690 *
694 * Unmaps a kernel map set up by ttm_bo_kmap. 691 * Unmaps a kernel map set up by ttm_bo_kmap.
695 */ 692 */
696 693void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
697extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
698 694
699/** 695/**
700 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. 696 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
@@ -706,20 +702,7 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
706 * This function is intended to be called by the fbdev mmap method 702 * This function is intended to be called by the fbdev mmap method
707 * if the fbdev address space is to be backed by a bo. 703 * if the fbdev address space is to be backed by a bo.
708 */ 704 */
709 705int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
710extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
711 struct ttm_buffer_object *bo);
712
713/**
714 * ttm_bo_default_iomem_pfn - get a pfn for a page offset
715 *
716 * @bo: the BO we need to look up the pfn for
717 * @page_offset: offset inside the BO to look up.
718 *
719 * Calculate the PFN for iomem based mappings during page fault
720 */
721unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
722 unsigned long page_offset);
723 706
724/** 707/**
725 * ttm_bo_mmap - mmap out of the ttm device address space. 708 * ttm_bo_mmap - mmap out of the ttm device address space.
@@ -731,9 +714,8 @@ unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
731 * This function is intended to be called by the device mmap method, 714
732 * if the device address space is to be backed by the bo manager. 715 * if the device address space is to be backed by the bo manager.
733 */ 716 */
734 717int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
735extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, 718 struct ttm_bo_device *bdev);
736 struct ttm_bo_device *bdev);
737 719
738/** 720/**
739 * ttm_bo_io 721 * ttm_bo_io
@@ -755,11 +737,12 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
755 * the function may return -ERESTARTSYS if 737 * the function may return -ERESTARTSYS if
756 * interrupted by a signal. 738 * interrupted by a signal.
757 */ 739 */
740ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
741 const char __user *wbuf, char __user *rbuf,
742 size_t count, loff_t *f_pos, bool write);
758 743
759extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, 744int ttm_bo_swapout(struct ttm_bo_global *glob,
760 const char __user *wbuf, char __user *rbuf, 745 struct ttm_operation_ctx *ctx);
761 size_t count, loff_t *f_pos, bool write); 746void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
762 747int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
763extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
764extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
765#endif 748#endif
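With the new struct ttm_operation_ctx, callers bundle the old interruptible/no_wait_gpu flags and get back per-call statistics such as bytes_moved. A minimal sketch of the new ttm_bo_validate() calling convention; my_pin_bo() and its arguments are illustrative:

static int my_pin_bo(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	int ret;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		pr_debug("validate moved %llu bytes\n",
			 (unsigned long long)ctx.bytes_moved);
	return ret;
}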
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5f821a9b3a1f..94064b126e8e 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -352,7 +352,8 @@ struct ttm_bo_driver {
352 * Returns: 352 * Returns:
353 * -ENOMEM: Out of memory. 353 * -ENOMEM: Out of memory.
354 */ 354 */
355 int (*ttm_tt_populate)(struct ttm_tt *ttm); 355 int (*ttm_tt_populate)(struct ttm_tt *ttm,
356 struct ttm_operation_ctx *ctx);
356 357
357 /** 358 /**
358 * ttm_tt_unpopulate 359 * ttm_tt_unpopulate
@@ -409,15 +410,13 @@ struct ttm_bo_driver {
409 * @bo: the buffer to move 410 * @bo: the buffer to move
410 * @evict: whether this motion is evicting the buffer from 411 * @evict: whether this motion is evicting the buffer from
411 * the graphics address space 412 * the graphics address space
412 * @interruptible: Use interruptible sleeps if possible when sleeping. 413 * @ctx: context for this move with parameters
413 * @no_wait: whether this should give up and return -EBUSY
414 * if this move would require sleeping
415 * @new_mem: the new memory region receiving the buffer 414 * @new_mem: the new memory region receiving the buffer
416 * 415 *
417 * Move a buffer between two memory regions. 416 * Move a buffer between two memory regions.
418 */ 417 */
419 int (*move)(struct ttm_buffer_object *bo, bool evict, 418 int (*move)(struct ttm_buffer_object *bo, bool evict,
420 bool interruptible, bool no_wait_gpu, 419 struct ttm_operation_ctx *ctx,
421 struct ttm_mem_reg *new_mem); 420 struct ttm_mem_reg *new_mem);
422 421
423 /** 422 /**
@@ -524,7 +523,6 @@ struct ttm_bo_global {
524 struct kobject kobj; 523 struct kobject kobj;
525 struct ttm_mem_global *mem_glob; 524 struct ttm_mem_global *mem_glob;
526 struct page *dummy_read_page; 525 struct page *dummy_read_page;
527 struct ttm_mem_shrink shrink;
528 struct mutex device_list_mutex; 526 struct mutex device_list_mutex;
529 spinlock_t lru_lock; 527 spinlock_t lru_lock;
530 528
@@ -627,12 +625,12 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
627 * Returns: 625 * Returns:
628 * NULL: Out of memory. 626 * NULL: Out of memory.
629 */ 627 */
630extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, 628int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
631 unsigned long size, uint32_t page_flags, 629 unsigned long size, uint32_t page_flags,
632 struct page *dummy_read_page); 630 struct page *dummy_read_page);
633extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, 631int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
634 unsigned long size, uint32_t page_flags, 632 unsigned long size, uint32_t page_flags,
635 struct page *dummy_read_page); 633 struct page *dummy_read_page);
636 634
637/** 635/**
638 * ttm_tt_fini 636 * ttm_tt_fini
@@ -641,8 +639,8 @@ extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bde
641 * 639 *
642 * Free memory of ttm_tt structure 640 * Free memory of ttm_tt structure
643 */ 641 */
644extern void ttm_tt_fini(struct ttm_tt *ttm); 642void ttm_tt_fini(struct ttm_tt *ttm);
645extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); 643void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
646 644
647/** 645/**
648 * ttm_ttm_bind: 646 * ttm_ttm_bind:
@@ -652,7 +650,8 @@ extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
652 * 650 *
653 * Bind the pages of @ttm to an aperture location identified by @bo_mem 651 * Bind the pages of @ttm to an aperture location identified by @bo_mem
654 */ 652 */
655extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); 653int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
654 struct ttm_operation_ctx *ctx);
656 655
657/** 656/**
658 * ttm_ttm_destroy: 657 * ttm_ttm_destroy:
@@ -661,7 +660,7 @@ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
661 * 660 *
662 * Unbind, unpopulate and destroy common struct ttm_tt. 661 * Unbind, unpopulate and destroy common struct ttm_tt.
663 */ 662 */
664extern void ttm_tt_destroy(struct ttm_tt *ttm); 663void ttm_tt_destroy(struct ttm_tt *ttm);
665 664
666/** 665/**
667 * ttm_ttm_unbind: 666 * ttm_ttm_unbind:
@@ -670,7 +669,7 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
670 * 669 *
671 * Unbind a struct ttm_tt. 670 * Unbind a struct ttm_tt.
672 */ 671 */
673extern void ttm_tt_unbind(struct ttm_tt *ttm); 672void ttm_tt_unbind(struct ttm_tt *ttm);
674 673
675/** 674/**
676 * ttm_tt_swapin: 675 * ttm_tt_swapin:
@@ -679,7 +678,7 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm);
679 * 678 *
680 * Swap in a previously swapped-out ttm_tt. 679
681 */ 680 */
682extern int ttm_tt_swapin(struct ttm_tt *ttm); 681int ttm_tt_swapin(struct ttm_tt *ttm);
683 682
684/** 683/**
685 * ttm_tt_set_placement_caching: 684 * ttm_tt_set_placement_caching:
@@ -694,9 +693,8 @@ extern int ttm_tt_swapin(struct ttm_tt *ttm);
694 * hit RAM. This function may be very costly as it involves global TLB 693 * hit RAM. This function may be very costly as it involves global TLB
695 * and cache flushes and potential page splitting / combining. 694 * and cache flushes and potential page splitting / combining.
696 */ 695 */
697extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); 696int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
698extern int ttm_tt_swapout(struct ttm_tt *ttm, 697int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage);
699 struct file *persistent_swap_storage);
700 698
701/** 699/**
702 * ttm_tt_unpopulate - free pages from a ttm 700 * ttm_tt_unpopulate - free pages from a ttm
@@ -705,7 +703,7 @@ extern int ttm_tt_swapout(struct ttm_tt *ttm,
705 * 703 *
706 * Calls the driver method to free all pages from a ttm 704 * Calls the driver method to free all pages from a ttm
707 */ 705 */
708extern void ttm_tt_unpopulate(struct ttm_tt *ttm); 706void ttm_tt_unpopulate(struct ttm_tt *ttm);
709 707
710/* 708/*
711 * ttm_bo.c 709 * ttm_bo.c
@@ -720,8 +718,7 @@ extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
720 * Returns true if the memory described by @mem is PCI memory, 718 * Returns true if the memory described by @mem is PCI memory,
721 * false otherwise. 719 * false otherwise.
722 */ 720 */
723extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, 721bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
724 struct ttm_mem_reg *mem);
725 722
726/** 723/**
727 * ttm_bo_mem_space 724 * ttm_bo_mem_space
@@ -742,21 +739,19 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
742 * fragmentation or concurrent allocators. 739 * fragmentation or concurrent allocators.
743 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. 740 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
744 */ 741 */
745extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, 742int ttm_bo_mem_space(struct ttm_buffer_object *bo,
746 struct ttm_placement *placement, 743 struct ttm_placement *placement,
747 struct ttm_mem_reg *mem, 744 struct ttm_mem_reg *mem,
748 bool interruptible, 745 struct ttm_operation_ctx *ctx);
749 bool no_wait_gpu);
750 746
751extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, 747void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
748void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
752 struct ttm_mem_reg *mem); 749 struct ttm_mem_reg *mem);
753extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
754 struct ttm_mem_reg *mem);
755 750
756extern void ttm_bo_global_release(struct drm_global_reference *ref); 751void ttm_bo_global_release(struct drm_global_reference *ref);
757extern int ttm_bo_global_init(struct drm_global_reference *ref); 752int ttm_bo_global_init(struct drm_global_reference *ref);
758 753
759extern int ttm_bo_device_release(struct ttm_bo_device *bdev); 754int ttm_bo_device_release(struct ttm_bo_device *bdev);
760 755
761/** 756/**
762 * ttm_bo_device_init 757 * ttm_bo_device_init
@@ -773,18 +768,17 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
773 * Returns: 768 * Returns:
774 * !0: Failure. 769 * !0: Failure.
775 */ 770 */
776extern int ttm_bo_device_init(struct ttm_bo_device *bdev, 771int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
777 struct ttm_bo_global *glob, 772 struct ttm_bo_driver *driver,
778 struct ttm_bo_driver *driver, 773 struct address_space *mapping,
779 struct address_space *mapping, 774 uint64_t file_page_offset, bool need_dma32);
780 uint64_t file_page_offset, bool need_dma32);
781 775
782/** 776/**
783 * ttm_bo_unmap_virtual 777 * ttm_bo_unmap_virtual
784 * 778 *
785 * @bo: tear down the virtual mappings for this BO 779 * @bo: tear down the virtual mappings for this BO
786 */ 780 */
787extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); 781void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
788 782
789/** 783/**
790 * ttm_bo_unmap_virtual 784 * ttm_bo_unmap_virtual
@@ -793,16 +787,15 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
793 * 787 *
794 * The caller must take ttm_mem_io_lock before calling this function. 788 * The caller must take ttm_mem_io_lock before calling this function.
795 */ 789 */
796extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); 790void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
797 791
798extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); 792int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
799extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); 793void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
800extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, 794int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
801 bool interruptible); 795void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
802extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
803 796
804extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); 797void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
805extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); 798void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
806 799
807/** 800/**
808 * __ttm_bo_reserve: 801 * __ttm_bo_reserve:
@@ -836,14 +829,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
836 if (WARN_ON(ticket)) 829 if (WARN_ON(ticket))
837 return -EBUSY; 830 return -EBUSY;
838 831
839 success = ww_mutex_trylock(&bo->resv->lock); 832 success = reservation_object_trylock(bo->resv);
840 return success ? 0 : -EBUSY; 833 return success ? 0 : -EBUSY;
841 } 834 }
842 835
843 if (interruptible) 836 if (interruptible)
844 ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket); 837 ret = reservation_object_lock_interruptible(bo->resv, ticket);
845 else 838 else
846 ret = ww_mutex_lock(&bo->resv->lock, ticket); 839 ret = reservation_object_lock(bo->resv, ticket);
847 if (ret == -EINTR) 840 if (ret == -EINTR)
848 return -ERESTARTSYS; 841 return -ERESTARTSYS;
849 return ret; 842 return ret;
@@ -941,18 +934,6 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
941} 934}
942 935
943/** 936/**
944 * __ttm_bo_unreserve
945 * @bo: A pointer to a struct ttm_buffer_object.
946 *
947 * Unreserve a previous reservation of @bo where the buffer object is
948 * already on lru lists.
949 */
950static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
951{
952 ww_mutex_unlock(&bo->resv->lock);
953}
954
955/**
956 * ttm_bo_unreserve 937 * ttm_bo_unreserve
957 * 938 *
958 * @bo: A pointer to a struct ttm_buffer_object. 939 * @bo: A pointer to a struct ttm_buffer_object.
@@ -966,20 +947,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
966 ttm_bo_add_to_lru(bo); 947 ttm_bo_add_to_lru(bo);
967 spin_unlock(&bo->glob->lru_lock); 948 spin_unlock(&bo->glob->lru_lock);
968 } 949 }
969 __ttm_bo_unreserve(bo); 950 reservation_object_unlock(bo->resv);
970}
971
972/**
973 * ttm_bo_unreserve_ticket
974 * @bo: A pointer to a struct ttm_buffer_object.
975 * @ticket: ww_acquire_ctx used for reserving
976 *
977 * Unreserve a previous reservation of @bo made with @ticket.
978 */
979static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
980 struct ww_acquire_ctx *t)
981{
982 ttm_bo_unreserve(bo);
983} 951}
984 952
985/* 953/*
@@ -1008,9 +976,9 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
1008 * !0: Failure. 976 * !0: Failure.
1009 */ 977 */
1010 978
1011extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 979int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
1012 bool interruptible, bool no_wait_gpu, 980 struct ttm_operation_ctx *ctx,
1013 struct ttm_mem_reg *new_mem); 981 struct ttm_mem_reg *new_mem);
1014 982
1015/** 983/**
1016 * ttm_bo_move_memcpy 984 * ttm_bo_move_memcpy
@@ -1030,9 +998,9 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
1030 * !0: Failure. 998 * !0: Failure.
1031 */ 999 */
1032 1000
1033extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 1001int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
1034 bool interruptible, bool no_wait_gpu, 1002 struct ttm_operation_ctx *ctx,
1035 struct ttm_mem_reg *new_mem); 1003 struct ttm_mem_reg *new_mem);
1036 1004
1037/** 1005/**
1038 * ttm_bo_free_old_node 1006 * ttm_bo_free_old_node
@@ -1041,7 +1009,7 @@ extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
1041 * 1009 *
1042 * Utility function to free an old placement after a successful move. 1010 * Utility function to free an old placement after a successful move.
1043 */ 1011 */
1044extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); 1012void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
1045 1013
1046/** 1014/**
1047 * ttm_bo_move_accel_cleanup. 1015 * ttm_bo_move_accel_cleanup.
@@ -1058,10 +1026,9 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
1058 * destroyed when the move is complete. This will help pipeline 1026 * destroyed when the move is complete. This will help pipeline
1059 * buffer moves. 1027 * buffer moves.
1060 */ 1028 */
1061 1029int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
1062extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 1030 struct dma_fence *fence, bool evict,
1063 struct dma_fence *fence, bool evict, 1031 struct ttm_mem_reg *new_mem);
1064 struct ttm_mem_reg *new_mem);
1065 1032
1066/** 1033/**
1067 * ttm_bo_pipeline_move. 1034 * ttm_bo_pipeline_move.
@@ -1087,7 +1054,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
1087 * Utility function that returns the pgprot_t that should be used for 1054 * Utility function that returns the pgprot_t that should be used for
1088 * setting up a PTE with the caching model indicated by @c_state. 1055 * setting up a PTE with the caching model indicated by @c_state.
1089 */ 1056 */
1090extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); 1057pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
1091 1058
1092extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; 1059extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
1093 1060
@@ -1108,11 +1075,11 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
1108 * for TT memory. This function uses the linux agpgart interface to 1075 * for TT memory. This function uses the linux agpgart interface to
1109 * bind and unbind memory backing a ttm_tt. 1076 * bind and unbind memory backing a ttm_tt.
1110 */ 1077 */
1111extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, 1078struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
1112 struct agp_bridge_data *bridge, 1079 struct agp_bridge_data *bridge,
1113 unsigned long size, uint32_t page_flags, 1080 unsigned long size, uint32_t page_flags,
1114 struct page *dummy_read_page); 1081 struct page *dummy_read_page);
1115int ttm_agp_tt_populate(struct ttm_tt *ttm); 1082int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
1116void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); 1083void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
1117#endif 1084#endif
1118 1085
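The ttm_tt_populate and move callbacks now receive the caller's struct ttm_operation_ctx, so drivers simply forward it to the helpers that allocate or move memory. A sketch of a populate callback under the new signature, assuming the driver uses the generic page pool helpers from ttm_page_alloc.h; my_ttm_tt_populate and my_bo_driver are placeholders and the remaining driver callbacks are omitted:

static int my_ttm_tt_populate(struct ttm_tt *ttm,
			      struct ttm_operation_ctx *ctx)
{
	/* Forward the context so the pool can honour interruptible/no_wait_gpu
	 * and account the allocation against the memory global. */
	return ttm_pool_populate(ttm, ctx);
}

static struct ttm_bo_driver my_bo_driver = {
	.ttm_tt_populate	= my_ttm_tt_populate,
	.ttm_tt_unpopulate	= ttm_pool_unpopulate,
	/* ... remaining callbacks elided ... */
};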
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 2c1e3598effe..8936285b6543 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -35,20 +35,7 @@
35#include <linux/errno.h> 35#include <linux/errno.h>
36#include <linux/kobject.h> 36#include <linux/kobject.h>
37#include <linux/mm.h> 37#include <linux/mm.h>
38 38#include "ttm_bo_api.h"
39/**
40 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
41 *
42 * @do_shrink: The callback function.
43 *
44 * Arguments to the do_shrink functions are intended to be passed using
45 * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
46 * and can be accessed using container_of().
47 */
48
49struct ttm_mem_shrink {
50 int (*do_shrink) (struct ttm_mem_shrink *);
51};
52 39
53/** 40/**
54 * struct ttm_mem_global - Global memory accounting structure. 41 * struct ttm_mem_global - Global memory accounting structure.
@@ -76,7 +63,7 @@ struct ttm_mem_shrink {
76struct ttm_mem_zone; 63struct ttm_mem_zone;
77struct ttm_mem_global { 64struct ttm_mem_global {
78 struct kobject kobj; 65 struct kobject kobj;
79 struct ttm_mem_shrink *shrink; 66 struct ttm_bo_global *bo_glob;
80 struct workqueue_struct *swap_queue; 67 struct workqueue_struct *swap_queue;
81 struct work_struct work; 68 struct work_struct work;
82 spinlock_t lock; 69 spinlock_t lock;
@@ -90,67 +77,15 @@ struct ttm_mem_global {
90#endif 77#endif
91}; 78};
92 79
93/**
94 * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
95 *
96 * @shrink: The object to initialize.
97 * @func: The callback function.
98 */
99
100static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
101 int (*func) (struct ttm_mem_shrink *))
102{
103 shrink->do_shrink = func;
104}
105
106/**
107 * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
108 *
109 * @glob: The struct ttm_mem_global object to register with.
110 * @shrink: An initialized struct ttm_mem_shrink object to register.
111 *
112 * Returns:
113 * -EBUSY: There's already a callback registered. (May change).
114 */
115
116static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
117 struct ttm_mem_shrink *shrink)
118{
119 spin_lock(&glob->lock);
120 if (glob->shrink != NULL) {
121 spin_unlock(&glob->lock);
122 return -EBUSY;
123 }
124 glob->shrink = shrink;
125 spin_unlock(&glob->lock);
126 return 0;
127}
128
129/**
130 * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
131 *
132 * @glob: The struct ttm_mem_global object to unregister from.
133 * @shrink: A previously registert struct ttm_mem_shrink object.
134 *
135 */
136
137static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
138 struct ttm_mem_shrink *shrink)
139{
140 spin_lock(&glob->lock);
141 BUG_ON(glob->shrink != shrink);
142 glob->shrink = NULL;
143 spin_unlock(&glob->lock);
144}
145
146extern int ttm_mem_global_init(struct ttm_mem_global *glob); 80extern int ttm_mem_global_init(struct ttm_mem_global *glob);
147extern void ttm_mem_global_release(struct ttm_mem_global *glob); 81extern void ttm_mem_global_release(struct ttm_mem_global *glob);
148extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, 82extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
149 bool no_wait, bool interruptible); 83 struct ttm_operation_ctx *ctx);
150extern void ttm_mem_global_free(struct ttm_mem_global *glob, 84extern void ttm_mem_global_free(struct ttm_mem_global *glob,
151 uint64_t amount); 85 uint64_t amount);
152extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, 86extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
153 struct page *page, uint64_t size); 87 struct page *page, uint64_t size,
88 struct ttm_operation_ctx *ctx);
154extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, 89extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
155 struct page *page, uint64_t size); 90 struct page *page, uint64_t size);
156extern size_t ttm_round_pot(size_t size); 91extern size_t ttm_round_pot(size_t size);
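ttm_mem_global_alloc() and ttm_mem_global_alloc_page() likewise take the operation context instead of separate no_wait/interruptible booleans. A small illustrative fragment; glob and size are assumed to come from the caller:

static int my_account_alloc(struct ttm_mem_global *glob, uint64_t size)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};

	return ttm_mem_global_alloc(glob, size, &ctx);
}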
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 593811362a91..4d9b019d253c 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void);
47 * 47 *
48 * Add backing pages to all of @ttm 48 * Add backing pages to all of @ttm
49 */ 49 */
50int ttm_pool_populate(struct ttm_tt *ttm); 50int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
51 51
52/** 52/**
53 * ttm_pool_unpopulate: 53 * ttm_pool_unpopulate:
@@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
61/** 61/**
62 * Populates and DMA maps pages to fullfil a ttm_dma_populate() request 62 * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
63 */ 63 */
64int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); 64int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
65 struct ttm_operation_ctx *ctx);
65 66
66/** 67/**
67 * Unpopulates and DMA unmaps pages as part of a 68 * Unpopulates and DMA unmaps pages as part of a
@@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void);
89 */ 90 */
90int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); 91int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
91 92
92int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); 93int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
94 struct ttm_operation_ctx *ctx);
93void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); 95void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
94 96
95#else 97#else
@@ -106,7 +108,8 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 	return 0;
 }
 static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
-				   struct device *dev)
+				   struct device *dev,
+				   struct ttm_operation_ctx *ctx)
 {
 	return -ENOMEM;
 }
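
As with ttm_memory.h above, the page-pool populate paths now take the caller's operation context so allocation behaviour follows the request that triggered the populate. A hedged sketch of a driver backend simply forwarding its context; the wrapper names are hypothetical:

#include <drm/ttm/ttm_page_alloc.h>

static int example_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	return ttm_pool_populate(ttm, ctx);
}

static int example_dma_tt_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
				   struct ttm_operation_ctx *ctx)
{
	return ttm_dma_populate(ttm_dma, dev, ctx);
}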
diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h
new file mode 100644
index 000000000000..2c005376ac0e
--- /dev/null
+++ b/include/dt-bindings/bus/ti-sysc.h
@@ -0,0 +1,22 @@
1/* TI sysc interconnect target module defines */
2
3/* Generic sysc found on omap2 and later, also known as type1 */
4#define SYSC_OMAP2_CLOCKACTIVITY (3 << 8)
5#define SYSC_OMAP2_EMUFREE (1 << 5)
6#define SYSC_OMAP2_ENAWAKEUP (1 << 2)
7#define SYSC_OMAP2_SOFTRESET (1 << 1)
8#define SYSC_OMAP2_AUTOIDLE (1 << 0)
9
10/* Generic sysc found on omap4 and later, also known as type2 */
11#define SYSC_OMAP4_DMADISABLE (1 << 16)
12#define SYSC_OMAP4_FREEEMU (1 << 1) /* Also known as EMUFREE */
13#define SYSC_OMAP4_SOFTRESET (1 << 0)
14
15/* SmartReflex sysc found on 36xx and later */
16#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26)
17
18/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
19#define SYSC_IDLE_FORCE 0
20#define SYSC_IDLE_NO 1
21#define SYSC_IDLE_SMART 2
22#define SYSC_IDLE_SMART_WKUP 3
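
The SYSC_OMAP* constants mirror bit positions in a target module's SYSCONFIG register, so a consumer simply ORs them together; in device trees the SYSC_IDLE_* values typically appear directly in the ti,sysc-* idle-mode properties. A purely illustrative C sketch; the placement of the idle-mode field at bit 3 is an assumption of this example, not something the header defines:

#include <dt-bindings/bus/ti-sysc.h>

/* Type1 SYSCONFIG value: wakeup enabled, autoidle on, smart idle mode
 * (idle-mode field position assumed to be bit 3 for illustration only). */
#define EXAMPLE_SYSCONFIG	(SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE | \
				 (SYSC_IDLE_SMART << 3))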
diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h
new file mode 100644
index 000000000000..b396f00e481d
--- /dev/null
+++ b/include/dt-bindings/clock/am3.h
@@ -0,0 +1,108 @@
1/*
2 * Copyright 2017 Texas Instruments, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __DT_BINDINGS_CLK_AM3_H
14#define __DT_BINDINGS_CLK_AM3_H
15
16#define AM3_CLKCTRL_OFFSET 0x0
17#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET)
18
19/* l4_per clocks */
20#define AM3_L4_PER_CLKCTRL_OFFSET 0x14
21#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
22#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14)
23#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18)
24#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c)
25#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24)
26#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28)
27#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c)
28#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30)
29#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34)
30#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38)
31#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c)
32#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40)
33#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44)
34#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48)
35#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c)
36#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50)
37#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60)
38#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68)
39#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c)
40#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70)
41#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74)
42#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78)
43#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c)
44#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80)
45#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84)
46#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88)
47#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90)
48#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94)
49#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0)
50#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac)
51#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0)
52#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4)
53#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc)
54#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0)
55#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4)
56#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc)
57#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4)
58#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8)
59#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc)
60#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0)
61#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8)
62#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec)
63#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0)
64#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4)
65#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8)
66#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc)
67#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100)
68#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c)
69#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110)
70#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120)
71#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130)
72#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c)
73
74/* l4_wkup clocks */
75#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4
76#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET)
77#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4)
78#define AM3_GPIO1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x8)
79#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc)
80#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14)
81#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0)
82#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4)
83#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8)
84#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc)
85#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0)
86#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4)
87#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8)
88#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4)
89
90/* mpu clocks */
91#define AM3_MPU_CLKCTRL_OFFSET 0x4
92#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET)
93#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4)
94
95/* l4_rtc clocks */
96#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0)
97
98/* gfx_l3 clocks */
99#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4
100#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET)
101#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4)
102
103/* l4_cefuse clocks */
104#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20
105#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
106#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
107
108#endif
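
The *_CLKCTRL_INDEX() helpers above (and in the am4, dm814, dm816, dra7 and omap5 headers that follow) just convert a CM_*_CLKCTRL register offset into a zero-based index within its clock domain. A small compile-time illustration using nothing beyond the header itself:

#include <dt-bindings/clock/am3.h>

/* AM3_MMC1_CLKCTRL = AM3_L4_PER_CLKCTRL_INDEX(0x3c) = 0x3c - 0x14 = 0x28 */
_Static_assert(AM3_MMC1_CLKCTRL == 0x28, "offset-to-index arithmetic");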
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
new file mode 100644
index 000000000000..d21df00b3270
--- /dev/null
+++ b/include/dt-bindings/clock/am4.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2017 Texas Instruments, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __DT_BINDINGS_CLK_AM4_H
14#define __DT_BINDINGS_CLK_AM4_H
15
16#define AM4_CLKCTRL_OFFSET 0x20
17#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET)
18
19/* l4_wkup clocks */
20#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120)
21#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
22#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228)
23#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230)
24#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328)
25#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338)
26#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340)
27#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348)
28#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350)
29#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358)
30#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360)
31#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368)
32
33/* mpu clocks */
34#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
35
36/* gfx_l3 clocks */
37#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
38
39/* l4_rtc clocks */
40#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
41
42/* l4_per clocks */
43#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
44#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28)
45#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30)
46#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40)
47#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50)
48#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58)
49#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68)
50#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70)
51#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78)
52#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80)
53#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88)
54#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90)
55#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0)
56#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
57#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238)
58#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240)
59#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248)
60#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258)
61#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260)
62#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268)
63#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320)
64#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420)
65#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428)
66#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430)
67#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438)
68#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440)
69#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448)
70#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450)
71#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458)
72#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460)
73#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468)
74#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478)
75#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480)
76#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488)
77#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490)
78#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498)
79#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0)
80#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8)
81#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0)
82#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8)
83#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0)
84#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8)
85#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0)
86#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500)
87#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508)
88#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510)
89#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518)
90#define AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520)
91#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528)
92#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530)
93#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538)
94#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540)
95#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548)
96#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550)
97#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558)
98#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560)
99#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568)
100#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570)
101#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578)
102#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580)
103#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588)
104#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590)
105#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598)
106#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0)
107#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8)
108#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0)
109#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720)
110#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20)
111#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20)
112
113#endif
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
new file mode 100644
index 000000000000..d3558d897a4d
--- /dev/null
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -0,0 +1,52 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2
3#ifndef DT_BINDINGS_ASPEED_CLOCK_H
4#define DT_BINDINGS_ASPEED_CLOCK_H
5
6#define ASPEED_CLK_GATE_ECLK 0
7#define ASPEED_CLK_GATE_GCLK 1
8#define ASPEED_CLK_GATE_MCLK 2
9#define ASPEED_CLK_GATE_VCLK 3
10#define ASPEED_CLK_GATE_BCLK 4
11#define ASPEED_CLK_GATE_DCLK 5
12#define ASPEED_CLK_GATE_REFCLK 6
13#define ASPEED_CLK_GATE_USBPORT2CLK 7
14#define ASPEED_CLK_GATE_LCLK 8
15#define ASPEED_CLK_GATE_USBUHCICLK 9
16#define ASPEED_CLK_GATE_D1CLK 10
17#define ASPEED_CLK_GATE_YCLK 11
18#define ASPEED_CLK_GATE_USBPORT1CLK 12
19#define ASPEED_CLK_GATE_UART1CLK 13
20#define ASPEED_CLK_GATE_UART2CLK 14
21#define ASPEED_CLK_GATE_UART5CLK 15
22#define ASPEED_CLK_GATE_ESPICLK 16
23#define ASPEED_CLK_GATE_MAC1CLK 17
24#define ASPEED_CLK_GATE_MAC2CLK 18
25#define ASPEED_CLK_GATE_RSACLK 19
26#define ASPEED_CLK_GATE_UART3CLK 20
27#define ASPEED_CLK_GATE_UART4CLK 21
28#define ASPEED_CLK_GATE_SDCLKCLK 22
29#define ASPEED_CLK_GATE_LHCCLK 23
30#define ASPEED_CLK_HPLL 24
31#define ASPEED_CLK_AHB 25
32#define ASPEED_CLK_APB 26
33#define ASPEED_CLK_UART 27
34#define ASPEED_CLK_SDIO 28
35#define ASPEED_CLK_ECLK 29
36#define ASPEED_CLK_ECLK_MUX 30
37#define ASPEED_CLK_LHCLK 31
38#define ASPEED_CLK_MAC 32
39#define ASPEED_CLK_BCLK 33
40#define ASPEED_CLK_MPLL 34
41
42#define ASPEED_RESET_XDMA 0
43#define ASPEED_RESET_MCTP 1
44#define ASPEED_RESET_ADC 2
45#define ASPEED_RESET_JTAG_MASTER 3
46#define ASPEED_RESET_MIC 4
47#define ASPEED_RESET_PWM 5
48#define ASPEED_RESET_PCIVGA 6
49#define ASPEED_RESET_I2C 7
50#define ASPEED_RESET_AHB 8
51
52#endif
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
new file mode 100644
index 000000000000..941ac70e7f30
--- /dev/null
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -0,0 +1,71 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
2/*
3 * Meson-AXG clock tree IDs
4 *
5 * Copyright (c) 2017 Amlogic, Inc. All rights reserved.
6 */
7
8#ifndef __AXG_CLKC_H
9#define __AXG_CLKC_H
10
11#define CLKID_SYS_PLL 0
12#define CLKID_FIXED_PLL 1
13#define CLKID_FCLK_DIV2 2
14#define CLKID_FCLK_DIV3 3
15#define CLKID_FCLK_DIV4 4
16#define CLKID_FCLK_DIV5 5
17#define CLKID_FCLK_DIV7 6
18#define CLKID_GP0_PLL 7
19#define CLKID_CLK81 10
20#define CLKID_MPLL0 11
21#define CLKID_MPLL1 12
22#define CLKID_MPLL2 13
23#define CLKID_MPLL3 14
24#define CLKID_DDR 15
25#define CLKID_AUDIO_LOCKER 16
26#define CLKID_MIPI_DSI_HOST 17
27#define CLKID_ISA 18
28#define CLKID_PL301 19
29#define CLKID_PERIPHS 20
30#define CLKID_SPICC0 21
31#define CLKID_I2C 22
32#define CLKID_RNG0 23
33#define CLKID_UART0 24
34#define CLKID_MIPI_DSI_PHY 25
35#define CLKID_SPICC1 26
36#define CLKID_PCIE_A 27
37#define CLKID_PCIE_B 28
38#define CLKID_HIU_IFACE 29
39#define CLKID_ASSIST_MISC 30
40#define CLKID_SD_EMMC_B 31
41#define CLKID_SD_EMMC_C 32
42#define CLKID_DMA 33
43#define CLKID_SPI 34
44#define CLKID_AUDIO 35
45#define CLKID_ETH 36
46#define CLKID_UART1 37
47#define CLKID_G2D 38
48#define CLKID_USB0 39
49#define CLKID_USB1 40
50#define CLKID_RESET 41
51#define CLKID_USB 42
52#define CLKID_AHB_ARB0 43
53#define CLKID_EFUSE 44
54#define CLKID_BOOT_ROM 45
55#define CLKID_AHB_DATA_BUS 46
56#define CLKID_AHB_CTRL_BUS 47
57#define CLKID_USB1_DDR_BRIDGE 48
58#define CLKID_USB0_DDR_BRIDGE 49
59#define CLKID_MMC_PCLK 50
60#define CLKID_VPU_INTR 51
61#define CLKID_SEC_AHB_AHB3_BRIDGE 52
62#define CLKID_GIC 53
63#define CLKID_AO_MEDIA_CPU 54
64#define CLKID_AO_AHB_SRAM 55
65#define CLKID_AO_AHB_BUS 56
66#define CLKID_AO_IFACE 57
67#define CLKID_AO_I2C 58
68#define CLKID_SD_EMMC_B_CLK0 59
69#define CLKID_SD_EMMC_C_CLK0 60
70
71#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h
new file mode 100644
index 000000000000..0e7099a344e1
--- /dev/null
+++ b/include/dt-bindings/clock/dm814.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2017 Texas Instruments, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __DT_BINDINGS_CLK_DM814_H
14#define __DT_BINDINGS_CLK_DM814_H
15
16#define DM814_CLKCTRL_OFFSET 0x0
17#define DM814_CLKCTRL_INDEX(offset) ((offset) - DM814_CLKCTRL_OFFSET)
18
19/* default clocks */
20#define DM814_USB_OTG_HS_CLKCTRL DM814_CLKCTRL_INDEX(0x58)
21
22/* alwon clocks */
23#define DM814_UART1_CLKCTRL DM814_CLKCTRL_INDEX(0x150)
24#define DM814_UART2_CLKCTRL DM814_CLKCTRL_INDEX(0x154)
25#define DM814_UART3_CLKCTRL DM814_CLKCTRL_INDEX(0x158)
26#define DM814_GPIO1_CLKCTRL DM814_CLKCTRL_INDEX(0x15c)
27#define DM814_GPIO2_CLKCTRL DM814_CLKCTRL_INDEX(0x160)
28#define DM814_I2C1_CLKCTRL DM814_CLKCTRL_INDEX(0x164)
29#define DM814_I2C2_CLKCTRL DM814_CLKCTRL_INDEX(0x168)
30#define DM814_WD_TIMER_CLKCTRL DM814_CLKCTRL_INDEX(0x18c)
31#define DM814_MCSPI1_CLKCTRL DM814_CLKCTRL_INDEX(0x190)
32#define DM814_GPMC_CLKCTRL DM814_CLKCTRL_INDEX(0x1d0)
33#define DM814_CPGMAC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1d4)
34#define DM814_MPU_CLKCTRL DM814_CLKCTRL_INDEX(0x1dc)
35#define DM814_RTC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f0)
36#define DM814_TPCC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f4)
37#define DM814_TPTC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1f8)
38#define DM814_TPTC1_CLKCTRL DM814_CLKCTRL_INDEX(0x1fc)
39#define DM814_TPTC2_CLKCTRL DM814_CLKCTRL_INDEX(0x200)
40#define DM814_TPTC3_CLKCTRL DM814_CLKCTRL_INDEX(0x204)
41#define DM814_MMC1_CLKCTRL DM814_CLKCTRL_INDEX(0x21c)
42#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220)
43#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224)
44
45#endif
diff --git a/include/dt-bindings/clock/dm816.h b/include/dt-bindings/clock/dm816.h
new file mode 100644
index 000000000000..69e8a36d783e
--- /dev/null
+++ b/include/dt-bindings/clock/dm816.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2017 Texas Instruments, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __DT_BINDINGS_CLK_DM816_H
14#define __DT_BINDINGS_CLK_DM816_H
15
16#define DM816_CLKCTRL_OFFSET 0x0
17#define DM816_CLKCTRL_INDEX(offset) ((offset) - DM816_CLKCTRL_OFFSET)
18
19/* default clocks */
20#define DM816_USB_OTG_HS_CLKCTRL DM816_CLKCTRL_INDEX(0x58)
21
22/* alwon clocks */
23#define DM816_UART1_CLKCTRL DM816_CLKCTRL_INDEX(0x150)
24#define DM816_UART2_CLKCTRL DM816_CLKCTRL_INDEX(0x154)
25#define DM816_UART3_CLKCTRL DM816_CLKCTRL_INDEX(0x158)
26#define DM816_GPIO1_CLKCTRL DM816_CLKCTRL_INDEX(0x15c)
27#define DM816_GPIO2_CLKCTRL DM816_CLKCTRL_INDEX(0x160)
28#define DM816_I2C1_CLKCTRL DM816_CLKCTRL_INDEX(0x164)
29#define DM816_I2C2_CLKCTRL DM816_CLKCTRL_INDEX(0x168)
30#define DM816_TIMER1_CLKCTRL DM816_CLKCTRL_INDEX(0x170)
31#define DM816_TIMER2_CLKCTRL DM816_CLKCTRL_INDEX(0x174)
32#define DM816_TIMER3_CLKCTRL DM816_CLKCTRL_INDEX(0x178)
33#define DM816_TIMER4_CLKCTRL DM816_CLKCTRL_INDEX(0x17c)
34#define DM816_TIMER5_CLKCTRL DM816_CLKCTRL_INDEX(0x180)
35#define DM816_TIMER6_CLKCTRL DM816_CLKCTRL_INDEX(0x184)
36#define DM816_TIMER7_CLKCTRL DM816_CLKCTRL_INDEX(0x188)
37#define DM816_WD_TIMER_CLKCTRL DM816_CLKCTRL_INDEX(0x18c)
38#define DM816_MCSPI1_CLKCTRL DM816_CLKCTRL_INDEX(0x190)
39#define DM816_MAILBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x194)
40#define DM816_SPINBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x198)
41#define DM816_MMC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1b0)
42#define DM816_GPMC_CLKCTRL DM816_CLKCTRL_INDEX(0x1d0)
43#define DM816_DAVINCI_MDIO_CLKCTRL DM816_CLKCTRL_INDEX(0x1d4)
44#define DM816_EMAC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1d8)
45#define DM816_MPU_CLKCTRL DM816_CLKCTRL_INDEX(0x1dc)
46#define DM816_RTC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f0)
47#define DM816_TPCC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f4)
48#define DM816_TPTC0_CLKCTRL DM816_CLKCTRL_INDEX(0x1f8)
49#define DM816_TPTC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1fc)
50#define DM816_TPTC2_CLKCTRL DM816_CLKCTRL_INDEX(0x200)
51#define DM816_TPTC3_CLKCTRL DM816_CLKCTRL_INDEX(0x204)
52
53#endif
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
new file mode 100644
index 000000000000..5e1061b15aed
--- /dev/null
+++ b/include/dt-bindings/clock/dra7.h
@@ -0,0 +1,172 @@
1/*
2 * Copyright 2017 Texas Instruments, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __DT_BINDINGS_CLK_DRA7_H
14#define __DT_BINDINGS_CLK_DRA7_H
15
16#define DRA7_CLKCTRL_OFFSET 0x20
17#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET)
18
19/* mpu clocks */
20#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
21
22/* ipu clocks */
23#define DRA7_IPU_CLKCTRL_OFFSET 0x40
24#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET)
25#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50)
26#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58)
27#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60)
28#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68)
29#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70)
30#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78)
31#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80)
32
33/* rtc clocks */
34#define DRA7_RTC_CLKCTRL_OFFSET 0x40
35#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
36#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44)
37
38/* coreaon clocks */
39#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
40#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
41
42/* l3main1 clocks */
43#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
44#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
45#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
46#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
47#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
48#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
49#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
50
51/* dma clocks */
52#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
53
54/* emif clocks */
55#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
56
57/* atl clocks */
58#define DRA7_ATL_CLKCTRL_OFFSET 0x0
59#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
60#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0)
61
62/* l4cfg clocks */
63#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
64#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
65#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
66#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
67#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
68#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58)
69#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60)
70#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68)
71#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
72#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
73#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
74#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
75#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
76#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
77#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
78
79/* l3instr clocks */
80#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
81#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
82
83/* dss clocks */
84#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
85#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
86
87/* l3init clocks */
88#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
89#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
90#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
91#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
92#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
93#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
94#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0)
95#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8)
96#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0)
97#define DRA7_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0)
98#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8)
99#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
100
101/* l4per clocks */
102#define DRA7_L4PER_CLKCTRL_OFFSET 0x0
103#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET)
104#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc)
105#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14)
106#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28)
107#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30)
108#define DRA7_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38)
109#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40)
110#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48)
111#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50)
112#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58)
113#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60)
114#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68)
115#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70)
116#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78)
117#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80)
118#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88)
119#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90)
120#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98)
121#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0)
122#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8)
123#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0)
124#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8)
125#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0)
126#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4)
127#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8)
128#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0)
129#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8)
130#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0)
131#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8)
132#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100)
133#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108)
134#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110)
135#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118)
136#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120)
137#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128)
138#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130)
139#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138)
140#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140)
141#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148)
142#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150)
143#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158)
144#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160)
145#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168)
146#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170)
147#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178)
148#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190)
149#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198)
150#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
151#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
152#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
153#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
154#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
155#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
156#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
157#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
158#define DRA7_DCAN2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
159#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204)
160#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208)
161
162/* wkupaon clocks */
163#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
164#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
165#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
166#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
167#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
168#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
169#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
170#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
171
172#endif
diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h
index adb768d447a5..75d583eb84dd 100644
--- a/include/dt-bindings/clock/hi3660-clock.h
+++ b/include/dt-bindings/clock/hi3660-clock.h
@@ -208,4 +208,11 @@
208#define HI3660_CLK_I2C6_IOMCU		3
209#define HI3660_CLK_IOMCU_PERI0		4
210
211/* clk in stub clock */
212#define HI3660_CLK_STUB_CLUSTER0 0
213#define HI3660_CLK_STUB_CLUSTER1 1
214#define HI3660_CLK_STUB_GPU 2
215#define HI3660_CLK_STUB_DDR 3
216#define HI3660_CLK_STUB_NUM 4
217
218#endif /* __DTS_HI3660_CLOCK_H */
diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/jz4770-cgu.h
new file mode 100644
index 000000000000..d68a7695a1f8
--- /dev/null
+++ b/include/dt-bindings/clock/jz4770-cgu.h
@@ -0,0 +1,58 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * This header provides clock numbers for the ingenic,jz4770-cgu DT binding.
4 */
5
6#ifndef __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
7#define __DT_BINDINGS_CLOCK_JZ4770_CGU_H__
8
9#define JZ4770_CLK_EXT 0
10#define JZ4770_CLK_OSC32K 1
11#define JZ4770_CLK_PLL0 2
12#define JZ4770_CLK_PLL1 3
13#define JZ4770_CLK_CCLK 4
14#define JZ4770_CLK_H0CLK 5
15#define JZ4770_CLK_H1CLK 6
16#define JZ4770_CLK_H2CLK 7
17#define JZ4770_CLK_C1CLK 8
18#define JZ4770_CLK_PCLK 9
19#define JZ4770_CLK_MMC0_MUX 10
20#define JZ4770_CLK_MMC0 11
21#define JZ4770_CLK_MMC1_MUX 12
22#define JZ4770_CLK_MMC1 13
23#define JZ4770_CLK_MMC2_MUX 14
24#define JZ4770_CLK_MMC2 15
25#define JZ4770_CLK_CIM 16
26#define JZ4770_CLK_UHC 17
27#define JZ4770_CLK_GPU 18
28#define JZ4770_CLK_BCH 19
29#define JZ4770_CLK_LPCLK_MUX 20
30#define JZ4770_CLK_GPS 21
31#define JZ4770_CLK_SSI_MUX 22
32#define JZ4770_CLK_PCM_MUX 23
33#define JZ4770_CLK_I2S 24
34#define JZ4770_CLK_OTG 25
35#define JZ4770_CLK_SSI0 26
36#define JZ4770_CLK_SSI1 27
37#define JZ4770_CLK_SSI2 28
38#define JZ4770_CLK_PCM0 29
39#define JZ4770_CLK_PCM1 30
40#define JZ4770_CLK_DMA 31
41#define JZ4770_CLK_I2C0 32
42#define JZ4770_CLK_I2C1 33
43#define JZ4770_CLK_I2C2 34
44#define JZ4770_CLK_UART0 35
45#define JZ4770_CLK_UART1 36
46#define JZ4770_CLK_UART2 37
47#define JZ4770_CLK_UART3 38
48#define JZ4770_CLK_IPU 39
49#define JZ4770_CLK_ADC 40
50#define JZ4770_CLK_AIC 41
51#define JZ4770_CLK_AUX 42
52#define JZ4770_CLK_VPU 43
53#define JZ4770_CLK_UHC_PHY 44
54#define JZ4770_CLK_OTG_PHY 45
55#define JZ4770_CLK_EXT512 46
56#define JZ4770_CLK_RTC 47
57
58#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
new file mode 100644
index 000000000000..f51821a91216
--- /dev/null
+++ b/include/dt-bindings/clock/omap5.h
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2017 Texas Instruments, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#ifndef __DT_BINDINGS_CLK_OMAP5_H
14#define __DT_BINDINGS_CLK_OMAP5_H
15
16#define OMAP5_CLKCTRL_OFFSET 0x20
17#define OMAP5_CLKCTRL_INDEX(offset) ((offset) - OMAP5_CLKCTRL_OFFSET)
18
19/* mpu clocks */
20#define OMAP5_MPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
21
22/* dsp clocks */
23#define OMAP5_MMU_DSP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
24
25/* abe clocks */
26#define OMAP5_L4_ABE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
27#define OMAP5_MCPDM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
28#define OMAP5_DMIC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
29#define OMAP5_MCBSP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48)
30#define OMAP5_MCBSP2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
31#define OMAP5_MCBSP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58)
32#define OMAP5_TIMER5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
33#define OMAP5_TIMER6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70)
34#define OMAP5_TIMER7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
35#define OMAP5_TIMER8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80)
36
37/* l3main1 clocks */
38#define OMAP5_L3_MAIN_1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
39
40/* l3main2 clocks */
41#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
42
43/* ipu clocks */
44#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
45
46/* dma clocks */
47#define OMAP5_DMA_SYSTEM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
48
49/* emif clocks */
50#define OMAP5_DMM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
51#define OMAP5_EMIF1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
52#define OMAP5_EMIF2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
53
54/* l4cfg clocks */
55#define OMAP5_L4_CFG_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
56#define OMAP5_SPINLOCK_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
57#define OMAP5_MAILBOX_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
58
59/* l3instr clocks */
60#define OMAP5_L3_MAIN_3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
61#define OMAP5_L3_INSTR_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
62
63/* l4per clocks */
64#define OMAP5_TIMER10_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
65#define OMAP5_TIMER11_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
66#define OMAP5_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
67#define OMAP5_TIMER3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40)
68#define OMAP5_TIMER4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48)
69#define OMAP5_TIMER9_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
70#define OMAP5_GPIO2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x60)
71#define OMAP5_GPIO3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
72#define OMAP5_GPIO4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70)
73#define OMAP5_GPIO5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
74#define OMAP5_GPIO6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80)
75#define OMAP5_I2C1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa0)
76#define OMAP5_I2C2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa8)
77#define OMAP5_I2C3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb0)
78#define OMAP5_I2C4_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb8)
79#define OMAP5_L4_PER_CLKCTRL OMAP5_CLKCTRL_INDEX(0xc0)
80#define OMAP5_MCSPI1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0)
81#define OMAP5_MCSPI2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf8)
82#define OMAP5_MCSPI3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x100)
83#define OMAP5_MCSPI4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x108)
84#define OMAP5_GPIO7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x110)
85#define OMAP5_GPIO8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x118)
86#define OMAP5_MMC3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x120)
87#define OMAP5_MMC4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x128)
88#define OMAP5_UART1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x140)
89#define OMAP5_UART2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x148)
90#define OMAP5_UART3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x150)
91#define OMAP5_UART4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x158)
92#define OMAP5_MMC5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x160)
93#define OMAP5_I2C5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x168)
94#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170)
95#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178)
96
97/* dss clocks */
98#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
99
100/* l3init clocks */
101#define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
102#define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
103#define OMAP5_USB_HOST_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58)
104#define OMAP5_USB_TLL_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68)
105#define OMAP5_SATA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x88)
106#define OMAP5_OCP2SCP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe0)
107#define OMAP5_OCP2SCP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe8)
108#define OMAP5_USB_OTG_SS_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0)
109
110/* wkupaon clocks */
111#define OMAP5_L4_WKUP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
112#define OMAP5_WD_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
113#define OMAP5_GPIO1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
114#define OMAP5_TIMER1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40)
115#define OMAP5_COUNTER_32K_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50)
116#define OMAP5_KBD_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78)
117
118#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
index 370c83c3bccc..238f872e52f4 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -58,6 +58,186 @@
58#define GCC_QPIC_AHB_CLK			41
59#define GCC_QPIC_CLK				42
60#define PCNOC_BFDCD_CLK_SRC			43
61#define GPLL2_MAIN 44
62#define GPLL2 45
63#define GPLL4_MAIN 46
64#define GPLL4 47
65#define GPLL6_MAIN 48
66#define GPLL6 49
67#define UBI32_PLL_MAIN 50
68#define UBI32_PLL 51
69#define NSS_CRYPTO_PLL_MAIN 52
70#define NSS_CRYPTO_PLL 53
71#define PCIE0_AXI_CLK_SRC 54
72#define PCIE0_AUX_CLK_SRC 55
73#define PCIE0_PIPE_CLK_SRC 56
74#define PCIE1_AXI_CLK_SRC 57
75#define PCIE1_AUX_CLK_SRC 58
76#define PCIE1_PIPE_CLK_SRC 59
77#define SDCC1_APPS_CLK_SRC 60
78#define SDCC1_ICE_CORE_CLK_SRC 61
79#define SDCC2_APPS_CLK_SRC 62
80#define USB0_MASTER_CLK_SRC 63
81#define USB0_AUX_CLK_SRC 64
82#define USB0_MOCK_UTMI_CLK_SRC 65
83#define USB0_PIPE_CLK_SRC 66
84#define USB1_MASTER_CLK_SRC 67
85#define USB1_AUX_CLK_SRC 68
86#define USB1_MOCK_UTMI_CLK_SRC 69
87#define USB1_PIPE_CLK_SRC 70
88#define GCC_XO_CLK_SRC 71
89#define SYSTEM_NOC_BFDCD_CLK_SRC 72
90#define NSS_CE_CLK_SRC 73
91#define NSS_NOC_BFDCD_CLK_SRC 74
92#define NSS_CRYPTO_CLK_SRC 75
93#define NSS_UBI0_CLK_SRC 76
94#define NSS_UBI0_DIV_CLK_SRC 77
95#define NSS_UBI1_CLK_SRC 78
96#define NSS_UBI1_DIV_CLK_SRC 79
97#define UBI_MPT_CLK_SRC 80
98#define NSS_IMEM_CLK_SRC 81
99#define NSS_PPE_CLK_SRC 82
100#define NSS_PORT1_RX_CLK_SRC 83
101#define NSS_PORT1_RX_DIV_CLK_SRC 84
102#define NSS_PORT1_TX_CLK_SRC 85
103#define NSS_PORT1_TX_DIV_CLK_SRC 86
104#define NSS_PORT2_RX_CLK_SRC 87
105#define NSS_PORT2_RX_DIV_CLK_SRC 88
106#define NSS_PORT2_TX_CLK_SRC 89
107#define NSS_PORT2_TX_DIV_CLK_SRC 90
108#define NSS_PORT3_RX_CLK_SRC 91
109#define NSS_PORT3_RX_DIV_CLK_SRC 92
110#define NSS_PORT3_TX_CLK_SRC 93
111#define NSS_PORT3_TX_DIV_CLK_SRC 94
112#define NSS_PORT4_RX_CLK_SRC 95
113#define NSS_PORT4_RX_DIV_CLK_SRC 96
114#define NSS_PORT4_TX_CLK_SRC 97
115#define NSS_PORT4_TX_DIV_CLK_SRC 98
116#define NSS_PORT5_RX_CLK_SRC 99
117#define NSS_PORT5_RX_DIV_CLK_SRC 100
118#define NSS_PORT5_TX_CLK_SRC 101
119#define NSS_PORT5_TX_DIV_CLK_SRC 102
120#define NSS_PORT6_RX_CLK_SRC 103
121#define NSS_PORT6_RX_DIV_CLK_SRC 104
122#define NSS_PORT6_TX_CLK_SRC 105
123#define NSS_PORT6_TX_DIV_CLK_SRC 106
124#define CRYPTO_CLK_SRC 107
125#define GP1_CLK_SRC 108
126#define GP2_CLK_SRC 109
127#define GP3_CLK_SRC 110
128#define GCC_PCIE0_AHB_CLK 111
129#define GCC_PCIE0_AUX_CLK 112
130#define GCC_PCIE0_AXI_M_CLK 113
131#define GCC_PCIE0_AXI_S_CLK 114
132#define GCC_PCIE0_PIPE_CLK 115
133#define GCC_SYS_NOC_PCIE0_AXI_CLK 116
134#define GCC_PCIE1_AHB_CLK 117
135#define GCC_PCIE1_AUX_CLK 118
136#define GCC_PCIE1_AXI_M_CLK 119
137#define GCC_PCIE1_AXI_S_CLK 120
138#define GCC_PCIE1_PIPE_CLK 121
139#define GCC_SYS_NOC_PCIE1_AXI_CLK 122
140#define GCC_USB0_AUX_CLK 123
141#define GCC_SYS_NOC_USB0_AXI_CLK 124
142#define GCC_USB0_MASTER_CLK 125
143#define GCC_USB0_MOCK_UTMI_CLK 126
144#define GCC_USB0_PHY_CFG_AHB_CLK 127
145#define GCC_USB0_PIPE_CLK 128
146#define GCC_USB0_SLEEP_CLK 129
147#define GCC_USB1_AUX_CLK 130
148#define GCC_SYS_NOC_USB1_AXI_CLK 131
149#define GCC_USB1_MASTER_CLK 132
150#define GCC_USB1_MOCK_UTMI_CLK 133
151#define GCC_USB1_PHY_CFG_AHB_CLK 134
152#define GCC_USB1_PIPE_CLK 135
153#define GCC_USB1_SLEEP_CLK 136
154#define GCC_SDCC1_AHB_CLK 137
155#define GCC_SDCC1_APPS_CLK 138
156#define GCC_SDCC1_ICE_CORE_CLK 139
157#define GCC_SDCC2_AHB_CLK 140
158#define GCC_SDCC2_APPS_CLK 141
159#define GCC_MEM_NOC_NSS_AXI_CLK 142
160#define GCC_NSS_CE_APB_CLK 143
161#define GCC_NSS_CE_AXI_CLK 144
162#define GCC_NSS_CFG_CLK 145
163#define GCC_NSS_CRYPTO_CLK 146
164#define GCC_NSS_CSR_CLK 147
165#define GCC_NSS_EDMA_CFG_CLK 148
166#define GCC_NSS_EDMA_CLK 149
167#define GCC_NSS_IMEM_CLK 150
168#define GCC_NSS_NOC_CLK 151
169#define GCC_NSS_PPE_BTQ_CLK 152
170#define GCC_NSS_PPE_CFG_CLK 153
171#define GCC_NSS_PPE_CLK 154
172#define GCC_NSS_PPE_IPE_CLK 155
173#define GCC_NSS_PTP_REF_CLK 156
174#define GCC_NSSNOC_CE_APB_CLK 157
175#define GCC_NSSNOC_CE_AXI_CLK 158
176#define GCC_NSSNOC_CRYPTO_CLK 159
177#define GCC_NSSNOC_PPE_CFG_CLK 160
178#define GCC_NSSNOC_PPE_CLK 161
179#define GCC_NSSNOC_QOSGEN_REF_CLK 162
180#define GCC_NSSNOC_SNOC_CLK 163
181#define GCC_NSSNOC_TIMEOUT_REF_CLK 164
182#define GCC_NSSNOC_UBI0_AHB_CLK 165
183#define GCC_NSSNOC_UBI1_AHB_CLK 166
184#define GCC_UBI0_AHB_CLK 167
185#define GCC_UBI0_AXI_CLK 168
186#define GCC_UBI0_NC_AXI_CLK 169
187#define GCC_UBI0_CORE_CLK 170
188#define GCC_UBI0_MPT_CLK 171
189#define GCC_UBI1_AHB_CLK 172
190#define GCC_UBI1_AXI_CLK 173
191#define GCC_UBI1_NC_AXI_CLK 174
192#define GCC_UBI1_CORE_CLK 175
193#define GCC_UBI1_MPT_CLK 176
194#define GCC_CMN_12GPLL_AHB_CLK 177
195#define GCC_CMN_12GPLL_SYS_CLK 178
196#define GCC_MDIO_AHB_CLK 179
197#define GCC_UNIPHY0_AHB_CLK 180
198#define GCC_UNIPHY0_SYS_CLK 181
199#define GCC_UNIPHY1_AHB_CLK 182
200#define GCC_UNIPHY1_SYS_CLK 183
201#define GCC_UNIPHY2_AHB_CLK 184
202#define GCC_UNIPHY2_SYS_CLK 185
203#define GCC_NSS_PORT1_RX_CLK 186
204#define GCC_NSS_PORT1_TX_CLK 187
205#define GCC_NSS_PORT2_RX_CLK 188
206#define GCC_NSS_PORT2_TX_CLK 189
207#define GCC_NSS_PORT3_RX_CLK 190
208#define GCC_NSS_PORT3_TX_CLK 191
209#define GCC_NSS_PORT4_RX_CLK 192
210#define GCC_NSS_PORT4_TX_CLK 193
211#define GCC_NSS_PORT5_RX_CLK 194
212#define GCC_NSS_PORT5_TX_CLK 195
213#define GCC_NSS_PORT6_RX_CLK 196
214#define GCC_NSS_PORT6_TX_CLK 197
215#define GCC_PORT1_MAC_CLK 198
216#define GCC_PORT2_MAC_CLK 199
217#define GCC_PORT3_MAC_CLK 200
218#define GCC_PORT4_MAC_CLK 201
219#define GCC_PORT5_MAC_CLK 202
220#define GCC_PORT6_MAC_CLK 203
221#define GCC_UNIPHY0_PORT1_RX_CLK 204
222#define GCC_UNIPHY0_PORT1_TX_CLK 205
223#define GCC_UNIPHY0_PORT2_RX_CLK 206
224#define GCC_UNIPHY0_PORT2_TX_CLK 207
225#define GCC_UNIPHY0_PORT3_RX_CLK 208
226#define GCC_UNIPHY0_PORT3_TX_CLK 209
227#define GCC_UNIPHY0_PORT4_RX_CLK 210
228#define GCC_UNIPHY0_PORT4_TX_CLK 211
229#define GCC_UNIPHY0_PORT5_RX_CLK 212
230#define GCC_UNIPHY0_PORT5_TX_CLK 213
231#define GCC_UNIPHY1_PORT5_RX_CLK 214
232#define GCC_UNIPHY1_PORT5_TX_CLK 215
233#define GCC_UNIPHY2_PORT6_RX_CLK 216
234#define GCC_UNIPHY2_PORT6_TX_CLK 217
235#define GCC_CRYPTO_AHB_CLK 218
236#define GCC_CRYPTO_AXI_CLK 219
237#define GCC_CRYPTO_CLK 220
238#define GCC_GP1_CLK 221
239#define GCC_GP2_CLK 222
240#define GCC_GP3_CLK 223
241
242#define GCC_BLSP1_BCR				0
243#define GCC_BLSP1_QUP1_BCR			1
@@ -148,5 +328,47 @@
328#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR	86
329#define GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR	87
330#define GCC_SMMU_CATS_BCR			88
331#define GCC_UBI0_AXI_ARES 89
332#define GCC_UBI0_AHB_ARES 90
333#define GCC_UBI0_NC_AXI_ARES 91
334#define GCC_UBI0_DBG_ARES 92
335#define GCC_UBI0_CORE_CLAMP_ENABLE 93
336#define GCC_UBI0_CLKRST_CLAMP_ENABLE 94
337#define GCC_UBI1_AXI_ARES 95
338#define GCC_UBI1_AHB_ARES 96
339#define GCC_UBI1_NC_AXI_ARES 97
340#define GCC_UBI1_DBG_ARES 98
341#define GCC_UBI1_CORE_CLAMP_ENABLE 99
342#define GCC_UBI1_CLKRST_CLAMP_ENABLE 100
343#define GCC_NSS_CFG_ARES 101
344#define GCC_NSS_IMEM_ARES 102
345#define GCC_NSS_NOC_ARES 103
346#define GCC_NSS_CRYPTO_ARES 104
347#define GCC_NSS_CSR_ARES 105
348#define GCC_NSS_CE_APB_ARES 106
349#define GCC_NSS_CE_AXI_ARES 107
350#define GCC_NSSNOC_CE_APB_ARES 108
351#define GCC_NSSNOC_CE_AXI_ARES 109
352#define GCC_NSSNOC_UBI0_AHB_ARES 110
353#define GCC_NSSNOC_UBI1_AHB_ARES 111
354#define GCC_NSSNOC_SNOC_ARES 112
355#define GCC_NSSNOC_CRYPTO_ARES 113
356#define GCC_NSSNOC_ATB_ARES 114
357#define GCC_NSSNOC_QOSGEN_REF_ARES 115
358#define GCC_NSSNOC_TIMEOUT_REF_ARES 116
359#define GCC_PCIE0_PIPE_ARES 117
360#define GCC_PCIE0_SLEEP_ARES 118
361#define GCC_PCIE0_CORE_STICKY_ARES 119
362#define GCC_PCIE0_AXI_MASTER_ARES 120
363#define GCC_PCIE0_AXI_SLAVE_ARES 121
364#define GCC_PCIE0_AHB_ARES 122
365#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 123
366#define GCC_PCIE1_PIPE_ARES 124
367#define GCC_PCIE1_SLEEP_ARES 125
368#define GCC_PCIE1_CORE_STICKY_ARES 126
369#define GCC_PCIE1_AXI_MASTER_ARES 127
370#define GCC_PCIE1_AXI_SLAVE_ARES 128
371#define GCC_PCIE1_AHB_ARES 129
372#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130
373
374#endif
diff --git a/include/dt-bindings/clock/sprd,sc9860-clk.h b/include/dt-bindings/clock/sprd,sc9860-clk.h
new file mode 100644
index 000000000000..4cb202f090c2
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,sc9860-clk.h
@@ -0,0 +1,404 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2//
3// Spreadtrum SC9860 platform clocks
4//
5// Copyright (C) 2017, Spreadtrum Communications Inc.
6
7#ifndef _DT_BINDINGS_CLK_SC9860_H_
8#define _DT_BINDINGS_CLK_SC9860_H_
9
10#define CLK_FAC_4M 0
11#define CLK_FAC_2M 1
12#define CLK_FAC_1M 2
13#define CLK_FAC_250K 3
14#define CLK_FAC_RPLL0_26M 4
15#define CLK_FAC_RPLL1_26M 5
16#define CLK_FAC_RCO25M 6
17#define CLK_FAC_RCO4M 7
18#define CLK_FAC_RCO2M 8
19#define CLK_FAC_3K2 9
20#define CLK_FAC_1K 10
21#define CLK_MPLL0_GATE 11
22#define CLK_MPLL1_GATE 12
23#define CLK_DPLL0_GATE 13
24#define CLK_DPLL1_GATE 14
25#define CLK_LTEPLL0_GATE 15
26#define CLK_TWPLL_GATE 16
27#define CLK_LTEPLL1_GATE 17
28#define CLK_RPLL0_GATE 18
29#define CLK_RPLL1_GATE 19
30#define CLK_CPPLL_GATE 20
31#define CLK_GPLL_GATE 21
32#define CLK_PMU_GATE_NUM (CLK_GPLL_GATE + 1)
33
34#define CLK_MPLL0 0
35#define CLK_MPLL1 1
36#define CLK_DPLL0 2
37#define CLK_DPLL1 3
38#define CLK_RPLL0 4
39#define CLK_RPLL1 5
40#define CLK_TWPLL 6
41#define CLK_LTEPLL0 7
42#define CLK_LTEPLL1 8
43#define CLK_GPLL 9
44#define CLK_CPPLL 10
45#define CLK_GPLL_42M5 11
46#define CLK_TWPLL_768M 12
47#define CLK_TWPLL_384M 13
48#define CLK_TWPLL_192M 14
49#define CLK_TWPLL_96M 15
50#define CLK_TWPLL_48M 16
51#define CLK_TWPLL_24M 17
52#define CLK_TWPLL_12M 18
53#define CLK_TWPLL_512M 19
54#define CLK_TWPLL_256M 20
55#define CLK_TWPLL_128M 21
56#define CLK_TWPLL_64M 22
57#define CLK_TWPLL_307M2 23
58#define CLK_TWPLL_153M6 24
59#define CLK_TWPLL_76M8 25
60#define CLK_TWPLL_51M2 26
61#define CLK_TWPLL_38M4 27
62#define CLK_TWPLL_19M2 28
63#define CLK_L0_614M4 29
64#define CLK_L0_409M6 30
65#define CLK_L0_38M 31
66#define CLK_L1_38M 32
67#define CLK_RPLL0_192M 33
68#define CLK_RPLL0_96M 34
69#define CLK_RPLL0_48M 35
70#define CLK_RPLL1_468M 36
71#define CLK_RPLL1_192M 37
72#define CLK_RPLL1_96M 38
73#define CLK_RPLL1_64M 39
74#define CLK_RPLL1_48M 40
75#define CLK_DPLL0_50M 41
76#define CLK_DPLL1_50M 42
77#define CLK_CPPLL_50M 43
78#define CLK_M0_39M 44
79#define CLK_M1_63M 45
80#define CLK_PLL_NUM (CLK_M1_63M + 1)
81
82
83#define CLK_AP_APB 0
84#define CLK_AP_USB3 1
85#define CLK_UART0 2
86#define CLK_UART1 3
87#define CLK_UART2 4
88#define CLK_UART3 5
89#define CLK_UART4 6
90#define CLK_I2C0 7
91#define CLK_I2C1 8
92#define CLK_I2C2 9
93#define CLK_I2C3 10
94#define CLK_I2C4 11
95#define CLK_I2C5 12
96#define CLK_SPI0 13
97#define CLK_SPI1 14
98#define CLK_SPI2 15
99#define CLK_SPI3 16
100#define CLK_IIS0 17
101#define CLK_IIS1 18
102#define CLK_IIS2 19
103#define CLK_IIS3 20
104#define CLK_AP_CLK_NUM (CLK_IIS3 + 1)
105
106#define CLK_AON_APB 0
107#define CLK_AUX0 1
108#define CLK_AUX1 2
109#define CLK_AUX2 3
110#define CLK_PROBE 4
111#define CLK_SP_AHB 5
112#define CLK_CCI 6
113#define CLK_GIC 7
114#define CLK_CSSYS 8
115#define CLK_SDIO0_2X 9
116#define CLK_SDIO1_2X 10
117#define CLK_SDIO2_2X 11
118#define CLK_EMMC_2X 12
119#define CLK_SDIO0_1X 13
120#define CLK_SDIO1_1X 14
121#define CLK_SDIO2_1X 15
122#define CLK_EMMC_1X 16
123#define CLK_ADI 17
124#define CLK_PWM0 18
125#define CLK_PWM1 19
126#define CLK_PWM2 20
127#define CLK_PWM3 21
128#define CLK_EFUSE 22
129#define CLK_CM3_UART0 23
130#define CLK_CM3_UART1 24
131#define CLK_THM 25
132#define CLK_CM3_I2C0 26
133#define CLK_CM3_I2C1 27
134#define CLK_CM4_SPI 28
135#define CLK_AON_I2C 29
136#define CLK_AVS 30
137#define CLK_CA53_DAP 31
138#define CLK_CA53_TS 32
139#define CLK_DJTAG_TCK 33
140#define CLK_PMU 34
141#define CLK_PMU_26M 35
142#define CLK_DEBOUNCE 36
143#define CLK_OTG2_REF 37
144#define CLK_USB3_REF 38
145#define CLK_AP_AXI 39
146#define CLK_AON_PREDIV_NUM (CLK_AP_AXI + 1)
147
148#define CLK_USB3_EB 0
149#define CLK_USB3_SUSPEND_EB 1
150#define CLK_USB3_REF_EB 2
151#define CLK_DMA_EB 3
152#define CLK_SDIO0_EB 4
153#define CLK_SDIO1_EB 5
154#define CLK_SDIO2_EB 6
155#define CLK_EMMC_EB 7
156#define CLK_ROM_EB 8
157#define CLK_BUSMON_EB 9
158#define CLK_CC63S_EB 10
159#define CLK_CC63P_EB 11
160#define CLK_CE0_EB 12
161#define CLK_CE1_EB 13
162#define CLK_APAHB_GATE_NUM (CLK_CE1_EB + 1)
163
164#define CLK_AVS_LIT_EB 0
165#define CLK_AVS_BIG_EB 1
166#define CLK_AP_INTC5_EB 2
167#define CLK_GPIO_EB 3
168#define CLK_PWM0_EB 4
169#define CLK_PWM1_EB 5
170#define CLK_PWM2_EB 6
171#define CLK_PWM3_EB 7
172#define CLK_KPD_EB 8
173#define CLK_AON_SYS_EB 9
174#define CLK_AP_SYS_EB 10
175#define CLK_AON_TMR_EB 11
176#define CLK_AP_TMR0_EB 12
177#define CLK_EFUSE_EB 13
178#define CLK_EIC_EB 14
179#define CLK_PUB1_REG_EB 15
180#define CLK_ADI_EB 16
181#define CLK_AP_INTC0_EB 17
182#define CLK_AP_INTC1_EB 18
183#define CLK_AP_INTC2_EB 19
184#define CLK_AP_INTC3_EB 20
185#define CLK_AP_INTC4_EB 21
186#define CLK_SPLK_EB 22
187#define CLK_MSPI_EB 23
188#define CLK_PUB0_REG_EB 24
189#define CLK_PIN_EB 25
190#define CLK_AON_CKG_EB 26
191#define CLK_GPU_EB 27
192#define CLK_APCPU_TS0_EB 28
193#define CLK_APCPU_TS1_EB 29
194#define CLK_DAP_EB 30
195#define CLK_I2C_EB 31
196#define CLK_PMU_EB 32
197#define CLK_THM_EB 33
198#define CLK_AUX0_EB 34
199#define CLK_AUX1_EB 35
200#define CLK_AUX2_EB 36
201#define CLK_PROBE_EB 37
202#define CLK_GPU0_AVS_EB 38
203#define CLK_GPU1_AVS_EB 39
204#define CLK_APCPU_WDG_EB 40
205#define CLK_AP_TMR1_EB 41
206#define CLK_AP_TMR2_EB 42
207#define CLK_DISP_EMC_EB 43
208#define CLK_ZIP_EMC_EB 44
209#define CLK_GSP_EMC_EB 45
210#define CLK_OSC_AON_EB 46
211#define CLK_LVDS_TRX_EB 47
212#define CLK_LVDS_TCXO_EB 48
213#define CLK_MDAR_EB 49
214#define CLK_RTC4M0_CAL_EB 50
215#define CLK_RCT100M_CAL_EB 51
216#define CLK_DJTAG_EB 52
217#define CLK_MBOX_EB 53
218#define CLK_AON_DMA_EB 54
219#define CLK_DBG_EMC_EB 55
220#define CLK_LVDS_PLL_DIV_EN 56
221#define CLK_DEF_EB 57
222#define CLK_AON_APB_RSV0 58
223#define CLK_ORP_JTAG_EB 59
224#define CLK_VSP_EB 60
225#define CLK_CAM_EB 61
226#define CLK_DISP_EB 62
227#define CLK_DBG_AXI_IF_EB 63
228#define CLK_SDIO0_2X_EN 64
229#define CLK_SDIO1_2X_EN 65
230#define CLK_SDIO2_2X_EN 66
231#define CLK_EMMC_2X_EN 67
232#define CLK_AON_GATE_NUM (CLK_EMMC_2X_EN + 1)
233
234#define CLK_LIT_MCU 0
235#define CLK_BIG_MCU 1
236#define CLK_AONSECURE_NUM (CLK_BIG_MCU + 1)
237
238#define CLK_AGCP_IIS0_EB 0
239#define CLK_AGCP_IIS1_EB 1
240#define CLK_AGCP_IIS2_EB 2
241#define CLK_AGCP_IIS3_EB 3
242#define CLK_AGCP_UART_EB 4
243#define CLK_AGCP_DMACP_EB 5
244#define CLK_AGCP_DMAAP_EB 6
245#define CLK_AGCP_ARC48K_EB 7
246#define CLK_AGCP_SRC44P1K_EB 8
247#define CLK_AGCP_MCDT_EB 9
248#define CLK_AGCP_VBCIFD_EB 10
249#define CLK_AGCP_VBC_EB 11
250#define CLK_AGCP_SPINLOCK_EB 12
251#define CLK_AGCP_ICU_EB 13
252#define CLK_AGCP_AP_ASHB_EB 14
253#define CLK_AGCP_CP_ASHB_EB 15
254#define CLK_AGCP_AUD_EB 16
255#define CLK_AGCP_AUDIF_EB 17
256#define CLK_AGCP_GATE_NUM (CLK_AGCP_AUDIF_EB + 1)
257
258#define CLK_GPU 0
259#define CLK_GPU_NUM (CLK_GPU + 1)
260
261#define CLK_AHB_VSP 0
262#define CLK_VSP 1
263#define CLK_VSP_ENC 2
264#define CLK_VPP 3
265#define CLK_VSP_26M 4
266#define CLK_VSP_NUM (CLK_VSP_26M + 1)
267
268#define CLK_VSP_DEC_EB 0
269#define CLK_VSP_CKG_EB 1
270#define CLK_VSP_MMU_EB 2
271#define CLK_VSP_ENC_EB 3
272#define CLK_VPP_EB 4
273#define CLK_VSP_26M_EB 5
274#define CLK_VSP_AXI_GATE 6
275#define CLK_VSP_ENC_GATE 7
276#define CLK_VPP_AXI_GATE 8
277#define CLK_VSP_BM_GATE 9
278#define CLK_VSP_ENC_BM_GATE 10
279#define CLK_VPP_BM_GATE 11
280#define CLK_VSP_GATE_NUM (CLK_VPP_BM_GATE + 1)
281
282#define CLK_AHB_CAM 0
283#define CLK_SENSOR0 1
284#define CLK_SENSOR1 2
285#define CLK_SENSOR2 3
286#define CLK_MIPI_CSI0_EB 4
287#define CLK_MIPI_CSI1_EB 5
288#define CLK_CAM_NUM (CLK_MIPI_CSI1_EB + 1)
289
290#define CLK_DCAM0_EB 0
291#define CLK_DCAM1_EB 1
292#define CLK_ISP0_EB 2
293#define CLK_CSI0_EB 3
294#define CLK_CSI1_EB 4
295#define CLK_JPG0_EB 5
296#define CLK_JPG1_EB 6
297#define CLK_CAM_CKG_EB 7
298#define CLK_CAM_MMU_EB 8
299#define CLK_ISP1_EB 9
300#define CLK_CPP_EB 10
301#define CLK_MMU_PF_EB 11
302#define CLK_ISP2_EB 12
303#define CLK_DCAM2ISP_IF_EB 13
304#define CLK_ISP2DCAM_IF_EB 14
305#define CLK_ISP_LCLK_EB 15
306#define CLK_ISP_ICLK_EB 16
307#define CLK_ISP_MCLK_EB 17
308#define CLK_ISP_PCLK_EB 18
309#define CLK_ISP_ISP2DCAM_EB 19
310#define CLK_DCAM0_IF_EB 20
311#define CLK_CLK26M_IF_EB 21
312#define CLK_CPHY0_GATE 22
313#define CLK_MIPI_CSI0_GATE 23
314#define CLK_CPHY1_GATE 24
315#define CLK_MIPI_CSI1 25
316#define CLK_DCAM0_AXI_GATE 26
317#define CLK_DCAM1_AXI_GATE 27
318#define CLK_SENSOR0_GATE 28
319#define CLK_SENSOR1_GATE 29
320#define CLK_JPG0_AXI_GATE 30
321#define CLK_GPG1_AXI_GATE 31
322#define CLK_ISP0_AXI_GATE 32
323#define CLK_ISP1_AXI_GATE 33
324#define CLK_ISP2_AXI_GATE 34
325#define CLK_CPP_AXI_GATE 35
326#define CLK_D0_IF_AXI_GATE 36
327#define CLK_D2I_IF_AXI_GATE 37
328#define CLK_I2D_IF_AXI_GATE 38
329#define CLK_SPARE_AXI_GATE 39
330#define CLK_SENSOR2_GATE 40
331#define CLK_D0IF_IN_D_EN 41
332#define CLK_D1IF_IN_D_EN 42
333#define CLK_D0IF_IN_D2I_EN 43
334#define CLK_D1IF_IN_D2I_EN 44
335#define CLK_IA_IN_D2I_EN 45
336#define CLK_IB_IN_D2I_EN 46
337#define CLK_IC_IN_D2I_EN 47
338#define CLK_IA_IN_I_EN 48
339#define CLK_IB_IN_I_EN 49
340#define CLK_IC_IN_I_EN 50
341#define CLK_CAM_GATE_NUM (CLK_IC_IN_I_EN + 1)
342
343#define CLK_AHB_DISP 0
344#define CLK_DISPC0_DPI 1
345#define CLK_DISPC1_DPI 2
346#define CLK_DISP_NUM (CLK_DISPC1_DPI + 1)
347
348#define CLK_DISPC0_EB 0
349#define CLK_DISPC1_EB 1
350#define CLK_DISPC_MMU_EB 2
351#define CLK_GSP0_EB 3
352#define CLK_GSP1_EB 4
353#define CLK_GSP0_MMU_EB 5
354#define CLK_GSP1_MMU_EB 6
355#define CLK_DSI0_EB 7
356#define CLK_DSI1_EB 8
357#define CLK_DISP_CKG_EB 9
358#define CLK_DISP_GPU_EB 10
359#define CLK_GPU_MTX_EB 11
360#define CLK_GSP_MTX_EB 12
361#define CLK_TMC_MTX_EB 13
362#define CLK_DISPC_MTX_EB 14
363#define CLK_DPHY0_GATE 15
364#define CLK_DPHY1_GATE 16
365#define CLK_GSP0_A_GATE 17
366#define CLK_GSP1_A_GATE 18
367#define CLK_GSP0_F_GATE 19
368#define CLK_GSP1_F_GATE 20
369#define CLK_D_MTX_F_GATE 21
370#define CLK_D_MTX_A_GATE 22
371#define CLK_D_NOC_F_GATE 23
372#define CLK_D_NOC_A_GATE 24
373#define CLK_GSP_MTX_F_GATE 25
374#define CLK_GSP_MTX_A_GATE 26
375#define CLK_GSP_NOC_F_GATE 27
376#define CLK_GSP_NOC_A_GATE 28
377#define CLK_DISPM0IDLE_GATE 29
378#define CLK_GSPM0IDLE_GATE 30
379#define CLK_DISP_GATE_NUM (CLK_GSPM0IDLE_GATE + 1)
380
381#define CLK_SIM0_EB 0
382#define CLK_IIS0_EB 1
383#define CLK_IIS1_EB 2
384#define CLK_IIS2_EB 3
385#define CLK_IIS3_EB 4
386#define CLK_SPI0_EB 5
387#define CLK_SPI1_EB 6
388#define CLK_SPI2_EB 7
389#define CLK_I2C0_EB 8
390#define CLK_I2C1_EB 9
391#define CLK_I2C2_EB 10
392#define CLK_I2C3_EB 11
393#define CLK_I2C4_EB 12
394#define CLK_I2C5_EB 13
395#define CLK_UART0_EB 14
396#define CLK_UART1_EB 15
397#define CLK_UART2_EB 16
398#define CLK_UART3_EB 17
399#define CLK_UART4_EB 18
400#define CLK_AP_CKG_EB 19
401#define CLK_SPI3_EB 20
402#define CLK_APAPB_GATE_NUM (CLK_SPI3_EB + 1)
403
404#endif /* _DT_BINDINGS_CLK_SC9860_H_ */
diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h
new file mode 100644
index 000000000000..56fc4889b2c4
--- /dev/null
+++ b/include/dt-bindings/gpio/aspeed-gpio.h
@@ -0,0 +1,49 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * This header provides constants for binding aspeed,*-gpio.
4 *
5 * The first cell in Aspeed's GPIO specifier is the GPIO ID. The macros below
6 * provide names for this.
7 *
8 * The second cell contains standard flag values specified in gpio.h.
9 */
10
11#ifndef _DT_BINDINGS_GPIO_ASPEED_GPIO_H
12#define _DT_BINDINGS_GPIO_ASPEED_GPIO_H
13
14#include <dt-bindings/gpio/gpio.h>
15
16#define ASPEED_GPIO_PORT_A 0
17#define ASPEED_GPIO_PORT_B 1
18#define ASPEED_GPIO_PORT_C 2
19#define ASPEED_GPIO_PORT_D 3
20#define ASPEED_GPIO_PORT_E 4
21#define ASPEED_GPIO_PORT_F 5
22#define ASPEED_GPIO_PORT_G 6
23#define ASPEED_GPIO_PORT_H 7
24#define ASPEED_GPIO_PORT_I 8
25#define ASPEED_GPIO_PORT_J 9
26#define ASPEED_GPIO_PORT_K 10
27#define ASPEED_GPIO_PORT_L 11
28#define ASPEED_GPIO_PORT_M 12
29#define ASPEED_GPIO_PORT_N 13
30#define ASPEED_GPIO_PORT_O 14
31#define ASPEED_GPIO_PORT_P 15
32#define ASPEED_GPIO_PORT_Q 16
33#define ASPEED_GPIO_PORT_R 17
34#define ASPEED_GPIO_PORT_S 18
35#define ASPEED_GPIO_PORT_T 19
36#define ASPEED_GPIO_PORT_U 20
37#define ASPEED_GPIO_PORT_V 21
38#define ASPEED_GPIO_PORT_W 22
39#define ASPEED_GPIO_PORT_X 23
40#define ASPEED_GPIO_PORT_Y 24
41#define ASPEED_GPIO_PORT_Z 25
42#define ASPEED_GPIO_PORT_AA 26
43#define ASPEED_GPIO_PORT_AB 27
44#define ASPEED_GPIO_PORT_AC 28
45
46#define ASPEED_GPIO(port, offset) \
47 ((ASPEED_GPIO_PORT_##port * 8) + offset)
48
49#endif
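
For reference, a minimal sketch (not part of the patch) of what the new ASPEED_GPIO() helper encodes: each port contributes eight lines, so the first specifier cell is simply port index * 8 + line offset. The compile-time checks below assume the header above is on the include path.

#include <dt-bindings/gpio/aspeed-gpio.h>

/* Compile-time sanity checks of the first-cell encoding. */
_Static_assert(ASPEED_GPIO(A, 0) == 0, "port A starts at GPIO 0");
_Static_assert(ASPEED_GPIO(C, 5) == 21, "port C is index 2, so 2 * 8 + 5");
_Static_assert(ASPEED_GPIO(AC, 7) == 231, "last port (index 28), last line");
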
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index dd549ff04295..2cc10ae4bbb7 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -29,8 +29,8 @@
29#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN) 29#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
30#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE) 30#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
31 31
32/* Bit 3 express GPIO suspend/resume persistence */ 32/* Bit 3 express GPIO suspend/resume and reset persistence */
33#define GPIO_SLEEP_MAINTAIN_VALUE 0 33#define GPIO_PERSISTENT 0
34#define GPIO_SLEEP_MAY_LOSE_VALUE 8 34#define GPIO_TRANSITORY 8
35 35
36#endif 36#endif
diff --git a/include/dt-bindings/gpio/meson-axg-gpio.h b/include/dt-bindings/gpio/meson-axg-gpio.h
new file mode 100644
index 000000000000..25bb1fffa97a
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-axg-gpio.h
@@ -0,0 +1,116 @@
1/*
2 * Copyright (c) 2017 Amlogic, Inc. All rights reserved.
3 * Author: Xingyu Chen <xingyu.chen@amlogic.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0+
6 */
7
8#ifndef _DT_BINDINGS_MESON_AXG_GPIO_H
9#define _DT_BINDINGS_MESON_AXG_GPIO_H
10
11/* First GPIO chip */
12#define GPIOAO_0 0
13#define GPIOAO_1 1
14#define GPIOAO_2 2
15#define GPIOAO_3 3
16#define GPIOAO_4 4
17#define GPIOAO_5 5
18#define GPIOAO_6 6
19#define GPIOAO_7 7
20#define GPIOAO_8 8
21#define GPIOAO_9 9
22#define GPIOAO_10 10
23#define GPIOAO_11 11
24#define GPIOAO_12 12
25#define GPIOAO_13 13
26#define GPIO_TEST_N 14
27
28/* Second GPIO chip */
29#define GPIOZ_0 0
30#define GPIOZ_1 1
31#define GPIOZ_2 2
32#define GPIOZ_3 3
33#define GPIOZ_4 4
34#define GPIOZ_5 5
35#define GPIOZ_6 6
36#define GPIOZ_7 7
37#define GPIOZ_8 8
38#define GPIOZ_9 9
39#define GPIOZ_10 10
40#define BOOT_0 11
41#define BOOT_1 12
42#define BOOT_2 13
43#define BOOT_3 14
44#define BOOT_4 15
45#define BOOT_5 16
46#define BOOT_6 17
47#define BOOT_7 18
48#define BOOT_8 19
49#define BOOT_9 20
50#define BOOT_10 21
51#define BOOT_11 22
52#define BOOT_12 23
53#define BOOT_13 24
54#define BOOT_14 25
55#define GPIOA_0 26
56#define GPIOA_1 27
57#define GPIOA_2 28
58#define GPIOA_3 29
59#define GPIOA_4 30
60#define GPIOA_5 31
61#define GPIOA_6 32
62#define GPIOA_7 33
63#define GPIOA_8 34
64#define GPIOA_9 35
65#define GPIOA_10 36
66#define GPIOA_11 37
67#define GPIOA_12 38
68#define GPIOA_13 39
69#define GPIOA_14 40
70#define GPIOA_15 41
71#define GPIOA_16 42
72#define GPIOA_17 43
73#define GPIOA_18 44
74#define GPIOA_19 45
75#define GPIOA_20 46
76#define GPIOX_0 47
77#define GPIOX_1 48
78#define GPIOX_2 49
79#define GPIOX_3 50
80#define GPIOX_4 51
81#define GPIOX_5 52
82#define GPIOX_6 53
83#define GPIOX_7 54
84#define GPIOX_8 55
85#define GPIOX_9 56
86#define GPIOX_10 57
87#define GPIOX_11 58
88#define GPIOX_12 59
89#define GPIOX_13 60
90#define GPIOX_14 61
91#define GPIOX_15 62
92#define GPIOX_16 63
93#define GPIOX_17 64
94#define GPIOX_18 65
95#define GPIOX_19 66
96#define GPIOX_20 67
97#define GPIOX_21 68
98#define GPIOX_22 69
99#define GPIOY_0 70
100#define GPIOY_1 71
101#define GPIOY_2 72
102#define GPIOY_3 73
103#define GPIOY_4 74
104#define GPIOY_5 75
105#define GPIOY_6 76
106#define GPIOY_7 77
107#define GPIOY_8 78
108#define GPIOY_9 79
109#define GPIOY_10 80
110#define GPIOY_11 81
111#define GPIOY_12 82
112#define GPIOY_13 83
113#define GPIOY_14 84
114#define GPIOY_15 85
115
116#endif /* _DT_BINDINGS_MESON_AXG_GPIO_H */
diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h
new file mode 100644
index 000000000000..64813536aec9
--- /dev/null
+++ b/include/dt-bindings/memory/tegra186-mc.h
@@ -0,0 +1,111 @@
1#ifndef DT_BINDINGS_MEMORY_TEGRA186_MC_H
2#define DT_BINDINGS_MEMORY_TEGRA186_MC_H
3
4/* special clients */
5#define TEGRA186_SID_INVALID 0x00
6#define TEGRA186_SID_PASSTHROUGH 0x7f
7
8/* host1x clients */
9#define TEGRA186_SID_HOST1X 0x01
10#define TEGRA186_SID_CSI 0x02
11#define TEGRA186_SID_VIC 0x03
12#define TEGRA186_SID_VI 0x04
13#define TEGRA186_SID_ISP 0x05
14#define TEGRA186_SID_NVDEC 0x06
15#define TEGRA186_SID_NVENC 0x07
16#define TEGRA186_SID_NVJPG 0x08
17#define TEGRA186_SID_NVDISPLAY 0x09
18#define TEGRA186_SID_TSEC 0x0a
19#define TEGRA186_SID_TSECB 0x0b
20#define TEGRA186_SID_SE 0x0c
21#define TEGRA186_SID_SE1 0x0d
22#define TEGRA186_SID_SE2 0x0e
23#define TEGRA186_SID_SE3 0x0f
24
25/* GPU clients */
26#define TEGRA186_SID_GPU 0x10
27
28/* other SoC clients */
29#define TEGRA186_SID_AFI 0x11
30#define TEGRA186_SID_HDA 0x12
31#define TEGRA186_SID_ETR 0x13
32#define TEGRA186_SID_EQOS 0x14
33#define TEGRA186_SID_UFSHC 0x15
34#define TEGRA186_SID_AON 0x16
35#define TEGRA186_SID_SDMMC4 0x17
36#define TEGRA186_SID_SDMMC3 0x18
37#define TEGRA186_SID_SDMMC2 0x19
38#define TEGRA186_SID_SDMMC1 0x1a
39#define TEGRA186_SID_XUSB_HOST 0x1b
40#define TEGRA186_SID_XUSB_DEV 0x1c
41#define TEGRA186_SID_SATA 0x1d
42#define TEGRA186_SID_APE 0x1e
43#define TEGRA186_SID_SCE 0x1f
44
45/* GPC DMA clients */
46#define TEGRA186_SID_GPCDMA_0 0x20
47#define TEGRA186_SID_GPCDMA_1 0x21
48#define TEGRA186_SID_GPCDMA_2 0x22
49#define TEGRA186_SID_GPCDMA_3 0x23
50#define TEGRA186_SID_GPCDMA_4 0x24
51#define TEGRA186_SID_GPCDMA_5 0x25
52#define TEGRA186_SID_GPCDMA_6 0x26
53#define TEGRA186_SID_GPCDMA_7 0x27
54
55/* APE DMA clients */
56#define TEGRA186_SID_APE_1 0x28
57#define TEGRA186_SID_APE_2 0x29
58
59/* camera RTCPU */
60#define TEGRA186_SID_RCE 0x2a
61
62/* camera RTCPU on host1x address space */
63#define TEGRA186_SID_RCE_1X 0x2b
64
65/* APE DMA clients */
66#define TEGRA186_SID_APE_3 0x2c
67
68/* camera RTCPU running on APE */
69#define TEGRA186_SID_APE_CAM 0x2d
70#define TEGRA186_SID_APE_CAM_1X 0x2e
71
72/*
73 * The BPMP has its SID value hardcoded in the firmware. Changing it requires
74 * considerable effort.
75 */
76#define TEGRA186_SID_BPMP 0x32
77
78/* for SMMU tests */
79#define TEGRA186_SID_SMMU_TEST 0x33
80
81/* host1x virtualization channels */
82#define TEGRA186_SID_HOST1X_CTX0 0x38
83#define TEGRA186_SID_HOST1X_CTX1 0x39
84#define TEGRA186_SID_HOST1X_CTX2 0x3a
85#define TEGRA186_SID_HOST1X_CTX3 0x3b
86#define TEGRA186_SID_HOST1X_CTX4 0x3c
87#define TEGRA186_SID_HOST1X_CTX5 0x3d
88#define TEGRA186_SID_HOST1X_CTX6 0x3e
89#define TEGRA186_SID_HOST1X_CTX7 0x3f
90
91/* host1x command buffers */
92#define TEGRA186_SID_HOST1X_VM0 0x40
93#define TEGRA186_SID_HOST1X_VM1 0x41
94#define TEGRA186_SID_HOST1X_VM2 0x42
95#define TEGRA186_SID_HOST1X_VM3 0x43
96#define TEGRA186_SID_HOST1X_VM4 0x44
97#define TEGRA186_SID_HOST1X_VM5 0x45
98#define TEGRA186_SID_HOST1X_VM6 0x46
99#define TEGRA186_SID_HOST1X_VM7 0x47
100
101/* SE data buffers */
102#define TEGRA186_SID_SE_VM0 0x48
103#define TEGRA186_SID_SE_VM1 0x49
104#define TEGRA186_SID_SE_VM2 0x4a
105#define TEGRA186_SID_SE_VM3 0x4b
106#define TEGRA186_SID_SE_VM4 0x4c
107#define TEGRA186_SID_SE_VM5 0x4d
108#define TEGRA186_SID_SE_VM6 0x4e
109#define TEGRA186_SID_SE_VM7 0x4f
110
111#endif
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index a69e310789c5..6ce4a32f77d4 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -25,7 +25,8 @@
25#define DS0_FORCE_OFF_MODE (1 << 24) 25#define DS0_FORCE_OFF_MODE (1 << 24)
26#define DS0_INPUT (1 << 25) 26#define DS0_INPUT (1 << 25)
27#define DS0_FORCE_OUT_HIGH (1 << 26) 27#define DS0_FORCE_OUT_HIGH (1 << 26)
28#define DS0_PULL_UP_DOWN_EN (1 << 27) 28#define DS0_PULL_UP_DOWN_EN (0 << 27)
29#define DS0_PULL_UP_DOWN_DIS (1 << 27)
29#define DS0_PULL_UP_SEL (1 << 28) 30#define DS0_PULL_UP_SEL (1 << 28)
30#define WAKEUP_ENABLE (1 << 29) 31#define WAKEUP_ENABLE (1 << 29)
31 32
diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h
index b8dfe31821e6..b5a2174a6386 100644
--- a/include/dt-bindings/pinctrl/stm32-pinfunc.h
+++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h
@@ -1,3 +1,9 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright (C) STMicroelectronics 2017 - All Rights Reserved
4 * Author: Torgue Alexandre <alexandre.torgue@st.com> for STMicroelectronics.
5 */
6
1#ifndef _DT_BINDINGS_STM32_PINFUNC_H 7#ifndef _DT_BINDINGS_STM32_PINFUNC_H
2#define _DT_BINDINGS_STM32_PINFUNC_H 8#define _DT_BINDINGS_STM32_PINFUNC_H
3 9
diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h
new file mode 100644
index 000000000000..92b46d772fae
--- /dev/null
+++ b/include/dt-bindings/power/mt2712-power.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2017 MediaTek Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
12 */
13
14#ifndef _DT_BINDINGS_POWER_MT2712_POWER_H
15#define _DT_BINDINGS_POWER_MT2712_POWER_H
16
17#define MT2712_POWER_DOMAIN_MM 0
18#define MT2712_POWER_DOMAIN_VDEC 1
19#define MT2712_POWER_DOMAIN_VENC 2
20#define MT2712_POWER_DOMAIN_ISP 3
21#define MT2712_POWER_DOMAIN_AUDIO 4
22#define MT2712_POWER_DOMAIN_USB 5
23#define MT2712_POWER_DOMAIN_USB2 6
24#define MT2712_POWER_DOMAIN_MFG 7
25
26#endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */
diff --git a/include/dt-bindings/power/owl-s700-powergate.h b/include/dt-bindings/power/owl-s700-powergate.h
new file mode 100644
index 000000000000..4cf1aefbf09c
--- /dev/null
+++ b/include/dt-bindings/power/owl-s700-powergate.h
@@ -0,0 +1,19 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2/*
3 * Actions Semi S700 SPS
4 *
5 * Copyright (c) 2017 Andreas Färber
6 */
7#ifndef DT_BINDINGS_POWER_OWL_S700_POWERGATE_H
8#define DT_BINDINGS_POWER_OWL_S700_POWERGATE_H
9
10#define S700_PD_VDE 0
11#define S700_PD_VCE_SI 1
12#define S700_PD_USB2_1 2
13#define S700_PD_HDE 3
14#define S700_PD_DMA 4
15#define S700_PD_DS 5
16#define S700_PD_USB3 6
17#define S700_PD_USB2_0 7
18
19#endif
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
new file mode 100644
index 000000000000..ad6f55dabd6d
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
@@ -0,0 +1,124 @@
1/*
2 *
3 * Copyright (c) 2016 BayLibre, SAS.
4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 *
6 * Copyright (c) 2017 Amlogic, inc.
7 * Author: Yixun Lan <yixun.lan@amlogic.com>
8 *
9 * SPDX-License-Identifier: (GPL-2.0+ OR BSD)
10 */
11
12#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
13#define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
14
15/* RESET0 */
16#define RESET_HIU 0
17#define RESET_PCIE_A 1
18#define RESET_PCIE_B 2
19#define RESET_DDR_TOP 3
20/* 4 */
21#define RESET_VIU 5
22#define RESET_PCIE_PHY 6
23#define RESET_PCIE_APB 7
24/* 8 */
25/* 9 */
26#define RESET_VENC 10
27#define RESET_ASSIST 11
28/* 12 */
29#define RESET_VCBUS 13
30/* 14 */
31/* 15 */
32#define RESET_GIC 16
33#define RESET_CAPB3_DECODE 17
34/* 18-21 */
35#define RESET_SYS_CPU_CAPB3 22
36#define RESET_CBUS_CAPB3 23
37#define RESET_AHB_CNTL 24
38#define RESET_AHB_DATA 25
39#define RESET_VCBUS_CLK81 26
40#define RESET_MMC 27
41/* 28-31 */
42/* RESET1 */
43/* 32 */
44/* 33 */
45#define RESET_USB_OTG 34
46#define RESET_DDR 35
47#define RESET_AO_RESET 36
48/* 37 */
49#define RESET_AHB_SRAM 38
50/* 39 */
51/* 40 */
52#define RESET_DMA 41
53#define RESET_ISA 42
54#define RESET_ETHERNET 43
55/* 44 */
56#define RESET_SD_EMMC_B 45
57#define RESET_SD_EMMC_C 46
58#define RESET_ROM_BOOT 47
59#define RESET_SYS_CPU_0 48
60#define RESET_SYS_CPU_1 49
61#define RESET_SYS_CPU_2 50
62#define RESET_SYS_CPU_3 51
63#define RESET_SYS_CPU_CORE_0 52
64#define RESET_SYS_CPU_CORE_1 53
65#define RESET_SYS_CPU_CORE_2 54
66#define RESET_SYS_CPU_CORE_3 55
67#define RESET_SYS_PLL_DIV 56
68#define RESET_SYS_CPU_AXI 57
69#define RESET_SYS_CPU_L2 58
70#define RESET_SYS_CPU_P 59
71#define RESET_SYS_CPU_MBIST 60
72/* 61-63 */
73/* RESET2 */
74/* 64 */
75/* 65 */
76#define RESET_AUDIO 66
77/* 67 */
78#define RESET_MIPI_HOST 68
79#define RESET_AUDIO_LOCKER 69
80#define RESET_GE2D 70
81/* 71-76 */
82#define RESET_AO_CPU_RESET 77
83/* 78-95 */
84/* RESET3 */
85#define RESET_RING_OSCILLATOR 96
86/* 97-127 */
87/* RESET4 */
88/* 128 */
89/* 129 */
90#define RESET_MIPI_PHY 130
91/* 131-140 */
92#define RESET_VENCL 141
93#define RESET_I2C_MASTER_2 142
94#define RESET_I2C_MASTER_1 143
95/* 144-159 */
96/* RESET5 */
97/* 160-191 */
98/* RESET6 */
99#define RESET_PERIPHS_GENERAL 192
100#define RESET_PERIPHS_SPICC 193
101/* 194 */
102/* 195 */
103#define RESET_PERIPHS_I2C_MASTER_0 196
104/* 197-200 */
105#define RESET_PERIPHS_UART_0 201
106#define RESET_PERIPHS_UART_1 202
107/* 203-204 */
108#define RESET_PERIPHS_SPI_0 205
109#define RESET_PERIPHS_I2C_MASTER_3 206
110/* 207-223 */
111/* RESET7 */
112#define RESET_USB_DDR_0 224
113#define RESET_USB_DDR_1 225
114#define RESET_USB_DDR_2 226
115#define RESET_USB_DDR_3 227
116/* 228 */
117#define RESET_DEVICE_MMC_ARB 229
118/* 230 */
119#define RESET_VID_LOCK 231
120#define RESET_A9_DMC_PIPEL 232
121#define RESET_DMC_VPU_PIPEL 233
122/* 234-255 */
123
124#endif
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
new file mode 100644
index 000000000000..e518e4e3dfb5
--- /dev/null
+++ b/include/kvm/arm_psci.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __KVM_ARM_PSCI_H__
19#define __KVM_ARM_PSCI_H__
20
21#include <linux/kvm_host.h>
22#include <uapi/linux/psci.h>
23
24#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
25#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
26#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
27
28#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
29
30/*
31 * We need the KVM pointer independently from the vcpu as we can call
32 * this from HYP, and need to apply kern_hyp_va on it...
33 */
34static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
35{
36 /*
37 * Our PSCI implementation stays the same across versions from
38 * v0.2 onward, only adding the few mandatory functions (such
39 * as FEATURES with 1.0) that are required by newer
40 * revisions. It is thus safe to return the latest.
41 */
42 if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
43 return KVM_ARM_PSCI_LATEST;
44
45 return KVM_ARM_PSCI_0_1;
46}
47
48
49int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
50
51#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index e6b98a32495f..64e10746f282 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
56#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ 56#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
57 acpi_fwnode_handle(adev) : NULL) 57 acpi_fwnode_handle(adev) : NULL)
58#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) 58#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
59#define ACPI_HANDLE_FWNODE(fwnode) \
60 acpi_device_handle(to_acpi_device_node(fwnode))
59 61
60static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) 62static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
61{ 63{
@@ -585,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
585const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 587const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
586 const struct device *dev); 588 const struct device *dev);
587 589
590void *acpi_get_match_data(const struct device *dev);
588extern bool acpi_driver_match_device(struct device *dev, 591extern bool acpi_driver_match_device(struct device *dev,
589 const struct device_driver *drv); 592 const struct device_driver *drv);
590int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); 593int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -627,6 +630,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count)
627#define ACPI_COMPANION(dev) (NULL) 630#define ACPI_COMPANION(dev) (NULL)
628#define ACPI_COMPANION_SET(dev, adev) do { } while (0) 631#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
629#define ACPI_HANDLE(dev) (NULL) 632#define ACPI_HANDLE(dev) (NULL)
633#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
630#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), 634#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
631 635
632struct fwnode_handle; 636struct fwnode_handle;
@@ -762,6 +766,11 @@ static inline const struct acpi_device_id *acpi_match_device(
762 return NULL; 766 return NULL;
763} 767}
764 768
769static inline void *acpi_get_match_data(const struct device *dev)
770{
771 return NULL;
772}
773
765static inline bool acpi_driver_match_device(struct device *dev, 774static inline bool acpi_driver_match_device(struct device *dev,
766 const struct device_driver *drv) 775 const struct device_driver *drv)
767{ 776{
@@ -985,6 +994,11 @@ struct acpi_gpio_mapping {
985 const char *name; 994 const char *name;
986 const struct acpi_gpio_params *data; 995 const struct acpi_gpio_params *data;
987 unsigned int size; 996 unsigned int size;
997
998/* Ignore IoRestriction field */
999#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
1000
1001 unsigned int quirks;
988}; 1002};
989 1003
990#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) 1004#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 304511267c82..2b709416de05 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
27DECLARE_PER_CPU(unsigned long, freq_scale); 27DECLARE_PER_CPU(unsigned long, freq_scale);
28 28
29static inline 29static inline
30unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu) 30unsigned long topology_get_freq_scale(int cpu)
31{ 31{
32 return per_cpu(freq_scale, cpu); 32 return per_cpu(freq_scale, cpu);
33} 33}
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c5bca38c653..a031897fca76 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,14 +14,16 @@
14#ifndef __LINUX_ARM_SMCCC_H 14#ifndef __LINUX_ARM_SMCCC_H
15#define __LINUX_ARM_SMCCC_H 15#define __LINUX_ARM_SMCCC_H
16 16
17#include <uapi/linux/const.h>
18
17/* 19/*
18 * This file provides common defines for ARM SMC Calling Convention as 20 * This file provides common defines for ARM SMC Calling Convention as
19 * specified in 21 * specified in
20 * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html 22 * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
21 */ 23 */
22 24
23#define ARM_SMCCC_STD_CALL 0 25#define ARM_SMCCC_STD_CALL _AC(0,U)
24#define ARM_SMCCC_FAST_CALL 1 26#define ARM_SMCCC_FAST_CALL _AC(1,U)
25#define ARM_SMCCC_TYPE_SHIFT 31 27#define ARM_SMCCC_TYPE_SHIFT 31
26 28
27#define ARM_SMCCC_SMC_32 0 29#define ARM_SMCCC_SMC_32 0
@@ -60,6 +62,24 @@
60#define ARM_SMCCC_QUIRK_NONE 0 62#define ARM_SMCCC_QUIRK_NONE 0
61#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ 63#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
62 64
65#define ARM_SMCCC_VERSION_1_0 0x10000
66#define ARM_SMCCC_VERSION_1_1 0x10001
67
68#define ARM_SMCCC_VERSION_FUNC_ID \
69 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
70 ARM_SMCCC_SMC_32, \
71 0, 0)
72
73#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
74 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
75 ARM_SMCCC_SMC_32, \
76 0, 1)
77
78#define ARM_SMCCC_ARCH_WORKAROUND_1 \
79 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
80 ARM_SMCCC_SMC_32, \
81 0, 0x8000)
82
63#ifndef __ASSEMBLY__ 83#ifndef __ASSEMBLY__
64 84
65#include <linux/linkage.h> 85#include <linux/linkage.h>
@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
130 150
131#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) 151#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
132 152
153/* SMCCC v1.1 implementation madness follows */
154#ifdef CONFIG_ARM64
155
156#define SMCCC_SMC_INST "smc #0"
157#define SMCCC_HVC_INST "hvc #0"
158
159#elif defined(CONFIG_ARM)
160#include <asm/opcodes-sec.h>
161#include <asm/opcodes-virt.h>
162
163#define SMCCC_SMC_INST __SMC(0)
164#define SMCCC_HVC_INST __HVC(0)
165
166#endif
167
168#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
169
170#define __count_args(...) \
171 ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
172
173#define __constraint_write_0 \
174 "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
175#define __constraint_write_1 \
176 "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
177#define __constraint_write_2 \
178 "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
179#define __constraint_write_3 \
180 "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
181#define __constraint_write_4 __constraint_write_3
182#define __constraint_write_5 __constraint_write_4
183#define __constraint_write_6 __constraint_write_5
184#define __constraint_write_7 __constraint_write_6
185
186#define __constraint_read_0
187#define __constraint_read_1
188#define __constraint_read_2
189#define __constraint_read_3
190#define __constraint_read_4 "r" (r4)
191#define __constraint_read_5 __constraint_read_4, "r" (r5)
192#define __constraint_read_6 __constraint_read_5, "r" (r6)
193#define __constraint_read_7 __constraint_read_6, "r" (r7)
194
195#define __declare_arg_0(a0, res) \
196 struct arm_smccc_res *___res = res; \
197 register u32 r0 asm("r0") = a0; \
198 register unsigned long r1 asm("r1"); \
199 register unsigned long r2 asm("r2"); \
200 register unsigned long r3 asm("r3")
201
202#define __declare_arg_1(a0, a1, res) \
203 struct arm_smccc_res *___res = res; \
204 register u32 r0 asm("r0") = a0; \
205 register typeof(a1) r1 asm("r1") = a1; \
206 register unsigned long r2 asm("r2"); \
207 register unsigned long r3 asm("r3")
208
209#define __declare_arg_2(a0, a1, a2, res) \
210 struct arm_smccc_res *___res = res; \
211 register u32 r0 asm("r0") = a0; \
212 register typeof(a1) r1 asm("r1") = a1; \
213 register typeof(a2) r2 asm("r2") = a2; \
214 register unsigned long r3 asm("r3")
215
216#define __declare_arg_3(a0, a1, a2, a3, res) \
217 struct arm_smccc_res *___res = res; \
218 register u32 r0 asm("r0") = a0; \
219 register typeof(a1) r1 asm("r1") = a1; \
220 register typeof(a2) r2 asm("r2") = a2; \
221 register typeof(a3) r3 asm("r3") = a3
222
223#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
224 __declare_arg_3(a0, a1, a2, a3, res); \
225 register typeof(a4) r4 asm("r4") = a4
226
227#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
228 __declare_arg_4(a0, a1, a2, a3, a4, res); \
229 register typeof(a5) r5 asm("r5") = a5
230
231#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
232 __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
233 register typeof(a6) r6 asm("r6") = a6
234
235#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
236 __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
237 register typeof(a7) r7 asm("r7") = a7
238
239#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
240#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
241
242#define ___constraints(count) \
243 : __constraint_write_ ## count \
244 : __constraint_read_ ## count \
245 : "memory"
246#define __constraints(count) ___constraints(count)
247
248/*
249 * We have an output list that is not necessarily used, and GCC feels
250 * entitled to optimise the whole sequence away. "volatile" is what
251 * makes it stick.
252 */
253#define __arm_smccc_1_1(inst, ...) \
254 do { \
255 __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
256 asm volatile(inst "\n" \
257 __constraints(__count_args(__VA_ARGS__))); \
258 if (___res) \
259 *___res = (typeof(*___res)){r0, r1, r2, r3}; \
260 } while (0)
261
262/*
263 * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
264 *
265 * This is a variadic macro taking one to eight source arguments, and
266 * an optional return structure.
267 *
268 * @a0-a7: arguments passed in registers 0 to 7
269 * @res: result values from registers 0 to 3
270 *
271 * This macro is used to make SMC calls following SMC Calling Convention v1.1.
 272 * The contents of the supplied parameters are copied to registers 0 to 7
 273 * prior to the SMC instruction. If @res is not NULL, it is updated with
 274 * the contents of registers 0 to 3 on return from the SMC instruction.
275 */
276#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
277
278/*
279 * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
280 *
281 * This is a variadic macro taking one to eight source arguments, and
282 * an optional return structure.
283 *
284 * @a0-a7: arguments passed in registers 0 to 7
285 * @res: result values from registers 0 to 3
286 *
287 * This macro is used to make HVC calls following SMC Calling Convention v1.1.
 288 * The contents of the supplied parameters are copied to registers 0 to 7
 289 * prior to the HVC instruction. If @res is not NULL, it is updated with
 290 * the contents of registers 0 to 3 on return from the HVC instruction.
291 */
292#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
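
As a usage illustration, a hedged sketch (not taken from this patch) of probing the firmware for ARM_SMCCC_ARCH_WORKAROUND_1 with the new v1.1 fast calls; the function name is made up, and a negative value in res.a0 is assumed to mean "not implemented".

#include <linux/arm-smccc.h>
#include <linux/types.h>

static bool have_arch_workaround_1(void)
{
	struct arm_smccc_res res;

	/* ARCH_FEATURES only exists from SMCCC v1.1 onwards. */
	arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FUNC_ID, &res);
	if ((int)res.a0 < ARM_SMCCC_VERSION_1_1)
		return false;

	/* Ask whether the workaround function itself is implemented. */
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	return (int)res.a0 >= 0;
}
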
293
133#endif /*__ASSEMBLY__*/ 294#endif /*__ASSEMBLY__*/
134#endif /*__LINUX_ARM_SMCCC_H*/ 295#endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
new file mode 100644
index 000000000000..942afbd544b7
--- /dev/null
+++ b/include/linux/arm_sdei.h
@@ -0,0 +1,79 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2017 Arm Ltd.
3#ifndef __LINUX_ARM_SDEI_H
4#define __LINUX_ARM_SDEI_H
5
6#include <uapi/linux/arm_sdei.h>
7
8enum sdei_conduit_types {
9 CONDUIT_INVALID = 0,
10 CONDUIT_SMC,
11 CONDUIT_HVC,
12};
13
14#include <asm/sdei.h>
15
16/* Arch code should override this to set the entry point from firmware... */
17#ifndef sdei_arch_get_entry_point
18#define sdei_arch_get_entry_point(conduit) (0)
19#endif
20
21/*
 22 * When an event occurs, sdei_event_handler() will call a user-provided
 23 * callback of this type in NMI context on the CPU that received the event.
24 */
25typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg);
26
27/*
28 * Register your callback to claim an event. The event must be described
29 * by firmware.
30 */
31int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
32
33/*
34 * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling
35 * it until it succeeds.
36 */
37int sdei_event_unregister(u32 event_num);
38
39int sdei_event_enable(u32 event_num);
40int sdei_event_disable(u32 event_num);
41
42#ifdef CONFIG_ARM_SDE_INTERFACE
43/* For use by arch code when CPU hotplug notifiers are not appropriate. */
44int sdei_mask_local_cpu(void);
45int sdei_unmask_local_cpu(void);
46#else
47static inline int sdei_mask_local_cpu(void) { return 0; }
48static inline int sdei_unmask_local_cpu(void) { return 0; }
49#endif /* CONFIG_ARM_SDE_INTERFACE */
50
51
52/*
53 * This struct represents an event that has been registered. The driver
54 * maintains a list of all events, and which ones are registered. (Private
55 * events have one entry in the list, but are registered on each CPU).
56 * A pointer to this struct is passed to firmware, and back to the event
57 * handler. The event handler can then use this to invoke the registered
58 * callback, without having to walk the list.
59 *
60 * For CPU private events, this structure is per-cpu.
61 */
62struct sdei_registered_event {
63 /* For use by arch code: */
64 struct pt_regs interrupted_regs;
65
66 sdei_event_callback *callback;
67 void *callback_arg;
68 u32 event_num;
69 u8 priority;
70};
71
72/* The arch code entry point should then call this when an event arrives. */
73int notrace sdei_event_handler(struct pt_regs *regs,
74 struct sdei_registered_event *arg);
75
76/* arch code may use this to retrieve the extra registers. */
77int sdei_api_event_context(u32 query, u64 *result);
78
79#endif /* __LINUX_ARM_SDEI_H */
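
A hedged sketch of the intended call flow (not from this patch); the event number, callback body, and my_* names are hypothetical, and the callback's return value is assumed to follow the usual 0-on-success convention.

#include <linux/arm_sdei.h>
#include <linux/printk.h>

/* Runs in NMI context on the CPU that received the event. */
static int my_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
{
	pr_crit("SDEI event %u fired\n", event);
	return 0;
}

static int my_sdei_setup(u32 event_num)
{
	int err;

	err = sdei_event_register(event_num, my_sdei_cb, NULL);
	if (err)
		return err;

	return sdei_event_enable(event_num);
}
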
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c7a353825450..40d150ad7e07 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -448,6 +448,8 @@ enum {
448 ATA_SET_MAX_LOCK = 0x02, 448 ATA_SET_MAX_LOCK = 0x02,
449 ATA_SET_MAX_UNLOCK = 0x03, 449 ATA_SET_MAX_UNLOCK = 0x03,
450 ATA_SET_MAX_FREEZE_LOCK = 0x04, 450 ATA_SET_MAX_FREEZE_LOCK = 0x04,
451 ATA_SET_MAX_PASSWD_DMA = 0x05,
452 ATA_SET_MAX_UNLOCK_DMA = 0x06,
451 453
452 /* feature values for DEVICE CONFIGURATION OVERLAY */ 454 /* feature values for DEVICE CONFIGURATION OVERLAY */
453 ATA_DCO_RESTORE = 0xC0, 455 ATA_DCO_RESTORE = 0xC0,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0033eb..3e4ce54d84ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
332 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the 332 * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
333 * associated wb's list_lock. 333 * associated wb's list_lock.
334 */ 334 */
335static inline struct bdi_writeback *inode_to_wb(struct inode *inode) 335static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
336{ 336{
337#ifdef CONFIG_LOCKDEP 337#ifdef CONFIG_LOCKDEP
338 WARN_ON_ONCE(debug_locks && 338 WARN_ON_ONCE(debug_locks &&
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23d29b39f71e..d0eb659fa733 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
300 bv->bv_len = iter.bi_bvec_done; 300 bv->bv_len = iter.bi_bvec_done;
301} 301}
302 302
303static inline unsigned bio_pages_all(struct bio *bio)
304{
305 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
306 return bio->bi_vcnt;
307}
308
309static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
310{
311 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
312 return bio->bi_io_vec;
313}
314
315static inline struct page *bio_first_page_all(struct bio *bio)
316{
317 return bio_first_bvec_all(bio)->bv_page;
318}
319
320static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
321{
322 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
323 return &bio->bi_io_vec[bio->bi_vcnt - 1];
324}
325
303enum bip_flags { 326enum bip_flags {
304 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ 327 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
305 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ 328 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
477#endif 500#endif
478 501
479extern void bio_copy_data(struct bio *dst, struct bio *src); 502extern void bio_copy_data(struct bio *dst, struct bio *src);
480extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
481extern void bio_free_pages(struct bio *bio); 503extern void bio_free_pages(struct bio *bio);
482 504
483extern struct bio *bio_copy_user_iov(struct request_queue *, 505extern struct bio *bio_copy_user_iov(struct request_queue *,
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 1030651f8309..cf2588d81148 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -16,6 +16,7 @@
16#define _LINUX_BITFIELD_H 16#define _LINUX_BITFIELD_H
17 17
18#include <linux/build_bug.h> 18#include <linux/build_bug.h>
19#include <asm/byteorder.h>
19 20
20/* 21/*
21 * Bitfield access macros 22 * Bitfield access macros
@@ -103,4 +104,49 @@
103 (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ 104 (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
104 }) 105 })
105 106
107extern void __compiletime_warning("value doesn't fit into mask")
108__field_overflow(void);
109extern void __compiletime_error("bad bitfield mask")
110__bad_mask(void);
111static __always_inline u64 field_multiplier(u64 field)
112{
113 if ((field | (field - 1)) & ((field | (field - 1)) + 1))
114 __bad_mask();
115 return field & -field;
116}
117static __always_inline u64 field_mask(u64 field)
118{
119 return field / field_multiplier(field);
120}
121#define ____MAKE_OP(type,base,to,from) \
122static __always_inline __##type type##_encode_bits(base v, base field) \
123{ \
124 if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \
125 __field_overflow(); \
126 return to((v & field_mask(field)) * field_multiplier(field)); \
127} \
128static __always_inline __##type type##_replace_bits(__##type old, \
129 base val, base field) \
130{ \
131 return (old & ~to(field)) | type##_encode_bits(val, field); \
132} \
133static __always_inline void type##p_replace_bits(__##type *p, \
134 base val, base field) \
135{ \
136 *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
137} \
138static __always_inline base type##_get_bits(__##type v, base field) \
139{ \
140 return (from(v) & field)/field_multiplier(field); \
141}
142#define __MAKE_OP(size) \
143 ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
144 ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
145 ____MAKE_OP(u##size,u##size,,)
146__MAKE_OP(16)
147__MAKE_OP(32)
148__MAKE_OP(64)
149#undef __MAKE_OP
150#undef ____MAKE_OP
151
106#endif 152#endif
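
For orientation, a short sketch (not part of the patch) of the helpers __MAKE_OP() generates, using the little-endian 32-bit variants; the DESC_VERSION register layout is invented for the example.

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define DESC_VERSION	GENMASK(7, 4)	/* made-up field in an __le32 word */

static u32 desc_version(__le32 desc)
{
	return le32_get_bits(desc, DESC_VERSION);
}

static __le32 desc_set_version(__le32 desc, u32 ver)
{
	return le32_replace_bits(desc, ver, DESC_VERSION);
}
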
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 3489253e38fc..5f11fbdc27f8 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -64,9 +64,14 @@
64 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region 64 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
65 * bitmap_release_region(bitmap, pos, order) Free specified bit region 65 * bitmap_release_region(bitmap, pos, order) Free specified bit region
66 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region 66 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
67 * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) 67 * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
68 * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) 68 * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
69 * 69 *
 70 * Note: bitmap_zero() and bitmap_fill() operate over whole unsigned longs,
 71 * that is, bits beyond the requested length, up to the next unsigned long
 72 * boundary, are zeroed or filled as well. Use bitmap_clear() or
 73 * bitmap_set() instead when zeroing or filling must stop exactly at the
 74 * requested bit count.
70 */ 75 */
71 76
72/** 77/**
@@ -83,8 +88,12 @@
83 * test_and_change_bit(bit, addr) Change bit and return old value 88 * test_and_change_bit(bit, addr) Change bit and return old value
84 * find_first_zero_bit(addr, nbits) Position first zero bit in *addr 89 * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
85 * find_first_bit(addr, nbits) Position first set bit in *addr 90 * find_first_bit(addr, nbits) Position first set bit in *addr
86 * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit 91 * find_next_zero_bit(addr, nbits, bit)
92 * Position next zero bit in *addr >= bit
87 * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit 93 * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
94 * find_next_and_bit(addr1, addr2, nbits, bit)
95 * Same as find_next_bit, but in
96 * (*addr1 & *addr2)
88 * 97 *
89 */ 98 */
90 99
@@ -174,14 +183,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
174extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); 183extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
175extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); 184extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
176extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); 185extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
177extern unsigned int bitmap_from_u32array(unsigned long *bitmap, 186
178 unsigned int nbits,
179 const u32 *buf,
180 unsigned int nwords);
181extern unsigned int bitmap_to_u32array(u32 *buf,
182 unsigned int nwords,
183 const unsigned long *bitmap,
184 unsigned int nbits);
185#ifdef __BIG_ENDIAN 187#ifdef __BIG_ENDIAN
186extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); 188extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
187#else 189#else
@@ -209,12 +211,12 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
209 211
210static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) 212static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
211{ 213{
212 unsigned int nlongs = BITS_TO_LONGS(nbits); 214 if (small_const_nbits(nbits))
213 if (!small_const_nbits(nbits)) { 215 *dst = ~0UL;
214 unsigned int len = (nlongs - 1) * sizeof(unsigned long); 216 else {
215 memset(dst, 0xff, len); 217 unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
218 memset(dst, 0xff, len);
216 } 219 }
217 dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
218} 220}
219 221
220static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, 222static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
@@ -228,6 +230,35 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
228 } 230 }
229} 231}
230 232
233/*
234 * Copy bitmap and clear tail bits in last word.
235 */
236static inline void bitmap_copy_clear_tail(unsigned long *dst,
237 const unsigned long *src, unsigned int nbits)
238{
239 bitmap_copy(dst, src, nbits);
240 if (nbits % BITS_PER_LONG)
241 dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
242}
243
244/*
245 * On 32-bit systems bitmaps are represented as u32 arrays internally, and
246 * therefore conversion is not needed when copying data from/to arrays of u32.
247 */
248#if BITS_PER_LONG == 64
249extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
250 unsigned int nbits);
251extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
252 unsigned int nbits);
253#else
254#define bitmap_from_arr32(bitmap, buf, nbits) \
255 bitmap_copy_clear_tail((unsigned long *) (bitmap), \
256 (const unsigned long *) (buf), (nbits))
257#define bitmap_to_arr32(buf, bitmap, nbits) \
258 bitmap_copy_clear_tail((unsigned long *) (buf), \
259 (const unsigned long *) (bitmap), (nbits))
260#endif
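
A small sketch (not from the patch) of the new conversion helpers: copying a four-word u32 array into a 100-bit native bitmap and back; the sizes are arbitrary.

#include <linux/bitmap.h>

#define MY_NBITS	100	/* arbitrary example size */

static void my_mask_from_abi(unsigned long *dst, const u32 src[4])
{
	/* dst must hold at least BITS_TO_LONGS(MY_NBITS) longs. */
	bitmap_from_arr32(dst, src, MY_NBITS);
}

static void my_mask_to_abi(u32 dst[4], const unsigned long *src)
{
	bitmap_to_arr32(dst, src, MY_NBITS);
}
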
261
231static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, 262static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
232 const unsigned long *src2, unsigned int nbits) 263 const unsigned long *src2, unsigned int nbits)
233{ 264{
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index e9825ff57b15..69bea82ebeb1 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -660,12 +660,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
660static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, 660static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
661 struct blkg_rwstat *from) 661 struct blkg_rwstat *from)
662{ 662{
663 struct blkg_rwstat v = blkg_rwstat_read(from); 663 u64 sum[BLKG_RWSTAT_NR];
664 int i; 664 int i;
665 665
666 for (i = 0; i < BLKG_RWSTAT_NR; i++) 666 for (i = 0; i < BLKG_RWSTAT_NR; i++)
667 atomic64_add(atomic64_read(&v.aux_cnt[i]) + 667 sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
668 atomic64_read(&from->aux_cnt[i]), 668
669 for (i = 0; i < BLKG_RWSTAT_NR; i++)
670 atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
669 &to->aux_cnt[i]); 671 &to->aux_cnt[i]);
670} 672}
671 673
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 95c9a5c862e2..8efcf49796a3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
51 unsigned int queue_num; 51 unsigned int queue_num;
52 52
53 atomic_t nr_active; 53 atomic_t nr_active;
54 unsigned int nr_expired;
54 55
55 struct hlist_node cpuhp_dead; 56 struct hlist_node cpuhp_dead;
56 struct kobject kobj; 57 struct kobject kobj;
@@ -65,7 +66,7 @@ struct blk_mq_hw_ctx {
65#endif 66#endif
66 67
67 /* Must be the last member - see also blk_mq_hw_ctx_size(). */ 68 /* Must be the last member - see also blk_mq_hw_ctx_size(). */
68 struct srcu_struct queue_rq_srcu[0]; 69 struct srcu_struct srcu[0];
69}; 70};
70 71
71struct blk_mq_tag_set { 72struct blk_mq_tag_set {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 9e7d8bd776d2..bf18b95ed92d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -39,6 +39,52 @@ typedef u8 __bitwise blk_status_t;
39 39
40#define BLK_STS_AGAIN ((__force blk_status_t)12) 40#define BLK_STS_AGAIN ((__force blk_status_t)12)
41 41
42/*
43 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
44 * device related resources are unavailable, but the driver can guarantee
45 * that the queue will be rerun in the future once resources become
46 * available again. This is typically the case for device specific
 47 * resources that are consumed for IO. If the driver fails to allocate these
 48 * resources, we know that inflight (or pending) IO will free these
 49 * resources upon completion.
50 *
51 * This is different from BLK_STS_RESOURCE in that it explicitly references
52 * a device specific resource. For resources of wider scope, allocation
53 * failure can happen without having pending IO. This means that we can't
54 * rely on request completions freeing these resources, as IO may not be in
55 * flight. Examples of that are kernel memory allocations, DMA mappings, or
56 * any other system wide resources.
57 */
58#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
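
To make the distinction concrete, a hedged ->queue_rq() sketch (not from this patch); my_hw_tag_get() and my_dma_map() are hypothetical stand-ins for a device-owned and a system-wide resource respectively.

#include <linux/blk-mq.h>

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	/* Device-owned resource: in-flight IO is guaranteed to free it,
	 * so the queue will be rerun once that IO completes. */
	if (!my_hw_tag_get(rq))				/* hypothetical */
		return BLK_STS_DEV_RESOURCE;

	/* System-wide resource (e.g. a DMA mapping): no such guarantee. */
	if (my_dma_map(rq) < 0)				/* hypothetical */
		return BLK_STS_RESOURCE;

	blk_mq_start_request(rq);
	/* ... issue to hardware ... */
	return BLK_STS_OK;
}
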
59
60/**
61 * blk_path_error - returns true if error may be path related
62 * @error: status the request was completed with
63 *
64 * Description:
65 * This classifies block error status into non-retryable errors and ones
66 * that may be successful if retried on a failover path.
67 *
68 * Return:
69 * %false - retrying failover path will not help
70 * %true - may succeed if retried
71 */
72static inline bool blk_path_error(blk_status_t error)
73{
74 switch (error) {
75 case BLK_STS_NOTSUPP:
76 case BLK_STS_NOSPC:
77 case BLK_STS_TARGET:
78 case BLK_STS_NEXUS:
79 case BLK_STS_MEDIUM:
80 case BLK_STS_PROTECTION:
81 return false;
82 }
83
84 /* Anything else could be a path failure, so should be retried */
85 return true;
86}
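
One possible caller, sketched here rather than taken from the patch: a multipath-style completion path that retries only path-related errors; my_failover_req() is hypothetical.

#include <linux/blk-mq.h>

static void my_complete_rq(struct request *rq, blk_status_t status)
{
	if (status && blk_path_error(status)) {
		my_failover_req(rq);	/* hypothetical: retry on another path */
		return;
	}
	blk_mq_end_request(rq, status);
}
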
87
42struct blk_issue_stat { 88struct blk_issue_stat {
43 u64 stat; 89 u64 stat;
44}; 90};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0ce8a372d506..4f3df807cf8f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,6 +27,8 @@
27#include <linux/percpu-refcount.h> 27#include <linux/percpu-refcount.h>
28#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
29#include <linux/blkzoned.h> 29#include <linux/blkzoned.h>
30#include <linux/seqlock.h>
31#include <linux/u64_stats_sync.h>
30 32
31struct module; 33struct module;
32struct scsi_ioctl_command; 34struct scsi_ioctl_command;
@@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t;
121/* Look at ->special_vec for the actual data payload instead of the 123/* Look at ->special_vec for the actual data payload instead of the
122 bio chain. */ 124 bio chain. */
123#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) 125#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
126/* The per-zone write lock is held for this request */
127#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
128/* timeout is expired */
129#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
130/* already slept for hybrid poll */
131#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
124 132
125/* flags that prevent us from merging requests: */ 133/* flags that prevent us from merging requests: */
126#define RQF_NOMERGE_FLAGS \ 134#define RQF_NOMERGE_FLAGS \
@@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t;
133 * especially blk_mq_rq_ctx_init() to take care of the added fields. 141 * especially blk_mq_rq_ctx_init() to take care of the added fields.
134 */ 142 */
135struct request { 143struct request {
136 struct list_head queuelist;
137 union {
138 struct __call_single_data csd;
139 u64 fifo_time;
140 };
141
142 struct request_queue *q; 144 struct request_queue *q;
143 struct blk_mq_ctx *mq_ctx; 145 struct blk_mq_ctx *mq_ctx;
144 146
@@ -148,8 +150,6 @@ struct request {
148 150
149 int internal_tag; 151 int internal_tag;
150 152
151 unsigned long atomic_flags;
152
153 /* the following two fields are internal, NEVER access directly */ 153 /* the following two fields are internal, NEVER access directly */
154 unsigned int __data_len; /* total data len */ 154 unsigned int __data_len; /* total data len */
155 int tag; 155 int tag;
@@ -158,6 +158,8 @@ struct request {
158 struct bio *bio; 158 struct bio *bio;
159 struct bio *biotail; 159 struct bio *biotail;
160 160
161 struct list_head queuelist;
162
161 /* 163 /*
162 * The hash is used inside the scheduler, and killed once the 164 * The hash is used inside the scheduler, and killed once the
163 * request reaches the dispatch list. The ipi_list is only used 165 * request reaches the dispatch list. The ipi_list is only used
@@ -205,19 +207,16 @@ struct request {
205 struct hd_struct *part; 207 struct hd_struct *part;
206 unsigned long start_time; 208 unsigned long start_time;
207 struct blk_issue_stat issue_stat; 209 struct blk_issue_stat issue_stat;
208#ifdef CONFIG_BLK_CGROUP
209 struct request_list *rl; /* rl this rq is alloced from */
210 unsigned long long start_time_ns;
211 unsigned long long io_start_time_ns; /* when passed to hardware */
212#endif
213 /* Number of scatter-gather DMA addr+len pairs after 210 /* Number of scatter-gather DMA addr+len pairs after
214 * physical address coalescing is performed. 211 * physical address coalescing is performed.
215 */ 212 */
216 unsigned short nr_phys_segments; 213 unsigned short nr_phys_segments;
214
217#if defined(CONFIG_BLK_DEV_INTEGRITY) 215#if defined(CONFIG_BLK_DEV_INTEGRITY)
218 unsigned short nr_integrity_segments; 216 unsigned short nr_integrity_segments;
219#endif 217#endif
220 218
219 unsigned short write_hint;
221 unsigned short ioprio; 220 unsigned short ioprio;
222 221
223 unsigned int timeout; 222 unsigned int timeout;
@@ -226,11 +225,37 @@ struct request {
226 225
227 unsigned int extra_len; /* length of alignment and padding */ 226 unsigned int extra_len; /* length of alignment and padding */
228 227
229 unsigned short write_hint; 228 /*
229 * On blk-mq, the lower bits of ->gstate (generation number and
230 * state) carry the MQ_RQ_* state value and the upper bits the
231 * generation number which is monotonically incremented and used to
232 * distinguish the reuse instances.
233 *
234 * ->gstate_seq allows updates to ->gstate and other fields
235 * (currently ->deadline) during request start to be read
236 * atomically from the timeout path, so that it can operate on a
237 * coherent set of information.
238 */
239 seqcount_t gstate_seq;
240 u64 gstate;
241
242 /*
243 * ->aborted_gstate is used by the timeout to claim a specific
244 * recycle instance of this request. See blk_mq_timeout_work().
245 */
246 struct u64_stats_sync aborted_gstate_sync;
247 u64 aborted_gstate;
248
249 /* access through blk_rq_set_deadline, blk_rq_deadline */
250 unsigned long __deadline;
230 251
231 unsigned long deadline;
232 struct list_head timeout_list; 252 struct list_head timeout_list;
233 253
254 union {
255 struct __call_single_data csd;
256 u64 fifo_time;
257 };
258
234 /* 259 /*
235 * completion callback. 260 * completion callback.
236 */ 261 */
@@ -239,6 +264,12 @@ struct request {
239 264
240 /* for bidi */ 265 /* for bidi */
241 struct request *next_rq; 266 struct request *next_rq;
267
268#ifdef CONFIG_BLK_CGROUP
269 struct request_list *rl; /* rl this rq is alloced from */
270 unsigned long long start_time_ns;
271 unsigned long long io_start_time_ns; /* when passed to hardware */
272#endif
242}; 273};
243 274
244static inline bool blk_op_is_scsi(unsigned int op) 275static inline bool blk_op_is_scsi(unsigned int op)
@@ -564,6 +595,22 @@ struct request_queue {
564 struct queue_limits limits; 595 struct queue_limits limits;
565 596
566 /* 597 /*
598 * Zoned block device information for request dispatch control.
599 * nr_zones is the total number of zones of the device. This is always
600 * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
601 * bits which indicates if a zone is conventional (bit clear) or
602 * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
603 * bits which indicates if a zone is write locked, that is, if a write
604 * request targeting the zone was dispatched. All three fields are
605 * initialized by the low level device driver (e.g. scsi/sd.c).
606 * Stacking drivers (device mappers) may or may not initialize
607 * these fields.
608 */
609 unsigned int nr_zones;
610 unsigned long *seq_zones_bitmap;
611 unsigned long *seq_zones_wlock;
612
613 /*
567 * sg stuff 614 * sg stuff
568 */ 615 */
569 unsigned int sg_timeout; 616 unsigned int sg_timeout;
@@ -807,6 +854,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
807 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; 854 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
808} 855}
809 856
857static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
858{
859 return q->nr_zones;
860}
861
862static inline unsigned int blk_queue_zone_no(struct request_queue *q,
863 sector_t sector)
864{
865 if (!blk_queue_is_zoned(q))
866 return 0;
867 return sector >> ilog2(q->limits.chunk_sectors);
868}
869
870static inline bool blk_queue_zone_is_seq(struct request_queue *q,
871 sector_t sector)
872{
873 if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
874 return false;
875 return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
876}
877
810static inline bool rq_is_sync(struct request *rq) 878static inline bool rq_is_sync(struct request *rq)
811{ 879{
812 return op_is_sync(rq->cmd_flags); 880 return op_is_sync(rq->cmd_flags);
@@ -1046,6 +1114,16 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1046 return blk_rq_cur_bytes(rq) >> 9; 1114 return blk_rq_cur_bytes(rq) >> 9;
1047} 1115}
1048 1116
1117static inline unsigned int blk_rq_zone_no(struct request *rq)
1118{
1119 return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
1120}
1121
1122static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1123{
1124 return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
1125}
1126
1049/* 1127/*
1050 * Some commands like WRITE SAME have a payload or data transfer size which 1128 * Some commands like WRITE SAME have a payload or data transfer size which
1051 * is different from the size of the request. Any driver that supports such 1129 * is different from the size of the request. Any driver that supports such
@@ -1595,7 +1673,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
1595 1673
1596 if (q) 1674 if (q)
1597 return blk_queue_zone_sectors(q); 1675 return blk_queue_zone_sectors(q);
1676 return 0;
1677}
1678
1679static inline unsigned int bdev_nr_zones(struct block_device *bdev)
1680{
1681 struct request_queue *q = bdev_get_queue(bdev);
1598 1682
1683 if (q)
1684 return blk_queue_nr_zones(q);
1599 return 0; 1685 return 0;
1600} 1686}
1601 1687
@@ -1731,8 +1817,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1731 1817
1732int kblockd_schedule_work(struct work_struct *work); 1818int kblockd_schedule_work(struct work_struct *work);
1733int kblockd_schedule_work_on(int cpu, struct work_struct *work); 1819int kblockd_schedule_work_on(int cpu, struct work_struct *work);
1734int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
1735int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1736int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); 1820int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1737 1821
1738#ifdef CONFIG_BLK_CGROUP 1822#ifdef CONFIG_BLK_CGROUP
@@ -1971,6 +2055,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1971extern int bdev_read_page(struct block_device *, sector_t, struct page *); 2055extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1972extern int bdev_write_page(struct block_device *, sector_t, struct page *, 2056extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1973 struct writeback_control *); 2057 struct writeback_control *);
2058
2059#ifdef CONFIG_BLK_DEV_ZONED
2060bool blk_req_needs_zone_write_lock(struct request *rq);
2061void __blk_req_zone_write_lock(struct request *rq);
2062void __blk_req_zone_write_unlock(struct request *rq);
2063
2064static inline void blk_req_zone_write_lock(struct request *rq)
2065{
2066 if (blk_req_needs_zone_write_lock(rq))
2067 __blk_req_zone_write_lock(rq);
2068}
2069
2070static inline void blk_req_zone_write_unlock(struct request *rq)
2071{
2072 if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
2073 __blk_req_zone_write_unlock(rq);
2074}
2075
2076static inline bool blk_req_zone_is_write_locked(struct request *rq)
2077{
2078 return rq->q->seq_zones_wlock &&
2079 test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
2080}
2081
2082static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
2083{
2084 if (!blk_req_needs_zone_write_lock(rq))
2085 return true;
2086 return !blk_req_zone_is_write_locked(rq);
2087}
2088#else
2089static inline bool blk_req_needs_zone_write_lock(struct request *rq)
2090{
2091 return false;
2092}
2093
2094static inline void blk_req_zone_write_lock(struct request *rq)
2095{
2096}
2097
2098static inline void blk_req_zone_write_unlock(struct request *rq)
2099{
2100}
2101static inline bool blk_req_zone_is_write_locked(struct request *rq)
2102{
2103 return false;
2104}
2105
2106static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
2107{
2108 return true;
2109}
2110#endif /* CONFIG_BLK_DEV_ZONED */
2111
1974#else /* CONFIG_BLOCK */ 2112#else /* CONFIG_BLOCK */
1975 2113
1976struct block_device; 2114struct block_device;
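
A minimal usage sketch (not part of the patch) for the zone write-lock helpers added above; the dispatch and completion hooks are hypothetical driver code.

    #include <linux/blkdev.h>

    /* Only hand a zoned write to the hardware if its target zone is not
     * already write-locked; the lock is a no-op for requests that do not
     * need zone serialization. */
    static bool ex_dispatch(struct request *rq)
    {
            if (!blk_req_can_dispatch_to_zone(rq))
                    return false;           /* zone busy, retry later */

            blk_req_zone_write_lock(rq);
            /* ... issue rq to the device ... */
            return true;
    }

    static void ex_complete(struct request *rq)
    {
            blk_req_zone_write_unlock(rq);  /* lets the next zone write go */
    }
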
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0b25cf87b6d6..66df387106de 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
17#include <linux/numa.h> 17#include <linux/numa.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19
20struct bpf_verifier_env;
20struct perf_event; 21struct perf_event;
21struct bpf_prog; 22struct bpf_prog;
22struct bpf_map; 23struct bpf_map;
@@ -24,6 +25,7 @@ struct bpf_map;
 24/* map is generic key/value storage optionally accessible by eBPF programs */ 25/* map is generic key/value storage optionally accessible by eBPF programs */
25struct bpf_map_ops { 26struct bpf_map_ops {
26 /* funcs callable from userspace (via syscall) */ 27 /* funcs callable from userspace (via syscall) */
28 int (*map_alloc_check)(union bpf_attr *attr);
27 struct bpf_map *(*map_alloc)(union bpf_attr *attr); 29 struct bpf_map *(*map_alloc)(union bpf_attr *attr);
28 void (*map_release)(struct bpf_map *map, struct file *map_file); 30 void (*map_release)(struct bpf_map *map, struct file *map_file);
29 void (*map_free)(struct bpf_map *map); 31 void (*map_free)(struct bpf_map *map);
@@ -72,6 +74,33 @@ struct bpf_map {
72 char name[BPF_OBJ_NAME_LEN]; 74 char name[BPF_OBJ_NAME_LEN];
73}; 75};
74 76
77struct bpf_offloaded_map;
78
79struct bpf_map_dev_ops {
80 int (*map_get_next_key)(struct bpf_offloaded_map *map,
81 void *key, void *next_key);
82 int (*map_lookup_elem)(struct bpf_offloaded_map *map,
83 void *key, void *value);
84 int (*map_update_elem)(struct bpf_offloaded_map *map,
85 void *key, void *value, u64 flags);
86 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
87};
88
89struct bpf_offloaded_map {
90 struct bpf_map map;
91 struct net_device *netdev;
92 const struct bpf_map_dev_ops *dev_ops;
93 void *dev_priv;
94 struct list_head offloads;
95};
96
97static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
98{
99 return container_of(map, struct bpf_offloaded_map, map);
100}
101
102extern const struct bpf_map_ops bpf_map_offload_ops;
103
75/* function argument constraints */ 104/* function argument constraints */
76enum bpf_arg_type { 105enum bpf_arg_type {
77 ARG_DONTCARE = 0, /* unused argument in helper function */ 106 ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -193,14 +222,20 @@ struct bpf_verifier_ops {
193 struct bpf_prog *prog, u32 *target_size); 222 struct bpf_prog *prog, u32 *target_size);
194}; 223};
195 224
196struct bpf_dev_offload { 225struct bpf_prog_offload_ops {
226 int (*insn_hook)(struct bpf_verifier_env *env,
227 int insn_idx, int prev_insn_idx);
228};
229
230struct bpf_prog_offload {
197 struct bpf_prog *prog; 231 struct bpf_prog *prog;
198 struct net_device *netdev; 232 struct net_device *netdev;
199 void *dev_priv; 233 void *dev_priv;
200 struct list_head offloads; 234 struct list_head offloads;
201 bool dev_state; 235 bool dev_state;
202 bool verifier_running; 236 const struct bpf_prog_offload_ops *dev_ops;
203 wait_queue_head_t verifier_done; 237 void *jited_image;
238 u32 jited_len;
204}; 239};
205 240
206struct bpf_prog_aux { 241struct bpf_prog_aux {
@@ -209,6 +244,10 @@ struct bpf_prog_aux {
209 u32 max_ctx_offset; 244 u32 max_ctx_offset;
210 u32 stack_depth; 245 u32 stack_depth;
211 u32 id; 246 u32 id;
247 u32 func_cnt;
248 bool offload_requested;
249 struct bpf_prog **func;
250 void *jit_data; /* JIT specific data. arch dependent */
212 struct latch_tree_node ksym_tnode; 251 struct latch_tree_node ksym_tnode;
213 struct list_head ksym_lnode; 252 struct list_head ksym_lnode;
214 const struct bpf_prog_ops *ops; 253 const struct bpf_prog_ops *ops;
@@ -220,7 +259,7 @@ struct bpf_prog_aux {
220#ifdef CONFIG_SECURITY 259#ifdef CONFIG_SECURITY
221 void *security; 260 void *security;
222#endif 261#endif
223 struct bpf_dev_offload *offload; 262 struct bpf_prog_offload *offload;
224 union { 263 union {
225 struct work_struct work; 264 struct work_struct work;
226 struct rcu_head rcu; 265 struct rcu_head rcu;
@@ -295,6 +334,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
295 334
296void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, 335void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
297 struct bpf_prog *old_prog); 336 struct bpf_prog *old_prog);
337int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
338 __u32 __user *prog_ids, u32 request_cnt,
339 __u32 __user *prog_cnt);
298int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, 340int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
299 struct bpf_prog *exclude_prog, 341 struct bpf_prog *exclude_prog,
300 struct bpf_prog *include_prog, 342 struct bpf_prog *include_prog,
@@ -355,6 +397,9 @@ void bpf_prog_put(struct bpf_prog *prog);
355int __bpf_prog_charge(struct user_struct *user, u32 pages); 397int __bpf_prog_charge(struct user_struct *user, u32 pages);
356void __bpf_prog_uncharge(struct user_struct *user, u32 pages); 398void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
357 399
400void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
401void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
402
358struct bpf_map *bpf_map_get_with_uref(u32 ufd); 403struct bpf_map *bpf_map_get_with_uref(u32 ufd);
359struct bpf_map *__bpf_map_get(struct fd f); 404struct bpf_map *__bpf_map_get(struct fd f);
360struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); 405struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -363,6 +408,7 @@ void bpf_map_put(struct bpf_map *map);
363int bpf_map_precharge_memlock(u32 pages); 408int bpf_map_precharge_memlock(u32 pages);
364void *bpf_map_area_alloc(size_t size, int numa_node); 409void *bpf_map_area_alloc(size_t size, int numa_node);
365void bpf_map_area_free(void *base); 410void bpf_map_area_free(void *base);
411void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
366 412
367extern int sysctl_unprivileged_bpf_disabled; 413extern int sysctl_unprivileged_bpf_disabled;
368 414
@@ -409,6 +455,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
409 455
410/* verify correctness of eBPF program */ 456/* verify correctness of eBPF program */
411int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); 457int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
458void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
412 459
413/* Map specifics */ 460/* Map specifics */
414struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); 461struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
@@ -536,14 +583,35 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
536 583
537int bpf_prog_offload_compile(struct bpf_prog *prog); 584int bpf_prog_offload_compile(struct bpf_prog *prog);
538void bpf_prog_offload_destroy(struct bpf_prog *prog); 585void bpf_prog_offload_destroy(struct bpf_prog *prog);
586int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
587 struct bpf_prog *prog);
588
589int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
590
591int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
592int bpf_map_offload_update_elem(struct bpf_map *map,
593 void *key, void *value, u64 flags);
594int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
595int bpf_map_offload_get_next_key(struct bpf_map *map,
596 void *key, void *next_key);
597
598bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
539 599
540#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 600#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
541int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); 601int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
542 602
543static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) 603static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
544{ 604{
545 return aux->offload; 605 return aux->offload_requested;
546} 606}
607
608static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
609{
610 return unlikely(map->ops == &bpf_map_offload_ops);
611}
612
613struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
614void bpf_map_offload_map_free(struct bpf_map *map);
547#else 615#else
548static inline int bpf_prog_offload_init(struct bpf_prog *prog, 616static inline int bpf_prog_offload_init(struct bpf_prog *prog,
549 union bpf_attr *attr) 617 union bpf_attr *attr)
@@ -555,9 +623,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
555{ 623{
556 return false; 624 return false;
557} 625}
626
627static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
628{
629 return false;
630}
631
632static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
633{
634 return ERR_PTR(-EOPNOTSUPP);
635}
636
637static inline void bpf_map_offload_map_free(struct bpf_map *map)
638{
639}
558#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ 640#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
559 641
560#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) 642#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
561struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); 643struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
562int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); 644int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
563#else 645#else
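
A hypothetical sketch of how a device backend could plug into the new offloaded-map hooks; the ex_* names are placeholders, not kernel symbols.

    #include <linux/bpf.h>

    /* Hypothetical device backend for an offloaded map. */
    static int ex_dev_lookup(struct bpf_offloaded_map *offmap,
                             void *key, void *value)
    {
            /* ... query the NIC for the element ... */
            return -ENOENT;
    }

    static const struct bpf_map_dev_ops ex_dev_ops = {
            .map_lookup_elem = ex_dev_lookup,
    };

    /* Core code recovers the offloaded wrapper from a plain bpf_map. */
    static int ex_lookup(struct bpf_map *map, void *key, void *value)
    {
            struct bpf_offloaded_map *offmap = map_to_offmap(map);

            return offmap->dev_ops->map_lookup_elem(offmap, key, value);
    }
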
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 978c1d9c9383..19b8349a3809 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
42BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) 42BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
43#ifdef CONFIG_NET 43#ifdef CONFIG_NET
44BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) 44BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
45#ifdef CONFIG_STREAM_PARSER 45#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
46BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) 46BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
47#endif 47#endif
48BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) 48BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1632bb13ad8a..6b66cd1aa0b9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -76,6 +76,14 @@ struct bpf_reg_state {
76 s64 smax_value; /* maximum possible (s64)value */ 76 s64 smax_value; /* maximum possible (s64)value */
77 u64 umin_value; /* minimum possible (u64)value */ 77 u64 umin_value; /* minimum possible (u64)value */
78 u64 umax_value; /* maximum possible (u64)value */ 78 u64 umax_value; /* maximum possible (u64)value */
79 /* Inside the callee two registers can be both PTR_TO_STACK like
80 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
81 * while another to the caller's stack. To differentiate them 'frameno'
82 * is used which is an index in bpf_verifier_state->frame[] array
83 * pointing to bpf_func_state.
84 * This field must be second to last, for states_equal() reasons.
85 */
86 u32 frameno;
79 /* This field must be last, for states_equal() reasons. */ 87 /* This field must be last, for states_equal() reasons. */
80 enum bpf_reg_liveness live; 88 enum bpf_reg_liveness live;
81}; 89};
@@ -83,7 +91,8 @@ struct bpf_reg_state {
83enum bpf_stack_slot_type { 91enum bpf_stack_slot_type {
84 STACK_INVALID, /* nothing was stored in this stack slot */ 92 STACK_INVALID, /* nothing was stored in this stack slot */
85 STACK_SPILL, /* register spilled into stack */ 93 STACK_SPILL, /* register spilled into stack */
86 STACK_MISC /* BPF program wrote some data into this slot */ 94 STACK_MISC, /* BPF program wrote some data into this slot */
95 STACK_ZERO, /* BPF program wrote constant zero */
87}; 96};
88 97
89#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ 98#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -96,13 +105,34 @@ struct bpf_stack_state {
96/* state of the program: 105/* state of the program:
97 * type of all registers and stack info 106 * type of all registers and stack info
98 */ 107 */
99struct bpf_verifier_state { 108struct bpf_func_state {
100 struct bpf_reg_state regs[MAX_BPF_REG]; 109 struct bpf_reg_state regs[MAX_BPF_REG];
101 struct bpf_verifier_state *parent; 110 struct bpf_verifier_state *parent;
111 /* index of call instruction that called into this func */
112 int callsite;
113 /* stack frame number of this function state from pov of
114 * enclosing bpf_verifier_state.
115 * 0 = main function, 1 = first callee.
116 */
117 u32 frameno;
118 /* subprog number == index within subprog_stack_depth
119 * zero == main subprog
120 */
121 u32 subprogno;
122
123 /* should be second to last. See copy_func_state() */
102 int allocated_stack; 124 int allocated_stack;
103 struct bpf_stack_state *stack; 125 struct bpf_stack_state *stack;
104}; 126};
105 127
128#define MAX_CALL_FRAMES 8
129struct bpf_verifier_state {
130 /* call stack tracking */
131 struct bpf_func_state *frame[MAX_CALL_FRAMES];
132 struct bpf_verifier_state *parent;
133 u32 curframe;
134};
135
106/* linked list of verifier states used to prune search */ 136/* linked list of verifier states used to prune search */
107struct bpf_verifier_state_list { 137struct bpf_verifier_state_list {
108 struct bpf_verifier_state state; 138 struct bpf_verifier_state state;
@@ -113,6 +143,7 @@ struct bpf_insn_aux_data {
113 union { 143 union {
114 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ 144 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
115 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ 145 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
146 s32 call_imm; /* saved imm field of call insn */
116 }; 147 };
117 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ 148 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
118 bool seen; /* this insn was processed by the verifier */ 149 bool seen; /* this insn was processed by the verifier */
@@ -135,11 +166,7 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
135 return log->len_used >= log->len_total - 1; 166 return log->len_used >= log->len_total - 1;
136} 167}
137 168
138struct bpf_verifier_env; 169#define BPF_MAX_SUBPROGS 256
139struct bpf_ext_analyzer_ops {
140 int (*insn_hook)(struct bpf_verifier_env *env,
141 int insn_idx, int prev_insn_idx);
142};
143 170
144/* single container for all structs 171/* single container for all structs
145 * one verifier_env per bpf_check() call 172 * one verifier_env per bpf_check() call
@@ -152,29 +179,31 @@ struct bpf_verifier_env {
152 bool strict_alignment; /* perform strict pointer alignment checks */ 179 bool strict_alignment; /* perform strict pointer alignment checks */
153 struct bpf_verifier_state *cur_state; /* current verifier state */ 180 struct bpf_verifier_state *cur_state; /* current verifier state */
154 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ 181 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
155 const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
156 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ 182 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
157 u32 used_map_cnt; /* number of used maps */ 183 u32 used_map_cnt; /* number of used maps */
158 u32 id_gen; /* used to generate unique reg IDs */ 184 u32 id_gen; /* used to generate unique reg IDs */
159 bool allow_ptr_leaks; 185 bool allow_ptr_leaks;
160 bool seen_direct_write; 186 bool seen_direct_write;
161 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ 187 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
162
163 struct bpf_verifer_log log; 188 struct bpf_verifer_log log;
189 u32 subprog_starts[BPF_MAX_SUBPROGS];
190 /* computes the stack depth of each bpf function */
191 u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
192 u32 subprog_cnt;
164}; 193};
165 194
195__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
196 const char *fmt, ...);
197
166static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) 198static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
167{ 199{
168 return env->cur_state->regs; 200 struct bpf_verifier_state *cur = env->cur_state;
201
202 return cur->frame[cur->curframe]->regs;
169} 203}
170 204
171#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
172int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); 205int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
173#else 206int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
174static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) 207 int insn_idx, int prev_insn_idx);
175{
176 return -EOPNOTSUPP;
177}
178#endif
179 208
180#endif /* _LINUX_BPF_VERIFIER_H */ 209#endif /* _LINUX_BPF_VERIFIER_H */
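
A small sketch, assuming the structures above, of how per-frame register state is reached through curframe, mirroring the updated cur_regs() helper; ex_caller_regs is an invented name.

    #include <linux/bpf_verifier.h>

    /* Registers of the frame that called the current one, or NULL when the
     * verifier is already in the main function (frame 0). */
    static struct bpf_reg_state *ex_caller_regs(struct bpf_verifier_env *env)
    {
            struct bpf_verifier_state *st = env->cur_state;

            if (!st->curframe)
                    return NULL;
            return st->frame[st->curframe - 1]->regs;
    }
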
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 8ff86b4c1b8a..d3339dd48b1a 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -14,6 +14,7 @@
14#define PHY_ID_BCM5241 0x0143bc30 14#define PHY_ID_BCM5241 0x0143bc30
15#define PHY_ID_BCMAC131 0x0143bc70 15#define PHY_ID_BCMAC131 0x0143bc70
16#define PHY_ID_BCM5481 0x0143bca0 16#define PHY_ID_BCM5481 0x0143bca0
17#define PHY_ID_BCM5395 0x0143bcf0
17#define PHY_ID_BCM54810 0x03625d00 18#define PHY_ID_BCM54810 0x03625d00
18#define PHY_ID_BCM5482 0x0143bcb0 19#define PHY_ID_BCM5482 0x0143bcb0
19#define PHY_ID_BCM5411 0x00206070 20#define PHY_ID_BCM5411 0x00206070
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 8b1bf8d3d4a2..894e5d125de6 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -81,11 +81,14 @@ struct buffer_head {
81/* 81/*
82 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() 82 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
83 * and buffer_foo() functions. 83 * and buffer_foo() functions.
 84 * To avoid resetting buffer flags that are already set, because that causes
85 * a costly cache line transition, check the flag first.
84 */ 86 */
85#define BUFFER_FNS(bit, name) \ 87#define BUFFER_FNS(bit, name) \
86static __always_inline void set_buffer_##name(struct buffer_head *bh) \ 88static __always_inline void set_buffer_##name(struct buffer_head *bh) \
87{ \ 89{ \
88 set_bit(BH_##bit, &(bh)->b_state); \ 90 if (!test_bit(BH_##bit, &(bh)->b_state)) \
91 set_bit(BH_##bit, &(bh)->b_state); \
89} \ 92} \
90static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ 93static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
91{ \ 94{ \
@@ -151,7 +154,6 @@ void buffer_check_dirty_writeback(struct page *page,
151 154
152void mark_buffer_dirty(struct buffer_head *bh); 155void mark_buffer_dirty(struct buffer_head *bh);
153void mark_buffer_write_io_error(struct buffer_head *bh); 156void mark_buffer_write_io_error(struct buffer_head *bh);
154void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
155void touch_buffer(struct buffer_head *bh); 157void touch_buffer(struct buffer_head *bh);
156void set_bh_page(struct buffer_head *bh, 158void set_bh_page(struct buffer_head *bh,
157 struct page *page, unsigned long offset); 159 struct page *page, unsigned long offset);
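
For reference, roughly what the setter half of BUFFER_FNS(Dirty, dirty) expands to after this change: the atomic set_bit() is skipped when the bit is already set, avoiding a needless cache-line transition.

    static __always_inline void set_buffer_dirty(struct buffer_head *bh)
    {
            if (!test_bit(BH_Dirty, &bh->b_state))
                    set_bit(BH_Dirty, &bh->b_state);
    }
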
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 3efed0d742a0..43d1fd50d433 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -8,7 +8,6 @@
8#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) 8#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
9#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) 9#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
10#define BUILD_BUG_ON_ZERO(e) (0) 10#define BUILD_BUG_ON_ZERO(e) (0)
11#define BUILD_BUG_ON_NULL(e) ((void *)0)
12#define BUILD_BUG_ON_INVALID(e) (0) 11#define BUILD_BUG_ON_INVALID(e) (0)
13#define BUILD_BUG_ON_MSG(cond, msg) (0) 12#define BUILD_BUG_ON_MSG(cond, msg) (0)
14#define BUILD_BUG_ON(condition) (0) 13#define BUILD_BUG_ON(condition) (0)
@@ -28,7 +27,6 @@
28 * aren't permitted). 27 * aren't permitted).
29 */ 28 */
30#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) 29#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
31#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); }))
32 30
33/* 31/*
34 * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the 32 * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ec8a4d7af6bd..fe7a22dd133b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
125 ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ 125 ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
126 bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) 126 bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
127 127
128/* for iterating one bio from start to end */
129#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
130{ \
131 .bi_sector = 0, \
132 .bi_size = UINT_MAX, \
133 .bi_idx = 0, \
134 .bi_bvec_done = 0, \
135}
136
128#endif /* __LINUX_BVEC_ITER_H */ 137#endif /* __LINUX_BVEC_ITER_H */
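
A minimal sketch, assuming a caller-built bvec array, of how the new BVEC_ITER_ALL_INIT initializer might be used; ex_init_iter is a hypothetical helper.

    #include <linux/bvec.h>

    /* Start an iterator over a caller-built bvec array; the caller is
     * assumed to know the total byte count and caps bi_size with it. */
    static void ex_init_iter(struct bvec_iter *iter, unsigned int total_bytes)
    {
            *iter = BVEC_ITER_ALL_INIT;   /* sector 0, index 0, nothing consumed */
            iter->bi_size = total_bytes;
    }
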
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 61f1cf2d9f44..055aaf5ed9af 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -46,6 +46,7 @@ struct can_priv {
46 unsigned int bitrate_const_cnt; 46 unsigned int bitrate_const_cnt;
47 const u32 *data_bitrate_const; 47 const u32 *data_bitrate_const;
48 unsigned int data_bitrate_const_cnt; 48 unsigned int data_bitrate_const_cnt;
49 u32 bitrate_max;
49 struct can_clock clock; 50 struct can_clock clock;
50 51
51 enum can_state state; 52 enum can_state state;
@@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
166unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); 167unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
167void can_free_echo_skb(struct net_device *dev, unsigned int idx); 168void can_free_echo_skb(struct net_device *dev, unsigned int idx);
168 169
170#ifdef CONFIG_OF
171void of_can_transceiver(struct net_device *dev);
172#else
173static inline void of_can_transceiver(struct net_device *dev) { }
174#endif
175
169struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); 176struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
170struct sk_buff *alloc_canfd_skb(struct net_device *dev, 177struct sk_buff *alloc_canfd_skb(struct net_device *dev,
171 struct canfd_frame **cfd); 178 struct canfd_frame **cfd);
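
A hypothetical probe fragment showing where of_can_transceiver() would be called so a DT-described transceiver can cap the bitrate; ex_can_probe is a placeholder name.

    #include <linux/can/dev.h>

    static int ex_can_probe(struct net_device *ndev)
    {
            /* Lets a DT-described transceiver cap struct can_priv::bitrate_max;
             * a no-op when CONFIG_OF is not set. */
            of_can_transceiver(ndev);
            return register_candev(ndev);
    }
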
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8b7fd8eeccee..9f242b876fde 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -561,7 +561,7 @@ struct cftype {
561 561
562/* 562/*
563 * Control Group subsystem type. 563 * Control Group subsystem type.
564 * See Documentation/cgroups/cgroups.txt for details 564 * See Documentation/cgroup-v1/cgroups.txt for details
565 */ 565 */
566struct cgroup_subsys { 566struct cgroup_subsys {
567 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); 567 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 7c925e6211f1..f711be6e8c44 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -20,6 +20,8 @@
20 * flags used across common struct clk. these flags should only affect the 20 * flags used across common struct clk. these flags should only affect the
21 * top-level framework. custom flags for dealing with hardware specifics 21 * top-level framework. custom flags for dealing with hardware specifics
22 * belong in struct clk_foo 22 * belong in struct clk_foo
23 *
24 * Please update clk_flags[] in drivers/clk/clk.c when making changes here!
23 */ 25 */
24#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ 26#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */
25#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ 27#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
@@ -412,7 +414,7 @@ extern const struct clk_ops clk_divider_ro_ops;
412 414
413unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, 415unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
414 unsigned int val, const struct clk_div_table *table, 416 unsigned int val, const struct clk_div_table *table,
415 unsigned long flags); 417 unsigned long flags, unsigned long width);
416long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, 418long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
417 unsigned long rate, unsigned long *prate, 419 unsigned long rate, unsigned long *prate,
418 const struct clk_div_table *table, 420 const struct clk_div_table *table,
@@ -744,6 +746,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw);
744unsigned long __clk_get_flags(struct clk *clk); 746unsigned long __clk_get_flags(struct clk *clk);
745unsigned long clk_hw_get_flags(const struct clk_hw *hw); 747unsigned long clk_hw_get_flags(const struct clk_hw *hw);
746bool clk_hw_is_prepared(const struct clk_hw *hw); 748bool clk_hw_is_prepared(const struct clk_hw *hw);
749bool clk_hw_rate_is_protected(const struct clk_hw *hw);
747bool clk_hw_is_enabled(const struct clk_hw *hw); 750bool clk_hw_is_enabled(const struct clk_hw *hw);
748bool __clk_is_enabled(struct clk *clk); 751bool __clk_is_enabled(struct clk *clk);
749struct clk *__clk_lookup(const char *name); 752struct clk *__clk_lookup(const char *name);
@@ -806,6 +809,44 @@ extern struct of_device_id __clk_of_table;
806 } \ 809 } \
807 OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) 810 OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver)
808 811
812#define CLK_HW_INIT(_name, _parent, _ops, _flags) \
813 (&(struct clk_init_data) { \
814 .flags = _flags, \
815 .name = _name, \
816 .parent_names = (const char *[]) { _parent }, \
817 .num_parents = 1, \
818 .ops = _ops, \
819 })
820
821#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
822 (&(struct clk_init_data) { \
823 .flags = _flags, \
824 .name = _name, \
825 .parent_names = _parents, \
826 .num_parents = ARRAY_SIZE(_parents), \
827 .ops = _ops, \
828 })
829
830#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \
831 (&(struct clk_init_data) { \
832 .flags = _flags, \
833 .name = _name, \
834 .parent_names = NULL, \
835 .num_parents = 0, \
836 .ops = _ops, \
837 })
838
839#define CLK_FIXED_FACTOR(_struct, _name, _parent, \
840 _div, _mult, _flags) \
841 struct clk_fixed_factor _struct = { \
842 .div = _div, \
843 .mult = _mult, \
844 .hw.init = CLK_HW_INIT(_name, \
845 _parent, \
846 &clk_fixed_factor_ops, \
847 _flags), \
848 }
849
809#ifdef CONFIG_OF 850#ifdef CONFIG_OF
810int of_clk_add_provider(struct device_node *np, 851int of_clk_add_provider(struct device_node *np,
811 struct clk *(*clk_src_get)(struct of_phandle_args *args, 852 struct clk *(*clk_src_get)(struct of_phandle_args *args,
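
A sketch, assuming a hypothetical provider, of clock data declared with the new static initializer macros; the clock names and the ex_* symbols are invented for illustration.

    #include <linux/clk-provider.h>

    /* A fixed /2 clock derived from a hypothetical "osc24m" parent, declared
     * entirely with the new static initializers. */
    static CLK_FIXED_FACTOR(ex_bus_clk, "ex-bus", "osc24m", 2, 1, 0);

    static struct clk_gate ex_gate_clk = {
            .bit_idx = 3,                   /* .reg is filled in at probe time */
            .hw.init = CLK_HW_INIT("ex-gate", "ex-bus", &clk_gate_ops, 0),
    };
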
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 12c96d94d1fa..4c4ef9f34db3 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -331,6 +331,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id);
331 */ 331 */
332struct clk *devm_get_clk_from_child(struct device *dev, 332struct clk *devm_get_clk_from_child(struct device *dev,
333 struct device_node *np, const char *con_id); 333 struct device_node *np, const char *con_id);
334/**
335 * clk_rate_exclusive_get - get exclusivity over the rate control of a
336 * producer
337 * @clk: clock source
338 *
339 * This function allows drivers to get exclusive control over the rate of a
 340 * provider. It prevents any other consumer from executing, even indirectly,
 341 * an operation which could alter the rate of the provider or cause glitches.
342 *
 343 * If exclusivity is claimed more than once on a clock, even by the same driver,
344 * the rate effectively gets locked as exclusivity can't be preempted.
345 *
346 * Must not be called from within atomic context.
347 *
348 * Returns success (0) or negative errno.
349 */
350int clk_rate_exclusive_get(struct clk *clk);
351
352/**
353 * clk_rate_exclusive_put - release exclusivity over the rate control of a
354 * producer
355 * @clk: clock source
356 *
 357 * This function allows a driver to release the exclusivity it previously got
358 * from clk_rate_exclusive_get()
359 *
360 * The caller must balance the number of clk_rate_exclusive_get() and
361 * clk_rate_exclusive_put() calls.
362 *
363 * Must not be called from within atomic context.
364 */
365void clk_rate_exclusive_put(struct clk *clk);
334 366
335/** 367/**
336 * clk_enable - inform the system when the clock source should be running. 368 * clk_enable - inform the system when the clock source should be running.
@@ -473,6 +505,23 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
473int clk_set_rate(struct clk *clk, unsigned long rate); 505int clk_set_rate(struct clk *clk, unsigned long rate);
474 506
475/** 507/**
 508 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
509 * clock source
510 * @clk: clock source
511 * @rate: desired clock rate in Hz
512 *
513 * This helper function allows drivers to atomically set the rate of a producer
514 * and claim exclusivity over the rate control of the producer.
515 *
516 * It is essentially a combination of clk_set_rate() and
 517 * clk_rate_exclusive_get(). The caller must balance this call with a call to
 518 * clk_rate_exclusive_put().
519 *
520 * Returns success (0) or negative errno.
521 */
522int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
523
524/**
476 * clk_has_parent - check if a clock is a possible parent for another 525 * clk_has_parent - check if a clock is a possible parent for another
477 * @clk: clock source 526 * @clk: clock source
478 * @parent: parent clock source 527 * @parent: parent clock source
@@ -583,6 +632,14 @@ static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
583 632
584static inline void devm_clk_put(struct device *dev, struct clk *clk) {} 633static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
585 634
635
636static inline int clk_rate_exclusive_get(struct clk *clk)
637{
638 return 0;
639}
640
641static inline void clk_rate_exclusive_put(struct clk *clk) {}
642
586static inline int clk_enable(struct clk *clk) 643static inline int clk_enable(struct clk *clk)
587{ 644{
588 return 0; 645 return 0;
@@ -609,6 +666,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate)
609 return 0; 666 return 0;
610} 667}
611 668
669static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
670{
671 return 0;
672}
673
612static inline long clk_round_rate(struct clk *clk, unsigned long rate) 674static inline long clk_round_rate(struct clk *clk, unsigned long rate)
613{ 675{
614 return 0; 676 return 0;
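
A consumer-side sketch of the new exclusive-rate API; the 100 MHz figure and the ex_* functions are illustrative only.

    #include <linux/clk.h>

    static int ex_start_stream(struct clk *clk)
    {
            /* Pin the rate at 100 MHz; other consumers can no longer perform
             * operations that would change it until we let go. */
            return clk_set_rate_exclusive(clk, 100000000);
    }

    static void ex_stop_stream(struct clk *clk)
    {
            clk_rate_exclusive_put(clk);    /* balances the implicit get above */
    }
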
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 2eabc862abdb..4890ff033220 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -12,7 +12,7 @@
12#ifndef __CLKDEV_H 12#ifndef __CLKDEV_H
13#define __CLKDEV_H 13#define __CLKDEV_H
14 14
15#include <asm/clkdev.h> 15#include <linux/slab.h>
16 16
17struct clk; 17struct clk;
18struct clk_hw; 18struct clk_hw;
@@ -52,9 +52,4 @@ int clk_add_alias(const char *, const char *, const char *, struct device *);
52int clk_register_clkdev(struct clk *, const char *, const char *); 52int clk_register_clkdev(struct clk *, const char *, const char *);
53int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); 53int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
54 54
55#ifdef CONFIG_COMMON_CLK
56int __clk_get(struct clk *clk);
57void __clk_put(struct clk *clk);
58#endif
59
60#endif 55#endif
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 0fc36406f32c..8a9643857c4a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -157,6 +157,104 @@ struct compat_sigaction {
157 compat_sigset_t sa_mask __packed; 157 compat_sigset_t sa_mask __packed;
158}; 158};
159 159
160typedef union compat_sigval {
161 compat_int_t sival_int;
162 compat_uptr_t sival_ptr;
163} compat_sigval_t;
164
165typedef struct compat_siginfo {
166 int si_signo;
167#ifndef __ARCH_HAS_SWAPPED_SIGINFO
168 int si_errno;
169 int si_code;
170#else
171 int si_code;
172 int si_errno;
173#endif
174
175 union {
176 int _pad[128/sizeof(int) - 3];
177
178 /* kill() */
179 struct {
180 compat_pid_t _pid; /* sender's pid */
181 __compat_uid32_t _uid; /* sender's uid */
182 } _kill;
183
184 /* POSIX.1b timers */
185 struct {
186 compat_timer_t _tid; /* timer id */
187 int _overrun; /* overrun count */
188 compat_sigval_t _sigval; /* same as below */
189 } _timer;
190
191 /* POSIX.1b signals */
192 struct {
193 compat_pid_t _pid; /* sender's pid */
194 __compat_uid32_t _uid; /* sender's uid */
195 compat_sigval_t _sigval;
196 } _rt;
197
198 /* SIGCHLD */
199 struct {
200 compat_pid_t _pid; /* which child */
201 __compat_uid32_t _uid; /* sender's uid */
202 int _status; /* exit code */
203 compat_clock_t _utime;
204 compat_clock_t _stime;
205 } _sigchld;
206
207#ifdef CONFIG_X86_X32_ABI
208 /* SIGCHLD (x32 version) */
209 struct {
210 compat_pid_t _pid; /* which child */
211 __compat_uid32_t _uid; /* sender's uid */
212 int _status; /* exit code */
213 compat_s64 _utime;
214 compat_s64 _stime;
215 } _sigchld_x32;
216#endif
217
218 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
219 struct {
220 compat_uptr_t _addr; /* faulting insn/memory ref. */
221#ifdef __ARCH_SI_TRAPNO
222 int _trapno; /* TRAP # which caused the signal */
223#endif
224 union {
225 /*
226 * used when si_code=BUS_MCEERR_AR or
227 * used when si_code=BUS_MCEERR_AO
228 */
229 short int _addr_lsb; /* Valid LSB of the reported address. */
230 /* used when si_code=SEGV_BNDERR */
231 struct {
232 short _dummy_bnd;
233 compat_uptr_t _lower;
234 compat_uptr_t _upper;
235 } _addr_bnd;
236 /* used when si_code=SEGV_PKUERR */
237 struct {
238 short _dummy_pkey;
239 u32 _pkey;
240 } _addr_pkey;
241 };
242 } _sigfault;
243
244 /* SIGPOLL */
245 struct {
246 compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
247 int _fd;
248 } _sigpoll;
249
250 struct {
251 compat_uptr_t _call_addr; /* calling user insn */
252 int _syscall; /* triggering system call number */
253 unsigned int _arch; /* AUDIT_ARCH_* of syscall */
254 } _sigsys;
255 } _sifields;
256} compat_siginfo_t;
257
160/* 258/*
161 * These functions operate on 32- or 64-bit specs depending on 259 * These functions operate on 32- or 64-bit specs depending on
162 * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. 260 * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
@@ -412,7 +510,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
412 unsigned long bitmap_size); 510 unsigned long bitmap_size);
413long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, 511long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
414 unsigned long bitmap_size); 512 unsigned long bitmap_size);
415int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); 513int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
416int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); 514int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
417int get_compat_sigevent(struct sigevent *event, 515int get_compat_sigevent(struct sigevent *event,
418 const struct compat_sigevent __user *u_event); 516 const struct compat_sigevent __user *u_event);
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 3b609edffa8f..d02a4df3f473 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -19,3 +19,11 @@
19 19
20#define randomized_struct_fields_start struct { 20#define randomized_struct_fields_start struct {
21#define randomized_struct_fields_end }; 21#define randomized_struct_fields_end };
22
23/* all clang versions usable with the kernel support KASAN ABI version 5 */
24#define KASAN_ABI_VERSION 5
25
26/* emulate gcc's __SANITIZE_ADDRESS__ flag */
27#if __has_feature(address_sanitizer)
28#define __SANITIZE_ADDRESS__
29#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 52e611ab9a6c..c2cc57a2f508 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -185,23 +185,21 @@ void __read_once_size(const volatile void *p, void *res, int size)
185 185
186#ifdef CONFIG_KASAN 186#ifdef CONFIG_KASAN
187/* 187/*
 188 * This function is not 'inline' because __no_sanitize_address conflicts 188 * We can't declare function 'inline' because __no_sanitize_address conflicts
189 * with inlining. Attempt to inline it may cause a build failure. 189 * with inlining. Attempt to inline it may cause a build failure.
190 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 190 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
191 * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 191 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
192 */ 192 */
193static __no_sanitize_address __maybe_unused 193# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
194void __read_once_size_nocheck(const volatile void *p, void *res, int size)
195{
196 __READ_ONCE_SIZE;
197}
198#else 194#else
199static __always_inline 195# define __no_kasan_or_inline __always_inline
196#endif
197
198static __no_kasan_or_inline
200void __read_once_size_nocheck(const volatile void *p, void *res, int size) 199void __read_once_size_nocheck(const volatile void *p, void *res, int size)
201{ 200{
202 __READ_ONCE_SIZE; 201 __READ_ONCE_SIZE;
203} 202}
204#endif
205 203
206static __always_inline void __write_once_size(volatile void *p, void *res, int size) 204static __always_inline void __write_once_size(volatile void *p, void *res, int size)
207{ 205{
@@ -240,6 +238,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
240 * required ordering. 238 * required ordering.
241 */ 239 */
242#include <asm/barrier.h> 240#include <asm/barrier.h>
241#include <linux/kasan-checks.h>
243 242
244#define __READ_ONCE(x, check) \ 243#define __READ_ONCE(x, check) \
245({ \ 244({ \
@@ -259,6 +258,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
259 */ 258 */
260#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) 259#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
261 260
261static __no_kasan_or_inline
262unsigned long read_word_at_a_time(const void *addr)
263{
264 kasan_check_read(addr, 1);
265 return *(unsigned long *)addr;
266}
267
262#define WRITE_ONCE(x, val) \ 268#define WRITE_ONCE(x, val) \
263({ \ 269({ \
264 union { typeof(x) __val; char __c[1]; } __u = \ 270 union { typeof(x) __val; char __c[1]; } __u = \
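
A minimal sketch of the intended use of read_word_at_a_time(): word-sized loads in scanning code that may deliberately read past the byte of interest, with only the first byte KASAN-checked; ex_first_word is a hypothetical helper.

    static unsigned long ex_first_word(const char *name)
    {
            /* Only byte 0 is KASAN-checked; the rest of the word may extend
             * past the string's logical end on purpose (assumption: the
             * caller knows the allocation is padded to a word boundary). */
            return read_word_at_a_time(name);
    }
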
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 723e952fde0d..d14ef4e77c8a 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -275,6 +275,50 @@ enum {
275#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) 275#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
276#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) 276#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
277 277
278#define CPER_ARM_CACHE_ERROR 0
279#define CPER_ARM_TLB_ERROR 1
280#define CPER_ARM_BUS_ERROR 2
281#define CPER_ARM_VENDOR_ERROR 3
282#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
283
284#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
285#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
286#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
287#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
288#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
289#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
290#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
291#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
292#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
293#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
294#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
295#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
296
297#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
298#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0)
299#define CPER_ARM_ERR_OPERATION_SHIFT 18
300#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0)
301#define CPER_ARM_ERR_LEVEL_SHIFT 22
302#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0)
303#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
304#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0)
305#define CPER_ARM_ERR_CORRECTED_SHIFT 26
306#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0)
307#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
308#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0)
309#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
310#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0)
311#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
312#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0)
313#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
314#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0)
315#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
316#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0)
317#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
318#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0)
319#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
320#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0)
321
278/* 322/*
279 * All tables and structs must be byte-packed to match CPER 323 * All tables and structs must be byte-packed to match CPER
280 * specification, since the tables are provided by the system BIOS 324 * specification, since the tables are provided by the system BIOS
@@ -494,6 +538,8 @@ struct cper_sec_pcie {
494/* Reset to default packing */ 538/* Reset to default packing */
495#pragma pack() 539#pragma pack()
496 540
541extern const char * const cper_proc_error_type_strs[4];
542
497u64 cper_next_record_id(void); 543u64 cper_next_record_id(void);
498const char *cper_severity_str(unsigned int); 544const char *cper_severity_str(unsigned int);
499const char *cper_mem_err_type_str(unsigned int); 545const char *cper_mem_err_type_str(unsigned int);
@@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
503 struct cper_mem_err_compact *); 549 struct cper_mem_err_compact *);
504const char *cper_mem_err_unpack(struct trace_seq *, 550const char *cper_mem_err_unpack(struct trace_seq *,
505 struct cper_mem_err_compact *); 551 struct cper_mem_err_compact *);
552void cper_print_proc_arm(const char *pfx,
553 const struct cper_sec_proc_arm *proc);
506 554
507#endif 555#endif
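
A sketch of decoding an ARM processor error-information word with the new shift/mask pairs (the mask is applied after the shift); err_info is a hypothetical value taken from a CPER record.

    #include <linux/cper.h>

    static void ex_decode(u64 err_info)
    {
            u32 type  = (err_info >> CPER_ARM_ERR_TRANSACTION_SHIFT) &
                        CPER_ARM_ERR_TRANSACTION_MASK;
            u32 level = (err_info >> CPER_ARM_ERR_LEVEL_SHIFT) &
                        CPER_ARM_ERR_LEVEL_MASK;

            pr_info("transaction type %u, cache level %u\n", type, level);
    }
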
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 065f3a8eb486..21e8d248d956 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -629,6 +629,18 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
629 for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) 629 for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
630 630
631/* 631/*
632 * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
633 * with index
634 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
635 * @table: the cpufreq_frequency_table * to iterate over.
636 * @idx: the table entry currently being processed
637 */
638
639#define cpufreq_for_each_entry_idx(pos, table, idx) \
640 for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
641 pos++, idx++)
642
643/*
632 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table 644 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
633 * excluding CPUFREQ_ENTRY_INVALID frequencies. 645 * excluding CPUFREQ_ENTRY_INVALID frequencies.
634 * @pos: the cpufreq_frequency_table * to use as a loop cursor. 646 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -641,6 +653,21 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
641 continue; \ 653 continue; \
642 else 654 else
643 655
656/*
657 * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
658 * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
659 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
660 * @table: the cpufreq_frequency_table * to iterate over.
661 * @idx: the table entry currently being processed
662 */
663
664#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
665 cpufreq_for_each_entry_idx(pos, table, idx) \
666 if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
667 continue; \
668 else
669
670
644int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, 671int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
645 struct cpufreq_frequency_table *table); 672 struct cpufreq_frequency_table *table);
646 673
@@ -667,19 +694,20 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
667 unsigned int target_freq) 694 unsigned int target_freq)
668{ 695{
669 struct cpufreq_frequency_table *table = policy->freq_table; 696 struct cpufreq_frequency_table *table = policy->freq_table;
670 struct cpufreq_frequency_table *pos, *best = table - 1; 697 struct cpufreq_frequency_table *pos;
671 unsigned int freq; 698 unsigned int freq;
699 int idx, best = -1;
672 700
673 cpufreq_for_each_valid_entry(pos, table) { 701 cpufreq_for_each_valid_entry_idx(pos, table, idx) {
674 freq = pos->frequency; 702 freq = pos->frequency;
675 703
676 if (freq >= target_freq) 704 if (freq >= target_freq)
677 return pos - table; 705 return idx;
678 706
679 best = pos; 707 best = idx;
680 } 708 }
681 709
682 return best - table; 710 return best;
683} 711}
684 712
685/* Find lowest freq at or above target in a table in descending order */ 713/* Find lowest freq at or above target in a table in descending order */
@@ -687,28 +715,29 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
687 unsigned int target_freq) 715 unsigned int target_freq)
688{ 716{
689 struct cpufreq_frequency_table *table = policy->freq_table; 717 struct cpufreq_frequency_table *table = policy->freq_table;
690 struct cpufreq_frequency_table *pos, *best = table - 1; 718 struct cpufreq_frequency_table *pos;
691 unsigned int freq; 719 unsigned int freq;
720 int idx, best = -1;
692 721
693 cpufreq_for_each_valid_entry(pos, table) { 722 cpufreq_for_each_valid_entry_idx(pos, table, idx) {
694 freq = pos->frequency; 723 freq = pos->frequency;
695 724
696 if (freq == target_freq) 725 if (freq == target_freq)
697 return pos - table; 726 return idx;
698 727
699 if (freq > target_freq) { 728 if (freq > target_freq) {
700 best = pos; 729 best = idx;
701 continue; 730 continue;
702 } 731 }
703 732
704 /* No freq found above target_freq */ 733 /* No freq found above target_freq */
705 if (best == table - 1) 734 if (best == -1)
706 return pos - table; 735 return idx;
707 736
708 return best - table; 737 return best;
709 } 738 }
710 739
711 return best - table; 740 return best;
712} 741}
713 742
714/* Works only on sorted freq-tables */ 743/* Works only on sorted freq-tables */
@@ -728,28 +757,29 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
728 unsigned int target_freq) 757 unsigned int target_freq)
729{ 758{
730 struct cpufreq_frequency_table *table = policy->freq_table; 759 struct cpufreq_frequency_table *table = policy->freq_table;
731 struct cpufreq_frequency_table *pos, *best = table - 1; 760 struct cpufreq_frequency_table *pos;
732 unsigned int freq; 761 unsigned int freq;
762 int idx, best = -1;
733 763
734 cpufreq_for_each_valid_entry(pos, table) { 764 cpufreq_for_each_valid_entry_idx(pos, table, idx) {
735 freq = pos->frequency; 765 freq = pos->frequency;
736 766
737 if (freq == target_freq) 767 if (freq == target_freq)
738 return pos - table; 768 return idx;
739 769
740 if (freq < target_freq) { 770 if (freq < target_freq) {
741 best = pos; 771 best = idx;
742 continue; 772 continue;
743 } 773 }
744 774
745 /* No freq found below target_freq */ 775 /* No freq found below target_freq */
746 if (best == table - 1) 776 if (best == -1)
747 return pos - table; 777 return idx;
748 778
749 return best - table; 779 return best;
750 } 780 }
751 781
752 return best - table; 782 return best;
753} 783}
754 784
755/* Find highest freq at or below target in a table in descending order */ 785/* Find highest freq at or below target in a table in descending order */
@@ -757,19 +787,20 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
757 unsigned int target_freq) 787 unsigned int target_freq)
758{ 788{
759 struct cpufreq_frequency_table *table = policy->freq_table; 789 struct cpufreq_frequency_table *table = policy->freq_table;
760 struct cpufreq_frequency_table *pos, *best = table - 1; 790 struct cpufreq_frequency_table *pos;
761 unsigned int freq; 791 unsigned int freq;
792 int idx, best = -1;
762 793
763 cpufreq_for_each_valid_entry(pos, table) { 794 cpufreq_for_each_valid_entry_idx(pos, table, idx) {
764 freq = pos->frequency; 795 freq = pos->frequency;
765 796
766 if (freq <= target_freq) 797 if (freq <= target_freq)
767 return pos - table; 798 return idx;
768 799
769 best = pos; 800 best = idx;
770 } 801 }
771 802
772 return best - table; 803 return best;
773} 804}
774 805
775/* Works only on sorted freq-tables */ 806/* Works only on sorted freq-tables */
@@ -789,32 +820,33 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
789 unsigned int target_freq) 820 unsigned int target_freq)
790{ 821{
791 struct cpufreq_frequency_table *table = policy->freq_table; 822 struct cpufreq_frequency_table *table = policy->freq_table;
792 struct cpufreq_frequency_table *pos, *best = table - 1; 823 struct cpufreq_frequency_table *pos;
793 unsigned int freq; 824 unsigned int freq;
825 int idx, best = -1;
794 826
795 cpufreq_for_each_valid_entry(pos, table) { 827 cpufreq_for_each_valid_entry_idx(pos, table, idx) {
796 freq = pos->frequency; 828 freq = pos->frequency;
797 829
798 if (freq == target_freq) 830 if (freq == target_freq)
799 return pos - table; 831 return idx;
800 832
801 if (freq < target_freq) { 833 if (freq < target_freq) {
802 best = pos; 834 best = idx;
803 continue; 835 continue;
804 } 836 }
805 837
806 /* No freq found below target_freq */ 838 /* No freq found below target_freq */
807 if (best == table - 1) 839 if (best == -1)
808 return pos - table; 840 return idx;
809 841
810 /* Choose the closest freq */ 842 /* Choose the closest freq */
811 if (target_freq - best->frequency > freq - target_freq) 843 if (target_freq - table[best].frequency > freq - target_freq)
812 return pos - table; 844 return idx;
813 845
814 return best - table; 846 return best;
815 } 847 }
816 848
817 return best - table; 849 return best;
818} 850}
819 851
820/* Find closest freq to target in a table in descending order */ 852/* Find closest freq to target in a table in descending order */
@@ -822,32 +854,33 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
822 unsigned int target_freq) 854 unsigned int target_freq)
823{ 855{
824 struct cpufreq_frequency_table *table = policy->freq_table; 856 struct cpufreq_frequency_table *table = policy->freq_table;
825 struct cpufreq_frequency_table *pos, *best = table - 1; 857 struct cpufreq_frequency_table *pos;
826 unsigned int freq; 858 unsigned int freq;
859 int idx, best = -1;
827 860
828 cpufreq_for_each_valid_entry(pos, table) { 861 cpufreq_for_each_valid_entry_idx(pos, table, idx) {
829 freq = pos->frequency; 862 freq = pos->frequency;
830 863
831 if (freq == target_freq) 864 if (freq == target_freq)
832 return pos - table; 865 return idx;
833 866
834 if (freq > target_freq) { 867 if (freq > target_freq) {
835 best = pos; 868 best = idx;
836 continue; 869 continue;
837 } 870 }
838 871
839 /* No freq found above target_freq */ 872 /* No freq found above target_freq */
840 if (best == table - 1) 873 if (best == -1)
841 return pos - table; 874 return idx;
842 875
843 /* Choose the closest freq */ 876 /* Choose the closest freq */
844 if (best->frequency - target_freq > target_freq - freq) 877 if (table[best].frequency - target_freq > target_freq - freq)
845 return pos - table; 878 return idx;
846 879
847 return best - table; 880 return best;
848 } 881 }
849 882
850 return best - table; 883 return best;
851} 884}
852 885
853/* Works only on sorted freq-tables */ 886/* Works only on sorted freq-tables */
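
A sketch of the index-based iteration style the table-lookup helpers above switched to; ex_lowest_freq_index is a hypothetical function.

    #include <linux/cpufreq.h>

    /* Find the index of the lowest valid frequency without doing pointer
     * arithmetic on the table. */
    static int ex_lowest_freq_index(struct cpufreq_frequency_table *table)
    {
            struct cpufreq_frequency_table *pos;
            unsigned int lowest = UINT_MAX;
            int idx, best = -1;

            cpufreq_for_each_valid_entry_idx(pos, table, idx) {
                    if (pos->frequency < lowest) {
                            lowest = pos->frequency;
                            best = idx;
                    }
            }
            return best;    /* -1 if the table held no valid entries */
    }
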
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 1a32e558eb11..5172ad0daa7c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -59,6 +59,7 @@ enum cpuhp_state {
59 CPUHP_PCI_XGENE_DEAD, 59 CPUHP_PCI_XGENE_DEAD,
60 CPUHP_IOMMU_INTEL_DEAD, 60 CPUHP_IOMMU_INTEL_DEAD,
61 CPUHP_LUSTRE_CFS_DEAD, 61 CPUHP_LUSTRE_CFS_DEAD,
62 CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
62 CPUHP_WORKQUEUE_PREP, 63 CPUHP_WORKQUEUE_PREP,
63 CPUHP_POWER_NUMA_PREPARE, 64 CPUHP_POWER_NUMA_PREPARE,
64 CPUHP_HRTIMERS_PREPARE, 65 CPUHP_HRTIMERS_PREPARE,
@@ -109,6 +110,7 @@ enum cpuhp_state {
109 CPUHP_AP_PERF_XTENSA_STARTING, 110 CPUHP_AP_PERF_XTENSA_STARTING,
110 CPUHP_AP_PERF_METAG_STARTING, 111 CPUHP_AP_PERF_METAG_STARTING,
111 CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, 112 CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
113 CPUHP_AP_ARM_SDEI_STARTING,
112 CPUHP_AP_ARM_VFP_STARTING, 114 CPUHP_AP_ARM_VFP_STARTING,
113 CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, 115 CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
114 CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, 116 CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
@@ -137,6 +139,7 @@ enum cpuhp_state {
137 CPUHP_AP_ARM64_ISNDEP_STARTING, 139 CPUHP_AP_ARM64_ISNDEP_STARTING,
138 CPUHP_AP_SMPCFD_DYING, 140 CPUHP_AP_SMPCFD_DYING,
139 CPUHP_AP_X86_TBOOT_DYING, 141 CPUHP_AP_X86_TBOOT_DYING,
142 CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
140 CPUHP_AP_ONLINE, 143 CPUHP_AP_ONLINE,
141 CPUHP_TEARDOWN_CPU, 144 CPUHP_TEARDOWN_CPU,
142 CPUHP_AP_ONLINE_IDLE, 145 CPUHP_AP_ONLINE_IDLE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f7788d23b57..871f9e21810c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
257{return 0;} 257{return 0;}
258#endif 258#endif
259 259
260#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ 260#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
261({ \ 261({ \
262 int __ret; \ 262 int __ret = 0; \
263 \ 263 \
264 if (!idx) { \ 264 if (!idx) { \
265 cpu_do_idle(); \ 265 cpu_do_idle(); \
266 return idx; \ 266 return idx; \
267 } \ 267 } \
268 \ 268 \
269 __ret = cpu_pm_enter(); \ 269 if (!is_retention) \
270 if (!__ret) { \ 270 __ret = cpu_pm_enter(); \
271 __ret = low_level_idle_enter(idx); \ 271 if (!__ret) { \
272 cpu_pm_exit(); \ 272 __ret = low_level_idle_enter(idx); \
273 } \ 273 if (!is_retention) \
274 \ 274 cpu_pm_exit(); \
275 __ret ? -1 : idx; \ 275 } \
276 \
277 __ret ? -1 : idx; \
276}) 278})
277 279
280#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
281 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
282
283#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
284 __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
285
278#endif /* _LINUX_CPUIDLE_H */ 286#endif /* _LINUX_CPUIDLE_H */
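
A hypothetical cpuidle .enter callback using the new retention variant, which skips the cpu_pm_enter()/cpu_pm_exit() notifier calls because the state preserves context; the ex_* names are placeholders.

    #include <linux/cpuidle.h>

    /* Hypothetical firmware call that enters a retention state (context is
     * preserved across it). */
    static int ex_fw_retention_enter(unsigned long idx)
    {
            /* ... PSCI / firmware call would go here ... */
            return 0;
    }

    static int ex_idle_enter(struct cpuidle_device *dev,
                             struct cpuidle_driver *drv, int idx)
    {
            return CPU_PM_CPU_IDLE_ENTER_RETENTION(ex_fw_retention_enter, idx);
    }
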
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 75b565194437..d4a2a7dcd72d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -640,7 +640,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
640/** 640/**
641 * cpumask_size - size to allocate for a 'struct cpumask' in bytes 641 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
642 */ 642 */
643static inline size_t cpumask_size(void) 643static inline unsigned int cpumask_size(void)
644{ 644{
645 return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); 645 return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
646} 646}
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 1b8e41597ef5..934633a05d20 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -112,7 +112,7 @@ static inline int cpuset_do_slab_mem_spread(void)
112 return task_spread_slab(current); 112 return task_spread_slab(current);
113} 113}
114 114
115extern int current_cpuset_is_being_rebound(void); 115extern bool current_cpuset_is_being_rebound(void);
116 116
117extern void rebuild_sched_domains(void); 117extern void rebuild_sched_domains(void);
118 118
@@ -247,9 +247,9 @@ static inline int cpuset_do_slab_mem_spread(void)
247 return 0; 247 return 0;
248} 248}
249 249
250static inline int current_cpuset_is_being_rebound(void) 250static inline bool current_cpuset_is_being_rebound(void)
251{ 251{
252 return 0; 252 return false;
253} 253}
254 254
255static inline void rebuild_sched_domains(void) 255static inline void rebuild_sched_domains(void)
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index a992e6ca2f1c..f7ac2aa93269 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -2,13 +2,13 @@
2#ifndef LINUX_CRASH_DUMP_H 2#ifndef LINUX_CRASH_DUMP_H
3#define LINUX_CRASH_DUMP_H 3#define LINUX_CRASH_DUMP_H
4 4
5#ifdef CONFIG_CRASH_DUMP
6#include <linux/kexec.h> 5#include <linux/kexec.h>
7#include <linux/proc_fs.h> 6#include <linux/proc_fs.h>
8#include <linux/elf.h> 7#include <linux/elf.h>
9 8
10#include <asm/pgtable.h> /* for pgprot_t */ 9#include <asm/pgtable.h> /* for pgprot_t */
11 10
11#ifdef CONFIG_CRASH_DUMP
12#define ELFCORE_ADDR_MAX (-1ULL) 12#define ELFCORE_ADDR_MAX (-1ULL)
13#define ELFCORE_ADDR_ERR (-2ULL) 13#define ELFCORE_ADDR_ERR (-2ULL)
14 14
@@ -52,13 +52,13 @@ void vmcore_cleanup(void);
52 * has passed the elf core header address on command line. 52 * has passed the elf core header address on command line.
53 * 53 *
54 * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will 54 * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will
55 * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of 55 * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic
56 * previous kernel. 56 * of previous kernel.
57 */ 57 */
58 58
59static inline int is_kdump_kernel(void) 59static inline bool is_kdump_kernel(void)
60{ 60{
61 return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; 61 return elfcorehdr_addr != ELFCORE_ADDR_MAX;
62} 62}
63 63
64/* is_vmcore_usable() checks if the kernel is booting after a panic and 64/* is_vmcore_usable() checks if the kernel is booting after a panic and
@@ -89,7 +89,7 @@ extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
89extern void unregister_oldmem_pfn_is_ram(void); 89extern void unregister_oldmem_pfn_is_ram(void);
90 90
91#else /* !CONFIG_CRASH_DUMP */ 91#else /* !CONFIG_CRASH_DUMP */
92static inline int is_kdump_kernel(void) { return 0; } 92static inline bool is_kdump_kernel(void) { return 0; }
93#endif /* CONFIG_CRASH_DUMP */ 93#endif /* CONFIG_CRASH_DUMP */
94 94
95extern unsigned long saved_max_pfn; 95extern unsigned long saved_max_pfn;
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index cd4f420231ba..72c92c396bb8 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,12 +5,19 @@
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7extern u16 const crc_ccitt_table[256]; 7extern u16 const crc_ccitt_table[256];
8extern u16 const crc_ccitt_false_table[256];
8 9
9extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); 10extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
11extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
10 12
11static inline u16 crc_ccitt_byte(u16 crc, const u8 c) 13static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
12{ 14{
13 return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; 15 return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
14} 16}
15 17
18static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
19{
20 return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
21}
22
16#endif /* _LINUX_CRC_CCITT_H */ 23#endif /* _LINUX_CRC_CCITT_H */
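
The new crc_ccitt_false_byte() helper mirrors crc_ccitt_byte() but shifts the CRC MSB-first, as CRC-16/CCITT-FALSE requires. A hedged sketch of the byte-at-a-time loop one would expect crc_ccitt_false() to implement; my_crc16_false() is illustrative and 0xffff is the conventional initial value for this variant.

#include <linux/crc-ccitt.h>

static u16 my_crc16_false(const u8 *buf, size_t len)
{
	u16 crc = 0xffff;	/* conventional CCITT-FALSE seed */

	while (len--)
		crc = crc_ccitt_false_byte(crc, *buf++);

	/* expected to match crc_ccitt_false(0xffff, data, len) on the
	 * same input */
	return crc;
}
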
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 78508ca4b108..7e6e84cf6383 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -107,8 +107,16 @@
107#define CRYPTO_ALG_INTERNAL 0x00002000 107#define CRYPTO_ALG_INTERNAL 0x00002000
108 108
109/* 109/*
110 * Set if the algorithm has a ->setkey() method but can be used without
111 * calling it first, i.e. there is a default key.
112 */
113#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
114
115/*
110 * Transform masks and values (for crt_flags). 116 * Transform masks and values (for crt_flags).
111 */ 117 */
118#define CRYPTO_TFM_NEED_KEY 0x00000001
119
112#define CRYPTO_TFM_REQ_MASK 0x000fff00 120#define CRYPTO_TFM_REQ_MASK 0x000fff00
113#define CRYPTO_TFM_RES_MASK 0xfff00000 121#define CRYPTO_TFM_RES_MASK 0xfff00000
114 122
@@ -447,7 +455,7 @@ struct crypto_alg {
447 unsigned int cra_alignmask; 455 unsigned int cra_alignmask;
448 456
449 int cra_priority; 457 int cra_priority;
450 atomic_t cra_refcnt; 458 refcount_t cra_refcnt;
451 459
452 char cra_name[CRYPTO_MAX_ALG_NAME]; 460 char cra_name[CRYPTO_MAX_ALG_NAME];
453 char cra_driver_name[CRYPTO_MAX_ALG_NAME]; 461 char cra_driver_name[CRYPTO_MAX_ALG_NAME];
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 5258346c558c..0185ecdae135 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -96,7 +96,7 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev);
96ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, 96ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
97 const struct iomap_ops *ops); 97 const struct iomap_ops *ops);
98int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 98int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
99 pfn_t *pfnp, const struct iomap_ops *ops); 99 pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
100int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, 100int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
101 pfn_t pfn); 101 pfn_t pfn);
102int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); 102int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 65cd8ab60b7a..82a99d366aec 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -227,6 +227,7 @@ extern seqlock_t rename_lock;
227 */ 227 */
228extern void d_instantiate(struct dentry *, struct inode *); 228extern void d_instantiate(struct dentry *, struct inode *);
229extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); 229extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
230extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
230extern int d_instantiate_no_diralias(struct dentry *, struct inode *); 231extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
231extern void __d_drop(struct dentry *dentry); 232extern void __d_drop(struct dentry *dentry);
232extern void d_drop(struct dentry *dentry); 233extern void d_drop(struct dentry *dentry);
@@ -235,6 +236,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
235 236
236/* allocate/de-allocate */ 237/* allocate/de-allocate */
237extern struct dentry * d_alloc(struct dentry *, const struct qstr *); 238extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
239extern struct dentry * d_alloc_anon(struct super_block *);
238extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); 240extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
239extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, 241extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
240 wait_queue_head_t *); 242 wait_queue_head_t *);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a5538433c927..da83f64952e7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -28,6 +28,7 @@ enum dm_queue_mode {
28 DM_TYPE_REQUEST_BASED = 2, 28 DM_TYPE_REQUEST_BASED = 2,
29 DM_TYPE_MQ_REQUEST_BASED = 3, 29 DM_TYPE_MQ_REQUEST_BASED = 3,
30 DM_TYPE_DAX_BIO_BASED = 4, 30 DM_TYPE_DAX_BIO_BASED = 4,
31 DM_TYPE_NVME_BIO_BASED = 5,
31}; 32};
32 33
33typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; 34typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -221,14 +222,6 @@ struct target_type {
221#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) 222#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
222 223
223/* 224/*
224 * Some targets need to be sent the same WRITE bio severals times so
225 * that they can send copies of it to different devices. This function
226 * examines any supplied bio and returns the number of copies of it the
227 * target requires.
228 */
229typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
230
231/*
232 * A target implements own bio data integrity. 225 * A target implements own bio data integrity.
233 */ 226 */
234#define DM_TARGET_INTEGRITY 0x00000010 227#define DM_TARGET_INTEGRITY 0x00000010
@@ -291,13 +284,6 @@ struct dm_target {
291 */ 284 */
292 unsigned per_io_data_size; 285 unsigned per_io_data_size;
293 286
294 /*
295 * If defined, this function is called to find out how many
296 * duplicate bios should be sent to the target when writing
297 * data.
298 */
299 dm_num_write_bios_fn num_write_bios;
300
301 /* target specific data */ 287 /* target specific data */
302 void *private; 288 void *private;
303 289
@@ -329,35 +315,9 @@ struct dm_target_callbacks {
329 int (*congested_fn) (struct dm_target_callbacks *, int); 315 int (*congested_fn) (struct dm_target_callbacks *, int);
330}; 316};
331 317
332/* 318void *dm_per_bio_data(struct bio *bio, size_t data_size);
333 * For bio-based dm. 319struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
334 * One of these is allocated for each bio. 320unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
335 * This structure shouldn't be touched directly by target drivers.
336 * It is here so that we can inline dm_per_bio_data and
337 * dm_bio_from_per_bio_data
338 */
339struct dm_target_io {
340 struct dm_io *io;
341 struct dm_target *ti;
342 unsigned target_bio_nr;
343 unsigned *len_ptr;
344 struct bio clone;
345};
346
347static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
348{
349 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
350}
351
352static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
353{
354 return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
355}
356
357static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
358{
359 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
360}
361 321
362int dm_register_target(struct target_type *t); 322int dm_register_target(struct target_type *t);
363void dm_unregister_target(struct target_type *t); 323void dm_unregister_target(struct target_type *t);
@@ -500,6 +460,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
500int dm_table_complete(struct dm_table *t); 460int dm_table_complete(struct dm_table *t);
501 461
502/* 462/*
463 * Destroy the table when finished.
464 */
465void dm_table_destroy(struct dm_table *t);
466
467/*
503 * Target may require that it is never sent I/O larger than len. 468 * Target may require that it is never sent I/O larger than len.
504 */ 469 */
505int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); 470int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
@@ -585,6 +550,7 @@ do { \
585#define DM_ENDIO_DONE 0 550#define DM_ENDIO_DONE 0
586#define DM_ENDIO_INCOMPLETE 1 551#define DM_ENDIO_INCOMPLETE 1
587#define DM_ENDIO_REQUEUE 2 552#define DM_ENDIO_REQUEUE 2
553#define DM_ENDIO_DELAY_REQUEUE 3
588 554
589/* 555/*
590 * Definitions of return values from target map function. 556 * Definitions of return values from target map function.
@@ -592,7 +558,7 @@ do { \
592#define DM_MAPIO_SUBMITTED 0 558#define DM_MAPIO_SUBMITTED 0
593#define DM_MAPIO_REMAPPED 1 559#define DM_MAPIO_REMAPPED 1
594#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE 560#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
595#define DM_MAPIO_DELAY_REQUEUE 3 561#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
596#define DM_MAPIO_KILL 4 562#define DM_MAPIO_KILL 4
597 563
598#define dm_sector_div64(x, y)( \ 564#define dm_sector_div64(x, y)( \
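
dm_per_bio_data(), dm_bio_from_per_bio_data() and dm_bio_get_target_bio_nr() become out-of-line now that struct dm_target_io is private to the core; target code keeps calling them the same way. A sketch with a hypothetical my_map() target method and my_per_bio context, assuming the target's constructor set ti->per_io_data_size accordingly:

#include <linux/device-mapper.h>
#include <linux/jiffies.h>

/* Hypothetical per-bio context; ti->per_io_data_size must have been set
 * to sizeof(struct my_per_bio) in the constructor. */
struct my_per_bio {
	unsigned long start_jiffies;
};

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(struct my_per_bio));

	pb->start_jiffies = jiffies;
	/* ... remap the bio to the underlying device here ... */
	return DM_MAPIO_REMAPPED;
}
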
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d32000725da..b093405ed525 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * device.h - generic, centralized driver model 3 * device.h - generic, centralized driver model
3 * 4 *
@@ -5,8 +6,6 @@
5 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> 6 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
6 * Copyright (c) 2008-2009 Novell Inc. 7 * Copyright (c) 2008-2009 Novell Inc.
7 * 8 *
8 * This file is released under the GPLv2
9 *
10 * See Documentation/driver-model/ for more information. 9 * See Documentation/driver-model/ for more information.
11 */ 10 */
12 11
@@ -21,7 +20,6 @@
21#include <linux/compiler.h> 20#include <linux/compiler.h>
22#include <linux/types.h> 21#include <linux/types.h>
23#include <linux/mutex.h> 22#include <linux/mutex.h>
24#include <linux/pinctrl/devinfo.h>
25#include <linux/pm.h> 23#include <linux/pm.h>
26#include <linux/atomic.h> 24#include <linux/atomic.h>
27#include <linux/ratelimit.h> 25#include <linux/ratelimit.h>
@@ -42,6 +40,7 @@ struct fwnode_handle;
42struct iommu_ops; 40struct iommu_ops;
43struct iommu_group; 41struct iommu_group;
44struct iommu_fwspec; 42struct iommu_fwspec;
43struct dev_pin_info;
45 44
46struct bus_attribute { 45struct bus_attribute {
47 struct attribute attr; 46 struct attribute attr;
@@ -288,6 +287,7 @@ struct device_driver {
288 const struct attribute_group **groups; 287 const struct attribute_group **groups;
289 288
290 const struct dev_pm_ops *pm; 289 const struct dev_pm_ops *pm;
290 int (*coredump) (struct device *dev);
291 291
292 struct driver_private *p; 292 struct driver_private *p;
293}; 293};
@@ -301,7 +301,6 @@ extern struct device_driver *driver_find(const char *name,
301extern int driver_probe_done(void); 301extern int driver_probe_done(void);
302extern void wait_for_device_probe(void); 302extern void wait_for_device_probe(void);
303 303
304
305/* sysfs interface for exporting driver attributes */ 304/* sysfs interface for exporting driver attributes */
306 305
307struct driver_attribute { 306struct driver_attribute {
@@ -575,6 +574,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
575 574
576#define DEVICE_ATTR(_name, _mode, _show, _store) \ 575#define DEVICE_ATTR(_name, _mode, _show, _store) \
577 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) 576 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
577#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
578 struct device_attribute dev_attr_##_name = \
579 __ATTR_PREALLOC(_name, _mode, _show, _store)
578#define DEVICE_ATTR_RW(_name) \ 580#define DEVICE_ATTR_RW(_name) \
579 struct device_attribute dev_attr_##_name = __ATTR_RW(_name) 581 struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
580#define DEVICE_ATTR_RO(_name) \ 582#define DEVICE_ATTR_RO(_name) \
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 79f27d60ec66..085db2fee2d7 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -301,7 +301,7 @@ struct dma_buf {
301 struct dma_fence_cb cb; 301 struct dma_fence_cb cb;
302 wait_queue_head_t *poll; 302 wait_queue_head_t *poll;
303 303
304 unsigned long active; 304 __poll_t active;
305 } cb_excl, cb_shared; 305 } cb_excl, cb_shared;
306}; 306};
307 307
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
new file mode 100644
index 000000000000..bcdb1a3e4b1f
--- /dev/null
+++ b/include/linux/dma-direct.h
@@ -0,0 +1,47 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_DMA_DIRECT_H
3#define _LINUX_DMA_DIRECT_H 1
4
5#include <linux/dma-mapping.h>
6
7#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
8#include <asm/dma-direct.h>
9#else
10static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
11{
12 dma_addr_t dev_addr = (dma_addr_t)paddr;
13
14 return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
15}
16
17static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
18{
19 phys_addr_t paddr = (phys_addr_t)dev_addr;
20
21 return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
22}
23
24static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
25{
26 if (!dev->dma_mask)
27 return false;
28
29 return addr + size - 1 <= *dev->dma_mask;
30}
31#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
32
33#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
34void dma_mark_clean(void *addr, size_t size);
35#else
36static inline void dma_mark_clean(void *addr, size_t size)
37{
38}
39#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
40
41void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
42 gfp_t gfp, unsigned long attrs);
43void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
44 dma_addr_t dma_addr, unsigned long attrs);
45int dma_direct_supported(struct device *dev, u64 mask);
46
47#endif /* _LINUX_DMA_DIRECT_H */
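
A short sketch of the generic helpers above in use; my_check_mapping() is illustrative and assumes the !CONFIG_ARCH_HAS_PHYS_TO_DMA fallback definitions shown in this file.

#include <linux/dma-direct.h>
#include <linux/bug.h>

static int my_check_mapping(struct device *dev, phys_addr_t paddr, size_t size)
{
	dma_addr_t bus = phys_to_dma(dev, paddr);

	/* dma_to_phys() undoes the dma_pfn_offset applied above */
	WARN_ON(dma_to_phys(dev, bus) != paddr);

	/* dma_capable() tests the last byte of the range against *dev->dma_mask */
	return dma_capable(dev, bus, size) ? 0 : -EIO;
}
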
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 332a5420243c..bc8940ca280d 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -21,6 +21,7 @@
21#define __LINUX_DMA_FENCE_ARRAY_H 21#define __LINUX_DMA_FENCE_ARRAY_H
22 22
23#include <linux/dma-fence.h> 23#include <linux/dma-fence.h>
24#include <linux/irq_work.h>
24 25
25/** 26/**
26 * struct dma_fence_array_cb - callback helper for fence array 27 * struct dma_fence_array_cb - callback helper for fence array
@@ -47,6 +48,8 @@ struct dma_fence_array {
47 unsigned num_fences; 48 unsigned num_fences;
48 atomic_t num_pending; 49 atomic_t num_pending;
49 struct dma_fence **fences; 50 struct dma_fence **fences;
51
52 struct irq_work work;
50}; 53};
51 54
52extern const struct dma_fence_ops dma_fence_array_ops; 55extern const struct dma_fence_ops dma_fence_array_ops;
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index efdabbb64e3c..4c008170fe65 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -242,7 +242,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
242 * The caller is required to hold the RCU read lock. 242 * The caller is required to hold the RCU read lock.
243 */ 243 */
244static inline struct dma_fence * 244static inline struct dma_fence *
245dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) 245dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
246{ 246{
247 do { 247 do {
248 struct dma_fence *fence; 248 struct dma_fence *fence;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 81ed9b2d84dc..34fe8463d10e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,7 +136,7 @@ struct dma_map_ops {
136 int is_phys; 136 int is_phys;
137}; 137};
138 138
139extern const struct dma_map_ops dma_noop_ops; 139extern const struct dma_map_ops dma_direct_ops;
140extern const struct dma_map_ops dma_virt_ops; 140extern const struct dma_map_ops dma_virt_ops;
141 141
142#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) 142#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -513,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
513 void *cpu_addr; 513 void *cpu_addr;
514 514
515 BUG_ON(!ops); 515 BUG_ON(!ops);
516 WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
516 517
517 if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) 518 if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
518 return cpu_addr; 519 return cpu_addr;
519 520
521 /*
522 * Let the implementation decide on the zone to allocate from, and
523 * decide on the way of zeroing the memory given that the memory
524 * returned should always be zeroed.
525 */
526 flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO);
527
520 if (!arch_dma_alloc_attrs(&dev, &flag)) 528 if (!arch_dma_alloc_attrs(&dev, &flag))
521 return NULL; 529 return NULL;
522 if (!ops->alloc) 530 if (!ops->alloc)
@@ -568,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
568 return 0; 576 return 0;
569} 577}
570 578
579/*
580 * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
581 * don't use this is new code.
582 */
583#ifndef arch_dma_supported
584#define arch_dma_supported(dev, mask) (1)
585#endif
586
571static inline void dma_check_mask(struct device *dev, u64 mask) 587static inline void dma_check_mask(struct device *dev, u64 mask)
572{ 588{
573 if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) 589 if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -580,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
580 596
581 if (!ops) 597 if (!ops)
582 return 0; 598 return 0;
599 if (!arch_dma_supported(dev, mask))
600 return 0;
601
583 if (!ops->dma_supported) 602 if (!ops->dma_supported)
584 return 1; 603 return 1;
585 return ops->dma_supported(dev, mask); 604 return ops->dma_supported(dev, mask);
@@ -692,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
692#ifndef dma_max_pfn 711#ifndef dma_max_pfn
693static inline unsigned long dma_max_pfn(struct device *dev) 712static inline unsigned long dma_max_pfn(struct device *dev)
694{ 713{
695 return *dev->dma_mask >> PAGE_SHIFT; 714 return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
696} 715}
697#endif 716#endif
698 717
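
Two caller-visible points from the hunks above: dma_alloc_attrs() now warns when the coherent DMA mask is unset, and it strips zone/zeroing GFP hints because the returned memory is always zeroed and the zone is chosen from the mask. A hedged caller-side sketch (my_alloc_coherent_buf() is illustrative):

#include <linux/dma-mapping.h>

static void *my_alloc_coherent_buf(struct device *dev, size_t size,
				   dma_addr_t *handle)
{
	/* normally done once at probe time; shown here for completeness */
	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return NULL;

	/* no __GFP_DMA32/__GFP_ZERO: the core strips them and the buffer
	 * comes back zeroed anyway */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, 0);
}
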
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index f48a85c377de..b4f22112ba75 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -23,9 +23,10 @@ struct lan9303 {
23 struct regmap_irq_chip_data *irq_data; 23 struct regmap_irq_chip_data *irq_data;
24 struct gpio_desc *reset_gpio; 24 struct gpio_desc *reset_gpio;
25 u32 reset_duration; /* in [ms] */ 25 u32 reset_duration; /* in [ms] */
26 bool phy_addr_sel_strap; 26 int phy_addr_base;
27 struct dsa_switch *ds; 27 struct dsa_switch *ds;
28 struct mutex indirect_mutex; /* protect indexed register access */ 28 struct mutex indirect_mutex; /* protect indexed register access */
29 struct mutex alr_mutex; /* protect ALR access */
29 const struct lan9303_phy_ops *ops; 30 const struct lan9303_phy_ops *ops;
30 bool is_bridged; /* true if port 1 and 2 are bridged */ 31 bool is_bridged; /* true if port 1 and 2 are bridged */
31 32
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 29fdf8029cf6..f5083aa72eae 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -475,6 +475,39 @@ typedef struct {
475 u64 get_all; 475 u64 get_all;
476} apple_properties_protocol_64_t; 476} apple_properties_protocol_64_t;
477 477
478typedef struct {
479 u32 get_capability;
480 u32 get_event_log;
481 u32 hash_log_extend_event;
482 u32 submit_command;
483 u32 get_active_pcr_banks;
484 u32 set_active_pcr_banks;
485 u32 get_result_of_set_active_pcr_banks;
486} efi_tcg2_protocol_32_t;
487
488typedef struct {
489 u64 get_capability;
490 u64 get_event_log;
491 u64 hash_log_extend_event;
492 u64 submit_command;
493 u64 get_active_pcr_banks;
494 u64 set_active_pcr_banks;
495 u64 get_result_of_set_active_pcr_banks;
496} efi_tcg2_protocol_64_t;
497
498typedef u32 efi_tcg2_event_log_format;
499
500typedef struct {
501 void *get_capability;
502 efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
503 efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
504 void *hash_log_extend_event;
505 void *submit_command;
506 void *get_active_pcr_banks;
507 void *set_active_pcr_banks;
508 void *get_result_of_set_active_pcr_banks;
509} efi_tcg2_protocol_t;
510
478/* 511/*
479 * Types and defines for EFI ResetSystem 512 * Types and defines for EFI ResetSystem
480 */ 513 */
@@ -625,6 +658,7 @@ void efi_native_runtime_setup(void);
625#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) 658#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
626#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) 659#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
627#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) 660#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
661#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
628 662
629#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) 663#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
630#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) 664#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -637,6 +671,7 @@ void efi_native_runtime_setup(void);
637#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) 671#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
638#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) 672#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
639#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) 673#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
674#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
640 675
641typedef struct { 676typedef struct {
642 efi_guid_t guid; 677 efi_guid_t guid;
@@ -911,6 +946,7 @@ extern struct efi {
911 unsigned long properties_table; /* properties table */ 946 unsigned long properties_table; /* properties table */
912 unsigned long mem_attr_table; /* memory attributes table */ 947 unsigned long mem_attr_table; /* memory attributes table */
913 unsigned long rng_seed; /* UEFI firmware random seed */ 948 unsigned long rng_seed; /* UEFI firmware random seed */
949 unsigned long tpm_log; /* TPM2 Event Log table */
914 efi_get_time_t *get_time; 950 efi_get_time_t *get_time;
915 efi_set_time_t *set_time; 951 efi_set_time_t *set_time;
916 efi_get_wakeup_time_t *get_wakeup_time; 952 efi_get_wakeup_time_t *get_wakeup_time;
@@ -1536,6 +1572,8 @@ static inline void
1536efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { } 1572efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
1537#endif 1573#endif
1538 1574
1575void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
1576
1539/* 1577/*
1540 * Arch code can implement the following three template macros, avoiding 1578 * Arch code can implement the following three template macros, avoiding
1541 * reptition for the void/non-void return cases of {__,}efi_call_virt(): 1579 * reptition for the void/non-void return cases of {__,}efi_call_virt():
@@ -1603,4 +1641,12 @@ struct linux_efi_random_seed {
1603 u8 bits[]; 1641 u8 bits[];
1604}; 1642};
1605 1643
1644struct linux_efi_tpm_eventlog {
1645 u32 size;
1646 u8 version;
1647 u8 log[];
1648};
1649
1650extern int efi_tpm_eventlog_init(void);
1651
1606#endif /* _LINUX_EFI_H */ 1652#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 3d794b3dc532..6d9e230dffd2 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
198extern void elv_requeue_request(struct request_queue *, struct request *); 198extern void elv_requeue_request(struct request_queue *, struct request *);
199extern struct request *elv_former_request(struct request_queue *, struct request *); 199extern struct request *elv_former_request(struct request_queue *, struct request *);
200extern struct request *elv_latter_request(struct request_queue *, struct request *); 200extern struct request *elv_latter_request(struct request_queue *, struct request *);
201extern int elv_register_queue(struct request_queue *q);
202extern void elv_unregister_queue(struct request_queue *q);
203extern int elv_may_queue(struct request_queue *, unsigned int); 201extern int elv_may_queue(struct request_queue *, unsigned int);
204extern void elv_completed_request(struct request_queue *, struct request *); 202extern void elv_completed_request(struct request_queue *, struct request *);
205extern int elv_set_request(struct request_queue *q, struct request *rq, 203extern int elv_set_request(struct request_queue *q, struct request *rq,
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
new file mode 100644
index 000000000000..280c61ecbf20
--- /dev/null
+++ b/include/linux/error-injection.h
@@ -0,0 +1,27 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_ERROR_INJECTION_H
3#define _LINUX_ERROR_INJECTION_H
4
5#ifdef CONFIG_FUNCTION_ERROR_INJECTION
6
7#include <asm/error-injection.h>
8
9extern bool within_error_injection_list(unsigned long addr);
10extern int get_injectable_error_type(unsigned long addr);
11
12#else /* !CONFIG_FUNCTION_ERROR_INJECTION */
13
14#include <asm-generic/error-injection.h>
15static inline bool within_error_injection_list(unsigned long addr)
16{
17 return false;
18}
19
20static inline int get_injectable_error_type(unsigned long addr)
21{
22 return EI_ETYPE_NONE;
23}
24
25#endif
26
27#endif /* _LINUX_ERROR_INJECTION_H */
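
A small sketch of how a caller might combine the two queries declared above before overriding a function's return value; my_can_override() is illustrative, and EI_ETYPE_NONE comes from the asm(-generic) error-injection header included here.

#include <linux/error-injection.h>

static bool my_can_override(unsigned long func_addr)
{
	if (!within_error_injection_list(func_addr))
		return false;

	return get_injectable_error_type(func_addr) != EI_ETYPE_NONE;
}
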
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
index 6ffae9c5052d..fc2777770768 100644
--- a/include/linux/errseq.h
+++ b/include/linux/errseq.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * See Documentation/errseq.rst and lib/errseq.c 3 * See Documentation/core-api/errseq.rst and lib/errseq.c
4 */ 4 */
5#ifndef _LINUX_ERRSEQ_H 5#ifndef _LINUX_ERRSEQ_H
6#define _LINUX_ERRSEQ_H 6#define _LINUX_ERRSEQ_H
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 60b2985e8a18..7094718b653b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -26,18 +26,16 @@
26#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) 26#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
27#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) 27#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
28 28
29struct eventfd_ctx;
29struct file; 30struct file;
30 31
31#ifdef CONFIG_EVENTFD 32#ifdef CONFIG_EVENTFD
32 33
33struct file *eventfd_file_create(unsigned int count, int flags);
34struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
35void eventfd_ctx_put(struct eventfd_ctx *ctx); 34void eventfd_ctx_put(struct eventfd_ctx *ctx);
36struct file *eventfd_fget(int fd); 35struct file *eventfd_fget(int fd);
37struct eventfd_ctx *eventfd_ctx_fdget(int fd); 36struct eventfd_ctx *eventfd_ctx_fdget(int fd);
38struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); 37struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
39__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); 38__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
40ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
41int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, 39int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
42 __u64 *cnt); 40 __u64 *cnt);
43 41
@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
47 * Ugly ugly ugly error layer to support modules that uses eventfd but 45 * Ugly ugly ugly error layer to support modules that uses eventfd but
48 * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. 46 * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
49 */ 47 */
50static inline struct file *eventfd_file_create(unsigned int count, int flags)
51{
52 return ERR_PTR(-ENOSYS);
53}
54 48
55static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) 49static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
56{ 50{
@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
67 61
68} 62}
69 63
70static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
71 __u64 *cnt)
72{
73 return -ENOSYS;
74}
75
76static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, 64static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
77 wait_queue_entry_t *wait, __u64 *cnt) 65 wait_queue_entry_t *wait, __u64 *cnt)
78{ 66{
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 43e98d30d2df..58aecb60ea51 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -117,6 +117,7 @@ struct f2fs_super_block {
117/* 117/*
118 * For checkpoint 118 * For checkpoint
119 */ 119 */
120#define CP_NOCRC_RECOVERY_FLAG 0x00000200
120#define CP_TRIMMED_FLAG 0x00000100 121#define CP_TRIMMED_FLAG 0x00000100
121#define CP_NAT_BITS_FLAG 0x00000080 122#define CP_NAT_BITS_FLAG 0x00000080
122#define CP_CRC_RECOVERY_FLAG 0x00000040 123#define CP_CRC_RECOVERY_FLAG 0x00000040
@@ -212,6 +213,7 @@ struct f2fs_extent {
212#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ 213#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
213#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ 214#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
214#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ 215#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
216#define F2FS_PIN_FILE 0x40 /* file should not be gced */
215 217
216struct f2fs_inode { 218struct f2fs_inode {
217 __le16 i_mode; /* file mode */ 219 __le16 i_mode; /* file mode */
@@ -229,7 +231,13 @@ struct f2fs_inode {
229 __le32 i_ctime_nsec; /* change time in nano scale */ 231 __le32 i_ctime_nsec; /* change time in nano scale */
230 __le32 i_mtime_nsec; /* modification time in nano scale */ 232 __le32 i_mtime_nsec; /* modification time in nano scale */
231 __le32 i_generation; /* file version (for NFS) */ 233 __le32 i_generation; /* file version (for NFS) */
232 __le32 i_current_depth; /* only for directory depth */ 234 union {
235 __le32 i_current_depth; /* only for directory depth */
236 __le16 i_gc_failures; /*
237 * # of gc failures on pinned file.
238 * only for regular files.
239 */
240 };
233 __le32 i_xattr_nid; /* nid to save xattr */ 241 __le32 i_xattr_nid; /* nid to save xattr */
234 __le32 i_flags; /* file attributes */ 242 __le32 i_flags; /* file attributes */
235 __le32 i_pino; /* parent inode number */ 243 __le32 i_pino; /* parent inode number */
@@ -245,8 +253,10 @@ struct f2fs_inode {
245 __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ 253 __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
246 __le32 i_projid; /* project id */ 254 __le32 i_projid; /* project id */
247 __le32 i_inode_checksum;/* inode meta checksum */ 255 __le32 i_inode_checksum;/* inode meta checksum */
256 __le64 i_crtime; /* creation time */
257 __le32 i_crtime_nsec; /* creation time in nano scale */
248 __le32 i_extra_end[0]; /* for attribute size calculation */ 258 __le32 i_extra_end[0]; /* for attribute size calculation */
249 }; 259 } __packed;
250 __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ 260 __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
251 }; 261 };
252 __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), 262 __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
diff --git a/include/linux/fb.h b/include/linux/fb.h
index bc24e48e396d..f577d3c89618 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -465,6 +465,11 @@ struct fb_info {
465 atomic_t count; 465 atomic_t count;
466 int node; 466 int node;
467 int flags; 467 int flags;
468 /*
469 * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows
470 * a lcd is not mounted upright and fbcon should rotate to compensate.
471 */
472 int fbcon_rotate_hint;
468 struct mutex lock; /* Lock for open/release/ioctl funcs */ 473 struct mutex lock; /* Lock for open/release/ioctl funcs */
469 struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ 474 struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
470 struct fb_var_screeninfo var; /* Current var */ 475 struct fb_var_screeninfo var; /* Current var */
@@ -564,7 +569,10 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
564#define fb_memcpy_fromfb sbus_memcpy_fromio 569#define fb_memcpy_fromfb sbus_memcpy_fromio
565#define fb_memcpy_tofb sbus_memcpy_toio 570#define fb_memcpy_tofb sbus_memcpy_toio
566 571
567#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__) 572#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
573 defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
574 defined(__avr32__) || defined(__bfin__) || defined(__arm__) || \
575 defined(__aarch64__)
568 576
569#define fb_readb __raw_readb 577#define fb_readb __raw_readb
570#define fb_readw __raw_readw 578#define fb_readw __raw_readw
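
The new fbcon_rotate_hint field lets a driver tell fbcon that the panel is not mounted upright. A trivial sketch (my_set_rotation_hint() is illustrative; FB_ROTATE_UD is the existing fb.h rotation constant, and -1 restores the default of no hint):

#include <linux/fb.h>

static void my_set_rotation_hint(struct fb_info *info, bool upside_down)
{
	/* FB_ROTATE_UD asks fbcon to draw the console rotated by 180 degrees */
	info->fbcon_rotate_hint = upside_down ? FB_ROTATE_UD : -1;
}
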
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 1c65817673db..41615f38bcff 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -10,6 +10,7 @@
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/rcupdate.h> 12#include <linux/rcupdate.h>
13#include <linux/nospec.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/fs.h> 16#include <linux/fs.h>
@@ -82,8 +83,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
82{ 83{
83 struct fdtable *fdt = rcu_dereference_raw(files->fdt); 84 struct fdtable *fdt = rcu_dereference_raw(files->fdt);
84 85
85 if (fd < fdt->max_fds) 86 if (fd < fdt->max_fds) {
87 fd = array_index_nospec(fd, fdt->max_fds);
86 return rcu_dereference_raw(fdt->fd[fd]); 88 return rcu_dereference_raw(fdt->fd[fd]);
89 }
87 return NULL; 90 return NULL;
88} 91}
89 92
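
The __fcheck_files() change above is the standard Spectre-v1 pattern: bounds-check first, then clamp the index with array_index_nospec() before the dependent load. A generic sketch of the same pattern with illustrative names:

#include <linux/nospec.h>
#include <linux/rcupdate.h>

static struct file *my_lookup(struct file __rcu **table, unsigned int size,
			      unsigned int fd)
{
	if (fd < size) {
		/* clamp fd under speculation before indexing the table */
		fd = array_index_nospec(fd, size);
		return rcu_dereference_raw(table[fd]);
	}
	return NULL;
}
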
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 80b5b482cb46..276932d75975 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -18,7 +18,9 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/cryptohash.h> 19#include <linux/cryptohash.h>
20#include <linux/set_memory.h> 20#include <linux/set_memory.h>
21#include <linux/kallsyms.h>
21 22
23#include <net/xdp.h>
22#include <net/sch_generic.h> 24#include <net/sch_generic.h>
23 25
24#include <uapi/linux/filter.h> 26#include <uapi/linux/filter.h>
@@ -58,6 +60,9 @@ struct bpf_prog_aux;
58/* unused opcode to mark special call to bpf_tail_call() helper */ 60/* unused opcode to mark special call to bpf_tail_call() helper */
59#define BPF_TAIL_CALL 0xf0 61#define BPF_TAIL_CALL 0xf0
60 62
63/* unused opcode to mark call to interpreter with arguments */
64#define BPF_CALL_ARGS 0xe0
65
61/* As per nm, we expose JITed images as text (code) section for 66/* As per nm, we expose JITed images as text (code) section for
62 * kallsyms. That way, tools like perf can find it to match 67 * kallsyms. That way, tools like perf can find it to match
63 * addresses. 68 * addresses.
@@ -455,10 +460,14 @@ struct bpf_binary_header {
455struct bpf_prog { 460struct bpf_prog {
456 u16 pages; /* Number of allocated pages */ 461 u16 pages; /* Number of allocated pages */
457 u16 jited:1, /* Is our filter JIT'ed? */ 462 u16 jited:1, /* Is our filter JIT'ed? */
463 jit_requested:1,/* archs need to JIT the prog */
458 locked:1, /* Program image locked? */ 464 locked:1, /* Program image locked? */
459 gpl_compatible:1, /* Is filter GPL compatible? */ 465 gpl_compatible:1, /* Is filter GPL compatible? */
460 cb_access:1, /* Is control block accessed? */ 466 cb_access:1, /* Is control block accessed? */
461 dst_needed:1; /* Do we need dst entry? */ 467 dst_needed:1, /* Do we need dst entry? */
468 blinded:1, /* Was blinded */
469 is_func:1, /* program is a bpf function */
470 kprobe_override:1; /* Do we override a kprobe? */
462 enum bpf_prog_type type; /* Type of BPF program */ 471 enum bpf_prog_type type; /* Type of BPF program */
463 u32 len; /* Number of filter blocks */ 472 u32 len; /* Number of filter blocks */
464 u32 jited_len; /* Size of jited insns in bytes */ 473 u32 jited_len; /* Size of jited insns in bytes */
@@ -495,6 +504,7 @@ struct xdp_buff {
495 void *data_end; 504 void *data_end;
496 void *data_meta; 505 void *data_meta;
497 void *data_hard_start; 506 void *data_hard_start;
507 struct xdp_rxq_info *rxq;
498}; 508};
499 509
500/* Compute the linear packet data range [data, data_end) which 510/* Compute the linear packet data range [data, data_end) which
@@ -678,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
678struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); 688struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
679void bpf_prog_free(struct bpf_prog *fp); 689void bpf_prog_free(struct bpf_prog *fp);
680 690
691bool bpf_opcode_in_insntable(u8 code);
692
681struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); 693struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
682struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, 694struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
683 gfp_t gfp_extra_flags); 695 gfp_t gfp_extra_flags);
@@ -709,11 +721,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
709void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); 721void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
710 722
711u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 723u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
724#define __bpf_call_base_args \
725 ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
726 __bpf_call_base)
712 727
713struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); 728struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
714void bpf_jit_compile(struct bpf_prog *prog); 729void bpf_jit_compile(struct bpf_prog *prog);
715bool bpf_helper_changes_pkt_data(void *func); 730bool bpf_helper_changes_pkt_data(void *func);
716 731
732static inline bool bpf_dump_raw_ok(void)
733{
734 /* Reconstruction of call-sites is dependent on kallsyms,
735 * thus make dump the same restriction.
736 */
737 return kallsyms_show_value() == 1;
738}
739
717struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 740struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
718 const struct bpf_insn *patch, u32 len); 741 const struct bpf_insn *patch, u32 len);
719 742
@@ -797,7 +820,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
797 return fp->jited && bpf_jit_is_ebpf(); 820 return fp->jited && bpf_jit_is_ebpf();
798} 821}
799 822
800static inline bool bpf_jit_blinding_enabled(void) 823static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
801{ 824{
802 /* These are the prerequisites, should someone ever have the 825 /* These are the prerequisites, should someone ever have the
803 * idea to call blinding outside of them, we make sure to 826 * idea to call blinding outside of them, we make sure to
@@ -805,7 +828,7 @@ static inline bool bpf_jit_blinding_enabled(void)
805 */ 828 */
806 if (!bpf_jit_is_ebpf()) 829 if (!bpf_jit_is_ebpf())
807 return false; 830 return false;
808 if (!bpf_jit_enable) 831 if (!prog->jit_requested)
809 return false; 832 return false;
810 if (!bpf_jit_harden) 833 if (!bpf_jit_harden)
811 return false; 834 return false;
@@ -982,9 +1005,20 @@ struct bpf_sock_ops_kern {
982 struct sock *sk; 1005 struct sock *sk;
983 u32 op; 1006 u32 op;
984 union { 1007 union {
1008 u32 args[4];
985 u32 reply; 1009 u32 reply;
986 u32 replylong[4]; 1010 u32 replylong[4];
987 }; 1011 };
1012 u32 is_fullsock;
1013 u64 temp; /* temp and everything after is not
1014 * initialized to 0 before calling
1015 * the BPF program. New fields that
1016 * should be initialized to 0 should
1017 * be inserted before temp.
1018 * temp is scratch storage used by
1019 * sock_ops_convert_ctx_access
1020 * as temporary storage of a register.
1021 */
988}; 1022};
989 1023
990#endif /* __LINUX_FILTER_H__ */ 1024#endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index aa66c87c120b..3694821a6d2d 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -1,10 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#include <linux/device.h>
3#include <linux/fpga/fpga-mgr.h>
4 2
5#ifndef _LINUX_FPGA_BRIDGE_H 3#ifndef _LINUX_FPGA_BRIDGE_H
6#define _LINUX_FPGA_BRIDGE_H 4#define _LINUX_FPGA_BRIDGE_H
7 5
6#include <linux/device.h>
7#include <linux/fpga/fpga-mgr.h>
8
8struct fpga_bridge; 9struct fpga_bridge;
9 10
10/** 11/**
@@ -12,11 +13,13 @@ struct fpga_bridge;
12 * @enable_show: returns the FPGA bridge's status 13 * @enable_show: returns the FPGA bridge's status
13 * @enable_set: set a FPGA bridge as enabled or disabled 14 * @enable_set: set a FPGA bridge as enabled or disabled
14 * @fpga_bridge_remove: set FPGA into a specific state during driver remove 15 * @fpga_bridge_remove: set FPGA into a specific state during driver remove
16 * @groups: optional attribute groups.
15 */ 17 */
16struct fpga_bridge_ops { 18struct fpga_bridge_ops {
17 int (*enable_show)(struct fpga_bridge *bridge); 19 int (*enable_show)(struct fpga_bridge *bridge);
18 int (*enable_set)(struct fpga_bridge *bridge, bool enable); 20 int (*enable_set)(struct fpga_bridge *bridge, bool enable);
19 void (*fpga_bridge_remove)(struct fpga_bridge *bridge); 21 void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
22 const struct attribute_group **groups;
20}; 23};
21 24
22/** 25/**
@@ -43,6 +46,8 @@ struct fpga_bridge {
43 46
44struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, 47struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
45 struct fpga_image_info *info); 48 struct fpga_image_info *info);
49struct fpga_bridge *fpga_bridge_get(struct device *dev,
50 struct fpga_image_info *info);
46void fpga_bridge_put(struct fpga_bridge *bridge); 51void fpga_bridge_put(struct fpga_bridge *bridge);
47int fpga_bridge_enable(struct fpga_bridge *bridge); 52int fpga_bridge_enable(struct fpga_bridge *bridge);
48int fpga_bridge_disable(struct fpga_bridge *bridge); 53int fpga_bridge_disable(struct fpga_bridge *bridge);
@@ -50,9 +55,12 @@ int fpga_bridge_disable(struct fpga_bridge *bridge);
50int fpga_bridges_enable(struct list_head *bridge_list); 55int fpga_bridges_enable(struct list_head *bridge_list);
51int fpga_bridges_disable(struct list_head *bridge_list); 56int fpga_bridges_disable(struct list_head *bridge_list);
52void fpga_bridges_put(struct list_head *bridge_list); 57void fpga_bridges_put(struct list_head *bridge_list);
53int fpga_bridge_get_to_list(struct device_node *np, 58int fpga_bridge_get_to_list(struct device *dev,
54 struct fpga_image_info *info, 59 struct fpga_image_info *info,
55 struct list_head *bridge_list); 60 struct list_head *bridge_list);
61int of_fpga_bridge_get_to_list(struct device_node *np,
62 struct fpga_image_info *info,
63 struct list_head *bridge_list);
56 64
57int fpga_bridge_register(struct device *dev, const char *name, 65int fpga_bridge_register(struct device *dev, const char *name,
58 const struct fpga_bridge_ops *br_ops, void *priv); 66 const struct fpga_bridge_ops *br_ops, void *priv);
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index bfa14bc023fb..3c6de23aabdf 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * FPGA Framework 2 * FPGA Framework
3 * 3 *
4 * Copyright (C) 2013-2015 Altera Corporation 4 * Copyright (C) 2013-2016 Altera Corporation
5 * Copyright (C) 2017 Intel Corporation
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 8 * under the terms and conditions of the GNU General Public License,
@@ -15,12 +16,12 @@
15 * You should have received a copy of the GNU General Public License along with 16 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 18 */
18#include <linux/mutex.h>
19#include <linux/platform_device.h>
20
21#ifndef _LINUX_FPGA_MGR_H 19#ifndef _LINUX_FPGA_MGR_H
22#define _LINUX_FPGA_MGR_H 20#define _LINUX_FPGA_MGR_H
23 21
22#include <linux/mutex.h>
23#include <linux/platform_device.h>
24
24struct fpga_manager; 25struct fpga_manager;
25struct sg_table; 26struct sg_table;
26 27
@@ -83,12 +84,26 @@ enum fpga_mgr_states {
83 * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) 84 * @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
84 * @config_complete_timeout_us: maximum time for FPGA to switch to operating 85 * @config_complete_timeout_us: maximum time for FPGA to switch to operating
85 * status in the write_complete op. 86 * status in the write_complete op.
87 * @firmware_name: name of FPGA image firmware file
88 * @sgt: scatter/gather table containing FPGA image
89 * @buf: contiguous buffer containing FPGA image
90 * @count: size of buf
91 * @dev: device that owns this
92 * @overlay: Device Tree overlay
86 */ 93 */
87struct fpga_image_info { 94struct fpga_image_info {
88 u32 flags; 95 u32 flags;
89 u32 enable_timeout_us; 96 u32 enable_timeout_us;
90 u32 disable_timeout_us; 97 u32 disable_timeout_us;
91 u32 config_complete_timeout_us; 98 u32 config_complete_timeout_us;
99 char *firmware_name;
100 struct sg_table *sgt;
101 const char *buf;
102 size_t count;
103 struct device *dev;
104#ifdef CONFIG_OF
105 struct device_node *overlay;
106#endif
92}; 107};
93 108
94/** 109/**
@@ -100,6 +115,7 @@ struct fpga_image_info {
100 * @write_sg: write the scatter list of configuration data to the FPGA 115 * @write_sg: write the scatter list of configuration data to the FPGA
101 * @write_complete: set FPGA to operating state after writing is done 116 * @write_complete: set FPGA to operating state after writing is done
102 * @fpga_remove: optional: Set FPGA into a specific state during driver remove 117 * @fpga_remove: optional: Set FPGA into a specific state during driver remove
118 * @groups: optional attribute groups.
103 * 119 *
104 * fpga_manager_ops are the low level functions implemented by a specific 120 * fpga_manager_ops are the low level functions implemented by a specific
105 * fpga manager driver. The optional ones are tested for NULL before being 121 * fpga manager driver. The optional ones are tested for NULL before being
@@ -116,6 +132,7 @@ struct fpga_manager_ops {
116 int (*write_complete)(struct fpga_manager *mgr, 132 int (*write_complete)(struct fpga_manager *mgr,
117 struct fpga_image_info *info); 133 struct fpga_image_info *info);
118 void (*fpga_remove)(struct fpga_manager *mgr); 134 void (*fpga_remove)(struct fpga_manager *mgr);
135 const struct attribute_group **groups;
119}; 136};
120 137
121/** 138/**
@@ -138,14 +155,14 @@ struct fpga_manager {
138 155
139#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) 156#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
140 157
141int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, 158struct fpga_image_info *fpga_image_info_alloc(struct device *dev);
142 const char *buf, size_t count); 159
143int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info, 160void fpga_image_info_free(struct fpga_image_info *info);
144 struct sg_table *sgt); 161
162int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info);
145 163
146int fpga_mgr_firmware_load(struct fpga_manager *mgr, 164int fpga_mgr_lock(struct fpga_manager *mgr);
147 struct fpga_image_info *info, 165void fpga_mgr_unlock(struct fpga_manager *mgr);
148 const char *image_name);
149 166
150struct fpga_manager *of_fpga_mgr_get(struct device_node *node); 167struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
151 168
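
The load API moves from fpga_mgr_buf_load()/fpga_mgr_firmware_load() to a single fpga_mgr_load() driven by struct fpga_image_info, bracketed by fpga_mgr_lock()/fpga_mgr_unlock(). A sketch of the resulting flow; my_program() and its ownership choices (devm_kstrdup() for firmware_name) are illustrative assumptions, not the patch's own callers.

#include <linux/fpga/fpga-mgr.h>

static int my_program(struct device *dev, struct fpga_manager *mgr,
		      const char *fw_name)
{
	struct fpga_image_info *info;
	int ret;

	info = fpga_image_info_alloc(dev);
	if (!info)
		return -ENOMEM;

	/* one of firmware_name, buf/count or sgt describes the image */
	info->firmware_name = devm_kstrdup(dev, fw_name, GFP_KERNEL);
	if (!info->firmware_name) {
		fpga_image_info_free(info);
		return -ENOMEM;
	}

	ret = fpga_mgr_lock(mgr);
	if (!ret) {
		ret = fpga_mgr_load(mgr, info);
		fpga_mgr_unlock(mgr);
	}

	fpga_image_info_free(info);
	return ret;
}
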
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
new file mode 100644
index 000000000000..b6520318ab9c
--- /dev/null
+++ b/include/linux/fpga/fpga-region.h
@@ -0,0 +1,40 @@
1#ifndef _FPGA_REGION_H
2#define _FPGA_REGION_H
3
4#include <linux/device.h>
5#include <linux/fpga/fpga-mgr.h>
6#include <linux/fpga/fpga-bridge.h>
7
8/**
9 * struct fpga_region - FPGA Region structure
10 * @dev: FPGA Region device
11 * @mutex: enforces exclusive reference to region
12 * @bridge_list: list of FPGA bridges specified in region
13 * @mgr: FPGA manager
14 * @info: FPGA image info
15 * @priv: private data
16 * @get_bridges: optional function to get bridges to a list
17 * @groups: optional attribute groups.
18 */
19struct fpga_region {
20 struct device dev;
21 struct mutex mutex; /* for exclusive reference to region */
22 struct list_head bridge_list;
23 struct fpga_manager *mgr;
24 struct fpga_image_info *info;
25 void *priv;
26 int (*get_bridges)(struct fpga_region *region);
27 const struct attribute_group **groups;
28};
29
30#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
31
32struct fpga_region *fpga_region_class_find(
33 struct device *start, const void *data,
34 int (*match)(struct device *, const void *));
35
36int fpga_region_program_fpga(struct fpga_region *region);
37int fpga_region_register(struct device *dev, struct fpga_region *region);
38int fpga_region_unregister(struct fpga_region *region);
39
40#endif /* _FPGA_REGION_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 511fbaabf624..2a815560fda0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -639,7 +639,7 @@ struct inode {
639 struct hlist_head i_dentry; 639 struct hlist_head i_dentry;
640 struct rcu_head i_rcu; 640 struct rcu_head i_rcu;
641 }; 641 };
642 u64 i_version; 642 atomic64_t i_version;
643 atomic_t i_count; 643 atomic_t i_count;
644 atomic_t i_dio_count; 644 atomic_t i_dio_count;
645 atomic_t i_writecount; 645 atomic_t i_writecount;
@@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
748 down_write_nested(&inode->i_rwsem, subclass); 748 down_write_nested(&inode->i_rwsem, subclass);
749} 749}
750 750
751static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
752{
753 down_read_nested(&inode->i_rwsem, subclass);
754}
755
751void lock_two_nondirectories(struct inode *, struct inode*); 756void lock_two_nondirectories(struct inode *, struct inode*);
752void unlock_two_nondirectories(struct inode *, struct inode*); 757void unlock_two_nondirectories(struct inode *, struct inode*);
753 758
@@ -1359,7 +1364,7 @@ struct super_block {
1359 1364
1360 const struct fscrypt_operations *s_cop; 1365 const struct fscrypt_operations *s_cop;
1361 1366
1362 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ 1367 struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
1363 struct list_head s_mounts; /* list of mounts; _not_ for fs use */ 1368 struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1364 struct block_device *s_bdev; 1369 struct block_device *s_bdev;
1365 struct backing_dev_info *s_bdi; 1370 struct backing_dev_info *s_bdi;
@@ -1608,6 +1613,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *);
1608extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, 1613extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
1609 int open_flag); 1614 int open_flag);
1610 1615
1616int vfs_mkobj(struct dentry *, umode_t,
1617 int (*f)(struct dentry *, umode_t, void *),
1618 void *);
1619
1611/* 1620/*
1612 * VFS file helper functions. 1621 * VFS file helper functions.
1613 */ 1622 */
@@ -1698,7 +1707,7 @@ struct file_operations {
1698 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); 1707 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
1699 int (*iterate) (struct file *, struct dir_context *); 1708 int (*iterate) (struct file *, struct dir_context *);
1700 int (*iterate_shared) (struct file *, struct dir_context *); 1709 int (*iterate_shared) (struct file *, struct dir_context *);
1701 unsigned int (*poll) (struct file *, struct poll_table_struct *); 1710 __poll_t (*poll) (struct file *, struct poll_table_struct *);
1702 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 1711 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1703 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 1712 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1704 int (*mmap) (struct file *, struct vm_area_struct *); 1713 int (*mmap) (struct file *, struct vm_area_struct *);
@@ -2036,21 +2045,6 @@ static inline void inode_dec_link_count(struct inode *inode)
2036 mark_inode_dirty(inode); 2045 mark_inode_dirty(inode);
2037} 2046}
2038 2047
2039/**
2040 * inode_inc_iversion - increments i_version
2041 * @inode: inode that need to be updated
2042 *
2043 * Every time the inode is modified, the i_version field will be incremented.
2044 * The filesystem has to be mounted with i_version flag
2045 */
2046
2047static inline void inode_inc_iversion(struct inode *inode)
2048{
2049 spin_lock(&inode->i_lock);
2050 inode->i_version++;
2051 spin_unlock(&inode->i_lock);
2052}
2053
2054enum file_time_flags { 2048enum file_time_flags {
2055 S_ATIME = 1, 2049 S_ATIME = 1,
2056 S_MTIME = 2, 2050 S_MTIME = 2,
@@ -2699,7 +2693,6 @@ extern sector_t bmap(struct inode *, sector_t);
2699#endif 2693#endif
2700extern int notify_change(struct dentry *, struct iattr *, struct inode **); 2694extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2701extern int inode_permission(struct inode *, int); 2695extern int inode_permission(struct inode *, int);
2702extern int __inode_permission(struct inode *, int);
2703extern int generic_permission(struct inode *, int); 2696extern int generic_permission(struct inode *, int);
2704extern int __check_sticky(struct inode *dir, struct inode *inode); 2697extern int __check_sticky(struct inode *dir, struct inode *inode);
2705 2698
@@ -2992,6 +2985,7 @@ enum {
2992}; 2985};
2993 2986
2994void dio_end_io(struct bio *bio); 2987void dio_end_io(struct bio *bio);
2988void dio_warn_stale_pagecache(struct file *filp);
2995 2989
2996ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, 2990ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
2997 struct block_device *bdev, struct iov_iter *iter, 2991 struct block_device *bdev, struct iov_iter *iter,
@@ -3239,6 +3233,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
3239 ki->ki_flags |= IOCB_DSYNC; 3233 ki->ki_flags |= IOCB_DSYNC;
3240 if (flags & RWF_SYNC) 3234 if (flags & RWF_SYNC)
3241 ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); 3235 ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
3236 if (flags & RWF_APPEND)
3237 ki->ki_flags |= IOCB_APPEND;
3242 return 0; 3238 return 0;
3243} 3239}
3244 3240
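Among the fs.h changes, vfs_mkobj() gives filesystems a way to create an object at a dentry through a caller-supplied constructor once the VFS checks have passed. A minimal sketch; example_create_obj() and its argument are hypothetical:

/* hypothetical constructor invoked by vfs_mkobj() once the VFS checks pass */
static int example_create_obj(struct dentry *dentry, umode_t mode, void *arg)
{
	/* instantiate whatever object @dentry should name, using @arg */
	return 0;
}

static int example_make_obj(struct dentry *dentry, void *arg)
{
	return vfs_mkobj(dentry, 0600, example_create_obj, arg);
}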
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 08b4b40c5aa8..952ab97af325 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -14,42 +14,13 @@
14#ifndef _LINUX_FSCRYPT_H 14#ifndef _LINUX_FSCRYPT_H
15#define _LINUX_FSCRYPT_H 15#define _LINUX_FSCRYPT_H
16 16
17#include <linux/key.h>
18#include <linux/fs.h> 17#include <linux/fs.h>
19#include <linux/mm.h>
20#include <linux/bio.h>
21#include <linux/dcache.h>
22#include <crypto/skcipher.h>
23#include <uapi/linux/fs.h>
24 18
25#define FS_CRYPTO_BLOCK_SIZE 16 19#define FS_CRYPTO_BLOCK_SIZE 16
26 20
21struct fscrypt_ctx;
27struct fscrypt_info; 22struct fscrypt_info;
28 23
29struct fscrypt_ctx {
30 union {
31 struct {
32 struct page *bounce_page; /* Ciphertext page */
33 struct page *control_page; /* Original page */
34 } w;
35 struct {
36 struct bio *bio;
37 struct work_struct work;
38 } r;
39 struct list_head free_list; /* Free list */
40 };
41 u8 flags; /* Flags */
42};
43
44/**
45 * For encrypted symlinks, the ciphertext length is stored at the beginning
46 * of the string in little-endian format.
47 */
48struct fscrypt_symlink_data {
49 __le16 len;
50 char encrypted_path[1];
51} __packed;
52
53struct fscrypt_str { 24struct fscrypt_str {
54 unsigned char *name; 25 unsigned char *name;
55 u32 len; 26 u32 len;
@@ -68,89 +39,14 @@ struct fscrypt_name {
68#define fname_name(p) ((p)->disk_name.name) 39#define fname_name(p) ((p)->disk_name.name)
69#define fname_len(p) ((p)->disk_name.len) 40#define fname_len(p) ((p)->disk_name.len)
70 41
71/*
72 * fscrypt superblock flags
73 */
74#define FS_CFLG_OWN_PAGES (1U << 1)
75
76/*
77 * crypto opertions for filesystems
78 */
79struct fscrypt_operations {
80 unsigned int flags;
81 const char *key_prefix;
82 int (*get_context)(struct inode *, void *, size_t);
83 int (*set_context)(struct inode *, const void *, size_t, void *);
84 bool (*dummy_context)(struct inode *);
85 bool (*empty_dir)(struct inode *);
86 unsigned (*max_namelen)(struct inode *);
87};
88
89/* Maximum value for the third parameter of fscrypt_operations.set_context(). */ 42/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
90#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 43#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
91 44
92static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
93{
94 if (inode->i_sb->s_cop->dummy_context &&
95 inode->i_sb->s_cop->dummy_context(inode))
96 return true;
97 return false;
98}
99
100static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
101 u32 filenames_mode)
102{
103 if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
104 filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
105 return true;
106
107 if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
108 filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
109 return true;
110
111 return false;
112}
113
114static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
115{
116 if (str->len == 1 && str->name[0] == '.')
117 return true;
118
119 if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
120 return true;
121
122 return false;
123}
124
125#if __FS_HAS_ENCRYPTION 45#if __FS_HAS_ENCRYPTION
126
127static inline struct page *fscrypt_control_page(struct page *page)
128{
129 return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
130}
131
132static inline bool fscrypt_has_encryption_key(const struct inode *inode)
133{
134 return (inode->i_crypt_info != NULL);
135}
136
137#include <linux/fscrypt_supp.h> 46#include <linux/fscrypt_supp.h>
138 47#else
139#else /* !__FS_HAS_ENCRYPTION */
140
141static inline struct page *fscrypt_control_page(struct page *page)
142{
143 WARN_ON_ONCE(1);
144 return ERR_PTR(-EINVAL);
145}
146
147static inline bool fscrypt_has_encryption_key(const struct inode *inode)
148{
149 return 0;
150}
151
152#include <linux/fscrypt_notsupp.h> 48#include <linux/fscrypt_notsupp.h>
153#endif /* __FS_HAS_ENCRYPTION */ 49#endif
154 50
155/** 51/**
156 * fscrypt_require_key - require an inode's encryption key 52 * fscrypt_require_key - require an inode's encryption key
@@ -291,4 +187,68 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry,
291 return 0; 187 return 0;
292} 188}
293 189
190/**
191 * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink
192 * @dir: directory in which the symlink is being created
193 * @target: plaintext symlink target
194 * @len: length of @target excluding null terminator
195 * @max_len: space the filesystem has available to store the symlink target
196 * @disk_link: (out) the on-disk symlink target being prepared
197 *
198 * This function computes the size the symlink target will require on-disk,
199 * stores it in @disk_link->len, and validates it against @max_len. An
200 * encrypted symlink may be longer than the original.
201 *
202 * Additionally, @disk_link->name is set to @target if the symlink will be
203 * unencrypted, but left NULL if the symlink will be encrypted. For encrypted
204 * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the
205 * on-disk target later. (The reason for the two-step process is that some
206 * filesystems need to know the size of the symlink target before creating the
207 * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.)
208 *
209 * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long,
210 * -ENOKEY if the encryption key is missing, or another -errno code if a problem
211 * occurred while setting up the encryption key.
212 */
213static inline int fscrypt_prepare_symlink(struct inode *dir,
214 const char *target,
215 unsigned int len,
216 unsigned int max_len,
217 struct fscrypt_str *disk_link)
218{
219 if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir))
220 return __fscrypt_prepare_symlink(dir, len, max_len, disk_link);
221
222 disk_link->name = (unsigned char *)target;
223 disk_link->len = len + 1;
224 if (disk_link->len > max_len)
225 return -ENAMETOOLONG;
226 return 0;
227}
228
229/**
230 * fscrypt_encrypt_symlink - encrypt the symlink target if needed
231 * @inode: symlink inode
232 * @target: plaintext symlink target
233 * @len: length of @target excluding null terminator
234 * @disk_link: (in/out) the on-disk symlink target being prepared
235 *
236 * If the symlink target needs to be encrypted, then this function encrypts it
237 * into @disk_link->name. fscrypt_prepare_symlink() must have been called
238 * previously to compute @disk_link->len. If the filesystem did not allocate a
 239 * buffer for @disk_link->name after calling fscrypt_prepare_symlink(), then one
240 * will be kmalloc()'ed and the filesystem will be responsible for freeing it.
241 *
242 * Return: 0 on success, -errno on failure
243 */
244static inline int fscrypt_encrypt_symlink(struct inode *inode,
245 const char *target,
246 unsigned int len,
247 struct fscrypt_str *disk_link)
248{
249 if (IS_ENCRYPTED(inode))
250 return __fscrypt_encrypt_symlink(inode, target, len, disk_link);
251 return 0;
252}
253
294#endif /* _LINUX_FSCRYPT_H */ 254#endif /* _LINUX_FSCRYPT_H */
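The two symlink helpers documented above split creation into a sizing step before the inode exists and an encryption step afterwards. A sketch of a ->symlink() path using them; example_new_inode(), example_commit_symlink() and EXAMPLE_MAX_SYMLINK_LEN are hypothetical placeholders:

static int example_symlink(struct inode *dir, struct dentry *dentry,
			   const char *target)
{
	struct fscrypt_str disk_link;
	struct inode *inode;
	int err;

	/* step 1: size and validate the on-disk target before inode creation */
	err = fscrypt_prepare_symlink(dir, target, strlen(target),
				      EXAMPLE_MAX_SYMLINK_LEN, &disk_link);
	if (err)
		return err;

	inode = example_new_inode(dir, S_IFLNK | 0777);	/* hypothetical allocator */
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* step 2: encrypt into disk_link.name if needed; a no-op otherwise.
	 * If fscrypt kmalloc()s the buffer, the filesystem must free it later. */
	err = fscrypt_encrypt_symlink(inode, target, strlen(target), &disk_link);
	if (err)
		return err;

	/* write disk_link.len bytes from disk_link.name as the symlink body */
	return example_commit_symlink(inode, dentry, &disk_link);
}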
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 63e58808519a..44b50c04bae9 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -14,6 +14,16 @@
14#ifndef _LINUX_FSCRYPT_NOTSUPP_H 14#ifndef _LINUX_FSCRYPT_NOTSUPP_H
15#define _LINUX_FSCRYPT_NOTSUPP_H 15#define _LINUX_FSCRYPT_NOTSUPP_H
16 16
17static inline bool fscrypt_has_encryption_key(const struct inode *inode)
18{
19 return false;
20}
21
22static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
23{
24 return false;
25}
26
17/* crypto.c */ 27/* crypto.c */
18static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, 28static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
19 gfp_t gfp_flags) 29 gfp_t gfp_flags)
@@ -43,6 +53,11 @@ static inline int fscrypt_decrypt_page(const struct inode *inode,
43 return -EOPNOTSUPP; 53 return -EOPNOTSUPP;
44} 54}
45 55
56static inline struct page *fscrypt_control_page(struct page *page)
57{
58 WARN_ON_ONCE(1);
59 return ERR_PTR(-EINVAL);
60}
46 61
47static inline void fscrypt_restore_control_page(struct page *page) 62static inline void fscrypt_restore_control_page(struct page *page)
48{ 63{
@@ -90,8 +105,7 @@ static inline int fscrypt_get_encryption_info(struct inode *inode)
90 return -EOPNOTSUPP; 105 return -EOPNOTSUPP;
91} 106}
92 107
93static inline void fscrypt_put_encryption_info(struct inode *inode, 108static inline void fscrypt_put_encryption_info(struct inode *inode)
94 struct fscrypt_info *ci)
95{ 109{
96 return; 110 return;
97} 111}
@@ -116,16 +130,8 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
116 return; 130 return;
117} 131}
118 132
119static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode,
120 u32 ilen)
121{
122 /* never happens */
123 WARN_ON(1);
124 return 0;
125}
126
127static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, 133static inline int fscrypt_fname_alloc_buffer(const struct inode *inode,
128 u32 ilen, 134 u32 max_encrypted_len,
129 struct fscrypt_str *crypto_str) 135 struct fscrypt_str *crypto_str)
130{ 136{
131 return -EOPNOTSUPP; 137 return -EOPNOTSUPP;
@@ -144,13 +150,6 @@ static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
144 return -EOPNOTSUPP; 150 return -EOPNOTSUPP;
145} 151}
146 152
147static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
148 const struct qstr *iname,
149 struct fscrypt_str *oname)
150{
151 return -EOPNOTSUPP;
152}
153
154static inline bool fscrypt_match_name(const struct fscrypt_name *fname, 153static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
155 const u8 *de_name, u32 de_name_len) 154 const u8 *de_name, u32 de_name_len)
156{ 155{
@@ -208,4 +207,28 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
208 return -EOPNOTSUPP; 207 return -EOPNOTSUPP;
209} 208}
210 209
210static inline int __fscrypt_prepare_symlink(struct inode *dir,
211 unsigned int len,
212 unsigned int max_len,
213 struct fscrypt_str *disk_link)
214{
215 return -EOPNOTSUPP;
216}
217
218static inline int __fscrypt_encrypt_symlink(struct inode *inode,
219 const char *target,
220 unsigned int len,
221 struct fscrypt_str *disk_link)
222{
223 return -EOPNOTSUPP;
224}
225
226static inline const char *fscrypt_get_symlink(struct inode *inode,
227 const void *caddr,
228 unsigned int max_size,
229 struct delayed_call *done)
230{
231 return ERR_PTR(-EOPNOTSUPP);
232}
233
211#endif /* _LINUX_FSCRYPT_NOTSUPP_H */ 234#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index cf9e9fc02f0a..477a7a6504d2 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -11,8 +11,54 @@
11#ifndef _LINUX_FSCRYPT_SUPP_H 11#ifndef _LINUX_FSCRYPT_SUPP_H
12#define _LINUX_FSCRYPT_SUPP_H 12#define _LINUX_FSCRYPT_SUPP_H
13 13
14#include <linux/mm.h>
15#include <linux/slab.h>
16
17/*
18 * fscrypt superblock flags
19 */
20#define FS_CFLG_OWN_PAGES (1U << 1)
21
22/*
23 * crypto operations for filesystems
24 */
25struct fscrypt_operations {
26 unsigned int flags;
27 const char *key_prefix;
28 int (*get_context)(struct inode *, void *, size_t);
29 int (*set_context)(struct inode *, const void *, size_t, void *);
30 bool (*dummy_context)(struct inode *);
31 bool (*empty_dir)(struct inode *);
32 unsigned (*max_namelen)(struct inode *);
33};
34
35struct fscrypt_ctx {
36 union {
37 struct {
38 struct page *bounce_page; /* Ciphertext page */
39 struct page *control_page; /* Original page */
40 } w;
41 struct {
42 struct bio *bio;
43 struct work_struct work;
44 } r;
45 struct list_head free_list; /* Free list */
46 };
47 u8 flags; /* Flags */
48};
49
50static inline bool fscrypt_has_encryption_key(const struct inode *inode)
51{
52 return (inode->i_crypt_info != NULL);
53}
54
55static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
56{
57 return inode->i_sb->s_cop->dummy_context &&
58 inode->i_sb->s_cop->dummy_context(inode);
59}
60
14/* crypto.c */ 61/* crypto.c */
15extern struct kmem_cache *fscrypt_info_cachep;
16extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); 62extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
17extern void fscrypt_release_ctx(struct fscrypt_ctx *); 63extern void fscrypt_release_ctx(struct fscrypt_ctx *);
18extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, 64extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
@@ -20,6 +66,12 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
20 u64, gfp_t); 66 u64, gfp_t);
21extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, 67extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
22 unsigned int, u64); 68 unsigned int, u64);
69
70static inline struct page *fscrypt_control_page(struct page *page)
71{
72 return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
73}
74
23extern void fscrypt_restore_control_page(struct page *); 75extern void fscrypt_restore_control_page(struct page *);
24 76
25extern const struct dentry_operations fscrypt_d_ops; 77extern const struct dentry_operations fscrypt_d_ops;
@@ -44,7 +96,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *,
44 void *, bool); 96 void *, bool);
45/* keyinfo.c */ 97/* keyinfo.c */
46extern int fscrypt_get_encryption_info(struct inode *); 98extern int fscrypt_get_encryption_info(struct inode *);
47extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); 99extern void fscrypt_put_encryption_info(struct inode *);
48 100
49/* fname.c */ 101/* fname.c */
50extern int fscrypt_setup_filename(struct inode *, const struct qstr *, 102extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
@@ -55,14 +107,11 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
55 kfree(fname->crypto_buf.name); 107 kfree(fname->crypto_buf.name);
56} 108}
57 109
58extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
59extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, 110extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
60 struct fscrypt_str *); 111 struct fscrypt_str *);
61extern void fscrypt_fname_free_buffer(struct fscrypt_str *); 112extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
62extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, 113extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
63 const struct fscrypt_str *, struct fscrypt_str *); 114 const struct fscrypt_str *, struct fscrypt_str *);
64extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
65 struct fscrypt_str *);
66 115
67#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 116#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
68 117
@@ -153,5 +202,14 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir,
153 struct dentry *new_dentry, 202 struct dentry *new_dentry,
154 unsigned int flags); 203 unsigned int flags);
155extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); 204extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
205extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
206 unsigned int max_len,
207 struct fscrypt_str *disk_link);
208extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
209 unsigned int len,
210 struct fscrypt_str *disk_link);
211extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
212 unsigned int max_size,
213 struct delayed_call *done);
156 214
157#endif /* _LINUX_FSCRYPT_SUPP_H */ 215#endif /* _LINUX_FSCRYPT_SUPP_H */
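With fscrypt_operations now declared in fscrypt_supp.h, a filesystem advertises encryption support by pointing sb->s_cop at its table. A sketch with stub callbacks; a real implementation would persist the context in an xattr:

static int example_get_context(struct inode *inode, void *ctx, size_t len)
{
	return -ENODATA;		/* stub: read the context from an xattr */
}

static int example_set_context(struct inode *inode, const void *ctx,
			       size_t len, void *fs_data)
{
	return 0;			/* stub: persist the context in an xattr */
}

static unsigned example_max_namelen(struct inode *inode)
{
	return 255;
}

static const struct fscrypt_operations example_cryptops = {
	.key_prefix	= "example:",
	.get_context	= example_get_context,
	.set_context	= example_set_context,
	.max_namelen	= example_max_namelen,
};

/* in the filesystem's fill_super(): sb->s_cop = &example_cryptops; */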
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 411a84c6c400..4fa1a489efe4 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17struct fwnode_operations; 17struct fwnode_operations;
18struct device;
18 19
19struct fwnode_handle { 20struct fwnode_handle {
20 struct fwnode_handle *secondary; 21 struct fwnode_handle *secondary;
@@ -51,6 +52,7 @@ struct fwnode_reference_args {
51 * struct fwnode_operations - Operations for fwnode interface 52 * struct fwnode_operations - Operations for fwnode interface
52 * @get: Get a reference to an fwnode. 53 * @get: Get a reference to an fwnode.
53 * @put: Put a reference to an fwnode. 54 * @put: Put a reference to an fwnode.
55 * @device_get_match_data: Return the device driver match data.
54 * @property_present: Return true if a property is present. 56 * @property_present: Return true if a property is present.
55 * @property_read_integer_array: Read an array of integer properties. Return 57 * @property_read_integer_array: Read an array of integer properties. Return
56 * zero on success, a negative error code 58 * zero on success, a negative error code
@@ -71,6 +73,8 @@ struct fwnode_operations {
71 struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); 73 struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
72 void (*put)(struct fwnode_handle *fwnode); 74 void (*put)(struct fwnode_handle *fwnode);
73 bool (*device_is_available)(const struct fwnode_handle *fwnode); 75 bool (*device_is_available)(const struct fwnode_handle *fwnode);
76 void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
77 const struct device *dev);
74 bool (*property_present)(const struct fwnode_handle *fwnode, 78 bool (*property_present)(const struct fwnode_handle *fwnode,
75 const char *propname); 79 const char *propname);
76 int (*property_read_int_array)(const struct fwnode_handle *fwnode, 80 int (*property_read_int_array)(const struct fwnode_handle *fwnode,
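The new ->device_get_match_data fwnode op lets a firmware-node backend hand driver match data to the property core. A sketch of a backend wiring it up; the data structure and value are made up:

struct example_match_data {
	unsigned int variant;
};

static const struct example_match_data example_data = { .variant = 2 };

static void *example_get_match_data(const struct fwnode_handle *fwnode,
				    const struct device *dev)
{
	/* a real backend would consult its match tables for @dev/@fwnode;
	 * this sketch just returns static driver data */
	return (void *)&example_data;
}

static const struct fwnode_operations example_fwnode_ops = {
	.device_get_match_data	= example_get_match_data,
	/* .get, .put, .property_present, ... omitted */
};

Drivers would normally reach this through the property core's device_get_match_data() consumer helper rather than calling the op directly.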
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index ecc2928e8046..bc738504ab4a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
31 * @p: The pointer to read, prior to dereferencing 31 * @p: The pointer to read, prior to dereferencing
32 * 32 *
33 * Return the value of the specified RCU-protected pointer, but omit 33 * Return the value of the specified RCU-protected pointer, but omit
34 * both the smp_read_barrier_depends() and the READ_ONCE(), because 34 * the READ_ONCE(), because caller holds genl mutex.
35 * caller holds genl mutex.
36 */ 35 */
37#define genl_dereference(p) \ 36#define genl_dereference(p) \
38 rcu_dereference_protected(p, lockdep_genl_is_held()) 37 rcu_dereference_protected(p, lockdep_genl_is_held())
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5144ebe046c9..5e3531027b51 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -395,6 +395,11 @@ static inline void add_disk(struct gendisk *disk)
395{ 395{
396 device_add_disk(NULL, disk); 396 device_add_disk(NULL, disk);
397} 397}
398extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
399static inline void add_disk_no_queue_reg(struct gendisk *disk)
400{
401 device_add_disk_no_queue_reg(NULL, disk);
402}
398 403
399extern void del_gendisk(struct gendisk *gp); 404extern void del_gendisk(struct gendisk *gp);
400extern struct gendisk *get_gendisk(dev_t dev, int *partno); 405extern struct gendisk *get_gendisk(dev_t dev, int *partno);
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 604967609e55..83f81ac53282 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -2,6 +2,7 @@
2#ifndef GENL_MAGIC_FUNC_H 2#ifndef GENL_MAGIC_FUNC_H
3#define GENL_MAGIC_FUNC_H 3#define GENL_MAGIC_FUNC_H
4 4
5#include <linux/build_bug.h>
5#include <linux/genl_magic_struct.h> 6#include <linux/genl_magic_struct.h>
6 7
7/* 8/*
@@ -132,17 +133,6 @@ static void dprint_array(const char *dir, int nla_type,
132 * use one static buffer for parsing of nested attributes */ 133 * use one static buffer for parsing of nested attributes */
133static struct nlattr *nested_attr_tb[128]; 134static struct nlattr *nested_attr_tb[128];
134 135
135#ifndef BUILD_BUG_ON
136/* Force a compilation error if condition is true */
137#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
138/* Force a compilation error if condition is true, but also produce a
139 result (of value 0 and type size_t), so the expression can be used
140 e.g. in a structure initializer (or where-ever else comma expressions
141 aren't permitted). */
142#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
143#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
144#endif
145
146#undef GENL_struct 136#undef GENL_struct
147#define GENL_struct(tag_name, tag_number, s_name, s_fields) \ 137#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
148/* *_from_attrs functions are static, but potentially unused */ \ 138/* *_from_attrs functions are static, but potentially unused */ \
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 8ef7fc0ce0f0..91ed23468530 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,4 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * <linux/gpio.h>
4 *
5 * This is the LEGACY GPIO bulk include file, including legacy APIs. It is
6 * used for GPIO drivers still referencing the global GPIO numberspace,
7 * and should not be included in new code.
8 *
9 * If you're implementing a GPIO driver, only include <linux/gpio/driver.h>
10 * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h>
11 */
2#ifndef __LINUX_GPIO_H 12#ifndef __LINUX_GPIO_H
3#define __LINUX_GPIO_H 13#define __LINUX_GPIO_H
4 14
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 7447d85dbe2f..dbd065963296 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
139 int *value_array); 139 int *value_array);
140 140
141int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); 141int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
142int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
142 143
143int gpiod_is_active_low(const struct gpio_desc *desc); 144int gpiod_is_active_low(const struct gpio_desc *desc);
144int gpiod_cansleep(const struct gpio_desc *desc); 145int gpiod_cansleep(const struct gpio_desc *desc);
@@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio);
150int desc_to_gpio(const struct gpio_desc *desc); 151int desc_to_gpio(const struct gpio_desc *desc);
151 152
152/* Child properties interface */ 153/* Child properties interface */
154struct device_node;
153struct fwnode_handle; 155struct fwnode_handle;
154 156
157struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
158 struct device_node *node,
159 const char *propname, int index,
160 enum gpiod_flags dflags,
161 const char *label);
155struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, 162struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
156 const char *propname, int index, 163 const char *propname, int index,
157 enum gpiod_flags dflags, 164 enum gpiod_flags dflags,
@@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
431 return -ENOSYS; 438 return -ENOSYS;
432} 439}
433 440
441static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
442{
443 /* GPIO can never have been requested */
444 WARN_ON(1);
445 return -ENOSYS;
446}
447
434static inline int gpiod_is_active_low(const struct gpio_desc *desc) 448static inline int gpiod_is_active_low(const struct gpio_desc *desc)
435{ 449{
436 /* GPIO can never have been requested */ 450 /* GPIO can never have been requested */
@@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
464} 478}
465 479
466/* Child properties interface */ 480/* Child properties interface */
481struct device_node;
467struct fwnode_handle; 482struct fwnode_handle;
468 483
469static inline 484static inline
485struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
486 struct device_node *node,
487 const char *propname, int index,
488 enum gpiod_flags dflags,
489 const char *label)
490{
491 return ERR_PTR(-ENOSYS);
492}
493
494static inline
470struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, 495struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
471 const char *propname, int index, 496 const char *propname, int index,
472 enum gpiod_flags dflags, 497 enum gpiod_flags dflags,
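gpio/consumer.h gains devm_gpiod_get_from_of_node() for lookups against an arbitrary OF node and gpiod_set_transitory() for descriptors whose state need not survive sleep or reset. A sketch combining both; the "reset-gpios" property and label are assumptions:

static int example_get_reset(struct device *dev, struct device_node *np)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_from_of_node(dev, np, "reset-gpios", 0,
					    GPIOD_OUT_LOW, "example-reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* the line's state need not be retained over sleep or controller reset */
	return gpiod_set_transitory(reset, true);
}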
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 7258cd676df4..1ba9a331ec51 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -436,6 +436,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
436 struct lock_class_key *lock_key, 436 struct lock_class_key *lock_key,
437 struct lock_class_key *request_key); 437 struct lock_class_key *request_key);
438 438
439bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
440 unsigned int offset);
441
439#ifdef CONFIG_LOCKDEP 442#ifdef CONFIG_LOCKDEP
440 443
441/* 444/*
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 846be7c69a52..b2f2dc638463 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -10,8 +10,8 @@ enum gpio_lookup_flags {
10 GPIO_ACTIVE_LOW = (1 << 0), 10 GPIO_ACTIVE_LOW = (1 << 0),
11 GPIO_OPEN_DRAIN = (1 << 1), 11 GPIO_OPEN_DRAIN = (1 << 1),
12 GPIO_OPEN_SOURCE = (1 << 2), 12 GPIO_OPEN_SOURCE = (1 << 2),
13 GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3), 13 GPIO_PERSISTENT = (0 << 3),
14 GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3), 14 GPIO_TRANSITORY = (1 << 3),
15}; 15};
16 16
17/** 17/**
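The machine.h lookup flags are renamed to GPIO_PERSISTENT/GPIO_TRANSITORY to match the new consumer API. A board-file sketch using the renamed flag; chip label, offset and device name are invented:

static struct gpiod_lookup_table example_gpios_table = {
	.dev_id = "example-device",
	.table = {
		/* gpiochip0, offset 42: invented for illustration */
		GPIO_LOOKUP("gpiochip0", 42, "reset",
			    GPIO_ACTIVE_LOW | GPIO_TRANSITORY),
		{ },
	},
};

static void __init example_board_init(void)
{
	gpiod_add_lookup_table(&example_gpios_table);
}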
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d491027a7c22..091a81cf330f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -281,6 +281,7 @@ struct hid_item {
281 281
282#define HID_DG_DEVICECONFIG 0x000d000e 282#define HID_DG_DEVICECONFIG 0x000d000e
283#define HID_DG_DEVICESETTINGS 0x000d0023 283#define HID_DG_DEVICESETTINGS 0x000d0023
284#define HID_DG_AZIMUTH 0x000d003f
284#define HID_DG_CONFIDENCE 0x000d0047 285#define HID_DG_CONFIDENCE 0x000d0047
285#define HID_DG_WIDTH 0x000d0048 286#define HID_DG_WIDTH 0x000d0048
286#define HID_DG_HEIGHT 0x000d0049 287#define HID_DG_HEIGHT 0x000d0049
@@ -342,6 +343,7 @@ struct hid_item {
342#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 343#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
343#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 344#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
344#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 345#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
346#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000
345#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 347#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
346#define HID_QUIRK_NO_INIT_REPORTS 0x20000000 348#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
347#define HID_QUIRK_NO_IGNORE 0x40000000 349#define HID_QUIRK_NO_IGNORE 0x40000000
@@ -671,6 +673,7 @@ struct hid_usage_id {
671 * to be called) 673 * to be called)
672 * @dyn_list: list of dynamically added device ids 674 * @dyn_list: list of dynamically added device ids
673 * @dyn_lock: lock protecting @dyn_list 675 * @dyn_lock: lock protecting @dyn_list
676 * @match: check if the given device is handled by this driver
674 * @probe: new device inserted 677 * @probe: new device inserted
675 * @remove: device removed (NULL if not a hot-plug capable driver) 678 * @remove: device removed (NULL if not a hot-plug capable driver)
676 * @report_table: on which reports to call raw_event (NULL means all) 679 * @report_table: on which reports to call raw_event (NULL means all)
@@ -683,6 +686,8 @@ struct hid_usage_id {
683 * @input_mapped: invoked on input registering after mapping an usage 686 * @input_mapped: invoked on input registering after mapping an usage
684 * @input_configured: invoked just before the device is registered 687 * @input_configured: invoked just before the device is registered
685 * @feature_mapping: invoked on feature registering 688 * @feature_mapping: invoked on feature registering
689 * @bus_add_driver: invoked when a HID driver is about to be added
690 * @bus_removed_driver: invoked when a HID driver has been removed
686 * @suspend: invoked on suspend (NULL means nop) 691 * @suspend: invoked on suspend (NULL means nop)
687 * @resume: invoked on resume if device was not reset (NULL means nop) 692 * @resume: invoked on resume if device was not reset (NULL means nop)
688 * @reset_resume: invoked on resume if device was reset (NULL means nop) 693 * @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -711,6 +716,7 @@ struct hid_driver {
711 struct list_head dyn_list; 716 struct list_head dyn_list;
712 spinlock_t dyn_lock; 717 spinlock_t dyn_lock;
713 718
719 bool (*match)(struct hid_device *dev, bool ignore_special_driver);
714 int (*probe)(struct hid_device *dev, const struct hid_device_id *id); 720 int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
715 void (*remove)(struct hid_device *dev); 721 void (*remove)(struct hid_device *dev);
716 722
@@ -736,6 +742,8 @@ struct hid_driver {
736 void (*feature_mapping)(struct hid_device *hdev, 742 void (*feature_mapping)(struct hid_device *hdev,
737 struct hid_field *field, 743 struct hid_field *field,
738 struct hid_usage *usage); 744 struct hid_usage *usage);
745 void (*bus_add_driver)(struct hid_driver *driver);
746 void (*bus_removed_driver)(struct hid_driver *driver);
739#ifdef CONFIG_PM 747#ifdef CONFIG_PM
740 int (*suspend)(struct hid_device *hdev, pm_message_t message); 748 int (*suspend)(struct hid_device *hdev, pm_message_t message);
741 int (*resume)(struct hid_device *hdev); 749 int (*resume)(struct hid_device *hdev);
@@ -814,6 +822,8 @@ extern bool hid_ignore(struct hid_device *);
814extern int hid_add_device(struct hid_device *); 822extern int hid_add_device(struct hid_device *);
815extern void hid_destroy_device(struct hid_device *); 823extern void hid_destroy_device(struct hid_device *);
816 824
825extern struct bus_type hid_bus_type;
826
817extern int __must_check __hid_register_driver(struct hid_driver *, 827extern int __must_check __hid_register_driver(struct hid_driver *,
818 struct module *, const char *mod_name); 828 struct module *, const char *mod_name);
819 829
@@ -860,8 +870,12 @@ int hid_open_report(struct hid_device *device);
860int hid_check_keys_pressed(struct hid_device *hid); 870int hid_check_keys_pressed(struct hid_device *hid);
861int hid_connect(struct hid_device *hid, unsigned int connect_mask); 871int hid_connect(struct hid_device *hid, unsigned int connect_mask);
862void hid_disconnect(struct hid_device *hid); 872void hid_disconnect(struct hid_device *hid);
863const struct hid_device_id *hid_match_id(struct hid_device *hdev, 873bool hid_match_one_id(const struct hid_device *hdev,
874 const struct hid_device_id *id);
875const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
864 const struct hid_device_id *id); 876 const struct hid_device_id *id);
877const struct hid_device_id *hid_match_device(struct hid_device *hdev,
878 struct hid_driver *hdrv);
865s32 hid_snto32(__u32 value, unsigned n); 879s32 hid_snto32(__u32 value, unsigned n);
866__u32 hid_field_extract(const struct hid_device *hid, __u8 *report, 880__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
867 unsigned offset, unsigned n); 881 unsigned offset, unsigned n);
@@ -1098,9 +1112,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1098 int interrupt); 1112 int interrupt);
1099 1113
1100/* HID quirks API */ 1114/* HID quirks API */
1101u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct); 1115unsigned long hid_lookup_quirk(const struct hid_device *hdev);
1102int usbhid_quirks_init(char **quirks_param); 1116int hid_quirks_init(char **quirks_param, __u16 bus, int count);
1103void usbhid_quirks_exit(void); 1117void hid_quirks_exit(__u16 bus);
1104 1118
1105#ifdef CONFIG_HID_PID 1119#ifdef CONFIG_HID_PID
1106int hid_pidff_init(struct hid_device *hid); 1120int hid_pidff_init(struct hid_device *hid);
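hid.h adds a per-driver ->match() hook and moves the quirk API from usbhid into the core (hid_lookup_quirk() and friends). A sketch of a driver opting into ->match(); the id table values are made up and probe/remove are omitted:

static const struct hid_device_id example_hid_ids[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* made-up VID/PID */
	{ }
};

static bool example_match(struct hid_device *hdev, bool ignore_special_driver)
{
	/* how @ignore_special_driver is honoured is driver policy; this
	 * sketch simply claims anything in its id table */
	return hid_match_id(hdev, example_hid_ids) != NULL;
}

static struct hid_driver example_hid_driver = {
	.name		= "example-hid",
	.id_table	= example_hid_ids,
	.match		= example_match,
};
module_hid_driver(example_hid_driver);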
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
index 394a8405dd74..774f7d3b8f6a 100644
--- a/include/linux/hil_mlc.h
+++ b/include/linux/hil_mlc.h
@@ -144,12 +144,12 @@ struct hil_mlc {
144 hil_packet ipacket[16]; 144 hil_packet ipacket[16];
145 hil_packet imatch; 145 hil_packet imatch;
146 int icount; 146 int icount;
147 struct timeval instart; 147 unsigned long instart;
148 suseconds_t intimeout; 148 unsigned long intimeout;
149 149
150 int ddi; /* Last operational device id */ 150 int ddi; /* Last operational device id */
151 int lcv; /* LCV to throttle loops */ 151 int lcv; /* LCV to throttle loops */
152 struct timeval lcv_tv; /* Time loop was started */ 152 time64_t lcv_time; /* Time loop was started */
153 153
154 int di_map[7]; /* Maps below items to live devs */ 154 int di_map[7]; /* Maps below items to live devs */
155 struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; 155 struct hil_mlc_devinfo di[HIL_MLC_DEVMEM];
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index d392975d8887..6f1dee7e67e0 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -281,7 +281,7 @@ typedef struct {
281 hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ 281 hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */
282 282
283 int rcurr, rqty; /* Current read transact in process */ 283 int rcurr, rqty; /* Current read transact in process */
284 struct timeval rtv; /* Time when current read started */ 284 ktime_t rtime; /* Time when current read started */
285 int wcurr; /* Current write transact in process */ 285 int wcurr; /* Current write transact in process */
286 286
287 int dev_err; /* carries status from registration */ 287 int dev_err; /* carries status from registration */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 012c37fdb688..c7902ca7c9f4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -28,13 +28,29 @@ struct hrtimer_cpu_base;
28 28
29/* 29/*
30 * Mode arguments of xxx_hrtimer functions: 30 * Mode arguments of xxx_hrtimer functions:
31 *
32 * HRTIMER_MODE_ABS - Time value is absolute
33 * HRTIMER_MODE_REL - Time value is relative to now
34 * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
35 * when starting the timer)
36 * HRTIMER_MODE_SOFT - Timer callback function will be executed in
37 * soft irq context
31 */ 38 */
32enum hrtimer_mode { 39enum hrtimer_mode {
33 HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ 40 HRTIMER_MODE_ABS = 0x00,
34 HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ 41 HRTIMER_MODE_REL = 0x01,
35 HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ 42 HRTIMER_MODE_PINNED = 0x02,
36 HRTIMER_MODE_ABS_PINNED = 0x02, 43 HRTIMER_MODE_SOFT = 0x04,
37 HRTIMER_MODE_REL_PINNED = 0x03, 44
45 HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
46 HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
47
48 HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
49 HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
50
51 HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
52 HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
53
38}; 54};
39 55
40/* 56/*
@@ -87,6 +103,7 @@ enum hrtimer_restart {
87 * @base: pointer to the timer base (per cpu and per clock) 103 * @base: pointer to the timer base (per cpu and per clock)
88 * @state: state information (See bit values above) 104 * @state: state information (See bit values above)
89 * @is_rel: Set if the timer was armed relative 105 * @is_rel: Set if the timer was armed relative
106 * @is_soft: Set if hrtimer will be expired in soft interrupt context.
90 * 107 *
91 * The hrtimer structure must be initialized by hrtimer_init() 108 * The hrtimer structure must be initialized by hrtimer_init()
92 */ 109 */
@@ -97,6 +114,7 @@ struct hrtimer {
97 struct hrtimer_clock_base *base; 114 struct hrtimer_clock_base *base;
98 u8 state; 115 u8 state;
99 u8 is_rel; 116 u8 is_rel;
117 u8 is_soft;
100}; 118};
101 119
102/** 120/**
@@ -112,9 +130,9 @@ struct hrtimer_sleeper {
112}; 130};
113 131
114#ifdef CONFIG_64BIT 132#ifdef CONFIG_64BIT
115# define HRTIMER_CLOCK_BASE_ALIGN 64 133# define __hrtimer_clock_base_align ____cacheline_aligned
116#else 134#else
117# define HRTIMER_CLOCK_BASE_ALIGN 32 135# define __hrtimer_clock_base_align
118#endif 136#endif
119 137
120/** 138/**
@@ -123,48 +141,57 @@ struct hrtimer_sleeper {
123 * @index: clock type index for per_cpu support when moving a 141 * @index: clock type index for per_cpu support when moving a
124 * timer to a base on another cpu. 142 * timer to a base on another cpu.
125 * @clockid: clock id for per_cpu support 143 * @clockid: clock id for per_cpu support
144 * @seq: seqcount around __run_hrtimer
145 * @running: pointer to the currently running hrtimer
126 * @active: red black tree root node for the active timers 146 * @active: red black tree root node for the active timers
127 * @get_time: function to retrieve the current time of the clock 147 * @get_time: function to retrieve the current time of the clock
128 * @offset: offset of this clock to the monotonic base 148 * @offset: offset of this clock to the monotonic base
129 */ 149 */
130struct hrtimer_clock_base { 150struct hrtimer_clock_base {
131 struct hrtimer_cpu_base *cpu_base; 151 struct hrtimer_cpu_base *cpu_base;
132 int index; 152 unsigned int index;
133 clockid_t clockid; 153 clockid_t clockid;
154 seqcount_t seq;
155 struct hrtimer *running;
134 struct timerqueue_head active; 156 struct timerqueue_head active;
135 ktime_t (*get_time)(void); 157 ktime_t (*get_time)(void);
136 ktime_t offset; 158 ktime_t offset;
137} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); 159} __hrtimer_clock_base_align;
138 160
139enum hrtimer_base_type { 161enum hrtimer_base_type {
140 HRTIMER_BASE_MONOTONIC, 162 HRTIMER_BASE_MONOTONIC,
141 HRTIMER_BASE_REALTIME, 163 HRTIMER_BASE_REALTIME,
142 HRTIMER_BASE_BOOTTIME, 164 HRTIMER_BASE_BOOTTIME,
143 HRTIMER_BASE_TAI, 165 HRTIMER_BASE_TAI,
166 HRTIMER_BASE_MONOTONIC_SOFT,
167 HRTIMER_BASE_REALTIME_SOFT,
168 HRTIMER_BASE_BOOTTIME_SOFT,
169 HRTIMER_BASE_TAI_SOFT,
144 HRTIMER_MAX_CLOCK_BASES, 170 HRTIMER_MAX_CLOCK_BASES,
145}; 171};
146 172
147/* 173/**
148 * struct hrtimer_cpu_base - the per cpu clock bases 174 * struct hrtimer_cpu_base - the per cpu clock bases
149 * @lock: lock protecting the base and associated clock bases 175 * @lock: lock protecting the base and associated clock bases
150 * and timers 176 * and timers
151 * @seq: seqcount around __run_hrtimer
152 * @running: pointer to the currently running hrtimer
153 * @cpu: cpu number 177 * @cpu: cpu number
154 * @active_bases: Bitfield to mark bases with active timers 178 * @active_bases: Bitfield to mark bases with active timers
155 * @clock_was_set_seq: Sequence counter of clock was set events 179 * @clock_was_set_seq: Sequence counter of clock was set events
156 * @migration_enabled: The migration of hrtimers to other cpus is enabled
157 * @nohz_active: The nohz functionality is enabled
158 * @expires_next: absolute time of the next event which was scheduled
159 * via clock_set_next_event()
160 * @next_timer: Pointer to the first expiring timer
161 * @in_hrtirq: hrtimer_interrupt() is currently executing
162 * @hres_active: State of high resolution mode 180 * @hres_active: State of high resolution mode
181 * @in_hrtirq: hrtimer_interrupt() is currently executing
163 * @hang_detected: The last hrtimer interrupt detected a hang 182 * @hang_detected: The last hrtimer interrupt detected a hang
183 * @softirq_activated: displays, if the softirq is raised - update of softirq
184 * related settings is not required then.
164 * @nr_events: Total number of hrtimer interrupt events 185 * @nr_events: Total number of hrtimer interrupt events
165 * @nr_retries: Total number of hrtimer interrupt retries 186 * @nr_retries: Total number of hrtimer interrupt retries
166 * @nr_hangs: Total number of hrtimer interrupt hangs 187 * @nr_hangs: Total number of hrtimer interrupt hangs
167 * @max_hang_time: Maximum time spent in hrtimer_interrupt 188 * @max_hang_time: Maximum time spent in hrtimer_interrupt
189 * @expires_next: absolute time of the next event, is required for remote
190 * hrtimer enqueue; it is the total first expiry time (hard
191 * and soft hrtimer are taken into account)
192 * @next_timer: Pointer to the first expiring timer
193 * @softirq_expires_next: Time to check, if soft queues needs also to be expired
194 * @softirq_next_timer: Pointer to the first expiring softirq based timer
168 * @clock_base: array of clock bases for this cpu 195 * @clock_base: array of clock bases for this cpu
169 * 196 *
170 * Note: next_timer is just an optimization for __remove_hrtimer(). 197 * Note: next_timer is just an optimization for __remove_hrtimer().
@@ -173,31 +200,28 @@ enum hrtimer_base_type {
173 */ 200 */
174struct hrtimer_cpu_base { 201struct hrtimer_cpu_base {
175 raw_spinlock_t lock; 202 raw_spinlock_t lock;
176 seqcount_t seq;
177 struct hrtimer *running;
178 unsigned int cpu; 203 unsigned int cpu;
179 unsigned int active_bases; 204 unsigned int active_bases;
180 unsigned int clock_was_set_seq; 205 unsigned int clock_was_set_seq;
181 bool migration_enabled; 206 unsigned int hres_active : 1,
182 bool nohz_active; 207 in_hrtirq : 1,
208 hang_detected : 1,
209 softirq_activated : 1;
183#ifdef CONFIG_HIGH_RES_TIMERS 210#ifdef CONFIG_HIGH_RES_TIMERS
184 unsigned int in_hrtirq : 1,
185 hres_active : 1,
186 hang_detected : 1;
187 ktime_t expires_next;
188 struct hrtimer *next_timer;
189 unsigned int nr_events; 211 unsigned int nr_events;
190 unsigned int nr_retries; 212 unsigned short nr_retries;
191 unsigned int nr_hangs; 213 unsigned short nr_hangs;
192 unsigned int max_hang_time; 214 unsigned int max_hang_time;
193#endif 215#endif
216 ktime_t expires_next;
217 struct hrtimer *next_timer;
218 ktime_t softirq_expires_next;
219 struct hrtimer *softirq_next_timer;
194 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; 220 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
195} ____cacheline_aligned; 221} ____cacheline_aligned;
196 222
197static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) 223static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
198{ 224{
199 BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
200
201 timer->node.expires = time; 225 timer->node.expires = time;
202 timer->_softexpires = time; 226 timer->_softexpires = time;
203} 227}
@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
266 return timer->base->get_time(); 290 return timer->base->get_time();
267} 291}
268 292
293static inline int hrtimer_is_hres_active(struct hrtimer *timer)
294{
295 return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
296 timer->base->cpu_base->hres_active : 0;
297}
298
269#ifdef CONFIG_HIGH_RES_TIMERS 299#ifdef CONFIG_HIGH_RES_TIMERS
270struct clock_event_device; 300struct clock_event_device;
271 301
272extern void hrtimer_interrupt(struct clock_event_device *dev); 302extern void hrtimer_interrupt(struct clock_event_device *dev);
273 303
274static inline int hrtimer_is_hres_active(struct hrtimer *timer)
275{
276 return timer->base->cpu_base->hres_active;
277}
278
279/* 304/*
280 * The resolution of the clocks. The resolution value is returned in 305 * The resolution of the clocks. The resolution value is returned in
281 * the clock_getres() system call to give application programmers an 306 * the clock_getres() system call to give application programmers an
@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution;
298 323
299#define hrtimer_resolution (unsigned int)LOW_RES_NSEC 324#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
300 325
301static inline int hrtimer_is_hres_active(struct hrtimer *timer)
302{
303 return 0;
304}
305
306static inline void clock_was_set_delayed(void) { } 326static inline void clock_was_set_delayed(void) { }
307 327
308#endif 328#endif
@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
365 u64 range_ns, const enum hrtimer_mode mode); 385 u64 range_ns, const enum hrtimer_mode mode);
366 386
367/** 387/**
368 * hrtimer_start - (re)start an hrtimer on the current CPU 388 * hrtimer_start - (re)start an hrtimer
369 * @timer: the timer to be added 389 * @timer: the timer to be added
370 * @tim: expiry time 390 * @tim: expiry time
371 * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or 391 * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
372 * relative (HRTIMER_MODE_REL) 392 * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
393 * softirq based mode is considered for debug purpose only!
373 */ 394 */
374static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, 395static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
375 const enum hrtimer_mode mode) 396 const enum hrtimer_mode mode)
@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
422 */ 443 */
423static inline int hrtimer_callback_running(struct hrtimer *timer) 444static inline int hrtimer_callback_running(struct hrtimer *timer)
424{ 445{
425 return timer->base->cpu_base->running == timer; 446 return timer->base->running == timer;
426} 447}
427 448
428/* Forward a hrtimer so it expires after now: */ 449/* Forward a hrtimer so it expires after now: */
@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
466extern int schedule_hrtimeout_range_clock(ktime_t *expires, 487extern int schedule_hrtimeout_range_clock(ktime_t *expires,
467 u64 delta, 488 u64 delta,
468 const enum hrtimer_mode mode, 489 const enum hrtimer_mode mode,
469 int clock); 490 clockid_t clock_id);
470extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); 491extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
471 492
472/* Soft interrupt function to run the hrtimer queues: */ 493/* Soft interrupt function to run the hrtimer queues: */
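hrtimer.h introduces the _SOFT mode variants and per-clock soft bases so a timer's callback can run from softirq context instead of hard interrupt context. A minimal sketch, with an arbitrary 10 ms expiry:

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* runs from softirq context because the timer uses a _SOFT mode */
	return HRTIMER_NORESTART;
}

static void example_timer_setup(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	example_timer.function = example_timer_fn;
	hrtimer_start(&example_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}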
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 82a25880714a..36fa6a2a82e3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -119,6 +119,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
119 long freed); 119 long freed);
120bool isolate_huge_page(struct page *page, struct list_head *list); 120bool isolate_huge_page(struct page *page, struct list_head *list);
121void putback_active_hugepage(struct page *page); 121void putback_active_hugepage(struct page *page);
122void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
122void free_huge_page(struct page *page); 123void free_huge_page(struct page *page);
123void hugetlb_fix_reserve_counts(struct inode *inode); 124void hugetlb_fix_reserve_counts(struct inode *inode);
124extern struct mutex *hugetlb_fault_mutex_table; 125extern struct mutex *hugetlb_fault_mutex_table;
@@ -129,7 +130,6 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
129 130
130pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 131pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
131 132
132extern int hugepages_treat_as_movable;
133extern int sysctl_hugetlb_shm_group; 133extern int sysctl_hugetlb_shm_group;
134extern struct list_head huge_boot_pages; 134extern struct list_head huge_boot_pages;
135 135
@@ -158,6 +158,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
158 unsigned long address, unsigned long end, pgprot_t newprot); 158 unsigned long address, unsigned long end, pgprot_t newprot);
159 159
160bool is_hugetlb_entry_migration(pte_t pte); 160bool is_hugetlb_entry_migration(pte_t pte);
161
161#else /* !CONFIG_HUGETLB_PAGE */ 162#else /* !CONFIG_HUGETLB_PAGE */
162 163
163static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 164static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
@@ -198,6 +199,7 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
198 return false; 199 return false;
199} 200}
200#define putback_active_hugepage(p) do {} while (0) 201#define putback_active_hugepage(p) do {} while (0)
202#define move_hugetlb_state(old, new, reason) do {} while (0)
201 203
202static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 204static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
203 unsigned long address, unsigned long end, pgprot_t newprot) 205 unsigned long address, unsigned long end, pgprot_t newprot)
@@ -271,6 +273,17 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
271 return sb->s_fs_info; 273 return sb->s_fs_info;
272} 274}
273 275
276struct hugetlbfs_inode_info {
277 struct shared_policy policy;
278 struct inode vfs_inode;
279 unsigned int seals;
280};
281
282static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
283{
284 return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
285}
286
274extern const struct file_operations hugetlbfs_file_operations; 287extern const struct file_operations hugetlbfs_file_operations;
275extern const struct vm_operations_struct hugetlb_vm_ops; 288extern const struct vm_operations_struct hugetlb_vm_ops;
276struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, 289struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
@@ -343,10 +356,10 @@ struct huge_bootmem_page {
343struct page *alloc_huge_page(struct vm_area_struct *vma, 356struct page *alloc_huge_page(struct vm_area_struct *vma,
344 unsigned long addr, int avoid_reserve); 357 unsigned long addr, int avoid_reserve);
345struct page *alloc_huge_page_node(struct hstate *h, int nid); 358struct page *alloc_huge_page_node(struct hstate *h, int nid);
346struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
347 unsigned long addr, int avoid_reserve);
348struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, 359struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
349 nodemask_t *nmask); 360 nodemask_t *nmask);
361struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
362 unsigned long address);
350int huge_add_to_page_cache(struct page *page, struct address_space *mapping, 363int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
351 pgoff_t idx); 364 pgoff_t idx);
352 365
@@ -524,7 +537,7 @@ struct hstate {};
524#define alloc_huge_page(v, a, r) NULL 537#define alloc_huge_page(v, a, r) NULL
525#define alloc_huge_page_node(h, nid) NULL 538#define alloc_huge_page_node(h, nid) NULL
526#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL 539#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
527#define alloc_huge_page_noerr(v, a, r) NULL 540#define alloc_huge_page_vma(h, vma, address) NULL
528#define alloc_bootmem_huge_page(h) NULL 541#define alloc_bootmem_huge_page(h) NULL
529#define hstate_file(f) NULL 542#define hstate_file(f) NULL
530#define hstate_sizelog(s) NULL 543#define hstate_sizelog(s) NULL
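hugetlb.h now exposes hugetlbfs_inode_info and the HUGETLBFS_I() container_of() helper, including a seals field (presumably for memfd-style sealing of hugetlbfs files). A sketch of reading it from a VFS inode:

static bool example_write_sealed(struct inode *inode)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	return !!(info->seals & F_SEAL_WRITE);	/* F_SEAL_* from <uapi/linux/fcntl.h> */
}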
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6c9336626592..93bd6fcd6e62 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -127,28 +127,6 @@ struct hv_ring_buffer_info {
127 u32 priv_read_index; 127 u32 priv_read_index;
128}; 128};
129 129
130/*
131 *
132 * hv_get_ringbuffer_availbytes()
133 *
134 * Get number of bytes available to read and to write to
135 * for the specified ring buffer
136 */
137static inline void
138hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
139 u32 *read, u32 *write)
140{
141 u32 read_loc, write_loc, dsize;
142
143 /* Capture the read/write indices before they changed */
144 read_loc = rbi->ring_buffer->read_index;
145 write_loc = rbi->ring_buffer->write_index;
146 dsize = rbi->ring_datasize;
147
148 *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
149 read_loc - write_loc;
150 *read = dsize - *write;
151}
152 130
153static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) 131static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
154{ 132{
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 0f774406fad0..419a38e7c315 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -55,7 +55,7 @@ typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
55struct module; 55struct module;
56struct property_entry; 56struct property_entry;
57 57
58#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 58#if IS_ENABLED(CONFIG_I2C)
59/* 59/*
60 * The master routines are the ones normally used to transmit data to devices 60 * The master routines are the ones normally used to transmit data to devices
61 * on a bus (or read from them). Apart from two basic transfer functions to 61 * on a bus (or read from them). Apart from two basic transfer functions to
@@ -63,10 +63,68 @@ struct property_entry;
63 * transmit an arbitrary number of messages without interruption. 63 * transmit an arbitrary number of messages without interruption.
64 * @count must be less than 64k since msg.len is u16. 64 * @count must be less than 64k since msg.len is u16.
65 */ 65 */
66extern int i2c_master_send(const struct i2c_client *client, const char *buf, 66extern int i2c_transfer_buffer_flags(const struct i2c_client *client,
67 int count); 67 char *buf, int count, u16 flags);
68extern int i2c_master_recv(const struct i2c_client *client, char *buf, 68
69 int count); 69/**
70 * i2c_master_recv - issue a single I2C message in master receive mode
71 * @client: Handle to slave device
72 * @buf: Where to store data read from slave
73 * @count: How many bytes to read, must be less than 64k since msg.len is u16
74 *
75 * Returns negative errno, or else the number of bytes read.
76 */
77static inline int i2c_master_recv(const struct i2c_client *client,
78 char *buf, int count)
79{
80 return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD);
81};
82
83/**
84 * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode
85 * using a DMA safe buffer
86 * @client: Handle to slave device
87 * @buf: Where to store data read from slave, must be safe to use with DMA
88 * @count: How many bytes to read, must be less than 64k since msg.len is u16
89 *
90 * Returns negative errno, or else the number of bytes read.
91 */
92static inline int i2c_master_recv_dmasafe(const struct i2c_client *client,
93 char *buf, int count)
94{
95 return i2c_transfer_buffer_flags(client, buf, count,
96 I2C_M_RD | I2C_M_DMA_SAFE);
97};
98
99/**
100 * i2c_master_send - issue a single I2C message in master transmit mode
101 * @client: Handle to slave device
102 * @buf: Data that will be written to the slave
103 * @count: How many bytes to write, must be less than 64k since msg.len is u16
104 *
105 * Returns negative errno, or else the number of bytes written.
106 */
107static inline int i2c_master_send(const struct i2c_client *client,
108 const char *buf, int count)
109{
110 return i2c_transfer_buffer_flags(client, (char *)buf, count, 0);
111};
112
113/**
114 * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode
115 * using a DMA safe buffer
116 * @client: Handle to slave device
117 * @buf: Data that will be written to the slave, must be safe to use with DMA
118 * @count: How many bytes to write, must be less than 64k since msg.len is u16
119 *
120 * Returns negative errno, or else the number of bytes written.
121 */
122static inline int i2c_master_send_dmasafe(const struct i2c_client *client,
123 const char *buf, int count)
124{
125 return i2c_transfer_buffer_flags(client, (char *)buf, count,
126 I2C_M_DMA_SAFE);
127};
70 128
71/* Transfer num messages. 129/* Transfer num messages.
72 */ 130 */
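
The wrappers above all funnel into the new i2c_transfer_buffer_flags(). As a rough, non-authoritative sketch of how a client driver would combine them after this change (the device layout, function name and error policy are assumptions, not part of the patch):

static int demo_read_reg(struct i2c_client *client, char reg,
			 char *buf, int len)
{
	int ret;

	/* one write message carrying the register address */
	ret = i2c_master_send(client, &reg, 1);
	if (ret < 0)
		return ret;

	/* one read message; returns bytes read or a negative errno */
	ret = i2c_master_recv(client, buf, len);
	if (ret < 0)
		return ret;

	return ret == len ? 0 : -EIO;
}

For buffers known to be DMA-safe (not on the stack), the _dmasafe variants can be substituted one-for-one.
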
@@ -354,7 +412,7 @@ struct i2c_board_info {
354 .type = dev_type, .addr = (dev_addr) 412 .type = dev_type, .addr = (dev_addr)
355 413
356 414
357#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 415#if IS_ENABLED(CONFIG_I2C)
358/* Add-on boards should register/unregister their devices; e.g. a board 416/* Add-on boards should register/unregister their devices; e.g. a board
359 * with integrated I2C, a config eeprom, sensors, and a codec that's 417 * with integrated I2C, a config eeprom, sensors, and a codec that's
360 * used in conjunction with the primary hardware. 418 * used in conjunction with the primary hardware.
@@ -485,40 +543,43 @@ struct i2c_timings {
485/** 543/**
486 * struct i2c_bus_recovery_info - I2C bus recovery information 544 * struct i2c_bus_recovery_info - I2C bus recovery information
487 * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or 545 * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or
488 * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). 546 * i2c_generic_scl_recovery().
489 * @get_scl: This gets current value of SCL line. Mandatory for generic SCL 547 * @get_scl: This gets current value of SCL line. Mandatory for generic SCL
490 * recovery. Used internally for generic GPIO recovery. 548 * recovery. Populated internally for generic GPIO recovery.
491 * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used 549 * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery.
492 * internally for generic GPIO recovery. 550 * Populated internally for generic GPIO recovery.
493 * @get_sda: This gets current value of SDA line. Optional for generic SCL 551 * @get_sda: This gets current value of SDA line. Optional for generic SCL
494 * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO 552 * recovery. Populated internally, if sda_gpio is a valid GPIO, for generic
495 * recovery. 553 * GPIO recovery.
554 * @set_sda: This sets/clears the SDA line. Optional for generic SCL recovery.
555 * Populated internally, if sda_gpio is a valid GPIO, for generic GPIO
556 * recovery.
496 * @prepare_recovery: This will be called before starting recovery. Platform may 557 * @prepare_recovery: This will be called before starting recovery. Platform may
497 * configure padmux here for SDA/SCL line or something else they want. 558 * configure padmux here for SDA/SCL line or something else they want.
498 * @unprepare_recovery: This will be called after completing recovery. Platform 559 * @unprepare_recovery: This will be called after completing recovery. Platform
499 * may configure padmux here for SDA/SCL line or something else they want. 560 * may configure padmux here for SDA/SCL line or something else they want.
500 * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery. 561 * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
501 * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery. 562 * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
502 */ 563 */
503struct i2c_bus_recovery_info { 564struct i2c_bus_recovery_info {
504 int (*recover_bus)(struct i2c_adapter *); 565 int (*recover_bus)(struct i2c_adapter *adap);
505 566
506 int (*get_scl)(struct i2c_adapter *); 567 int (*get_scl)(struct i2c_adapter *adap);
507 void (*set_scl)(struct i2c_adapter *, int val); 568 void (*set_scl)(struct i2c_adapter *adap, int val);
508 int (*get_sda)(struct i2c_adapter *); 569 int (*get_sda)(struct i2c_adapter *adap);
570 void (*set_sda)(struct i2c_adapter *adap, int val);
509 571
510 void (*prepare_recovery)(struct i2c_adapter *); 572 void (*prepare_recovery)(struct i2c_adapter *adap);
511 void (*unprepare_recovery)(struct i2c_adapter *); 573 void (*unprepare_recovery)(struct i2c_adapter *adap);
512 574
513 /* gpio recovery */ 575 /* gpio recovery */
514 int scl_gpio; 576 struct gpio_desc *scl_gpiod;
515 int sda_gpio; 577 struct gpio_desc *sda_gpiod;
516}; 578};
517 579
518int i2c_recover_bus(struct i2c_adapter *adap); 580int i2c_recover_bus(struct i2c_adapter *adap);
519 581
520/* Generic recovery routines */ 582/* Generic recovery routines */
521int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
522int i2c_generic_scl_recovery(struct i2c_adapter *adap); 583int i2c_generic_scl_recovery(struct i2c_adapter *adap);
523 584
524/** 585/**
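
With i2c_generic_gpio_recovery() dropped, an adapter driver that wants GPIO-based recovery now supplies GPIO descriptors and points recover_bus at i2c_generic_scl_recovery(); get_scl/set_scl are then populated internally. A hedged sketch, assuming devm gpiod lookups with made-up "scl"/"sda" connection IDs:

static struct i2c_bus_recovery_info demo_recovery = {
	.recover_bus = i2c_generic_scl_recovery,
};

static int demo_init_recovery(struct device *dev, struct i2c_adapter *adap)
{
	/* SCL is mandatory for generic SCL recovery */
	demo_recovery.scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR(demo_recovery.scl_gpiod))
		return PTR_ERR(demo_recovery.scl_gpiod);

	/* SDA is optional; NULL simply means it cannot be sensed or driven */
	demo_recovery.sda_gpiod = devm_gpiod_get_optional(dev, "sda", GPIOD_IN);
	if (IS_ERR(demo_recovery.sda_gpiod))
		return PTR_ERR(demo_recovery.sda_gpiod);

	adap->bus_recovery_info = &demo_recovery;
	return 0;
}
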
@@ -706,7 +767,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
706 767
707/* administration... 768/* administration...
708 */ 769 */
709#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 770#if IS_ENABLED(CONFIG_I2C)
710extern int i2c_add_adapter(struct i2c_adapter *); 771extern int i2c_add_adapter(struct i2c_adapter *);
711extern void i2c_del_adapter(struct i2c_adapter *); 772extern void i2c_del_adapter(struct i2c_adapter *);
712extern int i2c_add_numbered_adapter(struct i2c_adapter *); 773extern int i2c_add_numbered_adapter(struct i2c_adapter *);
@@ -769,6 +830,9 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
769 return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); 830 return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
770} 831}
771 832
833u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
834void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf);
835
772int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); 836int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
773/** 837/**
774 * module_i2c_driver() - Helper macro for registering a modular I2C driver 838 * module_i2c_driver() - Helper macro for registering a modular I2C driver
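
i2c_get_dma_safe_msg_buf() and i2c_release_dma_safe_msg_buf() are new helpers for bus drivers that want to DMA message payloads: the first hands back a DMA-safe buffer for a message of at least threshold bytes (or NULL), the second releases it again, copying read data back where a bounce buffer was used. A sketch of the intended pattern; the demo_* transfer routines and the 8-byte threshold are invented for illustration:

static int demo_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	u8 *dma_buf;
	int ret;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
	if (!dma_buf)
		return demo_xfer_pio(adap, msg);	/* short message, use PIO */

	ret = demo_xfer_dma(adap, msg, dma_buf);

	/* copies read data back and frees any bounce buffer */
	i2c_release_dma_safe_msg_buf(msg, dma_buf);
	return ret;
}
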
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
deleted file mode 100644
index 4dbe651f71f5..000000000000
--- a/include/linux/i7300_idle.h
+++ /dev/null
@@ -1,84 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef I7300_IDLE_H
4#define I7300_IDLE_H
5
6#include <linux/pci.h>
7
8/*
9 * I/O AT controls (PCI bus 0 device 8 function 0)
10 * DIMM controls (PCI bus 0 device 16 function 1)
11 */
12#define IOAT_BUS 0
13#define IOAT_DEVFN PCI_DEVFN(8, 0)
14#define MEMCTL_BUS 0
15#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
16
17struct fbd_ioat {
18 unsigned int vendor;
19 unsigned int ioat_dev;
20 unsigned int enabled;
21};
22
23/*
24 * The i5000 chip-set has the same hooks as the i7300
25 * but it is not enabled by default and must be manually
26 * manually enabled with "forceload=1" because it is
27 * only lightly validated.
28 */
29
30static const struct fbd_ioat fbd_ioat_list[] = {
31 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
32 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
33 {0, 0}
34};
35
36/* table of devices that work with this driver */
37static const struct pci_device_id pci_tbl[] = {
38 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
39 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
40 { } /* Terminating entry */
41};
42
43/* Check for known platforms with I/O-AT */
44static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
45 struct pci_dev **ioat_dev,
46 int enable_all)
47{
48 int i;
49 struct pci_dev *memdev, *dmadev;
50
51 memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
52 if (!memdev)
53 return -ENODEV;
54
55 for (i = 0; pci_tbl[i].vendor != 0; i++) {
56 if (memdev->vendor == pci_tbl[i].vendor &&
57 memdev->device == pci_tbl[i].device) {
58 break;
59 }
60 }
61 if (pci_tbl[i].vendor == 0)
62 return -ENODEV;
63
64 dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
65 if (!dmadev)
66 return -ENODEV;
67
68 for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
69 if (dmadev->vendor == fbd_ioat_list[i].vendor &&
70 dmadev->device == fbd_ioat_list[i].ioat_dev) {
71 if (!(fbd_ioat_list[i].enabled || enable_all))
72 continue;
73 if (fbd_dev)
74 *fbd_dev = memdev;
75 if (ioat_dev)
76 *ioat_dev = dmadev;
77
78 return 0;
79 }
80 }
81 return -ENODEV;
82}
83
84#endif
diff --git a/include/linux/idr.h b/include/linux/idr.h
index fa14f834e4ed..7d6a6313f0ab 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,10 +15,10 @@
15#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18#include <linux/bug.h>
19 18
20struct idr { 19struct idr {
21 struct radix_tree_root idr_rt; 20 struct radix_tree_root idr_rt;
21 unsigned int idr_base;
22 unsigned int idr_next; 22 unsigned int idr_next;
23}; 23};
24 24
@@ -31,10 +31,26 @@ struct idr {
31/* Set the IDR flag and the IDR_FREE tag */ 31/* Set the IDR flag and the IDR_FREE tag */
32#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) 32#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
33 33
34#define IDR_INIT \ 34#define IDR_INIT_BASE(base) { \
35{ \ 35 .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER), \
36 .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ 36 .idr_base = (base), \
37 .idr_next = 0, \
37} 38}
39
40/**
41 * IDR_INIT() - Initialise an IDR.
42 *
43 * A freshly-initialised IDR contains no IDs.
44 */
45#define IDR_INIT IDR_INIT_BASE(0)
46
47/**
48 * DEFINE_IDR() - Define a statically-allocated IDR
49 * @name: Name of IDR
50 *
51 * An IDR defined using this macro is ready for use with no additional
52 * initialisation required. It contains no IDs.
53 */
38#define DEFINE_IDR(name) struct idr name = IDR_INIT 54#define DEFINE_IDR(name) struct idr name = IDR_INIT
39 55
40/** 56/**
@@ -82,80 +98,52 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
82 98
83void idr_preload(gfp_t gfp_mask); 99void idr_preload(gfp_t gfp_mask);
84 100
85int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index, 101int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
86 unsigned long start, unsigned long end, gfp_t gfp, 102int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
87 bool ext); 103 unsigned long max, gfp_t);
88 104int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
89/** 105void *idr_remove(struct idr *, unsigned long id);
90 * idr_alloc - allocate an id 106void *idr_find(const struct idr *, unsigned long id);
91 * @idr: idr handle
92 * @ptr: pointer to be associated with the new id
93 * @start: the minimum id (inclusive)
94 * @end: the maximum id (exclusive)
95 * @gfp: memory allocation flags
96 *
97 * Allocates an unused ID in the range [start, end). Returns -ENOSPC
98 * if there are no unused IDs in that range.
99 *
100 * Note that @end is treated as max when <= 0. This is to always allow
101 * using @start + N as @end as long as N is inside integer range.
102 *
103 * Simultaneous modifications to the @idr are not allowed and should be
104 * prevented by the user, usually with a lock. idr_alloc() may be called
105 * concurrently with read-only accesses to the @idr, such as idr_find() and
106 * idr_for_each_entry().
107 */
108static inline int idr_alloc(struct idr *idr, void *ptr,
109 int start, int end, gfp_t gfp)
110{
111 unsigned long id;
112 int ret;
113
114 if (WARN_ON_ONCE(start < 0))
115 return -EINVAL;
116
117 ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
118
119 if (ret)
120 return ret;
121
122 return id;
123}
124
125static inline int idr_alloc_ext(struct idr *idr, void *ptr,
126 unsigned long *index,
127 unsigned long start,
128 unsigned long end,
129 gfp_t gfp)
130{
131 return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
132}
133
134int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
135int idr_for_each(const struct idr *, 107int idr_for_each(const struct idr *,
136 int (*fn)(int id, void *p, void *data), void *data); 108 int (*fn)(int id, void *p, void *data), void *data);
137void *idr_get_next(struct idr *, int *nextid); 109void *idr_get_next(struct idr *, int *nextid);
138void *idr_get_next_ext(struct idr *idr, unsigned long *nextid); 110void *idr_get_next_ul(struct idr *, unsigned long *nextid);
139void *idr_replace(struct idr *, void *, int id); 111void *idr_replace(struct idr *, void *, unsigned long id);
140void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
141void idr_destroy(struct idr *); 112void idr_destroy(struct idr *);
142 113
143static inline void *idr_remove_ext(struct idr *idr, unsigned long id) 114/**
144{ 115 * idr_init_base() - Initialise an IDR.
145 return radix_tree_delete_item(&idr->idr_rt, id, NULL); 116 * @idr: IDR handle.
146} 117 * @base: The base value for the IDR.
147 118 *
148static inline void *idr_remove(struct idr *idr, int id) 119 * This variation of idr_init() creates an IDR which will allocate IDs
120 * starting at %base.
121 */
122static inline void idr_init_base(struct idr *idr, int base)
149{ 123{
150 return idr_remove_ext(idr, id); 124 INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
125 idr->idr_base = base;
126 idr->idr_next = 0;
151} 127}
152 128
129/**
130 * idr_init() - Initialise an IDR.
131 * @idr: IDR handle.
132 *
133 * Initialise a dynamically allocated IDR. To initialise a
134 * statically allocated IDR, use DEFINE_IDR().
135 */
153static inline void idr_init(struct idr *idr) 136static inline void idr_init(struct idr *idr)
154{ 137{
155 INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); 138 idr_init_base(idr, 0);
156 idr->idr_next = 0;
157} 139}
158 140
141/**
142 * idr_is_empty() - Are there any IDs allocated?
143 * @idr: IDR handle.
144 *
145 * Return: %true if any IDs have been allocated from this IDR.
146 */
159static inline bool idr_is_empty(const struct idr *idr) 147static inline bool idr_is_empty(const struct idr *idr)
160{ 148{
161 return radix_tree_empty(&idr->idr_rt) && 149 return radix_tree_empty(&idr->idr_rt) &&
@@ -174,50 +162,38 @@ static inline void idr_preload_end(void)
174} 162}
175 163
176/** 164/**
177 * idr_find - return pointer for given id 165 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
178 * @idr: idr handle 166 * @idr: IDR handle.
179 * @id: lookup key 167 * @entry: The type * to use as cursor
180 * 168 * @id: Entry ID.
181 * Return the pointer given the id it has been registered with. A %NULL
182 * return indicates that @id is not valid or you passed %NULL in
183 * idr_get_new().
184 * 169 *
185 * This function can be called under rcu_read_lock(), given that the leaf 170 * @entry and @id do not need to be initialized before the loop, and
186 * pointers lifetimes are correctly managed. 171 * after normal termination @entry is left with the value NULL. This
172 * is convenient for a "not found" value.
187 */ 173 */
188static inline void *idr_find_ext(const struct idr *idr, unsigned long id) 174#define idr_for_each_entry(idr, entry, id) \
189{ 175 for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
190 return radix_tree_lookup(&idr->idr_rt, id);
191}
192
193static inline void *idr_find(const struct idr *idr, int id)
194{
195 return idr_find_ext(idr, id);
196}
197 176
198/** 177/**
199 * idr_for_each_entry - iterate over an idr's elements of a given type 178 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
200 * @idr: idr handle 179 * @idr: IDR handle.
201 * @entry: the type * to use as cursor 180 * @entry: The type * to use as cursor.
202 * @id: id entry's key 181 * @id: Entry ID.
203 * 182 *
204 * @entry and @id do not need to be initialized before the loop, and 183 * @entry and @id do not need to be initialized before the loop, and
205 * after normal terminatinon @entry is left with the value NULL. This 184 * after normal termination @entry is left with the value NULL. This
206 * is convenient for a "not found" value. 185 * is convenient for a "not found" value.
207 */ 186 */
208#define idr_for_each_entry(idr, entry, id) \ 187#define idr_for_each_entry_ul(idr, entry, id) \
209 for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) 188 for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id)
210#define idr_for_each_entry_ext(idr, entry, id) \
211 for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
212 189
213/** 190/**
214 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type 191 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
215 * @idr: idr handle 192 * @idr: IDR handle.
216 * @entry: the type * to use as cursor 193 * @entry: The type * to use as a cursor.
217 * @id: id entry's key 194 * @id: Entry ID.
218 * 195 *
219 * Continue to iterate over list of given type, continuing after 196 * Continue to iterate over entries, continuing after the current position.
220 * the current position.
221 */ 197 */
222#define idr_for_each_entry_continue(idr, entry, id) \ 198#define idr_for_each_entry_continue(idr, entry, id) \
223 for ((entry) = idr_get_next((idr), &(id)); \ 199 for ((entry) = idr_get_next((idr), &(id)); \
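
Put together, the trimmed-down interface keeps the familiar preload/alloc/find/remove pattern. A minimal sketch, assuming an external spinlock and illustrative demo_* names:

static DEFINE_IDR(demo_idr);		/* ready to use, contains no IDs */
static DEFINE_SPINLOCK(demo_lock);

static int demo_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate while we may still sleep */
	spin_lock(&demo_lock);
	id = idr_alloc(&demo_idr, obj, 1, 0, GFP_NOWAIT);	/* any id >= 1 */
	spin_unlock(&demo_lock);
	idr_preload_end();

	return id;			/* allocated id, or negative errno */
}

static void *demo_lookup(int id)
{
	return idr_find(&demo_idr, id);	/* read-only lookup */
}

static void demo_unregister(int id)
{
	spin_lock(&demo_lock);
	idr_remove(&demo_idr, id);
	spin_unlock(&demo_lock);
}
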
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4c54611e03e9..622658dfbf0a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -13,6 +13,8 @@ struct ifla_vf_stats {
13 __u64 tx_bytes; 13 __u64 tx_bytes;
14 __u64 broadcast; 14 __u64 broadcast;
15 __u64 multicast; 15 __u64 multicast;
16 __u64 rx_dropped;
17 __u64 tx_dropped;
16}; 18};
17 19
18struct ifla_vf_info { 20struct ifla_vf_info {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index bedf54b6f943..4cb7aeeafce0 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -30,10 +30,10 @@ struct macvlan_dev {
30 enum macvlan_mode mode; 30 enum macvlan_mode mode;
31 u16 flags; 31 u16 flags;
32 int nest_level; 32 int nest_level;
33 unsigned int macaddr_count;
33#ifdef CONFIG_NET_POLL_CONTROLLER 34#ifdef CONFIG_NET_POLL_CONTROLLER
34 struct netpoll *netpoll; 35 struct netpoll *netpoll;
35#endif 36#endif
36 unsigned int macaddr_count;
37}; 37};
38 38
39static inline void macvlan_count_rx(const struct macvlan_dev *vlan, 39static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3ecef57c31e3..8e66866c11be 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -4,7 +4,7 @@
4 4
5#if IS_ENABLED(CONFIG_TAP) 5#if IS_ENABLED(CONFIG_TAP)
6struct socket *tap_get_socket(struct file *); 6struct socket *tap_get_socket(struct file *);
7struct skb_array *tap_get_skb_array(struct file *file); 7struct ptr_ring *tap_get_ptr_ring(struct file *file);
8#else 8#else
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/errno.h> 10#include <linux/errno.h>
@@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f)
14{ 14{
15 return ERR_PTR(-EINVAL); 15 return ERR_PTR(-EINVAL);
16} 16}
17static inline struct skb_array *tap_get_skb_array(struct file *f) 17static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
18{ 18{
19 return ERR_PTR(-EINVAL); 19 return ERR_PTR(-EINVAL);
20} 20}
@@ -70,7 +70,7 @@ struct tap_queue {
70 u16 queue_index; 70 u16 queue_index;
71 bool enabled; 71 bool enabled;
72 struct list_head next; 72 struct list_head next;
73 struct skb_array skb_array; 73 struct ptr_ring ring;
74}; 74};
75 75
76rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); 76rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf42d577..c5b0a75a7812 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -17,9 +17,14 @@
17 17
18#include <uapi/linux/if_tun.h> 18#include <uapi/linux/if_tun.h>
19 19
20#define TUN_XDP_FLAG 0x1UL
21
20#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) 22#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
21struct socket *tun_get_socket(struct file *); 23struct socket *tun_get_socket(struct file *);
22struct skb_array *tun_get_skb_array(struct file *file); 24struct ptr_ring *tun_get_tx_ring(struct file *file);
25bool tun_is_xdp_buff(void *ptr);
26void *tun_xdp_to_ptr(void *ptr);
27void *tun_ptr_to_xdp(void *ptr);
23#else 28#else
24#include <linux/err.h> 29#include <linux/err.h>
25#include <linux/errno.h> 30#include <linux/errno.h>
@@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f)
29{ 34{
30 return ERR_PTR(-EINVAL); 35 return ERR_PTR(-EINVAL);
31} 36}
32static inline struct skb_array *tun_get_skb_array(struct file *f) 37static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
33{ 38{
34 return ERR_PTR(-EINVAL); 39 return ERR_PTR(-EINVAL);
35} 40}
41static inline bool tun_is_xdp_buff(void *ptr)
42{
43 return false;
44}
45static inline void *tun_xdp_to_ptr(void *ptr)
46{
47 return NULL;
48}
49static inline void *tun_ptr_to_xdp(void *ptr)
50{
51 return NULL;
52}
36#endif /* CONFIG_TUN */ 53#endif /* CONFIG_TUN */
37#endif /* __IF_TUN_H */ 54#endif /* __IF_TUN_H */
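
The tx ring can now carry either sk_buff or XDP buffer pointers, tagged with TUN_XDP_FLAG; a consumer of tun_get_tx_ring() would untangle entries roughly as below (the demo_* handlers are placeholders, not existing functions):

static void demo_consume_tx_entry(void *ptr)
{
	if (tun_is_xdp_buff(ptr)) {
		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);	/* strips the tag */

		demo_handle_xdp(xdp);		/* hypothetical */
	} else {
		struct sk_buff *skb = ptr;

		demo_handle_skb(skb);		/* hypothetical */
	}
}
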
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f12a61be1ede..11579fd4126e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -578,8 +578,8 @@ const struct iio_chan_spec
578 * iio_device_register() - register a device with the IIO subsystem 578 * iio_device_register() - register a device with the IIO subsystem
579 * @indio_dev: Device structure filled by the device driver 579 * @indio_dev: Device structure filled by the device driver
580 **/ 580 **/
581#define iio_device_register(iio_dev) \ 581#define iio_device_register(indio_dev) \
582 __iio_device_register((iio_dev), THIS_MODULE) 582 __iio_device_register((indio_dev), THIS_MODULE)
583int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); 583int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
584void iio_device_unregister(struct iio_dev *indio_dev); 584void iio_device_unregister(struct iio_dev *indio_dev);
585/** 585/**
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h
index 1601a2a63a72..5e1cfa75f652 100644
--- a/include/linux/iio/machine.h
+++ b/include/linux/iio/machine.h
@@ -28,4 +28,11 @@ struct iio_map {
28 void *consumer_data; 28 void *consumer_data;
29}; 29};
30 30
31#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \
32{ \
33 .adc_channel_label = _provider_channel, \
34 .consumer_dev_name = _consumer_dev_name, \
35 .consumer_channel = _consumer_channel, \
36}
37
31#endif 38#endif
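
The new IIO_MAP() initializer shortens consumer-channel map tables. For illustration only (the channel labels and consumer device name are made up), a provider driver might declare and register:

static struct iio_map demo_adc_maps[] = {
	IIO_MAP("channel0", "demo-battery", "voltage"),
	IIO_MAP("channel1", "demo-battery", "temp"),
	{ },	/* sentinel */
};

/*
 * registered against the provider's iio_dev, e.g.
 *	iio_map_array_register(indio_dev, demo_adc_maps);
 */
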
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 7d5e44518379..b19b7204ef84 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -43,12 +43,13 @@ struct iio_trigger_ops {
43/** 43/**
44 * struct iio_trigger - industrial I/O trigger device 44 * struct iio_trigger - industrial I/O trigger device
45 * @ops: [DRIVER] operations structure 45 * @ops: [DRIVER] operations structure
46 * @owner: [INTERN] owner of this driver module
46 * @id: [INTERN] unique id number 47 * @id: [INTERN] unique id number
47 * @name: [DRIVER] unique name 48 * @name: [DRIVER] unique name
48 * @dev: [DRIVER] associated device (if relevant) 49 * @dev: [DRIVER] associated device (if relevant)
49 * @list: [INTERN] used in maintenance of global trigger list 50 * @list: [INTERN] used in maintenance of global trigger list
50 * @alloc_list: [DRIVER] used for driver specific trigger list 51 * @alloc_list: [DRIVER] used for driver specific trigger list
51 * @use_count: use count for the trigger 52 * @use_count: [INTERN] use count for the trigger.
52 * @subirq_chip: [INTERN] associate 'virtual' irq chip. 53 * @subirq_chip: [INTERN] associate 'virtual' irq chip.
53 * @subirq_base: [INTERN] base number for irqs provided by trigger. 54 * @subirq_base: [INTERN] base number for irqs provided by trigger.
54 * @subirqs: [INTERN] information about the 'child' irqs. 55 * @subirqs: [INTERN] information about the 'child' irqs.
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 1ac5bf95bfdd..e16fe7d44a71 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
173} 173}
174 174
175int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); 175int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
176int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); 176int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
177void devinet_init(void); 177void devinet_init(void);
178struct in_device *inetdev_by_index(struct net *, int); 178struct in_device *inetdev_by_index(struct net *, int);
179__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); 179__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
diff --git a/include/linux/init.h b/include/linux/init.h
index ea1b31101d9e..506a98151131 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -5,6 +5,13 @@
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6#include <linux/types.h> 6#include <linux/types.h>
7 7
8/* Built-in __init functions needn't be compiled with retpoline */
9#if defined(RETPOLINE) && !defined(MODULE)
10#define __noretpoline __attribute__((indirect_branch("keep")))
11#else
12#define __noretpoline
13#endif
14
8/* These macros are used to mark some functions or 15/* These macros are used to mark some functions or
9 * initialized data (doesn't apply to uninitialized data) 16 * initialized data (doesn't apply to uninitialized data)
10 * as `initialization' functions. The kernel can take this 17 * as `initialization' functions. The kernel can take this
@@ -40,7 +47,7 @@
40 47
41/* These are for everybody (although not all archs will actually 48/* These are for everybody (although not all archs will actually
42 discard it in modules) */ 49 discard it in modules) */
43#define __init __section(.init.text) __cold __latent_entropy 50#define __init __section(.init.text) __cold __latent_entropy __noretpoline
44#define __initdata __section(.init.data) 51#define __initdata __section(.init.data)
45#define __initconst __section(.init.rodata) 52#define __initconst __section(.init.rodata)
46#define __exitdata __section(.exit.data) 53#define __exitdata __section(.exit.data)
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h
deleted file mode 100644
index f9d932476a80..000000000000
--- a/include/linux/input/gpio_tilt.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _INPUT_GPIO_TILT_H
3#define _INPUT_GPIO_TILT_H
4
5/**
6 * struct gpio_tilt_axis - Axis used by the tilt switch
7 * @axis: Constant describing the axis, e.g. ABS_X
8 * @min: minimum value for abs_param
9 * @max: maximum value for abs_param
10 * @fuzz: fuzz value for abs_param
11 * @flat: flat value for abs_param
12 */
13struct gpio_tilt_axis {
14 int axis;
15 int min;
16 int max;
17 int fuzz;
18 int flat;
19};
20
21/**
22 * struct gpio_tilt_state - state description
23 * @gpios: bitfield of gpio target-states for the value
24 * @axes: array containing the axes settings for the gpio state
25 * The array indizes must correspond to the axes defined
26 * in platform_data
27 *
28 * This structure describes a supported axis settings
29 * and the necessary gpio-state which represent it.
30 *
31 * The n-th bit in the bitfield describes the state of the n-th GPIO
32 * from the gpios-array defined in gpio_regulator_config below.
33 */
34struct gpio_tilt_state {
35 int gpios;
36 int *axes;
37};
38
39/**
40 * struct gpio_tilt_platform_data
41 * @gpios: Array containing the gpios determining the tilt state
42 * @nr_gpios: Number of gpios
43 * @axes: Array of gpio_tilt_axis descriptions
44 * @nr_axes: Number of axes
45 * @states: Array of gpio_tilt_state entries describing
46 * the gpio state for specific tilts
47 * @nr_states: Number of states available
48 * @debounce_interval: debounce ticks interval in msecs
49 * @poll_interval: polling interval in msecs - for polling driver only
50 * @enable: callback to enable the tilt switch
51 * @disable: callback to disable the tilt switch
52 *
53 * This structure contains gpio-tilt-switch configuration
54 * information that must be passed by platform code to the
55 * gpio-tilt input driver.
56 */
57struct gpio_tilt_platform_data {
58 struct gpio *gpios;
59 int nr_gpios;
60
61 struct gpio_tilt_axis *axes;
62 int nr_axes;
63
64 struct gpio_tilt_state *states;
65 int nr_states;
66
67 int debounce_interval;
68
69 unsigned int poll_interval;
70 int (*enable)(struct device *dev);
71 void (*disable)(struct device *dev);
72};
73
74#endif
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index c2d6082a1a4c..858d3f4a2241 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -14,6 +14,7 @@
14 14
15enum integrity_status { 15enum integrity_status {
16 INTEGRITY_PASS = 0, 16 INTEGRITY_PASS = 0,
17 INTEGRITY_PASS_IMMUTABLE,
17 INTEGRITY_FAIL, 18 INTEGRITY_FAIL,
18 INTEGRITY_NOLABEL, 19 INTEGRITY_NOLABEL,
19 INTEGRITY_NOXATTRS, 20 INTEGRITY_NOXATTRS,
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f3274d9f46a2..8dad3dd26eae 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -83,7 +83,9 @@
83/* 83/*
84 * Decoding Capability Register 84 * Decoding Capability Register
85 */ 85 */
86#define cap_5lp_support(c) (((c) >> 60) & 1)
86#define cap_pi_support(c) (((c) >> 59) & 1) 87#define cap_pi_support(c) (((c) >> 59) & 1)
88#define cap_fl1gp_support(c) (((c) >> 56) & 1)
87#define cap_read_drain(c) (((c) >> 55) & 1) 89#define cap_read_drain(c) (((c) >> 55) & 1)
88#define cap_write_drain(c) (((c) >> 54) & 1) 90#define cap_write_drain(c) (((c) >> 54) & 1)
89#define cap_max_amask_val(c) (((c) >> 48) & 0x3f) 91#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 93b4183cf53d..da0ebaec25f0 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -265,7 +265,7 @@ extern struct resource * __devm_request_region(struct device *dev,
265extern void __devm_release_region(struct device *dev, struct resource *parent, 265extern void __devm_release_region(struct device *dev, struct resource *parent,
266 resource_size_t start, resource_size_t n); 266 resource_size_t start, resource_size_t n);
267extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); 267extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
268extern int iomem_is_exclusive(u64 addr); 268extern bool iomem_is_exclusive(u64 addr);
269 269
270extern int 270extern int
271walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, 271walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 0e81035b678f..b11fcdfd0770 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -13,10 +13,13 @@
13 * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed 13 * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
14 */ 14 */
15 15
16#define IRQ_WORK_PENDING 1UL 16#define IRQ_WORK_PENDING BIT(0)
17#define IRQ_WORK_BUSY 2UL 17#define IRQ_WORK_BUSY BIT(1)
18#define IRQ_WORK_FLAGS 3UL 18
19#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ 19/* Doesn't want IPI, wait for tick: */
20#define IRQ_WORK_LAZY BIT(2)
21
22#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
20 23
21struct irq_work { 24struct irq_work {
22 unsigned long flags; 25 unsigned long flags;
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
new file mode 100644
index 000000000000..be50ef7cedab
--- /dev/null
+++ b/include/linux/iversion.h
@@ -0,0 +1,337 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_IVERSION_H
3#define _LINUX_IVERSION_H
4
5#include <linux/fs.h>
6
7/*
8 * The inode->i_version field:
9 * ---------------------------
10 * The change attribute (i_version) is mandated by NFSv4 and is mostly for
11 * knfsd, but is also used for other purposes (e.g. IMA). The i_version must
12 * appear different to observers if there was a change to the inode's data or
13 * metadata since it was last queried.
14 *
15 * Observers see the i_version as a 64-bit number that never decreases. If it
16 * remains the same since it was last checked, then nothing has changed in the
17 * inode. If it's different then something has changed. Observers cannot infer
18 * anything about the nature or magnitude of the changes from the value, only
19 * that the inode has changed in some fashion.
20 *
21 * Not all filesystems properly implement the i_version counter. Subsystems that
22 * want to use i_version field on an inode should first check whether the
23 * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
24 *
25 * Those that set SB_I_VERSION will automatically have their i_version counter
26 * incremented on writes to normal files. If the SB_I_VERSION is not set, then
27 * the VFS will not touch it on writes, and the filesystem can use it how it
28 * wishes. Note that the filesystem is always responsible for updating the
29 * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
30 * We consider these sorts of filesystems to have a kernel-managed i_version.
31 *
32 * It may be impractical for filesystems to keep i_version updates atomic with
33 * respect to the changes that cause them. They should, however, guarantee
34 * that i_version updates are never visible before the changes that caused
35 * them. Also, i_version updates should never be delayed longer than it takes
36 * the original change to reach disk.
37 *
38 * This implementation uses the low bit in the i_version field as a flag to
39 * track when the value has been queried. If it has not been queried since it
40 * was last incremented, we can skip the increment in most cases.
41 *
42 * In the event that we're updating the ctime, we will usually go ahead and
43 * bump the i_version anyway. Since that has to go to stable storage in some
44 * fashion, we might as well increment it as well.
45 *
46 * With this implementation, the value should always appear to observers to
47 * increase over time if the file has changed. It's recommended to use
48 * inode_eq_iversion() helper to compare values.
49 *
50 * Note that some filesystems (e.g. NFS and AFS) just use the field to store
51 * a server-provided value (for the most part). For that reason, those
52 * filesystems do not set SB_I_VERSION. These filesystems are considered to
53 * have a self-managed i_version.
54 *
55 * Persistently storing the i_version
56 * ----------------------------------
57 * Queries of the i_version field are not gated on them hitting the backing
58 * store. It's always possible that the host could crash after allowing
59 * a query of the value but before it has made it to disk.
60 *
61 * To mitigate this problem, filesystems should always use
62 * inode_set_iversion_queried when loading an existing inode from disk. This
63 * ensures that the next attempted inode increment will result in the value
64 * changing.
65 *
66 * Storing the value to disk therefore does not count as a query, so those
67 * filesystems should use inode_peek_iversion to grab the value to be stored.
68 * There is no need to flag the value as having been queried in that case.
69 */
70
71/*
72 * We borrow the lowest bit in the i_version to use as a flag to tell whether
73 * it has been queried since we last incremented it. If it has, then we must
74 * increment it on the next change. After that, we can clear the flag and
75 * avoid incrementing it again until it has again been queried.
76 */
77#define I_VERSION_QUERIED_SHIFT (1)
78#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
79#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
80
81/**
82 * inode_set_iversion_raw - set i_version to the specified raw value
83 * @inode: inode to set
84 * @val: new i_version value to set
85 *
86 * Set @inode's i_version field to @val. This function is for use by
87 * filesystems that self-manage the i_version.
88 *
89 * For example, the NFS client stores its NFSv4 change attribute in this way,
90 * and the AFS client stores the data_version from the server here.
91 */
92static inline void
93inode_set_iversion_raw(struct inode *inode, u64 val)
94{
95 atomic64_set(&inode->i_version, val);
96}
97
98/**
99 * inode_peek_iversion_raw - grab a "raw" iversion value
100 * @inode: inode from which i_version should be read
101 *
102 * Grab a "raw" inode->i_version value and return it. The i_version is not
103 * flagged or converted in any way. This is mostly used to access a self-managed
104 * i_version.
105 *
106 * With those filesystems, we want to treat the i_version as an entirely
107 * opaque value.
108 */
109static inline u64
110inode_peek_iversion_raw(const struct inode *inode)
111{
112 return atomic64_read(&inode->i_version);
113}
114
115/**
116 * inode_set_iversion - set i_version to a particular value
117 * @inode: inode to set
118 * @val: new i_version value to set
119 *
120 * Set @inode's i_version field to @val. This function is for filesystems with
121 * a kernel-managed i_version, for initializing a newly-created inode from
122 * scratch.
123 *
124 * In this case, we do not set the QUERIED flag since we know that this value
125 * has never been queried.
126 */
127static inline void
128inode_set_iversion(struct inode *inode, u64 val)
129{
130 inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
131}
132
133/**
 134 * inode_set_iversion_queried - set i_version to a particular value as queried
135 * @inode: inode to set
136 * @val: new i_version value to set
137 *
138 * Set @inode's i_version field to @val, and flag it for increment on the next
139 * change.
140 *
141 * Filesystems that persistently store the i_version on disk should use this
142 * when loading an existing inode from disk.
143 *
144 * When loading in an i_version value from a backing store, we can't be certain
145 * that it wasn't previously viewed before being stored. Thus, we must assume
146 * that it was, to ensure that we don't end up handing out the same value for
147 * different versions of the same inode.
148 */
149static inline void
150inode_set_iversion_queried(struct inode *inode, u64 val)
151{
152 inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
153 I_VERSION_QUERIED);
154}
155
156/**
157 * inode_maybe_inc_iversion - increments i_version
158 * @inode: inode with the i_version that should be updated
159 * @force: increment the counter even if it's not necessary?
160 *
161 * Every time the inode is modified, the i_version field must be seen to have
162 * changed by any observer.
163 *
164 * If "force" is set or the QUERIED flag is set, then ensure that we increment
165 * the value, and clear the queried flag.
166 *
167 * In the common case where neither is set, then we can return "false" without
168 * updating i_version.
169 *
170 * If this function returns false, and no other metadata has changed, then we
171 * can avoid logging the metadata.
172 */
173static inline bool
174inode_maybe_inc_iversion(struct inode *inode, bool force)
175{
176 u64 cur, old, new;
177
178 /*
179 * The i_version field is not strictly ordered with any other inode
180 * information, but the legacy inode_inc_iversion code used a spinlock
181 * to serialize increments.
182 *
183 * Here, we add full memory barriers to ensure that any de-facto
184 * ordering with other info is preserved.
185 *
186 * This barrier pairs with the barrier in inode_query_iversion()
187 */
188 smp_mb();
189 cur = inode_peek_iversion_raw(inode);
190 for (;;) {
191 /* If flag is clear then we needn't do anything */
192 if (!force && !(cur & I_VERSION_QUERIED))
193 return false;
194
195 /* Since lowest bit is flag, add 2 to avoid it */
196 new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
197
198 old = atomic64_cmpxchg(&inode->i_version, cur, new);
199 if (likely(old == cur))
200 break;
201 cur = old;
202 }
203 return true;
204}
205
206
207/**
208 * inode_inc_iversion - forcibly increment i_version
209 * @inode: inode that needs to be updated
210 *
 211 * Forcibly increment the i_version field. This always results in a change to
212 * the observable value.
213 */
214static inline void
215inode_inc_iversion(struct inode *inode)
216{
217 inode_maybe_inc_iversion(inode, true);
218}
219
220/**
221 * inode_iversion_need_inc - is the i_version in need of being incremented?
222 * @inode: inode to check
223 *
224 * Returns whether the inode->i_version counter needs incrementing on the next
225 * change. Just fetch the value and check the QUERIED flag.
226 */
227static inline bool
228inode_iversion_need_inc(struct inode *inode)
229{
230 return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
231}
232
233/**
234 * inode_inc_iversion_raw - forcibly increment raw i_version
235 * @inode: inode that needs to be updated
236 *
 237 * Forcibly increment the raw i_version field. This always results in a change
238 * to the raw value.
239 *
240 * NFS will use the i_version field to store the value from the server. It
241 * mostly treats it as opaque, but in the case where it holds a write
242 * delegation, it must increment the value itself. This function does that.
243 */
244static inline void
245inode_inc_iversion_raw(struct inode *inode)
246{
247 atomic64_inc(&inode->i_version);
248}
249
250/**
251 * inode_peek_iversion - read i_version without flagging it to be incremented
252 * @inode: inode from which i_version should be read
253 *
254 * Read the inode i_version counter for an inode without registering it as a
255 * query.
256 *
257 * This is typically used by local filesystems that need to store an i_version
258 * on disk. In that situation, it's not necessary to flag it as having been
259 * viewed, as the result won't be used to gauge changes from that point.
260 */
261static inline u64
262inode_peek_iversion(const struct inode *inode)
263{
264 return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
265}
266
267/**
268 * inode_query_iversion - read i_version for later use
269 * @inode: inode from which i_version should be read
270 *
271 * Read the inode i_version counter. This should be used by callers that wish
272 * to store the returned i_version for later comparison. This will guarantee
273 * that a later query of the i_version will result in a different value if
274 * anything has changed.
275 *
276 * In this implementation, we fetch the current value, set the QUERIED flag and
277 * then try to swap it into place with a cmpxchg, if it wasn't already set. If
278 * that fails, we try again with the newly fetched value from the cmpxchg.
279 */
280static inline u64
281inode_query_iversion(struct inode *inode)
282{
283 u64 cur, old, new;
284
285 cur = inode_peek_iversion_raw(inode);
286 for (;;) {
287 /* If flag is already set, then no need to swap */
288 if (cur & I_VERSION_QUERIED) {
289 /*
290 * This barrier (and the implicit barrier in the
291 * cmpxchg below) pairs with the barrier in
292 * inode_maybe_inc_iversion().
293 */
294 smp_mb();
295 break;
296 }
297
298 new = cur | I_VERSION_QUERIED;
299 old = atomic64_cmpxchg(&inode->i_version, cur, new);
300 if (likely(old == cur))
301 break;
302 cur = old;
303 }
304 return cur >> I_VERSION_QUERIED_SHIFT;
305}
306
307/**
308 * inode_eq_iversion_raw - check whether the raw i_version counter has changed
309 * @inode: inode to check
310 * @old: old value to check against its i_version
311 *
312 * Compare the current raw i_version counter with a previous one. Returns true
313 * if they are the same or false if they are different.
314 */
315static inline bool
316inode_eq_iversion_raw(const struct inode *inode, u64 old)
317{
318 return inode_peek_iversion_raw(inode) == old;
319}
320
321/**
322 * inode_eq_iversion - check whether the i_version counter has changed
323 * @inode: inode to check
324 * @old: old value to check against its i_version
325 *
326 * Compare an i_version counter with a previous one. Returns true if they are
327 * the same, and false if they are different.
328 *
329 * Note that we don't need to set the QUERIED flag in this case, as the value
330 * in the inode is not being recorded for later use.
331 */
332static inline bool
333inode_eq_iversion(const struct inode *inode, u64 old)
334{
335 return inode_peek_iversion(inode) == old;
336}
337#endif
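
A compact sketch of the calling pattern the comments above describe: one side for a filesystem with a kernel-managed, persistently stored i_version, the other for a consumer caching the value. All demo_* names are illustrative, not part of the header:

/* Loading an existing inode from disk: assume it may already have been queried */
static void demo_read_inode(struct inode *inode, u64 ondisk_version)
{
	inode_set_iversion_queried(inode, ondisk_version);
}

/* Writing the inode back: a peek, not a query */
static u64 demo_version_to_store(struct inode *inode)
{
	return inode_peek_iversion(inode);
}

/* Consumer (knfsd- or IMA-style) checking whether its cached value is stale */
static bool demo_inode_changed(struct inode *inode, u64 *cached)
{
	if (inode_eq_iversion(inode, *cached))
		return false;

	*cached = inode_query_iversion(inode);
	return true;
}
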
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 296d1e0ea87b..b708e5169d1d 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
418#define JI_WAIT_DATA (1 << __JI_WAIT_DATA) 418#define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
419 419
420/** 420/**
421 * struct jbd_inode is the structure linking inodes in ordered mode 421 * struct jbd_inode - The jbd_inode type is the structure linking inodes in
422 * present in a transaction so that we can sync them during commit. 422 * ordered mode present in a transaction so that we can sync them during commit.
423 */ 423 */
424struct jbd2_inode { 424struct jbd2_inode {
425 /* Which transaction does this inode belong to? Either the running 425 /**
426 * transaction or the committing one. [j_list_lock] */ 426 * @i_transaction:
427 *
428 * Which transaction does this inode belong to? Either the running
429 * transaction or the committing one. [j_list_lock]
430 */
427 transaction_t *i_transaction; 431 transaction_t *i_transaction;
428 432
429 /* Pointer to the running transaction modifying inode's data in case 433 /**
430 * there is already a committing transaction touching it. [j_list_lock] */ 434 * @i_next_transaction:
435 *
436 * Pointer to the running transaction modifying inode's data in case
437 * there is already a committing transaction touching it. [j_list_lock]
438 */
431 transaction_t *i_next_transaction; 439 transaction_t *i_next_transaction;
432 440
433 /* List of inodes in the i_transaction [j_list_lock] */ 441 /**
442 * @i_list: List of inodes in the i_transaction [j_list_lock]
443 */
434 struct list_head i_list; 444 struct list_head i_list;
435 445
436 /* VFS inode this inode belongs to [constant during the lifetime 446 /**
437 * of the structure] */ 447 * @i_vfs_inode:
448 *
449 * VFS inode this inode belongs to [constant for lifetime of structure]
450 */
438 struct inode *i_vfs_inode; 451 struct inode *i_vfs_inode;
439 452
440 /* Flags of inode [j_list_lock] */ 453 /**
454 * @i_flags: Flags of inode [j_list_lock]
455 */
441 unsigned long i_flags; 456 unsigned long i_flags;
442}; 457};
443 458
@@ -447,12 +462,20 @@ struct jbd2_revoke_table_s;
447 * struct handle_s - The handle_s type is the concrete type associated with 462 * struct handle_s - The handle_s type is the concrete type associated with
448 * handle_t. 463 * handle_t.
449 * @h_transaction: Which compound transaction is this update a part of? 464 * @h_transaction: Which compound transaction is this update a part of?
465 * @h_journal: Which journal handle belongs to - used iff h_reserved set.
466 * @h_rsv_handle: Handle reserved for finishing the logical operation.
450 * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. 467 * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
451 * @h_ref: Reference count on this handle 468 * @h_ref: Reference count on this handle.
452 * @h_err: Field for caller's use to track errors through large fs operations 469 * @h_err: Field for caller's use to track errors through large fs operations.
453 * @h_sync: flag for sync-on-close 470 * @h_sync: Flag for sync-on-close.
454 * @h_jdata: flag to force data journaling 471 * @h_jdata: Flag to force data journaling.
455 * @h_aborted: flag indicating fatal error on handle 472 * @h_reserved: Flag for handle for reserved credits.
473 * @h_aborted: Flag indicating fatal error on handle.
474 * @h_type: For handle statistics.
475 * @h_line_no: For handle statistics.
476 * @h_start_jiffies: Handle Start time.
477 * @h_requested_credits: Holds @h_buffer_credits after handle is started.
478 * @saved_alloc_context: Saved context while transaction is open.
456 **/ 479 **/
457 480
458/* Docbook can't yet cope with the bit fields, but will leave the documentation 481/* Docbook can't yet cope with the bit fields, but will leave the documentation
@@ -462,32 +485,23 @@ struct jbd2_revoke_table_s;
462struct jbd2_journal_handle 485struct jbd2_journal_handle
463{ 486{
464 union { 487 union {
465 /* Which compound transaction is this update a part of? */
466 transaction_t *h_transaction; 488 transaction_t *h_transaction;
467 /* Which journal handle belongs to - used iff h_reserved set */ 489 /* Which journal handle belongs to - used iff h_reserved set */
468 journal_t *h_journal; 490 journal_t *h_journal;
469 }; 491 };
470 492
471 /* Handle reserved for finishing the logical operation */
472 handle_t *h_rsv_handle; 493 handle_t *h_rsv_handle;
473
474 /* Number of remaining buffers we are allowed to dirty: */
475 int h_buffer_credits; 494 int h_buffer_credits;
476
477 /* Reference count on this handle */
478 int h_ref; 495 int h_ref;
479
480 /* Field for caller's use to track errors through large fs */
481 /* operations */
482 int h_err; 496 int h_err;
483 497
484 /* Flags [no locking] */ 498 /* Flags [no locking] */
485 unsigned int h_sync: 1; /* sync-on-close */ 499 unsigned int h_sync: 1;
486 unsigned int h_jdata: 1; /* force data journaling */ 500 unsigned int h_jdata: 1;
487 unsigned int h_reserved: 1; /* handle with reserved credits */ 501 unsigned int h_reserved: 1;
488 unsigned int h_aborted: 1; /* fatal error on handle */ 502 unsigned int h_aborted: 1;
489 unsigned int h_type: 8; /* for handle statistics */ 503 unsigned int h_type: 8;
490 unsigned int h_line_no: 16; /* for handle statistics */ 504 unsigned int h_line_no: 16;
491 505
492 unsigned long h_start_jiffies; 506 unsigned long h_start_jiffies;
493 unsigned int h_requested_credits; 507 unsigned int h_requested_credits;
@@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end)
729/** 743/**
730 * struct journal_s - The journal_s type is the concrete type associated with 744 * struct journal_s - The journal_s type is the concrete type associated with
731 * journal_t. 745 * journal_t.
732 * @j_flags: General journaling state flags
733 * @j_errno: Is there an outstanding uncleared error on the journal (from a
734 * prior abort)?
735 * @j_sb_buffer: First part of superblock buffer
736 * @j_superblock: Second part of superblock buffer
737 * @j_format_version: Version of the superblock format
738 * @j_state_lock: Protect the various scalars in the journal
739 * @j_barrier_count: Number of processes waiting to create a barrier lock
740 * @j_barrier: The barrier lock itself
741 * @j_running_transaction: The current running transaction..
742 * @j_committing_transaction: the transaction we are pushing to disk
743 * @j_checkpoint_transactions: a linked circular list of all transactions
744 * waiting for checkpointing
745 * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
746 * to start committing, or for a barrier lock to be released
747 * @j_wait_done_commit: Wait queue for waiting for commit to complete
748 * @j_wait_commit: Wait queue to trigger commit
749 * @j_wait_updates: Wait queue to wait for updates to complete
750 * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
751 * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
752 * @j_head: Journal head - identifies the first unused block in the journal
753 * @j_tail: Journal tail - identifies the oldest still-used block in the
754 * journal.
755 * @j_free: Journal free - how many free blocks are there in the journal?
756 * @j_first: The block number of the first usable block
757 * @j_last: The block number one beyond the last usable block
758 * @j_dev: Device where we store the journal
759 * @j_blocksize: blocksize for the location where we store the journal.
760 * @j_blk_offset: starting block offset for into the device where we store the
761 * journal
762 * @j_fs_dev: Device which holds the client fs. For internal journal this will
763 * be equal to j_dev
764 * @j_reserved_credits: Number of buffers reserved from the running transaction
765 * @j_maxlen: Total maximum capacity of the journal region on disk.
766 * @j_list_lock: Protects the buffer lists and internal buffer state.
767 * @j_inode: Optional inode where we store the journal. If present, all journal
768 * block numbers are mapped into this inode via bmap().
769 * @j_tail_sequence: Sequence number of the oldest transaction in the log
770 * @j_transaction_sequence: Sequence number of the next transaction to grant
771 * @j_commit_sequence: Sequence number of the most recently committed
772 * transaction
773 * @j_commit_request: Sequence number of the most recent transaction wanting
774 * commit
775 * @j_uuid: Uuid of client object.
776 * @j_task: Pointer to the current commit thread for this journal
777 * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
778 * single compound commit transaction
779 * @j_commit_interval: What is the maximum transaction lifetime before we begin
780 * a commit?
781 * @j_commit_timer: The timer used to wakeup the commit thread
782 * @j_revoke_lock: Protect the revoke table
783 * @j_revoke: The revoke table - maintains the list of revoked blocks in the
784 * current transaction.
785 * @j_revoke_table: alternate revoke tables for j_revoke
786 * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
787 * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
788 * number that will fit in j_blocksize
789 * @j_last_sync_writer: most recent pid which did a synchronous write
790 * @j_history_lock: Protect the transactions statistics history
791 * @j_proc_entry: procfs entry for the jbd statistics directory
792 * @j_stats: Overall statistics
793 * @j_private: An opaque pointer to fs-private information.
794 * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies
795 */ 746 */
796
797struct journal_s 747struct journal_s
798{ 748{
799 /* General journaling state flags [j_state_lock] */ 749 /**
750 * @j_flags: General journaling state flags [j_state_lock]
751 */
800 unsigned long j_flags; 752 unsigned long j_flags;
801 753
802 /* 754 /**
755 * @j_errno:
756 *
803 * Is there an outstanding uncleared error on the journal (from a prior 757 * Is there an outstanding uncleared error on the journal (from a prior
804 * abort)? [j_state_lock] 758 * abort)? [j_state_lock]
805 */ 759 */
806 int j_errno; 760 int j_errno;
807 761
808 /* The superblock buffer */ 762 /**
763 * @j_sb_buffer: The first part of the superblock buffer.
764 */
809 struct buffer_head *j_sb_buffer; 765 struct buffer_head *j_sb_buffer;
766
767 /**
768 * @j_superblock: The second part of the superblock buffer.
769 */
810 journal_superblock_t *j_superblock; 770 journal_superblock_t *j_superblock;
811 771
812 /* Version of the superblock format */ 772 /**
773 * @j_format_version: Version of the superblock format.
774 */
813 int j_format_version; 775 int j_format_version;
814 776
815 /* 777 /**
816 * Protect the various scalars in the journal 778 * @j_state_lock: Protect the various scalars in the journal.
817 */ 779 */
818 rwlock_t j_state_lock; 780 rwlock_t j_state_lock;
819 781
820 /* 782 /**
783 * @j_barrier_count:
784 *
821 * Number of processes waiting to create a barrier lock [j_state_lock] 785 * Number of processes waiting to create a barrier lock [j_state_lock]
822 */ 786 */
823 int j_barrier_count; 787 int j_barrier_count;
824 788
825 /* The barrier lock itself */ 789 /**
790 * @j_barrier: The barrier lock itself.
791 */
826 struct mutex j_barrier; 792 struct mutex j_barrier;
827 793
828 /* 794 /**
795 * @j_running_transaction:
796 *
829 * Transactions: The current running transaction... 797 * Transactions: The current running transaction...
830 * [j_state_lock] [caller holding open handle] 798 * [j_state_lock] [caller holding open handle]
831 */ 799 */
832 transaction_t *j_running_transaction; 800 transaction_t *j_running_transaction;
833 801
834 /* 802 /**
803 * @j_committing_transaction:
804 *
835 * the transaction we are pushing to disk 805 * the transaction we are pushing to disk
836 * [j_state_lock] [caller holding open handle] 806 * [j_state_lock] [caller holding open handle]
837 */ 807 */
838 transaction_t *j_committing_transaction; 808 transaction_t *j_committing_transaction;
839 809
840 /* 810 /**
811 * @j_checkpoint_transactions:
812 *
841 * ... and a linked circular list of all transactions waiting for 813 * ... and a linked circular list of all transactions waiting for
842 * checkpointing. [j_list_lock] 814 * checkpointing. [j_list_lock]
843 */ 815 */
844 transaction_t *j_checkpoint_transactions; 816 transaction_t *j_checkpoint_transactions;
845 817
846 /* 818 /**
819 * @j_wait_transaction_locked:
820 *
847 * Wait queue for waiting for a locked transaction to start committing, 821 * Wait queue for waiting for a locked transaction to start committing,
848 * or for a barrier lock to be released 822 * or for a barrier lock to be released.
849 */ 823 */
850 wait_queue_head_t j_wait_transaction_locked; 824 wait_queue_head_t j_wait_transaction_locked;
851 825
852 /* Wait queue for waiting for commit to complete */ 826 /**
827 * @j_wait_done_commit: Wait queue for waiting for commit to complete.
828 */
853 wait_queue_head_t j_wait_done_commit; 829 wait_queue_head_t j_wait_done_commit;
854 830
855 /* Wait queue to trigger commit */ 831 /**
832 * @j_wait_commit: Wait queue to trigger commit.
833 */
856 wait_queue_head_t j_wait_commit; 834 wait_queue_head_t j_wait_commit;
857 835
858 /* Wait queue to wait for updates to complete */ 836 /**
837 * @j_wait_updates: Wait queue to wait for updates to complete.
838 */
859 wait_queue_head_t j_wait_updates; 839 wait_queue_head_t j_wait_updates;
860 840
861 /* Wait queue to wait for reserved buffer credits to drop */ 841 /**
842 * @j_wait_reserved:
843 *
844 * Wait queue to wait for reserved buffer credits to drop.
845 */
862 wait_queue_head_t j_wait_reserved; 846 wait_queue_head_t j_wait_reserved;
863 847
864 /* Semaphore for locking against concurrent checkpoints */ 848 /**
849 * @j_checkpoint_mutex:
850 *
851 * Semaphore for locking against concurrent checkpoints.
852 */
865 struct mutex j_checkpoint_mutex; 853 struct mutex j_checkpoint_mutex;
866 854
867 /* 855 /**
856 * @j_chkpt_bhs:
857 *
868 * List of buffer heads used by the checkpoint routine. This 858 * List of buffer heads used by the checkpoint routine. This
869 * was moved from jbd2_log_do_checkpoint() to reduce stack 859 * was moved from jbd2_log_do_checkpoint() to reduce stack
870 * usage. Access to this array is controlled by the 860 * usage. Access to this array is controlled by the
871 * j_checkpoint_mutex. [j_checkpoint_mutex] 861 * @j_checkpoint_mutex. [j_checkpoint_mutex]
872 */ 862 */
873 struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; 863 struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
874 864
875 /* 865 /**
866 * @j_head:
867 *
876 * Journal head: identifies the first unused block in the journal. 868 * Journal head: identifies the first unused block in the journal.
877 * [j_state_lock] 869 * [j_state_lock]
878 */ 870 */
879 unsigned long j_head; 871 unsigned long j_head;
880 872
881 /* 873 /**
874 * @j_tail:
875 *
882 * Journal tail: identifies the oldest still-used block in the journal. 876 * Journal tail: identifies the oldest still-used block in the journal.
883 * [j_state_lock] 877 * [j_state_lock]
884 */ 878 */
885 unsigned long j_tail; 879 unsigned long j_tail;
886 880
887 /* 881 /**
882 * @j_free:
883 *
888 * Journal free: how many free blocks are there in the journal? 884 * Journal free: how many free blocks are there in the journal?
889 * [j_state_lock] 885 * [j_state_lock]
890 */ 886 */
891 unsigned long j_free; 887 unsigned long j_free;
892 888
893 /* 889 /**
894 * Journal start and end: the block numbers of the first usable block 890 * @j_first:
895 * and one beyond the last usable block in the journal. [j_state_lock] 891 *
892 * The block number of the first usable block in the journal
893 * [j_state_lock].
896 */ 894 */
897 unsigned long j_first; 895 unsigned long j_first;
896
897 /**
898 * @j_last:
899 *
900 * The block number one beyond the last usable block in the journal
901 * [j_state_lock].
902 */
898 unsigned long j_last; 903 unsigned long j_last;
899 904
900 /* 905 /**
901 * Device, blocksize and starting block offset for the location where we 906 * @j_dev: Device where we store the journal.
902 * store the journal.
903 */ 907 */
904 struct block_device *j_dev; 908 struct block_device *j_dev;
909
910 /**
911 * @j_blocksize: Block size for the location where we store the journal.
912 */
905 int j_blocksize; 913 int j_blocksize;
914
915 /**
916 * @j_blk_offset:
917 *
918 * Starting block offset into the device where we store the journal.
919 */
906 unsigned long long j_blk_offset; 920 unsigned long long j_blk_offset;
921
922 /**
923 * @j_devname: Journal device name.
924 */
907 char j_devname[BDEVNAME_SIZE+24]; 925 char j_devname[BDEVNAME_SIZE+24];
908 926
909 /* 927 /**
928 * @j_fs_dev:
929 *
910 * Device which holds the client fs. For internal journal this will be 930 * Device which holds the client fs. For internal journal this will be
911 * equal to j_dev. 931 * equal to j_dev.
912 */ 932 */
913 struct block_device *j_fs_dev; 933 struct block_device *j_fs_dev;
914 934
915 /* Total maximum capacity of the journal region on disk. */ 935 /**
936 * @j_maxlen: Total maximum capacity of the journal region on disk.
937 */
916 unsigned int j_maxlen; 938 unsigned int j_maxlen;
917 939
918 /* Number of buffers reserved from the running transaction */ 940 /**
941 * @j_reserved_credits:
942 *
943 * Number of buffers reserved from the running transaction.
944 */
919 atomic_t j_reserved_credits; 945 atomic_t j_reserved_credits;
920 946
921 /* 947 /**
922 * Protects the buffer lists and internal buffer state. 948 * @j_list_lock: Protects the buffer lists and internal buffer state.
923 */ 949 */
924 spinlock_t j_list_lock; 950 spinlock_t j_list_lock;
925 951
926 /* Optional inode where we store the journal. If present, all */ 952 /**
927 /* journal block numbers are mapped into this inode via */ 953 * @j_inode:
928 /* bmap(). */ 954 *
955 * Optional inode where we store the journal. If present, all
956 * journal block numbers are mapped into this inode via bmap().
957 */
929 struct inode *j_inode; 958 struct inode *j_inode;
930 959
931 /* 960 /**
961 * @j_tail_sequence:
962 *
932 * Sequence number of the oldest transaction in the log [j_state_lock] 963 * Sequence number of the oldest transaction in the log [j_state_lock]
933 */ 964 */
934 tid_t j_tail_sequence; 965 tid_t j_tail_sequence;
935 966
936 /* 967 /**
968 * @j_transaction_sequence:
969 *
937 * Sequence number of the next transaction to grant [j_state_lock] 970 * Sequence number of the next transaction to grant [j_state_lock]
938 */ 971 */
939 tid_t j_transaction_sequence; 972 tid_t j_transaction_sequence;
940 973
941 /* 974 /**
975 * @j_commit_sequence:
976 *
942 * Sequence number of the most recently committed transaction 977 * Sequence number of the most recently committed transaction
943 * [j_state_lock]. 978 * [j_state_lock].
944 */ 979 */
945 tid_t j_commit_sequence; 980 tid_t j_commit_sequence;
946 981
947 /* 982 /**
983 * @j_commit_request:
984 *
948 * Sequence number of the most recent transaction wanting commit 985 * Sequence number of the most recent transaction wanting commit
949 * [j_state_lock] 986 * [j_state_lock]
950 */ 987 */
951 tid_t j_commit_request; 988 tid_t j_commit_request;
952 989
953 /* 990 /**
991 * @j_uuid:
992 *
954 * Journal uuid: identifies the object (filesystem, LVM volume etc) 993 * Journal uuid: identifies the object (filesystem, LVM volume etc)
955 * backed by this journal. This will eventually be replaced by an array 994 * backed by this journal. This will eventually be replaced by an array
956 * of uuids, allowing us to index multiple devices within a single 995 * of uuids, allowing us to index multiple devices within a single
@@ -958,85 +997,151 @@ struct journal_s
958 */ 997 */
959 __u8 j_uuid[16]; 998 __u8 j_uuid[16];
960 999
961 /* Pointer to the current commit thread for this journal */ 1000 /**
1001 * @j_task: Pointer to the current commit thread for this journal.
1002 */
962 struct task_struct *j_task; 1003 struct task_struct *j_task;
963 1004
964 /* 1005 /**
1006 * @j_max_transaction_buffers:
1007 *
965 * Maximum number of metadata buffers to allow in a single compound 1008 * Maximum number of metadata buffers to allow in a single compound
966 * commit transaction 1009 * commit transaction.
967 */ 1010 */
968 int j_max_transaction_buffers; 1011 int j_max_transaction_buffers;
969 1012
970 /* 1013 /**
1014 * @j_commit_interval:
1015 *
971 * What is the maximum transaction lifetime before we begin a commit? 1016 * What is the maximum transaction lifetime before we begin a commit?
972 */ 1017 */
973 unsigned long j_commit_interval; 1018 unsigned long j_commit_interval;
974 1019
975 /* The timer used to wakeup the commit thread: */ 1020 /**
1021 * @j_commit_timer: The timer used to wakeup the commit thread.
1022 */
976 struct timer_list j_commit_timer; 1023 struct timer_list j_commit_timer;
977 1024
978 /* 1025 /**
979 * The revoke table: maintains the list of revoked blocks in the 1026 * @j_revoke_lock: Protect the revoke table.
980 * current transaction. [j_revoke_lock]
981 */ 1027 */
982 spinlock_t j_revoke_lock; 1028 spinlock_t j_revoke_lock;
1029
1030 /**
1031 * @j_revoke:
1032 *
1033 * The revoke table - maintains the list of revoked blocks in the
1034 * current transaction.
1035 */
983 struct jbd2_revoke_table_s *j_revoke; 1036 struct jbd2_revoke_table_s *j_revoke;
1037
1038 /**
1039 * @j_revoke_table: Alternate revoke tables for j_revoke.
1040 */
984 struct jbd2_revoke_table_s *j_revoke_table[2]; 1041 struct jbd2_revoke_table_s *j_revoke_table[2];
985 1042
986 /* 1043 /**
987 * array of bhs for jbd2_journal_commit_transaction 1044 * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction.
988 */ 1045 */
989 struct buffer_head **j_wbuf; 1046 struct buffer_head **j_wbuf;
1047
1048 /**
1049 * @j_wbufsize:
1050 *
1051 * Size of @j_wbuf array.
1052 */
990 int j_wbufsize; 1053 int j_wbufsize;
991 1054
992 /* 1055 /**
993 * this is the pid of hte last person to run a synchronous operation 1056 * @j_last_sync_writer:
994 * through the journal 1057 *
1058 * The pid of the last person to run a synchronous operation
1059 * through the journal.
995 */ 1060 */
996 pid_t j_last_sync_writer; 1061 pid_t j_last_sync_writer;
997 1062
998 /* 1063 /**
999 * the average amount of time in nanoseconds it takes to commit a 1064 * @j_average_commit_time:
1065 *
1066 * The average amount of time in nanoseconds it takes to commit a
1000 * transaction to disk. [j_state_lock] 1067 * transaction to disk. [j_state_lock]
1001 */ 1068 */
1002 u64 j_average_commit_time; 1069 u64 j_average_commit_time;
1003 1070
1004 /* 1071 /**
1005 * minimum and maximum times that we should wait for 1072 * @j_min_batch_time:
1006 * additional filesystem operations to get batched into a 1073 *
1007 * synchronous handle in microseconds 1074 * Minimum time that we should wait for additional filesystem operations
1075 * to get batched into a synchronous handle in microseconds.
1008 */ 1076 */
1009 u32 j_min_batch_time; 1077 u32 j_min_batch_time;
1078
1079 /**
1080 * @j_max_batch_time:
1081 *
1082 * Maximum time that we should wait for additional filesystem operations
1083 * to get batched into a synchronous handle in microseconds.
1084 */
1010 u32 j_max_batch_time; 1085 u32 j_max_batch_time;
1011 1086
1012 /* This function is called when a transaction is closed */ 1087 /**
1088 * @j_commit_callback:
1089 *
1090 * This function is called when a transaction is closed.
1091 */
1013 void (*j_commit_callback)(journal_t *, 1092 void (*j_commit_callback)(journal_t *,
1014 transaction_t *); 1093 transaction_t *);
1015 1094
1016 /* 1095 /*
1017 * Journal statistics 1096 * Journal statistics
1018 */ 1097 */
1098
1099 /**
1100 * @j_history_lock: Protect the transactions statistics history.
1101 */
1019 spinlock_t j_history_lock; 1102 spinlock_t j_history_lock;
1103
1104 /**
1105 * @j_proc_entry: procfs entry for the jbd statistics directory.
1106 */
1020 struct proc_dir_entry *j_proc_entry; 1107 struct proc_dir_entry *j_proc_entry;
1108
1109 /**
1110 * @j_stats: Overall statistics.
1111 */
1021 struct transaction_stats_s j_stats; 1112 struct transaction_stats_s j_stats;
1022 1113
1023 /* Failed journal commit ID */ 1114 /**
1115 * @j_failed_commit: Failed journal commit ID.
1116 */
1024 unsigned int j_failed_commit; 1117 unsigned int j_failed_commit;
1025 1118
1026 /* 1119 /**
1120 * @j_private:
1121 *
1027 * An opaque pointer to fs-private information. ext3 puts its 1122 * An opaque pointer to fs-private information. ext3 puts its
1028 * superblock pointer here 1123 * superblock pointer here.
1029 */ 1124 */
1030 void *j_private; 1125 void *j_private;
1031 1126
1032 /* Reference to checksum algorithm driver via cryptoapi */ 1127 /**
1128 * @j_chksum_driver:
1129 *
1130 * Reference to checksum algorithm driver via cryptoapi.
1131 */
1033 struct crypto_shash *j_chksum_driver; 1132 struct crypto_shash *j_chksum_driver;
1034 1133
1035 /* Precomputed journal UUID checksum for seeding other checksums */ 1134 /**
1135 * @j_csum_seed:
1136 *
1137 * Precomputed journal UUID checksum for seeding other checksums.
1138 */
1036 __u32 j_csum_seed; 1139 __u32 j_csum_seed;
1037 1140
1038#ifdef CONFIG_DEBUG_LOCK_ALLOC 1141#ifdef CONFIG_DEBUG_LOCK_ALLOC
1039 /* 1142 /**
1143 * @j_trans_commit_map:
1144 *
1040 * Lockdep entity to track transaction commit dependencies. Handles 1145 * Lockdep entity to track transaction commit dependencies. Handles
1041 * hold this "lock" for read, when we wait for commit, we acquire the 1146 * hold this "lock" for read, when we wait for commit, we acquire the
1042 * "lock" for writing. This matches the properties of jbd2 journalling 1147 * "lock" for writing. This matches the properties of jbd2 journalling
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index e0340ca08d98..b6a29c126cc4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -393,7 +393,7 @@ extern bool ____wrong_branch_error(void);
393 branch = !arch_static_branch_jump(&(x)->key, true); \ 393 branch = !arch_static_branch_jump(&(x)->key, true); \
394 else \ 394 else \
395 branch = ____wrong_branch_error(); \ 395 branch = ____wrong_branch_error(); \
396 branch; \ 396 likely(branch); \
397}) 397})
398 398
399#define static_branch_unlikely(x) \ 399#define static_branch_unlikely(x) \
@@ -405,7 +405,7 @@ extern bool ____wrong_branch_error(void);
405 branch = arch_static_branch(&(x)->key, false); \ 405 branch = arch_static_branch(&(x)->key, false); \
406 else \ 406 else \
407 branch = ____wrong_branch_error(); \ 407 branch = ____wrong_branch_error(); \
408 branch; \ 408 unlikely(branch); \
409}) 409})
410 410
411#else /* !HAVE_JUMP_LABEL */ 411#else /* !HAVE_JUMP_LABEL */
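
The static_branch_likely()/static_branch_unlikely() change above only wraps the computed branch value in likely()/unlikely(), which are in essence __builtin_expect() hints. A self-contained userspace sketch of the same idea; names here are illustrative, not taken from jump_label.h:

#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int parse(int value)
{
	if (unlikely(value < 0)) {	/* error path kept off the hot layout */
		fprintf(stderr, "bad value %d\n", value);
		return -1;
	}
	return value * 2;
}

int main(void)
{
	printf("%d\n", parse(21));
	return 0;
}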
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index bd118a6c60cb..657a83b943f0 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -9,6 +9,10 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/stddef.h> 11#include <linux/stddef.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14
15#include <asm/sections.h>
12 16
13#define KSYM_NAME_LEN 128 17#define KSYM_NAME_LEN 128
14#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 18#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
@@ -16,6 +20,56 @@
16 20
17struct module; 21struct module;
18 22
23static inline int is_kernel_inittext(unsigned long addr)
24{
25 if (addr >= (unsigned long)_sinittext
26 && addr <= (unsigned long)_einittext)
27 return 1;
28 return 0;
29}
30
31static inline int is_kernel_text(unsigned long addr)
32{
33 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
34 arch_is_kernel_text(addr))
35 return 1;
36 return in_gate_area_no_mm(addr);
37}
38
39static inline int is_kernel(unsigned long addr)
40{
41 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
42 return 1;
43 return in_gate_area_no_mm(addr);
44}
45
46static inline int is_ksym_addr(unsigned long addr)
47{
48 if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
49 return is_kernel(addr);
50
51 return is_kernel_text(addr) || is_kernel_inittext(addr);
52}
53
54static inline void *dereference_symbol_descriptor(void *ptr)
55{
56#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR
57 struct module *mod;
58
59 ptr = dereference_kernel_function_descriptor(ptr);
60 if (is_ksym_addr((unsigned long)ptr))
61 return ptr;
62
63 preempt_disable();
64 mod = __module_address((unsigned long)ptr);
65 preempt_enable();
66
67 if (mod)
68 ptr = dereference_module_function_descriptor(mod, ptr);
69#endif
70 return ptr;
71}
72
19#ifdef CONFIG_KALLSYMS 73#ifdef CONFIG_KALLSYMS
20/* Lookup the address for a symbol. Returns 0 if not found. */ 74/* Lookup the address for a symbol. Returns 0 if not found. */
21unsigned long kallsyms_lookup_name(const char *name); 75unsigned long kallsyms_lookup_name(const char *name);
@@ -40,9 +94,6 @@ extern int sprint_symbol(char *buffer, unsigned long address);
40extern int sprint_symbol_no_offset(char *buffer, unsigned long address); 94extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
41extern int sprint_backtrace(char *buffer, unsigned long address); 95extern int sprint_backtrace(char *buffer, unsigned long address);
42 96
43/* Look up a kernel symbol and print it to the kernel messages. */
44extern void __print_symbol(const char *fmt, unsigned long address);
45
46int lookup_symbol_name(unsigned long addr, char *symname); 97int lookup_symbol_name(unsigned long addr, char *symname);
47int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); 98int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
48 99
@@ -112,26 +163,11 @@ static inline int kallsyms_show_value(void)
112 return false; 163 return false;
113} 164}
114 165
115/* Stupid that this does nothing, but I didn't create this mess. */
116#define __print_symbol(fmt, addr)
117#endif /*CONFIG_KALLSYMS*/ 166#endif /*CONFIG_KALLSYMS*/
118 167
119/* This macro allows us to keep printk typechecking */
120static __printf(1, 2)
121void __check_printsym_format(const char *fmt, ...)
122{
123}
124
125static inline void print_symbol(const char *fmt, unsigned long addr)
126{
127 __check_printsym_format(fmt, "");
128 __print_symbol(fmt, (unsigned long)
129 __builtin_extract_return_addr((void *)addr));
130}
131
132static inline void print_ip_sym(unsigned long ip) 168static inline void print_ip_sym(unsigned long ip)
133{ 169{
134 printk("[<%p>] %pS\n", (void *) ip, (void *) ip); 170 printk("[<%px>] %pS\n", (void *) ip, (void *) ip);
135} 171}
136 172
137#endif /*_LINUX_KALLSYMS_H*/ 173#endif /*_LINUX_KALLSYMS_H*/
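
The new is_kernel_text()/is_kernel_inittext()/is_ksym_addr() helpers are plain range checks against the linker-provided section symbols (plus arch_is_kernel_text() and gate-area hooks omitted here). A self-contained sketch of the same range test, using stand-in variables instead of the real _stext/_etext symbols from asm/sections.h:

#include <stdio.h>

/* Stand-ins for the linker-provided section symbols. */
static char fake_text[64];
static char *_stext = fake_text;
static char *_etext = fake_text + 32;
static char *_sinittext = fake_text + 32;
static char *_einittext = fake_text + 64;

static int is_kernel_text(unsigned long addr)
{
	/* arch_is_kernel_text()/in_gate_area_no_mm() checks omitted */
	return addr >= (unsigned long)_stext && addr <= (unsigned long)_etext;
}

static int is_kernel_inittext(unsigned long addr)
{
	return addr >= (unsigned long)_sinittext &&
	       addr <= (unsigned long)_einittext;
}

int main(void)
{
	unsigned long a = (unsigned long)(fake_text + 10);
	unsigned long b = (unsigned long)(fake_text + 40);

	printf("a: text=%d inittext=%d\n", is_kernel_text(a), is_kernel_inittext(a));
	printf("b: text=%d inittext=%d\n", is_kernel_text(b), is_kernel_inittext(b));
	return 0;
}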
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index e3eb834c9a35..adc13474a53b 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -11,8 +11,6 @@ struct task_struct;
11 11
12#ifdef CONFIG_KASAN 12#ifdef CONFIG_KASAN
13 13
14#define KASAN_SHADOW_SCALE_SHIFT 3
15
16#include <asm/kasan.h> 14#include <asm/kasan.h>
17#include <asm/pgtable.h> 15#include <asm/pgtable.h>
18 16
@@ -56,14 +54,14 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
56void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); 54void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
57 55
58void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); 56void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
59void kasan_kfree_large(const void *ptr); 57void kasan_kfree_large(void *ptr, unsigned long ip);
60void kasan_poison_kfree(void *ptr); 58void kasan_poison_kfree(void *ptr, unsigned long ip);
61void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, 59void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
62 gfp_t flags); 60 gfp_t flags);
63void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); 61void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
64 62
65void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); 63void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
66bool kasan_slab_free(struct kmem_cache *s, void *object); 64bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
67 65
68struct kasan_cache { 66struct kasan_cache {
69 int alloc_meta_offset; 67 int alloc_meta_offset;
@@ -108,8 +106,8 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
108 const void *object) {} 106 const void *object) {}
109 107
110static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} 108static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
111static inline void kasan_kfree_large(const void *ptr) {} 109static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
112static inline void kasan_poison_kfree(void *ptr) {} 110static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
113static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, 111static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
114 size_t size, gfp_t flags) {} 112 size_t size, gfp_t flags) {}
115static inline void kasan_krealloc(const void *object, size_t new_size, 113static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -117,7 +115,8 @@ static inline void kasan_krealloc(const void *object, size_t new_size,
117 115
118static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, 116static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
119 gfp_t flags) {} 117 gfp_t flags) {}
120static inline bool kasan_slab_free(struct kmem_cache *s, void *object) 118static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
119 unsigned long ip)
121{ 120{
122 return false; 121 return false;
123} 122}
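
The kasan_kfree_large()/kasan_poison_kfree()/kasan_slab_free() prototypes above gain an unsigned long ip argument, presumably the caller address used in reports; in-kernel callers typically pass something like _RET_IP_, which boils down to __builtin_return_address(0). A hedged userspace sketch of capturing such an address (the function names below are made up):

#include <stdio.h>

static void report_free(void *ptr, unsigned long ip)
{
	printf("free of %p called from %#lx\n", ptr, ip);
}

__attribute__((noinline))
static void my_free(void *ptr)
{
	/* In the kernel this argument would be _RET_IP_. */
	report_free(ptr, (unsigned long)__builtin_return_address(0));
}

int main(void)
{
	int x = 0;
	my_free(&x);
	return 0;
}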
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 7b45959ebd92..e251533a5939 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -113,7 +113,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
113 * array is a part of the structure and the fifo type where the array is 113 * array is a part of the structure and the fifo type where the array is
114 * outside of the fifo structure. 114 * outside of the fifo structure.
115 */ 115 */
116#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo)) 116#define __is_kfifo_ptr(fifo) \
117 (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type))))
117 118
118/** 119/**
119 * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object 120 * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
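
The __is_kfifo_ptr() fix compares against the size of the pointer-flavoured fifo type rather than the bare struct __kfifo: an element type with stricter alignment can pad the declared object, so the bare-header comparison misclassifies a pointer fifo as in-place. A self-contained mock-up of that effect; the structs and macros below are illustrative, not the real kfifo definitions:

#include <stdio.h>
#include <stdint.h>

struct header {			/* stand-in for struct __kfifo */
	unsigned int in, out, mask, esize;
	void *data;
};

/* An element type whose alignment exceeds that of the header. */
typedef struct { uint64_t v[2]; } __attribute__((aligned(16))) vec128;

#define DECLARE_PTR_FIFO(name, type) \
	struct { struct header hdr; type buf[0]; } name

#define DECLARE_INPLACE_FIFO(name, type, n) \
	struct { struct header hdr; type buf[n]; } name

int main(void)
{
	DECLARE_PTR_FIFO(p, vec128);
	DECLARE_INPLACE_FIFO(q, vec128, 8);

	/* Old-style check compared sizeof(p) with the bare header and fails here. */
	printf("ptr fifo: sizeof=%zu, bare header=%zu\n",
	       sizeof(p), sizeof(struct header));
	/* New-style check compares like with like. */
	printf("in-place fifo: sizeof=%zu, ptr flavour=%zu\n",
	       sizeof(q), sizeof(p));
	return 0;
}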
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e0a6205caa71..7f6f93c3df9c 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * kobject.h - generic kernel object infrastructure. 3 * kobject.h - generic kernel object infrastructure.
3 * 4 *
@@ -6,8 +7,6 @@
6 * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> 7 * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
7 * Copyright (c) 2006-2008 Novell Inc. 8 * Copyright (c) 2006-2008 Novell Inc.
8 * 9 *
9 * This file is released under the GPLv2.
10 *
11 * Please read Documentation/kobject.txt before using the kobject 10 * Please read Documentation/kobject.txt before using the kobject
12 * interface, ESPECIALLY the parts about reference counts and object 11 * interface, ESPECIALLY the parts about reference counts and object
13 * destructors. 12 * destructors.
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index df32d2508290..069aa2ebef90 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* Kernel object name space definitions 2/* Kernel object name space definitions
2 * 3 *
3 * Copyright (c) 2002-2003 Patrick Mochel 4 * Copyright (c) 2002-2003 Patrick Mochel
@@ -7,8 +8,6 @@
7 * 8 *
8 * Split from kobject.h by David Howells (dhowells@redhat.com) 9 * Split from kobject.h by David Howells (dhowells@redhat.com)
9 * 10 *
10 * This file is released under the GPLv2.
11 *
12 * Please read Documentation/kobject.txt before using the kobject 11 * Please read Documentation/kobject.txt before using the kobject
13 * interface, ESPECIALLY the parts about reference counts and object 12 * interface, ESPECIALLY the parts about reference counts and object
14 * destructors. 13 * destructors.
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index e97966d1fb8d..700efaa9e115 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -121,6 +121,8 @@ extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
121static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, 121static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
122 bool state) 122 bool state)
123{ 123{
124 if (!fled_cdev)
125 return -EINVAL;
124 return fled_cdev->ops->strobe_set(fled_cdev, state); 126 return fled_cdev->ops->strobe_set(fled_cdev, state);
125} 127}
126 128
@@ -136,6 +138,8 @@ static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
136static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev, 138static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
137 bool *state) 139 bool *state)
138{ 140{
141 if (!fled_cdev)
142 return -EINVAL;
139 if (fled_cdev->ops->strobe_get) 143 if (fled_cdev->ops->strobe_get)
140 return fled_cdev->ops->strobe_get(fled_cdev, state); 144 return fled_cdev->ops->strobe_get(fled_cdev, state);
141 145
diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h
index 27ba06e5d117..90ed4ebfa692 100644
--- a/include/linux/libfdt.h
+++ b/include/linux/libfdt.h
@@ -3,7 +3,6 @@
3#define _INCLUDE_LIBFDT_H_ 3#define _INCLUDE_LIBFDT_H_
4 4
5#include <linux/libfdt_env.h> 5#include <linux/libfdt_env.h>
6#include "../../scripts/dtc/libfdt/fdt.h"
7#include "../../scripts/dtc/libfdt/libfdt.h" 6#include "../../scripts/dtc/libfdt/libfdt.h"
8 7
9#endif /* _INCLUDE_LIBFDT_H_ */ 8#endif /* _INCLUDE_LIBFDT_H_ */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index f8109ddb5ef1..ff855ed965fb 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -47,6 +47,17 @@ enum {
47 47
48 /* region flag indicating to direct-map persistent memory by default */ 48 /* region flag indicating to direct-map persistent memory by default */
49 ND_REGION_PAGEMAP = 0, 49 ND_REGION_PAGEMAP = 0,
50 /*
51 * Platform ensures entire CPU store data path is flushed to pmem on
52 * system power loss.
53 */
54 ND_REGION_PERSIST_CACHE = 1,
55 /*
56 * Platform provides mechanisms to automatically flush outstanding
 57 * write data from memory controller to pmem on system power loss.
58 * (ADR)
59 */
60 ND_REGION_PERSIST_MEMCTRL = 2,
50 61
51 /* mark newly adjusted resources as requiring a label update */ 62 /* mark newly adjusted resources as requiring a label update */
52 DPA_RESOURCE_ADJUSTED = 1 << 0, 63 DPA_RESOURCE_ADJUSTED = 1 << 0,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2d1d9de06728..7f4b60abdf27 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,10 +50,7 @@ struct nvm_id;
50struct nvm_dev; 50struct nvm_dev;
51struct nvm_tgt_dev; 51struct nvm_tgt_dev;
52 52
53typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
54typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); 53typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
55typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
56 nvm_l2p_update_fn *, void *);
57typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); 54typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
58typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); 55typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
59typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); 56typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -66,7 +63,6 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
66 63
67struct nvm_dev_ops { 64struct nvm_dev_ops {
68 nvm_id_fn *identity; 65 nvm_id_fn *identity;
69 nvm_get_l2p_tbl_fn *get_l2p_tbl;
70 nvm_op_bb_tbl_fn *get_bb_tbl; 66 nvm_op_bb_tbl_fn *get_bb_tbl;
71 nvm_op_set_bb_fn *set_bb_tbl; 67 nvm_op_set_bb_fn *set_bb_tbl;
72 68
@@ -112,8 +108,6 @@ enum {
112 NVM_RSP_WARN_HIGHECC = 0x4700, 108 NVM_RSP_WARN_HIGHECC = 0x4700,
113 109
114 /* Device opcodes */ 110 /* Device opcodes */
115 NVM_OP_HBREAD = 0x02,
116 NVM_OP_HBWRITE = 0x81,
117 NVM_OP_PWRITE = 0x91, 111 NVM_OP_PWRITE = 0x91,
118 NVM_OP_PREAD = 0x92, 112 NVM_OP_PREAD = 0x92,
119 NVM_OP_ERASE = 0x90, 113 NVM_OP_ERASE = 0x90,
@@ -165,12 +159,16 @@ struct nvm_id_group {
165 u8 fmtype; 159 u8 fmtype;
166 u8 num_ch; 160 u8 num_ch;
167 u8 num_lun; 161 u8 num_lun;
168 u8 num_pln; 162 u16 num_chk;
169 u16 num_blk; 163 u16 clba;
170 u16 num_pg;
171 u16 fpg_sz;
172 u16 csecs; 164 u16 csecs;
173 u16 sos; 165 u16 sos;
166
167 u16 ws_min;
168 u16 ws_opt;
169 u16 ws_seq;
170 u16 ws_per_chk;
171
174 u32 trdt; 172 u32 trdt;
175 u32 trdm; 173 u32 trdm;
176 u32 tprt; 174 u32 tprt;
@@ -181,7 +179,10 @@ struct nvm_id_group {
181 u32 mccap; 179 u32 mccap;
182 u16 cpar; 180 u16 cpar;
183 181
184 struct nvm_id_lp_tbl lptbl; 182 /* 1.2 compatibility */
183 u8 num_pln;
184 u16 num_pg;
185 u16 fpg_sz;
185}; 186};
186 187
187struct nvm_addr_format { 188struct nvm_addr_format {
@@ -217,6 +218,10 @@ struct nvm_target {
217 218
218#define ADDR_EMPTY (~0ULL) 219#define ADDR_EMPTY (~0ULL)
219 220
221#define NVM_TARGET_DEFAULT_OP (101)
222#define NVM_TARGET_MIN_OP (3)
223#define NVM_TARGET_MAX_OP (80)
224
220#define NVM_VERSION_MAJOR 1 225#define NVM_VERSION_MAJOR 1
221#define NVM_VERSION_MINOR 0 226#define NVM_VERSION_MINOR 0
222#define NVM_VERSION_PATCH 0 227#define NVM_VERSION_PATCH 0
@@ -239,7 +244,6 @@ struct nvm_rq {
239 void *meta_list; 244 void *meta_list;
240 dma_addr_t dma_meta_list; 245 dma_addr_t dma_meta_list;
241 246
242 struct completion *wait;
243 nvm_end_io_fn *end_io; 247 nvm_end_io_fn *end_io;
244 248
245 uint8_t opcode; 249 uint8_t opcode;
@@ -268,31 +272,38 @@ enum {
268 NVM_BLK_ST_BAD = 0x8, /* Bad block */ 272 NVM_BLK_ST_BAD = 0x8, /* Bad block */
269}; 273};
270 274
275
271/* Device generic information */ 276/* Device generic information */
272struct nvm_geo { 277struct nvm_geo {
278 /* generic geometry */
273 int nr_chnls; 279 int nr_chnls;
274 int nr_luns; 280 int all_luns; /* across channels */
275 int luns_per_chnl; /* -1 if channels are not symmetric */ 281 int nr_luns; /* per channel */
276 int nr_planes; 282 int nr_chks; /* per lun */
277 int sec_per_pg; /* only sectors for a single page */ 283
278 int pgs_per_blk;
279 int blks_per_lun;
280 int fpg_size;
281 int pfpg_size; /* size of buffer if all pages are to be read */
282 int sec_size; 284 int sec_size;
283 int oob_size; 285 int oob_size;
284 int mccap; 286 int mccap;
285 struct nvm_addr_format ppaf;
286 287
287 /* Calculated/Cached values. These do not reflect the actual usable 288 int sec_per_chk;
288 * blocks at run-time. 289 int sec_per_lun;
289 */ 290
291 int ws_min;
292 int ws_opt;
293 int ws_seq;
294 int ws_per_chk;
295
290 int max_rq_size; 296 int max_rq_size;
291 int plane_mode; /* drive device in single, double or quad mode */
292 297
298 int op;
299
300 struct nvm_addr_format ppaf;
301
302 /* Legacy 1.2 specific geometry */
303 int plane_mode; /* drive device in single, double or quad mode */
304 int nr_planes;
305 int sec_per_pg; /* only sectors for a single page */
293 int sec_per_pl; /* all sectors across planes */ 306 int sec_per_pl; /* all sectors across planes */
294 int sec_per_blk;
295 int sec_per_lun;
296}; 307};
297 308
298/* sub-device structure */ 309/* sub-device structure */
@@ -320,10 +331,6 @@ struct nvm_dev {
320 /* Device information */ 331 /* Device information */
321 struct nvm_geo geo; 332 struct nvm_geo geo;
322 333
323 /* lower page table */
324 int lps_per_blk;
325 int *lptbl;
326
327 unsigned long total_secs; 334 unsigned long total_secs;
328 335
329 unsigned long *lun_map; 336 unsigned long *lun_map;
@@ -346,36 +353,6 @@ struct nvm_dev {
346 struct list_head targets; 353 struct list_head targets;
347}; 354};
348 355
349static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
350 u64 pba)
351{
352 struct ppa_addr l;
353 int secs, pgs, blks, luns;
354 sector_t ppa = pba;
355
356 l.ppa = 0;
357
358 div_u64_rem(ppa, geo->sec_per_pg, &secs);
359 l.g.sec = secs;
360
361 sector_div(ppa, geo->sec_per_pg);
362 div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
363 l.g.pg = pgs;
364
365 sector_div(ppa, geo->pgs_per_blk);
366 div_u64_rem(ppa, geo->blks_per_lun, &blks);
367 l.g.blk = blks;
368
369 sector_div(ppa, geo->blks_per_lun);
370 div_u64_rem(ppa, geo->luns_per_chnl, &luns);
371 l.g.lun = luns;
372
373 sector_div(ppa, geo->luns_per_chnl);
374 l.g.ch = ppa;
375
376 return l;
377}
378
379static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, 356static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
380 struct ppa_addr r) 357 struct ppa_addr r)
381{ 358{
@@ -418,25 +395,6 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
418 return l; 395 return l;
419} 396}
420 397
421static inline int ppa_empty(struct ppa_addr ppa_addr)
422{
423 return (ppa_addr.ppa == ADDR_EMPTY);
424}
425
426static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
427{
428 ppa_addr->ppa = ADDR_EMPTY;
429}
430
431static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
432{
433 if (ppa_empty(ppa1) || ppa_empty(ppa2))
434 return 0;
435
436 return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
437 (ppa1.g.blk == ppa2.g.blk));
438}
439
440typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); 398typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
441typedef sector_t (nvm_tgt_capacity_fn)(void *); 399typedef sector_t (nvm_tgt_capacity_fn)(void *);
442typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, 400typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -481,17 +439,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
481extern int nvm_max_phys_sects(struct nvm_tgt_dev *); 439extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
482extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); 440extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
483extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); 441extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
484extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
485extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
486 void *);
487extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
488extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
489extern void nvm_end_io(struct nvm_rq *); 442extern void nvm_end_io(struct nvm_rq *);
490extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); 443extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
491extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); 444extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
492 445
493extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
494
495#else /* CONFIG_NVM */ 446#else /* CONFIG_NVM */
496struct nvm_dev_ops; 447struct nvm_dev_ops;
497 448
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index fc5c1be3f6f4..4754f01c1abb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -40,7 +40,6 @@
40 * @new_func: pointer to the patched function code 40 * @new_func: pointer to the patched function code
41 * @old_sympos: a hint indicating which symbol position the old function 41 * @old_sympos: a hint indicating which symbol position the old function
42 * can be found (optional) 42 * can be found (optional)
43 * @immediate: patch the func immediately, bypassing safety mechanisms
44 * @old_addr: the address of the function being patched 43 * @old_addr: the address of the function being patched
45 * @kobj: kobject for sysfs resources 44 * @kobj: kobject for sysfs resources
46 * @stack_node: list node for klp_ops func_stack list 45 * @stack_node: list node for klp_ops func_stack list
@@ -76,7 +75,6 @@ struct klp_func {
76 * in kallsyms for the given object is used. 75 * in kallsyms for the given object is used.
77 */ 76 */
78 unsigned long old_sympos; 77 unsigned long old_sympos;
79 bool immediate;
80 78
81 /* internal */ 79 /* internal */
82 unsigned long old_addr; 80 unsigned long old_addr;
@@ -137,7 +135,6 @@ struct klp_object {
137 * struct klp_patch - patch structure for live patching 135 * struct klp_patch - patch structure for live patching
138 * @mod: reference to the live patch module 136 * @mod: reference to the live patch module
139 * @objs: object entries for kernel objects to be patched 137 * @objs: object entries for kernel objects to be patched
140 * @immediate: patch all funcs immediately, bypassing safety mechanisms
141 * @list: list node for global list of registered patches 138 * @list: list node for global list of registered patches
142 * @kobj: kobject for sysfs resources 139 * @kobj: kobject for sysfs resources
143 * @enabled: the patch is enabled (but operation may be incomplete) 140 * @enabled: the patch is enabled (but operation may be incomplete)
@@ -147,7 +144,6 @@ struct klp_patch {
147 /* external */ 144 /* external */
148 struct module *mod; 145 struct module *mod;
149 struct klp_object *objs; 146 struct klp_object *objs;
150 bool immediate;
151 147
152 /* internal */ 148 /* internal */
153 struct list_head list; 149 struct list_head list;
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index d7d313fb9cd4..4fd95dbeb52f 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -17,6 +17,7 @@
17#include <net/ipv6.h> 17#include <net/ipv6.h>
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/kref.h> 19#include <linux/kref.h>
20#include <linux/refcount.h>
20#include <linux/utsname.h> 21#include <linux/utsname.h>
21#include <linux/lockd/bind.h> 22#include <linux/lockd/bind.h>
22#include <linux/lockd/xdr.h> 23#include <linux/lockd/xdr.h>
@@ -58,7 +59,7 @@ struct nlm_host {
58 u32 h_state; /* pseudo-state counter */ 59 u32 h_state; /* pseudo-state counter */
59 u32 h_nsmstate; /* true remote NSM state */ 60 u32 h_nsmstate; /* true remote NSM state */
60 u32 h_pidcount; /* Pseudopids */ 61 u32 h_pidcount; /* Pseudopids */
61 atomic_t h_count; /* reference count */ 62 refcount_t h_count; /* reference count */
62 struct mutex h_mutex; /* mutex for pmap binding */ 63 struct mutex h_mutex; /* mutex for pmap binding */
63 unsigned long h_nextrebind; /* next portmap call */ 64 unsigned long h_nextrebind; /* next portmap call */
64 unsigned long h_expires; /* eligible for GC */ 65 unsigned long h_expires; /* eligible for GC */
@@ -83,7 +84,7 @@ struct nlm_host {
83 84
84struct nsm_handle { 85struct nsm_handle {
85 struct list_head sm_link; 86 struct list_head sm_link;
86 atomic_t sm_count; 87 refcount_t sm_count;
87 char *sm_mon_name; 88 char *sm_mon_name;
88 char *sm_name; 89 char *sm_name;
89 struct sockaddr_storage sm_addr; 90 struct sockaddr_storage sm_addr;
@@ -122,7 +123,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
122 */ 123 */
123struct nlm_lockowner { 124struct nlm_lockowner {
124 struct list_head list; 125 struct list_head list;
125 atomic_t count; 126 refcount_t count;
126 127
127 struct nlm_host *host; 128 struct nlm_host *host;
128 fl_owner_t owner; 129 fl_owner_t owner;
@@ -136,7 +137,7 @@ struct nlm_wait;
136 */ 137 */
137#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) 138#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
138struct nlm_rqst { 139struct nlm_rqst {
139 atomic_t a_count; 140 refcount_t a_count;
140 unsigned int a_flags; /* initial RPC task flags */ 141 unsigned int a_flags; /* initial RPC task flags */
141 struct nlm_host * a_host; /* host handle */ 142 struct nlm_host * a_host; /* host handle */
142 struct nlm_args a_args; /* arguments */ 143 struct nlm_args a_args; /* arguments */
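
The lockd hunk converts plain atomic_t reference counts to refcount_t, whose value saturates instead of wrapping, so an over-increment degrades into a warning and a leak rather than a potential use-after-free. A toy, single-threaded illustration of that saturation semantic; this is not the kernel's refcount_t implementation:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct refcount { unsigned int refs; };

static void refcount_set(struct refcount *r, unsigned int n)
{
	r->refs = n;
}

static void refcount_inc(struct refcount *r)
{
	if (r->refs == UINT_MAX) {	/* already saturated: warn, never wrap */
		fprintf(stderr, "refcount: saturation detected\n");
		return;
	}
	r->refs++;
}

static bool refcount_dec_and_test(struct refcount *r)
{
	if (r->refs == UINT_MAX)	/* saturated objects are leaked, not freed */
		return false;
	return --r->refs == 0;
}

int main(void)
{
	struct refcount h;

	refcount_set(&h, 1);
	refcount_inc(&h);
	printf("drop 1: free=%d\n", refcount_dec_and_test(&h));
	printf("drop 2: free=%d\n", refcount_dec_and_test(&h));
	return 0;
}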
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 795634ee5aa5..6fc77d4dbdcd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
337/* 337/*
338 * Same "read" as for lock_acquire(), except -1 means any. 338 * Same "read" as for lock_acquire(), except -1 means any.
339 */ 339 */
340extern int lock_is_held_type(struct lockdep_map *lock, int read); 340extern int lock_is_held_type(const struct lockdep_map *lock, int read);
341 341
342static inline int lock_is_held(struct lockdep_map *lock) 342static inline int lock_is_held(const struct lockdep_map *lock)
343{ 343{
344 return lock_is_held_type(lock, -1); 344 return lock_is_held_type(lock, -1);
345} 345}
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index ef3c9342e119..2eac32095113 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -44,7 +44,7 @@ extern void lockref_mark_dead(struct lockref *);
44extern int lockref_get_not_dead(struct lockref *); 44extern int lockref_get_not_dead(struct lockref *);
45 45
46/* Must be called under spinlock for reliable results */ 46/* Must be called under spinlock for reliable results */
47static inline int __lockref_is_dead(const struct lockref *l) 47static inline bool __lockref_is_dead(const struct lockref *l)
48{ 48{
49 return ((int)l->count < 0); 49 return ((int)l->count < 0);
50} 50}
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ca08ab16ecdc..2cfffe586885 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -12,6 +12,7 @@
12#include <uapi/linux/mdio.h> 12#include <uapi/linux/mdio.h>
13#include <linux/mod_devicetable.h> 13#include <linux/mod_devicetable.h>
14 14
15struct gpio_desc;
15struct mii_bus; 16struct mii_bus;
16 17
17/* Multiple levels of nesting are possible. However typically this is 18/* Multiple levels of nesting are possible. However typically this is
@@ -39,6 +40,9 @@ struct mdio_device {
39 /* Bus address of the MDIO device (0-31) */ 40 /* Bus address of the MDIO device (0-31) */
40 int addr; 41 int addr;
41 int flags; 42 int flags;
43 struct gpio_desc *reset;
44 unsigned int reset_assert_delay;
45 unsigned int reset_deassert_delay;
42}; 46};
43#define to_mdio_device(d) container_of(d, struct mdio_device, dev) 47#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
44 48
@@ -71,6 +75,7 @@ void mdio_device_free(struct mdio_device *mdiodev);
71struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); 75struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
72int mdio_device_register(struct mdio_device *mdiodev); 76int mdio_device_register(struct mdio_device *mdiodev);
73void mdio_device_remove(struct mdio_device *mdiodev); 77void mdio_device_remove(struct mdio_device *mdiodev);
78void mdio_device_reset(struct mdio_device *mdiodev, int value);
74int mdio_driver_register(struct mdio_driver *drv); 79int mdio_driver_register(struct mdio_driver *drv);
75void mdio_driver_unregister(struct mdio_driver *drv); 80void mdio_driver_unregister(struct mdio_driver *drv);
76int mdio_device_bus_match(struct device *dev, struct device_driver *drv); 81int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
@@ -257,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
257 return reg; 262 return reg;
258} 263}
259 264
265int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
266int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
267
260int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); 268int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
261int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); 269int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
262int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); 270int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 7ed0f7782d16..8be5077efb5f 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -332,8 +332,8 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
332void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); 332void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
333void memblock_mem_limit_remove_map(phys_addr_t limit); 333void memblock_mem_limit_remove_map(phys_addr_t limit);
334bool memblock_is_memory(phys_addr_t addr); 334bool memblock_is_memory(phys_addr_t addr);
335int memblock_is_map_memory(phys_addr_t addr); 335bool memblock_is_map_memory(phys_addr_t addr);
336int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); 336bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
337bool memblock_is_reserved(phys_addr_t addr); 337bool memblock_is_reserved(phys_addr_t addr);
338bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); 338bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
339 339
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 69966c461d1c..882046863581 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -108,7 +108,10 @@ struct lruvec_stat {
108 */ 108 */
109struct mem_cgroup_per_node { 109struct mem_cgroup_per_node {
110 struct lruvec lruvec; 110 struct lruvec lruvec;
111 struct lruvec_stat __percpu *lruvec_stat; 111
112 struct lruvec_stat __percpu *lruvec_stat_cpu;
113 atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
114
112 unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; 115 unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
113 116
114 struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; 117 struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -227,10 +230,10 @@ struct mem_cgroup {
227 spinlock_t move_lock; 230 spinlock_t move_lock;
228 struct task_struct *move_lock_task; 231 struct task_struct *move_lock_task;
229 unsigned long move_lock_flags; 232 unsigned long move_lock_flags;
230 /* 233
231 * percpu counter. 234 struct mem_cgroup_stat_cpu __percpu *stat_cpu;
232 */ 235 atomic_long_t stat[MEMCG_NR_STAT];
233 struct mem_cgroup_stat_cpu __percpu *stat; 236 atomic_long_t events[MEMCG_NR_EVENTS];
234 237
235 unsigned long socket_pressure; 238 unsigned long socket_pressure;
236 239
@@ -265,6 +268,12 @@ struct mem_cgroup {
265 /* WARNING: nodeinfo must be the last member here */ 268 /* WARNING: nodeinfo must be the last member here */
266}; 269};
267 270
271/*
272 * size of first charge trial. "32" comes from vmscan.c's magic value.
273 * TODO: maybe necessary to use big numbers in big irons.
274 */
275#define MEMCG_CHARGE_BATCH 32U
276
268extern struct mem_cgroup *root_mem_cgroup; 277extern struct mem_cgroup *root_mem_cgroup;
269 278
270static inline bool mem_cgroup_disabled(void) 279static inline bool mem_cgroup_disabled(void)
@@ -272,13 +281,6 @@ static inline bool mem_cgroup_disabled(void)
272 return !cgroup_subsys_enabled(memory_cgrp_subsys); 281 return !cgroup_subsys_enabled(memory_cgrp_subsys);
273} 282}
274 283
275static inline void mem_cgroup_event(struct mem_cgroup *memcg,
276 enum memcg_event_item event)
277{
278 this_cpu_inc(memcg->stat->events[event]);
279 cgroup_file_notify(&memcg->events_file);
280}
281
282bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); 284bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
283 285
284int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, 286int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
@@ -492,32 +494,38 @@ void unlock_page_memcg(struct page *page);
492static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, 494static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
493 int idx) 495 int idx)
494{ 496{
495 long val = 0; 497 long x = atomic_long_read(&memcg->stat[idx]);
496 int cpu; 498#ifdef CONFIG_SMP
497 499 if (x < 0)
498 for_each_possible_cpu(cpu) 500 x = 0;
499 val += per_cpu(memcg->stat->count[idx], cpu); 501#endif
500 502 return x;
501 if (val < 0)
502 val = 0;
503
504 return val;
505} 503}
506 504
507/* idx can be of type enum memcg_stat_item or node_stat_item */ 505/* idx can be of type enum memcg_stat_item or node_stat_item */
508static inline void __mod_memcg_state(struct mem_cgroup *memcg, 506static inline void __mod_memcg_state(struct mem_cgroup *memcg,
509 int idx, int val) 507 int idx, int val)
510{ 508{
511 if (!mem_cgroup_disabled()) 509 long x;
512 __this_cpu_add(memcg->stat->count[idx], val); 510
511 if (mem_cgroup_disabled())
512 return;
513
514 x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
515 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
516 atomic_long_add(x, &memcg->stat[idx]);
517 x = 0;
518 }
519 __this_cpu_write(memcg->stat_cpu->count[idx], x);
513} 520}
514 521
515/* idx can be of type enum memcg_stat_item or node_stat_item */ 522/* idx can be of type enum memcg_stat_item or node_stat_item */
516static inline void mod_memcg_state(struct mem_cgroup *memcg, 523static inline void mod_memcg_state(struct mem_cgroup *memcg,
517 int idx, int val) 524 int idx, int val)
518{ 525{
519 if (!mem_cgroup_disabled()) 526 preempt_disable();
520 this_cpu_add(memcg->stat->count[idx], val); 527 __mod_memcg_state(memcg, idx, val);
528 preempt_enable();
521} 529}
522 530
523/** 531/**
@@ -555,87 +563,108 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
555 enum node_stat_item idx) 563 enum node_stat_item idx)
556{ 564{
557 struct mem_cgroup_per_node *pn; 565 struct mem_cgroup_per_node *pn;
558 long val = 0; 566 long x;
559 int cpu;
560 567
561 if (mem_cgroup_disabled()) 568 if (mem_cgroup_disabled())
562 return node_page_state(lruvec_pgdat(lruvec), idx); 569 return node_page_state(lruvec_pgdat(lruvec), idx);
563 570
564 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 571 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
565 for_each_possible_cpu(cpu) 572 x = atomic_long_read(&pn->lruvec_stat[idx]);
566 val += per_cpu(pn->lruvec_stat->count[idx], cpu); 573#ifdef CONFIG_SMP
567 574 if (x < 0)
568 if (val < 0) 575 x = 0;
569 val = 0; 576#endif
570 577 return x;
571 return val;
572} 578}
573 579
574static inline void __mod_lruvec_state(struct lruvec *lruvec, 580static inline void __mod_lruvec_state(struct lruvec *lruvec,
575 enum node_stat_item idx, int val) 581 enum node_stat_item idx, int val)
576{ 582{
577 struct mem_cgroup_per_node *pn; 583 struct mem_cgroup_per_node *pn;
584 long x;
578 585
586 /* Update node */
579 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 587 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
588
580 if (mem_cgroup_disabled()) 589 if (mem_cgroup_disabled())
581 return; 590 return;
591
582 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 592 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
593
594 /* Update memcg */
583 __mod_memcg_state(pn->memcg, idx, val); 595 __mod_memcg_state(pn->memcg, idx, val);
584 __this_cpu_add(pn->lruvec_stat->count[idx], val); 596
597 /* Update lruvec */
598 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
599 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
600 atomic_long_add(x, &pn->lruvec_stat[idx]);
601 x = 0;
602 }
603 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
585} 604}
586 605
587static inline void mod_lruvec_state(struct lruvec *lruvec, 606static inline void mod_lruvec_state(struct lruvec *lruvec,
588 enum node_stat_item idx, int val) 607 enum node_stat_item idx, int val)
589{ 608{
590 struct mem_cgroup_per_node *pn; 609 preempt_disable();
591 610 __mod_lruvec_state(lruvec, idx, val);
592 mod_node_page_state(lruvec_pgdat(lruvec), idx, val); 611 preempt_enable();
593 if (mem_cgroup_disabled())
594 return;
595 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
596 mod_memcg_state(pn->memcg, idx, val);
597 this_cpu_add(pn->lruvec_stat->count[idx], val);
598} 612}
599 613
600static inline void __mod_lruvec_page_state(struct page *page, 614static inline void __mod_lruvec_page_state(struct page *page,
601 enum node_stat_item idx, int val) 615 enum node_stat_item idx, int val)
602{ 616{
603 struct mem_cgroup_per_node *pn; 617 pg_data_t *pgdat = page_pgdat(page);
618 struct lruvec *lruvec;
604 619
605 __mod_node_page_state(page_pgdat(page), idx, val); 620 /* Untracked pages have no memcg, no lruvec. Update only the node */
606 if (mem_cgroup_disabled() || !page->mem_cgroup) 621 if (!page->mem_cgroup) {
622 __mod_node_page_state(pgdat, idx, val);
607 return; 623 return;
608 __mod_memcg_state(page->mem_cgroup, idx, val); 624 }
609 pn = page->mem_cgroup->nodeinfo[page_to_nid(page)]; 625
610 __this_cpu_add(pn->lruvec_stat->count[idx], val); 626 lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
627 __mod_lruvec_state(lruvec, idx, val);
611} 628}
612 629
613static inline void mod_lruvec_page_state(struct page *page, 630static inline void mod_lruvec_page_state(struct page *page,
614 enum node_stat_item idx, int val) 631 enum node_stat_item idx, int val)
615{ 632{
616 struct mem_cgroup_per_node *pn; 633 preempt_disable();
617 634 __mod_lruvec_page_state(page, idx, val);
618 mod_node_page_state(page_pgdat(page), idx, val); 635 preempt_enable();
619 if (mem_cgroup_disabled() || !page->mem_cgroup)
620 return;
621 mod_memcg_state(page->mem_cgroup, idx, val);
622 pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
623 this_cpu_add(pn->lruvec_stat->count[idx], val);
624} 636}
625 637
626unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, 638unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
627 gfp_t gfp_mask, 639 gfp_t gfp_mask,
628 unsigned long *total_scanned); 640 unsigned long *total_scanned);
629 641
642/* idx can be of type enum memcg_event_item or vm_event_item */
643static inline void __count_memcg_events(struct mem_cgroup *memcg,
644 int idx, unsigned long count)
645{
646 unsigned long x;
647
648 if (mem_cgroup_disabled())
649 return;
650
651 x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
652 if (unlikely(x > MEMCG_CHARGE_BATCH)) {
653 atomic_long_add(x, &memcg->events[idx]);
654 x = 0;
655 }
656 __this_cpu_write(memcg->stat_cpu->events[idx], x);
657}
658
630static inline void count_memcg_events(struct mem_cgroup *memcg, 659static inline void count_memcg_events(struct mem_cgroup *memcg,
631 enum vm_event_item idx, 660 int idx, unsigned long count)
632 unsigned long count)
633{ 661{
634 if (!mem_cgroup_disabled()) 662 preempt_disable();
635 this_cpu_add(memcg->stat->events[idx], count); 663 __count_memcg_events(memcg, idx, count);
664 preempt_enable();
636} 665}
637 666
638/* idx can be of type enum memcg_stat_item or node_stat_item */ 667/* idx can be of type enum memcg_event_item or vm_event_item */
639static inline void count_memcg_page_event(struct page *page, 668static inline void count_memcg_page_event(struct page *page,
640 int idx) 669 int idx)
641{ 670{
@@ -654,12 +683,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
654 rcu_read_lock(); 683 rcu_read_lock();
655 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 684 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
656 if (likely(memcg)) { 685 if (likely(memcg)) {
657 this_cpu_inc(memcg->stat->events[idx]); 686 count_memcg_events(memcg, idx, 1);
658 if (idx == OOM_KILL) 687 if (idx == OOM_KILL)
659 cgroup_file_notify(&memcg->events_file); 688 cgroup_file_notify(&memcg->events_file);
660 } 689 }
661 rcu_read_unlock(); 690 rcu_read_unlock();
662} 691}
692
693static inline void mem_cgroup_event(struct mem_cgroup *memcg,
694 enum memcg_event_item event)
695{
696 count_memcg_events(memcg, event, 1);
697 cgroup_file_notify(&memcg->events_file);
698}
699
663#ifdef CONFIG_TRANSPARENT_HUGEPAGE 700#ifdef CONFIG_TRANSPARENT_HUGEPAGE
664void mem_cgroup_split_huge_fixup(struct page *head); 701void mem_cgroup_split_huge_fixup(struct page *head);
665#endif 702#endif
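
The memcontrol.h hunks above replace unconditional per-cpu additions with a batched scheme: each CPU accumulates a local delta and only folds it into a shared atomic counter once the delta exceeds MEMCG_CHARGE_BATCH. Below is a minimal, self-contained C sketch of that pattern; the names BATCH, NCPUS, percpu_delta, mod_state and read_state are invented for illustration and are not kernel APIs.

#include <stdatomic.h>

#define BATCH 32                 /* stand-in for MEMCG_CHARGE_BATCH */
#define NCPUS 4                  /* illustrative; the kernel uses real per-cpu storage */

static atomic_long shared;       /* stand-in for the shared atomic_long_t counter */
static long percpu_delta[NCPUS]; /* stand-in for the per-cpu slot */

/* Kernel callers run this with preemption disabled; cpu selects the slot. */
static void mod_state(int cpu, long val)
{
	long x = percpu_delta[cpu] + val;

	if (x > BATCH || x < -BATCH) {   /* batch full: flush into the shared counter */
		atomic_fetch_add(&shared, x);
		x = 0;
	}
	percpu_delta[cpu] = x;
}

/* Readers see at most NCPUS * BATCH of drift against the precise sum. */
static long read_state(void)
{
	return atomic_load(&shared);
}

This also explains why lruvec_page_state() above clamps negative reads to zero on SMP: the shared counter can transiently go negative while per-cpu deltas are still unflushed.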
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 58e110aee7ab..aba5f86eb038 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -13,6 +13,7 @@ struct pglist_data;
13struct mem_section; 13struct mem_section;
14struct memory_block; 14struct memory_block;
15struct resource; 15struct resource;
16struct vmem_altmap;
16 17
17#ifdef CONFIG_MEMORY_HOTPLUG 18#ifdef CONFIG_MEMORY_HOTPLUG
18/* 19/*
@@ -125,24 +126,26 @@ static inline bool movable_node_is_enabled(void)
125 126
126#ifdef CONFIG_MEMORY_HOTREMOVE 127#ifdef CONFIG_MEMORY_HOTREMOVE
127extern bool is_pageblock_removable_nolock(struct page *page); 128extern bool is_pageblock_removable_nolock(struct page *page);
128extern int arch_remove_memory(u64 start, u64 size); 129extern int arch_remove_memory(u64 start, u64 size,
130 struct vmem_altmap *altmap);
129extern int __remove_pages(struct zone *zone, unsigned long start_pfn, 131extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
130 unsigned long nr_pages); 132 unsigned long nr_pages, struct vmem_altmap *altmap);
131#endif /* CONFIG_MEMORY_HOTREMOVE */ 133#endif /* CONFIG_MEMORY_HOTREMOVE */
132 134
133/* reasonably generic interface to expand the physical pages */ 135/* reasonably generic interface to expand the physical pages */
134extern int __add_pages(int nid, unsigned long start_pfn, 136extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
135 unsigned long nr_pages, bool want_memblock); 137 struct vmem_altmap *altmap, bool want_memblock);
136 138
137#ifndef CONFIG_ARCH_HAS_ADD_PAGES 139#ifndef CONFIG_ARCH_HAS_ADD_PAGES
138static inline int add_pages(int nid, unsigned long start_pfn, 140static inline int add_pages(int nid, unsigned long start_pfn,
139 unsigned long nr_pages, bool want_memblock) 141 unsigned long nr_pages, struct vmem_altmap *altmap,
142 bool want_memblock)
140{ 143{
141 return __add_pages(nid, start_pfn, nr_pages, want_memblock); 144 return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
142} 145}
143#else /* ARCH_HAS_ADD_PAGES */ 146#else /* ARCH_HAS_ADD_PAGES */
144int add_pages(int nid, unsigned long start_pfn, 147int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
145 unsigned long nr_pages, bool want_memblock); 148 struct vmem_altmap *altmap, bool want_memblock);
146#endif /* ARCH_HAS_ADD_PAGES */ 149#endif /* ARCH_HAS_ADD_PAGES */
147 150
148#ifdef CONFIG_NUMA 151#ifdef CONFIG_NUMA
@@ -318,15 +321,17 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
318 void *arg, int (*func)(struct memory_block *, void *)); 321 void *arg, int (*func)(struct memory_block *, void *));
319extern int add_memory(int nid, u64 start, u64 size); 322extern int add_memory(int nid, u64 start, u64 size);
320extern int add_memory_resource(int nid, struct resource *resource, bool online); 323extern int add_memory_resource(int nid, struct resource *resource, bool online);
321extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock); 324extern int arch_add_memory(int nid, u64 start, u64 size,
325 struct vmem_altmap *altmap, bool want_memblock);
322extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, 326extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
323 unsigned long nr_pages); 327 unsigned long nr_pages, struct vmem_altmap *altmap);
324extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); 328extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
325extern bool is_memblock_offlined(struct memory_block *mem); 329extern bool is_memblock_offlined(struct memory_block *mem);
326extern void remove_memory(int nid, u64 start, u64 size); 330extern void remove_memory(int nid, u64 start, u64 size);
327extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn); 331extern int sparse_add_one_section(struct pglist_data *pgdat,
332 unsigned long start_pfn, struct vmem_altmap *altmap);
328extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, 333extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
329 unsigned long map_offset); 334 unsigned long map_offset, struct vmem_altmap *altmap);
330extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, 335extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
331 unsigned long pnum); 336 unsigned long pnum);
332extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, 337extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
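
All of the hotplug entry points above gain a struct vmem_altmap * so that ZONE_DEVICE users can have the memmap carved out of the device range itself. A hedged sketch of the resulting calling convention, based only on the prototypes in this hunk; the wrapper names and the want_memblock values are illustrative assumptions:

/* Regular hotplug: no altmap, the memmap comes from normally managed memory. */
static int example_hot_add(int nid, unsigned long start_pfn,
			   unsigned long nr_pages)
{
	return __add_pages(nid, start_pfn, nr_pages, NULL, true);
}

/* ZONE_DEVICE-style caller: the altmap describes pfns reserved for the memmap. */
static int example_device_add(int nid, unsigned long start_pfn,
			      unsigned long nr_pages, struct vmem_altmap *altmap)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, false);
}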
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 10d23c367048..7b4899c06f49 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -26,18 +26,6 @@ struct vmem_altmap {
26 unsigned long alloc; 26 unsigned long alloc;
27}; 27};
28 28
29unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
30void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
31
32#ifdef CONFIG_ZONE_DEVICE
33struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
34#else
35static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
36{
37 return NULL;
38}
39#endif
40
41/* 29/*
 42 * Specialize ZONE_DEVICE memory into multiple types each having different 30 * Specialize ZONE_DEVICE memory into multiple types each having different
43 * usage. 31 * usage.
@@ -125,8 +113,9 @@ typedef void (*dev_page_free_t)(struct page *page, void *data);
125struct dev_pagemap { 113struct dev_pagemap {
126 dev_page_fault_t page_fault; 114 dev_page_fault_t page_fault;
127 dev_page_free_t page_free; 115 dev_page_free_t page_free;
128 struct vmem_altmap *altmap; 116 struct vmem_altmap altmap;
129 const struct resource *res; 117 bool altmap_valid;
118 struct resource res;
130 struct percpu_ref *ref; 119 struct percpu_ref *ref;
131 struct device *dev; 120 struct device *dev;
132 void *data; 121 void *data;
@@ -134,15 +123,17 @@ struct dev_pagemap {
134}; 123};
135 124
136#ifdef CONFIG_ZONE_DEVICE 125#ifdef CONFIG_ZONE_DEVICE
137void *devm_memremap_pages(struct device *dev, struct resource *res, 126void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
138 struct percpu_ref *ref, struct vmem_altmap *altmap); 127struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
139struct dev_pagemap *find_dev_pagemap(resource_size_t phys); 128 struct dev_pagemap *pgmap);
129
130unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
131void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
140 132
141static inline bool is_zone_device_page(const struct page *page); 133static inline bool is_zone_device_page(const struct page *page);
142#else 134#else
143static inline void *devm_memremap_pages(struct device *dev, 135static inline void *devm_memremap_pages(struct device *dev,
144 struct resource *res, struct percpu_ref *ref, 136 struct dev_pagemap *pgmap)
145 struct vmem_altmap *altmap)
146{ 137{
147 /* 138 /*
148 * Fail attempts to call devm_memremap_pages() without 139 * Fail attempts to call devm_memremap_pages() without
@@ -153,11 +144,22 @@ static inline void *devm_memremap_pages(struct device *dev,
153 return ERR_PTR(-ENXIO); 144 return ERR_PTR(-ENXIO);
154} 145}
155 146
156static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys) 147static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
148 struct dev_pagemap *pgmap)
157{ 149{
158 return NULL; 150 return NULL;
159} 151}
160#endif 152
153static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
154{
155 return 0;
156}
157
158static inline void vmem_altmap_free(struct vmem_altmap *altmap,
159 unsigned long nr_pfns)
160{
161}
162#endif /* CONFIG_ZONE_DEVICE */
161 163
162#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC) 164#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
163static inline bool is_device_private_page(const struct page *page) 165static inline bool is_device_private_page(const struct page *page)
@@ -173,39 +175,6 @@ static inline bool is_device_public_page(const struct page *page)
173} 175}
174#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ 176#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
175 177
176/**
177 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
178 * @pfn: page frame number to lookup page_map
179 * @pgmap: optional known pgmap that already has a reference
180 *
181 * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
182 * same mapping.
183 */
184static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
185 struct dev_pagemap *pgmap)
186{
187 const struct resource *res = pgmap ? pgmap->res : NULL;
188 resource_size_t phys = PFN_PHYS(pfn);
189
190 /*
191 * In the cached case we're already holding a live reference so
192 * we can simply do a blind increment
193 */
194 if (res && phys >= res->start && phys <= res->end) {
195 percpu_ref_get(pgmap->ref);
196 return pgmap;
197 }
198
199 /* fall back to slow path lookup */
200 rcu_read_lock();
201 pgmap = find_dev_pagemap(phys);
202 if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
203 pgmap = NULL;
204 rcu_read_unlock();
205
206 return pgmap;
207}
208
209static inline void put_dev_pagemap(struct dev_pagemap *pgmap) 178static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
210{ 179{
211 if (pgmap) 180 if (pgmap)
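
Since devm_memremap_pages() now takes a single struct dev_pagemap, the caller embeds the resource and (optionally) the altmap in that structure instead of passing them as separate arguments. A hedged sketch inferred from the declarations above; the helper name and the minimal error handling are illustrative:

static void *example_map_device_pages(struct device *dev,
				      struct resource *res,
				      struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->res = *res;             /* now a struct copy, no longer a pointer */
	pgmap->ref = ref;
	pgmap->altmap_valid = false;   /* set true and fill pgmap->altmap when
	                                * the memmap should live on the device */

	return devm_memremap_pages(dev, pgmap);
}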
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 78dc85365c4f..080798f17ece 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -645,11 +645,6 @@ struct axp20x_dev {
645 const struct regmap_irq_chip *regmap_irq_chip; 645 const struct regmap_irq_chip *regmap_irq_chip;
646}; 646};
647 647
648struct axp288_extcon_pdata {
649 /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
650 struct gpio_desc *gpio_mux_cntl;
651};
652
653/* generic helper function for reading 9-16 bit wide regs */ 648/* generic helper function for reading 9-16 bit wide regs */
654static inline int axp20x_read_variable_width(struct regmap *regmap, 649static inline int axp20x_read_variable_width(struct regmap *regmap,
655 unsigned int reg, unsigned int width) 650 unsigned int reg, unsigned int width)
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 4e887ba22635..c61535979b8f 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group;
322extern struct attribute_group cros_ec_lightbar_attr_group; 322extern struct attribute_group cros_ec_lightbar_attr_group;
323extern struct attribute_group cros_ec_vbc_attr_group; 323extern struct attribute_group cros_ec_vbc_attr_group;
324 324
325/* debugfs stuff */
326int cros_ec_debugfs_init(struct cros_ec_dev *ec);
327void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
328
325/* ACPI GPE handler */ 329/* ACPI GPE handler */
326#ifdef CONFIG_ACPI 330#ifdef CONFIG_ACPI
327 331
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 2b16e95b9bb8..2b96e630e3b6 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -291,6 +291,9 @@ enum host_event_code {
291 /* EC desires to change state of host-controlled USB mux */ 291 /* EC desires to change state of host-controlled USB mux */
292 EC_HOST_EVENT_USB_MUX = 28, 292 EC_HOST_EVENT_USB_MUX = 28,
293 293
294 /* EC RTC event occurred */
295 EC_HOST_EVENT_RTC = 26,
296
294 /* 297 /*
295 * The high bit of the event mask is not used as a host event code. If 298 * The high bit of the event mask is not used as a host event code. If
296 * it reads back as set, then the entire event mask should be 299 * it reads back as set, then the entire event mask should be
@@ -799,6 +802,8 @@ enum ec_feature_code {
799 EC_FEATURE_USB_MUX = 23, 802 EC_FEATURE_USB_MUX = 23,
800 /* Motion Sensor code has an internal software FIFO */ 803 /* Motion Sensor code has an internal software FIFO */
801 EC_FEATURE_MOTION_SENSE_FIFO = 24, 804 EC_FEATURE_MOTION_SENSE_FIFO = 24,
805 /* EC has RTC feature that can be controlled by host commands */
806 EC_FEATURE_RTC = 27,
802}; 807};
803 808
804#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) 809#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
@@ -1709,6 +1714,9 @@ struct ec_response_rtc {
1709#define EC_CMD_RTC_SET_VALUE 0x46 1714#define EC_CMD_RTC_SET_VALUE 0x46
1710#define EC_CMD_RTC_SET_ALARM 0x47 1715#define EC_CMD_RTC_SET_ALARM 0x47
1711 1716
1717/* Pass as param to SET_ALARM to clear the current alarm */
1718#define EC_RTC_ALARM_CLEAR 0
1719
1712/*****************************************************************************/ 1720/*****************************************************************************/
1713/* Port80 log access */ 1721/* Port80 log access */
1714 1722
@@ -2904,16 +2912,33 @@ enum usb_pd_control_mux {
2904 USB_PD_CTRL_MUX_AUTO = 5, 2912 USB_PD_CTRL_MUX_AUTO = 5,
2905}; 2913};
2906 2914
2915enum usb_pd_control_swap {
2916 USB_PD_CTRL_SWAP_NONE = 0,
2917 USB_PD_CTRL_SWAP_DATA = 1,
2918 USB_PD_CTRL_SWAP_POWER = 2,
2919 USB_PD_CTRL_SWAP_VCONN = 3,
2920 USB_PD_CTRL_SWAP_COUNT
2921};
2922
2907struct ec_params_usb_pd_control { 2923struct ec_params_usb_pd_control {
2908 uint8_t port; 2924 uint8_t port;
2909 uint8_t role; 2925 uint8_t role;
2910 uint8_t mux; 2926 uint8_t mux;
2927 uint8_t swap;
2911} __packed; 2928} __packed;
2912 2929
2913#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */ 2930#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
2914#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */ 2931#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
2915#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */ 2932#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
2916 2933
2934#define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */
2935#define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */
2936#define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */
2937#define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */
2938#define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */
2939#define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */
 2940#define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powered */
2941
2917struct ec_response_usb_pd_control_v1 { 2942struct ec_response_usb_pd_control_v1 {
2918 uint8_t enabled; 2943 uint8_t enabled;
2919 uint8_t role; 2944 uint8_t role;
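
The new PD_CTRL_RESP_ROLE_* bits pack both local and partner capabilities into the role byte of the v1 response. A small, hypothetical decoding helper; the function names and the reading of bit 0 as the local port's power role are assumptions based only on the comments above:

static bool example_local_port_is_source(const struct ec_response_usb_pd_control_v1 *resp)
{
	/* PD_CTRL_RESP_ROLE_POWER: 0 = SNK, 1 = SRC (per the comment above) */
	return resp->role & PD_CTRL_RESP_ROLE_POWER;
}

static bool example_partner_is_dualrole(const struct ec_response_usb_pd_control_v1 *resp)
{
	return resp->role & (PD_CTRL_RESP_ROLE_DR_POWER | PD_CTRL_RESP_ROLE_DR_DATA);
}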
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 3c8568aa82a5..75e5c8ff85fc 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -3733,6 +3733,9 @@ enum usb_irq_events {
3733#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 3733#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
3734#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 3734#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
3735 3735
3736/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
3737#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
3738
3736/* Registers for function RESOURCE */ 3739/* Registers for function RESOURCE */
3737#define TPS65917_REGEN1_CTRL 0x2 3740#define TPS65917_REGEN1_CTRL 0x2
3738#define TPS65917_PLLEN_CTRL 0x3 3741#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644
index 000000000000..796fb9794c9e
--- /dev/null
+++ b/include/linux/mfd/rave-sp.h
@@ -0,0 +1,60 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2
3/*
4 * Core definitions for RAVE SP MFD driver.
5 *
6 * Copyright (C) 2017 Zodiac Inflight Innovations
7 */
8
9#ifndef _LINUX_RAVE_SP_H_
10#define _LINUX_RAVE_SP_H_
11
12#include <linux/notifier.h>
13
14enum rave_sp_command {
15 RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
16 RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
17 RAVE_SP_CMD_BOOT_SOURCE = 0x26,
18 RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
19 RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
20
21 RAVE_SP_CMD_STATUS = 0xA0,
22 RAVE_SP_CMD_SW_WDT = 0xA1,
23 RAVE_SP_CMD_PET_WDT = 0xA2,
24 RAVE_SP_CMD_RESET = 0xA7,
25 RAVE_SP_CMD_RESET_REASON = 0xA8,
26
27 RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
28 RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
29 RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
30 RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
31
32 RAVE_SP_EVNT_BASE = 0xE0,
33};
34
35struct rave_sp;
36
37static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
38{
39 return ((unsigned long)value << 8) | event;
40}
41
42static inline u8 rave_sp_action_unpack_event(unsigned long action)
43{
44 return action;
45}
46
47static inline u8 rave_sp_action_unpack_value(unsigned long action)
48{
49 return action >> 8;
50}
51
52int rave_sp_exec(struct rave_sp *sp,
53 void *__data, size_t data_size,
54 void *reply_data, size_t reply_data_size);
55
56struct device;
57int devm_rave_sp_register_event_notifier(struct device *dev,
58 struct notifier_block *nb);
59
60#endif /* _LINUX_RAVE_SP_H_ */
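
A hedged sketch of how a sub-driver might consume the pack/unpack helpers declared above from a notifier registered via devm_rave_sp_register_event_notifier(); the callback body and printout are purely illustrative:

static int example_rave_sp_event(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	u8 event = rave_sp_action_unpack_event(action);
	u8 value = rave_sp_action_unpack_value(action);

	pr_info("rave-sp: event %#x value %#x\n", event, value);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_rave_sp_event,
};

/* in probe(): devm_rave_sp_register_event_notifier(dev, &example_nb); */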
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 77c7cf40d9b4..605f62264825 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -1,13 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * STM32 Low-Power Timer parent driver. 3 * STM32 Low-Power Timer parent driver.
3 *
4 * Copyright (C) STMicroelectronics 2017 4 * Copyright (C) STMicroelectronics 2017
5 *
6 * Author: Fabrice Gasnier <fabrice.gasnier@st.com> 5 * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
7 *
8 * Inspired by Benjamin Gaignard's stm32-timers driver 6 * Inspired by Benjamin Gaignard's stm32-timers driver
9 *
10 * License terms: GNU General Public License (GPL), version 2
11 */ 7 */
12 8
13#ifndef _LINUX_STM32_LPTIMER_H_ 9#ifndef _LINUX_STM32_LPTIMER_H_
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index ce7346e7f77a..2aadab6f34a1 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -1,9 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (C) STMicroelectronics 2016 3 * Copyright (C) STMicroelectronics 2016
3 *
4 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> 4 * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
5 *
6 * License terms: GNU General Public License (GPL), version 2
7 */ 5 */
8 6
9#ifndef _LINUX_STM32_GPTIMER_H_ 7#ifndef _LINUX_STM32_GPTIMER_H_
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e1cfe9194129..396a103c8bc6 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -25,26 +25,6 @@
25 writew((val) >> 16, (addr) + 2); \ 25 writew((val) >> 16, (addr) + 2); \
26 } while (0) 26 } while (0)
27 27
28#define CNF_CMD 0x04
29#define CNF_CTL_BASE 0x10
30#define CNF_INT_PIN 0x3d
31#define CNF_STOP_CLK_CTL 0x40
32#define CNF_GCLK_CTL 0x41
33#define CNF_SD_CLK_MODE 0x42
34#define CNF_PIN_STATUS 0x44
35#define CNF_PWR_CTL_1 0x48
36#define CNF_PWR_CTL_2 0x49
37#define CNF_PWR_CTL_3 0x4a
38#define CNF_CARD_DETECT_MODE 0x4c
39#define CNF_SD_SLOT 0x50
40#define CNF_EXT_GCLK_CTL_1 0xf0
41#define CNF_EXT_GCLK_CTL_2 0xf1
42#define CNF_EXT_GCLK_CTL_3 0xf9
43#define CNF_SD_LED_EN_1 0xfa
44#define CNF_SD_LED_EN_2 0xfe
45
46#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
47
48#define sd_config_write8(base, shift, reg, val) \ 28#define sd_config_write8(base, shift, reg, val) \
49 tmio_iowrite8((val), (base) + ((reg) << (shift))) 29 tmio_iowrite8((val), (base) + ((reg) << (shift)))
50#define sd_config_write16(base, shift, reg, val) \ 30#define sd_config_write16(base, shift, reg, val) \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 409ffb14298a..e5258ee4e38b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -79,6 +79,11 @@
79 << __mlx5_dw_bit_off(typ, fld))); \ 79 << __mlx5_dw_bit_off(typ, fld))); \
80} while (0) 80} while (0)
81 81
82#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
83 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
84 MLX5_SET(typ, p, fld[idx], v); \
85} while (0)
86
82#define MLX5_SET_TO_ONES(typ, p, fld) do { \ 87#define MLX5_SET_TO_ONES(typ, p, fld) do { \
83 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ 88 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
84 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ 89 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -244,6 +249,8 @@ enum {
244 MLX5_NON_FP_BFREGS_PER_UAR, 249 MLX5_NON_FP_BFREGS_PER_UAR,
245 MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, 250 MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
246 MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, 251 MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
252 MLX5_MIN_DYN_BFREGS = 512,
253 MLX5_MAX_DYN_BFREGS = 1024,
247}; 254};
248 255
249enum { 256enum {
@@ -284,6 +291,7 @@ enum {
284 MLX5_EVENT_QUEUE_TYPE_QP = 0, 291 MLX5_EVENT_QUEUE_TYPE_QP = 0,
285 MLX5_EVENT_QUEUE_TYPE_RQ = 1, 292 MLX5_EVENT_QUEUE_TYPE_RQ = 1,
286 MLX5_EVENT_QUEUE_TYPE_SQ = 2, 293 MLX5_EVENT_QUEUE_TYPE_SQ = 2,
294 MLX5_EVENT_QUEUE_TYPE_DCT = 6,
287}; 295};
288 296
289enum mlx5_event { 297enum mlx5_event {
@@ -319,6 +327,8 @@ enum mlx5_event {
319 MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, 327 MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
320 MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, 328 MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
321 329
330 MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
331
322 MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, 332 MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
323}; 333};
324 334
@@ -611,6 +621,11 @@ struct mlx5_eqe_pps {
611 u8 rsvd2[12]; 621 u8 rsvd2[12];
612} __packed; 622} __packed;
613 623
624struct mlx5_eqe_dct {
625 __be32 reserved[6];
626 __be32 dctn;
627};
628
614union ev_data { 629union ev_data {
615 __be32 raw[7]; 630 __be32 raw[7];
616 struct mlx5_eqe_cmd cmd; 631 struct mlx5_eqe_cmd cmd;
@@ -626,6 +641,7 @@ union ev_data {
626 struct mlx5_eqe_vport_change vport_change; 641 struct mlx5_eqe_vport_change vport_change;
627 struct mlx5_eqe_port_module port_module; 642 struct mlx5_eqe_port_module port_module;
628 struct mlx5_eqe_pps pps; 643 struct mlx5_eqe_pps pps;
644 struct mlx5_eqe_dct dct;
629} __packed; 645} __packed;
630 646
631struct mlx5_eqe { 647struct mlx5_eqe {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a0610427e168..6ed79a8a8318 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -155,6 +155,13 @@ enum mlx5_dcbx_oper_mode {
155 MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, 155 MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
156}; 156};
157 157
158enum mlx5_dct_atomic_mode {
159 MLX5_ATOMIC_MODE_DCT_OFF = 20,
160 MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
161 MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
162 MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
163};
164
158enum { 165enum {
159 MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, 166 MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
160 MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, 167 MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
@@ -231,6 +238,9 @@ struct mlx5_bfreg_info {
231 u32 ver; 238 u32 ver;
232 bool lib_uar_4k; 239 bool lib_uar_4k;
233 u32 num_sys_pages; 240 u32 num_sys_pages;
241 u32 num_static_sys_pages;
242 u32 total_num_bfregs;
243 u32 num_dyn_bfregs;
234}; 244};
235 245
236struct mlx5_cmd_first { 246struct mlx5_cmd_first {
@@ -430,6 +440,7 @@ enum mlx5_res_type {
430 MLX5_RES_SRQ = 3, 440 MLX5_RES_SRQ = 3,
431 MLX5_RES_XSRQ = 4, 441 MLX5_RES_XSRQ = 4,
432 MLX5_RES_XRQ = 5, 442 MLX5_RES_XRQ = 5,
443 MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
433}; 444};
434 445
435struct mlx5_core_rsc_common { 446struct mlx5_core_rsc_common {
@@ -788,6 +799,7 @@ struct mlx5_clock {
788 u32 nominal_c_mult; 799 u32 nominal_c_mult;
789 unsigned long overflow_period; 800 unsigned long overflow_period;
790 struct delayed_work overflow_work; 801 struct delayed_work overflow_work;
802 struct mlx5_core_dev *mdev;
791 struct ptp_clock *ptp; 803 struct ptp_clock *ptp;
792 struct ptp_clock_info ptp_info; 804 struct ptp_clock_info ptp_info;
793 struct mlx5_pps pps_info; 805 struct mlx5_pps pps_info;
@@ -826,7 +838,7 @@ struct mlx5_core_dev {
826 struct mlx5e_resources mlx5e_res; 838 struct mlx5e_resources mlx5e_res;
827 struct { 839 struct {
828 struct mlx5_rsvd_gids reserved_gids; 840 struct mlx5_rsvd_gids reserved_gids;
829 atomic_t roce_en; 841 u32 roce_en;
830 } roce; 842 } roce;
831#ifdef CONFIG_MLX5_FPGA 843#ifdef CONFIG_MLX5_FPGA
832 struct mlx5_fpga_device *fpga; 844 struct mlx5_fpga_device *fpga;
@@ -835,6 +847,8 @@ struct mlx5_core_dev {
835 struct cpu_rmap *rmap; 847 struct cpu_rmap *rmap;
836#endif 848#endif
837 struct mlx5_clock clock; 849 struct mlx5_clock clock;
850 struct mlx5_ib_clock_info *clock_info;
851 struct page *clock_info_page;
838}; 852};
839 853
840struct mlx5_db { 854struct mlx5_db {
@@ -1103,7 +1117,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
1103unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); 1117unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
1104int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, 1118int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
1105 u8 roce_version, u8 roce_l3_type, const u8 *gid, 1119 u8 roce_version, u8 roce_l3_type, const u8 *gid,
1106 const u8 *mac, bool vlan, u16 vlan_id); 1120 const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
1107 1121
1108static inline int fw_initializing(struct mlx5_core_dev *dev) 1122static inline int fw_initializing(struct mlx5_core_dev *dev)
1109{ 1123{
@@ -1225,6 +1239,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
1225 return !!(dev->priv.rl_table.max_size); 1239 return !!(dev->priv.rl_table.max_size);
1226} 1240}
1227 1241
1242static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
1243{
1244 return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
1245 MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
1246}
1247
1248static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
1249{
1250 return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
1251}
1252
1253static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
1254{
1255 return mlx5_core_is_mp_slave(dev) ||
1256 mlx5_core_is_mp_master(dev);
1257}
1258
1259static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
1260{
1261 if (!mlx5_core_mp_enabled(dev))
1262 return 1;
1263
1264 return MLX5_CAP_GEN(dev, native_port_num);
1265}
1266
1228enum { 1267enum {
1229 MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, 1268 MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
1230}; 1269};
@@ -1238,7 +1277,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
1238 int eqn; 1277 int eqn;
1239 int err; 1278 int err;
1240 1279
1241 err = mlx5_vector2eqn(dev, vector, &eqn, &irq); 1280 err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
1242 if (err) 1281 if (err)
1243 return NULL; 1282 return NULL;
1244 1283
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..a0b48afcb422 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
95struct mlx5_flow_namespace * 95struct mlx5_flow_namespace *
96mlx5_get_flow_namespace(struct mlx5_core_dev *dev, 96mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
97 enum mlx5_flow_namespace_type type); 97 enum mlx5_flow_namespace_type type);
98struct mlx5_flow_namespace *
99mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
100 enum mlx5_flow_namespace_type type,
101 int vport);
98 102
99struct mlx5_flow_table * 103struct mlx5_flow_table *
100mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, 104mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1391a82da98e..f4e417686f62 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -502,7 +502,7 @@ struct mlx5_ifc_ads_bits {
502 u8 dei_cfi[0x1]; 502 u8 dei_cfi[0x1];
503 u8 eth_prio[0x3]; 503 u8 eth_prio[0x3];
504 u8 sl[0x4]; 504 u8 sl[0x4];
505 u8 port[0x8]; 505 u8 vhca_port_num[0x8];
506 u8 rmac_47_32[0x10]; 506 u8 rmac_47_32[0x10];
507 507
508 u8 rmac_31_0[0x20]; 508 u8 rmac_31_0[0x20];
@@ -794,7 +794,10 @@ enum {
794}; 794};
795 795
796struct mlx5_ifc_cmd_hca_cap_bits { 796struct mlx5_ifc_cmd_hca_cap_bits {
797 u8 reserved_at_0[0x80]; 797 u8 reserved_at_0[0x30];
798 u8 vhca_id[0x10];
799
800 u8 reserved_at_40[0x40];
798 801
799 u8 log_max_srq_sz[0x8]; 802 u8 log_max_srq_sz[0x8];
800 u8 log_max_qp_sz[0x8]; 803 u8 log_max_qp_sz[0x8];
@@ -1023,13 +1026,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
1023 u8 reserved_at_3b8[0x3]; 1026 u8 reserved_at_3b8[0x3];
1024 u8 log_min_stride_sz_sq[0x5]; 1027 u8 log_min_stride_sz_sq[0x5];
1025 1028
1026 u8 reserved_at_3c0[0x1b]; 1029 u8 hairpin[0x1];
1030 u8 reserved_at_3c1[0x2];
1031 u8 log_max_hairpin_queues[0x5];
1032 u8 reserved_at_3c8[0x3];
1033 u8 log_max_hairpin_wq_data_sz[0x5];
1034 u8 reserved_at_3d0[0x3];
1035 u8 log_max_hairpin_num_packets[0x5];
1036 u8 reserved_at_3d8[0x3];
1027 u8 log_max_wq_sz[0x5]; 1037 u8 log_max_wq_sz[0x5];
1028 1038
1029 u8 nic_vport_change_event[0x1]; 1039 u8 nic_vport_change_event[0x1];
1030 u8 disable_local_lb_uc[0x1]; 1040 u8 disable_local_lb_uc[0x1];
1031 u8 disable_local_lb_mc[0x1]; 1041 u8 disable_local_lb_mc[0x1];
1032 u8 reserved_at_3e3[0x8]; 1042 u8 log_min_hairpin_wq_data_sz[0x5];
1043 u8 reserved_at_3e8[0x3];
1033 u8 log_max_vlan_list[0x5]; 1044 u8 log_max_vlan_list[0x5];
1034 u8 reserved_at_3f0[0x3]; 1045 u8 reserved_at_3f0[0x3];
1035 u8 log_max_current_mc_list[0x5]; 1046 u8 log_max_current_mc_list[0x5];
@@ -1067,7 +1078,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
1067 u8 reserved_at_5f8[0x3]; 1078 u8 reserved_at_5f8[0x3];
1068 u8 log_max_xrq[0x5]; 1079 u8 log_max_xrq[0x5];
1069 1080
1070 u8 reserved_at_600[0x200]; 1081 u8 affiliate_nic_vport_criteria[0x8];
1082 u8 native_port_num[0x8];
1083 u8 num_vhca_ports[0x8];
1084 u8 reserved_at_618[0x6];
1085 u8 sw_owner_id[0x1];
1086 u8 reserved_at_61f[0x1e1];
1071}; 1087};
1072 1088
1073enum mlx5_flow_destination_type { 1089enum mlx5_flow_destination_type {
@@ -1163,7 +1179,12 @@ struct mlx5_ifc_wq_bits {
1163 u8 reserved_at_118[0x3]; 1179 u8 reserved_at_118[0x3];
1164 u8 log_wq_sz[0x5]; 1180 u8 log_wq_sz[0x5];
1165 1181
1166 u8 reserved_at_120[0x15]; 1182 u8 reserved_at_120[0x3];
1183 u8 log_hairpin_num_packets[0x5];
1184 u8 reserved_at_128[0x3];
1185 u8 log_hairpin_data_sz[0x5];
1186 u8 reserved_at_130[0x5];
1187
1167 u8 log_wqe_num_of_strides[0x3]; 1188 u8 log_wqe_num_of_strides[0x3];
1168 u8 two_byte_shift_en[0x1]; 1189 u8 two_byte_shift_en[0x1];
1169 u8 reserved_at_139[0x4]; 1190 u8 reserved_at_139[0x4];
@@ -2483,7 +2504,8 @@ struct mlx5_ifc_sqc_bits {
2483 u8 state[0x4]; 2504 u8 state[0x4];
2484 u8 reg_umr[0x1]; 2505 u8 reg_umr[0x1];
2485 u8 allow_swp[0x1]; 2506 u8 allow_swp[0x1];
2486 u8 reserved_at_e[0x12]; 2507 u8 hairpin[0x1];
2508 u8 reserved_at_f[0x11];
2487 2509
2488 u8 reserved_at_20[0x8]; 2510 u8 reserved_at_20[0x8];
2489 u8 user_index[0x18]; 2511 u8 user_index[0x18];
@@ -2491,7 +2513,13 @@ struct mlx5_ifc_sqc_bits {
2491 u8 reserved_at_40[0x8]; 2513 u8 reserved_at_40[0x8];
2492 u8 cqn[0x18]; 2514 u8 cqn[0x18];
2493 2515
2494 u8 reserved_at_60[0x90]; 2516 u8 reserved_at_60[0x8];
2517 u8 hairpin_peer_rq[0x18];
2518
2519 u8 reserved_at_80[0x10];
2520 u8 hairpin_peer_vhca[0x10];
2521
2522 u8 reserved_at_a0[0x50];
2495 2523
2496 u8 packet_pacing_rate_limit_index[0x10]; 2524 u8 packet_pacing_rate_limit_index[0x10];
2497 u8 tis_lst_sz[0x10]; 2525 u8 tis_lst_sz[0x10];
@@ -2563,7 +2591,8 @@ struct mlx5_ifc_rqc_bits {
2563 u8 state[0x4]; 2591 u8 state[0x4];
2564 u8 reserved_at_c[0x1]; 2592 u8 reserved_at_c[0x1];
2565 u8 flush_in_error_en[0x1]; 2593 u8 flush_in_error_en[0x1];
2566 u8 reserved_at_e[0x12]; 2594 u8 hairpin[0x1];
2595 u8 reserved_at_f[0x11];
2567 2596
2568 u8 reserved_at_20[0x8]; 2597 u8 reserved_at_20[0x8];
2569 u8 user_index[0x18]; 2598 u8 user_index[0x18];
@@ -2577,7 +2606,13 @@ struct mlx5_ifc_rqc_bits {
2577 u8 reserved_at_80[0x8]; 2606 u8 reserved_at_80[0x8];
2578 u8 rmpn[0x18]; 2607 u8 rmpn[0x18];
2579 2608
2580 u8 reserved_at_a0[0xe0]; 2609 u8 reserved_at_a0[0x8];
2610 u8 hairpin_peer_sq[0x18];
2611
2612 u8 reserved_at_c0[0x10];
2613 u8 hairpin_peer_vhca[0x10];
2614
2615 u8 reserved_at_e0[0xa0];
2581 2616
2582 struct mlx5_ifc_wq_bits wq; 2617 struct mlx5_ifc_wq_bits wq;
2583}; 2618};
@@ -2616,7 +2651,12 @@ struct mlx5_ifc_nic_vport_context_bits {
2616 u8 event_on_mc_address_change[0x1]; 2651 u8 event_on_mc_address_change[0x1];
2617 u8 event_on_uc_address_change[0x1]; 2652 u8 event_on_uc_address_change[0x1];
2618 2653
2619 u8 reserved_at_40[0xf0]; 2654 u8 reserved_at_40[0xc];
2655
2656 u8 affiliation_criteria[0x4];
2657 u8 affiliated_vhca_id[0x10];
2658
2659 u8 reserved_at_60[0xd0];
2620 2660
2621 u8 mtu[0x10]; 2661 u8 mtu[0x10];
2622 2662
@@ -3259,7 +3299,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
3259 u8 op_mod[0x10]; 3299 u8 op_mod[0x10];
3260 3300
3261 u8 roce_address_index[0x10]; 3301 u8 roce_address_index[0x10];
3262 u8 reserved_at_50[0x10]; 3302 u8 reserved_at_50[0xc];
3303 u8 vhca_port_num[0x4];
3263 3304
3264 u8 reserved_at_60[0x20]; 3305 u8 reserved_at_60[0x20];
3265 3306
@@ -3879,7 +3920,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
3879 u8 op_mod[0x10]; 3920 u8 op_mod[0x10];
3880 3921
3881 u8 roce_address_index[0x10]; 3922 u8 roce_address_index[0x10];
3882 u8 reserved_at_50[0x10]; 3923 u8 reserved_at_50[0xc];
3924 u8 vhca_port_num[0x4];
3883 3925
3884 u8 reserved_at_60[0x20]; 3926 u8 reserved_at_60[0x20];
3885}; 3927};
@@ -5311,7 +5353,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
5311}; 5353};
5312 5354
5313struct mlx5_ifc_modify_nic_vport_field_select_bits { 5355struct mlx5_ifc_modify_nic_vport_field_select_bits {
5314 u8 reserved_at_0[0x14]; 5356 u8 reserved_at_0[0x12];
5357 u8 affiliation[0x1];
5358 u8 reserved_at_e[0x1];
5315 u8 disable_uc_local_lb[0x1]; 5359 u8 disable_uc_local_lb[0x1];
5316 u8 disable_mc_local_lb[0x1]; 5360 u8 disable_mc_local_lb[0x1];
5317 u8 node_guid[0x1]; 5361 u8 node_guid[0x1];
@@ -5532,6 +5576,7 @@ struct mlx5_ifc_init_hca_in_bits {
5532 u8 op_mod[0x10]; 5576 u8 op_mod[0x10];
5533 5577
5534 u8 reserved_at_40[0x40]; 5578 u8 reserved_at_40[0x40];
5579 u8 sw_owner_id[4][0x20];
5535}; 5580};
5536 5581
5537struct mlx5_ifc_init2rtr_qp_out_bits { 5582struct mlx5_ifc_init2rtr_qp_out_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 62af7512dabb..4778d41085d4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -473,6 +473,11 @@ struct mlx5_core_qp {
473 int pid; 473 int pid;
474}; 474};
475 475
476struct mlx5_core_dct {
477 struct mlx5_core_qp mqp;
478 struct completion drained;
479};
480
476struct mlx5_qp_path { 481struct mlx5_qp_path {
477 u8 fl_free_ar; 482 u8 fl_free_ar;
478 u8 rsvd3; 483 u8 rsvd3;
@@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
549 return radix_tree_lookup(&dev->priv.mkey_table.tree, key); 554 return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
550} 555}
551 556
557int mlx5_core_create_dct(struct mlx5_core_dev *dev,
558 struct mlx5_core_dct *qp,
559 u32 *in, int inlen);
552int mlx5_core_create_qp(struct mlx5_core_dev *dev, 560int mlx5_core_create_qp(struct mlx5_core_dev *dev,
553 struct mlx5_core_qp *qp, 561 struct mlx5_core_qp *qp,
554 u32 *in, 562 u32 *in,
@@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
558 struct mlx5_core_qp *qp); 566 struct mlx5_core_qp *qp);
559int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, 567int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
560 struct mlx5_core_qp *qp); 568 struct mlx5_core_qp *qp);
569int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
570 struct mlx5_core_dct *dct);
561int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 571int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
562 u32 *out, int outlen); 572 u32 *out, int outlen);
573int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
574 u32 *out, int outlen);
563 575
564int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, 576int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
565 u32 timeout_usec); 577 u32 timeout_usec);
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 88441f5ece25..7e8f281f8c00 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -75,4 +75,27 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
75 int inlen); 75 int inlen);
76void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); 76void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
77 77
78struct mlx5_hairpin_params {
79 u8 log_data_size;
80 u8 log_num_packets;
81 u16 q_counter;
82 int num_channels;
83};
84
85struct mlx5_hairpin {
86 struct mlx5_core_dev *func_mdev;
87 struct mlx5_core_dev *peer_mdev;
88
89 int num_channels;
90
91 u32 *rqn;
92 u32 *sqn;
93};
94
95struct mlx5_hairpin *
96mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
97 struct mlx5_core_dev *peer_mdev,
98 struct mlx5_hairpin_params *params);
99
100void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
78#endif /* __TRANSOBJ_H__ */ 101#endif /* __TRANSOBJ_H__ */
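
A minimal usage sketch for the new hairpin API, assuming two bound mlx5 devices; the parameter values and helper name are invented for illustration:

static struct mlx5_hairpin *example_hairpin_create(struct mlx5_core_dev *func_mdev,
						   struct mlx5_core_dev *peer_mdev)
{
	struct mlx5_hairpin_params params = {
		.log_data_size   = 16,   /* illustrative queue sizing */
		.log_num_packets = 6,
		.q_counter       = 0,
		.num_channels    = 1,
	};

	return mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
}

/* teardown: mlx5_core_hairpin_destroy(hp); */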
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aaa0bb9e7655..64e193e87394 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -116,4 +116,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
116 struct mlx5_hca_vport_context *req); 116 struct mlx5_hca_vport_context *req);
117int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); 117int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
118int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); 118int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
119
120int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
121 struct mlx5_core_dev *port_mdev);
122int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
119#endif /* __MLX5_VPORT_H__ */ 123#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ea818ff739cd..ad06d42adb1a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1312,8 +1312,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1312 unsigned long end, unsigned long floor, unsigned long ceiling); 1312 unsigned long end, unsigned long floor, unsigned long ceiling);
1313int copy_page_range(struct mm_struct *dst, struct mm_struct *src, 1313int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1314 struct vm_area_struct *vma); 1314 struct vm_area_struct *vma);
1315void unmap_mapping_range(struct address_space *mapping,
1316 loff_t const holebegin, loff_t const holelen, int even_cows);
1317int follow_pte_pmd(struct mm_struct *mm, unsigned long address, 1315int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1318 unsigned long *start, unsigned long *end, 1316 unsigned long *start, unsigned long *end,
1319 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); 1317 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
@@ -1324,12 +1322,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
1324int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 1322int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1325 void *buf, int len, int write); 1323 void *buf, int len, int write);
1326 1324
1327static inline void unmap_shared_mapping_range(struct address_space *mapping,
1328 loff_t const holebegin, loff_t const holelen)
1329{
1330 unmap_mapping_range(mapping, holebegin, holelen, 0);
1331}
1332
1333extern void truncate_pagecache(struct inode *inode, loff_t new); 1325extern void truncate_pagecache(struct inode *inode, loff_t new);
1334extern void truncate_setsize(struct inode *inode, loff_t newsize); 1326extern void truncate_setsize(struct inode *inode, loff_t newsize);
1335void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); 1327void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -1344,6 +1336,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
1344extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, 1336extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1345 unsigned long address, unsigned int fault_flags, 1337 unsigned long address, unsigned int fault_flags,
1346 bool *unlocked); 1338 bool *unlocked);
1339void unmap_mapping_pages(struct address_space *mapping,
1340 pgoff_t start, pgoff_t nr, bool even_cows);
1341void unmap_mapping_range(struct address_space *mapping,
1342 loff_t const holebegin, loff_t const holelen, int even_cows);
1347#else 1343#else
1348static inline int handle_mm_fault(struct vm_area_struct *vma, 1344static inline int handle_mm_fault(struct vm_area_struct *vma,
1349 unsigned long address, unsigned int flags) 1345 unsigned long address, unsigned int flags)
@@ -1360,10 +1356,20 @@ static inline int fixup_user_fault(struct task_struct *tsk,
1360 BUG(); 1356 BUG();
1361 return -EFAULT; 1357 return -EFAULT;
1362} 1358}
1359static inline void unmap_mapping_pages(struct address_space *mapping,
1360 pgoff_t start, pgoff_t nr, bool even_cows) { }
1361static inline void unmap_mapping_range(struct address_space *mapping,
1362 loff_t const holebegin, loff_t const holelen, int even_cows) { }
1363#endif 1363#endif
1364 1364
1365extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, 1365static inline void unmap_shared_mapping_range(struct address_space *mapping,
1366 unsigned int gup_flags); 1366 loff_t const holebegin, loff_t const holelen)
1367{
1368 unmap_mapping_range(mapping, holebegin, holelen, 0);
1369}
1370
1371extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
1372 void *buf, int len, unsigned int gup_flags);
1367extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, 1373extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1368 void *buf, int len, unsigned int gup_flags); 1374 void *buf, int len, unsigned int gup_flags);
1369extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, 1375extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
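
unmap_mapping_pages() works in units of page offsets, while the existing unmap_mapping_range() keeps its byte-offset interface. A short sketch relating the two; the wrapper name is hypothetical:

static void example_unmap(struct address_space *mapping, pgoff_t start, pgoff_t nr)
{
	/* New helper: page-offset based, even_cows == false */
	unmap_mapping_pages(mapping, start, nr, false);

	/* Roughly equivalent call through the byte-offset interface */
	unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
			    (loff_t)nr << PAGE_SHIFT, 0);
}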
@@ -2069,8 +2075,8 @@ static inline void zero_resv_unavail(void) {}
2069#endif 2075#endif
2070 2076
2071extern void set_dma_reserve(unsigned long new_dma_reserve); 2077extern void set_dma_reserve(unsigned long new_dma_reserve);
2072extern void memmap_init_zone(unsigned long, int, unsigned long, 2078extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2073 unsigned long, enum memmap_context); 2079 enum memmap_context, struct vmem_altmap *);
2074extern void setup_per_zone_wmarks(void); 2080extern void setup_per_zone_wmarks(void);
2075extern int __meminit init_per_zone_wmark_min(void); 2081extern int __meminit init_per_zone_wmark_min(void);
2076extern void mem_init(void); 2082extern void mem_init(void);
@@ -2538,7 +2544,8 @@ void sparse_mem_maps_populate_node(struct page **map_map,
2538 unsigned long map_count, 2544 unsigned long map_count,
2539 int nodeid); 2545 int nodeid);
2540 2546
2541struct page *sparse_mem_map_populate(unsigned long pnum, int nid); 2547struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
2548 struct vmem_altmap *altmap);
2542pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 2549pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2543p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); 2550p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
2544pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); 2551pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
@@ -2546,20 +2553,17 @@ pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2546pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); 2553pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2547void *vmemmap_alloc_block(unsigned long size, int node); 2554void *vmemmap_alloc_block(unsigned long size, int node);
2548struct vmem_altmap; 2555struct vmem_altmap;
2549void *__vmemmap_alloc_block_buf(unsigned long size, int node, 2556void *vmemmap_alloc_block_buf(unsigned long size, int node);
2550 struct vmem_altmap *altmap); 2557void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
2551static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
2552{
2553 return __vmemmap_alloc_block_buf(size, node, NULL);
2554}
2555
2556void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); 2558void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2557int vmemmap_populate_basepages(unsigned long start, unsigned long end, 2559int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2558 int node); 2560 int node);
2559int vmemmap_populate(unsigned long start, unsigned long end, int node); 2561int vmemmap_populate(unsigned long start, unsigned long end, int node,
2562 struct vmem_altmap *altmap);
2560void vmemmap_populate_print_last(void); 2563void vmemmap_populate_print_last(void);
2561#ifdef CONFIG_MEMORY_HOTPLUG 2564#ifdef CONFIG_MEMORY_HOTPLUG
2562void vmemmap_free(unsigned long start, unsigned long end); 2565void vmemmap_free(unsigned long start, unsigned long end,
2566 struct vmem_altmap *altmap);
2563#endif 2567#endif
2564void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, 2568void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2565 unsigned long nr_pages); 2569 unsigned long nr_pages);
@@ -2570,8 +2574,8 @@ enum mf_flags {
2570 MF_MUST_KILL = 1 << 2, 2574 MF_MUST_KILL = 1 << 2,
2571 MF_SOFT_OFFLINE = 1 << 3, 2575 MF_SOFT_OFFLINE = 1 << 3,
2572}; 2576};
2573extern int memory_failure(unsigned long pfn, int trapno, int flags); 2577extern int memory_failure(unsigned long pfn, int flags);
2574extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2578extern void memory_failure_queue(unsigned long pfn, int flags);
2575extern int unpoison_memory(unsigned long pfn); 2579extern int unpoison_memory(unsigned long pfn);
2576extern int get_hwpoison_page(struct page *page); 2580extern int get_hwpoison_page(struct page *page);
2577#define put_hwpoison_page(page) put_page(page) 2581#define put_hwpoison_page(page) put_page(page)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cfd0ac4e5e0e..fd1af6b9591d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -31,28 +31,56 @@ struct hmm;
31 * it to keep track of whatever it is we are using the page for at the 31 * it to keep track of whatever it is we are using the page for at the
32 * moment. Note that we have no way to track which tasks are using 32 * moment. Note that we have no way to track which tasks are using
33 * a page, though if it is a pagecache page, rmap structures can tell us 33 * a page, though if it is a pagecache page, rmap structures can tell us
34 * who is mapping it. 34 * who is mapping it. If you allocate the page using alloc_pages(), you
35 * can use some of the space in struct page for your own purposes.
35 * 36 *
36 * The objects in struct page are organized in double word blocks in 37 * Pages that were once in the page cache may be found under the RCU lock
37 * order to allows us to use atomic double word operations on portions 38 * even after they have been recycled to a different purpose. The page
38 * of struct page. That is currently only used by slub but the arrangement 39 * cache reads and writes some of the fields in struct page to pin the
39 * allows the use of atomic double word operations on the flags/mapping 40 * page before checking that it's still in the page cache. It is vital
40 * and lru list pointers also. 41 * that all users of struct page:
42 * 1. Use the first word as PageFlags.
43 * 2. Clear or preserve bit 0 of page->compound_head. It is used as
44 * PageTail for compound pages, and the page cache must not see false
45 * positives. Some users put a pointer here (guaranteed to be at least
46 * 4-byte aligned), other users avoid using the field altogether.
47 * 3. page->_refcount must either not be used, or must be used in such a
48 * way that other CPUs temporarily incrementing and then decrementing the
49 * refcount does not cause problems. On receiving the page from
50 * alloc_pages(), the refcount will be positive.
51 * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
52 *
53 * If you allocate pages of order > 0, you can use the fields in the struct
54 * page associated with each page, but bear in mind that the pages may have
55 * been inserted individually into the page cache, so you must use the above
56 * four fields in a compatible way for each struct page.
57 *
58 * SLUB uses cmpxchg_double() to atomically update its freelist and
59 * counters. That requires that freelist & counters be adjacent and
60 * double-word aligned. We align all struct pages to double-word
61 * boundaries, and ensure that 'freelist' is aligned within the
62 * struct.
41 */ 63 */
64#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
65#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
66#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
67#define _slub_counter_t unsigned long
68#else
69#define _slub_counter_t unsigned int
70#endif
71#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
72#define _struct_page_alignment
73#define _slub_counter_t unsigned int
74#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
75
42struct page { 76struct page {
43 /* First double word block */ 77 /* First double word block */
44 unsigned long flags; /* Atomic flags, some possibly 78 unsigned long flags; /* Atomic flags, some possibly
45 * updated asynchronously */ 79 * updated asynchronously */
46 union { 80 union {
47 struct address_space *mapping; /* If low bit clear, points to 81 /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
48 * inode address_space, or NULL. 82 struct address_space *mapping;
49 * If page mapped as anonymous 83
50 * memory, low bit is set, and
51 * it points to anon_vma object
52 * or KSM private structure. See
53 * PAGE_MAPPING_ANON and
54 * PAGE_MAPPING_KSM.
55 */
56 void *s_mem; /* slab first object */ 84 void *s_mem; /* slab first object */
57 atomic_t compound_mapcount; /* first tail page */ 85 atomic_t compound_mapcount; /* first tail page */
58 /* page_deferred_list().next -- second tail page */ 86 /* page_deferred_list().next -- second tail page */
@@ -66,40 +94,27 @@ struct page {
66 }; 94 };
67 95
68 union { 96 union {
69#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ 97 _slub_counter_t counters;
70 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) 98 unsigned int active; /* SLAB */
71 /* Used for cmpxchg_double in slub */ 99 struct { /* SLUB */
72 unsigned long counters; 100 unsigned inuse:16;
73#else 101 unsigned objects:15;
74 /* 102 unsigned frozen:1;
75 * Keep _refcount separate from slub cmpxchg_double data. 103 };
76 * As the rest of the double word is protected by slab_lock 104 int units; /* SLOB */
77 * but _refcount is not. 105
78 */ 106 struct { /* Page cache */
79 unsigned counters; 107 /*
80#endif 108 * Count of ptes mapped in mms, to show when
81 struct { 109 * page is mapped & limit reverse map searches.
110 *
111 * Extra information about page type may be
112 * stored here for pages that are never mapped,
113 * in which case the value MUST BE <= -2.
114 * See page-flags.h for more details.
115 */
116 atomic_t _mapcount;
82 117
83 union {
84 /*
85 * Count of ptes mapped in mms, to show when
86 * page is mapped & limit reverse map searches.
87 *
88 * Extra information about page type may be
89 * stored here for pages that are never mapped,
90 * in which case the value MUST BE <= -2.
91 * See page-flags.h for more details.
92 */
93 atomic_t _mapcount;
94
95 unsigned int active; /* SLAB */
96 struct { /* SLUB */
97 unsigned inuse:16;
98 unsigned objects:15;
99 unsigned frozen:1;
100 };
101 int units; /* SLOB */
102 };
103 /* 118 /*
104 * Usage count, *USE WRAPPER FUNCTION* when manual 119 * Usage count, *USE WRAPPER FUNCTION* when manual
105 * accounting. See page_ref.h 120 * accounting. See page_ref.h
@@ -109,8 +124,6 @@ struct page {
109 }; 124 };
110 125
111 /* 126 /*
112 * Third double word block
113 *
114 * WARNING: bit 0 of the first word encode PageTail(). That means 127 * WARNING: bit 0 of the first word encode PageTail(). That means
115 * the rest users of the storage space MUST NOT use the bit to 128 * the rest users of the storage space MUST NOT use the bit to
116 * avoid collision and false-positive PageTail(). 129 * avoid collision and false-positive PageTail().
@@ -145,19 +158,9 @@ struct page {
145 unsigned long compound_head; /* If bit zero is set */ 158 unsigned long compound_head; /* If bit zero is set */
146 159
147 /* First tail page only */ 160 /* First tail page only */
148#ifdef CONFIG_64BIT 161 unsigned char compound_dtor;
149 /* 162 unsigned char compound_order;
150 * On 64 bit system we have enough space in struct page 163 /* two/six bytes available here */
151 * to encode compound_dtor and compound_order with
152 * unsigned int. It can help compiler generate better or
153 * smaller code on some archtectures.
154 */
155 unsigned int compound_dtor;
156 unsigned int compound_order;
157#else
158 unsigned short int compound_dtor;
159 unsigned short int compound_order;
160#endif
161 }; 164 };
162 165
163#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS 166#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
@@ -171,15 +174,14 @@ struct page {
171#endif 174#endif
172 }; 175 };
173 176
174 /* Remainder is not double word aligned */
175 union { 177 union {
176 unsigned long private; /* Mapping-private opaque data: 178 /*
177 * usually used for buffer_heads 179 * Mapping-private opaque data:
178 * if PagePrivate set; used for 180 * Usually used for buffer_heads if PagePrivate
179 * swp_entry_t if PageSwapCache; 181 * Used for swp_entry_t if PageSwapCache
180 * indicates order in the buddy 182 * Indicates order in the buddy system if PageBuddy
181 * system if PG_buddy is set. 183 */
182 */ 184 unsigned long private;
183#if USE_SPLIT_PTE_PTLOCKS 185#if USE_SPLIT_PTE_PTLOCKS
184#if ALLOC_SPLIT_PTLOCKS 186#if ALLOC_SPLIT_PTLOCKS
185 spinlock_t *ptl; 187 spinlock_t *ptl;
@@ -212,15 +214,7 @@ struct page {
212#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 214#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
213 int _last_cpupid; 215 int _last_cpupid;
214#endif 216#endif
215} 217} _struct_page_alignment;
216/*
217 * The struct page can be forced to be double word aligned so that atomic ops
218 * on double words work. The SLUB allocator can make use of such a feature.
219 */
220#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
221 __aligned(2 * sizeof(unsigned long))
222#endif
223;
224 218
225#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) 219#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
226#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) 220#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index e7743eca1021..85146235231e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -324,6 +324,7 @@ struct mmc_host {
324#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ 324#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
325#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ 325#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
326#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ 326#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
327#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */
327#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ 328#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
328#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ 329#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
329#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */ 330#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
@@ -380,6 +381,7 @@ struct mmc_host {
380 unsigned int doing_retune:1; /* re-tuning in progress */ 381 unsigned int doing_retune:1; /* re-tuning in progress */
381 unsigned int retune_now:1; /* do re-tuning at next req */ 382 unsigned int retune_now:1; /* do re-tuning at next req */
382 unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ 383 unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
384 unsigned int use_blk_mq:1; /* use blk-mq */
383 385
384 int rescan_disable; /* disable card detection */ 386 int rescan_disable; /* disable card detection */
385 int rescan_entered; /* used with nonremovable devices */ 387 int rescan_entered; /* used with nonremovable devices */
@@ -422,9 +424,6 @@ struct mmc_host {
422 424
423 struct dentry *debugfs_root; 425 struct dentry *debugfs_root;
424 426
425 struct mmc_async_req *areq; /* active async req */
426 struct mmc_context_info context_info; /* async synchronization info */
427
428 /* Ongoing data transfer that allows commands during transfer */ 427 /* Ongoing data transfer that allows commands during transfer */
429 struct mmc_request *ongoing_mrq; 428 struct mmc_request *ongoing_mrq;
430 429
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 82f0d289f110..91f1ba0663c8 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -33,5 +33,6 @@ void mmc_gpio_set_cd_isr(struct mmc_host *host,
33 irqreturn_t (*isr)(int irq, void *dev_id)); 33 irqreturn_t (*isr)(int irq, void *dev_id));
34void mmc_gpiod_request_cd_irq(struct mmc_host *host); 34void mmc_gpiod_request_cd_irq(struct mmc_host *host);
35bool mmc_can_gpio_cd(struct mmc_host *host); 35bool mmc_can_gpio_cd(struct mmc_host *host);
36bool mmc_can_gpio_ro(struct mmc_host *host);
36 37
37#endif 38#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b25dc9db19fc..2d07a1ed5a31 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -2,6 +2,7 @@
2#ifndef _LINUX_MMU_NOTIFIER_H 2#ifndef _LINUX_MMU_NOTIFIER_H
3#define _LINUX_MMU_NOTIFIER_H 3#define _LINUX_MMU_NOTIFIER_H
4 4
5#include <linux/types.h>
5#include <linux/list.h> 6#include <linux/list.h>
6#include <linux/spinlock.h> 7#include <linux/spinlock.h>
7#include <linux/mm_types.h> 8#include <linux/mm_types.h>
@@ -10,6 +11,9 @@
10struct mmu_notifier; 11struct mmu_notifier;
11struct mmu_notifier_ops; 12struct mmu_notifier_ops;
12 13
14/* mmu_notifier_ops flags */
15#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
16
13#ifdef CONFIG_MMU_NOTIFIER 17#ifdef CONFIG_MMU_NOTIFIER
14 18
15/* 19/*
@@ -27,6 +31,15 @@ struct mmu_notifier_mm {
27 31
28struct mmu_notifier_ops { 32struct mmu_notifier_ops {
29 /* 33 /*
34 * Flags to specify behavior of callbacks for this MMU notifier.
 35 * Used to determine in which context an operation may be called.
36 *
37 * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
38 * block
39 */
40 int flags;
41
42 /*
30 * Called either by mmu_notifier_unregister or when the mm is 43 * Called either by mmu_notifier_unregister or when the mm is
31 * being destroyed by exit_mmap, always before all pages are 44 * being destroyed by exit_mmap, always before all pages are
32 * freed. This can run concurrently with other mmu notifier 45 * freed. This can run concurrently with other mmu notifier
@@ -137,6 +150,10 @@ struct mmu_notifier_ops {
137 * page. Pages will no longer be referenced by the linux 150 * page. Pages will no longer be referenced by the linux
138 * address space but may still be referenced by sptes until 151 * address space but may still be referenced by sptes until
139 * the last refcount is dropped. 152 * the last refcount is dropped.
153 *
 154 * If neither of these callbacks can block, and invalidate_range
 155 * cannot block either, mmu_notifier_ops.flags should have
156 * MMU_INVALIDATE_DOES_NOT_BLOCK set.
140 */ 157 */
141 void (*invalidate_range_start)(struct mmu_notifier *mn, 158 void (*invalidate_range_start)(struct mmu_notifier *mn,
142 struct mm_struct *mm, 159 struct mm_struct *mm,
@@ -159,12 +176,13 @@ struct mmu_notifier_ops {
159 * external TLB range needs to be flushed. For more in depth 176 * external TLB range needs to be flushed. For more in depth
160 * discussion on this see Documentation/vm/mmu_notifier.txt 177 * discussion on this see Documentation/vm/mmu_notifier.txt
161 * 178 *
162 * The invalidate_range() function is called under the ptl
163 * spin-lock and not allowed to sleep.
164 *
165 * Note that this function might be called with just a sub-range 179 * Note that this function might be called with just a sub-range
166 * of what was passed to invalidate_range_start()/end(), if 180 * of what was passed to invalidate_range_start()/end(), if
167 * called between those functions. 181 * called between those functions.
182 *
183 * If this callback cannot block, and invalidate_range_{start,end}
184 * cannot block, mmu_notifier_ops.flags should have
185 * MMU_INVALIDATE_DOES_NOT_BLOCK set.
168 */ 186 */
169 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, 187 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
170 unsigned long start, unsigned long end); 188 unsigned long start, unsigned long end);
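For a notifier whose invalidation callbacks never sleep, opting in is just a matter of setting the new flag when declaring the ops. A minimal sketch, assuming a hypothetical example_invalidate_range() callback that only does atomic work; the flags field, the flag value and the callback signature come from this header, everything named example_* is made up:

#include <linux/mmu_notifier.h>

/* Hypothetical callback: everything done here must be non-blocking. */
static void example_invalidate_range(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	/* e.g. kick a TLB shootdown register; no sleeping locks, no allocation */
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	/* Tell the core that the invalidate_range* callbacks never block. */
	.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
	.invalidate_range	= example_invalidate_range,
};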
@@ -218,6 +236,7 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
218 bool only_end); 236 bool only_end);
219extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, 237extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
220 unsigned long start, unsigned long end); 238 unsigned long start, unsigned long end);
239extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
221 240
222static inline void mmu_notifier_release(struct mm_struct *mm) 241static inline void mmu_notifier_release(struct mm_struct *mm)
223{ 242{
@@ -457,6 +476,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
457{ 476{
458} 477}
459 478
479static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
480{
481 return false;
482}
483
460static inline void mmu_notifier_mm_init(struct mm_struct *mm) 484static inline void mmu_notifier_mm_init(struct mm_struct *mm)
461{ 485{
462} 486}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 67f2e3c38939..7522a6987595 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1166,8 +1166,16 @@ extern unsigned long usemap_size(void);
1166 1166
1167/* 1167/*
1168 * We use the lower bits of the mem_map pointer to store 1168 * We use the lower bits of the mem_map pointer to store
1169 * a little bit of information. There should be at least 1169 * a little bit of information. The pointer is calculated
1170 * 3 bits here due to 32-bit alignment. 1170 * as mem_map - section_nr_to_pfn(pnum). The result is
1171 * aligned to the minimum alignment of the two values:
1172 * 1. All mem_map arrays are page-aligned.
1173 * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
1174 * lowest bits. PFN_SECTION_SHIFT is arch-specific
 1171 * 1175 * (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
 1172 * 1176 * worst combination is powerpc with 256k pages,
 1173 * 1177 * which results in PFN_SECTION_SHIFT equal to 6.
1178 * To sum it up, at least 6 bits are available.
1171 */ 1179 */
1172#define SECTION_MARKED_PRESENT (1UL<<0) 1180#define SECTION_MARKED_PRESENT (1UL<<0)
1173#define SECTION_HAS_MEM_MAP (1UL<<1) 1181#define SECTION_HAS_MEM_MAP (1UL<<1)
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index abb6dc2ebbf8..48fb2b43c35a 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -229,6 +229,12 @@ struct hda_device_id {
229 unsigned long driver_data; 229 unsigned long driver_data;
230}; 230};
231 231
232struct sdw_device_id {
233 __u16 mfg_id;
234 __u16 part_id;
235 kernel_ulong_t driver_data;
236};
237
232/* 238/*
233 * Struct used for matching a device 239 * Struct used for matching a device
234 */ 240 */
@@ -452,6 +458,19 @@ struct spi_device_id {
452 kernel_ulong_t driver_data; /* Data private to the driver */ 458 kernel_ulong_t driver_data; /* Data private to the driver */
453}; 459};
454 460
461/* SLIMbus */
462
463#define SLIMBUS_NAME_SIZE 32
464#define SLIMBUS_MODULE_PREFIX "slim:"
465
466struct slim_device_id {
467 __u16 manf_id, prod_code;
468 __u16 dev_index, instance;
469
470 /* Data private to the driver */
471 kernel_ulong_t driver_data;
472};
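Drivers then match on the new ID type through the usual device table; a hedged sketch (the codec name and the manufacturer/product codes below are placeholders, only the struct slim_device_id field order comes from this header):

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Placeholder IDs: { manf_id, prod_code, dev_index, instance } */
static const struct slim_device_id example_codec_slim_id[] = {
	{ 0x0123, 0x0001, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(slim, example_codec_slim_id);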
473
455#define SPMI_NAME_SIZE 32 474#define SPMI_NAME_SIZE 32
456#define SPMI_MODULE_PREFIX "spmi:" 475#define SPMI_MODULE_PREFIX "spmi:"
457 476
diff --git a/include/linux/module.h b/include/linux/module.h
index c69b49abe877..d44df9b2c131 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -19,6 +19,7 @@
19#include <linux/jump_label.h> 19#include <linux/jump_label.h>
20#include <linux/export.h> 20#include <linux/export.h>
21#include <linux/rbtree_latch.h> 21#include <linux/rbtree_latch.h>
22#include <linux/error-injection.h>
22 23
23#include <linux/percpu.h> 24#include <linux/percpu.h>
24#include <asm/module.h> 25#include <asm/module.h>
@@ -475,6 +476,11 @@ struct module {
475 ctor_fn_t *ctors; 476 ctor_fn_t *ctors;
476 unsigned int num_ctors; 477 unsigned int num_ctors;
477#endif 478#endif
479
480#ifdef CONFIG_FUNCTION_ERROR_INJECTION
481 struct error_injection_entry *ei_funcs;
482 unsigned int num_ei_funcs;
483#endif
478} ____cacheline_aligned __randomize_layout; 484} ____cacheline_aligned __randomize_layout;
479#ifndef MODULE_ARCH_INIT 485#ifndef MODULE_ARCH_INIT
480#define MODULE_ARCH_INIT {} 486#define MODULE_ARCH_INIT {}
@@ -485,7 +491,7 @@ extern struct mutex module_mutex;
485/* FIXME: It'd be nice to isolate modules during init, too, so they 491/* FIXME: It'd be nice to isolate modules during init, too, so they
486 aren't used before they (may) fail. But presently too much code 492 aren't used before they (may) fail. But presently too much code
487 (IDE & SCSI) require entry into the module during init.*/ 493 (IDE & SCSI) require entry into the module during init.*/
488static inline int module_is_live(struct module *mod) 494static inline bool module_is_live(struct module *mod)
489{ 495{
490 return mod->state != MODULE_STATE_GOING; 496 return mod->state != MODULE_STATE_GOING;
491} 497}
@@ -606,6 +612,9 @@ int ref_module(struct module *a, struct module *b);
606 __mod ? __mod->name : "kernel"; \ 612 __mod ? __mod->name : "kernel"; \
607}) 613})
608 614
615/* Dereference module function descriptor */
616void *dereference_module_function_descriptor(struct module *mod, void *ptr);
617
609/* For kallsyms to ask for address resolution. namebuf should be at 618/* For kallsyms to ask for address resolution. namebuf should be at
610 * least KSYM_NAME_LEN long: a pointer to namebuf is returned if 619 * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
611 * found, otherwise NULL. */ 620 * found, otherwise NULL. */
@@ -760,6 +769,13 @@ static inline bool is_module_sig_enforced(void)
760 return false; 769 return false;
761} 770}
762 771
772/* Dereference module function descriptor */
773static inline
774void *dereference_module_function_descriptor(struct module *mod, void *ptr)
775{
776 return ptr;
777}
778
763#endif /* CONFIG_MODULES */ 779#endif /* CONFIG_MODULES */
764 780
765#ifdef CONFIG_SYSFS 781#ifdef CONFIG_SYSFS
@@ -801,6 +817,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
801static inline void module_bug_cleanup(struct module *mod) {} 817static inline void module_bug_cleanup(struct module *mod) {}
802#endif /* CONFIG_GENERIC_BUG */ 818#endif /* CONFIG_GENERIC_BUG */
803 819
820#ifdef RETPOLINE
821extern bool retpoline_module_ok(bool has_retpoline);
822#else
823static inline bool retpoline_module_ok(bool has_retpoline)
824{
825 return true;
826}
827#endif
828
804#ifdef CONFIG_MODULE_SIG 829#ifdef CONFIG_MODULE_SIG
805static inline bool module_sig_ok(struct module *module) 830static inline bool module_sig_ok(struct module *module)
806{ 831{
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3aa56e3104bb..b5b43f94f311 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
270#define INVALIDATE_CACHED_RANGE(map, from, size) \ 270#define INVALIDATE_CACHED_RANGE(map, from, size) \
271 do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) 271 do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
272 272
273 273#define map_word_equal(map, val1, val2) \
274static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) 274({ \
275{ 275 int i, ret = 1; \
276 int i; 276 for (i = 0; i < map_words(map); i++) \
277 277 if ((val1).x[i] != (val2).x[i]) { \
278 for (i = 0; i < map_words(map); i++) { 278 ret = 0; \
279 if (val1.x[i] != val2.x[i]) 279 break; \
280 return 0; 280 } \
281 } 281 ret; \
282 282})
283 return 1; 283
284} 284#define map_word_and(map, val1, val2) \
285 285({ \
286static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) 286 map_word r; \
287{ 287 int i; \
288 map_word r; 288 for (i = 0; i < map_words(map); i++) \
289 int i; 289 r.x[i] = (val1).x[i] & (val2).x[i]; \
290 290 r; \
291 for (i = 0; i < map_words(map); i++) 291})
292 r.x[i] = val1.x[i] & val2.x[i]; 292
293 293#define map_word_clr(map, val1, val2) \
294 return r; 294({ \
295} 295 map_word r; \
296 296 int i; \
297static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) 297 for (i = 0; i < map_words(map); i++) \
298{ 298 r.x[i] = (val1).x[i] & ~(val2).x[i]; \
299 map_word r; 299 r; \
300 int i; 300})
301 301
302 for (i = 0; i < map_words(map); i++) 302#define map_word_or(map, val1, val2) \
303 r.x[i] = val1.x[i] & ~val2.x[i]; 303({ \
304 304 map_word r; \
305 return r; 305 int i; \
306} 306 for (i = 0; i < map_words(map); i++) \
307 307 r.x[i] = (val1).x[i] | (val2).x[i]; \
308static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) 308 r; \
309{ 309})
310 map_word r; 310
311 int i; 311#define map_word_andequal(map, val1, val2, val3) \
312 312({ \
313 for (i = 0; i < map_words(map); i++) 313 int i, ret = 1; \
314 r.x[i] = val1.x[i] | val2.x[i]; 314 for (i = 0; i < map_words(map); i++) { \
 315 315 if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
316 return r; 316 ret = 0; \
317} 317 break; \
318 318 } \
319static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3) 319 } \
320{ 320 ret; \
321 int i; 321})
322 322
323 for (i = 0; i < map_words(map); i++) { 323#define map_word_bitsset(map, val1, val2) \
324 if ((val1.x[i] & val2.x[i]) != val3.x[i]) 324({ \
325 return 0; 325 int i, ret = 0; \
326 } 326 for (i = 0; i < map_words(map); i++) { \
327 327 if ((val1).x[i] & (val2).x[i]) { \
328 return 1; 328 ret = 1; \
329} 329 break; \
330 330 } \
331static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) 331 } \
332{ 332 ret; \
333 int i; 333})
334
335 for (i = 0; i < map_words(map); i++) {
336 if (val1.x[i] & val2.x[i])
337 return 1;
338 }
339
340 return 0;
341}
342 334
343static inline map_word map_word_load(struct map_info *map, const void *ptr) 335static inline map_word map_word_load(struct map_info *map, const void *ptr)
344{ 336{
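The statement-expression form keeps call sites untouched. As a usage sketch, a busy-wait on a CFI-style status word could look like the fragment below; the polling policy and the status_ready value are illustrative, only map_read() and map_word_andequal() are taken from this header:

#include <linux/mtd/map.h>

/* Spin until (status & status_ready) == status_ready on every interleaved chip. */
static void example_wait_ready(struct map_info *map, unsigned long adr,
			       map_word status_ready)
{
	map_word status;

	do {
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, status_ready, status_ready));
}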
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd55bf14ad51..205ededccc60 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -489,6 +489,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
489 return do_div(sz, mtd->erasesize); 489 return do_div(sz, mtd->erasesize);
490} 490}
491 491
492/**
493 * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
494 * boundaries.
495 * @mtd: the MTD device this erase request applies on
496 * @req: the erase request to adjust
497 *
498 * This function will adjust @req->addr and @req->len to align them on
499 * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
500 */
501static inline void mtd_align_erase_req(struct mtd_info *mtd,
502 struct erase_info *req)
503{
504 u32 mod;
505
506 if (WARN_ON(!mtd->erasesize))
507 return;
508
509 mod = mtd_mod_by_eb(req->addr, mtd);
510 if (mod) {
511 req->addr -= mod;
512 req->len += mod;
513 }
514
515 mod = mtd_mod_by_eb(req->addr + req->len, mtd);
516 if (mod)
517 req->len += mtd->erasesize - mod;
518}
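As a worked example of the arithmetic above, assume a hypothetical device with a 128 KiB eraseblock (erasesize = 0x20000), an mtd pointer already in scope, and an erase request that starts 4 KiB past a block boundary:

struct erase_info req = {
	.addr = 0x21000,	/* 4 KiB into an eraseblock */
	.len  = 0x1000,
};

mtd_align_erase_req(mtd, &req);
/*
 * First pass:  mod = 0x1000 -> addr = 0x20000, len = 0x2000
 * Second pass: mod = 0x2000 -> len += 0x20000 - 0x2000
 * Result: req.addr == 0x20000, req.len == 0x20000, i.e. one full eraseblock.
 */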
519
492static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) 520static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
493{ 521{
494 if (mtd->writesize_shift) 522 if (mtd->writesize_shift)
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 749bb08c4772..56c5570aadbe 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -133,12 +133,6 @@ enum nand_ecc_algo {
133 */ 133 */
134#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) 134#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
135#define NAND_ECC_MAXIMIZE BIT(1) 135#define NAND_ECC_MAXIMIZE BIT(1)
136/*
137 * If your controller already sends the required NAND commands when
138 * reading or writing a page, then the framework is not supposed to
139 * send READ0 and SEQIN/PAGEPROG respectively.
140 */
141#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
142 136
143/* Bit mask for flags passed to do_nand_read_ecc */ 137/* Bit mask for flags passed to do_nand_read_ecc */
144#define NAND_GET_DEVICE 0x80 138#define NAND_GET_DEVICE 0x80
@@ -191,11 +185,6 @@ enum nand_ecc_algo {
191/* Non chip related options */ 185/* Non chip related options */
192/* This option skips the bbt scan during initialization. */ 186/* This option skips the bbt scan during initialization. */
193#define NAND_SKIP_BBTSCAN 0x00010000 187#define NAND_SKIP_BBTSCAN 0x00010000
194/*
195 * This option is defined if the board driver allocates its own buffers
196 * (e.g. because it needs them DMA-coherent).
197 */
198#define NAND_OWN_BUFFERS 0x00020000
199/* Chip may not exist, so silence any errors in scan */ 188/* Chip may not exist, so silence any errors in scan */
200#define NAND_SCAN_SILENT_NODEV 0x00040000 189#define NAND_SCAN_SILENT_NODEV 0x00040000
201/* 190/*
@@ -525,6 +514,8 @@ static const struct nand_ecc_caps __name = { \
525 * @postpad: padding information for syndrome based ECC generators 514 * @postpad: padding information for syndrome based ECC generators
526 * @options: ECC specific options (see NAND_ECC_XXX flags defined above) 515 * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
527 * @priv: pointer to private ECC control data 516 * @priv: pointer to private ECC control data
517 * @calc_buf: buffer for calculated ECC, size is oobsize.
518 * @code_buf: buffer for ECC read from flash, size is oobsize.
528 * @hwctl: function to control hardware ECC generator. Must only 519 * @hwctl: function to control hardware ECC generator. Must only
 529 * be provided if a hardware ECC is available 520 * be provided if a hardware ECC is available
530 * @calculate: function for ECC calculation or readback from ECC hardware 521 * @calculate: function for ECC calculation or readback from ECC hardware
@@ -575,6 +566,8 @@ struct nand_ecc_ctrl {
575 int postpad; 566 int postpad;
576 unsigned int options; 567 unsigned int options;
577 void *priv; 568 void *priv;
569 u8 *calc_buf;
570 u8 *code_buf;
578 void (*hwctl)(struct mtd_info *mtd, int mode); 571 void (*hwctl)(struct mtd_info *mtd, int mode);
579 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, 572 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
580 uint8_t *ecc_code); 573 uint8_t *ecc_code);
@@ -602,26 +595,6 @@ struct nand_ecc_ctrl {
602 int page); 595 int page);
603}; 596};
604 597
605static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
606{
607 return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
608}
609
610/**
611 * struct nand_buffers - buffer structure for read/write
612 * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
613 * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
614 * @databuf: buffer pointer for data, size is (page size + oobsize).
615 *
616 * Do not change the order of buffers. databuf and oobrbuf must be in
617 * consecutive order.
618 */
619struct nand_buffers {
620 uint8_t *ecccalc;
621 uint8_t *ecccode;
622 uint8_t *databuf;
623};
624
625/** 598/**
626 * struct nand_sdr_timings - SDR NAND chip timings 599 * struct nand_sdr_timings - SDR NAND chip timings
627 * 600 *
@@ -762,6 +735,350 @@ struct nand_manufacturer_ops {
762}; 735};
763 736
764/** 737/**
738 * struct nand_op_cmd_instr - Definition of a command instruction
739 * @opcode: the command to issue in one cycle
740 */
741struct nand_op_cmd_instr {
742 u8 opcode;
743};
744
745/**
746 * struct nand_op_addr_instr - Definition of an address instruction
747 * @naddrs: length of the @addrs array
748 * @addrs: array containing the address cycles to issue
749 */
750struct nand_op_addr_instr {
751 unsigned int naddrs;
752 const u8 *addrs;
753};
754
755/**
756 * struct nand_op_data_instr - Definition of a data instruction
757 * @len: number of data bytes to move
758 * @in: buffer to fill when reading from the NAND chip
759 * @out: buffer to read from when writing to the NAND chip
760 * @force_8bit: force 8-bit access
761 *
762 * Please note that "in" and "out" are inverted from the ONFI specification
 763 * and are from the controller perspective, so an "in" is a read from the NAND
 764 * chip while an "out" is a write to the NAND chip.
765 */
766struct nand_op_data_instr {
767 unsigned int len;
768 union {
769 void *in;
770 const void *out;
771 } buf;
772 bool force_8bit;
773};
774
775/**
776 * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
777 * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
778 */
779struct nand_op_waitrdy_instr {
780 unsigned int timeout_ms;
781};
782
783/**
784 * enum nand_op_instr_type - Definition of all instruction types
785 * @NAND_OP_CMD_INSTR: command instruction
786 * @NAND_OP_ADDR_INSTR: address instruction
787 * @NAND_OP_DATA_IN_INSTR: data in instruction
788 * @NAND_OP_DATA_OUT_INSTR: data out instruction
789 * @NAND_OP_WAITRDY_INSTR: wait ready instruction
790 */
791enum nand_op_instr_type {
792 NAND_OP_CMD_INSTR,
793 NAND_OP_ADDR_INSTR,
794 NAND_OP_DATA_IN_INSTR,
795 NAND_OP_DATA_OUT_INSTR,
796 NAND_OP_WAITRDY_INSTR,
797};
798
799/**
800 * struct nand_op_instr - Instruction object
801 * @type: the instruction type
 802 * @cmd/@addr/@data/@waitrdy: extra data associated with the instruction.
803 * You'll have to use the appropriate element
804 * depending on @type
805 * @delay_ns: delay the controller should apply after the instruction has been
 806 * issued on the bus. Most modern controllers have internal timing
807 * control logic, and in this case, the controller driver can ignore
808 * this field.
809 */
810struct nand_op_instr {
811 enum nand_op_instr_type type;
812 union {
813 struct nand_op_cmd_instr cmd;
814 struct nand_op_addr_instr addr;
815 struct nand_op_data_instr data;
816 struct nand_op_waitrdy_instr waitrdy;
817 } ctx;
818 unsigned int delay_ns;
819};
820
821/*
822 * Special handling must be done for the WAITRDY timeout parameter as it usually
823 * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
 824 * tBERS (during an erase), all of which are u64 values that cannot be
825 * divided by usual kernel macros and must be handled with the special
826 * DIV_ROUND_UP_ULL() macro.
827 */
828#define __DIVIDE(dividend, divisor) ({ \
829 sizeof(dividend) == sizeof(u32) ? \
830 DIV_ROUND_UP(dividend, divisor) : \
831 DIV_ROUND_UP_ULL(dividend, divisor); \
832 })
833#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
834#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
835
836#define NAND_OP_CMD(id, ns) \
837 { \
838 .type = NAND_OP_CMD_INSTR, \
839 .ctx.cmd.opcode = id, \
840 .delay_ns = ns, \
841 }
842
843#define NAND_OP_ADDR(ncycles, cycles, ns) \
844 { \
845 .type = NAND_OP_ADDR_INSTR, \
846 .ctx.addr = { \
847 .naddrs = ncycles, \
848 .addrs = cycles, \
849 }, \
850 .delay_ns = ns, \
851 }
852
853#define NAND_OP_DATA_IN(l, b, ns) \
854 { \
855 .type = NAND_OP_DATA_IN_INSTR, \
856 .ctx.data = { \
857 .len = l, \
858 .buf.in = b, \
859 .force_8bit = false, \
860 }, \
861 .delay_ns = ns, \
862 }
863
864#define NAND_OP_DATA_OUT(l, b, ns) \
865 { \
866 .type = NAND_OP_DATA_OUT_INSTR, \
867 .ctx.data = { \
868 .len = l, \
869 .buf.out = b, \
870 .force_8bit = false, \
871 }, \
872 .delay_ns = ns, \
873 }
874
875#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
876 { \
877 .type = NAND_OP_DATA_IN_INSTR, \
878 .ctx.data = { \
879 .len = l, \
880 .buf.in = b, \
881 .force_8bit = true, \
882 }, \
883 .delay_ns = ns, \
884 }
885
886#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
887 { \
888 .type = NAND_OP_DATA_OUT_INSTR, \
889 .ctx.data = { \
890 .len = l, \
891 .buf.out = b, \
892 .force_8bit = true, \
893 }, \
894 .delay_ns = ns, \
895 }
896
897#define NAND_OP_WAIT_RDY(tout_ms, ns) \
898 { \
899 .type = NAND_OP_WAITRDY_INSTR, \
900 .ctx.waitrdy.timeout_ms = tout_ms, \
901 .delay_ns = ns, \
902 }
903
904/**
905 * struct nand_subop - a sub operation
906 * @instrs: array of instructions
907 * @ninstrs: length of the @instrs array
908 * @first_instr_start_off: offset to start from for the first instruction
909 * of the sub-operation
910 * @last_instr_end_off: offset to end at (excluded) for the last instruction
911 * of the sub-operation
912 *
913 * Both @first_instr_start_off and @last_instr_end_off only apply to data or
914 * address instructions.
915 *
916 * When an operation cannot be handled as is by the NAND controller, it will
917 * be split by the parser into sub-operations which will be passed to the
918 * controller driver.
919 */
920struct nand_subop {
921 const struct nand_op_instr *instrs;
922 unsigned int ninstrs;
923 unsigned int first_instr_start_off;
924 unsigned int last_instr_end_off;
925};
926
927int nand_subop_get_addr_start_off(const struct nand_subop *subop,
928 unsigned int op_id);
929int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
930 unsigned int op_id);
931int nand_subop_get_data_start_off(const struct nand_subop *subop,
932 unsigned int op_id);
933int nand_subop_get_data_len(const struct nand_subop *subop,
934 unsigned int op_id);
935
936/**
937 * struct nand_op_parser_addr_constraints - Constraints for address instructions
938 * @maxcycles: maximum number of address cycles the controller can issue in a
939 * single step
940 */
941struct nand_op_parser_addr_constraints {
942 unsigned int maxcycles;
943};
944
945/**
946 * struct nand_op_parser_data_constraints - Constraints for data instructions
947 * @maxlen: maximum data length that the controller can handle in a single step
948 */
949struct nand_op_parser_data_constraints {
950 unsigned int maxlen;
951};
952
953/**
954 * struct nand_op_parser_pattern_elem - One element of a pattern
 955 * @type: the instruction type
956 * @optional: whether this element of the pattern is optional or mandatory
957 * @addr/@data: address or data constraint (number of cycles or data length)
958 */
959struct nand_op_parser_pattern_elem {
960 enum nand_op_instr_type type;
961 bool optional;
962 union {
963 struct nand_op_parser_addr_constraints addr;
964 struct nand_op_parser_data_constraints data;
965 } ctx;
966};
967
968#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
969 { \
970 .type = NAND_OP_CMD_INSTR, \
971 .optional = _opt, \
972 }
973
974#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
975 { \
976 .type = NAND_OP_ADDR_INSTR, \
977 .optional = _opt, \
978 .ctx.addr.maxcycles = _maxcycles, \
979 }
980
981#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
982 { \
983 .type = NAND_OP_DATA_IN_INSTR, \
984 .optional = _opt, \
985 .ctx.data.maxlen = _maxlen, \
986 }
987
988#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
989 { \
990 .type = NAND_OP_DATA_OUT_INSTR, \
991 .optional = _opt, \
992 .ctx.data.maxlen = _maxlen, \
993 }
994
995#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
996 { \
997 .type = NAND_OP_WAITRDY_INSTR, \
998 .optional = _opt, \
999 }
1000
1001/**
1002 * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
1003 * @elems: array of pattern elements
1004 * @nelems: number of pattern elements in @elems array
1005 * @exec: the function that will issue a sub-operation
1006 *
 1007 * A pattern is a list of elements, each element representing one instruction
 1008 * with its constraints. The pattern itself is used by the core to match NAND
 1009 * chip operations with NAND controller operations.
1010 * Once a match between a NAND controller operation pattern and a NAND chip
1011 * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
1012 * hook is called so that the controller driver can issue the operation on the
1013 * bus.
1014 *
1015 * Controller drivers should declare as many patterns as they support and pass
1016 * this list of patterns (created with the help of the following macro) to
1017 * the nand_op_parser_exec_op() helper.
1018 */
1019struct nand_op_parser_pattern {
1020 const struct nand_op_parser_pattern_elem *elems;
1021 unsigned int nelems;
1022 int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
1023};
1024
1025#define NAND_OP_PARSER_PATTERN(_exec, ...) \
1026 { \
1027 .exec = _exec, \
1028 .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
1029 .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
1030 sizeof(struct nand_op_parser_pattern_elem), \
1031 }
1032
1033/**
1034 * struct nand_op_parser - NAND controller operation parser descriptor
1035 * @patterns: array of supported patterns
1036 * @npatterns: length of the @patterns array
1037 *
1038 * The parser descriptor is just an array of supported patterns which will be
 1039 * iterated by nand_op_parser_exec_op() every time it tries to execute a
 1040 * NAND operation (or tries to determine if a specific operation is supported).
1041 *
1042 * It is worth mentioning that patterns will be tested in their declaration
1043 * order, and the first match will be taken, so it's important to order patterns
1044 * appropriately so that simple/inefficient patterns are placed at the end of
1045 * the list. Usually, this is where you put single instruction patterns.
1046 */
1047struct nand_op_parser {
1048 const struct nand_op_parser_pattern *patterns;
1049 unsigned int npatterns;
1050};
1051
1052#define NAND_OP_PARSER(...) \
1053 { \
1054 .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
1055 .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
1056 sizeof(struct nand_op_parser_pattern), \
1057 }
1058
1059/**
1060 * struct nand_operation - NAND operation descriptor
1061 * @instrs: array of instructions to execute
1062 * @ninstrs: length of the @instrs array
1063 *
1064 * The actual operation structure that will be passed to chip->exec_op().
1065 */
1066struct nand_operation {
1067 const struct nand_op_instr *instrs;
1068 unsigned int ninstrs;
1069};
1070
1071#define NAND_OPERATION(_instrs) \
1072 { \
1073 .instrs = _instrs, \
1074 .ninstrs = ARRAY_SIZE(_instrs), \
1075 }
1076
1077int nand_op_parser_exec_op(struct nand_chip *chip,
1078 const struct nand_op_parser *parser,
1079 const struct nand_operation *op, bool check_only);
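Tying the parser pieces together, a controller driver declares one pattern per operation shape it can issue and hands the whole table to nand_op_parser_exec_op() from its ->exec_op() hook. A minimal sketch follows; the example_* names, the 5 address cycles and the 512-byte data limit are arbitrary assumptions, not taken from any real driver:

static int example_exec_pattern(struct nand_chip *chip,
				const struct nand_subop *subop)
{
	/* Issue the matched CMD/ADDR/WAITRDY/DATA instructions on the bus. */
	return 0;
}

static const struct nand_op_parser example_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(example_exec_pattern,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &example_op_parser, op, check_only);
}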
1080
1081/**
765 * struct nand_chip - NAND Private Flash Chip Data 1082 * struct nand_chip - NAND Private Flash Chip Data
766 * @mtd: MTD device registered to the MTD framework 1083 * @mtd: MTD device registered to the MTD framework
767 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the 1084 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -787,10 +1104,13 @@ struct nand_manufacturer_ops {
787 * commands to the chip. 1104 * commands to the chip.
788 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on 1105 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
789 * ready. 1106 * ready.
1107 * @exec_op: controller specific method to execute NAND operations.
1108 * This method replaces ->cmdfunc(),
1109 * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
 1110 * ->waitfunc().
790 * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for 1111 * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
791 * setting the read-retry mode. Mostly needed for MLC NAND. 1112 * setting the read-retry mode. Mostly needed for MLC NAND.
792 * @ecc: [BOARDSPECIFIC] ECC control structure 1113 * @ecc: [BOARDSPECIFIC] ECC control structure
793 * @buffers: buffer structure for read/write
794 * @buf_align: minimum buffer alignment required by a platform 1114 * @buf_align: minimum buffer alignment required by a platform
795 * @hwcontrol: platform-specific hardware control structure 1115 * @hwcontrol: platform-specific hardware control structure
796 * @erase: [REPLACEABLE] erase function 1116 * @erase: [REPLACEABLE] erase function
@@ -830,6 +1150,7 @@ struct nand_manufacturer_ops {
830 * @numchips: [INTERN] number of physical chips 1150 * @numchips: [INTERN] number of physical chips
831 * @chipsize: [INTERN] the size of one chip for multichip arrays 1151 * @chipsize: [INTERN] the size of one chip for multichip arrays
832 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 1152 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
1153 * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
833 * @pagebuf: [INTERN] holds the pagenumber which is currently in 1154 * @pagebuf: [INTERN] holds the pagenumber which is currently in
834 * data_buf. 1155 * data_buf.
835 * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is 1156 * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
@@ -886,6 +1207,9 @@ struct nand_chip {
886 void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, 1207 void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
887 int page_addr); 1208 int page_addr);
888 int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); 1209 int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
1210 int (*exec_op)(struct nand_chip *chip,
1211 const struct nand_operation *op,
1212 bool check_only);
889 int (*erase)(struct mtd_info *mtd, int page); 1213 int (*erase)(struct mtd_info *mtd, int page);
890 int (*scan_bbt)(struct mtd_info *mtd); 1214 int (*scan_bbt)(struct mtd_info *mtd);
891 int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, 1215 int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -896,7 +1220,6 @@ struct nand_chip {
896 int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, 1220 int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
897 const struct nand_data_interface *conf); 1221 const struct nand_data_interface *conf);
898 1222
899
900 int chip_delay; 1223 int chip_delay;
901 unsigned int options; 1224 unsigned int options;
902 unsigned int bbt_options; 1225 unsigned int bbt_options;
@@ -908,6 +1231,7 @@ struct nand_chip {
908 int numchips; 1231 int numchips;
909 uint64_t chipsize; 1232 uint64_t chipsize;
910 int pagemask; 1233 int pagemask;
1234 u8 *data_buf;
911 int pagebuf; 1235 int pagebuf;
912 unsigned int pagebuf_bitflips; 1236 unsigned int pagebuf_bitflips;
913 int subpagesize; 1237 int subpagesize;
@@ -928,7 +1252,7 @@ struct nand_chip {
928 u16 max_bb_per_die; 1252 u16 max_bb_per_die;
929 u32 blocks_per_die; 1253 u32 blocks_per_die;
930 1254
931 struct nand_data_interface *data_interface; 1255 struct nand_data_interface data_interface;
932 1256
933 int read_retries; 1257 int read_retries;
934 1258
@@ -938,7 +1262,6 @@ struct nand_chip {
938 struct nand_hw_control *controller; 1262 struct nand_hw_control *controller;
939 1263
940 struct nand_ecc_ctrl ecc; 1264 struct nand_ecc_ctrl ecc;
941 struct nand_buffers *buffers;
942 unsigned long buf_align; 1265 unsigned long buf_align;
943 struct nand_hw_control hwcontrol; 1266 struct nand_hw_control hwcontrol;
944 1267
@@ -956,6 +1279,15 @@ struct nand_chip {
956 } manufacturer; 1279 } manufacturer;
957}; 1280};
958 1281
1282static inline int nand_exec_op(struct nand_chip *chip,
1283 const struct nand_operation *op)
1284{
1285 if (!chip->exec_op)
1286 return -ENOTSUPP;
1287
1288 return chip->exec_op(chip, op, false);
1289}
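On the consumer side, an operation is just an array of instructions wrapped in NAND_OPERATION() and passed to nand_exec_op(). A small sketch of a READ STATUS style access, using NAND_CMD_STATUS from this header; the zero delay_ns values are a simplification, real helpers derive them from the chip timings:

static int example_read_status(struct nand_chip *chip, u8 *status)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_STATUS, 0),
		NAND_OP_8BIT_DATA_IN(1, status, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	/* Returns -ENOTSUPP when the controller does not implement ->exec_op(). */
	return nand_exec_op(chip, &op);
}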
1290
959extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; 1291extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
960extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; 1292extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
961 1293
@@ -1225,8 +1557,7 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
1225 return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); 1557 return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
1226} 1558}
1227 1559
1228int onfi_init_data_interface(struct nand_chip *chip, 1560int onfi_fill_data_interface(struct nand_chip *chip,
1229 struct nand_data_interface *iface,
1230 enum nand_data_interface_type type, 1561 enum nand_data_interface_type type,
1231 int timing_mode); 1562 int timing_mode);
1232 1563
@@ -1269,8 +1600,6 @@ static inline int jedec_feature(struct nand_chip *chip)
1269 1600
1270/* get timing characteristics from ONFI timing mode. */ 1601/* get timing characteristics from ONFI timing mode. */
1271const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); 1602const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
1272/* get data interface from ONFI timing mode 0, used after reset. */
1273const struct nand_data_interface *nand_get_default_data_interface(void);
1274 1603
1275int nand_check_erased_ecc_chunk(void *data, int datalen, 1604int nand_check_erased_ecc_chunk(void *data, int datalen,
1276 void *ecc, int ecclen, 1605 void *ecc, int ecclen,
@@ -1316,9 +1645,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1316/* Reset and initialize a NAND device */ 1645/* Reset and initialize a NAND device */
1317int nand_reset(struct nand_chip *chip, int chipnr); 1646int nand_reset(struct nand_chip *chip, int chipnr);
1318 1647
1648/* NAND operation helpers */
1649int nand_reset_op(struct nand_chip *chip);
1650int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1651 unsigned int len);
1652int nand_status_op(struct nand_chip *chip, u8 *status);
1653int nand_exit_status_op(struct nand_chip *chip);
1654int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
1655int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1656 unsigned int offset_in_page, void *buf, unsigned int len);
1657int nand_change_read_column_op(struct nand_chip *chip,
1658 unsigned int offset_in_page, void *buf,
1659 unsigned int len, bool force_8bit);
1660int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1661 unsigned int offset_in_page, void *buf, unsigned int len);
1662int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1663 unsigned int offset_in_page, const void *buf,
1664 unsigned int len);
1665int nand_prog_page_end_op(struct nand_chip *chip);
1666int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1667 unsigned int offset_in_page, const void *buf,
1668 unsigned int len);
1669int nand_change_write_column_op(struct nand_chip *chip,
1670 unsigned int offset_in_page, const void *buf,
1671 unsigned int len, bool force_8bit);
1672int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1673 bool force_8bit);
1674int nand_write_data_op(struct nand_chip *chip, const void *buf,
1675 unsigned int len, bool force_8bit);
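These helpers are what vendor and ECC code are expected to use instead of open-coding command sequences. A hedged sketch of a raw page read built on top of them, with error handling trimmed; the shape is only modelled on a typical ->read_page_raw() implementation, not copied from one:

static int example_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 u8 *buf, int oob_required, int page)
{
	int ret;

	/* Issue READ0 plus address cycles and fetch the main data area. */
	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Continue the same read to pull in the OOB bytes if requested. */
	if (oob_required)
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false);

	return ret;
}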
1676
1319/* Free resources held by the NAND device */ 1677/* Free resources held by the NAND device */
1320void nand_cleanup(struct nand_chip *chip); 1678void nand_cleanup(struct nand_chip *chip);
1321 1679
1322/* Default extended ID decoding function */ 1680/* Default extended ID decoding function */
1323void nand_decode_ext_id(struct nand_chip *chip); 1681void nand_decode_ext_id(struct nand_chip *chip);
1682
1683/*
1684 * External helper for controller drivers that have to implement the WAITRDY
1685 * instruction and have no physical pin to check it.
1686 */
1687int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
1688
1324#endif /* __LINUX_MTD_RAWNAND_H */ 1689#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index d0c66a0975cf..de36969eb359 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -61,6 +61,7 @@
61#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ 61#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
62#define SPINOR_OP_RDCR 0x35 /* Read configuration register */ 62#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
63#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ 63#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
64#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
64 65
65/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ 66/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
66#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ 67#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -130,7 +131,10 @@
130#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ 131#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
131 132
132/* Flag Status Register bits */ 133/* Flag Status Register bits */
133#define FSR_READY BIT(7) 134#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
135#define FSR_E_ERR BIT(5) /* Erase operation status */
136#define FSR_P_ERR BIT(4) /* Program operation status */
137#define FSR_PT_ERR BIT(1) /* Protection error bit */
134 138
135/* Configuration Register bits. */ 139/* Configuration Register bits. */
136#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ 140#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
@@ -399,4 +403,10 @@ struct spi_nor_hwcaps {
399int spi_nor_scan(struct spi_nor *nor, const char *name, 403int spi_nor_scan(struct spi_nor *nor, const char *name,
400 const struct spi_nor_hwcaps *hwcaps); 404 const struct spi_nor_hwcaps *hwcaps);
401 405
406/**
 407 * spi_nor_restore() - restore the status of SPI NOR
408 * @nor: the spi_nor structure
409 */
410void spi_nor_restore(struct spi_nor *nor);
411
402#endif 412#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 153274f78402..f25c13423bd4 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -138,9 +138,9 @@ extern void __mutex_init(struct mutex *lock, const char *name,
138 * mutex_is_locked - is the mutex locked 138 * mutex_is_locked - is the mutex locked
139 * @lock: the mutex to be queried 139 * @lock: the mutex to be queried
140 * 140 *
141 * Returns 1 if the mutex is locked, 0 if unlocked. 141 * Returns true if the mutex is locked, false if unlocked.
142 */ 142 */
143static inline int mutex_is_locked(struct mutex *lock) 143static inline bool mutex_is_locked(struct mutex *lock)
144{ 144{
145 /* 145 /*
146 * XXX think about spin_is_locked 146 * XXX think about spin_is_locked
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index ea96d4c82be7..5fc6bb2fefad 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -1,13 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * mux/consumer.h - definitions for the multiplexer consumer interface 3 * mux/consumer.h - definitions for the multiplexer consumer interface
3 * 4 *
4 * Copyright (C) 2017 Axentia Technologies AB 5 * Copyright (C) 2017 Axentia Technologies AB
5 * 6 *
6 * Author: Peter Rosin <peda@axentia.se> 7 * Author: Peter Rosin <peda@axentia.se>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#ifndef _LINUX_MUX_CONSUMER_H 10#ifndef _LINUX_MUX_CONSUMER_H
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 35c3579c3304..627a2c6bc02d 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -1,13 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * mux/driver.h - definitions for the multiplexer driver interface 3 * mux/driver.h - definitions for the multiplexer driver interface
3 * 4 *
4 * Copyright (C) 2017 Axentia Technologies AB 5 * Copyright (C) 2017 Axentia Technologies AB
5 * 6 *
6 * Author: Peter Rosin <peda@axentia.se> 7 * Author: Peter Rosin <peda@axentia.se>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#ifndef _LINUX_MUX_DRIVER_H 10#ifndef _LINUX_MUX_DRIVER_H
diff --git a/include/linux/net.h b/include/linux/net.h
index caeb159abda5..91216b16feb7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -147,7 +147,7 @@ struct proto_ops {
147 int (*getname) (struct socket *sock, 147 int (*getname) (struct socket *sock,
148 struct sockaddr *addr, 148 struct sockaddr *addr,
149 int *sockaddr_len, int peer); 149 int *sockaddr_len, int peer);
150 unsigned int (*poll) (struct file *file, struct socket *sock, 150 __poll_t (*poll) (struct file *file, struct socket *sock,
151 struct poll_table_struct *wait); 151 struct poll_table_struct *wait);
152 int (*ioctl) (struct socket *sock, unsigned int cmd, 152 int (*ioctl) (struct socket *sock, unsigned int cmd,
153 unsigned long arg); 153 unsigned long arg);
@@ -306,7 +306,6 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
306 size_t size, int flags); 306 size_t size, int flags);
307int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, 307int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
308 size_t size, int flags); 308 size_t size, int flags);
309int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
310int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); 309int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
311 310
312/* Routine returns the IP overhead imposed by a (caller-protected) socket. */ 311/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
new file mode 100644
index 000000000000..bebeaad897cc
--- /dev/null
+++ b/include/linux/net_dim.h
@@ -0,0 +1,380 @@
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef NET_DIM_H
35#define NET_DIM_H
36
37#include <linux/module.h>
38
39struct net_dim_cq_moder {
40 u16 usec;
41 u16 pkts;
42 u8 cq_period_mode;
43};
44
45struct net_dim_sample {
46 ktime_t time;
47 u32 pkt_ctr;
48 u32 byte_ctr;
49 u16 event_ctr;
50};
51
52struct net_dim_stats {
53 int ppms; /* packets per msec */
54 int bpms; /* bytes per msec */
55 int epms; /* events per msec */
56};
57
58struct net_dim { /* Adaptive Moderation */
59 u8 state;
60 struct net_dim_stats prev_stats;
61 struct net_dim_sample start_sample;
62 struct work_struct work;
63 u8 profile_ix;
64 u8 mode;
65 u8 tune_state;
66 u8 steps_right;
67 u8 steps_left;
68 u8 tired;
69};
70
71enum {
72 NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
73 NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
74 NET_DIM_CQ_PERIOD_NUM_MODES
75};
76
77/* Adaptive moderation logic */
78enum {
79 NET_DIM_START_MEASURE,
80 NET_DIM_MEASURE_IN_PROGRESS,
81 NET_DIM_APPLY_NEW_PROFILE,
82};
83
84enum {
85 NET_DIM_PARKING_ON_TOP,
86 NET_DIM_PARKING_TIRED,
87 NET_DIM_GOING_RIGHT,
88 NET_DIM_GOING_LEFT,
89};
90
91enum {
92 NET_DIM_STATS_WORSE,
93 NET_DIM_STATS_SAME,
94 NET_DIM_STATS_BETTER,
95};
96
97enum {
98 NET_DIM_STEPPED,
99 NET_DIM_TOO_TIRED,
100 NET_DIM_ON_EDGE,
101};
102
103#define NET_DIM_PARAMS_NUM_PROFILES 5
104/* Adaptive moderation profiles */
105#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
106#define NET_DIM_DEF_PROFILE_CQE 1
107#define NET_DIM_DEF_PROFILE_EQE 1
108
 109/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
110#define NET_DIM_EQE_PROFILES { \
111 {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
112 {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
113 {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
114 {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
115 {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
116}
117
118#define NET_DIM_CQE_PROFILES { \
119 {2, 256}, \
120 {8, 128}, \
121 {16, 64}, \
122 {32, 64}, \
123 {64, 64} \
124}
125
126static const struct net_dim_cq_moder
127profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
128 NET_DIM_EQE_PROFILES,
129 NET_DIM_CQE_PROFILES,
130};
131
132static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode,
133 int ix)
134{
135 struct net_dim_cq_moder cq_moder;
136
137 cq_moder = profile[cq_period_mode][ix];
138 cq_moder.cq_period_mode = cq_period_mode;
139 return cq_moder;
140}
141
142static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode)
143{
144 int default_profile_ix;
145
146 if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
147 default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
148 else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
149 default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
150
151 return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
152}
153
154static inline bool net_dim_on_top(struct net_dim *dim)
155{
156 switch (dim->tune_state) {
157 case NET_DIM_PARKING_ON_TOP:
158 case NET_DIM_PARKING_TIRED:
159 return true;
160 case NET_DIM_GOING_RIGHT:
161 return (dim->steps_left > 1) && (dim->steps_right == 1);
162 default: /* NET_DIM_GOING_LEFT */
163 return (dim->steps_right > 1) && (dim->steps_left == 1);
164 }
165}
166
167static inline void net_dim_turn(struct net_dim *dim)
168{
169 switch (dim->tune_state) {
170 case NET_DIM_PARKING_ON_TOP:
171 case NET_DIM_PARKING_TIRED:
172 break;
173 case NET_DIM_GOING_RIGHT:
174 dim->tune_state = NET_DIM_GOING_LEFT;
175 dim->steps_left = 0;
176 break;
177 case NET_DIM_GOING_LEFT:
178 dim->tune_state = NET_DIM_GOING_RIGHT;
179 dim->steps_right = 0;
180 break;
181 }
182}
183
184static inline int net_dim_step(struct net_dim *dim)
185{
186 if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
187 return NET_DIM_TOO_TIRED;
188
189 switch (dim->tune_state) {
190 case NET_DIM_PARKING_ON_TOP:
191 case NET_DIM_PARKING_TIRED:
192 break;
193 case NET_DIM_GOING_RIGHT:
194 if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
195 return NET_DIM_ON_EDGE;
196 dim->profile_ix++;
197 dim->steps_right++;
198 break;
199 case NET_DIM_GOING_LEFT:
200 if (dim->profile_ix == 0)
201 return NET_DIM_ON_EDGE;
202 dim->profile_ix--;
203 dim->steps_left++;
204 break;
205 }
206
207 dim->tired++;
208 return NET_DIM_STEPPED;
209}
210
211static inline void net_dim_park_on_top(struct net_dim *dim)
212{
213 dim->steps_right = 0;
214 dim->steps_left = 0;
215 dim->tired = 0;
216 dim->tune_state = NET_DIM_PARKING_ON_TOP;
217}
218
219static inline void net_dim_park_tired(struct net_dim *dim)
220{
221 dim->steps_right = 0;
222 dim->steps_left = 0;
223 dim->tune_state = NET_DIM_PARKING_TIRED;
224}
225
226static inline void net_dim_exit_parking(struct net_dim *dim)
227{
228 dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT :
229 NET_DIM_GOING_RIGHT;
230 net_dim_step(dim);
231}
232
233#define IS_SIGNIFICANT_DIFF(val, ref) \
234 (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
235
236static inline int net_dim_stats_compare(struct net_dim_stats *curr,
237 struct net_dim_stats *prev)
238{
239 if (!prev->bpms)
240 return curr->bpms ? NET_DIM_STATS_BETTER :
241 NET_DIM_STATS_SAME;
242
243 if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
244 return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER :
245 NET_DIM_STATS_WORSE;
246
247 if (!prev->ppms)
248 return curr->ppms ? NET_DIM_STATS_BETTER :
249 NET_DIM_STATS_SAME;
250
251 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
252 return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER :
253 NET_DIM_STATS_WORSE;
254
255 if (!prev->epms)
256 return NET_DIM_STATS_SAME;
257
258 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
259 return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER :
260 NET_DIM_STATS_WORSE;
261
262 return NET_DIM_STATS_SAME;
263}
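/*
 * Worked example, not part of the file: with prev->bpms = 1000 and
 * curr->bpms = 1050, IS_SIGNIFICANT_DIFF() computes 100 * 50 / 1000 = 5,
 * which is not above 10, so the comparison falls through to ppms and then
 * epms. With curr->bpms = 1200 the difference is 20%, so the function
 * returns NET_DIM_STATS_BETTER right away. Note that the epms test is
 * inverted: fewer events per millisecond for the same traffic is better,
 * since it means fewer interrupts.
 */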
264
265static inline bool net_dim_decision(struct net_dim_stats *curr_stats,
266 struct net_dim *dim)
267{
268 int prev_state = dim->tune_state;
269 int prev_ix = dim->profile_ix;
270 int stats_res;
271 int step_res;
272
273 switch (dim->tune_state) {
274 case NET_DIM_PARKING_ON_TOP:
275 stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
276 if (stats_res != NET_DIM_STATS_SAME)
277 net_dim_exit_parking(dim);
278 break;
279
280 case NET_DIM_PARKING_TIRED:
281 dim->tired--;
282 if (!dim->tired)
283 net_dim_exit_parking(dim);
284 break;
285
286 case NET_DIM_GOING_RIGHT:
287 case NET_DIM_GOING_LEFT:
288 stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
289 if (stats_res != NET_DIM_STATS_BETTER)
290 net_dim_turn(dim);
291
292 if (net_dim_on_top(dim)) {
293 net_dim_park_on_top(dim);
294 break;
295 }
296
297 step_res = net_dim_step(dim);
298 switch (step_res) {
299 case NET_DIM_ON_EDGE:
300 net_dim_park_on_top(dim);
301 break;
302 case NET_DIM_TOO_TIRED:
303 net_dim_park_tired(dim);
304 break;
305 }
306
307 break;
308 }
309
310 if ((prev_state != NET_DIM_PARKING_ON_TOP) ||
311 (dim->tune_state != NET_DIM_PARKING_ON_TOP))
312 dim->prev_stats = *curr_stats;
313
314 return dim->profile_ix != prev_ix;
315}
316
317static inline void net_dim_sample(u16 event_ctr,
318 u64 packets,
319 u64 bytes,
320 struct net_dim_sample *s)
321{
322 s->time = ktime_get();
323 s->pkt_ctr = packets;
324 s->byte_ctr = bytes;
325 s->event_ctr = event_ctr;
326}
327
328#define NET_DIM_NEVENTS 64
329#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
330#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
331
332static inline void net_dim_calc_stats(struct net_dim_sample *start,
333 struct net_dim_sample *end,
334 struct net_dim_stats *curr_stats)
335{
336 /* u32 holds up to 71 minutes, should be enough */
337 u32 delta_us = ktime_us_delta(end->time, start->time);
338 u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
339 u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
340 start->byte_ctr);
341
342 if (!delta_us)
343 return;
344
345 curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
346 curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
347 curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
348 delta_us);
349}
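/*
 * Worked example, not part of the file: BIT_GAP() absorbs counter
 * wrap-around. With 32-bit packet counters, start->pkt_ctr = 0xfffffff0 and
 * end->pkt_ctr = 0x10 give ((0x10 - 0xfffffff0) + 2^32) & (2^32 - 1) = 0x20,
 * i.e. 32 packets. Over delta_us = 20 this yields
 * ppms = DIV_ROUND_UP(32 * 1000, 20) = 1600 packets per millisecond, and
 * epms = DIV_ROUND_UP(64 * 1000, 20) = 3200, since epms is derived from the
 * fixed NET_DIM_NEVENTS (64) budget that net_dim() waits for before
 * recomputing the stats.
 */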
350
351static inline void net_dim(struct net_dim *dim,
352 struct net_dim_sample end_sample)
353{
354 struct net_dim_stats curr_stats;
355 u16 nevents;
356
357 switch (dim->state) {
358 case NET_DIM_MEASURE_IN_PROGRESS:
359 nevents = BIT_GAP(BITS_PER_TYPE(u16),
360 end_sample.event_ctr,
361 dim->start_sample.event_ctr);
362 if (nevents < NET_DIM_NEVENTS)
363 break;
364 net_dim_calc_stats(&dim->start_sample, &end_sample,
365 &curr_stats);
366 if (net_dim_decision(&curr_stats, dim)) {
367 dim->state = NET_DIM_APPLY_NEW_PROFILE;
368 schedule_work(&dim->work);
369 break;
370 }
371 /* fall through */
372 case NET_DIM_START_MEASURE:
373 dim->state = NET_DIM_MEASURE_IN_PROGRESS;
374 break;
375 case NET_DIM_APPLY_NEW_PROFILE:
376 break;
377 }
378}
379
380#endif /* NET_DIM_H */
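For orientation, a minimal sketch of how a driver would feed this state machine from its NAPI poll loop and apply the result in the work handler. The example_* names and the ring layout are invented; only the net_dim_*() calls and NET_DIM_START_MEASURE come from the header above.

#include <linux/netdevice.h>
#include <linux/net_dim.h>

struct example_rx_ring {
	struct napi_struct napi;
	struct net_dim dim;	/* INIT_WORK(&dim.work, example_dim_work) at setup */
	u8 cq_period_mode;
	u16 events;
	u64 packets, bytes;
};

/* Hypothetical driver helpers, assumed to exist elsewhere in the driver: */
int example_clean_rx(struct example_rx_ring *ring, int budget);
void example_rearm_irq(struct example_rx_ring *ring);
void example_apply_moderation(struct example_rx_ring *ring,
			      struct net_dim_cq_moder moder);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_rx_ring *ring =
		container_of(napi, struct example_rx_ring, napi);
	int done = example_clean_rx(ring, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		struct net_dim_sample sample;

		/* Snapshot the ring counters; net_dim() decides whether a
		 * full NET_DIM_NEVENTS window has elapsed and, if the
		 * profile should change, schedules dim.work.
		 */
		net_dim_sample(ring->events, ring->packets, ring->bytes,
			       &sample);
		net_dim(&ring->dim, sample);
		example_rearm_irq(ring);
	}
	return done;
}

static void example_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct example_rx_ring *ring =
		container_of(dim, struct example_rx_ring, dim);

	/* Apply the newly chosen {usec, pkts} pair and restart measuring. */
	example_apply_moderation(ring,
		net_dim_get_profile(ring->cq_period_mode, dim->profile_ix));
	dim->state = NET_DIM_START_MEASURE;
}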
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index b1b0ca7ccb2b..db84c516bcfb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -78,6 +78,8 @@ enum {
78 NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ 78 NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
79 NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ 79 NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
80 80
81 NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
82
81 /* 83 /*
82 * Add your fresh new feature above and remember to update 84 * Add your fresh new feature above and remember to update
83 * netdev_features_strings[] in net/core/ethtool.c and maybe 85 * netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -97,6 +99,7 @@ enum {
97#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) 99#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
98#define NETIF_F_FSO __NETIF_F(FSO) 100#define NETIF_F_FSO __NETIF_F(FSO)
99#define NETIF_F_GRO __NETIF_F(GRO) 101#define NETIF_F_GRO __NETIF_F(GRO)
102#define NETIF_F_GRO_HW __NETIF_F(GRO_HW)
100#define NETIF_F_GSO __NETIF_F(GSO) 103#define NETIF_F_GSO __NETIF_F(GSO)
101#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) 104#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
102#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) 105#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
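A hedged sketch of how a driver could expose the new NETIF_F_GRO_HW flag through the usual feature plumbing; the example_* names are invented.

#include <linux/netdevice.h>

void example_hw_gro_config(void *priv, bool enable);	/* hypothetical HW hook */

/* Wired up as .ndo_set_features in the driver's netdev_ops. */
static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_GRO_HW)
		example_hw_gro_config(netdev_priv(dev),
				      !!(features & NETIF_F_GRO_HW));
	return 0;
}

static void example_setup_netdev(struct net_device *dev)
{
	dev->hw_features |= NETIF_F_GRO_HW;	/* user-toggleable via ethtool -K */
	dev->features    |= NETIF_F_GRO_HW;	/* enabled by default */
}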
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ef789e1d679e..5eef6c8e2741 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -44,6 +44,7 @@
44#include <net/dcbnl.h> 44#include <net/dcbnl.h>
45#endif 45#endif
46#include <net/netprio_cgroup.h> 46#include <net/netprio_cgroup.h>
47#include <net/xdp.h>
47 48
48#include <linux/netdev_features.h> 49#include <linux/netdev_features.h>
49#include <linux/neighbour.h> 50#include <linux/neighbour.h>
@@ -686,6 +687,7 @@ struct netdev_rx_queue {
686#endif 687#endif
687 struct kobject kobj; 688 struct kobject kobj;
688 struct net_device *dev; 689 struct net_device *dev;
690 struct xdp_rxq_info xdp_rxq;
689} ____cacheline_aligned_in_smp; 691} ____cacheline_aligned_in_smp;
690 692
691/* 693/*
@@ -778,6 +780,7 @@ enum tc_setup_type {
778 TC_SETUP_BLOCK, 780 TC_SETUP_BLOCK,
779 TC_SETUP_QDISC_CBS, 781 TC_SETUP_QDISC_CBS,
780 TC_SETUP_QDISC_RED, 782 TC_SETUP_QDISC_RED,
783 TC_SETUP_QDISC_PRIO,
781}; 784};
782 785
783/* These structures hold the attributes of bpf state that are being passed 786/* These structures hold the attributes of bpf state that are being passed
@@ -802,9 +805,11 @@ enum bpf_netdev_command {
802 BPF_OFFLOAD_VERIFIER_PREP, 805 BPF_OFFLOAD_VERIFIER_PREP,
803 BPF_OFFLOAD_TRANSLATE, 806 BPF_OFFLOAD_TRANSLATE,
804 BPF_OFFLOAD_DESTROY, 807 BPF_OFFLOAD_DESTROY,
808 BPF_OFFLOAD_MAP_ALLOC,
809 BPF_OFFLOAD_MAP_FREE,
805}; 810};
806 811
807struct bpf_ext_analyzer_ops; 812struct bpf_prog_offload_ops;
808struct netlink_ext_ack; 813struct netlink_ext_ack;
809 814
810struct netdev_bpf { 815struct netdev_bpf {
@@ -820,16 +825,22 @@ struct netdev_bpf {
820 struct { 825 struct {
821 u8 prog_attached; 826 u8 prog_attached;
822 u32 prog_id; 827 u32 prog_id;
828 /* flags with which program was installed */
829 u32 prog_flags;
823 }; 830 };
824 /* BPF_OFFLOAD_VERIFIER_PREP */ 831 /* BPF_OFFLOAD_VERIFIER_PREP */
825 struct { 832 struct {
826 struct bpf_prog *prog; 833 struct bpf_prog *prog;
827 const struct bpf_ext_analyzer_ops *ops; /* callee set */ 834 const struct bpf_prog_offload_ops *ops; /* callee set */
828 } verifier; 835 } verifier;
829 /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ 836 /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
830 struct { 837 struct {
831 struct bpf_prog *prog; 838 struct bpf_prog *prog;
832 } offload; 839 } offload;
840 /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
841 struct {
842 struct bpf_offloaded_map *offmap;
843 };
833 }; 844 };
834}; 845};
835 846
@@ -840,6 +851,7 @@ struct xfrmdev_ops {
840 void (*xdo_dev_state_free) (struct xfrm_state *x); 851 void (*xdo_dev_state_free) (struct xfrm_state *x);
841 bool (*xdo_dev_offload_ok) (struct sk_buff *skb, 852 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
842 struct xfrm_state *x); 853 struct xfrm_state *x);
854 void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
843}; 855};
844#endif 856#endif
845 857
@@ -1458,8 +1470,6 @@ enum netdev_priv_flags {
1458 * @base_addr: Device I/O address 1470 * @base_addr: Device I/O address
1459 * @irq: Device IRQ number 1471 * @irq: Device IRQ number
1460 * 1472 *
1461 * @carrier_changes: Stats to monitor carrier on<->off transitions
1462 *
1463 * @state: Generic network queuing layer state, see netdev_state_t 1473 * @state: Generic network queuing layer state, see netdev_state_t
1464 * @dev_list: The global list of network devices 1474 * @dev_list: The global list of network devices
1465 * @napi_list: List entry used for polling NAPI devices 1475 * @napi_list: List entry used for polling NAPI devices
@@ -1495,6 +1505,8 @@ enum netdev_priv_flags {
1495 * do not use this in drivers 1505 * do not use this in drivers
1496 * @rx_nohandler: nohandler dropped packets by core network on 1506 * @rx_nohandler: nohandler dropped packets by core network on
1497 * inactive devices, do not use this in drivers 1507 * inactive devices, do not use this in drivers
1508 * @carrier_up_count: Number of times the carrier has been up
1509 * @carrier_down_count: Number of times the carrier has been down
1498 * 1510 *
1499 * @wireless_handlers: List of functions to handle Wireless Extensions, 1511 * @wireless_handlers: List of functions to handle Wireless Extensions,
1500 * instead of ioctl, 1512 * instead of ioctl,
@@ -1669,8 +1681,6 @@ struct net_device {
1669 unsigned long base_addr; 1681 unsigned long base_addr;
1670 int irq; 1682 int irq;
1671 1683
1672 atomic_t carrier_changes;
1673
1674 /* 1684 /*
1675 * Some hardware also needs these fields (state,dev_list, 1685 * Some hardware also needs these fields (state,dev_list,
1676 * napi_list,unreg_list,close_list) but they are not 1686 * napi_list,unreg_list,close_list) but they are not
@@ -1708,6 +1718,10 @@ struct net_device {
1708 atomic_long_t tx_dropped; 1718 atomic_long_t tx_dropped;
1709 atomic_long_t rx_nohandler; 1719 atomic_long_t rx_nohandler;
1710 1720
1721 /* Stats to monitor link on/off, flapping */
1722 atomic_t carrier_up_count;
1723 atomic_t carrier_down_count;
1724
1711#ifdef CONFIG_WIRELESS_EXT 1725#ifdef CONFIG_WIRELESS_EXT
1712 const struct iw_handler_def *wireless_handlers; 1726 const struct iw_handler_def *wireless_handlers;
1713 struct iw_public_data *wireless_data; 1727 struct iw_public_data *wireless_data;
@@ -1724,7 +1738,7 @@ struct net_device {
1724 const struct ndisc_ops *ndisc_ops; 1738 const struct ndisc_ops *ndisc_ops;
1725#endif 1739#endif
1726 1740
1727#ifdef CONFIG_XFRM 1741#ifdef CONFIG_XFRM_OFFLOAD
1728 const struct xfrmdev_ops *xfrmdev_ops; 1742 const struct xfrmdev_ops *xfrmdev_ops;
1729#endif 1743#endif
1730 1744
@@ -1801,12 +1815,9 @@ struct net_device {
1801 /* Interface address info used in eth_type_trans() */ 1815 /* Interface address info used in eth_type_trans() */
1802 unsigned char *dev_addr; 1816 unsigned char *dev_addr;
1803 1817
1804#ifdef CONFIG_SYSFS
1805 struct netdev_rx_queue *_rx; 1818 struct netdev_rx_queue *_rx;
1806
1807 unsigned int num_rx_queues; 1819 unsigned int num_rx_queues;
1808 unsigned int real_num_rx_queues; 1820 unsigned int real_num_rx_queues;
1809#endif
1810 1821
1811 struct bpf_prog __rcu *xdp_prog; 1822 struct bpf_prog __rcu *xdp_prog;
1812 unsigned long gro_flush_timeout; 1823 unsigned long gro_flush_timeout;
@@ -2751,7 +2762,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
2751 return false; 2762 return false;
2752} 2763}
2753 2764
2754typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 2765typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
2766 int len, int size);
2755int register_gifconf(unsigned int family, gifconf_func_t *gifconf); 2767int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
2756static inline int unregister_gifconf(unsigned int family) 2768static inline int unregister_gifconf(unsigned int family)
2757{ 2769{
@@ -2791,7 +2803,9 @@ struct softnet_data {
2791 struct Qdisc *output_queue; 2803 struct Qdisc *output_queue;
2792 struct Qdisc **output_queue_tailp; 2804 struct Qdisc **output_queue_tailp;
2793 struct sk_buff *completion_queue; 2805 struct sk_buff *completion_queue;
2794 2806#ifdef CONFIG_XFRM_OFFLOAD
2807 struct sk_buff_head xfrm_backlog;
2808#endif
2795#ifdef CONFIG_RPS 2809#ifdef CONFIG_RPS
2796 /* input_queue_head should be written by cpu owning this struct, 2810 /* input_queue_head should be written by cpu owning this struct,
2797 * and only read by other cpus. Worth using a cache line. 2811 * and only read by other cpus. Worth using a cache line.
@@ -3214,6 +3228,12 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3214} 3228}
3215#endif 3229#endif
3216 3230
3231static inline struct netdev_rx_queue *
3232__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3233{
3234 return dev->_rx + rxq;
3235}
3236
3217#ifdef CONFIG_SYSFS 3237#ifdef CONFIG_SYSFS
3218static inline unsigned int get_netdev_rx_queue_index( 3238static inline unsigned int get_netdev_rx_queue_index(
3219 struct netdev_rx_queue *queue) 3239 struct netdev_rx_queue *queue)
@@ -3302,7 +3322,9 @@ int netdev_rx_handler_register(struct net_device *dev,
3302void netdev_rx_handler_unregister(struct net_device *dev); 3322void netdev_rx_handler_unregister(struct net_device *dev);
3303 3323
3304bool dev_valid_name(const char *name); 3324bool dev_valid_name(const char *name);
3305int dev_ioctl(struct net *net, unsigned int cmd, void __user *); 3325int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3326 bool *need_copyout);
3327int dev_ifconf(struct net *net, struct ifconf *, int);
3306int dev_ethtool(struct net *net, struct ifreq *); 3328int dev_ethtool(struct net *net, struct ifreq *);
3307unsigned int dev_get_flags(const struct net_device *); 3329unsigned int dev_get_flags(const struct net_device *);
3308int __dev_change_flags(struct net_device *, unsigned int flags); 3330int __dev_change_flags(struct net_device *, unsigned int flags);
@@ -3315,6 +3337,7 @@ int dev_get_alias(const struct net_device *, char *, size_t);
3315int dev_change_net_namespace(struct net_device *, struct net *, const char *); 3337int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3316int __dev_set_mtu(struct net_device *, int); 3338int __dev_set_mtu(struct net_device *, int);
3317int dev_set_mtu(struct net_device *, int); 3339int dev_set_mtu(struct net_device *, int);
3340int dev_change_tx_queue_len(struct net_device *, unsigned long);
3318void dev_set_group(struct net_device *, int); 3341void dev_set_group(struct net_device *, int);
3319int dev_set_mac_address(struct net_device *, struct sockaddr *); 3342int dev_set_mac_address(struct net_device *, struct sockaddr *);
3320int dev_change_carrier(struct net_device *, bool new_carrier); 3343int dev_change_carrier(struct net_device *, bool new_carrier);
@@ -3323,14 +3346,15 @@ int dev_get_phys_port_id(struct net_device *dev,
3323int dev_get_phys_port_name(struct net_device *dev, 3346int dev_get_phys_port_name(struct net_device *dev,
3324 char *name, size_t len); 3347 char *name, size_t len);
3325int dev_change_proto_down(struct net_device *dev, bool proto_down); 3348int dev_change_proto_down(struct net_device *dev, bool proto_down);
3326struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); 3349struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3327struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3350struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3328 struct netdev_queue *txq, int *ret); 3351 struct netdev_queue *txq, int *ret);
3329 3352
3330typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); 3353typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3331int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 3354int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3332 int fd, u32 flags); 3355 int fd, u32 flags);
3333u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id); 3356void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
3357 struct netdev_bpf *xdp);
3334 3358
3335int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3359int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3336int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 3360int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -4399,11 +4423,11 @@ do { \
4399 * file/line information and a backtrace. 4423 * file/line information and a backtrace.
4400 */ 4424 */
4401#define netdev_WARN(dev, format, args...) \ 4425#define netdev_WARN(dev, format, args...) \
4402 WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ 4426 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
4403 netdev_reg_state(dev), ##args) 4427 netdev_reg_state(dev), ##args)
4404 4428
4405#define netdev_WARN_ONCE(dev, condition, format, arg...) \ 4429#define netdev_WARN_ONCE(dev, format, args...) \
4406 WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev) \ 4430 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
4407 netdev_reg_state(dev), ##args) 4431 netdev_reg_state(dev), ##args)
4408 4432
4409/* netif printk helpers, similar to netdev_printk */ 4433/* netif printk helpers, similar to netdev_printk */
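With the single carrier_changes counter split into separate up/down counters above, the old aggregate can still be derived by summing the two; a small sketch (helper name invented):

static u32 example_carrier_changes(struct net_device *dev)
{
	return (u32)atomic_read(&dev->carrier_up_count) +
	       (u32)atomic_read(&dev->carrier_down_count);
}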
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b24e9b101651..85a1a0b32c66 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -67,6 +67,7 @@ struct nf_hook_ops {
67 struct net_device *dev; 67 struct net_device *dev;
68 void *priv; 68 void *priv;
69 u_int8_t pf; 69 u_int8_t pf;
70 bool nat_hook;
70 unsigned int hooknum; 71 unsigned int hooknum;
71 /* Hooks are ordered in ascending priority. */ 72 /* Hooks are ordered in ascending priority. */
72 int priority; 73 int priority;
@@ -77,17 +78,28 @@ struct nf_hook_entry {
77 void *priv; 78 void *priv;
78}; 79};
79 80
81struct nf_hook_entries_rcu_head {
82 struct rcu_head head;
83 void *allocation;
84};
85
80struct nf_hook_entries { 86struct nf_hook_entries {
81 u16 num_hook_entries; 87 u16 num_hook_entries;
82 /* padding */ 88 /* padding */
83 struct nf_hook_entry hooks[]; 89 struct nf_hook_entry hooks[];
84 90
85 /* trailer: pointers to original orig_ops of each hook. 91 /* trailer: pointers to original orig_ops of each hook,
86 * 92 * followed by rcu_head and scratch space used for freeing
87 * This is not part of struct nf_hook_entry since its only 93 * the structure via call_rcu.
88 * needed in slow path (hook register/unregister).
89 * 94 *
95 * This is not part of struct nf_hook_entry since it's
96 * needed in slow path (hook register/unregister):
90 * const struct nf_hook_ops *orig_ops[] 97 * const struct nf_hook_ops *orig_ops[]
98 *
99 * For the same reason, we store this at end -- it's
100 * only needed when a hook is deleted, not during
101 * packet path processing:
102 * struct nf_hook_entries_rcu_head head
91 */ 103 */
92}; 104};
93 105
@@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
184 struct net_device *indev, struct net_device *outdev, 196 struct net_device *indev, struct net_device *outdev,
185 int (*okfn)(struct net *, struct sock *, struct sk_buff *)) 197 int (*okfn)(struct net *, struct sock *, struct sk_buff *))
186{ 198{
187 struct nf_hook_entries *hook_head; 199 struct nf_hook_entries *hook_head = NULL;
188 int ret = 1; 200 int ret = 1;
189 201
190#ifdef HAVE_JUMP_LABEL 202#ifdef HAVE_JUMP_LABEL
@@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
195#endif 207#endif
196 208
197 rcu_read_lock(); 209 rcu_read_lock();
198 hook_head = rcu_dereference(net->nf.hooks[pf][hook]); 210 switch (pf) {
211 case NFPROTO_IPV4:
212 hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
213 break;
214 case NFPROTO_IPV6:
215 hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
216 break;
217 case NFPROTO_ARP:
218#ifdef CONFIG_NETFILTER_FAMILY_ARP
219 hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
220#endif
221 break;
222 case NFPROTO_BRIDGE:
223#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
224 hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
225#endif
226 break;
227#if IS_ENABLED(CONFIG_DECNET)
228 case NFPROTO_DECNET:
229 hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
230 break;
231#endif
232 default:
233 WARN_ON_ONCE(1);
234 break;
235 }
236
199 if (hook_head) { 237 if (hook_head) {
200 struct nf_hook_state state; 238 struct nf_hook_state state;
201 239
@@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
271struct flowi; 309struct flowi;
272struct nf_queue_entry; 310struct nf_queue_entry;
273 311
274struct nf_afinfo { 312__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
275 unsigned short family; 313 unsigned int dataoff, u_int8_t protocol,
276 __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, 314 unsigned short family);
277 unsigned int dataoff, u_int8_t protocol);
278 __sum16 (*checksum_partial)(struct sk_buff *skb,
279 unsigned int hook,
280 unsigned int dataoff,
281 unsigned int len,
282 u_int8_t protocol);
283 int (*route)(struct net *net, struct dst_entry **dst,
284 struct flowi *fl, bool strict);
285 void (*saveroute)(const struct sk_buff *skb,
286 struct nf_queue_entry *entry);
287 int (*reroute)(struct net *net, struct sk_buff *skb,
288 const struct nf_queue_entry *entry);
289 int route_key_size;
290};
291
292extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
293static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
294{
295 return rcu_dereference(nf_afinfo[family]);
296}
297
298static inline __sum16
299nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
300 u_int8_t protocol, unsigned short family)
301{
302 const struct nf_afinfo *afinfo;
303 __sum16 csum = 0;
304
305 rcu_read_lock();
306 afinfo = nf_get_afinfo(family);
307 if (afinfo)
308 csum = afinfo->checksum(skb, hook, dataoff, protocol);
309 rcu_read_unlock();
310 return csum;
311}
312
313static inline __sum16
314nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
315 unsigned int dataoff, unsigned int len,
316 u_int8_t protocol, unsigned short family)
317{
318 const struct nf_afinfo *afinfo;
319 __sum16 csum = 0;
320
321 rcu_read_lock();
322 afinfo = nf_get_afinfo(family);
323 if (afinfo)
324 csum = afinfo->checksum_partial(skb, hook, dataoff, len,
325 protocol);
326 rcu_read_unlock();
327 return csum;
328}
329 315
330int nf_register_afinfo(const struct nf_afinfo *afinfo); 316__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
331void nf_unregister_afinfo(const struct nf_afinfo *afinfo); 317 unsigned int dataoff, unsigned int len,
318 u_int8_t protocol, unsigned short family);
319int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
320 bool strict, unsigned short family);
321int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
332 322
333#include <net/flow.h> 323#include <net/flow.h>
334extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); 324extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
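With struct nf_afinfo gone, callers pass the address family straight to the new exported helpers instead of dereferencing per-family ops under RCU; an illustrative caller (function name invented):

#include <linux/netfilter.h>
#include <linux/in.h>

static __sum16 example_l4_csum(struct sk_buff *skb, unsigned int hook,
			       unsigned int thoff)
{
	/* Previously: nf_get_afinfo(NFPROTO_IPV4)->checksum(...) under RCU. */
	return nf_checksum(skb, hook, thoff, IPPROTO_TCP, NFPROTO_IPV4);
}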
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 8e42253e5d4d..34fc80f3eb90 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -122,6 +122,8 @@ struct ip_set_ext {
122 u64 bytes; 122 u64 bytes;
123 char *comment; 123 char *comment;
124 u32 timeout; 124 u32 timeout;
125 u8 packets_op;
126 u8 bytes_op;
125}; 127};
126 128
127struct ip_set; 129struct ip_set;
@@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
339 struct ip_set_ext *ext); 341 struct ip_set_ext *ext);
340extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, 342extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
341 const void *e, bool active); 343 const void *e, bool active);
344extern bool ip_set_match_extensions(struct ip_set *set,
345 const struct ip_set_ext *ext,
346 struct ip_set_ext *mext,
347 u32 flags, void *data);
342 348
343static inline int 349static inline int
344ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) 350ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
index bb6fba480118..3d33a2c3f39f 100644
--- a/include/linux/netfilter/ipset/ip_set_counter.h
+++ b/include/linux/netfilter/ipset/ip_set_counter.h
@@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter)
34 return (u64)atomic64_read(&(counter)->packets); 34 return (u64)atomic64_read(&(counter)->packets);
35} 35}
36 36
37static inline bool
38ip_set_match_counter(u64 counter, u64 match, u8 op)
39{
40 switch (op) {
41 case IPSET_COUNTER_NONE:
42 return true;
43 case IPSET_COUNTER_EQ:
44 return counter == match;
45 case IPSET_COUNTER_NE:
46 return counter != match;
47 case IPSET_COUNTER_LT:
48 return counter < match;
49 case IPSET_COUNTER_GT:
50 return counter > match;
51 }
52 return false;
53}
54
37static inline void 55static inline void
38ip_set_update_counter(struct ip_set_counter *counter, 56ip_set_update_counter(struct ip_set_counter *counter,
39 const struct ip_set_ext *ext, 57 const struct ip_set_ext *ext, u32 flags)
40 struct ip_set_ext *mext, u32 flags)
41{ 58{
42 if (ext->packets != ULLONG_MAX && 59 if (ext->packets != ULLONG_MAX &&
43 !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { 60 !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
44 ip_set_add_bytes(ext->bytes, counter); 61 ip_set_add_bytes(ext->bytes, counter);
45 ip_set_add_packets(ext->packets, counter); 62 ip_set_add_packets(ext->packets, counter);
46 } 63 }
47 if (flags & IPSET_FLAG_MATCH_COUNTERS) {
48 mext->packets = ip_set_get_packets(counter);
49 mext->bytes = ip_set_get_bytes(counter);
50 }
51} 64}
52 65
53static inline bool 66static inline bool
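An illustrative use of the new ip_set_match_counter() helper together with the packets_op/bytes_op fields added to struct ip_set_ext; it assumes the existing ip_set_get_packets()/ip_set_get_bytes() accessors, and the wrapper name is invented:

static bool example_counters_match(const struct ip_set_counter *counter,
				   const struct ip_set_ext *mext)
{
	return ip_set_match_counter(ip_set_get_packets(counter),
				    mext->packets, mext->packets_op) &&
	       ip_set_match_counter(ip_set_get_bytes(counter),
				    mext->bytes, mext->bytes_op);
}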
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 495ba4dd9da5..34551f8aaf9d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -67,8 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
67 * @ss: The nfnetlink subsystem ID 67 * @ss: The nfnetlink subsystem ID
68 * 68 *
69 * Return the value of the specified RCU-protected pointer, but omit 69 * Return the value of the specified RCU-protected pointer, but omit
70 * both the smp_read_barrier_depends() and the READ_ONCE(), because 70 * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
71 * caller holds the NFNL subsystem mutex.
72 */ 71 */
73#define nfnl_dereference(p, ss) \ 72#define nfnl_dereference(p, ss) \
74 rcu_dereference_protected(p, lockdep_nfnl_is_held(ss)) 73 rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 33f7530f96b9..1313b35c3ab7 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
320 320
321struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, 321struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
322 const char *name); 322 const char *name);
323struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
324 const char *name);
323void xt_table_unlock(struct xt_table *t); 325void xt_table_unlock(struct xt_table *t);
324 326
325int xt_proto_init(struct net *net, u_int8_t af); 327int xt_proto_init(struct net *net, u_int8_t af);
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index dc6111adea06..8dddfb151f00 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -4,7 +4,17 @@
4 4
5#include <uapi/linux/netfilter.h> 5#include <uapi/linux/netfilter.h>
6 6
7/* in/out/forward only */
8#define NF_ARP_NUMHOOKS 3
9
10/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
11#define NF_DN_NUMHOOKS 7
12
13#if IS_ENABLED(CONFIG_DECNET)
7/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ 14/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
8#define NF_MAX_HOOKS 8 15#define NF_MAX_HOOKS NF_DN_NUMHOOKS
16#else
17#define NF_MAX_HOOKS NF_INET_NUMHOOKS
18#endif
9 19
10#endif 20#endif
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 98c03b2462b5..b31dabfdb453 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,53 @@
6 6
7#include <uapi/linux/netfilter_ipv4.h> 7#include <uapi/linux/netfilter_ipv4.h>
8 8
9/* Extra routing may be needed on local out, as the QUEUE target never returns

10 * control to the table.
11 */
12struct ip_rt_info {
13 __be32 daddr;
14 __be32 saddr;
15 u_int8_t tos;
16 u_int32_t mark;
17};
18
9int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); 19int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
20
21struct nf_queue_entry;
22
23#ifdef CONFIG_INET
10__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, 24__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
11 unsigned int dataoff, u_int8_t protocol); 25 unsigned int dataoff, u_int8_t protocol);
26__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
27 unsigned int dataoff, unsigned int len,
28 u_int8_t protocol);
29int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
30 bool strict);
31int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
32#else
33static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
34 unsigned int dataoff, u_int8_t protocol)
35{
36 return 0;
37}
38static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
39 unsigned int hook,
40 unsigned int dataoff,
41 unsigned int len,
42 u_int8_t protocol)
43{
44 return 0;
45}
46static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
47 struct flowi *fl, bool strict)
48{
49 return -EOPNOTSUPP;
50}
51static inline int nf_ip_reroute(struct sk_buff *skb,
52 const struct nf_queue_entry *entry)
53{
54 return -EOPNOTSUPP;
55}
56#endif /* CONFIG_INET */
57
12#endif /*__LINUX_IP_NETFILTER_H*/ 58#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 47c6b04c28c0..288c597e75b3 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,6 +9,17 @@
9 9
10#include <uapi/linux/netfilter_ipv6.h> 10#include <uapi/linux/netfilter_ipv6.h>
11 11
12/* Extra routing may be needed on local out, as the QUEUE target never returns
13 * control to the table.
14 */
15struct ip6_rt_info {
16 struct in6_addr daddr;
17 struct in6_addr saddr;
18 u_int32_t mark;
19};
20
21struct nf_queue_entry;
22
12/* 23/*
13 * Hook functions for ipv6 to allow xt_* modules to be built-in even 24 * Hook functions for ipv6 to allow xt_* modules to be built-in even
14 * if IPv6 is a module. 25 * if IPv6 is a module.
@@ -19,6 +30,14 @@ struct nf_ipv6_ops {
19 void (*route_input)(struct sk_buff *skb); 30 void (*route_input)(struct sk_buff *skb);
20 int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, 31 int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
21 int (*output)(struct net *, struct sock *, struct sk_buff *)); 32 int (*output)(struct net *, struct sock *, struct sk_buff *));
33 __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
34 unsigned int dataoff, u_int8_t protocol);
35 __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
36 unsigned int dataoff, unsigned int len,
37 u_int8_t protocol);
38 int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
39 bool strict);
40 int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
22}; 41};
23 42
24#ifdef CONFIG_NETFILTER 43#ifdef CONFIG_NETFILTER
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 47adac640191..57ffaa20d564 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -457,7 +457,12 @@ enum lock_type4 {
457 457
458#define NFS4_DEBUG 1 458#define NFS4_DEBUG 1
459 459
460/* Index of predefined Linux client operations */ 460/*
461 * Index of predefined Linux client operations
462 *
463 * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
464 * append only to this enum when adding new client operations.
465 */
461 466
462enum { 467enum {
463 NFSPROC4_CLNT_NULL = 0, /* Unused */ 468 NFSPROC4_CLNT_NULL = 0, /* Unused */
@@ -480,7 +485,6 @@ enum {
480 NFSPROC4_CLNT_ACCESS, 485 NFSPROC4_CLNT_ACCESS,
481 NFSPROC4_CLNT_GETATTR, 486 NFSPROC4_CLNT_GETATTR,
482 NFSPROC4_CLNT_LOOKUP, 487 NFSPROC4_CLNT_LOOKUP,
483 NFSPROC4_CLNT_LOOKUPP,
484 NFSPROC4_CLNT_LOOKUP_ROOT, 488 NFSPROC4_CLNT_LOOKUP_ROOT,
485 NFSPROC4_CLNT_REMOVE, 489 NFSPROC4_CLNT_REMOVE,
486 NFSPROC4_CLNT_RENAME, 490 NFSPROC4_CLNT_RENAME,
@@ -500,7 +504,6 @@ enum {
500 NFSPROC4_CLNT_SECINFO, 504 NFSPROC4_CLNT_SECINFO,
501 NFSPROC4_CLNT_FSID_PRESENT, 505 NFSPROC4_CLNT_FSID_PRESENT,
502 506
503 /* nfs41 */
504 NFSPROC4_CLNT_EXCHANGE_ID, 507 NFSPROC4_CLNT_EXCHANGE_ID,
505 NFSPROC4_CLNT_CREATE_SESSION, 508 NFSPROC4_CLNT_CREATE_SESSION,
506 NFSPROC4_CLNT_DESTROY_SESSION, 509 NFSPROC4_CLNT_DESTROY_SESSION,
@@ -518,13 +521,14 @@ enum {
518 NFSPROC4_CLNT_BIND_CONN_TO_SESSION, 521 NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
519 NFSPROC4_CLNT_DESTROY_CLIENTID, 522 NFSPROC4_CLNT_DESTROY_CLIENTID,
520 523
521 /* nfs42 */
522 NFSPROC4_CLNT_SEEK, 524 NFSPROC4_CLNT_SEEK,
523 NFSPROC4_CLNT_ALLOCATE, 525 NFSPROC4_CLNT_ALLOCATE,
524 NFSPROC4_CLNT_DEALLOCATE, 526 NFSPROC4_CLNT_DEALLOCATE,
525 NFSPROC4_CLNT_LAYOUTSTATS, 527 NFSPROC4_CLNT_LAYOUTSTATS,
526 NFSPROC4_CLNT_CLONE, 528 NFSPROC4_CLNT_CLONE,
527 NFSPROC4_CLNT_COPY, 529 NFSPROC4_CLNT_COPY,
530
531 NFSPROC4_CLNT_LOOKUPP,
528}; 532};
529 533
530/* nfs41 types */ 534/* nfs41 types */
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
new file mode 100644
index 000000000000..b99bced39ac2
--- /dev/null
+++ b/include/linux/nospec.h
@@ -0,0 +1,72 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright(c) 2018 Linus Torvalds. All rights reserved.
3// Copyright(c) 2018 Alexei Starovoitov. All rights reserved.
4// Copyright(c) 2018 Intel Corporation. All rights reserved.
5
6#ifndef _LINUX_NOSPEC_H
7#define _LINUX_NOSPEC_H
8
9/**
10 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
11 * @index: array element index
12 * @size: number of elements in array
13 *
14 * When @index is out of bounds (@index >= @size), the sign bit will be
15 * set. Extend the sign bit to all bits and invert, giving a result of
16 * zero for an out of bounds index, or ~0 if within bounds [0, @size).
17 */
18#ifndef array_index_mask_nospec
19static inline unsigned long array_index_mask_nospec(unsigned long index,
20 unsigned long size)
21{
22 /*
23 * Warn developers about inappropriate array_index_nospec() usage.
24 *
25 * Even if the CPU speculates past the WARN_ONCE branch, the
26 * sign bit of @index is taken into account when generating the
27 * mask.
28 *
29 * This warning is compiled out when the compiler can infer that
30 * @index and @size are less than LONG_MAX.
31 */
32 if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
33 "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
34 return 0;
35
36 /*
37 * Always calculate and emit the mask even if the compiler
38 * thinks the mask is not needed. The compiler does not take
39 * into account the value of @index under speculation.
40 */
41 OPTIMIZER_HIDE_VAR(index);
42 return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
43}
44#endif
45
46/*
47 * array_index_nospec - sanitize an array index after a bounds check
48 *
49 * For a code sequence like:
50 *
51 * if (index < size) {
52 * index = array_index_nospec(index, size);
53 * val = array[index];
54 * }
55 *
56 * ...if the CPU speculates past the bounds check then
57 * array_index_nospec() will clamp the index within the range of [0,
58 * size).
59 */
60#define array_index_nospec(index, size) \
61({ \
62 typeof(index) _i = (index); \
63 typeof(size) _s = (size); \
64 unsigned long _mask = array_index_mask_nospec(_i, _s); \
65 \
66 BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
67 BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
68 \
69 _i &= _mask; \
70 _i; \
71})
72#endif /* _LINUX_NOSPEC_H */
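A typical caller, following the pattern described in the comment above; the function and table are illustrative only:

#include <linux/nospec.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static const int example_table[16] = { 0 };

int example_read_entry(unsigned long index)
{
	if (index >= ARRAY_SIZE(example_table))
		return -EINVAL;
	/* Clamp the index so it cannot be used out of bounds even if the
	 * CPU speculates past the check above.
	 */
	index = array_index_nospec(index, ARRAY_SIZE(example_table));
	return example_table[index];
}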
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index c308964777eb..181d16601dd9 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -71,6 +71,7 @@ struct pci_dev;
71 * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. 71 * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
72 * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. 72 * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
73 * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. 73 * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
74 * @NTB_TOPO_CROSSLINK: Connected via two symmetric switchecs
74 */ 75 */
75enum ntb_topo { 76enum ntb_topo {
76 NTB_TOPO_NONE = -1, 77 NTB_TOPO_NONE = -1,
@@ -79,6 +80,7 @@ enum ntb_topo {
79 NTB_TOPO_B2B_USD, 80 NTB_TOPO_B2B_USD,
80 NTB_TOPO_B2B_DSD, 81 NTB_TOPO_B2B_DSD,
81 NTB_TOPO_SWITCH, 82 NTB_TOPO_SWITCH,
83 NTB_TOPO_CROSSLINK,
82}; 84};
83 85
84static inline int ntb_topo_is_b2b(enum ntb_topo topo) 86static inline int ntb_topo_is_b2b(enum ntb_topo topo)
@@ -94,12 +96,13 @@ static inline int ntb_topo_is_b2b(enum ntb_topo topo)
94static inline char *ntb_topo_string(enum ntb_topo topo) 96static inline char *ntb_topo_string(enum ntb_topo topo)
95{ 97{
96 switch (topo) { 98 switch (topo) {
97 case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; 99 case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
98 case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; 100 case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
99 case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; 101 case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
100 case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; 102 case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
101 case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; 103 case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
102 case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; 104 case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
105 case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK";
103 } 106 }
104 return "NTB_TOPO_INVALID"; 107 return "NTB_TOPO_INVALID";
105} 108}
@@ -250,7 +253,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
250 * @msg_set_mask: See ntb_msg_set_mask(). 253 * @msg_set_mask: See ntb_msg_set_mask().
251 * @msg_clear_mask: See ntb_msg_clear_mask(). 254 * @msg_clear_mask: See ntb_msg_clear_mask().
252 * @msg_read: See ntb_msg_read(). 255 * @msg_read: See ntb_msg_read().
253 * @msg_write: See ntb_msg_write(). 256 * @peer_msg_write: See ntb_peer_msg_write().
254 */ 257 */
255struct ntb_dev_ops { 258struct ntb_dev_ops {
256 int (*port_number)(struct ntb_dev *ntb); 259 int (*port_number)(struct ntb_dev *ntb);
@@ -321,8 +324,8 @@ struct ntb_dev_ops {
321 int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits); 324 int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
322 int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits); 325 int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
323 int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits); 326 int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
324 int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg); 327 u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx);
325 int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg); 328 int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg);
326}; 329};
327 330
328static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) 331static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
@@ -384,7 +387,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
384 /* !ops->msg_set_mask == !ops->msg_count && */ 387 /* !ops->msg_set_mask == !ops->msg_count && */
385 /* !ops->msg_clear_mask == !ops->msg_count && */ 388 /* !ops->msg_clear_mask == !ops->msg_count && */
386 !ops->msg_read == !ops->msg_count && 389 !ops->msg_read == !ops->msg_count &&
387 !ops->msg_write == !ops->msg_count && 390 !ops->peer_msg_write == !ops->msg_count &&
388 1; 391 1;
389} 392}
390 393
@@ -764,7 +767,7 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
764 resource_size_t *size_align, 767 resource_size_t *size_align,
765 resource_size_t *size_max) 768 resource_size_t *size_max)
766{ 769{
767 if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx))) 770 if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx)))
768 return -ENOTCONN; 771 return -ENOTCONN;
769 772
770 return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, 773 return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
@@ -1459,31 +1462,29 @@ static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
1459} 1462}
1460 1463
1461/** 1464/**
1462 * ntb_msg_read() - read message register with specified index 1465 * ntb_msg_read() - read inbound message register with specified index
1463 * @ntb: NTB device context. 1466 * @ntb: NTB device context.
1464 * @midx: Message register index
1465 * @pidx: OUT - Port index of peer device a message retrieved from 1467 * @pidx: OUT - Port index of peer device a message retrieved from
1466 * @msg: OUT - Data 1468 * @midx: Message register index
1467 * 1469 *
1468 * Read data from the specified message register. Source port index of a 1470 * Read data from the specified message register. Source port index of a
1469 * message is retrieved as well. 1471 * message is retrieved as well.
1470 * 1472 *
1471 * Return: Zero on success, otherwise a negative error number. 1473 * Return: The value of the inbound message register.
1472 */ 1474 */
1473static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, 1475static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
1474 u32 *msg)
1475{ 1476{
1476 if (!ntb->ops->msg_read) 1477 if (!ntb->ops->msg_read)
1477 return -EINVAL; 1478 return ~(u32)0;
1478 1479
1479 return ntb->ops->msg_read(ntb, midx, pidx, msg); 1480 return ntb->ops->msg_read(ntb, pidx, midx);
1480} 1481}
1481 1482
1482/** 1483/**
1483 * ntb_msg_write() - write data to the specified message register 1484 * ntb_peer_msg_write() - write data to the specified peer message register
1484 * @ntb: NTB device context. 1485 * @ntb: NTB device context.
1485 * @midx: Message register index
1486 * @pidx: Port index of peer device a message being sent to 1486 * @pidx: Port index of peer device a message being sent to
1487 * @midx: Message register index
1487 * @msg: Data to send 1488 * @msg: Data to send
1488 * 1489 *
1489 * Send data to a specified peer device using the defined message register. 1490 * Send data to a specified peer device using the defined message register.
@@ -1492,13 +1493,13 @@ static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
1492 * 1493 *
1493 * Return: Zero on success, otherwise a negative error number. 1494 * Return: Zero on success, otherwise a negative error number.
1494 */ 1495 */
1495static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, 1496static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
1496 u32 msg) 1497 u32 msg)
1497{ 1498{
1498 if (!ntb->ops->msg_write) 1499 if (!ntb->ops->peer_msg_write)
1499 return -EINVAL; 1500 return -EINVAL;
1500 1501
1501 return ntb->ops->msg_write(ntb, midx, pidx, msg); 1502 return ntb->ops->peer_msg_write(ntb, pidx, midx, msg);
1502} 1503}
1503 1504
1504#endif 1505#endif
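An illustrative caller adapted to the reworked message API: ntb_msg_read() now returns the register value directly, reporting the source port through @pidx, and the outbound side is ntb_peer_msg_write(). The echo helper is invented.

#include <linux/ntb.h>

static void example_msg_echo(struct ntb_dev *ntb, int midx)
{
	int pidx;
	u32 msg = ntb_msg_read(ntb, &pidx, midx);

	/* ~0 is also what an unimplemented ->msg_read reports. */
	if (msg != ~(u32)0)
		ntb_peer_msg_write(ntb, pidx, midx, msg);
}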
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 11ce6b1117a8..6e8200215321 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -5,20 +5,36 @@
5 Originally written by Alan Cox. 5 Originally written by Alan Cox.
6 6
7 Hacked to death by C. Scott Ananian and David Huggins-Daines. 7 Hacked to death by C. Scott Ananian and David Huggins-Daines.
8 8*/
9 Some of the constants in here are from the corresponding 9
10 NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
11 rest of them on our own. */
12#ifndef LINUX_NUBUS_H 10#ifndef LINUX_NUBUS_H
13#define LINUX_NUBUS_H 11#define LINUX_NUBUS_H
14 12
13#include <linux/device.h>
15#include <asm/nubus.h> 14#include <asm/nubus.h>
16#include <uapi/linux/nubus.h> 15#include <uapi/linux/nubus.h>
17 16
17struct proc_dir_entry;
18struct seq_file;
19
20struct nubus_dir {
21 unsigned char *base;
22 unsigned char *ptr;
23 int done;
24 int mask;
25 struct proc_dir_entry *procdir;
26};
27
28struct nubus_dirent {
29 unsigned char *base;
30 unsigned char type;
31 __u32 data; /* Actually 24 bits used */
32 int mask;
33};
34
18struct nubus_board { 35struct nubus_board {
19 struct nubus_board* next; 36 struct device dev;
20 struct nubus_dev* first_dev; 37
21
22 /* Only 9-E actually exist, though 0-8 are also theoretically 38 /* Only 9-E actually exist, though 0-8 are also theoretically
23 possible, and 0 is a special case which represents the 39 possible, and 0 is a special case which represents the
24 motherboard and onboard peripherals (Ethernet, video) */ 40 motherboard and onboard peripherals (Ethernet, video) */
@@ -27,10 +43,10 @@ struct nubus_board {
27 char name[64]; 43 char name[64];
28 44
29 /* Format block */ 45 /* Format block */
30 unsigned char* fblock; 46 unsigned char *fblock;
31 /* Root directory (does *not* always equal fblock + doffset!) */ 47 /* Root directory (does *not* always equal fblock + doffset!) */
32 unsigned char* directory; 48 unsigned char *directory;
33 49
34 unsigned long slot_addr; 50 unsigned long slot_addr;
35 /* Offset to root directory (sometimes) */ 51 /* Offset to root directory (sometimes) */
36 unsigned long doffset; 52 unsigned long doffset;
@@ -41,15 +57,15 @@ struct nubus_board {
41 unsigned char rev; 57 unsigned char rev;
42 unsigned char format; 58 unsigned char format;
43 unsigned char lanes; 59 unsigned char lanes;
44};
45 60
46struct nubus_dev {
47 /* Next link in device list */
48 struct nubus_dev* next;
49 /* Directory entry in /proc/bus/nubus */ 61 /* Directory entry in /proc/bus/nubus */
50 struct proc_dir_entry* procdir; 62 struct proc_dir_entry *procdir;
63};
64
65struct nubus_rsrc {
66 struct list_head list;
51 67
52 /* The functional resource ID of this device */ 68 /* The functional resource ID */
53 unsigned char resid; 69 unsigned char resid;
54 /* These are mostly here for convenience; we could always read 70 /* These are mostly here for convenience; we could always read
55 them from the ROMs if we wanted to */ 71 them from the ROMs if we wanted to */
@@ -57,79 +73,116 @@ struct nubus_dev {
57 unsigned short type; 73 unsigned short type;
58 unsigned short dr_sw; 74 unsigned short dr_sw;
59 unsigned short dr_hw; 75 unsigned short dr_hw;
60 /* This is the device's name rather than the board's. 76
61 Sometimes they are different. Usually the board name is
62 more correct. */
63 char name[64];
64 /* MacOS driver (I kid you not) */
65 unsigned char* driver;
66 /* Actually this is an offset */
67 unsigned long iobase;
68 unsigned long iosize;
69 unsigned char flags, hwdevid;
70
71 /* Functional directory */ 77 /* Functional directory */
72 unsigned char* directory; 78 unsigned char *directory;
73 /* Much of our info comes from here */ 79 /* Much of our info comes from here */
74 struct nubus_board* board; 80 struct nubus_board *board;
81};
82
83/* This is all NuBus functional resources (used to find devices later on) */
84extern struct list_head nubus_func_rsrcs;
85
86struct nubus_driver {
87 struct device_driver driver;
88 int (*probe)(struct nubus_board *board);
89 int (*remove)(struct nubus_board *board);
75}; 90};
76 91
77/* This is all NuBus devices (used to find devices later on) */ 92extern struct bus_type nubus_bus_type;
78extern struct nubus_dev* nubus_devices;
79/* This is all NuBus cards */
80extern struct nubus_board* nubus_boards;
81 93
82/* Generic NuBus interface functions, modelled after the PCI interface */ 94/* Generic NuBus interface functions, modelled after the PCI interface */
83void nubus_scan_bus(void);
84#ifdef CONFIG_PROC_FS 95#ifdef CONFIG_PROC_FS
85extern void nubus_proc_init(void); 96void nubus_proc_init(void);
97struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
98struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
99 const struct nubus_dirent *ent,
100 struct nubus_board *board);
101void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
102 const struct nubus_dirent *ent,
103 unsigned int size);
104void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
105 const struct nubus_dirent *ent);
86#else 106#else
87static inline void nubus_proc_init(void) {} 107static inline void nubus_proc_init(void) {}
108static inline
109struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
110{ return NULL; }
111static inline
112struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
113 const struct nubus_dirent *ent,
114 struct nubus_board *board)
115{ return NULL; }
116static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
117 const struct nubus_dirent *ent,
118 unsigned int size) {}
119static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
120 const struct nubus_dirent *ent) {}
88#endif 121#endif
89int get_nubus_list(char *buf); 122
90int nubus_proc_attach_device(struct nubus_dev *dev); 123struct nubus_rsrc *nubus_first_rsrc_or_null(void);
91/* If we need more precision we can add some more of these */ 124struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from);
92struct nubus_dev* nubus_find_device(unsigned short category, 125
93 unsigned short type, 126#define for_each_func_rsrc(f) \
94 unsigned short dr_hw, 127 for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f))
95 unsigned short dr_sw, 128
96 const struct nubus_dev* from); 129#define for_each_board_func_rsrc(b, f) \
97struct nubus_dev* nubus_find_type(unsigned short category, 130 for_each_func_rsrc(f) if (f->board != b) {} else
98 unsigned short type,
99 const struct nubus_dev* from);
100/* Might have more than one device in a slot, you know... */
101struct nubus_dev* nubus_find_slot(unsigned int slot,
102 const struct nubus_dev* from);
103 131
104/* These are somewhat more NuBus-specific. They all return 0 for 132/* These are somewhat more NuBus-specific. They all return 0 for
105 success and -1 for failure, as you'd expect. */ 133 success and -1 for failure, as you'd expect. */
106 134
107/* The root directory which contains the board and functional 135/* The root directory which contains the board and functional
108 directories */ 136 directories */
109int nubus_get_root_dir(const struct nubus_board* board, 137int nubus_get_root_dir(const struct nubus_board *board,
110 struct nubus_dir* dir); 138 struct nubus_dir *dir);
111/* The board directory */ 139/* The board directory */
112int nubus_get_board_dir(const struct nubus_board* board, 140int nubus_get_board_dir(const struct nubus_board *board,
113 struct nubus_dir* dir); 141 struct nubus_dir *dir);
114/* The functional directory */ 142/* The functional directory */
115int nubus_get_func_dir(const struct nubus_dev* dev, 143int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir);
116 struct nubus_dir* dir);
117 144
118/* These work on any directory gotten via the above */ 145/* These work on any directory gotten via the above */
119int nubus_readdir(struct nubus_dir* dir, 146int nubus_readdir(struct nubus_dir *dir,
120 struct nubus_dirent* ent); 147 struct nubus_dirent *ent);
121int nubus_find_rsrc(struct nubus_dir* dir, 148int nubus_find_rsrc(struct nubus_dir *dir,
122 unsigned char rsrc_type, 149 unsigned char rsrc_type,
123 struct nubus_dirent* ent); 150 struct nubus_dirent *ent);
124int nubus_rewinddir(struct nubus_dir* dir); 151int nubus_rewinddir(struct nubus_dir *dir);
125 152
126/* Things to do with directory entries */ 153/* Things to do with directory entries */
127int nubus_get_subdir(const struct nubus_dirent* ent, 154int nubus_get_subdir(const struct nubus_dirent *ent,
128 struct nubus_dir* dir); 155 struct nubus_dir *dir);
129void nubus_get_rsrc_mem(void* dest, 156void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
130 const struct nubus_dirent *dirent, 157 unsigned int len);
131 int len); 158unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
132void nubus_get_rsrc_str(void* dest, 159 unsigned int len);
133 const struct nubus_dirent *dirent, 160void nubus_seq_write_rsrc_mem(struct seq_file *m,
134 int maxlen); 161 const struct nubus_dirent *dirent,
162 unsigned int len);
163unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
164
165/* Declarations relating to driver model objects */
166int nubus_bus_register(void);
167int nubus_device_register(struct nubus_board *board);
168int nubus_driver_register(struct nubus_driver *ndrv);
169void nubus_driver_unregister(struct nubus_driver *ndrv);
170int nubus_proc_show(struct seq_file *m, void *data);
171
172static inline void nubus_set_drvdata(struct nubus_board *board, void *data)
173{
174 dev_set_drvdata(&board->dev, data);
175}
176
177static inline void *nubus_get_drvdata(struct nubus_board *board)
178{
179 return dev_get_drvdata(&board->dev);
180}
181
182/* Returns a pointer to the "standard" slot space. */
183static inline void *nubus_slot_addr(int slot)
184{
185 return (void *)(0xF0000000 | (slot << 24));
186}
187
135#endif /* LINUX_NUBUS_H */ 188#endif /* LINUX_NUBUS_H */
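A minimal sketch of a client driver against the new NuBus driver model; the example names are invented, the registration calls are the ones declared above.

#include <linux/module.h>
#include <linux/nubus.h>

static int example_probe(struct nubus_board *board)
{
	/* Per-board driver state would be stashed with nubus_set_drvdata(). */
	nubus_set_drvdata(board, NULL);
	return 0;
}

static int example_remove(struct nubus_board *board)
{
	return 0;
}

static struct nubus_driver example_nubus_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
	},
};

module_driver(example_nubus_driver, nubus_driver_register,
	      nubus_driver_unregister);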
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index aea87f0d917b..4112e2bd747f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -124,14 +124,20 @@ enum {
124 124
125#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) 125#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
126#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) 126#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
127#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff) 127
128#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf) 128enum {
129 129 NVME_CMBSZ_SQS = 1 << 0,
130#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10) 130 NVME_CMBSZ_CQS = 1 << 1,
131#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8) 131 NVME_CMBSZ_LISTS = 1 << 2,
132#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4) 132 NVME_CMBSZ_RDS = 1 << 3,
133#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2) 133 NVME_CMBSZ_WDS = 1 << 4,
134#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1) 134
135 NVME_CMBSZ_SZ_SHIFT = 12,
136 NVME_CMBSZ_SZ_MASK = 0xfffff,
137
138 NVME_CMBSZ_SZU_SHIFT = 8,
139 NVME_CMBSZ_SZU_MASK = 0xf,
140};
135 141
136/* 142/*
137 * Submission and Completion Queue Entry Sizes for the NVM command set. 143 * Submission and Completion Queue Entry Sizes for the NVM command set.
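An illustrative decode of the controller memory buffer size using the new constants; per the CMBSZ register definition the size unit (SZU) scales by a factor of 16 starting at 4 KiB, so the multiplier is 1 << (12 + 4 * SZU). The helper name is invented.

#include <linux/nvme.h>

static u64 example_cmb_size_bytes(u32 cmbsz)
{
	u64 unit = 1ULL << (12 + 4 * ((cmbsz >> NVME_CMBSZ_SZU_SHIFT) &
				      NVME_CMBSZ_SZU_MASK));

	return unit * ((cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK);
}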
diff --git a/include/linux/of.h b/include/linux/of.h
index d3dea1d1e3a9..da1ee95241c1 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1#ifndef _LINUX_OF_H 2#ifndef _LINUX_OF_H
2#define _LINUX_OF_H 3#define _LINUX_OF_H
3/* 4/*
@@ -9,11 +10,6 @@
9 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. 10 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
10 * Updates for SPARC64 by David S. Miller 11 * Updates for SPARC64 by David S. Miller
11 * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. 12 * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */ 13 */
18#include <linux/types.h> 14#include <linux/types.h>
19#include <linux/bitops.h> 15#include <linux/bitops.h>
@@ -544,6 +540,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
544 540
545bool of_console_check(struct device_node *dn, char *name, int index); 541bool of_console_check(struct device_node *dn, char *name, int index);
546 542
543extern int of_cpu_node_to_id(struct device_node *np);
544
547#else /* CONFIG_OF */ 545#else /* CONFIG_OF */
548 546
549static inline void of_core_init(void) 547static inline void of_core_init(void)
@@ -916,6 +914,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
916{ 914{
917} 915}
918 916
917static inline int of_cpu_node_to_id(struct device_node *np)
918{
919 return -ENODEV;
920}
921
919#define of_match_ptr(_ptr) NULL 922#define of_match_ptr(_ptr) NULL
920#define of_match_node(_matches, _node) NULL 923#define of_match_node(_matches, _node) NULL
921#endif /* CONFIG_OF */ 924#endif /* CONFIG_OF */
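
of_cpu_node_to_id() resolves a CPU device node to its logical CPU number, and the !CONFIG_OF stub above keeps callers building by returning -ENODEV. A small illustrative caller; demo_report_cpu is made up:

#include <linux/of.h>
#include <linux/printk.h>

static void demo_report_cpu(struct device_node *np)
{
        int cpu = of_cpu_node_to_id(np);

        if (cpu < 0)            /* -ENODEV: not a recognised CPU node */
                pr_info("%pOF: no matching logical CPU\n", np);
        else
                pr_info("%pOF: logical CPU %d\n", np, cpu);
}
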
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index b90d8ec57c1f..fd706cdf255c 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -1,13 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * OF helpers for DMA request / controller 3 * OF helpers for DMA request / controller
3 * 4 *
4 * Based on of_gpio.h 5 * Based on of_gpio.h
5 * 6 *
6 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ 7 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#ifndef __LINUX_OF_DMA_H 10#ifndef __LINUX_OF_DMA_H
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 013c5418aeec..b9cd9ebdf9b9 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Definitions for working with the Flattened Device Tree data format 3 * Definitions for working with the Flattened Device Tree data format
3 * 4 *
4 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp 5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
5 * benh@kernel.crashing.org 6 * benh@kernel.crashing.org
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 */ 7 */
11 8
12#ifndef _LINUX_OF_FDT_H 9#ifndef _LINUX_OF_FDT_H
@@ -47,6 +44,12 @@ extern void *initial_boot_params;
47extern char __dtb_start[]; 44extern char __dtb_start[];
48extern char __dtb_end[]; 45extern char __dtb_end[];
49 46
47/* Other Prototypes */
48extern u64 of_flat_dt_translate_address(unsigned long node);
49extern void of_fdt_limit_memory(int limit);
50#endif /* CONFIG_OF_FLATTREE */
51
52#ifdef CONFIG_OF_EARLY_FLATTREE
50/* For scanning the flat device-tree at boot time */ 53/* For scanning the flat device-tree at boot time */
51extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, 54extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
52 int depth, void *data), 55 int depth, void *data),
@@ -77,7 +80,6 @@ extern void early_init_dt_add_memory_arch(u64 base, u64 size);
77extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); 80extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
78extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, 81extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
79 bool no_map); 82 bool no_map);
80extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
81extern u64 dt_mem_next_cell(int s, const __be32 **cellp); 83extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
82 84
83/* Early flat tree scan hooks */ 85/* Early flat tree scan hooks */
@@ -97,16 +99,14 @@ extern void unflatten_device_tree(void);
97extern void unflatten_and_copy_device_tree(void); 99extern void unflatten_and_copy_device_tree(void);
98extern void early_init_devtree(void *); 100extern void early_init_devtree(void *);
99extern void early_get_first_memblock_info(void *, phys_addr_t *); 101extern void early_get_first_memblock_info(void *, phys_addr_t *);
100extern u64 of_flat_dt_translate_address(unsigned long node); 102#else /* CONFIG_OF_EARLY_FLATTREE */
101extern void of_fdt_limit_memory(int limit);
102#else /* CONFIG_OF_FLATTREE */
103static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; } 103static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
104static inline void early_init_fdt_scan_reserved_mem(void) {} 104static inline void early_init_fdt_scan_reserved_mem(void) {}
105static inline void early_init_fdt_reserve_self(void) {} 105static inline void early_init_fdt_reserve_self(void) {}
106static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } 106static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
107static inline void unflatten_device_tree(void) {} 107static inline void unflatten_device_tree(void) {}
108static inline void unflatten_and_copy_device_tree(void) {} 108static inline void unflatten_and_copy_device_tree(void) {}
109#endif /* CONFIG_OF_FLATTREE */ 109#endif /* CONFIG_OF_EARLY_FLATTREE */
110 110
111#endif /* __ASSEMBLY__ */ 111#endif /* __ASSEMBLY__ */
112#endif /* _LINUX_OF_FDT_H */ 112#endif /* _LINUX_OF_FDT_H */
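
The flat-tree scan helpers now sit under CONFIG_OF_EARLY_FLATTREE rather than CONFIG_OF_FLATTREE. A sketch of the callback shape of_scan_flat_dt() expects, modelled on the usual /chosen handling; demo_scan_chosen is illustrative only:

#include <linux/of_fdt.h>
#include <linux/init.h>
#include <linux/string.h>

static int __init demo_scan_chosen(unsigned long node, const char *uname,
                                   int depth, void *data)
{
        const char *args;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;                       /* keep walking the tree */

        args = of_get_flat_dt_prop(node, "bootargs", NULL);
        if (args)
                *(const char **)data = args;
        return 1;                               /* non-zero stops the scan */
}

/* Typical call: of_scan_flat_dt(demo_scan_chosen, &cmdline); */
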
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 1fe205582111..163b79ecd01a 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -1,14 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * OF helpers for the GPIO API 3 * OF helpers for the GPIO API
3 * 4 *
4 * Copyright (c) 2007-2008 MontaVista Software, Inc. 5 * Copyright (c) 2007-2008 MontaVista Software, Inc.
5 * 6 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com> 7 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */ 8 */
13 9
14#ifndef __LINUX_OF_GPIO_H 10#ifndef __LINUX_OF_GPIO_H
@@ -31,7 +27,7 @@ enum of_gpio_flags {
31 OF_GPIO_ACTIVE_LOW = 0x1, 27 OF_GPIO_ACTIVE_LOW = 0x1,
32 OF_GPIO_SINGLE_ENDED = 0x2, 28 OF_GPIO_SINGLE_ENDED = 0x2,
33 OF_GPIO_OPEN_DRAIN = 0x4, 29 OF_GPIO_OPEN_DRAIN = 0x4,
34 OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8, 30 OF_GPIO_TRANSITORY = 0x8,
35}; 31};
36 32
37#ifdef CONFIG_OF_GPIO 33#ifdef CONFIG_OF_GPIO
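
OF_GPIO_SLEEP_MAY_LOSE_VALUE is renamed to the broader OF_GPIO_TRANSITORY, covering lines that may lose state in sleep or when relinquished. A hedged sketch of how a consumer could test it; the "reset-gpios" property name is just an example:

#include <linux/of_gpio.h>

static int demo_get_reset_gpio(struct device_node *np, bool *state_is_kept)
{
        enum of_gpio_flags flags;
        int gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);

        if (gpio < 0)
                return gpio;

        /* transitory lines are not guaranteed to hold their value */
        *state_is_kept = !(flags & OF_GPIO_TRANSITORY);
        return gpio;
}
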
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 3e058f05ab04..01038a6aade0 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * OF graph binding parsing helpers 3 * OF graph binding parsing helpers
3 * 4 *
@@ -6,10 +7,6 @@
6 * 7 *
7 * Copyright (C) 2012 Renesas Electronics Corp. 8 * Copyright (C) 2012 Renesas Electronics Corp.
8 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> 9 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 */ 10 */
14#ifndef __LINUX_OF_GRAPH_H 11#ifndef __LINUX_OF_GRAPH_H
15#define __LINUX_OF_GRAPH_H 12#define __LINUX_OF_GRAPH_H
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index cddfaff4d0b7..4fa654e4b5a9 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -34,9 +34,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
34 34
35extern struct of_device_id __iommu_of_table; 35extern struct of_device_id __iommu_of_table;
36 36
37typedef int (*of_iommu_init_fn)(struct device_node *); 37#define IOMMU_OF_DECLARE(name, compat) OF_DECLARE_1(iommu, name, compat, NULL)
38
39#define IOMMU_OF_DECLARE(name, compat, fn) \
40 _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
41 38
42#endif /* __OF_IOMMU_H */ 39#endif /* __OF_IOMMU_H */
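
IOMMU_OF_DECLARE() loses its init-function argument and now passes NULL to OF_DECLARE_1() internally. A before/after sketch of a call site; the driver name and compatible string are invented:

/* before: IOMMU_OF_DECLARE(demo_smmu, "vendor,demo-smmu", demo_smmu_of_init); */
IOMMU_OF_DECLARE(demo_smmu, "vendor,demo-smmu");
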
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index bf588a05d0d0..88865e0ebf4d 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -9,8 +9,7 @@ struct pci_dev;
9struct of_phandle_args; 9struct of_phandle_args;
10struct device_node; 10struct device_node;
11 11
12#ifdef CONFIG_OF_PCI 12#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI)
13int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
14struct device_node *of_pci_find_child_device(struct device_node *parent, 13struct device_node *of_pci_find_child_device(struct device_node *parent,
15 unsigned int devfn); 14 unsigned int devfn);
16int of_pci_get_devfn(struct device_node *np); 15int of_pci_get_devfn(struct device_node *np);
@@ -23,11 +22,6 @@ int of_pci_map_rid(struct device_node *np, u32 rid,
23 const char *map_name, const char *map_mask_name, 22 const char *map_name, const char *map_mask_name,
24 struct device_node **target, u32 *id_out); 23 struct device_node **target, u32 *id_out);
25#else 24#else
26static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
27{
28 return 0;
29}
30
31static inline struct device_node *of_pci_find_child_device(struct device_node *parent, 25static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
32 unsigned int devfn) 26 unsigned int devfn)
33{ 27{
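
The of_pci helpers are now available whenever both CONFIG_OF and CONFIG_PCI are enabled, instead of being tied to CONFIG_OF_PCI, and of_irq_parse_pci() moves out of this header. A small illustrative caller of of_pci_get_devfn(); demo_show_devfn is made up:

#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/printk.h>

static void demo_show_devfn(struct device_node *np)
{
        int devfn = of_pci_get_devfn(np);       /* decoded from the node's "reg" */

        if (devfn >= 0)
                pr_info("%pOF -> device %02x, function %d\n",
                        np, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
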
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index 7e09244bb679..d0b183ab65c6 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -1,13 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * Definitions for building a device tree by calling into the 3 * Definitions for building a device tree by calling into the
3 * Open Firmware PROM. 4 * Open Firmware PROM.
4 * 5 *
5 * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> 6 * Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */ 7 */
12 8
13#ifndef _LINUX_OF_PDT_H 9#ifndef _LINUX_OF_PDT_H
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index fb908e598348..84a966623e78 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -1,14 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1#ifndef _LINUX_OF_PLATFORM_H 2#ifndef _LINUX_OF_PLATFORM_H
2#define _LINUX_OF_PLATFORM_H 3#define _LINUX_OF_PLATFORM_H
3/* 4/*
4 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. 5 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org> 6 * <benh@kernel.crashing.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 */ 7 */
13 8
14#include <linux/device.h> 9#include <linux/device.h>
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index edfa280c3d56..053feb41510a 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -25,15 +25,43 @@ struct gpmc_nand_ops {
25 25
26struct gpmc_nand_regs; 26struct gpmc_nand_regs;
27 27
28struct gpmc_onenand_info {
29 bool sync_read;
30 bool sync_write;
31 int burst_len;
32};
33
28#if IS_ENABLED(CONFIG_OMAP_GPMC) 34#if IS_ENABLED(CONFIG_OMAP_GPMC)
29struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, 35struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
30 int cs); 36 int cs);
37/**
38 * gpmc_omap_onenand_set_timings - set optimized sync timings.
39 * @cs: Chip Select Region
40 * @freq: Chip frequency
41 * @latency: Burst latency cycle count
42 * @info: Structure describing parameters used
43 *
44 * Sets optimized timings for the @cs region based on @freq and @latency.
45 * Updates the @info structure based on the GPMC settings.
46 */
47int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
48 int latency,
49 struct gpmc_onenand_info *info);
50
31#else 51#else
32static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, 52static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
33 int cs) 53 int cs)
34{ 54{
35 return NULL; 55 return NULL;
36} 56}
57
58static inline
59int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
60 int latency,
61 struct gpmc_onenand_info *info)
62{
63 return -EINVAL;
64}
37#endif /* CONFIG_OMAP_GPMC */ 65#endif /* CONFIG_OMAP_GPMC */
38 66
39extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, 67extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
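
gpmc_omap_onenand_set_timings() is the new entry point for switching a OneNAND chip select into synchronous mode, with the GPMC reporting what it actually programmed through gpmc_onenand_info. A hedged sketch; the cs, frequency and latency values are made-up examples:

#include <linux/omap-gpmc.h>
#include <linux/device.h>

static int demo_onenand_sync_setup(struct device *dev)
{
        struct gpmc_onenand_info info;
        int ret;

        ret = gpmc_omap_onenand_set_timings(dev, 0 /* cs */, 83 /* MHz */,
                                            6 /* latency cycles */, &info);
        if (ret)
                return ret;     /* -EINVAL when GPMC support is compiled out */

        if (info.sync_read && info.sync_write)
                dev_info(dev, "sync r/w enabled, burst length %d\n",
                         info.burst_len);
        return 0;
}
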
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3ec44e27aa9d..50c2b8786831 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -46,11 +46,6 @@
46 * guarantees that this bit is cleared for a page when it first is entered into 46 * guarantees that this bit is cleared for a page when it first is entered into
47 * the page cache. 47 * the page cache.
48 * 48 *
49 * PG_highmem pages are not permanently mapped into the kernel virtual address
50 * space, they need to be kmapped separately for doing IO on the pages. The
51 * struct page (these bits with information) are always mapped into kernel
52 * address space...
53 *
54 * PG_hwpoison indicates that a page got corrupted in hardware and contains 49 * PG_hwpoison indicates that a page got corrupted in hardware and contains
55 * data with incorrect ECC bits that triggered a machine check. Accessing is 50 * data with incorrect ECC bits that triggered a machine check. Accessing is
56 * not safe since it may cause another machine check. Don't touch! 51 * not safe since it may cause another machine check. Don't touch!
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 5fb6580f7f23..6dc456ac6136 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -9,14 +9,14 @@
9#ifndef _LINUX_PAGEVEC_H 9#ifndef _LINUX_PAGEVEC_H
10#define _LINUX_PAGEVEC_H 10#define _LINUX_PAGEVEC_H
11 11
12/* 14 pointers + two long's align the pagevec structure to a power of two */ 12/* 15 pointers + header align the pagevec structure to a power of two */
13#define PAGEVEC_SIZE 14 13#define PAGEVEC_SIZE 15
14 14
15struct page; 15struct page;
16struct address_space; 16struct address_space;
17 17
18struct pagevec { 18struct pagevec {
19 unsigned long nr; 19 unsigned char nr;
20 bool percpu_pvec_drained; 20 bool percpu_pvec_drained;
21 struct page *pages[PAGEVEC_SIZE]; 21 struct page *pages[PAGEVEC_SIZE];
22}; 22};
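
Shrinking nr to an unsigned char frees a pointer slot, so PAGEVEC_SIZE grows from 14 to 15 while the structure stays a power of two: on 64-bit, 1 byte nr + 1 byte bool + 6 bytes padding + 15 * 8-byte pointers = 128 bytes (64 bytes on 32-bit). A compile-time sketch of that invariant, assuming common struct layout rules:

#include <linux/pagevec.h>
#include <linux/bug.h>

static inline void demo_pagevec_size_check(void)
{
        /* power-of-two check: 128 bytes on 64-bit, 64 bytes on 32-bit */
        BUILD_BUG_ON(sizeof(struct pagevec) & (sizeof(struct pagevec) - 1));
}
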
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index 3cc06b059017..df28af5cef21 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -24,43 +24,12 @@
24#define PCIE_LINK_STATE_CLKPM 4 24#define PCIE_LINK_STATE_CLKPM 4
25 25
26#ifdef CONFIG_PCIEASPM 26#ifdef CONFIG_PCIEASPM
27void pcie_aspm_init_link_state(struct pci_dev *pdev);
28void pcie_aspm_exit_link_state(struct pci_dev *pdev);
29void pcie_aspm_pm_state_change(struct pci_dev *pdev);
30void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
31void pci_disable_link_state(struct pci_dev *pdev, int state); 27void pci_disable_link_state(struct pci_dev *pdev, int state);
32void pci_disable_link_state_locked(struct pci_dev *pdev, int state); 28void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
33void pcie_no_aspm(void); 29void pcie_no_aspm(void);
34#else 30#else
35static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) 31static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { }
36{ 32static inline void pcie_no_aspm(void) { }
37}
38static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev)
39{
40}
41static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev)
42{
43}
44static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
45{
46}
47static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
48{
49}
50static inline void pcie_no_aspm(void)
51{
52}
53#endif 33#endif
54 34
55#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */
56void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
57void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
58#else
59static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
60{
61}
62static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
63{
64}
65#endif
66#endif /* LINUX_ASPM_H */ 35#endif /* LINUX_ASPM_H */
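
With the init/exit/PM hooks made private to the PCI core, pci_disable_link_state(), pci_disable_link_state_locked() and pcie_no_aspm() are what remains for drivers. A minimal illustrative caller; demo_disable_l1 is made up:

#include <linux/pci.h>
#include <linux/pci-aspm.h>

/* A driver opting its device out of L1 ASPM, e.g. for latency reasons. */
static void demo_disable_l1(struct pci_dev *pdev)
{
        pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
}
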
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index d1f9fdade1e0..0dd1a3f7b309 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -17,91 +17,90 @@ static inline void *
17pci_alloc_consistent(struct pci_dev *hwdev, size_t size, 17pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
18 dma_addr_t *dma_handle) 18 dma_addr_t *dma_handle)
19{ 19{
20 return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); 20 return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
21} 21}
22 22
23static inline void * 23static inline void *
24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, 24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
25 dma_addr_t *dma_handle) 25 dma_addr_t *dma_handle)
26{ 26{
27 return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, 27 return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
28 size, dma_handle, GFP_ATOMIC);
29} 28}
30 29
31static inline void 30static inline void
32pci_free_consistent(struct pci_dev *hwdev, size_t size, 31pci_free_consistent(struct pci_dev *hwdev, size_t size,
33 void *vaddr, dma_addr_t dma_handle) 32 void *vaddr, dma_addr_t dma_handle)
34{ 33{
35 dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); 34 dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
36} 35}
37 36
38static inline dma_addr_t 37static inline dma_addr_t
39pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) 38pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
40{ 39{
41 return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); 40 return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
42} 41}
43 42
44static inline void 43static inline void
45pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, 44pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
46 size_t size, int direction) 45 size_t size, int direction)
47{ 46{
48 dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); 47 dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
49} 48}
50 49
51static inline dma_addr_t 50static inline dma_addr_t
52pci_map_page(struct pci_dev *hwdev, struct page *page, 51pci_map_page(struct pci_dev *hwdev, struct page *page,
53 unsigned long offset, size_t size, int direction) 52 unsigned long offset, size_t size, int direction)
54{ 53{
55 return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); 54 return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
56} 55}
57 56
58static inline void 57static inline void
59pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, 58pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
60 size_t size, int direction) 59 size_t size, int direction)
61{ 60{
62 dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); 61 dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
63} 62}
64 63
65static inline int 64static inline int
66pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, 65pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
67 int nents, int direction) 66 int nents, int direction)
68{ 67{
69 return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); 68 return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
70} 69}
71 70
72static inline void 71static inline void
73pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, 72pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
74 int nents, int direction) 73 int nents, int direction)
75{ 74{
76 dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); 75 dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
77} 76}
78 77
79static inline void 78static inline void
80pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, 79pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
81 size_t size, int direction) 80 size_t size, int direction)
82{ 81{
83 dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); 82 dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
84} 83}
85 84
86static inline void 85static inline void
87pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, 86pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
88 size_t size, int direction) 87 size_t size, int direction)
89{ 88{
90 dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); 89 dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
91} 90}
92 91
93static inline void 92static inline void
94pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, 93pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
95 int nelems, int direction) 94 int nelems, int direction)
96{ 95{
97 dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); 96 dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
98} 97}
99 98
100static inline void 99static inline void
101pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, 100pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
102 int nelems, int direction) 101 int nelems, int direction)
103{ 102{
104 dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); 103 dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
105} 104}
106 105
107static inline int 106static inline int
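
The compat wrappers now dereference hwdev unconditionally, so the historical "NULL pci_dev means ISA DMA" case is gone; any such caller has to use the dma_* API directly. A sketch of ordinary usage with a real pci_dev; demo_map is invented:

#include <linux/pci.h>
#include <linux/errno.h>

static int demo_map(struct pci_dev *pdev, void *buf, size_t len,
                    dma_addr_t *handle)
{
        *handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
        return pci_dma_mapping_error(pdev, *handle) ? -ENOMEM : 0;
}
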
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 809c2f1873ac..baadad1aabbc 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -1,17 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright 2016 Broadcom 3 * Copyright 2016 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2, as
6 * published by the Free Software Foundation (the "GPL").
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License version 2 (GPLv2) for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * version 2 (GPLv2) along with this source code.
15 */ 4 */
16#ifndef DRIVERS_PCI_ECAM_H 5#ifndef DRIVERS_PCI_ECAM_H
17#define DRIVERS_PCI_ECAM_H 6#define DRIVERS_PCI_ECAM_H
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
index 263b89ea5705..f42b0fd4b4bc 100644
--- a/include/linux/pci-ep-cfs.h
+++ b/include/linux/pci-ep-cfs.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/** 2/**
2 * PCI Endpoint ConfigFS header file 3 * PCI Endpoint ConfigFS header file
3 * 4 *
4 * Copyright (C) 2017 Texas Instruments 5 * Copyright (C) 2017 Texas Instruments
5 * Author: Kishon Vijay Abraham I <kishon@ti.com> 6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 of
9 * the License as published by the Free Software Foundation.
10 */ 7 */
11 8
12#ifndef __LINUX_PCI_EP_CFS_H 9#ifndef __LINUX_PCI_EP_CFS_H
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index f7a04e1af112..a1a5e5df0f66 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/** 2/**
2 * PCI Endpoint *Controller* (EPC) header file 3 * PCI Endpoint *Controller* (EPC) header file
3 * 4 *
4 * Copyright (C) 2017 Texas Instruments 5 * Copyright (C) 2017 Texas Instruments
5 * Author: Kishon Vijay Abraham I <kishon@ti.com> 6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 of
9 * the License as published by the Free Software Foundation.
10 */ 7 */
11 8
12#ifndef __LINUX_PCI_EPC_H 9#ifndef __LINUX_PCI_EPC_H
@@ -39,17 +36,20 @@ enum pci_epc_irq_type {
39 * @owner: the module owner containing the ops 36 * @owner: the module owner containing the ops
40 */ 37 */
41struct pci_epc_ops { 38struct pci_epc_ops {
42 int (*write_header)(struct pci_epc *pci_epc, 39 int (*write_header)(struct pci_epc *epc, u8 func_no,
43 struct pci_epf_header *hdr); 40 struct pci_epf_header *hdr);
44 int (*set_bar)(struct pci_epc *epc, enum pci_barno bar, 41 int (*set_bar)(struct pci_epc *epc, u8 func_no,
42 enum pci_barno bar,
45 dma_addr_t bar_phys, size_t size, int flags); 43 dma_addr_t bar_phys, size_t size, int flags);
46 void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar); 44 void (*clear_bar)(struct pci_epc *epc, u8 func_no,
47 int (*map_addr)(struct pci_epc *epc, phys_addr_t addr, 45 enum pci_barno bar);
48 u64 pci_addr, size_t size); 46 int (*map_addr)(struct pci_epc *epc, u8 func_no,
49 void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr); 47 phys_addr_t addr, u64 pci_addr, size_t size);
50 int (*set_msi)(struct pci_epc *epc, u8 interrupts); 48 void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
51 int (*get_msi)(struct pci_epc *epc); 49 phys_addr_t addr);
52 int (*raise_irq)(struct pci_epc *pci_epc, 50 int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
51 int (*get_msi)(struct pci_epc *epc, u8 func_no);
52 int (*raise_irq)(struct pci_epc *epc, u8 func_no,
53 enum pci_epc_irq_type type, u8 interrupt_num); 53 enum pci_epc_irq_type type, u8 interrupt_num);
54 int (*start)(struct pci_epc *epc); 54 int (*start)(struct pci_epc *epc);
55 void (*stop)(struct pci_epc *epc); 55 void (*stop)(struct pci_epc *epc);
@@ -124,17 +124,21 @@ void pci_epc_destroy(struct pci_epc *epc);
124int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); 124int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
125void pci_epc_linkup(struct pci_epc *epc); 125void pci_epc_linkup(struct pci_epc *epc);
126void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); 126void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
127int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr); 127int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
128int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, 128 struct pci_epf_header *hdr);
129int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
130 enum pci_barno bar,
129 dma_addr_t bar_phys, size_t size, int flags); 131 dma_addr_t bar_phys, size_t size, int flags);
130void pci_epc_clear_bar(struct pci_epc *epc, int bar); 132void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar);
131int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, 133int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
134 phys_addr_t phys_addr,
132 u64 pci_addr, size_t size); 135 u64 pci_addr, size_t size);
133void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr); 136void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
134int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts); 137 phys_addr_t phys_addr);
135int pci_epc_get_msi(struct pci_epc *epc); 138int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
136int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, 139int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
137 u8 interrupt_num); 140int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
141 enum pci_epc_irq_type type, u8 interrupt_num);
138int pci_epc_start(struct pci_epc *epc); 142int pci_epc_start(struct pci_epc *epc);
139void pci_epc_stop(struct pci_epc *epc); 143void pci_epc_stop(struct pci_epc *epc);
140struct pci_epc *pci_epc_get(const char *epc_name); 144struct pci_epc *pci_epc_get(const char *epc_name);
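
Every EPC op and wrapper now carries a func_no so a single controller can back several endpoint functions. A hedged sketch of an endpoint-function driver's bind path using the new signatures; demo_epf_bind is invented and error handling is trimmed:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int demo_epf_bind(struct pci_epf *epf)
{
        struct pci_epc *epc = epf->epc;
        int ret;

        ret = pci_epc_write_header(epc, epf->func_no, epf->header);
        if (ret)
                return ret;

        return pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
}
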
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 60d551a9a1ba..e897bf076701 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/** 2/**
2 * PCI Endpoint *Function* (EPF) header file 3 * PCI Endpoint *Function* (EPF) header file
3 * 4 *
4 * Copyright (C) 2017 Texas Instruments 5 * Copyright (C) 2017 Texas Instruments
5 * Author: Kishon Vijay Abraham I <kishon@ti.com> 6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 of
9 * the License as published by the Free Software Foundation.
10 */ 7 */
11 8
12#ifndef __LINUX_PCI_EPF_H 9#ifndef __LINUX_PCI_EPF_H
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c170c9250c8b..024a1beda008 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -48,17 +48,17 @@
48 * In the interest of not exposing interfaces to user-space unnecessarily, 48 * In the interest of not exposing interfaces to user-space unnecessarily,
49 * the following kernel-only defines are being added here. 49 * the following kernel-only defines are being added here.
50 */ 50 */
51#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) 51#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
52/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ 52/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
53#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) 53#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
54 54
55/* pci_slot represents a physical slot */ 55/* pci_slot represents a physical slot */
56struct pci_slot { 56struct pci_slot {
57 struct pci_bus *bus; /* The bus this slot is on */ 57 struct pci_bus *bus; /* Bus this slot is on */
58 struct list_head list; /* node in list of slots on this bus */ 58 struct list_head list; /* Node in list of slots */
59 struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ 59 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
60 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ 60 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
61 struct kobject kobj; 61 struct kobject kobj;
62}; 62};
63 63
64static inline const char *pci_slot_name(const struct pci_slot *slot) 64static inline const char *pci_slot_name(const struct pci_slot *slot)
@@ -72,9 +72,7 @@ enum pci_mmap_state {
72 pci_mmap_mem 72 pci_mmap_mem
73}; 73};
74 74
75/* 75/* For PCI devices, the region numbers are assigned this way: */
76 * For PCI devices, the region numbers are assigned this way:
77 */
78enum { 76enum {
79 /* #0-5: standard PCI resources */ 77 /* #0-5: standard PCI resources */
80 PCI_STD_RESOURCES, 78 PCI_STD_RESOURCES,
@@ -83,23 +81,23 @@ enum {
83 /* #6: expansion ROM resource */ 81 /* #6: expansion ROM resource */
84 PCI_ROM_RESOURCE, 82 PCI_ROM_RESOURCE,
85 83
86 /* device specific resources */ 84 /* Device-specific resources */
87#ifdef CONFIG_PCI_IOV 85#ifdef CONFIG_PCI_IOV
88 PCI_IOV_RESOURCES, 86 PCI_IOV_RESOURCES,
89 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, 87 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
90#endif 88#endif
91 89
92 /* resources assigned to buses behind the bridge */ 90 /* Resources assigned to buses behind the bridge */
93#define PCI_BRIDGE_RESOURCE_NUM 4 91#define PCI_BRIDGE_RESOURCE_NUM 4
94 92
95 PCI_BRIDGE_RESOURCES, 93 PCI_BRIDGE_RESOURCES,
96 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + 94 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
97 PCI_BRIDGE_RESOURCE_NUM - 1, 95 PCI_BRIDGE_RESOURCE_NUM - 1,
98 96
99 /* total resources associated with a PCI device */ 97 /* Total resources associated with a PCI device */
100 PCI_NUM_RESOURCES, 98 PCI_NUM_RESOURCES,
101 99
102 /* preserve this for compatibility */ 100 /* Preserve this for compatibility */
103 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, 101 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
104}; 102};
105 103
@@ -152,9 +150,10 @@ static inline const char *pci_power_name(pci_power_t state)
152#define PCI_PM_D3COLD_WAIT 100 150#define PCI_PM_D3COLD_WAIT 100
153#define PCI_PM_BUS_WAIT 50 151#define PCI_PM_BUS_WAIT 50
154 152
155/** The pci_channel state describes connectivity between the CPU and 153/**
156 * the pci device. If some PCI bus between here and the pci device 154 * The pci_channel state describes connectivity between the CPU and
157 * has crashed or locked up, this info is reflected here. 155 * the PCI device. If some PCI bus between here and the PCI device
156 * has crashed or locked up, this info is reflected here.
158 */ 157 */
159typedef unsigned int __bitwise pci_channel_state_t; 158typedef unsigned int __bitwise pci_channel_state_t;
160 159
@@ -184,9 +183,7 @@ enum pcie_reset_state {
184 183
185typedef unsigned short __bitwise pci_dev_flags_t; 184typedef unsigned short __bitwise pci_dev_flags_t;
186enum pci_dev_flags { 185enum pci_dev_flags {
187 /* INTX_DISABLE in PCI_COMMAND register disables MSI 186 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
188 * generation too.
189 */
190 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), 187 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
191 /* Device configuration is irrevocably lost if disabled into D3 */ 188 /* Device configuration is irrevocably lost if disabled into D3 */
192 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), 189 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
@@ -202,7 +199,7 @@ enum pci_dev_flags {
202 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), 199 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
203 /* Get VPD from function 0 VPD */ 200 /* Get VPD from function 0 VPD */
204 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), 201 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
205 /* a non-root bridge where translation occurs, stop alias search here */ 202 /* A non-root bridge where translation occurs, stop alias search here */
206 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), 203 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
207 /* Do not use FLR even if device advertises PCI_AF_CAP */ 204 /* Do not use FLR even if device advertises PCI_AF_CAP */
208 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), 205 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
@@ -222,17 +219,17 @@ enum pci_bus_flags {
222 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, 219 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
223}; 220};
224 221
225/* These values come from the PCI Express Spec */ 222/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
226enum pcie_link_width { 223enum pcie_link_width {
227 PCIE_LNK_WIDTH_RESRV = 0x00, 224 PCIE_LNK_WIDTH_RESRV = 0x00,
228 PCIE_LNK_X1 = 0x01, 225 PCIE_LNK_X1 = 0x01,
229 PCIE_LNK_X2 = 0x02, 226 PCIE_LNK_X2 = 0x02,
230 PCIE_LNK_X4 = 0x04, 227 PCIE_LNK_X4 = 0x04,
231 PCIE_LNK_X8 = 0x08, 228 PCIE_LNK_X8 = 0x08,
232 PCIE_LNK_X12 = 0x0C, 229 PCIE_LNK_X12 = 0x0c,
233 PCIE_LNK_X16 = 0x10, 230 PCIE_LNK_X16 = 0x10,
234 PCIE_LNK_X32 = 0x20, 231 PCIE_LNK_X32 = 0x20,
235 PCIE_LNK_WIDTH_UNKNOWN = 0xFF, 232 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
236}; 233};
237 234
238/* Based on the PCI Hotplug Spec, but some values are made up by us */ 235/* Based on the PCI Hotplug Spec, but some values are made up by us */
@@ -263,15 +260,15 @@ enum pci_bus_speed {
263}; 260};
264 261
265struct pci_cap_saved_data { 262struct pci_cap_saved_data {
266 u16 cap_nr; 263 u16 cap_nr;
267 bool cap_extended; 264 bool cap_extended;
268 unsigned int size; 265 unsigned int size;
269 u32 data[0]; 266 u32 data[0];
270}; 267};
271 268
272struct pci_cap_saved_state { 269struct pci_cap_saved_state {
273 struct hlist_node next; 270 struct hlist_node next;
274 struct pci_cap_saved_data cap; 271 struct pci_cap_saved_data cap;
275}; 272};
276 273
277struct irq_affinity; 274struct irq_affinity;
@@ -280,19 +277,17 @@ struct pci_vpd;
280struct pci_sriov; 277struct pci_sriov;
281struct pci_ats; 278struct pci_ats;
282 279
283/* 280/* The pci_dev structure describes PCI devices */
284 * The pci_dev structure is used to describe PCI devices.
285 */
286struct pci_dev { 281struct pci_dev {
287 struct list_head bus_list; /* node in per-bus list */ 282 struct list_head bus_list; /* Node in per-bus list */
288 struct pci_bus *bus; /* bus this device is on */ 283 struct pci_bus *bus; /* Bus this device is on */
289 struct pci_bus *subordinate; /* bus this device bridges to */ 284 struct pci_bus *subordinate; /* Bus this device bridges to */
290 285
291 void *sysdata; /* hook for sys-specific extension */ 286 void *sysdata; /* Hook for sys-specific extension */
292 struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ 287 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
293 struct pci_slot *slot; /* Physical slot this device is in */ 288 struct pci_slot *slot; /* Physical slot this device is in */
294 289
295 unsigned int devfn; /* encoded device & function index */ 290 unsigned int devfn; /* Encoded device & function index */
296 unsigned short vendor; 291 unsigned short vendor;
297 unsigned short device; 292 unsigned short device;
298 unsigned short subsystem_vendor; 293 unsigned short subsystem_vendor;
@@ -307,12 +302,12 @@ struct pci_dev {
307 u8 msi_cap; /* MSI capability offset */ 302 u8 msi_cap; /* MSI capability offset */
308 u8 msix_cap; /* MSI-X capability offset */ 303 u8 msix_cap; /* MSI-X capability offset */
309 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */ 304 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
310 u8 rom_base_reg; /* which config register controls the ROM */ 305 u8 rom_base_reg; /* Config register controlling ROM */
311 u8 pin; /* which interrupt pin this device uses */ 306 u8 pin; /* Interrupt pin this device uses */
312 u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ 307 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
313 unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ 308 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
314 309
315 struct pci_driver *driver; /* which driver has allocated this device */ 310 struct pci_driver *driver; /* Driver bound to this device */
316 u64 dma_mask; /* Mask of the bits of bus address this 311 u64 dma_mask; /* Mask of the bits of bus address this
317 device implements. Normally this is 312 device implements. Normally this is
318 0xffffffff. You only need to change 313 0xffffffff. You only need to change
@@ -321,9 +316,9 @@ struct pci_dev {
321 316
322 struct device_dma_parameters dma_parms; 317 struct device_dma_parameters dma_parms;
323 318
324 pci_power_t current_state; /* Current operating state. In ACPI-speak, 319 pci_power_t current_state; /* Current operating state. In ACPI,
325 this is D0-D3, D0 being fully functional, 320 this is D0-D3, D0 being fully
326 and D3 being off. */ 321 functional, and D3 being off. */
327 u8 pm_cap; /* PM capability offset */ 322 u8 pm_cap; /* PM capability offset */
328 unsigned int pme_support:5; /* Bitmask of states from which PME# 323 unsigned int pme_support:5; /* Bitmask of states from which PME#
329 can be generated */ 324 can be generated */
@@ -334,10 +329,10 @@ struct pci_dev {
334 unsigned int no_d3cold:1; /* D3cold is forbidden */ 329 unsigned int no_d3cold:1; /* D3cold is forbidden */
335 unsigned int bridge_d3:1; /* Allow D3 for bridge */ 330 unsigned int bridge_d3:1; /* Allow D3 for bridge */
336 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ 331 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
337 unsigned int mmio_always_on:1; /* disallow turning off io/mem 332 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
338 decoding during bar sizing */ 333 decoding during BAR sizing */
339 unsigned int wakeup_prepared:1; 334 unsigned int wakeup_prepared:1;
340 unsigned int runtime_d3cold:1; /* whether go through runtime 335 unsigned int runtime_d3cold:1; /* Whether go through runtime
341 D3cold, not set for devices 336 D3cold, not set for devices
342 powered on/off by the 337 powered on/off by the
343 corresponding bridge */ 338 corresponding bridge */
@@ -350,12 +345,14 @@ struct pci_dev {
350 345
351#ifdef CONFIG_PCIEASPM 346#ifdef CONFIG_PCIEASPM
352 struct pcie_link_state *link_state; /* ASPM link state */ 347 struct pcie_link_state *link_state; /* ASPM link state */
348 unsigned int ltr_path:1; /* Latency Tolerance Reporting
349 supported from root to here */
353#endif 350#endif
354 351
355 pci_channel_state_t error_state; /* current connectivity state */ 352 pci_channel_state_t error_state; /* Current connectivity state */
356 struct device dev; /* Generic device interface */ 353 struct device dev; /* Generic device interface */
357 354
358 int cfg_size; /* Size of configuration space */ 355 int cfg_size; /* Size of config space */
359 356
360 /* 357 /*
361 * Instead of touching interrupt line and base address registers 358 * Instead of touching interrupt line and base address registers
@@ -364,47 +361,47 @@ struct pci_dev {
364 unsigned int irq; 361 unsigned int irq;
365 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ 362 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
366 363
367 bool match_driver; /* Skip attaching driver */ 364 bool match_driver; /* Skip attaching driver */
368 /* These fields are used by common fixups */ 365
369 unsigned int transparent:1; /* Subtractive decode PCI bridge */ 366 unsigned int transparent:1; /* Subtractive decode bridge */
370 unsigned int multifunction:1;/* Part of multi-function device */ 367 unsigned int multifunction:1; /* Multi-function device */
371 /* keep track of device state */ 368
372 unsigned int is_added:1; 369 unsigned int is_added:1;
373 unsigned int is_busmaster:1; /* device is busmaster */ 370 unsigned int is_busmaster:1; /* Is busmaster */
374 unsigned int no_msi:1; /* device may not use msi */ 371 unsigned int no_msi:1; /* May not use MSI */
375 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ 372 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
376 unsigned int block_cfg_access:1; /* config space access is blocked */ 373 unsigned int block_cfg_access:1; /* Config space access blocked */
377 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 374 unsigned int broken_parity_status:1; /* Generates false positive parity */
378 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 375 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
379 unsigned int msi_enabled:1; 376 unsigned int msi_enabled:1;
380 unsigned int msix_enabled:1; 377 unsigned int msix_enabled:1;
381 unsigned int ari_enabled:1; /* ARI forwarding */ 378 unsigned int ari_enabled:1; /* ARI forwarding */
382 unsigned int ats_enabled:1; /* Address Translation Service */ 379 unsigned int ats_enabled:1; /* Address Translation Svc */
383 unsigned int pasid_enabled:1; /* Process Address Space ID */ 380 unsigned int pasid_enabled:1; /* Process Address Space ID */
384 unsigned int pri_enabled:1; /* Page Request Interface */ 381 unsigned int pri_enabled:1; /* Page Request Interface */
385 unsigned int is_managed:1; 382 unsigned int is_managed:1;
386 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 383 unsigned int needs_freset:1; /* Requires fundamental reset */
387 unsigned int state_saved:1; 384 unsigned int state_saved:1;
388 unsigned int is_physfn:1; 385 unsigned int is_physfn:1;
389 unsigned int is_virtfn:1; 386 unsigned int is_virtfn:1;
390 unsigned int reset_fn:1; 387 unsigned int reset_fn:1;
391 unsigned int is_hotplug_bridge:1; 388 unsigned int is_hotplug_bridge:1;
392 unsigned int is_thunderbolt:1; /* Thunderbolt controller */ 389 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
393 unsigned int __aer_firmware_first_valid:1; 390 unsigned int __aer_firmware_first_valid:1;
394 unsigned int __aer_firmware_first:1; 391 unsigned int __aer_firmware_first:1;
395 unsigned int broken_intx_masking:1; /* INTx masking can't be used */ 392 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
396 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ 393 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
397 unsigned int irq_managed:1; 394 unsigned int irq_managed:1;
398 unsigned int has_secondary_link:1; 395 unsigned int has_secondary_link:1;
399 unsigned int non_compliant_bars:1; /* broken BARs; ignore them */ 396 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
400 unsigned int is_probed:1; /* device probing in progress */ 397 unsigned int is_probed:1; /* Device probing in progress */
401 pci_dev_flags_t dev_flags; 398 pci_dev_flags_t dev_flags;
402 atomic_t enable_cnt; /* pci_enable_device has been called */ 399 atomic_t enable_cnt; /* pci_enable_device has been called */
403 400
404 u32 saved_config_space[16]; /* config space saved at suspend time */ 401 u32 saved_config_space[16]; /* Config space saved at suspend time */
405 struct hlist_head saved_cap_space; 402 struct hlist_head saved_cap_space;
406 struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ 403 struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */
407 int rom_attr_enabled; /* has display of the rom attribute been enabled? */ 404 int rom_attr_enabled; /* Display of ROM attribute enabled? */
408 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 405 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
409 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 406 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
410 407
@@ -419,12 +416,12 @@ struct pci_dev {
419 struct pci_vpd *vpd; 416 struct pci_vpd *vpd;
420#ifdef CONFIG_PCI_ATS 417#ifdef CONFIG_PCI_ATS
421 union { 418 union {
422 struct pci_sriov *sriov; /* SR-IOV capability related */ 419 struct pci_sriov *sriov; /* PF: SR-IOV info */
423 struct pci_dev *physfn; /* the PF this VF is associated with */ 420 struct pci_dev *physfn; /* VF: related PF */
424 }; 421 };
425 u16 ats_cap; /* ATS Capability offset */ 422 u16 ats_cap; /* ATS Capability offset */
426 u8 ats_stu; /* ATS Smallest Translation Unit */ 423 u8 ats_stu; /* ATS Smallest Translation Unit */
427 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */ 424 atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */
428#endif 425#endif
429#ifdef CONFIG_PCI_PRI 426#ifdef CONFIG_PCI_PRI
430 u32 pri_reqs_alloc; /* Number of PRI requests allocated */ 427 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
@@ -432,11 +429,11 @@ struct pci_dev {
432#ifdef CONFIG_PCI_PASID 429#ifdef CONFIG_PCI_PASID
433 u16 pasid_features; 430 u16 pasid_features;
434#endif 431#endif
435 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ 432 phys_addr_t rom; /* Physical address if not from BAR */
436 size_t romlen; /* Length of ROM if it's not from the BAR */ 433 size_t romlen; /* Length if not from BAR */
437 char *driver_override; /* Driver name to force a match */ 434 char *driver_override; /* Driver name to force a match */
438 435
439 unsigned long priv_flags; /* Private flags for the pci driver */ 436 unsigned long priv_flags; /* Private flags for the PCI driver */
440}; 437};
441 438
442static inline struct pci_dev *pci_physfn(struct pci_dev *dev) 439static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -459,26 +456,26 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
459} 456}
460 457
461struct pci_host_bridge { 458struct pci_host_bridge {
462 struct device dev; 459 struct device dev;
463 struct pci_bus *bus; /* root bus */ 460 struct pci_bus *bus; /* Root bus */
464 struct pci_ops *ops; 461 struct pci_ops *ops;
465 void *sysdata; 462 void *sysdata;
466 int busnr; 463 int busnr;
467 struct list_head windows; /* resource_entry */ 464 struct list_head windows; /* resource_entry */
468 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */ 465 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
469 int (*map_irq)(const struct pci_dev *, u8, u8); 466 int (*map_irq)(const struct pci_dev *, u8, u8);
470 void (*release_fn)(struct pci_host_bridge *); 467 void (*release_fn)(struct pci_host_bridge *);
471 void *release_data; 468 void *release_data;
472 struct msi_controller *msi; 469 struct msi_controller *msi;
473 unsigned int ignore_reset_delay:1; /* for entire hierarchy */ 470 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
474 unsigned int no_ext_tags:1; /* no Extended Tags */ 471 unsigned int no_ext_tags:1; /* No Extended Tags */
475 /* Resource alignment requirements */ 472 /* Resource alignment requirements */
476 resource_size_t (*align_resource)(struct pci_dev *dev, 473 resource_size_t (*align_resource)(struct pci_dev *dev,
477 const struct resource *res, 474 const struct resource *res,
478 resource_size_t start, 475 resource_size_t start,
479 resource_size_t size, 476 resource_size_t size,
480 resource_size_t align); 477 resource_size_t align);
481 unsigned long private[0] ____cacheline_aligned; 478 unsigned long private[0] ____cacheline_aligned;
482}; 479};
483 480
484#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) 481#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -500,8 +497,8 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge);
500struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); 497struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
501 498
502void pci_set_host_bridge_release(struct pci_host_bridge *bridge, 499void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
503 void (*release_fn)(struct pci_host_bridge *), 500 void (*release_fn)(struct pci_host_bridge *),
504 void *release_data); 501 void *release_data);
505 502
506int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); 503int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
507 504
@@ -521,32 +518,32 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
521#define PCI_SUBTRACTIVE_DECODE 0x1 518#define PCI_SUBTRACTIVE_DECODE 0x1
522 519
523struct pci_bus_resource { 520struct pci_bus_resource {
524 struct list_head list; 521 struct list_head list;
525 struct resource *res; 522 struct resource *res;
526 unsigned int flags; 523 unsigned int flags;
527}; 524};
528 525
529#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ 526#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
530 527
531struct pci_bus { 528struct pci_bus {
532 struct list_head node; /* node in list of buses */ 529 struct list_head node; /* Node in list of buses */
533 struct pci_bus *parent; /* parent bus this bridge is on */ 530 struct pci_bus *parent; /* Parent bus this bridge is on */
534 struct list_head children; /* list of child buses */ 531 struct list_head children; /* List of child buses */
535 struct list_head devices; /* list of devices on this bus */ 532 struct list_head devices; /* List of devices on this bus */
536 struct pci_dev *self; /* bridge device as seen by parent */ 533 struct pci_dev *self; /* Bridge device as seen by parent */
537 struct list_head slots; /* list of slots on this bus; 534 struct list_head slots; /* List of slots on this bus;
538 protected by pci_slot_mutex */ 535 protected by pci_slot_mutex */
539 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; 536 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
540 struct list_head resources; /* address space routed to this bus */ 537 struct list_head resources; /* Address space routed to this bus */
541 struct resource busn_res; /* bus numbers routed to this bus */ 538 struct resource busn_res; /* Bus numbers routed to this bus */
542 539
543 struct pci_ops *ops; /* configuration access functions */ 540 struct pci_ops *ops; /* Configuration access functions */
544 struct msi_controller *msi; /* MSI controller */ 541 struct msi_controller *msi; /* MSI controller */
545 void *sysdata; /* hook for sys-specific extension */ 542 void *sysdata; /* Hook for sys-specific extension */
546 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ 543 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
547 544
548 unsigned char number; /* bus number */ 545 unsigned char number; /* Bus number */
549 unsigned char primary; /* number of primary bridge */ 546 unsigned char primary; /* Number of primary bridge */
550 unsigned char max_bus_speed; /* enum pci_bus_speed */ 547 unsigned char max_bus_speed; /* enum pci_bus_speed */
551 unsigned char cur_bus_speed; /* enum pci_bus_speed */ 548 unsigned char cur_bus_speed; /* enum pci_bus_speed */
552#ifdef CONFIG_PCI_DOMAINS_GENERIC 549#ifdef CONFIG_PCI_DOMAINS_GENERIC
@@ -555,12 +552,12 @@ struct pci_bus {
555 552
556 char name[48]; 553 char name[48];
557 554
558 unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ 555 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
559 pci_bus_flags_t bus_flags; /* inherited by child buses */ 556 pci_bus_flags_t bus_flags; /* Inherited by child buses */
560 struct device *bridge; 557 struct device *bridge;
561 struct device dev; 558 struct device dev;
562 struct bin_attribute *legacy_io; /* legacy I/O for this bus */ 559 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
563 struct bin_attribute *legacy_mem; /* legacy mem */ 560 struct bin_attribute *legacy_mem; /* Legacy mem */
564 unsigned int is_added:1; 561 unsigned int is_added:1;
565}; 562};
566 563
@@ -617,9 +614,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
617static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } 614static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
618#endif 615#endif
619 616
620/* 617/* Error values that may be returned by PCI functions */
621 * Error values that may be returned by PCI functions.
622 */
623#define PCIBIOS_SUCCESSFUL 0x00 618#define PCIBIOS_SUCCESSFUL 0x00
624#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 619#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
625#define PCIBIOS_BAD_VENDOR_ID 0x83 620#define PCIBIOS_BAD_VENDOR_ID 0x83
@@ -628,9 +623,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
628#define PCIBIOS_SET_FAILED 0x88 623#define PCIBIOS_SET_FAILED 0x88
629#define PCIBIOS_BUFFER_TOO_SMALL 0x89 624#define PCIBIOS_BUFFER_TOO_SMALL 0x89
630 625
631/* 626/* Translate above to generic errno for passing back through non-PCI code */
632 * Translate above to generic errno for passing back through non-PCI code.
633 */
634static inline int pcibios_err_to_errno(int err) 627static inline int pcibios_err_to_errno(int err)
635{ 628{
636 if (err <= PCIBIOS_SUCCESSFUL) 629 if (err <= PCIBIOS_SUCCESSFUL)
@@ -680,13 +673,13 @@ typedef u32 pci_bus_addr_t;
680#endif 673#endif
681 674
682struct pci_bus_region { 675struct pci_bus_region {
683 pci_bus_addr_t start; 676 pci_bus_addr_t start;
684 pci_bus_addr_t end; 677 pci_bus_addr_t end;
685}; 678};
686 679
687struct pci_dynids { 680struct pci_dynids {
688 spinlock_t lock; /* protects list, index */ 681 spinlock_t lock; /* Protects list, index */
689 struct list_head list; /* for IDs added at runtime */ 682 struct list_head list; /* For IDs added at runtime */
690}; 683};
691 684
692 685
@@ -700,13 +693,13 @@ struct pci_dynids {
700typedef unsigned int __bitwise pci_ers_result_t; 693typedef unsigned int __bitwise pci_ers_result_t;
701 694
702enum pci_ers_result { 695enum pci_ers_result {
703 /* no result/none/not supported in device driver */ 696 /* No result/none/not supported in device driver */
704 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, 697 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
705 698
706 /* Device driver can recover without slot reset */ 699 /* Device driver can recover without slot reset */
707 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, 700 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
708 701
709 /* Device driver wants slot to be reset. */ 702 /* Device driver wants slot to be reset */
710 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, 703 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
711 704
712 /* Device has completely failed, is unrecoverable */ 705 /* Device has completely failed, is unrecoverable */
@@ -742,27 +735,27 @@ struct pci_error_handlers {
742 735
743struct module; 736struct module;
744struct pci_driver { 737struct pci_driver {
745 struct list_head node; 738 struct list_head node;
746 const char *name; 739 const char *name;
747 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ 740 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
748 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ 741 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
749 void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ 742 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
750 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ 743 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
751 int (*suspend_late) (struct pci_dev *dev, pm_message_t state); 744 int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
752 int (*resume_early) (struct pci_dev *dev); 745 int (*resume_early)(struct pci_dev *dev);
753 int (*resume) (struct pci_dev *dev); /* Device woken up */ 746 int (*resume) (struct pci_dev *dev); /* Device woken up */
754 void (*shutdown) (struct pci_dev *dev); 747 void (*shutdown) (struct pci_dev *dev);
755 int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */ 748 int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */
756 const struct pci_error_handlers *err_handler; 749 const struct pci_error_handlers *err_handler;
757 const struct attribute_group **groups; 750 const struct attribute_group **groups;
758 struct device_driver driver; 751 struct device_driver driver;
759 struct pci_dynids dynids; 752 struct pci_dynids dynids;
760}; 753};
761 754
762#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) 755#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
763 756
764/** 757/**
765 * PCI_DEVICE - macro used to describe a specific pci device 758 * PCI_DEVICE - macro used to describe a specific PCI device
766 * @vend: the 16 bit PCI Vendor ID 759 * @vend: the 16 bit PCI Vendor ID
767 * @dev: the 16 bit PCI Device ID 760 * @dev: the 16 bit PCI Device ID
768 * 761 *
@@ -775,7 +768,7 @@ struct pci_driver {
775 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID 768 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
776 769
777/** 770/**
778 * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem 771 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
779 * @vend: the 16 bit PCI Vendor ID 772 * @vend: the 16 bit PCI Vendor ID
780 * @dev: the 16 bit PCI Device ID 773 * @dev: the 16 bit PCI Device ID
781 * @subvend: the 16 bit PCI Subvendor ID 774 * @subvend: the 16 bit PCI Subvendor ID
@@ -789,7 +782,7 @@ struct pci_driver {
789 .subvendor = (subvend), .subdevice = (subdev) 782 .subvendor = (subvend), .subdevice = (subdev)
790 783
791/** 784/**
792 * PCI_DEVICE_CLASS - macro used to describe a specific pci device class 785 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
793 * @dev_class: the class, subclass, prog-if triple for this device 786 * @dev_class: the class, subclass, prog-if triple for this device
794 * @dev_class_mask: the class mask for this device 787 * @dev_class_mask: the class mask for this device
795 * 788 *
@@ -803,7 +796,7 @@ struct pci_driver {
803 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID 796 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
804 797
805/** 798/**
806 * PCI_VDEVICE - macro used to describe a specific pci device in short form 799 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
807 * @vend: the vendor name 800 * @vend: the vendor name
808 * @dev: the 16 bit PCI Device ID 801 * @dev: the 16 bit PCI Device ID
809 * 802 *
@@ -812,22 +805,21 @@ struct pci_driver {
812 * to PCI_ANY_ID. The macro allows the next field to follow as the device 805 * to PCI_ANY_ID. The macro allows the next field to follow as the device
813 * private data. 806 * private data.
814 */ 807 */
815
816#define PCI_VDEVICE(vend, dev) \ 808#define PCI_VDEVICE(vend, dev) \
817 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ 809 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
818 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 810 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
819 811
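[Editor's note: a hedged sketch, not part of the header, of how the PCI_DEVICE()/PCI_VDEVICE() macros above are typically used in a driver's ID table; the device IDs and the "foo" names are made up for illustration.]

	static const struct pci_device_id foo_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* fictitious device ID */
		{ PCI_VDEVICE(REDHAT, 0x0005), .driver_data = 1 },	/* driver_data may follow */
		{ }	/* terminating all-zero entry */
	};
	MODULE_DEVICE_TABLE(pci, foo_ids);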
820enum { 812enum {
821 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */ 813 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
822 PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */ 814 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
823 PCI_PROBE_ONLY = 0x00000004, /* use existing setup */ 815 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
824 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */ 816 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
825 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */ 817 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
826 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */ 818 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
827 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */ 819 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
828}; 820};
829 821
830/* these external functions are only available when PCI support is enabled */ 822/* These external functions are only available when PCI support is enabled */
831#ifdef CONFIG_PCI 823#ifdef CONFIG_PCI
832 824
833extern unsigned int pci_flags; 825extern unsigned int pci_flags;
@@ -840,11 +832,11 @@ static inline int pci_has_flag(int flag) { return pci_flags & flag; }
840void pcie_bus_configure_settings(struct pci_bus *bus); 832void pcie_bus_configure_settings(struct pci_bus *bus);
841 833
842enum pcie_bus_config_types { 834enum pcie_bus_config_types {
843 PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */ 835 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
844 PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */ 836 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
845 PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */ 837 PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */
846 PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */ 838 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
847 PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */ 839 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
848}; 840};
849 841
850extern enum pcie_bus_config_types pcie_bus_config; 842extern enum pcie_bus_config_types pcie_bus_config;
@@ -853,7 +845,7 @@ extern struct bus_type pci_bus_type;
853 845
854/* Do NOT directly access these two variables, unless you are arch-specific PCI 846/* Do NOT directly access these two variables, unless you are arch-specific PCI
855 * code, or PCI core code. */ 847 * code, or PCI core code. */
856extern struct list_head pci_root_buses; /* list of all known PCI buses */ 848extern struct list_head pci_root_buses; /* List of all known PCI buses */
857/* Some device drivers need know if PCI is initiated */ 849/* Some device drivers need know if PCI is initiated */
858int no_pci_devices(void); 850int no_pci_devices(void);
859 851
@@ -887,12 +879,13 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
887struct pci_bus *pci_create_root_bus(struct device *parent, int bus, 879struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
888 struct pci_ops *ops, void *sysdata, 880 struct pci_ops *ops, void *sysdata,
889 struct list_head *resources); 881 struct list_head *resources);
882int pci_host_probe(struct pci_host_bridge *bridge);
890int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); 883int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
891int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); 884int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
892void pci_bus_release_busn_res(struct pci_bus *b); 885void pci_bus_release_busn_res(struct pci_bus *b);
893struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 886struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
894 struct pci_ops *ops, void *sysdata, 887 struct pci_ops *ops, void *sysdata,
895 struct list_head *resources); 888 struct list_head *resources);
896int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); 889int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
897struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 890struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
898 int busnr); 891 int busnr);
@@ -949,10 +942,10 @@ int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
949struct pci_bus *pci_find_next_bus(const struct pci_bus *from); 942struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
950 943
951struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, 944struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
952 struct pci_dev *from); 945 struct pci_dev *from);
953struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, 946struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
954 unsigned int ss_vendor, unsigned int ss_device, 947 unsigned int ss_vendor, unsigned int ss_device,
955 struct pci_dev *from); 948 struct pci_dev *from);
956struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); 949struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
957struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, 950struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
958 unsigned int devfn); 951 unsigned int devfn);
@@ -1028,7 +1021,7 @@ static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1028 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); 1021 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1029} 1022}
1030 1023
1031/* user-space driven config access */ 1024/* User-space driven config access */
1032int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 1025int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1033int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); 1026int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1034int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val); 1027int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
@@ -1072,6 +1065,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1072int pci_set_cacheline_size(struct pci_dev *dev); 1065int pci_set_cacheline_size(struct pci_dev *dev);
1073#define HAVE_PCI_SET_MWI 1066#define HAVE_PCI_SET_MWI
1074int __must_check pci_set_mwi(struct pci_dev *dev); 1067int __must_check pci_set_mwi(struct pci_dev *dev);
1068int __must_check pcim_set_mwi(struct pci_dev *dev);
1075int pci_try_set_mwi(struct pci_dev *dev); 1069int pci_try_set_mwi(struct pci_dev *dev);
1076void pci_clear_mwi(struct pci_dev *dev); 1070void pci_clear_mwi(struct pci_dev *dev);
1077void pci_intx(struct pci_dev *dev, int enable); 1071void pci_intx(struct pci_dev *dev, int enable);
@@ -1170,7 +1164,7 @@ unsigned int pci_rescan_bus(struct pci_bus *bus);
1170void pci_lock_rescan_remove(void); 1164void pci_lock_rescan_remove(void);
1171void pci_unlock_rescan_remove(void); 1165void pci_unlock_rescan_remove(void);
1172 1166
1173/* Vital product data routines */ 1167/* Vital Product Data routines */
1174ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1168ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1175ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1169ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1176int pci_set_vpd_size(struct pci_dev *dev, size_t len); 1170int pci_set_vpd_size(struct pci_dev *dev, size_t len);
@@ -1255,9 +1249,7 @@ static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1255int __must_check __pci_register_driver(struct pci_driver *, struct module *, 1249int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1256 const char *mod_name); 1250 const char *mod_name);
1257 1251
1258/* 1252/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1259 * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
1260 */
1261#define pci_register_driver(driver) \ 1253#define pci_register_driver(driver) \
1262 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) 1254 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1263 1255
@@ -1272,8 +1264,7 @@ void pci_unregister_driver(struct pci_driver *dev);
1272 * use this macro once, and calling it replaces module_init() and module_exit() 1264 * use this macro once, and calling it replaces module_init() and module_exit()
1273 */ 1265 */
1274#define module_pci_driver(__pci_driver) \ 1266#define module_pci_driver(__pci_driver) \
1275 module_driver(__pci_driver, pci_register_driver, \ 1267 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1276 pci_unregister_driver)
1277 1268
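[Editor's note: a hedged sketch of the usual module_pci_driver() pattern; the driver name, ID table and callbacks are placeholders, not from this diff.]

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.id_table	= foo_ids,	/* table built with PCI_DEVICE()/PCI_VDEVICE() */
		.probe		= foo_probe,
		.remove		= foo_remove,
	};
	module_pci_driver(foo_driver);	/* replaces explicit module_init()/module_exit() */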
1278/** 1269/**
1279 * builtin_pci_driver() - Helper macro for registering a PCI driver 1270 * builtin_pci_driver() - Helper macro for registering a PCI driver
@@ -1312,10 +1303,10 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
1312int pci_set_vga_state(struct pci_dev *pdev, bool decode, 1303int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1313 unsigned int command_bits, u32 flags); 1304 unsigned int command_bits, u32 flags);
1314 1305
1315#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */ 1306#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
1316#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */ 1307#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
1317#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */ 1308#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
1318#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */ 1309#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
1319#define PCI_IRQ_ALL_TYPES \ 1310#define PCI_IRQ_ALL_TYPES \
1320 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) 1311 (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1321 1312
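[Editor's note: a hedged usage sketch. These flags are normally handed to pci_alloc_irq_vectors(), which this header declares elsewhere; the vector counts below are illustrative only.]

	int nvec, irq;

	/* Ask for up to 8 vectors, falling back from MSI-X to MSI to legacy INTx. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;
	irq = pci_irq_vector(pdev, 0);	/* Linux IRQ number for vector 0 */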
@@ -1334,8 +1325,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1334#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) 1325#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1335 1326
1336struct msix_entry { 1327struct msix_entry {
1337 u32 vector; /* kernel uses to write allocated vector */ 1328 u32 vector; /* Kernel uses to write allocated vector */
1338 u16 entry; /* driver uses to specify entry, OS writes */ 1329 u16 entry; /* Driver uses to specify entry, OS writes */
1339}; 1330};
1340 1331
1341#ifdef CONFIG_PCI_MSI 1332#ifdef CONFIG_PCI_MSI
@@ -1375,10 +1366,10 @@ static inline int pci_msi_enabled(void) { return 0; }
1375static inline int pci_enable_msi(struct pci_dev *dev) 1366static inline int pci_enable_msi(struct pci_dev *dev)
1376{ return -ENOSYS; } 1367{ return -ENOSYS; }
1377static inline int pci_enable_msix_range(struct pci_dev *dev, 1368static inline int pci_enable_msix_range(struct pci_dev *dev,
1378 struct msix_entry *entries, int minvec, int maxvec) 1369 struct msix_entry *entries, int minvec, int maxvec)
1379{ return -ENOSYS; } 1370{ return -ENOSYS; }
1380static inline int pci_enable_msix_exact(struct pci_dev *dev, 1371static inline int pci_enable_msix_exact(struct pci_dev *dev,
1381 struct msix_entry *entries, int nvec) 1372 struct msix_entry *entries, int nvec)
1382{ return -ENOSYS; } 1373{ return -ENOSYS; }
1383 1374
1384static inline int 1375static inline int
@@ -1543,9 +1534,9 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1543int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); 1534int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1544#endif 1535#endif
1545 1536
1546/* some architectures require additional setup to direct VGA traffic */ 1537/* Some architectures require additional setup to direct VGA traffic */
1547typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1538typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1548 unsigned int command_bits, u32 flags); 1539 unsigned int command_bits, u32 flags);
1549void pci_register_set_vga_state(arch_set_vga_state_t func); 1540void pci_register_set_vga_state(arch_set_vga_state_t func);
1550 1541
1551static inline int 1542static inline int
@@ -1584,10 +1575,9 @@ static inline void pci_clear_flags(int flags) { }
1584static inline int pci_has_flag(int flag) { return 0; } 1575static inline int pci_has_flag(int flag) { return 0; }
1585 1576
1586/* 1577/*
1587 * If the system does not have PCI, clearly these return errors. Define 1578 * If the system does not have PCI, clearly these return errors. Define
1588 * these as simple inline functions to avoid hair in drivers. 1579 * these as simple inline functions to avoid hair in drivers.
1589 */ 1580 */
1590
1591#define _PCI_NOP(o, s, t) \ 1581#define _PCI_NOP(o, s, t) \
1592 static inline int pci_##o##_config_##s(struct pci_dev *dev, \ 1582 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
1593 int where, t val) \ 1583 int where, t val) \
@@ -1686,6 +1676,13 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1686#define dev_is_pf(d) (false) 1676#define dev_is_pf(d) (false)
1687static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 1677static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
1688{ return false; } 1678{ return false; }
1679static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1680 struct device_node *node,
1681 const u32 *intspec,
1682 unsigned int intsize,
1683 unsigned long *out_hwirq,
1684 unsigned int *out_type)
1685{ return -EINVAL; }
1689#endif /* CONFIG_PCI */ 1686#endif /* CONFIG_PCI */
1690 1687
1691/* Include architecture-dependent settings and functions */ 1688/* Include architecture-dependent settings and functions */
@@ -1726,8 +1723,10 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
1726#define pci_root_bus_fwnode(bus) NULL 1723#define pci_root_bus_fwnode(bus) NULL
1727#endif 1724#endif
1728 1725
1729/* these helpers provide future and backwards compatibility 1726/*
1730 * for accessing popular PCI BAR info */ 1727 * These helpers provide future and backwards compatibility
1728 * for accessing popular PCI BAR info
1729 */
1731#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) 1730#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1732#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) 1731#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
1733#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) 1732#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
@@ -1739,7 +1738,8 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
1739 (pci_resource_end((dev), (bar)) - \ 1738 (pci_resource_end((dev), (bar)) - \
1740 pci_resource_start((dev), (bar)) + 1)) 1739 pci_resource_start((dev), (bar)) + 1))
1741 1740
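[Editor's note: a hedged fragment, assumed to live inside a hypothetical probe(), showing the BAR accessors above; the BAR number is an assumption.]

	void __iomem *regs;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
		return -ENODEV;
	/* Map BAR 0 using the start/len helpers defined above. */
	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;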
1742/* Similar to the helpers above, these manipulate per-pci_dev 1741/*
1742 * Similar to the helpers above, these manipulate per-pci_dev
1743 * driver-specific data. They are really just a wrapper around 1743 * driver-specific data. They are really just a wrapper around
1744 * the generic device structure functions of these calls. 1744 * the generic device structure functions of these calls.
1745 */ 1745 */
@@ -1753,16 +1753,14 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
1753 dev_set_drvdata(&pdev->dev, data); 1753 dev_set_drvdata(&pdev->dev, data);
1754} 1754}
1755 1755
1756/* If you want to know what to call your pci_dev, ask this function.
1757 * Again, it's a wrapper around the generic device.
1758 */
1759static inline const char *pci_name(const struct pci_dev *pdev) 1756static inline const char *pci_name(const struct pci_dev *pdev)
1760{ 1757{
1761 return dev_name(&pdev->dev); 1758 return dev_name(&pdev->dev);
1762} 1759}
1763 1760
1764 1761
1765/* Some archs don't want to expose struct resource to userland as-is 1762/*
1763 * Some archs don't want to expose struct resource to userland as-is
1766 * in sysfs and /proc 1764 * in sysfs and /proc
1767 */ 1765 */
1768#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER 1766#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
@@ -1781,16 +1779,16 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
1781 1779
1782 1780
1783/* 1781/*
1784 * The world is not perfect and supplies us with broken PCI devices. 1782 * The world is not perfect and supplies us with broken PCI devices.
1785 * For at least a part of these bugs we need a work-around, so both 1783 * For at least a part of these bugs we need a work-around, so both
1786 * generic (drivers/pci/quirks.c) and per-architecture code can define 1784 * generic (drivers/pci/quirks.c) and per-architecture code can define
1787 * fixup hooks to be called for particular buggy devices. 1785 * fixup hooks to be called for particular buggy devices.
1788 */ 1786 */
1789 1787
1790struct pci_fixup { 1788struct pci_fixup {
1791 u16 vendor; /* You can use PCI_ANY_ID here of course */ 1789 u16 vendor; /* Or PCI_ANY_ID */
1792 u16 device; /* You can use PCI_ANY_ID here of course */ 1790 u16 device; /* Or PCI_ANY_ID */
1793 u32 class; /* You can use PCI_ANY_ID here too */ 1791 u32 class; /* Or PCI_ANY_ID */
1794 unsigned int class_shift; /* should be 0, 8, 16 */ 1792 unsigned int class_shift; /* should be 0, 8, 16 */
1795 void (*hook)(struct pci_dev *dev); 1793 void (*hook)(struct pci_dev *dev);
1796}; 1794};
@@ -1832,23 +1830,19 @@ enum pci_fixup_pass {
1832#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 1830#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
1833 class_shift, hook) \ 1831 class_shift, hook) \
1834 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1832 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1835 resume##hook, vendor, device, class, \ 1833 resume##hook, vendor, device, class, class_shift, hook)
1836 class_shift, hook)
1837#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 1834#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
1838 class_shift, hook) \ 1835 class_shift, hook) \
1839 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1836 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1840 resume_early##hook, vendor, device, \ 1837 resume_early##hook, vendor, device, class, class_shift, hook)
1841 class, class_shift, hook)
1842#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 1838#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
1843 class_shift, hook) \ 1839 class_shift, hook) \
1844 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1840 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1845 suspend##hook, vendor, device, class, \ 1841 suspend##hook, vendor, device, class, class_shift, hook)
1846 class_shift, hook)
1847#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ 1842#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
1848 class_shift, hook) \ 1843 class_shift, hook) \
1849 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 1844 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1850 suspend_late##hook, vendor, device, \ 1845 suspend_late##hook, vendor, device, class, class_shift, hook)
1851 class, class_shift, hook)
1852 1846
1853#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 1847#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
1854 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 1848 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
@@ -1864,20 +1858,16 @@ enum pci_fixup_pass {
1864 hook, vendor, device, PCI_ANY_ID, 0, hook) 1858 hook, vendor, device, PCI_ANY_ID, 0, hook)
1865#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ 1859#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
1866 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 1860 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
1867 resume##hook, vendor, device, \ 1861 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
1868 PCI_ANY_ID, 0, hook)
1869#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 1862#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
1870 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 1863 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
1871 resume_early##hook, vendor, device, \ 1864 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
1872 PCI_ANY_ID, 0, hook)
1873#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 1865#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
1874 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 1866 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
1875 suspend##hook, vendor, device, \ 1867 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
1876 PCI_ANY_ID, 0, hook)
1877#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ 1868#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
1878 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 1869 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
1879 suspend_late##hook, vendor, device, \ 1870 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
1880 PCI_ANY_ID, 0, hook)
1881 1871
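[Editor's note: a hedged example of registering a quirk with one of the convenience macros above; the vendor/device values and the workaround are fictitious.]

	static void quirk_foo_disable_msi(struct pci_dev *dev)
	{
		dev->no_msi = 1;	/* assumed workaround for a broken function */
	}
	DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xabcd, quirk_foo_disable_msi);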
1882#ifdef CONFIG_PCI_QUIRKS 1872#ifdef CONFIG_PCI_QUIRKS
1883void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1873void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@ -1964,6 +1954,7 @@ int pci_vfs_assigned(struct pci_dev *dev);
1964int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); 1954int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
1965int pci_sriov_get_totalvfs(struct pci_dev *dev); 1955int pci_sriov_get_totalvfs(struct pci_dev *dev);
1966resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); 1956resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
1957void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
1967#else 1958#else
1968static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) 1959static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
1969{ 1960{
@@ -1991,6 +1982,7 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
1991{ return 0; } 1982{ return 0; }
1992static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) 1983static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
1993{ return 0; } 1984{ return 0; }
1985static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
1994#endif 1986#endif
1995 1987
1996#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) 1988#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
@@ -2061,6 +2053,7 @@ void pci_request_acs(void);
2061bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 2053bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2062bool pci_acs_path_enabled(struct pci_dev *start, 2054bool pci_acs_path_enabled(struct pci_dev *start,
2063 struct pci_dev *end, u16 acs_flags); 2055 struct pci_dev *end, u16 acs_flags);
2056int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2064 2057
2065#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 2058#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
2066#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) 2059#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
@@ -2112,7 +2105,7 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
2112 */ 2105 */
2113static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) 2106static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
2114{ 2107{
2115 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); 2108 return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
2116} 2109}
2117 2110
2118/** 2111/**
@@ -2182,6 +2175,9 @@ void pci_release_of_node(struct pci_dev *dev);
2182void pci_set_bus_of_node(struct pci_bus *bus); 2175void pci_set_bus_of_node(struct pci_bus *bus);
2183void pci_release_bus_of_node(struct pci_bus *bus); 2176void pci_release_bus_of_node(struct pci_bus *bus);
2184struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); 2177struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2178int pci_parse_request_of_pci_ranges(struct device *dev,
2179 struct list_head *resources,
2180 struct resource **bus_range);
2185 2181
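[Editor's note: a hedged sketch of how a DT-based host-bridge probe might call the new helper; "dev" is the host controller's struct device and error handling is trimmed.]

	LIST_HEAD(resources);
	struct resource *bus_range;
	int err;

	err = pci_parse_request_of_pci_ranges(dev, &resources, &bus_range);
	if (err)
		return err;
	/* "resources" now holds the parsed and requested host bridge windows. */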
2186/* Arch may override this (weak) */ 2182/* Arch may override this (weak) */
2187struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 2183struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2197,7 +2193,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2197 return bus ? bus->dev.of_node : NULL; 2193 return bus ? bus->dev.of_node : NULL;
2198} 2194}
2199 2195
2200#else /* CONFIG_OF */ 2196#else /* CONFIG_OF */
2201static inline void pci_set_of_node(struct pci_dev *dev) { } 2197static inline void pci_set_of_node(struct pci_dev *dev) { }
2202static inline void pci_release_of_node(struct pci_dev *dev) { } 2198static inline void pci_release_of_node(struct pci_dev *dev) { }
2203static inline void pci_set_bus_of_node(struct pci_bus *bus) { } 2199static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
@@ -2206,6 +2202,12 @@ static inline struct device_node *
2206pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } 2202pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
2207static inline struct irq_domain * 2203static inline struct irq_domain *
2208pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } 2204pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
2205static inline int pci_parse_request_of_pci_ranges(struct device *dev,
2206 struct list_head *resources,
2207 struct resource **bus_range)
2208{
2209 return -EINVAL;
2210}
2209#endif /* CONFIG_OF */ 2211#endif /* CONFIG_OF */
2210 2212
2211#ifdef CONFIG_ACPI 2213#ifdef CONFIG_ACPI
@@ -2231,7 +2233,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
2231 int (*fn)(struct pci_dev *pdev, 2233 int (*fn)(struct pci_dev *pdev,
2232 u16 alias, void *data), void *data); 2234 u16 alias, void *data), void *data);
2233 2235
2234/* helper functions for operation of device flag */ 2236/* Helper functions for operation of device flag */
2235static inline void pci_set_dev_assigned(struct pci_dev *pdev) 2237static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2236{ 2238{
2237 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; 2239 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
@@ -2278,7 +2280,55 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2278 return false; 2280 return false;
2279} 2281}
2280 2282
2281/* provide the legacy pci_dma_* API */ 2283/**
2284 * pci_uevent_ers - emit a uevent during recovery path of pci device
2285 * @pdev: pci device to check
2286 * @err_type: type of error event
2287 *
2288 */
2289static inline void pci_uevent_ers(struct pci_dev *pdev,
2290 enum pci_ers_result err_type)
2291{
2292 int idx = 0;
2293 char *envp[3];
2294
2295 switch (err_type) {
2296 case PCI_ERS_RESULT_NONE:
2297 case PCI_ERS_RESULT_CAN_RECOVER:
2298 envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
2299 envp[idx++] = "DEVICE_ONLINE=0";
2300 break;
2301 case PCI_ERS_RESULT_RECOVERED:
2302 envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY";
2303 envp[idx++] = "DEVICE_ONLINE=1";
2304 break;
2305 case PCI_ERS_RESULT_DISCONNECT:
2306 envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY";
2307 envp[idx++] = "DEVICE_ONLINE=0";
2308 break;
2309 default:
2310 break;
2311 }
2312
2313 if (idx > 0) {
2314 envp[idx++] = NULL;
2315 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp);
2316 }
2317}
2318
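[Editor's note: a hedged sketch of how an error-recovery path might report state to user space with the helper above; do_device_recovery() is a placeholder, not a real kernel function.]

	pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);		/* recovery begins */
	if (do_device_recovery(pdev) == 0)			/* placeholder helper */
		pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
	else
		pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);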
2319/* Provide the legacy pci_dma_* API */
2282#include <linux/pci-dma-compat.h> 2320#include <linux/pci-dma-compat.h>
2283 2321
2322#define pci_printk(level, pdev, fmt, arg...) \
2323 dev_printk(level, &(pdev)->dev, fmt, ##arg)
2324
2325#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
2326#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
2327#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
2328#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
2329#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
2330#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
2331#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
2332#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
2333
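[Editor's note: these wrappers simply prefix dev_*() output with the PCI device name; a hedged usage example with made-up message text.]

	pci_info(pdev, "enabled bus mastering\n");
	pci_err(pdev, "BAR %d: failed to map registers\n", 0);
	/* Equivalent to dev_info(&pdev->dev, ...) and dev_err(&pdev->dev, ...). */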
2284#endif /* LINUX_PCI_H */ 2334#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 2e855afa0212..26213024e81b 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * PCI HotPlug Core Functions 3 * PCI HotPlug Core Functions
3 * 4 *
@@ -7,21 +8,6 @@
7 * 8 *
8 * All rights reserved. 9 * All rights reserved.
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Send feedback to <kristen.c.accardi@intel.com> 11 * Send feedback to <kristen.c.accardi@intel.com>
26 * 12 *
27 */ 13 */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index ab20dc5db423..eb13e84e1fef 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2381,6 +2381,8 @@
2381 2381
2382#define PCI_VENDOR_ID_LENOVO 0x17aa 2382#define PCI_VENDOR_ID_LENOVO 0x17aa
2383 2383
2384#define PCI_VENDOR_ID_CDNS 0x17cd
2385
2384#define PCI_VENDOR_ID_ARECA 0x17d3 2386#define PCI_VENDOR_ID_ARECA 0x17d3
2385#define PCI_DEVICE_ID_ARECA_1110 0x1110 2387#define PCI_DEVICE_ID_ARECA_1110 0x1110
2386#define PCI_DEVICE_ID_ARECA_1120 0x1120 2388#define PCI_DEVICE_ID_ARECA_1120 0x1120
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6658d9ee5257..864d167a1073 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
139 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in 139 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
140 * between contaminating the pointer value, meaning that 140 * between contaminating the pointer value, meaning that
141 * READ_ONCE() is required when fetching it. 141 * READ_ONCE() is required when fetching it.
142 *
143 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
144 * with smp_store_release() in __percpu_ref_switch_to_percpu().
142 */ 145 */
143 percpu_ptr = READ_ONCE(ref->percpu_count_ptr); 146 percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
144 147
145 /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
146 smp_read_barrier_depends();
147
148 /* 148 /*
149 * Theoretically, the following could test just ATOMIC; however, 149 * Theoretically, the following could test just ATOMIC; however,
150 * then we'd have to mask off DEAD separately as DEAD may be 150 * then we'd have to mask off DEAD separately as DEAD may be
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 73a7bf30fe9a..4f052496cdfd 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -86,7 +86,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
86 return 0; 86 return 0;
87} 87}
88 88
89static inline int percpu_counter_initialized(struct percpu_counter *fbc) 89static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
90{ 90{
91 return (fbc->counters != NULL); 91 return (fbc->counters != NULL);
92} 92}
@@ -167,9 +167,9 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
167 return percpu_counter_read(fbc); 167 return percpu_counter_read(fbc);
168} 168}
169 169
170static inline int percpu_counter_initialized(struct percpu_counter *fbc) 170static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
171{ 171{
172 return 1; 172 return true;
173} 173}
174 174
175#endif /* CONFIG_SMP */ 175#endif /* CONFIG_SMP */
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 43b1d7648e82..a03c2642a87c 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -15,8 +15,10 @@
15#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) 15#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
16#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) 16#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
17#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) 17#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
18#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5))
18 19
19#define PFN_FLAGS_TRACE \ 20#define PFN_FLAGS_TRACE \
21 { PFN_SPECIAL, "SPECIAL" }, \
20 { PFN_SG_CHAIN, "SG_CHAIN" }, \ 22 { PFN_SG_CHAIN, "SG_CHAIN" }, \
21 { PFN_SG_LAST, "SG_LAST" }, \ 23 { PFN_SG_LAST, "SG_LAST" }, \
22 { PFN_DEV, "DEV" }, \ 24 { PFN_DEV, "DEV" }, \
@@ -120,4 +122,15 @@ pud_t pud_mkdevmap(pud_t pud);
120#endif 122#endif
121#endif /* __HAVE_ARCH_PTE_DEVMAP */ 123#endif /* __HAVE_ARCH_PTE_DEVMAP */
122 124
125#ifdef __HAVE_ARCH_PTE_SPECIAL
126static inline bool pfn_t_special(pfn_t pfn)
127{
128 return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
129}
130#else
131static inline bool pfn_t_special(pfn_t pfn)
132{
133 return false;
134}
135#endif /* __HAVE_ARCH_PTE_SPECIAL */
123#endif /* _LINUX_PFN_T_H_ */ 136#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index dc82a07cb4fd..5a0c3e53e7c2 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -59,6 +59,7 @@
59 59
60#define PHY_HAS_INTERRUPT 0x00000001 60#define PHY_HAS_INTERRUPT 0x00000001
61#define PHY_IS_INTERNAL 0x00000002 61#define PHY_IS_INTERNAL 0x00000002
62#define PHY_RST_AFTER_CLK_EN 0x00000004
62#define MDIO_DEVICE_IS_PHY 0x80000000 63#define MDIO_DEVICE_IS_PHY 0x80000000
63 64
64/* Interface Mode definitions */ 65/* Interface Mode definitions */
@@ -468,7 +469,6 @@ struct phy_device {
468 /* Interrupt and Polling infrastructure */ 469 /* Interrupt and Polling infrastructure */
469 struct work_struct phy_queue; 470 struct work_struct phy_queue;
470 struct delayed_work state_queue; 471 struct delayed_work state_queue;
471 atomic_t irq_disable;
472 472
473 struct mutex lock; 473 struct mutex lock;
474 474
@@ -497,19 +497,19 @@ struct phy_device {
497 * flags: A bitfield defining certain other features this PHY 497 * flags: A bitfield defining certain other features this PHY
498 * supports (like interrupts) 498 * supports (like interrupts)
499 * 499 *
500 * The drivers must implement config_aneg and read_status. All 500 * All functions are optional. If config_aneg or read_status
501 * other functions are optional. Note that none of these 501 * are not implemented, the phy core uses the genphy versions.
502 * functions should be called from interrupt time. The goal is 502 * Note that none of these functions should be called from
503 * for the bus read/write functions to be able to block when the 503 * interrupt time. The goal is for the bus read/write functions
504 * bus transaction is happening, and be freed up by an interrupt 504 * to be able to block when the bus transaction is happening,
505 * (The MPC85xx has this ability, though it is not currently 505 * and be freed up by an interrupt (The MPC85xx has this ability,
506 * supported in the driver). 506 * though it is not currently supported in the driver).
507 */ 507 */
508struct phy_driver { 508struct phy_driver {
509 struct mdio_driver_common mdiodrv; 509 struct mdio_driver_common mdiodrv;
510 u32 phy_id; 510 u32 phy_id;
511 char *name; 511 char *name;
512 unsigned int phy_id_mask; 512 u32 phy_id_mask;
513 u32 features; 513 u32 features;
514 u32 flags; 514 u32 flags;
515 const void *driver_data; 515 const void *driver_data;
@@ -634,6 +634,9 @@ struct phy_driver {
634 int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, 634 int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
635 u16 val); 635 u16 val);
636 636
637 int (*read_page)(struct phy_device *dev);
638 int (*write_page)(struct phy_device *dev, int page);
639
637 /* Get the size and type of the eeprom contained within a plug-in 640 /* Get the size and type of the eeprom contained within a plug-in
638 * module */ 641 * module */
639 int (*module_info)(struct phy_device *dev, 642 int (*module_info)(struct phy_device *dev,
@@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
690size_t phy_speeds(unsigned int *speeds, size_t size, 693size_t phy_speeds(unsigned int *speeds, size_t size,
691 unsigned long *mask, size_t maxbit); 694 unsigned long *mask, size_t maxbit);
692 695
696void phy_resolve_aneg_linkmode(struct phy_device *phydev);
697
693/** 698/**
694 * phy_read_mmd - Convenience function for reading a register 699 * phy_read_mmd - Convenience function for reading a register
695 * from an MMD on a given PHY. 700 * from an MMD on a given PHY.
@@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
716} 721}
717 722
718/** 723/**
724 * __phy_read - convenience function for reading a given PHY register
725 * @phydev: the phy_device struct
726 * @regnum: register number to read
727 *
728 * The caller must have taken the MDIO bus lock.
729 */
730static inline int __phy_read(struct phy_device *phydev, u32 regnum)
731{
732 return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
733}
734
735/**
719 * phy_write - Convenience function for writing a given PHY register 736 * phy_write - Convenience function for writing a given PHY register
720 * @phydev: the phy_device struct 737 * @phydev: the phy_device struct
721 * @regnum: register number to write 738 * @regnum: register number to write
@@ -731,6 +748,72 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
731} 748}
732 749
733/** 750/**
751 * __phy_write - Convenience function for writing a given PHY register
752 * @phydev: the phy_device struct
753 * @regnum: register number to write
754 * @val: value to write to @regnum
755 *
756 * The caller must have taken the MDIO bus lock.
757 */
758static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
759{
760 return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum,
761 val);
762}
763
764int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
765int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
766
767/**
768 * __phy_set_bits - Convenience function for setting bits in a PHY register
769 * @phydev: the phy_device struct
770 * @regnum: register number to write
771 * @val: bits to set
772 *
773 * The caller must have taken the MDIO bus lock.
774 */
775static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
776{
777 return __phy_modify(phydev, regnum, 0, val);
778}
779
780/**
781 * __phy_clear_bits - Convenience function for clearing bits in a PHY register
782 * @phydev: the phy_device struct
783 * @regnum: register number to write
784 * @val: bits to clear
785 *
786 * The caller must have taken the MDIO bus lock.
787 */
788static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum,
789 u16 val)
790{
791 return __phy_modify(phydev, regnum, val, 0);
792}
793
794/**
795 * phy_set_bits - Convenience function for setting bits in a PHY register
796 * @phydev: the phy_device struct
797 * @regnum: register number to write
798 * @val: bits to set
799 */
800static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val)
801{
802 return phy_modify(phydev, regnum, 0, val);
803}
804
805/**
806 * phy_clear_bits - Convenience function for clearing bits in a PHY register
807 * @phydev: the phy_device struct
808 * @regnum: register number to write
809 * @val: bits to clear
810 */
811static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val)
812{
813 return phy_modify(phydev, regnum, val, 0);
814}
815
816/**
734 * phy_interrupt_is_valid - Convenience function for testing a given PHY irq 817 * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
735 * @phydev: the phy_device struct 818 * @phydev: the phy_device struct
736 * 819 *
@@ -763,6 +846,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
763}; 846};
764 847
765/** 848/**
849 * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
850 * negotiation
851 * @mode: one of &enum phy_interface_t
852 *
853 * Returns true if the phy interface mode uses the 16-bit negotiation
854 * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
855 */
856static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
857{
858 return mode == PHY_INTERFACE_MODE_1000BASEX ||
859 mode == PHY_INTERFACE_MODE_2500BASEX;
860}
861
862/**
766 * phy_interface_is_rgmii - Convenience function for testing if a PHY interface 863 * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
767 * is RGMII (all variants) 864 * is RGMII (all variants)
768 * @phydev: the phy_device struct 865 * @phydev: the phy_device struct
@@ -794,6 +891,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
794 */ 891 */
795int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); 892int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
796 893
894int phy_save_page(struct phy_device *phydev);
895int phy_select_page(struct phy_device *phydev, int page);
896int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
897int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
898int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
899int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
900 u16 mask, u16 set);
901
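[Editor's note: a hedged sketch of the intended pairing of phy_select_page() and phy_restore_page(); the page and register numbers are fictitious.]

	static int foo_config_led(struct phy_device *phydev)
	{
		int oldpage, ret = 0;

		oldpage = phy_select_page(phydev, 3);	/* hypothetical vendor page 3 */
		if (oldpage >= 0)
			ret = __phy_write(phydev, 0x10, 0x8000);	/* made-up register */

		/* Restores the previous page and folds any earlier error into ret. */
		return phy_restore_page(phydev, oldpage, ret);
	}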
797struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, 902struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
798 bool is_c45, 903 bool is_c45,
799 struct phy_c45_device_ids *c45_ids); 904 struct phy_c45_device_ids *c45_ids);
@@ -840,13 +945,11 @@ int phy_aneg_done(struct phy_device *phydev);
840 945
841int phy_stop_interrupts(struct phy_device *phydev); 946int phy_stop_interrupts(struct phy_device *phydev);
842int phy_restart_aneg(struct phy_device *phydev); 947int phy_restart_aneg(struct phy_device *phydev);
948int phy_reset_after_clk_enable(struct phy_device *phydev);
843 949
844static inline int phy_read_status(struct phy_device *phydev) 950static inline void phy_device_reset(struct phy_device *phydev, int value)
845{ 951{
846 if (!phydev->drv) 952 mdio_device_reset(&phydev->mdio, value);
847 return -EIO;
848
849 return phydev->drv->read_status(phydev);
850} 953}
851 954
852#define phydev_err(_phydev, format, args...) \ 955#define phydev_err(_phydev, format, args...) \
@@ -889,6 +992,18 @@ int genphy_c45_read_lpa(struct phy_device *phydev);
889int genphy_c45_read_pma(struct phy_device *phydev); 992int genphy_c45_read_pma(struct phy_device *phydev);
890int genphy_c45_pma_setup_forced(struct phy_device *phydev); 993int genphy_c45_pma_setup_forced(struct phy_device *phydev);
891int genphy_c45_an_disable_aneg(struct phy_device *phydev); 994int genphy_c45_an_disable_aneg(struct phy_device *phydev);
995int genphy_c45_read_mdix(struct phy_device *phydev);
996
997static inline int phy_read_status(struct phy_device *phydev)
998{
999 if (!phydev->drv)
1000 return -EIO;
1001
1002 if (phydev->drv->read_status)
1003 return phydev->drv->read_status(phydev);
1004 else
1005 return genphy_read_status(phydev);
1006}
892 1007
893void phy_driver_unregister(struct phy_driver *drv); 1008void phy_driver_unregister(struct phy_driver *drv);
894void phy_drivers_unregister(struct phy_driver *drv, int n); 1009void phy_drivers_unregister(struct phy_driver *drv, int n);
@@ -898,7 +1013,7 @@ int phy_drivers_register(struct phy_driver *new_driver, int n,
898void phy_state_machine(struct work_struct *work); 1013void phy_state_machine(struct work_struct *work);
899void phy_change(struct phy_device *phydev); 1014void phy_change(struct phy_device *phydev);
900void phy_change_work(struct work_struct *work); 1015void phy_change_work(struct work_struct *work);
901void phy_mac_interrupt(struct phy_device *phydev, int new_link); 1016void phy_mac_interrupt(struct phy_device *phydev);
902void phy_start_machine(struct phy_device *phydev); 1017void phy_start_machine(struct phy_device *phydev);
903void phy_stop_machine(struct phy_device *phydev); 1018void phy_stop_machine(struct phy_device *phydev);
904void phy_trigger_machine(struct phy_device *phydev, bool sync); 1019void phy_trigger_machine(struct phy_device *phydev, bool sync);
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index cf6392de6eb0..ee54453a40a0 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev);
24extern int fixed_phy_set_link_update(struct phy_device *phydev, 24extern int fixed_phy_set_link_update(struct phy_device *phydev,
25 int (*link_update)(struct net_device *, 25 int (*link_update)(struct net_device *,
26 struct fixed_phy_status *)); 26 struct fixed_phy_status *));
27extern int fixed_phy_update_state(struct phy_device *phydev,
28 const struct fixed_phy_status *status,
29 const struct fixed_phy_status *changed);
30#else 27#else
31static inline int fixed_phy_add(unsigned int irq, int phy_id, 28static inline int fixed_phy_add(unsigned int irq, int phy_id,
32 struct fixed_phy_status *status, 29 struct fixed_phy_status *status,
@@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
50{ 47{
51 return -ENODEV; 48 return -ENODEV;
52} 49}
53static inline int fixed_phy_update_state(struct phy_device *phydev,
54 const struct fixed_phy_status *status,
55 const struct fixed_phy_status *changed)
56{
57 return -ENODEV;
58}
59#endif /* CONFIG_FIXED_PHY */ 50#endif /* CONFIG_FIXED_PHY */
60 51
61#endif /* __PHY_FIXED_H */ 52#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index af67edd4ae38..bd137c273d38 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -7,6 +7,7 @@
7 7
8struct device_node; 8struct device_node;
9struct ethtool_cmd; 9struct ethtool_cmd;
10struct fwnode_handle;
10struct net_device; 11struct net_device;
11 12
12enum { 13enum {
@@ -20,19 +21,31 @@ enum {
20 21
21 MLO_AN_PHY = 0, /* Conventional PHY */ 22 MLO_AN_PHY = 0, /* Conventional PHY */
22 MLO_AN_FIXED, /* Fixed-link mode */ 23 MLO_AN_FIXED, /* Fixed-link mode */
23 MLO_AN_SGMII, /* Cisco SGMII protocol */ 24 MLO_AN_INBAND, /* In-band protocol */
24 MLO_AN_8023Z, /* 1000base-X protocol */
25}; 25};
26 26
27static inline bool phylink_autoneg_inband(unsigned int mode) 27static inline bool phylink_autoneg_inband(unsigned int mode)
28{ 28{
29 return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z; 29 return mode == MLO_AN_INBAND;
30} 30}
31 31
32/**
33 * struct phylink_link_state - link state structure
34 * @advertising: ethtool bitmask containing advertised link modes
35 * @lp_advertising: ethtool bitmask containing link partner advertised link
36 * modes
37 * @interface: link &typedef phy_interface_t mode
38 * @speed: link speed, one of the SPEED_* constants.
39 * @duplex: link duplex mode, one of DUPLEX_* constants.
40 * @pause: link pause state, described by MLO_PAUSE_* constants.
41 * @link: true if the link is up.
42 * @an_enabled: true if autonegotiation is enabled/desired.
43 * @an_complete: true if autonegotiation has completed.
44 */
32struct phylink_link_state { 45struct phylink_link_state {
33 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 46 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
34 __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); 47 __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
35 phy_interface_t interface; /* PHY_INTERFACE_xxx */ 48 phy_interface_t interface;
36 int speed; 49 int speed;
37 int duplex; 50 int duplex;
38 int pause; 51 int pause;
@@ -41,72 +54,145 @@ struct phylink_link_state {
41 unsigned int an_complete:1; 54 unsigned int an_complete:1;
42}; 55};
43 56
57/**
58 * struct phylink_mac_ops - MAC operations structure.
59 * @validate: Validate and update the link configuration.
60 * @mac_link_state: Read the current link state from the hardware.
61 * @mac_config: configure the MAC for the selected mode and state.
62 * @mac_an_restart: restart 802.3z BaseX autonegotiation.
63 * @mac_link_down: take the link down.
64 * @mac_link_up: allow the link to come up.
65 *
66 * The individual methods are described more fully below.
67 */
44struct phylink_mac_ops { 68struct phylink_mac_ops {
45 /**
46 * validate: validate and update the link configuration
47 * @ndev: net_device structure associated with MAC
48 * @config: configuration to validate
49 *
50 * Update the %config->supported and %config->advertised masks
51 * clearing bits that can not be supported.
52 *
53 * Note: the PHY may be able to transform from one connection
54 * technology to another, so, eg, don't clear 1000BaseX just
55 * because the MAC is unable to support it. This is more about
56 * clearing unsupported speeds and duplex settings.
57 *
58 * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX
59 * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
60 * based on %config->advertised and/or %config->speed.
61 */
62 void (*validate)(struct net_device *ndev, unsigned long *supported, 69 void (*validate)(struct net_device *ndev, unsigned long *supported,
63 struct phylink_link_state *state); 70 struct phylink_link_state *state);
64 71 int (*mac_link_state)(struct net_device *ndev,
65 /* Read the current link state from the hardware */ 72 struct phylink_link_state *state);
66 int (*mac_link_state)(struct net_device *, struct phylink_link_state *);
67
68 /* Configure the MAC */
69 /**
70 * mac_config: configure the MAC for the selected mode and state
71 * @ndev: net_device structure for the MAC
72 * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII
73 * @state: state structure
74 *
75 * The action performed depends on the currently selected mode:
76 *
77 * %MLO_AN_FIXED, %MLO_AN_PHY:
78 * set the specified speed, duplex, pause mode, and phy interface
79 * mode in the provided @state.
80 * %MLO_AN_8023Z:
81 * place the link in 1000base-X mode, advertising the parameters
82 * given in advertising in @state.
83 * %MLO_AN_SGMII:
84 * place the link in Cisco SGMII mode - there is no advertisment
85 * to make as the PHY communicates the speed and duplex to the
86 * MAC over the in-band control word. Configuration of the pause
87 * mode is as per MLO_AN_PHY since this is not included.
88 */
89 void (*mac_config)(struct net_device *ndev, unsigned int mode, 73 void (*mac_config)(struct net_device *ndev, unsigned int mode,
90 const struct phylink_link_state *state); 74 const struct phylink_link_state *state);
91
92 /**
93 * mac_an_restart: restart 802.3z BaseX autonegotiation
94 * @ndev: net_device structure for the MAC
95 */
96 void (*mac_an_restart)(struct net_device *ndev); 75 void (*mac_an_restart)(struct net_device *ndev);
97 76 void (*mac_link_down)(struct net_device *ndev, unsigned int mode);
98 void (*mac_link_down)(struct net_device *, unsigned int mode); 77 void (*mac_link_up)(struct net_device *ndev, unsigned int mode,
99 void (*mac_link_up)(struct net_device *, unsigned int mode, 78 struct phy_device *phy);
100 struct phy_device *);
101}; 79};
102 80
103struct phylink *phylink_create(struct net_device *, struct device_node *, 81#if 0 /* For kernel-doc purposes only. */
82/**
83 * validate - Validate and update the link configuration
84 * @ndev: a pointer to a &struct net_device for the MAC.
85 * @supported: ethtool bitmask for supported link modes.
86 * @state: a pointer to a &struct phylink_link_state.
87 *
88 * Clear bits in the @supported and @state->advertising masks that
89 * are not supportable by the MAC.
90 *
91 * Note that the PHY may be able to transform from one connection
92 * technology to another, so, eg, don't clear 1000BaseX just
 93 * because the MAC is unable to support BaseX mode. This is more about
94 * clearing unsupported speeds and duplex settings.
95 *
96 * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
97 * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
98 * based on @state->advertising and/or @state->speed and update
99 * @state->interface accordingly.
100 */
101void validate(struct net_device *ndev, unsigned long *supported,
102 struct phylink_link_state *state);
103
104/**
105 * mac_link_state() - Read the current link state from the hardware
106 * @ndev: a pointer to a &struct net_device for the MAC.
107 * @state: a pointer to a &struct phylink_link_state.
108 *
109 * Read the current link state from the MAC, reporting the current
110 * speed in @state->speed, duplex mode in @state->duplex, pause mode
111 * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
112 * negotiation completion state in @state->an_complete, and link
113 * up state in @state->link.
114 */
115int mac_link_state(struct net_device *ndev,
116 struct phylink_link_state *state);
117
118/**
119 * mac_config() - configure the MAC for the selected mode and state
120 * @ndev: a pointer to a &struct net_device for the MAC.
121 * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
122 * @state: a pointer to a &struct phylink_link_state.
123 *
124 * The action performed depends on the currently selected mode:
125 *
126 * %MLO_AN_FIXED, %MLO_AN_PHY:
127 * Configure the specified @state->speed, @state->duplex and
128 * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode.
129 *
130 * %MLO_AN_INBAND:
131 * place the link in an inband negotiation mode (such as 802.3z
132 * 1000base-X or Cisco SGMII mode depending on the @state->interface
133 * mode). In both cases, link state management (whether the link
134 * is up or not) is performed by the MAC, and reported via the
135 * mac_link_state() callback. Changes in link state must be made
136 * by calling phylink_mac_change().
137 *
138 * If in 802.3z mode, the link speed is fixed, dependent on the
139 * @state->interface. Duplex is negotiated, and pause is advertised
140 * according to @state->an_enabled, @state->pause and
141 * @state->advertising flags. Beware of MACs which only support full
142 * duplex at gigabit and higher speeds.
143 *
144 * If in Cisco SGMII mode, the link speed and duplex mode are passed
145 * in the serial bitstream 16-bit configuration word, and the MAC
146 * should be configured to read these bits and acknowledge the
147 * configuration word. Nothing is advertised by the MAC. The MAC is
148 * responsible for reading the configuration word and configuring
149 * itself accordingly.
150 */
151void mac_config(struct net_device *ndev, unsigned int mode,
152 const struct phylink_link_state *state);
153
154/**
155 * mac_an_restart() - restart 802.3z BaseX autonegotiation
156 * @ndev: a pointer to a &struct net_device for the MAC.
157 */
158void mac_an_restart(struct net_device *ndev);
159
160/**
161 * mac_link_down() - take the link down
162 * @ndev: a pointer to a &struct net_device for the MAC.
163 * @mode: link autonegotiation mode
164 *
165 * If @mode is not an in-band negotiation mode (as defined by
166 * phylink_autoneg_inband()), force the link down and disable any
167 * Energy Efficient Ethernet MAC configuration.
168 */
169void mac_link_down(struct net_device *ndev, unsigned int mode);
170
171/**
172 * mac_link_up() - allow the link to come up
173 * @ndev: a pointer to a &struct net_device for the MAC.
174 * @mode: link autonegotiation mode
175 * @phy: any attached phy
176 *
177 * If @mode is not an in-band negotiation mode (as defined by
178 * phylink_autoneg_inband()), allow the link to come up. If @phy
179 * is non-%NULL, configure Energy Efficient Ethernet by calling
180 * phy_init_eee() and perform appropriate MAC configuration for EEE.
181 */
182void mac_link_up(struct net_device *ndev, unsigned int mode,
183 struct phy_device *phy);
184#endif
185
186struct phylink *phylink_create(struct net_device *, struct fwnode_handle *,
104 phy_interface_t iface, const struct phylink_mac_ops *ops); 187 phy_interface_t iface, const struct phylink_mac_ops *ops);
105void phylink_destroy(struct phylink *); 188void phylink_destroy(struct phylink *);
106 189
107int phylink_connect_phy(struct phylink *, struct phy_device *); 190int phylink_connect_phy(struct phylink *, struct phy_device *);
108int phylink_of_phy_connect(struct phylink *, struct device_node *); 191int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
109void phylink_disconnect_phy(struct phylink *); 192void phylink_disconnect_phy(struct phylink *);
193int phylink_fixed_state_cb(struct phylink *,
194 void (*cb)(struct net_device *dev,
195 struct phylink_link_state *));
110 196
111void phylink_mac_change(struct phylink *, bool up); 197void phylink_mac_change(struct phylink *, bool up);
112 198
@@ -128,7 +214,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *,
128int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *); 214int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
129int phylink_ethtool_get_module_eeprom(struct phylink *, 215int phylink_ethtool_get_module_eeprom(struct phylink *,
130 struct ethtool_eeprom *, u8 *); 216 struct ethtool_eeprom *, u8 *);
131int phylink_init_eee(struct phylink *, bool);
132int phylink_get_eee_err(struct phylink *); 217int phylink_get_eee_err(struct phylink *);
133int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); 218int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
134int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); 219int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
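As a concrete illustration of the ops table documented above, here is a minimal sketch of how a MAC driver might register with phylink; the foo_*() handlers and the advertised link modes are hypothetical, and only validate() is fleshed out.

static int foo_mac_link_state(struct net_device *ndev,
			      struct phylink_link_state *state);
static void foo_mac_config(struct net_device *ndev, unsigned int mode,
			   const struct phylink_link_state *state);
static void foo_mac_an_restart(struct net_device *ndev);
static void foo_mac_link_down(struct net_device *ndev, unsigned int mode);
static void foo_mac_link_up(struct net_device *ndev, unsigned int mode,
			    struct phy_device *phy);

static void foo_validate(struct net_device *ndev, unsigned long *supported,
			 struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Keep only what this (hypothetical) MAC can do. */
	phylink_set(mask, Autoneg);
	phylink_set(mask, Pause);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static const struct phylink_mac_ops foo_phylink_ops = {
	.validate	= foo_validate,
	.mac_link_state	= foo_mac_link_state,
	.mac_config	= foo_mac_config,
	.mac_an_restart	= foo_mac_an_restart,
	.mac_link_down	= foo_mac_link_down,
	.mac_link_up	= foo_mac_link_up,
};

The table would then be passed to phylink_create() together with the device's fwnode handle, matching the new fwnode-based prototype above.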
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
index 05082e407c4a..d01a8638bb45 100644
--- a/include/linux/pinctrl/devinfo.h
+++ b/include/linux/pinctrl/devinfo.h
@@ -43,6 +43,8 @@ extern int pinctrl_init_done(struct device *dev);
43 43
44#else 44#else
45 45
46struct device;
47
46/* Stubs if we're not using pinctrl */ 48/* Stubs if we're not using pinctrl */
47 49
48static inline int pinctrl_bind_pins(struct device *dev) 50static inline int pinctrl_bind_pins(struct device *dev)
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index ec6dadcc1fde..6c0680641108 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -94,6 +94,7 @@
94 * or latch delay (on outputs) this parameter (in a custom format) 94 * or latch delay (on outputs) this parameter (in a custom format)
95 * specifies the clock skew or latch delay. It typically controls how 95 * specifies the clock skew or latch delay. It typically controls how
96 * many double inverters are put in front of the line. 96 * many double inverters are put in front of the line.
97 * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
97 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if 98 * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
98 * you need to pass in custom configurations to the pin controller, use 99 * you need to pass in custom configurations to the pin controller, use
99 * PIN_CONFIG_END+1 as the base offset. 100 * PIN_CONFIG_END+1 as the base offset.
@@ -122,6 +123,7 @@ enum pin_config_param {
122 PIN_CONFIG_SLEEP_HARDWARE_STATE, 123 PIN_CONFIG_SLEEP_HARDWARE_STATE,
123 PIN_CONFIG_SLEW_RATE, 124 PIN_CONFIG_SLEW_RATE,
124 PIN_CONFIG_SKEW_DELAY, 125 PIN_CONFIG_SKEW_DELAY,
126 PIN_CONFIG_PERSIST_STATE,
125 PIN_CONFIG_END = 0x7F, 127 PIN_CONFIG_END = 0x7F,
126 PIN_CONFIG_MAX = 0xFF, 128 PIN_CONFIG_MAX = 0xFF,
127}; 129};
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 5e45385c5bdc..8f5dbb84547a 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -18,6 +18,7 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/pinctrl/pinctrl-state.h> 20#include <linux/pinctrl/pinctrl-state.h>
21#include <linux/pinctrl/devinfo.h>
21 22
22struct device; 23struct device;
23struct pinctrl_dev; 24struct pinctrl_dev;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 2dc5e9870fcd..5a3bb3b7c9ad 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -167,10 +167,9 @@ void pipe_lock(struct pipe_inode_info *);
167void pipe_unlock(struct pipe_inode_info *); 167void pipe_unlock(struct pipe_inode_info *);
168void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); 168void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
169 169
170extern unsigned int pipe_max_size, pipe_min_size; 170extern unsigned int pipe_max_size;
171extern unsigned long pipe_user_pages_hard; 171extern unsigned long pipe_user_pages_hard;
172extern unsigned long pipe_user_pages_soft; 172extern unsigned long pipe_user_pages_soft;
173int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
174 173
175/* Drop the inode semaphore and wait for a pipe event, atomically */ 174/* Drop the inode semaphore and wait for a pipe event, atomically */
176void pipe_wait(struct pipe_inode_info *pipe); 175void pipe_wait(struct pipe_inode_info *pipe);
@@ -191,6 +190,6 @@ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
191struct pipe_inode_info *get_pipe_info(struct file *file); 190struct pipe_inode_info *get_pipe_info(struct file *file);
192 191
193int create_pipe_files(struct file **, int); 192int create_pipe_files(struct file **, int);
194unsigned int round_pipe_size(unsigned int size); 193unsigned int round_pipe_size(unsigned long size);
195 194
196#endif 195#endif
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index 271a4e25af67..63507ff464ee 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -50,6 +50,8 @@ struct at24_platform_data {
50#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ 50#define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */
51#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ 51#define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */
52#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ 52#define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */
53#define AT24_FLAG_NO_RDROL BIT(1) /* does not auto-rollover reads to */
54 /* the next slave address */
53 55
54 void (*setup)(struct nvmem_device *nvmem, void *context); 56 void (*setup)(struct nvmem_device *nvmem, void *context);
55 void *context; 57 void *context;
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
index 89fd34727a24..98967df07468 100644
--- a/include/linux/platform_data/i2c-davinci.h
+++ b/include/linux/platform_data/i2c-davinci.h
@@ -16,9 +16,8 @@
16struct davinci_i2c_platform_data { 16struct davinci_i2c_platform_data {
17 unsigned int bus_freq; /* standard bus frequency (kHz) */ 17 unsigned int bus_freq; /* standard bus frequency (kHz) */
18 unsigned int bus_delay; /* post-transaction delay (usec) */ 18 unsigned int bus_delay; /* post-transaction delay (usec) */
19 unsigned int sda_pin; /* GPIO pin ID to use for SDA */ 19 bool gpio_recovery; /* Use GPIO recovery method */
20 unsigned int scl_pin; /* GPIO pin ID to use for SCL */ 20 bool has_pfunc; /* Chip has a ICPFUNC register */
21 bool has_pfunc; /*chip has a ICPFUNC register */
22}; 21};
23 22
24/* for board setup code */ 23/* for board setup code */
diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/platform_data/i2c-pxa.h
index 53aab243cbd8..5236f216dfae 100644
--- a/include/linux/i2c/pxa-i2c.h
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -71,15 +71,4 @@ struct i2c_pxa_platform_data {
71 unsigned char master_code; 71 unsigned char master_code;
72 unsigned long rate; 72 unsigned long rate;
73}; 73};
74
75extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
76
77#ifdef CONFIG_PXA27x
78extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info);
79#endif
80
81#ifdef CONFIG_PXA3xx
82extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info);
83#endif
84
85#endif 74#endif
diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h
deleted file mode 100644
index e4cfcffaa6f4..000000000000
--- a/include/linux/platform_data/mlxcpld-hotplug.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * include/linux/platform_data/mlxcpld-hotplug.h
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
36#define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
37
38/**
39 * struct mlxcpld_hotplug_device - I2C device data:
40 * @adapter: I2C device adapter;
41 * @client: I2C device client;
42 * @brdinfo: device board information;
43 * @bus: I2C bus, where device is attached;
44 *
45 * Structure represents I2C hotplug device static data (board topology) and
46 * dynamic data (related kernel objects handles).
47 */
48struct mlxcpld_hotplug_device {
49 struct i2c_adapter *adapter;
50 struct i2c_client *client;
51 struct i2c_board_info brdinfo;
52 u16 bus;
53};
54
55/**
56 * struct mlxcpld_hotplug_platform_data - device platform data:
57 * @top_aggr_offset: offset of top aggregation interrupt register;
58 * @top_aggr_mask: top aggregation interrupt common mask;
59 * @top_aggr_psu_mask: top aggregation interrupt PSU mask;
60 * @psu_reg_offset: offset of PSU interrupt register;
61 * @psu_mask: PSU interrupt mask;
62 * @psu_count: number of equipped replaceable PSUs;
63 * @psu: pointer to PSU devices data array;
64 * @top_aggr_pwr_mask: top aggregation interrupt power mask;
65 * @pwr_reg_offset: offset of power interrupt register
66 * @pwr_mask: power interrupt mask;
67 * @pwr_count: number of power sources;
68 * @pwr: pointer to power devices data array;
69 * @top_aggr_fan_mask: top aggregation interrupt FAN mask;
70 * @fan_reg_offset: offset of FAN interrupt register;
71 * @fan_mask: FAN interrupt mask;
72 * @fan_count: number of equipped replaceable FANs;
73 * @fan: pointer to FAN devices data array;
74 *
75 * Structure represents board platform data, related to system hotplug events,
76 * like FAN, PSU, power cable insertion and removing. This data provides the
77 * number of hot-pluggable devices and hardware description for event handling.
78 */
79struct mlxcpld_hotplug_platform_data {
80 u16 top_aggr_offset;
81 u8 top_aggr_mask;
82 u8 top_aggr_psu_mask;
83 u16 psu_reg_offset;
84 u8 psu_mask;
85 u8 psu_count;
86 struct mlxcpld_hotplug_device *psu;
87 u8 top_aggr_pwr_mask;
88 u16 pwr_reg_offset;
89 u8 pwr_mask;
90 u8 pwr_count;
91 struct mlxcpld_hotplug_device *pwr;
92 u8 top_aggr_fan_mask;
93 u16 fan_reg_offset;
94 u8 fan_mask;
95 u8 fan_count;
96 struct mlxcpld_hotplug_device *fan;
97};
98
99#endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
new file mode 100644
index 000000000000..fcdc707eab99
--- /dev/null
+++ b/include/linux/platform_data/mlxreg.h
@@ -0,0 +1,144 @@
1/*
2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the names of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2 as published by the Free
19 * Software Foundation.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#ifndef __LINUX_PLATFORM_DATA_MLXREG_H
35#define __LINUX_PLATFORM_DATA_MLXREG_H
36
37#define MLXREG_CORE_LABEL_MAX_SIZE 32
38
39/**
40 * struct mlxreg_hotplug_device - I2C device data:
41 *
42 * @adapter: I2C device adapter;
43 * @client: I2C device client;
44 * @brdinfo: device board information;
45 * @nr: I2C device adapter number, to which device is to be attached;
46 *
47 * Structure represents I2C hotplug device static data (board topology) and
48 * dynamic data (related kernel objects handles).
49 */
50struct mlxreg_hotplug_device {
51 struct i2c_adapter *adapter;
52 struct i2c_client *client;
53 struct i2c_board_info *brdinfo;
54 int nr;
55};
56
57/**
58 * struct mlxreg_core_data - attributes control data:
59 *
60 * @label: attribute label;
61 * @label: attribute register offset;
62 * @reg: attribute register;
63 * @mask: attribute access mask;
64 * @mode: access mode;
65 * @bit: attribute effective bit;
 66 * @np: pointer to the platform node associated with the attribute;
 67 * @hpdev: hotplug device data;
68 * @health_cntr: dynamic device health indication counter;
69 * @attached: true if device has been attached after good health indication;
70 */
71struct mlxreg_core_data {
72 char label[MLXREG_CORE_LABEL_MAX_SIZE];
73 u32 reg;
74 u32 mask;
75 u32 bit;
76 umode_t mode;
77 struct device_node *np;
78 struct mlxreg_hotplug_device hpdev;
79 u8 health_cntr;
80 bool attached;
81};
82
83/**
84 * struct mlxreg_core_item - same type components controlled by the driver:
85 *
86 * @data: component data;
87 * @aggr_mask: group aggregation mask;
88 * @reg: group interrupt status register;
89 * @mask: group interrupt mask;
 90 * @cache: last status value for elements from the same group;
91 * @count: number of available elements in the group;
92 * @ind: element's index inside the group;
 93 * @inversed: if 0, a signal status of 0 means OK; if 1, a status of 1 means OK;
94 * @health: true if device has health indication, false in other case;
95 */
96struct mlxreg_core_item {
97 struct mlxreg_core_data *data;
98 u32 aggr_mask;
99 u32 reg;
100 u32 mask;
101 u32 cache;
102 u8 count;
103 u8 ind;
104 u8 inversed;
105 u8 health;
106};
107
108/**
109 * struct mlxreg_core_platform_data - platform data:
110 *
111 * @data: led private data;
112 * @regmap: register map of parent device;
113 * @counter: number of led instances;
114 */
115struct mlxreg_core_platform_data {
116 struct mlxreg_core_data *data;
117 void *regmap;
118 int counter;
119};
120
121/**
122 * struct mlxreg_core_hotplug_platform_data - hotplug platform data:
123 *
124 * @items: same type components with the hotplug capability;
125 * @irq: platform interrupt number;
126 * @regmap: register map of parent device;
127 * @counter: number of the components with the hotplug capability;
128 * @cell: location of top aggregation interrupt register;
129 * @mask: top aggregation interrupt common mask;
130 * @cell_low: location of low aggregation interrupt register;
131 * @mask_low: low aggregation interrupt common mask;
132 */
133struct mlxreg_core_hotplug_platform_data {
134 struct mlxreg_core_item *items;
135 int irq;
136 void *regmap;
137 int counter;
138 u32 cell;
139 u32 mask;
140 u32 cell_low;
141 u32 mask_low;
142};
143
144#endif /* __LINUX_PLATFORM_DATA_MLXREG_H */
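To show how the new mlxreg structures fit together, here is an illustrative hotplug description for two PSUs; it is not taken from any real board, and every register offset, mask and I2C adapter number below is an invented example value.

static struct mlxreg_core_data example_psu_data[] = {
	{ .label = "psu1", .reg = 0x58, .mask = BIT(0), .hpdev.nr = 4 },
	{ .label = "psu2", .reg = 0x58, .mask = BIT(1), .hpdev.nr = 5 },
};

static struct mlxreg_core_item example_items[] = {
	{
		.data		= example_psu_data,
		.aggr_mask	= BIT(2),
		.reg		= 0x58,
		.mask		= GENMASK(1, 0),
		.count		= ARRAY_SIZE(example_psu_data),
		.inversed	= 1,	/* a status bit of 0 means OK */
	},
};

static struct mlxreg_core_hotplug_platform_data example_hotplug_pdata = {
	.items		= example_items,
	.counter	= ARRAY_SIZE(example_items),
	.cell		= 0x3a,		/* top aggregation register, example */
	.mask		= BIT(2),
};

A hotplug driver would receive this as its platform data and use @regmap, filled in by the parent device, to poll the aggregation and group registers.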
diff --git a/include/linux/platform_data/mms114.h b/include/linux/platform_data/mms114.h
deleted file mode 100644
index 5722ebfb2738..000000000000
--- a/include/linux/platform_data/mms114.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
 7 * published by the Free Software Foundation.
8 */
9
10#ifndef __LINUX_MMS114_H
11#define __LINUX_MMS114_H
12
13struct mms114_platform_data {
14 unsigned int x_size;
15 unsigned int y_size;
16 unsigned int contact_threshold;
17 unsigned int moving_threshold;
18 bool x_invert;
19 bool y_invert;
20
21 void (*cfg_pin)(bool);
22};
23
24#endif /* __LINUX_MMS114_H */
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
deleted file mode 100644
index 56ff0e6f5ad1..000000000000
--- a/include/linux/platform_data/mtd-onenand-omap2.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (C) 2006 Nokia Corporation
3 * Author: Juha Yrjola
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __MTD_ONENAND_OMAP2_H
11#define __MTD_ONENAND_OMAP2_H
12
13#include <linux/mtd/mtd.h>
14#include <linux/mtd/partitions.h>
15
16#define ONENAND_SYNC_READ (1 << 0)
17#define ONENAND_SYNC_READWRITE (1 << 1)
18#define ONENAND_IN_OMAP34XX (1 << 2)
19
20struct omap_onenand_platform_data {
21 int cs;
22 int gpio_irq;
23 struct mtd_partition *parts;
24 int nr_parts;
25 int (*onenand_setup)(void __iomem *, int *freq_ptr);
26 int dma_channel;
27 u8 flags;
28 u8 regulator_can_sleep;
29 u8 skip_initial_unlocking;
30
31 /* for passing the partitions */
32 struct device_node *of_node;
33};
34#endif
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index 818c5c6e203f..c71a2dd66143 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -86,6 +86,7 @@ enum si5351_disable_state {
86 * @multisynth_src: multisynth source clock 86 * @multisynth_src: multisynth source clock
87 * @clkout_src: clkout source clock 87 * @clkout_src: clkout source clock
88 * @pll_master: if true, clkout can also change pll rate 88 * @pll_master: if true, clkout can also change pll rate
89 * @pll_reset: if true, clkout can reset its pll
89 * @drive: output drive strength 90 * @drive: output drive strength
90 * @rate: initial clkout rate, or default if 0 91 * @rate: initial clkout rate, or default if 0
91 */ 92 */
@@ -95,6 +96,7 @@ struct si5351_clkout_config {
95 enum si5351_drive_strength drive; 96 enum si5351_drive_strength drive;
96 enum si5351_disable_state disable_state; 97 enum si5351_disable_state disable_state;
97 bool pll_master; 98 bool pll_master;
99 bool pll_reset;
98 unsigned long rate; 100 unsigned long rate;
99}; 101};
100 102
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index da79774078a7..773daf7915a3 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -1,10 +1,8 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
1/* 3/*
2 * Copyright (C) 2009 Samsung Electronics Ltd. 4 * Copyright (C) 2009 Samsung Electronics Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com> 5 * Jaswinder Singh <jassi.brar@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */ 6 */
9 7
10#ifndef __SPI_S3C64XX_H 8#ifndef __SPI_S3C64XX_H
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
new file mode 100644
index 000000000000..1be356330b96
--- /dev/null
+++ b/include/linux/platform_data/ti-sysc.h
@@ -0,0 +1,86 @@
1#ifndef __TI_SYSC_DATA_H__
2#define __TI_SYSC_DATA_H__
3
4enum ti_sysc_module_type {
5 TI_SYSC_OMAP2,
6 TI_SYSC_OMAP2_TIMER,
7 TI_SYSC_OMAP3_SHAM,
8 TI_SYSC_OMAP3_AES,
9 TI_SYSC_OMAP4,
10 TI_SYSC_OMAP4_TIMER,
11 TI_SYSC_OMAP4_SIMPLE,
12 TI_SYSC_OMAP34XX_SR,
13 TI_SYSC_OMAP36XX_SR,
14 TI_SYSC_OMAP4_SR,
15 TI_SYSC_OMAP4_MCASP,
16 TI_SYSC_OMAP4_USB_HOST_FS,
17};
18
19/**
20 * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets
21 * @midle_shift: Offset of the midle bit
22 * @clkact_shift: Offset of the clockactivity bit
23 * @sidle_shift: Offset of the sidle bit
24 * @enwkup_shift: Offset of the enawakeup bit
25 * @srst_shift: Offset of the softreset bit
26 * @autoidle_shift: Offset of the autoidle bit
27 * @dmadisable_shift: Offset of the dmadisable bit
 28 * @emufree_shift: Offset of the emufree bit
29 *
30 * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a
31 * feature is not available.
32 */
33struct sysc_regbits {
34 s8 midle_shift;
35 s8 clkact_shift;
36 s8 sidle_shift;
37 s8 enwkup_shift;
38 s8 srst_shift;
39 s8 autoidle_shift;
40 s8 dmadisable_shift;
41 s8 emufree_shift;
42};
43
44#define SYSC_QUIRK_RESET_STATUS BIT(7)
45#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
46#define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5)
47#define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4)
48#define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3)
49#define SYSC_QUIRK_16BIT BIT(2)
50#define SYSC_QUIRK_UNCACHED BIT(1)
51#define SYSC_QUIRK_USE_CLOCKACT BIT(0)
52
53#define SYSC_NR_IDLEMODES 4
54
55/**
56 * struct sysc_capabilities - capabilities for an interconnect target module
57 *
58 * @sysc_mask: bitmask of supported SYSCONFIG register bits
59 * @regbits: bitmask of SYSCONFIG register bits
60 * @mod_quirks: bitmask of module specific quirks
61 */
62struct sysc_capabilities {
63 const enum ti_sysc_module_type type;
64 const u32 sysc_mask;
65 const struct sysc_regbits *regbits;
66 const u32 mod_quirks;
67};
68
69/**
70 * struct sysc_config - configuration for an interconnect target module
71 * @sysc_val: configured value for sysc register
72 * @midlemodes: bitmask of supported master idle modes
 73 * @sidlemodes: bitmask of supported slave idle modes
74 * @srst_udelay: optional delay needed after OCP soft reset
75 * @quirks: bitmask of enabled quirks
76 */
77struct sysc_config {
78 u32 sysc_val;
79 u32 syss_mask;
80 u8 midlemodes;
81 u8 sidlemodes;
82 u8 srst_udelay;
83 u32 quirks;
84};
85
86#endif /* __TI_SYSC_DATA_H__ */
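As a sketch of how these capability and configuration structures are meant to be filled in, the following describes a hypothetical OMAP4-style target module; the bit positions and quirk choices are illustrative only, with -ENODEV marking features the module does not have, as the comment above permits.

static const struct sysc_regbits example_regbits = {
	.midle_shift	= 4,
	.sidle_shift	= 2,
	.srst_shift	= 0,
	.clkact_shift	= -ENODEV,
	.enwkup_shift	= -ENODEV,
	.autoidle_shift	= -ENODEV,
	.dmadisable_shift = -ENODEV,
	.emufree_shift	= -ENODEV,
};

static const struct sysc_capabilities example_caps = {
	.type		= TI_SYSC_OMAP4,
	.sysc_mask	= BIT(0),	/* softreset bit, example value */
	.regbits	= &example_regbits,
	.mod_quirks	= SYSC_QUIRK_RESET_STATUS,
};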
diff --git a/include/linux/poll.h b/include/linux/poll.h
index d384f12abdd5..04781a753326 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -37,7 +37,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
37 */ 37 */
38typedef struct poll_table_struct { 38typedef struct poll_table_struct {
39 poll_queue_proc _qproc; 39 poll_queue_proc _qproc;
40 unsigned long _key; 40 __poll_t _key;
41} poll_table; 41} poll_table;
42 42
43static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) 43static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -62,20 +62,20 @@ static inline bool poll_does_not_wait(const poll_table *p)
62 * to be started implicitly on poll(). You typically only want to do that 62 * to be started implicitly on poll(). You typically only want to do that
63 * if the application is actually polling for POLLIN and/or POLLOUT. 63 * if the application is actually polling for POLLIN and/or POLLOUT.
64 */ 64 */
65static inline unsigned long poll_requested_events(const poll_table *p) 65static inline __poll_t poll_requested_events(const poll_table *p)
66{ 66{
67 return p ? p->_key : ~0UL; 67 return p ? p->_key : ~(__poll_t)0;
68} 68}
69 69
70static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) 70static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
71{ 71{
72 pt->_qproc = qproc; 72 pt->_qproc = qproc;
73 pt->_key = ~0UL; /* all events enabled */ 73 pt->_key = ~(__poll_t)0; /* all events enabled */
74} 74}
75 75
76struct poll_table_entry { 76struct poll_table_entry {
77 struct file *filp; 77 struct file *filp;
78 unsigned long key; 78 __poll_t key;
79 wait_queue_entry_t wait; 79 wait_queue_entry_t wait;
80 wait_queue_head_t *wait_address; 80 wait_queue_head_t *wait_address;
81}; 81};
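With _key now carrying __poll_t, a driver's ->poll() method is typed end to end; here is a minimal sketch of such a method, where struct foo_dev and its members are hypothetical.

#include <linux/poll.h>
#include <linux/kfifo.h>

struct foo_dev {
	wait_queue_head_t	waitq;
	struct kfifo		fifo;
};

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	struct foo_dev *dev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);

	if (!kfifo_is_empty(&dev->fifo))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (kfifo_avail(&dev->fifo))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

A driver that wants to defer work unless userspace actually asked for readability can additionally test poll_requested_events(wait) & EPOLLIN, as the comment above suggests.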
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 38d8225510f1..3a3bc71017d5 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -68,7 +68,7 @@ struct posix_clock_operations {
68 68
69 int (*open) (struct posix_clock *pc, fmode_t f_mode); 69 int (*open) (struct posix_clock *pc, fmode_t f_mode);
70 70
71 uint (*poll) (struct posix_clock *pc, 71 __poll_t (*poll) (struct posix_clock *pc,
72 struct file *file, poll_table *wait); 72 struct file *file, poll_table *wait);
73 73
74 int (*release) (struct posix_clock *pc); 74 int (*release) (struct posix_clock *pc);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 672c4f32311e..c85704fcdbd2 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -42,13 +42,26 @@ struct cpu_timer_list {
42#define CLOCKFD CPUCLOCK_MAX 42#define CLOCKFD CPUCLOCK_MAX
43#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) 43#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
44 44
45#define MAKE_PROCESS_CPUCLOCK(pid, clock) \ 45static inline clockid_t make_process_cpuclock(const unsigned int pid,
46 ((~(clockid_t) (pid) << 3) | (clockid_t) (clock)) 46 const clockid_t clock)
47#define MAKE_THREAD_CPUCLOCK(tid, clock) \ 47{
48 MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK) 48 return ((~pid) << 3) | clock;
49}
50static inline clockid_t make_thread_cpuclock(const unsigned int tid,
51 const clockid_t clock)
52{
53 return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
54}
49 55
50#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) 56static inline clockid_t fd_to_clockid(const int fd)
51#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3)) 57{
58 return make_process_cpuclock((unsigned int) fd, CLOCKFD);
59}
60
61static inline int clockid_to_fd(const clockid_t clk)
62{
63 return ~(clk >> 3);
64}
52 65
53#define REQUEUE_PENDING 1 66#define REQUEUE_PENDING 1
54 67
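The macro-to-inline conversion keeps the same bit layout (~fd shifted left by three, with the clock type in the low bits), so the fd encoding still round-trips; a small illustrative check, not taken from the kernel sources:

static void example_clockid_roundtrip(int fd)
{
	clockid_t clk = fd_to_clockid(fd);

	/* clockid_to_fd() undoes the (~fd << 3) | CLOCKFD encoding. */
	WARN_ON(clockid_to_fd(clk) != fd);
}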
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index b2b7255ec7f5..540595a321a7 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -12,6 +12,7 @@
12#include <linux/bug.h> 12#include <linux/bug.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/refcount.h>
15#include <uapi/linux/posix_acl.h> 16#include <uapi/linux/posix_acl.h>
16 17
17struct posix_acl_entry { 18struct posix_acl_entry {
@@ -24,7 +25,7 @@ struct posix_acl_entry {
24}; 25};
25 26
26struct posix_acl { 27struct posix_acl {
27 atomic_t a_refcount; 28 refcount_t a_refcount;
28 struct rcu_head a_rcu; 29 struct rcu_head a_rcu;
29 unsigned int a_count; 30 unsigned int a_count;
30 struct posix_acl_entry a_entries[0]; 31 struct posix_acl_entry a_entries[0];
@@ -41,7 +42,7 @@ static inline struct posix_acl *
41posix_acl_dup(struct posix_acl *acl) 42posix_acl_dup(struct posix_acl *acl)
42{ 43{
43 if (acl) 44 if (acl)
44 atomic_inc(&acl->a_refcount); 45 refcount_inc(&acl->a_refcount);
45 return acl; 46 return acl;
46} 47}
47 48
@@ -51,7 +52,7 @@ posix_acl_dup(struct posix_acl *acl)
51static inline void 52static inline void
52posix_acl_release(struct posix_acl *acl) 53posix_acl_release(struct posix_acl *acl)
53{ 54{
54 if (acl && atomic_dec_and_test(&acl->a_refcount)) 55 if (acl && refcount_dec_and_test(&acl->a_refcount))
55 kfree_rcu(acl, a_rcu); 56 kfree_rcu(acl, a_rcu);
56} 57}
57 58
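The allocation side has to match the refcount_t conversion; here is a sketch of what the corresponding initialisation looks like (this is not the actual fs/posix_acl.c hunk), with the initial reference now set through refcount_set():

static struct posix_acl *example_acl_alloc(int count, gfp_t flags)
{
	size_t size = sizeof(struct posix_acl) +
		      count * sizeof(struct posix_acl_entry);
	struct posix_acl *acl = kmalloc(size, flags);

	if (acl) {
		refcount_set(&acl->a_refcount, 1);
		acl->a_count = count;
	}
	return acl;
}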
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e6187f524f2c..01fbf1b16258 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -16,6 +16,7 @@ enum bq27xxx_chip {
16 BQ27520G2, /* bq27520G2 */ 16 BQ27520G2, /* bq27520G2 */
17 BQ27520G3, /* bq27520G3 */ 17 BQ27520G3, /* bq27520G3 */
18 BQ27520G4, /* bq27520G4 */ 18 BQ27520G4, /* bq27520G4 */
19 BQ27521, /* bq27521 */
19 BQ27530, /* bq27530, bq27531 */ 20 BQ27530, /* bq27530, bq27531 */
20 BQ27531, 21 BQ27531,
21 BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ 22 BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 2ff18c9840a7..d31cb6215905 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd);
78#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) 78#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
79extern void *ns_get_path(struct path *path, struct task_struct *task, 79extern void *ns_get_path(struct path *path, struct task_struct *task,
80 const struct proc_ns_operations *ns_ops); 80 const struct proc_ns_operations *ns_ops);
81typedef struct ns_common *ns_get_path_helper_t(void *);
82extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
83 void *private_data);
81 84
82extern int ns_get_name(char *buf, size_t size, struct task_struct *task, 85extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
83 const struct proc_ns_operations *ns_ops); 86 const struct proc_ns_operations *ns_ops);
diff --git a/include/linux/property.h b/include/linux/property.h
index f6189a3ac63c..769d372c1edf 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -83,11 +83,17 @@ struct fwnode_handle *fwnode_get_next_parent(
83 struct fwnode_handle *fwnode); 83 struct fwnode_handle *fwnode);
84struct fwnode_handle *fwnode_get_next_child_node( 84struct fwnode_handle *fwnode_get_next_child_node(
85 const struct fwnode_handle *fwnode, struct fwnode_handle *child); 85 const struct fwnode_handle *fwnode, struct fwnode_handle *child);
86struct fwnode_handle *fwnode_get_next_available_child_node(
87 const struct fwnode_handle *fwnode, struct fwnode_handle *child);
86 88
87#define fwnode_for_each_child_node(fwnode, child) \ 89#define fwnode_for_each_child_node(fwnode, child) \
88 for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ 90 for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
89 child = fwnode_get_next_child_node(fwnode, child)) 91 child = fwnode_get_next_child_node(fwnode, child))
90 92
93#define fwnode_for_each_available_child_node(fwnode, child) \
94 for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
95 child = fwnode_get_next_available_child_node(fwnode, child))
96
91struct fwnode_handle *device_get_next_child_node( 97struct fwnode_handle *device_get_next_child_node(
92 struct device *dev, struct fwnode_handle *child); 98 struct device *dev, struct fwnode_handle *child);
93 99
@@ -103,6 +109,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev,
103struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); 109struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
104void fwnode_handle_put(struct fwnode_handle *fwnode); 110void fwnode_handle_put(struct fwnode_handle *fwnode);
105 111
112int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
113
106unsigned int device_get_child_node_count(struct device *dev); 114unsigned int device_get_child_node_count(struct device *dev);
107 115
108static inline bool device_property_read_bool(struct device *dev, 116static inline bool device_property_read_bool(struct device *dev,
@@ -206,7 +214,7 @@ struct property_entry {
206 */ 214 */
207 215
208#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ 216#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
209{ \ 217(struct property_entry) { \
210 .name = _name_, \ 218 .name = _name_, \
211 .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ 219 .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
212 .is_array = true, \ 220 .is_array = true, \
@@ -224,7 +232,7 @@ struct property_entry {
224 PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) 232 PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
225 233
226#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ 234#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
227{ \ 235(struct property_entry) { \
228 .name = _name_, \ 236 .name = _name_, \
229 .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ 237 .length = ARRAY_SIZE(_val_) * sizeof(const char *), \
230 .is_array = true, \ 238 .is_array = true, \
@@ -233,7 +241,7 @@ struct property_entry {
233} 241}
234 242
235#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ 243#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
236{ \ 244(struct property_entry) { \
237 .name = _name_, \ 245 .name = _name_, \
238 .length = sizeof(_type_), \ 246 .length = sizeof(_type_), \
239 .is_string = false, \ 247 .is_string = false, \
@@ -250,7 +258,7 @@ struct property_entry {
250 PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) 258 PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
251 259
252#define PROPERTY_ENTRY_STRING(_name_, _val_) \ 260#define PROPERTY_ENTRY_STRING(_name_, _val_) \
253{ \ 261(struct property_entry) { \
254 .name = _name_, \ 262 .name = _name_, \
255 .length = sizeof(_val_), \ 263 .length = sizeof(_val_), \
256 .is_string = true, \ 264 .is_string = true, \
@@ -258,7 +266,7 @@ struct property_entry {
258} 266}
259 267
260#define PROPERTY_ENTRY_BOOL(_name_) \ 268#define PROPERTY_ENTRY_BOOL(_name_) \
261{ \ 269(struct property_entry) { \
262 .name = _name_, \ 270 .name = _name_, \
263} 271}
264 272
@@ -275,10 +283,15 @@ bool device_dma_supported(struct device *dev);
275 283
276enum dev_dma_attr device_get_dma_attr(struct device *dev); 284enum dev_dma_attr device_get_dma_attr(struct device *dev);
277 285
286void *device_get_match_data(struct device *dev);
287
278int device_get_phy_mode(struct device *dev); 288int device_get_phy_mode(struct device *dev);
279 289
280void *device_get_mac_address(struct device *dev, char *addr, int alen); 290void *device_get_mac_address(struct device *dev, char *addr, int alen);
281 291
292int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
293void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
294 char *addr, int alen);
282struct fwnode_handle *fwnode_graph_get_next_endpoint( 295struct fwnode_handle *fwnode_graph_get_next_endpoint(
283 const struct fwnode_handle *fwnode, struct fwnode_handle *prev); 296 const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
284struct fwnode_handle * 297struct fwnode_handle *
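Turning the PROPERTY_ENTRY_* macros into compound literals means they are now usable in expression context as well as in the static array initialisers drivers already use; a brief sketch with invented property names:

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_STRING("label", "example"),
	PROPERTY_ENTRY_U32("linux,rate", 125000),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ }	/* sentinel */
};

static void example_assign(struct property_entry *p)
{
	/* Assignment like this only compiles with the compound-literal form. */
	*p = PROPERTY_ENTRY_U32("linux,rate", 250000);
}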
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb5e1db..8b1b3b5935ab 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -25,7 +25,19 @@ bool psci_tos_resident_on(int cpu);
25int psci_cpu_init_idle(unsigned int cpu); 25int psci_cpu_init_idle(unsigned int cpu);
26int psci_cpu_suspend_enter(unsigned long index); 26int psci_cpu_suspend_enter(unsigned long index);
27 27
28enum psci_conduit {
29 PSCI_CONDUIT_NONE,
30 PSCI_CONDUIT_SMC,
31 PSCI_CONDUIT_HVC,
32};
33
34enum smccc_version {
35 SMCCC_VERSION_1_0,
36 SMCCC_VERSION_1_1,
37};
38
28struct psci_operations { 39struct psci_operations {
40 u32 (*get_version)(void);
29 int (*cpu_suspend)(u32 state, unsigned long entry_point); 41 int (*cpu_suspend)(u32 state, unsigned long entry_point);
30 int (*cpu_off)(u32 state); 42 int (*cpu_off)(u32 state);
31 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); 43 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
@@ -33,6 +45,8 @@ struct psci_operations {
33 int (*affinity_info)(unsigned long target_affinity, 45 int (*affinity_info)(unsigned long target_affinity,
34 unsigned long lowest_affinity_level); 46 unsigned long lowest_affinity_level);
35 int (*migrate_info_type)(void); 47 int (*migrate_info_type)(void);
48 enum psci_conduit conduit;
49 enum smccc_version smccc_version;
36}; 50};
37 51
38extern struct psci_operations psci_ops; 52extern struct psci_operations psci_ops;
@@ -46,10 +60,11 @@ static inline int psci_dt_init(void) { return 0; }
46#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI) 60#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
47int __init psci_acpi_init(void); 61int __init psci_acpi_init(void);
48bool __init acpi_psci_present(void); 62bool __init acpi_psci_present(void);
49bool __init acpi_psci_use_hvc(void); 63bool acpi_psci_use_hvc(void);
50#else 64#else
51static inline int psci_acpi_init(void) { return 0; } 65static inline int psci_acpi_init(void) { return 0; }
52static inline bool acpi_psci_present(void) { return false; } 66static inline bool acpi_psci_present(void) { return false; }
 67static inline bool acpi_psci_use_hvc(void) { return false; }
53#endif 68#endif
54 69
55#endif /* __LINUX_PSCI_H */ 70#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index d72b2e7dd500..1883d6137e9b 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -45,9 +45,10 @@ struct ptr_ring {
45}; 45};
46 46
47/* Note: callers invoking this in a loop must use a compiler barrier, 47/* Note: callers invoking this in a loop must use a compiler barrier,
48 * for example cpu_relax(). If ring is ever resized, callers must hold 48 * for example cpu_relax().
49 * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold 49 *
50 * producer_lock, the next call to __ptr_ring_produce may fail. 50 * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
51 * see e.g. ptr_ring_full.
51 */ 52 */
52static inline bool __ptr_ring_full(struct ptr_ring *r) 53static inline bool __ptr_ring_full(struct ptr_ring *r)
53{ 54{
@@ -113,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
113 /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ 114 /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
114 smp_wmb(); 115 smp_wmb();
115 116
116 r->queue[r->producer++] = ptr; 117 WRITE_ONCE(r->queue[r->producer++], ptr);
117 if (unlikely(r->producer >= r->size)) 118 if (unlikely(r->producer >= r->size))
118 r->producer = 0; 119 r->producer = 0;
119 return 0; 120 return 0;
@@ -169,32 +170,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
169 return ret; 170 return ret;
170} 171}
171 172
172/* Note: callers invoking this in a loop must use a compiler barrier,
173 * for example cpu_relax(). Callers must take consumer_lock
174 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
175 * If ring is never resized, and if the pointer is merely
176 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
177 * However, if called outside the lock, and if some other CPU
178 * consumes ring entries at the same time, the value returned
179 * is not guaranteed to be correct.
180 * In this case - to avoid incorrectly detecting the ring
181 * as empty - the CPU consuming the ring entries is responsible
182 * for either consuming all ring entries until the ring is empty,
183 * or synchronizing with some other CPU and causing it to
184 * execute __ptr_ring_peek and/or consume the ring enteries
185 * after the synchronization point.
186 */
187static inline void *__ptr_ring_peek(struct ptr_ring *r) 173static inline void *__ptr_ring_peek(struct ptr_ring *r)
188{ 174{
189 if (likely(r->size)) 175 if (likely(r->size))
190 return r->queue[r->consumer_head]; 176 return READ_ONCE(r->queue[r->consumer_head]);
191 return NULL; 177 return NULL;
192} 178}
193 179
194/* See __ptr_ring_peek above for locking rules. */ 180/*
181 * Test ring empty status without taking any locks.
182 *
183 * NB: This is only safe to call if ring is never resized.
184 *
185 * However, if some other CPU consumes ring entries at the same time, the value
186 * returned is not guaranteed to be correct.
187 *
188 * In this case - to avoid incorrectly detecting the ring
189 * as empty - the CPU consuming the ring entries is responsible
190 * for either consuming all ring entries until the ring is empty,
191 * or synchronizing with some other CPU and causing it to
192 * re-test __ptr_ring_empty and/or consume the ring entries
193 * after the synchronization point.
194 *
195 * Note: callers invoking this in a loop must use a compiler barrier,
196 * for example cpu_relax().
197 */
195static inline bool __ptr_ring_empty(struct ptr_ring *r) 198static inline bool __ptr_ring_empty(struct ptr_ring *r)
196{ 199{
197 return !__ptr_ring_peek(r); 200 if (likely(r->size))
201 return !r->queue[READ_ONCE(r->consumer_head)];
202 return true;
198} 203}
199 204
200static inline bool ptr_ring_empty(struct ptr_ring *r) 205static inline bool ptr_ring_empty(struct ptr_ring *r)
@@ -248,22 +253,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
248 /* Fundamentally, what we want to do is update consumer 253 /* Fundamentally, what we want to do is update consumer
249 * index and zero out the entry so producer can reuse it. 254 * index and zero out the entry so producer can reuse it.
250 * Doing it naively at each consume would be as simple as: 255 * Doing it naively at each consume would be as simple as:
251 * r->queue[r->consumer++] = NULL; 256 * consumer = r->consumer;
252 * if (unlikely(r->consumer >= r->size)) 257 * r->queue[consumer++] = NULL;
253 * r->consumer = 0; 258 * if (unlikely(consumer >= r->size))
259 * consumer = 0;
260 * r->consumer = consumer;
254 * but that is suboptimal when the ring is full as producer is writing 261 * but that is suboptimal when the ring is full as producer is writing
255 * out new entries in the same cache line. Defer these updates until a 262 * out new entries in the same cache line. Defer these updates until a
256 * batch of entries has been consumed. 263 * batch of entries has been consumed.
257 */ 264 */
258 int head = r->consumer_head++; 265 /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
266 * to work correctly.
267 */
268 int consumer_head = r->consumer_head;
269 int head = consumer_head++;
259 270
260 /* Once we have processed enough entries invalidate them in 271 /* Once we have processed enough entries invalidate them in
261 * the ring all at once so producer can reuse their space in the ring. 272 * the ring all at once so producer can reuse their space in the ring.
262 * We also do this when we reach end of the ring - not mandatory 273 * We also do this when we reach end of the ring - not mandatory
263 * but helps keep the implementation simple. 274 * but helps keep the implementation simple.
264 */ 275 */
265 if (unlikely(r->consumer_head - r->consumer_tail >= r->batch || 276 if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
266 r->consumer_head >= r->size)) { 277 consumer_head >= r->size)) {
267 /* Zero out entries in the reverse order: this way we touch the 278 /* Zero out entries in the reverse order: this way we touch the
268 * cache line that producer might currently be reading the last; 279 * cache line that producer might currently be reading the last;
269 * producer won't make progress and touch other cache lines 280 * producer won't make progress and touch other cache lines
@@ -271,12 +282,14 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
271 */ 282 */
272 while (likely(head >= r->consumer_tail)) 283 while (likely(head >= r->consumer_tail))
273 r->queue[head--] = NULL; 284 r->queue[head--] = NULL;
274 r->consumer_tail = r->consumer_head; 285 r->consumer_tail = consumer_head;
275 } 286 }
276 if (unlikely(r->consumer_head >= r->size)) { 287 if (unlikely(consumer_head >= r->size)) {
277 r->consumer_head = 0; 288 consumer_head = 0;
278 r->consumer_tail = 0; 289 r->consumer_tail = 0;
279 } 290 }
291 /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
292 WRITE_ONCE(r->consumer_head, consumer_head);
280} 293}
281 294
282static inline void *__ptr_ring_consume(struct ptr_ring *r) 295static inline void *__ptr_ring_consume(struct ptr_ring *r)
@@ -527,7 +540,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
527 goto done; 540 goto done;
528 } 541 }
529 r->queue[head] = batch[--n]; 542 r->queue[head] = batch[--n];
530 r->consumer_tail = r->consumer_head = head; 543 r->consumer_tail = head;
544 /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
545 WRITE_ONCE(r->consumer_head, head);
531 } 546 }
532 547
533done: 548done:
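The READ_ONCE/WRITE_ONCE pairing above exists so that __ptr_ring_empty() can be used as a lock-free hint while real consumption still happens under consumer_lock; a small sketch of that split, assuming a ring of kmalloc'd entries that is never resized:

static bool example_has_work(struct ptr_ring *r)
{
	/* Lockless hint only; may race with a concurrent consumer. */
	return !__ptr_ring_empty(r);
}

static void example_drain(struct ptr_ring *r)
{
	void *ptr;

	/* ptr_ring_consume() takes consumer_lock internally. */
	while ((ptr = ptr_ring_consume(r)) != NULL)
		kfree(ptr);
}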
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 1fd27d68926b..b401b962afff 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -13,6 +13,9 @@
13#ifndef __QCOM_SCM_H 13#ifndef __QCOM_SCM_H
14#define __QCOM_SCM_H 14#define __QCOM_SCM_H
15 15
16#include <linux/types.h>
17#include <linux/cpumask.h>
18
16#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) 19#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
17#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 20#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
18#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 21#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 39e2a2ac2471..2b3b350e07b7 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -32,14 +32,15 @@
32 32
33#ifndef _COMMON_HSI_H 33#ifndef _COMMON_HSI_H
34#define _COMMON_HSI_H 34#define _COMMON_HSI_H
35
35#include <linux/types.h> 36#include <linux/types.h>
36#include <asm/byteorder.h> 37#include <asm/byteorder.h>
37#include <linux/bitops.h> 38#include <linux/bitops.h>
38#include <linux/slab.h> 39#include <linux/slab.h>
39 40
40/* dma_addr_t manip */ 41/* dma_addr_t manip */
41#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) 42#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
42#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) 43#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
43#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) 44#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
44#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) 45#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
45#define DMA_REGPAIR_LE(x, val) do { \ 46#define DMA_REGPAIR_LE(x, val) do { \
@@ -47,39 +48,45 @@
47 (x).lo = DMA_LO_LE((val)); \ 48 (x).lo = DMA_LO_LE((val)); \
48 } while (0) 49 } while (0)
49 50
50#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) 51#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
51#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) 52#define HILO_64(hi, lo) \
52#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) 53 HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
54#define HILO_64_REGPAIR(regpair) ({ \
55 typeof(regpair) __regpair = (regpair); \
56 HILO_64(__regpair.hi, __regpair.lo); })
53#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) 57#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
54 58
55#ifndef __COMMON_HSI__ 59#ifndef __COMMON_HSI__
56#define __COMMON_HSI__ 60#define __COMMON_HSI__
57 61
62/********************************/
63/* PROTOCOL COMMON FW CONSTANTS */
64/********************************/
58 65
59#define X_FINAL_CLEANUP_AGG_INT 1 66#define X_FINAL_CLEANUP_AGG_INT 1
60 67
61#define EVENT_RING_PAGE_SIZE_BYTES 4096 68#define EVENT_RING_PAGE_SIZE_BYTES 4096
62 69
63#define NUM_OF_GLOBAL_QUEUES 128 70#define NUM_OF_GLOBAL_QUEUES 128
64#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 71#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
65 72
66#define ISCSI_CDU_TASK_SEG_TYPE 0 73#define ISCSI_CDU_TASK_SEG_TYPE 0
67#define FCOE_CDU_TASK_SEG_TYPE 0 74#define FCOE_CDU_TASK_SEG_TYPE 0
68#define RDMA_CDU_TASK_SEG_TYPE 1 75#define RDMA_CDU_TASK_SEG_TYPE 1
69 76
70#define FW_ASSERT_GENERAL_ATTN_IDX 32 77#define FW_ASSERT_GENERAL_ATTN_IDX 32
71 78
72#define MAX_PINNED_CCFC 32 79#define MAX_PINNED_CCFC 32
73 80
74/* Queue Zone sizes in bytes */ 81/* Queue Zone sizes in bytes */
75#define TSTORM_QZONE_SIZE 8 82#define TSTORM_QZONE_SIZE 8
76#define MSTORM_QZONE_SIZE 16 83#define MSTORM_QZONE_SIZE 16
77#define USTORM_QZONE_SIZE 8 84#define USTORM_QZONE_SIZE 8
78#define XSTORM_QZONE_SIZE 8 85#define XSTORM_QZONE_SIZE 8
79#define YSTORM_QZONE_SIZE 0 86#define YSTORM_QZONE_SIZE 0
80#define PSTORM_QZONE_SIZE 0 87#define PSTORM_QZONE_SIZE 0
81 88
82#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 89#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
83#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 90#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
84#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 91#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
85#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 92#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
@@ -102,8 +109,8 @@
102#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 109#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
103 110
104#define FW_MAJOR_VERSION 8 111#define FW_MAJOR_VERSION 8
105#define FW_MINOR_VERSION 20 112#define FW_MINOR_VERSION 33
106#define FW_REVISION_VERSION 0 113#define FW_REVISION_VERSION 1
107#define FW_ENGINEERING_VERSION 0 114#define FW_ENGINEERING_VERSION 0
108 115
109/***********************/ 116/***********************/
@@ -115,10 +122,10 @@
115#define MAX_NUM_PORTS_BB (2) 122#define MAX_NUM_PORTS_BB (2)
116#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) 123#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
117 124
118#define MAX_NUM_PFS_K2 (16) 125#define MAX_NUM_PFS_K2 (16)
119#define MAX_NUM_PFS_BB (8) 126#define MAX_NUM_PFS_BB (8)
120#define MAX_NUM_PFS (MAX_NUM_PFS_K2) 127#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
121#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ 128#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
122 129
123#define MAX_NUM_VFS_K2 (192) 130#define MAX_NUM_VFS_K2 (192)
124#define MAX_NUM_VFS_BB (120) 131#define MAX_NUM_VFS_BB (120)
@@ -141,29 +148,14 @@
141/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ 148/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
142#define NUM_PHYS_TCS_4PORT_K2 (4) 149#define NUM_PHYS_TCS_4PORT_K2 (4)
143#define NUM_OF_PHYS_TCS (8) 150#define NUM_OF_PHYS_TCS (8)
144 151#define PURE_LB_TC NUM_OF_PHYS_TCS
145#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) 152#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
146#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) 153#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
147 154
148#define LB_TC (NUM_OF_PHYS_TCS)
149
150/* Num of possible traffic priority values */
151#define NUM_OF_PRIO (8)
152
153#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
154#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
155#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
156#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
157
158/* CIDs */ 155/* CIDs */
159#define NUM_OF_CONNECTION_TYPES (8) 156#define NUM_OF_CONNECTION_TYPES_E4 (8)
160#define NUM_OF_LCIDS (320) 157#define NUM_OF_LCIDS (320)
161#define NUM_OF_LTIDS (320) 158#define NUM_OF_LTIDS (320)
162
163/* Clock values */
164#define MASTER_CLK_FREQ_E4 (375e6)
165#define STORM_CLK_FREQ_E4 (1000e6)
166#define CLK25M_CLK_FREQ_E4 (25e6)
167 159
168/* Global PXP windows (GTT) */ 160/* Global PXP windows (GTT) */
169#define NUM_OF_GTT 19 161#define NUM_OF_GTT 19
@@ -172,17 +164,17 @@
172#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) 164#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
173 165
174/* Tools Version */ 166/* Tools Version */
175#define TOOLS_VERSION 10 167#define TOOLS_VERSION 10
176 168
177/*****************/ 169/*****************/
178/* CDU CONSTANTS */ 170/* CDU CONSTANTS */
179/*****************/ 171/*****************/
180 172
181#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) 173#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
182#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) 174#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
183 175
184#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) 176#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
185#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) 177#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
186 178
187#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) 179#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
188#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) 180#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
@@ -201,45 +193,45 @@
201#define DQ_DEMS_TOE_LOCAL_ADV_WND 4 193#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
202#define DQ_DEMS_ROCE_CQ_CONS 7 194#define DQ_DEMS_ROCE_CQ_CONS 7
203 195
204/* XCM agg val selection */ 196/* XCM agg val selection (HW) */
205#define DQ_XCM_AGG_VAL_SEL_WORD2 0 197#define DQ_XCM_AGG_VAL_SEL_WORD2 0
206#define DQ_XCM_AGG_VAL_SEL_WORD3 1 198#define DQ_XCM_AGG_VAL_SEL_WORD3 1
207#define DQ_XCM_AGG_VAL_SEL_WORD4 2 199#define DQ_XCM_AGG_VAL_SEL_WORD4 2
208#define DQ_XCM_AGG_VAL_SEL_WORD5 3 200#define DQ_XCM_AGG_VAL_SEL_WORD5 3
209#define DQ_XCM_AGG_VAL_SEL_REG3 4 201#define DQ_XCM_AGG_VAL_SEL_REG3 4
210#define DQ_XCM_AGG_VAL_SEL_REG4 5 202#define DQ_XCM_AGG_VAL_SEL_REG4 5
211#define DQ_XCM_AGG_VAL_SEL_REG5 6 203#define DQ_XCM_AGG_VAL_SEL_REG5 6
212#define DQ_XCM_AGG_VAL_SEL_REG6 7 204#define DQ_XCM_AGG_VAL_SEL_REG6 7
213 205
214/* XCM agg val selection */ 206/* XCM agg val selection (FW) */
215#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 207#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
216#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 208#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
217#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 209#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
218#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 210#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2
219#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 211#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
220#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 212#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
221#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 213#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
222#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 214#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
223#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 215#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
224#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 216#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
225#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 217#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
226#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 218#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
227#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 219#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
228#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 220#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
229#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 221#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
230#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 222#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
231#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 223#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
232#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 224#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
233 225
234/* UCM agg val selection (HW) */ 226/* UCM agg val selection (HW) */
235#define DQ_UCM_AGG_VAL_SEL_WORD0 0 227#define DQ_UCM_AGG_VAL_SEL_WORD0 0
236#define DQ_UCM_AGG_VAL_SEL_WORD1 1 228#define DQ_UCM_AGG_VAL_SEL_WORD1 1
237#define DQ_UCM_AGG_VAL_SEL_WORD2 2 229#define DQ_UCM_AGG_VAL_SEL_WORD2 2
238#define DQ_UCM_AGG_VAL_SEL_WORD3 3 230#define DQ_UCM_AGG_VAL_SEL_WORD3 3
239#define DQ_UCM_AGG_VAL_SEL_REG0 4 231#define DQ_UCM_AGG_VAL_SEL_REG0 4
240#define DQ_UCM_AGG_VAL_SEL_REG1 5 232#define DQ_UCM_AGG_VAL_SEL_REG1 5
241#define DQ_UCM_AGG_VAL_SEL_REG2 6 233#define DQ_UCM_AGG_VAL_SEL_REG2 6
242#define DQ_UCM_AGG_VAL_SEL_REG3 7 234#define DQ_UCM_AGG_VAL_SEL_REG3 7
243 235
244/* UCM agg val selection (FW) */ 236/* UCM agg val selection (FW) */
245#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 237#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
@@ -263,7 +255,7 @@
263#define DQ_TCM_ROCE_RQ_PROD_CMD \ 255#define DQ_TCM_ROCE_RQ_PROD_CMD \
264 DQ_TCM_AGG_VAL_SEL_WORD0 256 DQ_TCM_AGG_VAL_SEL_WORD0
265 257
266/* XCM agg counter flag selection */ 258/* XCM agg counter flag selection (HW) */
267#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 259#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
268#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 260#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
269#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 261#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
@@ -273,20 +265,20 @@
273#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 265#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
274#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 266#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
275 267
276/* XCM agg counter flag selection */ 268/* XCM agg counter flag selection (FW) */
277#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) 269#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
278#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 270#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
279#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 271#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
280#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) 272#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
281#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 273#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
282#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 274#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
283#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) 275#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
284#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 276#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
285#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 277#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
286#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 278#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
287#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) 279#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
288#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) 280#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
289#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) 281#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
290 282
291/* UCM agg counter flag selection (HW) */ 283/* UCM agg counter flag selection (HW) */
292#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 284#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -317,9 +309,9 @@
317#define DQ_TCM_AGG_FLG_SHIFT_CF6 6 309#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
318#define DQ_TCM_AGG_FLG_SHIFT_CF7 7 310#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
319/* TCM agg counter flag selection (FW) */ 311/* TCM agg counter flag selection (FW) */
320#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) 312#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
321#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) 313#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
322#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) 314#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
323#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) 315#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
324#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) 316#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
325#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) 317#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
@@ -327,18 +319,18 @@
327#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) 319#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
328 320
329/* PWM address mapping */ 321/* PWM address mapping */
330#define DQ_PWM_OFFSET_DPM_BASE 0x0 322#define DQ_PWM_OFFSET_DPM_BASE 0x0
331#define DQ_PWM_OFFSET_DPM_END 0x27 323#define DQ_PWM_OFFSET_DPM_END 0x27
332#define DQ_PWM_OFFSET_XCM16_BASE 0x40 324#define DQ_PWM_OFFSET_XCM16_BASE 0x40
333#define DQ_PWM_OFFSET_XCM32_BASE 0x44 325#define DQ_PWM_OFFSET_XCM32_BASE 0x44
334#define DQ_PWM_OFFSET_UCM16_BASE 0x48 326#define DQ_PWM_OFFSET_UCM16_BASE 0x48
335#define DQ_PWM_OFFSET_UCM32_BASE 0x4C 327#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
336#define DQ_PWM_OFFSET_UCM16_4 0x50 328#define DQ_PWM_OFFSET_UCM16_4 0x50
337#define DQ_PWM_OFFSET_TCM16_BASE 0x58 329#define DQ_PWM_OFFSET_TCM16_BASE 0x58
338#define DQ_PWM_OFFSET_TCM32_BASE 0x5C 330#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
339#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 331#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
340#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 332#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
341#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B 333#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
342 334
343#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) 335#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
344#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) 336#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
@@ -347,10 +339,11 @@
347#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) 339#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
348#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) 340#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
349#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) 341#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
350#define DQ_REGION_SHIFT (12) 342
343#define DQ_REGION_SHIFT (12)
351 344
352/* DPM */ 345/* DPM */
353#define DQ_DPM_WQE_BUFF_SIZE (320) 346#define DQ_DPM_WQE_BUFF_SIZE (320)
354 347
355/* Conn type ranges */ 348/* Conn type ranges */
356#define DQ_CONN_TYPE_RANGE_SHIFT (4) 349#define DQ_CONN_TYPE_RANGE_SHIFT (4)
@@ -359,29 +352,30 @@
359/* QM CONSTANTS */ 352/* QM CONSTANTS */
360/*****************/ 353/*****************/
361 354
362/* number of TX queues in the QM */ 355/* Number of TX queues in the QM */
363#define MAX_QM_TX_QUEUES_K2 512 356#define MAX_QM_TX_QUEUES_K2 512
364#define MAX_QM_TX_QUEUES_BB 448 357#define MAX_QM_TX_QUEUES_BB 448
365#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 358#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
366 359
367/* number of Other queues in the QM */ 360/* Number of Other queues in the QM */
368#define MAX_QM_OTHER_QUEUES_BB 64 361#define MAX_QM_OTHER_QUEUES_BB 64
369#define MAX_QM_OTHER_QUEUES_K2 128 362#define MAX_QM_OTHER_QUEUES_K2 128
370#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 363#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
371 364
372/* number of queues in a PF queue group */ 365/* Number of queues in a PF queue group */
373#define QM_PF_QUEUE_GROUP_SIZE 8 366#define QM_PF_QUEUE_GROUP_SIZE 8
374 367
375/* the size of a single queue element in bytes */ 368/* The size of a single queue element in bytes */
376#define QM_PQ_ELEMENT_SIZE 4 369#define QM_PQ_ELEMENT_SIZE 4
377 370
378/* base number of Tx PQs in the CM PQ representation. 371/* Base number of Tx PQs in the CM PQ representation.
379 * should be used when storing PQ IDs in CM PQ registers and context 372 * Should be used when storing PQ IDs in CM PQ registers and context.
380 */ 373 */
381#define CM_TX_PQ_BASE 0x200 374#define CM_TX_PQ_BASE 0x200
382 375
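Per the comment above, a TX PQ id is biased by CM_TX_PQ_BASE before it is stored in CM PQ registers and context. A minimal illustration of that convention only; the helper name below is an assumption, not taken from the driver:

    #include <assert.h>

    #define CM_TX_PQ_BASE 0x200          /* value copied from the header above */

    /* Hypothetical helper: translate a QM TX PQ id into the id written
     * into CM PQ registers/context. */
    static inline unsigned int qm_pq_id_to_cm(unsigned int pq_id)
    {
            return CM_TX_PQ_BASE + pq_id;
    }

    int main(void)
    {
            assert(qm_pq_id_to_cm(0) == 0x200);
            assert(qm_pq_id_to_cm(17) == 0x211);
            return 0;
    }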
383/* number of global Vport/QCN rate limiters */ 376/* Number of global Vport/QCN rate limiters */
384#define MAX_QM_GLOBAL_RLS 256 377#define MAX_QM_GLOBAL_RLS 256
378
385/* QM registers data */ 379/* QM registers data */
386#define QM_LINE_CRD_REG_WIDTH 16 380#define QM_LINE_CRD_REG_WIDTH 16
387#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) 381#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
@@ -400,7 +394,7 @@
400#define CAU_FSM_ETH_TX 1 394#define CAU_FSM_ETH_TX 1
401 395
402/* Number of Protocol Indices per Status Block */ 396/* Number of Protocol Indices per Status Block */
403#define PIS_PER_SB 12 397#define PIS_PER_SB_E4 12
404 398
405#define CAU_HC_STOPPED_STATE 3 399#define CAU_HC_STOPPED_STATE 3
406#define CAU_HC_DISABLE_STATE 4 400#define CAU_HC_DISABLE_STATE 4
@@ -432,8 +426,7 @@
432 426
433#define IGU_CMD_INT_ACK_BASE 0x0400 427#define IGU_CMD_INT_ACK_BASE 0x0400
434#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ 428#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
435 MAX_TOT_SB_PER_PATH - \ 429 MAX_TOT_SB_PER_PATH - 1)
436 1)
437#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff 430#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
438 431
439#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 432#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
@@ -447,8 +440,7 @@
447 440
448#define IGU_CMD_PROD_UPD_BASE 0x0600 441#define IGU_CMD_PROD_UPD_BASE 0x0600
449#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ 442#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
450 MAX_TOT_SB_PER_PATH - \ 443 MAX_TOT_SB_PER_PATH - 1)
451 1)
452#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff 444#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
453 445
454/*****************/ 446/*****************/
@@ -514,129 +506,126 @@
514 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) 506 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
515 507
516/* PF BAR */ 508/* PF BAR */
517#define PXP_BAR0_START_GRC 0x0000 509#define PXP_BAR0_START_GRC 0x0000
518#define PXP_BAR0_GRC_LENGTH 0x1C00000 510#define PXP_BAR0_GRC_LENGTH 0x1C00000
519#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ 511#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
520 PXP_BAR0_GRC_LENGTH - 1) 512 PXP_BAR0_GRC_LENGTH - 1)
521 513
522#define PXP_BAR0_START_IGU 0x1C00000 514#define PXP_BAR0_START_IGU 0x1C00000
523#define PXP_BAR0_IGU_LENGTH 0x10000 515#define PXP_BAR0_IGU_LENGTH 0x10000
524#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ 516#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
525 PXP_BAR0_IGU_LENGTH - 1) 517 PXP_BAR0_IGU_LENGTH - 1)
526 518
527#define PXP_BAR0_START_TSDM 0x1C80000 519#define PXP_BAR0_START_TSDM 0x1C80000
528#define PXP_BAR0_SDM_LENGTH 0x40000 520#define PXP_BAR0_SDM_LENGTH 0x40000
529#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 521#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
530#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ 522#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
531 PXP_BAR0_SDM_LENGTH - 1) 523 PXP_BAR0_SDM_LENGTH - 1)
532 524
533#define PXP_BAR0_START_MSDM 0x1D00000 525#define PXP_BAR0_START_MSDM 0x1D00000
534#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ 526#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
535 PXP_BAR0_SDM_LENGTH - 1) 527 PXP_BAR0_SDM_LENGTH - 1)
536 528
537#define PXP_BAR0_START_USDM 0x1D80000 529#define PXP_BAR0_START_USDM 0x1D80000
538#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ 530#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
539 PXP_BAR0_SDM_LENGTH - 1) 531 PXP_BAR0_SDM_LENGTH - 1)
540 532
541#define PXP_BAR0_START_XSDM 0x1E00000 533#define PXP_BAR0_START_XSDM 0x1E00000
542#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ 534#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
543 PXP_BAR0_SDM_LENGTH - 1) 535 PXP_BAR0_SDM_LENGTH - 1)
544 536
545#define PXP_BAR0_START_YSDM 0x1E80000 537#define PXP_BAR0_START_YSDM 0x1E80000
546#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ 538#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
547 PXP_BAR0_SDM_LENGTH - 1) 539 PXP_BAR0_SDM_LENGTH - 1)
548 540
549#define PXP_BAR0_START_PSDM 0x1F00000 541#define PXP_BAR0_START_PSDM 0x1F00000
550#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ 542#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
551 PXP_BAR0_SDM_LENGTH - 1) 543 PXP_BAR0_SDM_LENGTH - 1)
552 544
553#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) 545#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
554 546
555/* VF BAR */ 547/* VF BAR */
556#define PXP_VF_BAR0 0 548#define PXP_VF_BAR0 0
557 549
558#define PXP_VF_BAR0_START_GRC 0x3E00 550#define PXP_VF_BAR0_START_IGU 0
559#define PXP_VF_BAR0_GRC_LENGTH 0x200 551#define PXP_VF_BAR0_IGU_LENGTH 0x3000
560#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ 552#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
561 PXP_VF_BAR0_GRC_LENGTH - 1) 553 PXP_VF_BAR0_IGU_LENGTH - 1)
562 554
563#define PXP_VF_BAR0_START_IGU 0 555#define PXP_VF_BAR0_START_DQ 0x3000
564#define PXP_VF_BAR0_IGU_LENGTH 0x3000 556#define PXP_VF_BAR0_DQ_LENGTH 0x200
565#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ 557#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
566 PXP_VF_BAR0_IGU_LENGTH - 1) 558#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
567 559 PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
568#define PXP_VF_BAR0_START_DQ 0x3000 560#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
569#define PXP_VF_BAR0_DQ_LENGTH 0x200 561 + 4)
570#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 562#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
571#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ 563 PXP_VF_BAR0_DQ_LENGTH - 1)
572 PXP_VF_BAR0_DQ_OPAQUE_OFFSET) 564
573#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ 565#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
574 + 4) 566#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
575#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ 567#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \
576 PXP_VF_BAR0_DQ_LENGTH - 1) 568 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
577 569
578#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 570#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
579#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 571#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \
580#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ 572 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
581 + \ 573
582 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ 574#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
583 - 1) 575#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \
584 576 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
585#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 577
586#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ 578#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
587 + \ 579#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \
588 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ 580 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
589 - 1) 581
590 582#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
591#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 583#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \
592#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ 584 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
593 + \ 585
594 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ 586#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
595 - 1) 587#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \
596 588 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
597#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 589
598#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ 590#define PXP_VF_BAR0_START_GRC 0x3E00
599 + \ 591#define PXP_VF_BAR0_GRC_LENGTH 0x200
600 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ 592#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
601 - 1) 593 PXP_VF_BAR0_GRC_LENGTH - 1)
602 594
603#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 595#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
604#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ 596#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
605 + \ 597
606 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ 598#define PXP_VF_BAR0_START_IGU2 0x10000
607 - 1) 599#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
608 600#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \
609#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 601 PXP_VF_BAR0_IGU2_LENGTH - 1)
610#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ 602
611 + \ 603#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
612 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ 604
613 - 1) 605#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
614 606#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
615#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
616#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
617
618#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
619
620#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
621#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
622 607
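The reordered VF BAR0 map above now reads top to bottom in address order: IGU at 0x0, DQ at 0x3000, the per-storm zone-B windows from 0x3200, GRC at 0x3E00, SDM zone A from 0x4000 and the second IGU window at 0x10000. A small build-time sketch that re-checks a few of the inclusive region bounds; the constants are copied from the header and nothing here is part of the patch:

    /* Build-time check only; compiling it is the test: cc -std=c11 -c vf_bar0_check.c */
    #define PXP_VF_BAR0_START_IGU           0
    #define PXP_VF_BAR0_IGU_LENGTH          0x3000
    #define PXP_VF_BAR0_END_IGU             (PXP_VF_BAR0_START_IGU + \
                                             PXP_VF_BAR0_IGU_LENGTH - 1)

    #define PXP_VF_BAR0_START_DQ            0x3000
    #define PXP_VF_BAR0_DQ_LENGTH           0x200
    #define PXP_VF_BAR0_END_DQ              (PXP_VF_BAR0_START_DQ + \
                                             PXP_VF_BAR0_DQ_LENGTH - 1)

    #define PXP_VF_BAR0_START_TSDM_ZONE_B   0x3200
    #define PXP_VF_BAR0_SDM_LENGTH_ZONE_B   0x200
    #define PXP_VF_BAR0_START_PSDM_ZONE_B   0x3c00
    #define PXP_VF_BAR0_END_PSDM_ZONE_B     (PXP_VF_BAR0_START_PSDM_ZONE_B + \
                                             PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)

    #define PXP_VF_BAR0_START_GRC           0x3E00
    #define PXP_VF_BAR0_GRC_LENGTH          0x200
    #define PXP_VF_BAR0_END_GRC             (PXP_VF_BAR0_START_GRC + \
                                             PXP_VF_BAR0_GRC_LENGTH - 1)
    #define PXP_VF_BAR0_START_SDM_ZONE_A    0x4000

    /* Regions are inclusive [start, end]; consecutive ones must not overlap. */
    _Static_assert(PXP_VF_BAR0_END_IGU < PXP_VF_BAR0_START_DQ,
                   "IGU window overlaps the DQ window");
    _Static_assert(PXP_VF_BAR0_END_DQ < PXP_VF_BAR0_START_TSDM_ZONE_B,
                   "DQ window overlaps the zone-B SDM windows");
    _Static_assert(PXP_VF_BAR0_END_PSDM_ZONE_B < PXP_VF_BAR0_START_GRC,
                   "zone-B SDM windows overlap the GRC window");
    _Static_assert(PXP_VF_BAR0_END_GRC < PXP_VF_BAR0_START_SDM_ZONE_A,
                   "GRC window overlaps SDM zone A");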
623/* ILT Records */ 608/* ILT Records */
624#define PXP_NUM_ILT_RECORDS_BB 7600 609#define PXP_NUM_ILT_RECORDS_BB 7600
625#define PXP_NUM_ILT_RECORDS_K2 11000 610#define PXP_NUM_ILT_RECORDS_K2 11000
626#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) 611#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
627#define PXP_QUEUES_ZONE_MAX_NUM 320 612
613/* Host Interface */
614#define PXP_QUEUES_ZONE_MAX_NUM 320
615
628/*****************/ 616/*****************/
629/* PRM CONSTANTS */ 617/* PRM CONSTANTS */
630/*****************/ 618/*****************/
631#define PRM_DMA_PAD_BYTES_NUM 2 619#define PRM_DMA_PAD_BYTES_NUM 2
620
632/*****************/ 621/*****************/
633/* SDMs CONSTANTS */ 622/* SDMs CONSTANTS */
634/*****************/ 623/*****************/
635 624
636#define SDM_OP_GEN_TRIG_NONE 0 625#define SDM_OP_GEN_TRIG_NONE 0
637#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 626#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
638#define SDM_OP_GEN_TRIG_AGG_INT 2 627#define SDM_OP_GEN_TRIG_AGG_INT 2
639#define SDM_OP_GEN_TRIG_LOADER 4 628#define SDM_OP_GEN_TRIG_LOADER 4
640#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 629#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
641#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 630#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
642 631
@@ -644,26 +633,26 @@
644/* Completion types */ 633/* Completion types */
645/********************/ 634/********************/
646 635
647#define SDM_COMP_TYPE_NONE 0 636#define SDM_COMP_TYPE_NONE 0
648#define SDM_COMP_TYPE_WAKE_THREAD 1 637#define SDM_COMP_TYPE_WAKE_THREAD 1
649#define SDM_COMP_TYPE_AGG_INT 2 638#define SDM_COMP_TYPE_AGG_INT 2
650#define SDM_COMP_TYPE_CM 3 639#define SDM_COMP_TYPE_CM 3
651#define SDM_COMP_TYPE_LOADER 4 640#define SDM_COMP_TYPE_LOADER 4
652#define SDM_COMP_TYPE_PXP 5 641#define SDM_COMP_TYPE_PXP 5
653#define SDM_COMP_TYPE_INDICATE_ERROR 6 642#define SDM_COMP_TYPE_INDICATE_ERROR 6
654#define SDM_COMP_TYPE_RELEASE_THREAD 7 643#define SDM_COMP_TYPE_RELEASE_THREAD 7
655#define SDM_COMP_TYPE_RAM 8 644#define SDM_COMP_TYPE_RAM 8
656#define SDM_COMP_TYPE_INC_ORDER_CNT 9 645#define SDM_COMP_TYPE_INC_ORDER_CNT 9
657 646
658/*****************/ 647/*****************/
659/* PBF Constants */ 648/* PBF CONSTANTS */
660/*****************/ 649/*****************/
661 650
662/* Number of PBF command queue lines. Each line is 32B. */ 651/* Number of PBF command queue lines. Each line is 32B. */
663#define PBF_MAX_CMD_LINES 3328 652#define PBF_MAX_CMD_LINES 3328
664 653
665/* Number of BTB blocks. Each block is 256B. */ 654/* Number of BTB blocks. Each block is 256B. */
666#define BTB_MAX_BLOCKS 1440 655#define BTB_MAX_BLOCKS 1440
667 656
668/*****************/ 657/*****************/
669/* PRS CONSTANTS */ 658/* PRS CONSTANTS */
@@ -671,14 +660,7 @@
671 660
672#define PRS_GFT_CAM_LINES_NO_MATCH 31 661#define PRS_GFT_CAM_LINES_NO_MATCH 31
673 662
674/* Async data KCQ CQE */ 663/* Interrupt coalescing TimeSet */
675struct async_data {
676 __le32 cid;
677 __le16 itid;
678 u8 error_code;
679 u8 fw_debug_param;
680};
681
682struct coalescing_timeset { 664struct coalescing_timeset {
683 u8 value; 665 u8 value;
684#define COALESCING_TIMESET_TIMESET_MASK 0x7F 666#define COALESCING_TIMESET_TIMESET_MASK 0x7F
@@ -692,23 +674,32 @@ struct common_queue_zone {
692 __le16 reserved; 674 __le16 reserved;
693}; 675};
694 676
677/* ETH Rx producers data */
695struct eth_rx_prod_data { 678struct eth_rx_prod_data {
696 __le16 bd_prod; 679 __le16 bd_prod;
697 __le16 cqe_prod; 680 __le16 cqe_prod;
698}; 681};
699 682
700struct regpair { 683struct tcp_ulp_connect_done_params {
701 __le32 lo; 684 __le16 mss;
702 __le32 hi; 685 u8 snd_wnd_scale;
686 u8 flags;
687#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
688#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
689#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
690#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
703}; 691};
704 692
705struct vf_pf_channel_eqe_data { 693struct iscsi_connect_done_results {
706 struct regpair msg_addr; 694 __le16 icid;
695 __le16 conn_id;
696 struct tcp_ulp_connect_done_params params;
707}; 697};
708 698
709struct iscsi_eqe_data { 699struct iscsi_eqe_data {
710 __le32 cid; 700 __le16 icid;
711 __le16 conn_id; 701 __le16 conn_id;
702 __le16 reserved;
712 u8 error_code; 703 u8 error_code;
713 u8 error_pdu_opcode_reserved; 704 u8 error_pdu_opcode_reserved;
714#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F 705#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
@@ -719,52 +710,6 @@ struct iscsi_eqe_data {
719#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 710#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
720}; 711};
721 712
722struct rdma_eqe_destroy_qp {
723 __le32 cid;
724 u8 reserved[4];
725};
726
727union rdma_eqe_data {
728 struct regpair async_handle;
729 struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
730};
731
732struct malicious_vf_eqe_data {
733 u8 vf_id;
734 u8 err_id;
735 __le16 reserved[3];
736};
737
738struct initial_cleanup_eqe_data {
739 u8 vf_id;
740 u8 reserved[7];
741};
742
743/* Event Data Union */
744union event_ring_data {
745 u8 bytes[8];
746 struct vf_pf_channel_eqe_data vf_pf_channel;
747 struct iscsi_eqe_data iscsi_info;
748 union rdma_eqe_data rdma_data;
749 struct malicious_vf_eqe_data malicious_vf;
750 struct initial_cleanup_eqe_data vf_init_cleanup;
751};
752
753/* Event Ring Entry */
754struct event_ring_entry {
755 u8 protocol_id;
756 u8 opcode;
757 __le16 reserved0;
758 __le16 echo;
759 u8 fw_return_code;
760 u8 flags;
761#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
762#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
763#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
764#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
765 union event_ring_data data;
766};
767
768/* Multi function mode */ 713/* Multi function mode */
769enum mf_mode { 714enum mf_mode {
770 ERROR_MODE /* Unsupported mode */, 715 ERROR_MODE /* Unsupported mode */,
@@ -781,13 +726,31 @@ enum protocol_type {
781 PROTOCOLID_CORE, 726 PROTOCOLID_CORE,
782 PROTOCOLID_ETH, 727 PROTOCOLID_ETH,
783 PROTOCOLID_IWARP, 728 PROTOCOLID_IWARP,
784 PROTOCOLID_RESERVED5, 729 PROTOCOLID_RESERVED0,
785 PROTOCOLID_PREROCE, 730 PROTOCOLID_PREROCE,
786 PROTOCOLID_COMMON, 731 PROTOCOLID_COMMON,
787 PROTOCOLID_RESERVED6, 732 PROTOCOLID_RESERVED1,
788 MAX_PROTOCOL_TYPE 733 MAX_PROTOCOL_TYPE
789}; 734};
790 735
736struct regpair {
737 __le32 lo;
738 __le32 hi;
739};
740
741/* RoCE Destroy Event Data */
742struct rdma_eqe_destroy_qp {
743 __le32 cid;
744 u8 reserved[4];
745};
746
747/* RDMA Event Data Union */
748union rdma_eqe_data {
749 struct regpair async_handle;
750 struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
751};
752
753/* Ustorm Queue Zone */
791struct ustorm_eth_queue_zone { 754struct ustorm_eth_queue_zone {
792 struct coalescing_timeset int_coalescing_timeset; 755 struct coalescing_timeset int_coalescing_timeset;
793 u8 reserved[3]; 756 u8 reserved[3];
@@ -798,62 +761,71 @@ struct ustorm_queue_zone {
798 struct common_queue_zone common; 761 struct common_queue_zone common;
799}; 762};
800 763
801/* status block structure */ 764/* Status block structure */
802struct cau_pi_entry { 765struct cau_pi_entry {
803 u32 prod; 766 __le32 prod;
804#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF 767#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
805#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 768#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
806#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F 769#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
807#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 770#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
808#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 771#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
809#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 772#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
810#define CAU_PI_ENTRY_RESERVED_MASK 0xFF 773#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
811#define CAU_PI_ENTRY_RESERVED_SHIFT 24 774#define CAU_PI_ENTRY_RESERVED_SHIFT 24
812}; 775};
813 776
814/* status block structure */ 777/* Status block structure */
815struct cau_sb_entry { 778struct cau_sb_entry {
816 u32 data; 779 __le32 data;
817#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF 780#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
818#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 781#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
819#define CAU_SB_ENTRY_STATE0_MASK 0xF 782#define CAU_SB_ENTRY_STATE0_MASK 0xF
820#define CAU_SB_ENTRY_STATE0_SHIFT 24 783#define CAU_SB_ENTRY_STATE0_SHIFT 24
821#define CAU_SB_ENTRY_STATE1_MASK 0xF 784#define CAU_SB_ENTRY_STATE1_MASK 0xF
822#define CAU_SB_ENTRY_STATE1_SHIFT 28 785#define CAU_SB_ENTRY_STATE1_SHIFT 28
823 u32 params; 786 __le32 params;
824#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F 787#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
825#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 788#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
826#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F 789#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
827#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 790#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
828#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 791#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
829#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 792#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
830#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 793#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
831#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 794#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
832#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF 795#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
833#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 796#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
834#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 797#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
835#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 798#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
836#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF 799#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
837#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 800#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
838#define CAU_SB_ENTRY_TPH_MASK 0x1 801#define CAU_SB_ENTRY_TPH_MASK 0x1
839#define CAU_SB_ENTRY_TPH_SHIFT 31 802#define CAU_SB_ENTRY_TPH_SHIFT 31
840}; 803};
841 804
842/* core doorbell data */ 805/* Igu cleanup bit values to distinguish between clean or producer consumer
806 * update.
807 */
808enum command_type_bit {
809 IGU_COMMAND_TYPE_NOP = 0,
810 IGU_COMMAND_TYPE_SET = 1,
811 MAX_COMMAND_TYPE_BIT
812};
813
814/* Core doorbell data */
843struct core_db_data { 815struct core_db_data {
844 u8 params; 816 u8 params;
845#define CORE_DB_DATA_DEST_MASK 0x3 817#define CORE_DB_DATA_DEST_MASK 0x3
846#define CORE_DB_DATA_DEST_SHIFT 0 818#define CORE_DB_DATA_DEST_SHIFT 0
847#define CORE_DB_DATA_AGG_CMD_MASK 0x3 819#define CORE_DB_DATA_AGG_CMD_MASK 0x3
848#define CORE_DB_DATA_AGG_CMD_SHIFT 2 820#define CORE_DB_DATA_AGG_CMD_SHIFT 2
849#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 821#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
850#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 822#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
851#define CORE_DB_DATA_RESERVED_MASK 0x1 823#define CORE_DB_DATA_RESERVED_MASK 0x1
852#define CORE_DB_DATA_RESERVED_SHIFT 5 824#define CORE_DB_DATA_RESERVED_SHIFT 5
853#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 825#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
854#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 826#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
855 u8 agg_flags; 827 u8 agg_flags;
856 __le16 spq_prod; 828 __le16 spq_prod;
857}; 829};
858 830
859/* Enum of doorbell aggregative command selection */ 831/* Enum of doorbell aggregative command selection */
@@ -909,67 +881,69 @@ struct db_l2_dpm_sge {
909 struct regpair addr; 881 struct regpair addr;
910 __le16 nbytes; 882 __le16 nbytes;
911 __le16 bitfields; 883 __le16 bitfields;
912#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF 884#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
913#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 885#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
914#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 886#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
915#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 887#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
916#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 888#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
917#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 889#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
918#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF 890#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
919#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 891#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
920 __le32 reserved2; 892 __le32 reserved2;
921}; 893};
922 894
923/* Structure for doorbell address, in legacy mode */ 895/* Structure for doorbell address, in legacy mode */
924struct db_legacy_addr { 896struct db_legacy_addr {
925 __le32 addr; 897 __le32 addr;
926#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 898#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
927#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 899#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
928#define DB_LEGACY_ADDR_DEMS_MASK 0x7 900#define DB_LEGACY_ADDR_DEMS_MASK 0x7
929#define DB_LEGACY_ADDR_DEMS_SHIFT 2 901#define DB_LEGACY_ADDR_DEMS_SHIFT 2
930#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF 902#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
931#define DB_LEGACY_ADDR_ICID_SHIFT 5 903#define DB_LEGACY_ADDR_ICID_SHIFT 5
932}; 904};
933 905
934/* Structure for doorbell address, in PWM mode */ 906/* Structure for doorbell address, in PWM mode */
935struct db_pwm_addr { 907struct db_pwm_addr {
936 __le32 addr; 908 __le32 addr;
937#define DB_PWM_ADDR_RESERVED0_MASK 0x7 909#define DB_PWM_ADDR_RESERVED0_MASK 0x7
938#define DB_PWM_ADDR_RESERVED0_SHIFT 0 910#define DB_PWM_ADDR_RESERVED0_SHIFT 0
939#define DB_PWM_ADDR_OFFSET_MASK 0x7F 911#define DB_PWM_ADDR_OFFSET_MASK 0x7F
940#define DB_PWM_ADDR_OFFSET_SHIFT 3 912#define DB_PWM_ADDR_OFFSET_SHIFT 3
941#define DB_PWM_ADDR_WID_MASK 0x3 913#define DB_PWM_ADDR_WID_MASK 0x3
942#define DB_PWM_ADDR_WID_SHIFT 10 914#define DB_PWM_ADDR_WID_SHIFT 10
943#define DB_PWM_ADDR_DPI_MASK 0xFFFF 915#define DB_PWM_ADDR_DPI_MASK 0xFFFF
944#define DB_PWM_ADDR_DPI_SHIFT 12 916#define DB_PWM_ADDR_DPI_SHIFT 12
945#define DB_PWM_ADDR_RESERVED1_MASK 0xF 917#define DB_PWM_ADDR_RESERVED1_MASK 0xF
946#define DB_PWM_ADDR_RESERVED1_SHIFT 28 918#define DB_PWM_ADDR_RESERVED1_SHIFT 28
947}; 919};
948 920
949/* Parameters to RoCE firmware, passed in EDPM doorbell */ 921/* Parameters to RDMA firmware, passed in EDPM doorbell */
950struct db_rdma_dpm_params { 922struct db_rdma_dpm_params {
951 __le32 params; 923 __le32 params;
952#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F 924#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
953#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 925#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
954#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 926#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
955#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 927#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
956#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF 928#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
957#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 929#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
958#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF 930#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
959#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 931#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
960#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 932#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
961#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 933#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
962#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 934#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
963#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 935#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
964#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 936#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
965#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 937#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
966#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 938#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
967#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 939#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
968#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 940#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
969#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 941#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
970}; 942};
971 943
972/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ 944/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
945 * DPM burst.
946 */
973struct db_rdma_dpm_data { 947struct db_rdma_dpm_data {
974 __le16 icid; 948 __le16 icid;
975 __le16 prod_val; 949 __le16 prod_val;
@@ -987,22 +961,22 @@ enum igu_int_cmd {
987 961
988/* IGU producer or consumer update command */ 962/* IGU producer or consumer update command */
989struct igu_prod_cons_update { 963struct igu_prod_cons_update {
990 u32 sb_id_and_flags; 964 __le32 sb_id_and_flags;
991#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF 965#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
992#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 966#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
993#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 967#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
994#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 968#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
995#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 969#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
996#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 970#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
997#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 971#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
998#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 972#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
999#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 973#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
1000#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 974#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
1001#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 975#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
1002#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 976#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
1003#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 977#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
1004#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 978#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
1005 u32 reserved1; 979 __le32 reserved1;
1006}; 980};
1007 981
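A minimal sketch, not from the patch, of how the mask/shift pairs above compose the sb_id_and_flags word of an igu_prod_cons_update: select the status-block index, request a producer update and mark the command type as a set per the command_type_bit enum added earlier. The SET_FIELD macro below is a simplified local stand-in for the driver's field helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Values copied from the header above. */
    #define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK      0xFFFFFF
    #define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT     0
    #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK   0x1
    #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT  24
    #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK  0x1
    #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31

    #define IGU_COMMAND_TYPE_SET 1

    /* Simplified local stand-in for the driver's SET_FIELD()-style helper. */
    #define SET_FIELD(dst, name, val) \
            ((dst) |= ((uint32_t)((val) & name##_MASK) << name##_SHIFT))

    int main(void)
    {
            uint32_t sb_id_and_flags = 0;

            SET_FIELD(sb_id_and_flags, IGU_PROD_CONS_UPDATE_SB_INDEX, 0x123456);
            SET_FIELD(sb_id_and_flags, IGU_PROD_CONS_UPDATE_UPDATE_FLAG, 1);
            SET_FIELD(sb_id_and_flags, IGU_PROD_CONS_UPDATE_COMMAND_TYPE,
                      IGU_COMMAND_TYPE_SET);

            /* The driver would cpu_to_le32() this before writing it to the IGU. */
            printf("sb_id_and_flags = 0x%08x\n", sb_id_and_flags);
            return 0;
    }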
1008/* Igu segments access for default status block only */ 982/* Igu segments access for default status block only */
@@ -1012,38 +986,63 @@ enum igu_seg_access {
1012 MAX_IGU_SEG_ACCESS 986 MAX_IGU_SEG_ACCESS
1013}; 987};
1014 988
989/* Enumeration for L3 type field of parsing_and_err_flags.
990 * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6
991 * (This field can be filled according to the last-ethertype)
992 */
993enum l3_type {
994 e_l3_type_unknown,
995 e_l3_type_ipv4,
996 e_l3_type_ipv6,
997 MAX_L3_TYPE
998};
999
1000/* Enumeration for l4Protocol field of parsing_and_err_flags.
1001 * L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
1002 * If the packet is IPv4 fragment, and its not the first fragment, the
1003 * protocol-type should be set to none.
1004 */
1005enum l4_protocol {
1006 e_l4_protocol_none,
1007 e_l4_protocol_tcp,
1008 e_l4_protocol_udp,
1009 MAX_L4_PROTOCOL
1010};
1011
1012/* Parsing and error flags field */
1015struct parsing_and_err_flags { 1013struct parsing_and_err_flags {
1016 __le16 flags; 1014 __le16 flags;
1017#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 1015#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
1018#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 1016#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
1019#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 1017#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
1020#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 1018#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
1021#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 1019#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
1022#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 1020#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
1023#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 1021#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
1024#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 1022#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
1025#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 1023#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
1026#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 1024#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
1027#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 1025#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
1028#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 1026#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
1029#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 1027#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
1030#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 1028#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
1031#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 1029#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
1032#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 1030#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
1033#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 1031#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
1034#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 1032#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
1035#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 1033#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
1036#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 1034#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
1037#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 1035#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
1038#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 1036#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
1039#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 1037#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
1040#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 1038#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
1041#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 1039#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
1042#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 1040#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
1043#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 1041#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
1044#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 1042#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
1045}; 1043};
1046 1044
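The l3_type and l4_protocol enums above name the values carried in the L3TYPE and L4PROTOCOL fields of parsing_and_err_flags. A standalone decoding sketch; GET_FIELD is defined locally here to keep it self-contained and only mirrors the field-access convention the qed driver uses, and le16_to_cpu() is simplified away:

    #include <stdint.h>
    #include <stdio.h>

    /* Values copied from the header above. */
    #define PARSING_AND_ERR_FLAGS_L3TYPE_MASK        0x3
    #define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT       0
    #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK    0x3
    #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT   2
    #define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK      0x1
    #define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT     4

    enum l3_type { e_l3_type_unknown, e_l3_type_ipv4, e_l3_type_ipv6 };
    enum l4_protocol { e_l4_protocol_none, e_l4_protocol_tcp, e_l4_protocol_udp };

    /* Local helper following the MASK/SHIFT naming convention above. */
    #define GET_FIELD(value, name) \
            (((value) >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
            /* IPv4 + TCP, not a fragment: L3TYPE=1, L4PROTOCOL=1, IPV4FRAG=0. */
            uint16_t flags =
                    (e_l3_type_ipv4 << PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
                    (e_l4_protocol_tcp << PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT);

            if (GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE) == e_l3_type_ipv4 &&
                GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4PROTOCOL) == e_l4_protocol_tcp &&
                !GET_FIELD(flags, PARSING_AND_ERR_FLAGS_IPV4FRAG))
                    printf("IPv4/TCP, not fragmented\n");
            return 0;
    }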
1045/* Parsing error flags bitmap */
1047struct parsing_err_flags { 1046struct parsing_err_flags {
1048 __le16 flags; 1047 __le16 flags;
1049#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 1048#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
@@ -1080,266 +1079,260 @@ struct parsing_err_flags {
1080#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 1079#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
1081}; 1080};
1082 1081
1082/* Pb context */
1083struct pb_context { 1083struct pb_context {
1084 __le32 crc[4]; 1084 __le32 crc[4];
1085}; 1085};
1086 1086
1087/* Concrete Function ID */
1087struct pxp_concrete_fid { 1088struct pxp_concrete_fid {
1088 __le16 fid; 1089 __le16 fid;
1089#define PXP_CONCRETE_FID_PFID_MASK 0xF 1090#define PXP_CONCRETE_FID_PFID_MASK 0xF
1090#define PXP_CONCRETE_FID_PFID_SHIFT 0 1091#define PXP_CONCRETE_FID_PFID_SHIFT 0
1091#define PXP_CONCRETE_FID_PORT_MASK 0x3 1092#define PXP_CONCRETE_FID_PORT_MASK 0x3
1092#define PXP_CONCRETE_FID_PORT_SHIFT 4 1093#define PXP_CONCRETE_FID_PORT_SHIFT 4
1093#define PXP_CONCRETE_FID_PATH_MASK 0x1 1094#define PXP_CONCRETE_FID_PATH_MASK 0x1
1094#define PXP_CONCRETE_FID_PATH_SHIFT 6 1095#define PXP_CONCRETE_FID_PATH_SHIFT 6
1095#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 1096#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
1096#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 1097#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
1097#define PXP_CONCRETE_FID_VFID_MASK 0xFF 1098#define PXP_CONCRETE_FID_VFID_MASK 0xFF
1098#define PXP_CONCRETE_FID_VFID_SHIFT 8 1099#define PXP_CONCRETE_FID_VFID_SHIFT 8
1099}; 1100};
1100 1101
1102/* Concrete Function ID */
1101struct pxp_pretend_concrete_fid { 1103struct pxp_pretend_concrete_fid {
1102 __le16 fid; 1104 __le16 fid;
1103#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF 1105#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
1104#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 1106#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
1105#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 1107#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
1106#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 1108#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
1107#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 1109#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
1108#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 1110#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
1109#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF 1111#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
1110#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 1112#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
1111}; 1113};
1112 1114
1115/* Function ID */
1113union pxp_pretend_fid { 1116union pxp_pretend_fid {
1114 struct pxp_pretend_concrete_fid concrete_fid; 1117 struct pxp_pretend_concrete_fid concrete_fid;
1115 __le16 opaque_fid; 1118 __le16 opaque_fid;
1116}; 1119};
1117 1120
1118/* Pxp Pretend Command Register. */ 1121/* Pxp Pretend Command Register */
1119struct pxp_pretend_cmd { 1122struct pxp_pretend_cmd {
1120 union pxp_pretend_fid fid; 1123 union pxp_pretend_fid fid;
1121 __le16 control; 1124 __le16 control;
1122#define PXP_PRETEND_CMD_PATH_MASK 0x1 1125#define PXP_PRETEND_CMD_PATH_MASK 0x1
1123#define PXP_PRETEND_CMD_PATH_SHIFT 0 1126#define PXP_PRETEND_CMD_PATH_SHIFT 0
1124#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 1127#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
1125#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 1128#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
1126#define PXP_PRETEND_CMD_PORT_MASK 0x3 1129#define PXP_PRETEND_CMD_PORT_MASK 0x3
1127#define PXP_PRETEND_CMD_PORT_SHIFT 2 1130#define PXP_PRETEND_CMD_PORT_SHIFT 2
1128#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF 1131#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
1129#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 1132#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
1130#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF 1133#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
1131#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 1134#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
1132#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 1135#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
1133#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 1136#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
1134#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 1137#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
1135#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 1138#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
1136#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 1139#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
1137#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 1140#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
1138#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 1141#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
1139#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 1142#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
1140}; 1143};
1141 1144
1142/* PTT Record in PXP Admin Window. */ 1145/* PTT Record in PXP Admin Window */
1143struct pxp_ptt_entry { 1146struct pxp_ptt_entry {
1144 __le32 offset; 1147 __le32 offset;
1145#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF 1148#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
1146#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 1149#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
1147#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF 1150#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
1148#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 1151#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
1149 struct pxp_pretend_cmd pretend; 1152 struct pxp_pretend_cmd pretend;
1150}; 1153};
1151 1154
1152/* VF Zone A Permission Register. */ 1155/* VF Zone A Permission Register */
1153struct pxp_vf_zone_a_permission { 1156struct pxp_vf_zone_a_permission {
1154 __le32 control; 1157 __le32 control;
1155#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF 1158#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
1156#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 1159#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
1157#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 1160#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
1158#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 1161#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
1159#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F 1162#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
1160#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 1163#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
1161#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF 1164#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
1162#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 1165#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
1163}; 1166};
1164 1167
1165/* RSS hash type */ 1168/* Rdif context */
1166struct rdif_task_context { 1169struct rdif_task_context {
1167 __le32 initial_ref_tag; 1170 __le32 initial_ref_tag;
1168 __le16 app_tag_value; 1171 __le16 app_tag_value;
1169 __le16 app_tag_mask; 1172 __le16 app_tag_mask;
1170 u8 flags0; 1173 u8 flags0;
1171#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 1174#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
1172#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 1175#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
1173#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 1176#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
1174#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 1177#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
1175#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 1178#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
1176#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 1179#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
1177#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 1180#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
1178#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 1181#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
1179#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 1182#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
1180#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 1183#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
1181#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 1184#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
1182#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 1185#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
1183#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 1186#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
1184#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 1187#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
1185 u8 partial_dif_data[7]; 1188 u8 partial_dif_data[7];
1186 __le16 partial_crc_value; 1189 __le16 partial_crc_value;
1187 __le16 partial_checksum_value; 1190 __le16 partial_checksum_value;
1188 __le32 offset_in_io; 1191 __le32 offset_in_io;
1189 __le16 flags1; 1192 __le16 flags1;
1190#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 1193#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
1191#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 1194#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
1192#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 1195#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
1193#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 1196#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
1194#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 1197#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
1195#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 1198#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
1196#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 1199#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
1197#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 1200#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
1198#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 1201#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
1199#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 1202#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
1200#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 1203#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
1201#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 1204#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
1202#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 1205#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
1203#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 1206#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
1204#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 1207#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
1205#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 1208#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
1206#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 1209#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
1207#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 1210#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
1208#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 1211#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
1209#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 1212#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
1210#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 1213#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
1211#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 1214#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
1212#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 1215#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
1213#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 1216#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
1214#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 1217#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
1215#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 1218#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
1216 __le16 state; 1219 __le16 state;
1217#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF 1220#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
1218#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 1221#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
1219#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF 1222#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
1220#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 1223#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
1221#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 1224#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
1222#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 1225#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
1223#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 1226#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
1224#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 1227#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
1225#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF 1228#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
1226#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 1229#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
1227#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 1230#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
1228#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 1231#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
1229 __le32 reserved2; 1232 __le32 reserved2;
1230}; 1233};
1231 1234
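Every flags byte or word in the task-context structures above is accessed through its paired _MASK/_SHIFT defines rather than C bit-fields. A minimal sketch of the get/set pattern, using the renamed RDIF protection-type field and helper macros local to this example (the qed common headers provide similar GET_FIELD/SET_FIELD helpers), not code from this patch:

#include <stdint.h>

/* Local helpers for this sketch only; the driver has its own equivalents. */
#define HSI_GET(val, name)	(((val) >> name##_SHIFT) & name##_MASK)
#define HSI_SET(val, name, fld)						\
	(((val) & ~((name##_MASK) << (name##_SHIFT))) |			\
	 (((fld) & (name##_MASK)) << (name##_SHIFT)))

#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK	0x3
#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT	4

/* Write a 2-bit protection type into bits 5:4 of flags0. */
static uint8_t set_protection_type(uint8_t flags0, uint8_t prot_type)
{
	return (uint8_t)HSI_SET(flags0, RDIF_TASK_CONTEXT_PROTECTION_TYPE, prot_type);
}

/* Read the protection type back out of flags0. */
static uint8_t get_protection_type(uint8_t flags0)
{
	return (uint8_t)HSI_GET(flags0, RDIF_TASK_CONTEXT_PROTECTION_TYPE);
}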
1232/* RSS hash type */ 1235/* Status block structure */
1233enum rss_hash_type { 1236struct status_block_e4 {
1234 RSS_HASH_TYPE_DEFAULT = 0, 1237 __le16 pi_array[PIS_PER_SB_E4];
1235 RSS_HASH_TYPE_IPV4 = 1,
1236 RSS_HASH_TYPE_TCP_IPV4 = 2,
1237 RSS_HASH_TYPE_IPV6 = 3,
1238 RSS_HASH_TYPE_TCP_IPV6 = 4,
1239 RSS_HASH_TYPE_UDP_IPV4 = 5,
1240 RSS_HASH_TYPE_UDP_IPV6 = 6,
1241 MAX_RSS_HASH_TYPE
1242};
1243
1244/* status block structure */
1245struct status_block {
1246 __le16 pi_array[PIS_PER_SB];
1247 __le32 sb_num; 1238 __le32 sb_num;
1248#define STATUS_BLOCK_SB_NUM_MASK 0x1FF 1239#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
1249#define STATUS_BLOCK_SB_NUM_SHIFT 0 1240#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
1250#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F 1241#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
1251#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 1242#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
1252#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF 1243#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
1253#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 1244#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
1254 __le32 prod_index; 1245 __le32 prod_index;
1255#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF 1246#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
1256#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 1247#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
1257#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF 1248#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
1258#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 1249#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
1259}; 1250};
1260 1251
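As a usage sketch (assumed, not part of the patch), a driver polling the E4 status block reads the 24-bit producer index out of prod_index after converting it from little-endian; the macro names below mirror the renamed defines above.

#include <stdint.h>

#define STATUS_BLOCK_E4_PROD_INDEX_MASK		0xFFFFFF
#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT	0

/*
 * prod_index_cpu is the prod_index word already converted to host order
 * (le32_to_cpu() in kernel code); only bits 23:0 carry the index.
 */
static uint32_t sb_e4_prod_index(uint32_t prod_index_cpu)
{
	return (prod_index_cpu >> STATUS_BLOCK_E4_PROD_INDEX_SHIFT) &
	       STATUS_BLOCK_E4_PROD_INDEX_MASK;
}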
1252/* Tdif context */
1261struct tdif_task_context { 1253struct tdif_task_context {
1262 __le32 initial_ref_tag; 1254 __le32 initial_ref_tag;
1263 __le16 app_tag_value; 1255 __le16 app_tag_value;
1264 __le16 app_tag_mask; 1256 __le16 app_tag_mask;
1265 __le16 partial_crc_valueB; 1257 __le16 partial_crc_value_b;
1266 __le16 partial_checksum_valueB; 1258 __le16 partial_checksum_value_b;
1267 __le16 stateB; 1259 __le16 stateB;
1268#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF 1260#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
1269#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 1261#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
1270#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF 1262#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
1271#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 1263#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
1272#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 1264#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
1273#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 1265#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
1274#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 1266#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
1275#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 1267#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
1276#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F 1268#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
1277#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 1269#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
1278 u8 reserved1; 1270 u8 reserved1;
1279 u8 flags0; 1271 u8 flags0;
1280#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 1272#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
1281#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 1273#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
1282#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 1274#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
1283#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 1275#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
1284#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 1276#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
1285#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 1277#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
1286#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 1278#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
1287#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 1279#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
1288#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 1280#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
1289#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 1281#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
1290#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 1282#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
1291#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 1283#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
1292#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 1284#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
1293#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 1285#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
1294 __le32 flags1; 1286 __le32 flags1;
1295#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 1287#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
1296#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 1288#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
1297#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 1289#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
1298#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 1290#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
1299#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 1291#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
1300#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 1292#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
1301#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 1293#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
1302#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 1294#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
1303#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 1295#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
1304#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 1296#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
1305#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 1297#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
1306#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 1298#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
1307#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 1299#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
1308#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 1300#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
1309#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 1301#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
1310#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 1302#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
1311#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 1303#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
1312#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 1304#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
1313#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 1305#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
1314#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 1306#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
1315#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 1307#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
1316#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 1308#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
1317#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF 1309#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
1318#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 1310#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
1319#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF 1311#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
1320#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 1312#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
1321#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 1313#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
1322#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 1314#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
1323#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 1315#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
1324#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 1316#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
1325#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF 1317#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
1326#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 1318#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
1327#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 1319#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
1328#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 1320#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
1329#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 1321#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
1330#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 1322#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
1331#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 1323#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
1332#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 1324#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
1333#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 1325#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
1334#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 1326#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
1335 __le32 offset_in_iob; 1327 __le32 offset_in_io_b;
1336 __le16 partial_crc_value_a; 1328 __le16 partial_crc_value_a;
1337 __le16 partial_checksum_valuea_; 1329 __le16 partial_checksum_value_a;
1338 __le32 offset_in_ioa; 1330 __le32 offset_in_io_a;
1339 u8 partial_dif_data_a[8]; 1331 u8 partial_dif_data_a[8];
1340 u8 partial_dif_data_b[8]; 1332 u8 partial_dif_data_b[8];
1341}; 1333};
1342 1334
1335/* Timers context */
1343struct timers_context { 1336struct timers_context {
1344 __le32 logical_client_0; 1337 __le32 logical_client_0;
1345#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF 1338#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
@@ -1385,6 +1378,7 @@ struct timers_context {
1385#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 1378#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
1386}; 1379};
1387 1380
1381/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
1388enum tunnel_next_protocol { 1382enum tunnel_next_protocol {
1389 e_unknown = 0, 1383 e_unknown = 0,
1390 e_l2 = 1, 1384 e_l2 = 1,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cb06e6e368e1..9db02856623b 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -36,150 +36,168 @@
36/********************/ 36/********************/
37/* ETH FW CONSTANTS */ 37/* ETH FW CONSTANTS */
38/********************/ 38/********************/
39#define ETH_HSI_VER_MAJOR 3 39
40#define ETH_HSI_VER_MINOR 10 40#define ETH_HSI_VER_MAJOR 3
41#define ETH_HSI_VER_MINOR 10
41 42
42#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 43#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
43 44
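ETH_HSI_VER_NO_PKT_LEN_TUNN is the HSI minor-revision cutoff drivers use to detect firmware that does not report the packet length of tunnelled packets in the CQE. A hedged sketch of such a compatibility gate, with the firmware's reported version numbers passed in as plain integers and both helper names hypothetical:

#include <stdbool.h>

#define ETH_HSI_VER_MAJOR		3
#define ETH_HSI_VER_MINOR		10
#define ETH_HSI_VER_NO_PKT_LEN_TUNN	5

/* Hypothetical helper: does the running firmware predate the
 * packet-length-in-tunnel-CQE addition and need legacy handling? */
static bool fw_lacks_tunn_pkt_len(int fw_hsi_minor)
{
	return fw_hsi_minor < ETH_HSI_VER_NO_PKT_LEN_TUNN;
}

/* Hypothetical helper: basic major-version compatibility gate. */
static bool fw_hsi_major_compatible(int fw_hsi_major)
{
	return fw_hsi_major == ETH_HSI_VER_MAJOR;
}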
44#define ETH_CACHE_LINE_SIZE 64 45#define ETH_CACHE_LINE_SIZE 64
45#define ETH_RX_CQE_GAP 32 46#define ETH_RX_CQE_GAP 32
46#define ETH_MAX_RAMROD_PER_CON 8 47#define ETH_MAX_RAMROD_PER_CON 8
47#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 48#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
48#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 49#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
49#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 50#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
50#define ETH_RX_NUM_NEXT_PAGE_BDS 2 51#define ETH_RX_NUM_NEXT_PAGE_BDS 2
51 52
52#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 53#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
53#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 54#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
54 55
55#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 56#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
56#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 57#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
57#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 58#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
58#define ETH_TX_MAX_LSO_HDR_NBD 4 59#define ETH_TX_MAX_LSO_HDR_NBD 4
59#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 60#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
60#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 61#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
61#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 62#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
62#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 63#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
63#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) 64#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
64#define ETH_TX_MAX_LSO_HDR_BYTES 510 65#define ETH_TX_MAX_LSO_HDR_BYTES 510
65#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) 66#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
66#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 67#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
67#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 68#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
68#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 69#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
69#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF 70#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
70 71
71#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 72#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
72#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ 73#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
73 (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) 74 (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
74#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ 75#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
75 (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) 76 (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
76 77
77/* Maximum number of buffers, used for RX packet placement */ 78/* Maximum number of buffers, used for RX packet placement */
78#define ETH_RX_MAX_BUFF_PER_PKT 5 79#define ETH_RX_MAX_BUFF_PER_PKT 5
79#define ETH_RX_BD_THRESHOLD 12 80#define ETH_RX_BD_THRESHOLD 12
80 81
81/* num of MAC/VLAN filters */ 82/* Num of MAC/VLAN filters */
82#define ETH_NUM_MAC_FILTERS 512 83#define ETH_NUM_MAC_FILTERS 512
83#define ETH_NUM_VLAN_FILTERS 512 84#define ETH_NUM_VLAN_FILTERS 512
84 85
85/* approx. multicast constants */ 86/* Approx. multicast constants */
86#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 87#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
87#define ETH_MULTICAST_MAC_BINS 256 88#define ETH_MULTICAST_MAC_BINS 256
88#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) 89#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
89 90
90/* ethernet vport update constants */ 91/* Ethernet vport update constants */
91#define ETH_FILTER_RULES_COUNT 10 92#define ETH_FILTER_RULES_COUNT 10
92#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 93#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
93#define ETH_RSS_KEY_SIZE_REGS 10 94#define ETH_RSS_KEY_SIZE_REGS 10
94#define ETH_RSS_ENGINE_NUM_K2 207 95#define ETH_RSS_ENGINE_NUM_K2 207
95#define ETH_RSS_ENGINE_NUM_BB 127 96#define ETH_RSS_ENGINE_NUM_BB 127
96 97
97/* TPA constants */ 98/* TPA constants */
98#define ETH_TPA_MAX_AGGS_NUM 64 99#define ETH_TPA_MAX_AGGS_NUM 64
99#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT 100#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
100#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 101#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
101#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 102#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
102 103
103/* Control frame check constants */ 104/* Control frame check constants */
104#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 105#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
105 106
107/* GFS constants */
108#define ETH_GFT_TRASH_CAN_VPORT 0x1FF
109
110/* Destination port mode */
111enum dest_port_mode {
112 DEST_PORT_PHY,
113 DEST_PORT_LOOPBACK,
114 DEST_PORT_PHY_LOOPBACK,
115 DEST_PORT_DROP,
116 MAX_DEST_PORT_MODE
117};
118
119/* Ethernet address type */
120enum eth_addr_type {
121 BROADCAST_ADDRESS,
122 MULTICAST_ADDRESS,
123 UNICAST_ADDRESS,
124 UNKNOWN_ADDRESS,
125 MAX_ETH_ADDR_TYPE
126};
127
106struct eth_tx_1st_bd_flags { 128struct eth_tx_1st_bd_flags {
107 u8 bitfields; 129 u8 bitfields;
108#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 130#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
109#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 131#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
110#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 132#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
111#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 133#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
112#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 134#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
113#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 135#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
114#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 136#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
115#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 137#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
116#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 138#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
117#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 139#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
118#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 140#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
119#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 141#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
120#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 142#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
121#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 143#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
122#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 144#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
123#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 145#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
124}; 146};
125 147
126/* The parsing information data for the first tx bd of a given packet. */ 148/* The parsing information data for the first tx bd of a given packet */
127struct eth_tx_data_1st_bd { 149struct eth_tx_data_1st_bd {
128 __le16 vlan; 150 __le16 vlan;
129 u8 nbds; 151 u8 nbds;
130 struct eth_tx_1st_bd_flags bd_flags; 152 struct eth_tx_1st_bd_flags bd_flags;
131 __le16 bitfields; 153 __le16 bitfields;
132#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 154#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
133#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 155#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
134#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 156#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
135#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 157#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
136#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF 158#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
137#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 159#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
138}; 160};
139 161
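To make the layout above concrete, here is an illustrative (assumed, not from the patch) composition of a first TX BD for a packet requesting IP and L4 checksum offload: one bit per flag in bd_flags, and the packet length packed into bits 15:2 of the bitfields word (stored as __le16 via cpu_to_le16() in kernel code).

#include <stdint.h>

#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT	0
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT	2
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT	3

#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK		0x3FFF
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT	2

/* Flag byte for the first BD of a checksum-offloaded packet. */
static uint8_t first_bd_flags_csum(void)
{
	return (uint8_t)((1u << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT) |
			 (1u << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT) |
			 (1u << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT));
}

/* Bitfields word: the 14-bit packet length occupies bits 15:2. */
static uint16_t first_bd_bitfields(uint16_t pkt_len)
{
	return (uint16_t)((pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
			  << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}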
140/* The parsing information data for the second tx bd of a given packet. */ 162/* The parsing information data for the second tx bd of a given packet */
141struct eth_tx_data_2nd_bd { 163struct eth_tx_data_2nd_bd {
142 __le16 tunn_ip_size; 164 __le16 tunn_ip_size;
143 __le16 bitfields1; 165 __le16 bitfields1;
144#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF 166#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
145#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 167#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
146#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 168#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
147#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 169#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
148#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 170#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
149#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 171#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
150#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 172#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
151#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 173#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
152#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 174#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
153#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 175#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
154#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 176#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
155#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 177#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
156#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 178#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
157#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 179#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
158#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 180#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
159#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 181#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
160#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 182#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
161#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 183#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
162#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 184#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
163#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 185#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
164 __le16 bitfields2; 186 __le16 bitfields2;
165#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF 187#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
166#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 188#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
167#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 189#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
168#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 190#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
169}; 191};
170 192
171/* Firmware data for L2-EDPM packet. */ 193/* Firmware data for L2-EDPM packet */
172struct eth_edpm_fw_data { 194struct eth_edpm_fw_data {
173 struct eth_tx_data_1st_bd data_1st_bd; 195 struct eth_tx_data_1st_bd data_1st_bd;
174 struct eth_tx_data_2nd_bd data_2nd_bd; 196 struct eth_tx_data_2nd_bd data_2nd_bd;
175 __le32 reserved; 197 __le32 reserved;
176}; 198};
177 199
178struct eth_fast_path_cqe_fw_debug { 200/* Tunneling parsing flags */
179 __le16 reserved2;
180};
181
182/* tunneling parsing flags */
183struct eth_tunnel_parsing_flags { 201struct eth_tunnel_parsing_flags {
184 u8 flags; 202 u8 flags;
185#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 203#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
@@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags {
199/* PMD flow control bits */ 217/* PMD flow control bits */
200struct eth_pmd_flow_flags { 218struct eth_pmd_flow_flags {
201 u8 flags; 219 u8 flags;
202#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 220#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
203#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 221#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
204#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 222#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
205#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 223#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
206#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F 224#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
207#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 225#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
208}; 226};
209 227
210/* Regular ETH Rx FP CQE. */ 228/* Regular ETH Rx FP CQE */
211struct eth_fast_path_rx_reg_cqe { 229struct eth_fast_path_rx_reg_cqe {
212 u8 type; 230 u8 type;
213 u8 bitfields; 231 u8 bitfields;
214#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 232#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
215#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 233#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
216#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF 234#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
217#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 235#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
218#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 236#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
219#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 237#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
220 __le16 pkt_len; 238 __le16 pkt_len;
221 struct parsing_and_err_flags pars_flags; 239 struct parsing_and_err_flags pars_flags;
222 __le16 vlan_tag; 240 __le16 vlan_tag;
@@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe {
225 u8 placement_offset; 243 u8 placement_offset;
226 struct eth_tunnel_parsing_flags tunnel_pars_flags; 244 struct eth_tunnel_parsing_flags tunnel_pars_flags;
227 u8 bd_num; 245 u8 bd_num;
228 u8 reserved[9]; 246 u8 reserved;
229 struct eth_fast_path_cqe_fw_debug fw_debug; 247 __le16 flow_id;
230 u8 reserved1[3]; 248 u8 reserved1[11];
231 struct eth_pmd_flow_flags pmd_flags; 249 struct eth_pmd_flow_flags pmd_flags;
232}; 250};
233 251
234/* TPA-continue ETH Rx FP CQE. */ 252/* TPA-continue ETH Rx FP CQE */
235struct eth_fast_path_rx_tpa_cont_cqe { 253struct eth_fast_path_rx_tpa_cont_cqe {
236 u8 type; 254 u8 type;
237 u8 tpa_agg_index; 255 u8 tpa_agg_index;
@@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
243 struct eth_pmd_flow_flags pmd_flags; 261 struct eth_pmd_flow_flags pmd_flags;
244}; 262};
245 263
246/* TPA-end ETH Rx FP CQE. */ 264/* TPA-end ETH Rx FP CQE */
247struct eth_fast_path_rx_tpa_end_cqe { 265struct eth_fast_path_rx_tpa_end_cqe {
248 u8 type; 266 u8 type;
249 u8 tpa_agg_index; 267 u8 tpa_agg_index;
@@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe {
259 struct eth_pmd_flow_flags pmd_flags; 277 struct eth_pmd_flow_flags pmd_flags;
260}; 278};
261 279
262/* TPA-start ETH Rx FP CQE. */ 280/* TPA-start ETH Rx FP CQE */
263struct eth_fast_path_rx_tpa_start_cqe { 281struct eth_fast_path_rx_tpa_start_cqe {
264 u8 type; 282 u8 type;
265 u8 bitfields; 283 u8 bitfields;
266#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 284#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
267#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 285#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
268#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF 286#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
269#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 287#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
270#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 288#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
271#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 289#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
272 __le16 seg_len; 290 __le16 seg_len;
273 struct parsing_and_err_flags pars_flags; 291 struct parsing_and_err_flags pars_flags;
274 __le16 vlan_tag; 292 __le16 vlan_tag;
@@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
279 u8 tpa_agg_index; 297 u8 tpa_agg_index;
280 u8 header_len; 298 u8 header_len;
281 __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; 299 __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
282 struct eth_fast_path_cqe_fw_debug fw_debug; 300 __le16 flow_id;
283 u8 reserved; 301 u8 reserved;
284 struct eth_pmd_flow_flags pmd_flags; 302 struct eth_pmd_flow_flags pmd_flags;
285}; 303};
@@ -295,24 +313,24 @@ struct eth_rx_bd {
295 struct regpair addr; 313 struct regpair addr;
296}; 314};
297 315
298/* regular ETH Rx SP CQE */ 316/* Regular ETH Rx SP CQE */
299struct eth_slow_path_rx_cqe { 317struct eth_slow_path_rx_cqe {
300 u8 type; 318 u8 type;
301 u8 ramrod_cmd_id; 319 u8 ramrod_cmd_id;
302 u8 error_flag; 320 u8 error_flag;
303 u8 reserved[25]; 321 u8 reserved[25];
304 __le16 echo; 322 __le16 echo;
305 u8 reserved1; 323 u8 reserved1;
306 struct eth_pmd_flow_flags pmd_flags; 324 struct eth_pmd_flow_flags pmd_flags;
307}; 325};
308 326
309/* union for all ETH Rx CQE types */ 327/* Union for all ETH Rx CQE types */
310union eth_rx_cqe { 328union eth_rx_cqe {
311 struct eth_fast_path_rx_reg_cqe fast_path_regular; 329 struct eth_fast_path_rx_reg_cqe fast_path_regular;
312 struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; 330 struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
313 struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; 331 struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
314 struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; 332 struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
315 struct eth_slow_path_rx_cqe slow_path; 333 struct eth_slow_path_rx_cqe slow_path;
316}; 334};
317 335
318/* ETH Rx CQE type */ 336/* ETH Rx CQE type */
@@ -339,7 +357,7 @@ enum eth_rx_tunn_type {
339 MAX_ETH_RX_TUNN_TYPE 357 MAX_ETH_RX_TUNN_TYPE
340}; 358};
341 359
342/* Aggregation end reason. */ 360/* Aggregation end reason. */
343enum eth_tpa_end_reason { 361enum eth_tpa_end_reason {
344 ETH_AGG_END_UNUSED, 362 ETH_AGG_END_UNUSED,
345 ETH_AGG_END_SP_UPDATE, 363 ETH_AGG_END_SP_UPDATE,
@@ -354,59 +372,59 @@ enum eth_tpa_end_reason {
354 372
355/* The first tx bd of a given packet */ 373/* The first tx bd of a given packet */
356struct eth_tx_1st_bd { 374struct eth_tx_1st_bd {
357 struct regpair addr; 375 struct regpair addr;
358 __le16 nbytes; 376 __le16 nbytes;
359 struct eth_tx_data_1st_bd data; 377 struct eth_tx_data_1st_bd data;
360}; 378};
361 379
362/* The second tx bd of a given packet */ 380/* The second tx bd of a given packet */
363struct eth_tx_2nd_bd { 381struct eth_tx_2nd_bd {
364 struct regpair addr; 382 struct regpair addr;
365 __le16 nbytes; 383 __le16 nbytes;
366 struct eth_tx_data_2nd_bd data; 384 struct eth_tx_data_2nd_bd data;
367}; 385};
368 386
369/* The parsing information data for the third tx bd of a given packet. */ 387/* The parsing information data for the third tx bd of a given packet */
370struct eth_tx_data_3rd_bd { 388struct eth_tx_data_3rd_bd {
371 __le16 lso_mss; 389 __le16 lso_mss;
372 __le16 bitfields; 390 __le16 bitfields;
373#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF 391#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
374#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 392#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
375#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF 393#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
376#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 394#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
377#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 395#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
378#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 396#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
379#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F 397#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
380#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 398#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
381 u8 tunn_l4_hdr_start_offset_w; 399 u8 tunn_l4_hdr_start_offset_w;
382 u8 tunn_hdr_size_w; 400 u8 tunn_hdr_size_w;
383}; 401};
384 402
385/* The third tx bd of a given packet */ 403/* The third tx bd of a given packet */
386struct eth_tx_3rd_bd { 404struct eth_tx_3rd_bd {
387 struct regpair addr; 405 struct regpair addr;
388 __le16 nbytes; 406 __le16 nbytes;
389 struct eth_tx_data_3rd_bd data; 407 struct eth_tx_data_3rd_bd data;
390}; 408};
391 409
392/* Complementary information for the regular tx bd of a given packet. */ 410/* Complementary information for the regular tx bd of a given packet */
393struct eth_tx_data_bd { 411struct eth_tx_data_bd {
394 __le16 reserved0; 412 __le16 reserved0;
395 __le16 bitfields; 413 __le16 bitfields;
396#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF 414#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
397#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 415#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
398#define ETH_TX_DATA_BD_START_BD_MASK 0x1 416#define ETH_TX_DATA_BD_START_BD_MASK 0x1
399#define ETH_TX_DATA_BD_START_BD_SHIFT 8 417#define ETH_TX_DATA_BD_START_BD_SHIFT 8
400#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F 418#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
401#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 419#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
402 __le16 reserved3; 420 __le16 reserved3;
403}; 421};
404 422
405/* The common non-special TX BD ring element */ 423/* The common non-special TX BD ring element */
406struct eth_tx_bd { 424struct eth_tx_bd {
407 struct regpair addr; 425 struct regpair addr;
408 __le16 nbytes; 426 __le16 nbytes;
409 struct eth_tx_data_bd data; 427 struct eth_tx_data_bd data;
410}; 428};
411 429
412union eth_tx_bd_types { 430union eth_tx_bd_types {
@@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone {
434/* ETH doorbell data */ 452/* ETH doorbell data */
435struct eth_db_data { 453struct eth_db_data {
436 u8 params; 454 u8 params;
437#define ETH_DB_DATA_DEST_MASK 0x3 455#define ETH_DB_DATA_DEST_MASK 0x3
438#define ETH_DB_DATA_DEST_SHIFT 0 456#define ETH_DB_DATA_DEST_SHIFT 0
439#define ETH_DB_DATA_AGG_CMD_MASK 0x3 457#define ETH_DB_DATA_AGG_CMD_MASK 0x3
440#define ETH_DB_DATA_AGG_CMD_SHIFT 2 458#define ETH_DB_DATA_AGG_CMD_SHIFT 2
441#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 459#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
442#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 460#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
443#define ETH_DB_DATA_RESERVED_MASK 0x1 461#define ETH_DB_DATA_RESERVED_MASK 0x1
444#define ETH_DB_DATA_RESERVED_SHIFT 5 462#define ETH_DB_DATA_RESERVED_SHIFT 5
445#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 463#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
446#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 464#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
447 u8 agg_flags; 465 u8 agg_flags;
448 __le16 bd_prod; 466 __le16 bd_prod;
449}; 467};
450 468
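The doorbell params byte is built the same way; a hedged sketch follows (local constants mirror the defines above, and the dest/agg_cmd/agg_val_sel values would come from the doorbell enums defined elsewhere in these headers, not reproduced here):

#include <stdint.h>

#define ETH_DB_DATA_DEST_SHIFT		0
#define ETH_DB_DATA_AGG_CMD_SHIFT	2
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT	6

/* Compose the params byte of struct eth_db_data; bd_prod itself would be
 * written separately as __le16 (cpu_to_le16()) in kernel code. */
static uint8_t eth_db_params(uint8_t dest, uint8_t agg_cmd, uint8_t agg_val_sel)
{
	return (uint8_t)((dest << ETH_DB_DATA_DEST_SHIFT) |
			 (agg_cmd << ETH_DB_DATA_AGG_CMD_SHIFT) |
			 (agg_val_sel << ETH_DB_DATA_AGG_VAL_SEL_SHIFT));
}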
469/* RSS hash type */
470enum rss_hash_type {
471 RSS_HASH_TYPE_DEFAULT = 0,
472 RSS_HASH_TYPE_IPV4 = 1,
473 RSS_HASH_TYPE_TCP_IPV4 = 2,
474 RSS_HASH_TYPE_IPV6 = 3,
475 RSS_HASH_TYPE_TCP_IPV6 = 4,
476 RSS_HASH_TYPE_UDP_IPV4 = 5,
477 RSS_HASH_TYPE_UDP_IPV6 = 6,
478 MAX_RSS_HASH_TYPE
479};
480
451#endif /* __ETH_COMMON__ */ 481#endif /* __ETH_COMMON__ */
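Moving enum rss_hash_type to the end of eth_common.h does not change how it is consumed: the 3-bit RSS_HASH_TYPE field in the fast-path RX CQE bitfields (see eth_fast_path_rx_reg_cqe above) is naturally interpreted against this enum. A small illustrative decode, with the enum mirrored locally so the sketch stands alone:

#include <stdint.h>

#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0

enum rss_hash_type_sketch {	/* mirrors enum rss_hash_type above */
	RSS_HASH_TYPE_DEFAULT	= 0,
	RSS_HASH_TYPE_IPV4	= 1,
	RSS_HASH_TYPE_TCP_IPV4	= 2,
	RSS_HASH_TYPE_IPV6	= 3,
	RSS_HASH_TYPE_TCP_IPV6	= 4,
	RSS_HASH_TYPE_UDP_IPV4	= 5,
	RSS_HASH_TYPE_UDP_IPV6	= 6,
};

/* Decode the RSS hash type carried in a regular RX CQE's bitfields byte. */
static enum rss_hash_type_sketch cqe_rss_hash_type(uint8_t cqe_bitfields)
{
	return (enum rss_hash_type_sketch)
		((cqe_bitfields >> ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT) &
		 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK);
}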
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 12fc9e788eea..22077c586853 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -8,217 +8,78 @@
8 8
9#ifndef __FCOE_COMMON__ 9#ifndef __FCOE_COMMON__
10#define __FCOE_COMMON__ 10#define __FCOE_COMMON__
11
11/*********************/ 12/*********************/
12/* FCOE FW CONSTANTS */ 13/* FCOE FW CONSTANTS */
13/*********************/ 14/*********************/
14 15
15#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 16#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
16 17
17struct fcoe_abts_pkt { 18/* The fcoe storm task context protection-information of Ystorm */
18 __le32 abts_rsp_fc_payload_lo; 19struct protection_info_ctx {
19 __le16 abts_rsp_rx_id; 20 __le16 flags;
20 u8 abts_rsp_rctl; 21#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
21 u8 reserved2; 22#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
22}; 23#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
23 24#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
24/* FCoE additional WQE (Sq/XferQ) information */ 25#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
25union fcoe_additional_info_union { 26#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
26 __le32 previous_tid; 27#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
27 __le32 parent_tid; 28#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
28 __le32 burst_length; 29#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
29 __le32 seq_rec_updated_offset; 30#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
30}; 31#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
31 32#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
32struct fcoe_exp_ro { 33 u8 dix_block_size;
33 __le32 data_offset; 34 u8 dst_size;
34 __le32 reserved;
35};
36
37union fcoe_cleanup_addr_exp_ro_union {
38 struct regpair abts_rsp_fc_payload_hi;
39 struct fcoe_exp_ro exp_ro;
40};
41
42/* FCoE Ramrod Command IDs */
43enum fcoe_completion_status {
44 FCOE_COMPLETION_STATUS_SUCCESS,
45 FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
46 FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
47 MAX_FCOE_COMPLETION_STATUS
48};
49
50struct fc_addr_nw {
51 u8 addr_lo;
52 u8 addr_mid;
53 u8 addr_hi;
54};
55
56/* FCoE connection offload */
57struct fcoe_conn_offload_ramrod_data {
58 struct regpair sq_pbl_addr;
59 struct regpair sq_curr_page_addr;
60 struct regpair sq_next_page_addr;
61 struct regpair xferq_pbl_addr;
62 struct regpair xferq_curr_page_addr;
63 struct regpair xferq_next_page_addr;
64 struct regpair respq_pbl_addr;
65 struct regpair respq_curr_page_addr;
66 struct regpair respq_next_page_addr;
67 __le16 dst_mac_addr_lo;
68 __le16 dst_mac_addr_mid;
69 __le16 dst_mac_addr_hi;
70 __le16 src_mac_addr_lo;
71 __le16 src_mac_addr_mid;
72 __le16 src_mac_addr_hi;
73 __le16 tx_max_fc_pay_len;
74 __le16 e_d_tov_timer_val;
75 __le16 rx_max_fc_pay_len;
76 __le16 vlan_tag;
77#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
78#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
79#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
80#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
81#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
82#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
83 __le16 physical_q0;
84 __le16 rec_rr_tov_timer_val;
85 struct fc_addr_nw s_id;
86 u8 max_conc_seqs_c3;
87 struct fc_addr_nw d_id;
88 u8 flags;
89#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
90#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
91#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
92#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
93#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
94#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
95#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
96#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
97#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
98#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
99#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
100#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
101 __le16 conn_id;
102 u8 def_q_idx;
103 u8 reserved[5];
104};
105
106/* FCoE terminate connection request */
107struct fcoe_conn_terminate_ramrod_data {
108 struct regpair terminate_params_addr;
109};
110
111struct fcoe_slow_sgl_ctx {
112 struct regpair base_sgl_addr;
113 __le16 curr_sge_off;
114 __le16 remainder_num_sges;
115 __le16 curr_sgl_index;
116 __le16 reserved;
117};
118
119union fcoe_dix_desc_ctx {
120 struct fcoe_slow_sgl_ctx dix_sgl;
121 struct scsi_sge cached_dix_sge;
122}; 35};
123 36
124struct fcoe_fast_sgl_ctx { 37/* The fcoe storm task context protection-information of Ystorm */
125 struct regpair sgl_start_addr; 38union protection_info_union_ctx {
126 __le32 sgl_byte_offset; 39 struct protection_info_ctx info;
127 __le16 task_reuse_cnt; 40 __le32 value;
128 __le16 init_offset_in_first_sge;
129}; 41};
130 42
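The relocated protection_info_ctx carries its interval size in the 4-bit INTERVAL_SIZE_LOG field. A hedged sketch of packing its flags word, assuming the field is the log2 of the protection interval (e.g. 9 for 512-byte intervals) and leaving host_interface as an opaque firmware-defined value; kernel code would store the result with cpu_to_le16():

#include <stdint.h>

#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT	0
#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT		2
#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK	0xF
#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT	4

/* interval_size_log is assumed to be log2 of the protection interval,
 * e.g. 9 for 512-byte intervals; host_interface values are firmware-defined. */
static uint16_t protection_flags(uint8_t host_interface, int dif_to_peer,
				 uint8_t interval_size_log)
{
	return (uint16_t)((host_interface << PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT) |
			  ((dif_to_peer ? 1u : 0u) << PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT) |
			  ((interval_size_log & PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK)
			   << PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT));
}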
43/* FCP CMD payload */
131struct fcoe_fcp_cmd_payload { 44struct fcoe_fcp_cmd_payload {
132 __le32 opaque[8]; 45 __le32 opaque[8];
133}; 46};
134 47
48/* FCP RSP payload */
135struct fcoe_fcp_rsp_payload { 49struct fcoe_fcp_rsp_payload {
136 __le32 opaque[6]; 50 __le32 opaque[6];
137}; 51};
138 52
139struct fcoe_fcp_xfer_payload { 53/* FCP RSP payload */
140 __le32 opaque[3];
141};
142
143/* FCoE firmware function init */
144struct fcoe_init_func_ramrod_data {
145 struct scsi_init_func_params func_params;
146 struct scsi_init_func_queues q_params;
147 __le16 mtu;
148 __le16 sq_num_pages_in_pbl;
149 __le32 reserved;
150};
151
152/* FCoE: Mode of the connection: Target or Initiator or both */
153enum fcoe_mode_type {
154 FCOE_INITIATOR_MODE = 0x0,
155 FCOE_TARGET_MODE = 0x1,
156 FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
157 MAX_FCOE_MODE_TYPE
158};
159
160struct fcoe_rx_stat {
161 struct regpair fcoe_rx_byte_cnt;
162 struct regpair fcoe_rx_data_pkt_cnt;
163 struct regpair fcoe_rx_xfer_pkt_cnt;
164 struct regpair fcoe_rx_other_pkt_cnt;
165 __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
166 __le32 fcoe_silent_drop_pkt_rq_full_cnt;
167 __le32 fcoe_silent_drop_pkt_crc_error_cnt;
168 __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
169 __le32 fcoe_silent_drop_total_pkt_cnt;
170 __le32 rsrv;
171};
172
173struct fcoe_stat_ramrod_data {
174 struct regpair stat_params_addr;
175};
176
177struct protection_info_ctx {
178 __le16 flags;
179#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3
180#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0
181#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1
182#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2
183#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1
184#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
185#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF
186#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4
187#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
188#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
189#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F
190#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9
191 u8 dix_block_size;
192 u8 dst_size;
193};
194
195union protection_info_union_ctx {
196 struct protection_info_ctx info;
197 __le32 value;
198};
199
200struct fcp_rsp_payload_padded { 54struct fcp_rsp_payload_padded {
201 struct fcoe_fcp_rsp_payload rsp_payload; 55 struct fcoe_fcp_rsp_payload rsp_payload;
202 __le32 reserved[2]; 56 __le32 reserved[2];
203}; 57};
204 58
59/* FCP RSP payload */
60struct fcoe_fcp_xfer_payload {
61 __le32 opaque[3];
62};
63
64/* FCP xfer payload */
205struct fcp_xfer_payload_padded { 65struct fcp_xfer_payload_padded {
206 struct fcoe_fcp_xfer_payload xfer_payload; 66 struct fcoe_fcp_xfer_payload xfer_payload;
207 __le32 reserved[5]; 67 __le32 reserved[5];
208}; 68};
209 69
70/* Task params */
210struct fcoe_tx_data_params { 71struct fcoe_tx_data_params {
211 __le32 data_offset; 72 __le32 data_offset;
212 __le32 offset_in_io; 73 __le32 offset_in_io;
213 u8 flags; 74 u8 flags;
214#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 75#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1
215#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 76#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
216#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 77#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1
217#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 78#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1
218#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 79#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1
219#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 80#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2
220#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F 81#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F
221#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 82#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3
222 u8 dif_residual; 83 u8 dif_residual;
223 __le16 seq_cnt; 84 __le16 seq_cnt;
224 __le16 single_sge_saved_offset; 85 __le16 single_sge_saved_offset;
@@ -227,6 +88,7 @@ struct fcoe_tx_data_params {
227 __le16 reserved3; 88 __le16 reserved3;
228}; 89};
229 90
91/* Middle path parameters: FC header fields provided by the driver */
230struct fcoe_tx_mid_path_params { 92struct fcoe_tx_mid_path_params {
231 __le32 parameter; 93 __le32 parameter;
232 u8 r_ctl; 94 u8 r_ctl;
@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params {
237 __le16 ox_id; 99 __le16 ox_id;
238}; 100};
239 101
102/* Task params */
240struct fcoe_tx_params { 103struct fcoe_tx_params {
241 struct fcoe_tx_data_params data; 104 struct fcoe_tx_data_params data;
242 struct fcoe_tx_mid_path_params mid_path; 105 struct fcoe_tx_mid_path_params mid_path;
243}; 106};
244 107
108/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
245union fcoe_tx_info_union_ctx { 109union fcoe_tx_info_union_ctx {
246 struct fcoe_fcp_cmd_payload fcp_cmd_payload; 110 struct fcoe_fcp_cmd_payload fcp_cmd_payload;
247 struct fcp_rsp_payload_padded fcp_rsp_payload; 111 struct fcp_rsp_payload_padded fcp_rsp_payload;
@@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx {
249 struct fcoe_tx_params tx_params; 113 struct fcoe_tx_params tx_params;
250}; 114};
251 115
116/* Data sgl */
117struct fcoe_slow_sgl_ctx {
118 struct regpair base_sgl_addr;
119 __le16 curr_sge_off;
120 __le16 remainder_num_sges;
121 __le16 curr_sgl_index;
122 __le16 reserved;
123};
124
125/* Union of DIX SGL \ cached DIX sges */
126union fcoe_dix_desc_ctx {
127 struct fcoe_slow_sgl_ctx dix_sgl;
128 struct scsi_sge cached_dix_sge;
129};
130
131/* The fcoe storm task context of Ystorm */
252struct ystorm_fcoe_task_st_ctx { 132struct ystorm_fcoe_task_st_ctx {
253 u8 task_type; 133 u8 task_type;
254 u8 sgl_mode; 134 u8 sgl_mode;
255#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 135#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
256#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 136#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
257#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F 137#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
258#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 138#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
259 u8 cached_dix_sge; 139 u8 cached_dix_sge;
260 u8 expect_first_xfer; 140 u8 expect_first_xfer;
261 __le32 num_pbf_zero_write; 141 __le32 num_pbf_zero_write;
@@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx {
272 u8 reserved2[8]; 152 u8 reserved2[8];
273}; 153};
274 154
275struct ystorm_fcoe_task_ag_ctx { 155struct e4_ystorm_fcoe_task_ag_ctx {
276 u8 byte0; 156 u8 byte0;
277 u8 byte1; 157 u8 byte1;
278 __le16 word0; 158 __le16 word0;
279 u8 flags0; 159 u8 flags0;
280#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF 160#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
281#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 161#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
282#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 162#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
283#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 163#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
284#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 164#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
285#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 165#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
286#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 166#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
287#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 167#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
288#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 168#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
289#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 169#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
290 u8 flags1; 170 u8 flags1;
291#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 171#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
292#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 172#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
293#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 173#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
294#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 174#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
295#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 175#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
296#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 176#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
297#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 177#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
298#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 178#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
299#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 179#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
300#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 180#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
301 u8 flags2; 181 u8 flags2;
302#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 182#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
303#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 183#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
304#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 184#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
305#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 185#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
306#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 186#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
307#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 187#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
308#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 188#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
309#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 189#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
310#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 190#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
311#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 191#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
312#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 192#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
313#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 193#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
314#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 194#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
315#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 195#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
316#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 196#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
317#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 197#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
318 u8 byte2; 198 u8 byte2;
319 __le32 reg0; 199 __le32 reg0;
320 u8 byte3; 200 u8 byte3;
@@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx {
328 __le32 reg2; 208 __le32 reg2;
329}; 209};
330 210
331struct tstorm_fcoe_task_ag_ctx { 211struct e4_tstorm_fcoe_task_ag_ctx {
332 u8 reserved; 212 u8 reserved;
333 u8 byte1; 213 u8 byte1;
334 __le16 icid; 214 __le16 icid;
335 u8 flags0; 215 u8 flags0;
336#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF 216#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
337#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 217#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
338#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 218#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
339#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 219#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
340#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 220#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
341#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 221#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
342#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 222#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
343#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 223#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
344#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 224#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
345#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 225#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
346 u8 flags1; 226 u8 flags1;
347#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 227#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
348#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 228#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
349#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 229#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
350#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 230#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
351#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 231#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
352#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 232#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
353#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 233#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
354#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 234#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
355#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 235#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
356#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 236#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
357 u8 flags2; 237 u8 flags2;
358#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 238#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
359#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 239#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
360#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 240#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
361#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 241#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
362#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 242#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
363#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 243#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
364#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 244#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
365#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 245#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
366 u8 flags3; 246 u8 flags3;
367#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 247#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
368#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 248#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
369#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 249#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
370#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 250#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
371#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 251#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
372#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 252#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
373#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 253#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
374#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 254#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
375#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 255#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
376#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 256#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
377#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 257#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
378#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 258#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
379#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 259#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
380#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 260#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
381 u8 flags4; 261 u8 flags4;
382#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 262#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
383#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 263#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
384#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 264#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
385#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 265#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
386#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 266#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
387#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 267#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
388#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 268#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
389#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 269#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
390#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 270#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
391#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 271#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
392#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 272#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
393#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 273#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
394#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 274#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
395#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 275#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
396#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 276#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
397#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 277#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
398 u8 cleanup_state; 278 u8 cleanup_state;
399 __le16 last_sent_tid; 279 __le16 last_sent_tid;
400 __le32 rec_rr_tov_exp_timeout; 280 __le32 rec_rr_tov_exp_timeout;
@@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx {
407 __le32 data_offset_next; 287 __le32 data_offset_next;
408}; 288};
409 289
290/* Expected relative offset */
291struct fcoe_exp_ro {
292 __le32 data_offset;
293 __le32 reserved;
294};
295
296/* Union of cleanup address / expected relative offset */
297union fcoe_cleanup_addr_exp_ro_union {
298 struct regpair abts_rsp_fc_payload_hi;
299 struct fcoe_exp_ro exp_ro;
300};
301
302/* Fields copied from the ABTS response packet */
303struct fcoe_abts_pkt {
304 __le32 abts_rsp_fc_payload_lo;
305 __le16 abts_rsp_rx_id;
306 u8 abts_rsp_rctl;
307 u8 reserved2;
308};
309
310/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
410struct fcoe_tstorm_fcoe_task_st_ctx_read_write { 311struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
411 union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; 312 union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
412 __le16 flags; 313 __le16 flags;
413#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 314#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
414#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 315#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
415#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 316#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
416#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 317#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
417#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 318#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
418#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 319#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
419#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 320#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
420#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 321#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
421#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 322#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
422#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 323#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
423#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 324#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
424#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 325#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
425#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 326#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
426#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 327#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
427#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF 328#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
428#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 329#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
429 __le16 seq_cnt; 330 __le16 seq_cnt;
430 u8 seq_id; 331 u8 seq_id;
431 u8 ooo_rx_seq_id; 332 u8 ooo_rx_seq_id;
@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
436 __le16 reserved1; 337 __le16 reserved1;
437}; 338};
438 339
340/* FW read-only part of the fcoe task storm context of Tstorm */
439struct fcoe_tstorm_fcoe_task_st_ctx_read_only { 341struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
440 u8 task_type; 342 u8 task_type;
441 u8 dev_type; 343 u8 dev_type;
@@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
446 __le32 rsrv; 348 __le32 rsrv;
447}; 349};
448 350
351/** The fcoe task storm context of Tstorm */
449struct tstorm_fcoe_task_st_ctx { 352struct tstorm_fcoe_task_st_ctx {
450 struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; 353 struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
451 struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; 354 struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
452}; 355};
453 356
454struct mstorm_fcoe_task_ag_ctx { 357struct e4_mstorm_fcoe_task_ag_ctx {
455 u8 byte0; 358 u8 byte0;
456 u8 byte1; 359 u8 byte1;
457 __le16 icid; 360 __le16 icid;
458 u8 flags0; 361 u8 flags0;
459#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF 362#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
460#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 363#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
461#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 364#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
462#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 365#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
463#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 366#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
464#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 367#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
465#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 368#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
466#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 369#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
467#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 370#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
468#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 371#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
469 u8 flags1; 372 u8 flags1;
470#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 373#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
471#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 374#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
472#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 375#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
473#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 376#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
474#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 377#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
475#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 378#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
476#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 379#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
477#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 380#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
478#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 381#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
479#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 382#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
480 u8 flags2; 383 u8 flags2;
481#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 384#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
482#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 385#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
483#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 386#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
484#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 387#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
485#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 388#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
486#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 389#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
487#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 390#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
488#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 391#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
489#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 392#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
490#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 393#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
491#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 394#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
492#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 395#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
493#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 396#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
494#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 397#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
495#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 398#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
496#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 399#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
497 u8 cleanup_state; 400 u8 cleanup_state;
498 __le32 received_bytes; 401 __le32 received_bytes;
499 u8 byte3; 402 u8 byte3;
@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx {
507 __le32 reg2; 410 __le32 reg2;
508}; 411};
509 412
413/* The fcoe task storm context of Mstorm */
510struct mstorm_fcoe_task_st_ctx { 414struct mstorm_fcoe_task_st_ctx {
511 struct regpair rsp_buf_addr; 415 struct regpair rsp_buf_addr;
512 __le32 rsrv[2]; 416 __le32 rsrv[2];
@@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx {
515 __le32 data_buffer_offset; 419 __le32 data_buffer_offset;
516 __le16 parent_id; 420 __le16 parent_id;
517 __le16 flags; 421 __le16 flags;
518#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF 422#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
519#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 423#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
520#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 424#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
521#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 425#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
522#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 426#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
523#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 427#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
524#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 428#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
525#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 429#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
526#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 430#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
527#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 431#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
528#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 432#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
529#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 433#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
530#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 434#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
531#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 435#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
532#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 436#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
533#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 437#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
534#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 438#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
535#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 439#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
536#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 440#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
537#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 441#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
538 struct scsi_cached_sges data_desc; 442 struct scsi_cached_sges data_desc;
539}; 443};
540 444
541struct ustorm_fcoe_task_ag_ctx { 445struct e4_ustorm_fcoe_task_ag_ctx {
542 u8 reserved; 446 u8 reserved;
543 u8 byte1; 447 u8 byte1;
544 __le16 icid; 448 __le16 icid;
545 u8 flags0; 449 u8 flags0;
546#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF 450#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
547#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 451#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
548#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 452#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
549#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 453#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
550#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 454#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
551#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 455#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
552#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 456#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
553#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 457#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
554 u8 flags1; 458 u8 flags1;
555#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 459#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
556#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 460#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
557#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 461#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
558#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 462#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
559#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 463#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
560#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 464#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
561#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 465#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
562#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 466#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
563 u8 flags2; 467 u8 flags2;
564#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 468#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
565#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 469#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
566#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 470#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
567#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 471#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
568#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 472#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
569#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 473#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
570#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 474#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
571#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 475#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
572#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 476#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
573#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 477#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
574#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 478#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
575#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 479#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
576#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 480#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
577#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 481#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
578#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 482#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
579#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 483#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
580 u8 flags3; 484 u8 flags3;
581#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 485#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
582#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 486#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
583#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 487#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
584#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 488#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
585#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 489#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
586#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 490#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
587#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 491#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
588#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 492#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
589#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF 493#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
590#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 494#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
591 __le32 dif_err_intervals; 495 __le32 dif_err_intervals;
592 __le32 dif_error_1st_interval; 496 __le32 dif_error_1st_interval;
593 __le32 global_cq_num; 497 __le32 global_cq_num;
@@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx {
596 __le32 reg5; 500 __le32 reg5;
597}; 501};
598 502
599struct fcoe_task_context { 503/* FCoE task context */
504struct e4_fcoe_task_context {
600 struct ystorm_fcoe_task_st_ctx ystorm_st_context; 505 struct ystorm_fcoe_task_st_ctx ystorm_st_context;
601 struct regpair ystorm_st_padding[2]; 506 struct regpair ystorm_st_padding[2];
602 struct tdif_task_context tdif_context; 507 struct tdif_task_context tdif_context;
603 struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; 508 struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
604 struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; 509 struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
605 struct timers_context timer_context; 510 struct timers_context timer_context;
606 struct tstorm_fcoe_task_st_ctx tstorm_st_context; 511 struct tstorm_fcoe_task_st_ctx tstorm_st_context;
607 struct regpair tstorm_st_padding[2]; 512 struct regpair tstorm_st_padding[2];
608 struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; 513 struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
609 struct mstorm_fcoe_task_st_ctx mstorm_st_context; 514 struct mstorm_fcoe_task_st_ctx mstorm_st_context;
610 struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; 515 struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
611 struct rdif_task_context rdif_context; 516 struct rdif_task_context rdif_context;
612}; 517};
613 518
519/* FCoE additional WQE (Sq/XferQ) information */
520union fcoe_additional_info_union {
521 __le32 previous_tid;
522 __le32 parent_tid;
523 __le32 burst_length;
524 __le32 seq_rec_updated_offset;
525};
526
527/* FCoE ramrod completion status */
528enum fcoe_completion_status {
529 FCOE_COMPLETION_STATUS_SUCCESS,
530 FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
531 FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
532 MAX_FCOE_COMPLETION_STATUS
533};
534
535/* FC address (SID/DID) network presentation */
536struct fc_addr_nw {
537 u8 addr_lo;
538 u8 addr_mid;
539 u8 addr_hi;
540};
541
542/* FCoE connection offload */
543struct fcoe_conn_offload_ramrod_data {
544 struct regpair sq_pbl_addr;
545 struct regpair sq_curr_page_addr;
546 struct regpair sq_next_page_addr;
547 struct regpair xferq_pbl_addr;
548 struct regpair xferq_curr_page_addr;
549 struct regpair xferq_next_page_addr;
550 struct regpair respq_pbl_addr;
551 struct regpair respq_curr_page_addr;
552 struct regpair respq_next_page_addr;
553 __le16 dst_mac_addr_lo;
554 __le16 dst_mac_addr_mid;
555 __le16 dst_mac_addr_hi;
556 __le16 src_mac_addr_lo;
557 __le16 src_mac_addr_mid;
558 __le16 src_mac_addr_hi;
559 __le16 tx_max_fc_pay_len;
560 __le16 e_d_tov_timer_val;
561 __le16 rx_max_fc_pay_len;
562 __le16 vlan_tag;
563#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF
564#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
565#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1
566#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
567#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7
568#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
569 __le16 physical_q0;
570 __le16 rec_rr_tov_timer_val;
571 struct fc_addr_nw s_id;
572 u8 max_conc_seqs_c3;
573 struct fc_addr_nw d_id;
574 u8 flags;
575#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
576#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
577#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1
578#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
579#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1
580#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
581#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
582#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
583#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1
584#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4
585#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
586#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5
587#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1
588#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7
589 __le16 conn_id;
590 u8 def_q_idx;
591 u8 reserved[5];
592};
593
594/* FCoE terminate connection request */
595struct fcoe_conn_terminate_ramrod_data {
596 struct regpair terminate_params_addr;
597};
598
599/* FCoE device type */
600enum fcoe_device_type {
601 FCOE_TASK_DEV_TYPE_DISK,
602 FCOE_TASK_DEV_TYPE_TAPE,
603 MAX_FCOE_DEVICE_TYPE
604};
605
606/* Data sgl */
607struct fcoe_fast_sgl_ctx {
608 struct regpair sgl_start_addr;
609 __le32 sgl_byte_offset;
610 __le16 task_reuse_cnt;
611 __le16 init_offset_in_first_sge;
612};
613
614/* FCoE firmware function init */
615struct fcoe_init_func_ramrod_data {
616 struct scsi_init_func_params func_params;
617 struct scsi_init_func_queues q_params;
618 __le16 mtu;
619 __le16 sq_num_pages_in_pbl;
620 __le32 reserved[3];
621};
622
623/* FCoE connection mode: target, initiator, or both */
624enum fcoe_mode_type {
625 FCOE_INITIATOR_MODE = 0x0,
626 FCOE_TARGET_MODE = 0x1,
627 FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
628 MAX_FCOE_MODE_TYPE
629};
630
631/* Per PF FCoE receive path statistics - tStorm RAM structure */
632struct fcoe_rx_stat {
633 struct regpair fcoe_rx_byte_cnt;
634 struct regpair fcoe_rx_data_pkt_cnt;
635 struct regpair fcoe_rx_xfer_pkt_cnt;
636 struct regpair fcoe_rx_other_pkt_cnt;
637 __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
638 __le32 fcoe_silent_drop_pkt_rq_full_cnt;
639 __le32 fcoe_silent_drop_pkt_crc_error_cnt;
640 __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
641 __le32 fcoe_silent_drop_total_pkt_cnt;
642 __le32 rsrv;
643};
644
645/* FCoE SQE request type */
646enum fcoe_sqe_request_type {
647 SEND_FCOE_CMD,
648 SEND_FCOE_MIDPATH,
649 SEND_FCOE_ABTS_REQUEST,
650 FCOE_EXCHANGE_CLEANUP,
651 FCOE_SEQUENCE_RECOVERY,
652 SEND_FCOE_XFER_RDY,
653 SEND_FCOE_RSP,
654 SEND_FCOE_RSP_WITH_SENSE_DATA,
655 SEND_FCOE_TARGET_DATA,
656 SEND_FCOE_INITIATOR_DATA,
657 SEND_FCOE_XFER_CONTINUATION_RDY,
658 SEND_FCOE_TARGET_ABTS_RSP,
659 MAX_FCOE_SQE_REQUEST_TYPE
660};
661
662/* FCoE statistics request */
663struct fcoe_stat_ramrod_data {
664 struct regpair stat_params_addr;
665};
666
667/* FCoE task type */
668enum fcoe_task_type {
669 FCOE_TASK_TYPE_WRITE_INITIATOR,
670 FCOE_TASK_TYPE_READ_INITIATOR,
671 FCOE_TASK_TYPE_MIDPATH,
672 FCOE_TASK_TYPE_UNSOLICITED,
673 FCOE_TASK_TYPE_ABTS,
674 FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
675 FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
676 FCOE_TASK_TYPE_WRITE_TARGET,
677 FCOE_TASK_TYPE_READ_TARGET,
678 FCOE_TASK_TYPE_RSP,
679 FCOE_TASK_TYPE_RSP_SENSE_DATA,
680 FCOE_TASK_TYPE_ABTS_TARGET,
681 FCOE_TASK_TYPE_ENUM_SIZE,
682 MAX_FCOE_TASK_TYPE
683};
684
685/* Per PF FCoE transmit path statistics - pStorm RAM structure */
614struct fcoe_tx_stat { 686struct fcoe_tx_stat {
615 struct regpair fcoe_tx_byte_cnt; 687 struct regpair fcoe_tx_byte_cnt;
616 struct regpair fcoe_tx_data_pkt_cnt; 688 struct regpair fcoe_tx_data_pkt_cnt;
@@ -618,51 +690,55 @@ struct fcoe_tx_stat {
618 struct regpair fcoe_tx_other_pkt_cnt; 690 struct regpair fcoe_tx_other_pkt_cnt;
619}; 691};
620 692
693/* FCoE SQ/XferQ element */
621struct fcoe_wqe { 694struct fcoe_wqe {
622 __le16 task_id; 695 __le16 task_id;
623 __le16 flags; 696 __le16 flags;
624#define FCOE_WQE_REQ_TYPE_MASK 0xF 697#define FCOE_WQE_REQ_TYPE_MASK 0xF
625#define FCOE_WQE_REQ_TYPE_SHIFT 0 698#define FCOE_WQE_REQ_TYPE_SHIFT 0
626#define FCOE_WQE_SGL_MODE_MASK 0x1 699#define FCOE_WQE_SGL_MODE_MASK 0x1
627#define FCOE_WQE_SGL_MODE_SHIFT 4 700#define FCOE_WQE_SGL_MODE_SHIFT 4
628#define FCOE_WQE_CONTINUATION_MASK 0x1 701#define FCOE_WQE_CONTINUATION_MASK 0x1
629#define FCOE_WQE_CONTINUATION_SHIFT 5 702#define FCOE_WQE_CONTINUATION_SHIFT 5
630#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 703#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
631#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 704#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
632#define FCOE_WQE_RESERVED_MASK 0x1 705#define FCOE_WQE_RESERVED_MASK 0x1
633#define FCOE_WQE_RESERVED_SHIFT 7 706#define FCOE_WQE_RESERVED_SHIFT 7
634#define FCOE_WQE_NUM_SGES_MASK 0xF 707#define FCOE_WQE_NUM_SGES_MASK 0xF
635#define FCOE_WQE_NUM_SGES_SHIFT 8 708#define FCOE_WQE_NUM_SGES_SHIFT 8
636#define FCOE_WQE_RESERVED1_MASK 0xF 709#define FCOE_WQE_RESERVED1_MASK 0xF
637#define FCOE_WQE_RESERVED1_SHIFT 12 710#define FCOE_WQE_RESERVED1_SHIFT 12
638 union fcoe_additional_info_union additional_info_union; 711 union fcoe_additional_info_union additional_info_union;
639}; 712};
640 713
714/* FCoE XFRQ element protection flags */
641struct xfrqe_prot_flags { 715struct xfrqe_prot_flags {
642 u8 flags; 716 u8 flags;
643#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF 717#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
644#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 718#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
645#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 719#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1
646#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 720#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
647#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 721#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3
648#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 722#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
649#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 723#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1
650#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 724#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
651}; 725};
652 726
727/* FCoE doorbell data */
653struct fcoe_db_data { 728struct fcoe_db_data {
654 u8 params; 729 u8 params;
655#define FCOE_DB_DATA_DEST_MASK 0x3 730#define FCOE_DB_DATA_DEST_MASK 0x3
656#define FCOE_DB_DATA_DEST_SHIFT 0 731#define FCOE_DB_DATA_DEST_SHIFT 0
657#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 732#define FCOE_DB_DATA_AGG_CMD_MASK 0x3
658#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 733#define FCOE_DB_DATA_AGG_CMD_SHIFT 2
659#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 734#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1
660#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 735#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4
661#define FCOE_DB_DATA_RESERVED_MASK 0x1 736#define FCOE_DB_DATA_RESERVED_MASK 0x1
662#define FCOE_DB_DATA_RESERVED_SHIFT 5 737#define FCOE_DB_DATA_RESERVED_SHIFT 5
663#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 738#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
664#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 739#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
665 u8 agg_flags; 740 u8 agg_flags;
666 __le16 sq_prod; 741 __le16 sq_prod;
667}; 742};
743
668#endif /* __FCOE_COMMON__ */ 744#endif /* __FCOE_COMMON__ */
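
Note on usage (not part of the patch): the *_MASK/*_SHIFT pairs throughout these HSI headers describe bit-fields packed into the flags words, and drivers consume them through field accessor macros rather than open-coded shifts; qed's own SET_FIELD()/GET_FIELD() helpers, defined elsewhere in the qed headers, serve that role in-tree. The sketch below is a minimal, stand-alone illustration of the pattern, using only the FCOE_WQE_* values quoted above. The FIELD_SET()/FIELD_GET() names are local stand-ins, not the kernel's API, and in-kernel code would additionally convert the result to little-endian before storing it into the __le16 flags member.

/*
 * Stand-alone sketch: compose and decode an fcoe_wqe-style flags word
 * using the MASK/SHIFT convention from this header.
 */
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's field accessor helpers */
#define FIELD_SET(val, name, fld) \
	((val) = (uint16_t)(((val) & ~((name##_MASK) << (name##_SHIFT))) | \
			    (((fld) & (name##_MASK)) << (name##_SHIFT))))
#define FIELD_GET(val, name) \
	(((val) >> (name##_SHIFT)) & (name##_MASK))

/* Values copied from the fcoe_wqe flags layout above */
#define FCOE_WQE_REQ_TYPE_MASK   0xF
#define FCOE_WQE_REQ_TYPE_SHIFT  0
#define FCOE_WQE_NUM_SGES_MASK   0xF
#define FCOE_WQE_NUM_SGES_SHIFT  8

int main(void)
{
	uint16_t flags = 0;

	FIELD_SET(flags, FCOE_WQE_REQ_TYPE, 0);	/* SEND_FCOE_CMD */
	FIELD_SET(flags, FCOE_WQE_NUM_SGES, 3);	/* three SGEs */

	printf("flags=0x%04x req_type=%u num_sges=%u\n", flags,
	       (unsigned)FIELD_GET(flags, FCOE_WQE_REQ_TYPE),
	       (unsigned)FIELD_GET(flags, FCOE_WQE_NUM_SGES));
	return 0;
}
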
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 85e086cba639..4cc9b37b8d95 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -32,47 +32,48 @@
32 32
33#ifndef __ISCSI_COMMON__ 33#ifndef __ISCSI_COMMON__
34#define __ISCSI_COMMON__ 34#define __ISCSI_COMMON__
35
35/**********************/ 36/**********************/
36/* ISCSI FW CONSTANTS */ 37/* ISCSI FW CONSTANTS */
37/**********************/ 38/**********************/
38 39
39/* iSCSI HSI constants */ 40/* iSCSI HSI constants */
40#define ISCSI_DEFAULT_MTU (1500) 41#define ISCSI_DEFAULT_MTU (1500)
41 42
42/* KWQ (kernel work queue) layer codes */ 43/* KWQ (kernel work queue) layer codes */
43#define ISCSI_SLOW_PATH_LAYER_CODE (6) 44#define ISCSI_SLOW_PATH_LAYER_CODE (6)
44 45
45/* iSCSI parameter defaults */ 46/* iSCSI parameter defaults */
46#define ISCSI_DEFAULT_HEADER_DIGEST (0) 47#define ISCSI_DEFAULT_HEADER_DIGEST (0)
47#define ISCSI_DEFAULT_DATA_DIGEST (0) 48#define ISCSI_DEFAULT_DATA_DIGEST (0)
48#define ISCSI_DEFAULT_INITIAL_R2T (1) 49#define ISCSI_DEFAULT_INITIAL_R2T (1)
49#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) 50#define ISCSI_DEFAULT_IMMEDIATE_DATA (1)
50#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) 51#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000)
51#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) 52#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000)
52#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) 53#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000)
53#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) 54#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
54 55
55/* iSCSI parameter limits */ 56/* iSCSI parameter limits */
56#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) 57#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200)
57#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) 58#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff)
58#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) 59#define ISCSI_MIN_VAL_BURST_LENGTH (0x200)
59#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) 60#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff)
60#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) 61#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
61#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) 62#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
62 63
63#define ISCSI_AHS_CNTL_SIZE 4 64#define ISCSI_AHS_CNTL_SIZE 4
64 65
65#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) 66#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
66 67
67/* iSCSI reserved params */ 68/* iSCSI reserved params */
68#define ISCSI_ITT_ALL_ONES (0xffffffff) 69#define ISCSI_ITT_ALL_ONES (0xffffffff)
69#define ISCSI_TTT_ALL_ONES (0xffffffff) 70#define ISCSI_TTT_ALL_ONES (0xffffffff)
70 71
71#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 72#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
72#define ISCSI_OPTION_2_ON_CHIP_TCP 2 73#define ISCSI_OPTION_2_ON_CHIP_TCP 2
73 74
74#define ISCSI_INITIATOR_MODE 0 75#define ISCSI_INITIATOR_MODE 0
75#define ISCSI_TARGET_MODE 1 76#define ISCSI_TARGET_MODE 1
76 77
77/* iSCSI request op codes */ 78/* iSCSI request op codes */
78#define ISCSI_OPCODE_NOP_OUT (0) 79#define ISCSI_OPCODE_NOP_OUT (0)
@@ -84,41 +85,48 @@
84#define ISCSI_OPCODE_LOGOUT_REQUEST (6) 85#define ISCSI_OPCODE_LOGOUT_REQUEST (6)
85 86
86/* iSCSI response/messages op codes */ 87/* iSCSI response/messages op codes */
87#define ISCSI_OPCODE_NOP_IN (0x20) 88#define ISCSI_OPCODE_NOP_IN (0x20)
88#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) 89#define ISCSI_OPCODE_SCSI_RESPONSE (0x21)
89#define ISCSI_OPCODE_TMF_RESPONSE (0x22) 90#define ISCSI_OPCODE_TMF_RESPONSE (0x22)
90#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) 91#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23)
91#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) 92#define ISCSI_OPCODE_TEXT_RESPONSE (0x24)
92#define ISCSI_OPCODE_DATA_IN (0x25) 93#define ISCSI_OPCODE_DATA_IN (0x25)
93#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) 94#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26)
94#define ISCSI_OPCODE_R2T (0x31) 95#define ISCSI_OPCODE_R2T (0x31)
95#define ISCSI_OPCODE_ASYNC_MSG (0x32) 96#define ISCSI_OPCODE_ASYNC_MSG (0x32)
96#define ISCSI_OPCODE_REJECT (0x3f) 97#define ISCSI_OPCODE_REJECT (0x3f)
97 98
98/* iSCSI stages */ 99/* iSCSI stages */
99#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) 100#define ISCSI_STAGE_SECURITY_NEGOTIATION (0)
100#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) 101#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1)
101#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) 102#define ISCSI_STAGE_FULL_FEATURE_PHASE (3)
102 103
103/* iSCSI CQE errors */ 104/* iSCSI CQE errors */
104#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) 105#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08)
105#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) 106#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
106#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) 107#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
108
109/* Union of data bd_opaque/ tq_tid */
110union bd_opaque_tq_union {
111 __le16 bd_opaque;
112 __le16 tq_tid;
113};
107 114
115/* iSCSI CQE error bitmap */
108struct cqe_error_bitmap { 116struct cqe_error_bitmap {
109 u8 cqe_error_status_bits; 117 u8 cqe_error_status_bits;
110#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 118#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
111#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 119#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
112#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 120#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
113#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 121#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
114#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 122#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
115#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 123#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
116#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 124#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1
117#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 125#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5
118#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 126#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1
119#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 127#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6
120#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 128#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1
121#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 129#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
122}; 130};
123 131
124union cqe_error_status { 132union cqe_error_status {
@@ -126,86 +134,133 @@ union cqe_error_status {
126 struct cqe_error_bitmap error_bits; 134 struct cqe_error_bitmap error_bits;
127}; 135};
128 136
137/* iSCSI PDU header as raw dwords */
129struct data_hdr { 138struct data_hdr {
130 __le32 data[12]; 139 __le32 data[12];
131}; 140};
132 141
133struct iscsi_async_msg_hdr { 142struct lun_mapper_addr_reserved {
134 __le16 reserved0; 143 struct regpair lun_mapper_addr;
135 u8 flags_attr; 144 u8 reserved0[8];
136#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F 145};
137#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 146
138#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 147/* rdif conetxt for dif on immediate */
139#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 148struct dif_on_immediate_params {
140 u8 opcode; 149 __le32 initial_ref_tag;
141 __le32 hdr_second_dword; 150 __le16 application_tag;
142#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 151 __le16 application_tag_mask;
143#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 152 __le16 flags1;
144#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF 153#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1
145#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 154#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0
146 struct regpair lun; 155#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1
147 __le32 all_ones; 156#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1
148 __le32 reserved1; 157#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1
149 __le32 stat_sn; 158#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2
150 __le32 exp_cmd_sn; 159#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1
151 __le32 max_cmd_sn; 160#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3
152 __le16 param1_rsrv; 161#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1
153 u8 async_vcode; 162#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4
154 u8 async_event; 163#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1
155 __le16 param3_rsrv; 164#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5
156 __le16 param2_rsrv; 165#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1
157 __le32 reserved7; 166#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6
167#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1
168#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7
169#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3
170#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8
171#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF
172#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10
173#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
174#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
175#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
176#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
177 u8 flags0;
178#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1
179#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0
180#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1
181#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1
182#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1
183#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
184#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1
185#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3
186#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3
187#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4
188#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1
189#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6
190#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1
191#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7
192 u8 reserved_zero[5];
193};
194
195/* iSCSI dif on immediate mode attributes union */
196union dif_configuration_params {
197 struct lun_mapper_addr_reserved lun_mapper_address;
198 struct dif_on_immediate_params def_dif_conf;
199};
200
201/* Union of data/r2t sequence number */
202union iscsi_seq_num {
203 __le16 data_sn;
204 __le16 r2t_sn;
158}; 205};
159 206
160struct iscsi_cmd_hdr { 207/* iSCSI DIF flags */
161 __le16 reserved1; 208struct iscsi_dif_flags {
162 u8 flags_attr; 209 u8 flags;
163#define ISCSI_CMD_HDR_ATTR_MASK 0x7 210#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
164#define ISCSI_CMD_HDR_ATTR_SHIFT 0 211#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
165#define ISCSI_CMD_HDR_RSRV_MASK 0x3 212#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
166#define ISCSI_CMD_HDR_RSRV_SHIFT 3 213#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
167#define ISCSI_CMD_HDR_WRITE_MASK 0x1 214#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
168#define ISCSI_CMD_HDR_WRITE_SHIFT 5 215#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
169#define ISCSI_CMD_HDR_READ_MASK 0x1
170#define ISCSI_CMD_HDR_READ_SHIFT 6
171#define ISCSI_CMD_HDR_FINAL_MASK 0x1
172#define ISCSI_CMD_HDR_FINAL_SHIFT 7
173 u8 hdr_first_byte;
174#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
175#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
176#define ISCSI_CMD_HDR_IMM_MASK 0x1
177#define ISCSI_CMD_HDR_IMM_SHIFT 6
178#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
179#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
180 __le32 hdr_second_dword;
181#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
182#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
183#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
184#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
185 struct regpair lun;
186 __le32 itt;
187 __le32 expected_transfer_length;
188 __le32 cmd_sn;
189 __le32 exp_stat_sn;
190 __le32 cdb[4];
191}; 216};
192 217
218/* The iscsi storm task context of Ystorm */
219struct ystorm_iscsi_task_state {
220 struct scsi_cached_sges data_desc;
221 struct scsi_sgl_params sgl_params;
222 __le32 exp_r2t_sn;
223 __le32 buffer_offset;
224 union iscsi_seq_num seq_num;
225 struct iscsi_dif_flags dif_flags;
226 u8 flags;
227#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
228#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
229#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
230#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
231#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
232#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
233#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F
234#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3
235};
236
237/* The retransmit optimization part of the iscsi task context of Ystorm */
238struct ystorm_iscsi_task_rxmit_opt {
239 __le32 fast_rxmit_sge_offset;
240 __le32 scan_start_buffer_offset;
241 __le32 fast_rxmit_buffer_offset;
242 u8 scan_start_sgl_index;
243 u8 fast_rxmit_sgl_index;
244 __le16 reserved;
245};
246
247/* iSCSI Common PDU header */
193struct iscsi_common_hdr { 248struct iscsi_common_hdr {
194 u8 hdr_status; 249 u8 hdr_status;
195 u8 hdr_response; 250 u8 hdr_response;
196 u8 hdr_flags; 251 u8 hdr_flags;
197 u8 hdr_first_byte; 252 u8 hdr_first_byte;
198#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F 253#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F
199#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 254#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0
200#define ISCSI_COMMON_HDR_IMM_MASK 0x1 255#define ISCSI_COMMON_HDR_IMM_MASK 0x1
201#define ISCSI_COMMON_HDR_IMM_SHIFT 6 256#define ISCSI_COMMON_HDR_IMM_SHIFT 6
202#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 257#define ISCSI_COMMON_HDR_RSRV_MASK 0x1
203#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 258#define ISCSI_COMMON_HDR_RSRV_SHIFT 7
204 __le32 hdr_second_dword; 259 __le32 hdr_second_dword;
205#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 260#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
206#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 261#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
207#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF 262#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
208#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 263#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
209 struct regpair lun_reserved; 264 struct regpair lun_reserved;
210 __le32 itt; 265 __le32 itt;
211 __le32 ttt; 266 __le32 ttt;
@@ -215,86 +270,60 @@ struct iscsi_common_hdr {
215 __le32 data[3]; 270 __le32 data[3];
216}; 271};
217 272
218struct iscsi_conn_offload_params { 273/* iSCSI Command PDU header */
219 struct regpair sq_pbl_addr; 274struct iscsi_cmd_hdr {
220 struct regpair r2tq_pbl_addr; 275 __le16 reserved1;
221 struct regpair xhq_pbl_addr; 276 u8 flags_attr;
222 struct regpair uhq_pbl_addr; 277#define ISCSI_CMD_HDR_ATTR_MASK 0x7
223 __le32 initial_ack; 278#define ISCSI_CMD_HDR_ATTR_SHIFT 0
224 __le16 physical_q0; 279#define ISCSI_CMD_HDR_RSRV_MASK 0x3
225 __le16 physical_q1; 280#define ISCSI_CMD_HDR_RSRV_SHIFT 3
226 u8 flags; 281#define ISCSI_CMD_HDR_WRITE_MASK 0x1
227#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 282#define ISCSI_CMD_HDR_WRITE_SHIFT 5
228#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 283#define ISCSI_CMD_HDR_READ_MASK 0x1
229#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 284#define ISCSI_CMD_HDR_READ_SHIFT 6
230#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 285#define ISCSI_CMD_HDR_FINAL_MASK 0x1
231#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 286#define ISCSI_CMD_HDR_FINAL_SHIFT 7
232#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 287 u8 hdr_first_byte;
233#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F 288#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F
234#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 289#define ISCSI_CMD_HDR_OPCODE_SHIFT 0
235 u8 pbl_page_size_log; 290#define ISCSI_CMD_HDR_IMM_MASK 0x1
236 u8 pbe_page_size_log; 291#define ISCSI_CMD_HDR_IMM_SHIFT 6
237 u8 default_cq; 292#define ISCSI_CMD_HDR_RSRV1_MASK 0x1
238 __le32 stat_sn; 293#define ISCSI_CMD_HDR_RSRV1_SHIFT 7
239}; 294 __le32 hdr_second_dword;
240 295#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
241struct iscsi_slow_path_hdr { 296#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0
242 u8 op_code; 297#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF
243 u8 flags; 298#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
244#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF 299 struct regpair lun;
245#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 300 __le32 itt;
246#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 301 __le32 expected_transfer_length;
247#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 302 __le32 cmd_sn;
248#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
249#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
250};
251
252struct iscsi_conn_update_ramrod_params {
253 struct iscsi_slow_path_hdr hdr;
254 __le16 conn_id;
255 __le32 fw_cid;
256 u8 flags;
257#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
258#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
259#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
260#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
261#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
262#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
263#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
264#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
265#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
266#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
267#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
268#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
269#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
270#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
271 u8 reserved0[3];
272 __le32 max_seq_size;
273 __le32 max_send_pdu_length;
274 __le32 max_recv_pdu_length;
275 __le32 first_seq_length;
276 __le32 exp_stat_sn; 303 __le32 exp_stat_sn;
304 __le32 cdb[4];
277}; 305};
278 306
307/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
279struct iscsi_ext_cdb_cmd_hdr { 308struct iscsi_ext_cdb_cmd_hdr {
280 __le16 reserved1; 309 __le16 reserved1;
281 u8 flags_attr; 310 u8 flags_attr;
282#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 311#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7
283#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 312#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0
284#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 313#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3
285#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 314#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3
286#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 315#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1
287#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 316#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5
288#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 317#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1
289#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 318#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6
290#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 319#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1
291#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 320#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7
292 u8 opcode; 321 u8 opcode;
293 __le32 hdr_second_dword; 322 __le32 hdr_second_dword;
294#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 323#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
295#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 324#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
296#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF 325#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF
297#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 326#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24
298 struct regpair lun; 327 struct regpair lun;
299 __le32 itt; 328 __le32 itt;
300 __le32 expected_transfer_length; 329 __le32 expected_transfer_length;
@@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr {
303 struct scsi_sge cdb_sge; 332 struct scsi_sge cdb_sge;
304}; 333};
305 334
335/* iSCSI login request PDU header */
306struct iscsi_login_req_hdr { 336struct iscsi_login_req_hdr {
307 u8 version_min; 337 u8 version_min;
308 u8 version_max; 338 u8 version_max;
309 u8 flags_attr; 339 u8 flags_attr;
310#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 340#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3
311#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 341#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0
312#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 342#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3
313#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 343#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2
314#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 344#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3
315#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 345#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
316#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 346#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1
317#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 347#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6
318#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 348#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1
319#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 349#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7
320 u8 opcode; 350 u8 opcode;
321 __le32 hdr_second_dword; 351 __le32 hdr_second_dword;
322#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 352#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
323#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 353#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
324#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF 354#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
325#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 355#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
326 __le32 isid_tabc; 356 __le32 isid_tabc;
327 __le16 tsih; 357 __le16 tsih;
328 __le16 isid_d; 358 __le16 isid_d;
@@ -334,6 +364,7 @@ struct iscsi_login_req_hdr {
334 __le32 reserved2[4]; 364 __le32 reserved2[4];
335}; 365};
336 366
367/* iSCSI logout request PDU header */
337struct iscsi_logout_req_hdr { 368struct iscsi_logout_req_hdr {
338 __le16 reserved0; 369 __le16 reserved0;
339 u8 reason_code; 370 u8 reason_code;
@@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr {
348 __le32 reserved4[4]; 379 __le32 reserved4[4];
349}; 380};
350 381
382/* iSCSI Data-out PDU header */
351struct iscsi_data_out_hdr { 383struct iscsi_data_out_hdr {
352 __le16 reserved1; 384 __le16 reserved1;
353 u8 flags_attr; 385 u8 flags_attr;
354#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F 386#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F
355#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 387#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0
356#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 388#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1
357#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 389#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
358 u8 opcode; 390 u8 opcode;
359 __le32 reserved2; 391 __le32 reserved2;
360 struct regpair lun; 392 struct regpair lun;
@@ -368,22 +400,23 @@ struct iscsi_data_out_hdr {
368 __le32 reserved5; 400 __le32 reserved5;
369}; 401};
370 402
403/* iSCSI Data-in PDU header */
371struct iscsi_data_in_hdr { 404struct iscsi_data_in_hdr {
372 u8 status_rsvd; 405 u8 status_rsvd;
373 u8 reserved1; 406 u8 reserved1;
374 u8 flags; 407 u8 flags;
375#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 408#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1
376#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 409#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0
377#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 410#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1
378#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 411#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
379#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 412#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1
380#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 413#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2
381#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 414#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7
382#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 415#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3
383#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 416#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1
384#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 417#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6
385#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 418#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1
386#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 419#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7
387 u8 opcode; 420 u8 opcode;
388 __le32 reserved2; 421 __le32 reserved2;
389 struct regpair lun; 422 struct regpair lun;
@@ -397,6 +430,7 @@ struct iscsi_data_in_hdr {
397 __le32 residual_count; 430 __le32 residual_count;
398}; 431};
399 432
433/* iSCSI R2T PDU header */
400struct iscsi_r2t_hdr { 434struct iscsi_r2t_hdr {
401 u8 reserved0[3]; 435 u8 reserved0[3];
402 u8 opcode; 436 u8 opcode;
@@ -412,13 +446,14 @@ struct iscsi_r2t_hdr {
412 __le32 desired_data_trns_len; 446 __le32 desired_data_trns_len;
413}; 447};
414 448
449/* iSCSI NOP-out PDU header */
415struct iscsi_nop_out_hdr { 450struct iscsi_nop_out_hdr {
416 __le16 reserved1; 451 __le16 reserved1;
417 u8 flags_attr; 452 u8 flags_attr;
418#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F 453#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F
419#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 454#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0
420#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 455#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1
421#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 456#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
422 u8 opcode; 457 u8 opcode;
423 __le32 reserved2; 458 __le32 reserved2;
424 struct regpair lun; 459 struct regpair lun;
@@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr {
432 __le32 reserved6; 467 __le32 reserved6;
433}; 468};
434 469
470/* iSCSI NOP-in PDU header */
435struct iscsi_nop_in_hdr { 471struct iscsi_nop_in_hdr {
436 __le16 reserved0; 472 __le16 reserved0;
437 u8 flags_attr; 473 u8 flags_attr;
438#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F 474#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F
439#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 475#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0
440#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 476#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1
441#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 477#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7
442 u8 opcode; 478 u8 opcode;
443 __le32 hdr_second_dword; 479 __le32 hdr_second_dword;
444#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 480#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
445#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 481#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0
446#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF 482#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF
447#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 483#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
448 struct regpair lun; 484 struct regpair lun;
449 __le32 itt; 485 __le32 itt;
450 __le32 ttt; 486 __le32 ttt;
@@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr {
456 __le32 reserved7; 492 __le32 reserved7;
457}; 493};
458 494
495/* iSCSI Login Response PDU header */
459struct iscsi_login_response_hdr { 496struct iscsi_login_response_hdr {
460 u8 version_active; 497 u8 version_active;
461 u8 version_max; 498 u8 version_max;
462 u8 flags_attr; 499 u8 flags_attr;
463#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 500#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3
464#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 501#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0
465#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 502#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3
466#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 503#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2
467#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 504#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3
468#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 505#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4
469#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 506#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1
470#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 507#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6
471#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 508#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1
472#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 509#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7
473 u8 opcode; 510 u8 opcode;
474 __le32 hdr_second_dword; 511 __le32 hdr_second_dword;
475#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 512#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
476#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 513#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
477#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF 514#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
478#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 515#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
479 __le32 isid_tabc; 516 __le32 isid_tabc;
480 __le16 tsih; 517 __le16 tsih;
481 __le16 isid_d; 518 __le16 isid_d;
@@ -490,16 +527,17 @@ struct iscsi_login_response_hdr {
490 __le32 reserved4[2]; 527 __le32 reserved4[2];
491}; 528};
492 529
530/* iSCSI Logout Response PDU header */
493struct iscsi_logout_response_hdr { 531struct iscsi_logout_response_hdr {
494 u8 reserved1; 532 u8 reserved1;
495 u8 response; 533 u8 response;
496 u8 flags; 534 u8 flags;
497 u8 opcode; 535 u8 opcode;
498 __le32 hdr_second_dword; 536 __le32 hdr_second_dword;
499#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 537#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
500#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 538#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
501#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF 539#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
502#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 540#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
503 __le32 reserved2[2]; 541 __le32 reserved2[2];
504 __le32 itt; 542 __le32 itt;
505 __le32 reserved3; 543 __le32 reserved3;
@@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr {
512 __le32 reserved5[1]; 550 __le32 reserved5[1];
513}; 551};
514 552
553/* iSCSI Text Request PDU header */
515struct iscsi_text_request_hdr { 554struct iscsi_text_request_hdr {
516 __le16 reserved0; 555 __le16 reserved0;
517 u8 flags_attr; 556 u8 flags_attr;
518#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F 557#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F
519#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 558#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0
520#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 559#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1
521#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 560#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6
522#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 561#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1
523#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 562#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7
524 u8 opcode; 563 u8 opcode;
525 __le32 hdr_second_dword; 564 __le32 hdr_second_dword;
526#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 565#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
527#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 566#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
528#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF 567#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
529#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 568#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
530 struct regpair lun; 569 struct regpair lun;
531 __le32 itt; 570 __le32 itt;
532 __le32 ttt; 571 __le32 ttt;
@@ -535,21 +574,22 @@ struct iscsi_text_request_hdr {
535 __le32 reserved4[4]; 574 __le32 reserved4[4];
536}; 575};
537 576
577/* iSCSI Text Response PDU header */
538struct iscsi_text_response_hdr { 578struct iscsi_text_response_hdr {
539 __le16 reserved1; 579 __le16 reserved1;
540 u8 flags; 580 u8 flags;
541#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F 581#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F
542#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 582#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0
543#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 583#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1
544#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 584#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6
545#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 585#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1
546#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 586#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7
547 u8 opcode; 587 u8 opcode;
548 __le32 hdr_second_dword; 588 __le32 hdr_second_dword;
549#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 589#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
550#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 590#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
551#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF 591#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
552#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 592#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
553 struct regpair lun; 593 struct regpair lun;
554 __le32 itt; 594 __le32 itt;
555 __le32 ttt; 595 __le32 ttt;
@@ -559,15 +599,16 @@ struct iscsi_text_response_hdr {
559 __le32 reserved4[3]; 599 __le32 reserved4[3];
560}; 600};
561 601
602/* iSCSI TMF Request PDU header */
562struct iscsi_tmf_request_hdr { 603struct iscsi_tmf_request_hdr {
563 __le16 reserved0; 604 __le16 reserved0;
564 u8 function; 605 u8 function;
565 u8 opcode; 606 u8 opcode;
566 __le32 hdr_second_dword; 607 __le32 hdr_second_dword;
567#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 608#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
568#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 609#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0
569#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF 610#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF
570#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 611#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
571 struct regpair lun; 612 struct regpair lun;
572 __le32 itt; 613 __le32 itt;
573 __le32 rtt; 614 __le32 rtt;
@@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr {
584 u8 hdr_flags; 625 u8 hdr_flags;
585 u8 opcode; 626 u8 opcode;
586 __le32 hdr_second_dword; 627 __le32 hdr_second_dword;
587#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 628#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
588#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 629#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
589#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF 630#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
590#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 631#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
591 struct regpair reserved0; 632 struct regpair reserved0;
592 __le32 itt; 633 __le32 itt;
593 __le32 reserved1; 634 __le32 reserved1;
@@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr {
597 __le32 reserved4[3]; 638 __le32 reserved4[3];
598}; 639};
599 640
641/* iSCSI Response PDU header */
600struct iscsi_response_hdr { 642struct iscsi_response_hdr {
601 u8 hdr_status; 643 u8 hdr_status;
602 u8 hdr_response; 644 u8 hdr_response;
603 u8 hdr_flags; 645 u8 hdr_flags;
604 u8 opcode; 646 u8 opcode;
605 __le32 hdr_second_dword; 647 __le32 hdr_second_dword;
606#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 648#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
607#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 649#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
608#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF 650#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
609#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 651#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
610 struct regpair lun; 652 struct regpair lun;
611 __le32 itt; 653 __le32 itt;
612 __le32 snack_tag; 654 __le32 snack_tag;
@@ -618,16 +660,17 @@ struct iscsi_response_hdr {
618 __le32 residual_count; 660 __le32 residual_count;
619}; 661};
620 662
663/* iSCSI Reject PDU header */
621struct iscsi_reject_hdr { 664struct iscsi_reject_hdr {
622 u8 reserved4; 665 u8 reserved4;
623 u8 hdr_reason; 666 u8 hdr_reason;
624 u8 hdr_flags; 667 u8 hdr_flags;
625 u8 opcode; 668 u8 opcode;
626 __le32 hdr_second_dword; 669 __le32 hdr_second_dword;
627#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF 670#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
628#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 671#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0
629#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF 672#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
630#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 673#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
631 struct regpair reserved0; 674 struct regpair reserved0;
632 __le32 all_ones; 675 __le32 all_ones;
633 __le32 reserved2; 676 __le32 reserved2;
@@ -638,6 +681,35 @@ struct iscsi_reject_hdr {
638 __le32 reserved3[2]; 681 __le32 reserved3[2];
639}; 682};
640 683
684/* iSCSI Asynchronous Message PDU header */
685struct iscsi_async_msg_hdr {
686 __le16 reserved0;
687 u8 flags_attr;
688#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F
689#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0
690#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1
691#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7
692 u8 opcode;
693 __le32 hdr_second_dword;
694#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF
695#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
696#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
697#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
698 struct regpair lun;
699 __le32 all_ones;
700 __le32 reserved1;
701 __le32 stat_sn;
702 __le32 exp_cmd_sn;
703 __le32 max_cmd_sn;
704 __le16 param1_rsrv;
705 u8 async_vcode;
706 u8 async_event;
707 __le16 param3_rsrv;
708 __le16 param2_rsrv;
709 __le32 reserved7;
710};
711
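The PDU header structures above all follow the same packing convention: hdr_second_dword carries the 24-bit data segment length in its low bits and the 8-bit total AHS length in the top byte, and every packed field is described by a _MASK/_SHIFT pair. A minimal sketch of how such a field might be read or written, assuming the usual convention that the value is shifted right by _SHIFT before the mask is applied (HSI_GET_FIELD/HSI_SET_FIELD are illustrative names, not macros defined by this header):

/* Illustrative field accessors; a real driver may already have its own
 * GET_FIELD/SET_FIELD macros. Assumed convention: field = (val >> SHIFT) & MASK.
 */
#define HSI_GET_FIELD(val, name) \
	(((val) >> name##_SHIFT) & name##_MASK)
#define HSI_SET_FIELD(val, name, fval) \
	((val) = ((val) & ~((name##_MASK) << (name##_SHIFT))) | \
		 (((fval) & (name##_MASK)) << (name##_SHIFT)))

/* Example: pull the data segment length out of an Async Message header. */
static u32 async_msg_data_seg_len(const struct iscsi_async_msg_hdr *hdr)
{
	u32 dword = le32_to_cpu(hdr->hdr_second_dword);

	return HSI_GET_FIELD(dword, ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN);
}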
712/* PDU header part of Ystorm task context */
641union iscsi_task_hdr { 713union iscsi_task_hdr {
642 struct iscsi_common_hdr common; 714 struct iscsi_common_hdr common;
643 struct data_hdr data; 715 struct data_hdr data;
@@ -661,6 +733,348 @@ union iscsi_task_hdr {
661 struct iscsi_async_msg_hdr async_msg; 733 struct iscsi_async_msg_hdr async_msg;
662}; 734};
663 735
736/* The iscsi storm task context of Ystorm */
737struct ystorm_iscsi_task_st_ctx {
738 struct ystorm_iscsi_task_state state;
739 struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
740 union iscsi_task_hdr pdu_hdr;
741};
742
743struct e4_ystorm_iscsi_task_ag_ctx {
744 u8 reserved;
745 u8 byte1;
746 __le16 word0;
747 u8 flags0;
748#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
749#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
750#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
751#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
752#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
753#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
754#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
755#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
756#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
757#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
758 u8 flags1;
759#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
760#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
761#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
762#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
763#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
764#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
765#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
766#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
767#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
768#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
769 u8 flags2;
770#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
771#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
772#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
773#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
774#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
775#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
776#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
777#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
778#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
779#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
780#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
781#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
782#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
783#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
784#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
785#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
786 u8 byte2;
787 __le32 TTT;
788 u8 byte3;
789 u8 byte4;
790 __le16 word1;
791};
792
793struct e4_mstorm_iscsi_task_ag_ctx {
794 u8 cdu_validation;
795 u8 byte1;
796 __le16 task_cid;
797 u8 flags0;
798#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
799#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
800#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
801#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
802#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
803#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
804#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
805#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
806#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
807#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
808 u8 flags1;
809#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
810#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
811#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
812#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
813#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
814#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
815#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
816#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
817#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
818#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
819 u8 flags2;
820#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
821#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
822#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
823#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
824#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
825#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
826#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
827#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
828#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
829#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
830#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
831#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
832#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
833#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
834#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
835#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
836 u8 byte2;
837 __le32 reg0;
838 u8 byte3;
839 u8 byte4;
840 __le16 word1;
841};
842
843struct e4_ustorm_iscsi_task_ag_ctx {
844 u8 reserved;
845 u8 state;
846 __le16 icid;
847 u8 flags0;
848#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
849#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
850#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
851#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
852#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
853#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
854#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
855#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
856 u8 flags1;
857#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
858#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
859#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
860#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
861#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
862#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
863#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
864#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
865 u8 flags2;
866#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
867#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
868#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
869#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
870#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
871#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
872#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
873#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
874#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
875#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
876#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
877#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
878#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
879#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
880#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
881#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
882 u8 flags3;
883#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
884#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
885#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
886#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
887#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
888#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
889#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
890#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
891#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
892#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
893 __le32 dif_err_intervals;
894 __le32 dif_error_1st_interval;
895 __le32 rcv_cont_len;
896 __le32 exp_cont_len;
897 __le32 total_data_acked;
898 __le32 exp_data_acked;
899 u8 next_tid_valid;
900 u8 byte3;
901 __le16 word1;
902 __le16 next_tid;
903 __le16 word3;
904 __le32 hdr_residual_count;
905 __le32 exp_r2t_sn;
906};
907
908/* The iscsi storm task context of Mstorm */
909struct mstorm_iscsi_task_st_ctx {
910 struct scsi_cached_sges data_desc;
911 struct scsi_sgl_params sgl_params;
912 __le32 rem_task_size;
913 __le32 data_buffer_offset;
914 u8 task_type;
915 struct iscsi_dif_flags dif_flags;
916 __le16 dif_task_icid;
917 struct regpair sense_db;
918 __le32 expected_itt;
919 __le32 reserved1;
920};
921
922struct iscsi_reg1 {
923 __le32 reg1_map;
924#define ISCSI_REG1_NUM_SGES_MASK 0xF
925#define ISCSI_REG1_NUM_SGES_SHIFT 0
926#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
927#define ISCSI_REG1_RESERVED1_SHIFT 4
928};
929
930struct tqe_opaque {
931 __le16 opaque[2];
932};
933
934/* The iscsi storm task context of Ustorm */
935struct ustorm_iscsi_task_st_ctx {
936 __le32 rem_rcv_len;
937 __le32 exp_data_transfer_len;
938 __le32 exp_data_sn;
939 struct regpair lun;
940 struct iscsi_reg1 reg1;
941 u8 flags2;
942#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
943#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
944#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
945#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
946 struct iscsi_dif_flags dif_flags;
947 __le16 reserved3;
948 struct tqe_opaque tqe_opaque_list;
949 __le32 reserved5;
950 __le32 reserved6;
951 __le32 reserved7;
952 u8 task_type;
953 u8 error_flags;
954#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
955#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
956#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
957#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
958#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
959#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
960#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
961#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
962 u8 flags;
963#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
964#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
965#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
966#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
967#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
968#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
969#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
970#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
971#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
972#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
973#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
974#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
975#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
976#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
977 u8 cq_rss_number;
978};
979
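For the uStorm task state above, the per-task error bits live in error_flags and are meant to be tested with their _MASK/_SHIFT pairs. A small sketch using the illustrative HSI_GET_FIELD helper from the earlier sketch:

/* Returns true if the firmware flagged a data-digest error on this task. */
static bool task_has_data_digest_error(const struct ustorm_iscsi_task_st_ctx *ctx)
{
	return HSI_GET_FIELD(ctx->error_flags,
			     USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR);
}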
980/* iscsi task context */
981struct e4_iscsi_task_context {
982 struct ystorm_iscsi_task_st_ctx ystorm_st_context;
983 struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
984 struct regpair ystorm_ag_padding[2];
985 struct tdif_task_context tdif_context;
986 struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
987 struct regpair mstorm_ag_padding[2];
988 struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
989 struct mstorm_iscsi_task_st_ctx mstorm_st_context;
990 struct ustorm_iscsi_task_st_ctx ustorm_st_context;
991 struct rdif_task_context rdif_context;
992};
993
994/* iSCSI connection offload params passed by driver to FW in ISCSI offload
995 * ramrod.
996 */
997struct iscsi_conn_offload_params {
998 struct regpair sq_pbl_addr;
999 struct regpair r2tq_pbl_addr;
1000 struct regpair xhq_pbl_addr;
1001 struct regpair uhq_pbl_addr;
1002 __le32 initial_ack;
1003 __le16 physical_q0;
1004 __le16 physical_q1;
1005 u8 flags;
1006#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
1007#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
1008#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
1009#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
1010#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
1011#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
1012#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
1013#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
1014 u8 pbl_page_size_log;
1015 u8 pbe_page_size_log;
1016 u8 default_cq;
1017 __le32 stat_sn;
1018};
1019
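A sketch of how a driver might fill the flags byte of the offload params above, again using the illustrative HSI_SET_FIELD helper; the initiator/target choice and the CQ index are placeholders chosen here for illustration, not values prescribed by this header:

static void iscsi_fill_offload_flags(struct iscsi_conn_offload_params *p,
				     bool target_mode)
{
	u8 flags = 0;

	HSI_SET_FIELD(flags, ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 0);
	HSI_SET_FIELD(flags, ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE,
		      target_mode ? 1 : 0);
	p->flags = flags;
	p->default_cq = 0;			/* placeholder CQ index */
	p->stat_sn = cpu_to_le32(0);		/* placeholder initial StatSN */
}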
1020/* iSCSI connection statistics */
1021struct iscsi_conn_stats_params {
1022 struct regpair iscsi_tcp_tx_packets_cnt;
1023 struct regpair iscsi_tcp_tx_bytes_cnt;
1024 struct regpair iscsi_tcp_tx_rxmit_cnt;
1025 struct regpair iscsi_tcp_rx_packets_cnt;
1026 struct regpair iscsi_tcp_rx_bytes_cnt;
1027 struct regpair iscsi_tcp_rx_dup_ack_cnt;
1028 __le32 iscsi_tcp_rx_chksum_err_cnt;
1029 __le32 reserved;
1030};
1031
1032/* spe message header */
1033struct iscsi_slow_path_hdr {
1034 u8 op_code;
1035 u8 flags;
1036#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
1037#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
1038#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
1039#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
1040#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
1041#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
1042};
1043
1044/* iSCSI connection update params passed by driver to FW in ISCSI update
 1045 * ramrod.
1046 */
1047struct iscsi_conn_update_ramrod_params {
1048 struct iscsi_slow_path_hdr hdr;
1049 __le16 conn_id;
1050 __le32 fw_cid;
1051 u8 flags;
1052#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
1053#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
1054#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
1055#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
1056#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1
1057#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
1058#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
1059#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
1060#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
1061#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
1062#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
1063#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
1064#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1
1065#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
1066#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
1067#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
1068 u8 reserved0[3];
1069 __le32 max_seq_size;
1070 __le32 max_send_pdu_length;
1071 __le32 max_recv_pdu_length;
1072 __le32 first_seq_length;
1073 __le32 exp_stat_sn;
1074 union dif_configuration_params dif_on_imme_params;
1075};
1076
1077/* iSCSI CQ element */
664struct iscsi_cqe_common { 1078struct iscsi_cqe_common {
665 __le16 conn_id; 1079 __le16 conn_id;
666 u8 cqe_type; 1080 u8 cqe_type;
@@ -669,6 +1083,7 @@ struct iscsi_cqe_common {
669 union iscsi_task_hdr iscsi_hdr; 1083 union iscsi_task_hdr iscsi_hdr;
670}; 1084};
671 1085
1086/* iSCSI CQ element */
672struct iscsi_cqe_solicited { 1087struct iscsi_cqe_solicited {
673 __le16 conn_id; 1088 __le16 conn_id;
674 u8 cqe_type; 1089 u8 cqe_type;
@@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited {
678 u8 fw_dbg_field; 1093 u8 fw_dbg_field;
679 u8 caused_conn_err; 1094 u8 caused_conn_err;
680 u8 reserved0[3]; 1095 u8 reserved0[3];
681 __le32 reserved1[1]; 1096 __le32 data_truncated_bytes;
682 union iscsi_task_hdr iscsi_hdr; 1097 union iscsi_task_hdr iscsi_hdr;
683}; 1098};
684 1099
1100/* iSCSI CQ element */
685struct iscsi_cqe_unsolicited { 1101struct iscsi_cqe_unsolicited {
686 __le16 conn_id; 1102 __le16 conn_id;
687 u8 cqe_type; 1103 u8 cqe_type;
@@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited {
689 __le16 reserved0; 1105 __le16 reserved0;
690 u8 reserved1; 1106 u8 reserved1;
691 u8 unsol_cqe_type; 1107 u8 unsol_cqe_type;
692 struct regpair rqe_opaque; 1108 __le16 rqe_opaque;
1109 __le16 reserved2[3];
693 union iscsi_task_hdr iscsi_hdr; 1110 union iscsi_task_hdr iscsi_hdr;
694}; 1111};
695 1112
1113/* iSCSI CQ element */
696union iscsi_cqe { 1114union iscsi_cqe {
697 struct iscsi_cqe_common cqe_common; 1115 struct iscsi_cqe_common cqe_common;
698 struct iscsi_cqe_solicited cqe_solicited; 1116 struct iscsi_cqe_solicited cqe_solicited;
699 struct iscsi_cqe_unsolicited cqe_unsolicited; 1117 struct iscsi_cqe_unsolicited cqe_unsolicited;
700}; 1118};
701 1119
1120/* iSCSI CQE type */
702enum iscsi_cqes_type { 1121enum iscsi_cqes_type {
703 ISCSI_CQE_TYPE_SOLICITED = 1, 1122 ISCSI_CQE_TYPE_SOLICITED = 1,
704 ISCSI_CQE_TYPE_UNSOLICITED, 1123 ISCSI_CQE_TYPE_UNSOLICITED,
@@ -708,6 +1127,7 @@ enum iscsi_cqes_type {
708 MAX_ISCSI_CQES_TYPE 1127 MAX_ISCSI_CQES_TYPE
709}; 1128};
710 1129
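Since every CQE variant above starts with the same conn_id/cqe_type prefix, the common view can be used to classify an element before the solicited or unsolicited layout is touched. A small sketch, limited to the enumerators visible in this hunk:

/* Classify a completion element via the common prefix. */
static bool iscsi_cqe_is_unsolicited(const union iscsi_cqe *cqe)
{
	return cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED;
}

static u16 iscsi_cqe_conn_id(const union iscsi_cqe *cqe)
{
	return le16_to_cpu(cqe->cqe_common.conn_id);
}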
1130/* iSCSI CQE type */
711enum iscsi_cqe_unsolicited_type { 1131enum iscsi_cqe_unsolicited_type {
712 ISCSI_CQE_UNSOLICITED_NONE, 1132 ISCSI_CQE_UNSOLICITED_NONE,
713 ISCSI_CQE_UNSOLICITED_SINGLE, 1133 ISCSI_CQE_UNSOLICITED_SINGLE,
@@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type {
717 MAX_ISCSI_CQE_UNSOLICITED_TYPE 1137 MAX_ISCSI_CQE_UNSOLICITED_TYPE
718}; 1138};
719 1139
720 1140/* iscsi debug modes */
721struct iscsi_debug_modes { 1141struct iscsi_debug_modes {
722 u8 flags; 1142 u8 flags;
723#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 1143#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
724#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 1144#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
725#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 1145#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
726#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 1146#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
727#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 1147#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
728#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 1148#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
729#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 1149#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
730#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 1150#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
731#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 1151#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
732#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 1152#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
733#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 1153#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
734#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 1154#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
735#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 1155#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1
736#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 1156#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6
737#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 1157#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1
738#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 1158#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
739}; 1159};
740 1160
741struct iscsi_dif_flags { 1161/* iSCSI kernel completion queue IDs */
742 u8 flags;
743#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF
744#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
745#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1
746#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4
747#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7
748#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5
749};
750
751enum iscsi_eqe_opcode { 1162enum iscsi_eqe_opcode {
752 ISCSI_EVENT_TYPE_INIT_FUNC = 0, 1163 ISCSI_EVENT_TYPE_INIT_FUNC = 0,
753 ISCSI_EVENT_TYPE_DESTROY_FUNC, 1164 ISCSI_EVENT_TYPE_DESTROY_FUNC,
@@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode {
756 ISCSI_EVENT_TYPE_CLEAR_SQ, 1167 ISCSI_EVENT_TYPE_CLEAR_SQ,
757 ISCSI_EVENT_TYPE_TERMINATE_CONN, 1168 ISCSI_EVENT_TYPE_TERMINATE_CONN,
758 ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, 1169 ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
1170 ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
759 ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, 1171 ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
760 ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, 1172 ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
761 RESERVED9,
762 ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, 1173 ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
763 ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, 1174 ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
764 ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, 1175 ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
@@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode {
772 MAX_ISCSI_EQE_OPCODE 1183 MAX_ISCSI_EQE_OPCODE
773}; 1184};
774 1185
1186/* iSCSI EQE and CQE completion status */
775enum iscsi_error_types { 1187enum iscsi_error_types {
776 ISCSI_STATUS_NONE = 0, 1188 ISCSI_STATUS_NONE = 0,
777 ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, 1189 ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
@@ -823,7 +1235,7 @@ enum iscsi_error_types {
823 MAX_ISCSI_ERROR_TYPES 1235 MAX_ISCSI_ERROR_TYPES
824}; 1236};
825 1237
826 1238/* iSCSI Ramrod Command IDs */
827enum iscsi_ramrod_cmd_id { 1239enum iscsi_ramrod_cmd_id {
828 ISCSI_RAMROD_CMD_ID_UNUSED = 0, 1240 ISCSI_RAMROD_CMD_ID_UNUSED = 0,
829 ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, 1241 ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
@@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id {
833 ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, 1245 ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
834 ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, 1246 ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
835 ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, 1247 ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
1248 ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
836 MAX_ISCSI_RAMROD_CMD_ID 1249 MAX_ISCSI_RAMROD_CMD_ID
837}; 1250};
838 1251
839struct iscsi_reg1 { 1252/* iSCSI connection termination request */
840 __le32 reg1_map;
841#define ISCSI_REG1_NUM_SGES_MASK 0xF
842#define ISCSI_REG1_NUM_SGES_SHIFT 0
843#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
844#define ISCSI_REG1_RESERVED1_SHIFT 4
845};
846
847union iscsi_seq_num {
848 __le16 data_sn;
849 __le16 r2t_sn;
850};
851
852struct iscsi_spe_conn_mac_update { 1253struct iscsi_spe_conn_mac_update {
853 struct iscsi_slow_path_hdr hdr; 1254 struct iscsi_slow_path_hdr hdr;
854 __le16 conn_id; 1255 __le16 conn_id;
@@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update {
859 u8 reserved0[2]; 1260 u8 reserved0[2];
860}; 1261};
861 1262
1263/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
1264 * iSCSI offload ramrod.
1265 */
862struct iscsi_spe_conn_offload { 1266struct iscsi_spe_conn_offload {
863 struct iscsi_slow_path_hdr hdr; 1267 struct iscsi_slow_path_hdr hdr;
864 __le16 conn_id; 1268 __le16 conn_id;
@@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload {
867 struct tcp_offload_params tcp; 1271 struct tcp_offload_params tcp;
868}; 1272};
869 1273
 1274/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
1275 * iSCSI offload ramrod.
1276 */
870struct iscsi_spe_conn_offload_option2 { 1277struct iscsi_spe_conn_offload_option2 {
871 struct iscsi_slow_path_hdr hdr; 1278 struct iscsi_slow_path_hdr hdr;
872 __le16 conn_id; 1279 __le16 conn_id;
@@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 {
875 struct tcp_offload_params_opt2 tcp; 1282 struct tcp_offload_params_opt2 tcp;
876}; 1283};
877 1284
1285/* iSCSI collect connection statistics request */
1286struct iscsi_spe_conn_statistics {
1287 struct iscsi_slow_path_hdr hdr;
1288 __le16 conn_id;
1289 __le32 fw_cid;
1290 u8 reset_stats;
1291 u8 reserved0[7];
1292 struct regpair stats_cnts_addr;
1293};
1294
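A sketch of how the statistics request above might be prepared before being posted on the slow path. It assumes the common regpair layout of two little-endian 32-bit halves (.lo/.hi) and sets only the fields visible here; handling of the slow-path header flags is omitted:

static void iscsi_prep_stats_request(struct iscsi_spe_conn_statistics *req,
				     u16 conn_id, u32 fw_cid,
				     dma_addr_t stats_pa)
{
	memset(req, 0, sizeof(*req));
	req->hdr.op_code = ISCSI_RAMROD_CMD_ID_CONN_STATS;
	req->conn_id = cpu_to_le16(conn_id);
	req->fw_cid = cpu_to_le32(fw_cid);
	req->reset_stats = 0;	/* collect without clearing */
	/* Assumed regpair layout: low half in .lo, high half in .hi */
	req->stats_cnts_addr.lo = cpu_to_le32(lower_32_bits(stats_pa));
	req->stats_cnts_addr.hi = cpu_to_le32(upper_32_bits(stats_pa));
}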
1295/* iSCSI connection termination request */
878struct iscsi_spe_conn_termination { 1296struct iscsi_spe_conn_termination {
879 struct iscsi_slow_path_hdr hdr; 1297 struct iscsi_slow_path_hdr hdr;
880 __le16 conn_id; 1298 __le16 conn_id;
@@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination {
885 struct regpair query_params_addr; 1303 struct regpair query_params_addr;
886}; 1304};
887 1305
1306/* iSCSI firmware function destroy parameters */
888struct iscsi_spe_func_dstry { 1307struct iscsi_spe_func_dstry {
889 struct iscsi_slow_path_hdr hdr; 1308 struct iscsi_slow_path_hdr hdr;
890 __le16 reserved0; 1309 __le16 reserved0;
891 __le32 reserved1; 1310 __le32 reserved1;
892}; 1311};
893 1312
1313/* iSCSI firmware function init parameters */
894struct iscsi_spe_func_init { 1314struct iscsi_spe_func_init {
895 struct iscsi_slow_path_hdr hdr; 1315 struct iscsi_slow_path_hdr hdr;
896 __le16 half_way_close_timeout; 1316 __le16 half_way_close_timeout;
@@ -898,283 +1318,19 @@ struct iscsi_spe_func_init {
898 u8 num_r2tq_pages_in_ring; 1318 u8 num_r2tq_pages_in_ring;
899 u8 num_uhq_pages_in_ring; 1319 u8 num_uhq_pages_in_ring;
900 u8 ll2_rx_queue_id; 1320 u8 ll2_rx_queue_id;
901 u8 ooo_enable; 1321 u8 flags;
1322#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
1323#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
1324#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
1325#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
902 struct iscsi_debug_modes debug_mode; 1326 struct iscsi_debug_modes debug_mode;
903 __le16 reserved1; 1327 __le16 reserved1;
904 __le32 reserved2; 1328 __le32 reserved2;
905 __le32 reserved3;
906 __le32 reserved4;
907 struct scsi_init_func_params func_params; 1329 struct scsi_init_func_params func_params;
908 struct scsi_init_func_queues q_params; 1330 struct scsi_init_func_queues q_params;
909}; 1331};
910 1332
911struct ystorm_iscsi_task_state { 1333/* iSCSI task type */
912 struct scsi_cached_sges data_desc;
913 struct scsi_sgl_params sgl_params;
914 __le32 exp_r2t_sn;
915 __le32 buffer_offset;
916 union iscsi_seq_num seq_num;
917 struct iscsi_dif_flags dif_flags;
918 u8 flags;
919#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
920#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
921#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
922#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
923#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
924#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
925};
926
927struct ystorm_iscsi_task_rxmit_opt {
928 __le32 fast_rxmit_sge_offset;
929 __le32 scan_start_buffer_offset;
930 __le32 fast_rxmit_buffer_offset;
931 u8 scan_start_sgl_index;
932 u8 fast_rxmit_sgl_index;
933 __le16 reserved;
934};
935
936struct ystorm_iscsi_task_st_ctx {
937 struct ystorm_iscsi_task_state state;
938 struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
939 union iscsi_task_hdr pdu_hdr;
940};
941
942struct ystorm_iscsi_task_ag_ctx {
943 u8 reserved;
944 u8 byte1;
945 __le16 word0;
946 u8 flags0;
947#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
948#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
949#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
950#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
951#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
952#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
953#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
954#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
955#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
956#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
957 u8 flags1;
958#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
959#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
960#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
961#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
962#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
963#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
964#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
965#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
966#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
967#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
968 u8 flags2;
969#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
970#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
971#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
972#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
973#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
974#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
975#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
976#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
977#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
978#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
979#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
980#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
981#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
982#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
983#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
984#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
985 u8 byte2;
986 __le32 TTT;
987 u8 byte3;
988 u8 byte4;
989 __le16 word1;
990};
991
992struct mstorm_iscsi_task_ag_ctx {
993 u8 cdu_validation;
994 u8 byte1;
995 __le16 task_cid;
996 u8 flags0;
997#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
998#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
999#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
1000#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
1001#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
1002#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
1003#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
1004#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
1005#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
1006#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
1007 u8 flags1;
1008#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
1009#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
1010#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
1011#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
1012#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
1013#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
1014#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
1015#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
1016#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
1017#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
1018 u8 flags2;
1019#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
1020#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
1021#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
1022#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
1023#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
1024#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
1025#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
1026#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
1027#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
1028#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
1029#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
1030#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
1031#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
1032#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
1033#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
1034#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
1035 u8 byte2;
1036 __le32 reg0;
1037 u8 byte3;
1038 u8 byte4;
1039 __le16 word1;
1040};
1041
1042struct ustorm_iscsi_task_ag_ctx {
1043 u8 reserved;
1044 u8 state;
1045 __le16 icid;
1046 u8 flags0;
1047#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
1048#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
1049#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
1050#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
1051#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
1052#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
1053#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
1054#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
1055 u8 flags1;
1056#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
1057#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
1058#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
1059#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
1060#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
1061#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
1062#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
1063#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
1064 u8 flags2;
1065#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
1066#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
1067#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
1068#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
1069#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
1070#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
1071#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
1072#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
1073#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
1074#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
1075#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
1076#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
1077#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
1078#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
1079#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
1080#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
1081 u8 flags3;
1082#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
1083#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
1084#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
1085#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
1086#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
1087#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
1088#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
1089#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
1090#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
1091#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
1092 __le32 dif_err_intervals;
1093 __le32 dif_error_1st_interval;
1094 __le32 rcv_cont_len;
1095 __le32 exp_cont_len;
1096 __le32 total_data_acked;
1097 __le32 exp_data_acked;
1098 u8 next_tid_valid;
1099 u8 byte3;
1100 __le16 word1;
1101 __le16 next_tid;
1102 __le16 word3;
1103 __le32 hdr_residual_count;
1104 __le32 exp_r2t_sn;
1105};
1106
1107struct mstorm_iscsi_task_st_ctx {
1108 struct scsi_cached_sges data_desc;
1109 struct scsi_sgl_params sgl_params;
1110 __le32 rem_task_size;
1111 __le32 data_buffer_offset;
1112 u8 task_type;
1113 struct iscsi_dif_flags dif_flags;
1114 u8 reserved0[2];
1115 struct regpair sense_db;
1116 __le32 expected_itt;
1117 __le32 reserved1;
1118};
1119
1120struct ustorm_iscsi_task_st_ctx {
1121 __le32 rem_rcv_len;
1122 __le32 exp_data_transfer_len;
1123 __le32 exp_data_sn;
1124 struct regpair lun;
1125 struct iscsi_reg1 reg1;
1126 u8 flags2;
1127#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1
1128#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
1129#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
1130#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
1131 struct iscsi_dif_flags dif_flags;
1132 __le16 reserved3;
1133 __le32 reserved4;
1134 __le32 reserved5;
1135 __le32 reserved6;
1136 __le32 reserved7;
1137 u8 task_type;
1138 u8 error_flags;
1139#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
1140#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
1141#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
1142#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
1143#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
1144#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
1145#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F
1146#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3
1147 u8 flags;
1148#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3
1149#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0
1150#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
1151#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
1152#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
1153#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
1154#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
1155#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
1156#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
1157#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
1158#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
1159#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
1160#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
1161#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7
1162 u8 cq_rss_number;
1163};
1164
1165struct iscsi_task_context {
1166 struct ystorm_iscsi_task_st_ctx ystorm_st_context;
1167 struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
1168 struct regpair ystorm_ag_padding[2];
1169 struct tdif_task_context tdif_context;
1170 struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
1171 struct regpair mstorm_ag_padding[2];
1172 struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
1173 struct mstorm_iscsi_task_st_ctx mstorm_st_context;
1174 struct ustorm_iscsi_task_st_ctx ustorm_st_context;
1175 struct rdif_task_context rdif_context;
1176};
1177
1178enum iscsi_task_type { 1334enum iscsi_task_type {
1179 ISCSI_TASK_TYPE_INITIATOR_WRITE, 1335 ISCSI_TASK_TYPE_INITIATOR_WRITE,
1180 ISCSI_TASK_TYPE_INITIATOR_READ, 1336 ISCSI_TASK_TYPE_INITIATOR_READ,
@@ -1186,53 +1342,57 @@ enum iscsi_task_type {
1186 ISCSI_TASK_TYPE_TARGET_READ, 1342 ISCSI_TASK_TYPE_TARGET_READ,
1187 ISCSI_TASK_TYPE_TARGET_RESPONSE, 1343 ISCSI_TASK_TYPE_TARGET_RESPONSE,
1188 ISCSI_TASK_TYPE_LOGIN_RESPONSE, 1344 ISCSI_TASK_TYPE_LOGIN_RESPONSE,
1345 ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
1189 MAX_ISCSI_TASK_TYPE 1346 MAX_ISCSI_TASK_TYPE
1190}; 1347};
1191 1348
1349/* iSCSI DesiredDataTransferLength/ttt union */
1192union iscsi_ttt_txlen_union { 1350union iscsi_ttt_txlen_union {
1193 __le32 desired_tx_len; 1351 __le32 desired_tx_len;
1194 __le32 ttt; 1352 __le32 ttt;
1195}; 1353};
1196 1354
1355/* iSCSI uHQ element */
1197struct iscsi_uhqe { 1356struct iscsi_uhqe {
1198 __le32 reg1; 1357 __le32 reg1;
1199#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF 1358#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF
1200#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 1359#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0
1201#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 1360#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1
1202#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 1361#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20
1203#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 1362#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1
1204#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 1363#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21
1205#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 1364#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1
1206#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 1365#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22
1207#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 1366#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1
1208#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 1367#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
1209#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF 1368#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF
1210#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 1369#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24
1211 __le32 reg2; 1370 __le32 reg2;
1212#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF 1371#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF
1213#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 1372#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
1214#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF 1373#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF
1215#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 1374#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
1216}; 1375};
1217 1376
1218 1377/* iSCSI WQ element */
1219struct iscsi_wqe { 1378struct iscsi_wqe {
1220 __le16 task_id; 1379 __le16 task_id;
1221 u8 flags; 1380 u8 flags;
1222#define ISCSI_WQE_WQE_TYPE_MASK 0x7 1381#define ISCSI_WQE_WQE_TYPE_MASK 0x7
1223#define ISCSI_WQE_WQE_TYPE_SHIFT 0 1382#define ISCSI_WQE_WQE_TYPE_SHIFT 0
1224#define ISCSI_WQE_NUM_SGES_MASK 0xF 1383#define ISCSI_WQE_NUM_SGES_MASK 0xF
1225#define ISCSI_WQE_NUM_SGES_SHIFT 3 1384#define ISCSI_WQE_NUM_SGES_SHIFT 3
1226#define ISCSI_WQE_RESPONSE_MASK 0x1 1385#define ISCSI_WQE_RESPONSE_MASK 0x1
1227#define ISCSI_WQE_RESPONSE_SHIFT 7 1386#define ISCSI_WQE_RESPONSE_SHIFT 7
1228 struct iscsi_dif_flags prot_flags; 1387 struct iscsi_dif_flags prot_flags;
1229 __le32 contlen_cdbsize; 1388 __le32 contlen_cdbsize;
1230#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF 1389#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
1231#define ISCSI_WQE_CONT_LEN_SHIFT 0 1390#define ISCSI_WQE_CONT_LEN_SHIFT 0
1232#define ISCSI_WQE_CDB_SIZE_MASK 0xFF 1391#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
1233#define ISCSI_WQE_CDB_SIZE_SHIFT 24 1392#define ISCSI_WQE_CDB_SIZE_SHIFT 24
1234}; 1393};
1235 1394
1395/* iSCSI wqe type */
1236enum iscsi_wqe_type { 1396enum iscsi_wqe_type {
1237 ISCSI_WQE_TYPE_NORMAL, 1397 ISCSI_WQE_TYPE_NORMAL,
1238 ISCSI_WQE_TYPE_TASK_CLEANUP, 1398 ISCSI_WQE_TYPE_TASK_CLEANUP,
@@ -1244,6 +1404,7 @@ enum iscsi_wqe_type {
1244 MAX_ISCSI_WQE_TYPE 1404 MAX_ISCSI_WQE_TYPE
1245}; 1405};
1246 1406
1407/* iSCSI xHQ element */
1247struct iscsi_xhqe { 1408struct iscsi_xhqe {
1248 union iscsi_ttt_txlen_union ttt_or_txlen; 1409 union iscsi_ttt_txlen_union ttt_or_txlen;
1249 __le32 exp_stat_sn; 1410 __le32 exp_stat_sn;
@@ -1251,120 +1412,134 @@ struct iscsi_xhqe {
1251 u8 total_ahs_length; 1412 u8 total_ahs_length;
1252 u8 opcode; 1413 u8 opcode;
1253 u8 flags; 1414 u8 flags;
1254#define ISCSI_XHQE_FINAL_MASK 0x1 1415#define ISCSI_XHQE_FINAL_MASK 0x1
1255#define ISCSI_XHQE_FINAL_SHIFT 0 1416#define ISCSI_XHQE_FINAL_SHIFT 0
1256#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 1417#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
1257#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 1418#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
1258#define ISCSI_XHQE_NUM_SGES_MASK 0xF 1419#define ISCSI_XHQE_NUM_SGES_MASK 0xF
1259#define ISCSI_XHQE_NUM_SGES_SHIFT 2 1420#define ISCSI_XHQE_NUM_SGES_SHIFT 2
1260#define ISCSI_XHQE_RESERVED0_MASK 0x3 1421#define ISCSI_XHQE_RESERVED0_MASK 0x3
1261#define ISCSI_XHQE_RESERVED0_SHIFT 6 1422#define ISCSI_XHQE_RESERVED0_SHIFT 6
1262 union iscsi_seq_num seq_num; 1423 union iscsi_seq_num seq_num;
1263 __le16 reserved1; 1424 __le16 reserved1;
1264}; 1425};
1265 1426
1427/* Per PF iSCSI receive path statistics - mStorm RAM structure */
1266struct mstorm_iscsi_stats_drv { 1428struct mstorm_iscsi_stats_drv {
1267 struct regpair iscsi_rx_dropped_pdus_task_not_valid; 1429 struct regpair iscsi_rx_dropped_pdus_task_not_valid;
1430 struct regpair iscsi_rx_dup_ack_cnt;
1268}; 1431};
1269 1432
1433/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
1270struct pstorm_iscsi_stats_drv { 1434struct pstorm_iscsi_stats_drv {
1271 struct regpair iscsi_tx_bytes_cnt; 1435 struct regpair iscsi_tx_bytes_cnt;
1272 struct regpair iscsi_tx_packet_cnt; 1436 struct regpair iscsi_tx_packet_cnt;
1273}; 1437};
1274 1438
1439/* Per PF iSCSI receive path statistics - tStorm RAM structure */
1275struct tstorm_iscsi_stats_drv { 1440struct tstorm_iscsi_stats_drv {
1276 struct regpair iscsi_rx_bytes_cnt; 1441 struct regpair iscsi_rx_bytes_cnt;
1277 struct regpair iscsi_rx_packet_cnt; 1442 struct regpair iscsi_rx_packet_cnt;
1278 struct regpair iscsi_rx_new_ooo_isle_events_cnt; 1443 struct regpair iscsi_rx_new_ooo_isle_events_cnt;
1444 struct regpair iscsi_rx_tcp_payload_bytes_cnt;
1445 struct regpair iscsi_rx_tcp_pkt_cnt;
1446 struct regpair iscsi_rx_pure_ack_cnt;
1279 __le32 iscsi_cmdq_threshold_cnt; 1447 __le32 iscsi_cmdq_threshold_cnt;
1280 __le32 iscsi_rq_threshold_cnt; 1448 __le32 iscsi_rq_threshold_cnt;
1281 __le32 iscsi_immq_threshold_cnt; 1449 __le32 iscsi_immq_threshold_cnt;
1282}; 1450};
1283 1451
1452/* Per PF iSCSI receive path statistics - uStorm RAM structure */
1284struct ustorm_iscsi_stats_drv { 1453struct ustorm_iscsi_stats_drv {
1285 struct regpair iscsi_rx_data_pdu_cnt; 1454 struct regpair iscsi_rx_data_pdu_cnt;
1286 struct regpair iscsi_rx_r2t_pdu_cnt; 1455 struct regpair iscsi_rx_r2t_pdu_cnt;
1287 struct regpair iscsi_rx_total_pdu_cnt; 1456 struct regpair iscsi_rx_total_pdu_cnt;
1288}; 1457};
1289 1458
1459/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
1290struct xstorm_iscsi_stats_drv { 1460struct xstorm_iscsi_stats_drv {
1291 struct regpair iscsi_tx_go_to_slow_start_event_cnt; 1461 struct regpair iscsi_tx_go_to_slow_start_event_cnt;
1292 struct regpair iscsi_tx_fast_retransmit_event_cnt; 1462 struct regpair iscsi_tx_fast_retransmit_event_cnt;
1463 struct regpair iscsi_tx_pure_ack_cnt;
1464 struct regpair iscsi_tx_delayed_ack_cnt;
1293}; 1465};
1294 1466
1467/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
1295struct ystorm_iscsi_stats_drv { 1468struct ystorm_iscsi_stats_drv {
1296 struct regpair iscsi_tx_data_pdu_cnt; 1469 struct regpair iscsi_tx_data_pdu_cnt;
1297 struct regpair iscsi_tx_r2t_pdu_cnt; 1470 struct regpair iscsi_tx_r2t_pdu_cnt;
1298 struct regpair iscsi_tx_total_pdu_cnt; 1471 struct regpair iscsi_tx_total_pdu_cnt;
1472 struct regpair iscsi_tx_tcp_payload_bytes_cnt;
1473 struct regpair iscsi_tx_tcp_pkt_cnt;
1299}; 1474};
1300 1475
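Each counter in the per-storm statistics blocks above is a regpair, i.e. a 64-bit firmware counter split into two little-endian 32-bit halves (assumed .lo/.hi layout). A sketch of folding one back into a host-order u64:

static u64 iscsi_regpair_to_u64(struct regpair rp)
{
	return ((u64)le32_to_cpu(rp.hi) << 32) | le32_to_cpu(rp.lo);
}

/* Example: total PDUs transmitted, as counted by the yStorm block. */
static u64 iscsi_ystorm_total_tx_pdus(const struct ystorm_iscsi_stats_drv *y)
{
	return iscsi_regpair_to_u64(y->iscsi_tx_total_pdu_cnt);
}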
1301struct tstorm_iscsi_task_ag_ctx { 1476struct e4_tstorm_iscsi_task_ag_ctx {
1302 u8 byte0; 1477 u8 byte0;
1303 u8 byte1; 1478 u8 byte1;
1304 __le16 word0; 1479 __le16 word0;
1305 u8 flags0; 1480 u8 flags0;
1306#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF 1481#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
1307#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 1482#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
1308#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 1483#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
1309#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 1484#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
1310#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 1485#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
1311#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 1486#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
1312#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 1487#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
1313#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 1488#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
1314#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 1489#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
1315#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 1490#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
1316 u8 flags1; 1491 u8 flags1;
1317#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 1492#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
1318#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 1493#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
1319#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 1494#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
1320#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 1495#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
1321#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 1496#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
1322#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 1497#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
1323#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 1498#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
1324#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 1499#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
1325#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 1500#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
1326#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 1501#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
1327 u8 flags2; 1502 u8 flags2;
1328#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 1503#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
1329#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 1504#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
1330#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 1505#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
1331#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 1506#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
1332#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 1507#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
1333#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 1508#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
1334#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 1509#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
1335#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 1510#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
1336 u8 flags3; 1511 u8 flags3;
1337#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 1512#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
1338#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 1513#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
1339#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 1514#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
1340#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 1515#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
1341#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 1516#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
1342#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 1517#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
1343#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 1518#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
1344#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 1519#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
1345#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 1520#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
1346#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 1521#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
1347#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 1522#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
1348#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 1523#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
1349#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 1524#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
1350#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 1525#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
1351 u8 flags4; 1526 u8 flags4;
1352#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 1527#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
1353#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 1528#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
1354#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 1529#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
1355#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 1530#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
1356#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 1531#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
1357#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 1532#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
1358#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 1533#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
1359#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 1534#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
1360#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 1535#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
1361#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 1536#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
1362#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 1537#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
1363#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 1538#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
1364#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 1539#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
1365#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 1540#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
1366#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 1541#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
1367#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 1542#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
1368 u8 byte2; 1543 u8 byte2;
1369 __le16 word1; 1544 __le16 word1;
1370 __le32 reg0; 1545 __le32 reg0;
@@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx {
1376 __le32 reg1; 1551 __le32 reg1;
1377 __le32 reg2; 1552 __le32 reg2;
1378}; 1553};
1554
1555/* iSCSI doorbell data */
1379struct iscsi_db_data { 1556struct iscsi_db_data {
1380 u8 params; 1557 u8 params;
1381#define ISCSI_DB_DATA_DEST_MASK 0x3 1558#define ISCSI_DB_DATA_DEST_MASK 0x3
1382#define ISCSI_DB_DATA_DEST_SHIFT 0 1559#define ISCSI_DB_DATA_DEST_SHIFT 0
1383#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 1560#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3
1384#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 1561#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2
1385#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 1562#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1
1386#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 1563#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4
1387#define ISCSI_DB_DATA_RESERVED_MASK 0x1 1564#define ISCSI_DB_DATA_RESERVED_MASK 0x1
1388#define ISCSI_DB_DATA_RESERVED_SHIFT 5 1565#define ISCSI_DB_DATA_RESERVED_SHIFT 5
1389#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 1566#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
1390#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 1567#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
1391 u8 agg_flags; 1568 u8 agg_flags;
1392 __le16 sq_prod; 1569 __le16 sq_prod;
1393}; 1570};
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index b8b3e1cfae90..c6cfd39cd910 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -29,9 +29,12 @@
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32
32#ifndef __IWARP_COMMON__ 33#ifndef __IWARP_COMMON__
33#define __IWARP_COMMON__ 34#define __IWARP_COMMON__
35
34#include <linux/qed/rdma_common.h> 36#include <linux/qed/rdma_common.h>
37
35/************************/ 38/************************/
36/* IWARP FW CONSTANTS */ 39/* IWARP FW CONSTANTS */
37/************************/ 40/************************/
@@ -40,14 +43,14 @@
40#define IWARP_PASSIVE_MODE 1 43#define IWARP_PASSIVE_MODE 1
41 44
42#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) 45#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
43#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) 46#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
44#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) 47#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
45#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) 48#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
46#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) 49#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
47 50
48#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) 51#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
49#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) 52#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
50 53
51#define IWARP_MAX_QPS (64 * 1024) 54#define IWARP_MAX_QPS (64 * 1024)
52 55
53#endif /* __IWARP_COMMON__ */ 56#endif /* __IWARP_COMMON__ */
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index d60de4a39810..147d08ccf813 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
61 void *p_handle; 61 void *p_handle;
62}; 62};
63 63
64enum qed_filter_config_mode {
65 QED_FILTER_CONFIG_MODE_DISABLE,
66 QED_FILTER_CONFIG_MODE_5_TUPLE,
67 QED_FILTER_CONFIG_MODE_L4_PORT,
68 QED_FILTER_CONFIG_MODE_IP_DEST,
69};
70
71struct qed_ntuple_filter_params {
72 /* Physically mapped address containing header of buffer to be used
73 * as filter.
74 */
75 dma_addr_t addr;
76
77 /* Length of header in bytes */
78 u16 length;
79
80 /* Relative queue-id to receive classified packet */
81#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
82 u16 qid;
83
84 /* Identifier can either be according to vport-id or vfid */
85 bool b_is_vf;
86 u8 vport_id;
87 u8 vf_id;
88
89 /* true iff this filter is to be added. Else to be removed */
90 bool b_is_add;
91};
92
64struct qed_dev_eth_info { 93struct qed_dev_eth_info {
65 struct qed_dev_info common; 94 struct qed_dev_info common;
66 95
@@ -316,13 +345,12 @@ struct qed_eth_ops {
316 int (*tunn_config)(struct qed_dev *cdev, 345 int (*tunn_config)(struct qed_dev *cdev,
317 struct qed_tunn_params *params); 346 struct qed_tunn_params *params);
318 347
319 int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie, 348 int (*ntuple_filter_config)(struct qed_dev *cdev,
320 dma_addr_t mapping, u16 length, 349 void *cookie,
321 u16 vport_id, u16 rx_queue_id, 350 struct qed_ntuple_filter_params *params);
322 bool add_filter);
323 351
324 int (*configure_arfs_searcher)(struct qed_dev *cdev, 352 int (*configure_arfs_searcher)(struct qed_dev *cdev,
325 bool en_searcher); 353 enum qed_filter_config_mode mode);
326 int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); 354 int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
327}; 355};
328 356
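
For illustration only (not part of this patch): a minimal sketch of how an upper-layer driver might fill the new struct qed_ntuple_filter_params and pass it through the reworked ntuple_filter_config() callback. The foo_* helper, the @cdev/@ops/@cookie arguments and the DMA-mapped header buffer are all hypothetical; only the structure fields and the callback signature come from the header above.

#include <linux/qed/qed_eth_if.h>

/* Hypothetical helper: steer flows matching a pre-built, DMA-mapped
 * header buffer (@addr, @len bytes) to RX queue @rxq_id through the
 * reworked callback.  @cdev, @ops and @cookie come from the caller. */
static int foo_add_ntuple_filter(struct qed_dev *cdev,
				 const struct qed_eth_ops *ops,
				 void *cookie, dma_addr_t addr,
				 u16 len, u16 rxq_id)
{
	struct qed_ntuple_filter_params params = {
		.addr = addr,
		.length = len,
		.qid = rxq_id,		/* or QED_RFS_NTUPLE_QID_RSS */
		.b_is_vf = false,
		.vport_id = 0,
		.b_is_add = true,	/* false removes the filter */
	};

	return ops->ntuple_filter_config(cdev, cookie, &params);
}
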
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index cc646ca97974..15e398c7230e 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
244/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ 244/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
245struct qed_iscsi_pf_params { 245struct qed_iscsi_pf_params {
246 u64 glbl_q_params_addr; 246 u64 glbl_q_params_addr;
247 u64 bdq_pbl_base_addr[2]; 247 u64 bdq_pbl_base_addr[3];
248 u32 max_cwnd;
249 u16 cq_num_entries; 248 u16 cq_num_entries;
250 u16 cmdq_num_entries; 249 u16 cmdq_num_entries;
251 u32 two_msl_timer; 250 u32 two_msl_timer;
252 u16 dup_ack_threshold;
253 u16 tx_sws_timer; 251 u16 tx_sws_timer;
254 u16 min_rto;
255 u16 min_rto_rt;
256 u16 max_rto;
257 252
258 /* The following parameters are used during HW-init 253 /* The following parameters are used during HW-init
259 * and these parameters need to be passed as arguments 254 * and these parameters need to be passed as arguments
@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {
264 259
265 /* The following parameters are used during protocol-init */ 260 /* The following parameters are used during protocol-init */
266 u16 half_way_close_timeout; 261 u16 half_way_close_timeout;
267 u16 bdq_xoff_threshold[2]; 262 u16 bdq_xoff_threshold[3];
268 u16 bdq_xon_threshold[2]; 263 u16 bdq_xon_threshold[3];
269 u16 cmdq_xoff_threshold; 264 u16 cmdq_xoff_threshold;
270 u16 cmdq_xon_threshold; 265 u16 cmdq_xon_threshold;
271 u16 rq_buffer_size; 266 u16 rq_buffer_size;
@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
281 u8 gl_cmd_pi; 276 u8 gl_cmd_pi;
282 u8 debug_mode; 277 u8 debug_mode;
283 u8 ll2_ooo_queue_id; 278 u8 ll2_ooo_queue_id;
284 u8 ooo_enable;
285 279
286 u8 is_target; 280 u8 is_target;
287 u8 bdq_pbl_num_entries[2]; 281 u8 is_soc_en;
282 u8 soc_num_of_blocks_log;
283 u8 bdq_pbl_num_entries[3];
288}; 284};
289 285
290struct qed_rdma_pf_params { 286struct qed_rdma_pf_params {
@@ -316,16 +312,16 @@ enum qed_int_mode {
316}; 312};
317 313
318struct qed_sb_info { 314struct qed_sb_info {
319 struct status_block *sb_virt; 315 struct status_block_e4 *sb_virt;
320 dma_addr_t sb_phys; 316 dma_addr_t sb_phys;
321 u32 sb_ack; /* Last given ack */ 317 u32 sb_ack; /* Last given ack */
322 u16 igu_sb_id; 318 u16 igu_sb_id;
323 void __iomem *igu_addr; 319 void __iomem *igu_addr;
324 u8 flags; 320 u8 flags;
325#define QED_SB_INFO_INIT 0x1 321#define QED_SB_INFO_INIT 0x1
326#define QED_SB_INFO_SETUP 0x2 322#define QED_SB_INFO_SETUP 0x2
327 323
328 struct qed_dev *cdev; 324 struct qed_dev *cdev;
329}; 325};
330 326
331enum qed_dev_type { 327enum qed_dev_type {
@@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
939 u16 rc = 0; 935 u16 rc = 0;
940 936
941 prod = le32_to_cpu(sb_info->sb_virt->prod_index) & 937 prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
942 STATUS_BLOCK_PROD_INDEX_MASK; 938 STATUS_BLOCK_E4_PROD_INDEX_MASK;
943 if (sb_info->sb_ack != prod) { 939 if (sb_info->sb_ack != prod) {
944 sb_info->sb_ack = prod; 940 sb_info->sb_ack = prod;
945 rc |= QED_SB_IDX; 941 rc |= QED_SB_IDX;
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 111e606a74c8..d0df1bec5357 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
102 u32 ss_thresh; 102 u32 ss_thresh;
103 u16 srtt; 103 u16 srtt;
104 u16 rtt_var; 104 u16 rtt_var;
105 u32 ts_time;
106 u32 ts_recent; 105 u32 ts_recent;
107 u32 ts_recent_age; 106 u32 ts_recent_age;
108 u32 total_rt; 107 u32 total_rt;
@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
124 u16 mss; 123 u16 mss;
125 u8 snd_wnd_scale; 124 u8 snd_wnd_scale;
126 u8 rcv_wnd_scale; 125 u8 rcv_wnd_scale;
127 u32 ts_ticks_per_second;
128 u16 da_timeout_value; 126 u16 da_timeout_value;
129 u8 ack_frequency; 127 u8 ack_frequency;
130}; 128};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index e755954d85fd..266c1fb45387 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
116 u32 opaque_data_1; 116 u32 opaque_data_1;
117 117
118 /* GSI only */ 118 /* GSI only */
119 u32 gid_dst[4]; 119 u32 src_qp;
120 u16 qp_id; 120 u16 qp_id;
121 121
122 union { 122 union {
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index a9b3050f469c..c1a446ebe362 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -32,28 +32,29 @@
32 32
33#ifndef __RDMA_COMMON__ 33#ifndef __RDMA_COMMON__
34#define __RDMA_COMMON__ 34#define __RDMA_COMMON__
35
35/************************/ 36/************************/
36/* RDMA FW CONSTANTS */ 37/* RDMA FW CONSTANTS */
37/************************/ 38/************************/
38 39
39#define RDMA_RESERVED_LKEY (0) 40#define RDMA_RESERVED_LKEY (0)
40#define RDMA_RING_PAGE_SIZE (0x1000) 41#define RDMA_RING_PAGE_SIZE (0x1000)
41 42
42#define RDMA_MAX_SGE_PER_SQ_WQE (4) 43#define RDMA_MAX_SGE_PER_SQ_WQE (4)
43#define RDMA_MAX_SGE_PER_RQ_WQE (4) 44#define RDMA_MAX_SGE_PER_RQ_WQE (4)
44 45
45#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) 46#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)
46 47
47#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) 48#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
48#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) 49#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
49 50
50#define RDMA_MAX_CQS (64 * 1024) 51#define RDMA_MAX_CQS (64 * 1024)
51#define RDMA_MAX_TIDS (128 * 1024 - 1) 52#define RDMA_MAX_TIDS (128 * 1024 - 1)
52#define RDMA_MAX_PDS (64 * 1024) 53#define RDMA_MAX_PDS (64 * 1024)
53 54
54#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 55#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
55#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 56#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
56#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB 57#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
57 58
58#define RDMA_TASK_TYPE (PROTOCOLID_ROCE) 59#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
59 60
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index fe6a33e45977..e15e0da71240 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -33,13 +33,18 @@
33#ifndef __ROCE_COMMON__ 33#ifndef __ROCE_COMMON__
34#define __ROCE_COMMON__ 34#define __ROCE_COMMON__
35 35
36#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) 36/************************/
37#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) 37/* ROCE FW CONSTANTS */
38/************************/
38 39
39#define ROCE_MAX_QPS (32 * 1024) 40#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
40#define ROCE_DCQCN_NP_MAX_QPS (64) 41#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
41#define ROCE_DCQCN_RP_MAX_QPS (64)
42 42
43#define ROCE_MAX_QPS (32 * 1024)
44#define ROCE_DCQCN_NP_MAX_QPS (64)
45#define ROCE_DCQCN_RP_MAX_QPS (64)
46
47/* Affiliated asynchronous events / errors enumeration */
43enum roce_async_events_type { 48enum roce_async_events_type {
44 ROCE_ASYNC_EVENT_NONE = 0, 49 ROCE_ASYNC_EVENT_NONE = 0,
45 ROCE_ASYNC_EVENT_COMM_EST = 1, 50 ROCE_ASYNC_EVENT_COMM_EST = 1,
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 08df82a096b6..505c0b48a761 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -33,43 +33,77 @@
33#ifndef __STORAGE_COMMON__ 33#ifndef __STORAGE_COMMON__
34#define __STORAGE_COMMON__ 34#define __STORAGE_COMMON__
35 35
36#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) 36/*********************/
37#define BDQ_NUM_RESOURCES (4) 37/* SCSI CONSTANTS */
38 38/*********************/
39#define BDQ_ID_RQ (0) 39
40#define BDQ_ID_IMM_DATA (1) 40#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
41#define BDQ_NUM_IDS (2) 41#define BDQ_NUM_RESOURCES (4)
42 42
43#define SCSI_NUM_SGES_SLOW_SGL_THR 8 43#define BDQ_ID_RQ (0)
44#define BDQ_ID_IMM_DATA (1)
45#define BDQ_ID_TQ (2)
46#define BDQ_NUM_IDS (3)
47
48#define SCSI_NUM_SGES_SLOW_SGL_THR 8
49
50#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
51
52/* SCSI op codes */
53#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
54#define SCSI_OPCODE_READ_10 (0x28)
55#define SCSI_OPCODE_WRITE_6 (0x0A)
56#define SCSI_OPCODE_WRITE_10 (0x2A)
57#define SCSI_OPCODE_WRITE_12 (0xAA)
58#define SCSI_OPCODE_WRITE_16 (0x8A)
59#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
60#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
61#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)
62
63/* iSCSI Drv opaque */
64struct iscsi_drv_opaque {
65 __le16 reserved_zero[3];
66 __le16 opaque;
67};
44 68
45#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) 69/* Scsi 2B/8B opaque union */
70union scsi_opaque {
71 struct regpair fcoe_opaque;
72 struct iscsi_drv_opaque iscsi_opaque;
73};
46 74
75/* SCSI buffer descriptor */
47struct scsi_bd { 76struct scsi_bd {
48 struct regpair address; 77 struct regpair address;
49 struct regpair opaque; 78 union scsi_opaque opaque;
50}; 79};
51 80
81/* Scsi Drv BDQ struct */
52struct scsi_bdq_ram_drv_data { 82struct scsi_bdq_ram_drv_data {
53 __le16 external_producer; 83 __le16 external_producer;
54 __le16 reserved0[3]; 84 __le16 reserved0[3];
55}; 85};
56 86
87/* SCSI SGE entry */
57struct scsi_sge { 88struct scsi_sge {
58 struct regpair sge_addr; 89 struct regpair sge_addr;
59 __le32 sge_len; 90 __le32 sge_len;
60 __le32 reserved; 91 __le32 reserved;
61}; 92};
62 93
94/* Cached SGEs section */
63struct scsi_cached_sges { 95struct scsi_cached_sges {
64 struct scsi_sge sge[4]; 96 struct scsi_sge sge[4];
65}; 97};
66 98
99/* Scsi Drv CMDQ struct */
67struct scsi_drv_cmdq { 100struct scsi_drv_cmdq {
68 __le16 cmdq_cons; 101 __le16 cmdq_cons;
69 __le16 reserved0; 102 __le16 reserved0;
70 __le32 reserved1; 103 __le32 reserved1;
71}; 104};
72 105
106/* Common SCSI init params passed by driver to FW in function init ramrod */
73struct scsi_init_func_params { 107struct scsi_init_func_params {
74 __le16 num_tasks; 108 __le16 num_tasks;
75 u8 log_page_size; 109 u8 log_page_size;
@@ -77,6 +111,7 @@ struct scsi_init_func_params {
77 u8 reserved2[12]; 111 u8 reserved2[12];
78}; 112};
79 113
114/* SCSI RQ/CQ/CMDQ firmware function init parameters */
80struct scsi_init_func_queues { 115struct scsi_init_func_queues {
81 struct regpair glbl_q_params_addr; 116 struct regpair glbl_q_params_addr;
82 __le16 rq_buffer_size; 117 __le16 rq_buffer_size;
@@ -84,39 +119,45 @@ struct scsi_init_func_queues {
84 __le16 cmdq_num_entries; 119 __le16 cmdq_num_entries;
85 u8 bdq_resource_id; 120 u8 bdq_resource_id;
86 u8 q_validity; 121 u8 q_validity;
87#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 122#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
88#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 123#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
89#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 124#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
90#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 125#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
91#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 126#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
92#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 127#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
93#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F 128#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
94#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 129#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
130#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1
131#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
132#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7
133#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
134 __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
95 u8 num_queues; 135 u8 num_queues;
96 u8 queue_relative_offset; 136 u8 queue_relative_offset;
97 u8 cq_sb_pi; 137 u8 cq_sb_pi;
98 u8 cmdq_sb_pi; 138 u8 cmdq_sb_pi;
99 __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
100 __le16 reserved0;
101 u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; 139 u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
140 u8 reserved1;
102 struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; 141 struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
103 __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; 142 __le16 bdq_xoff_threshold[BDQ_NUM_IDS];
104 __le16 bdq_xon_threshold[BDQ_NUM_IDS];
105 __le16 cmdq_xoff_threshold; 143 __le16 cmdq_xoff_threshold;
144 __le16 bdq_xon_threshold[BDQ_NUM_IDS];
106 __le16 cmdq_xon_threshold; 145 __le16 cmdq_xon_threshold;
107 __le32 reserved1;
108}; 146};
109 147
148/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
110struct scsi_ram_per_bdq_resource_drv_data { 149struct scsi_ram_per_bdq_resource_drv_data {
111 struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; 150 struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
112}; 151};
113 152
153/* SCSI SGL types */
114enum scsi_sgl_mode { 154enum scsi_sgl_mode {
115 SCSI_TX_SLOW_SGL, 155 SCSI_TX_SLOW_SGL,
116 SCSI_FAST_SGL, 156 SCSI_FAST_SGL,
117 MAX_SCSI_SGL_MODE 157 MAX_SCSI_SGL_MODE
118}; 158};
119 159
160/* SCSI SGL parameters */
120struct scsi_sgl_params { 161struct scsi_sgl_params {
121 struct regpair sgl_addr; 162 struct regpair sgl_addr;
122 __le32 sgl_total_length; 163 __le32 sgl_total_length;
@@ -126,10 +167,16 @@ struct scsi_sgl_params {
126 u8 reserved; 167 u8 reserved;
127}; 168};
128 169
170/* SCSI terminate connection params */
129struct scsi_terminate_extra_params { 171struct scsi_terminate_extra_params {
130 __le16 unsolicited_cq_count; 172 __le16 unsolicited_cq_count;
131 __le16 cmdq_count; 173 __le16 cmdq_count;
132 u8 reserved[4]; 174 u8 reserved[4];
133}; 175};
134 176
177/* SCSI Task Queue Element */
178struct scsi_tqe {
179 __le16 itid;
180};
181
135#endif /* __STORAGE_COMMON__ */ 182#endif /* __STORAGE_COMMON__ */
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dbf7a43c3e1f..4a4845193539 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -33,8 +33,13 @@
33#ifndef __TCP_COMMON__ 33#ifndef __TCP_COMMON__
34#define __TCP_COMMON__ 34#define __TCP_COMMON__
35 35
36#define TCP_INVALID_TIMEOUT_VAL -1 36/********************/
37/* TCP FW CONSTANTS */
38/********************/
37 39
40#define TCP_INVALID_TIMEOUT_VAL -1
41
42/* OOO opaque data received from LL2 */
38struct ooo_opaque { 43struct ooo_opaque {
39 __le32 cid; 44 __le32 cid;
40 u8 drop_isle; 45 u8 drop_isle;
@@ -43,25 +48,29 @@ struct ooo_opaque {
43 u8 ooo_isle; 48 u8 ooo_isle;
44}; 49};
45 50
51/* tcp connect mode enum */
46enum tcp_connect_mode { 52enum tcp_connect_mode {
47 TCP_CONNECT_ACTIVE, 53 TCP_CONNECT_ACTIVE,
48 TCP_CONNECT_PASSIVE, 54 TCP_CONNECT_PASSIVE,
49 MAX_TCP_CONNECT_MODE 55 MAX_TCP_CONNECT_MODE
50}; 56};
51 57
58/* tcp function init parameters */
52struct tcp_init_params { 59struct tcp_init_params {
53 __le32 two_msl_timer; 60 __le32 two_msl_timer;
54 __le16 tx_sws_timer; 61 __le16 tx_sws_timer;
55 u8 maxfinrt; 62 u8 max_fin_rt;
56 u8 reserved[9]; 63 u8 reserved[9];
57}; 64};
58 65
66/* tcp IPv4/IPv6 enum */
59enum tcp_ip_version { 67enum tcp_ip_version {
60 TCP_IPV4, 68 TCP_IPV4,
61 TCP_IPV6, 69 TCP_IPV6,
62 MAX_TCP_IP_VERSION 70 MAX_TCP_IP_VERSION
63}; 71};
64 72
73/* tcp offload parameters */
65struct tcp_offload_params { 74struct tcp_offload_params {
66 __le16 local_mac_addr_lo; 75 __le16 local_mac_addr_lo;
67 __le16 local_mac_addr_mid; 76 __le16 local_mac_addr_mid;
@@ -70,24 +79,29 @@ struct tcp_offload_params {
70 __le16 remote_mac_addr_mid; 79 __le16 remote_mac_addr_mid;
71 __le16 remote_mac_addr_hi; 80 __le16 remote_mac_addr_hi;
72 __le16 vlan_id; 81 __le16 vlan_id;
73 u8 flags; 82 __le16 flags;
74#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 83#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
75#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 84#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
76#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 85#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
77#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 86#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
78#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 87#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
79#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 88#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
80#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 89#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1
81#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 90#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
82#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 91#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1
83#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 92#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
84#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 93#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
85#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 94#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
86#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 95#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
87#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 96#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
88#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 97#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
89#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 98#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
99#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
100#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
101#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
102#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
90 u8 ip_version; 103 u8 ip_version;
104 u8 reserved0[3];
91 __le32 remote_ip[4]; 105 __le32 remote_ip[4];
92 __le32 local_ip[4]; 106 __le32 local_ip[4];
93 __le32 flow_label; 107 __le32 flow_label;
@@ -99,17 +113,21 @@ struct tcp_offload_params {
99 u8 rcv_wnd_scale; 113 u8 rcv_wnd_scale;
100 u8 connect_mode; 114 u8 connect_mode;
101 __le16 srtt; 115 __le16 srtt;
102 __le32 cwnd;
103 __le32 ss_thresh; 116 __le32 ss_thresh;
104 __le16 reserved1; 117 __le32 rcv_wnd;
118 __le32 cwnd;
105 u8 ka_max_probe_cnt; 119 u8 ka_max_probe_cnt;
106 u8 dup_ack_theshold; 120 u8 dup_ack_theshold;
121 __le16 reserved1;
122 __le32 ka_timeout;
123 __le32 ka_interval;
124 __le32 max_rt_time;
125 __le32 initial_rcv_wnd;
107 __le32 rcv_next; 126 __le32 rcv_next;
108 __le32 snd_una; 127 __le32 snd_una;
109 __le32 snd_next; 128 __le32 snd_next;
110 __le32 snd_max; 129 __le32 snd_max;
111 __le32 snd_wnd; 130 __le32 snd_wnd;
112 __le32 rcv_wnd;
113 __le32 snd_wl1; 131 __le32 snd_wl1;
114 __le32 ts_recent; 132 __le32 ts_recent;
115 __le32 ts_recent_age; 133 __le32 ts_recent_age;
@@ -122,16 +140,13 @@ struct tcp_offload_params {
122 u8 rt_cnt; 140 u8 rt_cnt;
123 __le16 rtt_var; 141 __le16 rtt_var;
124 __le16 fw_internal; 142 __le16 fw_internal;
125 __le32 ka_timeout;
126 __le32 ka_interval;
127 __le32 max_rt_time;
128 __le32 initial_rcv_wnd;
129 u8 snd_wnd_scale; 143 u8 snd_wnd_scale;
130 u8 ack_frequency; 144 u8 ack_frequency;
131 __le16 da_timeout_value; 145 __le16 da_timeout_value;
132 __le32 reserved3[2]; 146 __le32 reserved3;
133}; 147};
134 148
149/* tcp offload parameters */
135struct tcp_offload_params_opt2 { 150struct tcp_offload_params_opt2 {
136 __le16 local_mac_addr_lo; 151 __le16 local_mac_addr_lo;
137 __le16 local_mac_addr_mid; 152 __le16 local_mac_addr_mid;
@@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 {
140 __le16 remote_mac_addr_mid; 155 __le16 remote_mac_addr_mid;
141 __le16 remote_mac_addr_hi; 156 __le16 remote_mac_addr_hi;
142 __le16 vlan_id; 157 __le16 vlan_id;
143 u8 flags; 158 __le16 flags;
144#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 159#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
145#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 160#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
146#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 161#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
147#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 162#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
148#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 163#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
149#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 164#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
150#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F 165#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1
151#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 166#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
167#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
168#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
152 u8 ip_version; 169 u8 ip_version;
170 u8 reserved1[3];
153 __le32 remote_ip[4]; 171 __le32 remote_ip[4];
154 __le32 local_ip[4]; 172 __le32 local_ip[4];
155 __le32 flow_label; 173 __le32 flow_label;
@@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 {
163 __le16 syn_ip_payload_length; 181 __le16 syn_ip_payload_length;
164 __le32 syn_phy_addr_lo; 182 __le32 syn_phy_addr_lo;
165 __le32 syn_phy_addr_hi; 183 __le32 syn_phy_addr_hi;
166 __le32 reserved1[22]; 184 __le32 cwnd;
185 u8 ka_max_probe_cnt;
186 u8 reserved2[3];
187 __le32 ka_timeout;
188 __le32 ka_interval;
189 __le32 max_rt_time;
190 __le32 reserved3[16];
167}; 191};
168 192
193/* tcp segment placement event enum */
169enum tcp_seg_placement_event { 194enum tcp_seg_placement_event {
170 TCP_EVENT_ADD_PEN, 195 TCP_EVENT_ADD_PEN,
171 TCP_EVENT_ADD_NEW_ISLE, 196 TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +202,41 @@ enum tcp_seg_placement_event {
177 MAX_TCP_SEG_PLACEMENT_EVENT 202 MAX_TCP_SEG_PLACEMENT_EVENT
178}; 203};
179 204
205/* tcp update parameters */
180struct tcp_update_params { 206struct tcp_update_params {
181 __le16 flags; 207 __le16 flags;
182#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 208#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
183#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 209#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
184#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 210#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
185#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 211#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
186#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 212#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
187#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 213#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
188#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 214#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
189#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 215#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
190#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 216#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
191#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 217#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
192#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 218#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
193#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 219#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
194#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 220#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
195#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 221#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
196#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 222#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
197#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 223#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
198#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 224#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
199#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 225#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
200#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 226#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
201#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 227#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
202#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 228#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
203#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 229#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
204#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 230#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
205#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 231#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
206#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 232#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
207#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 233#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
208#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 234#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
209#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 235#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
210#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 236#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
211#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 237#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
212#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 238#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
213#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 239#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
214 __le16 remote_mac_addr_lo; 240 __le16 remote_mac_addr_lo;
215 __le16 remote_mac_addr_mid; 241 __le16 remote_mac_addr_mid;
216 __le16 remote_mac_addr_hi; 242 __le16 remote_mac_addr_hi;
@@ -226,6 +252,7 @@ struct tcp_update_params {
226 u8 reserved1[7]; 252 u8 reserved1[7];
227}; 253};
228 254
255/* toe upload parameters */
229struct tcp_upload_params { 256struct tcp_upload_params {
230 __le32 rcv_next; 257 __le32 rcv_next;
231 __le32 snd_una; 258 __le32 snd_una;
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 23a9c89c7ad9..fc55ff31eca7 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -356,24 +356,9 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
356int radix_tree_join(struct radix_tree_root *, unsigned long index, 356int radix_tree_join(struct radix_tree_root *, unsigned long index,
357 unsigned new_order, void *); 357 unsigned new_order, void *);
358 358
359void __rcu **idr_get_free_cmn(struct radix_tree_root *root, 359void __rcu **idr_get_free(struct radix_tree_root *root,
360 struct radix_tree_iter *iter, gfp_t gfp, 360 struct radix_tree_iter *iter, gfp_t gfp,
361 unsigned long max); 361 unsigned long max);
362static inline void __rcu **idr_get_free(struct radix_tree_root *root,
363 struct radix_tree_iter *iter,
364 gfp_t gfp,
365 int end)
366{
367 return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
368}
369
370static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
371 struct radix_tree_iter *iter,
372 gfp_t gfp,
373 unsigned long end)
374{
375 return idr_get_free_cmn(root, iter, gfp, end - 1);
376}
377 362
378enum { 363enum {
379 RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ 364 RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a6ddc42f87a5..043d04784675 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -197,7 +197,7 @@ static inline void exit_tasks_rcu_finish(void) { }
197#define cond_resched_rcu_qs() \ 197#define cond_resched_rcu_qs() \
198do { \ 198do { \
199 if (!cond_resched()) \ 199 if (!cond_resched()) \
200 rcu_note_voluntary_context_switch(current); \ 200 rcu_note_voluntary_context_switch_lite(current); \
201} while (0) 201} while (0)
202 202
203/* 203/*
@@ -433,12 +433,12 @@ static inline void rcu_preempt_sleep_check(void) { }
433 * @p: The pointer to read 433 * @p: The pointer to read
434 * 434 *
435 * Return the value of the specified RCU-protected pointer, but omit the 435 * Return the value of the specified RCU-protected pointer, but omit the
436 * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful 436 * lockdep checks for being in an RCU read-side critical section. This is
437 * when the value of this pointer is accessed, but the pointer is not 437 * useful when the value of this pointer is accessed, but the pointer is
438 * dereferenced, for example, when testing an RCU-protected pointer against 438 * not dereferenced, for example, when testing an RCU-protected pointer
439 * NULL. Although rcu_access_pointer() may also be used in cases where 439 * against NULL. Although rcu_access_pointer() may also be used in cases
440 * update-side locks prevent the value of the pointer from changing, you 440 * where update-side locks prevent the value of the pointer from changing,
441 * should instead use rcu_dereference_protected() for this use case. 441 * you should instead use rcu_dereference_protected() for this use case.
442 * 442 *
443 * It is also permissible to use rcu_access_pointer() when read-side 443 * It is also permissible to use rcu_access_pointer() when read-side
444 * access to the pointer was removed at least one grace period ago, as 444 * access to the pointer was removed at least one grace period ago, as
@@ -521,12 +521,11 @@ static inline void rcu_preempt_sleep_check(void) { }
521 * @c: The conditions under which the dereference will take place 521 * @c: The conditions under which the dereference will take place
522 * 522 *
523 * Return the value of the specified RCU-protected pointer, but omit 523 * Return the value of the specified RCU-protected pointer, but omit
524 * both the smp_read_barrier_depends() and the READ_ONCE(). This 524 * the READ_ONCE(). This is useful in cases where update-side locks
525 * is useful in cases where update-side locks prevent the value of the 525 * prevent the value of the pointer from changing. Please note that this
526 * pointer from changing. Please note that this primitive does *not* 526 * primitive does *not* prevent the compiler from repeating this reference
527 * prevent the compiler from repeating this reference or combining it 527 * or combining it with other references, so it should not be used without
528 * with other references, so it should not be used without protection 528 * protection of appropriate locks.
529 * of appropriate locks.
530 * 529 *
531 * This function is only for update-side use. Using this function 530 * This function is only for update-side use. Using this function
532 * when protected only by rcu_read_lock() will result in infrequent 531 * when protected only by rcu_read_lock() will result in infrequent
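
For illustration only, a minimal sketch of the rcu_access_pointer() usage described in the hunk above: the pointer is only tested against NULL, never dereferenced. The foo type and the __rcu-annotated foo_gp pointer are made up for the example.

#include <linux/rcupdate.h>

struct foo;				/* hypothetical payload type */
static struct foo __rcu *foo_gp;	/* hypothetical RCU-protected pointer */

/* The pointer is only compared against NULL, never dereferenced, so
 * rcu_access_pointer() is sufficient and no rcu_read_lock() is needed. */
static bool foo_is_registered(void)
{
	return rcu_access_pointer(foo_gp) != NULL;
}
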
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b3dbf9502fd0..ce9beec35e34 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
111static inline void rcu_idle_enter(void) { } 111static inline void rcu_idle_enter(void) { }
112static inline void rcu_idle_exit(void) { } 112static inline void rcu_idle_exit(void) { }
113static inline void rcu_irq_enter(void) { } 113static inline void rcu_irq_enter(void) { }
114static inline bool rcu_irq_enter_disabled(void) { return false; }
115static inline void rcu_irq_exit_irqson(void) { } 114static inline void rcu_irq_exit_irqson(void) { }
116static inline void rcu_irq_enter_irqson(void) { } 115static inline void rcu_irq_enter_irqson(void) { }
117static inline void rcu_irq_exit(void) { } 116static inline void rcu_irq_exit(void) { }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 37d6fd3b7ff8..fd996cdf1833 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
85void rcu_irq_exit(void); 85void rcu_irq_exit(void);
86void rcu_irq_enter_irqson(void); 86void rcu_irq_enter_irqson(void);
87void rcu_irq_exit_irqson(void); 87void rcu_irq_exit_irqson(void);
88bool rcu_irq_enter_disabled(void);
89 88
90void exit_rcu(void); 89void exit_rcu(void);
91 90
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index e8286585e149..4193c41e383a 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -8,7 +8,7 @@
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9 9
10/** 10/**
11 * refcount_t - variant of atomic_t specialized for reference counts 11 * struct refcount_t - variant of atomic_t specialized for reference counts
12 * @refs: atomic_t counter field 12 * @refs: atomic_t counter field
13 * 13 *
14 * The counter saturates at UINT_MAX and will not move once 14 * The counter saturates at UINT_MAX and will not move once
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 15eddc1353ba..6a3aeba40e9e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -24,12 +24,14 @@ struct module;
24struct device; 24struct device;
25struct i2c_client; 25struct i2c_client;
26struct irq_domain; 26struct irq_domain;
27struct slim_device;
27struct spi_device; 28struct spi_device;
28struct spmi_device; 29struct spmi_device;
29struct regmap; 30struct regmap;
30struct regmap_range_cfg; 31struct regmap_range_cfg;
31struct regmap_field; 32struct regmap_field;
32struct snd_ac97; 33struct snd_ac97;
34struct sdw_slave;
33 35
34/* An enum of all the supported cache types */ 36/* An enum of all the supported cache types */
35enum regcache_type { 37enum regcache_type {
@@ -264,6 +266,9 @@ typedef void (*regmap_unlock)(void *);
264 * field is NULL but precious_table (see below) is not, the 266 * field is NULL but precious_table (see below) is not, the
265 * check is performed on such table (a register is precious if 267 * check is performed on such table (a register is precious if
266 * it belongs to one of the ranges specified by precious_table). 268 * it belongs to one of the ranges specified by precious_table).
269 * @disable_locking: This regmap is either protected by external means or
270 * is guaranteed not be be accessed from multiple threads.
271 * Don't use any locking mechanisms.
267 * @lock: Optional lock callback (overrides regmap's default lock 272 * @lock: Optional lock callback (overrides regmap's default lock
268 * function, based on spinlock or mutex). 273 * function, based on spinlock or mutex).
269 * @unlock: As above for unlocking. 274 * @unlock: As above for unlocking.
@@ -296,7 +301,10 @@ typedef void (*regmap_unlock)(void *);
296 * a read. 301 * a read.
297 * @write_flag_mask: Mask to be set in the top bytes of the register when doing 302 * @write_flag_mask: Mask to be set in the top bytes of the register when doing
298 * a write. If both read_flag_mask and write_flag_mask are 303 * a write. If both read_flag_mask and write_flag_mask are
299 * empty the regmap_bus default masks are used. 304 * empty and zero_flag_mask is not set the regmap_bus default
305 * masks are used.
306 * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
307 * if they are both empty.
300 * @use_single_rw: If set, converts the bulk read and write operations into 308 * @use_single_rw: If set, converts the bulk read and write operations into
301 * a series of single read and write operations. This is useful 309 * a series of single read and write operations. This is useful
302 * for device that does not support bulk read and write. 310 * for device that does not support bulk read and write.
@@ -317,6 +325,7 @@ typedef void (*regmap_unlock)(void *);
317 * 325 *
318 * @ranges: Array of configuration entries for virtual address ranges. 326 * @ranges: Array of configuration entries for virtual address ranges.
319 * @num_ranges: Number of range configuration entries. 327 * @num_ranges: Number of range configuration entries.
328 * @use_hwlock: Indicate if a hardware spinlock should be used.
320 * @hwlock_id: Specify the hardware spinlock id. 329 * @hwlock_id: Specify the hardware spinlock id.
321 * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, 330 * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
322 * HWLOCK_IRQ or 0. 331 * HWLOCK_IRQ or 0.
@@ -333,6 +342,8 @@ struct regmap_config {
333 bool (*readable_reg)(struct device *dev, unsigned int reg); 342 bool (*readable_reg)(struct device *dev, unsigned int reg);
334 bool (*volatile_reg)(struct device *dev, unsigned int reg); 343 bool (*volatile_reg)(struct device *dev, unsigned int reg);
335 bool (*precious_reg)(struct device *dev, unsigned int reg); 344 bool (*precious_reg)(struct device *dev, unsigned int reg);
345
346 bool disable_locking;
336 regmap_lock lock; 347 regmap_lock lock;
337 regmap_unlock unlock; 348 regmap_unlock unlock;
338 void *lock_arg; 349 void *lock_arg;
@@ -355,6 +366,7 @@ struct regmap_config {
355 366
356 unsigned long read_flag_mask; 367 unsigned long read_flag_mask;
357 unsigned long write_flag_mask; 368 unsigned long write_flag_mask;
369 bool zero_flag_mask;
358 370
359 bool use_single_rw; 371 bool use_single_rw;
360 bool can_multi_write; 372 bool can_multi_write;
@@ -365,6 +377,7 @@ struct regmap_config {
365 const struct regmap_range_cfg *ranges; 377 const struct regmap_range_cfg *ranges;
366 unsigned int num_ranges; 378 unsigned int num_ranges;
367 379
380 bool use_hwlock;
368 unsigned int hwlock_id; 381 unsigned int hwlock_id;
369 unsigned int hwlock_mode; 382 unsigned int hwlock_mode;
370}; 383};
@@ -499,6 +512,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
499 const struct regmap_config *config, 512 const struct regmap_config *config,
500 struct lock_class_key *lock_key, 513 struct lock_class_key *lock_key,
501 const char *lock_name); 514 const char *lock_name);
515struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
516 const struct regmap_config *config,
517 struct lock_class_key *lock_key,
518 const char *lock_name);
502struct regmap *__regmap_init_spi(struct spi_device *dev, 519struct regmap *__regmap_init_spi(struct spi_device *dev,
503 const struct regmap_config *config, 520 const struct regmap_config *config,
504 struct lock_class_key *lock_key, 521 struct lock_class_key *lock_key,
@@ -524,6 +541,10 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
524 const struct regmap_config *config, 541 const struct regmap_config *config,
525 struct lock_class_key *lock_key, 542 struct lock_class_key *lock_key,
526 const char *lock_name); 543 const char *lock_name);
544struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
545 const struct regmap_config *config,
546 struct lock_class_key *lock_key,
547 const char *lock_name);
527 548
528struct regmap *__devm_regmap_init(struct device *dev, 549struct regmap *__devm_regmap_init(struct device *dev,
529 const struct regmap_bus *bus, 550 const struct regmap_bus *bus,
@@ -561,6 +582,10 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
561 const struct regmap_config *config, 582 const struct regmap_config *config,
562 struct lock_class_key *lock_key, 583 struct lock_class_key *lock_key,
563 const char *lock_name); 584 const char *lock_name);
585struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
586 const struct regmap_config *config,
587 struct lock_class_key *lock_key,
588 const char *lock_name);
564 589
565/* 590/*
566 * Wrapper for regmap_init macros to include a unique lockdep key and name 591 * Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -616,6 +641,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
616 i2c, config) 641 i2c, config)
617 642
618/** 643/**
644 * regmap_init_slimbus() - Initialise register map
645 *
646 * @slimbus: Device that will be interacted with
647 * @config: Configuration for register map
648 *
649 * The return value will be an ERR_PTR() on error or a valid pointer to
650 * a struct regmap.
651 */
652#define regmap_init_slimbus(slimbus, config) \
653 __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \
654 slimbus, config)
655
656/**
619 * regmap_init_spi() - Initialise register map 657 * regmap_init_spi() - Initialise register map
620 * 658 *
621 * @dev: Device that will be interacted with 659 * @dev: Device that will be interacted with
@@ -710,6 +748,20 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
710bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); 748bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
711 749
712/** 750/**
751 * regmap_init_sdw() - Initialise register map
752 *
753 * @sdw: Device that will be interacted with
754 * @config: Configuration for register map
755 *
756 * The return value will be an ERR_PTR() on error or a valid pointer to
757 * a struct regmap.
758 */
759#define regmap_init_sdw(sdw, config) \
760 __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
761 sdw, config)
762
763
764/**
713 * devm_regmap_init() - Initialise managed register map 765 * devm_regmap_init() - Initialise managed register map
714 * 766 *
715 * @dev: Device that will be interacted with 767 * @dev: Device that will be interacted with
@@ -839,6 +891,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
839 __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \ 891 __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
840 ac97, config) 892 ac97, config)
841 893
894/**
895 * devm_regmap_init_sdw() - Initialise managed register map
896 *
897 * @sdw: Device that will be interacted with
898 * @config: Configuration for register map
899 *
900 * The return value will be an ERR_PTR() on error or a valid pointer
901 * to a struct regmap. The regmap will be automatically freed by the
902 * device management code.
903 */
904#define devm_regmap_init_sdw(sdw, config) \
905 __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
906 sdw, config)
907
842void regmap_exit(struct regmap *map); 908void regmap_exit(struct regmap *map);
843int regmap_reinit_cache(struct regmap *map, 909int regmap_reinit_cache(struct regmap *map,
844 const struct regmap_config *config); 910 const struct regmap_config *config);
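
For illustration only, a minimal sketch (not from this patch) of a driver using the new devm_regmap_init_sdw() wrapper documented above. The register widths, the foo_* names and the way @slave is obtained are assumptions; only the macro and struct regmap_config come from the header.

#include <linux/err.h>
#include <linux/regmap.h>

/* Hypothetical register layout: 32-bit register addresses, 8-bit values. */
static const struct regmap_config foo_sdw_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 8,
};

/* @slave is assumed to be handed in by the SoundWire core. */
static int foo_init_regmap(struct sdw_slave *slave, struct regmap **map)
{
	*map = devm_regmap_init_sdw(slave, &foo_sdw_regmap_cfg);
	if (IS_ERR(*map))
		return PTR_ERR(*map);

	/* ... regmap_read()/regmap_write() against the SoundWire device ... */
	return 0;
}

The managed variant frees the regmap together with the device, so no explicit regmap_exit() is needed in the error or remove paths.
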
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 94417b4226bd..4c00486b7a78 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -214,6 +214,8 @@ struct regulator_ops {
214 /* set regulator suspend operating mode (defined in consumer.h) */ 214 /* set regulator suspend operating mode (defined in consumer.h) */
215 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); 215 int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
216 216
217 int (*resume_early)(struct regulator_dev *rdev);
218
217 int (*set_pull_down) (struct regulator_dev *); 219 int (*set_pull_down) (struct regulator_dev *);
218}; 220};
219 221
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 9cd4fef37203..93a04893c739 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,16 @@ struct regulator;
42#define REGULATOR_CHANGE_DRMS 0x10 42#define REGULATOR_CHANGE_DRMS 0x10
43#define REGULATOR_CHANGE_BYPASS 0x20 43#define REGULATOR_CHANGE_BYPASS 0x20
44 44
45/*
46 * operations in suspend mode
47 * DO_NOTHING_IN_SUSPEND - the default value
48 * DISABLE_IN_SUSPEND - turn off regulator in suspend states
49 * ENABLE_IN_SUSPEND - keep regulator on in suspend states
50 */
51#define DO_NOTHING_IN_SUSPEND (-1)
52#define DISABLE_IN_SUSPEND 0
53#define ENABLE_IN_SUSPEND 1
54
45/* Regulator active discharge flags */ 55/* Regulator active discharge flags */
46enum regulator_active_discharge { 56enum regulator_active_discharge {
47 REGULATOR_ACTIVE_DISCHARGE_DEFAULT, 57 REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -56,16 +66,24 @@ enum regulator_active_discharge {
56 * state. One of enabled or disabled must be set for the 66 * state. One of enabled or disabled must be set for the
57 * configuration to be applied. 67 * configuration to be applied.
58 * 68 *
 59 * @uV: Operating voltage during suspend. 69 * @uV: Default operating voltage during suspend; it can be adjusted
 70 * within the <min_uV, max_uV> range.
 71 * @min_uV: Minimum suspend voltage that may be set.
 72 * @max_uV: Maximum suspend voltage that may be set.
60 * @mode: Operating mode during suspend. 73 * @mode: Operating mode during suspend.
61 * @enabled: Enabled during suspend. 74 * @enabled: operations during suspend.
62 * @disabled: Disabled during suspend. 75 * - DO_NOTHING_IN_SUSPEND
76 * - DISABLE_IN_SUSPEND
77 * - ENABLE_IN_SUSPEND
 78 * @changeable: Indicates whether this state can be switched between enabled/disabled.
63 */ 79 */
64struct regulator_state { 80struct regulator_state {
65 int uV; /* suspend voltage */ 81 int uV;
66 unsigned int mode; /* suspend regulator operating mode */ 82 int min_uV;
67 int enabled; /* is regulator enabled in this suspend state */ 83 int max_uV;
68 int disabled; /* is the regulator disabled in this suspend state */ 84 unsigned int mode;
85 int enabled;
86 bool changeable;
69}; 87};
70 88
71/** 89/**
@@ -225,12 +243,12 @@ struct regulator_init_data {
225 243
226#ifdef CONFIG_REGULATOR 244#ifdef CONFIG_REGULATOR
227void regulator_has_full_constraints(void); 245void regulator_has_full_constraints(void);
228int regulator_suspend_prepare(suspend_state_t state);
229int regulator_suspend_finish(void);
230#else 246#else
231static inline void regulator_has_full_constraints(void) 247static inline void regulator_has_full_constraints(void)
232{ 248{
233} 249}
250#endif
251
234static inline int regulator_suspend_prepare(suspend_state_t state) 252static inline int regulator_suspend_prepare(suspend_state_t state)
235{ 253{
236 return 0; 254 return 0;
@@ -239,6 +257,5 @@ static inline int regulator_suspend_finish(void)
239{ 257{
240 return 0; 258 return 0;
241} 259}
242#endif
243 260
244#endif 261#endif
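
For illustration only, a minimal sketch of board data using the extended struct regulator_state above to describe suspend-to-RAM behaviour. The regulator name, voltages and the choice of state_mem are hypothetical; the fields and the ENABLE_IN_SUSPEND value come from the header.

#include <linux/regulator/machine.h>

/* Hypothetical rail: keep it on across suspend-to-RAM at 1.8 V and allow
 * the suspend state to be changed at run time. */
static struct regulator_init_data foo_vddio_init_data = {
	.constraints = {
		.state_mem = {
			.uV = 1800000,
			.min_uV = 1700000,
			.max_uV = 1900000,
			.enabled = ENABLE_IN_SUSPEND,
			.changeable = true,
		},
	},
};
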
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 44e630eb3d94..728d421fffe9 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -324,6 +324,7 @@ struct rproc_mem_entry {
324}; 324};
325 325
326struct rproc; 326struct rproc;
327struct firmware;
327 328
328/** 329/**
329 * struct rproc_ops - platform-specific device handlers 330 * struct rproc_ops - platform-specific device handlers
@@ -331,12 +332,24 @@ struct rproc;
331 * @stop: power off the device 332 * @stop: power off the device
332 * @kick: kick a virtqueue (virtqueue id given as a parameter) 333 * @kick: kick a virtqueue (virtqueue id given as a parameter)
333 * @da_to_va: optional platform hook to perform address translations 334 * @da_to_va: optional platform hook to perform address translations
335 * @load_rsc_table: load resource table from firmware image
 336 * @find_loaded_rsc_table: find the loaded resource table
 337 * @load: load firmware to memory, where the remote processor
338 * expects to find it
339 * @sanity_check: sanity check the fw image
 340 * @get_boot_addr: get the boot address of the entry point specified in firmware
334 */ 341 */
335struct rproc_ops { 342struct rproc_ops {
336 int (*start)(struct rproc *rproc); 343 int (*start)(struct rproc *rproc);
337 int (*stop)(struct rproc *rproc); 344 int (*stop)(struct rproc *rproc);
338 void (*kick)(struct rproc *rproc, int vqid); 345 void (*kick)(struct rproc *rproc, int vqid);
339 void * (*da_to_va)(struct rproc *rproc, u64 da, int len); 346 void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
347 int (*load_rsc_table)(struct rproc *rproc, const struct firmware *fw);
348 struct resource_table *(*find_loaded_rsc_table)(
349 struct rproc *rproc, const struct firmware *fw);
350 int (*load)(struct rproc *rproc, const struct firmware *fw);
351 int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
352 u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
340}; 353};
341 354
342/** 355/**
@@ -390,7 +403,6 @@ enum rproc_crash_type {
390 * @priv: private data which belongs to the platform-specific rproc module 403 * @priv: private data which belongs to the platform-specific rproc module
391 * @ops: platform-specific start/stop rproc handlers 404 * @ops: platform-specific start/stop rproc handlers
392 * @dev: virtual device for refcounting and common remoteproc behavior 405 * @dev: virtual device for refcounting and common remoteproc behavior
393 * @fw_ops: firmware-specific handlers
394 * @power: refcount of users who need this rproc powered up 406 * @power: refcount of users who need this rproc powered up
395 * @state: state of the device 407 * @state: state of the device
396 * @lock: lock which protects concurrent manipulations of the rproc 408 * @lock: lock which protects concurrent manipulations of the rproc
@@ -406,11 +418,11 @@ enum rproc_crash_type {
406 * @index: index of this rproc device 418 * @index: index of this rproc device
407 * @crash_handler: workqueue for handling a crash 419 * @crash_handler: workqueue for handling a crash
408 * @crash_cnt: crash counter 420 * @crash_cnt: crash counter
409 * @crash_comp: completion used to sync crash handler and the rproc reload
410 * @recovery_disabled: flag that state if recovery was disabled 421 * @recovery_disabled: flag that state if recovery was disabled
411 * @max_notifyid: largest allocated notify id. 422 * @max_notifyid: largest allocated notify id.
412 * @table_ptr: pointer to the resource table in effect 423 * @table_ptr: pointer to the resource table in effect
413 * @cached_table: copy of the resource table 424 * @cached_table: copy of the resource table
425 * @table_sz: size of @cached_table
414 * @has_iommu: flag to indicate if remote processor is behind an MMU 426 * @has_iommu: flag to indicate if remote processor is behind an MMU
415 */ 427 */
416struct rproc { 428struct rproc {
@@ -419,9 +431,8 @@ struct rproc {
419 const char *name; 431 const char *name;
420 char *firmware; 432 char *firmware;
421 void *priv; 433 void *priv;
422 const struct rproc_ops *ops; 434 struct rproc_ops *ops;
423 struct device dev; 435 struct device dev;
424 const struct rproc_fw_ops *fw_ops;
425 atomic_t power; 436 atomic_t power;
426 unsigned int state; 437 unsigned int state;
427 struct mutex lock; 438 struct mutex lock;
@@ -437,11 +448,11 @@ struct rproc {
437 int index; 448 int index;
438 struct work_struct crash_handler; 449 struct work_struct crash_handler;
439 unsigned int crash_cnt; 450 unsigned int crash_cnt;
440 struct completion crash_comp;
441 bool recovery_disabled; 451 bool recovery_disabled;
442 int max_notifyid; 452 int max_notifyid;
443 struct resource_table *table_ptr; 453 struct resource_table *table_ptr;
444 struct resource_table *cached_table; 454 struct resource_table *cached_table;
455 size_t table_sz;
445 bool has_iommu; 456 bool has_iommu;
446 bool auto_boot; 457 bool auto_boot;
447}; 458};
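
With the firmware handlers folded into struct rproc_ops, a platform driver supplies its loader callbacks in the same table as start/stop/kick instead of a separate rproc_fw_ops. A minimal sketch under stated assumptions: "my_rproc" and every callback body below are hypothetical and only illustrate the new layout; a real driver also wires up .kick and typically reuses the generic ELF loader helpers.

#include <linux/firmware.h>
#include <linux/remoteproc.h>

static int my_rproc_start(struct rproc *rproc)
{
        /* release the remote core from reset */
        return 0;
}

static int my_rproc_stop(struct rproc *rproc)
{
        /* put the remote core back into reset */
        return 0;
}

static int my_rproc_load(struct rproc *rproc, const struct firmware *fw)
{
        /* copy fw->data into the memory the remote core boots from */
        return 0;
}

static u32 my_rproc_get_boot_addr(struct rproc *rproc,
                                  const struct firmware *fw)
{
        /* entry point parsed from the firmware image */
        return 0;
}

static const struct rproc_ops my_rproc_ops = {
        .start          = my_rproc_start,
        .stop           = my_rproc_stop,
        .load           = my_rproc_load,
        .get_boot_addr  = my_rproc_get_boot_addr,
};
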
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 21fc84d82d41..02166e815afb 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -167,6 +167,29 @@ reservation_object_lock(struct reservation_object *obj,
167} 167}
168 168
169/** 169/**
170 * reservation_object_lock_interruptible - lock the reservation object
171 * @obj: the reservation object
172 * @ctx: the locking context
173 *
174 * Locks the reservation object interruptibly for exclusive access and
175 * modification. Note that the lock is only against other writers; readers
176 * will run concurrently with a writer under RCU. The seqlock is used to
177 * notify readers if they overlap with a writer.
178 *
179 * As the reservation object may be locked by multiple parties in an
180 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
181 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
182 * object may be locked by itself by passing NULL as @ctx.
183 */
184static inline int
185reservation_object_lock_interruptible(struct reservation_object *obj,
186 struct ww_acquire_ctx *ctx)
187{
188 return ww_mutex_lock_interruptible(&obj->lock, ctx);
189}
190
191
192/**
170 * reservation_object_trylock - trylock the reservation object 193 * reservation_object_trylock - trylock the reservation object
171 * @obj: the reservation object 194 * @obj: the reservation object
172 * 195 *
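
A hedged usage sketch for the new interruptible variant: lock a single reservation object with no wait/wound context (NULL @ctx), bail out on a signal, and drop the lock with the existing reservation_object_unlock() helper. The function name below is hypothetical.

#include <linux/reservation.h>

static int example_touch_fences(struct reservation_object *resv)
{
        int ret;

        /* Returns -EINTR if interrupted; -EDEADLK only arises with a ctx. */
        ret = reservation_object_lock_interruptible(resv, NULL);
        if (ret)
                return ret;

        /* ... add or inspect fences while holding the ww_mutex ... */

        reservation_object_unlock(resv);
        return 0;
}
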
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 4c7871ddf3c6..09732c36f351 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -2,8 +2,10 @@
2#ifndef _LINUX_RESET_H_ 2#ifndef _LINUX_RESET_H_
3#define _LINUX_RESET_H_ 3#define _LINUX_RESET_H_
4 4
5#include <linux/device.h> 5#include <linux/types.h>
6 6
7struct device;
8struct device_node;
7struct reset_control; 9struct reset_control;
8 10
9#ifdef CONFIG_RESET_CONTROLLER 11#ifdef CONFIG_RESET_CONTROLLER
@@ -20,22 +22,16 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id,
20 int index, bool shared, 22 int index, bool shared,
21 bool optional); 23 bool optional);
22void reset_control_put(struct reset_control *rstc); 24void reset_control_put(struct reset_control *rstc);
25int __device_reset(struct device *dev, bool optional);
23struct reset_control *__devm_reset_control_get(struct device *dev, 26struct reset_control *__devm_reset_control_get(struct device *dev,
24 const char *id, int index, bool shared, 27 const char *id, int index, bool shared,
25 bool optional); 28 bool optional);
26 29
27int __must_check device_reset(struct device *dev);
28
29struct reset_control *devm_reset_control_array_get(struct device *dev, 30struct reset_control *devm_reset_control_array_get(struct device *dev,
30 bool shared, bool optional); 31 bool shared, bool optional);
31struct reset_control *of_reset_control_array_get(struct device_node *np, 32struct reset_control *of_reset_control_array_get(struct device_node *np,
32 bool shared, bool optional); 33 bool shared, bool optional);
33 34
34static inline int device_reset_optional(struct device *dev)
35{
36 return device_reset(dev);
37}
38
39#else 35#else
40 36
41static inline int reset_control_reset(struct reset_control *rstc) 37static inline int reset_control_reset(struct reset_control *rstc)
@@ -62,15 +58,9 @@ static inline void reset_control_put(struct reset_control *rstc)
62{ 58{
63} 59}
64 60
65static inline int __must_check device_reset(struct device *dev) 61static inline int __device_reset(struct device *dev, bool optional)
66{
67 WARN_ON(1);
68 return -ENOTSUPP;
69}
70
71static inline int device_reset_optional(struct device *dev)
72{ 62{
73 return -ENOTSUPP; 63 return optional ? 0 : -ENOTSUPP;
74} 64}
75 65
76static inline struct reset_control *__of_reset_control_get( 66static inline struct reset_control *__of_reset_control_get(
@@ -109,6 +99,16 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
109 99
110#endif /* CONFIG_RESET_CONTROLLER */ 100#endif /* CONFIG_RESET_CONTROLLER */
111 101
102static inline int __must_check device_reset(struct device *dev)
103{
104 return __device_reset(dev, false);
105}
106
107static inline int device_reset_optional(struct device *dev)
108{
109 return __device_reset(dev, true);
110}
111
112/** 112/**
113 * reset_control_get_exclusive - Lookup and obtain an exclusive reference 113 * reset_control_get_exclusive - Lookup and obtain an exclusive reference
114 * to a reset controller. 114 * to a reset controller.
@@ -127,9 +127,6 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
127static inline struct reset_control * 127static inline struct reset_control *
128__must_check reset_control_get_exclusive(struct device *dev, const char *id) 128__must_check reset_control_get_exclusive(struct device *dev, const char *id)
129{ 129{
130#ifndef CONFIG_RESET_CONTROLLER
131 WARN_ON(1);
132#endif
133 return __reset_control_get(dev, id, 0, false, false); 130 return __reset_control_get(dev, id, 0, false, false);
134} 131}
135 132
@@ -275,9 +272,6 @@ static inline struct reset_control *
275__must_check devm_reset_control_get_exclusive(struct device *dev, 272__must_check devm_reset_control_get_exclusive(struct device *dev,
276 const char *id) 273 const char *id)
277{ 274{
278#ifndef CONFIG_RESET_CONTROLLER
279 WARN_ON(1);
280#endif
281 return __devm_reset_control_get(dev, id, 0, false, false); 275 return __devm_reset_control_get(dev, id, 0, false, false);
282} 276}
283 277
@@ -350,18 +344,6 @@ devm_reset_control_get_shared_by_index(struct device *dev, int index)
350 * These inline function calls will be removed once all consumers 344 * These inline function calls will be removed once all consumers
351 * have been moved over to the new explicit API. 345 * have been moved over to the new explicit API.
352 */ 346 */
353static inline struct reset_control *reset_control_get(
354 struct device *dev, const char *id)
355{
356 return reset_control_get_exclusive(dev, id);
357}
358
359static inline struct reset_control *reset_control_get_optional(
360 struct device *dev, const char *id)
361{
362 return reset_control_get_optional_exclusive(dev, id);
363}
364
365static inline struct reset_control *of_reset_control_get( 347static inline struct reset_control *of_reset_control_get(
366 struct device_node *node, const char *id) 348 struct device_node *node, const char *id)
367{ 349{
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 361c08e35dbc..c9df2527e0cd 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -207,6 +207,7 @@ struct rhashtable_iter {
207 struct rhashtable_walker walker; 207 struct rhashtable_walker walker;
208 unsigned int slot; 208 unsigned int slot;
209 unsigned int skip; 209 unsigned int skip;
210 bool end_of_table;
210}; 211};
211 212
212static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) 213static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
239 return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); 240 return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
240} 241}
241 242
242static inline unsigned int rht_key_hashfn( 243static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
243 struct rhashtable *ht, const struct bucket_table *tbl, 244 const void *key, const struct rhashtable_params params,
244 const void *key, const struct rhashtable_params params) 245 unsigned int hash_rnd)
245{ 246{
246 unsigned int hash; 247 unsigned int hash;
247 248
248 /* params must be equal to ht->p if it isn't constant. */ 249 /* params must be equal to ht->p if it isn't constant. */
249 if (!__builtin_constant_p(params.key_len)) 250 if (!__builtin_constant_p(params.key_len))
250 hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); 251 hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
251 else if (params.key_len) { 252 else if (params.key_len) {
252 unsigned int key_len = params.key_len; 253 unsigned int key_len = params.key_len;
253 254
254 if (params.hashfn) 255 if (params.hashfn)
255 hash = params.hashfn(key, key_len, tbl->hash_rnd); 256 hash = params.hashfn(key, key_len, hash_rnd);
256 else if (key_len & (sizeof(u32) - 1)) 257 else if (key_len & (sizeof(u32) - 1))
257 hash = jhash(key, key_len, tbl->hash_rnd); 258 hash = jhash(key, key_len, hash_rnd);
258 else 259 else
259 hash = jhash2(key, key_len / sizeof(u32), 260 hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
260 tbl->hash_rnd);
261 } else { 261 } else {
262 unsigned int key_len = ht->p.key_len; 262 unsigned int key_len = ht->p.key_len;
263 263
264 if (params.hashfn) 264 if (params.hashfn)
265 hash = params.hashfn(key, key_len, tbl->hash_rnd); 265 hash = params.hashfn(key, key_len, hash_rnd);
266 else 266 else
267 hash = jhash(key, key_len, tbl->hash_rnd); 267 hash = jhash(key, key_len, hash_rnd);
268 } 268 }
269 269
270 return hash;
271}
272
273static inline unsigned int rht_key_hashfn(
274 struct rhashtable *ht, const struct bucket_table *tbl,
275 const void *key, const struct rhashtable_params params)
276{
277 unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
278
270 return rht_bucket_index(tbl, hash); 279 return rht_bucket_index(tbl, hash);
271} 280}
272 281
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
378void rhashtable_walk_enter(struct rhashtable *ht, 387void rhashtable_walk_enter(struct rhashtable *ht,
379 struct rhashtable_iter *iter); 388 struct rhashtable_iter *iter);
380void rhashtable_walk_exit(struct rhashtable_iter *iter); 389void rhashtable_walk_exit(struct rhashtable_iter *iter);
381int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); 390int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
391
392static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
393{
394 (void)rhashtable_walk_start_check(iter);
395}
396
382void *rhashtable_walk_next(struct rhashtable_iter *iter); 397void *rhashtable_walk_next(struct rhashtable_iter *iter);
398void *rhashtable_walk_peek(struct rhashtable_iter *iter);
383void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); 399void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
384 400
385void rhashtable_free_and_destroy(struct rhashtable *ht, 401void rhashtable_free_and_destroy(struct rhashtable *ht,
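
The walker interface above splits the old start call in two: rhashtable_walk_start_check() still reports -EAGAIN when the table was resized between enter and start, while rhashtable_walk_start() discards that result, and rhashtable_walk_peek() looks at the next entry without advancing the iterator. A hedged sketch of a full walk over a table of hypothetical entries:

#include <linux/err.h>
#include <linux/rhashtable.h>

static void example_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        void *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);   /* the "don't care" wrapper */

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* table resized mid-walk */
                        break;
                }
                /* ... use obj ... */
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}
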
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 289e4d54e3e0..7d9eb39fa76a 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -96,7 +96,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
96}) 96})
97 97
98int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); 98int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
99int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, 99__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
100 struct file *filp, poll_table *poll_table); 100 struct file *filp, poll_table *poll_table);
101 101
102 102
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 10d6ae8bbb7d..ca07366c4c33 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -157,7 +157,7 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
157int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, 157int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
158 void *data, int len); 158 void *data, int len);
159 159
160unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, 160__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
161 poll_table *wait); 161 poll_table *wait);
162 162
163#else 163#else
@@ -258,7 +258,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
258 return -ENXIO; 258 return -ENXIO;
259} 259}
260 260
261static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, 261static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
262 struct file *filp, poll_table *wait) 262 struct file *filp, poll_table *wait)
263{ 263{
264 /* This shouldn't be possible */ 264 /* This shouldn't be possible */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 41319a2e409b..fc6c90b57be0 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -87,7 +87,6 @@ struct rtc_class_ops {
87 int (*set_offset)(struct device *, long offset); 87 int (*set_offset)(struct device *, long offset);
88}; 88};
89 89
90#define RTC_DEVICE_NAME_SIZE 20
91typedef struct rtc_task { 90typedef struct rtc_task {
92 void (*func)(void *private_data); 91 void (*func)(void *private_data);
93 void *private_data; 92 void *private_data;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2032ce2eb20b..1fdcde96eb65 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -19,10 +19,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
19 19
20void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); 20void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
21void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, 21void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
22 gfp_t flags, int *new_nsid); 22 gfp_t flags, int *new_nsid, int new_ifindex);
23struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, 23struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
24 unsigned change, u32 event, 24 unsigned change, u32 event,
25 gfp_t flags, int *new_nsid); 25 gfp_t flags, int *new_nsid,
26 int new_ifindex);
26void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, 27void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
27 gfp_t flags); 28 gfp_t flags);
28 29
@@ -70,8 +71,7 @@ static inline bool lockdep_rtnl_is_held(void)
70 * @p: The pointer to read, prior to dereferencing 71 * @p: The pointer to read, prior to dereferencing
71 * 72 *
72 * Return the value of the specified RCU-protected pointer, but omit 73 * Return the value of the specified RCU-protected pointer, but omit
73 * both the smp_read_barrier_depends() and the READ_ONCE(), because 74 * the READ_ONCE(), because caller holds RTNL.
74 * caller holds RTNL.
75 */ 75 */
76#define rtnl_dereference(p) \ 76#define rtnl_dereference(p) \
77 rcu_dereference_protected(p, lockdep_rtnl_is_held()) 77 rcu_dereference_protected(p, lockdep_rtnl_is_held())
@@ -97,13 +97,9 @@ void rtnetlink_init(void);
97void __rtnl_unlock(void); 97void __rtnl_unlock(void);
98void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); 98void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
99 99
100#define ASSERT_RTNL() do { \ 100#define ASSERT_RTNL() \
101 if (unlikely(!rtnl_is_locked())) { \ 101 WARN_ONCE(!rtnl_is_locked(), \
102 printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ 102 "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
103 __FILE__, __LINE__); \
104 dump_stack(); \
105 } \
106} while(0)
107 103
108extern int ndo_dflt_fdb_dump(struct sk_buff *skb, 104extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
109 struct netlink_callback *cb, 105 struct netlink_callback *cb,
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/rtsx_common.h
index 443176ee1ab0..443176ee1ab0 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/rtsx_common.h
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/rtsx_pci.h
index c3d3f04d8cc6..478acf6efac6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -24,7 +24,7 @@
24 24
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/mfd/rtsx_common.h> 27#include <linux/rtsx_common.h>
28 28
29#define MAX_RW_REG_CNT 1024 29#define MAX_RW_REG_CNT 1024
30 30
@@ -203,6 +203,7 @@
203#define SD_DDR_MODE 0x04 203#define SD_DDR_MODE 0x04
204#define SD_30_MODE 0x08 204#define SD_30_MODE 0x08
205#define SD_CLK_DIVIDE_MASK 0xC0 205#define SD_CLK_DIVIDE_MASK 0xC0
206#define SD_MODE_SELECT_MASK 0x0C
206#define SD_CFG2 0xFDA1 207#define SD_CFG2 0xFDA1
207#define SD_CALCULATE_CRC7 0x00 208#define SD_CALCULATE_CRC7 0x00
208#define SD_NO_CALCULATE_CRC7 0x80 209#define SD_NO_CALCULATE_CRC7 0x80
@@ -226,6 +227,7 @@
226#define SD_RSP_TYPE_R6 0x01 227#define SD_RSP_TYPE_R6 0x01
227#define SD_RSP_TYPE_R7 0x01 228#define SD_RSP_TYPE_R7 0x01
228#define SD_CFG3 0xFDA2 229#define SD_CFG3 0xFDA2
230#define SD30_CLK_END_EN 0x10
229#define SD_RSP_80CLK_TIMEOUT_EN 0x01 231#define SD_RSP_80CLK_TIMEOUT_EN 0x01
230 232
231#define SD_STAT1 0xFDA3 233#define SD_STAT1 0xFDA3
@@ -309,6 +311,12 @@
309 311
310#define SD_DATA_STATE 0xFDB6 312#define SD_DATA_STATE 0xFDB6
311#define SD_DATA_IDLE 0x80 313#define SD_DATA_IDLE 0x80
314#define REG_SD_STOP_SDCLK_CFG 0xFDB8
315#define SD30_CLK_STOP_CFG_EN 0x04
316#define SD30_CLK_STOP_CFG1 0x02
317#define SD30_CLK_STOP_CFG0 0x01
318#define REG_PRE_RW_MODE 0xFD70
319#define EN_INFINITE_MODE 0x01
312 320
313#define SRCTL 0xFC13 321#define SRCTL 0xFC13
314 322
@@ -434,6 +442,7 @@
434#define CARD_CLK_EN 0xFD69 442#define CARD_CLK_EN 0xFD69
435#define SD_CLK_EN 0x04 443#define SD_CLK_EN 0x04
436#define MS_CLK_EN 0x08 444#define MS_CLK_EN 0x08
445#define SD40_CLK_EN 0x10
437#define SDIO_CTRL 0xFD6B 446#define SDIO_CTRL 0xFD6B
438#define CD_PAD_CTL 0xFD73 447#define CD_PAD_CTL 0xFD73
439#define CD_DISABLE_MASK 0x07 448#define CD_DISABLE_MASK 0x07
@@ -453,8 +462,8 @@
453#define FPDCTL 0xFC00 462#define FPDCTL 0xFC00
454#define SSC_POWER_DOWN 0x01 463#define SSC_POWER_DOWN 0x01
455#define SD_OC_POWER_DOWN 0x02 464#define SD_OC_POWER_DOWN 0x02
456#define ALL_POWER_DOWN 0x07 465#define ALL_POWER_DOWN 0x03
457#define OC_POWER_DOWN 0x06 466#define OC_POWER_DOWN 0x02
458#define PDINFO 0xFC01 467#define PDINFO 0xFC01
459 468
460#define CLK_CTL 0xFC02 469#define CLK_CTL 0xFC02
@@ -490,6 +499,9 @@
490 499
491#define FPGA_PULL_CTL 0xFC1D 500#define FPGA_PULL_CTL 0xFC1D
492#define OLT_LED_CTL 0xFC1E 501#define OLT_LED_CTL 0xFC1E
502#define LED_SHINE_MASK 0x08
503#define LED_SHINE_EN 0x08
504#define LED_SHINE_DISABLE 0x00
493#define GPIO_CTL 0xFC1F 505#define GPIO_CTL 0xFC1F
494 506
495#define LDO_CTL 0xFC1E 507#define LDO_CTL 0xFC1E
@@ -511,7 +523,11 @@
511#define BPP_LDO_ON 0x00 523#define BPP_LDO_ON 0x00
512#define BPP_LDO_SUSPEND 0x02 524#define BPP_LDO_SUSPEND 0x02
513#define BPP_LDO_OFF 0x03 525#define BPP_LDO_OFF 0x03
526#define EFUSE_CTL 0xFC30
527#define EFUSE_ADD 0xFC31
514#define SYS_VER 0xFC32 528#define SYS_VER 0xFC32
529#define EFUSE_DATAL 0xFC34
530#define EFUSE_DATAH 0xFC35
515 531
516#define CARD_PULL_CTL1 0xFD60 532#define CARD_PULL_CTL1 0xFD60
517#define CARD_PULL_CTL2 0xFD61 533#define CARD_PULL_CTL2 0xFD61
@@ -553,6 +569,9 @@
553#define RBBC1 0xFE2F 569#define RBBC1 0xFE2F
554#define RBDAT 0xFE30 570#define RBDAT 0xFE30
555#define RBCTL 0xFE34 571#define RBCTL 0xFE34
572#define U_AUTO_DMA_EN_MASK 0x20
573#define U_AUTO_DMA_DISABLE 0x00
574#define RB_FLUSH 0x80
556#define CFGADDR0 0xFE35 575#define CFGADDR0 0xFE35
557#define CFGADDR1 0xFE36 576#define CFGADDR1 0xFE36
558#define CFGDATA0 0xFE37 577#define CFGDATA0 0xFE37
@@ -581,6 +600,8 @@
581#define LTR_LATENCY_MODE_HW 0 600#define LTR_LATENCY_MODE_HW 0
582#define LTR_LATENCY_MODE_SW BIT(6) 601#define LTR_LATENCY_MODE_SW BIT(6)
583#define OBFF_CFG 0xFE4C 602#define OBFF_CFG 0xFE4C
603#define OBFF_EN_MASK 0x03
604#define OBFF_DISABLE 0x00
584 605
585#define CDRESUMECTL 0xFE52 606#define CDRESUMECTL 0xFE52
586#define WAKE_SEL_CTL 0xFE54 607#define WAKE_SEL_CTL 0xFE54
@@ -595,6 +616,7 @@
595#define FORCE_ASPM_L0_EN 0x01 616#define FORCE_ASPM_L0_EN 0x01
596#define FORCE_ASPM_NO_ASPM 0x00 617#define FORCE_ASPM_NO_ASPM 0x00
597#define PM_CLK_FORCE_CTL 0xFE58 618#define PM_CLK_FORCE_CTL 0xFE58
619#define CLK_PM_EN 0x01
598#define FUNC_FORCE_CTL 0xFE59 620#define FUNC_FORCE_CTL 0xFE59
599#define FUNC_FORCE_UPME_XMT_DBG 0x02 621#define FUNC_FORCE_UPME_XMT_DBG 0x02
600#define PERST_GLITCH_WIDTH 0xFE5C 622#define PERST_GLITCH_WIDTH 0xFE5C
@@ -620,14 +642,23 @@
620#define LDO_PWR_SEL 0xFE78 642#define LDO_PWR_SEL 0xFE78
621 643
622#define L1SUB_CONFIG1 0xFE8D 644#define L1SUB_CONFIG1 0xFE8D
645#define AUX_CLK_ACTIVE_SEL_MASK 0x01
646#define MAC_CKSW_DONE 0x00
623#define L1SUB_CONFIG2 0xFE8E 647#define L1SUB_CONFIG2 0xFE8E
624#define L1SUB_AUTO_CFG 0x02 648#define L1SUB_AUTO_CFG 0x02
625#define L1SUB_CONFIG3 0xFE8F 649#define L1SUB_CONFIG3 0xFE8F
626#define L1OFF_MBIAS2_EN_5250 BIT(7) 650#define L1OFF_MBIAS2_EN_5250 BIT(7)
627 651
628#define DUMMY_REG_RESET_0 0xFE90 652#define DUMMY_REG_RESET_0 0xFE90
653#define IC_VERSION_MASK 0x0F
629 654
655#define REG_VREF 0xFE97
656#define PWD_SUSPND_EN 0x10
657#define RTS5260_DMA_RST_CTL_0 0xFEBF
658#define RTS5260_DMA_RST 0x80
659#define RTS5260_ADMA3_RST 0x40
630#define AUTOLOAD_CFG_BASE 0xFF00 660#define AUTOLOAD_CFG_BASE 0xFF00
661#define RELINK_TIME_MASK 0x01
631#define PETXCFG 0xFF03 662#define PETXCFG 0xFF03
632#define FORCE_CLKREQ_DELINK_MASK BIT(7) 663#define FORCE_CLKREQ_DELINK_MASK BIT(7)
633#define FORCE_CLKREQ_LOW 0x80 664#define FORCE_CLKREQ_LOW 0x80
@@ -667,15 +698,24 @@
667#define LDO_DV18_CFG 0xFF70 698#define LDO_DV18_CFG 0xFF70
668#define LDO_DV18_SR_MASK 0xC0 699#define LDO_DV18_SR_MASK 0xC0
669#define LDO_DV18_SR_DF 0x40 700#define LDO_DV18_SR_DF 0x40
701#define DV331812_MASK 0x70
702#define DV331812_33 0x70
703#define DV331812_17 0x30
670 704
671#define LDO_CONFIG2 0xFF71 705#define LDO_CONFIG2 0xFF71
672#define LDO_D3318_MASK 0x07 706#define LDO_D3318_MASK 0x07
673#define LDO_D3318_33V 0x07 707#define LDO_D3318_33V 0x07
674#define LDO_D3318_18V 0x02 708#define LDO_D3318_18V 0x02
709#define DV331812_VDD1 0x04
710#define DV331812_POWERON 0x08
711#define DV331812_POWEROFF 0x00
675 712
676#define LDO_VCC_CFG0 0xFF72 713#define LDO_VCC_CFG0 0xFF72
677#define LDO_VCC_LMTVTH_MASK 0x30 714#define LDO_VCC_LMTVTH_MASK 0x30
678#define LDO_VCC_LMTVTH_2A 0x10 715#define LDO_VCC_LMTVTH_2A 0x10
716/*RTS5260*/
717#define RTS5260_DVCC_TUNE_MASK 0x70
718#define RTS5260_DVCC_33 0x70
679 719
680#define LDO_VCC_CFG1 0xFF73 720#define LDO_VCC_CFG1 0xFF73
681#define LDO_VCC_REF_TUNE_MASK 0x30 721#define LDO_VCC_REF_TUNE_MASK 0x30
@@ -684,6 +724,10 @@
684#define LDO_VCC_1V8 0x04 724#define LDO_VCC_1V8 0x04
685#define LDO_VCC_3V3 0x07 725#define LDO_VCC_3V3 0x07
686#define LDO_VCC_LMT_EN 0x08 726#define LDO_VCC_LMT_EN 0x08
727/*RTS5260*/
728#define LDO_POW_SDVDD1_MASK 0x08
729#define LDO_POW_SDVDD1_ON 0x08
730#define LDO_POW_SDVDD1_OFF 0x00
687 731
688#define LDO_VIO_CFG 0xFF75 732#define LDO_VIO_CFG 0xFF75
689#define LDO_VIO_SR_MASK 0xC0 733#define LDO_VIO_SR_MASK 0xC0
@@ -711,6 +755,160 @@
711#define SD_VIO_LDO_1V8 0x40 755#define SD_VIO_LDO_1V8 0x40
712#define SD_VIO_LDO_3V3 0x70 756#define SD_VIO_LDO_3V3 0x70
713 757
758#define RTS5260_AUTOLOAD_CFG4 0xFF7F
759#define RTS5260_MIMO_DISABLE 0x8A
760
761#define RTS5260_REG_GPIO_CTL0 0xFC1A
762#define RTS5260_REG_GPIO_MASK 0x01
763#define RTS5260_REG_GPIO_ON 0x01
764#define RTS5260_REG_GPIO_OFF 0x00
765
766#define PWR_GLOBAL_CTRL 0xF200
767#define PCIE_L1_2_EN 0x0C
768#define PCIE_L1_1_EN 0x0A
769#define PCIE_L1_0_EN 0x09
770#define PWR_FE_CTL 0xF201
771#define PCIE_L1_2_PD_FE_EN 0x0C
772#define PCIE_L1_1_PD_FE_EN 0x0A
773#define PCIE_L1_0_PD_FE_EN 0x09
774#define CFG_PCIE_APHY_OFF_0 0xF204
775#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF
776#define CFG_PCIE_APHY_OFF_1 0xF205
777#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF
778#define CFG_PCIE_APHY_OFF_2 0xF206
779#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01
780#define CFG_PCIE_APHY_OFF_3 0xF207
781#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00
782#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C
783#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E
784#define CFG_L1_0_SYS_RET_VALUE 0xF210
785#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212
786#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214
787#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216
788#define CFG_LP_FPWM_VALUE 0xF219
789#define CFG_LP_FPWM_VALUE_DEFAULT 0x18
790#define PWC_CDR 0xF253
791#define PWC_CDR_DEFAULT 0x03
792#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B
793#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C
794
795/* OCPCTL */
796#define SD_DETECT_EN 0x08
797#define SD_OCP_INT_EN 0x04
798#define SD_OCP_INT_CLR 0x02
799#define SD_OC_CLR 0x01
800
801#define SDVIO_DETECT_EN (1 << 7)
802#define SDVIO_OCP_INT_EN (1 << 6)
803#define SDVIO_OCP_INT_CLR (1 << 5)
804#define SDVIO_OC_CLR (1 << 4)
805
806/* OCPSTAT */
807#define SD_OCP_DETECT 0x08
808#define SD_OC_NOW 0x04
809#define SD_OC_EVER 0x02
810
811#define SDVIO_OC_NOW (1 << 6)
812#define SDVIO_OC_EVER (1 << 5)
813
814#define REG_OCPCTL 0xFD6A
815#define REG_OCPSTAT 0xFD6E
816#define REG_OCPGLITCH 0xFD6C
817#define REG_OCPPARA1 0xFD6B
818#define REG_OCPPARA2 0xFD6D
819
820/* rts5260 DV3318 OCP-related registers */
821#define REG_DV3318_OCPCTL 0xFD89
822#define DV3318_OCP_TIME_MASK 0xF0
823#define DV3318_DETECT_EN 0x08
824#define DV3318_OCP_INT_EN 0x04
825#define DV3318_OCP_INT_CLR 0x02
826#define DV3318_OCP_CLR 0x01
827
828#define REG_DV3318_OCPSTAT 0xFD8A
829#define DV3318_OCP_GlITCH_TIME_MASK 0xF0
830#define DV3318_OCP_DETECT 0x08
831#define DV3318_OCP_NOW 0x04
832#define DV3318_OCP_EVER 0x02
833
834#define SD_OCP_GLITCH_MASK 0x0F
835
836/* OCPPARA1 */
837#define SDVIO_OCP_TIME_60 0x00
838#define SDVIO_OCP_TIME_100 0x10
839#define SDVIO_OCP_TIME_200 0x20
840#define SDVIO_OCP_TIME_400 0x30
841#define SDVIO_OCP_TIME_600 0x40
842#define SDVIO_OCP_TIME_800 0x50
843#define SDVIO_OCP_TIME_1100 0x60
844#define SDVIO_OCP_TIME_MASK 0x70
845
846#define SD_OCP_TIME_60 0x00
847#define SD_OCP_TIME_100 0x01
848#define SD_OCP_TIME_200 0x02
849#define SD_OCP_TIME_400 0x03
850#define SD_OCP_TIME_600 0x04
851#define SD_OCP_TIME_800 0x05
852#define SD_OCP_TIME_1100 0x06
853#define SD_OCP_TIME_MASK 0x07
854
855/* OCPPARA2 */
856#define SDVIO_OCP_THD_190 0x00
857#define SDVIO_OCP_THD_250 0x10
858#define SDVIO_OCP_THD_320 0x20
859#define SDVIO_OCP_THD_380 0x30
860#define SDVIO_OCP_THD_440 0x40
861#define SDVIO_OCP_THD_500 0x50
862#define SDVIO_OCP_THD_570 0x60
863#define SDVIO_OCP_THD_630 0x70
864#define SDVIO_OCP_THD_MASK 0x70
865
866#define SD_OCP_THD_450 0x00
867#define SD_OCP_THD_550 0x01
868#define SD_OCP_THD_650 0x02
869#define SD_OCP_THD_750 0x03
870#define SD_OCP_THD_850 0x04
871#define SD_OCP_THD_950 0x05
872#define SD_OCP_THD_1050 0x06
873#define SD_OCP_THD_1150 0x07
874#define SD_OCP_THD_MASK 0x07
875
876#define SDVIO_OCP_GLITCH_MASK 0xF0
877#define SDVIO_OCP_GLITCH_NONE 0x00
878#define SDVIO_OCP_GLITCH_50U 0x10
879#define SDVIO_OCP_GLITCH_100U 0x20
880#define SDVIO_OCP_GLITCH_200U 0x30
881#define SDVIO_OCP_GLITCH_600U 0x40
882#define SDVIO_OCP_GLITCH_800U 0x50
883#define SDVIO_OCP_GLITCH_1M 0x60
884#define SDVIO_OCP_GLITCH_2M 0x70
885#define SDVIO_OCP_GLITCH_3M 0x80
886#define SDVIO_OCP_GLITCH_4M 0x90
887#define SDVIO_OCP_GLIVCH_5M 0xA0
888#define SDVIO_OCP_GLITCH_6M 0xB0
889#define SDVIO_OCP_GLITCH_7M 0xC0
890#define SDVIO_OCP_GLITCH_8M 0xD0
891#define SDVIO_OCP_GLITCH_9M 0xE0
892#define SDVIO_OCP_GLITCH_10M 0xF0
893
894#define SD_OCP_GLITCH_MASK 0x0F
895#define SD_OCP_GLITCH_NONE 0x00
896#define SD_OCP_GLITCH_50U 0x01
897#define SD_OCP_GLITCH_100U 0x02
898#define SD_OCP_GLITCH_200U 0x03
899#define SD_OCP_GLITCH_600U 0x04
900#define SD_OCP_GLITCH_800U 0x05
901#define SD_OCP_GLITCH_1M 0x06
902#define SD_OCP_GLITCH_2M 0x07
903#define SD_OCP_GLITCH_3M 0x08
904#define SD_OCP_GLITCH_4M 0x09
905#define SD_OCP_GLIVCH_5M 0x0A
906#define SD_OCP_GLITCH_6M 0x0B
907#define SD_OCP_GLITCH_7M 0x0C
908#define SD_OCP_GLITCH_8M 0x0D
909#define SD_OCP_GLITCH_9M 0x0E
910#define SD_OCP_GLITCH_10M 0x0F
911
714/* Phy register */ 912/* Phy register */
715#define PHY_PCR 0x00 913#define PHY_PCR 0x00
716#define PHY_PCR_FORCE_CODE 0xB000 914#define PHY_PCR_FORCE_CODE 0xB000
@@ -857,6 +1055,7 @@
857 1055
858#define PCR_ASPM_SETTING_REG1 0x160 1056#define PCR_ASPM_SETTING_REG1 0x160
859#define PCR_ASPM_SETTING_REG2 0x168 1057#define PCR_ASPM_SETTING_REG2 0x168
1058#define PCR_ASPM_SETTING_5260 0x178
860 1059
861#define PCR_SETTING_REG1 0x724 1060#define PCR_SETTING_REG1 0x724
862#define PCR_SETTING_REG2 0x814 1061#define PCR_SETTING_REG2 0x814
@@ -890,6 +1089,7 @@ struct pcr_ops {
890 int (*conv_clk_and_div_n)(int clk, int dir); 1089 int (*conv_clk_and_div_n)(int clk, int dir);
891 void (*fetch_vendor_settings)(struct rtsx_pcr *pcr); 1090 void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
892 void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state); 1091 void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
1092 void (*stop_cmd)(struct rtsx_pcr *pcr);
893 1093
894 void (*set_aspm)(struct rtsx_pcr *pcr, bool enable); 1094 void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
895 int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency); 1095 int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
@@ -897,6 +1097,12 @@ struct pcr_ops {
897 void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active); 1097 void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
898 void (*full_on)(struct rtsx_pcr *pcr); 1098 void (*full_on)(struct rtsx_pcr *pcr);
899 void (*power_saving)(struct rtsx_pcr *pcr); 1099 void (*power_saving)(struct rtsx_pcr *pcr);
1100 void (*enable_ocp)(struct rtsx_pcr *pcr);
1101 void (*disable_ocp)(struct rtsx_pcr *pcr);
1102 void (*init_ocp)(struct rtsx_pcr *pcr);
1103 void (*process_ocp)(struct rtsx_pcr *pcr);
1104 int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
1105 void (*clear_ocpstat)(struct rtsx_pcr *pcr);
900}; 1106};
901 1107
902enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; 1108enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -935,6 +1141,9 @@ enum dev_aspm_mode {
935 * @l1_snooze_delay: l1 snooze delay 1141 * @l1_snooze_delay: l1 snooze delay
936 * @ltr_l1off_sspwrgate: ltr l1off sspwrgate 1142 * @ltr_l1off_sspwrgate: ltr l1off sspwrgate
937 * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate 1143 * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
1144 * @ocp_en: enable ocp flag
1145 * @sd_400mA_ocp_thd: 400mA ocp thd
1146 * @sd_800mA_ocp_thd: 800mA ocp thd
938 */ 1147 */
939struct rtsx_cr_option { 1148struct rtsx_cr_option {
940 u32 dev_flags; 1149 u32 dev_flags;
@@ -949,6 +1158,19 @@ struct rtsx_cr_option {
949 u32 l1_snooze_delay; 1158 u32 l1_snooze_delay;
950 u8 ltr_l1off_sspwrgate; 1159 u8 ltr_l1off_sspwrgate;
951 u8 ltr_l1off_snooze_sspwrgate; 1160 u8 ltr_l1off_snooze_sspwrgate;
1161 bool ocp_en;
1162 u8 sd_400mA_ocp_thd;
1163 u8 sd_800mA_ocp_thd;
1164};
1165
1166/*
1167 * struct rtsx_hw_param - card reader hardware param
1168 * @interrupt_en: indicates which interrupts are enabled
1169 * @ocp_glitch: ocp glitch time
1170 */
1171struct rtsx_hw_param {
1172 u32 interrupt_en;
1173 u8 ocp_glitch;
952}; 1174};
953 1175
954#define rtsx_set_dev_flag(cr, flag) \ 1176#define rtsx_set_dev_flag(cr, flag) \
@@ -963,6 +1185,7 @@ struct rtsx_pcr {
963 unsigned int id; 1185 unsigned int id;
964 int pcie_cap; 1186 int pcie_cap;
965 struct rtsx_cr_option option; 1187 struct rtsx_cr_option option;
1188 struct rtsx_hw_param hw_param;
966 1189
967 /* pci resources */ 1190 /* pci resources */
968 unsigned long addr; 1191 unsigned long addr;
@@ -1042,12 +1265,15 @@ struct rtsx_pcr {
1042 struct rtsx_slot *slots; 1265 struct rtsx_slot *slots;
1043 1266
1044 u8 dma_error_count; 1267 u8 dma_error_count;
1268 u8 ocp_stat;
1269 u8 ocp_stat2;
1045}; 1270};
1046 1271
1047#define PID_524A 0x524A 1272#define PID_524A 0x524A
1048#define PID_5249 0x5249 1273#define PID_5249 0x5249
1049#define PID_5250 0x5250 1274#define PID_5250 0x5250
1050#define PID_525A 0x525A 1275#define PID_525A 0x525A
1276#define PID_5260 0x5260
1051 1277
1052#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) 1278#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
1053#define PCI_VID(pcr) ((pcr)->pci->vendor) 1279#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/rtsx_usb.h
index c446e4fd6b5c..c446e4fd6b5c 100644
--- a/include/linux/mfd/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b7c83254c566..22b2131bcdcd 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -276,6 +276,17 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
276 unsigned int n_pages, unsigned int offset, 276 unsigned int n_pages, unsigned int offset,
277 unsigned long size, gfp_t gfp_mask); 277 unsigned long size, gfp_t gfp_mask);
278 278
279#ifdef CONFIG_SGL_ALLOC
280struct scatterlist *sgl_alloc_order(unsigned long long length,
281 unsigned int order, bool chainable,
282 gfp_t gfp, unsigned int *nent_p);
283struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
284 unsigned int *nent_p);
285void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
286void sgl_free_order(struct scatterlist *sgl, int order);
287void sgl_free(struct scatterlist *sgl);
288#endif /* CONFIG_SGL_ALLOC */
289
279size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, 290size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
280 size_t buflen, off_t skip, bool to_buffer); 291 size_t buflen, off_t skip, bool to_buffer);
281 292
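
The sgl_alloc()/sgl_free() helpers above allocate a scatterlist together with backing pages and free both again; they are only built when CONFIG_SGL_ALLOC is selected. A hedged sketch (function names hypothetical, error handling trimmed):

#include <linux/gfp.h>
#include <linux/scatterlist.h>

static struct scatterlist *example_make_sgl(unsigned long long bytes,
                                            unsigned int *nents)
{
        /* Order-0 pages, GFP_KERNEL; returns NULL on allocation failure. */
        return sgl_alloc(bytes, GFP_KERNEL, nents);
}

static void example_drop_sgl(struct scatterlist *sgl)
{
        sgl_free(sgl);  /* releases the pages and the scatterlist itself */
}
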
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68a504f6e474..b161ef8a902e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -472,11 +472,15 @@ struct sched_dl_entity {
472 * has not been executed yet. This flag is useful to avoid race 472 * has not been executed yet. This flag is useful to avoid race
473 * conditions between the inactive timer handler and the wakeup 473 * conditions between the inactive timer handler and the wakeup
474 * code. 474 * code.
475 *
476 * @dl_overrun tells if the task asked to be informed about runtime
477 * overruns.
475 */ 478 */
476 unsigned int dl_throttled : 1; 479 unsigned int dl_throttled : 1;
477 unsigned int dl_boosted : 1; 480 unsigned int dl_boosted : 1;
478 unsigned int dl_yielded : 1; 481 unsigned int dl_yielded : 1;
479 unsigned int dl_non_contending : 1; 482 unsigned int dl_non_contending : 1;
483 unsigned int dl_overrun : 1;
480 484
481 /* 485 /*
482 * Bandwidth enforcement timer. Each -deadline task has its 486 * Bandwidth enforcement timer. Each -deadline task has its
@@ -551,6 +555,14 @@ struct task_struct {
551 unsigned long wakee_flip_decay_ts; 555 unsigned long wakee_flip_decay_ts;
552 struct task_struct *last_wakee; 556 struct task_struct *last_wakee;
553 557
558 /*
559 * recent_used_cpu is initially set as the last CPU used by a task
560 * that wakes affine another task. Waker/wakee relationships can
561 * push tasks around a CPU where each wakeup moves to the next one.
562 * Tracking a recently used CPU allows a quick search for a recently
563 * used CPU that may be idle.
564 */
565 int recent_used_cpu;
554 int wake_cpu; 566 int wake_cpu;
555#endif 567#endif
556 int on_rq; 568 int on_rq;
@@ -1427,6 +1439,7 @@ extern int idle_cpu(int cpu);
1427extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); 1439extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1428extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); 1440extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1429extern int sched_setattr(struct task_struct *, const struct sched_attr *); 1441extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1442extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1430extern struct task_struct *idle_task(int cpu); 1443extern struct task_struct *idle_task(int cpu);
1431 1444
1432/** 1445/**
@@ -1484,6 +1497,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task)
1484extern struct task_struct *find_task_by_vpid(pid_t nr); 1497extern struct task_struct *find_task_by_vpid(pid_t nr);
1485extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); 1498extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1486 1499
1500/*
1501 * find a task by its virtual pid and get the task struct
1502 */
1503extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1504
1487extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1505extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1488extern int wake_up_process(struct task_struct *tsk); 1506extern int wake_up_process(struct task_struct *tsk);
1489extern void wake_up_new_task(struct task_struct *tsk); 1507extern void wake_up_new_task(struct task_struct *tsk);
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index d1ad3d825561..0b55834efd46 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -12,8 +12,6 @@
12#define SCHED_CPUFREQ_DL (1U << 1) 12#define SCHED_CPUFREQ_DL (1U << 1)
13#define SCHED_CPUFREQ_IOWAIT (1U << 2) 13#define SCHED_CPUFREQ_IOWAIT (1U << 2)
14 14
15#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
16
17#ifdef CONFIG_CPU_FREQ 15#ifdef CONFIG_CPU_FREQ
18struct update_util_data { 16struct update_util_data {
19 void (*func)(struct update_util_data *data, u64 time, unsigned int flags); 17 void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3d49b91b674d..1149533aa2fa 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -7,11 +7,12 @@
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/mm_types.h> 8#include <linux/mm_types.h>
9#include <linux/gfp.h> 9#include <linux/gfp.h>
10#include <linux/sync_core.h>
10 11
11/* 12/*
12 * Routines for handling mm_structs 13 * Routines for handling mm_structs
13 */ 14 */
14extern struct mm_struct * mm_alloc(void); 15extern struct mm_struct *mm_alloc(void);
15 16
16/** 17/**
17 * mmgrab() - Pin a &struct mm_struct. 18 * mmgrab() - Pin a &struct mm_struct.
@@ -35,27 +36,7 @@ static inline void mmgrab(struct mm_struct *mm)
35 atomic_inc(&mm->mm_count); 36 atomic_inc(&mm->mm_count);
36} 37}
37 38
38/* mmdrop drops the mm and the page tables */ 39extern void mmdrop(struct mm_struct *mm);
39extern void __mmdrop(struct mm_struct *);
40static inline void mmdrop(struct mm_struct *mm)
41{
42 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
43 __mmdrop(mm);
44}
45
46static inline void mmdrop_async_fn(struct work_struct *work)
47{
48 struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
49 __mmdrop(mm);
50}
51
52static inline void mmdrop_async(struct mm_struct *mm)
53{
54 if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
55 INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
56 schedule_work(&mm->async_put_work);
57 }
58}
59 40
60/** 41/**
61 * mmget() - Pin the address space associated with a &struct mm_struct. 42 * mmget() - Pin the address space associated with a &struct mm_struct.
@@ -214,18 +195,48 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
214 195
215#ifdef CONFIG_MEMBARRIER 196#ifdef CONFIG_MEMBARRIER
216enum { 197enum {
217 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), 198 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
218 MEMBARRIER_STATE_SWITCH_MM = (1U << 1), 199 MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
200 MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
201 MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
202 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
203 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
204};
205
206enum {
207 MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
219}; 208};
220 209
210#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
211#include <asm/membarrier.h>
212#endif
213
214static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
215{
216 if (likely(!(atomic_read(&mm->membarrier_state) &
217 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
218 return;
219 sync_core_before_usermode();
220}
221
221static inline void membarrier_execve(struct task_struct *t) 222static inline void membarrier_execve(struct task_struct *t)
222{ 223{
223 atomic_set(&t->mm->membarrier_state, 0); 224 atomic_set(&t->mm->membarrier_state, 0);
224} 225}
225#else 226#else
227#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
228static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
229 struct mm_struct *next,
230 struct task_struct *tsk)
231{
232}
233#endif
226static inline void membarrier_execve(struct task_struct *t) 234static inline void membarrier_execve(struct task_struct *t)
227{ 235{
228} 236}
237static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
238{
239}
229#endif 240#endif
230 241
231#endif /* _LINUX_SCHED_MM_H */ 242#endif /* _LINUX_SCHED_MM_H */
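
mmdrop() becomes a plain out-of-line function here, but its pairing with mmgrab() is unchanged: grab pins the mm_struct itself (use mmget() to pin the address space), drop releases it. A hedged sketch (function names hypothetical):

#include <linux/sched/mm.h>

static struct mm_struct *example_pin_mm(struct mm_struct *mm)
{
        mmgrab(mm);     /* pin the mm_struct; does not pin the address space */
        return mm;
}

static void example_unpin_mm(struct mm_struct *mm)
{
        mmdrop(mm);     /* frees the mm_struct once the last reference is gone */
}
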
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0aa4548fb492..23b4f9cb82db 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -285,6 +285,34 @@ static inline void kernel_signal_stop(void)
285 285
286 schedule(); 286 schedule();
287} 287}
288#ifdef __ARCH_SI_TRAPNO
289# define ___ARCH_SI_TRAPNO(_a1) , _a1
290#else
291# define ___ARCH_SI_TRAPNO(_a1)
292#endif
293#ifdef __ia64__
294# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
295#else
296# define ___ARCH_SI_IA64(_a1, _a2, _a3)
297#endif
298
299int force_sig_fault(int sig, int code, void __user *addr
300 ___ARCH_SI_TRAPNO(int trapno)
301 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
302 , struct task_struct *t);
303int send_sig_fault(int sig, int code, void __user *addr
304 ___ARCH_SI_TRAPNO(int trapno)
305 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
306 , struct task_struct *t);
307
308int force_sig_mceerr(int code, void __user *, short, struct task_struct *);
309int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
310
311int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
312int force_sig_pkuerr(void __user *addr, u32 pkey);
313
314int force_sig_ptrace_errno_trap(int errno, void __user *addr);
315
288extern int send_sig_info(int, struct siginfo *, struct task_struct *); 316extern int send_sig_info(int, struct siginfo *, struct task_struct *);
289extern int force_sigsegv(int, struct task_struct *); 317extern int force_sigsegv(int, struct task_struct *);
290extern int force_sig_info(int, struct siginfo *, struct task_struct *); 318extern int force_sig_info(int, struct siginfo *, struct task_struct *);
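
The force_sig_fault()/send_sig_fault() helpers fill in a fault-style siginfo for architecture code. A hedged sketch for an architecture that defines neither __ARCH_SI_TRAPNO nor __ia64__, so the extra macro parameters expand to nothing (function name hypothetical):

#include <linux/sched/signal.h>
#include <linux/signal.h>

static void example_report_fault(unsigned long bad_addr)
{
        /* SIGSEGV/SEGV_MAPERR at @bad_addr, delivered to the current task. */
        force_sig_fault(SIGSEGV, SEGV_MAPERR,
                        (void __user *)bad_addr, current);
}
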
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 05b8650f06f5..5be31eb7b266 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly;
104# define arch_task_struct_size (sizeof(struct task_struct)) 104# define arch_task_struct_size (sizeof(struct task_struct))
105#endif 105#endif
106 106
107#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
108/*
109 * If an architecture has not declared a thread_struct whitelist we
110 * must assume something there may need to be copied to userspace.
111 */
112static inline void arch_thread_struct_whitelist(unsigned long *offset,
113 unsigned long *size)
114{
115 *offset = 0;
116 /* Handle dynamically sized thread_struct. */
117 *size = arch_task_struct_size - offsetof(struct task_struct, thread);
118}
119#endif
120
107#ifdef CONFIG_VMAP_STACK 121#ifdef CONFIG_VMAP_STACK
108static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) 122static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
109{ 123{
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index cb4828aaa34f..6a841929073f 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -78,7 +78,7 @@ static inline void put_task_stack(struct task_struct *tsk) {}
78#define task_stack_end_corrupted(task) \ 78#define task_stack_end_corrupted(task) \
79 (*(end_of_stack(task)) != STACK_END_MAGIC) 79 (*(end_of_stack(task)) != STACK_END_MAGIC)
80 80
81static inline int object_is_on_stack(void *obj) 81static inline int object_is_on_stack(const void *obj)
82{ 82{
83 void *stack = task_stack_page(current); 83 void *stack = task_stack_page(current);
84 84
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index cf257c2e728d..26347741ba50 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -7,6 +7,12 @@
7#include <linux/sched/idle.h> 7#include <linux/sched/idle.h>
8 8
9/* 9/*
10 * Increase resolution of cpu_capacity calculations
11 */
12#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
13#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
14
15/*
10 * sched-domains (multiprocessor balancing) declarations: 16 * sched-domains (multiprocessor balancing) declarations:
11 */ 17 */
12#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
@@ -27,12 +33,6 @@
27#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ 33#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
28#define SD_NUMA 0x4000 /* cross-node balancing */ 34#define SD_NUMA 0x4000 /* cross-node balancing */
29 35
30/*
31 * Increase resolution of cpu_capacity calculations
32 */
33#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
34#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
35
36#ifdef CONFIG_SCHED_SMT 36#ifdef CONFIG_SCHED_SMT
37static inline int cpu_smt_flags(void) 37static inline int cpu_smt_flags(void)
38{ 38{
diff --git a/include/linux/scif.h b/include/linux/scif.h
index 49a35d6edc94..7046111b8d0a 100644
--- a/include/linux/scif.h
+++ b/include/linux/scif.h
@@ -123,8 +123,8 @@ struct scif_range {
123 */ 123 */
124struct scif_pollepd { 124struct scif_pollepd {
125 scif_epd_t epd; 125 scif_epd_t epd;
126 short events; 126 __poll_t events;
127 short revents; 127 __poll_t revents;
128}; 128};
129 129
130/** 130/**
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index da803dfc7a39..b36c76635f18 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -102,11 +102,15 @@ enum sctp_cid {
102 /* AUTH Extension Section 4.1 */ 102 /* AUTH Extension Section 4.1 */
103 SCTP_CID_AUTH = 0x0F, 103 SCTP_CID_AUTH = 0x0F,
104 104
105 /* sctp ndata 5.1. I-DATA */
106 SCTP_CID_I_DATA = 0x40,
107
105 /* PR-SCTP Sec 3.2 */ 108 /* PR-SCTP Sec 3.2 */
106 SCTP_CID_FWD_TSN = 0xC0, 109 SCTP_CID_FWD_TSN = 0xC0,
107 110
108 /* Use hex, as defined in ADDIP sec. 3.1 */ 111 /* Use hex, as defined in ADDIP sec. 3.1 */
109 SCTP_CID_ASCONF = 0xC1, 112 SCTP_CID_ASCONF = 0xC1,
113 SCTP_CID_I_FWD_TSN = 0xC2,
110 SCTP_CID_ASCONF_ACK = 0x80, 114 SCTP_CID_ASCONF_ACK = 0x80,
111 SCTP_CID_RECONF = 0x82, 115 SCTP_CID_RECONF = 0x82,
112}; /* enum */ 116}; /* enum */
@@ -240,6 +244,23 @@ struct sctp_data_chunk {
240 struct sctp_datahdr data_hdr; 244 struct sctp_datahdr data_hdr;
241}; 245};
242 246
247struct sctp_idatahdr {
248 __be32 tsn;
249 __be16 stream;
250 __be16 reserved;
251 __be32 mid;
252 union {
253 __u32 ppid;
254 __be32 fsn;
255 };
256 __u8 payload[0];
257};
258
259struct sctp_idata_chunk {
260 struct sctp_chunkhdr chunk_hdr;
261 struct sctp_idatahdr data_hdr;
262};
263
243/* DATA Chunk Specific Flags */ 264/* DATA Chunk Specific Flags */
244enum { 265enum {
245 SCTP_DATA_MIDDLE_FRAG = 0x00, 266 SCTP_DATA_MIDDLE_FRAG = 0x00,
@@ -596,6 +617,22 @@ struct sctp_fwdtsn_chunk {
596 struct sctp_fwdtsn_hdr fwdtsn_hdr; 617 struct sctp_fwdtsn_hdr fwdtsn_hdr;
597}; 618};
598 619
620struct sctp_ifwdtsn_skip {
621 __be16 stream;
622 __u8 reserved;
623 __u8 flags;
624 __be32 mid;
625};
626
627struct sctp_ifwdtsn_hdr {
628 __be32 new_cum_tsn;
629 struct sctp_ifwdtsn_skip skip[0];
630};
631
632struct sctp_ifwdtsn_chunk {
633 struct sctp_chunkhdr chunk_hdr;
634 struct sctp_ifwdtsn_hdr fwdtsn_hdr;
635};
599 636
600/* ADDIP 637/* ADDIP
601 * Section 3.1.1 Address Configuration Change Chunk (ASCONF) 638 * Section 3.1.1 Address Configuration Change Chunk (ASCONF)
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 10f25f7e4304..c723a5c4e3ff 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -95,11 +95,19 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
95#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) 95#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
96extern long seccomp_get_filter(struct task_struct *task, 96extern long seccomp_get_filter(struct task_struct *task,
97 unsigned long filter_off, void __user *data); 97 unsigned long filter_off, void __user *data);
98extern long seccomp_get_metadata(struct task_struct *task,
99 unsigned long filter_off, void __user *data);
98#else 100#else
99static inline long seccomp_get_filter(struct task_struct *task, 101static inline long seccomp_get_filter(struct task_struct *task,
100 unsigned long n, void __user *data) 102 unsigned long n, void __user *data)
101{ 103{
102 return -EINVAL; 104 return -EINVAL;
103} 105}
106static inline long seccomp_get_metadata(struct task_struct *task,
107 unsigned long filter_off,
108 void __user *data)
109{
110 return -EINVAL;
111}
104#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ 112#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
105#endif /* _LINUX_SECCOMP_H */ 113#endif /* _LINUX_SECCOMP_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 09c6e28746f9..ab437dd2e3b9 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -140,6 +140,20 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
140int seq_open_private(struct file *, const struct seq_operations *, int); 140int seq_open_private(struct file *, const struct seq_operations *, int);
141int seq_release_private(struct inode *, struct file *); 141int seq_release_private(struct inode *, struct file *);
142 142
143#define DEFINE_SHOW_ATTRIBUTE(__name) \
144static int __name ## _open(struct inode *inode, struct file *file) \
145{ \
146 return single_open(file, __name ## _show, inode->i_private); \
147} \
148 \
149static const struct file_operations __name ## _fops = { \
150 .owner = THIS_MODULE, \
151 .open = __name ## _open, \
152 .read = seq_read, \
153 .llseek = seq_lseek, \
154 .release = single_release, \
155}
156
143static inline struct user_namespace *seq_user_ns(struct seq_file *seq) 157static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
144{ 158{
145#ifdef CONFIG_USER_NS 159#ifdef CONFIG_USER_NS
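
DEFINE_SHOW_ATTRIBUTE() generates the _open handler and file_operations for a read-only, single_open() based file from just a _show routine. A hedged debugfs sketch (all names hypothetical):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_stats_show(struct seq_file *s, void *unused)
{
        /* data passed to debugfs_create_file() is available as s->private */
        seq_puts(s, "example stats\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_stats);   /* emits example_stats_open/_fops */

static void example_register(struct dentry *dir, void *priv)
{
        debugfs_create_file("stats", 0444, dir, priv, &example_stats_fops);
}
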
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f189a8a3bbb8..bcf4cf26b8c8 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -278,9 +278,8 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
278 278
279static inline int raw_read_seqcount_latch(seqcount_t *s) 279static inline int raw_read_seqcount_latch(seqcount_t *s)
280{ 280{
281 int seq = READ_ONCE(s->sequence);
282 /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ 281 /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
283 smp_read_barrier_depends(); 282 int seq = READ_ONCE(s->sequence); /* ^^^ */
284 return seq; 283 return seq;
285} 284}
286 285
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index d609e6dc5bad..f153b2c7f0cd 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -27,8 +27,10 @@ struct serdev_device;
27 27
28/** 28/**
29 * struct serdev_device_ops - Callback operations for a serdev device 29 * struct serdev_device_ops - Callback operations for a serdev device
30 * @receive_buf: Function called with data received from device. 30 * @receive_buf: Function called with data received from device;
31 * @write_wakeup: Function called when ready to transmit more data. 31 * returns number of bytes accepted; may sleep.
32 * @write_wakeup: Function called when ready to transmit more data; must
33 * not sleep.
32 */ 34 */
33struct serdev_device_ops { 35struct serdev_device_ops {
34 int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); 36 int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
@@ -76,6 +78,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device
76 return container_of(d, struct serdev_device_driver, driver); 78 return container_of(d, struct serdev_device_driver, driver);
77} 79}
78 80
81enum serdev_parity {
82 SERDEV_PARITY_NONE,
83 SERDEV_PARITY_EVEN,
84 SERDEV_PARITY_ODD,
85};
86
79/* 87/*
80 * serdev controller structures 88 * serdev controller structures
81 */ 89 */
@@ -86,6 +94,7 @@ struct serdev_controller_ops {
86 int (*open)(struct serdev_controller *); 94 int (*open)(struct serdev_controller *);
87 void (*close)(struct serdev_controller *); 95 void (*close)(struct serdev_controller *);
88 void (*set_flow_control)(struct serdev_controller *, bool); 96 void (*set_flow_control)(struct serdev_controller *, bool);
97 int (*set_parity)(struct serdev_controller *, enum serdev_parity);
89 unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); 98 unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
90 void (*wait_until_sent)(struct serdev_controller *, long); 99 void (*wait_until_sent)(struct serdev_controller *, long);
91 int (*get_tiocm)(struct serdev_controller *); 100 int (*get_tiocm)(struct serdev_controller *);
@@ -193,6 +202,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
193 202
194int serdev_device_open(struct serdev_device *); 203int serdev_device_open(struct serdev_device *);
195void serdev_device_close(struct serdev_device *); 204void serdev_device_close(struct serdev_device *);
205int devm_serdev_device_open(struct device *, struct serdev_device *);
196unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); 206unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
197void serdev_device_set_flow_control(struct serdev_device *, bool); 207void serdev_device_set_flow_control(struct serdev_device *, bool);
198int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); 208int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
@@ -298,6 +308,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl
298 return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); 308 return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
299} 309}
300 310
311int serdev_device_set_parity(struct serdev_device *serdev,
312 enum serdev_parity parity);
313
301/* 314/*
302 * serdev hooks into TTY core 315 * serdev hooks into TTY core
303 */ 316 */
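
Together with the new devm_serdev_device_open() and serdev_device_set_parity(), a serdev client can configure its UART entirely from probe and rely on devres for the close. A hedged sketch of a client driver's probe (names hypothetical):

#include <linux/serdev.h>

static int example_serdev_probe(struct serdev_device *serdev)
{
        int ret;

        ret = devm_serdev_device_open(&serdev->dev, serdev);
        if (ret)
                return ret;

        serdev_device_set_baudrate(serdev, 115200);
        serdev_device_set_flow_control(serdev, false);

        /* Controllers without a set_parity hook report an error here. */
        return serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
}
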
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index aefd0e5115da..b32df49a3bd5 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -387,7 +387,7 @@ struct uart_port *uart_get_console(struct uart_port *ports, int nr,
387 struct console *c); 387 struct console *c);
388int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, 388int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
389 char **options); 389 char **options);
390void uart_parse_options(char *options, int *baud, int *parity, int *bits, 390void uart_parse_options(const char *options, int *baud, int *parity, int *bits,
391 int *flow); 391 int *flow);
392int uart_set_options(struct uart_port *port, struct console *co, int baud, 392int uart_set_options(struct uart_port *port, struct console *co, int baud,
393 int parity, int bits, int flow); 393 int parity, int bits, int flow);
@@ -501,9 +501,5 @@ static inline int uart_handle_break(struct uart_port *port)
501 (cflag) & CRTSCTS || \ 501 (cflag) & CRTSCTS || \
502 !((cflag) & CLOCAL)) 502 !((cflag) & CLOCAL))
503 503
504/* 504void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf);
505 * Common device tree parsing helpers
506 */
507void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf);
508
509#endif /* LINUX_SERIAL_CORE_H */ 505#endif /* LINUX_SERIAL_CORE_H */
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 4a906f560817..e724d5a3dd80 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/phy.h> 4#include <linux/phy.h>
5 5
6struct __packed sfp_eeprom_base { 6struct sfp_eeprom_base {
7 u8 phys_id; 7 u8 phys_id;
8 u8 phys_ext_id; 8 u8 phys_ext_id;
9 u8 connector; 9 u8 connector;
@@ -165,13 +165,47 @@ struct __packed sfp_eeprom_base {
165 char vendor_rev[4]; 165 char vendor_rev[4];
166 union { 166 union {
167 __be16 optical_wavelength; 167 __be16 optical_wavelength;
168 u8 cable_spec; 168 __be16 cable_compliance;
169 }; 169 struct {
170#if defined __BIG_ENDIAN_BITFIELD
171 u8 reserved60_2:6;
172 u8 fc_pi_4_app_h:1;
173 u8 sff8431_app_e:1;
174 u8 reserved61:8;
175#elif defined __LITTLE_ENDIAN_BITFIELD
176 u8 sff8431_app_e:1;
177 u8 fc_pi_4_app_h:1;
178 u8 reserved60_2:6;
179 u8 reserved61:8;
180#else
181#error Unknown Endian
182#endif
183 } __packed passive;
184 struct {
185#if defined __BIG_ENDIAN_BITFIELD
186 u8 reserved60_4:4;
187 u8 fc_pi_4_lim:1;
188 u8 sff8431_lim:1;
189 u8 fc_pi_4_app_h:1;
190 u8 sff8431_app_e:1;
191 u8 reserved61:8;
192#elif defined __LITTLE_ENDIAN_BITFIELD
193 u8 sff8431_app_e:1;
194 u8 fc_pi_4_app_h:1;
195 u8 sff8431_lim:1;
196 u8 fc_pi_4_lim:1;
197 u8 reserved60_4:4;
198 u8 reserved61:8;
199#else
200#error Unknown Endian
201#endif
202 } __packed active;
203 } __packed;
170 u8 reserved62; 204 u8 reserved62;
171 u8 cc_base; 205 u8 cc_base;
172}; 206} __packed;
173 207
174struct __packed sfp_eeprom_ext { 208struct sfp_eeprom_ext {
175 __be16 options; 209 __be16 options;
176 u8 br_max; 210 u8 br_max;
177 u8 br_min; 211 u8 br_min;
@@ -181,12 +215,21 @@ struct __packed sfp_eeprom_ext {
181 u8 enhopts; 215 u8 enhopts;
182 u8 sff8472_compliance; 216 u8 sff8472_compliance;
183 u8 cc_ext; 217 u8 cc_ext;
184}; 218} __packed;
185 219
186struct __packed sfp_eeprom_id { 220/**
221 * struct sfp_eeprom_id - raw SFP module identification information
222 * @base: base SFP module identification structure
223 * @ext: extended SFP module identification structure
224 *
225 * See the SFF-8472 specification and related documents for the definition
226 * of these structure members. This can be obtained from
227 * ftp://ftp.seagate.com/sff
228 */
229struct sfp_eeprom_id {
187 struct sfp_eeprom_base base; 230 struct sfp_eeprom_base base;
188 struct sfp_eeprom_ext ext; 231 struct sfp_eeprom_ext ext;
189}; 232} __packed;
190 233
191/* SFP EEPROM registers */ 234/* SFP EEPROM registers */
192enum { 235enum {
@@ -222,6 +265,7 @@ enum {
222 SFP_SFF8472_COMPLIANCE = 0x5e, 265 SFP_SFF8472_COMPLIANCE = 0x5e,
223 SFP_CC_EXT = 0x5f, 266 SFP_CC_EXT = 0x5f,
224 267
268 SFP_PHYS_ID_SFF = 0x02,
225 SFP_PHYS_ID_SFP = 0x03, 269 SFP_PHYS_ID_SFP = 0x03,
226 SFP_PHYS_EXT_ID_SFP = 0x04, 270 SFP_PHYS_EXT_ID_SFP = 0x04,
227 SFP_CONNECTOR_UNSPEC = 0x00, 271 SFP_CONNECTOR_UNSPEC = 0x00,
@@ -347,19 +391,32 @@ enum {
347 SFP_PAGE = 0x7f, 391 SFP_PAGE = 0x7f,
348}; 392};
349 393
350struct device_node; 394struct fwnode_handle;
351struct ethtool_eeprom; 395struct ethtool_eeprom;
352struct ethtool_modinfo; 396struct ethtool_modinfo;
353struct net_device; 397struct net_device;
354struct sfp_bus; 398struct sfp_bus;
355 399
400/**
401 * struct sfp_upstream_ops - upstream operations structure
402 * @module_insert: called after a module has been detected to determine
403 * whether the module is supported for the upstream device.
404 * @module_remove: called after the module has been removed.
405 * @link_down: called when the link is non-operational for whatever
406 * reason.
407 * @link_up: called when the link is operational.
408 * @connect_phy: called when an I2C accessible PHY has been detected
409 * on the module.
410 * @disconnect_phy: called when a module with an I2C accessible PHY has
411 * been removed.
412 */
356struct sfp_upstream_ops { 413struct sfp_upstream_ops {
357 int (*module_insert)(void *, const struct sfp_eeprom_id *id); 414 int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
358 void (*module_remove)(void *); 415 void (*module_remove)(void *priv);
359 void (*link_down)(void *); 416 void (*link_down)(void *priv);
360 void (*link_up)(void *); 417 void (*link_up)(void *priv);
361 int (*connect_phy)(void *, struct phy_device *); 418 int (*connect_phy)(void *priv, struct phy_device *);
362 void (*disconnect_phy)(void *); 419 void (*disconnect_phy)(void *priv);
363}; 420};
364 421
365#if IS_ENABLED(CONFIG_SFP) 422#if IS_ENABLED(CONFIG_SFP)
@@ -375,7 +432,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
375 u8 *data); 432 u8 *data);
376void sfp_upstream_start(struct sfp_bus *bus); 433void sfp_upstream_start(struct sfp_bus *bus);
377void sfp_upstream_stop(struct sfp_bus *bus); 434void sfp_upstream_stop(struct sfp_bus *bus);
378struct sfp_bus *sfp_register_upstream(struct device_node *np, 435struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
379 struct net_device *ndev, void *upstream, 436 struct net_device *ndev, void *upstream,
380 const struct sfp_upstream_ops *ops); 437 const struct sfp_upstream_ops *ops);
381void sfp_unregister_upstream(struct sfp_bus *bus); 438void sfp_unregister_upstream(struct sfp_bus *bus);
@@ -419,7 +476,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
419{ 476{
420} 477}
421 478
422static inline struct sfp_bus *sfp_register_upstream(struct device_node *np, 479static inline struct sfp_bus *sfp_register_upstream(
480 struct fwnode_handle *fwnode,
423 struct net_device *ndev, void *upstream, 481 struct net_device *ndev, void *upstream,
424 const struct sfp_upstream_ops *ops) 482 const struct sfp_upstream_ops *ops)
425{ 483{
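
A sketch of an upstream network driver adopting the fwnode-based registration; the demo_ names and the use of dev_fwnode() to obtain the fwnode are assumptions for illustration.

#include <linux/property.h>
#include <linux/sfp.h>

static int demo_module_insert(void *priv, const struct sfp_eeprom_id *id)
{
	/* Inspect id->base/id->ext here; return an error to reject the module. */
	return 0;
}

static const struct sfp_upstream_ops demo_sfp_ops = {
	.module_insert = demo_module_insert,
};

static struct sfp_bus *demo_attach_sfp(struct device *dev,
				       struct net_device *ndev, void *priv)
{
	/* The fwnode argument replaces the old device_node pointer. */
	return sfp_register_upstream(dev_fwnode(dev), ndev, priv, &demo_sfp_ops);
}
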
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 94081e9a5010..6dfda97a6c1a 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -5,12 +5,9 @@
5#include <linux/phy.h> 5#include <linux/phy.h>
6#include <linux/if_ether.h> 6#include <linux/if_ether.h>
7 7
8enum {EDMAC_LITTLE_ENDIAN};
9
10struct sh_eth_plat_data { 8struct sh_eth_plat_data {
11 int phy; 9 int phy;
12 int phy_irq; 10 int phy_irq;
13 int edmac_endian;
14 phy_interface_t phy_interface; 11 phy_interface_t phy_interface;
15 void (*set_mdio_gate)(void *addr); 12 void (*set_mdio_gate)(void *addr);
16 13
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 06b295bec00d..73b5e655a76e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -112,13 +112,11 @@ extern void shmem_uncharge(struct inode *inode, long pages);
112 112
113#ifdef CONFIG_TMPFS 113#ifdef CONFIG_TMPFS
114 114
115extern int shmem_add_seals(struct file *file, unsigned int seals); 115extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
116extern int shmem_get_seals(struct file *file);
117extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
118 116
119#else 117#else
120 118
121static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) 119static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
122{ 120{
123 return -EINVAL; 121 return -EINVAL;
124} 122}
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 042968dd98f0..a9bc7e1b077e 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,13 +11,14 @@ struct task_struct;
11/* for sysctl */ 11/* for sysctl */
12extern int print_fatal_signals; 12extern int print_fatal_signals;
13 13
14static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) 14static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from)
15{ 15{
16 if (from->si_code < 0) 16 memcpy(to, from, sizeof(*to));
17 memcpy(to, from, sizeof(*to)); 17}
18 else 18
19 /* _sigchld is currently the largest know union member */ 19static inline void clear_siginfo(struct siginfo *info)
20 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); 20{
21 memset(info, 0, sizeof(*info));
21} 22}
22 23
23int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); 24int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
@@ -29,9 +30,7 @@ enum siginfo_layout {
29 SIL_FAULT, 30 SIL_FAULT,
30 SIL_CHLD, 31 SIL_CHLD,
31 SIL_RT, 32 SIL_RT,
32#ifdef __ARCH_SIGSYS
33 SIL_SYS, 33 SIL_SYS,
34#endif
35}; 34};
36 35
37enum siginfo_layout siginfo_layout(int sig, int si_code); 36enum siginfo_layout siginfo_layout(int sig, int si_code);
diff --git a/include/linux/siox.h b/include/linux/siox.h
new file mode 100644
index 000000000000..d79624e83134
--- /dev/null
+++ b/include/linux/siox.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (C) 2015 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
3 *
4 * This program is free software; you can redistribute it and/or modify it under
5 * the terms of the GNU General Public License version 2 as published by the
6 * Free Software Foundation.
7 */
8
9#include <linux/device.h>
10
11#define to_siox_device(_dev) container_of((_dev), struct siox_device, dev)
12struct siox_device {
13 struct list_head node; /* node in smaster->devices */
14 struct siox_master *smaster;
15 struct device dev;
16
17 const char *type;
18 size_t inbytes;
19 size_t outbytes;
20 u8 statustype;
21
22 u8 status_read_clean;
23 u8 status_written;
24 u8 status_written_lastcycle;
25 bool connected;
26
27 /* statistics */
28 unsigned int watchdog_errors;
29 unsigned int status_errors;
30
31 struct kernfs_node *status_errors_kn;
32 struct kernfs_node *watchdog_kn;
33 struct kernfs_node *watchdog_errors_kn;
34 struct kernfs_node *connected_kn;
35};
36
37bool siox_device_synced(struct siox_device *sdevice);
38bool siox_device_connected(struct siox_device *sdevice);
39
40struct siox_driver {
41 int (*probe)(struct siox_device *sdevice);
42 int (*remove)(struct siox_device *sdevice);
43 void (*shutdown)(struct siox_device *sdevice);
44
45 /*
 46	 * buf is big enough to hold sdev->inbytes - 1 bytes; the status byte
47 * is in the scope of the framework.
48 */
49 int (*set_data)(struct siox_device *sdevice, u8 status, u8 buf[]);
50 /*
 51	 * buf is big enough to hold sdev->outbytes - 1 bytes; the status byte
52 * is in the scope of the framework
53 */
54 int (*get_data)(struct siox_device *sdevice, const u8 buf[]);
55
56 struct device_driver driver;
57};
58
59static inline struct siox_driver *to_siox_driver(struct device_driver *driver)
60{
61 if (driver)
62 return container_of(driver, struct siox_driver, driver);
63 else
64 return NULL;
65}
66
67int __siox_driver_register(struct siox_driver *sdriver, struct module *owner);
68
69static inline int siox_driver_register(struct siox_driver *sdriver)
70{
71 return __siox_driver_register(sdriver, THIS_MODULE);
72}
73
74static inline void siox_driver_unregister(struct siox_driver *sdriver)
75{
76 return driver_unregister(&sdriver->driver);
77}
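
A minimal client driver skeleton against this new header might look roughly as follows; the demo_ names are placeholders and the module boilerplate is an assumption about how such a driver would normally be packaged.

#include <linux/module.h>
#include <linux/siox.h>

static int demo_siox_probe(struct siox_device *sdevice)
{
	/* Per-device setup; the status byte itself is handled by the framework. */
	return 0;
}

static int demo_siox_set_data(struct siox_device *sdevice, u8 status, u8 buf[])
{
	/* Fill buf with the data bytes to shift out in this cycle. */
	return 0;
}

static int demo_siox_get_data(struct siox_device *sdevice, const u8 buf[])
{
	/* Consume the data bytes shifted in during the previous cycle. */
	return 0;
}

static struct siox_driver demo_siox_driver = {
	.probe		= demo_siox_probe,
	.set_data	= demo_siox_set_data,
	.get_data	= demo_siox_get_data,
	.driver		= {
		.name	= "demo-siox",
	},
};

static int __init demo_siox_init(void)
{
	return siox_driver_register(&demo_siox_driver);
}
module_init(demo_siox_init);

static void __exit demo_siox_exit(void)
{
	siox_driver_unregister(&demo_siox_driver);
}
module_exit(demo_siox_exit);

MODULE_LICENSE("GPL");
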
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 8621ffdeecbf..a6b6e8bb3d7b 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -69,7 +69,12 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb
69 */ 69 */
70static inline bool __skb_array_empty(struct skb_array *a) 70static inline bool __skb_array_empty(struct skb_array *a)
71{ 71{
72 return !__ptr_ring_peek(&a->ring); 72 return __ptr_ring_empty(&a->ring);
73}
74
75static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
76{
77 return __ptr_ring_peek(&a->ring);
73} 78}
74 79
75static inline bool skb_array_empty(struct skb_array *a) 80static inline bool skb_array_empty(struct skb_array *a)
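
The new peek helper complements the existing consume path; a consumer-side sketch (assumed to run in consumer context, like the other double-underscore helpers):

#include <linux/skb_array.h>

static struct sk_buff *demo_peek_next(struct skb_array *a)
{
	if (__skb_array_empty(a))
		return NULL;

	/* Peek leaves the skb in the ring; a later consume dequeues it. */
	return __skb_array_peek(a);
}
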
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a38c80e9f91e..5ebc0f869720 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1211,6 +1211,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1211 data, proto, nhoff, hlen, flags); 1211 data, proto, nhoff, hlen, flags);
1212} 1212}
1213 1213
1214void
1215skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1216 struct flow_dissector *flow_dissector,
1217 void *target_container);
1218
1214static inline __u32 skb_get_hash(struct sk_buff *skb) 1219static inline __u32 skb_get_hash(struct sk_buff *skb)
1215{ 1220{
1216 if (!skb->l4_hash && !skb->sw_hash) 1221 if (!skb->l4_hash && !skb->sw_hash)
@@ -3241,7 +3246,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3241 int *peeked, int *off, int *err); 3246 int *peeked, int *off, int *err);
3242struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, 3247struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3243 int *err); 3248 int *err);
3244unsigned int datagram_poll(struct file *file, struct socket *sock, 3249__poll_t datagram_poll(struct file *file, struct socket *sock,
3245 struct poll_table_struct *wait); 3250 struct poll_table_struct *wait);
3246int skb_copy_datagram_iter(const struct sk_buff *from, int offset, 3251int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3247 struct iov_iter *to, int size); 3252 struct iov_iter *to, int size);
@@ -3282,6 +3287,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3282void skb_scrub_packet(struct sk_buff *skb, bool xnet); 3287void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3283unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); 3288unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
3284bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); 3289bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3290bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3285struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 3291struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3286struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 3292struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3287int skb_ensure_writable(struct sk_buff *skb, int write_len); 3293int skb_ensure_writable(struct sk_buff *skb, int write_len);
@@ -4115,6 +4121,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4115 return hdr_len + skb_gso_transport_seglen(skb); 4121 return hdr_len + skb_gso_transport_seglen(skb);
4116} 4122}
4117 4123
4124/**
4125 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4126 *
4127 * @skb: GSO skb
4128 *
4129 * skb_gso_mac_seglen is used to determine the real size of the
4130 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4131 * headers (TCP/UDP).
4132 */
4133static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4134{
4135 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4136 return hdr_len + skb_gso_transport_seglen(skb);
4137}
4138
4118/* Local Checksum Offload. 4139/* Local Checksum Offload.
4119 * Compute outer checksum based on the assumption that the 4140 * Compute outer checksum based on the assumption that the
4120 * inner checksum will be offloaded later. 4141 * inner checksum will be offloaded later.
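
A rough sketch of how a caller enforcing a frame-size cap might use the MAC-level helpers added here; skb_gso_validate_mac_len() is the boolean variant of the same check. The demo_ name and the non-GSO fallback are illustrative assumptions.

#include <linux/skbuff.h>

static unsigned int demo_worst_case_seglen(const struct sk_buff *skb)
{
	/* Per-segment size on the wire, including MAC/L2, L3 and L4 headers. */
	if (skb_is_gso(skb))
		return skb_gso_mac_seglen(skb);

	/* Non-GSO: the skb itself is the single "segment". */
	return skb->len;
}
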
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 50697a1d6621..231abc8976c5 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -135,9 +135,15 @@ struct mem_cgroup;
135void __init kmem_cache_init(void); 135void __init kmem_cache_init(void);
136bool slab_is_available(void); 136bool slab_is_available(void);
137 137
138struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 138extern bool usercopy_fallback;
139 slab_flags_t, 139
140 void (*)(void *)); 140struct kmem_cache *kmem_cache_create(const char *name, size_t size,
141 size_t align, slab_flags_t flags,
142 void (*ctor)(void *));
143struct kmem_cache *kmem_cache_create_usercopy(const char *name,
144 size_t size, size_t align, slab_flags_t flags,
145 size_t useroffset, size_t usersize,
146 void (*ctor)(void *));
141void kmem_cache_destroy(struct kmem_cache *); 147void kmem_cache_destroy(struct kmem_cache *);
142int kmem_cache_shrink(struct kmem_cache *); 148int kmem_cache_shrink(struct kmem_cache *);
143 149
@@ -153,9 +159,20 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
153 * f.e. add ____cacheline_aligned_in_smp to the struct declaration 159 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
154 * then the objects will be properly aligned in SMP configurations. 160 * then the objects will be properly aligned in SMP configurations.
155 */ 161 */
156#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ 162#define KMEM_CACHE(__struct, __flags) \
157 sizeof(struct __struct), __alignof__(struct __struct),\ 163 kmem_cache_create(#__struct, sizeof(struct __struct), \
158 (__flags), NULL) 164 __alignof__(struct __struct), (__flags), NULL)
165
166/*
167 * To whitelist a single field for copying to/from usercopy, use this
168 * macro instead of KMEM_CACHE() above.
169 */
170#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
171 kmem_cache_create_usercopy(#__struct, \
172 sizeof(struct __struct), \
173 __alignof__(struct __struct), (__flags), \
174 offsetof(struct __struct, __field), \
175 sizeof_field(struct __struct, __field), NULL)
159 176
160/* 177/*
161 * Common kmalloc functions provided by all allocators 178 * Common kmalloc functions provided by all allocators
@@ -167,15 +184,11 @@ void kzfree(const void *);
167size_t ksize(const void *); 184size_t ksize(const void *);
168 185
169#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR 186#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
170const char *__check_heap_object(const void *ptr, unsigned long n, 187void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
171 struct page *page); 188 bool to_user);
172#else 189#else
173static inline const char *__check_heap_object(const void *ptr, 190static inline void __check_heap_object(const void *ptr, unsigned long n,
174 unsigned long n, 191 struct page *page, bool to_user) { }
175 struct page *page)
176{
177 return NULL;
178}
179#endif 192#endif
180 193
181/* 194/*
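
A sketch of whitelisting a single field with the new usercopy API; the demo_obj structure is hypothetical. The KMEM_CACHE_USERCOPY() macro above expands to essentially this long-hand form.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

struct demo_obj {
	spinlock_t lock;	/* never copied to/from user space */
	char name[32];		/* the only region exposed to usercopy */
};

static struct kmem_cache *demo_cache;

static int __init demo_cache_init(void)
{
	demo_cache = kmem_cache_create_usercopy("demo_obj",
				sizeof(struct demo_obj),
				__alignof__(struct demo_obj), 0,
				offsetof(struct demo_obj, name),
				sizeof_field(struct demo_obj, name),
				NULL);
	return demo_cache ? 0 : -ENOMEM;
}
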
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 072e46e9e1d5..7385547c04b1 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -85,6 +85,9 @@ struct kmem_cache {
85 unsigned int *random_seq; 85 unsigned int *random_seq;
86#endif 86#endif
87 87
88 size_t useroffset; /* Usercopy region offset */
89 size_t usersize; /* Usercopy region size */
90
88 struct kmem_cache_node *node[MAX_NUMNODES]; 91 struct kmem_cache_node *node[MAX_NUMNODES];
89}; 92};
90 93
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
new file mode 100644
index 000000000000..c36cf121d2cd
--- /dev/null
+++ b/include/linux/slimbus.h
@@ -0,0 +1,164 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2011-2017, The Linux Foundation
4 */
5
6#ifndef _LINUX_SLIMBUS_H
7#define _LINUX_SLIMBUS_H
8#include <linux/device.h>
9#include <linux/module.h>
10#include <linux/completion.h>
11#include <linux/mod_devicetable.h>
12
13extern struct bus_type slimbus_bus;
14
15/**
16 * struct slim_eaddr - Enumeration address for a SLIMbus device
17 * @manf_id: Manufacturer Id for the device
18 * @prod_code: Product code
19 * @dev_index: Device index
20 * @instance: Instance value
21 */
22struct slim_eaddr {
23 u16 manf_id;
24 u16 prod_code;
25 u8 dev_index;
26 u8 instance;
27} __packed;
28
29/**
30 * enum slim_device_status - slim device status
31 * @SLIM_DEVICE_STATUS_DOWN: Slim device is absent or not reported yet.
32 * @SLIM_DEVICE_STATUS_UP: Slim device is announced on the bus.
33 * @SLIM_DEVICE_STATUS_RESERVED: Reserved for future use.
34 */
35enum slim_device_status {
36 SLIM_DEVICE_STATUS_DOWN = 0,
37 SLIM_DEVICE_STATUS_UP,
38 SLIM_DEVICE_STATUS_RESERVED,
39};
40
41struct slim_controller;
42
43/**
44 * struct slim_device - Slim device handle.
45 * @dev: Driver model representation of the device.
46 * @e_addr: Enumeration address of this device.
47 * @status: slim device status
48 * @ctrl: slim controller instance.
49 * @laddr: 1-byte Logical address of this device.
50 * @is_laddr_valid: indicates if the laddr is valid or not
51 *
52 * This is the client/device handle returned when a SLIMbus
53 * device is registered with a controller.
54 * Pointer to this structure is used by client-driver as a handle.
55 */
56struct slim_device {
57 struct device dev;
58 struct slim_eaddr e_addr;
59 struct slim_controller *ctrl;
60 enum slim_device_status status;
61 u8 laddr;
62 bool is_laddr_valid;
63};
64
65#define to_slim_device(d) container_of(d, struct slim_device, dev)
66
67/**
68 * struct slim_driver - SLIMbus 'generic device' (slave) device driver
69 * (similar to 'spi_device' on SPI)
70 * @probe: Binds this driver to a SLIMbus device.
71 * @remove: Unbinds this driver from the SLIMbus device.
72 * @shutdown: Standard shutdown callback used during powerdown/halt.
73 * @device_status: This callback is called when
74 * - The device reports present and gets a laddr assigned
75 * - The device reports absent, or the bus goes down.
76 * @driver: SLIMbus device drivers should initialize name and owner field of
77 * this structure
78 * @id_table: List of SLIMbus devices supported by this driver
79 */
80
81struct slim_driver {
82 int (*probe)(struct slim_device *sl);
83 void (*remove)(struct slim_device *sl);
84 void (*shutdown)(struct slim_device *sl);
85 int (*device_status)(struct slim_device *sl,
86 enum slim_device_status s);
87 struct device_driver driver;
88 const struct slim_device_id *id_table;
89};
90#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
91
92/**
93 * struct slim_val_inf - Slimbus value or information element
94 * @start_offset: Specifies starting offset in information/value element map
95 * @rbuf: buffer to read the values
96 * @wbuf: buffer to write
 97 * @num_bytes: up to 16. This ensures that the message will fit the slice size
98 * per SLIMbus spec
99 * @comp: completion for asynchronous operations, valid only if TID is
100 * required for transaction, like REQUEST operations.
101 * Rest of the transactions are synchronous anyway.
102 */
103struct slim_val_inf {
104 u16 start_offset;
105 u8 num_bytes;
106 u8 *rbuf;
107 const u8 *wbuf;
108 struct completion *comp;
109};
110
111/*
112 * use a macro to avoid include chaining to get THIS_MODULE
113 */
114#define slim_driver_register(drv) \
115 __slim_driver_register(drv, THIS_MODULE)
116int __slim_driver_register(struct slim_driver *drv, struct module *owner);
117void slim_driver_unregister(struct slim_driver *drv);
118
119/**
120 * module_slim_driver() - Helper macro for registering a SLIMbus driver
121 * @__slim_driver: slimbus_driver struct
122 *
123 * Helper macro for SLIMbus drivers which do not do anything special in module
124 * init/exit. This eliminates a lot of boilerplate. Each module may only
125 * use this macro once, and calling it replaces module_init() and module_exit()
126 */
127#define module_slim_driver(__slim_driver) \
128 module_driver(__slim_driver, slim_driver_register, \
129 slim_driver_unregister)
130
131static inline void *slim_get_devicedata(const struct slim_device *dev)
132{
133 return dev_get_drvdata(&dev->dev);
134}
135
136static inline void slim_set_devicedata(struct slim_device *dev, void *data)
137{
138 dev_set_drvdata(&dev->dev, data);
139}
140
141struct slim_device *slim_get_device(struct slim_controller *ctrl,
142 struct slim_eaddr *e_addr);
143int slim_get_logical_addr(struct slim_device *sbdev);
144
145/* Information Element management messages */
146#define SLIM_MSG_MC_REQUEST_INFORMATION 0x20
147#define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION 0x21
148#define SLIM_MSG_MC_REPLY_INFORMATION 0x24
149#define SLIM_MSG_MC_CLEAR_INFORMATION 0x28
150#define SLIM_MSG_MC_REPORT_INFORMATION 0x29
151
152/* Value Element management messages */
153#define SLIM_MSG_MC_REQUEST_VALUE 0x60
154#define SLIM_MSG_MC_REQUEST_CHANGE_VALUE 0x61
155#define SLIM_MSG_MC_REPLY_VALUE 0x64
156#define SLIM_MSG_MC_CHANGE_VALUE 0x68
157
158int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
159 u8 mc);
160int slim_readb(struct slim_device *sdev, u32 addr);
161int slim_writeb(struct slim_device *sdev, u32 addr, u8 value);
162int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
163int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
164#endif /* _LINUX_SLIMBUS_H */
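
A client driver skeleton for the new bus might look roughly like this; the enumeration-address values, register offset and demo_ names are placeholders rather than anything defined by the patch.

#include <linux/module.h>
#include <linux/slimbus.h>

static int demo_slim_probe(struct slim_device *sdev)
{
	int val;

	/* Synchronous one-byte read of a (hypothetical) value element. */
	val = slim_readb(sdev, 0x010);
	if (val < 0)
		return val;

	return 0;
}

static void demo_slim_remove(struct slim_device *sdev)
{
}

static const struct slim_device_id demo_slim_ids[] = {
	{ 0x0217, 0x0001, 0x0, 0x0 },	/* manf_id, prod_code, dev_index, instance */
	{ }
};

static struct slim_driver demo_slim_driver = {
	.probe		= demo_slim_probe,
	.remove		= demo_slim_remove,
	.id_table	= demo_slim_ids,
	.driver		= {
		.name	= "demo-slim",
	},
};
module_slim_driver(demo_slim_driver);

MODULE_LICENSE("GPL");
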
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0adae162dc8f..8ad99c47b19c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -135,6 +135,9 @@ struct kmem_cache {
135 struct kasan_cache kasan_info; 135 struct kasan_cache kasan_info;
136#endif 136#endif
137 137
138 size_t useroffset; /* Usercopy region offset */
139 size_t usersize; /* Usercopy region size */
140
138 struct kmem_cache_node *node[MAX_NUMNODES]; 141 struct kmem_cache_node *node[MAX_NUMNODES];
139}; 142};
140 143
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h
index 12e548938bbb..8e884e0dda0a 100644
--- a/include/linux/soc/brcmstb/brcmstb.h
+++ b/include/linux/soc/brcmstb/brcmstb.h
@@ -13,12 +13,6 @@ static inline u32 BRCM_REV(u32 reg)
13} 13}
14 14
15/* 15/*
16 * Bus Interface Unit control register setup, must happen early during boot,
17 * before SMP is brought up, called by machine entry point.
18 */
19void brcmstb_biuctrl_init(void);
20
21/*
22 * Helper functions for getting family or product id from the 16 * Helper functions for getting family or product id from the
23 * SoC driver. 17 * SoC driver.
24 */ 18 */
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index e8d9f0d52933..b0a507d356ef 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -28,7 +28,8 @@
28#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ 28#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
29 BIT(7) | BIT(8)) 29 BIT(7) | BIT(8))
30 30
31int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask); 31int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
32int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask); 32 bool reg_update);
33 33int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
34 bool reg_update);
34#endif /* __SOC_MEDIATEK_INFRACFG_H */ 35#endif /* __SOC_MEDIATEK_INFRACFG_H */
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
new file mode 100644
index 000000000000..f4de33654a60
--- /dev/null
+++ b/include/linux/soc/qcom/qmi.h
@@ -0,0 +1,271 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
4 * Copyright (c) 2017, Linaro Ltd.
5 */
6#ifndef __QMI_HELPERS_H__
7#define __QMI_HELPERS_H__
8
9#include <linux/completion.h>
10#include <linux/idr.h>
11#include <linux/list.h>
12#include <linux/qrtr.h>
13#include <linux/types.h>
14#include <linux/workqueue.h>
15
16struct socket;
17
18/**
19 * qmi_header - wireformat header of QMI messages
20 * @type: type of message
21 * @txn_id: transaction id
22 * @msg_id: message id
23 * @msg_len: length of message payload following header
24 */
25struct qmi_header {
26 u8 type;
27 u16 txn_id;
28 u16 msg_id;
29 u16 msg_len;
30} __packed;
31
32#define QMI_REQUEST 0
33#define QMI_RESPONSE 2
34#define QMI_INDICATION 4
35
36#define QMI_COMMON_TLV_TYPE 0
37
38enum qmi_elem_type {
39 QMI_EOTI,
40 QMI_OPT_FLAG,
41 QMI_DATA_LEN,
42 QMI_UNSIGNED_1_BYTE,
43 QMI_UNSIGNED_2_BYTE,
44 QMI_UNSIGNED_4_BYTE,
45 QMI_UNSIGNED_8_BYTE,
46 QMI_SIGNED_2_BYTE_ENUM,
47 QMI_SIGNED_4_BYTE_ENUM,
48 QMI_STRUCT,
49 QMI_STRING,
50};
51
52enum qmi_array_type {
53 NO_ARRAY,
54 STATIC_ARRAY,
55 VAR_LEN_ARRAY,
56};
57
58/**
59 * struct qmi_elem_info - describes how to encode a single QMI element
60 * @data_type: Data type of this element.
61 * @elem_len: Array length of this element, if an array.
62 * @elem_size: Size of a single instance of this data type.
63 * @array_type: Array type of this element.
64 * @tlv_type: QMI message specific type to identify which element
65 * is present in an incoming message.
66 * @offset: Specifies the offset of the first instance of this
67 * element in the data structure.
68 * @ei_array: Null-terminated array of @qmi_elem_info to describe nested
69 * structures.
70 */
71struct qmi_elem_info {
72 enum qmi_elem_type data_type;
73 u32 elem_len;
74 u32 elem_size;
75 enum qmi_array_type array_type;
76 u8 tlv_type;
77 u32 offset;
78 struct qmi_elem_info *ei_array;
79};
80
81#define QMI_RESULT_SUCCESS_V01 0
82#define QMI_RESULT_FAILURE_V01 1
83
84#define QMI_ERR_NONE_V01 0
85#define QMI_ERR_MALFORMED_MSG_V01 1
86#define QMI_ERR_NO_MEMORY_V01 2
87#define QMI_ERR_INTERNAL_V01 3
88#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5
89#define QMI_ERR_INVALID_ID_V01 41
90#define QMI_ERR_ENCODING_V01 58
91#define QMI_ERR_INCOMPATIBLE_STATE_V01 90
92#define QMI_ERR_NOT_SUPPORTED_V01 94
93
94/**
95 * qmi_response_type_v01 - common response header (decoded)
96 * @result: result of the transaction
97 * @error: error value, when @result is QMI_RESULT_FAILURE_V01
98 */
99struct qmi_response_type_v01 {
100 u16 result;
101 u16 error;
102};
103
104extern struct qmi_elem_info qmi_response_type_v01_ei[];
105
106/**
107 * struct qmi_service - context to track lookup-results
108 * @service: service type
109 * @version: version of the @service
110 * @instance: instance id of the @service
111 * @node: node of the service
112 * @port: port of the service
113 * @priv: handle for client's use
114 * @list_node: list_head for housekeeping
115 */
116struct qmi_service {
117 unsigned int service;
118 unsigned int version;
119 unsigned int instance;
120
121 unsigned int node;
122 unsigned int port;
123
124 void *priv;
125 struct list_head list_node;
126};
127
128struct qmi_handle;
129
130/**
131 * struct qmi_ops - callbacks for qmi_handle
132 * @new_server: inform client of a new_server lookup-result, returning
133 * successfully from this call causes the library to call
134 * @del_server as the service is removed from the
135 * lookup-result. @priv of the qmi_service can be used by
136 * the client
137 * @del_server: inform client of a del_server lookup-result
138 * @net_reset: inform client that the name service was restarted and
139 *              that any state needs to be released
140 * @msg_handler: invoked for incoming messages, allows a client to
141 * override the usual QMI message handler
142 * @bye: inform a client that all clients from a node are gone
143 * @del_client: inform a client that a particular client is gone
144 */
145struct qmi_ops {
146 int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc);
147 void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc);
148 void (*net_reset)(struct qmi_handle *qmi);
149 void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
150 const void *data, size_t count);
151 void (*bye)(struct qmi_handle *qmi, unsigned int node);
152 void (*del_client)(struct qmi_handle *qmi,
153 unsigned int node, unsigned int port);
154};
155
156/**
157 * struct qmi_txn - transaction context
158 * @qmi: QMI handle this transaction is associated with
159 * @id: transaction id
160 * @lock: for synchronization between handler and waiter of messages
161 * @completion: completion object as the transaction receives a response
162 * @result: result code for the completed transaction
163 * @ei: description of the QMI encoded response (optional)
164 * @dest: destination buffer to decode message into (optional)
165 */
166struct qmi_txn {
167 struct qmi_handle *qmi;
168
169 int id;
170
171 struct mutex lock;
172 struct completion completion;
173 int result;
174
175 struct qmi_elem_info *ei;
176 void *dest;
177};
178
179/**
180 * struct qmi_msg_handler - description of QMI message handler
181 * @type: type of message
182 * @msg_id: message id
183 * @ei: description of the QMI encoded message
184 * @decoded_size: size of the decoded object
185 * @fn: function to invoke as the message is decoded
186 */
187struct qmi_msg_handler {
188 unsigned int type;
189 unsigned int msg_id;
190
191 struct qmi_elem_info *ei;
192
193 size_t decoded_size;
194 void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
195 struct qmi_txn *txn, const void *decoded);
196};
197
198/**
199 * struct qmi_handle - QMI context
200 * @sock: socket handle
201 * @sock_lock: synchronization of @sock modifications
202 * @sq: sockaddr of @sock
203 * @work: work for handling incoming messages
204 * @wq: workqueue to post @work on
205 * @recv_buf: scratch buffer for handling incoming messages
206 * @recv_buf_size: size of @recv_buf
207 * @lookups: list of registered lookup requests
208 * @lookup_results: list of lookup-results advertised to the client
209 * @services: list of registered services (by this client)
210 * @ops: reference to callbacks
211 * @txns: outstanding transactions
212 * @txn_lock: lock for modifications of @txns
213 * @handlers: list of handlers for incoming messages
214 */
215struct qmi_handle {
216 struct socket *sock;
217 struct mutex sock_lock;
218
219 struct sockaddr_qrtr sq;
220
221 struct work_struct work;
222 struct workqueue_struct *wq;
223
224 void *recv_buf;
225 size_t recv_buf_size;
226
227 struct list_head lookups;
228 struct list_head lookup_results;
229 struct list_head services;
230
231 struct qmi_ops ops;
232
233 struct idr txns;
234 struct mutex txn_lock;
235
236 const struct qmi_msg_handler *handlers;
237};
238
239int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
240 unsigned int version, unsigned int instance);
241int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
242 unsigned int version, unsigned int instance);
243
244int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len,
245 const struct qmi_ops *ops,
246 const struct qmi_msg_handler *handlers);
247void qmi_handle_release(struct qmi_handle *qmi);
248
249ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
250 struct qmi_txn *txn, int msg_id, size_t len,
251 struct qmi_elem_info *ei, const void *c_struct);
252ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
253 struct qmi_txn *txn, int msg_id, size_t len,
254 struct qmi_elem_info *ei, const void *c_struct);
255ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
256 int msg_id, size_t len, struct qmi_elem_info *ei,
257 const void *c_struct);
258
259void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
260 unsigned int txn_id, struct qmi_elem_info *ei,
261 const void *c_struct);
262
263int qmi_decode_message(const void *buf, size_t len,
264 struct qmi_elem_info *ei, void *c_struct);
265
266int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
267 struct qmi_elem_info *ei, void *c_struct);
268int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
269void qmi_txn_cancel(struct qmi_txn *txn);
270
271#endif
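
A sketch of a client bringing up a QMI handle and registering a lookup; the service/version numbers and the 1024-byte buffer size are arbitrary placeholders. A request/response exchange would additionally go through qmi_txn_init(), qmi_send_request() and qmi_txn_wait().

#include <linux/soc/qcom/qmi.h>

static int demo_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
	/* A matching server appeared; its address is svc->node:svc->port. */
	return 0;
}

static const struct qmi_ops demo_qmi_ops = {
	.new_server = demo_new_server,
};

/* No incoming messages handled in this sketch: empty, sentinel-terminated. */
static const struct qmi_msg_handler demo_qmi_handlers[] = {
	{ /* sentinel */ }
};

static struct qmi_handle demo_qmi;

static int demo_qmi_start(void)
{
	int ret;

	/* 1024 bytes is an assumed upper bound on the encoded message size. */
	ret = qmi_handle_init(&demo_qmi, 1024, &demo_qmi_ops, demo_qmi_handlers);
	if (ret < 0)
		return ret;

	/* Ask the name service for service 15, version 1, instance 0. */
	return qmi_add_lookup(&demo_qmi, 15, 1, 0);
}
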
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
new file mode 100644
index 000000000000..e91fdcf41049
--- /dev/null
+++ b/include/linux/soundwire/sdw.h
@@ -0,0 +1,479 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2// Copyright(c) 2015-17 Intel Corporation.
3
4#ifndef __SOUNDWIRE_H
5#define __SOUNDWIRE_H
6
7struct sdw_bus;
8struct sdw_slave;
9
 10/* SDW spec defines and enums, as defined by the MIPI 1.1 Spec */
11
12/* SDW Broadcast Device Number */
13#define SDW_BROADCAST_DEV_NUM 15
14
15/* SDW Enumeration Device Number */
16#define SDW_ENUM_DEV_NUM 0
17
18/* SDW Group Device Numbers */
19#define SDW_GROUP12_DEV_NUM 12
20#define SDW_GROUP13_DEV_NUM 13
21
22/* SDW Master Device Number, not supported yet */
23#define SDW_MASTER_DEV_NUM 14
24
25#define SDW_NUM_DEV_ID_REGISTERS 6
26
27#define SDW_MAX_DEVICES 11
28
29/**
30 * enum sdw_slave_status - Slave status
31 * @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus.
32 * @SDW_SLAVE_ATTACHED: Slave is attached with bus.
33 * @SDW_SLAVE_ALERT: Some alert condition on the Slave
34 * @SDW_SLAVE_RESERVED: Reserved for future use
35 */
36enum sdw_slave_status {
37 SDW_SLAVE_UNATTACHED = 0,
38 SDW_SLAVE_ATTACHED = 1,
39 SDW_SLAVE_ALERT = 2,
40 SDW_SLAVE_RESERVED = 3,
41};
42
43/**
44 * enum sdw_command_response - Command response as defined by SDW spec
45 * @SDW_CMD_OK: cmd was successful
46 * @SDW_CMD_IGNORED: cmd was ignored
47 * @SDW_CMD_FAIL: cmd was NACKed
 48 * @SDW_CMD_TIMEOUT: cmd timed out
 49 * @SDW_CMD_FAIL_OTHER: cmd failed for a reason other than the above
50 *
 51 * NOTE: The enum is different from the actual Spec, where the response is a
 52 * combination of ACK/NAK bits
53 *
54 * SDW_CMD_TIMEOUT/FAIL_OTHER is defined for SW use, not in spec
55 */
56enum sdw_command_response {
57 SDW_CMD_OK = 0,
58 SDW_CMD_IGNORED = 1,
59 SDW_CMD_FAIL = 2,
60 SDW_CMD_TIMEOUT = 3,
61 SDW_CMD_FAIL_OTHER = 4,
62};
63
64/*
65 * SDW properties, defined in MIPI DisCo spec v1.0
66 */
67enum sdw_clk_stop_reset_behave {
68 SDW_CLK_STOP_KEEP_STATUS = 1,
69};
70
71/**
72 * enum sdw_p15_behave - Slave Port 15 behaviour when the Master attempts a
73 * read
74 * @SDW_P15_READ_IGNORED: Read is ignored
75 * @SDW_P15_CMD_OK: Command is ok
76 */
77enum sdw_p15_behave {
78 SDW_P15_READ_IGNORED = 0,
79 SDW_P15_CMD_OK = 1,
80};
81
82/**
83 * enum sdw_dpn_type - Data port types
84 * @SDW_DPN_FULL: Full Data Port is supported
85 * @SDW_DPN_SIMPLE: Simplified Data Port as defined in spec.
86 * DPN_SampleCtrl2, DPN_OffsetCtrl2, DPN_HCtrl and DPN_BlockCtrl3
87 * are not implemented.
88 * @SDW_DPN_REDUCED: Reduced Data Port as defined in spec.
89 * DPN_SampleCtrl2, DPN_HCtrl are not implemented.
90 */
91enum sdw_dpn_type {
92 SDW_DPN_FULL = 0,
93 SDW_DPN_SIMPLE = 1,
94 SDW_DPN_REDUCED = 2,
95};
96
97/**
98 * enum sdw_clk_stop_mode - Clock Stop modes
99 * @SDW_CLK_STOP_MODE0: Slave can continue operation seamlessly on clock
100 * restart
101 * @SDW_CLK_STOP_MODE1: Slave may have entered a deeper power-saving mode,
102 * not capable of continuing operation seamlessly when the clock restarts
103 */
104enum sdw_clk_stop_mode {
105 SDW_CLK_STOP_MODE0 = 0,
106 SDW_CLK_STOP_MODE1 = 1,
107};
108
109/**
110 * struct sdw_dp0_prop - DP0 properties
111 * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
112 * (inclusive)
113 * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
114 * (inclusive)
115 * @num_words: number of wordlengths supported
116 * @words: wordlengths supported
117 * @flow_controlled: Slave implementation results in an OK_NotReady
118 * response
119 * @simple_ch_prep_sm: If channel prepare sequence is required
120 * @device_interrupts: If implementation-defined interrupts are supported
121 *
122 * The wordlengths are specified by Spec as max, min AND number of
123 * discrete values, implementation can define based on the wordlengths they
124 * support
125 */
126struct sdw_dp0_prop {
127 u32 max_word;
128 u32 min_word;
129 u32 num_words;
130 u32 *words;
131 bool flow_controlled;
132 bool simple_ch_prep_sm;
133 bool device_interrupts;
134};
135
136/**
137 * struct sdw_dpn_audio_mode - Audio mode properties for DPn
138 * @bus_min_freq: Minimum bus frequency, in Hz
139 * @bus_max_freq: Maximum bus frequency, in Hz
140 * @bus_num_freq: Number of discrete frequencies supported
141 * @bus_freq: Discrete bus frequencies, in Hz
142 * @min_freq: Minimum sampling frequency, in Hz
143 * @max_freq: Maximum sampling frequency, in Hz
144 * @num_freq: Number of discrete sampling frequency supported
145 * @freq: Discrete sampling frequencies, in Hz
146 * @prep_ch_behave: Specifies the dependencies between Channel Prepare
147 * sequence and bus clock configuration
148 * If 0, Channel Prepare can happen at any Bus clock rate
149 * If 1, Channel Prepare sequence shall happen only after Bus clock is
150 * changed to a frequency supported by this mode or compatible modes
151 * described by the next field
152 * @glitchless: Bitmap describing possible glitchless transitions from this
153 * Audio Mode to other Audio Modes
154 */
155struct sdw_dpn_audio_mode {
156 u32 bus_min_freq;
157 u32 bus_max_freq;
158 u32 bus_num_freq;
159 u32 *bus_freq;
160 u32 max_freq;
161 u32 min_freq;
162 u32 num_freq;
163 u32 *freq;
164 u32 prep_ch_behave;
165 u32 glitchless;
166};
167
168/**
169 * struct sdw_dpn_prop - Data Port DPn properties
170 * @num: port number
171 * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
172 * (inclusive)
173 * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
174 * (inclusive)
175 * @num_words: Number of discrete supported wordlengths
176 * @words: Discrete supported wordlength
177 * @type: Data port type. Full, Simplified or Reduced
178 * @max_grouping: Maximum number of samples that can be grouped together for
179 * a full data port
180 * @simple_ch_prep_sm: If the port supports simplified channel prepare state
181 * machine
182 * @ch_prep_timeout: Port-specific timeout value, in milliseconds
183 * @device_interrupts: If set, each bit corresponds to support for
184 * implementation-defined interrupts
185 * @max_ch: Maximum channels supported
186 * @min_ch: Minimum channels supported
187 * @num_ch: Number of discrete channels supported
188 * @ch: Discrete channels supported
189 * @num_ch_combinations: Number of channel combinations supported
190 * @ch_combinations: Channel combinations supported
191 * @modes: SDW mode supported
192 * @max_async_buffer: Number of samples that this port can buffer in
193 * asynchronous modes
194 * @block_pack_mode: Type of block port mode supported
195 * @port_encoding: Payload Channel Sample encoding schemes supported
196 * @audio_modes: Audio modes supported
197 */
198struct sdw_dpn_prop {
199 u32 num;
200 u32 max_word;
201 u32 min_word;
202 u32 num_words;
203 u32 *words;
204 enum sdw_dpn_type type;
205 u32 max_grouping;
206 bool simple_ch_prep_sm;
207 u32 ch_prep_timeout;
208 u32 device_interrupts;
209 u32 max_ch;
210 u32 min_ch;
211 u32 num_ch;
212 u32 *ch;
213 u32 num_ch_combinations;
214 u32 *ch_combinations;
215 u32 modes;
216 u32 max_async_buffer;
217 bool block_pack_mode;
218 u32 port_encoding;
219 struct sdw_dpn_audio_mode *audio_modes;
220};
221
222/**
223 * struct sdw_slave_prop - SoundWire Slave properties
224 * @mipi_revision: Spec version of the implementation
225 * @wake_capable: Wake-up events are supported
226 * @test_mode_capable: If test mode is supported
227 * @clk_stop_mode1: Clock-Stop Mode 1 is supported
228 * @simple_clk_stop_capable: Simple clock mode is supported
229 * @clk_stop_timeout: Worst-case latency of the Clock Stop Prepare State
230 * Machine transitions, in milliseconds
231 * @ch_prep_timeout: Worst-case latency of the Channel Prepare State Machine
232 * transitions, in milliseconds
233 * @reset_behave: Slave keeps the status of the SlaveStopClockPrepare
234 * state machine (P=1 SCSP_SM) after exit from clock-stop mode1
235 * @high_PHY_capable: Slave is HighPHY capable
236 * @paging_support: Slave implements paging registers SCP_AddrPage1 and
237 * SCP_AddrPage2
238 * @bank_delay_support: Slave implements bank delay/bridge support registers
239 * SCP_BankDelay and SCP_NextFrame
240 * @p15_behave: Slave behavior when the Master attempts a read to the Port15
241 * alias
242 * @lane_control_support: Slave supports lane control
243 * @master_count: Number of Masters present on this Slave
244 * @source_ports: Bitmap identifying source ports
245 * @sink_ports: Bitmap identifying sink ports
246 * @dp0_prop: Data Port 0 properties
247 * @src_dpn_prop: Source Data Port N properties
248 * @sink_dpn_prop: Sink Data Port N properties
249 */
250struct sdw_slave_prop {
251 u32 mipi_revision;
252 bool wake_capable;
253 bool test_mode_capable;
254 bool clk_stop_mode1;
255 bool simple_clk_stop_capable;
256 u32 clk_stop_timeout;
257 u32 ch_prep_timeout;
258 enum sdw_clk_stop_reset_behave reset_behave;
259 bool high_PHY_capable;
260 bool paging_support;
261 bool bank_delay_support;
262 enum sdw_p15_behave p15_behave;
263 bool lane_control_support;
264 u32 master_count;
265 u32 source_ports;
266 u32 sink_ports;
267 struct sdw_dp0_prop *dp0_prop;
268 struct sdw_dpn_prop *src_dpn_prop;
269 struct sdw_dpn_prop *sink_dpn_prop;
270};
271
272/**
273 * struct sdw_master_prop - Master properties
274 * @revision: MIPI spec version of the implementation
275 * @master_count: Number of masters
276 * @clk_stop_mode: Bitmap for Clock Stop modes supported
277 * @max_freq: Maximum Bus clock frequency, in Hz
278 * @num_clk_gears: Number of clock gears supported
279 * @clk_gears: Clock gears supported
280 * @num_freq: Number of clock frequencies supported, in Hz
281 * @freq: Clock frequencies supported, in Hz
282 * @default_frame_rate: Controller default Frame rate, in Hz
283 * @default_row: Number of rows
284 * @default_col: Number of columns
285 * @dynamic_frame: Dynamic frame supported
286 * @err_threshold: Number of times that software may retry sending a single
287 * command
288 * @dpn_prop: Data Port N properties
289 */
290struct sdw_master_prop {
291 u32 revision;
292 u32 master_count;
293 enum sdw_clk_stop_mode clk_stop_mode;
294 u32 max_freq;
295 u32 num_clk_gears;
296 u32 *clk_gears;
297 u32 num_freq;
298 u32 *freq;
299 u32 default_frame_rate;
300 u32 default_row;
301 u32 default_col;
302 bool dynamic_frame;
303 u32 err_threshold;
304 struct sdw_dpn_prop *dpn_prop;
305};
306
307int sdw_master_read_prop(struct sdw_bus *bus);
308int sdw_slave_read_prop(struct sdw_slave *slave);
309
310/*
311 * SDW Slave Structures and APIs
312 */
313
314/**
315 * struct sdw_slave_id - Slave ID
316 * @mfg_id: MIPI Manufacturer ID
317 * @part_id: Device Part ID
318 * @class_id: MIPI Class ID, unused now.
319 * Currently a placeholder in MIPI SoundWire Spec
320 * @unique_id: Device unique ID
321 * @sdw_version: SDW version implemented
322 *
323 * The order of the IDs here does not follow the DisCo spec definitions
324 */
325struct sdw_slave_id {
326 __u16 mfg_id;
327 __u16 part_id;
328 __u8 class_id;
329 __u8 unique_id:4;
330 __u8 sdw_version:4;
331};
332
333/**
334 * struct sdw_slave_intr_status - Slave interrupt status
335 * @control_port: control port status
336 * @port: data port status
337 */
338struct sdw_slave_intr_status {
339 u8 control_port;
340 u8 port[15];
341};
342
343/**
344 * struct sdw_slave_ops - Slave driver callback ops
345 * @read_prop: Read Slave properties
346 * @interrupt_callback: Device interrupt notification (invoked in thread
347 * context)
348 * @update_status: Update Slave status
349 */
350struct sdw_slave_ops {
351 int (*read_prop)(struct sdw_slave *sdw);
352 int (*interrupt_callback)(struct sdw_slave *slave,
353 struct sdw_slave_intr_status *status);
354 int (*update_status)(struct sdw_slave *slave,
355 enum sdw_slave_status status);
356};
357
358/**
359 * struct sdw_slave - SoundWire Slave
360 * @id: MIPI device ID
361 * @dev: Linux device
362 * @status: Status reported by the Slave
363 * @bus: Bus handle
364 * @ops: Slave callback ops
365 * @prop: Slave properties
366 * @node: node for bus list
367 * @port_ready: Port ready completion flag for each Slave port
368 * @dev_num: Device Number assigned by Bus
369 */
370struct sdw_slave {
371 struct sdw_slave_id id;
372 struct device dev;
373 enum sdw_slave_status status;
374 struct sdw_bus *bus;
375 const struct sdw_slave_ops *ops;
376 struct sdw_slave_prop prop;
377 struct list_head node;
378 struct completion *port_ready;
379 u16 dev_num;
380};
381
382#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
383
384struct sdw_driver {
385 const char *name;
386
387 int (*probe)(struct sdw_slave *sdw,
388 const struct sdw_device_id *id);
389 int (*remove)(struct sdw_slave *sdw);
390 void (*shutdown)(struct sdw_slave *sdw);
391
392 const struct sdw_device_id *id_table;
393 const struct sdw_slave_ops *ops;
394
395 struct device_driver driver;
396};
397
398#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \
399 { .mfg_id = (_mfg_id), .part_id = (_part_id), \
400 .driver_data = (unsigned long)(_drv_data) }
401
402int sdw_handle_slave_status(struct sdw_bus *bus,
403 enum sdw_slave_status status[]);
404
405/*
406 * SDW master structures and APIs
407 */
408
409struct sdw_msg;
410
411/**
412 * struct sdw_defer - SDW deferred message
413 * @length: message length
414 * @complete: message completion
415 * @msg: SDW message
416 */
417struct sdw_defer {
418 int length;
419 struct completion complete;
420 struct sdw_msg *msg;
421};
422
423/**
424 * struct sdw_master_ops - Master driver ops
425 * @read_prop: Read Master properties
426 * @xfer_msg: Transfer message callback
427 * @xfer_msg_defer: Defer version of transfer message callback
428 * @reset_page_addr: Reset the SCP page address registers
429 */
430struct sdw_master_ops {
431 int (*read_prop)(struct sdw_bus *bus);
432
433 enum sdw_command_response (*xfer_msg)
434 (struct sdw_bus *bus, struct sdw_msg *msg);
435 enum sdw_command_response (*xfer_msg_defer)
436 (struct sdw_bus *bus, struct sdw_msg *msg,
437 struct sdw_defer *defer);
438 enum sdw_command_response (*reset_page_addr)
439 (struct sdw_bus *bus, unsigned int dev_num);
440};
441
442/**
443 * struct sdw_bus - SoundWire bus
444 * @dev: Master linux device
445 * @link_id: Link id number, can be 0 to N, unique for each Master
446 * @slaves: list of Slaves on this bus
447 * @assigned: Bitmap for Slave device numbers.
448 * Bit set implies used number, bit clear implies unused number.
449 * @bus_lock: bus lock
450 * @msg_lock: message lock
451 * @ops: Master callback ops
452 * @prop: Master properties
453 * @defer_msg: Defer message
454 * @clk_stop_timeout: Clock stop timeout computed
455 */
456struct sdw_bus {
457 struct device *dev;
458 unsigned int link_id;
459 struct list_head slaves;
460 DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
461 struct mutex bus_lock;
462 struct mutex msg_lock;
463 const struct sdw_master_ops *ops;
464 struct sdw_master_prop prop;
465 struct sdw_defer defer_msg;
466 unsigned int clk_stop_timeout;
467};
468
469int sdw_add_bus_master(struct sdw_bus *bus);
470void sdw_delete_bus_master(struct sdw_bus *bus);
471
472/* messaging and data APIs */
473
474int sdw_read(struct sdw_slave *slave, u32 addr);
475int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
476int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
477int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
478
479#endif /* __SOUNDWIRE_H */
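
Together with the registration helpers from sdw_type.h further down, a Slave driver skeleton against these structures might look roughly as follows; the mfg/part IDs and demo_ names are placeholders, and register access from such a driver would go through sdw_read()/sdw_write().

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>

static int demo_sdw_read_prop(struct sdw_slave *slave)
{
	/* Fill in slave->prop, here via the generic DisCo property parser. */
	return sdw_slave_read_prop(slave);
}

static int demo_sdw_update_status(struct sdw_slave *slave,
				  enum sdw_slave_status status)
{
	/* React to ATTACHED/ALERT/UNATTACHED notifications from the bus. */
	return 0;
}

static const struct sdw_slave_ops demo_sdw_ops = {
	.read_prop	= demo_sdw_read_prop,
	.update_status	= demo_sdw_update_status,
};

static int demo_sdw_probe(struct sdw_slave *slave,
			  const struct sdw_device_id *id)
{
	return 0;
}

static const struct sdw_device_id demo_sdw_ids[] = {
	SDW_SLAVE_ENTRY(0x0123, 0x4567, 0),	/* hypothetical mfg_id/part_id */
	{ }
};

static struct sdw_driver demo_sdw_driver = {
	.name		= "demo-sdw",
	.probe		= demo_sdw_probe,
	.ops		= &demo_sdw_ops,
	.id_table	= demo_sdw_ids,
	.driver		= {
		.name	= "demo-sdw",
	},
};

static int __init demo_sdw_init(void)
{
	return sdw_register_driver(&demo_sdw_driver);
}
module_init(demo_sdw_init);

static void __exit demo_sdw_exit(void)
{
	sdw_unregister_driver(&demo_sdw_driver);
}
module_exit(demo_sdw_exit);

MODULE_LICENSE("GPL");
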
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
new file mode 100644
index 000000000000..4b37528f592d
--- /dev/null
+++ b/include/linux/soundwire/sdw_intel.h
@@ -0,0 +1,24 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2// Copyright(c) 2015-17 Intel Corporation.
3
4#ifndef __SDW_INTEL_H
5#define __SDW_INTEL_H
6
7/**
8 * struct sdw_intel_res - Soundwire Intel resource structure
9 * @mmio_base: mmio base of SoundWire registers
10 * @irq: interrupt number
11 * @handle: ACPI parent handle
12 * @parent: parent device
13 */
14struct sdw_intel_res {
15 void __iomem *mmio_base;
16 int irq;
17 acpi_handle handle;
18 struct device *parent;
19};
20
21void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res);
22void sdw_intel_exit(void *arg);
23
24#endif
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
new file mode 100644
index 000000000000..df472b1ab410
--- /dev/null
+++ b/include/linux/soundwire/sdw_registers.h
@@ -0,0 +1,194 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2// Copyright(c) 2015-17 Intel Corporation.
3
4#ifndef __SDW_REGISTERS_H
5#define __SDW_REGISTERS_H
6
7/*
 8 * Typically we would define both registers and shifts, but the shift can be
 9 * generated from the mask using bit primitives like ffs(), so we use that
 10 * and avoid defining the shifts separately
11 */
12#define SDW_REG_SHIFT(n) (ffs(n) - 1)
13
14/*
15 * SDW registers as defined by MIPI 1.1 Spec
16 */
17#define SDW_REGADDR GENMASK(14, 0)
18#define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15)
19#define SDW_SCP_ADDRPAGE1_MASK GENMASK(30, 23)
20
21#define SDW_REG_NO_PAGE 0x00008000
22#define SDW_REG_OPTIONAL_PAGE 0x00010000
23#define SDW_REG_MAX 0x80000000
24
25#define SDW_DPN_SIZE 0x100
26#define SDW_BANK1_OFFSET 0x10
27
28/*
29 * DP0 Interrupt register & bits
30 *
 31 * The Spec treats Status (RO) and Clear (WC) as separate, but they share the
 32 * same address, so treat them as one register with WC.
33 */
34
 35/* both INT and STATUS registers are the same */
36#define SDW_DP0_INT 0x0
37#define SDW_DP0_INTMASK 0x1
38#define SDW_DP0_PORTCTRL 0x2
39#define SDW_DP0_BLOCKCTRL1 0x3
40#define SDW_DP0_PREPARESTATUS 0x4
41#define SDW_DP0_PREPARECTRL 0x5
42
43#define SDW_DP0_INT_TEST_FAIL BIT(0)
44#define SDW_DP0_INT_PORT_READY BIT(1)
45#define SDW_DP0_INT_BRA_FAILURE BIT(2)
46#define SDW_DP0_INT_IMPDEF1 BIT(5)
47#define SDW_DP0_INT_IMPDEF2 BIT(6)
48#define SDW_DP0_INT_IMPDEF3 BIT(7)
49
50#define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2)
51#define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4)
52#define SDW_DP0_PORTCTRL_BPT_PAYLD GENMASK(7, 6)
53
54#define SDW_DP0_CHANNELEN 0x20
55#define SDW_DP0_SAMPLECTRL1 0x22
56#define SDW_DP0_SAMPLECTRL2 0x23
57#define SDW_DP0_OFFSETCTRL1 0x24
58#define SDW_DP0_OFFSETCTRL2 0x25
59#define SDW_DP0_HCTRL 0x26
60#define SDW_DP0_LANECTRL 0x28
61
 62/* Both INT and STATUS registers are the same */
63#define SDW_SCP_INT1 0x40
64#define SDW_SCP_INTMASK1 0x41
65
66#define SDW_SCP_INT1_PARITY BIT(0)
67#define SDW_SCP_INT1_BUS_CLASH BIT(1)
68#define SDW_SCP_INT1_IMPL_DEF BIT(2)
69#define SDW_SCP_INT1_SCP2_CASCADE BIT(7)
70#define SDW_SCP_INT1_PORT0_3 GENMASK(6, 3)
71
72#define SDW_SCP_INTSTAT2 0x42
73#define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7)
74#define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0)
75
76
77#define SDW_SCP_INTSTAT3 0x43
78#define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0)
79
80/* Number of interrupt status registers */
81#define SDW_NUM_INT_STAT_REGISTERS 3
82
83/* Number of interrupt clear registers */
84#define SDW_NUM_INT_CLEAR_REGISTERS 1
85
86#define SDW_SCP_CTRL 0x44
87#define SDW_SCP_CTRL_CLK_STP_NOW BIT(1)
88#define SDW_SCP_CTRL_FORCE_RESET BIT(7)
89
90#define SDW_SCP_STAT 0x44
91#define SDW_SCP_STAT_CLK_STP_NF BIT(0)
92#define SDW_SCP_STAT_HPHY_NOK BIT(5)
93#define SDW_SCP_STAT_CURR_BANK BIT(6)
94
95#define SDW_SCP_SYSTEMCTRL 0x45
96#define SDW_SCP_SYSTEMCTRL_CLK_STP_PREP BIT(0)
97#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE BIT(2)
98#define SDW_SCP_SYSTEMCTRL_WAKE_UP_EN BIT(3)
99#define SDW_SCP_SYSTEMCTRL_HIGH_PHY BIT(4)
100
101#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE0 0
102#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1 BIT(2)
103
104#define SDW_SCP_DEVNUMBER 0x46
105#define SDW_SCP_HIGH_PHY_CHECK 0x47
106#define SDW_SCP_ADDRPAGE1 0x48
107#define SDW_SCP_ADDRPAGE2 0x49
108#define SDW_SCP_KEEPEREN 0x4A
109#define SDW_SCP_BANKDELAY 0x4B
110#define SDW_SCP_TESTMODE 0x4F
111#define SDW_SCP_DEVID_0 0x50
112#define SDW_SCP_DEVID_1 0x51
113#define SDW_SCP_DEVID_2 0x52
114#define SDW_SCP_DEVID_3 0x53
115#define SDW_SCP_DEVID_4 0x54
116#define SDW_SCP_DEVID_5 0x55
117
118/* Banked Registers */
119#define SDW_SCP_FRAMECTRL_B0 0x60
120#define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET)
121#define SDW_SCP_NEXTFRAME_B0 0x61
122#define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET)
123
124/* Both INT and STATUS registers are the same */
125#define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n))
126#define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n))
127#define SDW_DPN_PORTCTRL(n) (0x2 + SDW_DPN_SIZE * (n))
128#define SDW_DPN_BLOCKCTRL1(n) (0x3 + SDW_DPN_SIZE * (n))
129#define SDW_DPN_PREPARESTATUS(n) (0x4 + SDW_DPN_SIZE * (n))
130#define SDW_DPN_PREPARECTRL(n) (0x5 + SDW_DPN_SIZE * (n))
131
132#define SDW_DPN_INT_TEST_FAIL BIT(0)
133#define SDW_DPN_INT_PORT_READY BIT(1)
134#define SDW_DPN_INT_IMPDEF1 BIT(5)
135#define SDW_DPN_INT_IMPDEF2 BIT(6)
136#define SDW_DPN_INT_IMPDEF3 BIT(7)
137
138#define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0)
139#define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2)
140#define SDW_DPN_PORTCTRL_NXTINVBANK BIT(4)
141
142#define SDW_DPN_BLOCKCTRL1_WDLEN GENMASK(5, 0)
143
144#define SDW_DPN_PREPARECTRL_CH_PREP GENMASK(7, 0)
145
146#define SDW_DPN_CHANNELEN_B0(n) (0x20 + SDW_DPN_SIZE * (n))
147#define SDW_DPN_CHANNELEN_B1(n) (0x30 + SDW_DPN_SIZE * (n))
148
149#define SDW_DPN_BLOCKCTRL2_B0(n) (0x21 + SDW_DPN_SIZE * (n))
150#define SDW_DPN_BLOCKCTRL2_B1(n) (0x31 + SDW_DPN_SIZE * (n))
151
152#define SDW_DPN_SAMPLECTRL1_B0(n) (0x22 + SDW_DPN_SIZE * (n))
153#define SDW_DPN_SAMPLECTRL1_B1(n) (0x32 + SDW_DPN_SIZE * (n))
154
155#define SDW_DPN_SAMPLECTRL2_B0(n) (0x23 + SDW_DPN_SIZE * (n))
156#define SDW_DPN_SAMPLECTRL2_B1(n) (0x33 + SDW_DPN_SIZE * (n))
157
158#define SDW_DPN_OFFSETCTRL1_B0(n) (0x24 + SDW_DPN_SIZE * (n))
159#define SDW_DPN_OFFSETCTRL1_B1(n) (0x34 + SDW_DPN_SIZE * (n))
160
161#define SDW_DPN_OFFSETCTRL2_B0(n) (0x25 + SDW_DPN_SIZE * (n))
162#define SDW_DPN_OFFSETCTRL2_B1(n) (0x35 + SDW_DPN_SIZE * (n))
163
164#define SDW_DPN_HCTRL_B0(n) (0x26 + SDW_DPN_SIZE * (n))
165#define SDW_DPN_HCTRL_B1(n) (0x36 + SDW_DPN_SIZE * (n))
166
167#define SDW_DPN_BLOCKCTRL3_B0(n) (0x27 + SDW_DPN_SIZE * (n))
168#define SDW_DPN_BLOCKCTRL3_B1(n) (0x37 + SDW_DPN_SIZE * (n))
169
170#define SDW_DPN_LANECTRL_B0(n) (0x28 + SDW_DPN_SIZE * (n))
171#define SDW_DPN_LANECTRL_B1(n) (0x38 + SDW_DPN_SIZE * (n))
172
173#define SDW_DPN_SAMPLECTRL_LOW GENMASK(7, 0)
174#define SDW_DPN_SAMPLECTRL_HIGH GENMASK(15, 8)
175
176#define SDW_DPN_HCTRL_HSTART GENMASK(7, 4)
177#define SDW_DPN_HCTRL_HSTOP GENMASK(3, 0)
178
179#define SDW_NUM_CASC_PORT_INTSTAT1 4
180#define SDW_CASC_PORT_START_INTSTAT1 0
181#define SDW_CASC_PORT_MASK_INTSTAT1 0x8
182#define SDW_CASC_PORT_REG_OFFSET_INTSTAT1 0x0
183
184#define SDW_NUM_CASC_PORT_INTSTAT2 7
185#define SDW_CASC_PORT_START_INTSTAT2 4
186#define SDW_CASC_PORT_MASK_INTSTAT2 1
187#define SDW_CASC_PORT_REG_OFFSET_INTSTAT2 1
188
189#define SDW_NUM_CASC_PORT_INTSTAT3 4
190#define SDW_CASC_PORT_START_INTSTAT3 11
191#define SDW_CASC_PORT_MASK_INTSTAT3 1
192#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2
193
194#endif /* __SDW_REGISTERS_H */
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
new file mode 100644
index 000000000000..9fd553e553e9
--- /dev/null
+++ b/include/linux/soundwire/sdw_type.h
@@ -0,0 +1,19 @@
1// SPDX-License-Identifier: GPL-2.0
2// Copyright(c) 2015-17 Intel Corporation.
3
4#ifndef __SOUNDWIRE_TYPES_H
5#define __SOUNDWIRE_TYPES_H
6
7extern struct bus_type sdw_bus_type;
8
9#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
10
11#define sdw_register_driver(drv) \
12 __sdw_register_driver(drv, THIS_MODULE)
13
14int __sdw_register_driver(struct sdw_driver *drv, struct module *);
15void sdw_unregister_driver(struct sdw_driver *drv);
16
17int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
18
19#endif /* __SOUNDWIRE_TYPES_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3bf273538840..4894d322d258 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -409,4 +409,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
409#define atomic_dec_and_lock(atomic, lock) \ 409#define atomic_dec_and_lock(atomic, lock) \
410 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) 410 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
411 411
412int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
413 size_t max_size, unsigned int cpu_mult,
414 gfp_t gfp);
415
416void free_bucket_spinlocks(spinlock_t *locks);
417
412#endif /* __LINUX_SPINLOCK_H */ 418#endif /* __LINUX_SPINLOCK_H */
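
The two declarations added above are the bucket-spinlock helpers: alloc_bucket_spinlocks() allocates an array of spinlocks for protecting hash buckets (the count scales with cpu_mult per possible CPU and is capped at max_size, with lock_mask returned for indexing), and free_bucket_spinlocks() releases the array. A minimal usage sketch, assuming a hypothetical hash table; the ht_* names are illustrative only:

static spinlock_t *ht_locks;
static unsigned int ht_lock_mask;

static int ht_locks_init(void)
{
	/* At most 1024 locks, roughly 4 per possible CPU (illustrative). */
	return alloc_bucket_spinlocks(&ht_locks, &ht_lock_mask, 1024, 4,
				      GFP_KERNEL);
}

static spinlock_t *ht_bucket_lock(u32 hash)
{
	/* lock_mask is sized so that masking the hash selects a valid lock. */
	return &ht_locks[hash & ht_lock_mask];
}

static void ht_locks_exit(void)
{
	free_bucket_spinlocks(ht_locks);
}
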
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be8966e837..33c1c698df09 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
92 * relies on normal RCU, it can be called from the CPU which 92 * relies on normal RCU, it can be called from the CPU which
93 * is in the idle loop from an RCU point of view or offline. 93 * is in the idle loop from an RCU point of view or offline.
94 */ 94 */
95static inline int srcu_read_lock_held(struct srcu_struct *sp) 95static inline int srcu_read_lock_held(const struct srcu_struct *sp)
96{ 96{
97 if (!debug_lockdep_rcu_enabled()) 97 if (!debug_lockdep_rcu_enabled())
98 return 1; 98 return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
101 101
102#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 102#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
103 103
104static inline int srcu_read_lock_held(struct srcu_struct *sp) 104static inline int srcu_read_lock_held(const struct srcu_struct *sp)
105{ 105{
106 return 1; 106 return 1;
107} 107}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index a949f4f9e4d7..4eda108abee0 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
40 unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */ 40 unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
41 41
42 /* Update-side state. */ 42 /* Update-side state. */
43 raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp; 43 spinlock_t __private lock ____cacheline_internodealigned_in_smp;
44 struct rcu_segcblist srcu_cblist; /* List of callbacks.*/ 44 struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
45 unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ 45 unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
46 unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ 46 unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
58 * Node in SRCU combining tree, similar in function to rcu_data. 58 * Node in SRCU combining tree, similar in function to rcu_data.
59 */ 59 */
60struct srcu_node { 60struct srcu_node {
61 raw_spinlock_t __private lock; 61 spinlock_t __private lock;
62 unsigned long srcu_have_cbs[4]; /* GP seq for children */ 62 unsigned long srcu_have_cbs[4]; /* GP seq for children */
63 /* having CBs, but only */ 63 /* having CBs, but only */
64 /* is > ->srcu_gq_seq. */ 64 /* is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
78 struct srcu_node *level[RCU_NUM_LVLS + 1]; 78 struct srcu_node *level[RCU_NUM_LVLS + 1];
79 /* First node at each level. */ 79 /* First node at each level. */
80 struct mutex srcu_cb_mutex; /* Serialize CB preparation. */ 80 struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
81 raw_spinlock_t __private lock; /* Protect counters */ 81 spinlock_t __private lock; /* Protect counters */
82 struct mutex srcu_gp_mutex; /* Serialize GP work. */ 82 struct mutex srcu_gp_mutex; /* Serialize GP work. */
83 unsigned int srcu_idx; /* Current rdr array element. */ 83 unsigned int srcu_idx; /* Current rdr array element. */
84 unsigned long srcu_gp_seq; /* Grace-period seq #. */ 84 unsigned long srcu_gp_seq; /* Grace-period seq #. */
@@ -107,7 +107,7 @@ struct srcu_struct {
107#define __SRCU_STRUCT_INIT(name) \ 107#define __SRCU_STRUCT_INIT(name) \
108 { \ 108 { \
109 .sda = &name##_srcu_data, \ 109 .sda = &name##_srcu_data, \
110 .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ 110 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
111 .srcu_gp_seq_needed = 0 - 1, \ 111 .srcu_gp_seq_needed = 0 - 1, \
112 __SRCU_DEP_MAP_INIT(name) \ 112 __SRCU_DEP_MAP_INIT(name) \
113 } 113 }
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 2181719fd907..998a4ba28eba 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -20,12 +20,20 @@ enum {
20#endif 20#endif
21 21
22/** 22/**
23 * sizeof_field(TYPE, MEMBER)
24 *
25 * @TYPE: The structure containing the field of interest
26 * @MEMBER: The field to return the size of
27 */
28#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
29
30/**
23 * offsetofend(TYPE, MEMBER) 31 * offsetofend(TYPE, MEMBER)
24 * 32 *
25 * @TYPE: The type of the structure 33 * @TYPE: The type of the structure
26 * @MEMBER: The member within the structure to get the end offset of 34 * @MEMBER: The member within the structure to get the end offset of
27 */ 35 */
28#define offsetofend(TYPE, MEMBER) \ 36#define offsetofend(TYPE, MEMBER) \
29 (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) 37 (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
30 38
31#endif 39#endif
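
sizeof_field() yields the size of a struct member without requiring an instance of the struct, and offsetofend() is now expressed in terms of it. A short illustration; struct sample is made up for the example:

struct sample {
	u32  id;		/* bytes 0-3  */
	char name[16];		/* bytes 4-19 */
};

/* 16: size of the name[] member, no struct sample object needed. */
#define SAMPLE_NAME_LEN		sizeof_field(struct sample, name)

/* 20: offsetof(name) == 4 plus sizeof_field(name) == 16. */
#define SAMPLE_NAME_END		offsetofend(struct sample, name)
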
diff --git a/include/linux/string.h b/include/linux/string.h
index cfd83eb2f926..dd39a690c841 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -11,6 +11,7 @@
11 11
12extern char *strndup_user(const char __user *, long); 12extern char *strndup_user(const char __user *, long);
13extern void *memdup_user(const void __user *, size_t); 13extern void *memdup_user(const void __user *, size_t);
14extern void *vmemdup_user(const void __user *, size_t);
14extern void *memdup_user_nul(const void __user *, size_t); 15extern void *memdup_user_nul(const void __user *, size_t);
15 16
16/* 17/*
@@ -28,7 +29,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
28size_t strlcpy(char *, const char *, size_t); 29size_t strlcpy(char *, const char *, size_t);
29#endif 30#endif
30#ifndef __HAVE_ARCH_STRSCPY 31#ifndef __HAVE_ARCH_STRSCPY
31ssize_t __must_check strscpy(char *, const char *, size_t); 32ssize_t strscpy(char *, const char *, size_t);
32#endif 33#endif
33#ifndef __HAVE_ARCH_STRCAT 34#ifndef __HAVE_ARCH_STRCAT
34extern char * strcat(char *, const char *); 35extern char * strcat(char *, const char *);
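
vmemdup_user() is the counterpart of memdup_user() for buffers that may be too large for kmalloc(): the copy can come from vmalloc, so it must be released with kvfree() rather than kfree(). A hedged sketch of the expected call pattern; user_ptr, len and MAX_LEN are illustrative:

	void *buf;

	if (len > MAX_LEN)	/* MAX_LEN is an illustrative sanity bound */
		return -EINVAL;

	buf = vmemdup_user(user_ptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... operate on buf ... */

	kvfree(buf);		/* not kfree(): the copy may be vmalloc'ed */
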
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 71c237e8240e..ed761f751ecb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -179,7 +179,6 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
179int rpc_restart_call_prepare(struct rpc_task *); 179int rpc_restart_call_prepare(struct rpc_task *);
180int rpc_restart_call(struct rpc_task *); 180int rpc_restart_call(struct rpc_task *);
181void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); 181void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
182int rpc_protocol(struct rpc_clnt *);
183struct net * rpc_net_ns(struct rpc_clnt *); 182struct net * rpc_net_ns(struct rpc_clnt *);
184size_t rpc_max_payload(struct rpc_clnt *); 183size_t rpc_max_payload(struct rpc_clnt *);
185size_t rpc_max_bc_payload(struct rpc_clnt *); 184size_t rpc_max_bc_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 995c6fe9ee90..4b731b046bcd 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -185,8 +185,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
185extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *); 185extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
186extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *); 186extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
187extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); 187extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
188extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
189extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
190extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); 188extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
191extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); 189extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
192extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); 190extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 221b7a2e5406..5859563e3c1f 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -64,7 +64,7 @@ enum rpcrdma_memreg {
64 RPCRDMA_MEMWINDOWS, 64 RPCRDMA_MEMWINDOWS,
65 RPCRDMA_MEMWINDOWS_ASYNC, 65 RPCRDMA_MEMWINDOWS_ASYNC,
66 RPCRDMA_MTHCAFMR, 66 RPCRDMA_MTHCAFMR,
67 RPCRDMA_FRMR, 67 RPCRDMA_FRWR,
68 RPCRDMA_ALLPHYSICAL, 68 RPCRDMA_ALLPHYSICAL,
69 RPCRDMA_LAST 69 RPCRDMA_LAST
70}; 70};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index cc22a24516d6..440b62f7502e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -384,6 +384,8 @@ extern int swsusp_page_is_forbidden(struct page *);
384extern void swsusp_set_page_free(struct page *); 384extern void swsusp_set_page_free(struct page *);
385extern void swsusp_unset_page_free(struct page *); 385extern void swsusp_unset_page_free(struct page *);
386extern unsigned long get_safe_page(gfp_t gfp_mask); 386extern unsigned long get_safe_page(gfp_t gfp_mask);
387extern asmlinkage int swsusp_arch_suspend(void);
388extern asmlinkage int swsusp_arch_resume(void);
387 389
388extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); 390extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
389extern int hibernate(void); 391extern int hibernate(void);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c2b8128799c1..7b6a59f722a3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -332,7 +332,6 @@ extern void mark_page_accessed(struct page *);
332extern void lru_add_drain(void); 332extern void lru_add_drain(void);
333extern void lru_add_drain_cpu(int cpu); 333extern void lru_add_drain_cpu(int cpu);
334extern void lru_add_drain_all(void); 334extern void lru_add_drain_all(void);
335extern void lru_add_drain_all_cpuslocked(void);
336extern void rotate_reclaimable_page(struct page *page); 335extern void rotate_reclaimable_page(struct page *page);
337extern void deactivate_file_page(struct page *page); 336extern void deactivate_file_page(struct page *page);
338extern void mark_page_lazyfree(struct page *page); 337extern void mark_page_lazyfree(struct page *page);
@@ -345,7 +344,6 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
345 344
346/* linux/mm/vmscan.c */ 345/* linux/mm/vmscan.c */
347extern unsigned long zone_reclaimable_pages(struct zone *zone); 346extern unsigned long zone_reclaimable_pages(struct zone *zone);
348extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat);
349extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 347extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
350 gfp_t gfp_mask, nodemask_t *mask); 348 gfp_t gfp_mask, nodemask_t *mask);
351extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); 349extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 24ed817082ee..5b1f2a00491c 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
66 enum dma_sync_target target); 66 enum dma_sync_target target);
67 67
68/* Accessory functions. */ 68/* Accessory functions. */
69
70void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
71 gfp_t flags, unsigned long attrs);
72void swiotlb_free(struct device *dev, size_t size, void *vaddr,
73 dma_addr_t dma_addr, unsigned long attrs);
74
69extern void 75extern void
70*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 76*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
71 dma_addr_t *dma_handle, gfp_t flags); 77 dma_addr_t *dma_handle, gfp_t flags);
@@ -115,10 +121,10 @@ extern int
115swiotlb_dma_supported(struct device *hwdev, u64 mask); 121swiotlb_dma_supported(struct device *hwdev, u64 mask);
116 122
117#ifdef CONFIG_SWIOTLB 123#ifdef CONFIG_SWIOTLB
118extern void __init swiotlb_free(void); 124extern void __init swiotlb_exit(void);
119unsigned int swiotlb_max_segment(void); 125unsigned int swiotlb_max_segment(void);
120#else 126#else
121static inline void swiotlb_free(void) { } 127static inline void swiotlb_exit(void) { }
122static inline unsigned int swiotlb_max_segment(void) { return 0; } 128static inline unsigned int swiotlb_max_segment(void) { return 0; }
123#endif 129#endif
124 130
@@ -126,4 +132,6 @@ extern void swiotlb_print_info(void);
126extern int is_swiotlb_buffer(phys_addr_t paddr); 132extern int is_swiotlb_buffer(phys_addr_t paddr);
127extern void swiotlb_set_max_segment(unsigned int); 133extern void swiotlb_set_max_segment(unsigned int);
128 134
135extern const struct dma_map_ops swiotlb_dma_ops;
136
129#endif /* __LINUX_SWIOTLB_H */ 137#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 09d73d0d1aa8..ec93e93371fa 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -100,6 +100,9 @@ struct sw_event_regs {
100 u32 gpio_interrupt_hdr; 100 u32 gpio_interrupt_hdr;
101 u32 gpio_interrupt_data; 101 u32 gpio_interrupt_data;
102 u32 reserved16[4]; 102 u32 reserved16[4];
103 u32 gfms_event_hdr;
104 u32 gfms_event_data;
105 u32 reserved17[4];
103} __packed; 106} __packed;
104 107
105enum { 108enum {
@@ -168,6 +171,14 @@ struct ntb_info_regs {
168 u16 reserved1; 171 u16 reserved1;
169 u64 ep_map; 172 u64 ep_map;
170 u16 requester_id; 173 u16 requester_id;
174 u16 reserved2;
175 u32 reserved3[4];
176 struct nt_partition_info {
177 u32 xlink_enabled;
178 u32 target_part_low;
179 u32 target_part_high;
180 u32 reserved;
181 } ntp_info[48];
171} __packed; 182} __packed;
172 183
173struct part_cfg_regs { 184struct part_cfg_regs {
@@ -284,7 +295,20 @@ enum {
284struct pff_csr_regs { 295struct pff_csr_regs {
285 u16 vendor_id; 296 u16 vendor_id;
286 u16 device_id; 297 u16 device_id;
287 u32 pci_cfg_header[15]; 298 u16 pcicmd;
299 u16 pcists;
300 u32 pci_class;
301 u32 pci_opts;
302 union {
303 u32 pci_bar[6];
304 u64 pci_bar64[3];
305 };
306 u32 pci_cardbus;
307 u32 pci_subsystem_id;
308 u32 pci_expansion_rom;
309 u32 pci_cap_ptr;
310 u32 reserved1;
311 u32 pci_irq;
288 u32 pci_cap_region[48]; 312 u32 pci_cap_region[48];
289 u32 pcie_cap_region[448]; 313 u32 pcie_cap_region[448];
290 u32 indirect_gas_window[128]; 314 u32 indirect_gas_window[128];
diff --git a/include/linux/sync_core.h b/include/linux/sync_core.h
new file mode 100644
index 000000000000..013da4b8b327
--- /dev/null
+++ b/include/linux/sync_core.h
@@ -0,0 +1,21 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_SYNC_CORE_H
3#define _LINUX_SYNC_CORE_H
4
5#ifdef CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
6#include <asm/sync_core.h>
7#else
8/*
9 * This is a dummy sync_core_before_usermode() implementation that can be used
10 * on all architectures which return to user-space through core serializing
11 * instructions.
12 * If your architecture returns to user-space through non-core-serializing
13 * instructions, you need to write your own functions.
14 */
15static inline void sync_core_before_usermode(void)
16{
17}
18#endif
19
20#endif /* _LINUX_SYNC_CORE_H */
21
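
The fallback above is intentionally empty: on most architectures the return to user space is already core serializing. An architecture that selects CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE supplies its own asm/sync_core.h instead; a hedged sketch of what such an override could look like, where sync_core() stands in for whatever core-serializing primitive the architecture provides:

/* arch/xyz/include/asm/sync_core.h -- illustrative only */
#ifndef _ASM_XYZ_SYNC_CORE_H
#define _ASM_XYZ_SYNC_CORE_H

static inline void sync_core_before_usermode(void)
{
	/* Serialize the core before the non-serializing return to user space. */
	sync_core();
}

#endif /* _ASM_XYZ_SYNC_CORE_H */
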
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 992bc9948232..b769ecfcc3bd 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -51,9 +51,6 @@ extern int proc_dointvec_minmax(struct ctl_table *, int,
51extern int proc_douintvec_minmax(struct ctl_table *table, int write, 51extern int proc_douintvec_minmax(struct ctl_table *table, int write,
52 void __user *buffer, size_t *lenp, 52 void __user *buffer, size_t *lenp,
53 loff_t *ppos); 53 loff_t *ppos);
54extern int proc_dopipe_max_size(struct ctl_table *table, int write,
55 void __user *buffer, size_t *lenp,
56 loff_t *ppos);
57extern int proc_dointvec_jiffies(struct ctl_table *, int, 54extern int proc_dointvec_jiffies(struct ctl_table *, int,
58 void __user *, size_t *, loff_t *); 55 void __user *, size_t *, loff_t *);
59extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, 56extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 40839c02d28c..b8bfdc173ec0 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -113,7 +113,7 @@ struct attribute_group {
113} 113}
114 114
115#define __ATTR_RO(_name) { \ 115#define __ATTR_RO(_name) { \
116 .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ 116 .attr = { .name = __stringify(_name), .mode = 0444 }, \
117 .show = _name##_show, \ 117 .show = _name##_show, \
118} 118}
119 119
@@ -124,12 +124,11 @@ struct attribute_group {
124} 124}
125 125
126#define __ATTR_WO(_name) { \ 126#define __ATTR_WO(_name) { \
127 .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ 127 .attr = { .name = __stringify(_name), .mode = 0200 }, \
128 .store = _name##_store, \ 128 .store = _name##_store, \
129} 129}
130 130
131#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ 131#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)
132 _name##_show, _name##_store)
133 132
134#define __ATTR_NULL { .attr = { .name = NULL } } 133#define __ATTR_NULL { .attr = { .name = NULL } }
135 134
@@ -192,14 +191,13 @@ struct bin_attribute {
192} 191}
193 192
194#define __BIN_ATTR_RO(_name, _size) { \ 193#define __BIN_ATTR_RO(_name, _size) { \
195 .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ 194 .attr = { .name = __stringify(_name), .mode = 0444 }, \
196 .read = _name##_read, \ 195 .read = _name##_read, \
197 .size = _size, \ 196 .size = _size, \
198} 197}
199 198
200#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \ 199#define __BIN_ATTR_RW(_name, _size) \
201 (S_IWUSR | S_IRUGO), _name##_read, \ 200 __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size)
202 _name##_write, _size)
203 201
204#define __BIN_ATTR_NULL __ATTR_NULL 202#define __BIN_ATTR_NULL __ATTR_NULL
205 203
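
The attribute helper macros now spell permissions as plain octal (0444, 0200, 0644) instead of the S_I* constants; users of __ATTR_RO() and friends are unaffected. A minimal sketch of __ATTR_RO in use, with the foo kobject attribute invented for illustration:

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

/* Expands to .attr = { .name = "foo", .mode = 0444 }, .show = foo_show */
static struct kobj_attribute foo_attr = __ATTR_RO(foo);
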
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a6361389b..8f4c54986f97 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -335,6 +335,17 @@ struct tcp_sock {
335 335
336 int linger2; 336 int linger2;
337 337
338
339/* Sock_ops bpf program related variables */
340#ifdef CONFIG_BPF
341 u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs
342 * values defined in uapi/linux/tcp.h
343 */
344#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
345#else
346#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
347#endif
348
338/* Receiver side RTT estimation */ 349/* Receiver side RTT estimation */
339 struct { 350 struct {
340 u32 rtt_us; 351 u32 rtt_us;
@@ -344,7 +355,7 @@ struct tcp_sock {
344 355
345/* Receiver queue space */ 356/* Receiver queue space */
346 struct { 357 struct {
347 int space; 358 u32 space;
348 u32 seq; 359 u32 seq;
349 u64 time; 360 u64 time;
350 } rcvq_space; 361 } rcvq_space;
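
bpf_sock_ops_cb_flags lets a sock_ops BPF program enable optional callbacks per socket, and BPF_SOCK_OPS_TEST_FLAG() compiles to 0 when BPF is not configured so the checks cost nothing. A hedged sketch of how a TCP code path might gate a callback on it; BPF_SOCK_OPS_RTO_CB_FLAG is one of the flag values defined in uapi/linux/tcp.h:

	/* Only drop into BPF if this socket opted in to RTO callbacks. */
	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG)) {
		/* invoke the attached sock_ops program for the RTO event */
	}
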
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index cb889afe576b..a2b3dfcee0b5 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -17,6 +17,7 @@
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/idr.h> 19#include <linux/idr.h>
20#include <linux/kref.h>
20#include <linux/list.h> 21#include <linux/list.h>
21#include <linux/tee.h> 22#include <linux/tee.h>
22 23
@@ -25,8 +26,12 @@
25 * specific TEE driver. 26 * specific TEE driver.
26 */ 27 */
27 28
28#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */ 29#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */
29#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */ 30#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */
31#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */
32#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */
33#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
34#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
30 35
31struct device; 36struct device;
32struct tee_device; 37struct tee_device;
@@ -38,11 +43,17 @@ struct tee_shm_pool;
38 * @teedev: pointer to this drivers struct tee_device 43 * @teedev: pointer to this drivers struct tee_device
39 * @list_shm: List of shared memory object owned by this context 44 * @list_shm: List of shared memory object owned by this context
40 * @data: driver specific context data, managed by the driver 45 * @data: driver specific context data, managed by the driver
46 * @refcount: reference counter for this structure
47 * @releasing: flag that indicates if context is being released right now.
48 * It is needed to break circular dependency on context during
49 * shared memory release.
41 */ 50 */
42struct tee_context { 51struct tee_context {
43 struct tee_device *teedev; 52 struct tee_device *teedev;
44 struct list_head list_shm; 53 struct list_head list_shm;
45 void *data; 54 void *data;
55 struct kref refcount;
56 bool releasing;
46}; 57};
47 58
48struct tee_param_memref { 59struct tee_param_memref {
@@ -76,6 +87,8 @@ struct tee_param {
76 * @cancel_req: request cancel of an ongoing invoke or open 87 * @cancel_req: request cancel of an ongoing invoke or open
77 * @supp_revc: called for supplicant to get a command 88 * @supp_revc: called for supplicant to get a command
78 * @supp_send: called for supplicant to send a response 89 * @supp_send: called for supplicant to send a response
90 * @shm_register: register shared memory buffer in TEE
91 * @shm_unregister: unregister shared memory buffer in TEE
79 */ 92 */
80struct tee_driver_ops { 93struct tee_driver_ops {
81 void (*get_version)(struct tee_device *teedev, 94 void (*get_version)(struct tee_device *teedev,
@@ -94,6 +107,10 @@ struct tee_driver_ops {
94 struct tee_param *param); 107 struct tee_param *param);
95 int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, 108 int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
96 struct tee_param *param); 109 struct tee_param *param);
110 int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
111 struct page **pages, size_t num_pages,
112 unsigned long start);
113 int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
97}; 114};
98 115
99/** 116/**
@@ -150,6 +167,97 @@ int tee_device_register(struct tee_device *teedev);
150void tee_device_unregister(struct tee_device *teedev); 167void tee_device_unregister(struct tee_device *teedev);
151 168
152/** 169/**
170 * struct tee_shm - shared memory object
171 * @teedev: device used to allocate the object
172 * @ctx: context using the object, if NULL the context is gone
173 * @link: link element
174 * @paddr: physical address of the shared memory
175 * @kaddr: virtual address of the shared memory
176 * @size: size of shared memory
177 * @offset: offset of buffer in user space
178 * @pages: locked pages from userspace
179 * @num_pages: number of locked pages
180 * @dmabuf: dmabuf used for exporting to user space
181 * @flags: defined by TEE_SHM_* in tee_drv.h
182 * @id: unique id of a shared memory object on this device
183 *
184 * This pool is only supposed to be accessed directly from the TEE
185 * subsystem and from drivers that implement their own shm pool manager.
186 */
187struct tee_shm {
188 struct tee_device *teedev;
189 struct tee_context *ctx;
190 struct list_head link;
191 phys_addr_t paddr;
192 void *kaddr;
193 size_t size;
194 unsigned int offset;
195 struct page **pages;
196 size_t num_pages;
197 struct dma_buf *dmabuf;
198 u32 flags;
199 int id;
200};
201
202/**
203 * struct tee_shm_pool_mgr - shared memory manager
204 * @ops: operations
205 * @private_data: private data for the shared memory manager
206 */
207struct tee_shm_pool_mgr {
208 const struct tee_shm_pool_mgr_ops *ops;
209 void *private_data;
210};
211
212/**
213 * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
214 * @alloc: called when allocating shared memory
215 * @free: called when freeing shared memory
216 * @destroy_poolmgr: called when destroying the pool manager
217 */
218struct tee_shm_pool_mgr_ops {
219 int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
220 size_t size);
221 void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
222 void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
223};
224
225/**
226 * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
227 * @priv_mgr: manager for driver private shared memory allocations
228 * @dmabuf_mgr: manager for dma-buf shared memory allocations
229 *
230 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
231 * in @dmabuf_mgr; others will use the range provided by @priv_mgr.
232 *
233 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
234 */
235struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
236 struct tee_shm_pool_mgr *dmabuf_mgr);
237
238/*
239 * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
240 * memory
241 * @vaddr: Virtual address of start of pool
242 * @paddr: Physical address of start of pool
243 * @size: Size in bytes of the pool
244 *
245 * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
246 */
247struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
248 phys_addr_t paddr,
249 size_t size,
250 int min_alloc_order);
251
252/**
253 * tee_shm_pool_mgr_destroy() - Free a shared memory manager
254 */
255static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
256{
257 poolm->ops->destroy_poolmgr(poolm);
258}
259
260/**
153 * struct tee_shm_pool_mem_info - holds information needed to create a shared 261 * struct tee_shm_pool_mem_info - holds information needed to create a shared
154 * memory pool 262 * memory pool
155 * @vaddr: Virtual address of start of pool 263 * @vaddr: Virtual address of start of pool
@@ -211,6 +319,40 @@ void *tee_get_drvdata(struct tee_device *teedev);
211struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); 319struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
212 320
213/** 321/**
322 * tee_shm_priv_alloc() - Allocate shared memory privately
323 * @dev: Device that allocates the shared memory
324 * @size: Requested size of shared memory
325 *
326 * Allocates shared memory buffer that is not associated with any client
327 * context. Such buffers are owned by TEE driver and used for internal calls.
328 *
329 * @returns a pointer to 'struct tee_shm'
330 */
331struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
332
333/**
334 * tee_shm_register() - Register shared memory buffer
335 * @ctx: Context that registers the shared memory
336 * @addr: Address in userspace of the shared buffer
337 * @length: Length of the shared buffer
338 * @flags: Flags setting properties for the requested shared memory.
339 *
340 * @returns a pointer to 'struct tee_shm'
341 */
342struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
343 size_t length, u32 flags);
344
345/**
346 * tee_shm_is_registered() - Check if shared memory object is registered in TEE
347 * @shm: Shared memory handle
348 * @returns true if object is registered in TEE
349 */
350static inline bool tee_shm_is_registered(struct tee_shm *shm)
351{
352 return shm && (shm->flags & TEE_SHM_REGISTER);
353}
354
355/**
214 * tee_shm_free() - Free shared memory 356 * tee_shm_free() - Free shared memory
215 * @shm: Handle to shared memory to free 357 * @shm: Handle to shared memory to free
216 */ 358 */
@@ -260,11 +402,47 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
260int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); 402int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
261 403
262/** 404/**
405 * tee_shm_get_size() - Get size of shared memory buffer
406 * @shm: Shared memory handle
407 * @returns size of shared memory
408 */
409static inline size_t tee_shm_get_size(struct tee_shm *shm)
410{
411 return shm->size;
412}
413
414/**
415 * tee_shm_get_pages() - Get list of pages that hold shared buffer
416 * @shm: Shared memory handle
417 * @num_pages: Number of pages will be stored there
418 * @returns pointer to pages array
419 */
420static inline struct page **tee_shm_get_pages(struct tee_shm *shm,
421 size_t *num_pages)
422{
423 *num_pages = shm->num_pages;
424 return shm->pages;
425}
426
427/**
428 * tee_shm_get_page_offset() - Get shared buffer offset from page start
429 * @shm: Shared memory handle
430 * @returns page offset of shared buffer
431 */
432static inline size_t tee_shm_get_page_offset(struct tee_shm *shm)
433{
434 return shm->offset;
435}
436
437/**
263 * tee_shm_get_id() - Get id of a shared memory object 438 * tee_shm_get_id() - Get id of a shared memory object
264 * @shm: Shared memory handle 439 * @shm: Shared memory handle
265 * @returns id 440 * @returns id
266 */ 441 */
267int tee_shm_get_id(struct tee_shm *shm); 442static inline int tee_shm_get_id(struct tee_shm *shm)
443{
444 return shm->id;
445}
268 446
269/** 447/**
270 * tee_shm_get_from_id() - Find shared memory object and increase reference 448 * tee_shm_get_from_id() - Find shared memory object and increase reference
@@ -275,4 +453,16 @@ int tee_shm_get_id(struct tee_shm *shm);
275 */ 453 */
276struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); 454struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
277 455
456static inline bool tee_param_is_memref(struct tee_param *param)
457{
458 switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
459 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
460 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
461 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
462 return true;
463 default:
464 return false;
465 }
466}
467
278#endif /*__TEE_DRV_H*/ 468#endif /*__TEE_DRV_H*/
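
With struct tee_shm_pool_mgr and its helpers exported, a TEE driver can assemble its shared-memory pool from two reserved-memory regions instead of open-coding the pool. A hedged sketch using only the interfaces declared above; the addresses, sizes and error-handling layout are placeholders:

	struct tee_shm_pool_mgr *priv_mgr, *dmabuf_mgr;
	struct tee_shm_pool *pool;

	/* Driver-private region; minimum allocation order 3 (8 bytes). */
	priv_mgr = tee_shm_pool_mgr_alloc_res_mem(priv_vaddr, priv_paddr,
						  priv_size, 3);
	if (IS_ERR(priv_mgr))
		return PTR_ERR(priv_mgr);

	/* dma-buf backed region exported to user space, page-sized blocks. */
	dmabuf_mgr = tee_shm_pool_mgr_alloc_res_mem(dmabuf_vaddr, dmabuf_paddr,
						    dmabuf_size, PAGE_SHIFT);
	if (IS_ERR(dmabuf_mgr)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return PTR_ERR(dmabuf_mgr);
	}

	pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(pool)) {
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
		tee_shm_pool_mgr_destroy(priv_mgr);
		return PTR_ERR(pool);
	}
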
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
new file mode 100644
index 000000000000..45bc6b376492
--- /dev/null
+++ b/include/linux/ti-emif-sram.h
@@ -0,0 +1,69 @@
1/*
2 * TI AM33XX EMIF Routines
3 *
4 * Copyright (C) 2016-2017 Texas Instruments Inc.
5 * Dave Gerlach
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16#ifndef __LINUX_TI_EMIF_H
17#define __LINUX_TI_EMIF_H
18
19#include <linux/kbuild.h>
20#include <linux/types.h>
21#ifndef __ASSEMBLY__
22
23struct emif_regs_amx3 {
24 u32 emif_sdcfg_val;
25 u32 emif_timing1_val;
26 u32 emif_timing2_val;
27 u32 emif_timing3_val;
28 u32 emif_ref_ctrl_val;
29 u32 emif_zqcfg_val;
30 u32 emif_pmcr_val;
31 u32 emif_pmcr_shdw_val;
32 u32 emif_rd_wr_level_ramp_ctrl;
33 u32 emif_rd_wr_exec_thresh;
34 u32 emif_cos_config;
35 u32 emif_priority_to_cos_mapping;
36 u32 emif_connect_id_serv_1_map;
37 u32 emif_connect_id_serv_2_map;
38 u32 emif_ocp_config_val;
39 u32 emif_lpddr2_nvm_tim;
40 u32 emif_lpddr2_nvm_tim_shdw;
41 u32 emif_dll_calib_ctrl_val;
42 u32 emif_dll_calib_ctrl_val_shdw;
43 u32 emif_ddr_phy_ctlr_1;
44 u32 emif_ext_phy_ctrl_vals[120];
45};
46
47struct ti_emif_pm_data {
48 void __iomem *ti_emif_base_addr_virt;
49 phys_addr_t ti_emif_base_addr_phys;
50 unsigned long ti_emif_sram_config;
51 struct emif_regs_amx3 *regs_virt;
52 phys_addr_t regs_phys;
53} __packed __aligned(8);
54
55struct ti_emif_pm_functions {
56 u32 save_context;
57 u32 restore_context;
58 u32 enter_sr;
59 u32 exit_sr;
60 u32 abort_sr;
61} __packed __aligned(8);
62
63struct gen_pool;
64
65int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
66int ti_emif_get_mem_type(void);
67
68#endif
69#endif /* __LINUX_TI_EMIF_H */
diff --git a/include/linux/torture.h b/include/linux/torture.h
index a45702eb3e7b..66272862070b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -79,7 +79,7 @@ void stutter_wait(const char *title);
79int torture_stutter_init(int s); 79int torture_stutter_init(int s);
80 80
81/* Initialization and cleanup. */ 81/* Initialization and cleanup. */
82bool torture_init_begin(char *ttype, bool v, int *runnable); 82bool torture_init_begin(char *ttype, bool v);
83void torture_init_end(void); 83void torture_init_end(void);
84bool torture_cleanup_begin(void); 84bool torture_cleanup_begin(void);
85void torture_cleanup_end(void); 85void torture_cleanup_end(void);
@@ -96,4 +96,10 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
96#define torture_stop_kthread(n, tp) \ 96#define torture_stop_kthread(n, tp) \
97 _torture_stop_kthread("Stopping " #n " task", &(tp)) 97 _torture_stop_kthread("Stopping " #n " task", &(tp))
98 98
99#ifdef CONFIG_PREEMPT
100#define torture_preempt_schedule() preempt_schedule()
101#else
102#define torture_preempt_schedule()
103#endif
104
99#endif /* __LINUX_TORTURE_H */ 105#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 5a090f5ab335..bcdd3790e94d 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -24,11 +24,6 @@
24 24
25#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ 25#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
26 26
27/*
28 * Chip num is this value or a valid tpm idx
29 */
30#define TPM_ANY_NUM 0xFFFF
31
32struct tpm_chip; 27struct tpm_chip;
33struct trusted_key_payload; 28struct trusted_key_payload;
34struct trusted_key_options; 29struct trusted_key_options;
@@ -50,46 +45,52 @@ struct tpm_class_ops {
50 unsigned long *timeout_cap); 45 unsigned long *timeout_cap);
51 int (*request_locality)(struct tpm_chip *chip, int loc); 46 int (*request_locality)(struct tpm_chip *chip, int loc);
52 void (*relinquish_locality)(struct tpm_chip *chip, int loc); 47 void (*relinquish_locality)(struct tpm_chip *chip, int loc);
48 void (*clk_enable)(struct tpm_chip *chip, bool value);
53}; 49};
54 50
55#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) 51#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
56 52
57extern int tpm_is_tpm2(u32 chip_num); 53extern int tpm_is_tpm2(struct tpm_chip *chip);
58extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); 54extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
59extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); 55extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
60extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); 56extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
61extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); 57extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
62extern int tpm_seal_trusted(u32 chip_num, 58extern int tpm_seal_trusted(struct tpm_chip *chip,
63 struct trusted_key_payload *payload, 59 struct trusted_key_payload *payload,
64 struct trusted_key_options *options); 60 struct trusted_key_options *options);
65extern int tpm_unseal_trusted(u32 chip_num, 61extern int tpm_unseal_trusted(struct tpm_chip *chip,
66 struct trusted_key_payload *payload, 62 struct trusted_key_payload *payload,
67 struct trusted_key_options *options); 63 struct trusted_key_options *options);
68#else 64#else
69static inline int tpm_is_tpm2(u32 chip_num) 65static inline int tpm_is_tpm2(struct tpm_chip *chip)
70{ 66{
71 return -ENODEV; 67 return -ENODEV;
72} 68}
73static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { 69static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
70{
74 return -ENODEV; 71 return -ENODEV;
75} 72}
76static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { 73static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx,
74 const u8 *hash)
75{
77 return -ENODEV; 76 return -ENODEV;
78} 77}
79static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { 78static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
79{
80 return -ENODEV; 80 return -ENODEV;
81} 81}
82static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { 82static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
83{
83 return -ENODEV; 84 return -ENODEV;
84} 85}
85 86
86static inline int tpm_seal_trusted(u32 chip_num, 87static inline int tpm_seal_trusted(struct tpm_chip *chip,
87 struct trusted_key_payload *payload, 88 struct trusted_key_payload *payload,
88 struct trusted_key_options *options) 89 struct trusted_key_options *options)
89{ 90{
90 return -ENODEV; 91 return -ENODEV;
91} 92}
92static inline int tpm_unseal_trusted(u32 chip_num, 93static inline int tpm_unseal_trusted(struct tpm_chip *chip,
93 struct trusted_key_payload *payload, 94 struct trusted_key_payload *payload,
94 struct trusted_key_options *options) 95 struct trusted_key_options *options)
95{ 96{
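
The TPM interface now takes a struct tpm_chip * where it used to take a u32 chip number, and TPM_ANY_NUM is gone. A hedged sketch of a caller reading a PCR under the new prototypes; passing NULL is assumed here to select the default chip, playing the role the old TPM_ANY_NUM argument did:

	u8 digest[TPM_DIGEST_SIZE];
	int rc;

	/* NULL chip: let the TPM core pick the default chip (assumed). */
	rc = tpm_pcr_read(NULL, 10, digest);
	if (rc)
		return rc;
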
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
new file mode 100644
index 000000000000..20d9da77fc11
--- /dev/null
+++ b/include/linux/tpm_eventlog.h
@@ -0,0 +1,124 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef __LINUX_TPM_EVENTLOG_H__
4#define __LINUX_TPM_EVENTLOG_H__
5
6#include <crypto/hash_info.h>
7
8#define TCG_EVENT_NAME_LEN_MAX 255
9#define MAX_TEXT_EVENT 1000 /* Max event string length */
10#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
11#define TPM2_ACTIVE_PCR_BANKS 3
12
13#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1
14#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2
15
16#ifdef CONFIG_PPC64
17#define do_endian_conversion(x) be32_to_cpu(x)
18#else
19#define do_endian_conversion(x) x
20#endif
21
22enum bios_platform_class {
23 BIOS_CLIENT = 0x00,
24 BIOS_SERVER = 0x01,
25};
26
27struct tcpa_event {
28 u32 pcr_index;
29 u32 event_type;
30 u8 pcr_value[20]; /* SHA1 */
31 u32 event_size;
32 u8 event_data[0];
33};
34
35enum tcpa_event_types {
36 PREBOOT = 0,
37 POST_CODE,
38 UNUSED,
39 NO_ACTION,
40 SEPARATOR,
41 ACTION,
42 EVENT_TAG,
43 SCRTM_CONTENTS,
44 SCRTM_VERSION,
45 CPU_MICROCODE,
46 PLATFORM_CONFIG_FLAGS,
47 TABLE_OF_DEVICES,
48 COMPACT_HASH,
49 IPL,
50 IPL_PARTITION_DATA,
51 NONHOST_CODE,
52 NONHOST_CONFIG,
53 NONHOST_INFO,
54};
55
56struct tcpa_pc_event {
57 u32 event_id;
58 u32 event_size;
59 u8 event_data[0];
60};
61
62enum tcpa_pc_event_ids {
63 SMBIOS = 1,
64 BIS_CERT,
65 POST_BIOS_ROM,
66 ESCD,
67 CMOS,
68 NVRAM,
69 OPTION_ROM_EXEC,
70 OPTION_ROM_CONFIG,
71 OPTION_ROM_MICROCODE = 10,
72 S_CRTM_VERSION,
73 S_CRTM_CONTENTS,
74 POST_CONTENTS,
75 HOST_TABLE_OF_DEVICES,
76};
77
78/* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */
79
80struct tcg_efi_specid_event_algs {
81 u16 alg_id;
82 u16 digest_size;
83} __packed;
84
85struct tcg_efi_specid_event {
86 u8 signature[16];
87 u32 platform_class;
88 u8 spec_version_minor;
89 u8 spec_version_major;
90 u8 spec_errata;
91 u8 uintnsize;
92 u32 num_algs;
93 struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS];
94 u8 vendor_info_size;
95 u8 vendor_info[0];
96} __packed;
97
98struct tcg_pcr_event {
99 u32 pcr_idx;
100 u32 event_type;
101 u8 digest[20];
102 u32 event_size;
103 u8 event[0];
104} __packed;
105
106struct tcg_event_field {
107 u32 event_size;
108 u8 event[0];
109} __packed;
110
111struct tpm2_digest {
112 u16 alg_id;
113 u8 digest[SHA512_DIGEST_SIZE];
114} __packed;
115
116struct tcg_pcr_event2 {
117 u32 pcr_idx;
118 u32 event_type;
119 u32 count;
120 struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS];
121 struct tcg_event_field event;
122} __packed;
123
124#endif
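
The TCG 1.2 structures above are variable length: each struct tcpa_event is followed by event_size bytes of event_data, so walking the log means stepping over the fixed header plus the payload. A hedged sketch of that traversal (bounds checking against the end of the log is elided; do_endian_conversion() covers the PPC64 case):

static const struct tcpa_event *tcpa_next_event(const struct tcpa_event *ev)
{
	u32 size = do_endian_conversion(ev->event_size);

	/* event_data[] immediately follows the fixed-size header. */
	return (const void *)ev + sizeof(*ev) + size;
}
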
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index af44e7c2d577..8a1442c4e513 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -467,6 +467,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
467unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); 467unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
468int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); 468int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
469void perf_event_detach_bpf_prog(struct perf_event *event); 469void perf_event_detach_bpf_prog(struct perf_event *event);
470int perf_event_query_prog_array(struct perf_event *event, void __user *info);
470#else 471#else
471static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) 472static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
472{ 473{
@@ -481,6 +482,11 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
481 482
482static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } 483static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
483 484
485static inline int
486perf_event_query_prog_array(struct perf_event *event, void __user *info)
487{
488 return -EOPNOTSUPP;
489}
484#endif 490#endif
485 491
486enum { 492enum {
@@ -528,6 +534,7 @@ do { \
528struct perf_event; 534struct perf_event;
529 535
530DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); 536DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
537DECLARE_PER_CPU(int, bpf_kprobe_override);
531 538
532extern int perf_trace_init(struct perf_event *event); 539extern int perf_trace_init(struct perf_event *event);
533extern void perf_trace_destroy(struct perf_event *event); 540extern void perf_trace_destroy(struct perf_event *event);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a26ffbe09e71..c94f466d57ef 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
137 \ 137 \
138 if (!(cond)) \ 138 if (!(cond)) \
139 return; \ 139 return; \
140 if (rcucheck) { \ 140 if (rcucheck) \
141 if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
142 return; \
143 rcu_irq_enter_irqson(); \ 141 rcu_irq_enter_irqson(); \
144 } \
145 rcu_read_lock_sched_notrace(); \ 142 rcu_read_lock_sched_notrace(); \
146 it_func_ptr = rcu_dereference_sched((tp)->funcs); \ 143 it_func_ptr = rcu_dereference_sched((tp)->funcs); \
147 if (it_func_ptr) { \ 144 if (it_func_ptr) { \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7ac8ba208b1f..0a6c71e0ad01 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty);
405extern struct tty_struct *tty_kopen(dev_t device); 405extern struct tty_struct *tty_kopen(dev_t device);
406extern void tty_kclose(struct tty_struct *tty); 406extern void tty_kclose(struct tty_struct *tty);
407extern int tty_dev_name_to_number(const char *name, dev_t *number); 407extern int tty_dev_name_to_number(const char *name, dev_t *number);
408extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
409extern void tty_ldisc_unlock(struct tty_struct *tty);
408#else 410#else
409static inline void tty_kref_put(struct tty_struct *tty) 411static inline void tty_kref_put(struct tty_struct *tty)
410{ } 412{ }
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 3bc5144b1c7e..1ef64d4ad887 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -187,7 +187,7 @@ struct tty_ldisc_ops {
187 long (*compat_ioctl)(struct tty_struct *tty, struct file *file, 187 long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
188 unsigned int cmd, unsigned long arg); 188 unsigned int cmd, unsigned long arg);
189 void (*set_termios)(struct tty_struct *tty, struct ktermios *old); 189 void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
190 unsigned int (*poll)(struct tty_struct *, struct file *, 190 __poll_t (*poll)(struct tty_struct *, struct file *,
191 struct poll_table_struct *); 191 struct poll_table_struct *);
192 int (*hangup)(struct tty_struct *tty); 192 int (*hangup)(struct tty_struct *tty);
193 193
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 251e655d407f..efe79c1cdd47 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -273,4 +273,12 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
273#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) 273#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
274#endif 274#endif
275 275
276#ifdef CONFIG_HARDENED_USERCOPY
277void usercopy_warn(const char *name, const char *detail, bool to_user,
278 unsigned long offset, unsigned long len);
279void __noreturn usercopy_abort(const char *name, const char *detail,
280 bool to_user, unsigned long offset,
281 unsigned long len);
282#endif
283
276#endif /* __LINUX_UACCESS_H__ */ 284#endif /* __LINUX_UACCESS_H__ */
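
usercopy_warn() and usercopy_abort() are the reporting ends of hardened usercopy: when a copy to or from user space is found to overrun the object backing it, the checker either warns or terminates the offending context. A hedged sketch of a caller; the check itself and the object name are illustrative:

static void check_copy_bounds(const void *ptr, unsigned long n,
			      unsigned long obj_size, bool to_user)
{
	if (n > obj_size)
		usercopy_abort("example object", NULL, to_user, 0, n);
}
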
diff --git a/include/linux/usb.h b/include/linux/usb.h
index fbbe974661f2..0173597e59aa 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -609,6 +609,10 @@ struct usb3_lpm_parameters {
609 * to keep track of the number of functions that require USB 3.0 Link Power 609 * to keep track of the number of functions that require USB 3.0 Link Power
610 * Management to be disabled for this usb_device. This count should only 610 * Management to be disabled for this usb_device. This count should only
611 * be manipulated by those functions, with the bandwidth_mutex is held. 611 * be manipulated by those functions, with the bandwidth_mutex is held.
612 * @hub_delay: cached value consisting of:
613 * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
614 *
615 * Will be used as wValue for SetIsochDelay requests.
612 * 616 *
613 * Notes: 617 * Notes:
614 * Usbcore drivers should not set usbdev->state directly. Instead use 618 * Usbcore drivers should not set usbdev->state directly. Instead use
@@ -689,6 +693,8 @@ struct usb_device {
689 struct usb3_lpm_parameters u1_params; 693 struct usb3_lpm_parameters u1_params;
690 struct usb3_lpm_parameters u2_params; 694 struct usb3_lpm_parameters u2_params;
691 unsigned lpm_disable_count; 695 unsigned lpm_disable_count;
696
697 u16 hub_delay;
692}; 698};
693#define to_usb_device(d) container_of(d, struct usb_device, dev) 699#define to_usb_device(d) container_of(d, struct usb_device, dev)
694 700
@@ -1293,7 +1299,6 @@ extern int usb_disabled(void);
1293#define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired 1299#define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired
1294 * slot in the schedule */ 1300 * slot in the schedule */
1295#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ 1301#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */
1296#define URB_NO_FSBR 0x0020 /* UHCI-specific */
1297#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ 1302#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */
1298#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt 1303#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt
1299 * needed */ 1304 * needed */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 0142f3af0da6..66a5cff7ee14 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -330,6 +330,7 @@ struct usb_gadget_ops {
330 * @name: Identifies the controller hardware type. Used in diagnostics 330 * @name: Identifies the controller hardware type. Used in diagnostics
331 * and sometimes configuration. 331 * and sometimes configuration.
332 * @dev: Driver model state for this abstract device. 332 * @dev: Driver model state for this abstract device.
333 * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP
333 * @out_epnum: last used out ep number 334 * @out_epnum: last used out ep number
334 * @in_epnum: last used in ep number 335 * @in_epnum: last used in ep number
335 * @mA: last set mA value 336 * @mA: last set mA value
@@ -394,6 +395,7 @@ struct usb_gadget {
394 enum usb_device_state state; 395 enum usb_device_state state;
395 const char *name; 396 const char *name;
396 struct device dev; 397 struct device dev;
398 unsigned isoch_delay;
397 unsigned out_epnum; 399 unsigned out_epnum;
398 unsigned in_epnum; 400 unsigned in_epnum;
399 unsigned mA; 401 unsigned mA;
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 6cbe7a5c2b57..dba55ccb9b53 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -12,13 +12,17 @@
12#include <linux/usb/otg.h> 12#include <linux/usb/otg.h>
13#include <linux/usb/phy.h> 13#include <linux/usb/phy.h>
14 14
15struct usb_device;
16
15#if IS_ENABLED(CONFIG_OF) 17#if IS_ENABLED(CONFIG_OF)
16enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); 18enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
17bool of_usb_host_tpl_support(struct device_node *np); 19bool of_usb_host_tpl_support(struct device_node *np);
18int of_usb_update_otg_caps(struct device_node *np, 20int of_usb_update_otg_caps(struct device_node *np,
19 struct usb_otg_caps *otg_caps); 21 struct usb_otg_caps *otg_caps);
20struct device_node *usb_of_get_child_node(struct device_node *parent, 22struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1);
21 int portnum); 23bool usb_of_has_combined_node(struct usb_device *udev);
24struct device_node *usb_of_get_interface_node(struct usb_device *udev,
25 u8 config, u8 ifnum);
22struct device *usb_of_get_companion_dev(struct device *dev); 26struct device *usb_of_get_companion_dev(struct device *dev);
23#else 27#else
24static inline enum usb_dr_mode 28static inline enum usb_dr_mode
@@ -35,8 +39,17 @@ static inline int of_usb_update_otg_caps(struct device_node *np,
35{ 39{
36 return 0; 40 return 0;
37} 41}
38static inline struct device_node *usb_of_get_child_node 42static inline struct device_node *
39 (struct device_node *parent, int portnum) 43usb_of_get_device_node(struct usb_device *hub, int port1)
44{
45 return NULL;
46}
47static inline bool usb_of_has_combined_node(struct usb_device *udev)
48{
49 return false;
50}
51static inline struct device_node *
52usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
40{ 53{
41 return NULL; 54 return NULL;
42} 55}
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index e00051ced806..b3d41d7409b3 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -148,6 +148,8 @@ enum pd_pdo_type {
148 (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \ 148 (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \
149 PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma)) 149 PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma))
150 150
151#define VSAFE5V 5000 /* mv units */
152
151#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */ 153#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */
152#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */ 154#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */
153#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ 155#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index d92259f8de0a..2b64d23ace5c 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -65,7 +65,7 @@
65#define CMD_EXIT_MODE 5 65#define CMD_EXIT_MODE 5
66#define CMD_ATTENTION 6 66#define CMD_ATTENTION 6
67 67
68#define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f)) 68#define VDO_CMD_VENDOR(x) (((0x10 + (x)) & 0x1f))
69 69
70/* ChromeOS specific commands */ 70/* ChromeOS specific commands */
71#define VDO_CMD_VERSION VDO_CMD_VENDOR(0) 71#define VDO_CMD_VERSION VDO_CMD_VENDOR(0)
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 67102f3d59d4..53924f8e840c 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -17,6 +17,7 @@
17 */ 17 */
18#ifndef RENESAS_USB_H 18#ifndef RENESAS_USB_H
19#define RENESAS_USB_H 19#define RENESAS_USB_H
20#include <linux/notifier.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/usb/ch9.h> 22#include <linux/usb/ch9.h>
22 23
@@ -98,6 +99,13 @@ struct renesas_usbhs_platform_callback {
98 * VBUS control is needed for Host 99 * VBUS control is needed for Host
99 */ 100 */
100 int (*set_vbus)(struct platform_device *pdev, int enable); 101 int (*set_vbus)(struct platform_device *pdev, int enable);
102
103 /*
104 * option:
105 * extcon notifier to set host/peripheral mode.
106 */
107 int (*notifier)(struct notifier_block *nb, unsigned long event,
108 void *data);
101}; 109};
102 110
103/* 111/*
@@ -187,6 +195,7 @@ struct renesas_usbhs_driver_param {
187#define USBHS_TYPE_RCAR_GEN2 1 195#define USBHS_TYPE_RCAR_GEN2 1
188#define USBHS_TYPE_RCAR_GEN3 2 196#define USBHS_TYPE_RCAR_GEN3 2
189#define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3 197#define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3
198#define USBHS_TYPE_RZA1 4
190 199
191/* 200/*
192 * option: 201 * option:
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 073197f0d2bb..ca1c0b57f03f 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -183,14 +183,14 @@ struct tcpm_port;
183struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); 183struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
184void tcpm_unregister_port(struct tcpm_port *port); 184void tcpm_unregister_port(struct tcpm_port *port);
185 185
186void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, 186int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
187 unsigned int nr_pdo); 187 unsigned int nr_pdo);
188void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, 188int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
189 unsigned int nr_pdo, 189 unsigned int nr_pdo,
190 unsigned int max_snk_mv, 190 unsigned int max_snk_mv,
191 unsigned int max_snk_ma, 191 unsigned int max_snk_ma,
192 unsigned int max_snk_mw, 192 unsigned int max_snk_mw,
193 unsigned int operating_snk_mw); 193 unsigned int operating_snk_mw);
194 194
195void tcpm_vbus_change(struct tcpm_port *port); 195void tcpm_vbus_change(struct tcpm_port *port);
196void tcpm_cc_change(struct tcpm_port *port); 196void tcpm_cc_change(struct tcpm_port *port);
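
[Editor's sketch, not part of the patch: since tcpm_update_source_capabilities() and tcpm_update_sink_capabilities() now return an int, callers can detect rejected PDO tables. The PDO contents below are illustrative only.]

#include <linux/kernel.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>

static const u32 example_src_pdo[] = {
	PDO_FIXED(5000, 1500, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP),
};

/* Sketch: propagate a failure from the tcpm core instead of silently
 * continuing with stale source capabilities. */
static int example_refresh_src_caps(struct tcpm_port *port)
{
	return tcpm_update_source_capabilities(port, example_src_pdo,
					       ARRAY_SIZE(example_src_pdo));
}
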
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 33b0bdbb613c..d9c4a6cce3c2 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -17,6 +17,7 @@
17#define _LINUX_UUID_H_ 17#define _LINUX_UUID_H_
18 18
19#include <uapi/linux/uuid.h> 19#include <uapi/linux/uuid.h>
20#include <linux/string.h>
20 21
21#define UUID_SIZE 16 22#define UUID_SIZE 16
22 23
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
new file mode 100644
index 000000000000..c71def6b310f
--- /dev/null
+++ b/include/linux/vbox_utils.h
@@ -0,0 +1,79 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2/* Copyright (C) 2006-2016 Oracle Corporation */
3
4#ifndef __VBOX_UTILS_H__
5#define __VBOX_UTILS_H__
6
7#include <linux/printk.h>
8#include <linux/vbox_vmmdev_types.h>
9
10struct vbg_dev;
11
12/**
 13 * vboxguest logging functions; these log both to the backdoor and call
14 * the equivalent kernel pr_foo function.
15 */
16__printf(1, 2) void vbg_info(const char *fmt, ...);
17__printf(1, 2) void vbg_warn(const char *fmt, ...);
18__printf(1, 2) void vbg_err(const char *fmt, ...);
19
20/* Only use backdoor logging for non-dynamic debug builds */
21#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
22__printf(1, 2) void vbg_debug(const char *fmt, ...);
23#else
24#define vbg_debug pr_debug
25#endif
26
27/**
28 * Allocate memory for generic request and initialize the request header.
29 *
30 * Return: the allocated memory
31 * @len: Size of memory block required for the request.
32 * @req_type: The generic request type.
33 */
34void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
35
36/**
37 * Perform a generic request.
38 *
39 * Return: VBox status code
40 * @gdev: The Guest extension device.
41 * @req: Pointer to the request structure.
42 */
43int vbg_req_perform(struct vbg_dev *gdev, void *req);
44
45int vbg_hgcm_connect(struct vbg_dev *gdev,
46 struct vmmdev_hgcm_service_location *loc,
47 u32 *client_id, int *vbox_status);
48
49int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
50
51int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
52 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
53 u32 parm_count, int *vbox_status);
54
55int vbg_hgcm_call32(
56 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
57 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
58 int *vbox_status);
59
60/**
61 * Convert a VirtualBox status code to a standard Linux kernel return value.
62 * Return: 0 or negative errno value.
63 * @rc: VirtualBox status code to convert.
64 */
65int vbg_status_code_to_errno(int rc);
66
67/**
68 * Helper for the vboxsf driver to get a reference to the guest device.
 69 * Return: a pointer to the gdev, or an ERR_PTR value on error.
70 */
71struct vbg_dev *vbg_get_gdev(void);
72
73/**
74 * Helper for the vboxsf driver to put a guest device reference.
75 * @gdev: Reference returned by vbg_get_gdev to put.
76 */
77void vbg_put_gdev(struct vbg_dev *gdev);
78
79#endif
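
[Editor's sketch, not part of the patch: a hypothetical wrapper showing the call pattern these helpers are designed for; only the two vbg_* calls come from the header above, the helper name is made up.]

#include <linux/vbox_utils.h>

/* Hypothetical helper: issue a request allocated with vbg_req_alloc() and
 * hand the caller a standard 0/-errno result instead of a VBox status code. */
static int example_perform(struct vbg_dev *gdev, void *req)
{
	int vbox_rc = vbg_req_perform(gdev, req);

	return vbg_status_code_to_errno(vbox_rc);
}
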
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index a47b985341d1..66741ab087c1 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -145,7 +145,8 @@ extern struct vfio_info_cap_header *vfio_info_cap_add(
145extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); 145extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
146 146
147extern int vfio_info_add_capability(struct vfio_info_cap *caps, 147extern int vfio_info_add_capability(struct vfio_info_cap *caps,
148 int cap_type_id, void *cap_type); 148 struct vfio_info_cap_header *cap,
149 size_t size);
149 150
150extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, 151extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
151 int num_irqs, int max_irq_type, 152 int num_irqs, int max_irq_type,
diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h
new file mode 100644
index 000000000000..0d8bd6769b13
--- /dev/null
+++ b/include/linux/visorbus.h
@@ -0,0 +1,344 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 */
6
7/*
8 * This header file is to be included by other kernel mode components that
9 * implement a particular kind of visor_device. Each of these other kernel
10 * mode components is called a visor device driver. Refer to visortemplate
11 * for a minimal sample visor device driver.
12 *
13 * There should be nothing in this file that is private to the visorbus
14 * bus implementation itself.
15 */
16
17#ifndef __VISORBUS_H__
18#define __VISORBUS_H__
19
20#include <linux/device.h>
21
22#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
23
24/*
25 * enum channel_serverstate
26 * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state.
27 * @CHANNELSRV_READY: Channel has been initialized by server.
28 */
29enum channel_serverstate {
30 CHANNELSRV_UNINITIALIZED = 0,
31 CHANNELSRV_READY = 1
32};
33
34/*
35 * enum channel_clientstate
36 * @CHANNELCLI_DETACHED:
37 * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it
38 * unless given TBD* explicit request
39 * (should actually be < DETACHED).
40 * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach.
41 * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time.
42 * @CHANNELCLI_BUSY: Client either wants to use or is using channel.
43 * @CHANNELCLI_OWNED: "No worries" state - client can access channel
44 * anytime.
45 */
46enum channel_clientstate {
47 CHANNELCLI_DETACHED = 0,
48 CHANNELCLI_DISABLED = 1,
49 CHANNELCLI_ATTACHING = 2,
50 CHANNELCLI_ATTACHED = 3,
51 CHANNELCLI_BUSY = 4,
52 CHANNELCLI_OWNED = 5
53};
54
55/*
56 * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that
57 * a guest can look at the FeatureFlags in the io channel, and configure the
58 * driver to use interrupts or not based on this setting. All feature bits for
59 * all channels should be defined here. The io channel feature bits are defined
60 * below.
61 */
62#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1)
63#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3)
64#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4)
65#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5)
66#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6)
67
68/*
69 * struct channel_header - Common Channel Header
70 * @signature: Signature.
 71 * @legacy_state: DEPRECATED - being replaced by @srv_state, @cli_state_boot and @cli_state_os.
72 * @header_size: sizeof(struct channel_header).
73 * @size: Total size of this channel in bytes.
74 * @features: Flags to modify behavior.
75 * @chtype: Channel type: data, bus, control, etc..
76 * @partition_handle: ID of guest partition.
77 * @handle: Device number of this channel in client.
78 * @ch_space_offset: Offset in bytes to channel specific area.
79 * @version_id: Struct channel_header Version ID.
80 * @partition_index: Index of guest partition.
 81 * @zone_guid: GUID of the channel's zone.
82 * @cli_str_offset: Offset from channel header to null-terminated
83 * ClientString (0 if ClientString not present).
84 * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this
85 * channel.
86 * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
87 * ServerStateUp, ServerStateDown, etc).
88 * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel.
89 * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>.
90 * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see
91 * ServerStateUp, ServerStateDown, etc).
92 * @srv_state: CHANNEL_SERVERSTATE.
93 * @cli_error_boot: Bits to indicate err states for boot clients, so err
94 * messages can be throttled.
95 * @cli_error_os: Bits to indicate err states for OS clients, so err
96 * messages can be throttled.
97 * @filler: Pad out to 128 byte cacheline.
98 * @recover_channel: Please add all new single-byte values below here.
99 */
100struct channel_header {
101 u64 signature;
102 u32 legacy_state;
103 /* SrvState, CliStateBoot, and CliStateOS below */
104 u32 header_size;
105 u64 size;
106 u64 features;
107 guid_t chtype;
108 u64 partition_handle;
109 u64 handle;
110 u64 ch_space_offset;
111 u32 version_id;
112 u32 partition_index;
113 guid_t zone_guid;
114 u32 cli_str_offset;
115 u32 cli_state_boot;
116 u32 cmd_state_cli;
117 u32 cli_state_os;
118 u32 ch_characteristic;
119 u32 cmd_state_srv;
120 u32 srv_state;
121 u8 cli_error_boot;
122 u8 cli_error_os;
123 u8 filler[1];
124 u8 recover_channel;
125} __packed;
126
127#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0)
128
129/*
130 * struct signal_queue_header - Subheader for the Signal Type variation of the
131 * Common Channel.
132 * @version: SIGNAL_QUEUE_HEADER Version ID.
133 * @chtype: Queue type: storage, network.
134 * @size: Total size of this queue in bytes.
135 * @sig_base_offset: Offset to signal queue area.
136 * @features: Flags to modify behavior.
137 * @num_sent: Total # of signals placed in this queue.
138 * @num_overflows: Total # of inserts failed due to full queue.
139 * @signal_size: Total size of a signal for this queue.
140 * @max_slots: Max # of slots in queue, 1 slot is always empty.
141 * @max_signals: Max # of signals in queue (MaxSignalSlots-1).
142 * @head: Queue head signal #.
143 * @num_received: Total # of signals removed from this queue.
144 * @tail: Queue tail signal.
145 * @reserved1: Reserved field.
146 * @reserved2: Reserved field.
147 * @client_queue:
 148 * @num_irq_received: Total # of interrupts received. This is incremented by
 149 * the ISR in the guest Windows driver.
150 * @num_empty: Number of times that visor_signal_remove is called and
151 * returned Empty Status.
152 * @errorflags: Error bits set during SignalReinit to denote trouble with
153 * client's fields.
154 * @filler: Pad out to 64 byte cacheline.
155 */
156struct signal_queue_header {
157 /* 1st cache line */
158 u32 version;
159 u32 chtype;
160 u64 size;
161 u64 sig_base_offset;
162 u64 features;
163 u64 num_sent;
164 u64 num_overflows;
165 u32 signal_size;
166 u32 max_slots;
167 u32 max_signals;
168 u32 head;
169 /* 2nd cache line */
170 u64 num_received;
171 u32 tail;
172 u32 reserved1;
173 u64 reserved2;
174 u64 client_queue;
175 u64 num_irq_received;
176 u64 num_empty;
177 u32 errorflags;
178 u8 filler[12];
179} __packed;
180
181/* VISORCHANNEL Guids */
182/* {414815ed-c58c-11da-95a9-00e08161165f} */
183#define VISOR_VHBA_CHANNEL_GUID \
184 GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
185 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
186#define VISOR_VHBA_CHANNEL_GUID_STR \
187 "414815ed-c58c-11da-95a9-00e08161165f"
188struct visorchipset_state {
189 u32 created:1;
190 u32 attached:1;
191 u32 configured:1;
192 u32 running:1;
193 /* Remaining bits in this 32-bit word are reserved. */
194};
195
196/**
197 * struct visor_device - A device type for things "plugged" into the visorbus
198 * bus
199 * @visorchannel: Points to the channel that the device is
200 * associated with.
201 * @channel_type_guid: Identifies the channel type to the bus driver.
202 * @device: Device struct meant for use by the bus driver
203 * only.
204 * @list_all: Used by the bus driver to enumerate devices.
205 * @timer: Timer fired periodically to do interrupt-type
206 * activity.
207 * @being_removed: Indicates that the device is being removed from
208 * the bus. Private bus driver use only.
209 * @visordriver_callback_lock: Used by the bus driver to lock when adding and
210 * removing devices.
 211 * @pausing: Indicates that a change towards a paused state
 212 * is in progress. Only modified by the bus driver.
213 * @resuming: Indicates that a change towards a running state
214 * is in progress. Only modified by the bus driver.
215 * @chipset_bus_no: Private field used by the bus driver.
 216 * @chipset_dev_no: Private field used by the bus driver.
217 * @state: Used to indicate the current state of the
218 * device.
219 * @inst: Unique GUID for this instance of the device.
220 * @name: Name of the device.
221 * @pending_msg_hdr: For private use by bus driver to respond to
222 * hypervisor requests.
223 * @vbus_hdr_info: A pointer to header info. Private use by bus
224 * driver.
 225 * @partition_guid: Indicates the client partition id. This should be the
226 * same across all visor_devices in the current
227 * guest. Private use by bus driver only.
228 */
229struct visor_device {
230 struct visorchannel *visorchannel;
231 guid_t channel_type_guid;
232 /* These fields are for private use by the bus driver only. */
233 struct device device;
234 struct list_head list_all;
235 struct timer_list timer;
236 bool timer_active;
237 bool being_removed;
238 struct mutex visordriver_callback_lock; /* synchronize probe/remove */
239 bool pausing;
240 bool resuming;
241 u32 chipset_bus_no;
242 u32 chipset_dev_no;
243 struct visorchipset_state state;
244 guid_t inst;
245 u8 *name;
246 struct controlvm_message_header *pending_msg_hdr;
247 void *vbus_hdr_info;
248 guid_t partition_guid;
249 struct dentry *debugfs_dir;
250 struct dentry *debugfs_bus_info;
251};
252
253#define to_visor_device(x) container_of(x, struct visor_device, device)
254
255typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
256 int status);
257
258/*
259 * This struct describes a specific visor channel, by providing its GUID, name,
260 * and sizes.
261 */
262struct visor_channeltype_descriptor {
263 const guid_t guid;
264 const char *name;
265 u64 min_bytes;
266 u32 version;
267};
268
269/**
270 * struct visor_driver - Information provided by each visor driver when it
271 * registers with the visorbus driver
272 * @name: Name of the visor driver.
273 * @owner: The module owner.
274 * @channel_types: Types of channels handled by this driver, ending with
275 * a zero GUID. Our specialized BUS.match() method knows
276 * about this list, and uses it to determine whether this
277 * driver will in fact handle a new device that it has
278 * detected.
279 * @probe: Called when a new device comes online, by our probe()
280 * function specified by driver.probe() (triggered
281 * ultimately by some call to driver_register(),
282 * bus_add_driver(), or driver_attach()).
283 * @remove: Called when a new device is removed, by our remove()
284 * function specified by driver.remove() (triggered
285 * ultimately by some call to device_release_driver()).
 286 * @channel_interrupt: Called periodically, whenever there is a possibility
287 * that "something interesting" may have happened to the
288 * channel.
289 * @pause: Called to initiate a change of the device's state. If
 290 * the return value is < 0, there was an error and the
291 * state transition will NOT occur. If the return value
292 * is >= 0, then the state transition was INITIATED
293 * successfully, and complete_func() will be called (or
294 * was just called) with the final status when either the
295 * state transition fails or completes successfully.
 296 * @resume: Behaves similarly to @pause.
297 * @driver: Private reference to the device driver. For use by bus
298 * driver only.
299 */
300struct visor_driver {
301 const char *name;
302 struct module *owner;
303 struct visor_channeltype_descriptor *channel_types;
304 int (*probe)(struct visor_device *dev);
305 void (*remove)(struct visor_device *dev);
306 void (*channel_interrupt)(struct visor_device *dev);
307 int (*pause)(struct visor_device *dev,
308 visorbus_state_complete_func complete_func);
309 int (*resume)(struct visor_device *dev,
310 visorbus_state_complete_func complete_func);
311
312 /* These fields are for private use by the bus driver only. */
313 struct device_driver driver;
314};
315
316#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
317
318int visor_check_channel(struct channel_header *ch, struct device *dev,
319 const guid_t *expected_uuid, char *chname,
320 u64 expected_min_bytes, u32 expected_version,
321 u64 expected_signature);
322
323int visorbus_register_visor_driver(struct visor_driver *drv);
324void visorbus_unregister_visor_driver(struct visor_driver *drv);
325int visorbus_read_channel(struct visor_device *dev,
326 unsigned long offset, void *dest,
327 unsigned long nbytes);
328int visorbus_write_channel(struct visor_device *dev,
329 unsigned long offset, void *src,
330 unsigned long nbytes);
331int visorbus_enable_channel_interrupts(struct visor_device *dev);
332void visorbus_disable_channel_interrupts(struct visor_device *dev);
333
334int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
335 void *msg);
336int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
337 void *msg);
338bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
339const guid_t *visorchannel_get_guid(struct visorchannel *channel);
340
341#define BUS_ROOT_DEVICE UINT_MAX
342struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
343 struct visor_device *from);
344#endif
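
[Editor's sketch, not part of the patch: a minimal visor device driver registration tying the pieces of this header together. The GUID choice, names, empty probe/remove bodies and the version/min_bytes values are placeholders.]

#include <linux/module.h>
#include <linux/visorbus.h>

static struct visor_channeltype_descriptor example_channel_types[] = {
	/* illustrative: claim the vHBA channel type declared above */
	{ VISOR_VHBA_CHANNEL_GUID, "example", sizeof(struct channel_header), 0 },
	{}	/* zero GUID terminates the list, as the kerneldoc requires */
};

static int example_probe(struct visor_device *dev)
{
	return 0;	/* placeholder */
}

static void example_remove(struct visor_device *dev)
{
}

static struct visor_driver example_driver = {
	.name		= "example_visor",
	.owner		= THIS_MODULE,
	.channel_types	= example_channel_types,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* A module_init() would then call
 * visorbus_register_visor_driver(&example_driver), and module_exit() would
 * call visorbus_unregister_visor_driver(&example_driver). */
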
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1779c9817b39..a4c2317d8b9f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -216,23 +216,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
216 return x; 216 return x;
217} 217}
218 218
219static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
220 enum node_stat_item item)
221{
222 long x = atomic_long_read(&pgdat->vm_stat[item]);
223
224#ifdef CONFIG_SMP
225 int cpu;
226 for_each_online_cpu(cpu)
227 x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];
228
229 if (x < 0)
230 x = 0;
231#endif
232 return x;
233}
234
235
236#ifdef CONFIG_NUMA 219#ifdef CONFIG_NUMA
237extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); 220extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
238extern unsigned long sum_zone_node_page_state(int node, 221extern unsigned long sum_zone_node_page_state(int node,
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
index d58594a32324..78901ecd2f95 100644
--- a/include/linux/w1-gpio.h
+++ b/include/linux/w1-gpio.h
@@ -10,16 +10,15 @@
10#ifndef _LINUX_W1_GPIO_H 10#ifndef _LINUX_W1_GPIO_H
11#define _LINUX_W1_GPIO_H 11#define _LINUX_W1_GPIO_H
12 12
13struct gpio_desc;
14
13/** 15/**
14 * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio 16 * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
15 * @pin: GPIO pin to use
16 * @is_open_drain: GPIO pin is configured as open drain
17 */ 17 */
18struct w1_gpio_platform_data { 18struct w1_gpio_platform_data {
19 unsigned int pin; 19 struct gpio_desc *gpiod;
20 unsigned int is_open_drain:1; 20 struct gpio_desc *pullup_gpiod;
21 void (*enable_external_pullup)(int enable); 21 void (*enable_external_pullup)(int enable);
22 unsigned int ext_pullup_enable_pin;
23 unsigned int pullup_duration; 22 unsigned int pullup_duration;
24}; 23};
25 24
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 158715445ffb..55a611486bac 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -206,14 +206,16 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
206/* 206/*
207 * Wakeup macros to be used to report events to the targets. 207 * Wakeup macros to be used to report events to the targets.
208 */ 208 */
209#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
210#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
209#define wake_up_poll(x, m) \ 211#define wake_up_poll(x, m) \
210 __wake_up(x, TASK_NORMAL, 1, (void *) (m)) 212 __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
211#define wake_up_locked_poll(x, m) \ 213#define wake_up_locked_poll(x, m) \
212 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) 214 __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
213#define wake_up_interruptible_poll(x, m) \ 215#define wake_up_interruptible_poll(x, m) \
214 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) 216 __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
215#define wake_up_interruptible_sync_poll(x, m) \ 217#define wake_up_interruptible_sync_poll(x, m) \
216 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) 218 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
217 219
218#define ___wait_cond_timeout(condition) \ 220#define ___wait_cond_timeout(condition) \
219({ \ 221({ \
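
[Editor's sketch, not part of the patch: the new poll_to_key()/key_to_poll() pair keeps the __poll_t annotation intact across the wait-queue key pointer. The wait queue, the event choice and the use of autoremove_wake_function() below are assumptions for illustration.]

#include <linux/poll.h>
#include <linux/wait.h>

/* Waker side: the poll mask is passed as a typed key. */
static void example_notify_readable(struct wait_queue_head *wq)
{
	wake_up_interruptible_poll(wq, EPOLLIN | EPOLLRDNORM);
}

/* Waiter side: a custom wake function recovers the mask with key_to_poll(). */
static int example_wake(struct wait_queue_entry *wait, unsigned int mode,
			int sync, void *key)
{
	if (key && !(key_to_poll(key) & EPOLLIN))
		return 0;	/* a mask was given, but not one we wait for */
	return autoremove_wake_function(wait, mode, sync, key);
}
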
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 004ba807df96..7238865e75b0 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -108,4 +108,6 @@ void zpool_register_driver(struct zpool_driver *driver);
108 108
109int zpool_unregister_driver(struct zpool_driver *driver); 109int zpool_unregister_driver(struct zpool_driver *driver);
110 110
111bool zpool_evictable(struct zpool *pool);
112
111#endif 113#endif
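
[Editor's sketch, not part of the patch: zpool_evictable() lets a user such as zswap find out up front whether the pool implementation can ever evict entries. A tiny, hypothetical check:]

#include <linux/printk.h>
#include <linux/zpool.h>

static void example_check_pool(struct zpool *pool)
{
	/* purely illustrative: note once if shrinking can never succeed */
	if (!zpool_evictable(pool))
		pr_info("zpool: pool type does not support eviction\n");
}
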
diff --git a/include/media/cec.h b/include/media/cec.h
index 16341210d3ba..7cdf71d7125a 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -122,6 +122,7 @@ struct cec_adap_ops {
122 /* Low-level callbacks */ 122 /* Low-level callbacks */
123 int (*adap_enable)(struct cec_adapter *adap, bool enable); 123 int (*adap_enable)(struct cec_adapter *adap, bool enable);
124 int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable); 124 int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
125 int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
125 int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr); 126 int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
126 int (*adap_transmit)(struct cec_adapter *adap, u8 attempts, 127 int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
127 u32 signal_free_time, struct cec_msg *msg); 128 u32 signal_free_time, struct cec_msg *msg);
@@ -191,11 +192,6 @@ struct cec_adapter {
191 192
192 u32 tx_timeouts; 193 u32 tx_timeouts;
193 194
194#ifdef CONFIG_MEDIA_CEC_RC
195 bool rc_repeating;
196 int rc_last_scancode;
197 u64 rc_last_keypress;
198#endif
199#ifdef CONFIG_CEC_NOTIFIER 195#ifdef CONFIG_CEC_NOTIFIER
200 struct cec_notifier *notifier; 196 struct cec_notifier *notifier;
201#endif 197#endif
@@ -229,6 +225,18 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
229 return adap->phys_addr == 0; 225 return adap->phys_addr == 0;
230} 226}
231 227
228/**
229 * cec_is_registered() - is the CEC adapter registered?
230 *
231 * @adap: the CEC adapter, may be NULL.
232 *
233 * Return: true if the adapter is registered, false otherwise.
234 */
235static inline bool cec_is_registered(const struct cec_adapter *adap)
236{
237 return adap && adap->devnode.registered;
238}
239
232#define cec_phys_addr_exp(pa) \ 240#define cec_phys_addr_exp(pa) \
233 ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf 241 ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
234 242
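
[Editor's sketch, not part of the patch: cec_is_registered() is NULL-safe, which makes it convenient as a guard in drivers where the CEC adapter is optional. A small hypothetical use:]

#include <media/cec.h>

static void example_handle_hotplug(struct cec_adapter *adap)
{
	/* adap may be NULL (CEC disabled) or not yet registered during probe */
	if (!cec_is_registered(adap))
		return;
	/* safe to report physical address changes etc. from here on */
}
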
diff --git a/include/media/demux.h b/include/media/demux.h
new file mode 100644
index 000000000000..c4df6cee48e6
--- /dev/null
+++ b/include/media/demux.h
@@ -0,0 +1,589 @@
1/*
2 * demux.h
3 *
4 * The Kernel Digital TV Demux kABI defines a driver-internal interface for
5 * registering low-level, hardware specific driver to a hardware independent
6 * demux layer.
7 *
8 * Copyright (c) 2002 Convergence GmbH
9 *
10 * based on code:
11 * Copyright (c) 2000 Nokia Research Center
12 * Tampere, FINLAND
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU Lesser General Public License
16 * as published by the Free Software Foundation; either version 2.1
17 * of the License, or (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 */
25
26#ifndef __DEMUX_H
27#define __DEMUX_H
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/list.h>
32#include <linux/time.h>
33#include <linux/dvb/dmx.h>
34
35/*
36 * Common definitions
37 */
38
39/*
40 * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter.
41 */
42
43#ifndef DMX_MAX_FILTER_SIZE
44#define DMX_MAX_FILTER_SIZE 18
45#endif
46
47/*
48 * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed
49 * filter.
50 */
51
52#ifndef DMX_MAX_SECTION_SIZE
53#define DMX_MAX_SECTION_SIZE 4096
54#endif
55#ifndef DMX_MAX_SECFEED_SIZE
56#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188)
57#endif
58
59/*
60 * TS packet reception
61 */
62
63/**
64 * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set\(\)
65 *
66 * @TS_PACKET: Send TS packets (188 bytes) to callback (default).
67 * @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload
68 * (<=184 bytes per packet) to callback
69 * @TS_DECODER: Send stream to built-in decoder (if present).
70 * @TS_DEMUX: In case TS_PACKET is set, send the TS to the demux
71 * device, not to the dvr device
72 */
73enum ts_filter_type {
74 TS_PACKET = 1,
75 TS_PAYLOAD_ONLY = 2,
76 TS_DECODER = 4,
77 TS_DEMUX = 8,
78};
79
80/**
81 * struct dmx_ts_feed - Structure that contains a TS feed filter
82 *
83 * @is_filtering: Set to non-zero when filtering in progress
84 * @parent: pointer to struct dmx_demux
85 * @priv: pointer to private data of the API client
86 * @set: sets the TS filter
87 * @start_filtering: starts TS filtering
88 * @stop_filtering: stops TS filtering
89 *
90 * A TS feed is typically mapped to a hardware PID filter on the demux chip.
91 * Using this API, the client can set the filtering properties to start/stop
92 * filtering TS packets on a particular TS feed.
93 */
94struct dmx_ts_feed {
95 int is_filtering;
96 struct dmx_demux *parent;
97 void *priv;
98 int (*set)(struct dmx_ts_feed *feed,
99 u16 pid,
100 int type,
101 enum dmx_ts_pes pes_type,
102 ktime_t timeout);
103 int (*start_filtering)(struct dmx_ts_feed *feed);
104 int (*stop_filtering)(struct dmx_ts_feed *feed);
105};
106
107/*
108 * Section reception
109 */
110
111/**
112 * struct dmx_section_filter - Structure that describes a section filter
113 *
114 * @filter_value: Contains up to 16 bytes (128 bits) of the TS section header
115 * that will be matched by the section filter
116 * @filter_mask: Contains a 16 bytes (128 bits) filter mask with the bits
117 * specified by @filter_value that will be used on the filter
118 * match logic.
119 * @filter_mode: Contains a 16 bytes (128 bits) filter mode.
120 * @parent: Pointer to struct dmx_section_feed.
121 * @priv: Pointer to private data of the API client.
122 *
123 *
124 * The @filter_mask controls which bits of @filter_value are compared with
125 * the section headers/payload. On a binary value of 1 in filter_mask, the
126 * corresponding bits are compared. The filter only accepts sections that are
127 * equal to filter_value in all the tested bit positions.
128 */
129struct dmx_section_filter {
130 u8 filter_value[DMX_MAX_FILTER_SIZE];
131 u8 filter_mask[DMX_MAX_FILTER_SIZE];
132 u8 filter_mode[DMX_MAX_FILTER_SIZE];
133 struct dmx_section_feed *parent; /* Back-pointer */
134 void *priv; /* Pointer to private data of the API client */
135};
136
137/**
138 * struct dmx_section_feed - Structure that contains a section feed filter
139 *
140 * @is_filtering: Set to non-zero when filtering in progress
141 * @parent: pointer to struct dmx_demux
142 * @priv: pointer to private data of the API client
143 * @check_crc: If non-zero, check the CRC values of filtered sections.
144 * @set: sets the section filter
145 * @allocate_filter: This function is used to allocate a section filter on
146 * the demux. It should only be called when no filtering
147 * is in progress on this section feed. If a filter cannot
148 * be allocated, the function fails with -ENOSPC.
149 * @release_filter: This function releases all the resources of a
150 * previously allocated section filter. The function
151 * should not be called while filtering is in progress
152 * on this section feed. After calling this function,
153 * the caller should not try to dereference the filter
154 * pointer.
155 * @start_filtering: starts section filtering
156 * @stop_filtering: stops section filtering
157 *
 158 * A section feed is a demux resource for filtering and receiving sections.
 159 * Using this API, the client can set the filtering properties to start/stop
 160 * filtering sections on a particular section feed.
161 */
162struct dmx_section_feed {
163 int is_filtering;
164 struct dmx_demux *parent;
165 void *priv;
166
167 int check_crc;
168
169 /* private: Used internally at dvb_demux.c */
170 u32 crc_val;
171
172 u8 *secbuf;
173 u8 secbuf_base[DMX_MAX_SECFEED_SIZE];
174 u16 secbufp, seclen, tsfeedp;
175
176 /* public: */
177 int (*set)(struct dmx_section_feed *feed,
178 u16 pid,
179 int check_crc);
180 int (*allocate_filter)(struct dmx_section_feed *feed,
181 struct dmx_section_filter **filter);
182 int (*release_filter)(struct dmx_section_feed *feed,
183 struct dmx_section_filter *filter);
184 int (*start_filtering)(struct dmx_section_feed *feed);
185 int (*stop_filtering)(struct dmx_section_feed *feed);
186};
187
188/**
189 * typedef dmx_ts_cb - DVB demux TS filter callback function prototype
190 *
191 * @buffer1: Pointer to the start of the filtered TS packets.
192 * @buffer1_length: Length of the TS data in buffer1.
193 * @buffer2: Pointer to the tail of the filtered TS packets, or NULL.
194 * @buffer2_length: Length of the TS data in buffer2.
195 * @source: Indicates which TS feed is the source of the callback.
196 *
197 * This function callback prototype, provided by the client of the demux API,
198 * is called from the demux code. The function is only called when filtering
199 * on a TS feed has been enabled using the start_filtering\(\) function at
200 * the &dmx_demux.
201 * Any TS packets that match the filter settings are copied to a circular
202 * buffer. The filtered TS packets are delivered to the client using this
203 * callback function.
204 * It is expected that the @buffer1 and @buffer2 callback parameters point to
205 * addresses within the circular buffer, but other implementations are also
206 * possible. Note that the called party should not try to free the memory
207 * the @buffer1 and @buffer2 parameters point to.
208 *
209 * When this function is called, the @buffer1 parameter typically points to
210 * the start of the first undelivered TS packet within a circular buffer.
211 * The @buffer2 buffer parameter is normally NULL, except when the received
212 * TS packets have crossed the last address of the circular buffer and
213 * "wrapped" to the beginning of the buffer. In the latter case the @buffer1
214 * parameter would contain an address within the circular buffer, while the
215 * @buffer2 parameter would contain the first address of the circular buffer.
216 * The number of bytes delivered with this function (i.e. @buffer1_length +
217 * @buffer2_length) is usually equal to the value of callback_length parameter
218 * given in the set() function, with one exception: if a timeout occurs before
219 * receiving callback_length bytes of TS data, any undelivered packets are
220 * immediately delivered to the client by calling this function. The timeout
221 * duration is controlled by the set() function in the TS Feed API.
222 *
223 * If a TS packet is received with errors that could not be fixed by the
224 * TS-level forward error correction (FEC), the Transport_error_indicator
225 * flag of the TS packet header should be set. The TS packet should not be
226 * discarded, as the error can possibly be corrected by a higher layer
227 * protocol. If the called party is slow in processing the callback, it
228 * is possible that the circular buffer eventually fills up. If this happens,
229 * the demux driver should discard any TS packets received while the buffer
230 * is full and return -EOVERFLOW.
231 *
232 * The type of data returned to the callback can be selected by the
233 * &dmx_ts_feed.@set function. The type parameter decides if the raw
234 * TS packet (TS_PACKET) or just the payload (TS_PACKET|TS_PAYLOAD_ONLY)
235 * should be returned. If additionally the TS_DECODER bit is set the stream
236 * will also be sent to the hardware MPEG decoder.
237 *
238 * Return:
239 *
240 * - 0, on success;
241 *
242 * - -EOVERFLOW, on buffer overflow.
243 */
244typedef int (*dmx_ts_cb)(const u8 *buffer1,
245 size_t buffer1_length,
246 const u8 *buffer2,
247 size_t buffer2_length,
248 struct dmx_ts_feed *source);
249
250/**
251 * typedef dmx_section_cb - DVB demux TS filter callback function prototype
252 *
253 * @buffer1: Pointer to the start of the filtered section, e.g.
254 * within the circular buffer of the demux driver.
255 * @buffer1_len: Length of the filtered section data in @buffer1,
256 * including headers and CRC.
257 * @buffer2: Pointer to the tail of the filtered section data,
258 * or NULL. Useful to handle the wrapping of a
259 * circular buffer.
260 * @buffer2_len: Length of the filtered section data in @buffer2,
261 * including headers and CRC.
262 * @source: Indicates which section feed is the source of the
263 * callback.
264 *
265 * This function callback prototype, provided by the client of the demux API,
266 * is called from the demux code. The function is only called when
267 * filtering of sections has been enabled using the function
268 * &dmx_ts_feed.@start_filtering. When the demux driver has received a
269 * complete section that matches at least one section filter, the client
270 * is notified via this callback function. Normally this function is called
271 * for each received section; however, it is also possible to deliver
272 * multiple sections with one callback, for example when the system load
273 * is high. If an error occurs while receiving a section, this
274 * function should be called with the corresponding error type set in the
275 * success field, whether or not there is data to deliver. The Section Feed
276 * implementation should maintain a circular buffer for received sections.
277 * However, this is not necessary if the Section Feed API is implemented as
278 * a client of the TS Feed API, because the TS Feed implementation then
279 * buffers the received data. The size of the circular buffer can be
280 * configured using the &dmx_ts_feed.@set function in the Section Feed API.
281 * If there is no room in the circular buffer when a new section is received,
282 * the section must be discarded. If this happens, the value of the success
283 * parameter should be DMX_OVERRUN_ERROR on the next callback.
284 */
285typedef int (*dmx_section_cb)(const u8 *buffer1,
286 size_t buffer1_len,
287 const u8 *buffer2,
288 size_t buffer2_len,
289 struct dmx_section_filter *source);
290
291/*
292 * DVB Front-End
293 */
294
295/**
296 * enum dmx_frontend_source - Used to identify the type of frontend
297 *
298 * @DMX_MEMORY_FE: The source of the demux is memory. It means that
299 * the MPEG-TS to be filtered comes from userspace,
300 * via write() syscall.
301 *
302 * @DMX_FRONTEND_0: The source of the demux is a frontend connected
303 * to the demux.
304 */
305enum dmx_frontend_source {
306 DMX_MEMORY_FE,
307 DMX_FRONTEND_0,
308};
309
310/**
311 * struct dmx_frontend - Structure that lists the frontends associated with
312 * a demux
313 *
314 * @connectivity_list: List of front-ends that can be connected to a
315 * particular demux;
316 * @source: Type of the frontend.
317 *
318 * FIXME: this structure should likely be replaced soon by some
319 * media-controller based logic.
320 */
321struct dmx_frontend {
322 struct list_head connectivity_list;
323 enum dmx_frontend_source source;
324};
325
326/*
327 * MPEG-2 TS Demux
328 */
329
330/**
331 * enum dmx_demux_caps - MPEG-2 TS Demux capabilities bitmap
332 *
333 * @DMX_TS_FILTERING: set if TS filtering is supported;
334 * @DMX_SECTION_FILTERING: set if section filtering is supported;
335 * @DMX_MEMORY_BASED_FILTERING: set if write() available.
336 *
337 * Those flags are OR'ed in the &dmx_demux.capabilities field
338 */
339enum dmx_demux_caps {
340 DMX_TS_FILTERING = 1,
341 DMX_SECTION_FILTERING = 4,
342 DMX_MEMORY_BASED_FILTERING = 8,
343};
344
345/*
346 * Demux resource type identifier.
347 */
348
349/**
350 * DMX_FE_ENTRY - Casts elements in the list of registered
351 * front-ends from the generic type struct list_head
352 * to the type * struct dmx_frontend
353 *
354 * @list: list of struct dmx_frontend
355 */
356#define DMX_FE_ENTRY(list) \
357 list_entry(list, struct dmx_frontend, connectivity_list)
358
359/**
360 * struct dmx_demux - Structure that contains the demux capabilities and
361 * callbacks.
362 *
363 * @capabilities: Bitfield of capability flags.
364 *
365 * @frontend: Front-end connected to the demux
366 *
367 * @priv: Pointer to private data of the API client
368 *
369 * @open: This function reserves the demux for use by the caller and, if
370 * necessary, initializes the demux. When the demux is no longer needed,
371 * the function @close should be called. It should be possible for
372 * multiple clients to access the demux at the same time. Thus, the
373 * function implementation should increment the demux usage count when
374 * @open is called and decrement it when @close is called.
375 * The @demux function parameter contains a pointer to the demux API and
376 * instance data.
377 * It returns:
378 * 0 on success;
379 * -EUSERS, if maximum usage count was reached;
380 * -EINVAL, on bad parameter.
381 *
 382 * @close: This function releases the demux that was reserved by an
 383 * earlier call to @open. It should be possible for multiple clients to
 384 * access the demux at the same time; thus, the function implementation
 385 * should decrement the demux usage count that was incremented by @open,
 386 * and only fully release the demux when the usage count drops back to
 387 * zero.
388 * The @demux function parameter contains a pointer to the demux API and
389 * instance data.
390 * It returns:
391 * 0 on success;
392 * -ENODEV, if demux was not in use (e. g. no users);
393 * -EINVAL, on bad parameter.
394 *
395 * @write: This function provides the demux driver with a memory buffer
396 * containing TS packets. Instead of receiving TS packets from the DVB
397 * front-end, the demux driver software will read packets from memory.
398 * Any clients of this demux with active TS, PES or Section filters will
399 * receive filtered data via the Demux callback API (see 0). The function
400 * returns when all the data in the buffer has been consumed by the demux.
401 * Demux hardware typically cannot read TS from memory. If this is the
402 * case, memory-based filtering has to be implemented entirely in software.
403 * The @demux function parameter contains a pointer to the demux API and
404 * instance data.
405 * The @buf function parameter contains a pointer to the TS data in
406 * kernel-space memory.
407 * The @count function parameter contains the length of the TS data.
408 * It returns:
409 * 0 on success;
410 * -ERESTARTSYS, if mutex lock was interrupted;
411 * -EINTR, if a signal handling is pending;
412 * -ENODEV, if demux was removed;
413 * -EINVAL, on bad parameter.
414 *
415 * @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS
416 * packets carrying a certain PID. The TS feed normally corresponds to a
417 * hardware PID filter on the demux chip.
418 * The @demux function parameter contains a pointer to the demux API and
419 * instance data.
420 * The @feed function parameter contains a pointer to the TS feed API and
421 * instance data.
422 * The @callback function parameter contains a pointer to the callback
 423 * function for passing received TS packets.
424 * It returns:
425 * 0 on success;
426 * -ERESTARTSYS, if mutex lock was interrupted;
 427 * -EBUSY, if no more TS feeds are available;
428 * -EINVAL, on bad parameter.
429 *
430 * @release_ts_feed: Releases the resources allocated with @allocate_ts_feed.
431 * Any filtering in progress on the TS feed should be stopped before
432 * calling this function.
433 * The @demux function parameter contains a pointer to the demux API and
434 * instance data.
435 * The @feed function parameter contains a pointer to the TS feed API and
436 * instance data.
437 * It returns:
438 * 0 on success;
439 * -EINVAL on bad parameter.
440 *
441 * @allocate_section_feed: Allocates a new section feed, i.e. a demux resource
442 * for filtering and receiving sections. On platforms with hardware
443 * support for section filtering, a section feed is directly mapped to
444 * the demux HW. On other platforms, TS packets are first PID filtered in
445 * hardware and a hardware section filter then emulated in software. The
446 * caller obtains an API pointer of type dmx_section_feed_t as an out
447 * parameter. Using this API the caller can set filtering parameters and
448 * start receiving sections.
449 * The @demux function parameter contains a pointer to the demux API and
450 * instance data.
 451 * The @feed function parameter contains a pointer to the section feed API
 452 * and instance data.
 453 * The @callback function parameter contains a pointer to the callback
 454 * function for passing received sections.
455 * It returns:
456 * 0 on success;
 457 * -EBUSY, if no more section feeds are available;
458 * -EINVAL, on bad parameter.
459 *
460 * @release_section_feed: Releases the resources allocated with
461 * @allocate_section_feed, including allocated filters. Any filtering in
462 * progress on the section feed should be stopped before calling this
463 * function.
464 * The @demux function parameter contains a pointer to the demux API and
465 * instance data.
 466 * The @feed function parameter contains a pointer to the section feed API
 467 * and instance data.
468 * It returns:
469 * 0 on success;
470 * -EINVAL, on bad parameter.
471 *
472 * @add_frontend: Registers a connectivity between a demux and a front-end,
473 * i.e., indicates that the demux can be connected via a call to
474 * @connect_frontend to use the given front-end as a TS source. The
475 * client of this function has to allocate dynamic or static memory for
476 * the frontend structure and initialize its fields before calling this
477 * function. This function is normally called during the driver
478 * initialization. The caller must not free the memory of the frontend
479 * struct before successfully calling @remove_frontend.
480 * The @demux function parameter contains a pointer to the demux API and
481 * instance data.
482 * The @frontend function parameter contains a pointer to the front-end
483 * instance data.
484 * It returns:
485 * 0 on success;
486 * -EINVAL, on bad parameter.
487 *
488 * @remove_frontend: Indicates that the given front-end, registered by a call
489 * to @add_frontend, can no longer be connected as a TS source by this
490 * demux. The function should be called when a front-end driver or a demux
491 * driver is removed from the system. If the front-end is in use, the
492 * function fails with the return value of -EBUSY. After successfully
493 * calling this function, the caller can free the memory of the frontend
494 * struct if it was dynamically allocated before the @add_frontend
495 * operation.
496 * The @demux function parameter contains a pointer to the demux API and
497 * instance data.
498 * The @frontend function parameter contains a pointer to the front-end
499 * instance data.
500 * It returns:
501 * 0 on success;
502 * -ENODEV, if the front-end was not found,
503 * -EINVAL, on bad parameter.
504 *
505 * @get_frontends: Provides the APIs of the front-ends that have been
506 * registered for this demux. Any of the front-ends obtained with this
507 * call can be used as a parameter for @connect_frontend. The include
508 * file demux.h contains the macro DMX_FE_ENTRY() for converting an
509 * element of the generic type struct &list_head * to the type
510 * struct &dmx_frontend *. The caller must not free the memory of any of
511 * the elements obtained via this function call.
512 * The @demux function parameter contains a pointer to the demux API and
513 * instance data.
514 * It returns a struct list_head pointer to the list of front-end
515 * interfaces, or NULL in the case of an empty list.
516 *
517 * @connect_frontend: Connects the TS output of the front-end to the input of
518 * the demux. A demux can only be connected to a front-end registered to
519 * the demux with the function @add_frontend. It may or may not be
520 * possible to connect multiple demuxes to the same front-end, depending
521 * on the capabilities of the HW platform. When not used, the front-end
522 * should be released by calling @disconnect_frontend.
523 * The @demux function parameter contains a pointer to the demux API and
524 * instance data.
525 * The @frontend function parameter contains a pointer to the front-end
526 * instance data.
527 * It returns:
528 * 0 on success;
529 * -EINVAL, on bad parameter.
530 *
531 * @disconnect_frontend: Disconnects the demux and a front-end previously
532 * connected by a @connect_frontend call.
533 * The @demux function parameter contains a pointer to the demux API and
534 * instance data.
535 * It returns:
536 * 0 on success;
537 * -EINVAL on bad parameter.
538 *
539 * @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0,
540 * DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0.
541 * The @demux function parameter contains a pointer to the demux API and
542 * instance data.
543 * The @pids function parameter contains an array with five u16 elements
544 * where the PIDs will be stored.
545 * It returns:
546 * 0 on success;
547 * -EINVAL on bad parameter.
548 */
549struct dmx_demux {
550 enum dmx_demux_caps capabilities;
551 struct dmx_frontend *frontend;
552 void *priv;
553 int (*open)(struct dmx_demux *demux);
554 int (*close)(struct dmx_demux *demux);
555 int (*write)(struct dmx_demux *demux, const char __user *buf,
556 size_t count);
557 int (*allocate_ts_feed)(struct dmx_demux *demux,
558 struct dmx_ts_feed **feed,
559 dmx_ts_cb callback);
560 int (*release_ts_feed)(struct dmx_demux *demux,
561 struct dmx_ts_feed *feed);
562 int (*allocate_section_feed)(struct dmx_demux *demux,
563 struct dmx_section_feed **feed,
564 dmx_section_cb callback);
565 int (*release_section_feed)(struct dmx_demux *demux,
566 struct dmx_section_feed *feed);
567 int (*add_frontend)(struct dmx_demux *demux,
568 struct dmx_frontend *frontend);
569 int (*remove_frontend)(struct dmx_demux *demux,
570 struct dmx_frontend *frontend);
571 struct list_head *(*get_frontends)(struct dmx_demux *demux);
572 int (*connect_frontend)(struct dmx_demux *demux,
573 struct dmx_frontend *frontend);
574 int (*disconnect_frontend)(struct dmx_demux *demux);
575
576 int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
577
578 /* private: */
579
580 /*
581 * Only used at av7110, to read some data from firmware.
582 * As this was never documented, we have no clue about what's
 583 * there, and its usage in other drivers is not encouraged.
584 */
585 int (*get_stc)(struct dmx_demux *demux, unsigned int num,
586 u64 *stc, unsigned int *base);
587};
588
589#endif /* #ifndef __DEMUX_H */
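
[Editor's sketch, not part of the patch: putting the TS-feed parts of this kABI together, a bridge driver's call flow might look like the following. The demux pointer is assumed to come from the registered DVB adapter, DMX_PES_OTHER and the zero timeout are illustrative choices, and error handling is abbreviated.]

#include <media/demux.h>

/* Sketch of a dmx_ts_cb implementation: consume the filtered packets. */
static int example_ts_cb(const u8 *buffer1, size_t buffer1_length,
			 const u8 *buffer2, size_t buffer2_length,
			 struct dmx_ts_feed *source)
{
	return 0;
}

static int example_start_pid_filter(struct dmx_demux *demux, u16 pid)
{
	struct dmx_ts_feed *feed;
	int ret;

	ret = demux->allocate_ts_feed(demux, &feed, example_ts_cb);
	if (ret)
		return ret;

	ret = feed->set(feed, pid, TS_PACKET, DMX_PES_OTHER, 0);
	if (!ret)
		ret = feed->start_filtering(feed);
	if (ret)
		demux->release_ts_feed(demux, feed);
	return ret;
}
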
diff --git a/include/media/dmxdev.h b/include/media/dmxdev.h
new file mode 100644
index 000000000000..2f5cb2c7b6a7
--- /dev/null
+++ b/include/media/dmxdev.h
@@ -0,0 +1,212 @@
1/*
2 * dmxdev.h
3 *
4 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
5 * for convergence integrated media GmbH
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public License
9 * as published by the Free Software Foundation; either version 2.1
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#ifndef _DMXDEV_H_
20#define _DMXDEV_H_
21
22#include <linux/types.h>
23#include <linux/spinlock.h>
24#include <linux/kernel.h>
25#include <linux/time.h>
26#include <linux/timer.h>
27#include <linux/wait.h>
28#include <linux/fs.h>
29#include <linux/string.h>
30#include <linux/mutex.h>
31#include <linux/slab.h>
32
33#include <linux/dvb/dmx.h>
34
35#include <media/dvbdev.h>
36#include <media/demux.h>
37#include <media/dvb_ringbuffer.h>
38#include <media/dvb_vb2.h>
39
40/**
41 * enum dmxdev_type - type of demux filter type.
42 *
43 * @DMXDEV_TYPE_NONE: no filter set.
44 * @DMXDEV_TYPE_SEC: section filter.
45 * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter.
46 */
47enum dmxdev_type {
48 DMXDEV_TYPE_NONE,
49 DMXDEV_TYPE_SEC,
50 DMXDEV_TYPE_PES,
51};
52
53/**
54 * enum dmxdev_state - state machine for the dmxdev.
55 *
56 * @DMXDEV_STATE_FREE: indicates that the filter is freed.
57 * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated
58 * to be used.
59 * @DMXDEV_STATE_SET: indicates that the filter parameters are set.
60 * @DMXDEV_STATE_GO: indicates that the filter is running.
61 * @DMXDEV_STATE_DONE: indicates that a packet was already filtered
62 * and the filter is now disabled.
63 * Set only if %DMX_ONESHOT. See
64 * &dmx_sct_filter_params.
65 * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition.
66 */
67enum dmxdev_state {
68 DMXDEV_STATE_FREE,
69 DMXDEV_STATE_ALLOCATED,
70 DMXDEV_STATE_SET,
71 DMXDEV_STATE_GO,
72 DMXDEV_STATE_DONE,
73 DMXDEV_STATE_TIMEDOUT
74};
75
76/**
77 * struct dmxdev_feed - digital TV dmxdev feed
78 *
79 * @pid: Program ID to be filtered
80 * @ts: pointer to &struct dmx_ts_feed
81 * @next: &struct list_head pointing to the next feed.
82 */
83
84struct dmxdev_feed {
85 u16 pid;
86 struct dmx_ts_feed *ts;
87 struct list_head next;
88};
89
90/**
91 * struct dmxdev_filter - digital TV dmxdev filter
92 *
93 * @filter: a union describing a dmxdev filter.
94 * Currently used only for section filters.
95 * @filter.sec: a &struct dmx_section_filter pointer.
96 * For section filter only.
97 * @feed: a union describing a dmxdev feed.
98 * Depending on the filter type, it can be either
99 * @feed.ts or @feed.sec.
100 * @feed.ts: a &struct list_head list.
101 * For TS and PES feeds.
102 * @feed.sec: a &struct dmx_section_feed pointer.
103 * For section feed only.
104 * @params: a union describing dmxdev filter parameters.
105 * Depending on the filter type, it can be either
106 * @params.sec or @params.pes.
107 * @params.sec: a &struct dmx_sct_filter_params embedded struct.
108 * For section filter only.
109 * @params.pes: a &struct dmx_pes_filter_params embedded struct.
110 * For PES filter only.
111 * @type: type of the dmxdev filter, as defined by &enum dmxdev_type.
112 * @state: state of the dmxdev filter, as defined by &enum dmxdev_state.
113 * @dev: pointer to &struct dmxdev.
114 * @buffer: an embedded &struct dvb_ringbuffer buffer.
115 * @vb2_ctx: control struct for VB2 handler
116 * @mutex: protects the access to &struct dmxdev_filter.
117 * @timer: &struct timer_list embedded timer, used to check for
118 * feed timeouts.
119 * Only for section filter.
120 * @todo: index for the @secheader.
121 * Only for section filter.
122 * @secheader: buffer cache to parse the section header.
123 * Only for section filter.
124 */
125struct dmxdev_filter {
126 union {
127 struct dmx_section_filter *sec;
128 } filter;
129
130 union {
131 /* list of TS and PES feeds (struct dmxdev_feed) */
132 struct list_head ts;
133 struct dmx_section_feed *sec;
134 } feed;
135
136 union {
137 struct dmx_sct_filter_params sec;
138 struct dmx_pes_filter_params pes;
139 } params;
140
141 enum dmxdev_type type;
142 enum dmxdev_state state;
143 struct dmxdev *dev;
144 struct dvb_ringbuffer buffer;
145 struct dvb_vb2_ctx vb2_ctx;
146
147 struct mutex mutex;
148
149 /* only for sections */
150 struct timer_list timer;
151 int todo;
152 u8 secheader[3];
153};
154
155/**
156 * struct dmxdev - Describes a digital TV demux device.
157 *
158 * @dvbdev: pointer to &struct dvb_device associated with
159 * the demux device node.
160 * @dvr_dvbdev: pointer to &struct dvb_device associated with
161 * the dvr device node.
162 * @filter: pointer to &struct dmxdev_filter.
163 * @demux: pointer to &struct dmx_demux.
164 * @filternum: number of filters.
165 * @capabilities: demux capabilities as defined by &enum dmx_demux_caps.
166 * @exit: flag to indicate that the demux is being released.
167 * @dvr_orig_fe: pointer to &struct dmx_frontend.
168 * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output.
169 * @dvr_vb2_ctx: control struct for VB2 handler
170 * @mutex: protects the usage of this structure.
171 * @lock: protects access to &dmxdev->filter->data.
172 */
173struct dmxdev {
174 struct dvb_device *dvbdev;
175 struct dvb_device *dvr_dvbdev;
176
177 struct dmxdev_filter *filter;
178 struct dmx_demux *demux;
179
180 int filternum;
181 int capabilities;
182
183 unsigned int exit:1;
184#define DMXDEV_CAP_DUPLEX 1
185 struct dmx_frontend *dvr_orig_fe;
186
187 struct dvb_ringbuffer dvr_buffer;
188#define DVR_BUFFER_SIZE (10*188*1024)
189
190 struct dvb_vb2_ctx dvr_vb2_ctx;
191
192 struct mutex mutex;
193 spinlock_t lock;
194};
195
196/**
197 * dvb_dmxdev_init - initializes a digital TV demux and registers both demux
198 * and DVR devices.
199 *
200 * @dmxdev: pointer to &struct dmxdev.
201 * @adap: pointer to &struct dvb_adapter.
202 */
203int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap);
204
205/**
206 * dvb_dmxdev_release - releases a digital TV demux and unregisters it.
207 *
208 * @dmxdev: pointer to &struct dmxdev.
209 */
210void dvb_dmxdev_release(struct dmxdev *dmxdev);
211
212#endif /* _DMXDEV_H_ */
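
[Editor's sketch, not part of the patch: bridge drivers normally fill a few struct dmxdev fields before calling dvb_dmxdev_init(). The filter count, the embedded dvb_demux and the absence of extra capabilities below are assumptions, not taken from this header.]

#include <media/dmxdev.h>
#include <media/dvb_demux.h>

static int example_register_dmxdev(struct dmxdev *dmxdev,
				   struct dvb_demux *demux,
				   struct dvb_adapter *adap)
{
	dmxdev->filternum = 256;	/* number of userspace filters to offer */
	dmxdev->demux = &demux->dmx;	/* the underlying struct dmx_demux */
	dmxdev->capabilities = 0;

	return dvb_dmxdev_init(dmxdev, adap);
}
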
diff --git a/include/media/drv-intf/cx2341x.h b/include/media/drv-intf/cx2341x.h
index 9635eebaab09..33a97bfcea58 100644
--- a/include/media/drv-intf/cx2341x.h
+++ b/include/media/drv-intf/cx2341x.h
@@ -29,8 +29,8 @@ enum cx2341x_port {
29 29
30enum cx2341x_cap { 30enum cx2341x_cap {
31 CX2341X_CAP_HAS_SLICED_VBI = 1 << 0, 31 CX2341X_CAP_HAS_SLICED_VBI = 1 << 0,
32 CX2341X_CAP_HAS_TS = 1 << 1, 32 CX2341X_CAP_HAS_TS = 1 << 1,
33 CX2341X_CAP_HAS_AC3 = 1 << 2, 33 CX2341X_CAP_HAS_AC3 = 1 << 2,
34}; 34};
35 35
36struct cx2341x_mpeg_params { 36struct cx2341x_mpeg_params {
@@ -204,92 +204,92 @@ void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy);
204/* Firmware API commands */ 204/* Firmware API commands */
205 205
206/* MPEG decoder API, specific to the cx23415 */ 206/* MPEG decoder API, specific to the cx23415 */
207#define CX2341X_DEC_PING_FW 0x00 207#define CX2341X_DEC_PING_FW 0x00
208#define CX2341X_DEC_START_PLAYBACK 0x01 208#define CX2341X_DEC_START_PLAYBACK 0x01
209#define CX2341X_DEC_STOP_PLAYBACK 0x02 209#define CX2341X_DEC_STOP_PLAYBACK 0x02
210#define CX2341X_DEC_SET_PLAYBACK_SPEED 0x03 210#define CX2341X_DEC_SET_PLAYBACK_SPEED 0x03
211#define CX2341X_DEC_STEP_VIDEO 0x05 211#define CX2341X_DEC_STEP_VIDEO 0x05
212#define CX2341X_DEC_SET_DMA_BLOCK_SIZE 0x08 212#define CX2341X_DEC_SET_DMA_BLOCK_SIZE 0x08
213#define CX2341X_DEC_GET_XFER_INFO 0x09 213#define CX2341X_DEC_GET_XFER_INFO 0x09
214#define CX2341X_DEC_GET_DMA_STATUS 0x0a 214#define CX2341X_DEC_GET_DMA_STATUS 0x0a
215#define CX2341X_DEC_SCHED_DMA_FROM_HOST 0x0b 215#define CX2341X_DEC_SCHED_DMA_FROM_HOST 0x0b
216#define CX2341X_DEC_PAUSE_PLAYBACK 0x0d 216#define CX2341X_DEC_PAUSE_PLAYBACK 0x0d
217#define CX2341X_DEC_HALT_FW 0x0e 217#define CX2341X_DEC_HALT_FW 0x0e
218#define CX2341X_DEC_SET_STANDARD 0x10 218#define CX2341X_DEC_SET_STANDARD 0x10
219#define CX2341X_DEC_GET_VERSION 0x11 219#define CX2341X_DEC_GET_VERSION 0x11
220#define CX2341X_DEC_SET_STREAM_INPUT 0x14 220#define CX2341X_DEC_SET_STREAM_INPUT 0x14
221#define CX2341X_DEC_GET_TIMING_INFO 0x15 221#define CX2341X_DEC_GET_TIMING_INFO 0x15
222#define CX2341X_DEC_SET_AUDIO_MODE 0x16 222#define CX2341X_DEC_SET_AUDIO_MODE 0x16
223#define CX2341X_DEC_SET_EVENT_NOTIFICATION 0x17 223#define CX2341X_DEC_SET_EVENT_NOTIFICATION 0x17
224#define CX2341X_DEC_SET_DISPLAY_BUFFERS 0x18 224#define CX2341X_DEC_SET_DISPLAY_BUFFERS 0x18
225#define CX2341X_DEC_EXTRACT_VBI 0x19 225#define CX2341X_DEC_EXTRACT_VBI 0x19
226#define CX2341X_DEC_SET_DECODER_SOURCE 0x1a 226#define CX2341X_DEC_SET_DECODER_SOURCE 0x1a
227#define CX2341X_DEC_SET_PREBUFFERING 0x1e 227#define CX2341X_DEC_SET_PREBUFFERING 0x1e
228 228
229/* MPEG encoder API */ 229/* MPEG encoder API */
230#define CX2341X_ENC_PING_FW 0x80 230#define CX2341X_ENC_PING_FW 0x80
231#define CX2341X_ENC_START_CAPTURE 0x81 231#define CX2341X_ENC_START_CAPTURE 0x81
232#define CX2341X_ENC_STOP_CAPTURE 0x82 232#define CX2341X_ENC_STOP_CAPTURE 0x82
233#define CX2341X_ENC_SET_AUDIO_ID 0x89 233#define CX2341X_ENC_SET_AUDIO_ID 0x89
234#define CX2341X_ENC_SET_VIDEO_ID 0x8b 234#define CX2341X_ENC_SET_VIDEO_ID 0x8b
235#define CX2341X_ENC_SET_PCR_ID 0x8d 235#define CX2341X_ENC_SET_PCR_ID 0x8d
236#define CX2341X_ENC_SET_FRAME_RATE 0x8f 236#define CX2341X_ENC_SET_FRAME_RATE 0x8f
237#define CX2341X_ENC_SET_FRAME_SIZE 0x91 237#define CX2341X_ENC_SET_FRAME_SIZE 0x91
238#define CX2341X_ENC_SET_BIT_RATE 0x95 238#define CX2341X_ENC_SET_BIT_RATE 0x95
239#define CX2341X_ENC_SET_GOP_PROPERTIES 0x97 239#define CX2341X_ENC_SET_GOP_PROPERTIES 0x97
240#define CX2341X_ENC_SET_ASPECT_RATIO 0x99 240#define CX2341X_ENC_SET_ASPECT_RATIO 0x99
241#define CX2341X_ENC_SET_DNR_FILTER_MODE 0x9b 241#define CX2341X_ENC_SET_DNR_FILTER_MODE 0x9b
242#define CX2341X_ENC_SET_DNR_FILTER_PROPS 0x9d 242#define CX2341X_ENC_SET_DNR_FILTER_PROPS 0x9d
243#define CX2341X_ENC_SET_CORING_LEVELS 0x9f 243#define CX2341X_ENC_SET_CORING_LEVELS 0x9f
244#define CX2341X_ENC_SET_SPATIAL_FILTER_TYPE 0xa1 244#define CX2341X_ENC_SET_SPATIAL_FILTER_TYPE 0xa1
245#define CX2341X_ENC_SET_VBI_LINE 0xb7 245#define CX2341X_ENC_SET_VBI_LINE 0xb7
246#define CX2341X_ENC_SET_STREAM_TYPE 0xb9 246#define CX2341X_ENC_SET_STREAM_TYPE 0xb9
247#define CX2341X_ENC_SET_OUTPUT_PORT 0xbb 247#define CX2341X_ENC_SET_OUTPUT_PORT 0xbb
248#define CX2341X_ENC_SET_AUDIO_PROPERTIES 0xbd 248#define CX2341X_ENC_SET_AUDIO_PROPERTIES 0xbd
249#define CX2341X_ENC_HALT_FW 0xc3 249#define CX2341X_ENC_HALT_FW 0xc3
250#define CX2341X_ENC_GET_VERSION 0xc4 250#define CX2341X_ENC_GET_VERSION 0xc4
251#define CX2341X_ENC_SET_GOP_CLOSURE 0xc5 251#define CX2341X_ENC_SET_GOP_CLOSURE 0xc5
252#define CX2341X_ENC_GET_SEQ_END 0xc6 252#define CX2341X_ENC_GET_SEQ_END 0xc6
253#define CX2341X_ENC_SET_PGM_INDEX_INFO 0xc7 253#define CX2341X_ENC_SET_PGM_INDEX_INFO 0xc7
254#define CX2341X_ENC_SET_VBI_CONFIG 0xc8 254#define CX2341X_ENC_SET_VBI_CONFIG 0xc8
255#define CX2341X_ENC_SET_DMA_BLOCK_SIZE 0xc9 255#define CX2341X_ENC_SET_DMA_BLOCK_SIZE 0xc9
256#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_10 0xca 256#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_10 0xca
257#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_9 0xcb 257#define CX2341X_ENC_GET_PREV_DMA_INFO_MB_9 0xcb
258#define CX2341X_ENC_SCHED_DMA_TO_HOST 0xcc 258#define CX2341X_ENC_SCHED_DMA_TO_HOST 0xcc
259#define CX2341X_ENC_INITIALIZE_INPUT 0xcd 259#define CX2341X_ENC_INITIALIZE_INPUT 0xcd
260#define CX2341X_ENC_SET_FRAME_DROP_RATE 0xd0 260#define CX2341X_ENC_SET_FRAME_DROP_RATE 0xd0
261#define CX2341X_ENC_PAUSE_ENCODER 0xd2 261#define CX2341X_ENC_PAUSE_ENCODER 0xd2
262#define CX2341X_ENC_REFRESH_INPUT 0xd3 262#define CX2341X_ENC_REFRESH_INPUT 0xd3
263#define CX2341X_ENC_SET_COPYRIGHT 0xd4 263#define CX2341X_ENC_SET_COPYRIGHT 0xd4
264#define CX2341X_ENC_SET_EVENT_NOTIFICATION 0xd5 264#define CX2341X_ENC_SET_EVENT_NOTIFICATION 0xd5
265#define CX2341X_ENC_SET_NUM_VSYNC_LINES 0xd6 265#define CX2341X_ENC_SET_NUM_VSYNC_LINES 0xd6
266#define CX2341X_ENC_SET_PLACEHOLDER 0xd7 266#define CX2341X_ENC_SET_PLACEHOLDER 0xd7
267#define CX2341X_ENC_MUTE_VIDEO 0xd9 267#define CX2341X_ENC_MUTE_VIDEO 0xd9
268#define CX2341X_ENC_MUTE_AUDIO 0xda 268#define CX2341X_ENC_MUTE_AUDIO 0xda
269#define CX2341X_ENC_SET_VERT_CROP_LINE 0xdb 269#define CX2341X_ENC_SET_VERT_CROP_LINE 0xdb
270#define CX2341X_ENC_MISC 0xdc 270#define CX2341X_ENC_MISC 0xdc
271 271
272/* OSD API, specific to the cx23415 */ 272/* OSD API, specific to the cx23415 */
273#define CX2341X_OSD_GET_FRAMEBUFFER 0x41 273#define CX2341X_OSD_GET_FRAMEBUFFER 0x41
274#define CX2341X_OSD_GET_PIXEL_FORMAT 0x42 274#define CX2341X_OSD_GET_PIXEL_FORMAT 0x42
275#define CX2341X_OSD_SET_PIXEL_FORMAT 0x43 275#define CX2341X_OSD_SET_PIXEL_FORMAT 0x43
276#define CX2341X_OSD_GET_STATE 0x44 276#define CX2341X_OSD_GET_STATE 0x44
277#define CX2341X_OSD_SET_STATE 0x45 277#define CX2341X_OSD_SET_STATE 0x45
278#define CX2341X_OSD_GET_OSD_COORDS 0x46 278#define CX2341X_OSD_GET_OSD_COORDS 0x46
279#define CX2341X_OSD_SET_OSD_COORDS 0x47 279#define CX2341X_OSD_SET_OSD_COORDS 0x47
280#define CX2341X_OSD_GET_SCREEN_COORDS 0x48 280#define CX2341X_OSD_GET_SCREEN_COORDS 0x48
281#define CX2341X_OSD_SET_SCREEN_COORDS 0x49 281#define CX2341X_OSD_SET_SCREEN_COORDS 0x49
282#define CX2341X_OSD_GET_GLOBAL_ALPHA 0x4a 282#define CX2341X_OSD_GET_GLOBAL_ALPHA 0x4a
283#define CX2341X_OSD_SET_GLOBAL_ALPHA 0x4b 283#define CX2341X_OSD_SET_GLOBAL_ALPHA 0x4b
284#define CX2341X_OSD_SET_BLEND_COORDS 0x4c 284#define CX2341X_OSD_SET_BLEND_COORDS 0x4c
285#define CX2341X_OSD_GET_FLICKER_STATE 0x4f 285#define CX2341X_OSD_GET_FLICKER_STATE 0x4f
286#define CX2341X_OSD_SET_FLICKER_STATE 0x50 286#define CX2341X_OSD_SET_FLICKER_STATE 0x50
287#define CX2341X_OSD_BLT_COPY 0x52 287#define CX2341X_OSD_BLT_COPY 0x52
288#define CX2341X_OSD_BLT_FILL 0x53 288#define CX2341X_OSD_BLT_FILL 0x53
289#define CX2341X_OSD_BLT_TEXT 0x54 289#define CX2341X_OSD_BLT_TEXT 0x54
290#define CX2341X_OSD_SET_FRAMEBUFFER_WINDOW 0x56 290#define CX2341X_OSD_SET_FRAMEBUFFER_WINDOW 0x56
291#define CX2341X_OSD_SET_CHROMA_KEY 0x60 291#define CX2341X_OSD_SET_CHROMA_KEY 0x60
292#define CX2341X_OSD_GET_ALPHA_CONTENT_INDEX 0x61 292#define CX2341X_OSD_GET_ALPHA_CONTENT_INDEX 0x61
293#define CX2341X_OSD_SET_ALPHA_CONTENT_INDEX 0x62 293#define CX2341X_OSD_SET_ALPHA_CONTENT_INDEX 0x62
294 294
295#endif /* CX2341X_H */ 295#endif /* CX2341X_H */
diff --git a/include/media/drv-intf/exynos-fimc.h b/include/media/drv-intf/exynos-fimc.h
index 69bcd2a07d5c..f9c64338841f 100644
--- a/include/media/drv-intf/exynos-fimc.h
+++ b/include/media/drv-intf/exynos-fimc.h
@@ -155,7 +155,8 @@ static inline struct exynos_video_entity *vdev_to_exynos_video_entity(
155} 155}
156 156
157#define fimc_pipeline_call(ent, op, args...) \ 157#define fimc_pipeline_call(ent, op, args...) \
158 (!(ent) ? -ENOENT : (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \ 158 ((!(ent) || !(ent)->pipe) ? -ENOENT : \
159 (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \
159 (ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \ 160 (ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \
160 161
161#endif /* S5P_FIMC_H_ */ 162#endif /* S5P_FIMC_H_ */
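A hedged usage sketch of the updated fimc_pipeline_call() guard: with the added NULL check on (ent)->pipe, a caller that races against pipeline teardown now gets -ENOENT instead of dereferencing a NULL pipe. The entity pointer and the "open" op are borrowed from the exynos4-is driver and used only for illustration here.

/* Illustrative caller of the hardened macro. */
static int example_open_pipeline(struct exynos_video_entity *ve)
{
	/* -ENOENT if ve or ve->pipe is NULL, -ENOIOCTLCMD if the op is absent. */
	int ret = fimc_pipeline_call(ve, open, &ve->vdev.entity, true);

	return (ret == -ENOIOCTLCMD) ? 0 : ret;
}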
diff --git a/include/media/drv-intf/msp3400.h b/include/media/drv-intf/msp3400.h
index 1e6e80213a77..db98ce49e17b 100644
--- a/include/media/drv-intf/msp3400.h
+++ b/include/media/drv-intf/msp3400.h
@@ -80,17 +80,17 @@
80 */ 80 */
81 81
82/* SCART input to DSP selection */ 82/* SCART input to DSP selection */
83#define MSP_IN_SCART1 0 /* Pin SC1_IN */ 83#define MSP_IN_SCART1 0 /* Pin SC1_IN */
84#define MSP_IN_SCART2 1 /* Pin SC2_IN */ 84#define MSP_IN_SCART2 1 /* Pin SC2_IN */
85#define MSP_IN_SCART3 2 /* Pin SC3_IN */ 85#define MSP_IN_SCART3 2 /* Pin SC3_IN */
86#define MSP_IN_SCART4 3 /* Pin SC4_IN */ 86#define MSP_IN_SCART4 3 /* Pin SC4_IN */
87#define MSP_IN_MONO 6 /* Pin MONO_IN */ 87#define MSP_IN_MONO 6 /* Pin MONO_IN */
88#define MSP_IN_MUTE 7 /* Mute DSP input */ 88#define MSP_IN_MUTE 7 /* Mute DSP input */
89#define MSP_SCART_TO_DSP(in) (in) 89#define MSP_SCART_TO_DSP(in) (in)
90/* Tuner input to demodulator and DSP selection */ 90/* Tuner input to demodulator and DSP selection */
91#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */ 91#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */
92#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */ 92#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */
93#define MSP_TUNER_TO_DSP(in) ((in) << 3) 93#define MSP_TUNER_TO_DSP(in) ((in) << 3)
94 94
95/* The msp has up to 5 DSP outputs, each output can independently select 95/* The msp has up to 5 DSP outputs, each output can independently select
96 a DSP input. 96 a DSP input.
@@ -109,30 +109,30 @@
109 DSP. This is currently not implemented. Also not implemented is the 109 DSP. This is currently not implemented. Also not implemented is the
110 multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate 110 multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate
111 a need for one of those features then additional support can be added. */ 111 a need for one of those features then additional support can be added. */
112#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */ 112#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */
113#define MSP_DSP_IN_SCART 2 /* SCART DSP input */ 113#define MSP_DSP_IN_SCART 2 /* SCART DSP input */
114#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */ 114#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */
115#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */ 115#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */
116#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */ 116#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */
117#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */ 117#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */
118#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */ 118#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */
119#define MSP_DSP_IN_AUX 13 /* AUX DSP input */ 119#define MSP_DSP_IN_AUX 13 /* AUX DSP input */
120#define MSP_DSP_TO_MAIN(in) ((in) << 4) 120#define MSP_DSP_TO_MAIN(in) ((in) << 4)
121#define MSP_DSP_TO_AUX(in) ((in) << 8) 121#define MSP_DSP_TO_AUX(in) ((in) << 8)
122#define MSP_DSP_TO_SCART1(in) ((in) << 12) 122#define MSP_DSP_TO_SCART1(in) ((in) << 12)
123#define MSP_DSP_TO_SCART2(in) ((in) << 16) 123#define MSP_DSP_TO_SCART2(in) ((in) << 16)
124#define MSP_DSP_TO_I2S(in) ((in) << 20) 124#define MSP_DSP_TO_I2S(in) ((in) << 20)
125 125
126/* Output SCART select: the SCART outputs can select which input 126/* Output SCART select: the SCART outputs can select which input
127 to use. */ 127 to use. */
128#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */ 128#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */
129#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */ 129#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */
130#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */ 130#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */
131#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */ 131#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */
132#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */ 132#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */
133#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */ 133#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */
134#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */ 134#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */
135#define MSP_SC_IN_MUTE 7 /* MUTE output */ 135#define MSP_SC_IN_MUTE 7 /* MUTE output */
136#define MSP_SC_TO_SCART1(in) (in) 136#define MSP_SC_TO_SCART1(in) (in)
137#define MSP_SC_TO_SCART2(in) ((in) << 4) 137#define MSP_SC_TO_SCART2(in) ((in) << 4)
138 138
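A worked example of how these selection macros compose, assuming the resulting words are later handed to the msp3400 driver's routing setup (which is outside this excerpt): tuner 1 is selected as the demodulator/DSP source, the tuner-derived audio is placed on the MAIN and DSP SCART1 outputs, and the SCART1 connector is driven from the DSP SCART1 signal.

/* Illustrative routing words built from the macros above. */
static void msp_routing_example(void)
{
	u32 in  = MSP_TUNER_TO_DSP(MSP_IN_TUNER1) |	/* tuner 1 -> demod/DSP */
		  MSP_DSP_TO_MAIN(MSP_DSP_IN_TUNER) |	/* tuner audio on MAIN */
		  MSP_DSP_TO_SCART1(MSP_DSP_IN_TUNER);	/* and on DSP SCART1 */

	u32 out = MSP_SC_TO_SCART1(MSP_SC_IN_DSP_SCART1); /* SCART1 pins <- DSP SCART1 */

	(void)in;	/* would be passed to the driver's routing setup */
	(void)out;
}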
diff --git a/include/media/drv-intf/saa7146.h b/include/media/drv-intf/saa7146.h
index 769c6cf7eb4c..a7bf2c4a2e4d 100644
--- a/include/media/drv-intf/saa7146.h
+++ b/include/media/drv-intf/saa7146.h
@@ -118,7 +118,7 @@ struct saa7146_dev
118{ 118{
119 struct module *module; 119 struct module *module;
120 120
121 struct v4l2_device v4l2_dev; 121 struct v4l2_device v4l2_dev;
122 struct v4l2_ctrl_handler ctrl_handler; 122 struct v4l2_ctrl_handler ctrl_handler;
123 123
124 /* different device locks */ 124 /* different device locks */
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
new file mode 100644
index 000000000000..28e2be5c8a98
--- /dev/null
+++ b/include/media/dvb-usb-ids.h
@@ -0,0 +1,424 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* dvb-usb-ids.h is part of the DVB USB library.
3 *
4 * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see
5 * dvb-usb-init.c for copyright information.
6 *
7 * a header file containing define's for the USB device supported by the
8 * various drivers.
9 */
10#ifndef _DVB_USB_IDS_H_
11#define _DVB_USB_IDS_H_
12
13/* Vendor IDs */
14#define USB_VID_ADSTECH 0x06e1
15#define USB_VID_AFATECH 0x15a4
16#define USB_VID_ALCOR_MICRO 0x058f
17#define USB_VID_ALINK 0x05e3
18#define USB_VID_AMT 0x1c73
19#define USB_VID_ANCHOR 0x0547
20#define USB_VID_ANSONIC 0x10b9
21#define USB_VID_ANUBIS_ELECTRONIC 0x10fd
22#define USB_VID_ASUS 0x0b05
23#define USB_VID_AVERMEDIA 0x07ca
24#define USB_VID_COMPRO 0x185b
25#define USB_VID_COMPRO_UNK 0x145f
26#define USB_VID_CONEXANT 0x0572
27#define USB_VID_CYPRESS 0x04b4
28#define USB_VID_DEXATEK 0x1d19
29#define USB_VID_DIBCOM 0x10b8
30#define USB_VID_DPOSH 0x1498
31#define USB_VID_DVICO 0x0fe9
32#define USB_VID_E3C 0x18b4
33#define USB_VID_ELGATO 0x0fd9
34#define USB_VID_EMPIA 0xeb1a
35#define USB_VID_GENPIX 0x09c0
36#define USB_VID_GRANDTEC 0x5032
37#define USB_VID_GTEK 0x1f4d
38#define USB_VID_HANFTEK 0x15f4
39#define USB_VID_HAUPPAUGE 0x2040
40#define USB_VID_HYPER_PALTEK 0x1025
41#define USB_VID_INTEL 0x8086
42#define USB_VID_ITETECH 0x048d
43#define USB_VID_KWORLD 0xeb2a
44#define USB_VID_KWORLD_2 0x1b80
45#define USB_VID_KYE 0x0458
46#define USB_VID_LEADTEK 0x0413
47#define USB_VID_LITEON 0x04ca
48#define USB_VID_MEDION 0x1660
49#define USB_VID_MIGLIA 0x18f3
50#define USB_VID_MSI 0x0db0
51#define USB_VID_MSI_2 0x1462
52#define USB_VID_OPERA1 0x695c
53#define USB_VID_PINNACLE 0x2304
54#define USB_VID_PCTV 0x2013
55#define USB_VID_PIXELVIEW 0x1554
56#define USB_VID_REALTEK 0x0bda
57#define USB_VID_TECHNOTREND 0x0b48
58#define USB_VID_TERRATEC 0x0ccd
59#define USB_VID_TELESTAR 0x10b9
60#define USB_VID_VISIONPLUS 0x13d3
61#define USB_VID_SONY 0x1415
62#define USB_PID_TEVII_S421 0xd421
63#define USB_PID_TEVII_S480_1 0xd481
64#define USB_PID_TEVII_S480_2 0xd482
65#define USB_PID_TEVII_S630 0xd630
66#define USB_PID_TEVII_S632 0xd632
67#define USB_PID_TEVII_S650 0xd650
68#define USB_PID_TEVII_S660 0xd660
69#define USB_PID_TEVII_S662 0xd662
70#define USB_VID_TWINHAN 0x1822
71#define USB_VID_ULTIMA_ELECTRONIC 0x05d8
72#define USB_VID_UNIWILL 0x1584
73#define USB_VID_WIDEVIEW 0x14aa
74#define USB_VID_GIGABYTE 0x1044
75#define USB_VID_YUAN 0x1164
76#define USB_VID_XTENSIONS 0x1ae7
77#define USB_VID_ZYDAS 0x0ace
78#define USB_VID_HUMAX_COEX 0x10b9
79#define USB_VID_774 0x7a69
80#define USB_VID_EVOLUTEPC 0x1e59
81#define USB_VID_AZUREWAVE 0x13d3
82#define USB_VID_TECHNISAT 0x14f7
83#define USB_VID_HAMA 0x147f
84#define USB_VID_MICROSOFT 0x045e
85
86/* Product IDs */
87#define USB_PID_ADSTECH_USB2_COLD 0xa333
88#define USB_PID_ADSTECH_USB2_WARM 0xa334
89#define USB_PID_AFATECH_AF9005 0x9020
90#define USB_PID_AFATECH_AF9015_9015 0x9015
91#define USB_PID_AFATECH_AF9015_9016 0x9016
92#define USB_PID_AFATECH_AF9035_1000 0x1000
93#define USB_PID_AFATECH_AF9035_1001 0x1001
94#define USB_PID_AFATECH_AF9035_1002 0x1002
95#define USB_PID_AFATECH_AF9035_1003 0x1003
96#define USB_PID_AFATECH_AF9035_9035 0x9035
97#define USB_PID_TREKSTOR_DVBT 0x901b
98#define USB_PID_TREKSTOR_TERRES_2_0 0xC803
99#define USB_VID_ALINK_DTU 0xf170
100#define USB_PID_ANSONIC_DVBT_USB 0x6000
101#define USB_PID_ANYSEE 0x861f
102#define USB_PID_AZUREWAVE_AD_TU700 0x3237
103#define USB_PID_AZUREWAVE_6007 0x0ccd
104#define USB_PID_AVERMEDIA_DVBT_USB_COLD 0x0001
105#define USB_PID_AVERMEDIA_DVBT_USB_WARM 0x0002
106#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800
107#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801
108#define USB_PID_COMPRO_DVBU2000_COLD 0xd000
109#define USB_PID_COMPRO_DVBU2000_WARM 0xd001
110#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c
111#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d
112#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78
113#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80
114#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397
115#define USB_PID_CONEXANT_D680_DMB 0x86d6
116#define USB_PID_CREATIX_CTX1921 0x1921
117#define USB_PID_DELOCK_USB2_DVBT 0xb803
118#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
119#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
120#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8
121#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9
122#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6
123#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7
124#define USB_PID_DIBCOM_STK7700P 0x1e14
125#define USB_PID_DIBCOM_STK7700P_PC 0x1e78
126#define USB_PID_DIBCOM_STK7700D 0x1ef0
127#define USB_PID_DIBCOM_STK7700_U7000 0x7001
128#define USB_PID_DIBCOM_STK7070P 0x1ebc
129#define USB_PID_DIBCOM_STK7070PD 0x1ebe
130#define USB_PID_DIBCOM_STK807XP 0x1f90
131#define USB_PID_DIBCOM_STK807XPVR 0x1f98
132#define USB_PID_DIBCOM_STK8096GP 0x1fa0
133#define USB_PID_DIBCOM_STK8096PVR 0x1faa
134#define USB_PID_DIBCOM_NIM8096MD 0x1fa8
135#define USB_PID_DIBCOM_TFE8096P 0x1f9C
136#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
137#define USB_PID_DIBCOM_STK7770P 0x1e80
138#define USB_PID_DIBCOM_NIM7090 0x1bb2
139#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4
140#define USB_PID_DIBCOM_TFE7790P 0x1e6e
141#define USB_PID_DIBCOM_NIM9090M 0x2383
142#define USB_PID_DIBCOM_NIM9090MD 0x2384
143#define USB_PID_DPOSH_M9206_COLD 0x9206
144#define USB_PID_DPOSH_M9206_WARM 0xa090
145#define USB_PID_E3C_EC168 0x1689
146#define USB_PID_E3C_EC168_2 0xfffa
147#define USB_PID_E3C_EC168_3 0xfffb
148#define USB_PID_E3C_EC168_4 0x1001
149#define USB_PID_E3C_EC168_5 0x1002
150#define USB_PID_FREECOM_DVBT 0x0160
151#define USB_PID_FREECOM_DVBT_2 0x0161
152#define USB_PID_UNIWILL_STK7700P 0x6003
153#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012
154#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
155#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
156#define USB_PID_GOTVIEW_SAT_HD 0x5456
157#define USB_PID_INTEL_CE9500 0x9500
158#define USB_PID_ITETECH_IT9135 0x9135
159#define USB_PID_ITETECH_IT9135_9005 0x9005
160#define USB_PID_ITETECH_IT9135_9006 0x9006
161#define USB_PID_ITETECH_IT9303 0x9306
162#define USB_PID_KWORLD_399U 0xe399
163#define USB_PID_KWORLD_399U_2 0xe400
164#define USB_PID_KWORLD_395U 0xe396
165#define USB_PID_KWORLD_395U_2 0xe39b
166#define USB_PID_KWORLD_395U_3 0xe395
167#define USB_PID_KWORLD_395U_4 0xe39a
168#define USB_PID_KWORLD_MC810 0xc810
169#define USB_PID_KWORLD_PC160_2T 0xc160
170#define USB_PID_KWORLD_PC160_T 0xc161
171#define USB_PID_KWORLD_UB383_T 0xe383
172#define USB_PID_KWORLD_UB499_2T_T09 0xe409
173#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
174#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
175#define USB_PID_PROF_1100 0xb012
176#define USB_PID_TERRATEC_CINERGY_S 0x0064
177#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
178#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
179#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
180#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
181#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
182#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
183#define USB_PID_TWINHAN_VP7041_COLD 0x3201
184#define USB_PID_TWINHAN_VP7041_WARM 0x3202
185#define USB_PID_TWINHAN_VP7020_COLD 0x3203
186#define USB_PID_TWINHAN_VP7020_WARM 0x3204
187#define USB_PID_TWINHAN_VP7045_COLD 0x3205
188#define USB_PID_TWINHAN_VP7045_WARM 0x3206
189#define USB_PID_TWINHAN_VP7021_COLD 0x3207
190#define USB_PID_TWINHAN_VP7021_WARM 0x3208
191#define USB_PID_TWINHAN_VP7049 0x3219
192#define USB_PID_TINYTWIN 0x3226
193#define USB_PID_TINYTWIN_2 0xe402
194#define USB_PID_TINYTWIN_3 0x9016
195#define USB_PID_DNTV_TINYUSB2_COLD 0x3223
196#define USB_PID_DNTV_TINYUSB2_WARM 0x3224
197#define USB_PID_ULTIMA_TVBOX_COLD 0x8105
198#define USB_PID_ULTIMA_TVBOX_WARM 0x8106
199#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107
200#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108
201#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235
202#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109
203#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a
204#define USB_PID_ARTEC_T14_COLD 0x810b
205#define USB_PID_ARTEC_T14_WARM 0x810c
206#define USB_PID_ARTEC_T14BR 0x810f
207#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613
208#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002
209#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e
210#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f
211#define USB_PID_HANFTEK_UMT_010_COLD 0x0001
212#define USB_PID_HANFTEK_UMT_010_WARM 0x0015
213#define USB_PID_DTT200U_COLD 0x0201
214#define USB_PID_DTT200U_WARM 0x0301
215#define USB_PID_WT220U_ZAP250_COLD 0x0220
216#define USB_PID_WT220U_COLD 0x0222
217#define USB_PID_WT220U_WARM 0x0221
218#define USB_PID_WT220U_FC_COLD 0x0225
219#define USB_PID_WT220U_FC_WARM 0x0226
220#define USB_PID_WT220U_ZL0353_COLD 0x022a
221#define USB_PID_WT220U_ZL0353_WARM 0x022b
222#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300
223#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
224#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
225#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
226#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400
227#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
228#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
229#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
230#define USB_PID_HAUPPAUGE_MYTV_T 0x7080
231#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580
232#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200
233#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200
234#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210
235#define USB_PID_AVERMEDIA_EXPRESS 0xb568
236#define USB_PID_AVERMEDIA_VOLAR 0xa807
237#define USB_PID_AVERMEDIA_VOLAR_2 0xb808
238#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868
239#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228
240#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039
241#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039
242#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039
243#define USB_PID_AVERMEDIA_VOLAR_X 0xa815
244#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150
245#define USB_PID_AVERMEDIA_A309 0xa309
246#define USB_PID_AVERMEDIA_A310 0xa310
247#define USB_PID_AVERMEDIA_A850 0x850a
248#define USB_PID_AVERMEDIA_A850T 0x850b
249#define USB_PID_AVERMEDIA_A805 0xa805
250#define USB_PID_AVERMEDIA_A815M 0x815a
251#define USB_PID_AVERMEDIA_A835 0xa835
252#define USB_PID_AVERMEDIA_B835 0xb835
253#define USB_PID_AVERMEDIA_A835B_1835 0x1835
254#define USB_PID_AVERMEDIA_A835B_2835 0x2835
255#define USB_PID_AVERMEDIA_A835B_3835 0x3835
256#define USB_PID_AVERMEDIA_A835B_4835 0x4835
257#define USB_PID_AVERMEDIA_1867 0x1867
258#define USB_PID_AVERMEDIA_A867 0xa867
259#define USB_PID_AVERMEDIA_H335 0x0335
260#define USB_PID_AVERMEDIA_TD110 0xa110
261#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
262#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
263#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
264#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
265#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011
266#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
267#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015
268#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
269#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017
270#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
271#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
272#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058
273#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060
274#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
275#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
276#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab
277#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8
278#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0
279#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102
280#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105
281#define USB_PID_TERRATEC_H7 0x10b4
282#define USB_PID_TERRATEC_H7_2 0x10a3
283#define USB_PID_TERRATEC_H7_3 0x10a5
284#define USB_PID_TERRATEC_T1 0x10ae
285#define USB_PID_TERRATEC_T3 0x10a0
286#define USB_PID_TERRATEC_T5 0x10a1
287#define USB_PID_NOXON_DAB_STICK 0x00b3
288#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0
289#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4
290#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
291#define USB_PID_PINNACLE_PCTV2000E 0x022c
292#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
293#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229
294#define USB_PID_PINNACLE_PCTV71E 0x022b
295#define USB_PID_PINNACLE_PCTV72E 0x0236
296#define USB_PID_PINNACLE_PCTV73E 0x0237
297#define USB_PID_PINNACLE_PCTV310E 0x3211
298#define USB_PID_PINNACLE_PCTV801E 0x023a
299#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
300#define USB_PID_PINNACLE_PCTV340E 0x023d
301#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
302#define USB_PID_PINNACLE_PCTV73A 0x0243
303#define USB_PID_PINNACLE_PCTV73ESE 0x0245
304#define USB_PID_PINNACLE_PCTV74E 0x0246
305#define USB_PID_PINNACLE_PCTV282E 0x0248
306#define USB_PID_PIXELVIEW_SBTVD 0x5010
307#define USB_PID_PCTV_200E 0x020e
308#define USB_PID_PCTV_400E 0x020f
309#define USB_PID_PCTV_450E 0x0222
310#define USB_PID_PCTV_452E 0x021f
311#define USB_PID_PCTV_78E 0x025a
312#define USB_PID_PCTV_79E 0x0262
313#define USB_PID_REALTEK_RTL2831U 0x2831
314#define USB_PID_REALTEK_RTL2832U 0x2832
315#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007
316#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a
317#define USB_PID_NEBULA_DIGITV 0x0201
318#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
319#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
320#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501
321#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00
322#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01
323#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10
324#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11
325#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50
326#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51
327#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58
328#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59
329#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78
330#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98
331#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70
332#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71
333#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54
334#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55
335#define USB_PID_MEDION_MD95700 0x0932
336#define USB_PID_MSI_MEGASKY580 0x5580
337#define USB_PID_MSI_MEGASKY580_55801 0x5581
338#define USB_PID_KYE_DVB_T_COLD 0x701e
339#define USB_PID_KYE_DVB_T_WARM 0x701f
340#define USB_PID_LITEON_DVB_T_COLD 0xf000
341#define USB_PID_LITEON_DVB_T_WARM 0xf001
342#define USB_PID_DIGIVOX_MINI_SL_COLD 0xe360
343#define USB_PID_DIGIVOX_MINI_SL_WARM 0xe361
344#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6
345#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7
346#define USB_PID_WINFAST_DTV2000DS 0x6a04
347#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12
348#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
349#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
350#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
351#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
352#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
353#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
354#define USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f
355#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
356#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201
357#define USB_PID_GENPIX_8PSK_REV_2 0x0202
358#define USB_PID_GENPIX_SKYWALKER_1 0x0203
359#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204
360#define USB_PID_GENPIX_SKYWALKER_2 0x0206
361#define USB_PID_SIGMATEK_DVB_110 0x6610
362#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513
363#define USB_PID_MSI_DIGIVOX_DUO 0x8801
364#define USB_PID_OPERA1_COLD 0x2830
365#define USB_PID_OPERA1_WARM 0x3829
366#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514
367#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513
368#define USB_PID_GIGABYTE_U7000 0x7001
369#define USB_PID_GIGABYTE_U8000 0x7002
370#define USB_PID_ASUS_U3000 0x171f
371#define USB_PID_ASUS_U3000H 0x1736
372#define USB_PID_ASUS_U3100 0x173f
373#define USB_PID_ASUS_U3100MINI_PLUS 0x1779
374#define USB_PID_YUAN_EC372S 0x1edc
375#define USB_PID_YUAN_STK7700PH 0x1f08
376#define USB_PID_YUAN_PD378S 0x2edc
377#define USB_PID_YUAN_MC770 0x0871
378#define USB_PID_YUAN_STK7700D 0x1efc
379#define USB_PID_YUAN_STK7700D_2 0x1e8c
380#define USB_PID_DW2102 0x2102
381#define USB_PID_DW2104 0x2104
382#define USB_PID_DW3101 0x3101
383#define USB_PID_XTENSIONS_XD_380 0x0381
384#define USB_PID_TELESTAR_STARSTICK_2 0x8000
385#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
386#define USB_PID_SONY_PLAYTV 0x0003
387#define USB_PID_MYGICA_D689 0xd811
388#define USB_PID_MYGICA_T230 0xc688
389#define USB_PID_MYGICA_T230C 0xc689
390#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
391#define USB_PID_ELGATO_EYETV_DTT 0x0021
392#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
393#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
394#define USB_PID_ELGATO_EYETV_SAT 0x002a
395#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025
396#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036
397#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
398#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM 0x5001
399#define USB_PID_FRIIO_WHITE 0x0001
400#define USB_PID_TVWAY_PLUS 0x0002
401#define USB_PID_SVEON_STV20 0xe39d
402#define USB_PID_SVEON_STV20_RTL2832U 0xd39d
403#define USB_PID_SVEON_STV21 0xd3b0
404#define USB_PID_SVEON_STV22 0xe401
405#define USB_PID_SVEON_STV22_IT9137 0xe411
406#define USB_PID_AZUREWAVE_AZ6027 0x3275
407#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
408#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
409#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001
410#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002
411#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003
412#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004
413#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
414#define USB_PID_CPYTO_REDI_PC50A 0xa803
415#define USB_PID_CTVDIGDUAL_V2 0xe410
416#define USB_PID_PCTV_2002E 0x025c
417#define USB_PID_PCTV_2002E_SE 0x025d
418#define USB_PID_SVEON_STV27 0xd3af
419#define USB_PID_TURBOX_DTT_2000 0xd3a4
420#define USB_PID_WINTV_SOLOHD 0x0264
421#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
422#define USB_PID_HAMA_DVBT_HYBRID 0x2758
423#define USB_PID_XBOX_ONE_TUNER 0x02d5
424#endif
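These vendor/product constants are normally consumed by a driver's USB id table. A minimal, hypothetical fragment is shown below; the table name is illustrative, and the Hauppauge Nova-T IDs are taken from the list above.

/* Hypothetical id-table fragment built from the defines above. */
static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_WINTV_NOVA_T_USB2_COLD) },
	{ USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_WINTV_NOVA_T_USB2_WARM) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);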
diff --git a/include/media/dvb_ca_en50221.h b/include/media/dvb_ca_en50221.h
new file mode 100644
index 000000000000..a1c014b0a837
--- /dev/null
+++ b/include/media/dvb_ca_en50221.h
@@ -0,0 +1,142 @@
1/*
2 * dvb_ca.h: generic DVB functions for EN50221 CA interfaces
3 *
4 * Copyright (C) 2004 Andrew de Quincey
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public License
8 * as published by the Free Software Foundation; either version 2.1
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef _DVB_CA_EN50221_H_
18#define _DVB_CA_EN50221_H_
19
20#include <linux/list.h>
21#include <linux/dvb/ca.h>
22
23#include <media/dvbdev.h>
24
25#define DVB_CA_EN50221_POLL_CAM_PRESENT 1
26#define DVB_CA_EN50221_POLL_CAM_CHANGED 2
27#define DVB_CA_EN50221_POLL_CAM_READY 4
28
29#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1
30#define DVB_CA_EN50221_FLAG_IRQ_FR 2
31#define DVB_CA_EN50221_FLAG_IRQ_DA 4
32
33#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0
34#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1
35
36/**
37 * struct dvb_ca_en50221 - Structure describing a CA interface
38 *
39 * @owner: the module owning this structure
40 * @read_attribute_mem: function for reading attribute memory on the CAM
41 * @write_attribute_mem: function for writing attribute memory on the CAM
42 * @read_cam_control: function for reading the control interface on the CAM
43 * @write_cam_control: function for writing the control interface on the CAM
44 * @read_data: function for reading data (block mode)
45 * @write_data: function for writing data (block mode)
46 * @slot_reset: function to reset the CAM slot
47 * @slot_shutdown: function to shutdown a CAM slot
48 * @slot_ts_enable: function to enable the Transport Stream on a CAM slot
49 * @poll_slot_status: function to poll slot status. Only necessary if
50 * DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE is not set.
51 * @data: private data, used by caller.
52 * @private: Opaque data used by the dvb_ca core. Do not modify!
53 *
54 * NOTE: the read_*, write_* and poll_slot_status functions will be
55 * called for different slots concurrently and need to use locks where
56 * and if appropriate. There will be no concurrent access to one slot.
57 */
58struct dvb_ca_en50221 {
59 struct module *owner;
60
61 int (*read_attribute_mem)(struct dvb_ca_en50221 *ca,
62 int slot, int address);
63 int (*write_attribute_mem)(struct dvb_ca_en50221 *ca,
64 int slot, int address, u8 value);
65
66 int (*read_cam_control)(struct dvb_ca_en50221 *ca,
67 int slot, u8 address);
68 int (*write_cam_control)(struct dvb_ca_en50221 *ca,
69 int slot, u8 address, u8 value);
70
71 int (*read_data)(struct dvb_ca_en50221 *ca,
72 int slot, u8 *ebuf, int ecount);
73 int (*write_data)(struct dvb_ca_en50221 *ca,
74 int slot, u8 *ebuf, int ecount);
75
76 int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
77 int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
78 int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
79
80 int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open);
81
82 void *data;
83
84 void *private;
85};
86
87/*
88 * Functions for reporting IRQ events
89 */
90
91/**
92 * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
93 *
94 * @pubca: CA instance.
95 * @slot: Slot concerned.
96 * @change_type: One of the DVB_CA_EN50221_CAMCHANGE_* values
97 */
98void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot,
99 int change_type);
100
101/**
102 * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
103 *
104 * @pubca: CA instance.
105 * @slot: Slot concerned.
106 */
107void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot);
108
109/**
110 * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred.
111 *
112 * @ca: CA instance.
113 * @slot: Slot concerned.
114 */
115void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot);
116
117/*
118 * Initialisation/shutdown functions
119 */
120
121/**
122 * dvb_ca_en50221_init - Initialise a new DVB CA device.
123 *
124 * @dvb_adapter: DVB adapter to attach the new CA device to.
125 * @ca: The dvb_ca instance.
126 * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*).
127 * @slot_count: Number of slots supported.
128 *
129 * @return 0 on success, nonzero on failure
130 */
131int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
132 struct dvb_ca_en50221 *ca, int flags,
133 int slot_count);
134
135/**
136 * dvb_ca_en50221_release - Release a DVB CA device.
137 *
138 * @ca: The associated dvb_ca instance.
139 */
140void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca);
141
142#endif
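A hedged sketch of how a CA driver would fill in and register this interface for a single slot. All my_* callbacks are placeholders; only the structure members and dvb_ca_en50221_init()/dvb_ca_en50221_release() come from the header above.

/* Illustrative registration of an EN50221 CA interface with one slot. */
static struct dvb_ca_en50221 my_ca = {
	.owner               = THIS_MODULE,
	.read_attribute_mem  = my_read_attribute_mem,
	.write_attribute_mem = my_write_attribute_mem,
	.read_cam_control    = my_read_cam_control,
	.write_cam_control   = my_write_cam_control,
	.slot_reset          = my_slot_reset,
	.slot_shutdown       = my_slot_shutdown,
	.slot_ts_enable      = my_slot_ts_enable,
	.poll_slot_status    = my_poll_slot_status,	/* no CAMCHANGE IRQ wired */
};

static int my_ca_register(struct dvb_adapter *adap)
{
	/* flags = 0: slot status is polled rather than IRQ driven. */
	return dvb_ca_en50221_init(adap, &my_ca, 0, 1);
}

static void my_ca_unregister(void)
{
	dvb_ca_en50221_release(&my_ca);
}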
diff --git a/include/media/dvb_demux.h b/include/media/dvb_demux.h
new file mode 100644
index 000000000000..b07092038f4b
--- /dev/null
+++ b/include/media/dvb_demux.h
@@ -0,0 +1,350 @@
1/*
2 * dvb_demux.h: DVB kernel demux API
3 *
4 * Copyright (C) 2000-2001 Marcus Metzler & Ralph Metzler
5 * for convergence integrated media GmbH
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public License
9 * as published by the Free Software Foundation; either version 2.1
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#ifndef _DVB_DEMUX_H_
20#define _DVB_DEMUX_H_
21
22#include <linux/time.h>
23#include <linux/timer.h>
24#include <linux/spinlock.h>
25#include <linux/mutex.h>
26
27#include <media/demux.h>
28
29/**
30 * enum dvb_dmx_filter_type - type of demux feed.
31 *
32 * @DMX_TYPE_TS: feed is in TS mode.
33 * @DMX_TYPE_SEC: feed is in Section mode.
34 */
35enum dvb_dmx_filter_type {
36 DMX_TYPE_TS,
37 DMX_TYPE_SEC,
38};
39
40/**
41 * enum dvb_dmx_state - state machine for a demux filter.
42 *
43 * @DMX_STATE_FREE: indicates that the filter is freed.
44 * @DMX_STATE_ALLOCATED: indicates that the filter was allocated
45 * to be used.
46 * @DMX_STATE_READY: indicates that the filter is ready
47 * to be used.
48 * @DMX_STATE_GO: indicates that the filter is running.
49 */
50enum dvb_dmx_state {
51 DMX_STATE_FREE,
52 DMX_STATE_ALLOCATED,
53 DMX_STATE_READY,
54 DMX_STATE_GO,
55};
56
57#define DVB_DEMUX_MASK_MAX 18
58
59#define MAX_PID 0x1fff
60
61#define SPEED_PKTS_INTERVAL 50000
62
63/**
64 * struct dvb_demux_filter - Describes a DVB demux section filter.
65 *
66 * @filter: Section filter as defined by &struct dmx_section_filter.
67 * @maskandmode: logical ``and`` bit mask.
68 * @maskandnotmode: logical ``and not`` bit mask.
69 * @doneq: flag that indicates when a filter is ready.
70 * @next: pointer to the next section filter.
71 * @feed: &struct dvb_demux_feed pointer.
72 * @index: index of the used demux filter.
73 * @state: state of the filter as described by &enum dvb_dmx_state.
74 * @type: type of the filter as described
75 * by &enum dvb_dmx_filter_type.
76 */
77
78struct dvb_demux_filter {
79 struct dmx_section_filter filter;
80 u8 maskandmode[DMX_MAX_FILTER_SIZE];
81 u8 maskandnotmode[DMX_MAX_FILTER_SIZE];
82 bool doneq;
83
84 struct dvb_demux_filter *next;
85 struct dvb_demux_feed *feed;
86 int index;
87 enum dvb_dmx_state state;
88 enum dvb_dmx_filter_type type;
89
90 /* private: used only by av7110 */
91 u16 hw_handle;
92};
93
94/**
95 * struct dvb_demux_feed - describes a DVB field
96 *
97 * @feed: a union describing a digital TV feed.
98 * Depending on the feed type, it can be either
99 * @feed.ts or @feed.sec.
100 * @feed.ts: a &struct dmx_ts_feed pointer.
101 * For TS feed only.
102 * @feed.sec: a &struct dmx_section_feed pointer.
103 * For section feed only.
104 * @cb: a union describing digital TV callbacks.
105 * Depending on the feed type, it can be either
106 * @cb.ts or @cb.sec.
107 * @cb.ts: a dmx_ts_cb() callback function pointer.
108 * For TS feed only.
109 * @cb.sec: a dmx_section_cb() callback function pointer.
110 * For section feed only.
111 * @demux: pointer to &struct dvb_demux.
112 * @priv: private data that can optionally be used by a DVB driver.
113 * @type: type of the filter, as defined by &enum dvb_dmx_filter_type.
114 * @state: state of the filter as defined by &enum dvb_dmx_state.
115 * @pid: PID to be filtered.
116 * @timeout: feed timeout.
117 * @filter: pointer to &struct dvb_demux_filter.
118 * @ts_type: type of TS, as defined by &enum ts_filter_type.
119 * @pes_type: type of PES, as defined by &enum dmx_ts_pes.
120 * @cc: MPEG-TS packet continuity counter
121 * @pusi_seen: if true, indicates that a discontinuity was detected.
122 * It is used to prevent feeding of garbage from the previous section.
123 * @peslen: length of the PES (Packet Elementary Stream).
124 * @list_head: head for the list of digital TV demux feeds.
125 * @index: a unique index for each feed. Can be used as hardware
126 * pid filter index.
127 */
128struct dvb_demux_feed {
129 union {
130 struct dmx_ts_feed ts;
131 struct dmx_section_feed sec;
132 } feed;
133
134 union {
135 dmx_ts_cb ts;
136 dmx_section_cb sec;
137 } cb;
138
139 struct dvb_demux *demux;
140 void *priv;
141 enum dvb_dmx_filter_type type;
142 enum dvb_dmx_state state;
143 u16 pid;
144
145 ktime_t timeout;
146 struct dvb_demux_filter *filter;
147
148 enum ts_filter_type ts_type;
149 enum dmx_ts_pes pes_type;
150
151 int cc;
152 bool pusi_seen;
153
154 u16 peslen;
155
156 struct list_head list_head;
157 unsigned int index;
158};
159
160/**
161 * struct dvb_demux - represents a digital TV demux
162 * @dmx: embedded &struct dmx_demux with demux capabilities
163 * and callbacks.
164 * @priv: private data that can optionally be used by
165 * a DVB driver.
166 * @filternum: maximum number of DVB filters.
167 * @feednum: maximum number of DVB feeds.
168 * @start_feed: callback routine to be called in order to start
169 * a DVB feed.
170 * @stop_feed: callback routine to be called in order to stop
171 * a DVB feed.
172 * @write_to_decoder: callback routine to be called if the feed is TS and
173 * it is routed to an A/V decoder, when a new TS packet
174 * is received.
175 * Used only on av7110-av.c.
176 * @check_crc32: callback routine to check CRC. If not initialized,
177 * dvb_demux will use an internal one.
178 * @memcopy: callback routine to memcopy received data.
179 * If not initialized, dvb_demux will default to memcpy().
180 * @users: counter for the number of demux opened file descriptors.
181 * Currently, it is limited to 10 users.
182 * @filter: pointer to &struct dvb_demux_filter.
183 * @feed: pointer to &struct dvb_demux_feed.
184 * @frontend_list: &struct list_head with frontends used by the demux.
185 * @pesfilter: array of &struct dvb_demux_feed with the PES types
186 * that will be filtered.
187 * @pids: list of filtered program IDs.
188 * @feed_list: &struct list_head with feeds.
189 * @tsbuf: temporary buffer used internally to store TS packets.
190 * @tsbufp: temporary buffer index used internally.
191 * @mutex: pointer to &struct mutex used to protect feed set
192 * logic.
193 * @lock: pointer to &spinlock_t, used to protect buffer handling.
194 * @cnt_storage: buffer used for TS/TEI continuity check.
195 * @speed_last_time: &ktime_t used for TS speed check.
196 * @speed_pkts_cnt: packets count used for TS speed check.
197 */
198struct dvb_demux {
199 struct dmx_demux dmx;
200 void *priv;
201 int filternum;
202 int feednum;
203 int (*start_feed)(struct dvb_demux_feed *feed);
204 int (*stop_feed)(struct dvb_demux_feed *feed);
205 int (*write_to_decoder)(struct dvb_demux_feed *feed,
206 const u8 *buf, size_t len);
207 u32 (*check_crc32)(struct dvb_demux_feed *feed,
208 const u8 *buf, size_t len);
209 void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst,
210 const u8 *src, size_t len);
211
212 int users;
213#define MAX_DVB_DEMUX_USERS 10
214 struct dvb_demux_filter *filter;
215 struct dvb_demux_feed *feed;
216
217 struct list_head frontend_list;
218
219 struct dvb_demux_feed *pesfilter[DMX_PES_OTHER];
220 u16 pids[DMX_PES_OTHER];
221
222#define DMX_MAX_PID 0x2000
223 struct list_head feed_list;
224 u8 tsbuf[204];
225 int tsbufp;
226
227 struct mutex mutex;
228 spinlock_t lock;
229
230 uint8_t *cnt_storage; /* for TS continuity check */
231
232 ktime_t speed_last_time; /* for TS speed check */
233 uint32_t speed_pkts_cnt; /* for TS speed check */
234
235 /* private: used only on av7110 */
236 int playing;
237 int recording;
238};
239
240/**
241 * dvb_dmx_init - initialize a digital TV demux struct.
242 *
243 * @demux: &struct dvb_demux to be initialized.
244 *
245 * Before being able to register a digital TV demux struct, drivers
246 * should call this routine. In typical usage, some fields should
247 * be initialized by the driver before calling it.
248 *
249 * A typical use case is::
250 *
251 * dvb->demux.dmx.capabilities =
252 * DMX_TS_FILTERING | DMX_SECTION_FILTERING |
253 * DMX_MEMORY_BASED_FILTERING;
254 * dvb->demux.priv = dvb;
255 * dvb->demux.filternum = 256;
256 * dvb->demux.feednum = 256;
257 * dvb->demux.start_feed = driver_start_feed;
258 * dvb->demux.stop_feed = driver_stop_feed;
259 * ret = dvb_dmx_init(&dvb->demux);
260 * if (ret < 0)
261 * return ret;
262 */
263int dvb_dmx_init(struct dvb_demux *demux);
264
265/**
266 * dvb_dmx_release - releases a digital TV demux internal buffers.
267 *
268 * @demux: &struct dvb_demux to be released.
269 *
270 * The DVB core internally allocates data at @demux. This routine
271 * releases that data. Please note that the struct itself is not
272 * released, as it can be embedded in other structs.
273 */
274void dvb_dmx_release(struct dvb_demux *demux);
275
276/**
277 * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with
278 * multiple MPEG-TS packets with 188 bytes each.
279 *
280 * @demux: pointer to &struct dvb_demux
281 * @buf: buffer with data to be filtered
282 * @count: number of MPEG-TS packets with size of 188.
283 *
284 * The routine will discard any DVB packet that doesn't start with 0x47.
285 *
286 * Use this routine if the DVB demux fills MPEG-TS buffers that are
287 * already aligned.
288 *
289 * NOTE: @buf should have a size equal to ``count * 188``.
290 */
291void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
292 size_t count);
293
294/**
295 * dvb_dmx_swfilter - use dvb software filter for a buffer with
296 * multiple MPEG-TS packets with 188 bytes each.
297 *
298 * @demux: pointer to &struct dvb_demux
299 * @buf: buffer with data to be filtered
300 * @count: number of MPEG-TS packets with size of 188.
301 *
302 * If a DVB packet doesn't start with 0x47, this routine will seek to the first
303 * byte that starts with 0x47.
304 *
305 * Use this routine if the DVB demux fills buffers that may not start with
306 * a packet start mark (0x47).
307 *
308 * NOTE: @buf should have a size equal to ``count * 188``.
309 */
310void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
311
312/**
313 * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with
314 * multiple MPEG-TS packets with 204 bytes each.
315 *
316 * @demux: pointer to &struct dvb_demux
317 * @buf: buffer with data to be filtered
318 * @count: number of MPEG-TS packets with size of 204.
319 *
320 * If a DVB packet doesn't start with 0x47, this routine will seek to the first
321 * byte that starts with 0x47.
322 *
323 * Use this routine if the DVB demux fills buffers that may not start with
324 * a packet start mark (0x47).
325 *
326 * NOTE: @buf should have a size equal to ``count * 204``.
327 */
328void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
329 size_t count);
330
331/**
332 * dvb_dmx_swfilter_raw - make the raw data available to userspace without
333 * filtering
334 *
335 * @demux: pointer to &struct dvb_demux
336 * @buf: buffer with data
337 * @count: number of packets to be passed. The actual size of each packet
338 * depends on the &dvb_demux->feed->cb.ts logic.
339 *
340 * Use it if the driver needs to deliver the raw payload to userspace without
341 * passing through the kernel demux. That is meant to support some
342 * delivery systems that aren't based on MPEG-TS.
343 *
344 * This function relies on &dvb_demux->feed->cb.ts to actually handle the
345 * buffer.
346 */
347void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
348 size_t count);
349
350#endif /* _DVB_DEMUX_H_ */
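To complement the dvb_dmx_init() example embedded in the kernel-doc above, a hedged sketch of the data path after registration: a bridge driver pushes aligned 188-byte TS packets into the demux from its completion handler. The my_dev structure and handler name are illustrative.

/* Illustrative completion handler feeding aligned 188-byte TS packets. */
static void my_dma_complete(struct my_dev *dev, const u8 *buf, size_t len)
{
	/* The buffer is assumed to start on a 0x47 packet boundary. */
	dvb_dmx_swfilter_packets(&dev->demux, buf, len / 188);
}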
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
new file mode 100644
index 000000000000..331c8269c00e
--- /dev/null
+++ b/include/media/dvb_frontend.h
@@ -0,0 +1,795 @@
1/*
2 * dvb_frontend.h
3 *
4 * The Digital TV Frontend kABI defines a driver-internal interface for
5 * registering low-level, hardware-specific drivers to a hardware-independent
6 * frontend layer.
7 *
8 * Copyright (C) 2001 convergence integrated media GmbH
9 * Copyright (C) 2004 convergence GmbH
10 *
11 * Written by Ralph Metzler
12 * Overhauled by Holger Waechtler
13 * Kernel I2C stuff by Michael Hunold <hunold@convergence.de>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU Lesser General Public License
17 * as published by the Free Software Foundation; either version 2.1
18 * of the License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 *
26 * You should have received a copy of the GNU Lesser General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
29 *
30 */
31
32#ifndef _DVB_FRONTEND_H_
33#define _DVB_FRONTEND_H_
34
35#include <linux/types.h>
36#include <linux/sched.h>
37#include <linux/ioctl.h>
38#include <linux/i2c.h>
39#include <linux/module.h>
40#include <linux/errno.h>
41#include <linux/delay.h>
42#include <linux/mutex.h>
43#include <linux/slab.h>
44
45#include <linux/dvb/frontend.h>
46
47#include <media/dvbdev.h>
48
49/*
50 * Maximum number of Delivery systems per frontend. It
51 * should be smaller or equal to 32
52 */
53#define MAX_DELSYS 8
54
55/**
56 * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
57 *
58 * @min_delay_ms: minimum delay for tuning, in ms
59 * @step_size: step size between two consecutive frequencies
60 * @max_drift: maximum drift
61 *
62 * NOTE: step_size is in Hz for terrestrial/cable, or kHz for satellite
63 */
64struct dvb_frontend_tune_settings {
65 int min_delay_ms;
66 int step_size;
67 int max_drift;
68};
69
70struct dvb_frontend;
71
72/**
73 * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
74 *
75 * @name: name of the Frontend
76 * @frequency_min: minimal frequency supported
77 * @frequency_max: maximum frequency supported
78 * @frequency_step: frequency step
79 * @bandwidth_min: minimal frontend bandwidth supported
80 * @bandwidth_max: maximum frontend bandwidth supported
81 * @bandwidth_step: frontend bandwidth step
82 *
83 * NOTE: frequency parameters are in Hz for terrestrial/cable, or kHz for
84 * satellite.
85 */
86struct dvb_tuner_info {
87 char name[128];
88
89 u32 frequency_min;
90 u32 frequency_max;
91 u32 frequency_step;
92
93 u32 bandwidth_min;
94 u32 bandwidth_max;
95 u32 bandwidth_step;
96};
97
98/**
99 * struct analog_parameters - Parameters to tune into an analog/radio channel
100 *
101 * @frequency: Frequency used by analog TV tuner (either in 62.5 kHz step,
102 * for TV, or 62.5 Hz for radio)
103 * @mode: Tuner mode, as defined on enum v4l2_tuner_type
104 * @audmode: Audio mode as defined for the rxsubchans field at videodev2.h,
105 * e. g. V4L2_TUNER_MODE_*
106 * @std: TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_*
107 *
108 * Hybrid tuners should be supported by both V4L2 and DVB APIs. This
109 * struct contains the data that are used by the V4L2 side. To avoid
110 * dependencies from V4L2 headers, all enums here are declared as integers.
111 */
112struct analog_parameters {
113 unsigned int frequency;
114 unsigned int mode;
115 unsigned int audmode;
116 u64 std;
117};
118
119/**
120 * enum dvbfe_algo - defines the algorithm used to tune into a channel
121 *
122 * @DVBFE_ALGO_HW: Hardware Algorithm -
123 * Devices that support this algorithm do everything in hardware
124 * and no software support is needed to handle them.
125 * Requesting these devices to LOCK is the only thing required,
126 * device is supposed to do everything in the hardware.
127 *
128 * @DVBFE_ALGO_SW: Software Algorithm -
129 * These are dumb devices, that require software to do everything
130 *
131 * @DVBFE_ALGO_CUSTOM: Customizable Algorithm -
132 * Devices having this algorithm can be customized to have specific
133 * algorithms in the frontend driver, rather than simply doing a
134 * software zig-zag. In this case the zigzag may be hardware assisted
135 * or it may be completely done in hardware. In all cases, usage of
136 * this algorithm, in conjunction with the search and track
137 * callbacks, utilizes the driver specific algorithm.
138 *
139 * @DVBFE_ALGO_RECOVERY: Recovery Algorithm -
140 * These devices have AUTO recovery capabilities from LOCK failure
141 */
142enum dvbfe_algo {
143 DVBFE_ALGO_HW = (1 << 0),
144 DVBFE_ALGO_SW = (1 << 1),
145 DVBFE_ALGO_CUSTOM = (1 << 2),
146 DVBFE_ALGO_RECOVERY = (1 << 31)
147};
148
149/**
150 * enum dvbfe_search - search callback possible return status
151 *
152 * @DVBFE_ALGO_SEARCH_SUCCESS:
153 * The frontend search algorithm completed and returned successfully
154 *
155 * @DVBFE_ALGO_SEARCH_ASLEEP:
156 * The frontend search algorithm is sleeping
157 *
158 * @DVBFE_ALGO_SEARCH_FAILED:
159 * The frontend search for a signal failed
160 *
161 * @DVBFE_ALGO_SEARCH_INVALID:
162 * The frontend search algorithm was probably supplied with invalid
163 * parameters and the search is an invalid one
164 *
165 * @DVBFE_ALGO_SEARCH_ERROR:
166 * The frontend search algorithm failed due to some error
167 *
168 * @DVBFE_ALGO_SEARCH_AGAIN:
169 * The frontend search algorithm was requested to search again
170 */
171enum dvbfe_search {
172 DVBFE_ALGO_SEARCH_SUCCESS = (1 << 0),
173 DVBFE_ALGO_SEARCH_ASLEEP = (1 << 1),
174 DVBFE_ALGO_SEARCH_FAILED = (1 << 2),
175 DVBFE_ALGO_SEARCH_INVALID = (1 << 3),
176 DVBFE_ALGO_SEARCH_AGAIN = (1 << 4),
177 DVBFE_ALGO_SEARCH_ERROR = (1 << 31),
178};
179
180/**
181 * struct dvb_tuner_ops - Tuner information and callbacks
182 *
183 * @info: embedded &struct dvb_tuner_info with tuner properties
184 * @release: callback function called when frontend is detached.
185 * drivers should free any allocated memory.
186 * @init: callback function used to initialize the tuner device.
187 * @sleep: callback function used to put the tuner to sleep.
188 * @suspend: callback function used to inform that the Kernel will
189 * suspend.
190 * @resume: callback function used to inform that the Kernel is
191 * resuming from suspend.
192 * @set_params: callback function used to inform the tuner to tune
193 * into a digital TV channel. The properties to be used
194 * are stored at &struct dvb_frontend.dtv_property_cache.
195 * The tuner demod can change the parameters to reflect
196 * the changes needed for the channel to be tuned, and
197 * update statistics. This is the recommended way to set
198 * the tuner parameters and should be used on newer
199 * drivers.
200 * @set_analog_params: callback function used to tune into an analog TV
201 * channel on hybrid tuners. It passes @analog_parameters
202 * to the driver.
203 * @set_config: callback function used to send some tuner-specific
204 * parameters.
205 * @get_frequency: get the actual tuned frequency
206 * @get_bandwidth: get the bandwidth used by the low pass filters
207 * @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband,
208 * should return 0.
209 * @get_status: returns the frontend lock status
210 * @get_rf_strength: returns the RF signal strength. Used mostly to support
211 * analog TV and radio. Digital TV should report, instead,
212 * via DVBv5 API (&struct dvb_frontend.dtv_property_cache).
213 * @get_afc: Used only by analog TV core. Reports the frequency
214 * drift due to AFC.
215 * @calc_regs: callback function used to pass register data settings
216 * for simple tuners. Shouldn't be used on newer drivers.
217 * @set_frequency: Set a new frequency. Shouldn't be used on newer drivers.
218 * @set_bandwidth: Set a new bandwidth. Shouldn't be used on newer drivers.
219 *
220 * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for
221 * terrestrial/cable or kHz for satellite.
222 *
223 */
224struct dvb_tuner_ops {
225
226 struct dvb_tuner_info info;
227
228 void (*release)(struct dvb_frontend *fe);
229 int (*init)(struct dvb_frontend *fe);
230 int (*sleep)(struct dvb_frontend *fe);
231 int (*suspend)(struct dvb_frontend *fe);
232 int (*resume)(struct dvb_frontend *fe);
233
234 /* This is the recommended way to set the tuner */
235 int (*set_params)(struct dvb_frontend *fe);
236 int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p);
237
238 int (*set_config)(struct dvb_frontend *fe, void *priv_cfg);
239
240 int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency);
241 int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth);
242 int (*get_if_frequency)(struct dvb_frontend *fe, u32 *frequency);
243
244#define TUNER_STATUS_LOCKED 1
245#define TUNER_STATUS_STEREO 2
246 int (*get_status)(struct dvb_frontend *fe, u32 *status);
247 int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength);
248 int (*get_afc)(struct dvb_frontend *fe, s32 *afc);
249
250 /*
251 * This is support for demods like the mt352 - fills out the supplied
252 * buffer with what to write.
253 *
254 * Don't use on newer drivers.
255 */
256 int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len);
257
258 /*
259 * These are provided separately from set_params in order to
260 * facilitate silicon tuners which require sophisticated tuning loops,
261 * controlling each parameter separately.
262 *
263 * Don't use on newer drivers.
264 */
265 int (*set_frequency)(struct dvb_frontend *fe, u32 frequency);
266 int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth);
267};
268
269/**
270 * struct analog_demod_info - Information struct for analog TV part of the demod
271 *
272 * @name: Name of the analog TV demodulator
273 */
274struct analog_demod_info {
275 char *name;
276};
277
278/**
279 * struct analog_demod_ops - Demodulation information and callbacks for
280 * analog TV and radio
281 *
282 * @info: pointer to struct analog_demod_info
283 * @set_params: callback function used to inform the demod to set the
284 * demodulator parameters needed to decode an analog or
285 * radio channel. The properties are passed via
286 * &struct analog_params.
287 * @has_signal:		returns 0xffff if it has a signal, or 0 if it doesn't.
288 * @get_afc: Used only by analog TV core. Reports the frequency
289 * drift due to AFC.
290 * @tuner_status: callback function that returns tuner status bits, e. g.
291 * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO.
292 * @standby: set the tuner to standby mode.
293 * @release: callback function called when frontend is detached.
294 * drivers should free any allocated memory.
295 * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
296 * mux support instead.
297 * @set_config: callback function used to send some tuner-specific
298 * parameters.
299 */
300struct analog_demod_ops {
301
302 struct analog_demod_info info;
303
304 void (*set_params)(struct dvb_frontend *fe,
305 struct analog_parameters *params);
306 int (*has_signal)(struct dvb_frontend *fe, u16 *signal);
307 int (*get_afc)(struct dvb_frontend *fe, s32 *afc);
308 void (*tuner_status)(struct dvb_frontend *fe);
309 void (*standby)(struct dvb_frontend *fe);
310 void (*release)(struct dvb_frontend *fe);
311 int (*i2c_gate_ctrl)(struct dvb_frontend *fe, int enable);
312
313	/* This is to allow setting tuner-specific configuration */
314 int (*set_config)(struct dvb_frontend *fe, void *priv_cfg);
315};
316
317struct dtv_frontend_properties;
318
319
320/**
321 * struct dvb_frontend_ops - Demodulation information and callbacks for
322 *			digital TV
323 *
324 * @info: embedded &struct dvb_tuner_info with tuner properties
325 * @delsys: Delivery systems supported by the frontend
326 * @detach: callback function called when frontend is detached.
327 * drivers should clean up, but not yet free the &struct
328 * dvb_frontend allocation.
329 * @release: callback function called when frontend is ready to be
330 * freed.
331 * drivers should free any allocated memory.
332 * @release_sec:	callback function requesting that the Satellite Equipment
333 * Control (SEC) driver to release and free any memory
334 * allocated by the driver.
335 * @init: callback function used to initialize the tuner device.
336 * @sleep: callback function used to put the tuner to sleep.
337 * @write: callback function used by some demod legacy drivers to
338 * allow other drivers to write data into their registers.
339 * Should not be used on new drivers.
340 * @tune: callback function used by demod drivers that use
341 * @DVBFE_ALGO_HW to tune into a frequency.
342 * @get_frontend_algo: returns the desired hardware algorithm.
343 * @set_frontend: callback function used to inform the demod to set the
344 * parameters for demodulating a digital TV channel.
345 * The properties to be used are stored at &struct
346 * dvb_frontend.dtv_property_cache. The demod can change
347 * the parameters to reflect the changes needed for the
348 * channel to be decoded, and update statistics.
349 * @get_tune_settings:	callback function used to get the settings needed by the swzigzag tuning code
350 * @get_frontend:	callback function used to report the parameters
351 *			actually in use. The properties to be reported are stored
352 *			at &struct dvb_frontend.dtv_property_cache, together with
353 *			updated statistics. Please notice that it should not return
354 *			an error code if the statistics are not available
355 *			because the demod is not locked.
356 * @read_status: returns the locking status of the frontend.
357 * @read_ber: legacy callback function to return the bit error rate.
358 * Newer drivers should provide such info via DVBv5 API,
359 *			e. g. @set_frontend/@get_frontend, implementing this
360 * callback only if DVBv3 API compatibility is wanted.
361 * @read_signal_strength: legacy callback function to return the signal
362 * strength. Newer drivers should provide such info via
363 * DVBv5 API, e. g. @set_frontend/@get_frontend,
364 * implementing this callback only if DVBv3 API
365 * compatibility is wanted.
366 * @read_snr: legacy callback function to return the Signal/Noise
367 * rate. Newer drivers should provide such info via
368 * DVBv5 API, e. g. @set_frontend/@get_frontend,
369 * implementing this callback only if DVBv3 API
370 * compatibility is wanted.
371 * @read_ucblocks: legacy callback function to return the Uncorrected Error
372 * Blocks. Newer drivers should provide such info via
373 * DVBv5 API, e. g. @set_frontend/@get_frontend,
374 * implementing this callback only if DVBv3 API
375 * compatibility is wanted.
376 * @diseqc_reset_overload: callback function to implement the
377 * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite)
378 * @diseqc_send_master_cmd: callback function to implement the
379 * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite).
380 * @diseqc_recv_slave_reply: callback function to implement the
381 * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite)
382 * @diseqc_send_burst: callback function to implement the
383 * FE_DISEQC_SEND_BURST() ioctl (only Satellite).
384 * @set_tone: callback function to implement the
385 * FE_SET_TONE() ioctl (only Satellite).
386 * @set_voltage: callback function to implement the
387 * FE_SET_VOLTAGE() ioctl (only Satellite).
388 * @enable_high_lnb_voltage: callback function to implement the
389 * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite).
390 * @dishnetwork_send_legacy_command: callback function to implement the
391 * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite).
392 * Drivers should not use this, except when the DVB
393 * core emulation fails to provide proper support (e.g.
394 * if @set_voltage takes more than 8ms to work), and
395 * when backward compatibility with this legacy API is
396 * required.
397 * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
398 * mux support instead.
399 * @ts_bus_ctrl: callback function used to take control of the TS bus.
400 * @set_lna: callback function to power on/off/auto the LNA.
401 * @search: callback function used on some custom algo search algos.
402 * @tuner_ops: pointer to &struct dvb_tuner_ops
403 * @analog_ops: pointer to &struct analog_demod_ops
404 */
405struct dvb_frontend_ops {
406 struct dvb_frontend_info info;
407
408 u8 delsys[MAX_DELSYS];
409
410 void (*detach)(struct dvb_frontend *fe);
411 void (*release)(struct dvb_frontend* fe);
412 void (*release_sec)(struct dvb_frontend* fe);
413
414 int (*init)(struct dvb_frontend* fe);
415 int (*sleep)(struct dvb_frontend* fe);
416
417 int (*write)(struct dvb_frontend* fe, const u8 buf[], int len);
418
419 /* if this is set, it overrides the default swzigzag */
420 int (*tune)(struct dvb_frontend* fe,
421 bool re_tune,
422 unsigned int mode_flags,
423 unsigned int *delay,
424 enum fe_status *status);
425
426 /* get frontend tuning algorithm from the module */
427 enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe);
428
429 /* these two are only used for the swzigzag code */
430 int (*set_frontend)(struct dvb_frontend *fe);
431 int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings);
432
433 int (*get_frontend)(struct dvb_frontend *fe,
434 struct dtv_frontend_properties *props);
435
436 int (*read_status)(struct dvb_frontend *fe, enum fe_status *status);
437 int (*read_ber)(struct dvb_frontend* fe, u32* ber);
438 int (*read_signal_strength)(struct dvb_frontend* fe, u16* strength);
439 int (*read_snr)(struct dvb_frontend* fe, u16* snr);
440 int (*read_ucblocks)(struct dvb_frontend* fe, u32* ucblocks);
441
442 int (*diseqc_reset_overload)(struct dvb_frontend* fe);
443 int (*diseqc_send_master_cmd)(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd);
444 int (*diseqc_recv_slave_reply)(struct dvb_frontend* fe, struct dvb_diseqc_slave_reply* reply);
445 int (*diseqc_send_burst)(struct dvb_frontend *fe,
446 enum fe_sec_mini_cmd minicmd);
447 int (*set_tone)(struct dvb_frontend *fe, enum fe_sec_tone_mode tone);
448 int (*set_voltage)(struct dvb_frontend *fe,
449 enum fe_sec_voltage voltage);
450 int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, long arg);
451 int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd);
452 int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable);
453 int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire);
454 int (*set_lna)(struct dvb_frontend *);
455
456 /*
457 * These callbacks are for devices that implement their own
458 * tuning algorithms, rather than a simple swzigzag
459 */
460 enum dvbfe_search (*search)(struct dvb_frontend *fe);
461
462 struct dvb_tuner_ops tuner_ops;
463 struct analog_demod_ops analog_ops;
464};
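
/*
 * Illustrative sketch (not part of this header): a minimal DVBv5-style
 * &struct dvb_frontend_ops for a hypothetical "xyz" DVB-T demod. Only
 * .delsys, .info, .set_frontend and .read_status are shown; the xyz_*
 * helpers are assumptions of the example.
 */
static int xyz_demod_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	/* Program the demod from the DVBv5 property cache */
	return xyz_demod_program(fe->demodulator_priv, c->frequency,
				 c->bandwidth_hz);
}

static int xyz_demod_read_status(struct dvb_frontend *fe,
				 enum fe_status *status)
{
	/* A real driver would read the lock bits from the hardware */
	*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
		  FE_HAS_SYNC | FE_HAS_LOCK;
	return 0;
}

static const struct dvb_frontend_ops xyz_demod_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name = "XYZ DVB-T demodulator",
	},
	.set_frontend = xyz_demod_set_frontend,
	.read_status  = xyz_demod_read_status,
};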
465
466#ifdef __DVB_CORE__
467#define MAX_EVENT 8
468
469/* Used only internally at dvb_frontend.c */
470struct dvb_fe_events {
471 struct dvb_frontend_event events[MAX_EVENT];
472 int eventw;
473 int eventr;
474 int overflow;
475 wait_queue_head_t wait_queue;
476 struct mutex mtx;
477};
478#endif
479
480/**
481 * struct dtv_frontend_properties - contains a list of properties that are
482 * specific to a digital TV standard.
483 *
484 * @frequency: frequency in Hz for terrestrial/cable or in kHz for
485 * Satellite
486 * @modulation: Frontend modulation type
487 * @voltage: SEC voltage (only Satellite)
488 * @sectone: SEC tone mode (only Satellite)
489 * @inversion: Spectral inversion
490 * @fec_inner: Forward error correction inner Code Rate
491 * @transmission_mode: Transmission Mode
492 * @bandwidth_hz: Bandwidth, in Hz. A zero value means that userspace
493 * wants to autodetect.
494 * @guard_interval: Guard Interval
495 * @hierarchy: Hierarchy
496 * @symbol_rate: Symbol Rate
497 * @code_rate_HP: high priority stream code rate
498 * @code_rate_LP: low priority stream code rate
499 * @pilot: Enable/disable/autodetect pilot tones
500 * @rolloff: Rolloff factor (alpha)
501 * @delivery_system: FE delivery system (e. g. digital TV standard)
502 * @interleaving: interleaving
503 * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard)
504 * @isdbt_sb_mode: ISDB-T Sound Broadcast (SB) mode (only ISDB standard)
505 * @isdbt_sb_subchannel: ISDB-T SB subchannel (only ISDB standard)
506 * @isdbt_sb_segment_idx: ISDB-T SB segment index (only ISDB standard)
507 * @isdbt_sb_segment_count: ISDB-T SB segment count (only ISDB standard)
508 * @isdbt_layer_enabled: ISDB Layer enabled (only ISDB standard)
509 * @layer: ISDB per-layer data (only ISDB standard)
510 * @layer.segment_count: Segment Count;
511 * @layer.fec: per layer code rate;
512 * @layer.modulation: per layer modulation;
513 * @layer.interleaving: per layer interleaving.
514 * @stream_id: If different than zero, enable substream filtering, if
515 * hardware supports (DVB-S2 and DVB-T2).
516 * @scrambling_sequence_index: Carries the index of the DVB-S2 physical layer
517 * scrambling sequence.
518 * @atscmh_fic_ver: Version number of the FIC (Fast Information Channel)
519 * signaling data (only ATSC-M/H)
520 * @atscmh_parade_id: Parade identification number (only ATSC-M/H)
521 * @atscmh_nog: Number of MH groups per MH subframe for a designated
522 * parade (only ATSC-M/H)
523 * @atscmh_tnog: Total number of MH groups including all MH groups
524 * belonging to all MH parades in one MH subframe
525 * (only ATSC-M/H)
526 * @atscmh_sgn: Start group number (only ATSC-M/H)
527 * @atscmh_prc: Parade repetition cycle (only ATSC-M/H)
528 * @atscmh_rs_frame_mode: Reed Solomon (RS) frame mode (only ATSC-M/H)
529 * @atscmh_rs_frame_ensemble: RS frame ensemble (only ATSC-M/H)
530 * @atscmh_rs_code_mode_pri: RS code mode pri (only ATSC-M/H)
531 * @atscmh_rs_code_mode_sec: RS code mode sec (only ATSC-M/H)
532 * @atscmh_sccc_block_mode: Series Concatenated Convolutional Code (SCCC)
533 * Block Mode (only ATSC-M/H)
534 * @atscmh_sccc_code_mode_a: SCCC code mode A (only ATSC-M/H)
535 * @atscmh_sccc_code_mode_b: SCCC code mode B (only ATSC-M/H)
536 * @atscmh_sccc_code_mode_c: SCCC code mode C (only ATSC-M/H)
537 * @atscmh_sccc_code_mode_d: SCCC code mode D (only ATSC-M/H)
538 * @lna:		Power ON/OFF/AUTO the Low Noise Amplifier (LNA)
539 * @strength: DVBv5 API statistics: Signal Strength
540 * @cnr: DVBv5 API statistics: Signal to Noise ratio of the
541 * (main) carrier
542 * @pre_bit_error: DVBv5 API statistics: pre-Viterbi bit error count
543 * @pre_bit_count: DVBv5 API statistics: pre-Viterbi bit count
544 * @post_bit_error: DVBv5 API statistics: post-Viterbi bit error count
545 * @post_bit_count: DVBv5 API statistics: post-Viterbi bit count
546 * @block_error: DVBv5 API statistics: block error count
547 * @block_count: DVBv5 API statistics: block count
548 *
549 * NOTE: derived statistics like Uncorrected Error blocks (UCE) are
550 * calculated in userspace.
551 *
552 * Only a subset of the properties are needed for a given delivery system.
553 * For more info, consult the media_api.html with the documentation of the
554 * Userspace API.
555 */
556struct dtv_frontend_properties {
557 u32 frequency;
558 enum fe_modulation modulation;
559
560 enum fe_sec_voltage voltage;
561 enum fe_sec_tone_mode sectone;
562 enum fe_spectral_inversion inversion;
563 enum fe_code_rate fec_inner;
564 enum fe_transmit_mode transmission_mode;
565 u32 bandwidth_hz; /* 0 = AUTO */
566 enum fe_guard_interval guard_interval;
567 enum fe_hierarchy hierarchy;
568 u32 symbol_rate;
569 enum fe_code_rate code_rate_HP;
570 enum fe_code_rate code_rate_LP;
571
572 enum fe_pilot pilot;
573 enum fe_rolloff rolloff;
574
575 enum fe_delivery_system delivery_system;
576
577 enum fe_interleaving interleaving;
578
579 /* ISDB-T specifics */
580 u8 isdbt_partial_reception;
581 u8 isdbt_sb_mode;
582 u8 isdbt_sb_subchannel;
583 u32 isdbt_sb_segment_idx;
584 u32 isdbt_sb_segment_count;
585 u8 isdbt_layer_enabled;
586 struct {
587 u8 segment_count;
588 enum fe_code_rate fec;
589 enum fe_modulation modulation;
590 u8 interleaving;
591 } layer[3];
592
593 /* Multistream specifics */
594 u32 stream_id;
595
596 /* Physical Layer Scrambling specifics */
597 u32 scrambling_sequence_index;
598
599 /* ATSC-MH specifics */
600 u8 atscmh_fic_ver;
601 u8 atscmh_parade_id;
602 u8 atscmh_nog;
603 u8 atscmh_tnog;
604 u8 atscmh_sgn;
605 u8 atscmh_prc;
606
607 u8 atscmh_rs_frame_mode;
608 u8 atscmh_rs_frame_ensemble;
609 u8 atscmh_rs_code_mode_pri;
610 u8 atscmh_rs_code_mode_sec;
611 u8 atscmh_sccc_block_mode;
612 u8 atscmh_sccc_code_mode_a;
613 u8 atscmh_sccc_code_mode_b;
614 u8 atscmh_sccc_code_mode_c;
615 u8 atscmh_sccc_code_mode_d;
616
617 u32 lna;
618
619 /* statistics data */
620 struct dtv_fe_stats strength;
621 struct dtv_fe_stats cnr;
622 struct dtv_fe_stats pre_bit_error;
623 struct dtv_fe_stats pre_bit_count;
624 struct dtv_fe_stats post_bit_error;
625 struct dtv_fe_stats post_bit_count;
626 struct dtv_fe_stats block_error;
627 struct dtv_fe_stats block_count;
628};
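
/*
 * Illustrative sketch (not part of this header): filling the DVBv5
 * statistics of the property cache from a driver's read_status()
 * implementation. The 21.5 dB figure is a made-up reading.
 */
static int xyz_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
		  FE_HAS_SYNC | FE_HAS_LOCK;

	/* C/N of the main carrier, reported in 0.001 dB units */
	c->cnr.len = 1;
	c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
	c->cnr.stat[0].svalue = 21500;

	/* Statistics the hardware cannot measure are flagged as such */
	c->block_error.len = 1;
	c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;

	return 0;
}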
629
630#define DVB_FE_NO_EXIT 0
631#define DVB_FE_NORMAL_EXIT 1
632#define DVB_FE_DEVICE_REMOVED 2
633#define DVB_FE_DEVICE_RESUME 3
634
635/**
636 * struct dvb_frontend - Frontend structure to be used on drivers.
637 *
638 * @refcount: refcount to keep track of &struct dvb_frontend
639 * references
640 * @ops: embedded &struct dvb_frontend_ops
641 * @dvb: pointer to &struct dvb_adapter
642 * @demodulator_priv: demod private data
643 * @tuner_priv: tuner private data
644 * @frontend_priv: frontend private data
645 * @sec_priv: SEC private data
646 * @analog_demod_priv: Analog demod private data
647 * @dtv_property_cache: embedded &struct dtv_frontend_properties
648 * @callback: callback function used on some drivers to call
649 * either the tuner or the demodulator.
650 * @id: Frontend ID
651 * @exit: Used to inform the DVB core that the frontend
652 *			thread should exit (usually, this means that the hardware
653 *			got disconnected).
654 */
655
656struct dvb_frontend {
657 struct kref refcount;
658 struct dvb_frontend_ops ops;
659 struct dvb_adapter *dvb;
660 void *demodulator_priv;
661 void *tuner_priv;
662 void *frontend_priv;
663 void *sec_priv;
664 void *analog_demod_priv;
665 struct dtv_frontend_properties dtv_property_cache;
666#define DVB_FRONTEND_COMPONENT_TUNER 0
667#define DVB_FRONTEND_COMPONENT_DEMOD 1
668 int (*callback)(void *adapter_priv, int component, int cmd, int arg);
669 int id;
670 unsigned int exit;
671};
672
673/**
674 * dvb_register_frontend() - Registers a DVB frontend at the adapter
675 *
676 * @dvb: pointer to &struct dvb_adapter
677 * @fe: pointer to &struct dvb_frontend
678 *
679 * Allocates and initializes the private data needed by the frontend core to
680 * manage the frontend and calls dvb_register_device() to register a new
681 * frontend. It also cleans the property cache that stores the frontend
682 * parameters and selects the first available delivery system.
683 */
684int dvb_register_frontend(struct dvb_adapter *dvb,
685 struct dvb_frontend *fe);
686
687/**
688 * dvb_unregister_frontend() - Unregisters a DVB frontend
689 *
690 * @fe: pointer to &struct dvb_frontend
691 *
692 * Stops the frontend kthread, calls dvb_unregister_device() and frees the
693 * private frontend data allocated by dvb_register_frontend().
694 *
695 * NOTE: This function doesn't free the memory allocated by the demod,
696 * by the SEC driver and by the tuner. In order to free it, an explicit call to
697 * dvb_frontend_detach() is needed, after calling this function.
698 */
699int dvb_unregister_frontend(struct dvb_frontend *fe);
700
701/**
702 * dvb_frontend_detach() - Detaches and frees frontend specific data
703 *
704 * @fe: pointer to &struct dvb_frontend
705 *
706 * This function should be called after dvb_unregister_frontend(). It
707 * calls the SEC, tuner and demod release functions:
708 * &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release,
709 * &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release.
710 *
711 * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases
712 * the module reference count, needed to allow userspace to remove the
713 * previously used DVB frontend modules.
714 */
715void dvb_frontend_detach(struct dvb_frontend *fe);
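
/*
 * Illustrative sketch (not part of this header): the usual register/
 * unregister/detach ordering in a bridge driver. xyz_demod_attach(),
 * xyz_cfg and the xyz_dev layout are assumptions of the example.
 */
static int xyz_frontend_init(struct xyz_dev *dev)
{
	int ret;

	dev->fe = xyz_demod_attach(&xyz_cfg, &dev->i2c_adap);	/* hypothetical */
	if (!dev->fe)
		return -ENODEV;

	ret = dvb_register_frontend(&dev->adapter, dev->fe);
	if (ret) {
		dvb_frontend_detach(dev->fe);	/* never registered: detach only */
		return ret;
	}
	return 0;
}

static void xyz_frontend_exit(struct xyz_dev *dev)
{
	/* teardown order: unregister first, then free demod/tuner/SEC data */
	dvb_unregister_frontend(dev->fe);
	dvb_frontend_detach(dev->fe);
}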
716
717/**
718 * dvb_frontend_suspend() - Suspends a Digital TV frontend
719 *
720 * @fe: pointer to &struct dvb_frontend
721 *
722 * This function prepares a Digital TV frontend to suspend.
723 *
724 * In order to prepare the tuner to suspend, if
725 * &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise,
726 * it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available.
727 *
728 * It will also call &dvb_frontend_ops.sleep\(\) to put the demod to suspend.
729 *
730 * The drivers should also call dvb_frontend_suspend\(\) as part of their
731 * handler for the &device_driver.suspend\(\).
732 */
733int dvb_frontend_suspend(struct dvb_frontend *fe);
734
735/**
736 * dvb_frontend_resume() - Resumes a Digital TV frontend
737 *
738 * @fe: pointer to &struct dvb_frontend
739 *
740 * This function resumes the usual operation of the tuner after resume.
741 *
742 * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init\(\).
743 *
744 * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it.
745 * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available.
746 *
747 * Once tuner and demods are resumed, it will enforce that the SEC voltage and
748 * tone are restored to their previous values and wake up the frontend's
749 * kthread in order to retune the frontend.
750 *
751 * The drivers should also call dvb_frontend_resume() as part of their
752 * handler for the &device_driver.resume\(\).
753 */
754int dvb_frontend_resume(struct dvb_frontend *fe);
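
/*
 * Illustrative sketch (not part of this header): hooking the frontend PM
 * helpers into a driver's dev_pm_ops. "struct xyz_priv" and the way the
 * frontend pointer is stored are assumptions of the example.
 */
static int __maybe_unused xyz_pm_suspend(struct device *dev)
{
	struct xyz_priv *priv = dev_get_drvdata(dev);

	return dvb_frontend_suspend(priv->fe);	/* tuner suspend/sleep + demod sleep */
}

static int __maybe_unused xyz_pm_resume(struct device *dev)
{
	struct xyz_priv *priv = dev_get_drvdata(dev);

	return dvb_frontend_resume(priv->fe);	/* re-init, restore SEC, retune */
}

static SIMPLE_DEV_PM_OPS(xyz_pm_ops, xyz_pm_suspend, xyz_pm_resume);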
755
756/**
757 * dvb_frontend_reinitialise() - forces a reinitialisation of the frontend
758 *
759 * @fe: pointer to &struct dvb_frontend
760 *
761 * Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\),
762 * and resets SEC tone and voltage (for Satellite systems).
763 *
764 * NOTE: Currently, this function is used only by one driver (budget-av).
765 * It seems to exist in order to address some special issue with that
766 * specific frontend.
767 */
768void dvb_frontend_reinitialise(struct dvb_frontend *fe);
769
770/**
771 * dvb_frontend_sleep_until() - Sleep until @waketime plus the @add_usec
772 *			 interval
773 *
774 * @waketime: pointer to &ktime_t
775 * @add_usec: time to sleep, in microseconds
776 *
777 * This function is used to measure the time required for the
778 * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise
779 * as possible, as it affects the detection of the dish tone command at the
780 * satellite subsystem.
781 *
782 * It's used internally by the DVB frontend core, in order to emulate
783 * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\)
784 * callback.
785 *
786 * NOTE: it should not be used by drivers, as the emulation for the
787 * legacy callback is provided by the Kernel. The only situation where
788 * drivers should use it is when there are bugs at the hardware that
789 * prevent the core emulation from working. In such cases, the driver
790 * would implement its own &dvb_frontend_ops.dishnetwork_send_legacy_command\(\)
791 * and call this function directly.
792 */
793void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec);
794
795#endif
diff --git a/include/media/dvb_math.h b/include/media/dvb_math.h
new file mode 100644
index 000000000000..8690ec42954d
--- /dev/null
+++ b/include/media/dvb_math.h
@@ -0,0 +1,66 @@
1/*
2 * dvb-math provides some complex fixed-point math
3 * operations shared between the dvb related stuff
4 *
5 * Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as
9 * published by the Free Software Foundation; either version 2.1 of
10 * the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU Lesser General Public License for more details.
16 */
17
18#ifndef __DVB_MATH_H
19#define __DVB_MATH_H
20
21#include <linux/types.h>
22
23/**
24 * intlog2 - computes log2 of a value; the result is shifted left by 24 bits
25 *
26 * @value: The value (must be != 0)
27 *
28 * to use rational values you can use the following method:
29 *
30 * intlog2(value) = intlog2(value * 2^x) - x * 2^24
31 *
32 * Some usecase examples:
33 *
34 * intlog2(8) will give 3 << 24 = 3 * 2^24
35 *
36 * intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
37 *
38 * intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
39 *
40 *
41 * return: log2(value) * 2^24
42 */
43extern unsigned int intlog2(u32 value);
44
45/**
46 * intlog10 - computes log10 of a value; the result is shifted left by 24 bits
47 *
48 * @value: The value (must be != 0)
49 *
50 * to use rational values you can use the following method:
51 *
52 * intlog10(value) = intlog10(value * 10^x) - x * 2^24
53 *
54 * A usecase example:
55 *
56 * intlog10(1000) will give 3 << 24 = 3 * 2^24
57 *
58 * due to the implementation, intlog10(1000) might not be exactly 3 * 2^24
59 *
60 * look at intlog2 for similar examples
61 *
62 * return: log10(value) * 2^24
63 */
64extern unsigned int intlog10(u32 value);
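
/*
 * Illustrative sketch (not part of this header): converting a linear C/N
 * reading into 0.001 dB units with intlog10(). The helper name and the
 * assumption that the input is a plain linear ratio are examples only.
 */
static inline u64 xyz_cnr_to_mdb(u32 cnr_linear)
{
	/* intlog10() returns log10(x) << 24; 10 * log10(x) dB, in mdB units:
	 * e.g. cnr_linear == 1000 yields 30000 (i.e. 30 dB).
	 */
	return ((u64)intlog10(cnr_linear) * 10000) >> 24;
}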
65
66#endif
diff --git a/include/media/dvb_net.h b/include/media/dvb_net.h
new file mode 100644
index 000000000000..5e31d37f25fa
--- /dev/null
+++ b/include/media/dvb_net.h
@@ -0,0 +1,93 @@
1/*
2 * dvb_net.h
3 *
4 * Copyright (C) 2001 Ralph Metzler for convergence integrated media GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public License
8 * as published by the Free Software Foundation; either version 2.1
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef _DVB_NET_H_
19#define _DVB_NET_H_
20
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/inetdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/skbuff.h>
26
27#include <media/dvbdev.h>
28
29#define DVB_NET_DEVICES_MAX 10
30
31#ifdef CONFIG_DVB_NET
32
33/**
34 * struct dvb_net - describes a DVB network interface
35 *
36 * @dvbdev: pointer to &struct dvb_device.
37 * @device: array of pointers to &struct net_device.
38 * @state:		array of integers, one per net device. A value
39 *			different from zero means that the interface is
40 *			in use.
41 * @exit: flag to indicate when the device is being removed.
42 * @demux: pointer to &struct dmx_demux.
43 * @ioctl_mutex: protect access to this struct.
44 *
45 * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
46 * devices.
47 */
48
49struct dvb_net {
50 struct dvb_device *dvbdev;
51 struct net_device *device[DVB_NET_DEVICES_MAX];
52 int state[DVB_NET_DEVICES_MAX];
53 unsigned int exit:1;
54 struct dmx_demux *demux;
55 struct mutex ioctl_mutex;
56};
57
58/**
59 * dvb_net_init - initializes a digital TV network device and registers it.
60 *
61 * @adap: pointer to &struct dvb_adapter.
62 * @dvbnet: pointer to &struct dvb_net.
63 * @dmxdemux: pointer to &struct dmx_demux.
64 */
65int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet,
66 struct dmx_demux *dmxdemux);
67
68/**
69 * dvb_net_release - releases a digital TV network device and unregisters it.
70 *
71 * @dvbnet: pointer to &struct dvb_net.
72 */
73void dvb_net_release(struct dvb_net *dvbnet);
74
75#else
76
77struct dvb_net {
78 struct dvb_device *dvbdev;
79};
80
81static inline void dvb_net_release(struct dvb_net *dvbnet)
82{
83}
84
85static inline int dvb_net_init(struct dvb_adapter *adap,
86 struct dvb_net *dvbnet, struct dmx_demux *dmx)
87{
88 return 0;
89}
90
91#endif /* ifdef CONFIG_DVB_NET */
92
93#endif
diff --git a/include/media/dvb_ringbuffer.h b/include/media/dvb_ringbuffer.h
new file mode 100644
index 000000000000..8ed6bcc3a56e
--- /dev/null
+++ b/include/media/dvb_ringbuffer.h
@@ -0,0 +1,280 @@
1/*
2 *
3 * dvb_ringbuffer.h: ring buffer implementation for the dvb driver
4 *
5 * Copyright (C) 2003 Oliver Endriss
6 * Copyright (C) 2004 Andrew de Quincey
7 *
8 * based on code originally found in av7110.c & dvb_ci.c:
9 * Copyright (C) 1999-2003 Ralph Metzler & Marcus Metzler
10 * for convergence integrated media GmbH
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU Lesser General Public License for more details.
21 */
22
23#ifndef _DVB_RINGBUFFER_H_
24#define _DVB_RINGBUFFER_H_
25
26#include <linux/spinlock.h>
27#include <linux/wait.h>
28
29/**
30 * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework
31 *
32 * @data: Area where the ringbuffer data is written
33 * @size: size of the ringbuffer
34 * @pread: next position to read
35 * @pwrite: next position to write
36 * @error: used by ringbuffer clients to indicate that an error happened.
37 * @queue: Wait queue used by ringbuffer clients to indicate when buffer
38 * was filled
39 * @lock: Spinlock used to protect the ringbuffer
40 */
41struct dvb_ringbuffer {
42 u8 *data;
43 ssize_t size;
44 ssize_t pread;
45 ssize_t pwrite;
46 int error;
47
48 wait_queue_head_t queue;
49 spinlock_t lock;
50};
51
52#define DVB_RINGBUFFER_PKTHDRSIZE 3
53
54/**
55 * dvb_ringbuffer_init - initialize ring buffer, lock and queue
56 *
57 * @rbuf: pointer to struct dvb_ringbuffer
58 * @data: pointer to the buffer where the data will be stored
59 * @len: size of the ring buffer, in bytes
60 */
61extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data,
62 size_t len);
63
64/**
65 * dvb_ringbuffer_empty - test whether buffer is empty
66 *
67 * @rbuf: pointer to struct dvb_ringbuffer
68 */
69extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf);
70
71/**
72 * dvb_ringbuffer_free - returns the number of free bytes in the buffer
73 *
74 * @rbuf: pointer to struct dvb_ringbuffer
75 *
76 * Return: number of free bytes in the buffer
77 */
78extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf);
79
80/**
81 * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer
82 *
83 * @rbuf: pointer to struct dvb_ringbuffer
84 *
85 * Return: number of bytes waiting in the buffer
86 */
87extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf);
88
89/**
90 * dvb_ringbuffer_reset - resets the ringbuffer to initial state
91 *
92 * @rbuf: pointer to struct dvb_ringbuffer
93 *
94 * Resets the read and write pointers to zero and flushes the buffer.
95 *
96 * This counts as a read and write operation
97 */
98extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf);
99
100/*
101 * read routines & macros
102 */
103
104/**
105 * dvb_ringbuffer_flush - flush buffer
106 *
107 * @rbuf: pointer to struct dvb_ringbuffer
108 */
109extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf);
110
111/**
112 * dvb_ringbuffer_flush_spinlock_wakeup- flush buffer protected by spinlock
113 * and wake-up waiting task(s)
114 *
115 * @rbuf: pointer to struct dvb_ringbuffer
116 */
117extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
118
119/**
120 * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer
121 *
122 * @rbuf: pointer to struct dvb_ringbuffer
123 * @offs: offset inside the ringbuffer
124 */
125#define DVB_RINGBUFFER_PEEK(rbuf, offs) \
126 ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])
127
128/**
129 * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
130 *
131 * @rbuf: pointer to struct dvb_ringbuffer
132 * @num: number of bytes to advance
133 */
134#define DVB_RINGBUFFER_SKIP(rbuf, num) {\
135 (rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\
136}
137
138/**
139 * dvb_ringbuffer_read_user - Reads a buffer into a user pointer
140 *
141 * @rbuf: pointer to struct dvb_ringbuffer
142 * @buf: pointer to the buffer where the data will be stored
143 * @len: number of bytes to read from the ring buffer into @buf
144 *
145 * This variant assumes that the buffer is at userspace memory. So,
146 * it will internally call copy_to_user().
147 *
148 * Return: number of bytes transferred or -EFAULT
149 */
150extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
151 u8 __user *buf, size_t len);
152
153/**
154 * dvb_ringbuffer_read - Reads a buffer into a pointer
155 *
156 * @rbuf: pointer to struct dvb_ringbuffer
157 * @buf: pointer to the buffer where the data will be stored
158 * @len: number of bytes to read from the ring buffer into @buf
159 *
160 * This variant assumes that the buffer is at Kernel space memory.
163 */
164extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
165 u8 *buf, size_t len);
166
167/*
168 * write routines & macros
169 */
170
171/**
172 * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer
173 *
174 * @rbuf: pointer to struct dvb_ringbuffer
175 * @byte: byte to write
176 */
177#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \
178 { (rbuf)->data[(rbuf)->pwrite] = (byte); \
179 (rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; }
180
181/**
182 * dvb_ringbuffer_write - Writes a buffer into the ringbuffer
183 *
184 * @rbuf: pointer to struct dvb_ringbuffer
185 * @buf: pointer to the buffer where the data will be read from
186 * @len: number of bytes to write from @buf into the ring buffer
187 *
188 * This variant assumes that the buffer is at Kernel space memory
189 *
190 * Return: number of bytes transferred or -EFAULT
191 */
192extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
193 size_t len);
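
/*
 * Illustrative sketch (not part of this header): a kernel-space producer/
 * consumer pair built on the ring buffer API above. The 8 KiB size and the
 * 188-byte TS packet are arbitrary, and error handling is elided.
 */
static void xyz_ringbuffer_demo(void)
{
	static u8 storage[8192];
	u8 pkt[188] = { 0x47 };		/* fake TS packet, sync byte only */
	u8 out[188];
	struct dvb_ringbuffer rb;

	dvb_ringbuffer_init(&rb, storage, sizeof(storage));

	/* producer: only write when there is room for a whole packet */
	if (dvb_ringbuffer_free(&rb) >= sizeof(pkt))
		dvb_ringbuffer_write(&rb, pkt, sizeof(pkt));

	/* consumer: drain complete packets as they become available */
	while (dvb_ringbuffer_avail(&rb) >= sizeof(out))
		dvb_ringbuffer_read(&rb, out, sizeof(out));
}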
194
195/**
196 * dvb_ringbuffer_write_user - Writes a buffer received via a user pointer
197 *
198 * @rbuf: pointer to struct dvb_ringbuffer
199 * @buf: pointer to the buffer where the data will be read from
200 * @len: number of bytes to write from @buf into the ring buffer
201 *
202 * This variant assumes that the buffer is at userspace memory. So,
203 * it will internally call copy_from_user().
204 *
205 * Return: number of bytes transferred or -EFAULT
206 */
207extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
208 const u8 __user *buf, size_t len);
209
210/**
211 * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
212 *
213 * @rbuf: Ringbuffer to write to.
214 * @buf: Buffer to write.
215 * @len: Length of buffer (currently limited to 65535 bytes max).
216 *
217 * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
218 */
219extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf,
220 size_t len);
221
222/**
223 * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer.
224 *
225 * @rbuf: Ringbuffer concerned.
226 * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
227 * @offset: Offset into packet to read from.
228 * @buf: Destination buffer for data.
229 * @len: Size of destination buffer.
230 *
231 * Return: Number of bytes read, or -EFAULT.
232 *
233 * .. note::
234 *
235 * unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer
236 * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a
237 * packet as no longer required.
238 */
239extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf,
240 size_t idx,
241 int offset, u8 __user *buf,
242 size_t len);
243
244/**
245 * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer.
246 * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer
247 * in the ringbuffer.
248 *
249 * @rbuf: Ringbuffer concerned.
250 * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
251 * @offset: Offset into packet to read from.
252 * @buf: Destination buffer for data.
253 * @len: Size of destination buffer.
254 *
255 * Return: Number of bytes read, or -EFAULT.
256 */
257extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
258 int offset, u8 *buf, size_t len);
259
260/**
261 * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer.
262 *
263 * @rbuf: Ring buffer concerned.
264 * @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
265 */
266extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);
267
268/**
269 * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer.
270 *
271 * @rbuf: Ringbuffer concerned.
272 * @idx: Previous packet index, or -1 to return the first packet index.
273 * @pktlen: On success, will be updated to contain the length of the packet
274 * in bytes.
275 * Return: Packet index (if >= 0), or -1 if no packets are available.
276 */
277extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
278 size_t idx, size_t *pktlen);
279
280#endif /* _DVB_RINGBUFFER_H_ */
diff --git a/include/media/dvb_vb2.h b/include/media/dvb_vb2.h
new file mode 100644
index 000000000000..01d1202d1a55
--- /dev/null
+++ b/include/media/dvb_vb2.h
@@ -0,0 +1,266 @@
1/*
2 * SPDX-License-Identifier: GPL-2.0
3 *
4 * dvb-vb2.h - DVB driver helper framework for streaming I/O
5 *
6 * Copyright (C) 2015 Samsung Electronics
7 *
8 * Author: jh1009.sung@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation.
13 */
14
15#ifndef _DVB_VB2_H
16#define _DVB_VB2_H
17
18#include <linux/mutex.h>
19#include <linux/poll.h>
20#include <linux/dvb/dmx.h>
21#include <media/videobuf2-core.h>
22#include <media/videobuf2-dma-contig.h>
23#include <media/videobuf2-vmalloc.h>
24
25/**
26 * enum dvb_buf_type - types of Digital TV memory-mapped buffers
27 *
28 * @DVB_BUF_TYPE_CAPTURE: buffer is filled by the Kernel,
29 * with a received Digital TV stream
30 */
31enum dvb_buf_type {
32 DVB_BUF_TYPE_CAPTURE = 1,
33};
34
35/**
36 * enum dvb_vb2_states - states to control VB2 state machine
37 * @DVB_VB2_STATE_NONE:
38 * VB2 engine not initialized yet, init failed or VB2 was released.
39 * @DVB_VB2_STATE_INIT:
40 * VB2 engine initialized.
41 * @DVB_VB2_STATE_REQBUFS:
42 * Buffers were requested
43 * @DVB_VB2_STATE_STREAMON:
44 * VB2 is streaming. Callers should not check it directly. Instead,
45 * they should use dvb_vb2_is_streaming().
46 *
47 * Note:
48 *
49 * Callers should not touch the state machine directly. This
50 * is handled inside dvb_vb2.c.
51 */
52enum dvb_vb2_states {
53 DVB_VB2_STATE_NONE = 0x0,
54 DVB_VB2_STATE_INIT = 0x1,
55 DVB_VB2_STATE_REQBUFS = 0x2,
56 DVB_VB2_STATE_STREAMON = 0x4,
57};
58
59#define DVB_VB2_NAME_MAX (20)
60
61/**
62 * struct dvb_buffer - video buffer information for v4l2.
63 *
64 * @vb: embedded struct &vb2_buffer.
65 * @list: list of &struct dvb_buffer.
66 */
67struct dvb_buffer {
68 struct vb2_buffer vb;
69 struct list_head list;
70};
71
72/**
73 * struct dvb_vb2_ctx - control struct for VB2 handler
74 * @vb_q: pointer to &struct vb2_queue with videobuf2 queue.
75 * @mutex: mutex to serialize vb2 operations. Used by
76 * vb2 core %wait_prepare and %wait_finish operations.
77 * @slock: spin lock used to protect buffer filling at dvb_vb2.c.
78 * @dvb_q: List of buffers that are not filled yet.
79 * @buf:	Pointer to the buffer that is currently being filled.
80 * @offset: index to the next position at the @buf to be filled.
81 * @remain: How many bytes are left to be filled at @buf.
82 * @state: bitmask of buffer states as defined by &enum dvb_vb2_states.
83 * @buf_siz: size of each VB2 buffer.
84 * @buf_cnt: number of VB2 buffers.
85 * @nonblocking:
86 *		If different from zero, the device is operating in non-blocking
87 * mode.
88 * @name: name of the device type. Currently, it can either be
89 * "dvr" or "demux_filter".
90 */
91struct dvb_vb2_ctx {
92 struct vb2_queue vb_q;
93 struct mutex mutex;
94 spinlock_t slock;
95 struct list_head dvb_q;
96 struct dvb_buffer *buf;
97 int offset;
98 int remain;
99 int state;
100 int buf_siz;
101 int buf_cnt;
102 int nonblocking;
103 char name[DVB_VB2_NAME_MAX + 1];
104};
105
106#ifndef DVB_MMAP
107static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx,
108 const char *name, int non_blocking)
109{
110 return 0;
111};
112static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
113{
114 return 0;
115};
116#define dvb_vb2_is_streaming(ctx) (0)
117#define dvb_vb2_fill_buffer(ctx, file, wait) (0)
118
119static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx,
120 struct file *file,
121 poll_table *wait)
122{
123 return 0;
124}
125#else
126/**
127 * dvb_vb2_init - initializes VB2 handler
128 *
129 * @ctx: control struct for VB2 handler
130 * @name: name for the VB2 handler
131 * @non_blocking:
132 * if not zero, it means that the device is at non-blocking mode
133 */
134int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int non_blocking);
135
136/**
137 * dvb_vb2_release - Releases the VB2 handler allocated resources and
138 *	puts @ctx at the DVB_VB2_STATE_NONE state.
139 * @ctx: control struct for VB2 handler
140 */
141int dvb_vb2_release(struct dvb_vb2_ctx *ctx);
142
143/**
144 * dvb_vb2_is_streaming - checks if the VB2 handler is streaming
145 * @ctx: control struct for VB2 handler
146 *
147 * Return: 0 if not streaming, 1 otherwise.
148 */
149int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx);
150
151/**
152 * dvb_vb2_fill_buffer - fills a VB2 buffer
153 * @ctx: control struct for VB2 handler
154 * @src: place where the data is stored
155 * @len: number of bytes to be copied from @src
156 */
157int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
158 const unsigned char *src, int len);
159
160/**
161 * dvb_vb2_poll - Wrapper to vb2_core_poll() for Digital TV
162 * buffer handling.
163 *
164 * @ctx: control struct for VB2 handler
165 * @file: &struct file argument passed to the poll
166 * file operation handler.
167 * @wait: &poll_table wait argument passed to the poll
168 * file operation handler.
169 *
170 * Implements the poll() syscall logic.
171 */
172__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file,
173 poll_table *wait);
174#endif
175
176/**
177 * dvb_vb2_stream_on() - Wrapper to vb2_core_streamon() for Digital TV
178 * buffer handling.
179 *
180 * @ctx: control struct for VB2 handler
181 *
182 * Starts dvb streaming
183 */
184int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx);
185/**
186 * dvb_vb2_stream_off() - Wrapper to vb2_core_streamoff() for Digital TV
187 * buffer handling.
188 *
189 * @ctx: control struct for VB2 handler
190 *
191 * Stops dvb streaming
192 */
193int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx);
194
195/**
196 * dvb_vb2_reqbufs() - Wrapper to vb2_core_reqbufs() for Digital TV
197 * buffer handling.
198 *
199 * @ctx: control struct for VB2 handler
200 * @req: &struct dmx_requestbuffers passed from userspace in
201 * order to handle &DMX_REQBUFS.
202 *
203 * Initiate streaming by requesting a number of buffers. Also used to
204 * free previously requested buffers, if ``req->count`` is zero.
205 */
206int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req);
207
208/**
209 * dvb_vb2_querybuf() - Wrapper to vb2_core_querybuf() for Digital TV
210 * buffer handling.
211 *
212 * @ctx: control struct for VB2 handler
213 * @b: &struct dmx_buffer passed from userspace in
214 * order to handle &DMX_QUERYBUF.
215 *
216 * Queries the properties of the requested buffer, so that it can be memory mapped.
217 */
218int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b);
219
220/**
221 * dvb_vb2_expbuf() - Wrapper to vb2_core_expbuf() for Digital TV
222 * buffer handling.
223 *
224 * @ctx: control struct for VB2 handler
225 * @exp: &struct dmx_exportbuffer passed from userspace in
226 * order to handle &DMX_EXPBUF.
227 *
228 * Export a buffer as a file descriptor.
229 */
230int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp);
231
232/**
233 * dvb_vb2_qbuf() - Wrapper to vb2_core_qbuf() for Digital TV buffer handling.
234 *
235 * @ctx: control struct for VB2 handler
236 * @b: &struct dmx_buffer passed from userspace in
237 * order to handle &DMX_QBUF.
238 *
239 * Queue a Digital TV buffer as requested by userspace
240 */
241int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b);
242
243/**
244 * dvb_vb2_dqbuf() - Wrapper to vb2_core_dqbuf() for Digital TV
245 * buffer handling.
246 *
247 * @ctx: control struct for VB2 handler
248 * @b: &struct dmx_buffer passed from userspace in
249 * order to handle &DMX_DQBUF.
250 *
251 * Dequeue a Digital TV buffer to the userspace
252 */
253int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b);
254
255/**
256 * dvb_vb2_mmap() - Wrapper to vb2_mmap() for Digital TV buffer handling.
257 *
258 * @ctx: control struct for VB2 handler
259 * @vma: pointer to &struct vm_area_struct with the vma passed
260 * to the mmap file operation handler in the driver.
261 *
262 * map Digital TV video buffers into application address space.
263 */
264int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma);
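
/*
 * Illustrative sketch (not part of this header): routing the memory-mapped
 * demux ioctls to the dvb_vb2 wrappers. Keeping the context in
 * file->private_data is an assumption of the example.
 */
static int xyz_dmx_mmap_ioctl(struct file *file, unsigned int cmd, void *parg)
{
	struct dvb_vb2_ctx *ctx = file->private_data;

	switch (cmd) {
	case DMX_REQBUFS:
		return dvb_vb2_reqbufs(ctx, parg);
	case DMX_QUERYBUF:
		return dvb_vb2_querybuf(ctx, parg);
	case DMX_EXPBUF:
		return dvb_vb2_expbuf(ctx, parg);
	case DMX_QBUF:
		return dvb_vb2_qbuf(ctx, parg);
	case DMX_DQBUF:
		return dvb_vb2_dqbuf(ctx, parg);
	default:
		return -ENOTTY;
	}
}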
265
266#endif /* _DVB_VB2_H */
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
new file mode 100644
index 000000000000..554db879527f
--- /dev/null
+++ b/include/media/dvbdev.h
@@ -0,0 +1,407 @@
1/*
2 * dvbdev.h
3 *
4 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
5 * for convergence integrated media GmbH
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public License
9 * as published by the Free Software Foundation; either version 2.1
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#ifndef _DVBDEV_H_
20#define _DVBDEV_H_
21
22#include <linux/types.h>
23#include <linux/poll.h>
24#include <linux/fs.h>
25#include <linux/list.h>
26#include <media/media-device.h>
27
28#define DVB_MAJOR 212
29
30#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0
31 #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS
32#else
33 #define DVB_MAX_ADAPTERS 16
34#endif
35
36#define DVB_UNSET (-1)
37
38/* List of DVB device types */
39
40/**
41 * enum dvb_device_type - type of the Digital TV device
42 *
43 * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI)
44 * @DVB_DEVICE_FRONTEND: Digital TV frontend.
45 * @DVB_DEVICE_DEMUX: Digital TV demux.
46 * @DVB_DEVICE_DVR: Digital TV digital video record (DVR).
47 * @DVB_DEVICE_CA: Digital TV Conditional Access (CA).
48 * @DVB_DEVICE_NET: Digital TV network.
49 *
50 * @DVB_DEVICE_VIDEO: Digital TV video decoder.
51 * Deprecated. Used only on av7110-av.
52 * @DVB_DEVICE_AUDIO: Digital TV audio decoder.
53 * Deprecated. Used only on av7110-av.
54 * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD).
55 * Deprecated. Used only on av7110.
56 */
57enum dvb_device_type {
58 DVB_DEVICE_SEC,
59 DVB_DEVICE_FRONTEND,
60 DVB_DEVICE_DEMUX,
61 DVB_DEVICE_DVR,
62 DVB_DEVICE_CA,
63 DVB_DEVICE_NET,
64
65 DVB_DEVICE_VIDEO,
66 DVB_DEVICE_AUDIO,
67 DVB_DEVICE_OSD,
68};
69
70#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \
71 static short adapter_nr[] = \
72 {[0 ... (DVB_MAX_ADAPTERS - 1)] = DVB_UNSET }; \
73 module_param_array(adapter_nr, short, NULL, 0444); \
74 MODULE_PARM_DESC(adapter_nr, "DVB adapter numbers")
75
76struct dvb_frontend;
77
78/**
79 * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API
80 *
81 * @num: Number of the adapter
82 * @list_head: List with the DVB adapters
83 * @device_list: List with the DVB devices
84 * @name: Name of the adapter
85 * @proposed_mac: proposed MAC address for the adapter
86 * @priv: private data
87 * @device: pointer to struct device
88 * @module: pointer to struct module
89 * @mfe_shared: mfe shared: indicates mutually exclusive frontends
90 *			The usage of this flag is currently deprecated
91 * @mfe_dvbdev: Frontend device in use, in the case of MFE
92 * @mfe_lock: Lock to prevent using the other frontends when MFE is
93 * used.
94 * @mdev: pointer to struct media_device, used when the media
95 * controller is used.
96 * @conn: RF connector. Used only if the device has no separate
97 * tuner.
98 * @conn_pads: pointer to struct media_pad associated with @conn;
99 */
100struct dvb_adapter {
101 int num;
102 struct list_head list_head;
103 struct list_head device_list;
104 const char *name;
105 u8 proposed_mac [6];
106 void* priv;
107
108 struct device *device;
109
110 struct module *module;
111
112 int mfe_shared; /* indicates mutually exclusive frontends */
113 struct dvb_device *mfe_dvbdev; /* frontend device in use */
114 struct mutex mfe_lock; /* access lock for thread creation */
115
116#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
117 struct media_device *mdev;
118 struct media_entity *conn;
119 struct media_pad *conn_pads;
120#endif
121};
122
123/**
124 * struct dvb_device - represents a DVB device node
125 *
126 * @list_head: List head with all DVB devices
127 * @fops: pointer to struct file_operations
128 * @adapter: pointer to the adapter that holds this device node
129 * @type: type of the device, as defined by &enum dvb_device_type.
130 * @minor: devnode minor number. Major number is always DVB_MAJOR.
131 * @id: device ID number, inside the adapter
132 * @readers: Initialized by the caller. Each call to open() in Read Only mode
133 * decreases this counter by one.
134 * @writers: Initialized by the caller. Each call to open() in Read/Write
135 * mode decreases this counter by one.
136 * @users: Initialized by the caller. Each call to open() in any mode
137 * decreases this counter by one.
138 * @wait_queue: wait queue, used to wait for certain events inside one of
139 * the DVB API callers
140 * @kernel_ioctl: callback function used to handle ioctl calls from userspace.
141 * @name: Name to be used for the device at the Media Controller
142 * @entity: pointer to struct media_entity associated with the device node
143 * @pads: pointer to struct media_pad associated with @entity;
144 * @priv: private data
145 * @intf_devnode: Pointer to media_intf_devnode. Used by the dvbdev core to
146 * store the MC device node interface
147 * @tsout_num_entities: Number of Transport Stream output entities
148 * @tsout_entity: array with MC entities associated to each TS output node
149 * @tsout_pads: array with the source pads for each @tsout_entity
150 *
151 * This structure is used by the DVB core (frontend, CA, net, demux) in
152 * order to create the device nodes. Usually, drivers should not initialize
153 * this struct directly.
154 */
155struct dvb_device {
156 struct list_head list_head;
157 const struct file_operations *fops;
158 struct dvb_adapter *adapter;
159 enum dvb_device_type type;
160 int minor;
161 u32 id;
162
163 /* in theory, 'users' can vanish now,
164 but I don't want to change too much now... */
165 int readers;
166 int writers;
167 int users;
168
169 wait_queue_head_t wait_queue;
170 /* don't really need those !? -- FIXME: use video_usercopy */
171 int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
172
173 /* Needed for media controller register/unregister */
174#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
175 const char *name;
176
177 /* Allocated and filled inside dvbdev.c */
178 struct media_intf_devnode *intf_devnode;
179
180 unsigned tsout_num_entities;
181 struct media_entity *entity, *tsout_entity;
182 struct media_pad *pads, *tsout_pads;
183#endif
184
185 void *priv;
186};
187
188/**
189 * dvb_register_adapter - Registers a new DVB adapter
190 *
191 * @adap: pointer to struct dvb_adapter
192 * @name: Adapter's name
193 * @module: initialized with THIS_MODULE at the caller
194 * @device: pointer to struct device that corresponds to the device driver
195 * @adapter_nums: Array with a list of adapter numbers that dvb_register_adapter()
196 *		  can select among. Typically, initialized with:
197 * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums)
198 */
199int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
200 struct module *module, struct device *device,
201 short *adapter_nums);
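
/*
 * Illustrative sketch (not part of this header): a typical probe-time
 * registration sequence. "struct xyz_dev" and xyz_demux_template are
 * assumptions of the example; only the dvbdev calls are real.
 */
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static int xyz_register_dvb(struct xyz_dev *dev, struct device *parent,
			    struct module *owner)
{
	int ret;

	ret = dvb_register_adapter(&dev->adapter, "xyz", owner, parent,
				   adapter_nr);
	if (ret < 0)
		return ret;

	/* one demux devnode on this adapter, with a single TS output pad */
	return dvb_register_device(&dev->adapter, &dev->dvbdev,
				   &xyz_demux_template, dev,
				   DVB_DEVICE_DEMUX, 1);
}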
202
203/**
204 * dvb_unregister_adapter - Unregisters a DVB adapter
205 *
206 * @adap: pointer to struct dvb_adapter
207 */
208int dvb_unregister_adapter(struct dvb_adapter *adap);
209
210/**
211 * dvb_register_device - Registers a new DVB device
212 *
213 * @adap: pointer to struct dvb_adapter
214 * @pdvbdev: pointer to the place where the new struct dvb_device will be
215 * stored
216 * @template: Template used to create &pdvbdev;
217 * @priv: private data
218 * @type: type of the device, as defined by &enum dvb_device_type.
219 * @demux_sink_pads: Number of demux outputs, to be used to create the TS
220 * outputs via the Media Controller.
221 */
222int dvb_register_device(struct dvb_adapter *adap,
223 struct dvb_device **pdvbdev,
224 const struct dvb_device *template,
225 void *priv,
226 enum dvb_device_type type,
227 int demux_sink_pads);
228
229/**
230 * dvb_remove_device - Remove a registered DVB device
231 *
232 * This does not free memory. To do that, call dvb_free_device().
233 *
234 * @dvbdev: pointer to struct dvb_device
235 */
236void dvb_remove_device(struct dvb_device *dvbdev);
237
238/**
239 * dvb_free_device - Free memory occupied by a DVB device.
240 *
241 * Call dvb_unregister_device() before calling this function.
242 *
243 * @dvbdev: pointer to struct dvb_device
244 */
245void dvb_free_device(struct dvb_device *dvbdev);
246
247/**
248 * dvb_unregister_device - Unregisters a DVB device
249 *
250 * This is a combination of dvb_remove_device() and dvb_free_device().
251 * Using this function is usually a mistake, and is often an indicator
252 * for a use-after-free bug (when a userspace process keeps a file
253 * handle to a detached device).
254 *
255 * @dvbdev: pointer to struct dvb_device
256 */
257void dvb_unregister_device(struct dvb_device *dvbdev);
258
259#ifdef CONFIG_MEDIA_CONTROLLER_DVB
260/**
261 * dvb_create_media_graph - Creates media graph for the Digital TV part of the
262 * device.
263 *
264 * @adap: pointer to &struct dvb_adapter
265 * @create_rf_connector: if true, it creates the RF connector too
266 *
267 * This function checks all DVB-related functions at the media controller
268 * entities and creates the needed links for the media graph. It is
269 * capable of working with multiple tuners or multiple frontends, but it
270 * won't create links if the device has multiple tuners and multiple frontends
271 * or if the device has multiple muxes. In such case, the caller driver should
272 * manually create the remaining links.
273 */
274__must_check int dvb_create_media_graph(struct dvb_adapter *adap,
275 bool create_rf_connector);
276
277/**
278 * dvb_register_media_controller - registers a media controller at DVB adapter
279 *
280 * @adap: pointer to &struct dvb_adapter
281 * @mdev: pointer to &struct media_device
282 */
283static inline void dvb_register_media_controller(struct dvb_adapter *adap,
284 struct media_device *mdev)
285{
286 adap->mdev = mdev;
287}
288
289/**
290 * dvb_get_media_controller - gets the associated media controller
291 *
292 * @adap: pointer to &struct dvb_adapter
293 */
294static inline struct media_device
295*dvb_get_media_controller(struct dvb_adapter *adap)
296{
297 return adap->mdev;
298}
299#else
300static inline
301int dvb_create_media_graph(struct dvb_adapter *adap,
302 bool create_rf_connector)
303{
304 return 0;
305};
306#define dvb_register_media_controller(a, b) {}
307#define dvb_get_media_controller(a) NULL
308#endif
309
310/**
311 * dvb_generic_open - Digital TV open function, used by DVB devices
312 *
313 * @inode: pointer to &struct inode.
314 * @file: pointer to &struct file.
315 *
316 * Checks if a DVB devnode is still valid, and if the permissions are
317 * OK and, if so, increments the negative use count.
318 */
319int dvb_generic_open(struct inode *inode, struct file *file);
320
321/**
322 * dvb_generic_release - Digital TV close function, used by DVB devices
323 *
324 * @inode: pointer to &struct inode.
325 * @file: pointer to &struct file.
326 *
327 * Checks if a DVB devnode is still valid, and if the permissions are
328 * OK and, if so, decrements the negative use count.
329 */
330int dvb_generic_release(struct inode *inode, struct file *file);
331
332/**
333 * dvb_generic_ioctl - Digital TV ioctl function, used by DVB devices
334 *
335 * @file: pointer to &struct file.
336 * @cmd: Ioctl name.
337 * @arg: Ioctl argument.
338 *
339 * Checks if the DVB devnode and &struct dvb_device.kernel_ioctl are still valid.
340 * If so, calls dvb_usercopy().
341 */
342long dvb_generic_ioctl(struct file *file,
343 unsigned int cmd, unsigned long arg);
344
345/**
346 * dvb_usercopy - copies data from/to userspace memory when an ioctl is
347 * issued.
348 *
349 * @file: Pointer to struct &file.
350 * @cmd: Ioctl name.
351 * @arg: Ioctl argument.
352 * @func: function that will actually handle the ioctl
353 *
354 * Ancillary function that uses ioctl direction and size to copy from
355 * userspace. Then, it calls @func, and, if needed, data is copied back
356 * to userspace.
357 */
358int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
359 int (*func)(struct file *file, unsigned int cmd, void *arg));
360
361/* generic DVB attach function. */
362#ifdef CONFIG_MEDIA_ATTACH
363
364/**
365 * dvb_attach - attaches a DVB frontend into the DVB core.
366 *
367 * @FUNCTION: function on a frontend module to be called.
368 * @ARGS...: @FUNCTION arguments.
369 *
370 * This ancillary function loads a frontend module at runtime and runs
371 * the @FUNCTION function there, with @ARGS.
372 * As it increments the symbol usage count, dvb_detach() should be
373 * called at unregister time.
374 */
375#define dvb_attach(FUNCTION, ARGS...) ({ \
376 void *__r = NULL; \
377 typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
378 if (__a) { \
379 __r = (void *) __a(ARGS); \
380 if (__r == NULL) \
381 symbol_put(FUNCTION); \
382 } else { \
383 printk(KERN_ERR "DVB: Unable to find symbol "#FUNCTION"()\n"); \
384 } \
385 __r; \
386})
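
/*
 * Illustrative usage (not part of this header): binding a demod module at
 * runtime. xyz_demod_attach(), xyz_demod_cfg and the xyz_dev layout are
 * hypothetical.
 */
static struct dvb_frontend *xyz_attach_demod(struct xyz_dev *dev)
{
	/* Loads the module providing xyz_demod_attach() if needed, calls it
	 * with the given arguments and takes a reference on the symbol; a
	 * NULL return means the module or the hardware was not found.
	 */
	return dvb_attach(xyz_demod_attach, &xyz_demod_cfg, &dev->i2c_adap);
}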
387
388/**
389 * dvb_detach - detaches a DVB frontend loaded via dvb_attach()
390 *
391 * @FUNC: attach function
392 *
393 * Decrements usage count for a function previously called via dvb_attach().
394 */
395
396#define dvb_detach(FUNC) symbol_put_addr(FUNC)
397
398#else
399#define dvb_attach(FUNCTION, ARGS...) ({ \
400 FUNCTION(ARGS); \
401})
402
403#define dvb_detach(FUNC) {}
404
405#endif
406
407#endif /* #ifndef _DVBDEV_H_ */
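For orientation, a minimal attach/detach sketch built only on the dvb_attach()/dvb_detach() contract documented above; xyz_attach(), its config and struct demo_dev are illustrative placeholders, not a real frontend API:

/* Hedged sketch: xyz_attach() and struct demo_dev are hypothetical. */
static int demo_attach_frontend(struct demo_dev *dev)
{
	dev->fe = dvb_attach(xyz_attach, &dev->xyz_cfg, &dev->i2c_adap);
	if (!dev->fe)
		return -ENODEV;	/* symbol not found or attach failed */
	return 0;
}

static void demo_detach_frontend(struct demo_dev *dev)
{
	/* drop the symbol reference taken by dvb_attach() */
	dvb_detach(xyz_attach);
}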
diff --git a/include/media/i2c-addr.h b/include/media/i2c-addr.h
deleted file mode 100644
index 1b6872f5e970..000000000000
--- a/include/media/i2c-addr.h
+++ /dev/null
@@ -1,43 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * V4L I2C address list
4 *
5 *
6 * Copyright (C) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
7 * Based on a previous mapping by
8 * Ralph Metzler (rjkm@thp.uni-koeln.de)
9 * Gerd Knorr <kraxel@goldbach.in-berlin.de>
10 *
11 */
12
13/* bttv address list */
14#define I2C_ADDR_TSA5522 0xc2
15#define I2C_ADDR_TDA7432 0x8a
16#define I2C_ADDR_TDA8425 0x82
17#define I2C_ADDR_TDA9840 0x84
18#define I2C_ADDR_TDA9850 0xb6 /* also used by 9855,9873 */
19#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */
20#define I2C_ADDR_TDA9875 0xb0
21#define I2C_ADDR_HAUPEE 0xa0
22#define I2C_ADDR_STBEE 0xae
23#define I2C_ADDR_VHX 0xc0
24#define I2C_ADDR_MSP3400 0x80
25#define I2C_ADDR_MSP3400_ALT 0x88
26#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */
27#define I2C_ADDR_DPL3518 0x84
28#define I2C_ADDR_TDA9887 0x86
29
30/*
31 * i2c bus addresses for the chips supported by tvaudio.c
32 */
33
34#define I2C_ADDR_TDA8425 0x82
35#define I2C_ADDR_TDA9840 0x84 /* also used by TA8874Z */
36#define I2C_ADDR_TDA985x_L 0xb4 /* also used by 9873 */
37#define I2C_ADDR_TDA985x_H 0xb6
38#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */
39
40#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */
41#define I2C_ADDR_TEA6420 0x98
42
43#define I2C_ADDR_PIC16C54 0x96 /* PV951 */
diff --git a/include/media/i2c/as3645a.h b/include/media/i2c/as3645a.h
deleted file mode 100644
index fffd4b563f5a..000000000000
--- a/include/media/i2c/as3645a.h
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * include/media/i2c/as3645a.h
3 *
4 * Copyright (C) 2008-2011 Nokia Corporation
5 *
6 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 */
18
19#ifndef __AS3645A_H__
20#define __AS3645A_H__
21
22#include <media/v4l2-subdev.h>
23
24#define AS3645A_NAME "as3645a"
25#define AS3645A_I2C_ADDR (0x60 >> 1) /* W:0x60, R:0x61 */
26
27#define AS3645A_FLASH_TIMEOUT_MIN 100000 /* us */
28#define AS3645A_FLASH_TIMEOUT_MAX 850000
29#define AS3645A_FLASH_TIMEOUT_STEP 50000
30
31#define AS3645A_FLASH_INTENSITY_MIN 200 /* mA */
32#define AS3645A_FLASH_INTENSITY_MAX_1LED 500
33#define AS3645A_FLASH_INTENSITY_MAX_2LEDS 400
34#define AS3645A_FLASH_INTENSITY_STEP 20
35
36#define AS3645A_TORCH_INTENSITY_MIN 20 /* mA */
37#define AS3645A_TORCH_INTENSITY_MAX 160
38#define AS3645A_TORCH_INTENSITY_STEP 20
39
40#define AS3645A_INDICATOR_INTENSITY_MIN 0 /* uA */
41#define AS3645A_INDICATOR_INTENSITY_MAX 10000
42#define AS3645A_INDICATOR_INTENSITY_STEP 2500
43
44/*
45 * as3645a_platform_data - Flash controller platform data
46 * @set_power: Set power callback
47 * @vref: VREF offset (0=0V, 1=+0.3V, 2=-0.3V, 3=+0.6V)
48 * @peak: Inductor peak current limit (0=1.25A, 1=1.5A, 2=1.75A, 3=2.0A)
49 * @ext_strobe: True if external flash strobe can be used
50 * @flash_max_current: Max flash current (mA, <= AS3645A_FLASH_INTENSITY_MAX)
51 * @torch_max_current: Max torch current (mA, >= AS3645A_TORCH_INTENSITY_MAX)
52 * @timeout_max: Max flash timeout (us, <= AS3645A_FLASH_TIMEOUT_MAX)
53 */
54struct as3645a_platform_data {
55 int (*set_power)(struct v4l2_subdev *subdev, int on);
56 unsigned int vref;
57 unsigned int peak;
58 bool ext_strobe;
59
60 /* Flash and torch currents and timeout limits */
61 unsigned int flash_max_current;
62 unsigned int torch_max_current;
63 unsigned int timeout_max;
64};
65
66#endif /* __AS3645A_H__ */
diff --git a/include/media/i2c/bt819.h b/include/media/i2c/bt819.h
index 8025f4bc2bb6..1bcf0dbeb516 100644
--- a/include/media/i2c/bt819.h
+++ b/include/media/i2c/bt819.h
@@ -30,7 +30,7 @@
30 30
31 Note: these ioctls are internal to the kernel and are never called 31 Note: these ioctls are internal to the kernel and are never called
32 from userspace. */ 32 from userspace. */
33#define BT819_FIFO_RESET_LOW _IO('b', 0) 33#define BT819_FIFO_RESET_LOW _IO('b', 0)
34#define BT819_FIFO_RESET_HIGH _IO('b', 1) 34#define BT819_FIFO_RESET_HIGH _IO('b', 1)
35 35
36#endif 36#endif
diff --git a/include/media/i2c/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h
index 76491c62c254..9f47d6a48cff 100644
--- a/include/media/i2c/ir-kbd-i2c.h
+++ b/include/media/i2c/ir-kbd-i2c.h
@@ -19,11 +19,15 @@ struct IR_i2c {
19 u32 polling_interval; /* in ms */ 19 u32 polling_interval; /* in ms */
20 20
21 struct delayed_work work; 21 struct delayed_work work;
22 char name[32];
23 char phys[32]; 22 char phys[32];
24 int (*get_key)(struct IR_i2c *ir, 23 int (*get_key)(struct IR_i2c *ir,
25 enum rc_proto *protocol, 24 enum rc_proto *protocol,
26 u32 *scancode, u8 *toggle); 25 u32 *scancode, u8 *toggle);
26 /* tx */
27 struct i2c_client *tx_c;
28 struct mutex lock; /* do not poll Rx during Tx */
29 unsigned int carrier;
30 unsigned int duty_cycle;
27}; 31};
28 32
29enum ir_kbd_get_key_fn { 33enum ir_kbd_get_key_fn {
diff --git a/include/media/i2c/m52790.h b/include/media/i2c/m52790.h
index 7ddffae31a67..8d9db3cf6fab 100644
--- a/include/media/i2c/m52790.h
+++ b/include/media/i2c/m52790.h
@@ -23,57 +23,57 @@
23 23
24/* Input routing switch 1 */ 24/* Input routing switch 1 */
25 25
26#define M52790_SW1_IN_MASK 0x0003 26#define M52790_SW1_IN_MASK 0x0003
27#define M52790_SW1_IN_TUNER 0x0000 27#define M52790_SW1_IN_TUNER 0x0000
28#define M52790_SW1_IN_V2 0x0001 28#define M52790_SW1_IN_V2 0x0001
29#define M52790_SW1_IN_V3 0x0002 29#define M52790_SW1_IN_V3 0x0002
30#define M52790_SW1_IN_V4 0x0003 30#define M52790_SW1_IN_V4 0x0003
31 31
32/* Selects component input instead of composite */ 32/* Selects component input instead of composite */
33#define M52790_SW1_YCMIX 0x0004 33#define M52790_SW1_YCMIX 0x0004
34 34
35 35
36/* Input routing switch 2 */ 36/* Input routing switch 2 */
37 37
38#define M52790_SW2_IN_MASK 0x0300 38#define M52790_SW2_IN_MASK 0x0300
39#define M52790_SW2_IN_TUNER 0x0000 39#define M52790_SW2_IN_TUNER 0x0000
40#define M52790_SW2_IN_V2 0x0100 40#define M52790_SW2_IN_V2 0x0100
41#define M52790_SW2_IN_V3 0x0200 41#define M52790_SW2_IN_V3 0x0200
42#define M52790_SW2_IN_V4 0x0300 42#define M52790_SW2_IN_V4 0x0300
43 43
44/* Selects component input instead of composite */ 44/* Selects component input instead of composite */
45#define M52790_SW2_YCMIX 0x0400 45#define M52790_SW2_YCMIX 0x0400
46 46
47 47
48/* Output routing switch 1 */ 48/* Output routing switch 1 */
49 49
50/* Enable 6dB amplifier for composite out */ 50/* Enable 6dB amplifier for composite out */
51#define M52790_SW1_V_AMP 0x0008 51#define M52790_SW1_V_AMP 0x0008
52 52
53/* Enable 6dB amplifier for component out */ 53/* Enable 6dB amplifier for component out */
54#define M52790_SW1_YC_AMP 0x0010 54#define M52790_SW1_YC_AMP 0x0010
55 55
56/* Audio output mode */ 56/* Audio output mode */
57#define M52790_SW1_AUDIO_MASK 0x00c0 57#define M52790_SW1_AUDIO_MASK 0x00c0
58#define M52790_SW1_AUDIO_MUTE 0x0000 58#define M52790_SW1_AUDIO_MUTE 0x0000
59#define M52790_SW1_AUDIO_R 0x0040 59#define M52790_SW1_AUDIO_R 0x0040
60#define M52790_SW1_AUDIO_L 0x0080 60#define M52790_SW1_AUDIO_L 0x0080
61#define M52790_SW1_AUDIO_STEREO 0x00c0 61#define M52790_SW1_AUDIO_STEREO 0x00c0
62 62
63 63
64/* Output routing switch 2 */ 64/* Output routing switch 2 */
65 65
66/* Enable 6dB amplifier for composite out */ 66/* Enable 6dB amplifier for composite out */
67#define M52790_SW2_V_AMP 0x0800 67#define M52790_SW2_V_AMP 0x0800
68 68
69/* Enable 6dB amplifier for component out */ 69/* Enable 6dB amplifier for component out */
70#define M52790_SW2_YC_AMP 0x1000 70#define M52790_SW2_YC_AMP 0x1000
71 71
72/* Audio output mode */ 72/* Audio output mode */
73#define M52790_SW2_AUDIO_MASK 0xc000 73#define M52790_SW2_AUDIO_MASK 0xc000
74#define M52790_SW2_AUDIO_MUTE 0x0000 74#define M52790_SW2_AUDIO_MUTE 0x0000
75#define M52790_SW2_AUDIO_R 0x4000 75#define M52790_SW2_AUDIO_R 0x4000
76#define M52790_SW2_AUDIO_L 0x8000 76#define M52790_SW2_AUDIO_L 0x8000
77#define M52790_SW2_AUDIO_STEREO 0xc000 77#define M52790_SW2_AUDIO_STEREO 0xc000
78 78
79 79
@@ -83,9 +83,9 @@
83#define M52790_IN_V3 (M52790_SW1_IN_V3 | M52790_SW2_IN_V3) 83#define M52790_IN_V3 (M52790_SW1_IN_V3 | M52790_SW2_IN_V3)
84#define M52790_IN_V4 (M52790_SW1_IN_V4 | M52790_SW2_IN_V4) 84#define M52790_IN_V4 (M52790_SW1_IN_V4 | M52790_SW2_IN_V4)
85 85
86#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \ 86#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \
87 M52790_SW2_AUDIO_STEREO) 87 M52790_SW2_AUDIO_STEREO)
88#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \ 88#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \
89 M52790_SW1_V_AMP | \ 89 M52790_SW1_V_AMP | \
90 M52790_SW2_AUDIO_STEREO | \ 90 M52790_SW2_AUDIO_STEREO | \
91 M52790_SW2_V_AMP) 91 M52790_SW2_V_AMP)
diff --git a/include/media/i2c/saa7115.h b/include/media/i2c/saa7115.h
index 53954c90e7f6..a0cda423509d 100644
--- a/include/media/i2c/saa7115.h
+++ b/include/media/i2c/saa7115.h
@@ -36,15 +36,15 @@
36#define SAA7115_SVIDEO3 9 36#define SAA7115_SVIDEO3 9
37 37
38/* outputs */ 38/* outputs */
39#define SAA7115_IPORT_ON 1 39#define SAA7115_IPORT_ON 1
40#define SAA7115_IPORT_OFF 0 40#define SAA7115_IPORT_OFF 0
41 41
42/* SAA7111 specific outputs. */ 42/* SAA7111 specific outputs. */
43#define SAA7111_VBI_BYPASS 2 43#define SAA7111_VBI_BYPASS 2
44#define SAA7111_FMT_YUV422 0x00 44#define SAA7111_FMT_YUV422 0x00
45#define SAA7111_FMT_RGB 0x40 45#define SAA7111_FMT_RGB 0x40
46#define SAA7111_FMT_CCIR 0x80 46#define SAA7111_FMT_CCIR 0x80
47#define SAA7111_FMT_YUV411 0xc0 47#define SAA7111_FMT_YUV411 0xc0
48 48
49/* config flags */ 49/* config flags */
50/* 50/*
diff --git a/include/media/i2c/tvaudio.h b/include/media/i2c/tvaudio.h
index 1ac8184693f8..f13e1a386364 100644
--- a/include/media/i2c/tvaudio.h
+++ b/include/media/i2c/tvaudio.h
@@ -21,7 +21,22 @@
21#ifndef _TVAUDIO_H 21#ifndef _TVAUDIO_H
22#define _TVAUDIO_H 22#define _TVAUDIO_H
23 23
24#include <media/i2c-addr.h> 24/*
25 * i2c bus addresses for the chips supported by tvaudio.c
26 */
27
28#define I2C_ADDR_TDA8425 0x82
29#define I2C_ADDR_TDA9840 0x84
30#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */
31#define I2C_ADDR_TDA9875 0xb0
32#define I2C_ADDR_TDA8425 0x82
33#define I2C_ADDR_TDA9840 0x84 /* also used by TA8874Z */
34#define I2C_ADDR_TDA985x_L 0xb4 /* also used by 9873 */
35#define I2C_ADDR_TDA985x_H 0xb6
36#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */
37#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */
38#define I2C_ADDR_TEA6420 0x98
39#define I2C_ADDR_PIC16C54 0x96 /* PV951 */
25 40
26/* The tvaudio module accepts the following inputs: */ 41/* The tvaudio module accepts the following inputs: */
27#define TVAUDIO_INPUT_TUNER 0 42#define TVAUDIO_INPUT_TUNER 0
diff --git a/include/media/i2c/upd64031a.h b/include/media/i2c/upd64031a.h
index 48ec03c4ef23..1eba24dfee48 100644
--- a/include/media/i2c/upd64031a.h
+++ b/include/media/i2c/upd64031a.h
@@ -18,9 +18,9 @@
18#define _UPD64031A_H_ 18#define _UPD64031A_H_
19 19
20/* Ghost reduction modes */ 20/* Ghost reduction modes */
21#define UPD64031A_GR_ON 0 21#define UPD64031A_GR_ON 0
22#define UPD64031A_GR_OFF 1 22#define UPD64031A_GR_OFF 1
23#define UPD64031A_GR_THROUGH 3 23#define UPD64031A_GR_THROUGH 3
24 24
25/* Direct 3D/YCS Connection */ 25/* Direct 3D/YCS Connection */
26#define UPD64031A_3DYCS_DISABLE (0 << 2) 26#define UPD64031A_3DYCS_DISABLE (0 << 2)
diff --git a/include/media/lirc.h b/include/media/lirc.h
deleted file mode 100644
index 554988c860c1..000000000000
--- a/include/media/lirc.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <uapi/linux/lirc.h>
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
deleted file mode 100644
index 857da67bd931..000000000000
--- a/include/media/lirc_dev.h
+++ /dev/null
@@ -1,192 +0,0 @@
1/*
2 * LIRC base driver
3 *
4 * by Artur Lipowski <alipowski@interia.pl>
5 * This code is licensed under GNU GPL
6 *
7 */
8
9#ifndef _LINUX_LIRC_DEV_H
10#define _LINUX_LIRC_DEV_H
11
12#define BUFLEN 16
13
14#include <linux/slab.h>
15#include <linux/fs.h>
16#include <linux/ioctl.h>
17#include <linux/poll.h>
18#include <linux/kfifo.h>
19#include <media/lirc.h>
20#include <linux/device.h>
21#include <linux/cdev.h>
22
23struct lirc_buffer {
24 wait_queue_head_t wait_poll;
25 spinlock_t fifo_lock;
26 unsigned int chunk_size;
27 unsigned int size; /* in chunks */
28 /* Using chunks instead of bytes pretends to simplify boundary checking
29 * And should allow for some performance fine tunning later */
30 struct kfifo fifo;
31};
32
33static inline void lirc_buffer_clear(struct lirc_buffer *buf)
34{
35 unsigned long flags;
36
37 if (kfifo_initialized(&buf->fifo)) {
38 spin_lock_irqsave(&buf->fifo_lock, flags);
39 kfifo_reset(&buf->fifo);
40 spin_unlock_irqrestore(&buf->fifo_lock, flags);
41 } else
42 WARN(1, "calling %s on an uninitialized lirc_buffer\n",
43 __func__);
44}
45
46static inline int lirc_buffer_init(struct lirc_buffer *buf,
47 unsigned int chunk_size,
48 unsigned int size)
49{
50 int ret;
51
52 init_waitqueue_head(&buf->wait_poll);
53 spin_lock_init(&buf->fifo_lock);
54 buf->chunk_size = chunk_size;
55 buf->size = size;
56 ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL);
57
58 return ret;
59}
60
61static inline void lirc_buffer_free(struct lirc_buffer *buf)
62{
63 if (kfifo_initialized(&buf->fifo)) {
64 kfifo_free(&buf->fifo);
65 } else
66 WARN(1, "calling %s on an uninitialized lirc_buffer\n",
67 __func__);
68}
69
70static inline int lirc_buffer_len(struct lirc_buffer *buf)
71{
72 int len;
73 unsigned long flags;
74
75 spin_lock_irqsave(&buf->fifo_lock, flags);
76 len = kfifo_len(&buf->fifo);
77 spin_unlock_irqrestore(&buf->fifo_lock, flags);
78
79 return len;
80}
81
82static inline int lirc_buffer_full(struct lirc_buffer *buf)
83{
84 return lirc_buffer_len(buf) == buf->size * buf->chunk_size;
85}
86
87static inline int lirc_buffer_empty(struct lirc_buffer *buf)
88{
89 return !lirc_buffer_len(buf);
90}
91
92static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf,
93 unsigned char *dest)
94{
95 unsigned int ret = 0;
96
97 if (lirc_buffer_len(buf) >= buf->chunk_size)
98 ret = kfifo_out_locked(&buf->fifo, dest, buf->chunk_size,
99 &buf->fifo_lock);
100 return ret;
101
102}
103
104static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
105 unsigned char *orig)
106{
107 unsigned int ret;
108
109 ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size,
110 &buf->fifo_lock);
111
112 return ret;
113}
114
115/**
116 * struct lirc_dev - represents a LIRC device
117 *
118 * @name: used for logging
119 * @minor: the minor device (/dev/lircX) number for the device
120 * @code_length: length of a remote control key code expressed in bits
121 * @features: lirc compatible hardware features, like LIRC_MODE_RAW,
122 * LIRC_CAN\_\*, as defined at include/media/lirc.h.
123 * @buffer_size: Number of FIFO buffers with @chunk_size size.
124 * Only used if @rbuf is NULL.
125 * @chunk_size: Size of each FIFO buffer.
126 * Only used if @rbuf is NULL.
127 * @data: private per-driver data
128 * @buf: if %NULL, lirc_dev will allocate and manage the buffer,
129 * otherwise allocated by the caller which will
130 * have to write to the buffer by other means, like irq's
131 * (see also lirc_serial.c).
132 * @buf_internal: whether lirc_dev has allocated the read buffer or not
133 * @rdev: &struct rc_dev associated with the device
134 * @fops: &struct file_operations for the device
135 * @owner: the module owning this struct
136 * @attached: if the device is still live
137 * @open: open count for the device's chardev
138 * @mutex: serialises file_operations calls
139 * @dev: &struct device assigned to the device
140 * @cdev: &struct cdev assigned to the device
141 */
142struct lirc_dev {
143 char name[40];
144 unsigned int minor;
145 __u32 code_length;
146 __u32 features;
147
148 unsigned int buffer_size; /* in chunks holding one code each */
149 unsigned int chunk_size;
150 struct lirc_buffer *buf;
151 bool buf_internal;
152
153 void *data;
154 struct rc_dev *rdev;
155 const struct file_operations *fops;
156 struct module *owner;
157
158 bool attached;
159 int open;
160
161 struct mutex mutex; /* protect from simultaneous accesses */
162
163 struct device dev;
164 struct cdev cdev;
165};
166
167struct lirc_dev *lirc_allocate_device(void);
168
169void lirc_free_device(struct lirc_dev *d);
170
171int lirc_register_device(struct lirc_dev *d);
172
173void lirc_unregister_device(struct lirc_dev *d);
174
175/* Must be called in the open fop before lirc_get_pdata() can be used */
176void lirc_init_pdata(struct inode *inode, struct file *file);
177
178/* Returns the private data stored in the lirc_dev
179 * associated with the given device file pointer.
180 */
181void *lirc_get_pdata(struct file *file);
182
183/* default file operations
184 * used by drivers if they override only some operations
185 */
186int lirc_dev_fop_open(struct inode *inode, struct file *file);
187int lirc_dev_fop_close(struct inode *inode, struct file *file);
188unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait);
189long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
190ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length,
191 loff_t *ppos);
192#endif
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 511615d3bf6f..dc2f64e1b08f 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -56,7 +56,7 @@ struct media_file_operations {
56 struct module *owner; 56 struct module *owner;
57 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); 57 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
58 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); 58 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
59 unsigned int (*poll) (struct file *, struct poll_table_struct *); 59 __poll_t (*poll) (struct file *, struct poll_table_struct *);
60 long (*ioctl) (struct file *, unsigned int, unsigned long); 60 long (*ioctl) (struct file *, unsigned int, unsigned long);
61 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 61 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
62 int (*open) (struct file *); 62 int (*open) (struct file *);
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 222d379960b7..a732af1dbba0 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -88,6 +88,8 @@ struct media_entity_enum {
88 * @stack: Graph traversal stack; the stack contains information 88 * @stack: Graph traversal stack; the stack contains information
89 * on the path the media entities to be walked and the 89 * on the path the media entities to be walked and the
90 * links through which they were reached. 90 * links through which they were reached.
91 * @stack.entity: pointer to &struct media_entity at the graph.
92 * @stack.link: pointer to &struct list_head.
91 * @ent_enum: Visited entities 93 * @ent_enum: Visited entities
92 * @top: The top of the stack 94 * @top: The top of the stack
93 */ 95 */
@@ -247,6 +249,9 @@ enum media_entity_type {
247 * @pipe: Pipeline this entity belongs to. 249 * @pipe: Pipeline this entity belongs to.
248 * @info: Union with devnode information. Kept just for backward 250 * @info: Union with devnode information. Kept just for backward
249 * compatibility. 251 * compatibility.
252 * @info.dev: Contains device major and minor info.
253 * @info.dev.major: device node major, if the device is a devnode.
254 * @info.dev.minor: device node minor, if the device is a devnode.
250 * @major: Devnode major number (zero if not applicable). Kept just 255 * @major: Devnode major number (zero if not applicable). Kept just
251 * for backward compatibility. 256 * for backward compatibility.
252 * @minor: Devnode minor number (zero if not applicable). Kept just 257 * @minor: Devnode minor number (zero if not applicable). Kept just
@@ -629,7 +634,11 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
629 * This function must be called during the cleanup phase after unregistering 634 * This function must be called during the cleanup phase after unregistering
630 * the entity (currently, it does nothing). 635 * the entity (currently, it does nothing).
631 */ 636 */
632static inline void media_entity_cleanup(struct media_entity *entity) {}; 637#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
638static inline void media_entity_cleanup(struct media_entity *entity) {}
639#else
640#define media_entity_cleanup(entity) do { } while (false)
641#endif
633 642
634/** 643/**
635 * media_create_pad_link() - creates a link between two entities. 644 * media_create_pad_link() - creates a link between two entities.
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 314a1edb6189..aed4272d47f5 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -17,6 +17,7 @@
17#define _RC_CORE 17#define _RC_CORE
18 18
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/cdev.h>
20#include <linux/kfifo.h> 21#include <linux/kfifo.h>
21#include <linux/time.h> 22#include <linux/time.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
@@ -30,9 +31,9 @@ do { \
30} while (0) 31} while (0)
31 32
32/** 33/**
33 * enum rc_driver_type - type of the RC output 34 * enum rc_driver_type - type of the RC driver.
34 * 35 *
35 * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode 36 * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode.
36 * @RC_DRIVER_IR_RAW: Driver or hardware generates pulse/space sequences. 37 * @RC_DRIVER_IR_RAW: Driver or hardware generates pulse/space sequences.
37 * It needs an Infra-Red pulse/space decoder 38 * It needs an Infra-Red pulse/space decoder
38 * @RC_DRIVER_IR_RAW_TX: Device transmitter only, 39 * @RC_DRIVER_IR_RAW_TX: Device transmitter only,
@@ -68,6 +69,33 @@ enum rc_filter_type {
68}; 69};
69 70
70/** 71/**
72 * struct lirc_fh - represents an open lirc file
73 * @list: list of open file handles
74 * @rc: rcdev for this lirc chardev
75 * @carrier_low: when setting the carrier range, first the low end must be
76 * set with an ioctl and then the high end with another ioctl
77 * @send_timeout_reports: report timeouts in lirc raw IR.
78 * @rawir: queue for incoming raw IR
79 * @scancodes: queue for incoming decoded scancodes
80 * @wait_poll: poll struct for lirc device
81 * @send_mode: lirc mode for sending, either LIRC_MODE_SCANCODE or
82 * LIRC_MODE_PULSE
83 * @rec_mode: lirc mode for receiving, either LIRC_MODE_SCANCODE or
84 * LIRC_MODE_MODE2
85 */
86struct lirc_fh {
87 struct list_head list;
88 struct rc_dev *rc;
89 int carrier_low;
90 bool send_timeout_reports;
91 DECLARE_KFIFO_PTR(rawir, unsigned int);
92 DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode);
93 wait_queue_head_t wait_poll;
94 u8 send_mode;
95 u8 rec_mode;
96};
97
98/**
71 * struct rc_dev - represents a remote control device 99 * struct rc_dev - represents a remote control device
72 * @dev: driver model's view of this device 100 * @dev: driver model's view of this device
73 * @managed_alloc: devm_rc_allocate_device was used to create rc_dev 101 * @managed_alloc: devm_rc_allocate_device was used to create rc_dev
@@ -106,6 +134,8 @@ enum rc_filter_type {
106 * @keypressed: whether a key is currently pressed 134 * @keypressed: whether a key is currently pressed
107 * @keyup_jiffies: time (in jiffies) when the current keypress should be released 135 * @keyup_jiffies: time (in jiffies) when the current keypress should be released
108 * @timer_keyup: timer for releasing a keypress 136 * @timer_keyup: timer for releasing a keypress
137 * @timer_repeat: timer for autorepeat events. This is needed for CEC, which
138 * has non-standard repeats.
109 * @last_keycode: keycode of last keypress 139 * @last_keycode: keycode of last keypress
110 * @last_protocol: protocol of last keypress 140 * @last_protocol: protocol of last keypress
111 * @last_scancode: scancode of last keypress 141 * @last_scancode: scancode of last keypress
@@ -115,6 +145,15 @@ enum rc_filter_type {
115 * @max_timeout: maximum timeout supported by device 145 * @max_timeout: maximum timeout supported by device
116 * @rx_resolution : resolution (in ns) of input sampler 146 * @rx_resolution : resolution (in ns) of input sampler
117 * @tx_resolution: resolution (in ns) of output sampler 147 * @tx_resolution: resolution (in ns) of output sampler
148 * @lirc_dev: lirc device
149 * @lirc_cdev: lirc char cdev
150 * @gap_start: time when gap starts
151 * @gap_duration: duration of initial gap
152 * @gap: true if we're in a gap
153 * @lirc_fh_lock: protects lirc_fh list
154 * @lirc_fh: list of open files
155 * @registered: set to true by rc_register_device(), false by
156 * rc_unregister_device
118 * @change_protocol: allow changing the protocol used on hardware decoders 157 * @change_protocol: allow changing the protocol used on hardware decoders
119 * @open: callback to allow drivers to enable polling/irq when IR input device 158 * @open: callback to allow drivers to enable polling/irq when IR input device
120 * is opened. 159 * is opened.
@@ -165,6 +204,7 @@ struct rc_dev {
165 bool keypressed; 204 bool keypressed;
166 unsigned long keyup_jiffies; 205 unsigned long keyup_jiffies;
167 struct timer_list timer_keyup; 206 struct timer_list timer_keyup;
207 struct timer_list timer_repeat;
168 u32 last_keycode; 208 u32 last_keycode;
169 enum rc_proto last_protocol; 209 enum rc_proto last_protocol;
170 u32 last_scancode; 210 u32 last_scancode;
@@ -174,6 +214,16 @@ struct rc_dev {
174 u32 max_timeout; 214 u32 max_timeout;
175 u32 rx_resolution; 215 u32 rx_resolution;
176 u32 tx_resolution; 216 u32 tx_resolution;
217#ifdef CONFIG_LIRC
218 struct device lirc_dev;
219 struct cdev lirc_cdev;
220 ktime_t gap_start;
221 u64 gap_duration;
222 bool gap;
223 spinlock_t lirc_fh_lock;
224 struct list_head lirc_fh;
225#endif
226 bool registered;
177 int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto); 227 int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto);
178 int (*open)(struct rc_dev *dev); 228 int (*open)(struct rc_dev *dev);
179 void (*close)(struct rc_dev *dev); 229 void (*close)(struct rc_dev *dev);
@@ -248,20 +298,6 @@ int devm_rc_register_device(struct device *parent, struct rc_dev *dev);
248 */ 298 */
249void rc_unregister_device(struct rc_dev *dev); 299void rc_unregister_device(struct rc_dev *dev);
250 300
251/**
252 * rc_open - Opens a RC device
253 *
254 * @rdev: pointer to struct rc_dev.
255 */
256int rc_open(struct rc_dev *rdev);
257
258/**
259 * rc_close - Closes a RC device
260 *
261 * @rdev: pointer to struct rc_dev.
262 */
263void rc_close(struct rc_dev *rdev);
264
265void rc_repeat(struct rc_dev *dev); 301void rc_repeat(struct rc_dev *dev);
266void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, 302void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
267 u8 toggle); 303 u8 toggle);
@@ -309,6 +345,7 @@ int ir_raw_event_store_with_filter(struct rc_dev *dev,
309void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); 345void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
310int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, 346int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
311 struct ir_raw_event *events, unsigned int max); 347 struct ir_raw_event *events, unsigned int max);
348int ir_raw_encode_carrier(enum rc_proto protocol);
312 349
313static inline void ir_raw_event_reset(struct rc_dev *dev) 350static inline void ir_raw_event_reset(struct rc_dev *dev)
314{ 351{
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 72197cb43781..7046734b3895 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -10,59 +10,7 @@
10 */ 10 */
11 11
12#include <linux/input.h> 12#include <linux/input.h>
13 13#include <uapi/linux/lirc.h>
14/**
15 * enum rc_proto - the Remote Controller protocol
16 *
17 * @RC_PROTO_UNKNOWN: Protocol not known
18 * @RC_PROTO_OTHER: Protocol known but proprietary
19 * @RC_PROTO_RC5: Philips RC5 protocol
20 * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol
21 * @RC_PROTO_RC5_SZ: StreamZap variant of RC5
22 * @RC_PROTO_JVC: JVC protocol
23 * @RC_PROTO_SONY12: Sony 12 bit protocol
24 * @RC_PROTO_SONY15: Sony 15 bit protocol
25 * @RC_PROTO_SONY20: Sony 20 bit protocol
26 * @RC_PROTO_NEC: NEC protocol
27 * @RC_PROTO_NECX: Extended NEC protocol
28 * @RC_PROTO_NEC32: NEC 32 bit protocol
29 * @RC_PROTO_SANYO: Sanyo protocol
30 * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard
31 * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse
32 * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol
33 * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol
34 * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol
35 * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol
36 * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
37 * @RC_PROTO_SHARP: Sharp protocol
38 * @RC_PROTO_XMP: XMP protocol
39 * @RC_PROTO_CEC: CEC protocol
40 */
41enum rc_proto {
42 RC_PROTO_UNKNOWN = 0,
43 RC_PROTO_OTHER = 1,
44 RC_PROTO_RC5 = 2,
45 RC_PROTO_RC5X_20 = 3,
46 RC_PROTO_RC5_SZ = 4,
47 RC_PROTO_JVC = 5,
48 RC_PROTO_SONY12 = 6,
49 RC_PROTO_SONY15 = 7,
50 RC_PROTO_SONY20 = 8,
51 RC_PROTO_NEC = 9,
52 RC_PROTO_NECX = 10,
53 RC_PROTO_NEC32 = 11,
54 RC_PROTO_SANYO = 12,
55 RC_PROTO_MCIR2_KBD = 13,
56 RC_PROTO_MCIR2_MSE = 14,
57 RC_PROTO_RC6_0 = 15,
58 RC_PROTO_RC6_6A_20 = 16,
59 RC_PROTO_RC6_6A_24 = 17,
60 RC_PROTO_RC6_6A_32 = 18,
61 RC_PROTO_RC6_MCE = 19,
62 RC_PROTO_SHARP = 20,
63 RC_PROTO_XMP = 21,
64 RC_PROTO_CEC = 22,
65};
66 14
67#define RC_PROTO_BIT_NONE 0ULL 15#define RC_PROTO_BIT_NONE 0ULL
68#define RC_PROTO_BIT_UNKNOWN BIT_ULL(RC_PROTO_UNKNOWN) 16#define RC_PROTO_BIT_UNKNOWN BIT_ULL(RC_PROTO_UNKNOWN)
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 4d8cb0796bc6..b7e42a1b0910 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -117,7 +117,7 @@ struct soc_camera_host_ops {
117 int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *); 117 int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
118 int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *); 118 int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
119 int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *); 119 int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
120 unsigned int (*poll)(struct file *, poll_table *); 120 __poll_t (*poll)(struct file *, poll_table *);
121}; 121};
122 122
123#define SOCAM_SENSOR_INVERT_PCLK (1 << 0) 123#define SOCAM_SENSOR_INVERT_PCLK (1 << 0)
diff --git a/include/media/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h
index 13e49d85cae3..823fadede7bf 100644
--- a/include/media/v4l2-tpg.h
+++ b/include/media/tpg/v4l2-tpg.h
@@ -26,8 +26,51 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
28#include <linux/videodev2.h> 28#include <linux/videodev2.h>
29#include <media/v4l2-tpg-colors.h>
30 29
30struct tpg_rbg_color8 {
31 unsigned char r, g, b;
32};
33
34struct tpg_rbg_color16 {
35 __u16 r, g, b;
36};
37
38enum tpg_color {
39 TPG_COLOR_CSC_WHITE,
40 TPG_COLOR_CSC_YELLOW,
41 TPG_COLOR_CSC_CYAN,
42 TPG_COLOR_CSC_GREEN,
43 TPG_COLOR_CSC_MAGENTA,
44 TPG_COLOR_CSC_RED,
45 TPG_COLOR_CSC_BLUE,
46 TPG_COLOR_CSC_BLACK,
47 TPG_COLOR_75_YELLOW,
48 TPG_COLOR_75_CYAN,
49 TPG_COLOR_75_GREEN,
50 TPG_COLOR_75_MAGENTA,
51 TPG_COLOR_75_RED,
52 TPG_COLOR_75_BLUE,
53 TPG_COLOR_100_WHITE,
54 TPG_COLOR_100_YELLOW,
55 TPG_COLOR_100_CYAN,
56 TPG_COLOR_100_GREEN,
57 TPG_COLOR_100_MAGENTA,
58 TPG_COLOR_100_RED,
59 TPG_COLOR_100_BLUE,
60 TPG_COLOR_100_BLACK,
61 TPG_COLOR_TEXTFG,
62 TPG_COLOR_TEXTBG,
63 TPG_COLOR_RANDOM,
64 TPG_COLOR_RAMP,
65 TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
66};
67
68extern const struct tpg_rbg_color8 tpg_colors[TPG_COLOR_MAX];
69extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
70extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
71extern const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
72 [V4L2_XFER_FUNC_SMPTE2084 + 1]
73 [TPG_COLOR_CSC_BLACK + 1];
31enum tpg_pattern { 74enum tpg_pattern {
32 TPG_PAT_75_COLORBAR, 75 TPG_PAT_75_COLORBAR,
33 TPG_PAT_100_COLORBAR, 76 TPG_PAT_100_COLORBAR,
diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h
index 78f0654d9c3d..df76ac8e658c 100644
--- a/include/media/tuner-types.h
+++ b/include/media/tuner-types.h
@@ -171,6 +171,21 @@ struct tuner_params {
171 struct tuner_range *ranges; 171 struct tuner_range *ranges;
172}; 172};
173 173
174/**
175 * struct tunertype - describes the known tuners.
176 *
177 * @name: string with the tuner's name.
178 * @count: size of &struct tuner_params array.
179 * @params: pointer to &struct tuner_params array.
180 *
181 * @min: minimal tuner frequency, in 62.5 kHz steps.
182 * Should be divided by 16 to convert to MHz.
183 * @max: maximal tuner frequency, in 62.5 kHz steps.
184 * Should be divided by 16 to convert to MHz.
185 * @stepsize: frequency step, in Hz.
186 * @initdata: optional byte sequence to initialize the tuner.
187 * @sleepdata: optional byte sequence to power down the tuner.
188 */
174struct tunertype { 189struct tunertype {
175 char *name; 190 char *name;
176 unsigned int count; 191 unsigned int count;
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 6152434cbe82..1592d323c577 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -28,7 +28,7 @@ struct v4l2_async_notifier;
28 * in order to identify a match 28 * in order to identify a match
29 * 29 *
30 * @V4L2_ASYNC_MATCH_CUSTOM: Match will use the logic provided by &struct 30 * @V4L2_ASYNC_MATCH_CUSTOM: Match will use the logic provided by &struct
31 * v4l2_async_subdev.match ops 31 * v4l2_async_subdev.match ops
32 * @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name 32 * @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name
33 * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address 33 * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
34 * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node 34 * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
@@ -48,6 +48,31 @@ enum v4l2_async_match_type {
48 * 48 *
49 * @match_type: type of match that will be used 49 * @match_type: type of match that will be used
50 * @match: union of per-bus type matching data sets 50 * @match: union of per-bus type matching data sets
51 * @match.fwnode:
52 * pointer to &struct fwnode_handle to be matched.
53 * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE.
54 * @match.device_name:
55 * string containing the device name to be matched.
56 * Used if @match_type is %V4L2_ASYNC_MATCH_DEVNAME.
57 * @match.i2c: embedded struct with I2C parameters to be matched.
58 * Both @match.i2c.adapter_id and @match.i2c.address
59 * should be matched.
60 * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
61 * @match.i2c.adapter_id:
62 * I2C adapter ID to be matched.
63 * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
64 * @match.i2c.address:
65 * I2C address to be matched.
66 * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
67 * @match.custom:
68 * Driver-specific match criteria.
69 * Used if @match_type is %V4L2_ASYNC_MATCH_CUSTOM.
70 * @match.custom.match:
71 * Driver-specific match function to be used if
72 * %V4L2_ASYNC_MATCH_CUSTOM.
73 * @match.custom.priv:
74 * Driver-specific private struct with match parameters
75 * to be used if %V4L2_ASYNC_MATCH_CUSTOM.
51 * @list: used to link struct v4l2_async_subdev objects, waiting to be 76 * @list: used to link struct v4l2_async_subdev objects, waiting to be
52 * probed, to a notifier->waiting list 77 * probed, to a notifier->waiting list
53 * 78 *
@@ -58,12 +83,8 @@ enum v4l2_async_match_type {
58struct v4l2_async_subdev { 83struct v4l2_async_subdev {
59 enum v4l2_async_match_type match_type; 84 enum v4l2_async_match_type match_type;
60 union { 85 union {
61 struct { 86 struct fwnode_handle *fwnode;
62 struct fwnode_handle *fwnode; 87 const char *device_name;
63 } fwnode;
64 struct {
65 const char *name;
66 } device_name;
67 struct { 88 struct {
68 int adapter_id; 89 int adapter_id;
69 unsigned short address; 90 unsigned short address;
@@ -167,7 +188,7 @@ void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
167 188
168/** 189/**
169 * v4l2_async_register_subdev - registers a sub-device to the asynchronous 190 * v4l2_async_register_subdev - registers a sub-device to the asynchronous
170 * subdevice framework 191 * subdevice framework
171 * 192 *
172 * @sd: pointer to &struct v4l2_subdev 193 * @sd: pointer to &struct v4l2_subdev
173 */ 194 */
@@ -197,7 +218,7 @@ int __must_check v4l2_async_register_subdev_sensor_common(
197 218
198/** 219/**
199 * v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous 220 * v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous
200 * subdevice framework 221 * subdevice framework
201 * 222 *
202 * @sd: pointer to &struct v4l2_subdev 223 * @sd: pointer to &struct v4l2_subdev
203 */ 224 */
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index aac8b7b6e691..e0d95a7c5d48 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -28,7 +28,7 @@
28 28
29#include <media/v4l2-dev.h> 29#include <media/v4l2-dev.h>
30 30
31/* Common printk constucts for v4l-i2c drivers. These macros create a unique 31/* Common printk constructs for v4l-i2c drivers. These macros create a unique
32 prefix consisting of the driver name, the adapter number and the i2c 32 prefix consisting of the driver name, the adapter number and the i2c
33 address. */ 33 address. */
34#define v4l_printk(level, name, adapter, addr, fmt, arg...) \ 34#define v4l_printk(level, name, adapter, addr, fmt, arg...) \
@@ -50,7 +50,7 @@
50/* These three macros assume that the debug level is set with a module 50/* These three macros assume that the debug level is set with a module
51 parameter called 'debug'. */ 51 parameter called 'debug'. */
52#define v4l_dbg(level, debug, client, fmt, arg...) \ 52#define v4l_dbg(level, debug, client, fmt, arg...) \
53 do { \ 53 do { \
54 if (debug >= (level)) \ 54 if (debug >= (level)) \
55 v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \ 55 v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \
56 } while (0) 56 } while (0)
@@ -80,9 +80,9 @@
80/* These three macros assume that the debug level is set with a module 80/* These three macros assume that the debug level is set with a module
81 parameter called 'debug'. */ 81 parameter called 'debug'. */
82#define v4l2_dbg(level, debug, dev, fmt, arg...) \ 82#define v4l2_dbg(level, debug, dev, fmt, arg...) \
83 do { \ 83 do { \
84 if (debug >= (level)) \ 84 if (debug >= (level)) \
85 v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \ 85 v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \
86 } while (0) 86 } while (0)
87 87
88/** 88/**
@@ -127,7 +127,7 @@ struct v4l2_subdev_ops;
127 * @client_type: name of the chip that's on the adapter. 127 * @client_type: name of the chip that's on the adapter.
128 * @addr: I2C address. If zero, it will use @probe_addrs 128 * @addr: I2C address. If zero, it will use @probe_addrs
129 * @probe_addrs: array with a list of address. The last entry at such 129 * @probe_addrs: array with a list of address. The last entry at such
130 * array should be %I2C_CLIENT_END. 130 * array should be %I2C_CLIENT_END.
131 * 131 *
132 * returns a &struct v4l2_subdev pointer. 132 * returns a &struct v4l2_subdev pointer.
133 */ 133 */
@@ -146,7 +146,7 @@ struct i2c_board_info;
146 * @info: pointer to struct i2c_board_info used to replace the irq, 146 * @info: pointer to struct i2c_board_info used to replace the irq,
147 * platform_data and addr arguments. 147 * platform_data and addr arguments.
148 * @probe_addrs: array with a list of address. The last entry at such 148 * @probe_addrs: array with a list of address. The last entry at such
149 * array should be %I2C_CLIENT_END. 149 * array should be %I2C_CLIENT_END.
150 * 150 *
151 * returns a &struct v4l2_subdev pointer. 151 * returns a &struct v4l2_subdev pointer.
152 */ 152 */
@@ -174,17 +174,43 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
174 */ 174 */
175unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd); 175unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd);
176 176
177/**
178 * enum v4l2_i2c_tuner_type - specifies the range of tuner address that
179 * should be used when seeking for I2C devices.
180 *
181 * @ADDRS_RADIO: Radio tuner addresses.
182 * Represent the following I2C addresses:
183 * 0x10 (if compiled with tea5761 support)
184 * and 0x60.
185 * @ADDRS_DEMOD: Demod tuner addresses.
186 * Represent the following I2C addresses:
187 * 0x42, 0x43, 0x4a and 0x4b.
188 * @ADDRS_TV: TV tuner addresses.
189 * Represent the following I2C addresses:
190 * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62,
191 * 0x63 and 0x64.
192 * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present; this
193 * excludes addresses used by the demodulator
194 * from the list of candidates.
195 * Represent the following I2C addresses:
196 * 0x60, 0x61, 0x62, 0x63 and 0x64.
197 *
198 * NOTE: All I2C addresses above use the 7-bit notation.
199 */
177enum v4l2_i2c_tuner_type { 200enum v4l2_i2c_tuner_type {
178 ADDRS_RADIO, /* Radio tuner addresses */ 201 ADDRS_RADIO,
179 ADDRS_DEMOD, /* Demod tuner addresses */ 202 ADDRS_DEMOD,
180 ADDRS_TV, /* TV tuner addresses */ 203 ADDRS_TV,
181 /* TV tuner addresses if demod is present, this excludes
182 addresses used by the demodulator from the list of
183 candidates. */
184 ADDRS_TV_WITH_DEMOD, 204 ADDRS_TV_WITH_DEMOD,
185}; 205};
186/* Return a list of I2C tuner addresses to probe. Use only if the tuner 206/**
187 addresses are unknown. */ 207 * v4l2_i2c_tuner_addrs - Return a list of I2C tuner addresses to probe.
208 *
209 * @type: type of the tuner to seek, as defined by
210 * &enum v4l2_i2c_tuner_type.
211 *
212 * NOTE: Use only if the tuner addresses are unknown.
213 */
188const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type); 214const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type);
189 215
190/* ------------------------------------------------------------------------- */ 216/* ------------------------------------------------------------------------- */
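A hedged usage sketch for the probe-address helper documented above, assuming the usual v4l2_i2c_new_subdev() helper that accepts a probe-address list; the demo_* wrapper is a placeholder:

/* Hedged sketch: assumes the caller already owns v4l2_dev and adapter. */
static struct v4l2_subdev *demo_probe_tuner(struct v4l2_device *v4l2_dev,
					    struct i2c_adapter *adapter)
{
	/* addr == 0 => fall back to the standard TV tuner probe addresses */
	return v4l2_i2c_new_subdev(v4l2_dev, adapter, "tuner", 0,
				   v4l2_i2c_tuner_addrs(ADDRS_TV));
}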
@@ -224,10 +250,14 @@ void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
224 250
225/* ------------------------------------------------------------------------- */ 251/* ------------------------------------------------------------------------- */
226 252
227/* Note: these remaining ioctls/structs should be removed as well, but they are 253/*
228 still used in tuner-simple.c (TUNER_SET_CONFIG), cx18/ivtv (RESET) and 254 * FIXME: these remaining ioctls/structs should be removed as well, but they
229 v4l2-int-device.h (v4l2_routing). To remove these ioctls some more cleanup 255 * are still used in tuner-simple.c (TUNER_SET_CONFIG) and cx18/ivtv (RESET).
230 is needed in those modules. */ 256 * To remove these ioctls some more cleanup is needed in those modules.
257 *
258 * It doesn't make much sense to document them, as what we really want is
259 * to get rid of them.
260 */
231 261
232/* s_config */ 262/* s_config */
233struct v4l2_priv_tun_config { 263struct v4l2_priv_tun_config {
@@ -236,32 +266,79 @@ struct v4l2_priv_tun_config {
236}; 266};
237#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) 267#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config)
238 268
239#define VIDIOC_INT_RESET _IOW ('d', 102, u32) 269#define VIDIOC_INT_RESET _IOW ('d', 102, u32)
240
241struct v4l2_routing {
242 u32 input;
243 u32 output;
244};
245 270
246/* ------------------------------------------------------------------------- */ 271/* ------------------------------------------------------------------------- */
247 272
248/* Miscellaneous helper functions */ 273/* Miscellaneous helper functions */
249 274
250void v4l_bound_align_image(unsigned int *w, unsigned int wmin, 275/**
276 * v4l_bound_align_image - adjust video dimensions according to
277 * the given constraints.
278 *
279 * @width: pointer to width that will be adjusted if needed.
280 * @wmin: minimum width.
281 * @wmax: maximum width.
282 * @walign: least significant bit on width.
283 * @height: pointer to height that will be adjusted if needed.
284 * @hmin: minimum height.
285 * @hmax: maximum height.
286 * @halign: least significant bit on height.
287 * @salign: least significant bit for the image size (e. g.
288 * :math:`width * height`).
289 *
290 * Clip an image to have @width between @wmin and @wmax, and @height between
291 * @hmin and @hmax, inclusive.
292 *
293 * Additionally, the @width will be a multiple of :math:`2^{walign}`,
294 * the @height will be a multiple of :math:`2^{halign}`, and the overall
295 * size :math:`width * height` will be a multiple of :math:`2^{salign}`.
296 *
297 * .. note::
298 *
299 * #. The clipping rectangle may be shrunk or enlarged to fit the alignment
300 * constraints.
301 * #. @wmax must not be smaller than @wmin.
302 * #. @hmax must not be smaller than @hmin.
303 * #. The alignments must not be so high there are no possible image
304 * sizes within the allowed bounds.
305 * #. @wmin and @hmin must be at least 1 (don't use 0).
306 * #. For @walign, @halign and @salign, if you don't care about a certain
307 * alignment, specify ``0``, as :math:`2^0 = 1` and one byte alignment
308 * is equivalent to no alignment.
309 * #. If you only want to adjust downward, specify a maximum that's the
310 * same as the initial value.
311 */
312void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
251 unsigned int wmax, unsigned int walign, 313 unsigned int wmax, unsigned int walign,
252 unsigned int *h, unsigned int hmin, 314 unsigned int *height, unsigned int hmin,
253 unsigned int hmax, unsigned int halign, 315 unsigned int hmax, unsigned int halign,
254 unsigned int salign); 316 unsigned int salign);
255 317
256struct v4l2_discrete_probe { 318/**
257 const struct v4l2_frmsize_discrete *sizes; 319 * v4l2_find_nearest_format - find the nearest format size among a discrete
258 int num_sizes; 320 * set of resolutions.
259}; 321 *
260 322 * @sizes: array of &struct v4l2_frmsize_discrete image sizes.
261const struct v4l2_frmsize_discrete *v4l2_find_nearest_format( 323 * @num_sizes: length of @sizes array.
262 const struct v4l2_discrete_probe *probe, 324 * @width: desired width.
263 s32 width, s32 height); 325 * @height: desired height.
326 *
327 * Finds the closest resolution to minimize the width and height differences
328 * between what was requested and the supported resolutions.
329 */
330const struct v4l2_frmsize_discrete *
331v4l2_find_nearest_format(const struct v4l2_frmsize_discrete *sizes,
332 const size_t num_sizes,
333 s32 width, s32 height);
264 334
335/**
336 * v4l2_get_timestamp - helper routine to get a timestamp to be used when
337 * filling streaming metadata. Internally, it uses ktime_get_ts(),
338 * which is the recommended way to get it.
339 *
340 * @tv: pointer to &struct timeval to be filled.
341 */
265void v4l2_get_timestamp(struct timeval *tv); 342void v4l2_get_timestamp(struct timeval *tv);
266 343
267#endif /* V4L2_COMMON_H_ */ 344#endif /* V4L2_COMMON_H_ */
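To make the alignment rules above concrete, here is a small hedged sketch of a try_fmt-style handler; the size limits and alignments are made-up values, not a real device's constraints:

/* Hedged sketch: the 48x32 .. 2048x1536 limits and alignments are illustrative. */
static void demo_try_fmt(struct v4l2_pix_format *pix)
{
	/* width a multiple of 4 (walign = 2), height a multiple of 2
	 * (halign = 1), no extra constraint on width * height (salign = 0)
	 */
	v4l_bound_align_image(&pix->width, 48, 2048, 2,
			      &pix->height, 32, 1536, 1, 0);
}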
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index dacfe54057f8..05ebb9ef9e73 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -166,8 +166,15 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
166 * empty strings ("") correspond to non-existing menu items (this 166 * empty strings ("") correspond to non-existing menu items (this
167 * is in addition to the menu_skip_mask above). The last entry 167 * is in addition to the menu_skip_mask above). The last entry
168 * must be NULL. 168 * must be NULL.
169 * Used only if the @type is %V4L2_CTRL_TYPE_MENU.
170 * @qmenu_int: A 64-bit integer array with integer menu items.
171 * The size of array must be equal to the menu size, e. g.:
172 * :math:`ceil(\frac{maximum - minimum}{step}) + 1`.
173 * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU.
169 * @flags: The control's flags. 174 * @flags: The control's flags.
170 * @cur: The control's current value. 175 * @cur: Structure to store the current value.
176 * @cur.val: The control's current value, if the @type is represented via
177 * a u32 integer (see &enum v4l2_ctrl_type).
171 * @val: The control's new s32 value. 178 * @val: The control's new s32 value.
172 * @priv: The control's private pointer. For use by the driver. It is 179 * @priv: The control's private pointer. For use by the driver. It is
173 * untouched by the control framework. Note that this pointer is 180 * untouched by the control framework. Note that this pointer is
@@ -1037,7 +1044,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
1037 * @file: pointer to struct file 1044 * @file: pointer to struct file
1038 * @wait: pointer to struct poll_table_struct 1045 * @wait: pointer to struct poll_table_struct
1039 */ 1046 */
1040unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); 1047__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
1041 1048
1042/* Helpers for ioctl_ops */ 1049/* Helpers for ioctl_ops */
1043 1050
@@ -1139,7 +1146,7 @@ int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
1139 1146
1140/** 1147/**
1141 * v4l2_ctrl_subdev_subscribe_event - Helper function to implement 1148 * v4l2_ctrl_subdev_subscribe_event - Helper function to implement
1142 * as a &struct v4l2_subdev_core_ops subscribe_event function 1149 * as a &struct v4l2_subdev_core_ops subscribe_event function
1143 * that just subscribes control events. 1150 * that just subscribes control events.
1144 * 1151 *
1145 * @sd: pointer to &struct v4l2_subdev 1152 * @sd: pointer to &struct v4l2_subdev
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 28a686eb7d09..53f32022fabe 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -21,31 +21,63 @@
21 21
22#define VIDEO_MAJOR 81 22#define VIDEO_MAJOR 81
23 23
24#define VFL_TYPE_GRABBER 0 24/**
25#define VFL_TYPE_VBI 1 25 * enum vfl_devnode_type - type of V4L2 device node
26#define VFL_TYPE_RADIO 2 26 *
27#define VFL_TYPE_SUBDEV 3 27 * @VFL_TYPE_GRABBER: for video input/output devices
28#define VFL_TYPE_SDR 4 28 * @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext)
29#define VFL_TYPE_TOUCH 5 29 * @VFL_TYPE_RADIO: for radio tuners
30#define VFL_TYPE_MAX 6 30 * @VFL_TYPE_SUBDEV: for V4L2 subdevices
31 31 * @VFL_TYPE_SDR: for Software Defined Radio tuners
32/* Is this a receiver, transmitter or mem-to-mem? */ 32 * @VFL_TYPE_TOUCH: for touch sensors
33/* Ignored for VFL_TYPE_SUBDEV. */ 33 */
34#define VFL_DIR_RX 0 34enum vfl_devnode_type {
35#define VFL_DIR_TX 1 35 VFL_TYPE_GRABBER = 0,
36#define VFL_DIR_M2M 2 36 VFL_TYPE_VBI = 1,
37 VFL_TYPE_RADIO = 2,
38 VFL_TYPE_SUBDEV = 3,
39 VFL_TYPE_SDR = 4,
40 VFL_TYPE_TOUCH = 5,
41};
42#define VFL_TYPE_MAX VFL_TYPE_TOUCH
43
44/**
45 * enum vfl_direction - Identifies if a &struct video_device corresponds
46 * to a receiver, a transmitter or a mem-to-mem device.
47 *
48 * @VFL_DIR_RX: device is a receiver.
49 * @VFL_DIR_TX: device is a transmitter.
50 * @VFL_DIR_M2M: device is a memory to memory device.
51 *
52 * Note: Ignored if &enum vfl_devnode_type is %VFL_TYPE_SUBDEV.
53 */
54enum vfl_devnode_direction {
55 VFL_DIR_RX,
56 VFL_DIR_TX,
57 VFL_DIR_M2M,
58};
37 59
38struct v4l2_ioctl_callbacks; 60struct v4l2_ioctl_callbacks;
39struct video_device; 61struct video_device;
40struct v4l2_device; 62struct v4l2_device;
41struct v4l2_ctrl_handler; 63struct v4l2_ctrl_handler;
42 64
43/* Flag to mark the video_device struct as registered. 65/**
44 Drivers can clear this flag if they want to block all future 66 * enum v4l2_video_device_flags - Flags used by &struct video_device
45 device access. It is cleared by video_unregister_device. */ 67 *
46#define V4L2_FL_REGISTERED (0) 68 * @V4L2_FL_REGISTERED:
47/* file->private_data points to struct v4l2_fh */ 69 * indicates that a &struct video_device is registered.
48#define V4L2_FL_USES_V4L2_FH (1) 70 * Drivers can clear this flag if they want to block all future
71 * device access. It is cleared by video_unregister_device.
72 * @V4L2_FL_USES_V4L2_FH:
73 * indicates that file->private_data points to &struct v4l2_fh.
74 * This flag is set by the core when v4l2_fh_init() is called.
75 * All new drivers should use it.
76 */
77enum v4l2_video_device_flags {
78 V4L2_FL_REGISTERED = 0,
79 V4L2_FL_USES_V4L2_FH = 1,
80};
49 81
50/* Priority helper functions */ 82/* Priority helper functions */
51 83
@@ -152,7 +184,7 @@ struct v4l2_file_operations {
152 struct module *owner; 184 struct module *owner;
153 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); 185 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
154 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); 186 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
155 unsigned int (*poll) (struct file *, struct poll_table_struct *); 187 __poll_t (*poll) (struct file *, struct poll_table_struct *);
156 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 188 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
157#ifdef CONFIG_COMPAT 189#ifdef CONFIG_COMPAT
158 long (*compat_ioctl32) (struct file *, unsigned int, unsigned long); 190 long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
@@ -166,7 +198,7 @@ struct v4l2_file_operations {
166 198
167/* 199/*
168 * Newer version of video_device, handled by videodev2.c 200 * Newer version of video_device, handled by videodev2.c
169 * This version moves redundant code from video device code to 201 * This version moves redundant code from video device code to
170 * the common handler 202 * the common handler
171 */ 203 */
172 204
@@ -189,11 +221,12 @@ struct v4l2_file_operations {
189 * @prio: pointer to &struct v4l2_prio_state with device's Priority state. 221 * @prio: pointer to &struct v4l2_prio_state with device's Priority state.
190 * If NULL, then v4l2_dev->prio will be used. 222 * If NULL, then v4l2_dev->prio will be used.
191 * @name: video device name 223 * @name: video device name
192 * @vfl_type: V4L device type 224 * @vfl_type: V4L device type, as defined by &enum vfl_devnode_type
193 * @vfl_dir: V4L receiver, transmitter or m2m 225 * @vfl_dir: V4L receiver, transmitter or m2m
194 * @minor: device node 'minor'. It is set to -1 if the registration failed 226 * @minor: device node 'minor'. It is set to -1 if the registration failed
195 * @num: number of the video device node 227 * @num: number of the video device node
196 * @flags: video device flags. Use bitops to set/clear/test flags 228 * @flags: video device flags. Use bitops to set/clear/test flags.
229 * Contains a set of &enum v4l2_video_device_flags.
197 * @index: attribute to differentiate multiple indices on one physical device 230 * @index: attribute to differentiate multiple indices on one physical device
198 * @fh_lock: Lock for all v4l2_fhs 231 * @fh_lock: Lock for all v4l2_fhs
199 * @fh_list: List of &struct v4l2_fh 232 * @fh_list: List of &struct v4l2_fh
@@ -237,8 +270,8 @@ struct video_device
237 270
238 /* device info */ 271 /* device info */
239 char name[32]; 272 char name[32];
240 int vfl_type; 273 enum vfl_devnode_type vfl_type;
241 int vfl_dir; 274 enum vfl_devnode_direction vfl_dir;
242 int minor; 275 int minor;
243 u16 num; 276 u16 num;
244 unsigned long flags; 277 unsigned long flags;
@@ -261,18 +294,30 @@ struct video_device
261 struct mutex *lock; 294 struct mutex *lock;
262}; 295};
263 296
264#define media_entity_to_video_device(__e) \ 297/**
265 container_of(__e, struct video_device, entity) 298 * media_entity_to_video_device - Returns a &struct video_device from
266/* dev to video-device */ 299 * the &struct media_entity embedded on it.
300 *
301 * @entity: pointer to &struct media_entity
302 */
303#define media_entity_to_video_device(entity) \
304 container_of(entity, struct video_device, entity)
305
306/**
307 * to_video_device - Returns a &struct video_device from the
308 * &struct device embedded on it.
309 *
310 * @cd: pointer to &struct device
311 */
267#define to_video_device(cd) container_of(cd, struct video_device, dev) 312#define to_video_device(cd) container_of(cd, struct video_device, dev)
268 313
269/** 314/**
270 * __video_register_device - register video4linux devices 315 * __video_register_device - register video4linux devices
271 * 316 *
272 * @vdev: struct video_device to register 317 * @vdev: struct video_device to register
273 * @type: type of device to register 318 * @type: type of device to register, as defined by &enum vfl_devnode_type
274 * @nr: which device node number is desired: 319 * @nr: which device node number is desired:
275 * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) 320 * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
276 * @warn_if_nr_in_use: warn if the desired device node number 321 * @warn_if_nr_in_use: warn if the desired device node number
277 * was already in use and another number was chosen instead. 322 * was already in use and another number was chosen instead.
278 * @owner: module that owns the video device node 323 * @owner: module that owns the video device node
@@ -289,43 +334,37 @@ struct video_device
289 * 334 *
290 * Returns 0 on success. 335 * Returns 0 on success.
291 * 336 *
292 * Valid values for @type are:
293 *
294 * - %VFL_TYPE_GRABBER - A frame grabber
295 * - %VFL_TYPE_VBI - Vertical blank data (undecoded)
296 * - %VFL_TYPE_RADIO - A radio card
297 * - %VFL_TYPE_SUBDEV - A subdevice
298 * - %VFL_TYPE_SDR - Software Defined Radio
299 * - %VFL_TYPE_TOUCH - A touch sensor
300 *
301 * .. note:: 337 * .. note::
302 * 338 *
303 * This function is meant to be used only inside the V4L2 core. 339 * This function is meant to be used only inside the V4L2 core.
304 * Drivers should use video_register_device() or 340 * Drivers should use video_register_device() or
305 * video_register_device_no_warn(). 341 * video_register_device_no_warn().
306 */ 342 */
307int __must_check __video_register_device(struct video_device *vdev, int type, 343int __must_check __video_register_device(struct video_device *vdev,
308 int nr, int warn_if_nr_in_use, struct module *owner); 344 enum vfl_devnode_type type,
345 int nr, int warn_if_nr_in_use,
346 struct module *owner);
309 347
310/** 348/**
311 * video_register_device - register video4linux devices 349 * video_register_device - register video4linux devices
312 * 350 *
313 * @vdev: struct video_device to register 351 * @vdev: struct video_device to register
314 * @type: type of device to register 352 * @type: type of device to register, as defined by &enum vfl_devnode_type
315 * @nr: which device node number is desired: 353 * @nr: which device node number is desired:
316 * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) 354 * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
317 * 355 *
318 * Internally, it calls __video_register_device(). Please see its 356 * Internally, it calls __video_register_device(). Please see its
319 * documentation for more details. 357 * documentation for more details.
320 * 358 *
321 * .. note:: 359 * .. note::
322 * if video_register_device fails, the release() callback of 360 * if video_register_device fails, the release() callback of
323 * &struct video_device structure is *not* called, so the caller 361 * &struct video_device structure is *not* called, so the caller
324 * is responsible for freeing any data. Usually that means that 362 * is responsible for freeing any data. Usually that means that
325 * video_device_release() should be called on failure.               363 * video_device_release() should be called on failure.
326 */ 364 */
327static inline int __must_check video_register_device(struct video_device *vdev, 365static inline int __must_check video_register_device(struct video_device *vdev,
328 int type, int nr) 366 enum vfl_devnode_type type,
367 int nr)
329{ 368{
330 return __video_register_device(vdev, type, nr, 1, vdev->fops->owner); 369 return __video_register_device(vdev, type, nr, 1, vdev->fops->owner);
331} 370}
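A hedged registration sketch showing the error path described in the note above; VFL_TYPE_GRABBER is assumed to be one of the &enum vfl_devnode_type values and my_register() is an illustrative helper:

        #include <media/v4l2-dev.h>

        static int my_register(struct video_device *vdev)
        {
                int err;

                /* -1: let the core pick the first free /dev/videoX node */
                err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
                if (err) {
                        /* release() is *not* called on failure, so free it here */
                        video_device_release(vdev);
                        return err;
                }
                return 0;
        }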
@@ -334,9 +373,9 @@ static inline int __must_check video_register_device(struct video_device *vdev,
334 * video_register_device_no_warn - register video4linux devices 373 * video_register_device_no_warn - register video4linux devices
335 * 374 *
336 * @vdev: struct video_device to register 375 * @vdev: struct video_device to register
337 * @type: type of device to register 376 * @type: type of device to register, as defined by &enum vfl_devnode_type
338 * @nr: which device node number is desired: 377 * @nr: which device node number is desired:
339 * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) 378 * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free)
340 * 379 *
341 * This function is identical to video_register_device() except that no 380 * This function is identical to video_register_device() except that no
342 * warning is issued if the desired device node number was already in use. 381 * warning is issued if the desired device node number was already in use.
@@ -345,13 +384,14 @@ static inline int __must_check video_register_device(struct video_device *vdev,
345 * documentation for more details. 384 * documentation for more details.
346 * 385 *
347 * .. note:: 386 * .. note::
348 * if video_register_device fails, the release() callback of 387 * if video_register_device fails, the release() callback of
349 * &struct video_device structure is *not* called, so the caller 388 * &struct video_device structure is *not* called, so the caller
350 * is responsible for freeing any data. Usually that means that 389 * is responsible for freeing any data. Usually that means that
351 * video_device_release() should be called on failure.               390 * video_device_release() should be called on failure.
352 */ 391 */
353static inline int __must_check video_register_device_no_warn( 392static inline int __must_check
354 struct video_device *vdev, int type, int nr) 393video_register_device_no_warn(struct video_device *vdev,
394 enum vfl_devnode_type type, int nr)
355{ 395{
356 return __video_register_device(vdev, type, nr, 0, vdev->fops->owner); 396 return __video_register_device(vdev, type, nr, 0, vdev->fops->owner);
357} 397}
@@ -383,7 +423,7 @@ void video_device_release(struct video_device *vdev);
383 423
384/** 424/**
385 * video_device_release_empty - helper function to implement the 425 * video_device_release_empty - helper function to implement the
386 * video_device->release\(\) callback. 426 * video_device->release\(\) callback.
387 * 427 *
388 * @vdev: pointer to &struct video_device 428 * @vdev: pointer to &struct video_device
389 * 429 *
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 8ffa94009d1a..0c9e4da55499 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -38,7 +38,7 @@ struct v4l2_ctrl_handler;
38 * @lock: lock this struct; can be used by the driver as well 38 * @lock: lock this struct; can be used by the driver as well
39 * if this struct is embedded into a larger struct. 39 * if this struct is embedded into a larger struct.
40 * @name: unique device name, by default the driver name + bus ID 40 * @name: unique device name, by default the driver name + bus ID
41 * @notify: notify callback called by some sub-devices. 41 * @notify: notify operation called by some sub-devices.
42 * @ctrl_handler: The control handler. May be %NULL. 42 * @ctrl_handler: The control handler. May be %NULL.
43 * @prio: Device's priority state 43 * @prio: Device's priority state
44 * @ref: Keep track of the references to this struct. 44 * @ref: Keep track of the references to this struct.
@@ -56,7 +56,6 @@ struct v4l2_ctrl_handler;
56 * #) @dev->driver_data points to this struct. 56 * #) @dev->driver_data points to this struct.
57 * #) @dev might be %NULL if there is no parent device 57 * #) @dev might be %NULL if there is no parent device
58 */ 58 */
59
60struct v4l2_device { 59struct v4l2_device {
61 struct device *dev; 60 struct device *dev;
62#if defined(CONFIG_MEDIA_CONTROLLER) 61#if defined(CONFIG_MEDIA_CONTROLLER)
@@ -166,7 +165,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev);
166 * v4l2_device_register_subdev - Registers a subdev with a v4l2 device. 165 * v4l2_device_register_subdev - Registers a subdev with a v4l2 device.
167 * 166 *
168 * @v4l2_dev: pointer to struct &v4l2_device 167 * @v4l2_dev: pointer to struct &v4l2_device
169 * @sd: pointer to struct &v4l2_subdev 168 * @sd: pointer to &struct v4l2_subdev
170 * 169 *
171 * While registered, the subdev module is marked as in-use. 170 * While registered, the subdev module is marked as in-use.
172 * 171 *
@@ -179,7 +178,7 @@ int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
179/** 178/**
180 * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device. 179 * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device.
181 * 180 *
182 * @sd: pointer to struct &v4l2_subdev 181 * @sd: pointer to &struct v4l2_subdev
183 * 182 *
184 * .. note :: 183 * .. note ::
185 * 184 *
@@ -201,7 +200,7 @@ v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev);
201/** 200/**
202 * v4l2_subdev_notify - Sends a notification to v4l2_device. 201 * v4l2_subdev_notify - Sends a notification to v4l2_device.
203 * 202 *
204 * @sd: pointer to struct &v4l2_subdev 203 * @sd: pointer to &struct v4l2_subdev
205 * @notification: type of notification. Please notice that the notification 204 * @notification: type of notification. Please notice that the notification
206 * type is driver-specific. 205 * type is driver-specific.
207 * @arg: arguments for the notification. Those are specific to each 206 * @arg: arguments for the notification. Those are specific to each
@@ -214,13 +213,43 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
214 sd->v4l2_dev->notify(sd, notification, arg); 213 sd->v4l2_dev->notify(sd, notification, arg);
215} 214}
216 215
217/* Iterate over all subdevs. */ 216/* Helper macros to iterate over all subdevs. */
217
218/**
 219 * v4l2_device_for_each_subdev - Helper macro that iterates over all
220 * sub-devices of a given &v4l2_device.
221 *
 222 * @sd: &struct v4l2_subdev pointer used as the loop iterator; the
 223 * macro sets it to each sub-device in turn.
224 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
225 *
226 * This macro iterates over all sub-devices owned by the @v4l2_dev device.
227 * It acts as a for loop iterator and executes the next statement with
228 * the @sd variable pointing to each sub-device in turn.
229 */
218#define v4l2_device_for_each_subdev(sd, v4l2_dev) \ 230#define v4l2_device_for_each_subdev(sd, v4l2_dev) \
219 list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) 231 list_for_each_entry(sd, &(v4l2_dev)->subdevs, list)
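For illustration, a minimal use of the iterator, assuming v4l2_dev points to an already registered &struct v4l2_device; the body only logs each sub-device name:

        struct v4l2_subdev *sd;

        v4l2_device_for_each_subdev(sd, v4l2_dev)
                pr_info("found sub-device %s\n", sd->name);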
220 232
221/* Call the specified callback for all subdevs matching the condition. 233/**
222 Ignore any errors. Note that you cannot add or delete a subdev 234 * __v4l2_device_call_subdevs_p - Calls the specified operation for
223 while walking the subdevs list. */ 235 * all subdevs matching the condition.
236 *
237 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
 238 * @sd: &struct v4l2_subdev pointer used as the loop iterator; the
 239 * macro sets it to each sub-device in turn.
 240 * @cond: condition to be matched
241 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
242 * Each element there groups a set of operations functions.
243 * @f: operation function that will be called if @cond matches.
244 * The operation functions are defined in groups, according to
245 * each element at &struct v4l2_subdev_ops.
246 * @args...: arguments for @f.
247 *
248 * Ignore any errors.
249 *
250 * Note: subdevs cannot be added or deleted while walking
251 * the subdevs list.
252 */
224#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \ 253#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \
225 do { \ 254 do { \
226 list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \ 255 list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \
@@ -228,6 +257,24 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
228 (sd)->ops->o->f((sd) , ##args); \ 257 (sd)->ops->o->f((sd) , ##args); \
229 } while (0) 258 } while (0)
230 259
260/**
261 * __v4l2_device_call_subdevs - Calls the specified operation for
262 * all subdevs matching the condition.
263 *
264 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
 265 * @cond: condition to be matched
266 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
267 * Each element there groups a set of operations functions.
268 * @f: operation function that will be called if @cond matches.
269 * The operation functions are defined in groups, according to
270 * each element at &struct v4l2_subdev_ops.
271 * @args...: arguments for @f.
272 *
273 * Ignore any errors.
274 *
275 * Note: subdevs cannot be added or deleted while walking
276 * the subdevs list.
277 */
231#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \ 278#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \
232 do { \ 279 do { \
233 struct v4l2_subdev *__sd; \ 280 struct v4l2_subdev *__sd; \
@@ -236,10 +283,30 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
236 f , ##args); \ 283 f , ##args); \
237 } while (0) 284 } while (0)
238 285
239/* Call the specified callback for all subdevs matching the condition. 286/**
240 If the callback returns an error other than 0 or -ENOIOCTLCMD, then 287 * __v4l2_device_call_subdevs_until_err_p - Calls the specified operation for
241 return with that error code. Note that you cannot add or delete a 288 * all subdevs matching the condition.
242 subdev while walking the subdevs list. */ 289 *
290 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
 291 * @sd: &struct v4l2_subdev pointer used as the loop iterator; the
 292 * macro sets it to each sub-device owned by @v4l2_dev in turn.
 293 * @cond: condition to be matched
294 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
295 * Each element there groups a set of operations functions.
296 * @f: operation function that will be called if @cond matches.
297 * The operation functions are defined in groups, according to
298 * each element at &struct v4l2_subdev_ops.
299 * @args...: arguments for @f.
300 *
301 * Return:
302 *
303 * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
304 * for any subdevice, then abort and return with that error code, zero
305 * otherwise.
306 *
307 * Note: subdevs cannot be added or deleted while walking
308 * the subdevs list.
309 */
243#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \ 310#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \
244({ \ 311({ \
245 long __err = 0; \ 312 long __err = 0; \
@@ -253,6 +320,28 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
253 (__err == -ENOIOCTLCMD) ? 0 : __err; \ 320 (__err == -ENOIOCTLCMD) ? 0 : __err; \
254}) 321})
255 322
323/**
324 * __v4l2_device_call_subdevs_until_err - Calls the specified operation for
325 * all subdevs matching the condition.
326 *
327 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
 328 * @cond: condition to be matched
329 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
330 * Each element there groups a set of operations functions.
331 * @f: operation function that will be called if @cond matches.
332 * The operation functions are defined in groups, according to
333 * each element at &struct v4l2_subdev_ops.
334 * @args...: arguments for @f.
335 *
336 * Return:
337 *
338 * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
339 * for any subdevice, then abort and return with that error code,
340 * zero otherwise.
341 *
342 * Note: subdevs cannot be added or deleted while walking
343 * the subdevs list.
344 */
256#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \ 345#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \
257({ \ 346({ \
258 struct v4l2_subdev *__sd; \ 347 struct v4l2_subdev *__sd; \
@@ -260,9 +349,26 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
260 f , ##args); \ 349 f , ##args); \
261}) 350})
262 351
263/* Call the specified callback for all subdevs matching grp_id (if 0, then 352/**
264 match them all). Ignore any errors. Note that you cannot add or delete 353 * v4l2_device_call_all - Calls the specified operation for
265 a subdev while walking the subdevs list. */ 354 * all subdevs matching the &v4l2_subdev.grp_id, as assigned
355 * by the bridge driver.
356 *
357 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
358 * @grpid: &struct v4l2_subdev->grp_id group ID to match.
359 * Use 0 to match them all.
360 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
361 * Each element there groups a set of operations functions.
 362 * @f: operation function called on sub-devices matching @grpid.
363 * The operation functions are defined in groups, according to
364 * each element at &struct v4l2_subdev_ops.
365 * @args...: arguments for @f.
366 *
367 * Ignore any errors.
368 *
369 * Note: subdevs cannot be added or deleted while walking
370 * the subdevs list.
371 */
266#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \ 372#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \
267 do { \ 373 do { \
268 struct v4l2_subdev *__sd; \ 374 struct v4l2_subdev *__sd; \
@@ -272,10 +378,30 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
272 ##args); \ 378 ##args); \
273 } while (0) 379 } while (0)
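An illustrative call of the group-ID variant, assuming the video ops group and its s_stream operation from &struct v4l2_subdev_video_ops; group ID 0 matches every sub-device:

        /* Ask every sub-device to start streaming; individual errors are ignored. */
        v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 1);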
274 380
275/* Call the specified callback for all subdevs matching grp_id (if 0, then 381/**
276 match them all). If the callback returns an error other than 0 or 382 * v4l2_device_call_until_err - Calls the specified operation for
277 -ENOIOCTLCMD, then return with that error code. Note that you cannot 383 * all subdevs matching the &v4l2_subdev.grp_id, as assigned
278 add or delete a subdev while walking the subdevs list. */ 384 * by the bridge driver, until an error occurs.
385 *
386 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
387 * @grpid: &struct v4l2_subdev->grp_id group ID to match.
388 * Use 0 to match them all.
389 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
390 * Each element there groups a set of operations functions.
 391 * @f: operation function called on sub-devices matching @grpid.
392 * The operation functions are defined in groups, according to
393 * each element at &struct v4l2_subdev_ops.
394 * @args...: arguments for @f.
395 *
396 * Return:
397 *
398 * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
399 * for any subdevice, then abort and return with that error code,
400 * zero otherwise.
401 *
402 * Note: subdevs cannot be added or deleted while walking
403 * the subdevs list.
404 */
279#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \ 405#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \
280({ \ 406({ \
281 struct v4l2_subdev *__sd; \ 407 struct v4l2_subdev *__sd; \
@@ -284,10 +410,24 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
284 ##args); \ 410 ##args); \
285}) 411})
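The same call through the error-checking variant, as an illustrative sketch; -ENOIOCTLCMD from sub-devices that do not implement the op is treated as success:

        int err;

        err = v4l2_device_call_until_err(v4l2_dev, 0, video, s_stream, 1);
        if (err)
                return err;     /* first real error aborts the walk */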
286 412
287/* 413/**
288 * Call the specified callback for all subdevs where grp_id & grpmsk != 0 414 * v4l2_device_mask_call_all - Calls the specified operation for
289 * (if grpmsk == 0, then match them all). Ignore any errors. Note that you    415 * all subdevices where a group ID matches a specified bitmask.
290 * cannot add or delete a subdev while walking the subdevs list. 416 *
417 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
418 * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id
419 * group ID to be matched. Use 0 to match them all.
420 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
421 * Each element there groups a set of operations functions.
 422 * @f: operation function called on sub-devices matching @grpmsk.
423 * The operation functions are defined in groups, according to
424 * each element at &struct v4l2_subdev_ops.
425 * @args...: arguments for @f.
426 *
427 * Ignore any errors.
428 *
429 * Note: subdevs cannot be added or deleted while walking
430 * the subdevs list.
291 */ 431 */
292#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) \ 432#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) \
293 do { \ 433 do { \
@@ -298,11 +438,28 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
298 ##args); \ 438 ##args); \
299 } while (0) 439 } while (0)
300 440
301/* 441/**
302 * Call the specified callback for all subdevs where grp_id & grpmsk != 0 442 * v4l2_device_mask_call_until_err - Calls the specified operation for
303 * (if grpmsk == 0, then match them all). If the callback returns an error 443 * all subdevices where a group ID matches a specified bitmask.
304 * other than 0 or %-ENOIOCTLCMD, then return with that error code. Note that 444 *
305 * you cannot add or delete a subdev while walking the subdevs list. 445 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
446 * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id
447 * group ID to be matched. Use 0 to match them all.
448 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
449 * Each element there groups a set of operations functions.
 450 * @f: operation function called on sub-devices matching @grpmsk.
451 * The operation functions are defined in groups, according to
452 * each element at &struct v4l2_subdev_ops.
453 * @args...: arguments for @f.
454 *
455 * Return:
456 *
457 * If the operation returns an error other than 0 or ``-ENOIOCTLCMD``
458 * for any subdevice, then abort and return with that error code,
459 * zero otherwise.
460 *
461 * Note: subdevs cannot be added or deleted while walking
462 * the subdevs list.
306 */ 463 */
307#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \ 464#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \
308({ \ 465({ \
@@ -312,9 +469,19 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
312 ##args); \ 469 ##args); \
313}) 470})
314 471
315/* 472
316 * Does any subdev with matching grpid (or all if grpid == 0) has the given 473/**
317 * op? 474 * v4l2_device_has_op - checks if any subdev with matching grpid has a
475 * given ops.
476 *
477 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
478 * @grpid: &struct v4l2_subdev->grp_id group ID to match.
479 * Use 0 to match them all.
480 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
481 * Each element there groups a set of operations functions.
 482 * @f: operation function whose presence is checked.
483 * The operation functions are defined in groups, according to
484 * each element at &struct v4l2_subdev_ops.
318 */ 485 */
319#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \ 486#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \
320({ \ 487({ \
@@ -331,9 +498,18 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
331 __result; \ 498 __result; \
332}) 499})
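An illustrative combination with the call helpers above, assuming the querystd operation from &struct v4l2_subdev_video_ops:

        v4l2_std_id std = V4L2_STD_UNKNOWN;
        int err = 0;

        /* Only walk the subdevs if at least one of them implements querystd. */
        if (v4l2_device_has_op(v4l2_dev, 0, video, querystd))
                err = v4l2_device_call_until_err(v4l2_dev, 0, video,
                                                 querystd, &std);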
333 500
334/* 501/**
335 * Does any subdev with matching grpmsk (or all if grpmsk == 0) has the given 502 * v4l2_device_mask_has_op - checks if any subdev with matching group
336 * op? 503 * mask has a given ops.
504 *
505 * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over.
506 * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id
507 * group ID to be matched. Use 0 to match them all.
508 * @o: name of the element at &struct v4l2_subdev_ops that contains @f.
509 * Each element there groups a set of operations functions.
 510 * @f: operation function whose presence is checked.
511 * The operation functions are defined in groups, according to
512 * each element at &struct v4l2_subdev_ops.
337 */ 513 */
338#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \ 514#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \
339({ \ 515({ \
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 61a18893e004..ebf00e07a515 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -203,13 +203,15 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait);
203 */ 203 */
204struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t); 204struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t);
205 205
206/* 206/**
207 * reduce_fps - check if conditions for reduced fps are true. 207 * can_reduce_fps - check if conditions for reduced fps are true.
208 * bt - v4l2 timing structure 208 * @bt: v4l2 timing structure
209 * For different timings reduced fps is allowed if following conditions 209 *
210 * are met - 210 * For different timings reduced fps is allowed if the following conditions
211 * For CVT timings: if reduced blanking v2 (vsync == 8) is true. 211 * are met:
212 * For CEA861 timings: if V4L2_DV_FL_CAN_REDUCE_FPS flag is true. 212 *
213 * - For CVT timings: if reduced blanking v2 (vsync == 8) is true.
214 * - For CEA861 timings: if %V4L2_DV_FL_CAN_REDUCE_FPS flag is true.
213 */ 215 */
214static inline bool can_reduce_fps(struct v4l2_bt_timings *bt) 216static inline bool can_reduce_fps(struct v4l2_bt_timings *bt)
215{ 217{
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 6741910c3a18..17833e886e11 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -24,40 +24,6 @@
24#include <linux/videodev2.h> 24#include <linux/videodev2.h>
25#include <linux/wait.h> 25#include <linux/wait.h>
26 26
27/*
28 * Overview:
29 *
30 * Events are subscribed per-filehandle. An event specification consists of a
31 * type and is optionally associated with an object identified through the
32 * 'id' field. So an event is uniquely identified by the (type, id) tuple.
33 *
34 * The v4l2-fh struct has a list of subscribed events. The v4l2_subscribed_event
35 * struct is added to that list, one for every subscribed event.
36 *
37 * Each v4l2_subscribed_event struct ends with an array of v4l2_kevent structs.
38 * This array (ringbuffer, really) is used to store any events raised by the
39 * driver. The v4l2_kevent struct links into the 'available' list of the
40 * v4l2_fh struct so VIDIOC_DQEVENT will know which event to dequeue first.
41 *
42 * Finally, if the event subscription is associated with a particular object
43 * such as a V4L2 control, then that object needs to know about that as well
44 * so that an event can be raised by that object. So the 'node' field can
45 * be used to link the v4l2_subscribed_event struct into a list of that
46 * object.
47 *
48 * So to summarize:
49 *
50 * struct v4l2_fh has two lists: one of the subscribed events, and one of the
51 * pending events.
52 *
53 * struct v4l2_subscribed_event has a ringbuffer of raised (pending) events of
54 * that particular type.
55 *
56 * If struct v4l2_subscribed_event is associated with a specific object, then
57 * that object will have an internal list of struct v4l2_subscribed_event so
58 * it knows who subscribed an event to that object.
59 */
60
61struct v4l2_fh; 27struct v4l2_fh;
62struct v4l2_subdev; 28struct v4l2_subdev;
63struct v4l2_subscribed_event; 29struct v4l2_subscribed_event;
@@ -218,7 +184,7 @@ int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd,
218 struct v4l2_event_subscription *sub); 184 struct v4l2_event_subscription *sub);
219/** 185/**
220 * v4l2_src_change_event_subscribe - helper function that calls 186 * v4l2_src_change_event_subscribe - helper function that calls
221 * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE. 187 * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE.
222 * 188 *
223 * @fh: pointer to struct v4l2_fh 189 * @fh: pointer to struct v4l2_fh
224 * @sub: pointer to &struct v4l2_event_subscription 190 * @sub: pointer to &struct v4l2_event_subscription
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 5c1d50f78e12..0a5e4518ca11 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -91,12 +91,24 @@ struct v4l2_flash {
91 struct v4l2_ctrl **ctrls; 91 struct v4l2_ctrl **ctrls;
92}; 92};
93 93
94/**
95 * v4l2_subdev_to_v4l2_flash - Returns a &struct v4l2_flash from the
96 * &struct v4l2_subdev embedded on it.
97 *
98 * @sd: pointer to &struct v4l2_subdev
99 */
94static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash( 100static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash(
95 struct v4l2_subdev *sd) 101 struct v4l2_subdev *sd)
96{ 102{
97 return container_of(sd, struct v4l2_flash, sd); 103 return container_of(sd, struct v4l2_flash, sd);
98} 104}
99 105
106/**
107 * v4l2_ctrl_to_v4l2_flash - Returns a &struct v4l2_flash from the
108 * &struct v4l2_ctrl embedded on it.
109 *
110 * @c: pointer to &struct v4l2_ctrl
111 */
100static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c) 112static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
101{ 113{
102 return container_of(c->handler, struct v4l2_flash, hdl); 114 return container_of(c->handler, struct v4l2_flash, hdl);
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index b5b465677d28..c228ec1c77cf 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -81,7 +81,17 @@ struct v4l2_fwnode_bus_mipi_csi1 {
81 * struct v4l2_fwnode_endpoint - the endpoint data structure 81 * struct v4l2_fwnode_endpoint - the endpoint data structure
82 * @base: fwnode endpoint of the v4l2_fwnode 82 * @base: fwnode endpoint of the v4l2_fwnode
83 * @bus_type: bus type 83 * @bus_type: bus type
84 * @bus: bus configuration data structure 84 * @bus: union with bus configuration data structure
85 * @bus.parallel: embedded &struct v4l2_fwnode_bus_parallel.
86 * Used if the bus is parallel.
87 * @bus.mipi_csi1: embedded &struct v4l2_fwnode_bus_mipi_csi1.
88 * Used if the bus is MIPI Alliance's Camera Serial
89 * Interface version 1 (MIPI CSI1) or Standard
90 * Mobile Imaging Architecture's Compact Camera Port 2
91 * (SMIA CCP2).
92 * @bus.mipi_csi2: embedded &struct v4l2_fwnode_bus_mipi_csi2.
93 * Used if the bus is MIPI Alliance's Camera Serial
94 * Interface version 2 (MIPI CSI2).
85 * @link_frequencies: array of supported link frequencies 95 * @link_frequencies: array of supported link frequencies
 86 * @nr_of_link_frequencies: number of elements in link_frequencies array      96 * @nr_of_link_frequencies: number of elements in link_frequencies array
87 */ 97 */
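A hedged sketch of picking the right union member after parsing an endpoint; v4l2_fwnode_endpoint_parse(), the V4L2_MBUS_CSI2 bus type and the num_data_lanes field are assumptions drawn from this header's API at the time, and my_parse_ep() is illustrative:

        #include <media/v4l2-fwnode.h>

        static int my_parse_ep(struct fwnode_handle *fwnode, unsigned int *lanes)
        {
                struct v4l2_fwnode_endpoint ep;
                int ret;

                ret = v4l2_fwnode_endpoint_parse(fwnode, &ep);
                if (ret)
                        return ret;

                /* serial bus: lane count lives in the mipi_csi2 union member */
                if (ep.bus_type == V4L2_MBUS_CSI2)
                        *lanes = ep.bus.mipi_csi2.num_data_lanes;
                return 0;
        }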
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 93f8afcb7a22..4d8626c468bc 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -12,6 +12,8 @@
12#define V4L2_MEDIABUS_H 12#define V4L2_MEDIABUS_H
13 13
14#include <linux/v4l2-mediabus.h> 14#include <linux/v4l2-mediabus.h>
15#include <linux/bitops.h>
16
15 17
16/* Parallel flags */ 18/* Parallel flags */
17/* 19/*
@@ -20,44 +22,44 @@
20 * horizontal and vertical synchronisation. In "Slave mode" the host is 22 * horizontal and vertical synchronisation. In "Slave mode" the host is
21 * providing these signals to the slave. 23 * providing these signals to the slave.
22 */ 24 */
23#define V4L2_MBUS_MASTER (1 << 0) 25#define V4L2_MBUS_MASTER BIT(0)
24#define V4L2_MBUS_SLAVE (1 << 1) 26#define V4L2_MBUS_SLAVE BIT(1)
25/* 27/*
26 * Signal polarity flags 28 * Signal polarity flags
27 * Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused 29 * Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused
28 * V4L2_MBUS_[HV]SYNC* flags should be also used for specifying 30 * V4L2_MBUS_[HV]SYNC* flags should be also used for specifying
29 * configuration of hardware that uses [HV]REF signals 31 * configuration of hardware that uses [HV]REF signals
30 */ 32 */
31#define V4L2_MBUS_HSYNC_ACTIVE_HIGH (1 << 2) 33#define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2)
32#define V4L2_MBUS_HSYNC_ACTIVE_LOW (1 << 3) 34#define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3)
33#define V4L2_MBUS_VSYNC_ACTIVE_HIGH (1 << 4) 35#define V4L2_MBUS_VSYNC_ACTIVE_HIGH BIT(4)
34#define V4L2_MBUS_VSYNC_ACTIVE_LOW (1 << 5) 36#define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5)
35#define V4L2_MBUS_PCLK_SAMPLE_RISING (1 << 6) 37#define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6)
36#define V4L2_MBUS_PCLK_SAMPLE_FALLING (1 << 7) 38#define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7)
37#define V4L2_MBUS_DATA_ACTIVE_HIGH (1 << 8) 39#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(8)
38#define V4L2_MBUS_DATA_ACTIVE_LOW (1 << 9) 40#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(9)
39/* FIELD = 0/1 - Field1 (odd)/Field2 (even) */ 41/* FIELD = 0/1 - Field1 (odd)/Field2 (even) */
40#define V4L2_MBUS_FIELD_EVEN_HIGH (1 << 10) 42#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(10)
41/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */ 43/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */
42#define V4L2_MBUS_FIELD_EVEN_LOW (1 << 11) 44#define V4L2_MBUS_FIELD_EVEN_LOW BIT(11)
43/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */ 45/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
44#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH (1 << 12) 46#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12)
45#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW (1 << 13) 47#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13)
46 48
47/* Serial flags */ 49/* Serial flags */
48/* How many lanes the client can use */ 50/* How many lanes the client can use */
49#define V4L2_MBUS_CSI2_1_LANE (1 << 0) 51#define V4L2_MBUS_CSI2_1_LANE BIT(0)
50#define V4L2_MBUS_CSI2_2_LANE (1 << 1) 52#define V4L2_MBUS_CSI2_2_LANE BIT(1)
51#define V4L2_MBUS_CSI2_3_LANE (1 << 2) 53#define V4L2_MBUS_CSI2_3_LANE BIT(2)
52#define V4L2_MBUS_CSI2_4_LANE (1 << 3) 54#define V4L2_MBUS_CSI2_4_LANE BIT(3)
53/* On which channels it can send video data */ 55/* On which channels it can send video data */
54#define V4L2_MBUS_CSI2_CHANNEL_0 (1 << 4) 56#define V4L2_MBUS_CSI2_CHANNEL_0 BIT(4)
55#define V4L2_MBUS_CSI2_CHANNEL_1 (1 << 5) 57#define V4L2_MBUS_CSI2_CHANNEL_1 BIT(5)
56#define V4L2_MBUS_CSI2_CHANNEL_2 (1 << 6) 58#define V4L2_MBUS_CSI2_CHANNEL_2 BIT(6)
57#define V4L2_MBUS_CSI2_CHANNEL_3 (1 << 7) 59#define V4L2_MBUS_CSI2_CHANNEL_3 BIT(7)
58/* Does it support only continuous or also non-continuous clock mode */ 60/* Does it support only continuous or also non-continuous clock mode */
59#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK (1 << 8) 61#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK BIT(8)
60#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK (1 << 9) 62#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(9)
61 63
62#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | V4L2_MBUS_CSI2_2_LANE | \ 64#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | V4L2_MBUS_CSI2_2_LANE | \
63 V4L2_MBUS_CSI2_3_LANE | V4L2_MBUS_CSI2_4_LANE) 65 V4L2_MBUS_CSI2_3_LANE | V4L2_MBUS_CSI2_4_LANE)
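The switch to BIT() is mechanical: BIT(n) expands to (1 << n) for these bit positions, so existing mask tests against &struct v4l2_mbus_config flags keep working. An illustrative check:

        #include <media/v4l2-mediabus.h>

        static bool can_use_two_lanes(const struct v4l2_mbus_config *cfg)
        {
                /* flags is a plain bitmask; BIT(1) == V4L2_MBUS_CSI2_2_LANE */
                return cfg->flags & V4L2_MBUS_CSI2_2_LANE;
        }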
@@ -91,6 +93,13 @@ struct v4l2_mbus_config {
91 unsigned int flags; 93 unsigned int flags;
92}; 94};
93 95
96/**
 97 * v4l2_fill_pix_format - Ancillary routine that fills the fields of a
 98 * &struct v4l2_pix_format from a &struct v4l2_mbus_framefmt.
99 *
100 * @pix_fmt: pointer to &struct v4l2_pix_format to be filled
101 * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model
102 */
94static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, 103static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
95 const struct v4l2_mbus_framefmt *mbus_fmt) 104 const struct v4l2_mbus_framefmt *mbus_fmt)
96{ 105{
@@ -103,6 +112,15 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
103 pix_fmt->xfer_func = mbus_fmt->xfer_func; 112 pix_fmt->xfer_func = mbus_fmt->xfer_func;
104} 113}
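A hedged sketch of the round trip this helper and the companion v4l2_fill_mbus_format() documented just below support in a bridge driver; MEDIA_BUS_FMT_UYVY8_2X8 is only an example of a media bus code:

        struct v4l2_mbus_framefmt mbus_fmt = { };
        struct v4l2_pix_format pix = { .width = 1280, .height = 720 };

        /* describe the userspace pixel format on the media bus ... */
        v4l2_fill_mbus_format(&mbus_fmt, &pix, MEDIA_BUS_FMT_UYVY8_2X8);
        /* ... let the sub-device adjust mbus_fmt via set_fmt, then ... */
        /* ... propagate whatever was accepted back to the pixel format */
        v4l2_fill_pix_format(&pix, &mbus_fmt);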
105 114
115/**
 116 * v4l2_fill_mbus_format - Ancillary routine that fills a &struct
117 * v4l2_mbus_framefmt from a &struct v4l2_pix_format and a
118 * data format code.
119 *
120 * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled
121 * @pix_fmt: pointer to &struct v4l2_pix_format to be used as model
122 * @code: data format code (from &enum v4l2_mbus_pixelcode)
123 */
106static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, 124static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
107 const struct v4l2_pix_format *pix_fmt, 125 const struct v4l2_pix_format *pix_fmt,
108 u32 code) 126 u32 code)
@@ -117,6 +135,13 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
117 mbus_fmt->code = code; 135 mbus_fmt->code = code;
118} 136}
119 137
138/**
 139 * v4l2_fill_pix_format_mplane - Ancillary routine that fills the fields
 140 * of a &struct v4l2_pix_format_mplane from a media bus structure.
141 *
142 * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled
143 * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model
144 */
120static inline void v4l2_fill_pix_format_mplane( 145static inline void v4l2_fill_pix_format_mplane(
121 struct v4l2_pix_format_mplane *pix_mp_fmt, 146 struct v4l2_pix_format_mplane *pix_mp_fmt,
122 const struct v4l2_mbus_framefmt *mbus_fmt) 147 const struct v4l2_mbus_framefmt *mbus_fmt)
@@ -130,6 +155,13 @@ static inline void v4l2_fill_pix_format_mplane(
130 pix_mp_fmt->xfer_func = mbus_fmt->xfer_func; 155 pix_mp_fmt->xfer_func = mbus_fmt->xfer_func;
131} 156}
132 157
158/**
 159 * v4l2_fill_mbus_format_mplane - Ancillary routine that fills a &struct
160 * v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane.
161 *
162 * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled
163 * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model
164 */
133static inline void v4l2_fill_mbus_format_mplane( 165static inline void v4l2_fill_mbus_format_mplane(
134 struct v4l2_mbus_framefmt *mbus_fmt, 166 struct v4l2_mbus_framefmt *mbus_fmt,
135 const struct v4l2_pix_format_mplane *pix_mp_fmt) 167 const struct v4l2_pix_format_mplane *pix_mp_fmt)
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index e157d5c9b224..3d07ba3a8262 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -297,7 +297,7 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
297 * indicate that a non-blocking write can be performed, while read will be 297 * indicate that a non-blocking write can be performed, while read will be
298 * returned in case of the destination queue. 298 * returned in case of the destination queue.
299 */ 299 */
300unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, 300__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
301 struct poll_table_struct *wait); 301 struct poll_table_struct *wait);
302 302
303/** 303/**
@@ -601,7 +601,7 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
601int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, 601int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
602 enum v4l2_buf_type type); 602 enum v4l2_buf_type type);
603int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma); 603int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
604unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait); 604__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
605 605
606#endif /* _MEDIA_V4L2_MEM2MEM_H */ 606#endif /* _MEDIA_V4L2_MEM2MEM_H */
607 607
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index ec399c770301..980a86c08fce 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -108,22 +108,31 @@ struct v4l2_decode_vbi_line {
108 * not yet implemented) since ops provide proper type-checking. 108 * not yet implemented) since ops provide proper type-checking.
109 */ 109 */
110 110
111/* Subdevice external IO pin configuration */ 111/**
112#define V4L2_SUBDEV_IO_PIN_DISABLE (1 << 0) /* ENABLE assumed */ 112 * enum v4l2_subdev_io_pin_bits - Subdevice external IO pin configuration
113#define V4L2_SUBDEV_IO_PIN_OUTPUT (1 << 1) 113 * bits
114#define V4L2_SUBDEV_IO_PIN_INPUT (1 << 2) 114 *
115#define V4L2_SUBDEV_IO_PIN_SET_VALUE (1 << 3) /* Set output value */ 115 * @V4L2_SUBDEV_IO_PIN_DISABLE: disables a pin config. ENABLE assumed.
116#define V4L2_SUBDEV_IO_PIN_ACTIVE_LOW (1 << 4) /* ACTIVE HIGH assumed */ 116 * @V4L2_SUBDEV_IO_PIN_OUTPUT: set it if pin is an output.
117 * @V4L2_SUBDEV_IO_PIN_INPUT: set it if pin is an input.
118 * @V4L2_SUBDEV_IO_PIN_SET_VALUE: to set the output value via
119 * &struct v4l2_subdev_io_pin_config->value.
 120 * @V4L2_SUBDEV_IO_PIN_ACTIVE_LOW: set if the pin active state is 0.
121 * Otherwise, ACTIVE HIGH is assumed.
122 */
123enum v4l2_subdev_io_pin_bits {
124 V4L2_SUBDEV_IO_PIN_DISABLE = 0,
125 V4L2_SUBDEV_IO_PIN_OUTPUT = 1,
126 V4L2_SUBDEV_IO_PIN_INPUT = 2,
127 V4L2_SUBDEV_IO_PIN_SET_VALUE = 3,
128 V4L2_SUBDEV_IO_PIN_ACTIVE_LOW = 4,
129};
117 130
118/** 131/**
119 * struct v4l2_subdev_io_pin_config - Subdevice external IO pin configuration 132 * struct v4l2_subdev_io_pin_config - Subdevice external IO pin configuration
120 * 133 *
121 * @flags: bitmask with flags for this pin's config: 134 * @flags: bitmask with flags for this pin's config, whose bits are defined by
122 * %V4L2_SUBDEV_IO_PIN_DISABLE - disables a pin config, 135 * &enum v4l2_subdev_io_pin_bits.
123 * %V4L2_SUBDEV_IO_PIN_OUTPUT - if pin is an output,
124 * %V4L2_SUBDEV_IO_PIN_INPUT - if pin is an input,
125 * %V4L2_SUBDEV_IO_PIN_SET_VALUE - to set the output value via @value
126 * and %V4L2_SUBDEV_IO_PIN_ACTIVE_LOW - if active is 0.
127 * @pin: Chip external IO pin to configure 136 * @pin: Chip external IO pin to configure
128 * @function: Internal signal pad/function to route to IO pin 137 * @function: Internal signal pad/function to route to IO pin
129 * @value: Initial value for pin - e.g. GPIO output value 138 * @value: Initial value for pin - e.g. GPIO output value
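Since the enum now gives bit numbers rather than masks, a pin configuration is built by shifting them; a hedged sketch (the s_io_pin_config call shape is taken from &struct v4l2_subdev_core_ops, while sd, err and the pin/value numbers are illustrative):

        #include <linux/bitops.h>
        #include <media/v4l2-subdev.h>

        struct v4l2_subdev_io_pin_config pincfg = {
                /* output pin, initial level taken from .value */
                .flags = BIT(V4L2_SUBDEV_IO_PIN_OUTPUT) |
                         BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE),
                .pin   = 2,             /* illustrative chip pin number */
                .value = 1,
        };

        err = v4l2_subdev_call(sd, core, s_io_pin_config, 1, &pincfg);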
@@ -140,7 +149,7 @@ struct v4l2_subdev_io_pin_config {
140/** 149/**
141 * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs 150 * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs
142 * 151 *
143 * @log_status: callback for %VIDIOC_LOG_STATUS ioctl handler code. 152 * @log_status: callback for VIDIOC_LOG_STATUS() ioctl handler code.
144 * 153 *
145 * @s_io_pin_config: configure one or more chip I/O pins for chips that 154 * @s_io_pin_config: configure one or more chip I/O pins for chips that
146 * multiplex different internal signal pads out to IO pins. This function 155 * multiplex different internal signal pads out to IO pins. This function
@@ -168,9 +177,9 @@ struct v4l2_subdev_io_pin_config {
168 * @compat_ioctl32: called when a 32 bits application uses a 64 bits Kernel, 177 * @compat_ioctl32: called when a 32 bits application uses a 64 bits Kernel,
169 * in order to fix data passed from/to userspace. 178 * in order to fix data passed from/to userspace.
170 * 179 *
171 * @g_register: callback for %VIDIOC_G_REGISTER ioctl handler code. 180 * @g_register: callback for VIDIOC_DBG_G_REGISTER() ioctl handler code.
172 * 181 *
173 * @s_register: callback for %VIDIOC_G_REGISTER ioctl handler code. 182 * @s_register: callback for VIDIOC_DBG_S_REGISTER() ioctl handler code.
174 * 183 *
175 * @s_power: puts subdevice in power saving mode (on == 0) or normal operation 184 * @s_power: puts subdevice in power saving mode (on == 0) or normal operation
176 * mode (on == 1). 185 * mode (on == 1).
@@ -215,29 +224,48 @@ struct v4l2_subdev_core_ops {
215 * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened 224 * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened
216 * in radio mode. 225 * in radio mode.
217 * 226 *
218 * @s_radio: callback for %VIDIOC_S_RADIO ioctl handler code. 227 * @s_radio: callback that switches the tuner to radio mode.
 228 * Drivers should call it explicitly before using other tuner ops
 229 * that are expected to operate in radio mode.
230 * Used on devices that have both AM/FM radio receiver and TV.
219 * 231 *
220 * @s_frequency: callback for %VIDIOC_S_FREQUENCY ioctl handler code. 232 * @s_frequency: callback for VIDIOC_S_FREQUENCY() ioctl handler code.
221 * 233 *
222 * @g_frequency: callback for %VIDIOC_G_FREQUENCY ioctl handler code. 234 * @g_frequency: callback for VIDIOC_G_FREQUENCY() ioctl handler code.
223 * freq->type must be filled in. Normally done by video_ioctl2() 235 * freq->type must be filled in. Normally done by video_ioctl2()
224 * or the bridge driver. 236 * or the bridge driver.
225 * 237 *
226 * @enum_freq_bands: callback for %VIDIOC_ENUM_FREQ_BANDS ioctl handler code. 238 * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS() ioctl handler code.
227 * 239 *
228 * @g_tuner: callback for %VIDIOC_G_TUNER ioctl handler code. 240 * @g_tuner: callback for VIDIOC_G_TUNER() ioctl handler code.
229 * 241 *
230 * @s_tuner: callback for %VIDIOC_S_TUNER ioctl handler code. @vt->type must be 242 * @s_tuner: callback for VIDIOC_S_TUNER() ioctl handler code. @vt->type must be
231 * filled in. Normally done by video_ioctl2 or the 243 * filled in. Normally done by video_ioctl2 or the
232 * bridge driver. 244 * bridge driver.
233 * 245 *
234 * @g_modulator: callback for %VIDIOC_G_MODULATOR ioctl handler code. 246 * @g_modulator: callback for VIDIOC_G_MODULATOR() ioctl handler code.
235 * 247 *
236 * @s_modulator: callback for %VIDIOC_S_MODULATOR ioctl handler code. 248 * @s_modulator: callback for VIDIOC_S_MODULATOR() ioctl handler code.
237 * 249 *
238 * @s_type_addr: sets tuner type and its I2C addr. 250 * @s_type_addr: sets tuner type and its I2C addr.
239 * 251 *
240 * @s_config: sets tda9887 specific stuff, like port1, port2 and qss 252 * @s_config: sets tda9887 specific stuff, like port1, port2 and qss
253 *
254 * .. note::
255 *
256 * On devices that have both AM/FM and TV, it is up to the driver
257 * to explicitly call s_radio when the tuner should be switched to
258 * radio mode, before handling other &struct v4l2_subdev_tuner_ops
259 * that would require it. An example of such usage is::
260 *
261 * static void s_frequency(void *priv, const struct v4l2_frequency *f)
262 * {
263 * ...
 264 * if (f->type == V4L2_TUNER_RADIO)
265 * v4l2_device_call_all(v4l2_dev, 0, tuner, s_radio);
266 * ...
267 * v4l2_device_call_all(v4l2_dev, 0, tuner, s_frequency);
268 * }
241 */ 269 */
242struct v4l2_subdev_tuner_ops { 270struct v4l2_subdev_tuner_ops {
243 int (*s_radio)(struct v4l2_subdev *sd); 271 int (*s_radio)(struct v4l2_subdev *sd);
@@ -285,25 +313,32 @@ struct v4l2_subdev_audio_ops {
285 int (*s_stream)(struct v4l2_subdev *sd, int enable); 313 int (*s_stream)(struct v4l2_subdev *sd, int enable);
286}; 314};
287 315
288/* Indicates the @length field specifies maximum data length. */ 316/**
289#define V4L2_MBUS_FRAME_DESC_FL_LEN_MAX (1U << 0) 317 * enum v4l2_mbus_frame_desc_entry - media bus frame description flags
290/* 318 *
291 * Indicates that the format does not have line offsets, i.e. the 319 * @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX:
292 * receiver should use 1D DMA. 320 * Indicates that &struct v4l2_mbus_frame_desc_entry->length field
321 * specifies maximum data length.
322 * @V4L2_MBUS_FRAME_DESC_FL_BLOB:
323 * Indicates that the format does not have line offsets, i.e.
324 * the receiver should use 1D DMA.
293 */ 325 */
294#define V4L2_MBUS_FRAME_DESC_FL_BLOB (1U << 1) 326enum v4l2_mbus_frame_desc_flags {
327 V4L2_MBUS_FRAME_DESC_FL_LEN_MAX = BIT(0),
328 V4L2_MBUS_FRAME_DESC_FL_BLOB = BIT(1),
329};
295 330
296/** 331/**
297 * struct v4l2_mbus_frame_desc_entry - media bus frame description structure 332 * struct v4l2_mbus_frame_desc_entry - media bus frame description structure
298 * 333 *
299 * @flags: bitmask flags: %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX and 334 * @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags.
300 * %V4L2_MBUS_FRAME_DESC_FL_BLOB. 335 * @pixelcode: media bus pixel code, valid if @flags
301 * @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set    336 * %V4L2_MBUS_FRAME_DESC_FL_BLOB is not set.
302 * @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB 337 * @length: number of octets per frame, valid if @flags
303 * is set 338 * %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set.
304 */ 339 */
305struct v4l2_mbus_frame_desc_entry { 340struct v4l2_mbus_frame_desc_entry {
306 u16 flags; 341 enum v4l2_mbus_frame_desc_flags flags;
307 u32 pixelcode; 342 u32 pixelcode;
308 u32 length; 343 u32 length;
309}; 344};
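An illustrative entry for a compressed (blob) stream under the new enum-typed flags field; the length is an arbitrary example and @pixelcode is left at zero because it is unused when the BLOB flag is set:

        struct v4l2_mbus_frame_desc_entry entry = {
                .flags  = V4L2_MBUS_FRAME_DESC_FL_LEN_MAX |
                          V4L2_MBUS_FRAME_DESC_FL_BLOB,
                .length = 1024 * 1024,  /* maximum bytes per (compressed) frame */
        };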
@@ -332,9 +367,9 @@ struct v4l2_mbus_frame_desc {
332 * regarding clock frequency dividers, etc. If not used, then set flags 367 * regarding clock frequency dividers, etc. If not used, then set flags
333 * to 0. If the frequency is not supported, then -EINVAL is returned. 368 * to 0. If the frequency is not supported, then -EINVAL is returned.
334 * 369 *
335 * @g_std: callback for %VIDIOC_G_STD ioctl handler code. 370 * @g_std: callback for VIDIOC_G_STD() ioctl handler code.
336 * 371 *
337 * @s_std: callback for %VIDIOC_S_STD ioctl handler code. 372 * @s_std: callback for VIDIOC_S_STD() ioctl handler code.
338 * 373 *
339 * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by 374 * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
340 * video input devices. 375 * video input devices.
@@ -342,7 +377,7 @@ struct v4l2_mbus_frame_desc {
342 * @g_std_output: get current standard for video OUTPUT devices. This is ignored 377 * @g_std_output: get current standard for video OUTPUT devices. This is ignored
343 * by video input devices. 378 * by video input devices.
344 * 379 *
345 * @querystd: callback for %VIDIOC_QUERYSTD ioctl handler code. 380 * @querystd: callback for VIDIOC_QUERYSTD() ioctl handler code.
346 * 381 *
347 * @g_tvnorms: get &v4l2_std_id with all standards supported by the video 382 * @g_tvnorms: get &v4l2_std_id with all standards supported by the video
348 * CAPTURE device. This is ignored by video output devices. 383 * CAPTURE device. This is ignored by video output devices.
@@ -358,13 +393,15 @@ struct v4l2_mbus_frame_desc {
358 * 393 *
359 * @g_pixelaspect: callback to return the pixelaspect ratio. 394 * @g_pixelaspect: callback to return the pixelaspect ratio.
360 * 395 *
361 * @g_parm: callback for %VIDIOC_G_PARM ioctl handler code. 396 * @g_parm: callback for VIDIOC_G_PARM() ioctl handler code.
362 * 397 *
363 * @s_parm: callback for %VIDIOC_S_PARM ioctl handler code. 398 * @s_parm: callback for VIDIOC_S_PARM() ioctl handler code.
364 * 399 *
365 * @g_frame_interval: callback for %VIDIOC_G_FRAMEINTERVAL ioctl handler code. 400 * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
401 * ioctl handler code.
366 * 402 *
367 * @s_frame_interval: callback for %VIDIOC_S_FRAMEINTERVAL ioctl handler code. 403 * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
404 * ioctl handler code.
368 * 405 *
369 * @s_dv_timings: Set custom dv timings in the sub device. This is used 406 * @s_dv_timings: Set custom dv timings in the sub device. This is used
370 * when sub device is capable of setting detailed timing information 407 * when sub device is capable of setting detailed timing information
@@ -372,7 +409,7 @@ struct v4l2_mbus_frame_desc {
372 * 409 *
373 * @g_dv_timings: Get custom dv timings in the sub device. 410 * @g_dv_timings: Get custom dv timings in the sub device.
374 * 411 *
375 * @query_dv_timings: callback for %VIDIOC_QUERY_DV_TIMINGS ioctl handler code. 412 * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code.
376 * 413 *
377 * @g_mbus_config: get supported mediabus configurations 414 * @g_mbus_config: get supported mediabus configurations
378 * 415 *
@@ -443,7 +480,8 @@ struct v4l2_subdev_video_ops {
443 * member (to determine whether CC data from the first or second field 480 * member (to determine whether CC data from the first or second field
444 * should be obtained). 481 * should be obtained).
445 * 482 *
446 * @g_sliced_vbi_cap: callback for %VIDIOC_SLICED_VBI_CAP ioctl handler code. 483 * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP() ioctl handler
484 * code.
447 * 485 *
448 * @s_raw_fmt: setup the video encoder/decoder for raw VBI. 486 * @s_raw_fmt: setup the video encoder/decoder for raw VBI.
449 * 487 *
@@ -610,30 +648,30 @@ struct v4l2_subdev_pad_config {
610 * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations 648 * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
611 * 649 *
612 * @init_cfg: initialize the pad config to default values 650 * @init_cfg: initialize the pad config to default values
613 * @enum_mbus_code: callback for %VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler 651 * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler
614 * code. 652 * code.
615 * @enum_frame_size: callback for %VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler 653 * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler
616 * code. 654 * code.
617 * 655 *
618 * @enum_frame_interval: callback for %VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL ioctl 656 * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL() ioctl
619 * handler code. 657 * handler code.
620 * 658 *
621 * @get_fmt: callback for %VIDIOC_SUBDEV_G_FMT ioctl handler code. 659 * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT() ioctl handler code.
622 * 660 *
623 * @set_fmt: callback for %VIDIOC_SUBDEV_S_FMT ioctl handler code. 661 * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT() ioctl handler code.
624 * 662 *
625 * @get_selection: callback for %VIDIOC_SUBDEV_G_SELECTION ioctl handler code. 663 * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION() ioctl handler code.
626 * 664 *
627 * @set_selection: callback for %VIDIOC_SUBDEV_S_SELECTION ioctl handler code. 665 * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code.
628 * 666 *
629 * @get_edid: callback for %VIDIOC_SUBDEV_G_EDID ioctl handler code. 667 * @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code.
630 * 668 *
631 * @set_edid: callback for %VIDIOC_SUBDEV_S_EDID ioctl handler code. 669 * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code.
632 * 670 *
633 * @dv_timings_cap: callback for %VIDIOC_SUBDEV_DV_TIMINGS_CAP ioctl handler 671 * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP() ioctl handler
634 * code. 672 * code.
635 * 673 *
636 * @enum_dv_timings: callback for %VIDIOC_SUBDEV_ENUM_DV_TIMINGS ioctl handler 674 * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS() ioctl handler
637 * code. 675 * code.
638 * 676 *
639 * @link_validate: used by the media controller code to check if the links 677 * @link_validate: used by the media controller code to check if the links
@@ -766,7 +804,7 @@ struct v4l2_subdev_platform_data {
766 * @list: List of sub-devices 804 * @list: List of sub-devices
767 * @owner: The owner is the same as the driver's &struct device owner. 805 * @owner: The owner is the same as the driver's &struct device owner.
768 * @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev 806 * @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev
769 * ownner. Initialized by v4l2_device_register_subdev(). 807 * owner. Initialized by v4l2_device_register_subdev().
770 * @flags: subdev flags. Can be: 808 * @flags: subdev flags. Can be:
771 * %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device; 809 * %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device;
772 * %V4L2_SUBDEV_FL_IS_SPI - Set this flag if this subdev is a spi device; 810 * %V4L2_SUBDEV_FL_IS_SPI - Set this flag if this subdev is a spi device;
diff --git a/include/media/v4l2-tpg-colors.h b/include/media/v4l2-tpg-colors.h
deleted file mode 100644
index 2a88d1fae0cd..000000000000
--- a/include/media/v4l2-tpg-colors.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * v4l2-tpg-colors.h - Color definitions for the test pattern generator
3 *
4 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you may redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17 * SOFTWARE.
18 */
19
20#ifndef _V4L2_TPG_COLORS_H_
21#define _V4L2_TPG_COLORS_H_
22
23struct color {
24 unsigned char r, g, b;
25};
26
27struct color16 {
28 int r, g, b;
29};
30
31enum tpg_color {
32 TPG_COLOR_CSC_WHITE,
33 TPG_COLOR_CSC_YELLOW,
34 TPG_COLOR_CSC_CYAN,
35 TPG_COLOR_CSC_GREEN,
36 TPG_COLOR_CSC_MAGENTA,
37 TPG_COLOR_CSC_RED,
38 TPG_COLOR_CSC_BLUE,
39 TPG_COLOR_CSC_BLACK,
40 TPG_COLOR_75_YELLOW,
41 TPG_COLOR_75_CYAN,
42 TPG_COLOR_75_GREEN,
43 TPG_COLOR_75_MAGENTA,
44 TPG_COLOR_75_RED,
45 TPG_COLOR_75_BLUE,
46 TPG_COLOR_100_WHITE,
47 TPG_COLOR_100_YELLOW,
48 TPG_COLOR_100_CYAN,
49 TPG_COLOR_100_GREEN,
50 TPG_COLOR_100_MAGENTA,
51 TPG_COLOR_100_RED,
52 TPG_COLOR_100_BLUE,
53 TPG_COLOR_100_BLACK,
54 TPG_COLOR_TEXTFG,
55 TPG_COLOR_TEXTBG,
56 TPG_COLOR_RANDOM,
57 TPG_COLOR_RAMP,
58 TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
59};
60
61extern const struct color tpg_colors[TPG_COLOR_MAX];
62extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
63extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
64extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
65 [V4L2_XFER_FUNC_SMPTE2084 + 1]
66 [TPG_COLOR_CSC_BLACK + 1];
67
68#endif
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index d760aa73ebbb..0bda0adc744f 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -219,7 +219,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
219ssize_t videobuf_read_one(struct videobuf_queue *q, 219ssize_t videobuf_read_one(struct videobuf_queue *q,
220 char __user *data, size_t count, loff_t *ppos, 220 char __user *data, size_t count, loff_t *ppos,
221 int nonblocking); 221 int nonblocking);
222unsigned int videobuf_poll_stream(struct file *file, 222__poll_t videobuf_poll_stream(struct file *file,
223 struct videobuf_queue *q, 223 struct videobuf_queue *q,
224 poll_table *wait); 224 poll_table *wait);
225 225
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index a14ac7711c92..c9c81990a56c 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -1,9 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#include <dvbdev.h> 2#include <media/dvbdev.h>
3#include <dmxdev.h> 3#include <media/dmxdev.h>
4#include <dvb_demux.h> 4#include <media/dvb_demux.h>
5#include <dvb_net.h> 5#include <media/dvb_net.h>
6#include <dvb_frontend.h> 6#include <media/dvb_frontend.h>
7 7
8#ifndef _VIDEOBUF_DVB_H_ 8#ifndef _VIDEOBUF_DVB_H_
9#define _VIDEOBUF_DVB_H_ 9#define _VIDEOBUF_DVB_H_
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ef9b64398c8c..aa16c064294f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -16,6 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/poll.h> 17#include <linux/poll.h>
18#include <linux/dma-buf.h> 18#include <linux/dma-buf.h>
19#include <linux/bitops.h>
19 20
20#define VB2_MAX_FRAME (32) 21#define VB2_MAX_FRAME (32)
21#define VB2_MAX_PLANES (8) 22#define VB2_MAX_PLANES (8)
@@ -45,7 +46,7 @@ struct vb2_fileio_data;
45struct vb2_threadio_data; 46struct vb2_threadio_data;
46 47
47/** 48/**
48 * struct vb2_mem_ops - memory handling/memory allocator operations 49 * struct vb2_mem_ops - memory handling/memory allocator operations.
49 * @alloc: allocate video memory and, optionally, allocator private data, 50 * @alloc: allocate video memory and, optionally, allocator private data,
50 * return ERR_PTR() on failure or a pointer to allocator private, 51 * return ERR_PTR() on failure or a pointer to allocator private,
51 * per-buffer data on success; the returned private structure 52 * per-buffer data on success; the returned private structure
@@ -69,7 +70,7 @@ struct vb2_threadio_data;
69 * argument to other ops in this structure. 70 * argument to other ops in this structure.
70 * @put_userptr: inform the allocator that a USERPTR buffer will no longer 71 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
71 * be used. 72 * be used.
72 * @attach_dmabuf: attach a shared struct dma_buf for a hardware operation; 73 * @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation;
73 * used for DMABUF memory types; dev is the alloc device 74 * used for DMABUF memory types; dev is the alloc device
74 * dbuf is the shared dma_buf; returns ERR_PTR() on failure; 75 * dbuf is the shared dma_buf; returns ERR_PTR() on failure;
75 * allocator private per-buffer structure on success; 76 * allocator private per-buffer structure on success;
@@ -145,28 +146,28 @@ struct vb2_mem_ops {
145}; 146};
146 147
147/** 148/**
148 * struct vb2_plane - plane information 149 * struct vb2_plane - plane information.
149 * @mem_priv: private data with this plane 150 * @mem_priv: private data with this plane.
150 * @dbuf: dma_buf - shared buffer object 151 * @dbuf: dma_buf - shared buffer object.
151 * @dbuf_mapped: flag to show whether dbuf is mapped or not 152 * @dbuf_mapped: flag to show whether dbuf is mapped or not
152 * @bytesused: number of bytes occupied by data in the plane (payload) 153 * @bytesused: number of bytes occupied by data in the plane (payload).
153 * @length: size of this plane (NOT the payload) in bytes 154 * @length: size of this plane (NOT the payload) in bytes.
154 * @min_length: minimum required size of this plane (NOT the payload) in bytes. 155 * @min_length: minimum required size of this plane (NOT the payload) in bytes.
155 * @length is always greater or equal to @min_length. 156 * @length is always greater or equal to @min_length.
156 * @offset: when memory in the associated struct vb2_buffer is 157 * @m: Union with memtype-specific data.
157 * VB2_MEMORY_MMAP, equals the offset from the start of 158 * @m.offset: when memory in the associated struct vb2_buffer is
159 * %VB2_MEMORY_MMAP, equals the offset from the start of
158 * the device memory for this plane (or is a "cookie" that 160 * the device memory for this plane (or is a "cookie" that
159 * should be passed to mmap() called on the video node) 161 * should be passed to mmap() called on the video node).
160 * @userptr: when memory is VB2_MEMORY_USERPTR, a userspace pointer 162 * @m.userptr: when memory is %VB2_MEMORY_USERPTR, a userspace pointer
161 * pointing to this plane 163 * pointing to this plane.
162 * @fd: when memory is VB2_MEMORY_DMABUF, a userspace file 164 * @m.fd: when memory is %VB2_MEMORY_DMABUF, a userspace file
163 * descriptor associated with this plane 165 * descriptor associated with this plane.
164 * @m: Union with memtype-specific data (@offset, @userptr or
165 * @fd).
166 * @data_offset: offset in the plane to the start of data; usually 0, 166 * @data_offset: offset in the plane to the start of data; usually 0,
167 * unless there is a header in front of the data 167 * unless there is a header in front of the data.
168 *
168 * Should contain enough information to be able to cover all the fields 169 * Should contain enough information to be able to cover all the fields
169 * of struct v4l2_plane at videodev2.h 170 * of &struct v4l2_plane at videodev2.h.
170 */ 171 */
171struct vb2_plane { 172struct vb2_plane {
172 void *mem_priv; 173 void *mem_priv;
@@ -184,35 +185,35 @@ struct vb2_plane {
184}; 185};
185 186
186/** 187/**
187 * enum vb2_io_modes - queue access methods 188 * enum vb2_io_modes - queue access methods.
188 * @VB2_MMAP: driver supports MMAP with streaming API 189 * @VB2_MMAP: driver supports MMAP with streaming API.
189 * @VB2_USERPTR: driver supports USERPTR with streaming API 190 * @VB2_USERPTR: driver supports USERPTR with streaming API.
190 * @VB2_READ: driver supports read() style access 191 * @VB2_READ: driver supports read() style access.
191 * @VB2_WRITE: driver supports write() style access 192 * @VB2_WRITE: driver supports write() style access.
192 * @VB2_DMABUF: driver supports DMABUF with streaming API 193 * @VB2_DMABUF: driver supports DMABUF with streaming API.
193 */ 194 */
194enum vb2_io_modes { 195enum vb2_io_modes {
195 VB2_MMAP = (1 << 0), 196 VB2_MMAP = BIT(0),
196 VB2_USERPTR = (1 << 1), 197 VB2_USERPTR = BIT(1),
197 VB2_READ = (1 << 2), 198 VB2_READ = BIT(2),
198 VB2_WRITE = (1 << 3), 199 VB2_WRITE = BIT(3),
199 VB2_DMABUF = (1 << 4), 200 VB2_DMABUF = BIT(4),
200}; 201};
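
For context, a minimal sketch of how a capture driver might combine these access-method flags when filling in its queue; the my_dev structure and its field layout are assumptions, not part of this patch:

	/* Hypothetical driver: advertise mmap(), DMABUF and read() access. */
	struct vb2_queue *q = &my_dev->queue;	/* my_dev is an assumed driver struct */

	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
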
201 202
202/** 203/**
203 * enum vb2_buffer_state - current video buffer state 204 * enum vb2_buffer_state - current video buffer state.
204 * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control 205 * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control.
205 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf 206 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
206 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver 207 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver.
207 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver 208 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
208 * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver 209 * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver.
209 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used 210 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
210 * in a hardware operation 211 * in a hardware operation.
211 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but 212 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
212 * not yet dequeued to userspace 213 * not yet dequeued to userspace.
213 * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer 214 * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer
214 * has ended with an error, which will be reported 215 * has ended with an error, which will be reported
215 * to the userspace when it is dequeued 216 * to the userspace when it is dequeued.
216 */ 217 */
217enum vb2_buffer_state { 218enum vb2_buffer_state {
218 VB2_BUF_STATE_DEQUEUED, 219 VB2_BUF_STATE_DEQUEUED,
@@ -228,15 +229,15 @@ enum vb2_buffer_state {
228struct vb2_queue; 229struct vb2_queue;
229 230
230/** 231/**
231 * struct vb2_buffer - represents a video buffer 232 * struct vb2_buffer - represents a video buffer.
232 * @vb2_queue: the queue to which this driver belongs 233 * @vb2_queue: pointer to &struct vb2_queue with the queue to
 233 * @index: id number of the buffer 234 * which this buffer belongs.
234 * @type: buffer type 235 * @index: id number of the buffer.
235 * @memory: the method, in which the actual data is passed 236 * @type: buffer type.
237 * @memory: the method, in which the actual data is passed.
236 * @num_planes: number of planes in the buffer 238 * @num_planes: number of planes in the buffer
237 * on an internal driver queue 239 * on an internal driver queue.
238 * @planes: private per-plane information; do not change 240 * @timestamp: frame timestamp in ns.
239 * @timestamp: frame timestamp in ns
240 */ 241 */
241struct vb2_buffer { 242struct vb2_buffer {
242 struct vb2_queue *vb2_queue; 243 struct vb2_queue *vb2_queue;
@@ -244,7 +245,6 @@ struct vb2_buffer {
244 unsigned int type; 245 unsigned int type;
245 unsigned int memory; 246 unsigned int memory;
246 unsigned int num_planes; 247 unsigned int num_planes;
247 struct vb2_plane planes[VB2_MAX_PLANES];
248 u64 timestamp; 248 u64 timestamp;
249 249
250 /* private: internal use only 250 /* private: internal use only
@@ -254,9 +254,11 @@ struct vb2_buffer {
254 * all buffers queued from userspace 254 * all buffers queued from userspace
255 * done_entry: entry on the list that stores all buffers ready 255 * done_entry: entry on the list that stores all buffers ready
256 * to be dequeued to userspace 256 * to be dequeued to userspace
257 * vb2_plane: per-plane information; do not change
257 */ 258 */
258 enum vb2_buffer_state state; 259 enum vb2_buffer_state state;
259 260
261 struct vb2_plane planes[VB2_MAX_PLANES];
260 struct list_head queued_entry; 262 struct list_head queued_entry;
261 struct list_head done_entry; 263 struct list_head done_entry;
262#ifdef CONFIG_VIDEO_ADV_DEBUG 264#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -292,7 +294,7 @@ struct vb2_buffer {
292}; 294};
293 295
294/** 296/**
295 * struct vb2_ops - driver-specific callbacks 297 * struct vb2_ops - driver-specific callbacks.
296 * 298 *
297 * @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS() 299 * @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS()
298 * handlers before memory allocation. It can be called 300 * handlers before memory allocation. It can be called
@@ -358,8 +360,8 @@ struct vb2_buffer {
358 * the @buf_queue callback are to be returned by the driver 360 * the @buf_queue callback are to be returned by the driver
359 * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED. 361 * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
360 * If you need a minimum number of buffers before you can 362 * If you need a minimum number of buffers before you can
361 * start streaming, then set @min_buffers_needed in the 363 * start streaming, then set
362 * vb2_queue structure. If that is non-zero then 364 * &vb2_queue->min_buffers_needed. If that is non-zero then
363 * @start_streaming won't be called until at least that 365 * @start_streaming won't be called until at least that
364 * many buffers have been queued up by userspace. 366 * many buffers have been queued up by userspace.
365 * @stop_streaming: called when 'streaming' state must be disabled; driver 367 * @stop_streaming: called when 'streaming' state must be disabled; driver
@@ -396,18 +398,18 @@ struct vb2_ops {
396}; 398};
397 399
398/** 400/**
399 * struct vb2_buf_ops - driver-specific callbacks 401 * struct vb2_buf_ops - driver-specific callbacks.
400 * 402 *
401 * @verify_planes_array: Verify that a given user space structure contains 403 * @verify_planes_array: Verify that a given user space structure contains
402 * enough planes for the buffer. This is called 404 * enough planes for the buffer. This is called
403 * for each dequeued buffer. 405 * for each dequeued buffer.
404 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. 406 * @fill_user_buffer: given a &vb2_buffer fill in the userspace structure.
405 * For V4L2 this is a struct v4l2_buffer. 407 * For V4L2 this is a &struct v4l2_buffer.
406 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. 408 * @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer.
407 * If the userspace structure is invalid, then this op 409 * If the userspace structure is invalid, then this op
408 * will return an error. 410 * will return an error.
409 * @copy_timestamp: copy the timestamp from a userspace structure to 411 * @copy_timestamp: copy the timestamp from a userspace structure to
410 * the vb2_buffer struct. 412 * the &struct vb2_buffer.
411 */ 413 */
412struct vb2_buf_ops { 414struct vb2_buf_ops {
413 int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); 415 int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
@@ -418,20 +420,21 @@ struct vb2_buf_ops {
418}; 420};
419 421
420/** 422/**
421 * struct vb2_queue - a videobuf queue 423 * struct vb2_queue - a videobuf queue.
422 * 424 *
423 * @type: private buffer type whose content is defined by the vb2-core 425 * @type: private buffer type whose content is defined by the vb2-core
424 * caller. For example, for V4L2, it should match 426 * caller. For example, for V4L2, it should match
425 * the types defined on enum &v4l2_buf_type 427 * the types defined on &enum v4l2_buf_type.
426 * @io_modes: supported io methods (see vb2_io_modes enum) 428 * @io_modes: supported io methods (see &enum vb2_io_modes).
429 * @alloc_devs: &struct device memory type/allocator-specific per-plane device
427 * @dev: device to use for the default allocation context if the driver 430 * @dev: device to use for the default allocation context if the driver
428 * doesn't fill in the @alloc_devs array. 431 * doesn't fill in the @alloc_devs array.
429 * @dma_attrs: DMA attributes to use for the DMA. 432 * @dma_attrs: DMA attributes to use for the DMA.
430 * @bidirectional: when this flag is set the DMA direction for the buffers of 433 * @bidirectional: when this flag is set the DMA direction for the buffers of
431 * this queue will be overridden with DMA_BIDIRECTIONAL direction. 434 * this queue will be overridden with %DMA_BIDIRECTIONAL direction.
432 * This is useful in cases where the hardware (firmware) writes to 435 * This is useful in cases where the hardware (firmware) writes to
433 * a buffer which is mapped as read (DMA_TO_DEVICE), or reads from 436 * a buffer which is mapped as read (%DMA_TO_DEVICE), or reads from
434 * buffer which is mapped for write (DMA_FROM_DEVICE) in order 437 * buffer which is mapped for write (%DMA_FROM_DEVICE) in order
435 * to satisfy some internal hardware restrictions or adds a padding 438 * to satisfy some internal hardware restrictions or adds a padding
436 * needed by the processing algorithm. In case the DMA mapping is 439 * needed by the processing algorithm. In case the DMA mapping is
437 * not bidirectional but the hardware (firmware) trying to access 440 * not bidirectional but the hardware (firmware) trying to access
@@ -440,10 +443,10 @@ struct vb2_buf_ops {
440 * @fileio_read_once: report EOF after reading the first buffer 443 * @fileio_read_once: report EOF after reading the first buffer
441 * @fileio_write_immediately: queue buffer after each write() call 444 * @fileio_write_immediately: queue buffer after each write() call
442 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver 445 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
443 * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF 446 * @quirk_poll_must_check_waiting_for_buffers: Return %POLLERR at poll when QBUF
444 * has not been called. This is a vb1 idiom that has been adopted 447 * has not been called. This is a vb1 idiom that has been adopted
445 * also by vb2. 448 * also by vb2.
446 * @lock: pointer to a mutex that protects the vb2_queue struct. The 449 * @lock: pointer to a mutex that protects the &struct vb2_queue. The
447 * driver can set this to a mutex to let the v4l2 core serialize 450 * driver can set this to a mutex to let the v4l2 core serialize
448 * the queuing ioctls. If the driver wants to handle locking 451 * the queuing ioctls. If the driver wants to handle locking
449 * itself, then this should be set to NULL. This lock is not used 452 * itself, then this should be set to NULL. This lock is not used
@@ -454,17 +457,17 @@ struct vb2_buf_ops {
454 * drivers to easily associate an owner filehandle with the queue. 457 * drivers to easily associate an owner filehandle with the queue.
455 * @ops: driver-specific callbacks 458 * @ops: driver-specific callbacks
456 * @mem_ops: memory allocator specific callbacks 459 * @mem_ops: memory allocator specific callbacks
457 * @buf_ops: callbacks to deliver buffer information 460 * @buf_ops: callbacks to deliver buffer information.
458 * between user-space and kernel-space 461 * between user-space and kernel-space.
459 * @drv_priv: driver private data 462 * @drv_priv: driver private data.
460 * @buf_struct_size: size of the driver-specific buffer structure; 463 * @buf_struct_size: size of the driver-specific buffer structure;
461 * "0" indicates the driver doesn't want to use a custom buffer 464 * "0" indicates the driver doesn't want to use a custom buffer
 462 * structure type. For example, sizeof(struct vb2_v4l2_buffer) 465 * structure type. For example, ``sizeof(struct vb2_v4l2_buffer)``
463 * will be used for v4l2. 466 * will be used for v4l2.
464 * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and 467 * @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and
465 * V4L2_BUF_FLAG_TSTAMP_SRC_* 468 * ``V4L2_BUF_FLAG_TSTAMP_SRC_*``
466 * @gfp_flags: additional gfp flags used when allocating the buffers. 469 * @gfp_flags: additional gfp flags used when allocating the buffers.
467 * Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32 470 * Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32
468 * to force the buffer allocation to a specific memory zone. 471 * to force the buffer allocation to a specific memory zone.
469 * @min_buffers_needed: the minimum number of buffers needed before 472 * @min_buffers_needed: the minimum number of buffers needed before
470 * @start_streaming can be called. Used when a DMA engine 473 * @start_streaming can be called. Used when a DMA engine
@@ -484,20 +487,19 @@ struct vb2_buf_ops {
484 * @done_list: list of buffers ready to be dequeued to userspace 487 * @done_list: list of buffers ready to be dequeued to userspace
485 * @done_lock: lock to protect done_list list 488 * @done_lock: lock to protect done_list list
486 * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued 489 * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued
487 * @alloc_devs: memory type/allocator-specific per-plane device
488 * @streaming: current streaming state 490 * @streaming: current streaming state
489 * @start_streaming_called: @start_streaming was called successfully and we 491 * @start_streaming_called: @start_streaming was called successfully and we
490 * started streaming. 492 * started streaming.
491 * @error: a fatal error occurred on the queue 493 * @error: a fatal error occurred on the queue
492 * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for 494 * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
493 * buffers. Only set for capture queues if qbuf has not yet been 495 * buffers. Only set for capture queues if qbuf has not yet been
494 * called since poll() needs to return POLLERR in that situation. 496 * called since poll() needs to return %POLLERR in that situation.
495 * @is_multiplanar: set if buffer type is multiplanar 497 * @is_multiplanar: set if buffer type is multiplanar
496 * @is_output: set if buffer type is output 498 * @is_output: set if buffer type is output
497 * @copy_timestamp: set if vb2-core should set timestamps 499 * @copy_timestamp: set if vb2-core should set timestamps
498 * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the 500 * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the
499 * last decoded buffer was already dequeued. Set for capture queues 501 * last decoded buffer was already dequeued. Set for capture queues
500 * when a buffer with the V4L2_BUF_FLAG_LAST is dequeued. 502 * when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued.
501 * @fileio: file io emulator internal data, used only if emulator is active 503 * @fileio: file io emulator internal data, used only if emulator is active
502 * @threadio: thread io internal data, used only if thread is active 504 * @threadio: thread io internal data, used only if thread is active
503 */ 505 */
@@ -525,6 +527,8 @@ struct vb2_queue {
525 gfp_t gfp_flags; 527 gfp_t gfp_flags;
526 u32 min_buffers_needed; 528 u32 min_buffers_needed;
527 529
530 struct device *alloc_devs[VB2_MAX_PLANES];
531
528 /* private: internal use only */ 532 /* private: internal use only */
529 struct mutex mmap_lock; 533 struct mutex mmap_lock;
530 unsigned int memory; 534 unsigned int memory;
@@ -540,8 +544,6 @@ struct vb2_queue {
540 spinlock_t done_lock; 544 spinlock_t done_lock;
541 wait_queue_head_t done_wq; 545 wait_queue_head_t done_wq;
542 546
543 struct device *alloc_devs[VB2_MAX_PLANES];
544
545 unsigned int streaming:1; 547 unsigned int streaming:1;
546 unsigned int start_streaming_called:1; 548 unsigned int start_streaming_called:1;
547 unsigned int error:1; 549 unsigned int error:1;
@@ -568,9 +570,10 @@ struct vb2_queue {
568}; 570};
569 571
570/** 572/**
571 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane 573 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane.
572 * @vb: vb2_buffer to which the plane in question belongs to 574 * @vb: pointer to &struct vb2_buffer to which the plane in
573 * @plane_no: plane number for which the address is to be returned 575 * question belongs to.
576 * @plane_no: plane number for which the address is to be returned.
574 * 577 *
575 * This function returns a kernel virtual address of a given plane if 578 * This function returns a kernel virtual address of a given plane if
576 * such a mapping exist, NULL otherwise. 579 * such a mapping exist, NULL otherwise.
@@ -578,9 +581,10 @@ struct vb2_queue {
578void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); 581void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
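
As a rough illustration of this helper, a driver could clear a plane from its buf_prepare callback; the callback name and the single-plane assumption below are hypothetical:

	/* Sketch: zero plane 0 before handing the buffer to the hardware. */
	static int my_buf_prepare(struct vb2_buffer *vb)
	{
		void *vaddr = vb2_plane_vaddr(vb, 0);

		if (!vaddr)			/* no kernel mapping for this plane */
			return -EINVAL;

		memset(vaddr, 0, vb2_plane_size(vb, 0));
		return 0;
	}
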
579 582
580/** 583/**
581 * vb2_plane_cookie() - Return allocator specific cookie for the given plane 584 * vb2_plane_cookie() - Return allocator specific cookie for the given plane.
582 * @vb: vb2_buffer to which the plane in question belongs to 585 * @vb: pointer to &struct vb2_buffer to which the plane in
583 * @plane_no: plane number for which the cookie is to be returned 586 * question belongs to.
587 * @plane_no: plane number for which the cookie is to be returned.
584 * 588 *
585 * This function returns an allocator specific cookie for a given plane if 589 * This function returns an allocator specific cookie for a given plane if
586 * available, NULL otherwise. The allocator should provide some simple static 590 * available, NULL otherwise. The allocator should provide some simple static
@@ -591,9 +595,11 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
591void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); 595void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
592 596
593/** 597/**
594 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished 598 * vb2_buffer_done() - inform videobuf that an operation on a buffer
595 * @vb: vb2_buffer returned from the driver 599 * is finished.
596 * @state: either %VB2_BUF_STATE_DONE if the operation finished 600 * @vb: pointer to &struct vb2_buffer to be used.
601 * @state: state of the buffer, as defined by &enum vb2_buffer_state.
602 * Either %VB2_BUF_STATE_DONE if the operation finished
597 * successfully, %VB2_BUF_STATE_ERROR if the operation finished 603 * successfully, %VB2_BUF_STATE_ERROR if the operation finished
598 * with an error or %VB2_BUF_STATE_QUEUED if the driver wants to 604 * with an error or %VB2_BUF_STATE_QUEUED if the driver wants to
599 * requeue buffers. If start_streaming fails then it should return 605 * requeue buffers. If start_streaming fails then it should return
@@ -614,8 +620,8 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
614void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); 620void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
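
A typical caller is a completion interrupt handler; the sketch below assumes a my_dev driver structure and a my_get_done_buffer() helper that are not part of this patch:

	/* Sketch: hand a finished buffer back to vb2 from the IRQ handler. */
	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;				/* assumed */
		struct vb2_buffer *vb = my_get_done_buffer(dev);	/* hypothetical helper */

		vb->timestamp = ktime_get_ns();		/* timestamp is in ns, see above */
		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
		return IRQ_HANDLED;
	}
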
615 621
616/** 622/**
617 * vb2_discard_done() - discard all buffers marked as DONE 623 * vb2_discard_done() - discard all buffers marked as DONE.
618 * @q: videobuf2 queue 624 * @q: pointer to &struct vb2_queue with videobuf2 queue.
619 * 625 *
620 * This function is intended to be used with suspend/resume operations. It 626 * This function is intended to be used with suspend/resume operations. It
621 * discards all 'done' buffers as they would be too old to be requested after 627 * discards all 'done' buffers as they would be too old to be requested after
@@ -628,74 +634,83 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
628void vb2_discard_done(struct vb2_queue *q); 634void vb2_discard_done(struct vb2_queue *q);
629 635
630/** 636/**
631 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2 637 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2.
632 * @q: videobuf2 queue 638 * @q: pointer to &struct vb2_queue with videobuf2 queue.
633 * 639 *
634 * This function will wait until all buffers that have been given to the driver 640 * This function will wait until all buffers that have been given to the driver
635 * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It 641 * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It
636 * doesn't call wait_prepare()/wait_finish() pair. It is intended to be called 642 * doesn't call &vb2_ops->wait_prepare/&vb2_ops->wait_finish pair.
637 * with all locks taken, for example from &vb2_ops->stop_streaming callback. 643 * It is intended to be called with all locks taken, for example from
644 * &vb2_ops->stop_streaming callback.
638 */ 645 */
639int vb2_wait_for_all_buffers(struct vb2_queue *q); 646int vb2_wait_for_all_buffers(struct vb2_queue *q);
640 647
641/** 648/**
642 * vb2_core_querybuf() - query video buffer information 649 * vb2_core_querybuf() - query video buffer information.
643 * @q: videobuf queue 650 * @q: pointer to &struct vb2_queue with videobuf2 queue.
644 * @index: id number of the buffer 651 * @index: id number of the buffer.
645 * @pb: buffer struct passed from userspace 652 * @pb: buffer struct passed from userspace.
653 *
654 * Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called
655 * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
646 * 656 *
647 * Should be called from vidioc_querybuf ioctl handler in driver.
648 * The passed buffer should have been verified. 657 * The passed buffer should have been verified.
658 *
649 * This function fills the relevant information for the userspace. 659 * This function fills the relevant information for the userspace.
660 *
661 * Return: returns zero on success; an error code otherwise.
650 */ 662 */
651void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); 663void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
652 664
653/** 665/**
654 * vb2_core_reqbufs() - Initiate streaming 666 * vb2_core_reqbufs() - Initiate streaming.
655 * @q: videobuf2 queue 667 * @q: pointer to &struct vb2_queue with videobuf2 queue.
656 * @memory: memory type 668 * @memory: memory type, as defined by &enum vb2_memory.
657 * @count: requested buffer count 669 * @count: requested buffer count.
658 * 670 *
 659 * Should be called from vidioc_reqbufs ioctl handler of a driver. 671 * Videobuf2 core helper to implement VIDIOC_REQBUFS() operation. It is called
672 * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
660 * 673 *
661 * This function: 674 * This function:
662 * 675 *
663 * #) verifies streaming parameters passed from the userspace, 676 * #) verifies streaming parameters passed from the userspace;
664 * #) sets up the queue, 677 * #) sets up the queue;
665 * #) negotiates number of buffers and planes per buffer with the driver 678 * #) negotiates number of buffers and planes per buffer with the driver
666 * to be used during streaming, 679 * to be used during streaming;
667 * #) allocates internal buffer structures (struct vb2_buffer), according to 680 * #) allocates internal buffer structures (&struct vb2_buffer), according to
668 * the agreed parameters, 681 * the agreed parameters;
669 * #) for MMAP memory type, allocates actual video memory, using the 682 * #) for MMAP memory type, allocates actual video memory, using the
670 * memory handling/allocation routines provided during queue initialization 683 * memory handling/allocation routines provided during queue initialization.
671 * 684 *
672 * If req->count is 0, all the memory will be freed instead. 685 * If req->count is 0, all the memory will be freed instead.
673 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
674 * and the queue is not busy, memory will be reallocated.
675 * 686 *
676 * The return values from this function are intended to be directly returned 687 * If the queue has been allocated previously by a previous vb2_core_reqbufs()
677 * from vidioc_reqbufs handler in driver. 688 * call and the queue is not busy, memory will be reallocated.
689 *
690 * Return: returns zero on success; an error code otherwise.
678 */ 691 */
679int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, 692int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
680 unsigned int *count); 693 unsigned int *count);
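
Following the note above that a zero count frees everything, a caller could release all previously allocated buffers as in this sketch (error handling elided):

	/* Sketch: request zero buffers to free all queue memory. */
	unsigned int count = 0;

	vb2_core_reqbufs(q, VB2_MEMORY_MMAP, &count);
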
681 694
682/** 695/**
683 * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs 696 * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
684 * @q: videobuf2 queue 697 * @q: pointer to &struct vb2_queue with videobuf2 queue.
685 * @memory: memory type 698 * @memory: memory type, as defined by &enum vb2_memory.
686 * @count: requested buffer count 699 * @count: requested buffer count.
687 * @requested_planes: number of planes requested 700 * @requested_planes: number of planes requested.
688 * @requested_sizes: array with the size of the planes 701 * @requested_sizes: array with the size of the planes.
702 *
703 * Videobuf2 core helper to implement VIDIOC_CREATE_BUFS() operation. It is
704 * called internally by VB2 by an API-specific handler, like
705 * ``videobuf2-v4l2.h``.
689 * 706 *
690 * Should be called from VIDIOC_CREATE_BUFS() ioctl handler of a driver.
691 * This function: 707 * This function:
692 * 708 *
693 * #) verifies parameter sanity 709 * #) verifies parameter sanity;
694 * #) calls the .queue_setup() queue operation 710 * #) calls the &vb2_ops->queue_setup queue operation;
695 * #) performs any necessary memory allocations 711 * #) performs any necessary memory allocations.
696 * 712 *
697 * Return: the return values from this function are intended to be directly 713 * Return: returns zero on success; an error code otherwise.
698 * returned from VIDIOC_CREATE_BUFS() handler in driver.
699 */ 714 */
700int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, 715int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
701 unsigned int *count, unsigned int requested_planes, 716 unsigned int *count, unsigned int requested_planes,
@@ -703,57 +718,61 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
703 718
704/** 719/**
705 * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace 720 * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
706 * to the kernel 721 * to the kernel.
707 * @q: videobuf2 queue 722 * @q: pointer to &struct vb2_queue with videobuf2 queue.
708 * @index: id number of the buffer 723 * @index: id number of the buffer.
709 * @pb: buffer structure passed from userspace to vidioc_prepare_buf 724 * @pb: buffer structure passed from userspace to
710 * handler in driver 725 * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
726 *
727 * Videobuf2 core helper to implement VIDIOC_PREPARE_BUF() operation. It is
728 * called internally by VB2 by an API-specific handler, like
729 * ``videobuf2-v4l2.h``.
711 * 730 *
712 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
713 * The passed buffer should have been verified. 731 * The passed buffer should have been verified.
714 * This function calls buf_prepare callback in the driver (if provided),
715 * in which driver-specific buffer initialization can be performed,
716 * 732 *
717 * The return values from this function are intended to be directly returned 733 * This function calls vb2_ops->buf_prepare callback in the driver
718 * from vidioc_prepare_buf handler in driver. 734 * (if provided), in which driver-specific buffer initialization can
735 * be performed.
736 *
737 * Return: returns zero on success; an error code otherwise.
719 */ 738 */
720int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); 739int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
721 740
722/** 741/**
723 * vb2_core_qbuf() - Queue a buffer from userspace 742 * vb2_core_qbuf() - Queue a buffer from userspace
724 * 743 *
725 * @q: videobuf2 queue 744 * @q: pointer to &struct vb2_queue with videobuf2 queue.
726 * @index: id number of the buffer 745 * @index: id number of the buffer
727 * @pb: buffer structure passed from userspace to vidioc_qbuf handler 746 * @pb: buffer structure passed from userspace to
728 * in driver 747 * v4l2_ioctl_ops->vidioc_qbuf handler in driver
729 * 748 *
730 * Should be called from vidioc_qbuf ioctl handler of a driver. 749 * Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called
731 * The passed buffer should have been verified. 750 * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
732 * 751 *
733 * This function: 752 * This function:
734 * 753 *
735 * #) if necessary, calls buf_prepare callback in the driver (if provided), in 754 * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
736 * which driver-specific buffer initialization can be performed, 755 * (if provided), in which driver-specific buffer initialization can
756 * be performed;
737 * #) if streaming is on, queues the buffer in driver by the means of 757 * #) if streaming is on, queues the buffer in driver by the means of
738 * &vb2_ops->buf_queue callback for processing. 758 * &vb2_ops->buf_queue callback for processing.
739 * 759 *
740 * The return values from this function are intended to be directly returned 760 * Return: returns zero on success; an error code otherwise.
741 * from vidioc_qbuf handler in driver.
742 */ 761 */
743int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb); 762int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
744 763
745/** 764/**
746 * vb2_core_dqbuf() - Dequeue a buffer to the userspace 765 * vb2_core_dqbuf() - Dequeue a buffer to the userspace
747 * @q: videobuf2 queue 766 * @q: pointer to &struct vb2_queue with videobuf2 queue
748 * @pindex: pointer to the buffer index. May be NULL 767 * @pindex: pointer to the buffer index. May be NULL
749 * @pb: buffer structure passed from userspace to vidioc_dqbuf handler 768 * @pb: buffer structure passed from userspace to
750 * in driver 769 * v4l2_ioctl_ops->vidioc_dqbuf handler in driver.
751 * @nonblocking: if true, this call will not sleep waiting for a buffer if no 770 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
752 * buffers ready for dequeuing are present. Normally the driver 771 * buffers ready for dequeuing are present. Normally the driver
753 * would be passing (file->f_flags & O_NONBLOCK) here 772 * would be passing (file->f_flags & O_NONBLOCK) here.
754 * 773 *
755 * Should be called from vidioc_dqbuf ioctl handler of a driver. 774 * Videobuf2 core helper to implement VIDIOC_DQBUF() operation. It is called
756 * The passed buffer should have been verified. 775 * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
757 * 776 *
758 * This function: 777 * This function:
759 * 778 *
@@ -763,73 +782,108 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
763 * #) the buffer struct members are filled with relevant information for 782 * #) the buffer struct members are filled with relevant information for
764 * the userspace. 783 * the userspace.
765 * 784 *
766 * The return values from this function are intended to be directly returned 785 * Return: returns zero on success; an error code otherwise.
767 * from vidioc_dqbuf handler in driver.
768 */ 786 */
769int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, 787int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
770 bool nonblocking); 788 bool nonblocking);
771 789
790/**
791 * vb2_core_streamon() - Implements VB2 stream ON logic
792 *
793 * @q: pointer to &struct vb2_queue with videobuf2 queue
794 * @type: type of the queue to be started.
795 * For V4L2, this is defined by &enum v4l2_buf_type type.
796 *
797 * Videobuf2 core helper to implement VIDIOC_STREAMON() operation. It is called
798 * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
799 *
800 * Return: returns zero on success; an error code otherwise.
801 */
772int vb2_core_streamon(struct vb2_queue *q, unsigned int type); 802int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
803
804/**
805 * vb2_core_streamoff() - Implements VB2 stream OFF logic
806 *
807 * @q: pointer to &struct vb2_queue with videobuf2 queue
 808 * @type: type of the queue to be stopped.
809 * For V4L2, this is defined by &enum v4l2_buf_type type.
810 *
811 * Videobuf2 core helper to implement VIDIOC_STREAMOFF() operation. It is
812 * called internally by VB2 by an API-specific handler, like
813 * ``videobuf2-v4l2.h``.
814 *
815 * Return: returns zero on success; an error code otherwise.
816 */
773int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); 817int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
774 818
775/** 819/**
776 * vb2_core_expbuf() - Export a buffer as a file descriptor 820 * vb2_core_expbuf() - Export a buffer as a file descriptor.
777 * @q: videobuf2 queue 821 * @q: pointer to &struct vb2_queue with videobuf2 queue.
778 * @fd: file descriptor associated with DMABUF (set by driver) * 822 * @fd: pointer to the file descriptor associated with DMABUF
779 * @type: buffer type 823 * (set by driver).
780 * @index: id number of the buffer 824 * @type: buffer type.
825 * @index: id number of the buffer.
781 * @plane: index of the plane to be exported, 0 for single plane queues 826 * @plane: index of the plane to be exported, 0 for single plane queues
782 * @flags: flags for newly created file, currently only O_CLOEXEC is 827 * @flags: file flags for newly created file, as defined at
783 * supported, refer to manual of open syscall for more details 828 * include/uapi/asm-generic/fcntl.h.
 829 * Currently, the only supported flag is %O_CLOEXEC;
 830 * refer to the manual of the open syscall for more details.
784 * 831 *
785 * The return values from this function are intended to be directly returned 832 *
786 * from vidioc_expbuf handler in driver. 833 * Videobuf2 core helper to implement VIDIOC_EXPBUF() operation. It is called
834 * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
835 *
836 * Return: returns zero on success; an error code otherwise.
787 */ 837 */
788int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, 838int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
789 unsigned int index, unsigned int plane, unsigned int flags); 839 unsigned int index, unsigned int plane, unsigned int flags);
790 840
791/** 841/**
792 * vb2_core_queue_init() - initialize a videobuf2 queue 842 * vb2_core_queue_init() - initialize a videobuf2 queue
793 * @q: videobuf2 queue; this structure should be allocated in driver 843 * @q: pointer to &struct vb2_queue with videobuf2 queue.
844 * This structure should be allocated in driver
794 * 845 *
795 * The vb2_queue structure should be allocated by the driver. The driver is 846 * The &vb2_queue structure should be allocated by the driver. The driver is
 796 * responsible for clearing its content and setting initial values for some 847 * responsible for clearing its content and setting initial values for some
797 * required entries before calling this function. 848 * required entries before calling this function.
798 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer 849 *
799 * to the struct vb2_queue description in include/media/videobuf2-core.h 850 * .. note::
800 * for more information. 851 *
852 * The following fields at @q should be set before calling this function:
853 * &vb2_queue->ops, &vb2_queue->mem_ops, &vb2_queue->type.
801 */ 854 */
802int vb2_core_queue_init(struct vb2_queue *q); 855int vb2_core_queue_init(struct vb2_queue *q);
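
To illustrate the mandatory fields listed in the note, a plausible (not authoritative) initialization sequence might look like the following; a V4L2 driver would normally reach this path through the vb2_queue_init() wrapper from videobuf2-v4l2.h, and my_vb2_ops, my_buffer and dev are assumed names:

	/* Sketch: fill in the mandatory vb2_queue fields, then initialize. */
	struct vb2_queue *q = &dev->queue;
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ;
	q->drv_priv = dev;
	q->ops = &my_vb2_ops;				/* struct vb2_ops, assumed */
	q->mem_ops = &vb2_vmalloc_memops;		/* one of the stock allocators */
	q->buf_struct_size = sizeof(struct my_buffer);	/* hypothetical */
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 2;
	q->lock = &dev->lock;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;
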
803 856
804/** 857/**
805 * vb2_core_queue_release() - stop streaming, release the queue and free memory 858 * vb2_core_queue_release() - stop streaming, release the queue and free memory
806 * @q: videobuf2 queue 859 * @q: pointer to &struct vb2_queue with videobuf2 queue.
807 * 860 *
808 * This function stops streaming and performs necessary clean ups, including 861 * This function stops streaming and performs necessary clean ups, including
809 * freeing video buffer memory. The driver is responsible for freeing 862 * freeing video buffer memory. The driver is responsible for freeing
810 * the vb2_queue structure itself. 863 * the &struct vb2_queue itself.
811 */ 864 */
812void vb2_core_queue_release(struct vb2_queue *q); 865void vb2_core_queue_release(struct vb2_queue *q);
813 866
814/** 867/**
815 * vb2_queue_error() - signal a fatal error on the queue 868 * vb2_queue_error() - signal a fatal error on the queue
816 * @q: videobuf2 queue 869 * @q: pointer to &struct vb2_queue with videobuf2 queue.
817 * 870 *
818 * Flag that a fatal unrecoverable error has occurred and wake up all processes 871 * Flag that a fatal unrecoverable error has occurred and wake up all processes
819 * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing 872 * waiting on the queue. Polling will now set %POLLERR and queuing and dequeuing
820 * buffers will return -EIO. 873 * buffers will return %-EIO.
821 * 874 *
822 * The error flag will be cleared when cancelling the queue, either from 875 * The error flag will be cleared when canceling the queue, either from
823 * vb2_streamoff or vb2_queue_release. Drivers should thus not call this 876 * vb2_streamoff() or vb2_queue_release(). Drivers should thus not call this
824 * function before starting the stream, otherwise the error flag will remain set 877 * function before starting the stream, otherwise the error flag will remain set
825 * until the queue is released when closing the device node. 878 * until the queue is released when closing the device node.
826 */ 879 */
827void vb2_queue_error(struct vb2_queue *q); 880void vb2_queue_error(struct vb2_queue *q);
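
In practice this is called from a spot where the driver detects an unrecoverable condition; the hardware check in this sketch is purely illustrative:

	/* Sketch: wake up all waiters with POLLERR/-EIO after a fatal fault. */
	if (my_hw_is_dead(dev))			/* hypothetical check */
		vb2_queue_error(&dev->queue);
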
828 881
829/** 882/**
830 * vb2_mmap() - map video buffers into application address space 883 * vb2_mmap() - map video buffers into application address space.
831 * @q: videobuf2 queue 884 * @q: pointer to &struct vb2_queue with videobuf2 queue.
832 * @vma: vma passed to the mmap file operation handler in the driver 885 * @vma: pointer to &struct vm_area_struct with the vma passed
886 * to the mmap file operation handler in the driver.
833 * 887 *
834 * Should be called from mmap file operation handler of a driver. 888 * Should be called from mmap file operation handler of a driver.
835 * This function maps one plane of one of the available video buffers to 889 * This function maps one plane of one of the available video buffers to
@@ -837,8 +891,10 @@ void vb2_queue_error(struct vb2_queue *q);
837 * has to be called once per each plane per each buffer previously allocated. 891 * has to be called once per each plane per each buffer previously allocated.
838 * 892 *
839 * When the userspace application calls mmap, it passes to it an offset returned 893 * When the userspace application calls mmap, it passes to it an offset returned
840 * to it earlier by the means of vidioc_querybuf handler. That offset acts as 894 * to it earlier by the means of &v4l2_ioctl_ops->vidioc_querybuf handler.
841 * a "cookie", which is then used to identify the plane to be mapped. 895 * That offset acts as a "cookie", which is then used to identify the plane
896 * to be mapped.
897 *
842 * This function finds a plane with a matching offset and a mapping is performed 898 * This function finds a plane with a matching offset and a mapping is performed
843 * by the means of a provided memory operation. 899 * by the means of a provided memory operation.
844 * 900 *
@@ -848,6 +904,21 @@ void vb2_queue_error(struct vb2_queue *q);
848int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); 904int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
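
A driver's mmap file operation usually just delegates here; the my_dev structure is an assumption, while video_drvdata() is the usual V4L2 way to recover the driver state:

	/* Sketch: mmap file operation forwarding to vb2. */
	static int my_fop_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct my_dev *dev = video_drvdata(file);	/* assumed driver struct */

		return vb2_mmap(&dev->queue, vma);
	}
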
849 905
850#ifndef CONFIG_MMU 906#ifndef CONFIG_MMU
907/**
 908 * vb2_get_unmapped_area - propose an address for mapping video buffers into the application address space.
909 * @q: pointer to &struct vb2_queue with videobuf2 queue.
910 * @addr: memory address.
911 * @len: buffer size.
912 * @pgoff: page offset.
913 * @flags: memory flags.
914 *
915 * This function is used in noMMU platforms to propose address mapping
916 * for a given buffer. It's intended to be used as a handler for the
917 * &file_operations->get_unmapped_area operation.
918 *
 919 * The mmap() syscall routines will call this
920 * to get a proposed address for the mapping, when ``!CONFIG_MMU``.
921 */
851unsigned long vb2_get_unmapped_area(struct vb2_queue *q, 922unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
852 unsigned long addr, 923 unsigned long addr,
853 unsigned long len, 924 unsigned long len,
@@ -856,10 +927,12 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
856#endif 927#endif
857 928
858/** 929/**
859 * vb2_core_poll() - implements poll userspace operation 930 * vb2_core_poll() - implements poll syscall() logic.
860 * @q: videobuf2 queue 931 * @q: pointer to &struct vb2_queue with videobuf2 queue.
861 * @file: file argument passed to the poll file operation handler 932 * @file: &struct file argument passed to the poll
862 * @wait: wait argument passed to the poll file operation handler 933 * file operation handler.
934 * @wait: &poll_table wait argument passed to the poll
935 * file operation handler.
863 * 936 *
864 * This function implements poll file operation handler for a driver. 937 * This function implements poll file operation handler for a driver.
865 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will 938 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
@@ -871,19 +944,35 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
871 * The return values from this function are intended to be directly returned 944 * The return values from this function are intended to be directly returned
872 * from poll handler in driver. 945 * from poll handler in driver.
873 */ 946 */
874unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file, 947__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
875 poll_table *wait); 948 poll_table *wait);
876 949
950/**
951 * vb2_read() - implements read() syscall logic.
952 * @q: pointer to &struct vb2_queue with videobuf2 queue.
 953 * @data: pointer to the target userspace buffer
 954 * @count: number of bytes to read
 955 * @ppos: file handle position tracking pointer
 956 * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
957 */
877size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, 958size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
878 loff_t *ppos, int nonblock); 959 loff_t *ppos, int nonblock);
960/**
 961 * vb2_write() - implements write() syscall logic.
 962 * @q: pointer to &struct vb2_queue with videobuf2 queue.
 963 * @data: pointer to the target userspace buffer
 964 * @count: number of bytes to write
 965 * @ppos: file handle position tracking pointer
 966 * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
967 */
879size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, 968size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
880 loff_t *ppos, int nonblock); 969 loff_t *ppos, int nonblock);
881 970
882/** 971/**
883 * typedef vb2_thread_fnc - callback function for use with vb2_thread 972 * typedef vb2_thread_fnc - callback function for use with vb2_thread.
884 * 973 *
885 * @vb: pointer to struct &vb2_buffer 974 * @vb: pointer to struct &vb2_buffer.
 886 * @priv: pointer to a private pointer 975 * @priv: pointer to private data.
887 * 976 *
888 * This is called whenever a buffer is dequeued in the thread. 977 * This is called whenever a buffer is dequeued in the thread.
889 */ 978 */
@@ -891,13 +980,13 @@ typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv);
891 980
892/** 981/**
893 * vb2_thread_start() - start a thread for the given queue. 982 * vb2_thread_start() - start a thread for the given queue.
894 * @q: videobuf queue 983 * @q: pointer to &struct vb2_queue with videobuf2 queue.
895 * @fnc: callback function 984 * @fnc: &vb2_thread_fnc callback function.
896 * @priv: priv pointer passed to the callback function 985 * @priv: priv pointer passed to the callback function.
 897 * @thread_name: the name of the thread. This will be prefixed with "vb2-". 986 * @thread_name: the name of the thread. This will be prefixed with "vb2-".
898 * 987 *
899 * This starts a thread that will queue and dequeue until an error occurs 988 * This starts a thread that will queue and dequeue until an error occurs
900 * or @vb2_thread_stop is called. 989 * or vb2_thread_stop() is called.
901 * 990 *
902 * .. attention:: 991 * .. attention::
903 * 992 *
@@ -910,13 +999,13 @@ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
910 999
911/** 1000/**
912 * vb2_thread_stop() - stop the thread for the given queue. 1001 * vb2_thread_stop() - stop the thread for the given queue.
913 * @q: videobuf queue 1002 * @q: pointer to &struct vb2_queue with videobuf2 queue.
914 */ 1003 */
915int vb2_thread_stop(struct vb2_queue *q); 1004int vb2_thread_stop(struct vb2_queue *q);
916 1005
917/** 1006/**
918 * vb2_is_streaming() - return streaming status of the queue 1007 * vb2_is_streaming() - return streaming status of the queue.
919 * @q: videobuf queue 1008 * @q: pointer to &struct vb2_queue with videobuf2 queue.
920 */ 1009 */
921static inline bool vb2_is_streaming(struct vb2_queue *q) 1010static inline bool vb2_is_streaming(struct vb2_queue *q)
922{ 1011{
@@ -925,15 +1014,16 @@ static inline bool vb2_is_streaming(struct vb2_queue *q)
925 1014
926/** 1015/**
927 * vb2_fileio_is_active() - return true if fileio is active. 1016 * vb2_fileio_is_active() - return true if fileio is active.
928 * @q: videobuf queue 1017 * @q: pointer to &struct vb2_queue with videobuf2 queue.
929 * 1018 *
930 * This returns true if read() or write() is used to stream the data 1019 * This returns true if read() or write() is used to stream the data
931 * as opposed to stream I/O. This is almost never an important distinction, 1020 * as opposed to stream I/O. This is almost never an important distinction,
932 * except in rare cases. One such case is that using read() or write() to 1021 * except in rare cases. One such case is that using read() or write() to
933 * stream a format using V4L2_FIELD_ALTERNATE is not allowed since there 1022 * stream a format using %V4L2_FIELD_ALTERNATE is not allowed since there
934 * is no way you can pass the field information of each buffer to/from 1023 * is no way you can pass the field information of each buffer to/from
935 * userspace. A driver that supports this field format should check for 1024 * userspace. A driver that supports this field format should check for
936 * this in the queue_setup op and reject it if this function returns true. 1025 * this in the &vb2_ops->queue_setup op and reject it if this function returns
1026 * true.
937 */ 1027 */
938static inline bool vb2_fileio_is_active(struct vb2_queue *q) 1028static inline bool vb2_fileio_is_active(struct vb2_queue *q)
939{ 1029{
@@ -941,8 +1031,8 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
941} 1031}
942 1032
943/** 1033/**
944 * vb2_is_busy() - return busy status of the queue 1034 * vb2_is_busy() - return busy status of the queue.
945 * @q: videobuf queue 1035 * @q: pointer to &struct vb2_queue with videobuf2 queue.
946 * 1036 *
947 * This function checks if queue has any buffers allocated. 1037 * This function checks if queue has any buffers allocated.
948 */ 1038 */
@@ -952,8 +1042,8 @@ static inline bool vb2_is_busy(struct vb2_queue *q)
952} 1042}
953 1043
954/** 1044/**
955 * vb2_get_drv_priv() - return driver private data associated with the queue 1045 * vb2_get_drv_priv() - return driver private data associated with the queue.
956 * @q: videobuf queue 1046 * @q: pointer to &struct vb2_queue with videobuf2 queue.
957 */ 1047 */
958static inline void *vb2_get_drv_priv(struct vb2_queue *q) 1048static inline void *vb2_get_drv_priv(struct vb2_queue *q)
959{ 1049{
@@ -961,10 +1051,11 @@ static inline void *vb2_get_drv_priv(struct vb2_queue *q)
961} 1051}
962 1052
963/** 1053/**
964 * vb2_set_plane_payload() - set bytesused for the plane plane_no 1054 * vb2_set_plane_payload() - set bytesused for the plane @plane_no.
965 * @vb: buffer for which plane payload should be set 1055 * @vb: pointer to &struct vb2_buffer to which the plane in
966 * @plane_no: plane number for which payload should be set 1056 * question belongs to.
967 * @size: payload in bytes 1057 * @plane_no: plane number for which payload should be set.
1058 * @size: payload in bytes.
968 */ 1059 */
969static inline void vb2_set_plane_payload(struct vb2_buffer *vb, 1060static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
970 unsigned int plane_no, unsigned long size) 1061 unsigned int plane_no, unsigned long size)
@@ -975,8 +1066,9 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
975 1066
976/** 1067/**
977 * vb2_get_plane_payload() - get bytesused for the plane plane_no 1068 * vb2_get_plane_payload() - get bytesused for the plane plane_no
978 * @vb: buffer for which plane payload should be set 1069 * @vb: pointer to &struct vb2_buffer to which the plane in
979 * @plane_no: plane number for which payload should be set 1070 * question belongs to.
1071 * @plane_no: plane number for which payload should be set.
980 */ 1072 */
981static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, 1073static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
982 unsigned int plane_no) 1074 unsigned int plane_no)
@@ -987,9 +1079,10 @@ static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
987} 1079}
988 1080
989/** 1081/**
990 * vb2_plane_size() - return plane size in bytes 1082 * vb2_plane_size() - return plane size in bytes.
991 * @vb: buffer for which plane size should be returned 1083 * @vb: pointer to &struct vb2_buffer to which the plane in
992 * @plane_no: plane number for which size should be returned 1084 * question belongs to.
1085 * @plane_no: plane number for which size should be returned.
993 */ 1086 */
994static inline unsigned long 1087static inline unsigned long
995vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) 1088vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
@@ -1000,8 +1093,8 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
1000} 1093}
1001 1094
1002/** 1095/**
1003 * vb2_start_streaming_called() - return streaming status of driver 1096 * vb2_start_streaming_called() - return streaming status of driver.
1004 * @q: videobuf queue 1097 * @q: pointer to &struct vb2_queue with videobuf2 queue.
1005 */ 1098 */
1006static inline bool vb2_start_streaming_called(struct vb2_queue *q) 1099static inline bool vb2_start_streaming_called(struct vb2_queue *q)
1007{ 1100{
@@ -1009,8 +1102,8 @@ static inline bool vb2_start_streaming_called(struct vb2_queue *q)
1009} 1102}
1010 1103
1011/** 1104/**
1012 * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue 1105 * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue.
1013 * @q: videobuf queue 1106 * @q: pointer to &struct vb2_queue with videobuf2 queue.
1014 */ 1107 */
1015static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) 1108static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
1016{ 1109{
@@ -1024,10 +1117,10 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
1024 1117
1025/** 1118/**
1026 * vb2_buffer_in_use() - return true if the buffer is in use and 1119 * vb2_buffer_in_use() - return true if the buffer is in use and
1027 * the queue cannot be freed (by the means of REQBUFS(0)) call 1120 * the queue cannot be freed (by the means of VIDIOC_REQBUFS(0)) call.
1028 * 1121 *
1029 * @vb: buffer for which plane size should be returned 1122 * @vb: buffer for which plane size should be returned.
1030 * @q: videobuf queue 1123 * @q: pointer to &struct vb2_queue with videobuf2 queue.
1031 */ 1124 */
1032bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); 1125bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
1033 1126
@@ -1035,11 +1128,11 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
1035 * vb2_verify_memory_type() - Check whether the memory type and buffer type 1128 * vb2_verify_memory_type() - Check whether the memory type and buffer type
1036 * passed to a buffer operation are compatible with the queue. 1129 * passed to a buffer operation are compatible with the queue.
1037 * 1130 *
1038 * @q: videobuf queue 1131 * @q: pointer to &struct vb2_queue with videobuf2 queue.
1039 * @memory: memory model, as defined by enum &vb2_memory. 1132 * @memory: memory model, as defined by enum &vb2_memory.
1040 * @type: private buffer type whose content is defined by the vb2-core 1133 * @type: private buffer type whose content is defined by the vb2-core
1041 * caller. For example, for V4L2, it should match 1134 * caller. For example, for V4L2, it should match
1042 * the types defined on enum &v4l2_buf_type 1135 * the types defined on enum &v4l2_buf_type.
1043 */ 1136 */
1044int vb2_verify_memory_type(struct vb2_queue *q, 1137int vb2_verify_memory_type(struct vb2_queue *q,
1045 enum vb2_memory memory, unsigned int type); 1138 enum vb2_memory memory, unsigned int type);
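
The vb2_set_plane_payload(), vb2_get_plane_payload() and vb2_plane_size() helpers documented above are typically used from a driver's &vb2_ops->buf_prepare callback. A minimal sketch, assuming a hypothetical single-plane capture driver whose private struct carries a "sizeimage" field ("my_dev" and "my_buf_prepare" are illustrative names, not part of this patch):

static int my_buf_prepare(struct vb2_buffer *vb)
{
        struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

        /* Reject buffers that are too small to hold one full frame. */
        if (vb2_plane_size(vb, 0) < dev->sizeimage)
                return -EINVAL;

        /* Tell vb2 how many bytes the hardware will actually fill. */
        vb2_set_plane_payload(vb, 0, dev->sizeimage);
        return 0;
}
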
diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h
index 5a31faa24f1a..8605366ec87c 100644
--- a/include/media/videobuf2-dvb.h
+++ b/include/media/videobuf2-dvb.h
@@ -2,12 +2,11 @@
2#ifndef _VIDEOBUF2_DVB_H_ 2#ifndef _VIDEOBUF2_DVB_H_
3#define _VIDEOBUF2_DVB_H_ 3#define _VIDEOBUF2_DVB_H_
4 4
5#include <dvbdev.h> 5#include <media/dvbdev.h>
6#include <dmxdev.h> 6#include <media/dmxdev.h>
7#include <dvb_demux.h> 7#include <media/dvb_demux.h>
8#include <dvb_net.h> 8#include <media/dvb_net.h>
9#include <dvb_frontend.h> 9#include <media/dvb_frontend.h>
10
11#include <media/videobuf2-v4l2.h> 10#include <media/videobuf2-v4l2.h>
12 11
13/* We don't actually need to include media-device.h here */ 12/* We don't actually need to include media-device.h here */
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index a6ed091b79ce..4b5b84f93538 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -19,11 +19,11 @@
19#include <linux/refcount.h> 19#include <linux/refcount.h>
20 20
21/** 21/**
22 * struct vb2_vmarea_handler - common vma refcount tracking handler 22 * struct vb2_vmarea_handler - common vma refcount tracking handler.
23 * 23 *
24 * @refcount: pointer to refcount entry in the buffer 24 * @refcount: pointer to &refcount_t entry in the buffer.
25 * @put: callback to function that decreases buffer refcount 25 * @put: callback to function that decreases buffer refcount.
26 * @arg: argument for @put callback 26 * @arg: argument for @put callback.
27 */ 27 */
28struct vb2_vmarea_handler { 28struct vb2_vmarea_handler {
29 refcount_t *refcount; 29 refcount_t *refcount;
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 036127c54bbf..3d5e2d739f05 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -24,16 +24,17 @@
24#endif 24#endif
25 25
26/** 26/**
27 * struct vb2_v4l2_buffer - video buffer information for v4l2 27 * struct vb2_v4l2_buffer - video buffer information for v4l2.
28 * 28 *
29 * @vb2_buf: video buffer 2 29 * @vb2_buf: embedded struct &vb2_buffer.
30 * @flags: buffer informational flags 30 * @flags: buffer informational flags.
31 * @field: enum v4l2_field; field order of the image in the buffer 31 * @field: field order of the image in the buffer, as defined by
32 * @timecode: frame timecode 32 * &enum v4l2_field.
33 * @sequence: sequence count of this frame 33 * @timecode: frame timecode.
34 * @sequence: sequence count of this frame.
34 * 35 *
35 * Should contain enough information to be able to cover all the fields 36 * Should contain enough information to be able to cover all the fields
36 * of struct v4l2_buffer at videodev2.h 37 * of &struct v4l2_buffer at ``videodev2.h``.
37 */ 38 */
38struct vb2_v4l2_buffer { 39struct vb2_v4l2_buffer {
39 struct vb2_buffer vb2_buf; 40 struct vb2_buffer vb2_buf;
@@ -56,9 +57,9 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
56 * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies 57 * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies
57 * the memory and type values. 58 * the memory and type values.
58 * 59 *
59 * @q: videobuf2 queue 60 * @q: pointer to &struct vb2_queue with videobuf2 queue.
60 * @req: struct passed from userspace to vidioc_reqbufs handler 61 * @req: &struct v4l2_requestbuffers passed from userspace to
61 * in driver 62 * &v4l2_ioctl_ops->vidioc_reqbufs handler in driver.
62 */ 63 */
63int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); 64int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
64 65
@@ -66,94 +67,99 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
66 * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies 67 * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies
67 * the memory and type values. 68 * the memory and type values.
68 * 69 *
69 * @q: videobuf2 queue 70 * @q: pointer to &struct vb2_queue with videobuf2 queue.
70 * @create: creation parameters, passed from userspace to vidioc_create_bufs 71 * @create: creation parameters, passed from userspace to
71 * handler in driver 72 * &v4l2_ioctl_ops->vidioc_create_bufs handler in driver
72 */ 73 */
73int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); 74int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
74 75
75/** 76/**
76 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel 77 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
77 * 78 *
78 * @q: videobuf2 queue 79 * @q: pointer to &struct vb2_queue with videobuf2 queue.
79 * @b: buffer structure passed from userspace to vidioc_prepare_buf 80 * @b: buffer structure passed from userspace to
80 * handler in driver 81 * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver
82 *
83 * Should be called from &v4l2_ioctl_ops->vidioc_prepare_buf ioctl handler
84 * of a driver.
81 * 85 *
82 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
83 * This function: 86 * This function:
84 * 87 *
85 * #) verifies the passed buffer, 88 * #) verifies the passed buffer,
86 * #) calls buf_prepare callback in the driver (if provided), in which 89 * #) calls &vb2_ops->buf_prepare callback in the driver (if provided),
87 * driver-specific buffer initialization can be performed. 90 * in which driver-specific buffer initialization can be performed.
88 * 91 *
89 * The return values from this function are intended to be directly returned 92 * The return values from this function are intended to be directly returned
90 * from vidioc_prepare_buf handler in driver. 93 * from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
91 */ 94 */
92int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b); 95int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
93 96
94/** 97/**
95 * vb2_qbuf() - Queue a buffer from userspace 98 * vb2_qbuf() - Queue a buffer from userspace
96 * @q: videobuf2 queue 99 * @q: pointer to &struct vb2_queue with videobuf2 queue.
97 * @b: buffer structure passed from userspace to VIDIOC_QBUF() handler 100 * @b: buffer structure passed from userspace to
98 * in driver 101 * &v4l2_ioctl_ops->vidioc_qbuf handler in driver
99 * 102 *
100 * Should be called from VIDIOC_QBUF() ioctl handler of a driver. 103 * Should be called from &v4l2_ioctl_ops->vidioc_qbuf handler of a driver.
101 * 104 *
102 * This function: 105 * This function:
103 * 106 *
104 * #) verifies the passed buffer, 107 * #) verifies the passed buffer;
105 * #) if necessary, calls buf_prepare callback in the driver (if provided), in 108 * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
106 * which driver-specific buffer initialization can be performed, 109 * (if provided), in which driver-specific buffer initialization can
107 * #) if streaming is on, queues the buffer in driver by the means of buf_queue 110 * be performed;
108 * callback for processing. 111 * #) if streaming is on, queues the buffer in driver by the means of
112 * &vb2_ops->buf_queue callback for processing.
109 * 113 *
110 * The return values from this function are intended to be directly returned 114 * The return values from this function are intended to be directly returned
111 * from VIDIOC_QBUF() handler in driver. 115 * from &v4l2_ioctl_ops->vidioc_qbuf handler in driver.
112 */ 116 */
113int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b); 117int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
114 118
115/** 119/**
116 * vb2_expbuf() - Export a buffer as a file descriptor 120 * vb2_expbuf() - Export a buffer as a file descriptor
117 * @q: videobuf2 queue 121 * @q: pointer to &struct vb2_queue with videobuf2 queue.
118 * @eb: export buffer structure passed from userspace to VIDIOC_EXPBUF() 122 * @eb: export buffer structure passed from userspace to
119 * handler in driver 123 * &v4l2_ioctl_ops->vidioc_expbuf handler in driver
120 * 124 *
121 * The return values from this function are intended to be directly returned 125 * The return values from this function are intended to be directly returned
122 * from VIDIOC_EXPBUF() handler in driver. 126 * from &v4l2_ioctl_ops->vidioc_expbuf handler in driver.
123 */ 127 */
124int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); 128int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
125 129
126/** 130/**
127 * vb2_dqbuf() - Dequeue a buffer to the userspace 131 * vb2_dqbuf() - Dequeue a buffer to the userspace
128 * @q: videobuf2 queue 132 * @q: pointer to &struct vb2_queue with videobuf2 queue.
129 * @b: buffer structure passed from userspace to VIDIOC_DQBUF() handler 133 * @b: buffer structure passed from userspace to
130 * in driver 134 * &v4l2_ioctl_ops->vidioc_dqbuf handler in driver
131 * @nonblocking: if true, this call will not sleep waiting for a buffer if no 135 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
132 * buffers ready for dequeuing are present. Normally the driver 136 * buffers ready for dequeuing are present. Normally the driver
133 * would be passing (file->f_flags & O_NONBLOCK) here 137 * would be passing (&file->f_flags & %O_NONBLOCK) here
134 * 138 *
135 * Should be called from VIDIOC_DQBUF() ioctl handler of a driver. 139 * Should be called from &v4l2_ioctl_ops->vidioc_dqbuf ioctl handler
140 * of a driver.
136 * 141 *
137 * This function: 142 * This function:
138 * 143 *
139 * #) verifies the passed buffer, 144 * #) verifies the passed buffer;
140 * #) calls buf_finish callback in the driver (if provided), in which 145 * #) calls &vb2_ops->buf_finish callback in the driver (if provided), in which
141 * driver can perform any additional operations that may be required before 146 * driver can perform any additional operations that may be required before
142 * returning the buffer to userspace, such as cache sync, 147 * returning the buffer to userspace, such as cache sync;
143 * #) the buffer struct members are filled with relevant information for 148 * #) the buffer struct members are filled with relevant information for
144 * the userspace. 149 * the userspace.
145 * 150 *
146 * The return values from this function are intended to be directly returned 151 * The return values from this function are intended to be directly returned
147 * from VIDIOC_DQBUF() handler in driver. 152 * from &v4l2_ioctl_ops->vidioc_dqbuf handler in driver.
148 */ 153 */
149int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); 154int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
150 155
151/** 156/**
152 * vb2_streamon - start streaming 157 * vb2_streamon - start streaming
153 * @q: videobuf2 queue 158 * @q: pointer to &struct vb2_queue with videobuf2 queue.
154 * @type: type argument passed from userspace to vidioc_streamon handler 159 * @type: type argument passed from userspace to vidioc_streamon handler,
160 * as defined by &enum v4l2_buf_type.
155 * 161 *
156 * Should be called from vidioc_streamon handler of a driver. 162 * Should be called from &v4l2_ioctl_ops->vidioc_streamon handler of a driver.
157 * 163 *
158 * This function: 164 * This function:
159 * 165 *
@@ -161,13 +167,13 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
161 * 2) passes any previously queued buffers to the driver and starts streaming 167 * 2) passes any previously queued buffers to the driver and starts streaming
162 * 168 *
163 * The return values from this function are intended to be directly returned 169 * The return values from this function are intended to be directly returned
164 * from vidioc_streamon handler in the driver. 170 * from &v4l2_ioctl_ops->vidioc_streamon handler in the driver.
165 */ 171 */
166int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); 172int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
167 173
168/** 174/**
169 * vb2_streamoff - stop streaming 175 * vb2_streamoff - stop streaming
170 * @q: videobuf2 queue 176 * @q: pointer to &struct vb2_queue with videobuf2 queue.
171 * @type: type argument passed from userspace to vidioc_streamoff handler 177 * @type: type argument passed from userspace to vidioc_streamoff handler
172 * 178 *
173 * Should be called from vidioc_streamoff handler of a driver. 179 * Should be called from vidioc_streamoff handler of a driver.
@@ -186,7 +192,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
186 192
187/** 193/**
188 * vb2_queue_init() - initialize a videobuf2 queue 194 * vb2_queue_init() - initialize a videobuf2 queue
189 * @q: videobuf2 queue; this structure should be allocated in driver 195 * @q: pointer to &struct vb2_queue with videobuf2 queue.
190 * 196 *
191 * The vb2_queue structure should be allocated by the driver. The driver is 197 * The vb2_queue structure should be allocated by the driver. The driver is
192 * responsible of clearing it's content and setting initial values for some 198 * responsible of clearing it's content and setting initial values for some
@@ -199,7 +205,7 @@ int __must_check vb2_queue_init(struct vb2_queue *q);
199 205
200/** 206/**
201 * vb2_queue_release() - stop streaming, release the queue and free memory 207 * vb2_queue_release() - stop streaming, release the queue and free memory
202 * @q: videobuf2 queue 208 * @q: pointer to &struct vb2_queue with videobuf2 queue.
203 * 209 *
204 * This function stops streaming and performs necessary clean ups, including 210 * This function stops streaming and performs necessary clean ups, including
205 * freeing video buffer memory. The driver is responsible for freeing 211 * freeing video buffer memory. The driver is responsible for freeing
@@ -209,7 +215,7 @@ void vb2_queue_release(struct vb2_queue *q);
209 215
210/** 216/**
211 * vb2_poll() - implements poll userspace operation 217 * vb2_poll() - implements poll userspace operation
212 * @q: videobuf2 queue 218 * @q: pointer to &struct vb2_queue with videobuf2 queue.
213 * @file: file argument passed to the poll file operation handler 219 * @file: file argument passed to the poll file operation handler
214 * @wait: wait argument passed to the poll file operation handler 220 * @wait: wait argument passed to the poll file operation handler
215 * 221 *
@@ -226,8 +232,7 @@ void vb2_queue_release(struct vb2_queue *q);
226 * The return values from this function are intended to be directly returned 232 * The return values from this function are intended to be directly returned
227 * from poll handler in driver. 233 * from poll handler in driver.
228 */ 234 */
229unsigned int vb2_poll(struct vb2_queue *q, struct file *file, 235__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
230 poll_table *wait);
231 236
232/* 237/*
233 * The following functions are not part of the vb2 core API, but are simple 238 * The following functions are not part of the vb2 core API, but are simple
@@ -262,7 +267,7 @@ ssize_t vb2_fop_write(struct file *file, const char __user *buf,
262 size_t count, loff_t *ppos); 267 size_t count, loff_t *ppos);
263ssize_t vb2_fop_read(struct file *file, char __user *buf, 268ssize_t vb2_fop_read(struct file *file, char __user *buf,
264 size_t count, loff_t *ppos); 269 size_t count, loff_t *ppos);
265unsigned int vb2_fop_poll(struct file *file, poll_table *wait); 270__poll_t vb2_fop_poll(struct file *file, poll_table *wait);
266#ifndef CONFIG_MMU 271#ifndef CONFIG_MMU
267unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, 272unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
268 unsigned long len, unsigned long pgoff, unsigned long flags); 273 unsigned long len, unsigned long pgoff, unsigned long flags);
@@ -271,7 +276,7 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
271/** 276/**
272 * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue 277 * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue
273 * 278 *
274 * @vq: pointer to struct vb2_queue 279 * @vq: pointer to &struct vb2_queue
275 * 280 *
276 * ..note:: only use if vq->lock is non-NULL. 281 * ..note:: only use if vq->lock is non-NULL.
277 */ 282 */
@@ -280,7 +285,7 @@ void vb2_ops_wait_prepare(struct vb2_queue *vq);
280/** 285/**
281 * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue 286 * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue
282 * 287 *
283 * @vq: pointer to struct vb2_queue 288 * @vq: pointer to &struct vb2_queue
284 * 289 *
285 * ..note:: only use if vq->lock is non-NULL. 290 * ..note:: only use if vq->lock is non-NULL.
286 */ 291 */
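
The wrappers documented above (vb2_reqbufs(), vb2_qbuf(), vb2_dqbuf(), ...) are meant to be called directly from the matching &v4l2_ioctl_ops handlers, with their return values passed straight back to userspace. A minimal sketch of that wiring, assuming a hypothetical driver whose private struct embeds the queue ("my_dev" and "queue" are placeholders):

static int my_vidioc_qbuf(struct file *file, void *priv,
                          struct v4l2_buffer *b)
{
        struct my_dev *dev = video_drvdata(file);

        return vb2_qbuf(&dev->queue, b);
}

static int my_vidioc_dqbuf(struct file *file, void *priv,
                           struct v4l2_buffer *b)
{
        struct my_dev *dev = video_drvdata(file);

        return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
}
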
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 480d50a0b8ba..b712be544f8c 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -267,7 +267,7 @@ int cxl_fd_open(struct inode *inode, struct file *file);
267int cxl_fd_release(struct inode *inode, struct file *file); 267int cxl_fd_release(struct inode *inode, struct file *file);
268long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 268long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
269int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm); 269int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
270unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll); 270__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
271ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count, 271ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
272 loff_t *off); 272 loff_t *off);
273 273
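
Several prototypes touched by this series (vb2_poll(), vb2_fop_poll() and cxl_fd_poll() above) move from unsigned int to __poll_t. A minimal sketch of a poll handler in the new convention, assuming a hypothetical device with a wait queue and a data-ready predicate ("my_dev" and "my_data_ready" are illustrative only):

static __poll_t my_poll(struct file *file, struct poll_table_struct *wait)
{
        struct my_dev *dev = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &dev->waitq, wait);

        /* Build the mask from the EPOLL* bit flags expected by __poll_t. */
        if (my_data_ready(dev))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}
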
diff --git a/include/misc/ocxl-config.h b/include/misc/ocxl-config.h
new file mode 100644
index 000000000000..3526fa996a22
--- /dev/null
+++ b/include/misc/ocxl-config.h
@@ -0,0 +1,45 @@
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright 2017 IBM Corp.
3#ifndef _OCXL_CONFIG_H_
4#define _OCXL_CONFIG_H_
5
6/*
7 * This file lists the various constants used to read the
8 * configuration space of an opencapi adapter.
9 *
10 * It follows the specification for opencapi 3.0
11 */
12
13#define OCXL_EXT_CAP_ID_DVSEC 0x23
14
15#define OCXL_DVSEC_VENDOR_OFFSET 0x4
16#define OCXL_DVSEC_ID_OFFSET 0x8
17#define OCXL_DVSEC_TL_ID 0xF000
18#define OCXL_DVSEC_TL_BACKOFF_TIMERS 0x10
19#define OCXL_DVSEC_TL_RECV_CAP 0x18
20#define OCXL_DVSEC_TL_SEND_CAP 0x20
21#define OCXL_DVSEC_TL_RECV_RATE 0x30
22#define OCXL_DVSEC_TL_SEND_RATE 0x50
23#define OCXL_DVSEC_FUNC_ID 0xF001
24#define OCXL_DVSEC_FUNC_OFF_INDEX 0x08
25#define OCXL_DVSEC_FUNC_OFF_ACTAG 0x0C
26#define OCXL_DVSEC_AFU_INFO_ID 0xF003
27#define OCXL_DVSEC_AFU_INFO_AFU_IDX 0x0A
28#define OCXL_DVSEC_AFU_INFO_OFF 0x0C
29#define OCXL_DVSEC_AFU_INFO_DATA 0x10
30#define OCXL_DVSEC_AFU_CTRL_ID 0xF004
31#define OCXL_DVSEC_AFU_CTRL_AFU_IDX 0x0A
32#define OCXL_DVSEC_AFU_CTRL_TERM_PASID 0x0C
33#define OCXL_DVSEC_AFU_CTRL_ENABLE 0x0F
34#define OCXL_DVSEC_AFU_CTRL_PASID_SUP 0x10
35#define OCXL_DVSEC_AFU_CTRL_PASID_EN 0x11
36#define OCXL_DVSEC_AFU_CTRL_PASID_BASE 0x14
37#define OCXL_DVSEC_AFU_CTRL_ACTAG_SUP 0x18
38#define OCXL_DVSEC_AFU_CTRL_ACTAG_EN 0x1A
39#define OCXL_DVSEC_AFU_CTRL_ACTAG_BASE 0x1C
40#define OCXL_DVSEC_VENDOR_ID 0xF0F0
41#define OCXL_DVSEC_VENDOR_CFG_VERS 0x0C
42#define OCXL_DVSEC_VENDOR_TLX_VERS 0x10
43#define OCXL_DVSEC_VENDOR_DLX_VERS 0x20
44
45#endif /* _OCXL_CONFIG_H_ */
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
new file mode 100644
index 000000000000..51ccf76db293
--- /dev/null
+++ b/include/misc/ocxl.h
@@ -0,0 +1,214 @@
1// SPDX-License-Identifier: GPL-2.0+
2// Copyright 2017 IBM Corp.
3#ifndef _MISC_OCXL_H_
4#define _MISC_OCXL_H_
5
6#include <linux/pci.h>
7
8/*
9 * Opencapi drivers all need some common facilities, like parsing the
10 * device configuration space, adding a Process Element to the Shared
11 * Process Area, etc...
12 *
13 * The ocxl module provides a kernel API, to allow other drivers to
14 * reuse common code. A bit like a in-kernel library.
15 */
16
17#define OCXL_AFU_NAME_SZ (24+1) /* add 1 for NULL termination */
18
19/*
20 * The following 2 structures are a fairly generic way of representing
21 * the configuration data for a function and AFU, as read from the
22 * configuration space.
23 */
24struct ocxl_afu_config {
25 u8 idx;
26 int dvsec_afu_control_pos; /* offset of AFU control DVSEC */
27 char name[OCXL_AFU_NAME_SZ];
28 u8 version_major;
29 u8 version_minor;
30 u8 afuc_type;
31 u8 afum_type;
32 u8 profile;
33 u8 global_mmio_bar; /* global MMIO area */
34 u64 global_mmio_offset;
35 u32 global_mmio_size;
36 u8 pp_mmio_bar; /* per-process MMIO area */
37 u64 pp_mmio_offset;
38 u32 pp_mmio_stride;
39 u8 log_mem_size;
40 u8 pasid_supported_log;
41 u16 actag_supported;
42};
43
44struct ocxl_fn_config {
45 int dvsec_tl_pos; /* offset of the Transaction Layer DVSEC */
46 int dvsec_function_pos; /* offset of the Function DVSEC */
47 int dvsec_afu_info_pos; /* offset of the AFU information DVSEC */
48 s8 max_pasid_log;
49 s8 max_afu_index;
50};
51
52/*
53 * Read the configuration space of a function and fill in a
54 * ocxl_fn_config structure with all the function details
55 */
56extern int ocxl_config_read_function(struct pci_dev *dev,
57 struct ocxl_fn_config *fn);
58
59/*
60 * Check if an AFU index is valid for the given function.
61 *
62 * AFU indexes can be sparse, so a driver should check all indexes up
63 * to the maximum found in the function description
64 */
65extern int ocxl_config_check_afu_index(struct pci_dev *dev,
66 struct ocxl_fn_config *fn, int afu_idx);
67
68/*
69 * Read the configuration space of a function for the AFU specified by
70 * the index 'afu_idx'. Fills in a ocxl_afu_config structure
71 */
72extern int ocxl_config_read_afu(struct pci_dev *dev,
73 struct ocxl_fn_config *fn,
74 struct ocxl_afu_config *afu,
75 u8 afu_idx);
76
77/*
78 * Get the max PASID value that can be used by the function
79 */
80extern int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count);
81
82/*
83 * Tell an AFU, by writing in the configuration space, the PASIDs that
84 * it can use. Range starts at 'pasid_base' and its size is a multiple
85 * of 2
86 *
87 * 'afu_control_offset' is the offset of the AFU control DVSEC which
88 * can be found in the function configuration
89 */
90extern void ocxl_config_set_afu_pasid(struct pci_dev *dev,
91 int afu_control_offset,
92 int pasid_base, u32 pasid_count_log);
93
94/*
95 * Get the actag configuration for the function:
96 * 'base' is the first actag value that can be used.
97 * 'enabled' it the number of actags available, starting from base.
98 * 'supported' is the total number of actags desired by all the AFUs
99 * of the function.
100 */
101extern int ocxl_config_get_actag_info(struct pci_dev *dev,
102 u16 *base, u16 *enabled, u16 *supported);
103
104/*
105 * Tell a function, by writing in the configuration space, the actags
106 * it can use.
107 *
108 * 'func_offset' is the offset of the Function DVSEC that can found in
109 * the function configuration
110 */
111extern void ocxl_config_set_actag(struct pci_dev *dev, int func_offset,
112 u32 actag_base, u32 actag_count);
113
114/*
115 * Tell an AFU, by writing in the configuration space, the actags it
116 * can use.
117 *
118 * 'afu_control_offset' is the offset of the AFU control DVSEC for the
119 * desired AFU. It can be found in the AFU configuration
120 */
121extern void ocxl_config_set_afu_actag(struct pci_dev *dev,
122 int afu_control_offset,
123 int actag_base, int actag_count);
124
125/*
126 * Enable/disable an AFU, by writing in the configuration space.
127 *
128 * 'afu_control_offset' is the offset of the AFU control DVSEC for the
129 * desired AFU. It can be found in the AFU configuration
130 */
131extern void ocxl_config_set_afu_state(struct pci_dev *dev,
132 int afu_control_offset, int enable);
133
134/*
135 * Set the Transaction Layer configuration in the configuration space.
136 * Only needed for function 0.
137 *
138 * It queries the host TL capabilities, find some common ground
139 * between the host and device, and set the Transaction Layer on both
140 * accordingly.
141 */
142extern int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec);
143
144/*
145 * Request an AFU to terminate a PASID.
146 * Will return once the AFU has acked the request, or an error in case
147 * of timeout.
148 *
149 * The hardware can only terminate one PASID at a time, so caller must
150 * guarantee some kind of serialization.
151 *
152 * 'afu_control_offset' is the offset of the AFU control DVSEC for the
153 * desired AFU. It can be found in the AFU configuration
154 */
155extern int ocxl_config_terminate_pasid(struct pci_dev *dev,
156 int afu_control_offset, int pasid);
157
158/*
159 * Set up the opencapi link for the function.
160 *
161 * When called for the first time for a link, it sets up the Shared
162 * Process Area for the link and the interrupt handler to process
163 * translation faults.
164 *
165 * Returns a 'link handle' that should be used for further calls for
166 * the link
167 */
168extern int ocxl_link_setup(struct pci_dev *dev, int PE_mask,
169 void **link_handle);
170
171/*
172 * Remove the association between the function and its link.
173 */
174extern void ocxl_link_release(struct pci_dev *dev, void *link_handle);
175
176/*
177 * Add a Process Element to the Shared Process Area for a link.
178 * The process is defined by its PASID, pid, tid and its mm_struct.
179 *
180 * 'xsl_err_cb' is an optional callback if the driver wants to be
181 * notified when the translation fault interrupt handler detects an
182 * address error.
183 * 'xsl_err_data' is an argument passed to the above callback, if
184 * defined
185 */
186extern int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
187 u64 amr, struct mm_struct *mm,
188 void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
189 void *xsl_err_data);
190
191/*
192 * Remove a Process Element from the Shared Process Area for a link
193 */
194extern int ocxl_link_remove_pe(void *link_handle, int pasid);
195
196/*
197 * Allocate an AFU interrupt associated to the link.
198 *
199 * 'hw_irq' is the hardware interrupt number
200 * 'obj_handle' is the 64-bit object handle to be passed to the AFU to
201 * trigger the interrupt.
202 * On P9, 'obj_handle' is an address, which, if written, triggers the
203 * interrupt. It is an MMIO address which needs to be remapped (one
204 * page).
205 */
206extern int ocxl_link_irq_alloc(void *link_handle, int *hw_irq,
207 u64 *obj_handle);
208
209/*
210 * Free a previously allocated AFU interrupt
211 */
212extern void ocxl_link_free_irq(void *link_handle, int hw_irq);
213
214#endif /* _MISC_OCXL_H_ */
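
Taken together, the comments above describe the expected call order for a client of the ocxl kernel API: read the function and AFU configuration, set up the link, then enable the AFU. A rough probe-time sketch under those assumptions, with error unwinding trimmed and AFU index 0 chosen purely for illustration:

static int my_probe(struct pci_dev *dev)
{
        struct ocxl_fn_config fn;
        struct ocxl_afu_config afu;
        void *link;
        int rc;

        rc = ocxl_config_read_function(dev, &fn);
        if (rc)
                return rc;

        rc = ocxl_config_read_afu(dev, &fn, &afu, 0 /* AFU index */);
        if (rc)
                return rc;

        rc = ocxl_link_setup(dev, 0 /* PE_mask */, &link);
        if (rc)
                return rc;

        /* Enable the AFU once the link is ready. */
        ocxl_config_set_afu_state(dev, afu.dvsec_afu_control_pos, 1);
        return 0;
}
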
diff --git a/include/net/act_api.h b/include/net/act_api.h
index fd08df74c466..6ed9692f20bd 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -86,7 +86,7 @@ struct tc_action_ops {
86 int (*act)(struct sk_buff *, const struct tc_action *, 86 int (*act)(struct sk_buff *, const struct tc_action *,
87 struct tcf_result *); 87 struct tcf_result *);
88 int (*dump)(struct sk_buff *, struct tc_action *, int, int); 88 int (*dump)(struct sk_buff *, struct tc_action *, int, int);
89 void (*cleanup)(struct tc_action *, int bind); 89 void (*cleanup)(struct tc_action *);
90 int (*lookup)(struct net *, struct tc_action **, u32); 90 int (*lookup)(struct net *, struct tc_action **, u32);
91 int (*init)(struct net *net, struct nlattr *nla, 91 int (*init)(struct net *net, struct nlattr *nla,
92 struct nlattr *est, struct tc_action **act, int ovr, 92 struct nlattr *est, struct tc_action **act, int ovr,
@@ -120,12 +120,19 @@ int tc_action_net_init(struct tc_action_net *tn,
120void tcf_idrinfo_destroy(const struct tc_action_ops *ops, 120void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
121 struct tcf_idrinfo *idrinfo); 121 struct tcf_idrinfo *idrinfo);
122 122
123static inline void tc_action_net_exit(struct tc_action_net *tn) 123static inline void tc_action_net_exit(struct list_head *net_list,
124 unsigned int id)
124{ 125{
126 struct net *net;
127
125 rtnl_lock(); 128 rtnl_lock();
126 tcf_idrinfo_destroy(tn->ops, tn->idrinfo); 129 list_for_each_entry(net, net_list, exit_list) {
130 struct tc_action_net *tn = net_generic(net, id);
131
132 tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
133 kfree(tn->idrinfo);
134 }
127 rtnl_unlock(); 135 rtnl_unlock();
128 kfree(tn->idrinfo);
129} 136}
130 137
131int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, 138int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
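
With the new signature, tc_action_net_exit() walks a list of namespaces, matching the pernet exit_batch convention. A hedged sketch of how an action module would plug it in; the "my_act_*" identifiers are placeholders and the per-namespace init (tc_action_net_init()) is omitted for brevity:

static unsigned int my_act_net_id;

static void __net_exit my_act_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, my_act_net_id);
}

static struct pernet_operations my_act_net_ops = {
        .exit_batch     = my_act_exit_net,
        .id             = &my_act_net_id,
        .size           = sizeof(struct tc_action_net),
};
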
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b623b65a79d1..c4185a7b0e90 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -180,7 +180,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
180 */ 180 */
181int ipv6_addr_label_init(void); 181int ipv6_addr_label_init(void);
182void ipv6_addr_label_cleanup(void); 182void ipv6_addr_label_cleanup(void);
183void ipv6_addr_label_rtnl_register(void); 183int ipv6_addr_label_rtnl_register(void);
184u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr, 184u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
185 int type, int ifindex); 185 int type, int ifindex);
186 186
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e89cff0c4c23..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
271 int flags); 271 int flags);
272int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, 272int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
273 size_t len, int flags); 273 size_t len, int flags);
274uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); 274__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
275int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 275int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
276int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); 276int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
277int bt_sock_wait_ready(struct sock *sk, unsigned long flags); 277int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index fe328c52c46b..801489bb14c3 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
32 */ 32 */
33int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len); 33int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
34 34
35static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
36{
37 u8 tmp;
38
39 cfpkt_extr_head(pkt, &tmp, 1);
40
41 return tmp;
42}
43
44static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
45{
46 __le16 tmp;
47
48 cfpkt_extr_head(pkt, &tmp, 2);
49
50 return le16_to_cpu(tmp);
51}
52
53static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
54{
55 __le32 tmp;
56
57 cfpkt_extr_head(pkt, &tmp, 4);
58
59 return le32_to_cpu(tmp);
60}
61
35/* 62/*
36 * Peek header from packet. 63 * Peek header from packet.
37 * Reads data from packet without changing packet. 64 * Reads data from packet without changing packet.
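
The new cfpkt_extr_head_u8/u16/u32() helpers above pull fixed-size little-endian fields off the front of a packet and return them in host order. A small usage sketch with made-up field names (not from this patch):

static void my_parse_hdr(struct cfpkt *pkt)
{
        u8  cmd     = cfpkt_extr_head_u8(pkt);
        u16 channel = cfpkt_extr_head_u16(pkt);
        u32 token   = cfpkt_extr_head_u32(pkt);

        pr_debug("cmd %u channel %u token %u\n", cmd, channel, token);
}
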
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fb94a8bd8ab5..81174f9b8d14 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1775,6 +1775,8 @@ enum cfg80211_signal_type {
1775 * by %parent_bssid. 1775 * by %parent_bssid.
1776 * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to 1776 * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
1777 * the BSS that requested the scan in which the beacon/probe was received. 1777 * the BSS that requested the scan in which the beacon/probe was received.
1778 * @chains: bitmask for filled values in @chain_signal.
1779 * @chain_signal: per-chain signal strength of last received BSS in dBm.
1778 */ 1780 */
1779struct cfg80211_inform_bss { 1781struct cfg80211_inform_bss {
1780 struct ieee80211_channel *chan; 1782 struct ieee80211_channel *chan;
@@ -1783,6 +1785,8 @@ struct cfg80211_inform_bss {
1783 u64 boottime_ns; 1785 u64 boottime_ns;
1784 u64 parent_tsf; 1786 u64 parent_tsf;
1785 u8 parent_bssid[ETH_ALEN] __aligned(2); 1787 u8 parent_bssid[ETH_ALEN] __aligned(2);
1788 u8 chains;
1789 s8 chain_signal[IEEE80211_MAX_CHAINS];
1786}; 1790};
1787 1791
1788/** 1792/**
@@ -1826,6 +1830,8 @@ struct cfg80211_bss_ies {
1826 * that holds the beacon data. @beacon_ies is still valid, of course, and 1830 * that holds the beacon data. @beacon_ies is still valid, of course, and
1827 * points to the same data as hidden_beacon_bss->beacon_ies in that case. 1831 * points to the same data as hidden_beacon_bss->beacon_ies in that case.
1828 * @signal: signal strength value (type depends on the wiphy's signal_type) 1832 * @signal: signal strength value (type depends on the wiphy's signal_type)
1833 * @chains: bitmask for filled values in @chain_signal.
1834 * @chain_signal: per-chain signal strength of last received BSS in dBm.
1829 * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes 1835 * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
1830 */ 1836 */
1831struct cfg80211_bss { 1837struct cfg80211_bss {
@@ -1844,6 +1850,8 @@ struct cfg80211_bss {
1844 u16 capability; 1850 u16 capability;
1845 1851
1846 u8 bssid[ETH_ALEN]; 1852 u8 bssid[ETH_ALEN];
1853 u8 chains;
1854 s8 chain_signal[IEEE80211_MAX_CHAINS];
1847 1855
1848 u8 priv[0] __aligned(sizeof(void *)); 1856 u8 priv[0] __aligned(sizeof(void *));
1849}; 1857};
@@ -2023,6 +2031,9 @@ struct cfg80211_disassoc_request {
2023 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask 2031 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
2024 * will be used in ht_capa. Un-supported values will be ignored. 2032 * will be used in ht_capa. Un-supported values will be ignored.
2025 * @ht_capa_mask: The bits of ht_capa which are to be used. 2033 * @ht_capa_mask: The bits of ht_capa which are to be used.
2034 * @wep_keys: static WEP keys, if not NULL points to an array of
2035 * CFG80211_MAX_WEP_KEYS WEP keys
2036 * @wep_tx_key: key index (0..3) of the default TX static WEP key
2026 */ 2037 */
2027struct cfg80211_ibss_params { 2038struct cfg80211_ibss_params {
2028 const u8 *ssid; 2039 const u8 *ssid;
@@ -2039,6 +2050,8 @@ struct cfg80211_ibss_params {
2039 int mcast_rate[NUM_NL80211_BANDS]; 2050 int mcast_rate[NUM_NL80211_BANDS];
2040 struct ieee80211_ht_cap ht_capa; 2051 struct ieee80211_ht_cap ht_capa;
2041 struct ieee80211_ht_cap ht_capa_mask; 2052 struct ieee80211_ht_cap ht_capa_mask;
2053 struct key_params *wep_keys;
2054 int wep_tx_key;
2042}; 2055};
2043 2056
2044/** 2057/**
@@ -5577,7 +5590,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
5577 * cfg80211_rx_mgmt - notification of received, unprocessed management frame 5590 * cfg80211_rx_mgmt - notification of received, unprocessed management frame
5578 * @wdev: wireless device receiving the frame 5591 * @wdev: wireless device receiving the frame
5579 * @freq: Frequency on which the frame was received in MHz 5592 * @freq: Frequency on which the frame was received in MHz
5580 * @sig_dbm: signal strength in mBm, or 0 if unknown 5593 * @sig_dbm: signal strength in dBm, or 0 if unknown
5581 * @buf: Management frame (header + body) 5594 * @buf: Management frame (header + body)
5582 * @len: length of the frame data 5595 * @len: length of the frame data
5583 * @flags: flags, as defined in enum nl80211_rxmgmt_flags 5596 * @flags: flags, as defined in enum nl80211_rxmgmt_flags
@@ -5756,7 +5769,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
5756 * @frame: the frame 5769 * @frame: the frame
5757 * @len: length of the frame 5770 * @len: length of the frame
5758 * @freq: frequency the frame was received on 5771 * @freq: frequency the frame was received on
5759 * @sig_dbm: signal strength in mBm, or 0 if unknown 5772 * @sig_dbm: signal strength in dBm, or 0 if unknown
5760 * 5773 *
5761 * Use this function to report to userspace when a beacon was 5774 * Use this function to report to userspace when a beacon was
5762 * received. It is not useful to call this when there is no 5775 * received. It is not useful to call this when there is no
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9654e133599..6545b03e97f7 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -26,10 +26,12 @@ struct devlink {
26 struct list_head port_list; 26 struct list_head port_list;
27 struct list_head sb_list; 27 struct list_head sb_list;
28 struct list_head dpipe_table_list; 28 struct list_head dpipe_table_list;
29 struct list_head resource_list;
29 struct devlink_dpipe_headers *dpipe_headers; 30 struct devlink_dpipe_headers *dpipe_headers;
30 const struct devlink_ops *ops; 31 const struct devlink_ops *ops;
31 struct device *dev; 32 struct device *dev;
32 possible_net_t _net; 33 possible_net_t _net;
34 struct mutex lock;
33 char priv[0] __aligned(NETDEV_ALIGN); 35 char priv[0] __aligned(NETDEV_ALIGN);
34}; 36};
35 37
@@ -181,6 +183,9 @@ struct devlink_dpipe_table_ops;
181 * @counters_enabled: indicates if counters are active 183 * @counters_enabled: indicates if counters are active
182 * @counter_control_extern: indicates if counter control is in dpipe or 184 * @counter_control_extern: indicates if counter control is in dpipe or
183 * external tool 185 * external tool
186 * @resource_valid: Indicate that the resource id is valid
187 * @resource_id: relative resource this table is related to
188 * @resource_units: number of resource's unit consumed per table's entry
184 * @table_ops: table operations 189 * @table_ops: table operations
185 * @rcu: rcu 190 * @rcu: rcu
186 */ 191 */
@@ -190,6 +195,9 @@ struct devlink_dpipe_table {
190 const char *name; 195 const char *name;
191 bool counters_enabled; 196 bool counters_enabled;
192 bool counter_control_extern; 197 bool counter_control_extern;
198 bool resource_valid;
199 u64 resource_id;
200 u64 resource_units;
193 struct devlink_dpipe_table_ops *table_ops; 201 struct devlink_dpipe_table_ops *table_ops;
194 struct rcu_head rcu; 202 struct rcu_head rcu;
195}; 203};
@@ -223,7 +231,63 @@ struct devlink_dpipe_headers {
223 unsigned int headers_count; 231 unsigned int headers_count;
224}; 232};
225 233
234/**
235 * struct devlink_resource_ops - resource ops
236 * @occ_get: get the occupied size
237 * @size_validate: validate the size of the resource before update, reload
238 * is needed for changes to take place
239 */
240struct devlink_resource_ops {
241 u64 (*occ_get)(struct devlink *devlink);
242 int (*size_validate)(struct devlink *devlink, u64 size,
243 struct netlink_ext_ack *extack);
244};
245
246/**
247 * struct devlink_resource_size_params - resource's size parameters
248 * @size_min: minimum size which can be set
249 * @size_max: maximum size which can be set
250 * @size_granularity: size granularity
251 * @size_unit: resource's basic unit
252 */
253struct devlink_resource_size_params {
254 u64 size_min;
255 u64 size_max;
256 u64 size_granularity;
257 enum devlink_resource_unit unit;
258};
259
260/**
261 * struct devlink_resource - devlink resource
262 * @name: name of the resource
263 * @id: id, per devlink instance
264 * @size: size of the resource
265 * @size_new: updated size of the resource, reload is needed
266 * @size_valid: valid in case the total size of the resource is valid
267 * including its children
268 * @parent: parent resource
269 * @size_params: size parameters
270 * @list: parent list
271 * @resource_list: list of child resources
272 * @resource_ops: resource ops
273 */
274struct devlink_resource {
275 const char *name;
276 u64 id;
277 u64 size;
278 u64 size_new;
279 bool size_valid;
280 struct devlink_resource *parent;
281 struct devlink_resource_size_params *size_params;
282 struct list_head list;
283 struct list_head resource_list;
284 const struct devlink_resource_ops *resource_ops;
285};
286
287#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
288
226struct devlink_ops { 289struct devlink_ops {
290 int (*reload)(struct devlink *devlink);
227 int (*port_type_set)(struct devlink_port *devlink_port, 291 int (*port_type_set)(struct devlink_port *devlink_port,
228 enum devlink_port_type port_type); 292 enum devlink_port_type port_type);
229 int (*port_split)(struct devlink *devlink, unsigned int port_index, 293 int (*port_split)(struct devlink *devlink, unsigned int port_index,
@@ -332,6 +396,23 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet;
332extern struct devlink_dpipe_header devlink_dpipe_header_ipv4; 396extern struct devlink_dpipe_header devlink_dpipe_header_ipv4;
333extern struct devlink_dpipe_header devlink_dpipe_header_ipv6; 397extern struct devlink_dpipe_header devlink_dpipe_header_ipv6;
334 398
399int devlink_resource_register(struct devlink *devlink,
400 const char *resource_name,
401 bool top_hierarchy,
402 u64 resource_size,
403 u64 resource_id,
404 u64 parent_resource_id,
405 struct devlink_resource_size_params *size_params,
406 const struct devlink_resource_ops *resource_ops);
407void devlink_resources_unregister(struct devlink *devlink,
408 struct devlink_resource *resource);
409int devlink_resource_size_get(struct devlink *devlink,
410 u64 resource_id,
411 u64 *p_resource_size);
412int devlink_dpipe_table_resource_set(struct devlink *devlink,
413 const char *table_name, u64 resource_id,
414 u64 resource_units);
415
335#else 416#else
336 417
337static inline struct devlink *devlink_alloc(const struct devlink_ops *ops, 418static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
@@ -468,6 +549,40 @@ devlink_dpipe_match_put(struct sk_buff *skb,
468 return 0; 549 return 0;
469} 550}
470 551
552static inline int
553devlink_resource_register(struct devlink *devlink,
554 const char *resource_name,
555 bool top_hierarchy,
556 u64 resource_size,
557 u64 resource_id,
558 u64 parent_resource_id,
559 struct devlink_resource_size_params *size_params,
560 const struct devlink_resource_ops *resource_ops)
561{
562 return 0;
563}
564
565static inline void
566devlink_resources_unregister(struct devlink *devlink,
567 struct devlink_resource *resource)
568{
569}
570
571static inline int
572devlink_resource_size_get(struct devlink *devlink, u64 resource_id,
573 u64 *p_resource_size)
574{
575 return -EOPNOTSUPP;
576}
577
578static inline int
579devlink_dpipe_table_resource_set(struct devlink *devlink,
580 const char *table_name, u64 resource_id,
581 u64 resource_units)
582{
583 return -EOPNOTSUPP;
584}
585
471#endif 586#endif
472 587
473#endif /* _NET_DEVLINK_H_ */ 588#endif /* _NET_DEVLINK_H_ */
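
The resource API added above is registered by the driver at init time and can then be tied to dpipe tables. A hedged sketch, assuming a single top-level resource named "kvd", a driver-chosen MY_RESOURCE_KVD id, and DEVLINK_RESOURCE_UNIT_ENTRY from the matching uapi enum (all names and sizes are illustrative, not from this patch):

#define MY_RESOURCE_KVD 1

static const struct devlink_resource_ops my_resource_ops = {
        /* occ_get / size_validate hooks left unset in this sketch. */
};

static struct devlink_resource_size_params my_size_params = {
        .size_min               = 64,
        .size_max               = 1024,
        .size_granularity       = 64,
        .unit                   = DEVLINK_RESOURCE_UNIT_ENTRY,
};

static int my_register_resources(struct devlink *devlink)
{
        int err;

        err = devlink_resource_register(devlink, "kvd", true /* top */,
                                        1024, MY_RESOURCE_KVD,
                                        DEVLINK_RESOURCE_ID_PARENT_TOP,
                                        &my_size_params, &my_resource_ops);
        if (err)
                return err;

        /* One entry of "my_table" consumes one unit of the resource. */
        return devlink_dpipe_table_resource_set(devlink, "my_table",
                                                MY_RESOURCE_KVD, 1);
}
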
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 55df9939bca2..342d2503cba5 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -69,6 +69,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
69 */ 69 */
70struct dn_route { 70struct dn_route {
71 struct dst_entry dst; 71 struct dst_entry dst;
72 struct dn_route __rcu *dn_next;
72 73
73 struct neighbour *n; 74 struct neighbour *n;
74 75
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2a05738570d8..6cb602dd970c 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -296,31 +296,39 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
296 return mask; 296 return mask;
297} 297}
298 298
299static inline u8 dsa_upstream_port(struct dsa_switch *ds) 299/* Return the local port used to reach an arbitrary switch port */
300static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
301 int port)
300{ 302{
301 struct dsa_switch_tree *dst = ds->dst; 303 if (device == ds->index)
302 304 return port;
303 /*
304 * If this is the root switch (i.e. the switch that connects
305 * to the CPU), return the cpu port number on this switch.
306 * Else return the (DSA) port number that connects to the
307 * switch that is one hop closer to the cpu.
308 */
309 if (dst->cpu_dp->ds == ds)
310 return dst->cpu_dp->index;
311 else 305 else
312 return ds->rtable[dst->cpu_dp->ds->index]; 306 return ds->rtable[device];
307}
308
309/* Return the local port used to reach the dedicated CPU port */
310static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
311{
312 const struct dsa_port *dp = dsa_to_port(ds, port);
313 const struct dsa_port *cpu_dp = dp->cpu_dp;
314
315 if (!cpu_dp)
316 return port;
317
318 return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
313} 319}
314 320
315typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid, 321typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
316 bool is_static, void *data); 322 bool is_static, void *data);
317struct dsa_switch_ops { 323struct dsa_switch_ops {
324#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
318 /* 325 /*
319 * Legacy probing. 326 * Legacy probing.
320 */ 327 */
321 const char *(*probe)(struct device *dsa_dev, 328 const char *(*probe)(struct device *dsa_dev,
322 struct device *host_dev, int sw_addr, 329 struct device *host_dev, int sw_addr,
323 void **priv); 330 void **priv);
331#endif
324 332
325 enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds, 333 enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
326 int port); 334 int port);
@@ -412,12 +420,10 @@ struct dsa_switch_ops {
412 */ 420 */
413 int (*port_vlan_filtering)(struct dsa_switch *ds, int port, 421 int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
414 bool vlan_filtering); 422 bool vlan_filtering);
415 int (*port_vlan_prepare)(struct dsa_switch *ds, int port, 423 int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
416 const struct switchdev_obj_port_vlan *vlan, 424 const struct switchdev_obj_port_vlan *vlan);
417 struct switchdev_trans *trans); 425 void (*port_vlan_add)(struct dsa_switch *ds, int port,
418 void (*port_vlan_add)(struct dsa_switch *ds, int port, 426 const struct switchdev_obj_port_vlan *vlan);
419 const struct switchdev_obj_port_vlan *vlan,
420 struct switchdev_trans *trans);
421 int (*port_vlan_del)(struct dsa_switch *ds, int port, 427 int (*port_vlan_del)(struct dsa_switch *ds, int port,
422 const struct switchdev_obj_port_vlan *vlan); 428 const struct switchdev_obj_port_vlan *vlan);
423 /* 429 /*
@@ -433,12 +439,10 @@ struct dsa_switch_ops {
433 /* 439 /*
434 * Multicast database 440 * Multicast database
435 */ 441 */
436 int (*port_mdb_prepare)(struct dsa_switch *ds, int port, 442 int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
437 const struct switchdev_obj_port_mdb *mdb, 443 const struct switchdev_obj_port_mdb *mdb);
438 struct switchdev_trans *trans); 444 void (*port_mdb_add)(struct dsa_switch *ds, int port,
439 void (*port_mdb_add)(struct dsa_switch *ds, int port, 445 const struct switchdev_obj_port_mdb *mdb);
440 const struct switchdev_obj_port_mdb *mdb,
441 struct switchdev_trans *trans);
442 int (*port_mdb_del)(struct dsa_switch *ds, int port, 446 int (*port_mdb_del)(struct dsa_switch *ds, int port,
443 const struct switchdev_obj_port_mdb *mdb); 447 const struct switchdev_obj_port_mdb *mdb);
444 /* 448 /*
@@ -472,11 +476,20 @@ struct dsa_switch_driver {
472 const struct dsa_switch_ops *ops; 476 const struct dsa_switch_ops *ops;
473}; 477};
474 478
479#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
475/* Legacy driver registration */ 480/* Legacy driver registration */
476void register_switch_driver(struct dsa_switch_driver *type); 481void register_switch_driver(struct dsa_switch_driver *type);
477void unregister_switch_driver(struct dsa_switch_driver *type); 482void unregister_switch_driver(struct dsa_switch_driver *type);
478struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); 483struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
479 484
485#else
486static inline void register_switch_driver(struct dsa_switch_driver *type) { }
487static inline void unregister_switch_driver(struct dsa_switch_driver *type) { }
488static inline struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
489{
490 return NULL;
491}
492#endif
480struct net_device *dsa_dev_to_net_device(struct device *dev); 493struct net_device *dsa_dev_to_net_device(struct device *dev);
481 494
482/* Keep inline for faster access in hot path */ 495/* Keep inline for faster access in hot path */
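
dsa_towards_port() above answers "which local port do I use to reach port X on switch Y", and the reworked dsa_upstream_port() builds on it to find the route to a port's dedicated CPU port. A tiny illustration, not part of this patch:

static void my_show_routing(struct dsa_switch *ds)
{
        /* Local port used to reach port 3 on the switch with index 1. */
        unsigned int via = dsa_towards_port(ds, 1, 3);

        /* Local port used to reach the CPU port dedicated to port 0. */
        unsigned int cpu = dsa_upstream_port(ds, 0);

        pr_info("towards 1/3 via %u, upstream of port 0 is %u\n", via, cpu);
}
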
diff --git a/include/net/dst.h b/include/net/dst.h
index d49d607dd2b3..c63d2c37f6e9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -34,13 +34,9 @@ struct sk_buff;
34 34
35struct dst_entry { 35struct dst_entry {
36 struct net_device *dev; 36 struct net_device *dev;
37 struct rcu_head rcu_head;
38 struct dst_entry *child;
39 struct dst_ops *ops; 37 struct dst_ops *ops;
40 unsigned long _metrics; 38 unsigned long _metrics;
41 unsigned long expires; 39 unsigned long expires;
42 struct dst_entry *path;
43 struct dst_entry *from;
44#ifdef CONFIG_XFRM 40#ifdef CONFIG_XFRM
45 struct xfrm_state *xfrm; 41 struct xfrm_state *xfrm;
46#else 42#else
@@ -59,8 +55,6 @@ struct dst_entry {
59#define DST_XFRM_QUEUE 0x0040 55#define DST_XFRM_QUEUE 0x0040
60#define DST_METADATA 0x0080 56#define DST_METADATA 0x0080
61 57
62 short error;
63
64 /* A non-zero value of dst->obsolete forces by-hand validation 58 /* A non-zero value of dst->obsolete forces by-hand validation
65 * of the route entry. Positive values are set by the generic 59 * of the route entry. Positive values are set by the generic
66 * dst layer to indicate that the entry has been forcefully 60 * dst layer to indicate that the entry has been forcefully
@@ -76,35 +70,24 @@ struct dst_entry {
76#define DST_OBSOLETE_KILL -2 70#define DST_OBSOLETE_KILL -2
77 unsigned short header_len; /* more space at head required */ 71 unsigned short header_len; /* more space at head required */
78 unsigned short trailer_len; /* space to reserve at tail */ 72 unsigned short trailer_len; /* space to reserve at tail */
79 unsigned short __pad3;
80 73
81#ifdef CONFIG_IP_ROUTE_CLASSID
82 __u32 tclassid;
83#else
84 __u32 __pad2;
85#endif
86
87#ifdef CONFIG_64BIT
88 /*
89 * Align __refcnt to a 64 bytes alignment
90 * (L1_CACHE_SIZE would be too much)
91 */
92 long __pad_to_align_refcnt[2];
93#endif
94 /* 74 /*
95 * __refcnt wants to be on a different cache line from 75 * __refcnt wants to be on a different cache line from
96 * input/output/ops or performance tanks badly 76 * input/output/ops or performance tanks badly
97 */ 77 */
98 atomic_t __refcnt; /* client references */ 78#ifdef CONFIG_64BIT
79 atomic_t __refcnt; /* 64-bit offset 64 */
80#endif
99 int __use; 81 int __use;
100 unsigned long lastuse; 82 unsigned long lastuse;
101 struct lwtunnel_state *lwtstate; 83 struct lwtunnel_state *lwtstate;
102 union { 84 struct rcu_head rcu_head;
103 struct dst_entry *next; 85 short error;
104 struct rtable __rcu *rt_next; 86 short __pad;
105 struct rt6_info __rcu *rt6_next; 87 __u32 tclassid;
106 struct dn_route __rcu *dn_next; 88#ifndef CONFIG_64BIT
107 }; 89 atomic_t __refcnt; /* 32-bit offset 64 */
90#endif
108}; 91};
109 92
110struct dst_metrics { 93struct dst_metrics {
@@ -250,7 +233,7 @@ static inline void dst_hold(struct dst_entry *dst)
250{ 233{
251 /* 234 /*
252 * If your kernel compilation stops here, please check 235 * If your kernel compilation stops here, please check
253 * __pad_to_align_refcnt declaration in struct dst_entry 236 * the placement of __refcnt in struct dst_entry
254 */ 237 */
255 BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63); 238 BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
256 WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0); 239 WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
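With the explicit __pad_to_align_refcnt[] padding gone, the reordered layout itself has to keep __refcnt starting exactly 64 bytes into the struct on both 32-bit and 64-bit builds, and dst_hold() turns any regression into a compile-time failure. A minimal stand-alone sketch of that idiom, with an illustrative struct and macro rather than the kernel's own:

#include <stddef.h>

#define DEMO_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct demo_entry {
        char read_mostly[64];   /* ops, metrics, ... fill the first cache line */
        int refcnt;             /* wants its own cache line to avoid false sharing */
};

static inline void demo_hold(struct demo_entry *e)
{
        /* Fails to compile if refcnt drifts off the 64-byte boundary */
        DEMO_BUILD_BUG_ON(offsetof(struct demo_entry, refcnt) & 63);
        e->refcnt++;
}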
diff --git a/include/net/erspan.h b/include/net/erspan.h
index ca94fc86865e..d044aa60cc76 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -15,7 +15,7 @@
15 * s, Recur, Flags, Version fields only S (bit 03) is set to 1. The 15 * s, Recur, Flags, Version fields only S (bit 03) is set to 1. The
16 * other fields are set to zero, so only a sequence number follows. 16 * other fields are set to zero, so only a sequence number follows.
17 * 17 *
18 * ERSPAN Type II header (8 octets [42:49]) 18 * ERSPAN Version 1 (Type II) header (8 octets [42:49])
19 * 0 1 2 3 19 * 0 1 2 3
20 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 20 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
21 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 21 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -24,11 +24,31 @@
24 * | Reserved | Index | 24 * | Reserved | Index |
25 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 25 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
26 * 26 *
27 *
28 * ERSPAN Version 2 (Type III) header (12 octets [42:49])
29 * 0 1 2 3
30 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
31 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
32 * | Ver | VLAN | COS |BSO|T| Session ID |
33 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
34 * | Timestamp |
35 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
36 * | SGT |P| FT | Hw ID |D|Gra|O|
37 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
38 *
39 * Platform Specific SubHeader (8 octets, optional)
40 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
41 * | Platf ID | Platform Specific Info |
42 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
43 * | Platform Specific Info |
44 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
45 *
27 * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB 46 * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB
28 */ 47 */
29 48
30#define ERSPAN_VERSION 0x1 49#include <uapi/linux/erspan.h>
31 50
51#define ERSPAN_VERSION 0x1 /* ERSPAN type II */
32#define VER_MASK 0xf000 52#define VER_MASK 0xf000
33#define VLAN_MASK 0x0fff 53#define VLAN_MASK 0x0fff
34#define COS_MASK 0xe000 54#define COS_MASK 0xe000
@@ -37,6 +57,19 @@
37#define ID_MASK 0x03ff 57#define ID_MASK 0x03ff
38#define INDEX_MASK 0xfffff 58#define INDEX_MASK 0xfffff
39 59
 60#define ERSPAN_VERSION2 0x2 /* ERSPAN type III */
61#define BSO_MASK EN_MASK
62#define SGT_MASK 0xffff0000
63#define P_MASK 0x8000
64#define FT_MASK 0x7c00
65#define HWID_MASK 0x03f0
66#define DIR_MASK 0x0008
67#define GRA_MASK 0x0006
68#define O_MASK 0x0001
69
70#define HWID_OFFSET 4
71#define DIR_OFFSET 3
72
40enum erspan_encap_type { 73enum erspan_encap_type {
41 ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */ 74 ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
42 ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */ 75 ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
@@ -44,18 +77,199 @@ enum erspan_encap_type {
 44 ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag preserved in frame */ 77 ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag preserved in frame */
45}; 78};
46 79
47struct erspan_metadata { 80#define ERSPAN_V1_MDSIZE 4
48 __be32 index; /* type II */ 81#define ERSPAN_V2_MDSIZE 8
49};
50 82
51struct erspanhdr { 83struct erspan_base_hdr {
52 __be16 ver_vlan; 84#if defined(__LITTLE_ENDIAN_BITFIELD)
53#define VER_OFFSET 12 85 __u8 vlan_upper:4,
54 __be16 session_id; 86 ver:4;
55#define COS_OFFSET 13 87 __u8 vlan:8;
56#define EN_OFFSET 11 88 __u8 session_id_upper:2,
57#define T_OFFSET 10 89 t:1,
58 struct erspan_metadata md; 90 en:2,
91 cos:3;
92 __u8 session_id:8;
93#elif defined(__BIG_ENDIAN_BITFIELD)
94 __u8 ver: 4,
95 vlan_upper:4;
96 __u8 vlan:8;
97 __u8 cos:3,
98 en:2,
99 t:1,
100 session_id_upper:2;
101 __u8 session_id:8;
102#else
103#error "Please fix <asm/byteorder.h>"
104#endif
59}; 105};
60 106
107static inline void set_session_id(struct erspan_base_hdr *ershdr, u16 id)
108{
109 ershdr->session_id = id & 0xff;
110 ershdr->session_id_upper = (id >> 8) & 0x3;
111}
112
113static inline u16 get_session_id(const struct erspan_base_hdr *ershdr)
114{
115 return (ershdr->session_id_upper << 8) + ershdr->session_id;
116}
117
118static inline void set_vlan(struct erspan_base_hdr *ershdr, u16 vlan)
119{
120 ershdr->vlan = vlan & 0xff;
121 ershdr->vlan_upper = (vlan >> 8) & 0xf;
122}
123
124static inline u16 get_vlan(const struct erspan_base_hdr *ershdr)
125{
126 return (ershdr->vlan_upper << 8) + ershdr->vlan;
127}
128
129static inline void set_hwid(struct erspan_md2 *md2, u8 hwid)
130{
131 md2->hwid = hwid & 0xf;
132 md2->hwid_upper = (hwid >> 4) & 0x3;
133}
134
135static inline u8 get_hwid(const struct erspan_md2 *md2)
136{
137 return (md2->hwid_upper << 4) + md2->hwid;
138}
139
140static inline int erspan_hdr_len(int version)
141{
142 return sizeof(struct erspan_base_hdr) +
143 (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
144}
145
146static inline u8 tos_to_cos(u8 tos)
147{
148 u8 dscp, cos;
149
150 dscp = tos >> 2;
151 cos = dscp >> 3;
152 return cos;
153}
154
155static inline void erspan_build_header(struct sk_buff *skb,
156 u32 id, u32 index,
157 bool truncate, bool is_ipv4)
158{
159 struct ethhdr *eth = (struct ethhdr *)skb->data;
160 enum erspan_encap_type enc_type;
161 struct erspan_base_hdr *ershdr;
162 struct qtag_prefix {
163 __be16 eth_type;
164 __be16 tci;
165 } *qp;
166 u16 vlan_tci = 0;
167 u8 tos;
168 __be32 *idx;
169
170 tos = is_ipv4 ? ip_hdr(skb)->tos :
171 (ipv6_hdr(skb)->priority << 4) +
172 (ipv6_hdr(skb)->flow_lbl[0] >> 4);
173
174 enc_type = ERSPAN_ENCAP_NOVLAN;
175
 176 /* If the mirrored packet has a vlan tag, extract the tci and
 177 * preserve the vlan header in the mirrored frame.
178 */
179 if (eth->h_proto == htons(ETH_P_8021Q)) {
180 qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
181 vlan_tci = ntohs(qp->tci);
182 enc_type = ERSPAN_ENCAP_INFRAME;
183 }
184
185 skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
186 ershdr = (struct erspan_base_hdr *)skb->data;
187 memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
188
189 /* Build base header */
190 ershdr->ver = ERSPAN_VERSION;
191 ershdr->cos = tos_to_cos(tos);
192 ershdr->en = enc_type;
193 ershdr->t = truncate;
194 set_vlan(ershdr, vlan_tci);
195 set_session_id(ershdr, id);
196
197 /* Build metadata */
198 idx = (__be32 *)(ershdr + 1);
199 *idx = htonl(index & INDEX_MASK);
200}
201
202/* ERSPAN GRA: timestamp granularity
203 * 00b --> granularity = 100 microseconds
204 * 01b --> granularity = 100 nanoseconds
205 * 10b --> granularity = IEEE 1588
206 * Here we only support 100 microseconds.
207 */
208static inline __be32 erspan_get_timestamp(void)
209{
210 u64 h_usecs;
211 ktime_t kt;
212
213 kt = ktime_get_real();
214 h_usecs = ktime_divns(kt, 100 * NSEC_PER_USEC);
215
 216 /* The ERSPAN base header only has a 32-bit timestamp,
 217 * so it wraps around roughly every five days.
218 */
219 return htonl((u32)h_usecs);
220}
221
222static inline void erspan_build_header_v2(struct sk_buff *skb,
223 u32 id, u8 direction, u16 hwid,
224 bool truncate, bool is_ipv4)
225{
226 struct ethhdr *eth = (struct ethhdr *)skb->data;
227 struct erspan_base_hdr *ershdr;
228 struct erspan_md2 *md2;
229 struct qtag_prefix {
230 __be16 eth_type;
231 __be16 tci;
232 } *qp;
233 u16 vlan_tci = 0;
234 u8 gra = 0; /* 100 usec */
235 u8 bso = 0; /* Bad/Short/Oversized */
236 u8 sgt = 0;
237 u8 tos;
238
239 tos = is_ipv4 ? ip_hdr(skb)->tos :
240 (ipv6_hdr(skb)->priority << 4) +
241 (ipv6_hdr(skb)->flow_lbl[0] >> 4);
242
 243 /* Unlike v1, v2 does not have an En field,
 244 * so only the vlan tci field is extracted.
245 */
246 if (eth->h_proto == htons(ETH_P_8021Q)) {
247 qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
248 vlan_tci = ntohs(qp->tci);
249 }
250
251 skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
252 ershdr = (struct erspan_base_hdr *)skb->data;
253 memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
254
255 /* Build base header */
256 ershdr->ver = ERSPAN_VERSION2;
257 ershdr->cos = tos_to_cos(tos);
258 ershdr->en = bso;
259 ershdr->t = truncate;
260 set_vlan(ershdr, vlan_tci);
261 set_session_id(ershdr, id);
262
263 /* Build metadata */
264 md2 = (struct erspan_md2 *)(ershdr + 1);
265 md2->timestamp = erspan_get_timestamp();
266 md2->sgt = htons(sgt);
267 md2->p = 1;
268 md2->ft = 0;
269 md2->dir = direction;
270 md2->gra = gra;
271 md2->o = 0;
272 set_hwid(md2, hwid);
273}
274
61#endif 275#endif
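The accessors above exist because the 10-bit session ID, the 12-bit VLAN and the 6-bit hardware ID are each stored as a low field plus an *_upper field in the packed bitfield layout. A small illustrative round-trip; the values and the demo function are made up, and struct erspan_md2 comes from the uapi header included above:

static void erspan_demo_fields(void)
{
        struct erspan_base_hdr hdr = {};
        struct erspan_md2 md2 = {};             /* from <uapi/linux/erspan.h> */

        set_session_id(&hdr, 0x2ab);            /* 10 bits: 8 low + 2 upper */
        set_vlan(&hdr, 0x5ff);                  /* 12 bits: 8 low + 4 upper */
        set_hwid(&md2, 0x3f);                   /*  6 bits: 4 low + 2 upper */

        WARN_ON(get_session_id(&hdr) != 0x2ab);
        WARN_ON(get_vlan(&hdr) != 0x5ff);
        WARN_ON(get_hwid(&md2) != 0x3f);
}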
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 304f7aa9cc01..0304ba2ae353 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -49,6 +49,9 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d,
49int gnet_stats_copy_queue(struct gnet_dump *d, 49int gnet_stats_copy_queue(struct gnet_dump *d,
50 struct gnet_stats_queue __percpu *cpu_q, 50 struct gnet_stats_queue __percpu *cpu_q,
51 struct gnet_stats_queue *q, __u32 qlen); 51 struct gnet_stats_queue *q, __u32 qlen);
52void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
53 const struct gnet_stats_queue __percpu *cpu_q,
54 const struct gnet_stats_queue *q, __u32 qlen);
52int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); 55int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
53 56
54int gnet_stats_finish_copy(struct gnet_dump *d); 57int gnet_stats_finish_copy(struct gnet_dump *d);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0358745ea059..6692d67e9245 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -77,6 +77,7 @@ struct inet_connection_sock_af_ops {
77 * @icsk_af_ops Operations which are AF_INET{4,6} specific 77 * @icsk_af_ops Operations which are AF_INET{4,6} specific
78 * @icsk_ulp_ops Pluggable ULP control hook 78 * @icsk_ulp_ops Pluggable ULP control hook
79 * @icsk_ulp_data ULP private data 79 * @icsk_ulp_data ULP private data
 80 * @icsk_listen_portaddr_node hash node in the portaddr listener hashtable
80 * @icsk_ca_state: Congestion control state 81 * @icsk_ca_state: Congestion control state
81 * @icsk_retransmits: Number of unrecovered [RTO] timeouts 82 * @icsk_retransmits: Number of unrecovered [RTO] timeouts
82 * @icsk_pending: Scheduled timer event 83 * @icsk_pending: Scheduled timer event
@@ -101,6 +102,7 @@ struct inet_connection_sock {
101 const struct inet_connection_sock_af_ops *icsk_af_ops; 102 const struct inet_connection_sock_af_ops *icsk_af_ops;
102 const struct tcp_ulp_ops *icsk_ulp_ops; 103 const struct tcp_ulp_ops *icsk_ulp_ops;
103 void *icsk_ulp_data; 104 void *icsk_ulp_data;
105 struct hlist_node icsk_listen_portaddr_node;
104 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); 106 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
105 __u8 icsk_ca_state:6, 107 __u8 icsk_ca_state:6,
106 icsk_ca_setsockopt:1, 108 icsk_ca_setsockopt:1,
@@ -305,7 +307,7 @@ void inet_csk_prepare_forced_close(struct sock *sk);
305/* 307/*
306 * LISTEN is a special case for poll.. 308 * LISTEN is a special case for poll..
307 */ 309 */
308static inline unsigned int inet_csk_listen_poll(const struct sock *sk) 310static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
309{ 311{
310 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? 312 return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
311 (POLLIN | POLLRDNORM) : 0; 313 (POLLIN | POLLRDNORM) : 0;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 2dbbbff5e1e3..9141e95529e7 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -111,6 +111,7 @@ struct inet_bind_hashbucket {
111 */ 111 */
112struct inet_listen_hashbucket { 112struct inet_listen_hashbucket {
113 spinlock_t lock; 113 spinlock_t lock;
114 unsigned int count;
114 struct hlist_head head; 115 struct hlist_head head;
115}; 116};
116 117
@@ -132,12 +133,13 @@ struct inet_hashinfo {
132 /* Ok, let's try this, I give up, we do need a local binding 133 /* Ok, let's try this, I give up, we do need a local binding
133 * TCP hash as well as the others for fast bind/connect. 134 * TCP hash as well as the others for fast bind/connect.
134 */ 135 */
136 struct kmem_cache *bind_bucket_cachep;
135 struct inet_bind_hashbucket *bhash; 137 struct inet_bind_hashbucket *bhash;
136
137 unsigned int bhash_size; 138 unsigned int bhash_size;
138 /* 4 bytes hole on 64 bit */
139 139
140 struct kmem_cache *bind_bucket_cachep; 140 /* The 2nd listener table hashed by local port and address */
141 unsigned int lhash2_mask;
142 struct inet_listen_hashbucket *lhash2;
141 143
142 /* All the above members are written once at bootup and 144 /* All the above members are written once at bootup and
143 * never written again _or_ are predominantly read-access. 145 * never written again _or_ are predominantly read-access.
@@ -145,14 +147,25 @@ struct inet_hashinfo {
145 * Now align to a new cache line as all the following members 147 * Now align to a new cache line as all the following members
146 * might be often dirty. 148 * might be often dirty.
147 */ 149 */
148 /* All sockets in TCP_LISTEN state will be in here. This is the only 150 /* All sockets in TCP_LISTEN state will be in listening_hash.
149 * table where wildcard'd TCP sockets can exist. Hash function here 151 * This is the only table where wildcard'd TCP sockets can
150 * is just local port number. 152 * exist. listening_hash is only hashed by local port number.
153 * If lhash2 is initialized, the same socket will also be hashed
154 * to lhash2 by port and address.
151 */ 155 */
152 struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] 156 struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
153 ____cacheline_aligned_in_smp; 157 ____cacheline_aligned_in_smp;
154}; 158};
155 159
160#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
161 hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
162
163static inline struct inet_listen_hashbucket *
164inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
165{
166 return &h->lhash2[hash & h->lhash2_mask];
167}
168
156static inline struct inet_ehash_bucket *inet_ehash_bucket( 169static inline struct inet_ehash_bucket *inet_ehash_bucket(
157 struct inet_hashinfo *hashinfo, 170 struct inet_hashinfo *hashinfo,
158 unsigned int hash) 171 unsigned int hash)
@@ -208,6 +221,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child);
208void inet_put_port(struct sock *sk); 221void inet_put_port(struct sock *sk);
209 222
210void inet_hashinfo_init(struct inet_hashinfo *h); 223void inet_hashinfo_init(struct inet_hashinfo *h);
224void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
225 unsigned long numentries, int scale,
226 unsigned long low_limit,
227 unsigned long high_limit);
211 228
212bool inet_ehash_insert(struct sock *sk, struct sock *osk); 229bool inet_ehash_insert(struct sock *sk, struct sock *osk);
213bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); 230bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
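A hedged sketch of how a lookup path can use the new secondary table: pick a bucket from a precomputed (address, port) hash and walk its sockets under RCU. The match below is reduced to a bare port check; a real lookup also scores address, bound-device and reuseport matches:

/* Caller holds rcu_read_lock(); hash is e.g. ipv4_portaddr_hash(net, daddr, hnum). */
static struct sock *demo_lhash2_walk(struct inet_hashinfo *h, u32 hash,
                                     unsigned short hnum)
{
        struct inet_listen_hashbucket *ilb2 = inet_lhash2_bucket(h, hash);
        struct inet_connection_sock *icsk;
        struct sock *sk;

        inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
                sk = (struct sock *)icsk;
                if (sk->sk_num == hnum)
                        return sk;      /* simplified: no addr/if scoring */
        }
        return NULL;
}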
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 39efb968b7a4..0a671c32d6b9 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
291 291
292int inet_sk_rebuild_header(struct sock *sk); 292int inet_sk_rebuild_header(struct sock *sk);
293 293
294/**
295 * inet_sk_state_load - read sk->sk_state for lockless contexts
296 * @sk: socket pointer
297 *
298 * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
299 * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
300 */
301static inline int inet_sk_state_load(const struct sock *sk)
302{
303 /* state change might impact lockless readers. */
304 return smp_load_acquire(&sk->sk_state);
305}
306
307/**
308 * inet_sk_state_store - update sk->sk_state
309 * @sk: socket pointer
310 * @newstate: new state
311 *
312 * Paired with inet_sk_state_load(). Should be used in contexts where
313 * state change might impact lockless readers.
314 */
315void inet_sk_state_store(struct sock *sk, int newstate);
316
317void inet_sk_set_state(struct sock *sk, int state);
318
294static inline unsigned int __inet_ehashfn(const __be32 laddr, 319static inline unsigned int __inet_ehashfn(const __be32 laddr,
295 const __u16 lport, 320 const __u16 lport,
296 const __be32 faddr, 321 const __be32 faddr,
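The two helpers split the usual sk->sk_state access into a release-style store on the writer side and an acquire load on the lockless reader side. An illustrative pairing; the demo functions are not part of the patch, and TCP_LISTEN comes from net/tcp_states.h:

/* Writer, under the socket lock: publish the new state. */
static void demo_enter_listen(struct sock *sk)
{
        inet_sk_state_store(sk, TCP_LISTEN);
}

/* Lockless reader (diag/poll style): never read sk->sk_state directly here. */
static bool demo_is_listening(const struct sock *sk)
{
        return inet_sk_state_load(sk) == TCP_LISTEN;
}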
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 1356fa6a7566..899495589a7e 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -93,8 +93,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
93 struct inet_timewait_death_row *dr, 93 struct inet_timewait_death_row *dr,
94 const int state); 94 const int state);
95 95
96void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, 96void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
97 struct inet_hashinfo *hashinfo); 97 struct inet_hashinfo *hashinfo);
98 98
99void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, 99void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
100 bool rearm); 100 bool rearm);
diff --git a/include/net/ip.h b/include/net/ip.h
index af8addbaa3c1..746abff9ce51 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -26,12 +26,14 @@
26#include <linux/ip.h> 26#include <linux/ip.h>
27#include <linux/in.h> 27#include <linux/in.h>
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/jhash.h>
29 30
30#include <net/inet_sock.h> 31#include <net/inet_sock.h>
31#include <net/route.h> 32#include <net/route.h>
32#include <net/snmp.h> 33#include <net/snmp.h>
33#include <net/flow.h> 34#include <net/flow.h>
34#include <net/flow_dissector.h> 35#include <net/flow_dissector.h>
36#include <net/netns/hash.h>
35 37
36#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ 38#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
37#define IPV4_MIN_MTU 68 /* RFC 791 */ 39#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -522,6 +524,13 @@ static inline unsigned int ipv4_addr_hash(__be32 ip)
522 return (__force unsigned int) ip; 524 return (__force unsigned int) ip;
523} 525}
524 526
527static inline u32 ipv4_portaddr_hash(const struct net *net,
528 __be32 saddr,
529 unsigned int port)
530{
531 return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
532}
533
525bool ip_call_ra_chain(struct sk_buff *skb); 534bool ip_call_ra_chain(struct sk_buff *skb);
526 535
527/* 536/*
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 10c913816032..34ec321d6a03 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -129,6 +129,8 @@ struct rt6_exception {
129 129
130struct rt6_info { 130struct rt6_info {
131 struct dst_entry dst; 131 struct dst_entry dst;
132 struct rt6_info __rcu *rt6_next;
133 struct rt6_info *from;
132 134
133 /* 135 /*
134 * Tail elements of dst_entry (__refcnt etc.) 136 * Tail elements of dst_entry (__refcnt etc.)
@@ -147,6 +149,7 @@ struct rt6_info {
147 */ 149 */
148 struct list_head rt6i_siblings; 150 struct list_head rt6i_siblings;
149 unsigned int rt6i_nsiblings; 151 unsigned int rt6i_nsiblings;
152 atomic_t rt6i_nh_upper_bound;
150 153
151 atomic_t rt6i_ref; 154 atomic_t rt6i_ref;
152 155
@@ -168,19 +171,21 @@ struct rt6_info {
168 u32 rt6i_metric; 171 u32 rt6i_metric;
169 u32 rt6i_pmtu; 172 u32 rt6i_pmtu;
170 /* more non-fragment space at head required */ 173 /* more non-fragment space at head required */
174 int rt6i_nh_weight;
171 unsigned short rt6i_nfheader_len; 175 unsigned short rt6i_nfheader_len;
172 u8 rt6i_protocol; 176 u8 rt6i_protocol;
173 u8 exception_bucket_flushed:1, 177 u8 exception_bucket_flushed:1,
174 unused:7; 178 should_flush:1,
179 unused:6;
175}; 180};
176 181
177#define for_each_fib6_node_rt_rcu(fn) \ 182#define for_each_fib6_node_rt_rcu(fn) \
178 for (rt = rcu_dereference((fn)->leaf); rt; \ 183 for (rt = rcu_dereference((fn)->leaf); rt; \
179 rt = rcu_dereference(rt->dst.rt6_next)) 184 rt = rcu_dereference(rt->rt6_next))
180 185
181#define for_each_fib6_walker_rt(w) \ 186#define for_each_fib6_walker_rt(w) \
182 for (rt = (w)->leaf; rt; \ 187 for (rt = (w)->leaf; rt; \
183 rt = rcu_dereference_protected(rt->dst.rt6_next, 1)) 188 rt = rcu_dereference_protected(rt->rt6_next, 1))
184 189
185static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) 190static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
186{ 191{
@@ -203,11 +208,9 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
203{ 208{
204 struct rt6_info *rt; 209 struct rt6_info *rt;
205 210
206 for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); 211 for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);
207 rt = (struct rt6_info *)rt->dst.from);
208 if (rt && rt != rt0) 212 if (rt && rt != rt0)
209 rt0->dst.expires = rt->dst.expires; 213 rt0->dst.expires = rt->dst.expires;
210
211 dst_set_expires(&rt0->dst, timeout); 214 dst_set_expires(&rt0->dst, timeout);
212 rt0->rt6i_flags |= RTF_EXPIRES; 215 rt0->rt6i_flags |= RTF_EXPIRES;
213} 216}
@@ -242,8 +245,8 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
242 u32 cookie = 0; 245 u32 cookie = 0;
243 246
244 if (rt->rt6i_flags & RTF_PCPU || 247 if (rt->rt6i_flags & RTF_PCPU ||
245 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from)) 248 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
246 rt = (struct rt6_info *)(rt->dst.from); 249 rt = rt->from;
247 250
248 rt6_get_cookie_safe(rt, &cookie); 251 rt6_get_cookie_safe(rt, &cookie);
249 252
@@ -404,6 +407,7 @@ unsigned int fib6_tables_seq_read(struct net *net);
404int fib6_tables_dump(struct net *net, struct notifier_block *nb); 407int fib6_tables_dump(struct net *net, struct notifier_block *nb);
405 408
406void fib6_update_sernum(struct rt6_info *rt); 409void fib6_update_sernum(struct rt6_info *rt);
410void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
407 411
408#ifdef CONFIG_IPV6_MULTIPLE_TABLES 412#ifdef CONFIG_IPV6_MULTIPLE_TABLES
409int fib6_rules_init(void); 413int fib6_rules_init(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 18e442ea93d8..27d23a65f3cd 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
66 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 66 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
67} 67}
68 68
69static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt)
70{
71 return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
72 RTF_GATEWAY;
73}
74
69void ip6_route_input(struct sk_buff *skb); 75void ip6_route_input(struct sk_buff *skb);
70struct dst_entry *ip6_route_input_lookup(struct net *net, 76struct dst_entry *ip6_route_input_lookup(struct net *net,
71 struct net_device *dev, 77 struct net_device *dev,
@@ -165,10 +171,13 @@ struct rt6_rtnl_dump_arg {
165}; 171};
166 172
167int rt6_dump_route(struct rt6_info *rt, void *p_arg); 173int rt6_dump_route(struct rt6_info *rt, void *p_arg);
168void rt6_ifdown(struct net *net, struct net_device *dev);
169void rt6_mtu_change(struct net_device *dev, unsigned int mtu); 174void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
170void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); 175void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
171void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); 176void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
177void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
178void rt6_disable_ip(struct net_device *dev, unsigned long event);
179void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
180void rt6_multipath_rebalance(struct rt6_info *rt);
172 181
173static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb) 182static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
174{ 183{
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d66f70f63734..236e40ba06bf 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,10 @@ struct __ip6_tnl_parm {
36 __be32 o_key; 36 __be32 o_key;
37 37
38 __u32 fwmark; 38 __u32 fwmark;
39 __u32 index; /* ERSPAN type II index */
40 __u8 erspan_ver; /* ERSPAN version */
41 __u8 dir; /* direction */
42 __u16 hwid; /* hwid */
39}; 43};
40 44
41/* IPv6 tunnel */ 45/* IPv6 tunnel */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 24628f6b09bf..1f16773cfd76 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -116,8 +116,11 @@ struct ip_tunnel {
116 u32 o_seqno; /* The last output seqno */ 116 u32 o_seqno; /* The last output seqno */
117 int tun_hlen; /* Precalculated header length */ 117 int tun_hlen; /* Precalculated header length */
118 118
 119 /* This field used only by ERSPAN */ 119 /* These four fields are used only by ERSPAN */
120 u32 index; /* ERSPAN type II index */ 120 u32 index; /* ERSPAN type II index */
121 u8 erspan_ver; /* ERSPAN version */
122 u8 dir; /* ERSPAN direction */
123 u16 hwid; /* ERSPAN hardware ID */
121 124
122 struct dst_cache dst_cache; 125 struct dst_cache dst_cache;
123 126
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ff68cf288f9b..eb0bec043c96 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -69,8 +69,7 @@ struct ip_vs_iphdr {
69}; 69};
70 70
71static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset, 71static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
72 int len, void *buffer, 72 int len, void *buffer)
73 const struct ip_vs_iphdr *ipvsh)
74{ 73{
75 return skb_header_pointer(skb, offset, len, buffer); 74 return skb_header_pointer(skb, offset, len, buffer);
76} 75}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 221238254eb7..8606c9113d3f 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -22,6 +22,7 @@
22#include <net/flow.h> 22#include <net/flow.h>
23#include <net/flow_dissector.h> 23#include <net/flow_dissector.h>
24#include <net/snmp.h> 24#include <net/snmp.h>
25#include <net/netns/hash.h>
25 26
26#define SIN6_LEN_RFC2133 24 27#define SIN6_LEN_RFC2133 24
27 28
@@ -674,6 +675,22 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
674 cpu_to_be32(0x0000ffff))) == 0UL; 675 cpu_to_be32(0x0000ffff))) == 0UL;
675} 676}
676 677
678static inline u32 ipv6_portaddr_hash(const struct net *net,
679 const struct in6_addr *addr6,
680 unsigned int port)
681{
682 unsigned int hash, mix = net_hash_mix(net);
683
684 if (ipv6_addr_any(addr6))
685 hash = jhash_1word(0, mix);
686 else if (ipv6_addr_v4mapped(addr6))
687 hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
688 else
689 hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
690
691 return hash ^ port;
692}
693
677/* 694/*
678 * Check for a RFC 4843 ORCHID address 695 * Check for a RFC 4843 ORCHID address
679 * (Overlay Routable Cryptographic Hash Identifiers) 696 * (Overlay Routable Cryptographic Hash Identifiers)
@@ -953,6 +970,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
953 &inet6_sk(sk)->cork); 970 &inet6_sk(sk)->cork);
954} 971}
955 972
973unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);
974
956int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, 975int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
957 struct flowi6 *fl6); 976 struct flowi6 *fl6);
958struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, 977struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
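One property worth noting: ipv6_portaddr_hash() folds IPv4-mapped addresses down to their embedded IPv4 address, so a v4-mapped listener hashes to the same value as its plain IPv4 form. A hypothetical check of that equivalence; the address and port are arbitrary:

static void demo_portaddr_hash_mapped(struct net *net)
{
        struct in6_addr mapped;
        __be32 v4 = htonl(0xc0000201);          /* 192.0.2.1, documentation prefix */

        ipv6_addr_set_v4mapped(v4, &mapped);    /* ::ffff:192.0.2.1 */

        /* Both forms must select the same secondary listener bucket. */
        WARN_ON(ipv6_portaddr_hash(net, &mapped, 80) !=
                ipv4_portaddr_hash(net, v4, 80));
}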
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 070e93a17c59..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,7 +153,7 @@ struct iucv_sock_list {
153 atomic_t autobind_name; 153 atomic_t autobind_name;
154}; 154};
155 155
156unsigned int iucv_sock_poll(struct file *file, struct socket *sock, 156__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
157 poll_table *wait); 157 poll_table *wait);
158void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); 158void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
159void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); 159void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index eec143cca1c0..906e90223066 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1552,6 +1552,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
1552 * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the 1552 * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
1553 * driver for a key to indicate that sufficient tailroom must always 1553 * driver for a key to indicate that sufficient tailroom must always
1554 * be reserved for ICV or MIC, even when HW encryption is enabled. 1554 * be reserved for ICV or MIC, even when HW encryption is enabled.
1555 * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
1556 * a TKIP key if it only requires MIC space. Do not set together with
1557 * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
1555 */ 1558 */
1556enum ieee80211_key_flags { 1559enum ieee80211_key_flags {
1557 IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0), 1560 IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -1562,6 +1565,7 @@ enum ieee80211_key_flags {
1562 IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5), 1565 IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5),
1563 IEEE80211_KEY_FLAG_RX_MGMT = BIT(6), 1566 IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
1564 IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7), 1567 IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7),
1568 IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
1565}; 1569};
1566 1570
1567/** 1571/**
@@ -1593,8 +1597,8 @@ struct ieee80211_key_conf {
1593 u8 icv_len; 1597 u8 icv_len;
1594 u8 iv_len; 1598 u8 iv_len;
1595 u8 hw_key_idx; 1599 u8 hw_key_idx;
1596 u8 flags;
1597 s8 keyidx; 1600 s8 keyidx;
1601 u16 flags;
1598 u8 keylen; 1602 u8 keylen;
1599 u8 key[0]; 1603 u8 key[0];
1600}; 1604};
@@ -2056,6 +2060,9 @@ struct ieee80211_txq {
2056 * The stack will not do fragmentation. 2060 * The stack will not do fragmentation.
2057 * The callback for @set_frag_threshold should be set as well. 2061 * The callback for @set_frag_threshold should be set as well.
2058 * 2062 *
2063 * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
2064 * TDLS links.
2065 *
2059 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays 2066 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
2060 */ 2067 */
2061enum ieee80211_hw_flags { 2068enum ieee80211_hw_flags {
@@ -2098,6 +2105,7 @@ enum ieee80211_hw_flags {
2098 IEEE80211_HW_TX_FRAG_LIST, 2105 IEEE80211_HW_TX_FRAG_LIST,
2099 IEEE80211_HW_REPORTS_LOW_ACK, 2106 IEEE80211_HW_REPORTS_LOW_ACK,
2100 IEEE80211_HW_SUPPORTS_TX_FRAG, 2107 IEEE80211_HW_SUPPORTS_TX_FRAG,
2108 IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
2101 2109
2102 /* keep last, obviously */ 2110 /* keep last, obviously */
2103 NUM_IEEE80211_HW_FLAGS 2111 NUM_IEEE80211_HW_FLAGS
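A driver opts into the new behaviour from its set_key() callback; widening flags from u8 to u16 is what makes BIT(8) representable in the first place. A hedged sketch of the driver side; the TKIP condition is illustrative, and real drivers gate this on how their hardware handles the Michael MIC:

static int demo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                        struct ieee80211_vif *vif, struct ieee80211_sta *sta,
                        struct ieee80211_key_conf *key)
{
        if (cmd == SET_KEY && key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                /* HW computes the MIC itself but still needs room for it,
                 * so ask mac80211 for MIC space instead of a generated MMIC.
                 */
                key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
        }
        return 0;
}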
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 049008493faf..f306b2aa15a4 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -51,7 +51,7 @@ struct net {
 51 refcount_t passive; /* To decide when the network 51 refcount_t passive; /* To decide when the network
52 * namespace should be freed. 52 * namespace should be freed.
53 */ 53 */
 54 atomic_t count; /* To decide when the network 54 refcount_t count; /* To decide when the network
55 * namespace should be shut down. 55 * namespace should be shut down.
56 */ 56 */
57 spinlock_t rules_mod_lock; 57 spinlock_t rules_mod_lock;
@@ -195,7 +195,7 @@ void __put_net(struct net *net);
195 195
196static inline struct net *get_net(struct net *net) 196static inline struct net *get_net(struct net *net)
197{ 197{
198 atomic_inc(&net->count); 198 refcount_inc(&net->count);
199 return net; 199 return net;
200} 200}
201 201
@@ -206,14 +206,14 @@ static inline struct net *maybe_get_net(struct net *net)
206 * exists. If the reference count is zero this 206 * exists. If the reference count is zero this
207 * function fails and returns NULL. 207 * function fails and returns NULL.
208 */ 208 */
209 if (!atomic_inc_not_zero(&net->count)) 209 if (!refcount_inc_not_zero(&net->count))
210 net = NULL; 210 net = NULL;
211 return net; 211 return net;
212} 212}
213 213
214static inline void put_net(struct net *net) 214static inline void put_net(struct net *net)
215{ 215{
216 if (atomic_dec_and_test(&net->count)) 216 if (refcount_dec_and_test(&net->count))
217 __put_net(net); 217 __put_net(net);
218} 218}
219 219
@@ -225,7 +225,7 @@ int net_eq(const struct net *net1, const struct net *net2)
225 225
226static inline int check_net(const struct net *net) 226static inline int check_net(const struct net *net)
227{ 227{
228 return atomic_read(&net->count) != 0; 228 return refcount_read(&net->count) != 0;
229} 229}
230 230
231void net_drop_ns(void *); 231void net_drop_ns(void *);
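Converting count to refcount_t keeps the call sites unchanged but makes overflow and increment-from-zero detectable at runtime. A sketch of the usual maybe_get_net() pattern under RCU; the wrapper itself is illustrative:

static struct net *demo_get_live_net(struct net *candidate)
{
        struct net *net = NULL;

        rcu_read_lock();
        /* refcount_inc_not_zero() fails once the namespace is being torn down */
        if (candidate)
                net = maybe_get_net(candidate);
        rcu_read_unlock();

        return net;             /* caller must put_net() when done */
}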
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 4ed1040bbe4a..73f825732326 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -13,17 +13,17 @@
13 13
14const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; 14const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
15 15
16extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; 16extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
17extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; 17extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
18extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; 18extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
19#ifdef CONFIG_NF_CT_PROTO_DCCP 19#ifdef CONFIG_NF_CT_PROTO_DCCP
20extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; 20extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
21#endif 21#endif
22#ifdef CONFIG_NF_CT_PROTO_SCTP 22#ifdef CONFIG_NF_CT_PROTO_SCTP
23extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4; 23extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
24#endif 24#endif
25#ifdef CONFIG_NF_CT_PROTO_UDPLITE 25#ifdef CONFIG_NF_CT_PROTO_UDPLITE
26extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; 26extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
27#endif 27#endif
28 28
29int nf_conntrack_ipv4_compat_init(void); 29int nf_conntrack_ipv4_compat_init(void);
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 9cd55be95853..effa8dfba68c 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -4,17 +4,17 @@
4 4
5extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; 5extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
6 6
7extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; 7extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
8extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; 8extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
9extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; 9extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
10#ifdef CONFIG_NF_CT_PROTO_DCCP 10#ifdef CONFIG_NF_CT_PROTO_DCCP
11extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; 11extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
12#endif 12#endif
13#ifdef CONFIG_NF_CT_PROTO_SCTP 13#ifdef CONFIG_NF_CT_PROTO_SCTP
14extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6; 14extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
15#endif 15#endif
16#ifdef CONFIG_NF_CT_PROTO_UDPLITE 16#ifdef CONFIG_NF_CT_PROTO_UDPLITE
17extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; 17extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
18#endif 18#endif
19 19
20#include <linux/sysctl.h> 20#include <linux/sysctl.h>
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
new file mode 100644
index 000000000000..adf8db44cf86
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -0,0 +1,17 @@
1#ifndef _NF_CONNTRACK_COUNT_H
2#define _NF_CONNTRACK_COUNT_H
3
4struct nf_conncount_data;
5
6struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
7 unsigned int keylen);
8void nf_conncount_destroy(struct net *net, unsigned int family,
9 struct nf_conncount_data *data);
10
11unsigned int nf_conncount_count(struct net *net,
12 struct nf_conncount_data *data,
13 const u32 *key,
14 unsigned int family,
15 const struct nf_conntrack_tuple *tuple,
16 const struct nf_conntrack_zone *zone);
17#endif
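A hedged sketch of the nf_conncount lifecycle implied by these declarations: initialize per family, count tracked connections matching a caller-defined key, and tear down on exit. The key layout, its length convention and the wrappers below are assumptions for illustration, not part of the header:

static struct nf_conncount_data *
demo_conncount_setup(struct net *net, unsigned int keylen)
{
        /* failure is reported through the returned pointer */
        return nf_conncount_init(net, NFPROTO_IPV4, keylen);
}

static unsigned int
demo_conncount_query(struct net *net, struct nf_conncount_data *data,
                     const u32 *key, const struct nf_conntrack_tuple *tuple,
                     const struct nf_conntrack_zone *zone)
{
        /* counts connections whose stored key matches @key */
        return nf_conncount_count(net, data, key, NFPROTO_IPV4, tuple, zone);
}

static void demo_conncount_teardown(struct net *net,
                                    struct nf_conncount_data *data)
{
        nf_conncount_destroy(net, NFPROTO_IPV4, data);
}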
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 7ef56c13698a..a7220eef9aee 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -27,6 +27,9 @@ struct nf_conntrack_l4proto {
27 /* Resolve clashes on insertion races. */ 27 /* Resolve clashes on insertion races. */
28 bool allow_clash; 28 bool allow_clash;
29 29
30 /* protoinfo nlattr size, closes a hole */
31 u16 nlattr_size;
32
30 /* Try to fill in the third arg: dataoff is offset past network protocol 33 /* Try to fill in the third arg: dataoff is offset past network protocol
31 hdr. Return true if possible. */ 34 hdr. Return true if possible. */
32 bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, 35 bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
@@ -66,8 +69,6 @@ struct nf_conntrack_l4proto {
 66 /* convert protoinfo to nfnetlink attributes */ 69
67 int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, 70 int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
68 struct nf_conn *ct); 71 struct nf_conn *ct);
69 /* Calculate protoinfo nlattr size */
70 int (*nlattr_size)(void);
71 72
72 /* convert nfnetlink attributes to protoinfo */ 73 /* convert nfnetlink attributes to protoinfo */
73 int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); 74 int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
@@ -80,8 +81,6 @@ struct nf_conntrack_l4proto {
80 struct nf_conntrack_tuple *t); 81 struct nf_conntrack_tuple *t);
81 const struct nla_policy *nla_policy; 82 const struct nla_policy *nla_policy;
82 83
83 size_t nla_size;
84
85#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 84#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
86 struct { 85 struct {
87 int (*nlattr_to_obj)(struct nlattr *tb[], 86 int (*nlattr_to_obj)(struct nlattr *tb[],
@@ -109,7 +108,7 @@ struct nf_conntrack_l4proto {
109}; 108};
110 109
111/* Existing built-in generic protocol */ 110/* Existing built-in generic protocol */
112extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; 111extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
113 112
114#define MAX_NF_CT_PROTO 256 113#define MAX_NF_CT_PROTO 256
115 114
@@ -126,18 +125,18 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
126void nf_ct_l4proto_pernet_unregister_one(struct net *net, 125void nf_ct_l4proto_pernet_unregister_one(struct net *net,
127 const struct nf_conntrack_l4proto *proto); 126 const struct nf_conntrack_l4proto *proto);
128int nf_ct_l4proto_pernet_register(struct net *net, 127int nf_ct_l4proto_pernet_register(struct net *net,
129 struct nf_conntrack_l4proto *const proto[], 128 const struct nf_conntrack_l4proto *const proto[],
130 unsigned int num_proto); 129 unsigned int num_proto);
131void nf_ct_l4proto_pernet_unregister(struct net *net, 130void nf_ct_l4proto_pernet_unregister(struct net *net,
132 struct nf_conntrack_l4proto *const proto[], 131 const struct nf_conntrack_l4proto *const proto[],
133 unsigned int num_proto); 132 unsigned int num_proto);
134 133
135/* Protocol global registration. */ 134/* Protocol global registration. */
136int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto); 135int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
137void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto); 136void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
138int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[], 137int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
139 unsigned int num_proto); 138 unsigned int num_proto);
140void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[], 139void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
141 unsigned int num_proto); 140 unsigned int num_proto);
142 141
143/* Generic netlink helpers */ 142/* Generic netlink helpers */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
new file mode 100644
index 000000000000..b22b22082733
--- /dev/null
+++ b/include/net/netfilter/nf_flow_table.h
@@ -0,0 +1,122 @@
1#ifndef _NF_FLOW_TABLE_H
2#define _NF_FLOW_TABLE_H
3
4#include <linux/in.h>
5#include <linux/in6.h>
6#include <linux/netdevice.h>
7#include <linux/rhashtable.h>
8#include <linux/rcupdate.h>
9#include <net/dst.h>
10
11struct nf_flowtable;
12
13struct nf_flowtable_type {
14 struct list_head list;
15 int family;
16 void (*gc)(struct work_struct *work);
17 const struct rhashtable_params *params;
18 nf_hookfn *hook;
19 struct module *owner;
20};
21
22struct nf_flowtable {
23 struct rhashtable rhashtable;
24 const struct nf_flowtable_type *type;
25 struct delayed_work gc_work;
26};
27
28enum flow_offload_tuple_dir {
29 FLOW_OFFLOAD_DIR_ORIGINAL,
30 FLOW_OFFLOAD_DIR_REPLY,
31 __FLOW_OFFLOAD_DIR_MAX = FLOW_OFFLOAD_DIR_REPLY,
32};
33#define FLOW_OFFLOAD_DIR_MAX (__FLOW_OFFLOAD_DIR_MAX + 1)
34
35struct flow_offload_tuple {
36 union {
37 struct in_addr src_v4;
38 struct in6_addr src_v6;
39 };
40 union {
41 struct in_addr dst_v4;
42 struct in6_addr dst_v6;
43 };
44 struct {
45 __be16 src_port;
46 __be16 dst_port;
47 };
48
49 int iifidx;
50
51 u8 l3proto;
52 u8 l4proto;
53 u8 dir;
54
55 int oifidx;
56
57 struct dst_entry *dst_cache;
58};
59
60struct flow_offload_tuple_rhash {
61 struct rhash_head node;
62 struct flow_offload_tuple tuple;
63};
64
65#define FLOW_OFFLOAD_SNAT 0x1
66#define FLOW_OFFLOAD_DNAT 0x2
67#define FLOW_OFFLOAD_DYING 0x4
68
69struct flow_offload {
70 struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
71 u32 flags;
72 union {
73 /* Your private driver data here. */
74 u32 timeout;
75 };
76};
77
78#define NF_FLOW_TIMEOUT (30 * HZ)
79
80struct nf_flow_route {
81 struct {
82 struct dst_entry *dst;
83 int ifindex;
84 } tuple[FLOW_OFFLOAD_DIR_MAX];
85};
86
87struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
88 struct nf_flow_route *route);
89void flow_offload_free(struct flow_offload *flow);
90
91int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
92void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow);
93struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
94 struct flow_offload_tuple *tuple);
95int nf_flow_table_iterate(struct nf_flowtable *flow_table,
96 void (*iter)(struct flow_offload *flow, void *data),
97 void *data);
98void nf_flow_offload_work_gc(struct work_struct *work);
99extern const struct rhashtable_params nf_flow_offload_rhash_params;
100
101void flow_offload_dead(struct flow_offload *flow);
102
103int nf_flow_snat_port(const struct flow_offload *flow,
104 struct sk_buff *skb, unsigned int thoff,
105 u8 protocol, enum flow_offload_tuple_dir dir);
106int nf_flow_dnat_port(const struct flow_offload *flow,
107 struct sk_buff *skb, unsigned int thoff,
108 u8 protocol, enum flow_offload_tuple_dir dir);
109
110struct flow_ports {
111 __be16 source, dest;
112};
113
114unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
115 const struct nf_hook_state *state);
116unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
117 const struct nf_hook_state *state);
118
119#define MODULE_ALIAS_NF_FLOWTABLE(family) \
120 MODULE_ALIAS("nf-flowtable-" __stringify(family))
121
 122#endif /* _NF_FLOW_TABLE_H */
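A sketch of how a caller could wire an established conntrack entry into a flowtable using the declarations above: fill both directions of an nf_flow_route, allocate the flow from the conntrack entry, then insert it. The route/ifindex plumbing and error codes are simplified:

static int demo_offload_ct(struct nf_flowtable *ft, struct nf_conn *ct,
                           struct dst_entry *orig_dst, int orig_ifindex,
                           struct dst_entry *reply_dst, int reply_ifindex)
{
        struct nf_flow_route route = {
                .tuple[FLOW_OFFLOAD_DIR_ORIGINAL] = {
                        .dst     = orig_dst,
                        .ifindex = orig_ifindex,
                },
                .tuple[FLOW_OFFLOAD_DIR_REPLY] = {
                        .dst     = reply_dst,
                        .ifindex = reply_ifindex,
                },
        };
        struct flow_offload *flow;

        flow = flow_offload_alloc(ct, &route);
        if (!flow)
                return -ENOMEM;

        if (flow_offload_add(ft, flow) < 0) {
                flow_offload_free(flow);
                return -EEXIST;
        }
        return 0;
}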
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 814058d0f167..a50a69f5334c 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -25,7 +25,7 @@ struct nf_queue_entry {
25struct nf_queue_handler { 25struct nf_queue_handler {
26 int (*outfn)(struct nf_queue_entry *entry, 26 int (*outfn)(struct nf_queue_entry *entry,
27 unsigned int queuenum); 27 unsigned int queuenum);
28 unsigned int (*nf_hook_drop)(struct net *net); 28 void (*nf_hook_drop)(struct net *net);
29}; 29};
30 30
31void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); 31void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index fecc6112c768..663b015dace5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -9,6 +9,7 @@
9#include <linux/netfilter/x_tables.h> 9#include <linux/netfilter/x_tables.h>
10#include <linux/netfilter/nf_tables.h> 10#include <linux/netfilter/nf_tables.h>
11#include <linux/u64_stats_sync.h> 11#include <linux/u64_stats_sync.h>
12#include <net/netfilter/nf_flow_table.h>
12#include <net/netlink.h> 13#include <net/netlink.h>
13 14
14#define NFT_JUMP_STACK_SIZE 16 15#define NFT_JUMP_STACK_SIZE 16
@@ -54,8 +55,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
54 pkt->xt.state = state; 55 pkt->xt.state = state;
55} 56}
56 57
57static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt, 58static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
58 struct sk_buff *skb) 59 struct sk_buff *skb)
59{ 60{
60 pkt->tprot_set = false; 61 pkt->tprot_set = false;
61 pkt->tprot = 0; 62 pkt->tprot = 0;
@@ -63,14 +64,6 @@ static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
63 pkt->xt.fragoff = 0; 64 pkt->xt.fragoff = 0;
64} 65}
65 66
66static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
67 struct sk_buff *skb,
68 const struct nf_hook_state *state)
69{
70 nft_set_pktinfo(pkt, skb, state);
71 nft_set_pktinfo_proto_unspec(pkt, skb);
72}
73
74/** 67/**
75 * struct nft_verdict - nf_tables verdict 68 * struct nft_verdict - nf_tables verdict
76 * 69 *
@@ -150,22 +143,22 @@ static inline void nft_data_debug(const struct nft_data *data)
150 * struct nft_ctx - nf_tables rule/set context 143 * struct nft_ctx - nf_tables rule/set context
151 * 144 *
152 * @net: net namespace 145 * @net: net namespace
153 * @afi: address family info
154 * @table: the table the chain is contained in 146 * @table: the table the chain is contained in
155 * @chain: the chain the rule is contained in 147 * @chain: the chain the rule is contained in
156 * @nla: netlink attributes 148 * @nla: netlink attributes
157 * @portid: netlink portID of the original message 149 * @portid: netlink portID of the original message
158 * @seq: netlink sequence number 150 * @seq: netlink sequence number
151 * @family: protocol family
159 * @report: notify via unicast netlink message 152 * @report: notify via unicast netlink message
160 */ 153 */
161struct nft_ctx { 154struct nft_ctx {
162 struct net *net; 155 struct net *net;
163 struct nft_af_info *afi;
164 struct nft_table *table; 156 struct nft_table *table;
165 struct nft_chain *chain; 157 struct nft_chain *chain;
166 const struct nlattr * const *nla; 158 const struct nlattr * const *nla;
167 u32 portid; 159 u32 portid;
168 u32 seq; 160 u32 seq;
161 u8 family;
169 bool report; 162 bool report;
170}; 163};
171 164
@@ -381,6 +374,7 @@ void nft_unregister_set(struct nft_set_type *type);
381 * @list: table set list node 374 * @list: table set list node
382 * @bindings: list of set bindings 375 * @bindings: list of set bindings
383 * @name: name of the set 376 * @name: name of the set
377 * @handle: unique handle of the set
384 * @ktype: key type (numeric type defined by userspace, not used in the kernel) 378 * @ktype: key type (numeric type defined by userspace, not used in the kernel)
385 * @dtype: data type (verdict or numeric type defined by userspace) 379 * @dtype: data type (verdict or numeric type defined by userspace)
386 * @objtype: object type (see NFT_OBJECT_* definitions) 380 * @objtype: object type (see NFT_OBJECT_* definitions)
@@ -403,6 +397,7 @@ struct nft_set {
403 struct list_head list; 397 struct list_head list;
404 struct list_head bindings; 398 struct list_head bindings;
405 char *name; 399 char *name;
400 u64 handle;
406 u32 ktype; 401 u32 ktype;
407 u32 dtype; 402 u32 dtype;
408 u32 objtype; 403 u32 objtype;
@@ -424,6 +419,11 @@ struct nft_set {
424 __attribute__((aligned(__alignof__(u64)))); 419 __attribute__((aligned(__alignof__(u64))));
425}; 420};
426 421
422static inline bool nft_set_is_anonymous(const struct nft_set *set)
423{
424 return set->flags & NFT_SET_ANONYMOUS;
425}
426
427static inline void *nft_set_priv(const struct nft_set *set) 427static inline void *nft_set_priv(const struct nft_set *set)
428{ 428{
429 return (void *)set->data; 429 return (void *)set->data;
@@ -883,7 +883,7 @@ enum nft_chain_type {
883 * @family: address family 883 * @family: address family
884 * @owner: module owner 884 * @owner: module owner
885 * @hook_mask: mask of valid hooks 885 * @hook_mask: mask of valid hooks
886 * @hooks: hookfn overrides 886 * @hooks: array of hook functions
887 */ 887 */
888struct nf_chain_type { 888struct nf_chain_type {
889 const char *name; 889 const char *name;
@@ -905,8 +905,6 @@ struct nft_stats {
905 struct u64_stats_sync syncp; 905 struct u64_stats_sync syncp;
906}; 906};
907 907
908#define NFT_HOOK_OPS_MAX 2
909
910/** 908/**
911 * struct nft_base_chain - nf_tables base chain 909 * struct nft_base_chain - nf_tables base chain
912 * 910 *
@@ -918,7 +916,7 @@ struct nft_stats {
918 * @dev_name: device name that this base chain is attached to (if any) 916 * @dev_name: device name that this base chain is attached to (if any)
919 */ 917 */
920struct nft_base_chain { 918struct nft_base_chain {
921 struct nf_hook_ops ops[NFT_HOOK_OPS_MAX]; 919 struct nf_hook_ops ops;
922 const struct nf_chain_type *type; 920 const struct nf_chain_type *type;
923 u8 policy; 921 u8 policy;
924 u8 flags; 922 u8 flags;
@@ -948,10 +946,13 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
948 * @chains: chains in the table 946 * @chains: chains in the table
949 * @sets: sets in the table 947 * @sets: sets in the table
950 * @objects: stateful objects in the table 948 * @objects: stateful objects in the table
949 * @flowtables: flow tables in the table
951 * @hgenerator: handle generator state 950 * @hgenerator: handle generator state
951 * @handle: table handle
952 * @use: number of chain references to this table 952 * @use: number of chain references to this table
953 * @flags: table flag (see enum nft_table_flags) 953 * @flags: table flag (see enum nft_table_flags)
954 * @genmask: generation mask 954 * @genmask: generation mask
955 * @afinfo: address family info
955 * @name: name of the table 956 * @name: name of the table
956 */ 957 */
957struct nft_table { 958struct nft_table {
@@ -959,46 +960,16 @@ struct nft_table {
959 struct list_head chains; 960 struct list_head chains;
960 struct list_head sets; 961 struct list_head sets;
961 struct list_head objects; 962 struct list_head objects;
963 struct list_head flowtables;
962 u64 hgenerator; 964 u64 hgenerator;
965 u64 handle;
963 u32 use; 966 u32 use;
964 u16 flags:14, 967 u16 family:6,
968 flags:8,
965 genmask:2; 969 genmask:2;
966 char *name; 970 char *name;
967}; 971};
968 972
969enum nft_af_flags {
970 NFT_AF_NEEDS_DEV = (1 << 0),
971};
972
973/**
974 * struct nft_af_info - nf_tables address family info
975 *
976 * @list: used internally
977 * @family: address family
978 * @nhooks: number of hooks in this family
979 * @owner: module owner
980 * @tables: used internally
981 * @flags: family flags
982 * @nops: number of hook ops in this family
983 * @hook_ops_init: initialization function for chain hook ops
984 * @hooks: hookfn overrides for packet validation
985 */
986struct nft_af_info {
987 struct list_head list;
988 int family;
989 unsigned int nhooks;
990 struct module *owner;
991 struct list_head tables;
992 u32 flags;
993 unsigned int nops;
994 void (*hook_ops_init)(struct nf_hook_ops *,
995 unsigned int);
996 nf_hookfn *hooks[NF_MAX_HOOKS];
997};
998
999int nft_register_afinfo(struct net *, struct nft_af_info *);
1000void nft_unregister_afinfo(struct net *, struct nft_af_info *);
1001
1002int nft_register_chain_type(const struct nf_chain_type *); 973int nft_register_chain_type(const struct nf_chain_type *);
1003void nft_unregister_chain_type(const struct nf_chain_type *); 974void nft_unregister_chain_type(const struct nf_chain_type *);
1004 975
@@ -1016,9 +987,9 @@ int nft_verdict_dump(struct sk_buff *skb, int type,
1016 * @name: name of this stateful object 987 * @name: name of this stateful object
1017 * @genmask: generation mask 988 * @genmask: generation mask
1018 * @use: number of references to this stateful object 989 * @use: number of references to this stateful object
1019 * @data: object data, layout depends on type 990 * @handle: unique object handle
1020 * @ops: object operations 991 * @ops: object operations
1021 * @data: pointer to object data 992 * @data: object data, layout depends on type
1022 */ 993 */
1023struct nft_object { 994struct nft_object {
1024 struct list_head list; 995 struct list_head list;
@@ -1026,6 +997,7 @@ struct nft_object {
1026 struct nft_table *table; 997 struct nft_table *table;
1027 u32 genmask:2, 998 u32 genmask:2,
1028 use:30; 999 use:30;
1000 u64 handle;
1029 /* runtime data below here */ 1001 /* runtime data below here */
1030 const struct nft_object_ops *ops ____cacheline_aligned; 1002 const struct nft_object_ops *ops ____cacheline_aligned;
1031 unsigned char data[] 1003 unsigned char data[]
@@ -1097,6 +1069,46 @@ int nft_register_obj(struct nft_object_type *obj_type);
1097void nft_unregister_obj(struct nft_object_type *obj_type); 1069void nft_unregister_obj(struct nft_object_type *obj_type);
1098 1070
1099/** 1071/**
1072 * struct nft_flowtable - nf_tables flow table
1073 *
1074 * @list: flow table list node in table list
1075 * @table: the table the flow table is contained in
1076 * @name: name of this flow table
1077 * @hooknum: hook number
1078 * @priority: hook priority
1079 * @ops_len: number of hooks in array
1080 * @genmask: generation mask
1081 * @use: number of references to this flow table
1082 * @handle: unique object handle
1083 * @data: rhashtable and garbage collector
1084 * @ops: array of hooks
1085 */
1086struct nft_flowtable {
1087 struct list_head list;
1088 struct nft_table *table;
1089 char *name;
1090 int hooknum;
1091 int priority;
1092 int ops_len;
1093 u32 genmask:2,
1094 use:30;
1095 u64 handle;
1096 /* runtime data below here */
1097 struct nf_hook_ops *ops ____cacheline_aligned;
1098 struct nf_flowtable data;
1099};
1100
1101struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
1102 const struct nlattr *nla,
1103 u8 genmask);
1104void nft_flow_table_iterate(struct net *net,
1105 void (*iter)(struct nf_flowtable *flowtable, void *data),
1106 void *data);
1107
1108void nft_register_flowtable_type(struct nf_flowtable_type *type);
1109void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
1110
1111/**
1100 * struct nft_traceinfo - nft tracing information and state 1112 * struct nft_traceinfo - nft tracing information and state
1101 * 1113 *
1102 * @pkt: pktinfo currently processed 1114 * @pkt: pktinfo currently processed
@@ -1125,12 +1137,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
1125 1137
1126void nft_trace_notify(struct nft_traceinfo *info); 1138void nft_trace_notify(struct nft_traceinfo *info);
1127 1139
1128#define nft_dereference(p) \
1129 nfnl_dereference(p, NFNL_SUBSYS_NFTABLES)
1130
1131#define MODULE_ALIAS_NFT_FAMILY(family) \
1132 MODULE_ALIAS("nft-afinfo-" __stringify(family))
1133
1134#define MODULE_ALIAS_NFT_CHAIN(family, name) \ 1140#define MODULE_ALIAS_NFT_CHAIN(family, name) \
1135 MODULE_ALIAS("nft-chain-" __stringify(family) "-" name) 1141 MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
1136 1142
@@ -1332,4 +1338,11 @@ struct nft_trans_obj {
1332#define nft_trans_obj(trans) \ 1338#define nft_trans_obj(trans) \
1333 (((struct nft_trans_obj *)trans->data)->obj) 1339 (((struct nft_trans_obj *)trans->data)->obj)
1334 1340
1341struct nft_trans_flowtable {
1342 struct nft_flowtable *flowtable;
1343};
1344
1345#define nft_trans_flowtable(trans) \
1346 (((struct nft_trans_flowtable *)trans->data)->flowtable)
1347
1335#endif /* _NET_NF_TABLES_H */ 1348#endif /* _NET_NF_TABLES_H */
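
For reference, a minimal sketch of how the new nft_trans_flowtable() accessor pairs with the flowtable object added above. It assumes the usual struct nft_trans layout used by nf_tables_api.c; the function name itself is made up, and only fields documented in the struct nft_flowtable kernel-doc are touched.

static void example_log_flowtable_trans(struct nft_trans *trans)
{
	struct nft_flowtable *flowtable = nft_trans_flowtable(trans);

	/* name, hooknum, priority and ops_len are all part of the
	 * struct nft_flowtable introduced in this hunk.
	 */
	pr_debug("flowtable %s: hook %d, prio %d, %d hook ops\n",
		 flowtable->name, flowtable->hooknum,
		 flowtable->priority, flowtable->ops_len);
}
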
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index f0896ba456c4..ed7b511f0a59 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,15 +5,11 @@
5#include <net/netfilter/nf_tables.h> 5#include <net/netfilter/nf_tables.h>
6#include <net/ip.h> 6#include <net/ip.h>
7 7
8static inline void 8static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
9nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, 9 struct sk_buff *skb)
10 struct sk_buff *skb,
11 const struct nf_hook_state *state)
12{ 10{
13 struct iphdr *ip; 11 struct iphdr *ip;
14 12
15 nft_set_pktinfo(pkt, skb, state);
16
17 ip = ip_hdr(pkt->skb); 13 ip = ip_hdr(pkt->skb);
18 pkt->tprot_set = true; 14 pkt->tprot_set = true;
19 pkt->tprot = ip->protocol; 15 pkt->tprot = ip->protocol;
@@ -21,10 +17,8 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
21 pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; 17 pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
22} 18}
23 19
24static inline int 20static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
25__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, 21 struct sk_buff *skb)
26 struct sk_buff *skb,
27 const struct nf_hook_state *state)
28{ 22{
29 struct iphdr *iph, _iph; 23 struct iphdr *iph, _iph;
30 u32 len, thoff; 24 u32 len, thoff;
@@ -52,16 +46,11 @@ __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
52 return 0; 46 return 0;
53} 47}
54 48
55static inline void 49static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
56nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, 50 struct sk_buff *skb)
57 struct sk_buff *skb,
58 const struct nf_hook_state *state)
59{ 51{
60 nft_set_pktinfo(pkt, skb, state); 52 if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
61 if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0) 53 nft_set_pktinfo_unspec(pkt, skb);
62 nft_set_pktinfo_proto_unspec(pkt, skb);
63} 54}
64 55
65extern struct nft_af_info nft_af_ipv4;
66
67#endif 56#endif
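
With the nf_hook_state argument dropped from the inline helpers, the caller is now expected to do the generic setup itself. A hedged sketch of an IPv4 hook combining the split helpers; nft_set_pktinfo() keeps the three-argument form visible in the removed lines, nft_do_chain() is assumed to behave as in net/netfilter/nf_tables_core.c, and the hook function is illustrative only.

static unsigned int example_nft_ipv4_hook(void *priv, struct sk_buff *skb,
					  const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	/* Generic setup first, then the protocol-specific part */
	nft_set_pktinfo(&pkt, skb, state);
	nft_set_pktinfo_ipv4(&pkt, skb);

	return nft_do_chain(&pkt, priv);
}
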
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index b8065b72f56e..dabe6fdb553a 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -5,20 +5,16 @@
5#include <linux/netfilter_ipv6/ip6_tables.h> 5#include <linux/netfilter_ipv6/ip6_tables.h>
6#include <net/ipv6.h> 6#include <net/ipv6.h>
7 7
8static inline void 8static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
9nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, 9 struct sk_buff *skb)
10 struct sk_buff *skb,
11 const struct nf_hook_state *state)
12{ 10{
13 unsigned int flags = IP6_FH_F_AUTH; 11 unsigned int flags = IP6_FH_F_AUTH;
14 int protohdr, thoff = 0; 12 int protohdr, thoff = 0;
15 unsigned short frag_off; 13 unsigned short frag_off;
16 14
17 nft_set_pktinfo(pkt, skb, state);
18
19 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); 15 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
20 if (protohdr < 0) { 16 if (protohdr < 0) {
21 nft_set_pktinfo_proto_unspec(pkt, skb); 17 nft_set_pktinfo_unspec(pkt, skb);
22 return; 18 return;
23 } 19 }
24 20
@@ -28,10 +24,8 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
28 pkt->xt.fragoff = frag_off; 24 pkt->xt.fragoff = frag_off;
29} 25}
30 26
31static inline int 27static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
32__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, 28 struct sk_buff *skb)
33 struct sk_buff *skb,
34 const struct nf_hook_state *state)
35{ 29{
36#if IS_ENABLED(CONFIG_IPV6) 30#if IS_ENABLED(CONFIG_IPV6)
37 unsigned int flags = IP6_FH_F_AUTH; 31 unsigned int flags = IP6_FH_F_AUTH;
@@ -68,16 +62,11 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
68#endif 62#endif
69} 63}
70 64
71static inline void 65static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
72nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, 66 struct sk_buff *skb)
73 struct sk_buff *skb,
74 const struct nf_hook_state *state)
75{ 67{
76 nft_set_pktinfo(pkt, skb, state); 68 if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
77 if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0) 69 nft_set_pktinfo_unspec(pkt, skb);
78 nft_set_pktinfo_proto_unspec(pkt, skb);
79} 70}
80 71
81extern struct nft_af_info nft_af_ipv6;
82
83#endif 72#endif
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index ecf238b8862c..ca9bd9fba5b5 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -8,7 +8,7 @@
8 8
9#include <linux/spinlock.h> 9#include <linux/spinlock.h>
10 10
11struct dev_rcv_lists; 11struct can_dev_rcv_lists;
12struct s_stats; 12struct s_stats;
13struct s_pstats; 13struct s_pstats;
14 14
@@ -28,7 +28,7 @@ struct netns_can {
28#endif 28#endif
29 29
30 /* receive filters subscribed for 'all' CAN devices */ 30 /* receive filters subscribed for 'all' CAN devices */
31 struct dev_rcv_lists *can_rx_alldev_list; 31 struct can_dev_rcv_lists *can_rx_alldev_list;
32 spinlock_t can_rcvlists_lock; 32 spinlock_t can_rcvlists_lock;
33 struct timer_list can_stattimer;/* timer for statistics update */ 33 struct timer_list can_stattimer;/* timer for statistics update */
34 struct s_stats *can_stats; /* packet statistics */ 34 struct s_stats *can_stats; /* packet statistics */
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 0ad4d0c71228..36c2d998a43c 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -11,7 +11,10 @@ struct netns_core {
11 11
12 int sysctl_somaxconn; 12 int sysctl_somaxconn;
13 13
14 struct prot_inuse __percpu *inuse; 14#ifdef CONFIG_PROC_FS
15 int __percpu *sock_inuse;
16 struct prot_inuse __percpu *prot_inuse;
17#endif
15}; 18};
16 19
17#endif 20#endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index cc00af2ac2d7..ca043342c0eb 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -17,7 +17,17 @@ struct netns_nf {
17#ifdef CONFIG_SYSCTL 17#ifdef CONFIG_SYSCTL
18 struct ctl_table_header *nf_log_dir_header; 18 struct ctl_table_header *nf_log_dir_header;
19#endif 19#endif
20 struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 20 struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
21 struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
22#ifdef CONFIG_NETFILTER_FAMILY_ARP
23 struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS];
24#endif
25#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
26 struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
27#endif
28#if IS_ENABLED(CONFIG_DECNET)
29 struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
30#endif
21#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) 31#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
22 bool defrag_ipv4; 32 bool defrag_ipv4;
23#endif 33#endif
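
With the single hooks[][] table split into per-family arrays, a lookup becomes a direct index into the family's array. A minimal sketch under RCU, assuming the usual embedding of struct netns_nf as net->nf; the wrapper name is hypothetical.

static const struct nf_hook_entries *
example_ipv4_hook_entries(struct net *net, unsigned int hooknum)
{
	/* Caller holds rcu_read_lock(); hooknum is an NF_INET_* value
	 * below NF_INET_NUMHOOKS.
	 */
	return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
}
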
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 4109b5f3010f..48134353411d 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,14 +7,8 @@
7struct nft_af_info; 7struct nft_af_info;
8 8
9struct netns_nftables { 9struct netns_nftables {
10 struct list_head af_info; 10 struct list_head tables;
11 struct list_head commit_list; 11 struct list_head commit_list;
12 struct nft_af_info *ipv4;
13 struct nft_af_info *ipv6;
14 struct nft_af_info *inet;
15 struct nft_af_info *arp;
16 struct nft_af_info *bridge;
17 struct nft_af_info *netdev;
18 unsigned int base_seq; 12 unsigned int base_seq;
19 u8 gencursor; 13 u8 gencursor;
20}; 14};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index ebc813277662..0db7fb3e4e15 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -122,9 +122,12 @@ struct netns_sctp {
122 /* Flag to indicate if PR-CONFIG is enabled. */ 122 /* Flag to indicate if PR-CONFIG is enabled. */
123 int reconf_enable; 123 int reconf_enable;
124 124
125 /* Flag to idicate if SCTP-AUTH is enabled */ 125 /* Flag to indicate if SCTP-AUTH is enabled */
126 int auth_enable; 126 int auth_enable;
127 127
128 /* Flag to indicate if stream interleave is enabled */
129 int intl_enable;
130
128 /* 131 /*
129 * Policy to control SCTP IPv4 address scoping 132 * Policy to control SCTP IPv4 address scoping
130 * 0 - Disable IPv4 address scoping 133 * 0 - Disable IPv4 address scoping
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 753ac9361154..87406252f0a3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -29,6 +29,7 @@ struct tcf_block_ext_info {
29 enum tcf_block_binder_type binder_type; 29 enum tcf_block_binder_type binder_type;
30 tcf_chain_head_change_t *chain_head_change; 30 tcf_chain_head_change_t *chain_head_change;
31 void *chain_head_change_priv; 31 void *chain_head_change_priv;
32 u32 block_index;
32}; 33};
33 34
34struct tcf_block_cb; 35struct tcf_block_cb;
@@ -38,16 +39,25 @@ bool tcf_queue_work(struct work_struct *work);
38struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 39struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
39 bool create); 40 bool create);
40void tcf_chain_put(struct tcf_chain *chain); 41void tcf_chain_put(struct tcf_chain *chain);
42void tcf_block_netif_keep_dst(struct tcf_block *block);
41int tcf_block_get(struct tcf_block **p_block, 43int tcf_block_get(struct tcf_block **p_block,
42 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q); 44 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
45 struct netlink_ext_ack *extack);
43int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, 46int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
44 struct tcf_block_ext_info *ei); 47 struct tcf_block_ext_info *ei,
48 struct netlink_ext_ack *extack);
45void tcf_block_put(struct tcf_block *block); 49void tcf_block_put(struct tcf_block *block);
46void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, 50void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
47 struct tcf_block_ext_info *ei); 51 struct tcf_block_ext_info *ei);
48 52
53static inline bool tcf_block_shared(struct tcf_block *block)
54{
55 return block->index;
56}
57
49static inline struct Qdisc *tcf_block_q(struct tcf_block *block) 58static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
50{ 59{
60 WARN_ON(tcf_block_shared(block));
51 return block->q; 61 return block->q;
52} 62}
53 63
@@ -77,14 +87,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
77#else 87#else
78static inline 88static inline
79int tcf_block_get(struct tcf_block **p_block, 89int tcf_block_get(struct tcf_block **p_block,
80 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q) 90 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
91 struct netlink_ext_ack *extack)
81{ 92{
82 return 0; 93 return 0;
83} 94}
84 95
85static inline 96static inline
86int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, 97int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
87 struct tcf_block_ext_info *ei) 98 struct tcf_block_ext_info *ei,
99 struct netlink_ext_ack *extack)
88{ 100{
89 return 0; 101 return 0;
90} 102}
@@ -364,7 +376,8 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
364 376
365int tcf_exts_validate(struct net *net, struct tcf_proto *tp, 377int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
366 struct nlattr **tb, struct nlattr *rate_tlv, 378 struct nlattr **tb, struct nlattr *rate_tlv,
367 struct tcf_exts *exts, bool ovr); 379 struct tcf_exts *exts, bool ovr,
380 struct netlink_ext_ack *extack);
368void tcf_exts_destroy(struct tcf_exts *exts); 381void tcf_exts_destroy(struct tcf_exts *exts);
369void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src); 382void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
370int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); 383int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
@@ -544,13 +557,16 @@ static inline int tcf_valid_offset(const struct sk_buff *skb,
544#include <net/net_namespace.h> 557#include <net/net_namespace.h>
545 558
546static inline int 559static inline int
547tcf_change_indev(struct net *net, struct nlattr *indev_tlv) 560tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
561 struct netlink_ext_ack *extack)
548{ 562{
549 char indev[IFNAMSIZ]; 563 char indev[IFNAMSIZ];
550 struct net_device *dev; 564 struct net_device *dev;
551 565
552 if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) 566 if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
567 NL_SET_ERR_MSG(extack, "Interface name too long");
553 return -EINVAL; 568 return -EINVAL;
569 }
554 dev = __dev_get_by_name(net, indev); 570 dev = __dev_get_by_name(net, indev);
555 if (!dev) 571 if (!dev)
556 return -ENODEV; 572 return -ENODEV;
@@ -586,17 +602,9 @@ struct tc_cls_common_offload {
586 u32 chain_index; 602 u32 chain_index;
587 __be16 protocol; 603 __be16 protocol;
588 u32 prio; 604 u32 prio;
605 struct netlink_ext_ack *extack;
589}; 606};
590 607
591static inline void
592tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
593 const struct tcf_proto *tp)
594{
595 cls_common->chain_index = tp->chain->index;
596 cls_common->protocol = tp->protocol;
597 cls_common->prio = tp->prio;
598}
599
600struct tc_cls_u32_knode { 608struct tc_cls_u32_knode {
601 struct tcf_exts *exts; 609 struct tcf_exts *exts;
602 struct tc_u32_sel *sel; 610 struct tc_u32_sel *sel;
@@ -637,6 +645,31 @@ static inline bool tc_can_offload(const struct net_device *dev)
637 return dev->features & NETIF_F_HW_TC; 645 return dev->features & NETIF_F_HW_TC;
638} 646}
639 647
648static inline bool tc_can_offload_extack(const struct net_device *dev,
649 struct netlink_ext_ack *extack)
650{
651 bool can = tc_can_offload(dev);
652
653 if (!can)
654 NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
655
656 return can;
657}
658
659static inline bool
660tc_cls_can_offload_and_chain0(const struct net_device *dev,
661 struct tc_cls_common_offload *common)
662{
663 if (!tc_can_offload_extack(dev, common->extack))
664 return false;
665 if (common->chain_index) {
666 NL_SET_ERR_MSG(common->extack,
667 "Driver supports only offload of chain 0");
668 return false;
669 }
670 return true;
671}
672
640static inline bool tc_skip_hw(u32 flags) 673static inline bool tc_skip_hw(u32 flags)
641{ 674{
642 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false; 675 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
@@ -664,6 +697,18 @@ static inline bool tc_in_hw(u32 flags)
664 return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false; 697 return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
665} 698}
666 699
700static inline void
701tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
702 const struct tcf_proto *tp, u32 flags,
703 struct netlink_ext_ack *extack)
704{
705 cls_common->chain_index = tp->chain->index;
706 cls_common->protocol = tp->protocol;
707 cls_common->prio = tp->prio;
708 if (tc_skip_sw(flags))
709 cls_common->extack = extack;
710}
711
667enum tc_fl_command { 712enum tc_fl_command {
668 TC_CLSFLOWER_REPLACE, 713 TC_CLSFLOWER_REPLACE,
669 TC_CLSFLOWER_DESTROY, 714 TC_CLSFLOWER_DESTROY,
@@ -706,7 +751,6 @@ struct tc_cls_bpf_offload {
706 struct bpf_prog *oldprog; 751 struct bpf_prog *oldprog;
707 const char *name; 752 const char *name;
708 bool exts_integrated; 753 bool exts_integrated;
709 u32 gen_flags;
710}; 754};
711 755
712struct tc_mqprio_qopt_offload { 756struct tc_mqprio_qopt_offload {
@@ -727,6 +771,11 @@ struct tc_cookie {
727 u32 len; 771 u32 len;
728}; 772};
729 773
774struct tc_qopt_offload_stats {
775 struct gnet_stats_basic_packed *bstats;
776 struct gnet_stats_queue *qstats;
777};
778
730enum tc_red_command { 779enum tc_red_command {
731 TC_RED_REPLACE, 780 TC_RED_REPLACE,
732 TC_RED_DESTROY, 781 TC_RED_DESTROY,
@@ -739,9 +788,6 @@ struct tc_red_qopt_offload_params {
739 u32 max; 788 u32 max;
740 u32 probability; 789 u32 probability;
741 bool is_ecn; 790 bool is_ecn;
742};
743struct tc_red_qopt_offload_stats {
744 struct gnet_stats_basic_packed *bstats;
745 struct gnet_stats_queue *qstats; 791 struct gnet_stats_queue *qstats;
746}; 792};
747 793
@@ -751,9 +797,34 @@ struct tc_red_qopt_offload {
751 u32 parent; 797 u32 parent;
752 union { 798 union {
753 struct tc_red_qopt_offload_params set; 799 struct tc_red_qopt_offload_params set;
754 struct tc_red_qopt_offload_stats stats; 800 struct tc_qopt_offload_stats stats;
755 struct red_stats *xstats; 801 struct red_stats *xstats;
756 }; 802 };
757}; 803};
758 804
805enum tc_prio_command {
806 TC_PRIO_REPLACE,
807 TC_PRIO_DESTROY,
808 TC_PRIO_STATS,
809};
810
811struct tc_prio_qopt_offload_params {
812 int bands;
813 u8 priomap[TC_PRIO_MAX + 1];
814 /* In case that a prio qdisc is offloaded and now is changed to a
815 * non-offloadedable config, it needs to update the backlog & qlen
816 * values to negate the HW backlog & qlen values (and only them).
817 */
818 struct gnet_stats_queue *qstats;
819};
820
821struct tc_prio_qopt_offload {
822 enum tc_prio_command command;
823 u32 handle;
824 u32 parent;
825 union {
826 struct tc_prio_qopt_offload_params replace_params;
827 struct tc_qopt_offload_stats stats;
828 };
829};
759#endif 830#endif
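
tc_cls_can_offload_and_chain0() above bundles the two checks most drivers repeat before programming a classifier: hardware offload must be enabled on the device, and the rule must sit on chain 0. A sketch of the driver-side call; everything other than the helper and the extack messages it sets is hypothetical.

static int example_setup_tc_cls(struct net_device *dev,
				struct tc_cls_common_offload *common)
{
	if (!tc_cls_can_offload_and_chain0(dev, common))
		return -EOPNOTSUPP;	/* extack already filled in for the user */

	/* ...translate and program the rule into hardware... */
	return 0;
}
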
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d1f413f06c72..815b92a23936 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
89 89
90int fifo_set_limit(struct Qdisc *q, unsigned int limit); 90int fifo_set_limit(struct Qdisc *q, unsigned int limit);
91struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, 91struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
92 unsigned int limit); 92 unsigned int limit,
93 struct netlink_ext_ack *extack);
93 94
94int register_qdisc(struct Qdisc_ops *qops); 95int register_qdisc(struct Qdisc_ops *qops);
95int unregister_qdisc(struct Qdisc_ops *qops); 96int unregister_qdisc(struct Qdisc_ops *qops);
@@ -99,22 +100,24 @@ int qdisc_set_default(const char *id);
99void qdisc_hash_add(struct Qdisc *q, bool invisible); 100void qdisc_hash_add(struct Qdisc *q, bool invisible);
100void qdisc_hash_del(struct Qdisc *q); 101void qdisc_hash_del(struct Qdisc *q);
101struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); 102struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
102struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
103struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, 103struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
104 struct nlattr *tab); 104 struct nlattr *tab,
105 struct netlink_ext_ack *extack);
105void qdisc_put_rtab(struct qdisc_rate_table *tab); 106void qdisc_put_rtab(struct qdisc_rate_table *tab);
106void qdisc_put_stab(struct qdisc_size_table *tab); 107void qdisc_put_stab(struct qdisc_size_table *tab);
107void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); 108void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
108int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, 109bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
109 struct net_device *dev, struct netdev_queue *txq, 110 struct net_device *dev, struct netdev_queue *txq,
110 spinlock_t *root_lock, bool validate); 111 spinlock_t *root_lock, bool validate);
111 112
112void __qdisc_run(struct Qdisc *q); 113void __qdisc_run(struct Qdisc *q);
113 114
114static inline void qdisc_run(struct Qdisc *q) 115static inline void qdisc_run(struct Qdisc *q)
115{ 116{
116 if (qdisc_run_begin(q)) 117 if (qdisc_run_begin(q)) {
117 __qdisc_run(q); 118 __qdisc_run(q);
119 qdisc_run_end(q);
120 }
118} 121}
119 122
120static inline __be16 tc_skb_protocol(const struct sk_buff *skb) 123static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
diff --git a/include/net/route.h b/include/net/route.h
index d538e6db1afe..1eb9ce470e25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -217,7 +217,7 @@ unsigned int inet_addr_type_dev_table(struct net *net,
217 const struct net_device *dev, 217 const struct net_device *dev,
218 __be32 addr); 218 __be32 addr);
219void ip_rt_multicast_event(struct in_device *); 219void ip_rt_multicast_event(struct in_device *);
220int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 220int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
221void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 221void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
222struct rtable *rt_dst_alloc(struct net_device *dev, 222struct rtable *rt_dst_alloc(struct net_device *dev,
223 unsigned int flags, u16 type, 223 unsigned int flags, u16 type,
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index ead018744ff5..14b6b3af8918 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,10 +13,10 @@ enum rtnl_link_flags {
13 RTNL_FLAG_DOIT_UNLOCKED = 1, 13 RTNL_FLAG_DOIT_UNLOCKED = 1,
14}; 14};
15 15
16int __rtnl_register(int protocol, int msgtype,
17 rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
18void rtnl_register(int protocol, int msgtype, 16void rtnl_register(int protocol, int msgtype,
19 rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); 17 rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
18int rtnl_register_module(struct module *owner, int protocol, int msgtype,
19 rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
20int rtnl_unregister(int protocol, int msgtype); 20int rtnl_unregister(int protocol, int msgtype);
21void rtnl_unregister_all(int protocol); 21void rtnl_unregister_all(int protocol);
22 22
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index becf86aa4ac6..e2ab13687fb9 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -71,6 +71,7 @@ struct Qdisc {
71 * qdisc_tree_decrease_qlen() should stop. 71 * qdisc_tree_decrease_qlen() should stop.
72 */ 72 */
73#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ 73#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
74#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
74#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */ 75#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
75 u32 limit; 76 u32 limit;
76 const struct Qdisc_ops *ops; 77 const struct Qdisc_ops *ops;
@@ -88,14 +89,14 @@ struct Qdisc {
88 /* 89 /*
89 * For performance sake on SMP, we put highly modified fields at the end 90 * For performance sake on SMP, we put highly modified fields at the end
90 */ 91 */
91 struct sk_buff *gso_skb ____cacheline_aligned_in_smp; 92 struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
92 struct qdisc_skb_head q; 93 struct qdisc_skb_head q;
93 struct gnet_stats_basic_packed bstats; 94 struct gnet_stats_basic_packed bstats;
94 seqcount_t running; 95 seqcount_t running;
95 struct gnet_stats_queue qstats; 96 struct gnet_stats_queue qstats;
96 unsigned long state; 97 unsigned long state;
97 struct Qdisc *next_sched; 98 struct Qdisc *next_sched;
98 struct sk_buff *skb_bad_txq; 99 struct sk_buff_head skb_bad_txq;
99 int padded; 100 int padded;
100 refcount_t refcnt; 101 refcount_t refcnt;
101 102
@@ -150,19 +151,23 @@ struct Qdisc_class_ops {
150 /* Child qdisc manipulation */ 151 /* Child qdisc manipulation */
151 struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); 152 struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
152 int (*graft)(struct Qdisc *, unsigned long cl, 153 int (*graft)(struct Qdisc *, unsigned long cl,
153 struct Qdisc *, struct Qdisc **); 154 struct Qdisc *, struct Qdisc **,
155 struct netlink_ext_ack *extack);
154 struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl); 156 struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
155 void (*qlen_notify)(struct Qdisc *, unsigned long); 157 void (*qlen_notify)(struct Qdisc *, unsigned long);
156 158
157 /* Class manipulation routines */ 159 /* Class manipulation routines */
158 unsigned long (*find)(struct Qdisc *, u32 classid); 160 unsigned long (*find)(struct Qdisc *, u32 classid);
159 int (*change)(struct Qdisc *, u32, u32, 161 int (*change)(struct Qdisc *, u32, u32,
160 struct nlattr **, unsigned long *); 162 struct nlattr **, unsigned long *,
163 struct netlink_ext_ack *);
161 int (*delete)(struct Qdisc *, unsigned long); 164 int (*delete)(struct Qdisc *, unsigned long);
162 void (*walk)(struct Qdisc *, struct qdisc_walker * arg); 165 void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
163 166
164 /* Filter manipulation */ 167 /* Filter manipulation */
165 struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long); 168 struct tcf_block * (*tcf_block)(struct Qdisc *sch,
169 unsigned long arg,
170 struct netlink_ext_ack *extack);
166 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, 171 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
167 u32 classid); 172 u32 classid);
168 void (*unbind_tcf)(struct Qdisc *, unsigned long); 173 void (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -187,15 +192,26 @@ struct Qdisc_ops {
187 struct sk_buff * (*dequeue)(struct Qdisc *); 192 struct sk_buff * (*dequeue)(struct Qdisc *);
188 struct sk_buff * (*peek)(struct Qdisc *); 193 struct sk_buff * (*peek)(struct Qdisc *);
189 194
190 int (*init)(struct Qdisc *, struct nlattr *arg); 195 int (*init)(struct Qdisc *sch, struct nlattr *arg,
196 struct netlink_ext_ack *extack);
191 void (*reset)(struct Qdisc *); 197 void (*reset)(struct Qdisc *);
192 void (*destroy)(struct Qdisc *); 198 void (*destroy)(struct Qdisc *);
193 int (*change)(struct Qdisc *, struct nlattr *arg); 199 int (*change)(struct Qdisc *sch,
194 void (*attach)(struct Qdisc *); 200 struct nlattr *arg,
201 struct netlink_ext_ack *extack);
202 void (*attach)(struct Qdisc *sch);
203 int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
195 204
196 int (*dump)(struct Qdisc *, struct sk_buff *); 205 int (*dump)(struct Qdisc *, struct sk_buff *);
197 int (*dump_stats)(struct Qdisc *, struct gnet_dump *); 206 int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
198 207
208 void (*ingress_block_set)(struct Qdisc *sch,
209 u32 block_index);
210 void (*egress_block_set)(struct Qdisc *sch,
211 u32 block_index);
212 u32 (*ingress_block_get)(struct Qdisc *sch);
213 u32 (*egress_block_get)(struct Qdisc *sch);
214
199 struct module *owner; 215 struct module *owner;
200}; 216};
201 217
@@ -218,14 +234,18 @@ struct tcf_proto_ops {
218 const struct tcf_proto *, 234 const struct tcf_proto *,
219 struct tcf_result *); 235 struct tcf_result *);
220 int (*init)(struct tcf_proto*); 236 int (*init)(struct tcf_proto*);
221 void (*destroy)(struct tcf_proto*); 237 void (*destroy)(struct tcf_proto *tp,
238 struct netlink_ext_ack *extack);
222 239
223 void* (*get)(struct tcf_proto*, u32 handle); 240 void* (*get)(struct tcf_proto*, u32 handle);
224 int (*change)(struct net *net, struct sk_buff *, 241 int (*change)(struct net *net, struct sk_buff *,
225 struct tcf_proto*, unsigned long, 242 struct tcf_proto*, unsigned long,
226 u32 handle, struct nlattr **, 243 u32 handle, struct nlattr **,
227 void **, bool); 244 void **, bool,
228 int (*delete)(struct tcf_proto*, void *, bool*); 245 struct netlink_ext_ack *);
246 int (*delete)(struct tcf_proto *tp, void *arg,
247 bool *last,
248 struct netlink_ext_ack *);
229 void (*walk)(struct tcf_proto*, struct tcf_walker *arg); 249 void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
230 void (*bind_class)(void *, u32, unsigned long); 250 void (*bind_class)(void *, u32, unsigned long);
231 251
@@ -247,8 +267,6 @@ struct tcf_proto {
247 267
248 /* All the rest */ 268 /* All the rest */
249 u32 prio; 269 u32 prio;
250 u32 classid;
251 struct Qdisc *q;
252 void *data; 270 void *data;
253 const struct tcf_proto_ops *ops; 271 const struct tcf_proto_ops *ops;
254 struct tcf_chain *chain; 272 struct tcf_chain *chain;
@@ -267,8 +285,7 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
267 285
268struct tcf_chain { 286struct tcf_chain {
269 struct tcf_proto __rcu *filter_chain; 287 struct tcf_proto __rcu *filter_chain;
270 tcf_chain_head_change_t *chain_head_change; 288 struct list_head filter_chain_list;
271 void *chain_head_change_priv;
272 struct list_head list; 289 struct list_head list;
273 struct tcf_block *block; 290 struct tcf_block *block;
274 u32 index; /* chain index */ 291 u32 index; /* chain index */
@@ -277,12 +294,33 @@ struct tcf_chain {
277 294
278struct tcf_block { 295struct tcf_block {
279 struct list_head chain_list; 296 struct list_head chain_list;
297 u32 index; /* block index for shared blocks */
298 unsigned int refcnt;
280 struct net *net; 299 struct net *net;
281 struct Qdisc *q; 300 struct Qdisc *q;
282 struct list_head cb_list; 301 struct list_head cb_list;
283 struct work_struct work; 302 struct list_head owner_list;
303 bool keep_dst;
 304 unsigned int offloadcnt; /* Number of offloaded filters */
305 unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
284}; 306};
285 307
308static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
309{
310 if (*flags & TCA_CLS_FLAGS_IN_HW)
311 return;
312 *flags |= TCA_CLS_FLAGS_IN_HW;
313 block->offloadcnt++;
314}
315
316static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
317{
318 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
319 return;
320 *flags &= ~TCA_CLS_FLAGS_IN_HW;
321 block->offloadcnt--;
322}
323
286static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 324static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
287{ 325{
288 struct qdisc_skb_cb *qcb; 326 struct qdisc_skb_cb *qcb;
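
The tcf_block_offload_inc()/dec() helpers above are idempotent per flags word: the TCA_CLS_FLAGS_IN_HW bit records that a filter has already been counted, so repeated successes for the same filter bump block->offloadcnt only once. A sketch of the calling pattern; the wrapper names are made up.

static void example_hw_offload_ok(struct tcf_block *block, u32 *flags)
{
	/* First success sets TCA_CLS_FLAGS_IN_HW and bumps the counter;
	 * later calls for the same filter are no-ops.
	 */
	tcf_block_offload_inc(block, flags);
}

static void example_hw_offload_gone(struct tcf_block *block, u32 *flags)
{
	tcf_block_offload_dec(block, flags);
}
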
@@ -291,11 +329,31 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
291 BUILD_BUG_ON(sizeof(qcb->data) < sz); 329 BUILD_BUG_ON(sizeof(qcb->data) < sz);
292} 330}
293 331
332static inline int qdisc_qlen_cpu(const struct Qdisc *q)
333{
334 return this_cpu_ptr(q->cpu_qstats)->qlen;
335}
336
294static inline int qdisc_qlen(const struct Qdisc *q) 337static inline int qdisc_qlen(const struct Qdisc *q)
295{ 338{
296 return q->q.qlen; 339 return q->q.qlen;
297} 340}
298 341
342static inline int qdisc_qlen_sum(const struct Qdisc *q)
343{
344 __u32 qlen = 0;
345 int i;
346
347 if (q->flags & TCQ_F_NOLOCK) {
348 for_each_possible_cpu(i)
349 qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
350 } else {
351 qlen = q->q.qlen;
352 }
353
354 return qlen;
355}
356
299static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) 357static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
300{ 358{
301 return (struct qdisc_skb_cb *)skb->cb; 359 return (struct qdisc_skb_cb *)skb->cb;
@@ -432,6 +490,7 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *,
432void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); 490void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
433void qdisc_class_hash_destroy(struct Qdisc_class_hash *); 491void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
434 492
493int dev_qdisc_change_tx_queue_len(struct net_device *dev);
435void dev_init_scheduler(struct net_device *dev); 494void dev_init_scheduler(struct net_device *dev);
436void dev_shutdown(struct net_device *dev); 495void dev_shutdown(struct net_device *dev);
437void dev_activate(struct net_device *dev); 496void dev_activate(struct net_device *dev);
@@ -444,10 +503,12 @@ void qdisc_destroy(struct Qdisc *qdisc);
444void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, 503void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
445 unsigned int len); 504 unsigned int len);
446struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, 505struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
447 const struct Qdisc_ops *ops); 506 const struct Qdisc_ops *ops,
507 struct netlink_ext_ack *extack);
448void qdisc_free(struct Qdisc *qdisc); 508void qdisc_free(struct Qdisc *qdisc);
449struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, 509struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
450 const struct Qdisc_ops *ops, u32 parentid); 510 const struct Qdisc_ops *ops, u32 parentid,
511 struct netlink_ext_ack *extack);
451void __qdisc_calculate_pkt_len(struct sk_buff *skb, 512void __qdisc_calculate_pkt_len(struct sk_buff *skb,
452 const struct qdisc_size_table *stab); 513 const struct qdisc_size_table *stab);
453int skb_do_redirect(struct sk_buff *); 514int skb_do_redirect(struct sk_buff *);
@@ -633,12 +694,39 @@ static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
633 sch->qstats.backlog -= qdisc_pkt_len(skb); 694 sch->qstats.backlog -= qdisc_pkt_len(skb);
634} 695}
635 696
697static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
698 const struct sk_buff *skb)
699{
700 this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
701}
702
636static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch, 703static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
637 const struct sk_buff *skb) 704 const struct sk_buff *skb)
638{ 705{
639 sch->qstats.backlog += qdisc_pkt_len(skb); 706 sch->qstats.backlog += qdisc_pkt_len(skb);
640} 707}
641 708
709static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
710 const struct sk_buff *skb)
711{
712 this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
713}
714
715static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
716{
717 this_cpu_inc(sch->cpu_qstats->qlen);
718}
719
720static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
721{
722 this_cpu_dec(sch->cpu_qstats->qlen);
723}
724
725static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
726{
727 this_cpu_inc(sch->cpu_qstats->requeues);
728}
729
642static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count) 730static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
643{ 731{
644 sch->qstats.drops += count; 732 sch->qstats.drops += count;
@@ -769,26 +857,30 @@ static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
769/* generic pseudo peek method for non-work-conserving qdisc */ 857/* generic pseudo peek method for non-work-conserving qdisc */
770static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) 858static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
771{ 859{
860 struct sk_buff *skb = skb_peek(&sch->gso_skb);
861
772 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ 862 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
773 if (!sch->gso_skb) { 863 if (!skb) {
774 sch->gso_skb = sch->dequeue(sch); 864 skb = sch->dequeue(sch);
775 if (sch->gso_skb) { 865
866 if (skb) {
867 __skb_queue_head(&sch->gso_skb, skb);
776 /* it's still part of the queue */ 868 /* it's still part of the queue */
777 qdisc_qstats_backlog_inc(sch, sch->gso_skb); 869 qdisc_qstats_backlog_inc(sch, skb);
778 sch->q.qlen++; 870 sch->q.qlen++;
779 } 871 }
780 } 872 }
781 873
782 return sch->gso_skb; 874 return skb;
783} 875}
784 876
785/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */ 877/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
786static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) 878static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
787{ 879{
788 struct sk_buff *skb = sch->gso_skb; 880 struct sk_buff *skb = skb_peek(&sch->gso_skb);
789 881
790 if (skb) { 882 if (skb) {
791 sch->gso_skb = NULL; 883 skb = __skb_dequeue(&sch->gso_skb);
792 qdisc_qstats_backlog_dec(sch, skb); 884 qdisc_qstats_backlog_dec(sch, skb);
793 sch->q.qlen--; 885 sch->q.qlen--;
794 } else { 886 } else {
@@ -846,6 +938,14 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
846 qdisc_qstats_drop(sch); 938 qdisc_qstats_drop(sch);
847} 939}
848 940
941static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
942 struct sk_buff **to_free)
943{
944 __qdisc_drop(skb, to_free);
945 qdisc_qstats_cpu_drop(sch);
946
947 return NET_XMIT_DROP;
948}
849 949
850static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch, 950static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
851 struct sk_buff **to_free) 951 struct sk_buff **to_free)
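
Several of the new per-cpu helpers above only make sense for TCQ_F_NOLOCK qdiscs that keep their statistics in sch->cpu_qstats. A sketch of a lockless enqueue tail wired to them; the limit check and the queueing step are illustrative, only the helpers come from this file.

static int example_nolock_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff **to_free)
{
	if (unlikely(qdisc_qlen_cpu(sch) >= READ_ONCE(sch->limit)))
		return qdisc_drop_cpu(skb, sch, to_free);

	/* ...append skb to the qdisc's own per-cpu queue here... */
	qdisc_qstats_cpu_backlog_inc(sch, skb);
	qdisc_qstats_cpu_qlen_inc(sch);
	return NET_XMIT_SUCCESS;
}
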
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index deaafa9b09cb..20ff237c5eb2 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other)
145SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive) 145SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
146 146
147 147
148#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA) 148#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
149 a->chunk_hdr->type == SCTP_CID_I_DATA)
149 150
150/* Calculate the actual data size in a data chunk */ 151/* Calculate the actual data size in a data chunk */
151#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\ 152#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
152 - (unsigned long)(c->chunk_hdr)\ 153 (unsigned long)(c->chunk_hdr) - \
153 - sizeof(struct sctp_data_chunk))) 154 sctp_datachk_len(&c->asoc->stream)))
154 155
155/* Internal error codes */ 156/* Internal error codes */
156enum sctp_ierror { 157enum sctp_ierror {
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 906a9c0efa71..f7ae6b0a21d0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -107,7 +107,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
107int sctp_inet_listen(struct socket *sock, int backlog); 107int sctp_inet_listen(struct socket *sock, int backlog);
108void sctp_write_space(struct sock *sk); 108void sctp_write_space(struct sock *sk);
109void sctp_data_ready(struct sock *sk); 109void sctp_data_ready(struct sock *sk);
110unsigned int sctp_poll(struct file *file, struct socket *sock, 110__poll_t sctp_poll(struct file *file, struct socket *sock,
111 poll_table *wait); 111 poll_table *wait);
112void sctp_sock_rfree(struct sk_buff *skb); 112void sctp_sock_rfree(struct sk_buff *skb);
113void sctp_copy_sock(struct sock *newsk, struct sock *sk, 113void sctp_copy_sock(struct sock *newsk, struct sock *sk,
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated;
116int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); 116int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
117struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); 117struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
118 118
119int sctp_transport_walk_start(struct rhashtable_iter *iter); 119void sctp_transport_walk_start(struct rhashtable_iter *iter);
120void sctp_transport_walk_stop(struct rhashtable_iter *iter); 120void sctp_transport_walk_stop(struct rhashtable_iter *iter);
121struct sctp_transport *sctp_transport_get_next(struct net *net, 121struct sctp_transport *sctp_transport_get_next(struct net *net,
122 struct rhashtable_iter *iter); 122 struct rhashtable_iter *iter);
@@ -444,13 +444,13 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
444 int frag = pmtu; 444 int frag = pmtu;
445 445
446 frag -= sp->pf->af->net_header_len; 446 frag -= sp->pf->af->net_header_len;
447 frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); 447 frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
448 448
449 if (asoc->user_frag) 449 if (asoc->user_frag)
450 frag = min_t(int, frag, asoc->user_frag); 450 frag = min_t(int, frag, asoc->user_frag);
451 451
452 frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN - 452 frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
453 sizeof(struct sctp_data_chunk))); 453 sctp_datachk_len(&asoc->stream)));
454 454
455 return frag; 455 return frag;
456} 456}
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 70fb397f65b0..2883c43c5258 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -197,10 +197,14 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
197struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, 197struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
198 const __u32 lowest_tsn, 198 const __u32 lowest_tsn,
199 const struct sctp_chunk *chunk); 199 const struct sctp_chunk *chunk);
200struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, 200struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
201 __u8 flags, int paylen, gfp_t gfp);
202struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
203 __u32 new_cum_tsn, size_t nstreams,
204 struct sctp_ifwdtsn_skip *skiplist);
205struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
201 const struct sctp_sndrcvinfo *sinfo, 206 const struct sctp_sndrcvinfo *sinfo,
202 int len, const __u8 flags, 207 int len, __u8 flags, gfp_t gfp);
203 __u16 ssn, gfp_t gfp);
204struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, 208struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
205 const __u32 lowest_tsn); 209 const __u32 lowest_tsn);
206struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc); 210struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
@@ -342,7 +346,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
342 __u16 size; 346 __u16 size;
343 347
344 size = ntohs(chunk->chunk_hdr->length); 348 size = ntohs(chunk->chunk_hdr->length);
345 size -= sizeof(struct sctp_data_chunk); 349 size -= sctp_datahdr_len(&chunk->asoc->stream);
346 350
347 return size; 351 return size;
348} 352}
@@ -358,6 +362,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
358 typecheck(__u32, b) && \ 362 typecheck(__u32, b) && \
359 ((__s32)((a) - (b)) <= 0)) 363 ((__s32)((a) - (b)) <= 0))
360 364
365/* Compare two MIDs */
366#define MID_lt(a, b) \
367 (typecheck(__u32, a) && \
368 typecheck(__u32, b) && \
369 ((__s32)((a) - (b)) < 0))
370
361/* Compare two SSNs */ 371/* Compare two SSNs */
362#define SSN_lt(a,b) \ 372#define SSN_lt(a,b) \
363 (typecheck(__u16, a) && \ 373 (typecheck(__u16, a) && \
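
MID_lt() uses the same wraparound-safe trick as TSN_lt and SSN_lt: subtract and test the sign of the 32-bit result. A small stand-alone C model of the comparison (the kernel's typecheck() guard is dropped here):

#include <stdio.h>
#include <stdint.h>

/* Userspace model of MID_lt(): signed view of the 32-bit difference */
static int mid_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", mid_lt(1, 2));           /* 1: 1 precedes 2 */
	printf("%d\n", mid_lt(0xffffffffu, 0)); /* 1: still "older" across the wrap */
	printf("%d\n", mid_lt(5, 0xfffffff0u)); /* 0: 5 is newer after the wrap */
	return 0;
}
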
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
new file mode 100644
index 000000000000..6657711c8bc4
--- /dev/null
+++ b/include/net/sctp/stream_interleave.h
@@ -0,0 +1,61 @@
1/* SCTP kernel implementation
2 * (C) Copyright Red Hat Inc. 2017
3 *
4 * These are definitions used by the stream schedulers, defined in RFC
5 * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
6 *
7 * This SCTP implementation is free software;
8 * you can redistribute it and/or modify it under the terms of
9 * the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This SCTP implementation is distributed in the hope that it
14 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
15 * ************************
16 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 * See the GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with GNU CC; see the file COPYING. If not, see
21 * <http://www.gnu.org/licenses/>.
22 *
23 * Please send any bug reports or fixes you make to the
24 * email addresses:
25 * lksctp developers <linux-sctp@vger.kernel.org>
26 *
27 * Written or modified by:
28 * Xin Long <lucien.xin@gmail.com>
29 */
30
31#ifndef __sctp_stream_interleave_h__
32#define __sctp_stream_interleave_h__
33
34struct sctp_stream_interleave {
35 __u16 data_chunk_len;
36 __u16 ftsn_chunk_len;
37 /* (I-)DATA process */
38 struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
39 const struct sctp_sndrcvinfo *sinfo,
40 int len, __u8 flags, gfp_t gfp);
41 void (*assign_number)(struct sctp_chunk *chunk);
42 bool (*validate_data)(struct sctp_chunk *chunk);
43 int (*ulpevent_data)(struct sctp_ulpq *ulpq,
44 struct sctp_chunk *chunk, gfp_t gfp);
45 int (*enqueue_event)(struct sctp_ulpq *ulpq,
46 struct sctp_ulpevent *event);
47 void (*renege_events)(struct sctp_ulpq *ulpq,
48 struct sctp_chunk *chunk, gfp_t gfp);
49 void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
50 void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
51 /* (I-)FORWARD-TSN process */
52 void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
53 bool (*validate_ftsn)(struct sctp_chunk *chunk);
54 void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
55 void (*handle_ftsn)(struct sctp_ulpq *ulpq,
56 struct sctp_chunk *chunk);
57};
58
59void sctp_stream_interleave_init(struct sctp_stream *stream);
60
61#endif /* __sctp_stream_interleave_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 9a5ccf03a59b..03e92dda1813 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -89,6 +89,7 @@ struct sctp_stream;
89#include <net/sctp/tsnmap.h> 89#include <net/sctp/tsnmap.h>
90#include <net/sctp/ulpevent.h> 90#include <net/sctp/ulpevent.h>
91#include <net/sctp/ulpqueue.h> 91#include <net/sctp/ulpqueue.h>
92#include <net/sctp/stream_interleave.h>
92 93
93/* Structures useful for managing bind/connect. */ 94/* Structures useful for managing bind/connect. */
94 95
@@ -202,12 +203,17 @@ struct sctp_sock {
202 /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */ 203 /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
203 __u32 param_flags; 204 __u32 param_flags;
204 205
205 struct sctp_initmsg initmsg;
206 struct sctp_rtoinfo rtoinfo; 206 struct sctp_rtoinfo rtoinfo;
207 struct sctp_paddrparams paddrparam; 207 struct sctp_paddrparams paddrparam;
208 struct sctp_event_subscribe subscribe;
209 struct sctp_assocparams assocparams; 208 struct sctp_assocparams assocparams;
210 209
210 /*
211 * These two structures must be grouped together for the usercopy
212 * whitelist region.
213 */
214 struct sctp_event_subscribe subscribe;
215 struct sctp_initmsg initmsg;
216
211 int user_frag; 217 int user_frag;
212 218
213 __u32 autoclose; 219 __u32 autoclose;
@@ -217,6 +223,7 @@ struct sctp_sock {
217 disable_fragments:1, 223 disable_fragments:1,
218 v4mapped:1, 224 v4mapped:1,
219 frag_interleave:1, 225 frag_interleave:1,
226 strm_interleave:1,
220 recvrcvinfo:1, 227 recvrcvinfo:1,
221 recvnxtinfo:1, 228 recvnxtinfo:1,
222 data_ready_signalled:1; 229 data_ready_signalled:1;
@@ -397,6 +404,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
397#define sctp_ssn_skip(stream, type, sid, ssn) \ 404#define sctp_ssn_skip(stream, type, sid, ssn) \
398 ((stream)->type[sid].ssn = ssn + 1) 405 ((stream)->type[sid].ssn = ssn + 1)
399 406
407/* What is the current MID number for this stream? */
408#define sctp_mid_peek(stream, type, sid) \
409 ((stream)->type[sid].mid)
410
411/* Return the next MID number for this stream. */
412#define sctp_mid_next(stream, type, sid) \
413 ((stream)->type[sid].mid++)
414
415/* Skip over this mid and all below. */
416#define sctp_mid_skip(stream, type, sid, mid) \
417 ((stream)->type[sid].mid = mid + 1)
418
419#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
420
421/* What is the current MID_uo number for this stream? */
422#define sctp_mid_uo_peek(stream, type, sid) \
423 ((stream)->type[sid].mid_uo)
424
425/* Return the next MID_uo number for this stream. */
426#define sctp_mid_uo_next(stream, type, sid) \
427 ((stream)->type[sid].mid_uo++)
428
400/* 429/*
401 * Pointers to address related SCTP functions. 430 * Pointers to address related SCTP functions.
402 * (i.e. things that depend on the address family.) 431 * (i.e. things that depend on the address family.)
@@ -574,6 +603,8 @@ struct sctp_chunk {
574 struct sctp_addiphdr *addip_hdr; 603 struct sctp_addiphdr *addip_hdr;
575 struct sctp_fwdtsn_hdr *fwdtsn_hdr; 604 struct sctp_fwdtsn_hdr *fwdtsn_hdr;
576 struct sctp_authhdr *auth_hdr; 605 struct sctp_authhdr *auth_hdr;
606 struct sctp_idatahdr *idata_hdr;
607 struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
577 } subh; 608 } subh;
578 609
579 __u8 *chunk_end; 610 __u8 *chunk_end;
@@ -620,6 +651,7 @@ struct sctp_chunk {
620 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ 651 __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */
621 has_tsn:1, /* Does this chunk have a TSN yet? */ 652 has_tsn:1, /* Does this chunk have a TSN yet? */
622 has_ssn:1, /* Does this chunk have a SSN yet? */ 653 has_ssn:1, /* Does this chunk have a SSN yet? */
654#define has_mid has_ssn
623 singleton:1, /* Only chunk in the packet? */ 655 singleton:1, /* Only chunk in the packet? */
624 end_of_packet:1, /* Last chunk in the packet? */ 656 end_of_packet:1, /* Last chunk in the packet? */
625 ecn_ce_done:1, /* Have we processed the ECN CE bit? */ 657 ecn_ce_done:1, /* Have we processed the ECN CE bit? */
@@ -1073,6 +1105,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
1073void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp); 1105void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
1074void sctp_prsctp_prune(struct sctp_association *asoc, 1106void sctp_prsctp_prune(struct sctp_association *asoc,
1075 struct sctp_sndrcvinfo *sinfo, int msg_len); 1107 struct sctp_sndrcvinfo *sinfo, int msg_len);
1108void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
1076/* Uncork and flush an outqueue. */ 1109/* Uncork and flush an outqueue. */
1077static inline void sctp_outq_cork(struct sctp_outq *q) 1110static inline void sctp_outq_cork(struct sctp_outq *q)
1078{ 1111{
@@ -1357,13 +1390,25 @@ struct sctp_stream_out_ext {
1357}; 1390};
1358 1391
1359struct sctp_stream_out { 1392struct sctp_stream_out {
1360 __u16 ssn; 1393 union {
1361 __u8 state; 1394 __u32 mid;
1395 __u16 ssn;
1396 };
1397 __u32 mid_uo;
1362 struct sctp_stream_out_ext *ext; 1398 struct sctp_stream_out_ext *ext;
1399 __u8 state;
1363}; 1400};
1364 1401
1365struct sctp_stream_in { 1402struct sctp_stream_in {
1366 __u16 ssn; 1403 union {
1404 __u32 mid;
1405 __u16 ssn;
1406 };
1407 __u32 mid_uo;
1408 __u32 fsn;
1409 __u32 fsn_uo;
1410 char pd_mode;
1411 char pd_mode_uo;
1367}; 1412};
1368 1413
1369struct sctp_stream { 1414struct sctp_stream {
@@ -1387,11 +1432,32 @@ struct sctp_stream {
1387 struct sctp_stream_out_ext *rr_next; 1432 struct sctp_stream_out_ext *rr_next;
1388 }; 1433 };
1389 }; 1434 };
1435 struct sctp_stream_interleave *si;
1390}; 1436};
1391 1437
1392#define SCTP_STREAM_CLOSED 0x00 1438#define SCTP_STREAM_CLOSED 0x00
1393#define SCTP_STREAM_OPEN 0x01 1439#define SCTP_STREAM_OPEN 0x01
1394 1440
1441static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
1442{
1443 return stream->si->data_chunk_len;
1444}
1445
1446static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
1447{
1448 return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
1449}
1450
1451static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
1452{
1453 return stream->si->ftsn_chunk_len;
1454}
1455
1456static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
1457{
1458 return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
1459}
1460
1395/* SCTP_GET_ASSOC_STATS counters */ 1461/* SCTP_GET_ASSOC_STATS counters */
1396struct sctp_priv_assoc_stats { 1462struct sctp_priv_assoc_stats {
1397 /* Maximum observed rto in the association during subsequent 1463 /* Maximum observed rto in the association during subsequent
@@ -1940,6 +2006,7 @@ struct sctp_association {
1940 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ 2006 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */
1941 temp:1, /* Is it a temporary association? */ 2007 temp:1, /* Is it a temporary association? */
1942 force_delay:1, 2008 force_delay:1,
2009 intl_enable:1,
1943 prsctp_enable:1, 2010 prsctp_enable:1,
1944 reconf_enable:1; 2011 reconf_enable:1;
1945 2012
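
The sctp_datachk_len()/sctp_datahdr_len() helpers are what let callers such as sctp_frag_point() (earlier in this patch) stop hard-coding sizeof(struct sctp_data_chunk): with interleaving enabled the stream's si ops report the I-DATA sizes instead. A sketch of the kind of calculation that now goes through the stream; the function and its mtu_room parameter are hypothetical.

static int example_payload_room(const struct sctp_stream *stream, int mtu_room)
{
	/* Room left for user data once the common SCTP header and the
	 * per-stream (I-)DATA chunk header are accounted for.
	 */
	return mtu_room - sizeof(struct sctphdr) - sctp_datachk_len(stream);
}
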
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 231dc42f1da6..51b4e0626c34 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -45,19 +45,29 @@
45/* A structure to carry information to the ULP (e.g. Sockets API) */ 45/* A structure to carry information to the ULP (e.g. Sockets API) */
46/* Warning: This sits inside an skb.cb[] area. Be very careful of 46/* Warning: This sits inside an skb.cb[] area. Be very careful of
47 * growing this structure as it is at the maximum limit now. 47 * growing this structure as it is at the maximum limit now.
48 *
 49 * sctp_ulpevent is saved in skb->cb (48 bytes), whose last 4 bytes
 50 * have been taken by sock_skb_cb, so here it has to use 'packed'
 51 * to make sctp_ulpevent fit into the remaining 44 bytes.
48 */ 52 */
49struct sctp_ulpevent { 53struct sctp_ulpevent {
50 struct sctp_association *asoc; 54 struct sctp_association *asoc;
51 struct sctp_chunk *chunk; 55 struct sctp_chunk *chunk;
52 unsigned int rmem_len; 56 unsigned int rmem_len;
53 __u32 ppid; 57 union {
58 __u32 mid;
59 __u16 ssn;
60 };
61 union {
62 __u32 ppid;
63 __u32 fsn;
64 };
54 __u32 tsn; 65 __u32 tsn;
55 __u32 cumtsn; 66 __u32 cumtsn;
56 __u16 stream; 67 __u16 stream;
57 __u16 ssn;
58 __u16 flags; 68 __u16 flags;
59 __u16 msg_flags; 69 __u16 msg_flags;
60}; 70} __packed;
61 71
62/* Retrieve the skb this event sits inside of. */ 72/* Retrieve the skb this event sits inside of. */
63static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev) 73static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
@@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
112 122
113struct sctp_ulpevent *sctp_ulpevent_make_pdapi( 123struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
114 const struct sctp_association *asoc, 124 const struct sctp_association *asoc,
115 __u32 indication, gfp_t gfp); 125 __u32 indication, __u32 sid, __u32 seq,
126 __u32 flags, gfp_t gfp);
116 127
117struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication( 128struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
118 const struct sctp_association *asoc, gfp_t gfp); 129 const struct sctp_association *asoc, gfp_t gfp);
@@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
140 const struct sctp_association *asoc, __u16 flags, 151 const struct sctp_association *asoc, __u16 flags,
141 __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp); 152 __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
142 153
154struct sctp_ulpevent *sctp_make_reassembled_event(
155 struct net *net, struct sk_buff_head *queue,
156 struct sk_buff *f_frag, struct sk_buff *l_frag);
157
143void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 158void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
144 struct msghdr *); 159 struct msghdr *);
145void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, 160void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index e0dce07b8794..bb0ecba3db2b 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -45,6 +45,7 @@ struct sctp_ulpq {
45 char pd_mode; 45 char pd_mode;
46 struct sctp_association *asoc; 46 struct sctp_association *asoc;
47 struct sk_buff_head reasm; 47 struct sk_buff_head reasm;
48 struct sk_buff_head reasm_uo;
48 struct sk_buff_head lobby; 49 struct sk_buff_head lobby;
49}; 50};
50 51
@@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
76void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn); 77void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
77 78
78void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32); 79void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
79#endif /* __sctp_ulpqueue_h__ */
80
81
82
83
84
85 80
81__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
82 struct sk_buff_head *list, __u16 needed);
86 83
84#endif /* __sctp_ulpqueue_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 7a7b14e9628a..169c92afcafa 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -72,6 +72,7 @@
72#include <net/tcp_states.h> 72#include <net/tcp_states.h>
73#include <linux/net_tstamp.h> 73#include <linux/net_tstamp.h>
74#include <net/smc.h> 74#include <net/smc.h>
75#include <net/l3mdev.h>
75 76
76/* 77/*
77 * This structure really needs to be cleaned up. 78 * This structure really needs to be cleaned up.
@@ -1108,6 +1109,8 @@ struct proto {
1108 struct kmem_cache *slab; 1109 struct kmem_cache *slab;
1109 unsigned int obj_size; 1110 unsigned int obj_size;
1110 slab_flags_t slab_flags; 1111 slab_flags_t slab_flags;
1112 size_t useroffset; /* Usercopy region offset */
1113 size_t usersize; /* Usercopy region size */
1111 1114
1112 struct percpu_counter *orphan_count; 1115 struct percpu_counter *orphan_count;
1113 1116
@@ -1262,6 +1265,7 @@ proto_memory_pressure(struct proto *prot)
1262/* Called with local bh disabled */ 1265/* Called with local bh disabled */
1263void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); 1266void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1264int sock_prot_inuse_get(struct net *net, struct proto *proto); 1267int sock_prot_inuse_get(struct net *net, struct proto *proto);
1268int sock_inuse_get(struct net *net);
1265#else 1269#else
1266static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, 1270static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1267 int inc) 1271 int inc)
@@ -1445,10 +1449,8 @@ do { \
1445} while (0) 1449} while (0)
1446 1450
1447#ifdef CONFIG_LOCKDEP 1451#ifdef CONFIG_LOCKDEP
1448static inline bool lockdep_sock_is_held(const struct sock *csk) 1452static inline bool lockdep_sock_is_held(const struct sock *sk)
1449{ 1453{
1450 struct sock *sk = (struct sock *)csk;
1451
1452 return lockdep_is_held(&sk->sk_lock) || 1454 return lockdep_is_held(&sk->sk_lock) ||
1453 lockdep_is_held(&sk->sk_lock.slock); 1455 lockdep_is_held(&sk->sk_lock.slock);
1454} 1456}
@@ -1583,7 +1585,7 @@ int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1583int sock_no_socketpair(struct socket *, struct socket *); 1585int sock_no_socketpair(struct socket *, struct socket *);
1584int sock_no_accept(struct socket *, struct socket *, int, bool); 1586int sock_no_accept(struct socket *, struct socket *, int, bool);
1585int sock_no_getname(struct socket *, struct sockaddr *, int *, int); 1587int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
1586unsigned int sock_no_poll(struct file *, struct socket *, 1588__poll_t sock_no_poll(struct file *, struct socket *,
1587 struct poll_table_struct *); 1589 struct poll_table_struct *);
1588int sock_no_ioctl(struct socket *, unsigned int, unsigned long); 1590int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
1589int sock_no_listen(struct socket *, int); 1591int sock_no_listen(struct socket *, int);
@@ -2337,31 +2339,6 @@ static inline bool sk_listener(const struct sock *sk)
2337 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); 2339 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2338} 2340}
2339 2341
2340/**
2341 * sk_state_load - read sk->sk_state for lockless contexts
2342 * @sk: socket pointer
2343 *
2344 * Paired with sk_state_store(). Used in places we do not hold socket lock :
2345 * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
2346 */
2347static inline int sk_state_load(const struct sock *sk)
2348{
2349 return smp_load_acquire(&sk->sk_state);
2350}
2351
2352/**
2353 * sk_state_store - update sk->sk_state
2354 * @sk: socket pointer
2355 * @newstate: new state
2356 *
2357 * Paired with sk_state_load(). Should be used in contexts where
2358 * state change might impact lockless readers.
2359 */
2360static inline void sk_state_store(struct sock *sk, int newstate)
2361{
2362 smp_store_release(&sk->sk_state, newstate);
2363}
2364
2365void sock_enable_timestamp(struct sock *sk, int flag); 2342void sock_enable_timestamp(struct sock *sk, int flag);
2366int sock_get_timestamp(struct sock *, struct timeval __user *); 2343int sock_get_timestamp(struct sock *, struct timeval __user *);
2367int sock_get_timestampns(struct sock *, struct timespec __user *); 2344int sock_get_timestampns(struct sock *, struct timespec __user *);
@@ -2412,4 +2389,34 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2412 return *proto->sysctl_rmem; 2389 return *proto->sysctl_rmem;
2413} 2390}
2414 2391
2392/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
2393 * Some wifi drivers need to tweak it to get more chunks.
2394 * They can use this helper from their ndo_start_xmit()
2395 */
2396static inline void sk_pacing_shift_update(struct sock *sk, int val)
2397{
2398 if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
2399 return;
2400 sk->sk_pacing_shift = val;
2401}
2402
2403/* if a socket is bound to a device, check that the given device
2404 * index is either the same or that the socket is bound to an L3
2405 * master device and the given device index is also enslaved to
2406 * that L3 master
2407 */
2408static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
2409{
2410 int mdif;
2411
2412 if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
2413 return true;
2414
2415 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
2416 if (mdif && mdif == sk->sk_bound_dev_if)
2417 return true;
2418
2419 return false;
2420}
2421
2415#endif /* _SOCK_H */ 2422#endif /* _SOCK_H */
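As the comment above suggests, sk_pacing_shift_update() is meant to be called from a driver's transmit path. A rough sketch, assuming a hypothetical wifi driver that wants roughly 8 ms of queued data per socket; everything except sk_pacing_shift_update() is made up:

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* 1 sec >> 7 is ~8 ms of data; the default shift of 10 is ~1 ms */
	if (skb->sk)
		sk_pacing_shift_update(skb->sk, 7);

	/* ... hand the frame to the hardware queues ... */
	return NETDEV_TX_OK;
}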
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 781f3433a0be..9470fd7e4350 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -6,10 +6,16 @@
6#include <net/act_api.h> 6#include <net/act_api.h>
7#include <linux/tc_act/tc_csum.h> 7#include <linux/tc_act/tc_csum.h>
8 8
9struct tcf_csum_params {
10 int action;
11 u32 update_flags;
12 struct rcu_head rcu;
13};
14
9struct tcf_csum { 15struct tcf_csum {
10 struct tc_action common; 16 struct tc_action common;
11 17
12 u32 update_flags; 18 struct tcf_csum_params __rcu *params;
13}; 19};
14#define to_tcf_csum(a) ((struct tcf_csum *)a) 20#define to_tcf_csum(a) ((struct tcf_csum *)a)
15 21
@@ -24,7 +30,13 @@ static inline bool is_tcf_csum(const struct tc_action *a)
24 30
25static inline u32 tcf_csum_update_flags(const struct tc_action *a) 31static inline u32 tcf_csum_update_flags(const struct tc_action *a)
26{ 32{
27 return to_tcf_csum(a)->update_flags; 33 u32 update_flags;
34
35 rcu_read_lock();
36 update_flags = rcu_dereference(to_tcf_csum(a)->params)->update_flags;
37 rcu_read_unlock();
38
39 return update_flags;
28} 40}
29 41
30#endif /* __NET_TC_CSUM_H */ 42#endif /* __NET_TC_CSUM_H */
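With update_flags now behind an RCU-protected params block, consumers keep using the same accessor, which takes the RCU read lock internally. A small sketch of a hypothetical offload check; only is_tcf_csum() and tcf_csum_update_flags() come from this header, and TCA_CSUM_UPDATE_FLAG_TCP from the uapi header:

static int example_parse_csum_action(const struct tc_action *a)
{
	u32 flags;

	if (!is_tcf_csum(a))
		return -EOPNOTSUPP;

	flags = tcf_csum_update_flags(a);
	if (flags & TCA_CSUM_UPDATE_FLAG_TCP)
		return 0;	/* e.g. only TCP checksum recompute supported */

	return -EOPNOTSUPP;
}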
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 21d253c9a8c6..a2e9cbca5c9e 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,10 +8,8 @@
8struct tcf_mirred { 8struct tcf_mirred {
9 struct tc_action common; 9 struct tc_action common;
10 int tcfm_eaction; 10 int tcfm_eaction;
11 int tcfm_ifindex;
12 bool tcfm_mac_header_xmit; 11 bool tcfm_mac_header_xmit;
13 struct net_device __rcu *tcfm_dev; 12 struct net_device __rcu *tcfm_dev;
14 struct net *net;
15 struct list_head tcfm_list; 13 struct list_head tcfm_list;
16}; 14};
17#define to_mirred(a) ((struct tcf_mirred *)a) 15#define to_mirred(a) ((struct tcf_mirred *)a)
@@ -34,9 +32,9 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
34 return false; 32 return false;
35} 33}
36 34
37static inline int tcf_mirred_ifindex(const struct tc_action *a) 35static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
38{ 36{
39 return to_mirred(a)->tcfm_ifindex; 37 return rtnl_dereference(to_mirred(a)->tcfm_dev);
40} 38}
41 39
42#endif /* __NET_TC_MIR_H */ 40#endif /* __NET_TC_MIR_H */
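The mirred action now exposes the target net_device directly instead of an ifindex. A hypothetical validation helper built on the new accessor; callers are expected to hold RTNL, since tcf_mirred_dev() uses rtnl_dereference():

static int example_validate_mirred(const struct tc_action *a)
{
	struct net_device *target;

	if (!is_tcf_mirred_egress_redirect(a))
		return -EOPNOTSUPP;

	target = tcf_mirred_dev(a);	/* needs rtnl_lock() held */
	if (!target)
		return -EINVAL;

	return 0;
}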
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6da880d2f022..58278669cc55 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -387,7 +387,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
387void tcp_close(struct sock *sk, long timeout); 387void tcp_close(struct sock *sk, long timeout);
388void tcp_init_sock(struct sock *sk); 388void tcp_init_sock(struct sock *sk);
389void tcp_init_transfer(struct sock *sk, int bpf_op); 389void tcp_init_transfer(struct sock *sk, int bpf_op);
390unsigned int tcp_poll(struct file *file, struct socket *sock, 390__poll_t tcp_poll(struct file *file, struct socket *sock,
391 struct poll_table_struct *wait); 391 struct poll_table_struct *wait);
392int tcp_getsockopt(struct sock *sk, int level, int optname, 392int tcp_getsockopt(struct sock *sk, int level, int optname,
393 char __user *optval, int __user *optlen); 393 char __user *optval, int __user *optlen);
@@ -953,6 +953,7 @@ struct rate_sample {
953 u32 prior_in_flight; /* in flight before this ACK */ 953 u32 prior_in_flight; /* in flight before this ACK */
954 bool is_app_limited; /* is sample from packet with bubble in pipe? */ 954 bool is_app_limited; /* is sample from packet with bubble in pipe? */
955 bool is_retrans; /* is sample from retransmission? */ 955 bool is_retrans; /* is sample from retransmission? */
956 bool is_ack_delayed; /* is this (likely) a delayed ACK? */
956}; 957};
957 958
958struct tcp_congestion_ops { 959struct tcp_congestion_ops {
@@ -1507,8 +1508,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1507 1508
1508/* From tcp_fastopen.c */ 1509/* From tcp_fastopen.c */
1509void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, 1510void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1510 struct tcp_fastopen_cookie *cookie, int *syn_loss, 1511 struct tcp_fastopen_cookie *cookie);
1511 unsigned long *last_syn_loss);
1512void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 1512void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1513 struct tcp_fastopen_cookie *cookie, bool syn_lost, 1513 struct tcp_fastopen_cookie *cookie, bool syn_lost,
1514 u16 try_exp); 1514 u16 try_exp);
@@ -1546,7 +1546,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
1546void tcp_fastopen_active_disable(struct sock *sk); 1546void tcp_fastopen_active_disable(struct sock *sk);
1547bool tcp_fastopen_active_should_disable(struct sock *sk); 1547bool tcp_fastopen_active_should_disable(struct sock *sk);
1548void tcp_fastopen_active_disable_ofo_check(struct sock *sk); 1548void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1549void tcp_fastopen_active_timeout_reset(void); 1549void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1550 1550
1551/* Latencies incurred by various limits for a sender. They are 1551/* Latencies incurred by various limits for a sender. They are
1552 * chronograph-like stats that are mutually exclusive. 1552 * chronograph-like stats that are mutually exclusive.
@@ -2006,17 +2006,21 @@ void tcp_cleanup_ulp(struct sock *sk);
2006 * program loaded). 2006 * program loaded).
2007 */ 2007 */
2008#ifdef CONFIG_BPF 2008#ifdef CONFIG_BPF
2009static inline int tcp_call_bpf(struct sock *sk, int op) 2009static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2010{ 2010{
2011 struct bpf_sock_ops_kern sock_ops; 2011 struct bpf_sock_ops_kern sock_ops;
2012 int ret; 2012 int ret;
2013 2013
2014 if (sk_fullsock(sk)) 2014 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2015 if (sk_fullsock(sk)) {
2016 sock_ops.is_fullsock = 1;
2015 sock_owned_by_me(sk); 2017 sock_owned_by_me(sk);
2018 }
2016 2019
2017 memset(&sock_ops, 0, sizeof(sock_ops));
2018 sock_ops.sk = sk; 2020 sock_ops.sk = sk;
2019 sock_ops.op = op; 2021 sock_ops.op = op;
2022 if (nargs > 0)
2023 memcpy(sock_ops.args, args, nargs * sizeof(*args));
2020 2024
2021 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops); 2025 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2022 if (ret == 0) 2026 if (ret == 0)
@@ -2025,18 +2029,46 @@ static inline int tcp_call_bpf(struct sock *sk, int op)
2025 ret = -1; 2029 ret = -1;
2026 return ret; 2030 return ret;
2027} 2031}
2032
2033static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2034{
2035 u32 args[2] = {arg1, arg2};
2036
2037 return tcp_call_bpf(sk, op, 2, args);
2038}
2039
2040static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2041 u32 arg3)
2042{
2043 u32 args[3] = {arg1, arg2, arg3};
2044
2045 return tcp_call_bpf(sk, op, 3, args);
2046}
2047
2028#else 2048#else
2029static inline int tcp_call_bpf(struct sock *sk, int op) 2049static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2030{ 2050{
2031 return -EPERM; 2051 return -EPERM;
2032} 2052}
2053
2054static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2055{
2056 return -EPERM;
2057}
2058
2059static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2060 u32 arg3)
2061{
2062 return -EPERM;
2063}
2064
2033#endif 2065#endif
2034 2066
2035static inline u32 tcp_timeout_init(struct sock *sk) 2067static inline u32 tcp_timeout_init(struct sock *sk)
2036{ 2068{
2037 int timeout; 2069 int timeout;
2038 2070
2039 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT); 2071 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2040 2072
2041 if (timeout <= 0) 2073 if (timeout <= 0)
2042 timeout = TCP_TIMEOUT_INIT; 2074 timeout = TCP_TIMEOUT_INIT;
@@ -2047,7 +2079,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2047{ 2079{
2048 int rwnd; 2080 int rwnd;
2049 2081
2050 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT); 2082 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2051 2083
2052 if (rwnd < 0) 2084 if (rwnd < 0)
2053 rwnd = 0; 2085 rwnd = 0;
@@ -2056,7 +2088,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2056 2088
2057static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) 2089static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2058{ 2090{
2059 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1); 2091 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2060} 2092}
2061 2093
2062#if IS_ENABLED(CONFIG_SMC) 2094#if IS_ENABLED(CONFIG_SMC)
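tcp_call_bpf() now takes an argument array, with tcp_call_bpf_2arg()/_3arg() as convenience wrappers. A hedged sketch of a call site; EXAMPLE_SOCK_OPS_CB is a made-up op value, and only the wrappers themselves come from the header:

#define EXAMPLE_SOCK_OPS_CB	100	/* hypothetical sock_ops op */

static void example_notify_bpf(struct sock *sk, u32 srtt_us, u32 snd_cwnd)
{
	/* returns the program's verdict, or -EPERM if no program is attached */
	(void)tcp_call_bpf_2arg(sk, EXAMPLE_SOCK_OPS_CB, srtt_us, snd_cwnd);
}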
diff --git a/include/net/tls.h b/include/net/tls.h
index 9185e53a743c..4913430ab807 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -36,6 +36,7 @@
36 36
37#include <linux/types.h> 37#include <linux/types.h>
38#include <asm/byteorder.h> 38#include <asm/byteorder.h>
39#include <linux/crypto.h>
39#include <linux/socket.h> 40#include <linux/socket.h>
40#include <linux/tcp.h> 41#include <linux/tcp.h>
41#include <net/tcp.h> 42#include <net/tcp.h>
@@ -57,6 +58,7 @@
57 58
58struct tls_sw_context { 59struct tls_sw_context {
59 struct crypto_aead *aead_send; 60 struct crypto_aead *aead_send;
61 struct crypto_wait async_wait;
60 62
61 /* Sending context */ 63 /* Sending context */
62 char aad_space[TLS_AAD_SPACE_SIZE]; 64 char aad_space[TLS_AAD_SPACE_SIZE];
diff --git a/include/net/udp.h b/include/net/udp.h
index 6c759c8594e2..850a8e581cce 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -275,7 +275,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
275int udp_init_sock(struct sock *sk); 275int udp_init_sock(struct sock *sk);
276int __udp_disconnect(struct sock *sk, int flags); 276int __udp_disconnect(struct sock *sk, int flags);
277int udp_disconnect(struct sock *sk, int flags); 277int udp_disconnect(struct sock *sk, int flags);
278unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); 278__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
279struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, 279struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
280 netdev_features_t features, 280 netdev_features_t features,
281 bool is_ipv6); 281 bool is_ipv6);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index f96391e84a8a..ad73d8b3fcc2 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -301,7 +301,7 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
301 l4_hdr = ipv6_hdr(skb)->nexthdr; 301 l4_hdr = ipv6_hdr(skb)->nexthdr;
302 break; 302 break;
303 default: 303 default:
304 return features;; 304 return features;
305 } 305 }
306 306
307 if ((l4_hdr == IPPROTO_UDP) && 307 if ((l4_hdr == IPPROTO_UDP) &&
diff --git a/include/net/wext.h b/include/net/wext.h
index e51f067fdb3a..aa192a670304 100644
--- a/include/net/wext.h
+++ b/include/net/wext.h
@@ -7,7 +7,7 @@
7struct net; 7struct net;
8 8
9#ifdef CONFIG_WEXT_CORE 9#ifdef CONFIG_WEXT_CORE
10int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 10int wext_handle_ioctl(struct net *net, unsigned int cmd,
11 void __user *arg); 11 void __user *arg);
12int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, 12int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
13 unsigned long arg); 13 unsigned long arg);
@@ -15,7 +15,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
15struct iw_statistics *get_wireless_stats(struct net_device *dev); 15struct iw_statistics *get_wireless_stats(struct net_device *dev);
16int call_commit_handler(struct net_device *dev); 16int call_commit_handler(struct net_device *dev);
17#else 17#else
18static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 18static inline int wext_handle_ioctl(struct net *net, unsigned int cmd,
19 void __user *arg) 19 void __user *arg)
20{ 20{
21 return -EINVAL; 21 return -EINVAL;
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644
index 000000000000..b2362ddfa694
--- /dev/null
+++ b/include/net/xdp.h
@@ -0,0 +1,48 @@
1/* include/net/xdp.h
2 *
3 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
4 * Released under terms in GPL version 2. See COPYING.
5 */
6#ifndef __LINUX_NET_XDP_H__
7#define __LINUX_NET_XDP_H__
8
9/**
10 * DOC: XDP RX-queue information
11 *
12 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
13 * level RX-ring queues. It is information that is specific to how
 14 * the driver has configured a given RX-ring queue.
15 *
 16 * Each xdp_buff frame received in the driver carries a (pointer)
17 * reference to this xdp_rxq_info structure. This provides the XDP
18 * data-path read-access to RX-info for both kernel and bpf-side
19 * (limited subset).
20 *
21 * For now, direct access is only safe while running in NAPI/softirq
 22 * context. Contents are read-mostly and must not be updated during
23 * driver NAPI/softirq poll.
24 *
25 * The driver usage API is a register and unregister API.
26 *
27 * The struct is not directly tied to the XDP prog. A new XDP prog
28 * can be attached as long as it doesn't change the underlying
29 * RX-ring. If the RX-ring does change significantly, the NIC driver
 30 * naturally needs to stop the RX-ring before purging and reallocating
 31 * memory. In that process the driver MUST call unregister (which
 32 * also applies to driver shutdown and unload). The register API is
33 * also mandatory during RX-ring setup.
34 */
35
36struct xdp_rxq_info {
37 struct net_device *dev;
38 u32 queue_index;
39 u32 reg_state;
40} ____cacheline_aligned; /* perf critical, avoid false-sharing */
41
42int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
43 struct net_device *dev, u32 queue_index);
44void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
45void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
46bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
47
48#endif /* __LINUX_NET_XDP_H__ */
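In practice a driver embeds one xdp_rxq_info per RX ring and pairs register/unregister with ring setup and teardown, as the DOC block above describes. A minimal sketch under those assumptions; the ring structure and function names are hypothetical:

struct example_rx_ring {
	struct xdp_rxq_info xdp_rxq;
	/* ... descriptors, buffers ... */
};

static int example_ring_open(struct example_rx_ring *ring,
			     struct net_device *dev, u32 queue_index)
{
	/* mandatory during RX-ring setup */
	return xdp_rxq_info_reg(&ring->xdp_rxq, dev, queue_index);
}

static void example_ring_close(struct example_rx_ring *ring)
{
	/* mandatory before the ring memory is purged or reallocated */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
}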
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae35991b5877..7d2077665c0b 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -968,7 +968,7 @@ static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_c
968 968
969/* A struct encoding bundle of transformations to apply to some set of flow. 969/* A struct encoding bundle of transformations to apply to some set of flow.
970 * 970 *
971 * dst->child points to the next element of bundle. 971 * xdst->child points to the next element of bundle.
 972 * dst->xfrm points to an instance of a transformer. 972 * dst->xfrm points to an instance of a transformer.
973 * 973 *
974 * Due to unfortunate limitations of current routing cache, which we 974 * Due to unfortunate limitations of current routing cache, which we
@@ -984,6 +984,8 @@ struct xfrm_dst {
984 struct rt6_info rt6; 984 struct rt6_info rt6;
985 } u; 985 } u;
986 struct dst_entry *route; 986 struct dst_entry *route;
987 struct dst_entry *child;
988 struct dst_entry *path;
987 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 989 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
988 int num_pols, num_xfrms; 990 int num_pols, num_xfrms;
989 u32 xfrm_genid; 991 u32 xfrm_genid;
@@ -994,7 +996,35 @@ struct xfrm_dst {
994 u32 path_cookie; 996 u32 path_cookie;
995}; 997};
996 998
999static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1000{
1001#ifdef CONFIG_XFRM
1002 if (dst->xfrm) {
1003 const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
1004
1005 return xdst->path;
1006 }
1007#endif
1008 return (struct dst_entry *) dst;
1009}
1010
1011static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1012{
1013#ifdef CONFIG_XFRM
1014 if (dst->xfrm) {
1015 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1016 return xdst->child;
1017 }
1018#endif
1019 return NULL;
1020}
1021
997#ifdef CONFIG_XFRM 1022#ifdef CONFIG_XFRM
1023static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1024{
1025 xdst->child = child;
1026}
1027
998static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) 1028static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
999{ 1029{
1000 xfrm_pols_put(xdst->pols, xdst->num_pols); 1030 xfrm_pols_put(xdst->pols, xdst->num_pols);
@@ -1021,6 +1051,7 @@ struct xfrm_offload {
1021#define XFRM_GSO_SEGMENT 16 1051#define XFRM_GSO_SEGMENT 16
1022#define XFRM_GRO 32 1052#define XFRM_GRO 32
1023#define XFRM_ESP_NO_TRAILER 64 1053#define XFRM_ESP_NO_TRAILER 64
1054#define XFRM_DEV_RESUME 128
1024 1055
1025 __u32 status; 1056 __u32 status;
1026#define CRYPTO_SUCCESS 1 1057#define CRYPTO_SUCCESS 1
@@ -1847,34 +1878,53 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1847{ 1878{
1848 return skb->sp->xvec[skb->sp->len - 1]; 1879 return skb->sp->xvec[skb->sp->len - 1];
1849} 1880}
1881#endif
1882
1850static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) 1883static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1851{ 1884{
1885#ifdef CONFIG_XFRM
1852 struct sec_path *sp = skb->sp; 1886 struct sec_path *sp = skb->sp;
1853 1887
1854 if (!sp || !sp->olen || sp->len != sp->olen) 1888 if (!sp || !sp->olen || sp->len != sp->olen)
1855 return NULL; 1889 return NULL;
1856 1890
1857 return &sp->ovec[sp->olen - 1]; 1891 return &sp->ovec[sp->olen - 1];
1858} 1892#else
1893 return NULL;
1859#endif 1894#endif
1895}
1860 1896
1861void __net_init xfrm_dev_init(void); 1897void __net_init xfrm_dev_init(void);
1862 1898
1863#ifdef CONFIG_XFRM_OFFLOAD 1899#ifdef CONFIG_XFRM_OFFLOAD
1864int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); 1900void xfrm_dev_resume(struct sk_buff *skb);
1901void xfrm_dev_backlog(struct softnet_data *sd);
1902struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
1865int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, 1903int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1866 struct xfrm_user_offload *xuo); 1904 struct xfrm_user_offload *xuo);
1867bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); 1905bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
1868 1906
1907static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1908{
1909 struct xfrm_state_offload *xso = &x->xso;
1910
1911 if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
1912 xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
1913}
1914
1869static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) 1915static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1870{ 1916{
1871 struct xfrm_state *x = dst->xfrm; 1917 struct xfrm_state *x = dst->xfrm;
1918 struct xfrm_dst *xdst;
1872 1919
1873 if (!x || !x->type_offload) 1920 if (!x || !x->type_offload)
1874 return false; 1921 return false;
1875 1922
1876 if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) && 1923 xdst = (struct xfrm_dst *) dst;
1877 !dst->child->xfrm) 1924 if (!x->xso.offload_handle && !xdst->child->xfrm)
1925 return true;
1926 if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
1927 !xdst->child->xfrm)
1878 return true; 1928 return true;
1879 1929
1880 return false; 1930 return false;
@@ -1894,15 +1944,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
1894 struct net_device *dev = xso->dev; 1944 struct net_device *dev = xso->dev;
1895 1945
1896 if (dev && dev->xfrmdev_ops) { 1946 if (dev && dev->xfrmdev_ops) {
1897 dev->xfrmdev_ops->xdo_dev_state_free(x); 1947 if (dev->xfrmdev_ops->xdo_dev_state_free)
1948 dev->xfrmdev_ops->xdo_dev_state_free(x);
1898 xso->dev = NULL; 1949 xso->dev = NULL;
1899 dev_put(dev); 1950 dev_put(dev);
1900 } 1951 }
1901} 1952}
1902#else 1953#else
1903static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) 1954static inline void xfrm_dev_resume(struct sk_buff *skb)
1904{ 1955{
1905 return 0; 1956}
1957
1958static inline void xfrm_dev_backlog(struct softnet_data *sd)
1959{
1960}
1961
1962static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
1963{
1964 return skb;
1906} 1965}
1907 1966
1908static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo) 1967static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
@@ -1923,6 +1982,10 @@ static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x
1923 return false; 1982 return false;
1924} 1983}
1925 1984
1985static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1986{
1987}
1988
1926static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) 1989static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1927{ 1990{
1928 return false; 1991 return false;
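With child and path moved into struct xfrm_dst, bundle walks go through the new accessors rather than dst->child. A small sketch that counts the transforms in a bundle, assuming CONFIG_XFRM; only xfrm_dst_child() comes from this header:

static unsigned int example_bundle_depth(const struct dst_entry *dst)
{
	unsigned int depth = 0;

	while (dst && dst->xfrm) {		/* dst->xfrm exists under CONFIG_XFRM */
		depth++;
		dst = xfrm_dst_child(dst);	/* next element of the bundle */
	}
	return depth;
}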
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 18c564f60e93..d656809f1217 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -94,7 +94,7 @@ struct rdma_dev_addr {
94 * The dev_addr->net field must be initialized. 94 * The dev_addr->net field must be initialized.
95 */ 95 */
96int rdma_translate_ip(const struct sockaddr *addr, 96int rdma_translate_ip(const struct sockaddr *addr,
97 struct rdma_dev_addr *dev_addr, u16 *vlan_id); 97 struct rdma_dev_addr *dev_addr);
98 98
99/** 99/**
100 * rdma_resolve_ip - Resolve source and destination IP addresses to 100 * rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -131,10 +131,9 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
131 131
132int rdma_addr_size(struct sockaddr *addr); 132int rdma_addr_size(struct sockaddr *addr);
133 133
134int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
135int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, 134int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
136 const union ib_gid *dgid, 135 const union ib_gid *dgid,
137 u8 *smac, u16 *vlan_id, int *if_index, 136 u8 *dmac, const struct net_device *ndev,
138 int *hoplimit); 137 int *hoplimit);
139 138
140static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) 139static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
@@ -198,34 +197,15 @@ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
198 } 197 }
199} 198}
200 199
201static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, 200/*
202 union ib_gid *gid) 201 * rdma_get/set_sgid/dgid() APIs are applicable to IB, and iWarp.
203{ 202 * They are not applicable to RoCE.
204 struct net_device *dev; 203 * RoCE GIDs are derived from the IP addresses.
205 struct in_device *ip4; 204 */
206
207 dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
208 if (dev) {
209 ip4 = in_dev_get(dev);
210 if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
211 ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
212 (struct in6_addr *)gid);
213
214 if (ip4)
215 in_dev_put(ip4);
216
217 dev_put(dev);
218 }
219}
220
221static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) 205static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
222{ 206{
223 if (dev_addr->transport == RDMA_TRANSPORT_IB && 207 memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr),
224 dev_addr->dev_type != ARPHRD_INFINIBAND) 208 sizeof(*gid));
225 iboe_addr_get_sgid(dev_addr, gid);
226 else
227 memcpy(gid, dev_addr->src_dev_addr +
228 rdma_addr_gid_offset(dev_addr), sizeof *gid);
229} 209}
230 210
231static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) 211static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index c124d515f7d5..6e35416170a3 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -313,16 +313,14 @@ static inline u32 ib_bth_get_qpn(struct ib_other_headers *ohdr)
313 return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK); 313 return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK);
314} 314}
315 315
316static inline u8 ib_bth_get_becn(struct ib_other_headers *ohdr) 316static inline bool ib_bth_get_becn(struct ib_other_headers *ohdr)
317{ 317{
318 return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) & 318 return (ohdr->bth[1]) & cpu_to_be32(IB_BECN_SMASK);
319 IB_BECN_MASK);
320} 319}
321 320
322static inline u8 ib_bth_get_fecn(struct ib_other_headers *ohdr) 321static inline bool ib_bth_get_fecn(struct ib_other_headers *ohdr)
323{ 322{
324 return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) & 323 return (ohdr->bth[1]) & cpu_to_be32(IB_FECN_SMASK);
325 IB_FECN_MASK);
326} 324}
327 325
328static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr) 326static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr)
@@ -331,4 +329,13 @@ static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr)
331 IB_BTH_TVER_MASK); 329 IB_BTH_TVER_MASK);
332} 330}
333 331
332static inline bool ib_bth_is_solicited(struct ib_other_headers *ohdr)
333{
334 return ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED);
335}
336
337static inline bool ib_bth_is_migration(struct ib_other_headers *ohdr)
338{
339 return ohdr->bth[0] & cpu_to_be32(IB_BTH_MIG_REQ);
340}
334#endif /* IB_HDRS_H */ 341#endif /* IB_HDRS_H */
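The new BTH predicates return booleans instead of shifted bit fields, which keeps receive-path checks terse. A hypothetical receive-side fragment using them; only the ib_bth_* helpers come from this header:

static void example_handle_bth(struct ib_other_headers *ohdr)
{
	if (ib_bth_is_solicited(ohdr)) {
		/* generate a solicited completion event */
	}

	if (ib_bth_get_becn(ohdr)) {
		/* backward congestion notification seen on this packet */
	}
}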
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 1f7f604db5aa..811cfcfcbe3d 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -549,12 +549,12 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
549 struct rdma_ah_attr *ah_attr); 549 struct rdma_ah_attr *ah_attr);
550 550
551/** 551/**
552 * ib_init_ah_from_path - Initialize address handle attributes based on an SA 552 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
553 * path record. 553 * an SA path record.
554 */ 554 */
555int ib_init_ah_from_path(struct ib_device *device, u8 port_num, 555int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
556 struct sa_path_rec *rec, 556 struct sa_path_rec *rec,
557 struct rdma_ah_attr *ah_attr); 557 struct rdma_ah_attr *ah_attr);
558 558
559/** 559/**
 560 * ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec 560 * ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fd84cda5ed7c..73b2387e3f74 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -63,6 +63,7 @@
63#include <linux/uaccess.h> 63#include <linux/uaccess.h>
64#include <linux/cgroup_rdma.h> 64#include <linux/cgroup_rdma.h>
65#include <uapi/rdma/ib_user_verbs.h> 65#include <uapi/rdma/ib_user_verbs.h>
66#include <rdma/restrack.h>
66 67
67#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN 68#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
68 69
@@ -300,11 +301,6 @@ struct ib_tm_caps {
300 u32 max_sge; 301 u32 max_sge;
301}; 302};
302 303
303enum ib_cq_creation_flags {
304 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
305 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
306};
307
308struct ib_cq_init_attr { 304struct ib_cq_init_attr {
309 unsigned int cqe; 305 unsigned int cqe;
310 int comp_vector; 306 int comp_vector;
@@ -878,6 +874,7 @@ struct ib_mr_status {
878__attribute_const__ enum ib_rate mult_to_ib_rate(int mult); 874__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
879 875
880enum rdma_ah_attr_type { 876enum rdma_ah_attr_type {
877 RDMA_AH_ATTR_TYPE_UNDEFINED,
881 RDMA_AH_ATTR_TYPE_IB, 878 RDMA_AH_ATTR_TYPE_IB,
882 RDMA_AH_ATTR_TYPE_ROCE, 879 RDMA_AH_ATTR_TYPE_ROCE,
883 RDMA_AH_ATTR_TYPE_OPA, 880 RDMA_AH_ATTR_TYPE_OPA,
@@ -983,9 +980,9 @@ struct ib_wc {
983 u32 invalidate_rkey; 980 u32 invalidate_rkey;
984 } ex; 981 } ex;
985 u32 src_qp; 982 u32 src_qp;
983 u32 slid;
986 int wc_flags; 984 int wc_flags;
987 u16 pkey_index; 985 u16 pkey_index;
988 u32 slid;
989 u8 sl; 986 u8 sl;
990 u8 dlid_path_bits; 987 u8 dlid_path_bits;
991 u8 port_num; /* valid only for DR SMPs on switches */ 988 u8 port_num; /* valid only for DR SMPs on switches */
@@ -1082,6 +1079,7 @@ enum ib_qp_type {
1082 IB_QPT_XRC_INI = 9, 1079 IB_QPT_XRC_INI = 9,
1083 IB_QPT_XRC_TGT, 1080 IB_QPT_XRC_TGT,
1084 IB_QPT_MAX, 1081 IB_QPT_MAX,
1082 IB_QPT_DRIVER = 0xFF,
1085 /* Reserve a range for qp types internal to the low level driver. 1083 /* Reserve a range for qp types internal to the low level driver.
1086 * These qp types will not be visible at the IB core layer, so the 1084 * These qp types will not be visible at the IB core layer, so the
1087 * IB_QPT_MAX usages should not be affected in the core layer 1085 * IB_QPT_MAX usages should not be affected in the core layer
@@ -1529,6 +1527,7 @@ struct ib_pd {
1529 * Implementation details of the RDMA core, don't use in drivers: 1527 * Implementation details of the RDMA core, don't use in drivers:
1530 */ 1528 */
1531 struct ib_mr *__internal_mr; 1529 struct ib_mr *__internal_mr;
1530 struct rdma_restrack_entry res;
1532}; 1531};
1533 1532
1534struct ib_xrcd { 1533struct ib_xrcd {
@@ -1538,6 +1537,10 @@ struct ib_xrcd {
1538 1537
1539 struct mutex tgt_qp_mutex; 1538 struct mutex tgt_qp_mutex;
1540 struct list_head tgt_qp_list; 1539 struct list_head tgt_qp_list;
1540 /*
1541 * Implementation details of the RDMA core, don't use in drivers:
1542 */
1543 struct rdma_restrack_entry res;
1541}; 1544};
1542 1545
1543struct ib_ah { 1546struct ib_ah {
@@ -1569,6 +1572,10 @@ struct ib_cq {
1569 struct irq_poll iop; 1572 struct irq_poll iop;
1570 struct work_struct work; 1573 struct work_struct work;
1571 }; 1574 };
1575 /*
1576 * Implementation details of the RDMA core, don't use in drivers:
1577 */
1578 struct rdma_restrack_entry res;
1572}; 1579};
1573 1580
1574struct ib_srq { 1581struct ib_srq {
@@ -1745,6 +1752,11 @@ struct ib_qp {
1745 struct ib_rwq_ind_table *rwq_ind_tbl; 1752 struct ib_rwq_ind_table *rwq_ind_tbl;
1746 struct ib_qp_security *qp_sec; 1753 struct ib_qp_security *qp_sec;
1747 u8 port; 1754 u8 port;
1755
1756 /*
1757 * Implementation details of the RDMA core, don't use in drivers:
1758 */
1759 struct rdma_restrack_entry res;
1748}; 1760};
1749 1761
1750struct ib_mr { 1762struct ib_mr {
@@ -2351,6 +2363,10 @@ struct ib_device {
2351#endif 2363#endif
2352 2364
2353 u32 index; 2365 u32 index;
2366 /*
2367 * Implementation details of the RDMA core, don't use in drivers
2368 */
2369 struct rdma_restrack_root res;
2354 2370
2355 /** 2371 /**
2356 * The following mandatory functions are used only at device 2372 * The following mandatory functions are used only at device
@@ -2836,8 +2852,7 @@ int ib_modify_port(struct ib_device *device,
2836 struct ib_port_modify *port_modify); 2852 struct ib_port_modify *port_modify);
2837 2853
2838int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2854int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2839 enum ib_gid_type gid_type, struct net_device *ndev, 2855 struct net_device *ndev, u8 *port_num, u16 *index);
2840 u8 *port_num, u16 *index);
2841 2856
2842int ib_find_pkey(struct ib_device *device, 2857int ib_find_pkey(struct ib_device *device,
2843 u8 port_num, u16 pkey, u16 *index); 2858 u8 port_num, u16 pkey, u16 *index);
@@ -2858,7 +2873,7 @@ enum ib_pd_flags {
2858struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 2873struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2859 const char *caller); 2874 const char *caller);
2860#define ib_alloc_pd(device, flags) \ 2875#define ib_alloc_pd(device, flags) \
2861 __ib_alloc_pd((device), (flags), __func__) 2876 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
2862void ib_dealloc_pd(struct ib_pd *pd); 2877void ib_dealloc_pd(struct ib_pd *pd);
2863 2878
2864/** 2879/**
@@ -2905,7 +2920,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2905int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 2920int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2906 2921
2907/** 2922/**
2908 * ib_init_ah_from_wc - Initializes address handle attributes from a 2923 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
2909 * work completion. 2924 * work completion.
2910 * @device: Device on which the received message arrived. 2925 * @device: Device on which the received message arrived.
2911 * @port_num: Port on which the received message arrived. 2926 * @port_num: Port on which the received message arrived.
@@ -2915,9 +2930,9 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2915 * @ah_attr: Returned attributes that can be used when creating an address 2930 * @ah_attr: Returned attributes that can be used when creating an address
2916 * handle for replying to the message. 2931 * handle for replying to the message.
2917 */ 2932 */
2918int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 2933int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
2919 const struct ib_wc *wc, const struct ib_grh *grh, 2934 const struct ib_wc *wc, const struct ib_grh *grh,
2920 struct rdma_ah_attr *ah_attr); 2935 struct rdma_ah_attr *ah_attr);
2921 2936
2922/** 2937/**
2923 * ib_create_ah_from_wc - Creates an address handle associated with the 2938 * ib_create_ah_from_wc - Creates an address handle associated with the
@@ -3135,8 +3150,12 @@ static inline int ib_post_recv(struct ib_qp *qp,
3135 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 3150 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3136} 3151}
3137 3152
3138struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 3153struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
3139 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); 3154 int nr_cqe, int comp_vector,
3155 enum ib_poll_context poll_ctx, const char *caller);
3156#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
3157 __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
3158
3140void ib_free_cq(struct ib_cq *cq); 3159void ib_free_cq(struct ib_cq *cq);
3141int ib_process_cq_direct(struct ib_cq *cq, int budget); 3160int ib_process_cq_direct(struct ib_cq *cq, int budget);
3142 3161
@@ -3560,8 +3579,11 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3560/** 3579/**
3561 * ib_alloc_xrcd - Allocates an XRC domain. 3580 * ib_alloc_xrcd - Allocates an XRC domain.
3562 * @device: The device on which to allocate the XRC domain. 3581 * @device: The device on which to allocate the XRC domain.
3582 * @caller: Module name for kernel consumers
3563 */ 3583 */
3564struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 3584struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3585#define ib_alloc_xrcd(device) \
3586 __ib_alloc_xrcd((device), KBUILD_MODNAME)
3565 3587
3566/** 3588/**
3567 * ib_dealloc_xrcd - Deallocates an XRC domain. 3589 * ib_dealloc_xrcd - Deallocates an XRC domain.
@@ -3789,18 +3811,24 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3789 grh->traffic_class = traffic_class; 3811 grh->traffic_class = traffic_class;
3790} 3812}
3791 3813
3792/*Get AH type */ 3814/**
3815 * rdma_ah_find_type - Return address handle type.
3816 *
3817 * @dev: Device to be checked
3818 * @port_num: Port number
3819 */
3793static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, 3820static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3794 u32 port_num) 3821 u8 port_num)
3795{ 3822{
3796 if ((rdma_protocol_roce(dev, port_num)) || 3823 if (rdma_protocol_roce(dev, port_num))
3797 (rdma_protocol_iwarp(dev, port_num)))
3798 return RDMA_AH_ATTR_TYPE_ROCE; 3824 return RDMA_AH_ATTR_TYPE_ROCE;
3799 else if ((rdma_protocol_ib(dev, port_num)) && 3825 if (rdma_protocol_ib(dev, port_num)) {
3800 (rdma_cap_opa_ah(dev, port_num))) 3826 if (rdma_cap_opa_ah(dev, port_num))
3801 return RDMA_AH_ATTR_TYPE_OPA; 3827 return RDMA_AH_ATTR_TYPE_OPA;
3802 else
3803 return RDMA_AH_ATTR_TYPE_IB; 3828 return RDMA_AH_ATTR_TYPE_IB;
3829 }
3830
3831 return RDMA_AH_ATTR_TYPE_UNDEFINED;
3804} 3832}
3805 3833
3806/** 3834/**
@@ -3850,4 +3878,12 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
3850 3878
3851} 3879}
3852 3880
3881/**
3882 * rdma_roce_rescan_device - Rescan all of the network devices in the system
3883 * and add their gids, as needed, to the relevant RoCE devices.
3884 *
3885 * @device: the rdma device
3886 */
3887void rdma_roce_rescan_device(struct ib_device *ibdev);
3888
3853#endif /* IB_VERBS_H */ 3889#endif /* IB_VERBS_H */
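ib_alloc_cq() and ib_alloc_xrcd() are now macros that record KBUILD_MODNAME as the owning module for resource tracking, so existing call sites keep the same shape. A short sketch of a hypothetical ULP allocating a CQ; only ib_alloc_cq() is from this header:

static struct ib_cq *example_create_cq(struct ib_device *dev, void *priv)
{
	/* the macro passes KBUILD_MODNAME as the caller string */
	return ib_alloc_cq(dev, priv, 128, 0, IB_POLL_SOFTIRQ);
}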
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index f68fca296631..2bbb7a67e643 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -114,4 +114,20 @@ static inline u32 opa_get_mcast_base(u32 nr_top_bits)
114 return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits)); 114 return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits));
115} 115}
116 116
117/* Check for a valid unicast LID for non-SM traffic types */
118static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr)
119{
120 if (attr->type == RDMA_AH_ATTR_TYPE_IB) {
121 if (!rdma_ah_get_dlid(attr) ||
122 rdma_ah_get_dlid(attr) >=
123 be32_to_cpu(IB_MULTICAST_LID_BASE))
124 return false;
125 } else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) {
126 if (!rdma_ah_get_dlid(attr) ||
127 rdma_ah_get_dlid(attr) >=
128 opa_get_mcast_base(OPA_MCAST_NR))
129 return false;
130 }
131 return true;
132}
117#endif /* OPA_ADDR_H */ 133#endif /* OPA_ADDR_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 3d2eed3c4e75..6538a5cc27b6 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -413,4 +413,23 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
413const void *rdma_consumer_reject_data(struct rdma_cm_id *id, 413const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
414 struct rdma_cm_event *ev, u8 *data_len); 414 struct rdma_cm_event *ev, u8 *data_len);
415 415
416/**
417 * rdma_read_gids - Return the SGID and DGID used for establishing
 418 * the connection. This can be used after rdma_resolve_addr()
 419 * on the client side, and on a new connection on the
 420 * server side. This is applicable to IB, RoCE and iWarp.
 421 * If the cm_id is not yet bound to an RDMA device, it doesn't
 422 * copy any SGID or DGID to the given pointers.
423 * @id: Communication identifier whose GIDs are queried.
424 * @sgid: Pointer to SGID where SGID will be returned. It is optional.
425 * @dgid: Pointer to DGID where DGID will be returned. It is optional.
426 * Note: This API should not be used by any new ULPs or new code.
 427 * Instead, users interested in querying GIDs should refer to the path
 428 * record of the rdma_cm_id.
 429 * This API is provided for compatibility with existing users.
430 */
431
432void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
433 union ib_gid *dgid);
434
416#endif /* RDMA_CM_H */ 435#endif /* RDMA_CM_H */
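A hedged sketch of rdma_read_gids() in use after address resolution; the logging function and calling context are assumed, and only rdma_read_gids() itself is from this header:

static void example_log_gids(struct rdma_cm_id *id)
{
	union ib_gid sgid, dgid;

	rdma_read_gids(id, &sgid, &dgid);
	pr_debug("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
}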
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6947a6ba2557..6a69d71a21a5 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -36,17 +36,17 @@
36#include <rdma/rdma_cm.h> 36#include <rdma/rdma_cm.h>
37 37
38/** 38/**
39 * rdma_set_ib_paths - Manually sets the path records used to establish a 39 * rdma_set_ib_path - Manually sets the path record used to establish a
40 * connection. 40 * connection.
41 * @id: Connection identifier associated with the request. 41 * @id: Connection identifier associated with the request.
42 * @path_rec: Reference to the path record 42 * @path_rec: Reference to the path record
43 * 43 *
44 * This call permits a user to specify routing information for rdma_cm_id's 44 * This call permits a user to specify routing information for rdma_cm_id's
45 * bound to Infiniband devices. It is called on the client side of a 45 * bound to InfiniBand devices. It is called on the client side of a
46 * connection and replaces the call to rdma_resolve_route. 46 * connection and replaces the call to rdma_resolve_route.
47 */ 47 */
48int rdma_set_ib_paths(struct rdma_cm_id *id, 48int rdma_set_ib_path(struct rdma_cm_id *id,
49 struct sa_path_rec *path_rec, int num_paths); 49 struct sa_path_rec *path_rec);
50 50
51/* Global qkey for UDP QPs and multicast groups. */ 51/* Global qkey for UDP QPs and multicast groups. */
52#define RDMA_UDP_QKEY 0x01234567 52#define RDMA_UDP_QKEY 0x01234567
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 1ba84a78f1c5..4118324a0310 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -228,13 +228,6 @@ struct rvt_driver_provided {
228 int (*port_callback)(struct ib_device *, u8, struct kobject *); 228 int (*port_callback)(struct ib_device *, u8, struct kobject *);
229 229
230 /* 230 /*
231 * Returns a string to represent the device for which is being
232 * registered. This is primarily used for error and debug messages on
233 * the console.
234 */
235 const char * (*get_card_name)(struct rvt_dev_info *rdi);
236
237 /*
 238 * Returns a pointer to the underlying hardware's PCI device. This is 231 * used to display information as to what hardware is being referenced
239 * used to display information as to what hardware is being referenced 232 * used to display information as to what hardware is being referenced
240 * in an output message 233 * in an output message
@@ -419,6 +412,30 @@ struct rvt_dev_info {
419 412
420}; 413};
421 414
415/**
416 * rvt_set_ibdev_name - Craft an IB device name from client info
417 * @rdi: pointer to the client rvt_dev_info structure
418 * @name: client specific name
419 * @unit: client specific unit number.
420 */
421static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
422 const char *fmt, const char *name,
423 const int unit)
424{
425 snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
426}
427
428/**
429 * rvt_get_ibdev_name - return the IB name
430 * @rdi: rdmavt device
431 *
432 * Return the registered name of the device.
433 */
434static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
435{
436 return rdi->ibdev.name;
437}
438
422static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd) 439static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
423{ 440{
424 return container_of(ibpd, struct rvt_pd, ibpd); 441 return container_of(ibpd, struct rvt_pd, ibpd);
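With get_card_name gone, rdmavt clients hand their naming format to rvt_set_ibdev_name() instead. A hypothetical registration snippet; the "exdev" prefix and unit number are made up:

static void example_name_device(struct rvt_dev_info *rdi, int unit)
{
	/* produces e.g. "exdev0" in rdi->ibdev.name */
	rvt_set_ibdev_name(rdi, "%s%d", "exdev", unit);
}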
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
new file mode 100644
index 000000000000..c2d81167c858
--- /dev/null
+++ b/include/rdma/restrack.h
@@ -0,0 +1,157 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/*
3 * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
4 */
5
6#ifndef _RDMA_RESTRACK_H_
7#define _RDMA_RESTRACK_H_
8
9#include <linux/typecheck.h>
10#include <linux/rwsem.h>
11#include <linux/sched.h>
12#include <linux/kref.h>
13#include <linux/completion.h>
14
15/**
16 * enum rdma_restrack_type - HW objects to track
17 */
18enum rdma_restrack_type {
19 /**
20 * @RDMA_RESTRACK_PD: Protection domain (PD)
21 */
22 RDMA_RESTRACK_PD,
23 /**
24 * @RDMA_RESTRACK_CQ: Completion queue (CQ)
25 */
26 RDMA_RESTRACK_CQ,
27 /**
28 * @RDMA_RESTRACK_QP: Queue pair (QP)
29 */
30 RDMA_RESTRACK_QP,
31 /**
32 * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
33 */
34 RDMA_RESTRACK_XRCD,
35 /**
 36 * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
37 */
38 RDMA_RESTRACK_MAX
39};
40
41#define RDMA_RESTRACK_HASH_BITS 8
42/**
43 * struct rdma_restrack_root - main resource tracking management
44 * entity, per-device
45 */
46struct rdma_restrack_root {
47 /*
48 * @rwsem: Read/write lock to protect lists
49 */
50 struct rw_semaphore rwsem;
51 /**
52 * @hash: global database for all resources per-device
53 */
54 DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
55};
56
57/**
58 * struct rdma_restrack_entry - metadata per-entry
59 */
60struct rdma_restrack_entry {
61 /**
62 * @valid: validity indicator
63 *
64 * The entries are filled during rdma_restrack_add,
65 * can be attempted to be free during rdma_restrack_del.
66 *
67 * As an example for that, see mlx5 QPs with type MLX5_IB_QPT_HW_GSI
68 */
69 bool valid;
70 /*
71 * @kref: Protect destroy of the resource
72 */
73 struct kref kref;
74 /*
 75 * @comp: Signal that all consumers of the resource have completed their work
76 */
77 struct completion comp;
78 /**
79 * @task: owner of resource tracking entity
80 *
81 * There are two types of entities: created by user and created
82 * by kernel.
83 *
84 * This is relevant for the entities created by users.
85 * For the entities created by kernel, this pointer will be NULL.
86 */
87 struct task_struct *task;
88 /**
89 * @kern_name: name of owner for the kernel created entities.
90 */
91 const char *kern_name;
92 /**
93 * @node: hash table entry
94 */
95 struct hlist_node node;
96 /**
97 * @type: various objects in restrack database
98 */
99 enum rdma_restrack_type type;
100};
101
102/**
103 * rdma_restrack_init() - initialize resource tracking
104 * @res: resource tracking root
105 */
106void rdma_restrack_init(struct rdma_restrack_root *res);
107
108/**
109 * rdma_restrack_clean() - clean resource tracking
110 * @res: resource tracking root
111 */
112void rdma_restrack_clean(struct rdma_restrack_root *res);
113
114/**
115 * rdma_restrack_count() - the current usage of specific object
116 * @res: resource entry
117 * @type: actual type of object to operate
118 * @ns: PID namespace
119 */
120int rdma_restrack_count(struct rdma_restrack_root *res,
121 enum rdma_restrack_type type,
122 struct pid_namespace *ns);
123
124/**
 125 * rdma_restrack_add() - add object to the resource tracking database
126 * @res: resource entry
127 */
128void rdma_restrack_add(struct rdma_restrack_entry *res);
129
130/**
 131 * rdma_restrack_del() - delete object from the resource tracking database
132 * @res: resource entry
133 * @type: actual type of object to operate
134 */
135void rdma_restrack_del(struct rdma_restrack_entry *res);
136
137/**
138 * rdma_is_kernel_res() - check the owner of resource
139 * @res: resource entry
140 */
141static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
142{
143 return !res->task;
144}
145
146/**
147 * rdma_restrack_get() - grab to protect resource from release
148 * @res: resource entry
149 */
150int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
151
152/**
 153 * rdma_restrack_put() - release resource
154 * @res: resource entry
155 */
156int rdma_restrack_put(struct rdma_restrack_entry *res);
157#endif /* _RDMA_RESTRACK_H_ */
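The restrack root is embedded in struct ib_device (see the ib_verbs.h hunk above), so a consumer can ask how many objects of a given type a PID namespace currently owns. A minimal sketch, assuming task_active_pid_ns() is used for the namespace argument:

static int example_count_qps(struct ib_device *dev)
{
	return rdma_restrack_count(&dev->res, RDMA_RESTRACK_QP,
				   task_active_pid_ns(current));
}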
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6df6fe0c2198..225ab7783dfd 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -75,16 +75,15 @@ enum phy_event {
75 PHYE_OOB_ERROR, 75 PHYE_OOB_ERROR,
76 PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */ 76 PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
77 PHYE_RESUME_TIMEOUT, 77 PHYE_RESUME_TIMEOUT,
78 PHYE_SHUTDOWN,
78 PHY_NUM_EVENTS, 79 PHY_NUM_EVENTS,
79}; 80};
80 81
81enum discover_event { 82enum discover_event {
82 DISCE_DISCOVER_DOMAIN = 0U, 83 DISCE_DISCOVER_DOMAIN = 0U,
83 DISCE_REVALIDATE_DOMAIN, 84 DISCE_REVALIDATE_DOMAIN,
84 DISCE_PROBE,
85 DISCE_SUSPEND, 85 DISCE_SUSPEND,
86 DISCE_RESUME, 86 DISCE_RESUME,
87 DISCE_DESTRUCT,
88 DISC_NUM_EVENTS, 87 DISC_NUM_EVENTS,
89}; 88};
90 89
@@ -261,6 +260,7 @@ struct asd_sas_port {
261 struct list_head dev_list; 260 struct list_head dev_list;
262 struct list_head disco_list; 261 struct list_head disco_list;
263 struct list_head destroy_list; 262 struct list_head destroy_list;
263 struct list_head sas_port_del_list;
264 enum sas_linkrate linkrate; 264 enum sas_linkrate linkrate;
265 265
266 struct sas_work work; 266 struct sas_work work;
@@ -292,6 +292,7 @@ struct asd_sas_port {
292struct asd_sas_event { 292struct asd_sas_event {
293 struct sas_work work; 293 struct sas_work work;
294 struct asd_sas_phy *phy; 294 struct asd_sas_phy *phy;
295 int event;
295}; 296};
296 297
297static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work) 298static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
@@ -301,17 +302,24 @@ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
301 return ev; 302 return ev;
302} 303}
303 304
305static inline void INIT_SAS_EVENT(struct asd_sas_event *ev,
306 void (*fn)(struct work_struct *),
307 struct asd_sas_phy *phy, int event)
308{
309 INIT_SAS_WORK(&ev->work, fn);
310 ev->phy = phy;
311 ev->event = event;
312}
313
314#define SAS_PHY_SHUTDOWN_THRES 1024
315
304/* The phy pretty much is controlled by the LLDD. 316/* The phy pretty much is controlled by the LLDD.
305 * The class only reads those fields. 317 * The class only reads those fields.
306 */ 318 */
307struct asd_sas_phy { 319struct asd_sas_phy {
308/* private: */ 320/* private: */
309 struct asd_sas_event port_events[PORT_NUM_EVENTS]; 321 atomic_t event_nr;
310 struct asd_sas_event phy_events[PHY_NUM_EVENTS]; 322 int in_shutdown;
311
312 unsigned long port_events_pending;
313 unsigned long phy_events_pending;
314
315 int error; 323 int error;
316 int suspended; 324 int suspended;
317 325
@@ -380,6 +388,9 @@ struct sas_ha_struct {
380 struct device *dev; /* should be set */ 388 struct device *dev; /* should be set */
381 struct module *lldd_module; /* should be set */ 389 struct module *lldd_module; /* should be set */
382 390
391 struct workqueue_struct *event_q;
392 struct workqueue_struct *disco_q;
393
383 u8 *sas_addr; /* must be set */ 394 u8 *sas_addr; /* must be set */
384 u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE]; 395 u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];
385 396
@@ -399,6 +410,8 @@ struct sas_ha_struct {
399 410
400 struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */ 411 struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
401 struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */ 412 struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */
413
414 int event_thres;
402}; 415};
403 416
404#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata) 417#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -670,6 +683,7 @@ extern int sas_bios_param(struct scsi_device *,
670 sector_t capacity, int *hsc); 683 sector_t capacity, int *hsc);
671extern struct scsi_transport_template * 684extern struct scsi_transport_template *
672sas_domain_attach_transport(struct sas_domain_function_template *); 685sas_domain_attach_transport(struct sas_domain_function_template *);
686extern struct device_attribute dev_attr_phy_event_threshold;
673 687
674int sas_discover_root_expander(struct domain_device *); 688int sas_discover_root_expander(struct domain_device *);
675 689
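Since the static per-phy event arrays are gone, an LLDD-facing notifier now allocates an event, initializes it with INIT_SAS_EVENT(), and queues it on the new event workqueue. A rough sketch under those assumptions; the allocation policy and work function are hypothetical:

static void example_queue_phy_event(struct sas_ha_struct *ha,
				    struct asd_sas_phy *phy, int event,
				    void (*fn)(struct work_struct *))
{
	struct asd_sas_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
	if (!ev)
		return;

	INIT_SAS_EVENT(ev, fn, phy, event);
	queue_work(ha->event_q, &ev->work.work);
}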
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 7fb57e905526..d8d4a902a88d 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -58,8 +58,7 @@ struct scsi_pointer {
58/* for scmd->flags */ 58/* for scmd->flags */
59#define SCMD_TAGGED (1 << 0) 59#define SCMD_TAGGED (1 << 0)
60#define SCMD_UNCHECKED_ISA_DMA (1 << 1) 60#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
61#define SCMD_ZONE_WRITE_LOCK (1 << 2) 61#define SCMD_INITIALIZED (1 << 2)
62#define SCMD_INITIALIZED (1 << 3)
63/* flags preserved across unprep / reprep */ 62/* flags preserved across unprep / reprep */
64#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED) 63#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
65 64
@@ -171,7 +170,6 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
171extern void scsi_kunmap_atomic_sg(void *virt); 170extern void scsi_kunmap_atomic_sg(void *virt);
172 171
173extern int scsi_init_io(struct scsi_cmnd *cmd); 172extern int scsi_init_io(struct scsi_cmnd *cmd);
174extern void scsi_initialize_rq(struct request *rq);
175 173
176extern int scsi_dma_map(struct scsi_cmnd *cmd); 174extern int scsi_dma_map(struct scsi_cmnd *cmd);
177extern void scsi_dma_unmap(struct scsi_cmnd *cmd); 175extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
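Note (illustration, not part of the patch): with SCMD_ZONE_WRITE_LOCK removed in the first scsi_cmnd.h hunk above, the remaining flags renumber as follows (plain arithmetic from the definitions shown there):

/*
 * SCMD_TAGGED            = 1 << 0 = 0x01
 * SCMD_UNCHECKED_ISA_DMA = 1 << 1 = 0x02
 * SCMD_INITIALIZED       = 1 << 2 = 0x04   (was 1 << 3 = 0x08)
 * SCMD_PRESERVED_FLAGS   = 0x02 | 0x04 = 0x06
 */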
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..1a1df0d21ee3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -571,6 +571,8 @@ struct Scsi_Host {
571 struct blk_mq_tag_set tag_set; 571 struct blk_mq_tag_set tag_set;
572 }; 572 };
573 573
574 struct rcu_head rcu;
575
574 atomic_t host_busy; /* commands actually active on low-level */ 576 atomic_t host_busy; /* commands actually active on low-level */
575 atomic_t host_blocked; 577 atomic_t host_blocked;
576 578
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 1df8efb0ee01..c36860111932 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -236,6 +236,7 @@ struct scsi_varlen_cdb_hdr {
236#define UNIT_ATTENTION 0x06 236#define UNIT_ATTENTION 0x06
237#define DATA_PROTECT 0x07 237#define DATA_PROTECT 0x07
238#define BLANK_CHECK 0x08 238#define BLANK_CHECK 0x08
239#define VENDOR_SPECIFIC 0x09
239#define COPY_ABORTED 0x0a 240#define COPY_ABORTED 0x0a
240#define ABORTED_COMMAND 0x0b 241#define ABORTED_COMMAND 0x0b
241#define VOLUME_OVERFLOW 0x0d 242#define VOLUME_OVERFLOW 0x0d
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 8cf30215c177..15da45dc2a5d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -139,8 +139,8 @@ enum fc_vport_state {
139#define FC_PORTSPEED_50GBIT 0x200 139#define FC_PORTSPEED_50GBIT 0x200
140#define FC_PORTSPEED_100GBIT 0x400 140#define FC_PORTSPEED_100GBIT 0x400
141#define FC_PORTSPEED_25GBIT 0x800 141#define FC_PORTSPEED_25GBIT 0x800
142#define FC_PORTSPEED_64BIT 0x1000 142#define FC_PORTSPEED_64GBIT 0x1000
143#define FC_PORTSPEED_128BIT 0x2000 143#define FC_PORTSPEED_128GBIT 0x2000
144#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */ 144#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
145 145
146/* 146/*
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 62895b405933..05ec927a3c72 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -156,6 +156,7 @@ struct sas_port {
156 156
157 struct mutex phy_list_mutex; 157 struct mutex phy_list_mutex;
158 struct list_head phy_list; 158 struct list_head phy_list;
159 struct list_head del_list; /* libsas only */
159}; 160};
160 161
161#define dev_to_sas_port(d) \ 162#define dev_to_sas_port(d) \
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 5be834de491a..c16a3c9a4d9b 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -129,6 +129,23 @@ struct srp_login_req {
129 u8 target_port_id[16]; 129 u8 target_port_id[16];
130}; 130};
131 131
132/**
133 * struct srp_login_req_rdma - RDMA/CM login parameters.
134 *
135 * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private
136 * data. The %srp_login_req_rdma structure contains the same information as
137 * %srp_login_req but with the reserved data removed.
138 */
139struct srp_login_req_rdma {
140 u64 tag;
141 __be16 req_buf_fmt;
142 u8 req_flags;
143 u8 opcode;
144 __be32 req_it_iu_len;
145 u8 initiator_port_id[16];
146 u8 target_port_id[16];
147};
148
132/* 149/*
133 * The SRP spec defines the size of the LOGIN_RSP structure to be 52 150 * The SRP spec defines the size of the LOGIN_RSP structure to be 52
134 * bytes, so it needs to be packed to avoid having it padded to 56 151 * bytes, so it needs to be packed to avoid having it padded to 56
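Note (illustration, not part of the patch): as a quick check of the kernel-doc comment above, the new srp_login_req_rdma packs 8 + 2 + 1 + 1 + 4 + 16 + 16 = 48 bytes, which fits within the 92 - 36 = 56 bytes of private data available to RDMA/CM over InfiniBand. A compile-time assertion of that invariant could look like the sketch below; the constant name is invented here, not something the patch defines.

#include <linux/build_bug.h>

#define SRP_RDMA_CM_MAX_PRIVATE_DATA	56	/* hypothetical name: 92 - 36 */

static inline void srp_login_req_rdma_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) >
		     SRP_RDMA_CM_MAX_PRIVATE_DATA);
}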
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 44202ff897fd..233bae954970 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -51,6 +51,12 @@ struct tegra_smmu_swgroup {
51 unsigned int reg; 51 unsigned int reg;
52}; 52};
53 53
54struct tegra_smmu_group_soc {
55 const char *name;
56 const unsigned int *swgroups;
57 unsigned int num_swgroups;
58};
59
54struct tegra_smmu_soc { 60struct tegra_smmu_soc {
55 const struct tegra_mc_client *clients; 61 const struct tegra_mc_client *clients;
56 unsigned int num_clients; 62 unsigned int num_clients;
@@ -58,6 +64,9 @@ struct tegra_smmu_soc {
58 const struct tegra_smmu_swgroup *swgroups; 64 const struct tegra_smmu_swgroup *swgroups;
59 unsigned int num_swgroups; 65 unsigned int num_swgroups;
60 66
67 const struct tegra_smmu_group_soc *groups;
68 unsigned int num_groups;
69
61 bool supports_round_robin_arbitration; 70 bool supports_round_robin_arbitration;
62 bool supports_request_limit; 71 bool supports_request_limit;
63 72
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 1c3982bc558f..c32bf91c23e6 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -83,6 +83,7 @@ enum tegra_io_pad {
83 TEGRA_IO_PAD_BB, 83 TEGRA_IO_PAD_BB,
84 TEGRA_IO_PAD_CAM, 84 TEGRA_IO_PAD_CAM,
85 TEGRA_IO_PAD_COMP, 85 TEGRA_IO_PAD_COMP,
86 TEGRA_IO_PAD_CONN,
86 TEGRA_IO_PAD_CSIA, 87 TEGRA_IO_PAD_CSIA,
87 TEGRA_IO_PAD_CSIB, 88 TEGRA_IO_PAD_CSIB,
88 TEGRA_IO_PAD_CSIC, 89 TEGRA_IO_PAD_CSIC,
@@ -92,31 +93,42 @@ enum tegra_io_pad {
92 TEGRA_IO_PAD_DBG, 93 TEGRA_IO_PAD_DBG,
93 TEGRA_IO_PAD_DEBUG_NONAO, 94 TEGRA_IO_PAD_DEBUG_NONAO,
94 TEGRA_IO_PAD_DMIC, 95 TEGRA_IO_PAD_DMIC,
96 TEGRA_IO_PAD_DMIC_HV,
95 TEGRA_IO_PAD_DP, 97 TEGRA_IO_PAD_DP,
96 TEGRA_IO_PAD_DSI, 98 TEGRA_IO_PAD_DSI,
97 TEGRA_IO_PAD_DSIB, 99 TEGRA_IO_PAD_DSIB,
98 TEGRA_IO_PAD_DSIC, 100 TEGRA_IO_PAD_DSIC,
99 TEGRA_IO_PAD_DSID, 101 TEGRA_IO_PAD_DSID,
102 TEGRA_IO_PAD_EDP,
100 TEGRA_IO_PAD_EMMC, 103 TEGRA_IO_PAD_EMMC,
101 TEGRA_IO_PAD_EMMC2, 104 TEGRA_IO_PAD_EMMC2,
102 TEGRA_IO_PAD_GPIO, 105 TEGRA_IO_PAD_GPIO,
103 TEGRA_IO_PAD_HDMI, 106 TEGRA_IO_PAD_HDMI,
107 TEGRA_IO_PAD_HDMI_DP0,
108 TEGRA_IO_PAD_HDMI_DP1,
104 TEGRA_IO_PAD_HSIC, 109 TEGRA_IO_PAD_HSIC,
105 TEGRA_IO_PAD_HV, 110 TEGRA_IO_PAD_HV,
106 TEGRA_IO_PAD_LVDS, 111 TEGRA_IO_PAD_LVDS,
107 TEGRA_IO_PAD_MIPI_BIAS, 112 TEGRA_IO_PAD_MIPI_BIAS,
108 TEGRA_IO_PAD_NAND, 113 TEGRA_IO_PAD_NAND,
109 TEGRA_IO_PAD_PEX_BIAS, 114 TEGRA_IO_PAD_PEX_BIAS,
115 TEGRA_IO_PAD_PEX_CLK_BIAS,
110 TEGRA_IO_PAD_PEX_CLK1, 116 TEGRA_IO_PAD_PEX_CLK1,
111 TEGRA_IO_PAD_PEX_CLK2, 117 TEGRA_IO_PAD_PEX_CLK2,
118 TEGRA_IO_PAD_PEX_CLK3,
112 TEGRA_IO_PAD_PEX_CNTRL, 119 TEGRA_IO_PAD_PEX_CNTRL,
113 TEGRA_IO_PAD_SDMMC1, 120 TEGRA_IO_PAD_SDMMC1,
121 TEGRA_IO_PAD_SDMMC1_HV,
122 TEGRA_IO_PAD_SDMMC2,
123 TEGRA_IO_PAD_SDMMC2_HV,
114 TEGRA_IO_PAD_SDMMC3, 124 TEGRA_IO_PAD_SDMMC3,
125 TEGRA_IO_PAD_SDMMC3_HV,
115 TEGRA_IO_PAD_SDMMC4, 126 TEGRA_IO_PAD_SDMMC4,
116 TEGRA_IO_PAD_SPI, 127 TEGRA_IO_PAD_SPI,
117 TEGRA_IO_PAD_SPI_HV, 128 TEGRA_IO_PAD_SPI_HV,
118 TEGRA_IO_PAD_SYS_DDC, 129 TEGRA_IO_PAD_SYS_DDC,
119 TEGRA_IO_PAD_UART, 130 TEGRA_IO_PAD_UART,
131 TEGRA_IO_PAD_UFS,
120 TEGRA_IO_PAD_USB0, 132 TEGRA_IO_PAD_USB0,
121 TEGRA_IO_PAD_USB1, 133 TEGRA_IO_PAD_USB1,
122 TEGRA_IO_PAD_USB2, 134 TEGRA_IO_PAD_USB2,
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index ab9fcb2f97f0..afeca593188a 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -37,7 +37,7 @@ struct snd_hwdep_ops {
37 long count, loff_t *offset); 37 long count, loff_t *offset);
38 int (*open)(struct snd_hwdep *hw, struct file * file); 38 int (*open)(struct snd_hwdep *hw, struct file * file);
39 int (*release)(struct snd_hwdep *hw, struct file * file); 39 int (*release)(struct snd_hwdep *hw, struct file * file);
40 unsigned int (*poll)(struct snd_hwdep *hw, struct file *file, 40 __poll_t (*poll)(struct snd_hwdep *hw, struct file *file,
41 poll_table *wait); 41 poll_table *wait);
42 int (*ioctl)(struct snd_hwdep *hw, struct file *file, 42 int (*ioctl)(struct snd_hwdep *hw, struct file *file,
43 unsigned int cmd, unsigned long arg); 43 unsigned int cmd, unsigned long arg);
diff --git a/include/sound/info.h b/include/sound/info.h
index 67390ee846aa..becdf66d2825 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -62,7 +62,7 @@ struct snd_info_entry_ops {
62 loff_t (*llseek)(struct snd_info_entry *entry, 62 loff_t (*llseek)(struct snd_info_entry *entry,
63 void *file_private_data, struct file *file, 63 void *file_private_data, struct file *file,
64 loff_t offset, int orig); 64 loff_t offset, int orig);
65 unsigned int (*poll)(struct snd_info_entry *entry, 65 __poll_t (*poll)(struct snd_info_entry *entry,
66 void *file_private_data, struct file *file, 66 void *file_private_data, struct file *file,
67 poll_table *wait); 67 poll_table *wait);
68 int (*ioctl)(struct snd_info_entry *entry, void *file_private_data, 68 int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b655d987fbe7..747fd583b9dc 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -841,7 +841,7 @@ struct snd_soc_component_driver {
841 /* bits */ 841 /* bits */
842 unsigned int idle_bias_on:1; 842 unsigned int idle_bias_on:1;
843 unsigned int suspend_bias_off:1; 843 unsigned int suspend_bias_off:1;
844 unsigned int pmdown_time:1; /* care pmdown_time at stop */ 844 unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
845 unsigned int endianness:1; 845 unsigned int endianness:1;
846 unsigned int non_legacy_dai_naming:1; 846 unsigned int non_legacy_dai_naming:1;
847}; 847};
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 1bee3e7fdf32..8ea966448b58 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -82,8 +82,8 @@ TRACE_EVENT(fdb_delete,
82 TP_fast_assign( 82 TP_fast_assign(
83 __assign_str(br_dev, br->dev->name); 83 __assign_str(br_dev, br->dev->name);
84 __assign_str(dev, f->dst ? f->dst->dev->name : "null"); 84 __assign_str(dev, f->dst ? f->dst->dev->name : "null");
85 memcpy(__entry->addr, f->addr.addr, ETH_ALEN); 85 memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN);
86 __entry->vid = f->vlan_id; 86 __entry->vid = f->key.vlan_id;
87 ), 87 ),
88 88
89 TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u", 89 TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4342a329821f..c3ac5ec86519 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -193,7 +193,6 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
193 __print_flags(flag, "|", \ 193 __print_flags(flag, "|", \
194 { (1 << EXTENT_FLAG_PINNED), "PINNED" },\ 194 { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
195 { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\ 195 { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
196 { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
197 { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\ 196 { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
198 { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\ 197 { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
199 { (1 << EXTENT_FLAG_FILLING), "FILLING" },\ 198 { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8f8dd42fa57b..06c87f9f720c 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -147,7 +147,8 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
147 { CP_NO_SPC_ROLL, "no space roll forward" }, \ 147 { CP_NO_SPC_ROLL, "no space roll forward" }, \
148 { CP_NODE_NEED_CP, "node needs cp" }, \ 148 { CP_NODE_NEED_CP, "node needs cp" }, \
149 { CP_FASTBOOT_MODE, "fastboot mode" }, \ 149 { CP_FASTBOOT_MODE, "fastboot mode" }, \
150 { CP_SPEC_LOG_NUM, "log type is 2" }) 150 { CP_SPEC_LOG_NUM, "log type is 2" }, \
151 { CP_RECOVER_DIR, "dir needs recovery" })
151 152
152struct victim_sel_policy; 153struct victim_sel_policy;
153struct f2fs_map_blocks; 154struct f2fs_map_blocks;
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
new file mode 100644
index 000000000000..3930119cab08
--- /dev/null
+++ b/include/trace/events/net_probe_common.h
@@ -0,0 +1,44 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ)
4#define _TRACE_NET_PROBE_COMMON_H
5
6#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk) \
7 do { \
8 struct sockaddr_in *v4 = (void *)__entry->saddr; \
9 \
10 v4->sin_family = AF_INET; \
11 v4->sin_port = inet->inet_sport; \
12 v4->sin_addr.s_addr = inet->inet_saddr; \
13 v4 = (void *)__entry->daddr; \
14 v4->sin_family = AF_INET; \
15 v4->sin_port = inet->inet_dport; \
16 v4->sin_addr.s_addr = inet->inet_daddr; \
17 } while (0)
18
19#if IS_ENABLED(CONFIG_IPV6)
20
21#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \
22 do { \
23 if (sk->sk_family == AF_INET6) { \
24 struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
25 \
26 v6->sin6_family = AF_INET6; \
27 v6->sin6_port = inet->inet_sport; \
28 v6->sin6_addr = inet6_sk(sk)->saddr; \
29 v6 = (void *)__entry->daddr; \
30 v6->sin6_family = AF_INET6; \
31 v6->sin6_port = inet->inet_dport; \
32 v6->sin6_addr = sk->sk_v6_daddr; \
33 } else \
34 TP_STORE_ADDR_PORTS_V4(__entry, inet, sk); \
35 } while (0)
36
37#else
38
39#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \
40 TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);
41
42#endif
43
44#endif
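Note (illustration, not part of the patch): TP_STORE_ADDR_PORTS() fills two caller-provided buffers with a sockaddr_in or sockaddr_in6, chosen by sk->sk_family; the tcp_probe tracepoint added later in this diff is its first user. Condensed from that tracepoint, the expected caller pattern is:

	TP_STRUCT__entry(
		/* sockaddr_in6 is always bigger than sockaddr_in */
		__array(__u8, saddr, sizeof(struct sockaddr_in6))
		__array(__u8, daddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct inet_sock *inet = inet_sk(sk);

		memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
		memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
		TP_STORE_ADDR_PORTS(__entry, inet, sk);
	),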
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 59d40c454aa0..0b50fda80db0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -243,6 +243,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
243 __entry->grphi, __entry->gpevent) 243 __entry->grphi, __entry->gpevent)
244); 244);
245 245
246#ifdef CONFIG_RCU_NOCB_CPU
246/* 247/*
247 * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended 248 * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
248 * to assist debugging of these handoffs. 249 * to assist debugging of these handoffs.
@@ -285,6 +286,7 @@ TRACE_EVENT(rcu_nocb_wake,
285 286
286 TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason) 287 TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
287); 288);
289#endif
288 290
289/* 291/*
290 * Tracepoint for tasks blocking within preemptible-RCU read-side 292 * Tracepoint for tasks blocking within preemptible-RCU read-side
@@ -421,76 +423,40 @@ TRACE_EVENT(rcu_fqs,
421 423
422/* 424/*
423 * Tracepoint for dyntick-idle entry/exit events. These take a string 425 * Tracepoint for dyntick-idle entry/exit events. These take a string
424 * as argument: "Start" for entering dyntick-idle mode, "End" for 426 * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
425 * leaving it, "--=" for events moving towards idle, and "++=" for events 427 * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
426 * moving away from idle. "Error on entry: not idle task" and "Error on 428 * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
427 * exit: not idle task" indicate that a non-idle task is erroneously 429 * moving away from idle.
428 * toying with the idle loop.
429 * 430 *
430 * These events also take a pair of numbers, which indicate the nesting 431 * These events also take a pair of numbers, which indicate the nesting
431 * depth before and after the event of interest. Note that task-related 432 * depth before and after the event of interest, and a third number that is
432 * events use the upper bits of each number, while interrupt-related 433 * the ->dynticks counter. Note that task-related and interrupt-related
433 * events use the lower bits. 434 * events use two separate counters, and that the "++=" and "--=" events
435 * for irq/NMI will change the counter by two, otherwise by one.
434 */ 436 */
435TRACE_EVENT(rcu_dyntick, 437TRACE_EVENT(rcu_dyntick,
436 438
437 TP_PROTO(const char *polarity, long long oldnesting, long long newnesting), 439 TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
438 440
439 TP_ARGS(polarity, oldnesting, newnesting), 441 TP_ARGS(polarity, oldnesting, newnesting, dynticks),
440 442
441 TP_STRUCT__entry( 443 TP_STRUCT__entry(
442 __field(const char *, polarity) 444 __field(const char *, polarity)
443 __field(long long, oldnesting) 445 __field(long, oldnesting)
444 __field(long long, newnesting) 446 __field(long, newnesting)
447 __field(int, dynticks)
445 ), 448 ),
446 449
447 TP_fast_assign( 450 TP_fast_assign(
448 __entry->polarity = polarity; 451 __entry->polarity = polarity;
449 __entry->oldnesting = oldnesting; 452 __entry->oldnesting = oldnesting;
450 __entry->newnesting = newnesting; 453 __entry->newnesting = newnesting;
454 __entry->dynticks = atomic_read(&dynticks);
451 ), 455 ),
452 456
453 TP_printk("%s %llx %llx", __entry->polarity, 457 TP_printk("%s %lx %lx %#3x", __entry->polarity,
454 __entry->oldnesting, __entry->newnesting) 458 __entry->oldnesting, __entry->newnesting,
455); 459 __entry->dynticks & 0xfff)
456
457/*
458 * Tracepoint for RCU preparation for idle, the goal being to get RCU
459 * processing done so that the current CPU can shut off its scheduling
460 * clock and enter dyntick-idle mode. One way to accomplish this is
461 * to drain all RCU callbacks from this CPU, and the other is to have
462 * done everything RCU requires for the current grace period. In this
463 * latter case, the CPU will be awakened at the end of the current grace
464 * period in order to process the remainder of its callbacks.
465 *
466 * These tracepoints take a string as argument:
467 *
468 * "No callbacks": Nothing to do, no callbacks on this CPU.
469 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
470 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
471 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
472 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
473 * "More callbacks": Still more callbacks, try again to clear them out.
474 * "Callbacks drained": All callbacks processed, off to dyntick idle!
475 * "Timer": Timer fired to cause CPU to continue processing callbacks.
476 * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
477 * "Cleanup after idle": Idle exited, timer canceled.
478 */
479TRACE_EVENT(rcu_prep_idle,
480
481 TP_PROTO(const char *reason),
482
483 TP_ARGS(reason),
484
485 TP_STRUCT__entry(
486 __field(const char *, reason)
487 ),
488
489 TP_fast_assign(
490 __entry->reason = reason;
491 ),
492
493 TP_printk("%s", __entry->reason)
494); 460);
495 461
496/* 462/*
@@ -799,8 +765,7 @@ TRACE_EVENT(rcu_barrier,
799 grplo, grphi, gp_tasks) do { } \ 765 grplo, grphi, gp_tasks) do { } \
800 while (0) 766 while (0)
801#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) 767#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
802#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) 768#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
803#define trace_rcu_prep_idle(reason) do { } while (0)
804#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) 769#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
805#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ 770#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
806 do { } while (0) 771 do { } while (0)
diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h
new file mode 100644
index 000000000000..aa19afc73a4e
--- /dev/null
+++ b/include/trace/events/rdma.h
@@ -0,0 +1,129 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2017 Oracle. All rights reserved.
4 */
5
6/*
7 * enum ib_event_type, from include/rdma/ib_verbs.h
8 */
9
10#define IB_EVENT_LIST \
11 ib_event(CQ_ERR) \
12 ib_event(QP_FATAL) \
13 ib_event(QP_REQ_ERR) \
14 ib_event(QP_ACCESS_ERR) \
15 ib_event(COMM_EST) \
16 ib_event(SQ_DRAINED) \
17 ib_event(PATH_MIG) \
18 ib_event(PATH_MIG_ERR) \
19 ib_event(DEVICE_FATAL) \
20 ib_event(PORT_ACTIVE) \
21 ib_event(PORT_ERR) \
22 ib_event(LID_CHANGE) \
23 ib_event(PKEY_CHANGE) \
24 ib_event(SM_CHANGE) \
25 ib_event(SRQ_ERR) \
26 ib_event(SRQ_LIMIT_REACHED) \
27 ib_event(QP_LAST_WQE_REACHED) \
28 ib_event(CLIENT_REREGISTER) \
29 ib_event(GID_CHANGE) \
30 ib_event_end(WQ_FATAL)
31
32#undef ib_event
33#undef ib_event_end
34
35#define ib_event(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
36#define ib_event_end(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
37
38IB_EVENT_LIST
39
40#undef ib_event
41#undef ib_event_end
42
43#define ib_event(x) { IB_EVENT_##x, #x },
44#define ib_event_end(x) { IB_EVENT_##x, #x }
45
46#define rdma_show_ib_event(x) \
47 __print_symbolic(x, IB_EVENT_LIST)
48
49/*
50 * enum ib_wc_status type, from include/rdma/ib_verbs.h
51 */
52#define IB_WC_STATUS_LIST \
53 ib_wc_status(SUCCESS) \
54 ib_wc_status(LOC_LEN_ERR) \
55 ib_wc_status(LOC_QP_OP_ERR) \
56 ib_wc_status(LOC_EEC_OP_ERR) \
57 ib_wc_status(LOC_PROT_ERR) \
58 ib_wc_status(WR_FLUSH_ERR) \
59 ib_wc_status(MW_BIND_ERR) \
60 ib_wc_status(BAD_RESP_ERR) \
61 ib_wc_status(LOC_ACCESS_ERR) \
62 ib_wc_status(REM_INV_REQ_ERR) \
63 ib_wc_status(REM_ACCESS_ERR) \
64 ib_wc_status(REM_OP_ERR) \
65 ib_wc_status(RETRY_EXC_ERR) \
66 ib_wc_status(RNR_RETRY_EXC_ERR) \
67 ib_wc_status(LOC_RDD_VIOL_ERR) \
68 ib_wc_status(REM_INV_RD_REQ_ERR) \
69 ib_wc_status(REM_ABORT_ERR) \
70 ib_wc_status(INV_EECN_ERR) \
71 ib_wc_status(INV_EEC_STATE_ERR) \
72 ib_wc_status(FATAL_ERR) \
73 ib_wc_status(RESP_TIMEOUT_ERR) \
74 ib_wc_status_end(GENERAL_ERR)
75
76#undef ib_wc_status
77#undef ib_wc_status_end
78
79#define ib_wc_status(x) TRACE_DEFINE_ENUM(IB_WC_##x);
80#define ib_wc_status_end(x) TRACE_DEFINE_ENUM(IB_WC_##x);
81
82IB_WC_STATUS_LIST
83
84#undef ib_wc_status
85#undef ib_wc_status_end
86
87#define ib_wc_status(x) { IB_WC_##x, #x },
88#define ib_wc_status_end(x) { IB_WC_##x, #x }
89
90#define rdma_show_wc_status(x) \
91 __print_symbolic(x, IB_WC_STATUS_LIST)
92
93/*
94 * enum rdma_cm_event_type, from include/rdma/rdma_cm.h
95 */
96#define RDMA_CM_EVENT_LIST \
97 rdma_cm_event(ADDR_RESOLVED) \
98 rdma_cm_event(ADDR_ERROR) \
99 rdma_cm_event(ROUTE_RESOLVED) \
100 rdma_cm_event(ROUTE_ERROR) \
101 rdma_cm_event(CONNECT_REQUEST) \
102 rdma_cm_event(CONNECT_RESPONSE) \
103 rdma_cm_event(CONNECT_ERROR) \
104 rdma_cm_event(UNREACHABLE) \
105 rdma_cm_event(REJECTED) \
106 rdma_cm_event(ESTABLISHED) \
107 rdma_cm_event(DISCONNECTED) \
108 rdma_cm_event(DEVICE_REMOVAL) \
109 rdma_cm_event(MULTICAST_JOIN) \
110 rdma_cm_event(MULTICAST_ERROR) \
111 rdma_cm_event(ADDR_CHANGE) \
112 rdma_cm_event_end(TIMEWAIT_EXIT)
113
114#undef rdma_cm_event
115#undef rdma_cm_event_end
116
117#define rdma_cm_event(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
118#define rdma_cm_event_end(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
119
120RDMA_CM_EVENT_LIST
121
122#undef rdma_cm_event
123#undef rdma_cm_event_end
124
125#define rdma_cm_event(x) { RDMA_CM_EVENT_##x, #x },
126#define rdma_cm_event_end(x) { RDMA_CM_EVENT_##x, #x }
127
128#define rdma_show_cm_event(x) \
129 __print_symbolic(x, RDMA_CM_EVENT_LIST)
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
new file mode 100644
index 000000000000..50ed3f8bf534
--- /dev/null
+++ b/include/trace/events/rpcrdma.h
@@ -0,0 +1,890 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2017 Oracle. All rights reserved.
4 */
5#undef TRACE_SYSTEM
6#define TRACE_SYSTEM rpcrdma
7
8#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
9#define _TRACE_RPCRDMA_H
10
11#include <linux/tracepoint.h>
12#include <trace/events/rdma.h>
13
14/**
15 ** Event classes
16 **/
17
18DECLARE_EVENT_CLASS(xprtrdma_reply_event,
19 TP_PROTO(
20 const struct rpcrdma_rep *rep
21 ),
22
23 TP_ARGS(rep),
24
25 TP_STRUCT__entry(
26 __field(const void *, rep)
27 __field(const void *, r_xprt)
28 __field(u32, xid)
29 __field(u32, version)
30 __field(u32, proc)
31 ),
32
33 TP_fast_assign(
34 __entry->rep = rep;
35 __entry->r_xprt = rep->rr_rxprt;
36 __entry->xid = be32_to_cpu(rep->rr_xid);
37 __entry->version = be32_to_cpu(rep->rr_vers);
38 __entry->proc = be32_to_cpu(rep->rr_proc);
39 ),
40
41 TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
42 __entry->r_xprt, __entry->xid, __entry->rep,
43 __entry->version, __entry->proc
44 )
45);
46
47#define DEFINE_REPLY_EVENT(name) \
48 DEFINE_EVENT(xprtrdma_reply_event, name, \
49 TP_PROTO( \
50 const struct rpcrdma_rep *rep \
51 ), \
52 TP_ARGS(rep))
53
54DECLARE_EVENT_CLASS(xprtrdma_rxprt,
55 TP_PROTO(
56 const struct rpcrdma_xprt *r_xprt
57 ),
58
59 TP_ARGS(r_xprt),
60
61 TP_STRUCT__entry(
62 __field(const void *, r_xprt)
63 __string(addr, rpcrdma_addrstr(r_xprt))
64 __string(port, rpcrdma_portstr(r_xprt))
65 ),
66
67 TP_fast_assign(
68 __entry->r_xprt = r_xprt;
69 __assign_str(addr, rpcrdma_addrstr(r_xprt));
70 __assign_str(port, rpcrdma_portstr(r_xprt));
71 ),
72
73 TP_printk("peer=[%s]:%s r_xprt=%p",
74 __get_str(addr), __get_str(port), __entry->r_xprt
75 )
76);
77
78#define DEFINE_RXPRT_EVENT(name) \
79 DEFINE_EVENT(xprtrdma_rxprt, name, \
80 TP_PROTO( \
81 const struct rpcrdma_xprt *r_xprt \
82 ), \
83 TP_ARGS(r_xprt))
84
85DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
86 TP_PROTO(
87 const struct rpc_task *task,
88 unsigned int pos,
89 struct rpcrdma_mr *mr,
90 int nsegs
91 ),
92
93 TP_ARGS(task, pos, mr, nsegs),
94
95 TP_STRUCT__entry(
96 __field(unsigned int, task_id)
97 __field(unsigned int, client_id)
98 __field(const void *, mr)
99 __field(unsigned int, pos)
100 __field(int, nents)
101 __field(u32, handle)
102 __field(u32, length)
103 __field(u64, offset)
104 __field(int, nsegs)
105 ),
106
107 TP_fast_assign(
108 __entry->task_id = task->tk_pid;
109 __entry->client_id = task->tk_client->cl_clid;
110 __entry->mr = mr;
111 __entry->pos = pos;
112 __entry->nents = mr->mr_nents;
113 __entry->handle = mr->mr_handle;
114 __entry->length = mr->mr_length;
115 __entry->offset = mr->mr_offset;
116 __entry->nsegs = nsegs;
117 ),
118
119 TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
120 __entry->task_id, __entry->client_id, __entry->mr,
121 __entry->pos, __entry->length,
122 (unsigned long long)__entry->offset, __entry->handle,
123 __entry->nents < __entry->nsegs ? "more" : "last"
124 )
125);
126
127#define DEFINE_RDCH_EVENT(name) \
128 DEFINE_EVENT(xprtrdma_rdch_event, name, \
129 TP_PROTO( \
130 const struct rpc_task *task, \
131 unsigned int pos, \
132 struct rpcrdma_mr *mr, \
133 int nsegs \
134 ), \
135 TP_ARGS(task, pos, mr, nsegs))
136
137DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
138 TP_PROTO(
139 const struct rpc_task *task,
140 struct rpcrdma_mr *mr,
141 int nsegs
142 ),
143
144 TP_ARGS(task, mr, nsegs),
145
146 TP_STRUCT__entry(
147 __field(unsigned int, task_id)
148 __field(unsigned int, client_id)
149 __field(const void *, mr)
150 __field(int, nents)
151 __field(u32, handle)
152 __field(u32, length)
153 __field(u64, offset)
154 __field(int, nsegs)
155 ),
156
157 TP_fast_assign(
158 __entry->task_id = task->tk_pid;
159 __entry->client_id = task->tk_client->cl_clid;
160 __entry->mr = mr;
161 __entry->nents = mr->mr_nents;
162 __entry->handle = mr->mr_handle;
163 __entry->length = mr->mr_length;
164 __entry->offset = mr->mr_offset;
165 __entry->nsegs = nsegs;
166 ),
167
168 TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
169 __entry->task_id, __entry->client_id, __entry->mr,
170 __entry->length, (unsigned long long)__entry->offset,
171 __entry->handle,
172 __entry->nents < __entry->nsegs ? "more" : "last"
173 )
174);
175
176#define DEFINE_WRCH_EVENT(name) \
177 DEFINE_EVENT(xprtrdma_wrch_event, name, \
178 TP_PROTO( \
179 const struct rpc_task *task, \
180 struct rpcrdma_mr *mr, \
181 int nsegs \
182 ), \
183 TP_ARGS(task, mr, nsegs))
184
185TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
186TRACE_DEFINE_ENUM(FRWR_IS_VALID);
187TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
188TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
189
190#define xprtrdma_show_frwr_state(x) \
191 __print_symbolic(x, \
192 { FRWR_IS_INVALID, "INVALID" }, \
193 { FRWR_IS_VALID, "VALID" }, \
194 { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
195 { FRWR_FLUSHED_LI, "FLUSHED_LI" })
196
197DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
198 TP_PROTO(
199 const struct ib_wc *wc,
200 const struct rpcrdma_frwr *frwr
201 ),
202
203 TP_ARGS(wc, frwr),
204
205 TP_STRUCT__entry(
206 __field(const void *, mr)
207 __field(unsigned int, state)
208 __field(unsigned int, status)
209 __field(unsigned int, vendor_err)
210 ),
211
212 TP_fast_assign(
213 __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
214 __entry->state = frwr->fr_state;
215 __entry->status = wc->status;
216 __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
217 ),
218
219 TP_printk(
220 "mr=%p state=%s: %s (%u/0x%x)",
221 __entry->mr, xprtrdma_show_frwr_state(__entry->state),
222 rdma_show_wc_status(__entry->status),
223 __entry->status, __entry->vendor_err
224 )
225);
226
227#define DEFINE_FRWR_DONE_EVENT(name) \
228 DEFINE_EVENT(xprtrdma_frwr_done, name, \
229 TP_PROTO( \
230 const struct ib_wc *wc, \
231 const struct rpcrdma_frwr *frwr \
232 ), \
233 TP_ARGS(wc, frwr))
234
235DECLARE_EVENT_CLASS(xprtrdma_mr,
236 TP_PROTO(
237 const struct rpcrdma_mr *mr
238 ),
239
240 TP_ARGS(mr),
241
242 TP_STRUCT__entry(
243 __field(const void *, mr)
244 __field(u32, handle)
245 __field(u32, length)
246 __field(u64, offset)
247 ),
248
249 TP_fast_assign(
250 __entry->mr = mr;
251 __entry->handle = mr->mr_handle;
252 __entry->length = mr->mr_length;
253 __entry->offset = mr->mr_offset;
254 ),
255
256 TP_printk("mr=%p %u@0x%016llx:0x%08x",
257 __entry->mr, __entry->length,
258 (unsigned long long)__entry->offset,
259 __entry->handle
260 )
261);
262
263#define DEFINE_MR_EVENT(name) \
264 DEFINE_EVENT(xprtrdma_mr, name, \
265 TP_PROTO( \
266 const struct rpcrdma_mr *mr \
267 ), \
268 TP_ARGS(mr))
269
270DECLARE_EVENT_CLASS(xprtrdma_cb_event,
271 TP_PROTO(
272 const struct rpc_rqst *rqst
273 ),
274
275 TP_ARGS(rqst),
276
277 TP_STRUCT__entry(
278 __field(const void *, rqst)
279 __field(const void *, rep)
280 __field(const void *, req)
281 __field(u32, xid)
282 ),
283
284 TP_fast_assign(
285 __entry->rqst = rqst;
286 __entry->req = rpcr_to_rdmar(rqst);
287 __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
288 __entry->xid = be32_to_cpu(rqst->rq_xid);
289 ),
290
291 TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
292 __entry->xid, __entry->rqst, __entry->req, __entry->rep
293 )
294);
295
296#define DEFINE_CB_EVENT(name) \
297 DEFINE_EVENT(xprtrdma_cb_event, name, \
298 TP_PROTO( \
299 const struct rpc_rqst *rqst \
300 ), \
301 TP_ARGS(rqst))
302
303/**
304 ** Connection events
305 **/
306
307TRACE_EVENT(xprtrdma_conn_upcall,
308 TP_PROTO(
309 const struct rpcrdma_xprt *r_xprt,
310 struct rdma_cm_event *event
311 ),
312
313 TP_ARGS(r_xprt, event),
314
315 TP_STRUCT__entry(
316 __field(const void *, r_xprt)
317 __field(unsigned int, event)
318 __field(int, status)
319 __string(addr, rpcrdma_addrstr(r_xprt))
320 __string(port, rpcrdma_portstr(r_xprt))
321 ),
322
323 TP_fast_assign(
324 __entry->r_xprt = r_xprt;
325 __entry->event = event->event;
326 __entry->status = event->status;
327 __assign_str(addr, rpcrdma_addrstr(r_xprt));
328 __assign_str(port, rpcrdma_portstr(r_xprt));
329 ),
330
331 TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
332 __get_str(addr), __get_str(port),
333 __entry->r_xprt, rdma_show_cm_event(__entry->event),
334 __entry->event, __entry->status
335 )
336);
337
338TRACE_EVENT(xprtrdma_disconnect,
339 TP_PROTO(
340 const struct rpcrdma_xprt *r_xprt,
341 int status
342 ),
343
344 TP_ARGS(r_xprt, status),
345
346 TP_STRUCT__entry(
347 __field(const void *, r_xprt)
348 __field(int, status)
349 __field(int, connected)
350 __string(addr, rpcrdma_addrstr(r_xprt))
351 __string(port, rpcrdma_portstr(r_xprt))
352 ),
353
354 TP_fast_assign(
355 __entry->r_xprt = r_xprt;
356 __entry->status = status;
357 __entry->connected = r_xprt->rx_ep.rep_connected;
358 __assign_str(addr, rpcrdma_addrstr(r_xprt));
359 __assign_str(port, rpcrdma_portstr(r_xprt));
360 ),
361
362 TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
363 __get_str(addr), __get_str(port),
364 __entry->r_xprt, __entry->status,
365 __entry->connected == 1 ? "still " : "dis"
366 )
367);
368
369DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
370DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
371DEFINE_RXPRT_EVENT(xprtrdma_create);
372DEFINE_RXPRT_EVENT(xprtrdma_destroy);
373DEFINE_RXPRT_EVENT(xprtrdma_remove);
374DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
375DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
376DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
377
378TRACE_EVENT(xprtrdma_qp_error,
379 TP_PROTO(
380 const struct rpcrdma_xprt *r_xprt,
381 const struct ib_event *event
382 ),
383
384 TP_ARGS(r_xprt, event),
385
386 TP_STRUCT__entry(
387 __field(const void *, r_xprt)
388 __field(unsigned int, event)
389 __string(name, event->device->name)
390 __string(addr, rpcrdma_addrstr(r_xprt))
391 __string(port, rpcrdma_portstr(r_xprt))
392 ),
393
394 TP_fast_assign(
395 __entry->r_xprt = r_xprt;
396 __entry->event = event->event;
397 __assign_str(name, event->device->name);
398 __assign_str(addr, rpcrdma_addrstr(r_xprt));
399 __assign_str(port, rpcrdma_portstr(r_xprt));
400 ),
401
402 TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
403 __get_str(addr), __get_str(port), __entry->r_xprt,
404 __get_str(name), rdma_show_ib_event(__entry->event),
405 __entry->event
406 )
407);
408
409/**
410 ** Call events
411 **/
412
413TRACE_EVENT(xprtrdma_createmrs,
414 TP_PROTO(
415 const struct rpcrdma_xprt *r_xprt,
416 unsigned int count
417 ),
418
419 TP_ARGS(r_xprt, count),
420
421 TP_STRUCT__entry(
422 __field(const void *, r_xprt)
423 __field(unsigned int, count)
424 ),
425
426 TP_fast_assign(
427 __entry->r_xprt = r_xprt;
428 __entry->count = count;
429 ),
430
431 TP_printk("r_xprt=%p: created %u MRs",
432 __entry->r_xprt, __entry->count
433 )
434);
435
436DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
437
438DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
439DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
440DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
441
442TRACE_DEFINE_ENUM(rpcrdma_noch);
443TRACE_DEFINE_ENUM(rpcrdma_readch);
444TRACE_DEFINE_ENUM(rpcrdma_areadch);
445TRACE_DEFINE_ENUM(rpcrdma_writech);
446TRACE_DEFINE_ENUM(rpcrdma_replych);
447
448#define xprtrdma_show_chunktype(x) \
449 __print_symbolic(x, \
450 { rpcrdma_noch, "inline" }, \
451 { rpcrdma_readch, "read list" }, \
452 { rpcrdma_areadch, "*read list" }, \
453 { rpcrdma_writech, "write list" }, \
454 { rpcrdma_replych, "reply chunk" })
455
456TRACE_EVENT(xprtrdma_marshal,
457 TP_PROTO(
458 const struct rpc_rqst *rqst,
459 unsigned int hdrlen,
460 unsigned int rtype,
461 unsigned int wtype
462 ),
463
464 TP_ARGS(rqst, hdrlen, rtype, wtype),
465
466 TP_STRUCT__entry(
467 __field(unsigned int, task_id)
468 __field(unsigned int, client_id)
469 __field(u32, xid)
470 __field(unsigned int, hdrlen)
471 __field(unsigned int, headlen)
472 __field(unsigned int, pagelen)
473 __field(unsigned int, taillen)
474 __field(unsigned int, rtype)
475 __field(unsigned int, wtype)
476 ),
477
478 TP_fast_assign(
479 __entry->task_id = rqst->rq_task->tk_pid;
480 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
481 __entry->xid = be32_to_cpu(rqst->rq_xid);
482 __entry->hdrlen = hdrlen;
483 __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
484 __entry->pagelen = rqst->rq_snd_buf.page_len;
485 __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
486 __entry->rtype = rtype;
487 __entry->wtype = wtype;
488 ),
489
490 TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
491 __entry->task_id, __entry->client_id, __entry->xid,
492 __entry->hdrlen,
493 __entry->headlen, __entry->pagelen, __entry->taillen,
494 xprtrdma_show_chunktype(__entry->rtype),
495 xprtrdma_show_chunktype(__entry->wtype)
496 )
497);
498
499TRACE_EVENT(xprtrdma_post_send,
500 TP_PROTO(
501 const struct rpcrdma_req *req,
502 int status
503 ),
504
505 TP_ARGS(req, status),
506
507 TP_STRUCT__entry(
508 __field(const void *, req)
509 __field(int, num_sge)
510 __field(bool, signaled)
511 __field(int, status)
512 ),
513
514 TP_fast_assign(
515 __entry->req = req;
516 __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
517 __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
518 IB_SEND_SIGNALED;
519 __entry->status = status;
520 ),
521
522 TP_printk("req=%p, %d SGEs%s, status=%d",
523 __entry->req, __entry->num_sge,
524 (__entry->signaled ? ", signaled" : ""),
525 __entry->status
526 )
527);
528
529TRACE_EVENT(xprtrdma_post_recv,
530 TP_PROTO(
531 const struct rpcrdma_rep *rep,
532 int status
533 ),
534
535 TP_ARGS(rep, status),
536
537 TP_STRUCT__entry(
538 __field(const void *, rep)
539 __field(int, status)
540 ),
541
542 TP_fast_assign(
543 __entry->rep = rep;
544 __entry->status = status;
545 ),
546
547 TP_printk("rep=%p status=%d",
548 __entry->rep, __entry->status
549 )
550);
551
552/**
553 ** Completion events
554 **/
555
556TRACE_EVENT(xprtrdma_wc_send,
557 TP_PROTO(
558 const struct rpcrdma_sendctx *sc,
559 const struct ib_wc *wc
560 ),
561
562 TP_ARGS(sc, wc),
563
564 TP_STRUCT__entry(
565 __field(const void *, req)
566 __field(unsigned int, unmap_count)
567 __field(unsigned int, status)
568 __field(unsigned int, vendor_err)
569 ),
570
571 TP_fast_assign(
572 __entry->req = sc->sc_req;
573 __entry->unmap_count = sc->sc_unmap_count;
574 __entry->status = wc->status;
575 __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
576 ),
577
578 TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
579 __entry->req, __entry->unmap_count,
580 rdma_show_wc_status(__entry->status),
581 __entry->status, __entry->vendor_err
582 )
583);
584
585TRACE_EVENT(xprtrdma_wc_receive,
586 TP_PROTO(
587 const struct rpcrdma_rep *rep,
588 const struct ib_wc *wc
589 ),
590
591 TP_ARGS(rep, wc),
592
593 TP_STRUCT__entry(
594 __field(const void *, rep)
595 __field(unsigned int, byte_len)
596 __field(unsigned int, status)
597 __field(unsigned int, vendor_err)
598 ),
599
600 TP_fast_assign(
601 __entry->rep = rep;
602 __entry->byte_len = wc->byte_len;
603 __entry->status = wc->status;
604 __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
605 ),
606
607 TP_printk("rep=%p, %u bytes: %s (%u/0x%x)",
608 __entry->rep, __entry->byte_len,
609 rdma_show_wc_status(__entry->status),
610 __entry->status, __entry->vendor_err
611 )
612);
613
614DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
615DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
616DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
617
618DEFINE_MR_EVENT(xprtrdma_localinv);
619DEFINE_MR_EVENT(xprtrdma_dma_unmap);
620DEFINE_MR_EVENT(xprtrdma_remoteinv);
621DEFINE_MR_EVENT(xprtrdma_recover_mr);
622
623/**
624 ** Reply events
625 **/
626
627TRACE_EVENT(xprtrdma_reply,
628 TP_PROTO(
629 const struct rpc_task *task,
630 const struct rpcrdma_rep *rep,
631 const struct rpcrdma_req *req,
632 unsigned int credits
633 ),
634
635 TP_ARGS(task, rep, req, credits),
636
637 TP_STRUCT__entry(
638 __field(unsigned int, task_id)
639 __field(unsigned int, client_id)
640 __field(const void *, rep)
641 __field(const void *, req)
642 __field(u32, xid)
643 __field(unsigned int, credits)
644 ),
645
646 TP_fast_assign(
647 __entry->task_id = task->tk_pid;
648 __entry->client_id = task->tk_client->cl_clid;
649 __entry->rep = rep;
650 __entry->req = req;
651 __entry->xid = be32_to_cpu(rep->rr_xid);
652 __entry->credits = credits;
653 ),
654
655 TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
656 __entry->task_id, __entry->client_id, __entry->xid,
657 __entry->credits, __entry->rep, __entry->req
658 )
659);
660
661TRACE_EVENT(xprtrdma_defer_cmp,
662 TP_PROTO(
663 const struct rpcrdma_rep *rep
664 ),
665
666 TP_ARGS(rep),
667
668 TP_STRUCT__entry(
669 __field(unsigned int, task_id)
670 __field(unsigned int, client_id)
671 __field(const void *, rep)
672 __field(u32, xid)
673 ),
674
675 TP_fast_assign(
676 __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
677 __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
678 __entry->rep = rep;
679 __entry->xid = be32_to_cpu(rep->rr_xid);
680 ),
681
682 TP_printk("task:%u@%u xid=0x%08x rep=%p",
683 __entry->task_id, __entry->client_id, __entry->xid,
684 __entry->rep
685 )
686);
687
688DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
689DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
690DEFINE_REPLY_EVENT(xprtrdma_reply_short);
691DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
692
693TRACE_EVENT(xprtrdma_fixup,
694 TP_PROTO(
695 const struct rpc_rqst *rqst,
696 int len,
697 int hdrlen
698 ),
699
700 TP_ARGS(rqst, len, hdrlen),
701
702 TP_STRUCT__entry(
703 __field(unsigned int, task_id)
704 __field(unsigned int, client_id)
705 __field(const void *, base)
706 __field(int, len)
707 __field(int, hdrlen)
708 ),
709
710 TP_fast_assign(
711 __entry->task_id = rqst->rq_task->tk_pid;
712 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
713 __entry->base = rqst->rq_rcv_buf.head[0].iov_base;
714 __entry->len = len;
715 __entry->hdrlen = hdrlen;
716 ),
717
718 TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
719 __entry->task_id, __entry->client_id,
720 __entry->base, __entry->len, __entry->hdrlen
721 )
722);
723
724TRACE_EVENT(xprtrdma_fixup_pg,
725 TP_PROTO(
726 const struct rpc_rqst *rqst,
727 int pageno,
728 const void *pos,
729 int len,
730 int curlen
731 ),
732
733 TP_ARGS(rqst, pageno, pos, len, curlen),
734
735 TP_STRUCT__entry(
736 __field(unsigned int, task_id)
737 __field(unsigned int, client_id)
738 __field(const void *, pos)
739 __field(int, pageno)
740 __field(int, len)
741 __field(int, curlen)
742 ),
743
744 TP_fast_assign(
745 __entry->task_id = rqst->rq_task->tk_pid;
746 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
747 __entry->pos = pos;
748 __entry->pageno = pageno;
749 __entry->len = len;
750 __entry->curlen = curlen;
751 ),
752
753 TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
754 __entry->task_id, __entry->client_id,
755 __entry->pageno, __entry->pos, __entry->len, __entry->curlen
756 )
757);
758
759TRACE_EVENT(xprtrdma_decode_seg,
760 TP_PROTO(
761 u32 handle,
762 u32 length,
763 u64 offset
764 ),
765
766 TP_ARGS(handle, length, offset),
767
768 TP_STRUCT__entry(
769 __field(u32, handle)
770 __field(u32, length)
771 __field(u64, offset)
772 ),
773
774 TP_fast_assign(
775 __entry->handle = handle;
776 __entry->length = length;
777 __entry->offset = offset;
778 ),
779
780 TP_printk("%u@0x%016llx:0x%08x",
781 __entry->length, (unsigned long long)__entry->offset,
782 __entry->handle
783 )
784);
785
786/**
787 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
788 **/
789
790TRACE_EVENT(xprtrdma_allocate,
791 TP_PROTO(
792 const struct rpc_task *task,
793 const struct rpcrdma_req *req
794 ),
795
796 TP_ARGS(task, req),
797
798 TP_STRUCT__entry(
799 __field(unsigned int, task_id)
800 __field(unsigned int, client_id)
801 __field(const void *, req)
802 __field(const void *, rep)
803 __field(size_t, callsize)
804 __field(size_t, rcvsize)
805 ),
806
807 TP_fast_assign(
808 __entry->task_id = task->tk_pid;
809 __entry->client_id = task->tk_client->cl_clid;
810 __entry->req = req;
811 __entry->rep = req ? req->rl_reply : NULL;
812 __entry->callsize = task->tk_rqstp->rq_callsize;
813 __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
814 ),
815
816 TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)",
817 __entry->task_id, __entry->client_id,
818 __entry->req, __entry->rep,
819 __entry->callsize, __entry->rcvsize
820 )
821);
822
823TRACE_EVENT(xprtrdma_rpc_done,
824 TP_PROTO(
825 const struct rpc_task *task,
826 const struct rpcrdma_req *req
827 ),
828
829 TP_ARGS(task, req),
830
831 TP_STRUCT__entry(
832 __field(unsigned int, task_id)
833 __field(unsigned int, client_id)
834 __field(const void *, req)
835 __field(const void *, rep)
836 ),
837
838 TP_fast_assign(
839 __entry->task_id = task->tk_pid;
840 __entry->client_id = task->tk_client->cl_clid;
841 __entry->req = req;
842 __entry->rep = req->rl_reply;
843 ),
844
845 TP_printk("task:%u@%u req=%p rep=%p",
846 __entry->task_id, __entry->client_id,
847 __entry->req, __entry->rep
848 )
849);
850
851DEFINE_RXPRT_EVENT(xprtrdma_noreps);
852
853/**
854 ** Callback events
855 **/
856
857TRACE_EVENT(xprtrdma_cb_setup,
858 TP_PROTO(
859 const struct rpcrdma_xprt *r_xprt,
860 unsigned int reqs
861 ),
862
863 TP_ARGS(r_xprt, reqs),
864
865 TP_STRUCT__entry(
866 __field(const void *, r_xprt)
867 __field(unsigned int, reqs)
868 __string(addr, rpcrdma_addrstr(r_xprt))
869 __string(port, rpcrdma_portstr(r_xprt))
870 ),
871
872 TP_fast_assign(
873 __entry->r_xprt = r_xprt;
874 __entry->reqs = reqs;
875 __assign_str(addr, rpcrdma_addrstr(r_xprt));
876 __assign_str(port, rpcrdma_portstr(r_xprt));
877 ),
878
879 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
880 __get_str(addr), __get_str(port),
881 __entry->r_xprt, __entry->reqs
882 )
883);
884
885DEFINE_CB_EVENT(xprtrdma_cb_call);
886DEFINE_CB_EVENT(xprtrdma_cb_reply);
887
888#endif /* _TRACE_RPCRDMA_H */
889
890#include <trace/define_trace.h>
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
new file mode 100644
index 000000000000..7475c7be165a
--- /dev/null
+++ b/include/trace/events/sctp.h
@@ -0,0 +1,99 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM sctp
4
5#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_SCTP_H
7
8#include <net/sctp/structs.h>
9#include <linux/tracepoint.h>
10
11TRACE_EVENT(sctp_probe_path,
12
13 TP_PROTO(struct sctp_transport *sp,
14 const struct sctp_association *asoc),
15
16 TP_ARGS(sp, asoc),
17
18 TP_STRUCT__entry(
19 __field(__u64, asoc)
20 __field(__u32, primary)
21 __array(__u8, ipaddr, sizeof(union sctp_addr))
22 __field(__u32, state)
23 __field(__u32, cwnd)
24 __field(__u32, ssthresh)
25 __field(__u32, flight_size)
26 __field(__u32, partial_bytes_acked)
27 __field(__u32, pathmtu)
28 ),
29
30 TP_fast_assign(
31 __entry->asoc = (unsigned long)asoc;
32 __entry->primary = (sp == asoc->peer.primary_path);
33 memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr));
34 __entry->state = sp->state;
35 __entry->cwnd = sp->cwnd;
36 __entry->ssthresh = sp->ssthresh;
37 __entry->flight_size = sp->flight_size;
38 __entry->partial_bytes_acked = sp->partial_bytes_acked;
39 __entry->pathmtu = sp->pathmtu;
40 ),
41
42 TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
43 "flight_size=%u partial_bytes_acked=%u pathmtu=%u",
44 __entry->asoc, __entry->primary ? "(*)" : "",
45 __entry->ipaddr, __entry->state, __entry->cwnd,
46 __entry->ssthresh, __entry->flight_size,
47 __entry->partial_bytes_acked, __entry->pathmtu)
48);
49
50TRACE_EVENT(sctp_probe,
51
52 TP_PROTO(const struct sctp_endpoint *ep,
53 const struct sctp_association *asoc,
54 struct sctp_chunk *chunk),
55
56 TP_ARGS(ep, asoc, chunk),
57
58 TP_STRUCT__entry(
59 __field(__u64, asoc)
60 __field(__u32, mark)
61 __field(__u16, bind_port)
62 __field(__u16, peer_port)
63 __field(__u32, pathmtu)
64 __field(__u32, rwnd)
65 __field(__u16, unack_data)
66 ),
67
68 TP_fast_assign(
69 struct sk_buff *skb = chunk->skb;
70
71 __entry->asoc = (unsigned long)asoc;
72 __entry->mark = skb->mark;
73 __entry->bind_port = ep->base.bind_addr.port;
74 __entry->peer_port = asoc->peer.port;
75 __entry->pathmtu = asoc->pathmtu;
76 __entry->rwnd = asoc->peer.rwnd;
77 __entry->unack_data = asoc->unack_data;
78
79 if (trace_sctp_probe_path_enabled()) {
80 struct sctp_transport *sp;
81
82 list_for_each_entry(sp, &asoc->peer.transport_addr_list,
83 transports) {
84 trace_sctp_probe_path(sp, asoc);
85 }
86 }
87 ),
88
89 TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
90 "rwnd=%u unack_data=%d",
91 __entry->asoc, __entry->mark, __entry->bind_port,
92 __entry->peer_port, __entry->pathmtu, __entry->rwnd,
93 __entry->unack_data)
94);
95
96#endif /* _TRACE_SCTP_H */
97
98/* This part must be outside protection */
99#include <trace/define_trace.h>
diff --git a/include/trace/events/siox.h b/include/trace/events/siox.h
new file mode 100644
index 000000000000..68a43fc2c3a5
--- /dev/null
+++ b/include/trace/events/siox.h
@@ -0,0 +1,66 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM siox
3
4#if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SIOX_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(siox_set_data,
10 TP_PROTO(const struct siox_master *smaster,
11 const struct siox_device *sdevice,
12 unsigned int devno, size_t bufoffset),
13 TP_ARGS(smaster, sdevice, devno, bufoffset),
14 TP_STRUCT__entry(
15 __field(int, busno)
16 __field(unsigned int, devno)
17 __field(size_t, inbytes)
18 __dynamic_array(u8, buf, sdevice->inbytes)
19 ),
20 TP_fast_assign(
21 __entry->busno = smaster->busno;
22 __entry->devno = devno;
23 __entry->inbytes = sdevice->inbytes;
24 memcpy(__get_dynamic_array(buf),
25 smaster->buf + bufoffset, sdevice->inbytes);
26 ),
27 TP_printk("siox-%d-%u [%*phD]",
28 __entry->busno,
29 __entry->devno,
30 (int)__entry->inbytes, __get_dynamic_array(buf)
31 )
32);
33
34TRACE_EVENT(siox_get_data,
35 TP_PROTO(const struct siox_master *smaster,
36 const struct siox_device *sdevice,
37 unsigned int devno, u8 status_clean,
38 size_t bufoffset),
39 TP_ARGS(smaster, sdevice, devno, status_clean, bufoffset),
40 TP_STRUCT__entry(
41 __field(int, busno)
42 __field(unsigned int, devno)
43 __field(u8, status_clean)
44 __field(size_t, outbytes)
45 __dynamic_array(u8, buf, sdevice->outbytes)
46 ),
47 TP_fast_assign(
48 __entry->busno = smaster->busno;
49 __entry->devno = devno;
50 __entry->status_clean = status_clean;
51 __entry->outbytes = sdevice->outbytes;
52 memcpy(__get_dynamic_array(buf),
53 smaster->buf + bufoffset, sdevice->outbytes);
54 ),
55 TP_printk("siox-%d-%u (%02hhx) [%*phD]",
56 __entry->busno,
57 __entry->devno,
58 __entry->status_clean,
59 (int)__entry->outbytes, __get_dynamic_array(buf)
60 )
61);
62
63#endif /* if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ) */
64
65/* This part must be outside protection */
66#include <trace/define_trace.h>
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index ec4dade24466..3176a3931107 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -6,7 +6,58 @@
6#define _TRACE_SOCK_H 6#define _TRACE_SOCK_H
7 7
8#include <net/sock.h> 8#include <net/sock.h>
9#include <net/ipv6.h>
9#include <linux/tracepoint.h> 10#include <linux/tracepoint.h>
11#include <linux/ipv6.h>
12#include <linux/tcp.h>
13
14#define family_names \
15 EM(AF_INET) \
16 EMe(AF_INET6)
17
18/* The protocol traced by inet_sock_set_state */
19#define inet_protocol_names \
20 EM(IPPROTO_TCP) \
21 EM(IPPROTO_DCCP) \
22 EMe(IPPROTO_SCTP)
23
24#define tcp_state_names \
25 EM(TCP_ESTABLISHED) \
26 EM(TCP_SYN_SENT) \
27 EM(TCP_SYN_RECV) \
28 EM(TCP_FIN_WAIT1) \
29 EM(TCP_FIN_WAIT2) \
30 EM(TCP_TIME_WAIT) \
31 EM(TCP_CLOSE) \
32 EM(TCP_CLOSE_WAIT) \
33 EM(TCP_LAST_ACK) \
34 EM(TCP_LISTEN) \
35 EM(TCP_CLOSING) \
36 EMe(TCP_NEW_SYN_RECV)
37
38/* enums need to be exported to user space */
39#undef EM
40#undef EMe
41#define EM(a) TRACE_DEFINE_ENUM(a);
42#define EMe(a) TRACE_DEFINE_ENUM(a);
43
44family_names
45inet_protocol_names
46tcp_state_names
47
48#undef EM
49#undef EMe
50#define EM(a) { a, #a },
51#define EMe(a) { a, #a }
52
53#define show_family_name(val) \
54 __print_symbolic(val, family_names)
55
56#define show_inet_protocol_name(val) \
57 __print_symbolic(val, inet_protocol_names)
58
59#define show_tcp_state_name(val) \
60 __print_symbolic(val, tcp_state_names)
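Note (illustration, not part of the patch): the EM()/EMe() lists above are expanded twice, first with EM(a) defined as TRACE_DEFINE_ENUM(a); so the enum values are exported to user space, then as { a, #a } initializers for __print_symbolic(). Expanding the two-entry family_names list by hand gives:

/* first expansion: register the enum values with the tracing core */
TRACE_DEFINE_ENUM(AF_INET);
TRACE_DEFINE_ENUM(AF_INET6);

/* second expansion: the table behind show_family_name() */
#define show_family_name(val) \
	__print_symbolic(val, { AF_INET, "AF_INET" }, { AF_INET6, "AF_INET6" })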
10 61
11TRACE_EVENT(sock_rcvqueue_full, 62TRACE_EVENT(sock_rcvqueue_full,
12 63
@@ -63,6 +114,72 @@ TRACE_EVENT(sock_exceed_buf_limit,
63 __entry->rmem_alloc) 114 __entry->rmem_alloc)
64); 115);
65 116
117TRACE_EVENT(inet_sock_set_state,
118
119 TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
120
121 TP_ARGS(sk, oldstate, newstate),
122
123 TP_STRUCT__entry(
124 __field(const void *, skaddr)
125 __field(int, oldstate)
126 __field(int, newstate)
127 __field(__u16, sport)
128 __field(__u16, dport)
129 __field(__u16, family)
130 __field(__u8, protocol)
131 __array(__u8, saddr, 4)
132 __array(__u8, daddr, 4)
133 __array(__u8, saddr_v6, 16)
134 __array(__u8, daddr_v6, 16)
135 ),
136
137 TP_fast_assign(
138 struct inet_sock *inet = inet_sk(sk);
139 struct in6_addr *pin6;
140 __be32 *p32;
141
142 __entry->skaddr = sk;
143 __entry->oldstate = oldstate;
144 __entry->newstate = newstate;
145
146 __entry->family = sk->sk_family;
147 __entry->protocol = sk->sk_protocol;
148 __entry->sport = ntohs(inet->inet_sport);
149 __entry->dport = ntohs(inet->inet_dport);
150
151 p32 = (__be32 *) __entry->saddr;
152 *p32 = inet->inet_saddr;
153
154 p32 = (__be32 *) __entry->daddr;
155 *p32 = inet->inet_daddr;
156
157#if IS_ENABLED(CONFIG_IPV6)
158 if (sk->sk_family == AF_INET6) {
159 pin6 = (struct in6_addr *)__entry->saddr_v6;
160 *pin6 = sk->sk_v6_rcv_saddr;
161 pin6 = (struct in6_addr *)__entry->daddr_v6;
162 *pin6 = sk->sk_v6_daddr;
163 } else
164#endif
165 {
166 pin6 = (struct in6_addr *)__entry->saddr_v6;
167 ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
168 pin6 = (struct in6_addr *)__entry->daddr_v6;
169 ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
170 }
171 ),
172
173 TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
174 show_family_name(__entry->family),
175 show_inet_protocol_name(__entry->protocol),
176 __entry->sport, __entry->dport,
177 __entry->saddr, __entry->daddr,
178 __entry->saddr_v6, __entry->daddr_v6,
179 show_tcp_state_name(__entry->oldstate),
180 show_tcp_state_name(__entry->newstate))
181);
182
66#endif /* _TRACE_SOCK_H */ 183#endif /* _TRACE_SOCK_H */
67 184
68/* This part must be outside protection */ 185/* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8c153f68509e..970c91a83173 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
32 __entry->status = task->tk_status; 32 __entry->status = task->tk_status;
33 ), 33 ),
34 34
35 TP_printk("task:%u@%u, status %d", 35 TP_printk("task:%u@%u status=%d",
36 __entry->task_id, __entry->client_id, 36 __entry->task_id, __entry->client_id,
37 __entry->status) 37 __entry->status)
38); 38);
@@ -66,7 +66,7 @@ TRACE_EVENT(rpc_connect_status,
66 __entry->status = status; 66 __entry->status = status;
67 ), 67 ),
68 68
69 TP_printk("task:%u@%u, status %d", 69 TP_printk("task:%u@%u status=%d",
70 __entry->task_id, __entry->client_id, 70 __entry->task_id, __entry->client_id,
71 __entry->status) 71 __entry->status)
72); 72);
@@ -175,7 +175,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
175 ), 175 ),
176 176
177 TP_fast_assign( 177 TP_fast_assign(
178 __entry->client_id = clnt->cl_clid; 178 __entry->client_id = clnt ? clnt->cl_clid : -1;
179 __entry->task_id = task->tk_pid; 179 __entry->task_id = task->tk_pid;
180 __entry->timeout = task->tk_timeout; 180 __entry->timeout = task->tk_timeout;
181 __entry->runstate = task->tk_runstate; 181 __entry->runstate = task->tk_runstate;
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
184 __assign_str(q_name, rpc_qname(q)); 184 __assign_str(q_name, rpc_qname(q));
185 ), 185 ),
186 186
187 TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s", 187 TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
188 __entry->task_id, __entry->client_id, 188 __entry->task_id, __entry->client_id,
189 __entry->flags, 189 __entry->flags,
190 __entry->runstate, 190 __entry->runstate,
@@ -390,6 +390,10 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
390 __entry->status) 390 __entry->status)
391); 391);
392 392
393DEFINE_EVENT(rpc_xprt_event, xprt_timer,
394 TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
395 TP_ARGS(xprt, xid, status));
396
393DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst, 397DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
394 TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), 398 TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
395 TP_ARGS(xprt, xid, status)); 399 TP_ARGS(xprt, xid, status));
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ab34c561f26b..878b2be7ce77 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1#undef TRACE_SYSTEM 2#undef TRACE_SYSTEM
2#define TRACE_SYSTEM tcp 3#define TRACE_SYSTEM tcp
3 4
@@ -8,22 +9,7 @@
8#include <linux/tcp.h> 9#include <linux/tcp.h>
9#include <linux/tracepoint.h> 10#include <linux/tracepoint.h>
10#include <net/ipv6.h> 11#include <net/ipv6.h>
11 12#include <net/tcp.h>
12#define tcp_state_name(state) { state, #state }
13#define show_tcp_state_name(val) \
14 __print_symbolic(val, \
15 tcp_state_name(TCP_ESTABLISHED), \
16 tcp_state_name(TCP_SYN_SENT), \
17 tcp_state_name(TCP_SYN_RECV), \
18 tcp_state_name(TCP_FIN_WAIT1), \
19 tcp_state_name(TCP_FIN_WAIT2), \
20 tcp_state_name(TCP_TIME_WAIT), \
21 tcp_state_name(TCP_CLOSE), \
22 tcp_state_name(TCP_CLOSE_WAIT), \
23 tcp_state_name(TCP_LAST_ACK), \
24 tcp_state_name(TCP_LISTEN), \
25 tcp_state_name(TCP_CLOSING), \
26 tcp_state_name(TCP_NEW_SYN_RECV))
27 13
28#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \ 14#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \
29 do { \ 15 do { \
@@ -270,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack,
270 __entry->saddr_v6, __entry->daddr_v6) 256 __entry->saddr_v6, __entry->daddr_v6)
271); 257);
272 258
259#include <trace/events/net_probe_common.h>
260
261TRACE_EVENT(tcp_probe,
262
263 TP_PROTO(struct sock *sk, struct sk_buff *skb),
264
265 TP_ARGS(sk, skb),
266
267 TP_STRUCT__entry(
268 /* sockaddr_in6 is always bigger than sockaddr_in */
269 __array(__u8, saddr, sizeof(struct sockaddr_in6))
270 __array(__u8, daddr, sizeof(struct sockaddr_in6))
271 __field(__u16, sport)
272 __field(__u16, dport)
273 __field(__u32, mark)
274 __field(__u16, length)
275 __field(__u32, snd_nxt)
276 __field(__u32, snd_una)
277 __field(__u32, snd_cwnd)
278 __field(__u32, ssthresh)
279 __field(__u32, snd_wnd)
280 __field(__u32, srtt)
281 __field(__u32, rcv_wnd)
282 ),
283
284 TP_fast_assign(
285 const struct tcp_sock *tp = tcp_sk(sk);
286 const struct inet_sock *inet = inet_sk(sk);
287
288 memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
289 memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
290
291 TP_STORE_ADDR_PORTS(__entry, inet, sk);
292
293 /* For filtering use */
294 __entry->sport = ntohs(inet->inet_sport);
295 __entry->dport = ntohs(inet->inet_dport);
296 __entry->mark = skb->mark;
297
298 __entry->length = skb->len;
299 __entry->snd_nxt = tp->snd_nxt;
300 __entry->snd_una = tp->snd_una;
301 __entry->snd_cwnd = tp->snd_cwnd;
302 __entry->snd_wnd = tp->snd_wnd;
303 __entry->rcv_wnd = tp->rcv_wnd;
304 __entry->ssthresh = tcp_current_ssthresh(sk);
305 __entry->srtt = tp->srtt_us >> 3;
306 ),
307
308 TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x "
309 "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u "
310 "rcv_wnd=%u",
311 __entry->saddr, __entry->daddr, __entry->mark,
312 __entry->length, __entry->snd_nxt, __entry->snd_una,
313 __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
314 __entry->srtt, __entry->rcv_wnd)
315);
316
273#endif /* _TRACE_TCP_H */ 317#endif /* _TRACE_TCP_H */
274 318
275/* This part must be outside protection */ 319/* This part must be outside protection */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 16e305e69f34..a57e4ee989d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -136,6 +136,24 @@ DEFINE_EVENT(timer_class, timer_cancel,
136 TP_ARGS(timer) 136 TP_ARGS(timer)
137); 137);
138 138
139#define decode_clockid(type) \
140 __print_symbolic(type, \
141 { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
142 { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
143 { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
144 { CLOCK_TAI, "CLOCK_TAI" })
145
146#define decode_hrtimer_mode(mode) \
147 __print_symbolic(mode, \
148 { HRTIMER_MODE_ABS, "ABS" }, \
149 { HRTIMER_MODE_REL, "REL" }, \
150 { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
151 { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \
152 { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
153 { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
154 { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
155 { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
156
139/** 157/**
140 * hrtimer_init - called when the hrtimer is initialized 158 * hrtimer_init - called when the hrtimer is initialized
141 * @hrtimer: pointer to struct hrtimer 159 * @hrtimer: pointer to struct hrtimer
@@ -162,10 +180,8 @@ TRACE_EVENT(hrtimer_init,
162 ), 180 ),
163 181
164 TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, 182 TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
165 __entry->clockid == CLOCK_REALTIME ? 183 decode_clockid(__entry->clockid),
166 "CLOCK_REALTIME" : "CLOCK_MONOTONIC", 184 decode_hrtimer_mode(__entry->mode))
167 __entry->mode == HRTIMER_MODE_ABS ?
168 "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
169); 185);
170 186
171/** 187/**
@@ -174,15 +190,16 @@ TRACE_EVENT(hrtimer_init,
174 */ 190 */
175TRACE_EVENT(hrtimer_start, 191TRACE_EVENT(hrtimer_start,
176 192
177 TP_PROTO(struct hrtimer *hrtimer), 193 TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
178 194
179 TP_ARGS(hrtimer), 195 TP_ARGS(hrtimer, mode),
180 196
181 TP_STRUCT__entry( 197 TP_STRUCT__entry(
182 __field( void *, hrtimer ) 198 __field( void *, hrtimer )
183 __field( void *, function ) 199 __field( void *, function )
184 __field( s64, expires ) 200 __field( s64, expires )
185 __field( s64, softexpires ) 201 __field( s64, softexpires )
202 __field( enum hrtimer_mode, mode )
186 ), 203 ),
187 204
188 TP_fast_assign( 205 TP_fast_assign(
@@ -190,12 +207,14 @@ TRACE_EVENT(hrtimer_start,
190 __entry->function = hrtimer->function; 207 __entry->function = hrtimer->function;
191 __entry->expires = hrtimer_get_expires(hrtimer); 208 __entry->expires = hrtimer_get_expires(hrtimer);
192 __entry->softexpires = hrtimer_get_softexpires(hrtimer); 209 __entry->softexpires = hrtimer_get_softexpires(hrtimer);
210 __entry->mode = mode;
193 ), 211 ),
194 212
195 TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", 213 TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
196 __entry->hrtimer, __entry->function, 214 "mode=%s", __entry->hrtimer, __entry->function,
197 (unsigned long long) __entry->expires, 215 (unsigned long long) __entry->expires,
198 (unsigned long long) __entry->softexpires) 216 (unsigned long long) __entry->softexpires,
217 decode_hrtimer_mode(__entry->mode))
199); 218);
200 219
201/** 220/**
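The decode_clockid()/decode_hrtimer_mode() tables above hand the value-to-name mapping over to __print_symbolic(), which resolves it when the trace buffer is formatted rather than at event time. A minimal user-space sketch of that same lookup, using illustrative stand-in values rather than the real enum hrtimer_mode constants:

    #include <stdio.h>

    /* illustrative stand-ins for the kernel's enum hrtimer_mode values */
    enum { MODE_ABS = 0, MODE_REL = 1, MODE_ABS_PINNED = 2, MODE_REL_PINNED = 3 };

    static const struct { int val; const char *name; } mode_names[] = {
        { MODE_ABS,        "ABS" },
        { MODE_REL,        "REL" },
        { MODE_ABS_PINNED, "ABS|PINNED" },
        { MODE_REL_PINNED, "REL|PINNED" },
    };

    static const char *decode_mode(int mode)
    {
        /* linear scan of the value/name table, as __print_symbolic() does */
        for (unsigned i = 0; i < sizeof(mode_names) / sizeof(mode_names[0]); i++)
            if (mode_names[i].val == mode)
                return mode_names[i].name;
        return "UNKNOWN";
    }

    int main(void)
    {
        printf("mode=%s\n", decode_mode(MODE_REL_PINNED)); /* prints mode=REL|PINNED */
        return 0;
    }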
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index d70b53e65f43..e0b8b9173e1c 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -192,12 +192,12 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
192 192
193TRACE_EVENT(mm_shrink_slab_start, 193TRACE_EVENT(mm_shrink_slab_start,
194 TP_PROTO(struct shrinker *shr, struct shrink_control *sc, 194 TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
195 long nr_objects_to_shrink, unsigned long pgs_scanned, 195 long nr_objects_to_shrink, unsigned long cache_items,
196 unsigned long lru_pgs, unsigned long cache_items, 196 unsigned long long delta, unsigned long total_scan,
197 unsigned long long delta, unsigned long total_scan), 197 int priority),
198 198
199 TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs, 199 TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan,
200 cache_items, delta, total_scan), 200 priority),
201 201
202 TP_STRUCT__entry( 202 TP_STRUCT__entry(
203 __field(struct shrinker *, shr) 203 __field(struct shrinker *, shr)
@@ -205,11 +205,10 @@ TRACE_EVENT(mm_shrink_slab_start,
205 __field(int, nid) 205 __field(int, nid)
206 __field(long, nr_objects_to_shrink) 206 __field(long, nr_objects_to_shrink)
207 __field(gfp_t, gfp_flags) 207 __field(gfp_t, gfp_flags)
208 __field(unsigned long, pgs_scanned)
209 __field(unsigned long, lru_pgs)
210 __field(unsigned long, cache_items) 208 __field(unsigned long, cache_items)
211 __field(unsigned long long, delta) 209 __field(unsigned long long, delta)
212 __field(unsigned long, total_scan) 210 __field(unsigned long, total_scan)
211 __field(int, priority)
213 ), 212 ),
214 213
215 TP_fast_assign( 214 TP_fast_assign(
@@ -218,24 +217,22 @@ TRACE_EVENT(mm_shrink_slab_start,
218 __entry->nid = sc->nid; 217 __entry->nid = sc->nid;
219 __entry->nr_objects_to_shrink = nr_objects_to_shrink; 218 __entry->nr_objects_to_shrink = nr_objects_to_shrink;
220 __entry->gfp_flags = sc->gfp_mask; 219 __entry->gfp_flags = sc->gfp_mask;
221 __entry->pgs_scanned = pgs_scanned;
222 __entry->lru_pgs = lru_pgs;
223 __entry->cache_items = cache_items; 220 __entry->cache_items = cache_items;
224 __entry->delta = delta; 221 __entry->delta = delta;
225 __entry->total_scan = total_scan; 222 __entry->total_scan = total_scan;
223 __entry->priority = priority;
226 ), 224 ),
227 225
228 TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld", 226 TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
229 __entry->shrink, 227 __entry->shrink,
230 __entry->shr, 228 __entry->shr,
231 __entry->nid, 229 __entry->nid,
232 __entry->nr_objects_to_shrink, 230 __entry->nr_objects_to_shrink,
233 show_gfp_flags(__entry->gfp_flags), 231 show_gfp_flags(__entry->gfp_flags),
234 __entry->pgs_scanned,
235 __entry->lru_pgs,
236 __entry->cache_items, 232 __entry->cache_items,
237 __entry->delta, 233 __entry->delta,
238 __entry->total_scan) 234 __entry->total_scan,
235 __entry->priority)
239); 236);
240 237
241TRACE_EVENT(mm_shrink_slab_end, 238TRACE_EVENT(mm_shrink_slab_end,
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index fefb3d2c3fac..639fade14b23 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -3,35 +3,49 @@
3#define __ASM_GENERIC_POLL_H 3#define __ASM_GENERIC_POLL_H
4 4
5/* These are specified by iBCS2 */ 5/* These are specified by iBCS2 */
6#define POLLIN 0x0001 6#define POLLIN (__force __poll_t)0x0001
7#define POLLPRI 0x0002 7#define POLLPRI (__force __poll_t)0x0002
8#define POLLOUT 0x0004 8#define POLLOUT (__force __poll_t)0x0004
9#define POLLERR 0x0008 9#define POLLERR (__force __poll_t)0x0008
10#define POLLHUP 0x0010 10#define POLLHUP (__force __poll_t)0x0010
11#define POLLNVAL 0x0020 11#define POLLNVAL (__force __poll_t)0x0020
12 12
13/* The rest seem to be more-or-less nonstandard. Check them! */ 13/* The rest seem to be more-or-less nonstandard. Check them! */
14#define POLLRDNORM 0x0040 14#define POLLRDNORM (__force __poll_t)0x0040
15#define POLLRDBAND 0x0080 15#define POLLRDBAND (__force __poll_t)0x0080
16#ifndef POLLWRNORM 16#ifndef POLLWRNORM
17#define POLLWRNORM 0x0100 17#define POLLWRNORM (__force __poll_t)0x0100
18#endif 18#endif
19#ifndef POLLWRBAND 19#ifndef POLLWRBAND
20#define POLLWRBAND 0x0200 20#define POLLWRBAND (__force __poll_t)0x0200
21#endif 21#endif
22#ifndef POLLMSG 22#ifndef POLLMSG
23#define POLLMSG 0x0400 23#define POLLMSG (__force __poll_t)0x0400
24#endif 24#endif
25#ifndef POLLREMOVE 25#ifndef POLLREMOVE
26#define POLLREMOVE 0x1000 26#define POLLREMOVE (__force __poll_t)0x1000
27#endif 27#endif
28#ifndef POLLRDHUP 28#ifndef POLLRDHUP
29#define POLLRDHUP 0x2000 29#define POLLRDHUP (__force __poll_t)0x2000
30#endif 30#endif
31 31
32#define POLLFREE 0x4000 /* currently only for epoll */ 32#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
33 33
34#define POLL_BUSY_LOOP 0x8000 34#define POLL_BUSY_LOOP (__force __poll_t)0x8000
35
36#ifdef __KERNEL__
37#ifndef __ARCH_HAS_MANGLED_POLL
38static inline __u16 mangle_poll(__poll_t val)
39{
40 return (__force __u16)val;
41}
42
43static inline __poll_t demangle_poll(__u16 v)
44{
45 return (__force __poll_t)v;
46}
47#endif
48#endif
35 49
36struct pollfd { 50struct pollfd {
37 int fd; 51 int fd;
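The new __poll_t annotations only matter to sparse; on architectures without __ARCH_HAS_MANGLED_POLL the added mangle_poll()/demangle_poll() helpers collapse to plain casts between the in-kernel event mask and the 16-bit revents value handed to userspace. A user-space approximation of that round trip, assuming __poll_t is just an unsigned int and using made-up XPOLL* values in place of the real flags:

    #include <stdio.h>

    typedef unsigned int poll_mask_t;        /* stand-in for the kernel's __poll_t    */
    #define XPOLLIN  0x0001u                 /* illustrative value mirroring POLLIN   */
    #define XPOLLOUT 0x0004u                 /* illustrative value mirroring POLLOUT  */

    /* On !__ARCH_HAS_MANGLED_POLL these are straight casts, as in the patch. */
    static unsigned short mangle_poll(poll_mask_t val)    { return (unsigned short)val; }
    static poll_mask_t    demangle_poll(unsigned short v) { return (poll_mask_t)v; }

    int main(void)
    {
        poll_mask_t mask = XPOLLIN | XPOLLOUT;       /* what a driver's ->poll() returns */
        unsigned short revents = mangle_poll(mask);  /* what lands in pollfd.revents     */
        printf("revents=%#x, back=%#x\n", revents, demangle_poll(revents));
        return 0;
    }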
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index e447283b8f52..85dc965afd89 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -23,10 +23,6 @@ typedef union sigval {
23#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int)) 23#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
24#endif 24#endif
25 25
26#ifndef __ARCH_SI_UID_T
27#define __ARCH_SI_UID_T __kernel_uid32_t
28#endif
29
30/* 26/*
31 * The default "si_band" type is "long", as specified by POSIX. 27 * The default "si_band" type is "long", as specified by POSIX.
32 * However, some architectures want to override this to "int" 28 * However, some architectures want to override this to "int"
@@ -44,12 +40,15 @@ typedef union sigval {
44#define __ARCH_SI_ATTRIBUTES 40#define __ARCH_SI_ATTRIBUTES
45#endif 41#endif
46 42
47#ifndef HAVE_ARCH_SIGINFO_T
48
49typedef struct siginfo { 43typedef struct siginfo {
50 int si_signo; 44 int si_signo;
45#ifndef __ARCH_HAS_SWAPPED_SIGINFO
51 int si_errno; 46 int si_errno;
52 int si_code; 47 int si_code;
48#else
49 int si_code;
50 int si_errno;
51#endif
53 52
54 union { 53 union {
55 int _pad[SI_PAD_SIZE]; 54 int _pad[SI_PAD_SIZE];
@@ -57,14 +56,13 @@ typedef struct siginfo {
57 /* kill() */ 56 /* kill() */
58 struct { 57 struct {
59 __kernel_pid_t _pid; /* sender's pid */ 58 __kernel_pid_t _pid; /* sender's pid */
60 __ARCH_SI_UID_T _uid; /* sender's uid */ 59 __kernel_uid32_t _uid; /* sender's uid */
61 } _kill; 60 } _kill;
62 61
63 /* POSIX.1b timers */ 62 /* POSIX.1b timers */
64 struct { 63 struct {
65 __kernel_timer_t _tid; /* timer id */ 64 __kernel_timer_t _tid; /* timer id */
66 int _overrun; /* overrun count */ 65 int _overrun; /* overrun count */
67 char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
68 sigval_t _sigval; /* same as below */ 66 sigval_t _sigval; /* same as below */
69 int _sys_private; /* not to be passed to user */ 67 int _sys_private; /* not to be passed to user */
70 } _timer; 68 } _timer;
@@ -72,34 +70,47 @@ typedef struct siginfo {
72 /* POSIX.1b signals */ 70 /* POSIX.1b signals */
73 struct { 71 struct {
74 __kernel_pid_t _pid; /* sender's pid */ 72 __kernel_pid_t _pid; /* sender's pid */
75 __ARCH_SI_UID_T _uid; /* sender's uid */ 73 __kernel_uid32_t _uid; /* sender's uid */
76 sigval_t _sigval; 74 sigval_t _sigval;
77 } _rt; 75 } _rt;
78 76
79 /* SIGCHLD */ 77 /* SIGCHLD */
80 struct { 78 struct {
81 __kernel_pid_t _pid; /* which child */ 79 __kernel_pid_t _pid; /* which child */
82 __ARCH_SI_UID_T _uid; /* sender's uid */ 80 __kernel_uid32_t _uid; /* sender's uid */
83 int _status; /* exit code */ 81 int _status; /* exit code */
84 __ARCH_SI_CLOCK_T _utime; 82 __ARCH_SI_CLOCK_T _utime;
85 __ARCH_SI_CLOCK_T _stime; 83 __ARCH_SI_CLOCK_T _stime;
86 } _sigchld; 84 } _sigchld;
87 85
88 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ 86 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
89 struct { 87 struct {
90 void __user *_addr; /* faulting insn/memory ref. */ 88 void __user *_addr; /* faulting insn/memory ref. */
91#ifdef __ARCH_SI_TRAPNO 89#ifdef __ARCH_SI_TRAPNO
92 int _trapno; /* TRAP # which caused the signal */ 90 int _trapno; /* TRAP # which caused the signal */
93#endif 91#endif
94 short _addr_lsb; /* LSB of the reported address */ 92#ifdef __ia64__
93 int _imm; /* immediate value for "break" */
94 unsigned int _flags; /* see ia64 si_flags */
95 unsigned long _isr; /* isr */
96#endif
95 union { 97 union {
98 /*
 99 * used when si_code=BUS_MCEERR_AR or
 100 * when si_code=BUS_MCEERR_AO
101 */
102 short _addr_lsb; /* LSB of the reported address */
96 /* used when si_code=SEGV_BNDERR */ 103 /* used when si_code=SEGV_BNDERR */
97 struct { 104 struct {
105 short _dummy_bnd;
98 void __user *_lower; 106 void __user *_lower;
99 void __user *_upper; 107 void __user *_upper;
100 } _addr_bnd; 108 } _addr_bnd;
101 /* used when si_code=SEGV_PKUERR */ 109 /* used when si_code=SEGV_PKUERR */
102 __u32 _pkey; 110 struct {
111 short _dummy_pkey;
112 __u32 _pkey;
113 } _addr_pkey;
103 }; 114 };
104 } _sigfault; 115 } _sigfault;
105 116
@@ -118,10 +129,6 @@ typedef struct siginfo {
118 } _sifields; 129 } _sifields;
119} __ARCH_SI_ATTRIBUTES siginfo_t; 130} __ARCH_SI_ATTRIBUTES siginfo_t;
120 131
121/* If the arch shares siginfo, then it has SIGSYS. */
122#define __ARCH_SIGSYS
123#endif
124
125/* 132/*
126 * How these fields are to be accessed. 133 * How these fields are to be accessed.
127 */ 134 */
@@ -143,14 +150,12 @@ typedef struct siginfo {
143#define si_addr_lsb _sifields._sigfault._addr_lsb 150#define si_addr_lsb _sifields._sigfault._addr_lsb
144#define si_lower _sifields._sigfault._addr_bnd._lower 151#define si_lower _sifields._sigfault._addr_bnd._lower
145#define si_upper _sifields._sigfault._addr_bnd._upper 152#define si_upper _sifields._sigfault._addr_bnd._upper
146#define si_pkey _sifields._sigfault._pkey 153#define si_pkey _sifields._sigfault._addr_pkey._pkey
147#define si_band _sifields._sigpoll._band 154#define si_band _sifields._sigpoll._band
148#define si_fd _sifields._sigpoll._fd 155#define si_fd _sifields._sigpoll._fd
149#ifdef __ARCH_SIGSYS
150#define si_call_addr _sifields._sigsys._call_addr 156#define si_call_addr _sifields._sigsys._call_addr
151#define si_syscall _sifields._sigsys._syscall 157#define si_syscall _sifields._sigsys._syscall
152#define si_arch _sifields._sigsys._arch 158#define si_arch _sifields._sigsys._arch
153#endif
154 159
155/* 160/*
156 * si_code values 161 * si_code values
@@ -165,6 +170,7 @@ typedef struct siginfo {
165#define SI_SIGIO -5 /* sent by queued SIGIO */ 170#define SI_SIGIO -5 /* sent by queued SIGIO */
166#define SI_TKILL -6 /* sent by tkill system call */ 171#define SI_TKILL -6 /* sent by tkill system call */
167#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */ 172#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
173#define SI_ASYNCNL -60 /* sent by glibc async name lookup completion */
168 174
169#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0) 175#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
170#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0) 176#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
@@ -173,14 +179,34 @@ typedef struct siginfo {
173 * SIGILL si_codes 179 * SIGILL si_codes
174 */ 180 */
175#define ILL_ILLOPC 1 /* illegal opcode */ 181#define ILL_ILLOPC 1 /* illegal opcode */
182#ifdef __bfin__
183# define ILL_ILLPARAOP 2 /* illegal opcode combine */
184#endif
176#define ILL_ILLOPN 2 /* illegal operand */ 185#define ILL_ILLOPN 2 /* illegal operand */
177#define ILL_ILLADR 3 /* illegal addressing mode */ 186#define ILL_ILLADR 3 /* illegal addressing mode */
178#define ILL_ILLTRP 4 /* illegal trap */ 187#define ILL_ILLTRP 4 /* illegal trap */
188#ifdef __bfin__
189# define ILL_ILLEXCPT 4 /* unrecoverable exception */
190#endif
179#define ILL_PRVOPC 5 /* privileged opcode */ 191#define ILL_PRVOPC 5 /* privileged opcode */
180#define ILL_PRVREG 6 /* privileged register */ 192#define ILL_PRVREG 6 /* privileged register */
181#define ILL_COPROC 7 /* coprocessor error */ 193#define ILL_COPROC 7 /* coprocessor error */
182#define ILL_BADSTK 8 /* internal stack error */ 194#define ILL_BADSTK 8 /* internal stack error */
183#define NSIGILL 8 195#ifdef __bfin__
196# define ILL_CPLB_VI 9 /* D/I CPLB protect violation */
197# define ILL_CPLB_MISS 10 /* D/I CPLB miss */
198# define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit */
199#endif
200#ifdef __tile__
201# define ILL_DBLFLT 9 /* double fault */
202# define ILL_HARDWALL 10 /* user networks hardwall violation */
203#endif
204#ifdef __ia64__
205# define ILL_BADIADDR 9 /* unimplemented instruction address */
206# define __ILL_BREAK 10 /* illegal break */
207# define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */
208#endif
209#define NSIGILL 11
184 210
185/* 211/*
186 * SIGFPE si_codes 212 * SIGFPE si_codes
@@ -193,15 +219,33 @@ typedef struct siginfo {
193#define FPE_FLTRES 6 /* floating point inexact result */ 219#define FPE_FLTRES 6 /* floating point inexact result */
194#define FPE_FLTINV 7 /* floating point invalid operation */ 220#define FPE_FLTINV 7 /* floating point invalid operation */
195#define FPE_FLTSUB 8 /* subscript out of range */ 221#define FPE_FLTSUB 8 /* subscript out of range */
196#define NSIGFPE 8 222#ifdef __frv__
223# define FPE_MDAOVF 9 /* media overflow */
224#endif
225#ifdef __ia64__
226# define __FPE_DECOVF 9 /* decimal overflow */
227# define __FPE_DECDIV 10 /* decimal division by zero */
228# define __FPE_DECERR 11 /* packed decimal error */
229# define __FPE_INVASC 12 /* invalid ASCII digit */
230# define __FPE_INVDEC 13 /* invalid decimal digit */
231#endif
232#define NSIGFPE 13
197 233
198/* 234/*
199 * SIGSEGV si_codes 235 * SIGSEGV si_codes
200 */ 236 */
201#define SEGV_MAPERR 1 /* address not mapped to object */ 237#define SEGV_MAPERR 1 /* address not mapped to object */
202#define SEGV_ACCERR 2 /* invalid permissions for mapped object */ 238#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
203#define SEGV_BNDERR 3 /* failed address bound checks */ 239#ifdef __bfin__
204#define SEGV_PKUERR 4 /* failed protection key checks */ 240# define SEGV_STACKFLOW 3 /* stack overflow */
241#else
242# define SEGV_BNDERR 3 /* failed address bound checks */
243#endif
244#ifdef __ia64__
245# define __SEGV_PSTKOVF 4 /* paragraph stack overflow */
246#else
247# define SEGV_PKUERR 4 /* failed protection key checks */
248#endif
205#define NSIGSEGV 4 249#define NSIGSEGV 4
206 250
207/* 251/*
@@ -210,8 +254,12 @@ typedef struct siginfo {
210#define BUS_ADRALN 1 /* invalid address alignment */ 254#define BUS_ADRALN 1 /* invalid address alignment */
211#define BUS_ADRERR 2 /* non-existent physical address */ 255#define BUS_ADRERR 2 /* non-existent physical address */
212#define BUS_OBJERR 3 /* object specific hardware error */ 256#define BUS_OBJERR 3 /* object specific hardware error */
257#ifdef __bfin__
258# define BUS_OPFETCH 4 /* error from instruction fetch */
259#else
213/* hardware memory error consumed on a machine check: action required */ 260/* hardware memory error consumed on a machine check: action required */
214#define BUS_MCEERR_AR 4 261# define BUS_MCEERR_AR 4
262#endif
215/* hardware memory error detected in process but not consumed: action optional*/ 263/* hardware memory error detected in process but not consumed: action optional*/
216#define BUS_MCEERR_AO 5 264#define BUS_MCEERR_AO 5
217#define NSIGBUS 5 265#define NSIGBUS 5
@@ -223,9 +271,20 @@ typedef struct siginfo {
223#define TRAP_TRACE 2 /* process trace trap */ 271#define TRAP_TRACE 2 /* process trace trap */
224#define TRAP_BRANCH 3 /* process taken branch trap */ 272#define TRAP_BRANCH 3 /* process taken branch trap */
225#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */ 273#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
274#ifdef __bfin__
275# define TRAP_STEP 1 /* single-step breakpoint */
276# define TRAP_TRACEFLOW 2 /* trace buffer overflow */
277# define TRAP_WATCHPT 3 /* watchpoint match */
278# define TRAP_ILLTRAP 4 /* illegal trap */
279#endif
226#define NSIGTRAP 4 280#define NSIGTRAP 4
227 281
228/* 282/*
283 * There is an additional set of SIGTRAP si_codes used by ptrace
284 * that are of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP)
285 */
286
287/*
229 * SIGCHLD si_codes 288 * SIGCHLD si_codes
230 */ 289 */
231#define CLD_EXITED 1 /* child has exited */ 290#define CLD_EXITED 1 /* child has exited */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 919248fb4028..4d21191aaed0 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -160,6 +160,7 @@ union drm_amdgpu_bo_list {
160#define AMDGPU_CTX_OP_ALLOC_CTX 1 160#define AMDGPU_CTX_OP_ALLOC_CTX 1
161#define AMDGPU_CTX_OP_FREE_CTX 2 161#define AMDGPU_CTX_OP_FREE_CTX 2
162#define AMDGPU_CTX_OP_QUERY_STATE 3 162#define AMDGPU_CTX_OP_QUERY_STATE 3
163#define AMDGPU_CTX_OP_QUERY_STATE2 4
163 164
164/* GPU reset status */ 165/* GPU reset status */
165#define AMDGPU_CTX_NO_RESET 0 166#define AMDGPU_CTX_NO_RESET 0
@@ -170,6 +171,13 @@ union drm_amdgpu_bo_list {
170/* unknown cause */ 171/* unknown cause */
171#define AMDGPU_CTX_UNKNOWN_RESET 3 172#define AMDGPU_CTX_UNKNOWN_RESET 3
172 173
 174/* indicate gpu reset occurred after ctx created */
175#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
 176/* indicate vram lost occurred after ctx created */
177#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
 178/* indicate some job from this context once caused a gpu hang */
179#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
180
173/* Context priority level */ 181/* Context priority level */
174#define AMDGPU_CTX_PRIORITY_UNSET -2048 182#define AMDGPU_CTX_PRIORITY_UNSET -2048
175#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023 183#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
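The three QUERY2 flag bits are reported back by the new AMDGPU_CTX_OP_QUERY_STATE2 context query so userspace can tell whether its context survived a reset. A minimal sketch of checking them, with the flags word invented for illustration rather than obtained from the ioctl:

    #include <stdio.h>

    /* bit definitions as added by the patch */
    #define CTX_QUERY2_FLAGS_RESET    (1u << 0)
    #define CTX_QUERY2_FLAGS_VRAMLOST (1u << 1)
    #define CTX_QUERY2_FLAGS_GUILTY   (1u << 2)

    int main(void)
    {
        unsigned int flags = CTX_QUERY2_FLAGS_RESET | CTX_QUERY2_FLAGS_VRAMLOST; /* sample value */

        if (flags & CTX_QUERY2_FLAGS_RESET)
            printf("a GPU reset happened after this context was created\n");
        if (flags & CTX_QUERY2_FLAGS_VRAMLOST)
            printf("VRAM contents were lost; re-upload resources\n");
        if (flags & CTX_QUERY2_FLAGS_GUILTY)
            printf("a job from this context caused a hang\n");
        return 0;
    }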
@@ -869,6 +877,10 @@ struct drm_amdgpu_info_device {
869 __u32 _pad1; 877 __u32 _pad1;
870 /* always on cu bitmap */ 878 /* always on cu bitmap */
871 __u32 cu_ao_bitmap[4][4]; 879 __u32 cu_ao_bitmap[4][4];
880 /** Starting high virtual address for UMDs. */
881 __u64 high_va_offset;
882 /** The maximum high virtual address */
883 __u64 high_va_max;
872}; 884};
873 885
874struct drm_amdgpu_info_hw_ip { 886struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3ad838d3f93f..e04613d30a13 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -178,7 +178,7 @@ extern "C" {
178#define DRM_FORMAT_MOD_VENDOR_NONE 0 178#define DRM_FORMAT_MOD_VENDOR_NONE 0
179#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 179#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
180#define DRM_FORMAT_MOD_VENDOR_AMD 0x02 180#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
181#define DRM_FORMAT_MOD_VENDOR_NV 0x03 181#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
182#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 182#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
183#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 183#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
184#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06 184#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
@@ -188,7 +188,7 @@ extern "C" {
188#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) 188#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
189 189
190#define fourcc_mod_code(vendor, val) \ 190#define fourcc_mod_code(vendor, val) \
191 ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) 191 ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
192 192
193/* 193/*
194 * Format Modifier tokens: 194 * Format Modifier tokens:
@@ -338,29 +338,17 @@ extern "C" {
338 */ 338 */
339#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4) 339#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
340 340
341/* NVIDIA Tegra frame buffer modifiers */ 341/* NVIDIA frame buffer modifiers */
342
343/*
344 * Some modifiers take parameters, for example the number of vertical GOBs in
345 * a block. Reserve the lower 32 bits for parameters
346 */
347#define __fourcc_mod_tegra_mode_shift 32
348#define fourcc_mod_tegra_code(val, params) \
349 fourcc_mod_code(NV, ((((__u64)val) << __fourcc_mod_tegra_mode_shift) | params))
350#define fourcc_mod_tegra_mod(m) \
351 (m & ~((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
352#define fourcc_mod_tegra_param(m) \
353 (m & ((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
354 342
355/* 343/*
356 * Tegra Tiled Layout, used by Tegra 2, 3 and 4. 344 * Tegra Tiled Layout, used by Tegra 2, 3 and 4.
357 * 345 *
358 * Pixels are arranged in simple tiles of 16 x 16 bytes. 346 * Pixels are arranged in simple tiles of 16 x 16 bytes.
359 */ 347 */
360#define NV_FORMAT_MOD_TEGRA_TILED fourcc_mod_tegra_code(1, 0) 348#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
361 349
362/* 350/*
363 * Tegra 16Bx2 Block Linear layout, used by TK1/TX1 351 * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later
364 * 352 *
365 * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked 353 * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
366 * vertically by a power of 2 (1 to 32 GOBs) to form a block. 354 * vertically by a power of 2 (1 to 32 GOBs) to form a block.
@@ -380,7 +368,21 @@ extern "C" {
380 * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format 368 * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
381 * in full detail. 369 * in full detail.
382 */ 370 */
383#define NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(v) fourcc_mod_tegra_code(2, v) 371#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
372 fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))
373
374#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
375 fourcc_mod_code(NVIDIA, 0x10)
376#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
377 fourcc_mod_code(NVIDIA, 0x11)
378#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
379 fourcc_mod_code(NVIDIA, 0x12)
380#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
381 fourcc_mod_code(NVIDIA, 0x13)
382#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
383 fourcc_mod_code(NVIDIA, 0x14)
384#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
385 fourcc_mod_code(NVIDIA, 0x15)
384 386
385/* 387/*
386 * Broadcom VC4 "T" format 388 * Broadcom VC4 "T" format
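With the old parameterized Tegra encoding removed, the vertical GOB count now lives in the low four bits of the modifier value, and fourcc_mod_code() keeps the vendor in the top byte. A small sketch of the resulting layout, re-declaring the macros locally instead of including drm_fourcc.h:

    #include <stdio.h>
    #include <stdint.h>

    /* re-declared locally for illustration; they mirror the patched drm_fourcc.h */
    #define MOD_VENDOR_NVIDIA 0x03
    #define fourcc_mod_code(vendor, val) \
        ((((uint64_t)MOD_VENDOR_ ## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
    #define MOD_NVIDIA_16BX2_BLOCK(v) fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))

    int main(void)
    {
        /* v = log2(GOBs per block); v=5 selects the THIRTYTWO_GOB variant (0x15) */
        uint64_t mod = MOD_NVIDIA_16BX2_BLOCK(5);
        printf("modifier=%#llx vendor=%#llx param=%#llx\n",
               (unsigned long long)mod,
               (unsigned long long)(mod >> 56),
               (unsigned long long)(mod & 0xff));
        return 0;
    }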
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index d01087b2a651..4a54305120e0 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -135,172 +135,6 @@ struct drm_exynos_g2d_exec {
135 __u64 async; 135 __u64 async;
136}; 136};
137 137
138enum drm_exynos_ops_id {
139 EXYNOS_DRM_OPS_SRC,
140 EXYNOS_DRM_OPS_DST,
141 EXYNOS_DRM_OPS_MAX,
142};
143
144struct drm_exynos_sz {
145 __u32 hsize;
146 __u32 vsize;
147};
148
149struct drm_exynos_pos {
150 __u32 x;
151 __u32 y;
152 __u32 w;
153 __u32 h;
154};
155
156enum drm_exynos_flip {
157 EXYNOS_DRM_FLIP_NONE = (0 << 0),
158 EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
159 EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
160 EXYNOS_DRM_FLIP_BOTH = EXYNOS_DRM_FLIP_VERTICAL |
161 EXYNOS_DRM_FLIP_HORIZONTAL,
162};
163
164enum drm_exynos_degree {
165 EXYNOS_DRM_DEGREE_0,
166 EXYNOS_DRM_DEGREE_90,
167 EXYNOS_DRM_DEGREE_180,
168 EXYNOS_DRM_DEGREE_270,
169};
170
171enum drm_exynos_planer {
172 EXYNOS_DRM_PLANAR_Y,
173 EXYNOS_DRM_PLANAR_CB,
174 EXYNOS_DRM_PLANAR_CR,
175 EXYNOS_DRM_PLANAR_MAX,
176};
177
178/**
179 * A structure for ipp supported property list.
180 *
181 * @version: version of this structure.
182 * @ipp_id: id of ipp driver.
183 * @count: count of ipp driver.
184 * @writeback: flag of writeback supporting.
185 * @flip: flag of flip supporting.
186 * @degree: flag of degree information.
187 * @csc: flag of csc supporting.
188 * @crop: flag of crop supporting.
189 * @scale: flag of scale supporting.
190 * @refresh_min: min hz of refresh.
191 * @refresh_max: max hz of refresh.
192 * @crop_min: crop min resolution.
193 * @crop_max: crop max resolution.
194 * @scale_min: scale min resolution.
195 * @scale_max: scale max resolution.
196 */
197struct drm_exynos_ipp_prop_list {
198 __u32 version;
199 __u32 ipp_id;
200 __u32 count;
201 __u32 writeback;
202 __u32 flip;
203 __u32 degree;
204 __u32 csc;
205 __u32 crop;
206 __u32 scale;
207 __u32 refresh_min;
208 __u32 refresh_max;
209 __u32 reserved;
210 struct drm_exynos_sz crop_min;
211 struct drm_exynos_sz crop_max;
212 struct drm_exynos_sz scale_min;
213 struct drm_exynos_sz scale_max;
214};
215
216/**
217 * A structure for ipp config.
218 *
219 * @ops_id: property of operation directions.
220 * @flip: property of mirror, flip.
221 * @degree: property of rotation degree.
222 * @fmt: property of image format.
223 * @sz: property of image size.
224 * @pos: property of image position(src-cropped,dst-scaler).
225 */
226struct drm_exynos_ipp_config {
227 __u32 ops_id;
228 __u32 flip;
229 __u32 degree;
230 __u32 fmt;
231 struct drm_exynos_sz sz;
232 struct drm_exynos_pos pos;
233};
234
235enum drm_exynos_ipp_cmd {
236 IPP_CMD_NONE,
237 IPP_CMD_M2M,
238 IPP_CMD_WB,
239 IPP_CMD_OUTPUT,
240 IPP_CMD_MAX,
241};
242
243/**
244 * A structure for ipp property.
245 *
246 * @config: source, destination config.
247 * @cmd: definition of command.
248 * @ipp_id: id of ipp driver.
249 * @prop_id: id of property.
250 * @refresh_rate: refresh rate.
251 */
252struct drm_exynos_ipp_property {
253 struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
254 __u32 cmd;
255 __u32 ipp_id;
256 __u32 prop_id;
257 __u32 refresh_rate;
258};
259
260enum drm_exynos_ipp_buf_type {
261 IPP_BUF_ENQUEUE,
262 IPP_BUF_DEQUEUE,
263};
264
265/**
266 * A structure for ipp buffer operations.
267 *
268 * @ops_id: operation directions.
269 * @buf_type: definition of buffer.
270 * @prop_id: id of property.
271 * @buf_id: id of buffer.
272 * @handle: Y, Cb, Cr each planar handle.
273 * @user_data: user data.
274 */
275struct drm_exynos_ipp_queue_buf {
276 __u32 ops_id;
277 __u32 buf_type;
278 __u32 prop_id;
279 __u32 buf_id;
280 __u32 handle[EXYNOS_DRM_PLANAR_MAX];
281 __u32 reserved;
282 __u64 user_data;
283};
284
285enum drm_exynos_ipp_ctrl {
286 IPP_CTRL_PLAY,
287 IPP_CTRL_STOP,
288 IPP_CTRL_PAUSE,
289 IPP_CTRL_RESUME,
290 IPP_CTRL_MAX,
291};
292
293/**
294 * A structure for ipp start/stop operations.
295 *
296 * @prop_id: id of property.
297 * @ctrl: definition of control.
298 */
299struct drm_exynos_ipp_cmd_ctrl {
300 __u32 prop_id;
301 __u32 ctrl;
302};
303
304#define DRM_EXYNOS_GEM_CREATE 0x00 138#define DRM_EXYNOS_GEM_CREATE 0x00
305#define DRM_EXYNOS_GEM_MAP 0x01 139#define DRM_EXYNOS_GEM_MAP 0x01
306/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ 140/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
@@ -312,11 +146,7 @@ struct drm_exynos_ipp_cmd_ctrl {
312#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21 146#define DRM_EXYNOS_G2D_SET_CMDLIST 0x21
313#define DRM_EXYNOS_G2D_EXEC 0x22 147#define DRM_EXYNOS_G2D_EXEC 0x22
314 148
315/* IPP - Image Post Processing */ 149/* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
316#define DRM_EXYNOS_IPP_GET_PROPERTY 0x30
317#define DRM_EXYNOS_IPP_SET_PROPERTY 0x31
318#define DRM_EXYNOS_IPP_QUEUE_BUF 0x32
319#define DRM_EXYNOS_IPP_CMD_CTRL 0x33
320 150
321#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ 151#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
322 DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) 152 DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -335,18 +165,8 @@ struct drm_exynos_ipp_cmd_ctrl {
335#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ 165#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
336 DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) 166 DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
337 167
338#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \
339 DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
340#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \
341 DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
342#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF DRM_IOWR(DRM_COMMAND_BASE + \
343 DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
344#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL DRM_IOWR(DRM_COMMAND_BASE + \
345 DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
346
347/* EXYNOS specific events */ 168/* EXYNOS specific events */
348#define DRM_EXYNOS_G2D_EVENT 0x80000000 169#define DRM_EXYNOS_G2D_EVENT 0x80000000
349#define DRM_EXYNOS_IPP_EVENT 0x80000001
350 170
351struct drm_exynos_g2d_event { 171struct drm_exynos_g2d_event {
352 struct drm_event base; 172 struct drm_event base;
@@ -357,16 +177,6 @@ struct drm_exynos_g2d_event {
357 __u32 reserved; 177 __u32 reserved;
358}; 178};
359 179
360struct drm_exynos_ipp_event {
361 struct drm_event base;
362 __u64 user_data;
363 __u32 tv_sec;
364 __u32 tv_usec;
365 __u32 prop_id;
366 __u32 reserved;
367 __u32 buf_id[EXYNOS_DRM_OPS_MAX];
368};
369
370#if defined(__cplusplus) 180#if defined(__cplusplus)
371} 181}
372#endif 182#endif
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ac3c6503ca27..536ee4febd74 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
86 I915_MOCS_CACHED, 86 I915_MOCS_CACHED,
87}; 87};
88 88
89/*
90 * Different engines serve different roles, and there may be more than one
91 * engine serving each role. enum drm_i915_gem_engine_class provides a
92 * classification of the role of the engine, which may be used when requesting
93 * operations to be performed on a certain subset of engines, or for providing
94 * information about that group.
95 */
96enum drm_i915_gem_engine_class {
97 I915_ENGINE_CLASS_RENDER = 0,
98 I915_ENGINE_CLASS_COPY = 1,
99 I915_ENGINE_CLASS_VIDEO = 2,
100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
101
102 I915_ENGINE_CLASS_INVALID = -1
103};
104
105/**
106 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
107 *
108 */
109
110enum drm_i915_pmu_engine_sample {
111 I915_SAMPLE_BUSY = 0,
112 I915_SAMPLE_WAIT = 1,
113 I915_SAMPLE_SEMA = 2
114};
115
116#define I915_PMU_SAMPLE_BITS (4)
117#define I915_PMU_SAMPLE_MASK (0xf)
118#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
119#define I915_PMU_CLASS_SHIFT \
120 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
121
122#define __I915_PMU_ENGINE(class, instance, sample) \
123 ((class) << I915_PMU_CLASS_SHIFT | \
124 (instance) << I915_PMU_SAMPLE_BITS | \
125 (sample))
126
127#define I915_PMU_ENGINE_BUSY(class, instance) \
128 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
129
130#define I915_PMU_ENGINE_WAIT(class, instance) \
131 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
132
133#define I915_PMU_ENGINE_SEMA(class, instance) \
134 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
135
136#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
137
138#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
139#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
140#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
141#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
142
143#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
144
89/* Each region is a minimum of 16k, and there are at most 255 of them. 145/* Each region is a minimum of 16k, and there are at most 255 of them.
90 */ 146 */
91#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 147#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
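The __I915_PMU_ENGINE() family packs an engine class, an engine instance and a sample type into a single perf event config value: four sample bits at the bottom, eight instance bits above them, and the class above that. A user-space sketch that reproduces the packing with local copies of the constants (not the uapi header itself):

    #include <stdio.h>

    /* local copies of the layout from the patch: 4 sample bits, 8 instance bits */
    #define SAMPLE_BITS   4
    #define INSTANCE_BITS 8
    #define CLASS_SHIFT   (SAMPLE_BITS + INSTANCE_BITS)

    enum { SAMPLE_BUSY = 0, SAMPLE_WAIT = 1, SAMPLE_SEMA = 2 };
    enum { CLASS_RENDER = 0, CLASS_COPY = 1, CLASS_VIDEO = 2 };

    static unsigned long pmu_engine(unsigned cls, unsigned inst, unsigned sample)
    {
        return ((unsigned long)cls << CLASS_SHIFT) | (inst << SAMPLE_BITS) | sample;
    }

    int main(void)
    {
        /* busy-ness of video engine instance 1: 2 << 12 | 1 << 4 | 0 = 0x2010 */
        printf("config=%#lx\n", pmu_engine(CLASS_VIDEO, 1, SAMPLE_BUSY));
        return 0;
    }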
@@ -450,6 +506,27 @@ typedef struct drm_i915_irq_wait {
450 */ 506 */
451#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 507#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
452 508
509/*
510 * Query whether every context (both per-file default and user created) is
511 * isolated (insofar as HW supports). If this parameter is not true, then
512 * freshly created contexts may inherit values from an existing context,
513 * rather than default HW values. If true, it also ensures (insofar as HW
514 * supports) that all state set by this context will not leak to any other
515 * context.
516 *
 517 * As not every engine across every gen supports contexts, the returned
518 * value reports the support of context isolation for individual engines by
519 * returning a bitmask of each engine class set to true if that class supports
520 * isolation.
521 */
522#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
523
524/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
525 * registers. This used to be fixed per platform but from CNL onwards, this
526 * might vary depending on the parts.
527 */
528#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
529
453typedef struct drm_i915_getparam { 530typedef struct drm_i915_getparam {
454 __s32 param; 531 __s32 param;
455 /* 532 /*
diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h
new file mode 100644
index 000000000000..af0630ba5437
--- /dev/null
+++ b/include/uapi/linux/arm_sdei.h
@@ -0,0 +1,73 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* Copyright (C) 2017 Arm Ltd. */
3#ifndef _UAPI_LINUX_ARM_SDEI_H
4#define _UAPI_LINUX_ARM_SDEI_H
5
6#define SDEI_1_0_FN_BASE 0xC4000020
7#define SDEI_1_0_MASK 0xFFFFFFE0
8#define SDEI_1_0_FN(n) (SDEI_1_0_FN_BASE + (n))
9
10#define SDEI_1_0_FN_SDEI_VERSION SDEI_1_0_FN(0x00)
11#define SDEI_1_0_FN_SDEI_EVENT_REGISTER SDEI_1_0_FN(0x01)
12#define SDEI_1_0_FN_SDEI_EVENT_ENABLE SDEI_1_0_FN(0x02)
13#define SDEI_1_0_FN_SDEI_EVENT_DISABLE SDEI_1_0_FN(0x03)
14#define SDEI_1_0_FN_SDEI_EVENT_CONTEXT SDEI_1_0_FN(0x04)
15#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE SDEI_1_0_FN(0x05)
16#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME SDEI_1_0_FN(0x06)
17#define SDEI_1_0_FN_SDEI_EVENT_UNREGISTER SDEI_1_0_FN(0x07)
18#define SDEI_1_0_FN_SDEI_EVENT_STATUS SDEI_1_0_FN(0x08)
19#define SDEI_1_0_FN_SDEI_EVENT_GET_INFO SDEI_1_0_FN(0x09)
20#define SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET SDEI_1_0_FN(0x0A)
21#define SDEI_1_0_FN_SDEI_PE_MASK SDEI_1_0_FN(0x0B)
22#define SDEI_1_0_FN_SDEI_PE_UNMASK SDEI_1_0_FN(0x0C)
23#define SDEI_1_0_FN_SDEI_INTERRUPT_BIND SDEI_1_0_FN(0x0D)
24#define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E)
25#define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11)
26#define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12)
27
28#define SDEI_VERSION_MAJOR_SHIFT 48
29#define SDEI_VERSION_MAJOR_MASK 0x7fff
30#define SDEI_VERSION_MINOR_SHIFT 32
31#define SDEI_VERSION_MINOR_MASK 0xffff
32#define SDEI_VERSION_VENDOR_SHIFT 0
33#define SDEI_VERSION_VENDOR_MASK 0xffffffff
34
35#define SDEI_VERSION_MAJOR(x) (x>>SDEI_VERSION_MAJOR_SHIFT & SDEI_VERSION_MAJOR_MASK)
36#define SDEI_VERSION_MINOR(x) (x>>SDEI_VERSION_MINOR_SHIFT & SDEI_VERSION_MINOR_MASK)
37#define SDEI_VERSION_VENDOR(x) (x>>SDEI_VERSION_VENDOR_SHIFT & SDEI_VERSION_VENDOR_MASK)
38
39/* SDEI return values */
40#define SDEI_SUCCESS 0
41#define SDEI_NOT_SUPPORTED -1
42#define SDEI_INVALID_PARAMETERS -2
43#define SDEI_DENIED -3
44#define SDEI_PENDING -5
45#define SDEI_OUT_OF_RESOURCE -10
46
47/* EVENT_REGISTER flags */
48#define SDEI_EVENT_REGISTER_RM_ANY 0
49#define SDEI_EVENT_REGISTER_RM_PE 1
50
51/* EVENT_STATUS return value bits */
52#define SDEI_EVENT_STATUS_RUNNING 2
53#define SDEI_EVENT_STATUS_ENABLED 1
54#define SDEI_EVENT_STATUS_REGISTERED 0
55
56/* EVENT_COMPLETE status values */
57#define SDEI_EV_HANDLED 0
58#define SDEI_EV_FAILED 1
59
60/* GET_INFO values */
61#define SDEI_EVENT_INFO_EV_TYPE 0
62#define SDEI_EVENT_INFO_EV_SIGNALED 1
63#define SDEI_EVENT_INFO_EV_PRIORITY 2
64#define SDEI_EVENT_INFO_EV_ROUTING_MODE 3
65#define SDEI_EVENT_INFO_EV_ROUTING_AFF 4
66
67/* and their results */
68#define SDEI_EVENT_TYPE_PRIVATE 0
69#define SDEI_EVENT_TYPE_SHARED 1
70#define SDEI_EVENT_PRIORITY_NORMAL 0
71#define SDEI_EVENT_PRIORITY_CRITICAL 1
72
73#endif /* _UAPI_LINUX_ARM_SDEI_H */
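The SDEI_VERSION_* shift/mask pairs split the 64-bit value returned by SDEI_1_0_FN_SDEI_VERSION into a 15-bit major, a 16-bit minor and a 32-bit vendor field. A short sketch decoding a made-up version value with the same shifts and masks:

    #include <stdio.h>
    #include <stdint.h>

    /* shift/mask values copied from the new header; the sample version is made up */
    #define MAJOR_SHIFT 48
    #define MAJOR_MASK  0x7fffULL
    #define MINOR_SHIFT 32
    #define MINOR_MASK  0xffffULL

    int main(void)
    {
        uint64_t ver = (1ULL << MAJOR_SHIFT) | (0ULL << MINOR_SHIFT) | 0x47u;
        printf("SDEI v%llu.%llu (vendor %#llx)\n",
               (unsigned long long)(ver >> MAJOR_SHIFT & MAJOR_MASK),
               (unsigned long long)(ver >> MINOR_SHIFT & MINOR_MASK),
               (unsigned long long)(ver & 0xffffffffULL));
        return 0;
    }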
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
new file mode 100644
index 000000000000..5cb360be2a11
--- /dev/null
+++ b/include/uapi/linux/batadv_packet.h
@@ -0,0 +1,644 @@
1/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
2/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _UAPI_LINUX_BATADV_PACKET_H_
20#define _UAPI_LINUX_BATADV_PACKET_H_
21
22#include <asm/byteorder.h>
23#include <linux/if_ether.h>
24#include <linux/types.h>
25
26/**
27 * batadv_tp_is_error() - Check throughput meter return code for error
28 * @n: throughput meter return code
29 *
 30 * Return: 0 when no error was detected, != 0 otherwise
31 */
32#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0)
33
34/**
35 * enum batadv_packettype - types for batman-adv encapsulated packets
36 * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
37 * @BATADV_BCAST: broadcast packets carrying broadcast payload
38 * @BATADV_CODED: network coded packets
39 * @BATADV_ELP: echo location packets for B.A.T.M.A.N. V
40 * @BATADV_OGM2: originator messages for B.A.T.M.A.N. V
41 *
42 * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
43 * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
44 * payload packet
45 * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
46 * the sender
47 * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
48 * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
49 */
50enum batadv_packettype {
51 /* 0x00 - 0x3f: local packets or special rules for handling */
52 BATADV_IV_OGM = 0x00,
53 BATADV_BCAST = 0x01,
54 BATADV_CODED = 0x02,
55 BATADV_ELP = 0x03,
56 BATADV_OGM2 = 0x04,
57 /* 0x40 - 0x7f: unicast */
58#define BATADV_UNICAST_MIN 0x40
59 BATADV_UNICAST = 0x40,
60 BATADV_UNICAST_FRAG = 0x41,
61 BATADV_UNICAST_4ADDR = 0x42,
62 BATADV_ICMP = 0x43,
63 BATADV_UNICAST_TVLV = 0x44,
64#define BATADV_UNICAST_MAX 0x7f
65 /* 0x80 - 0xff: reserved */
66};
67
68/**
69 * enum batadv_subtype - packet subtype for unicast4addr
70 * @BATADV_P_DATA: user payload
71 * @BATADV_P_DAT_DHT_GET: DHT request message
72 * @BATADV_P_DAT_DHT_PUT: DHT store message
73 * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT
74 */
75enum batadv_subtype {
76 BATADV_P_DATA = 0x01,
77 BATADV_P_DAT_DHT_GET = 0x02,
78 BATADV_P_DAT_DHT_PUT = 0x03,
79 BATADV_P_DAT_CACHE_REPLY = 0x04,
80};
81
82/* this file is included by batctl which needs these defines */
83#define BATADV_COMPAT_VERSION 15
84
85/**
86 * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
87 * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
 88 * previously received from someone other than the best neighbor.
89 * @BATADV_PRIMARIES_FIRST_HOP: flag unused.
90 * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
91 * one hop neighbor on the interface where it was originally received.
92 */
93enum batadv_iv_flags {
94 BATADV_NOT_BEST_NEXT_HOP = 1UL << 0,
95 BATADV_PRIMARIES_FIRST_HOP = 1UL << 1,
96 BATADV_DIRECTLINK = 1UL << 2,
97};
98
99/**
100 * enum batadv_icmp_packettype - ICMP message types
101 * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
102 * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
103 * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination
104 * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops
105 * @BATADV_PARAMETER_PROBLEM: return code for malformed messages
106 * @BATADV_TP: throughput meter packet
107 */
108enum batadv_icmp_packettype {
109 BATADV_ECHO_REPLY = 0,
110 BATADV_DESTINATION_UNREACHABLE = 3,
111 BATADV_ECHO_REQUEST = 8,
112 BATADV_TTL_EXCEEDED = 11,
113 BATADV_PARAMETER_PROBLEM = 12,
114 BATADV_TP = 15,
115};
116
117/**
118 * enum batadv_mcast_flags - flags for multicast capabilities and settings
119 * @BATADV_MCAST_WANT_ALL_UNSNOOPABLES: we want all packets destined for
120 * 224.0.0.0/24 or ff02::1
121 * @BATADV_MCAST_WANT_ALL_IPV4: we want all IPv4 multicast packets
122 * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
123 */
124enum batadv_mcast_flags {
125 BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
126 BATADV_MCAST_WANT_ALL_IPV4 = 1UL << 1,
127 BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
128};
129
130/* tt data subtypes */
131#define BATADV_TT_DATA_TYPE_MASK 0x0F
132
133/**
134 * enum batadv_tt_data_flags - flags for tt data tvlv
135 * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
136 * @BATADV_TT_REQUEST: TT request message
137 * @BATADV_TT_RESPONSE: TT response message
138 * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
139 */
140enum batadv_tt_data_flags {
141 BATADV_TT_OGM_DIFF = 1UL << 0,
142 BATADV_TT_REQUEST = 1UL << 1,
143 BATADV_TT_RESPONSE = 1UL << 2,
144 BATADV_TT_FULL_TABLE = 1UL << 4,
145};
146
147/**
148 * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
149 * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
150 */
151enum batadv_vlan_flags {
152 BATADV_VLAN_HAS_TAG = 1UL << 15,
153};
154
155/**
156 * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance
157 * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address
158 * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address
159 * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc
160 * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table
161 * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet
162 */
163enum batadv_bla_claimframe {
164 BATADV_CLAIM_TYPE_CLAIM = 0x00,
165 BATADV_CLAIM_TYPE_UNCLAIM = 0x01,
166 BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
167 BATADV_CLAIM_TYPE_REQUEST = 0x03,
168 BATADV_CLAIM_TYPE_LOOPDETECT = 0x04,
169};
170
171/**
172 * enum batadv_tvlv_type - tvlv type definitions
173 * @BATADV_TVLV_GW: gateway tvlv
174 * @BATADV_TVLV_DAT: distributed arp table tvlv
175 * @BATADV_TVLV_NC: network coding tvlv
176 * @BATADV_TVLV_TT: translation table tvlv
177 * @BATADV_TVLV_ROAM: roaming advertisement tvlv
178 * @BATADV_TVLV_MCAST: multicast capability tvlv
179 */
180enum batadv_tvlv_type {
181 BATADV_TVLV_GW = 0x01,
182 BATADV_TVLV_DAT = 0x02,
183 BATADV_TVLV_NC = 0x03,
184 BATADV_TVLV_TT = 0x04,
185 BATADV_TVLV_ROAM = 0x05,
186 BATADV_TVLV_MCAST = 0x06,
187};
188
189#pragma pack(2)
190/* the destination hardware field in the ARP frame is used to
191 * transport the claim type and the group id
192 */
193struct batadv_bla_claim_dst {
194 __u8 magic[3]; /* FF:43:05 */
195 __u8 type; /* bla_claimframe */
196 __be16 group; /* group id */
197};
198
199#pragma pack()
200
201/**
202 * struct batadv_ogm_packet - ogm (routing protocol) packet
203 * @packet_type: batman-adv packet type, part of the general header
 204 * @version: batman-adv protocol version, part of the general header
 205 * @ttl: time to live for this packet, part of the general header
206 * @flags: contains routing relevant flags - see enum batadv_iv_flags
207 * @seqno: sequence identification
208 * @orig: address of the source node
209 * @prev_sender: address of the previous sender
210 * @reserved: reserved byte for alignment
211 * @tq: transmission quality
212 * @tvlv_len: length of tvlv data following the ogm header
213 */
214struct batadv_ogm_packet {
215 __u8 packet_type;
216 __u8 version;
217 __u8 ttl;
218 __u8 flags;
219 __be32 seqno;
220 __u8 orig[ETH_ALEN];
221 __u8 prev_sender[ETH_ALEN];
222 __u8 reserved;
223 __u8 tq;
224 __be16 tvlv_len;
225 /* __packed is not needed as the struct size is divisible by 4,
226 * and the largest data type in this struct has a size of 4.
227 */
228};
229
230#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
231
232/**
233 * struct batadv_ogm2_packet - ogm2 (routing protocol) packet
234 * @packet_type: batman-adv packet type, part of the general header
235 * @version: batman-adv protocol version, part of the general header
236 * @ttl: time to live for this packet, part of the general header
 237 * @flags: reserved for routing relevant flags - currently always 0
238 * @seqno: sequence number
239 * @orig: originator mac address
240 * @tvlv_len: length of the appended tvlv buffer (in bytes)
241 * @throughput: the currently flooded path throughput
242 */
243struct batadv_ogm2_packet {
244 __u8 packet_type;
245 __u8 version;
246 __u8 ttl;
247 __u8 flags;
248 __be32 seqno;
249 __u8 orig[ETH_ALEN];
250 __be16 tvlv_len;
251 __be32 throughput;
252 /* __packed is not needed as the struct size is divisible by 4,
253 * and the largest data type in this struct has a size of 4.
254 */
255};
256
257#define BATADV_OGM2_HLEN sizeof(struct batadv_ogm2_packet)
258
259/**
260 * struct batadv_elp_packet - elp (neighbor discovery) packet
261 * @packet_type: batman-adv packet type, part of the general header
 262 * @version: batman-adv protocol version, part of the general header
263 * @orig: originator mac address
264 * @seqno: sequence number
265 * @elp_interval: currently used ELP sending interval in ms
266 */
267struct batadv_elp_packet {
268 __u8 packet_type;
269 __u8 version;
270 __u8 orig[ETH_ALEN];
271 __be32 seqno;
272 __be32 elp_interval;
273};
274
275#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet)
276
277/**
278 * struct batadv_icmp_header - common members among all the ICMP packets
279 * @packet_type: batman-adv packet type, part of the general header
 280 * @version: batman-adv protocol version, part of the general header
 281 * @ttl: time to live for this packet, part of the general header
282 * @msg_type: ICMP packet type
283 * @dst: address of the destination node
284 * @orig: address of the source node
285 * @uid: local ICMP socket identifier
286 * @align: not used - useful for alignment purposes only
287 *
288 * This structure is used for ICMP packets parsing only and it is never sent
289 * over the wire. The alignment field at the end is there to ensure that
290 * members are padded the same way as they are in real packets.
291 */
292struct batadv_icmp_header {
293 __u8 packet_type;
294 __u8 version;
295 __u8 ttl;
296 __u8 msg_type; /* see ICMP message types above */
297 __u8 dst[ETH_ALEN];
298 __u8 orig[ETH_ALEN];
299 __u8 uid;
300 __u8 align[3];
301};
302
303/**
304 * struct batadv_icmp_packet - ICMP packet
305 * @packet_type: batman-adv packet type, part of the general header
 306 * @version: batman-adv protocol version, part of the general header
 307 * @ttl: time to live for this packet, part of the general header
308 * @msg_type: ICMP packet type
309 * @dst: address of the destination node
310 * @orig: address of the source node
311 * @uid: local ICMP socket identifier
312 * @reserved: not used - useful for alignment
313 * @seqno: ICMP sequence number
314 */
315struct batadv_icmp_packet {
316 __u8 packet_type;
317 __u8 version;
318 __u8 ttl;
319 __u8 msg_type; /* see ICMP message types above */
320 __u8 dst[ETH_ALEN];
321 __u8 orig[ETH_ALEN];
322 __u8 uid;
323 __u8 reserved;
324 __be16 seqno;
325};
326
327/**
328 * struct batadv_icmp_tp_packet - ICMP TP Meter packet
329 * @packet_type: batman-adv packet type, part of the general header
 330 * @version: batman-adv protocol version, part of the general header
 331 * @ttl: time to live for this packet, part of the general header
332 * @msg_type: ICMP packet type
333 * @dst: address of the destination node
334 * @orig: address of the source node
335 * @uid: local ICMP socket identifier
336 * @subtype: TP packet subtype (see batadv_icmp_tp_subtype)
337 * @session: TP session identifier
338 * @seqno: the TP sequence number
339 * @timestamp: time when the packet has been sent. This value is filled in a
340 * TP_MSG and echoed back in the next TP_ACK so that the sender can compute the
341 * RTT. Since it is read only by the host which wrote it, there is no need to
342 * store it using network order
343 */
344struct batadv_icmp_tp_packet {
345 __u8 packet_type;
346 __u8 version;
347 __u8 ttl;
348 __u8 msg_type; /* see ICMP message types above */
349 __u8 dst[ETH_ALEN];
350 __u8 orig[ETH_ALEN];
351 __u8 uid;
352 __u8 subtype;
353 __u8 session[2];
354 __be32 seqno;
355 __be32 timestamp;
356};
357
358/**
359 * enum batadv_icmp_tp_subtype - ICMP TP Meter packet subtypes
360 * @BATADV_TP_MSG: Msg from sender to receiver
361 * @BATADV_TP_ACK: acknowledgment from receiver to sender
362 */
363enum batadv_icmp_tp_subtype {
364 BATADV_TP_MSG = 0,
365 BATADV_TP_ACK,
366};
367
368#define BATADV_RR_LEN 16
369
370/**
371 * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
372 * @packet_type: batman-adv packet type, part of the general header
 373 * @version: batman-adv protocol version, part of the general header
 374 * @ttl: time to live for this packet, part of the general header
375 * @msg_type: ICMP packet type
376 * @dst: address of the destination node
377 * @orig: address of the source node
378 * @uid: local ICMP socket identifier
 379 * @rr_cur: number of entries in the rr array
380 * @seqno: ICMP sequence number
381 * @rr: route record array
382 */
383struct batadv_icmp_packet_rr {
384 __u8 packet_type;
385 __u8 version;
386 __u8 ttl;
387 __u8 msg_type; /* see ICMP message types above */
388 __u8 dst[ETH_ALEN];
389 __u8 orig[ETH_ALEN];
390 __u8 uid;
391 __u8 rr_cur;
392 __be16 seqno;
393 __u8 rr[BATADV_RR_LEN][ETH_ALEN];
394};
395
396#define BATADV_ICMP_MAX_PACKET_SIZE sizeof(struct batadv_icmp_packet_rr)
397
398/* All packet headers in front of an ethernet header have to be completely
399 * divisible by 2 but not by 4 to make the payload after the ethernet
400 * header again 4 bytes boundary aligned.
401 *
402 * A packing of 2 is necessary to avoid extra padding at the end of the struct
403 * caused by a structure member which is larger than two bytes. Otherwise
404 * the structure would not fulfill the previously mentioned rule to avoid the
405 * misalignment of the payload after the ethernet header. It may also lead to
406 * leakage of information when the padding is not initialized before sending.
407 */
408#pragma pack(2)
409
410/**
411 * struct batadv_unicast_packet - unicast packet for network payload
412 * @packet_type: batman-adv packet type, part of the general header
413 * @version: batman-adv protocol version, part of the general header
414 * @ttl: time to live for this packet, part of the general header
415 * @ttvn: translation table version number
416 * @dest: originator destination of the unicast packet
417 */
418struct batadv_unicast_packet {
419 __u8 packet_type;
420 __u8 version;
421 __u8 ttl;
422 __u8 ttvn; /* destination translation table version number */
423 __u8 dest[ETH_ALEN];
424 /* "4 bytes boundary + 2 bytes" long to make the payload after the
425 * following ethernet header again 4 bytes boundary aligned
426 */
427};
428
429/**
430 * struct batadv_unicast_4addr_packet - extended unicast packet
431 * @u: common unicast packet header
432 * @src: address of the source
433 * @subtype: packet subtype
434 * @reserved: reserved byte for alignment
435 */
436struct batadv_unicast_4addr_packet {
437 struct batadv_unicast_packet u;
438 __u8 src[ETH_ALEN];
439 __u8 subtype;
440 __u8 reserved;
441 /* "4 bytes boundary + 2 bytes" long to make the payload after the
442 * following ethernet header again 4 bytes boundary aligned
443 */
444};
445
446/**
447 * struct batadv_frag_packet - fragmented packet
448 * @packet_type: batman-adv packet type, part of the general header
449 * @version: batman-adv protocol version, part of the general header
450 * @ttl: time to live for this packet, part of the general header
451 * @dest: final destination used when routing fragments
452 * @orig: originator of the fragment used when merging the packet
453 * @no: fragment number within this sequence
454 * @priority: priority of frame, from ToS IP precedence or 802.1p
455 * @reserved: reserved byte for alignment
456 * @seqno: sequence identification
457 * @total_size: size of the merged packet
458 */
459struct batadv_frag_packet {
460 __u8 packet_type;
461 __u8 version; /* batman version field */
462 __u8 ttl;
463#if defined(__BIG_ENDIAN_BITFIELD)
464 __u8 no:4;
465 __u8 priority:3;
466 __u8 reserved:1;
467#elif defined(__LITTLE_ENDIAN_BITFIELD)
468 __u8 reserved:1;
469 __u8 priority:3;
470 __u8 no:4;
471#else
472#error "unknown bitfield endianness"
473#endif
474 __u8 dest[ETH_ALEN];
475 __u8 orig[ETH_ALEN];
476 __be16 seqno;
477 __be16 total_size;
478};
479
480/**
481 * struct batadv_bcast_packet - broadcast packet for network payload
482 * @packet_type: batman-adv packet type, part of the general header
483 * @version: batman-adv protocol version, part of the general header
484 * @ttl: time to live for this packet, part of the general header
485 * @reserved: reserved byte for alignment
486 * @seqno: sequence identification
487 * @orig: originator of the broadcast packet
488 */
489struct batadv_bcast_packet {
490 __u8 packet_type;
491 __u8 version; /* batman version field */
492 __u8 ttl;
493 __u8 reserved;
494 __be32 seqno;
495 __u8 orig[ETH_ALEN];
496 /* "4 bytes boundary + 2 bytes" long to make the payload after the
497 * following ethernet header again 4 bytes boundary aligned
498 */
499};
500
501/**
502 * struct batadv_coded_packet - network coded packet
503 * @packet_type: batman-adv packet type, part of the general header
504 * @version: batman-adv protocol version, part of the general header
505 * @ttl: time to live for this packet, part of the general header
506 * @first_source: original source of first included packet
507 * @first_orig_dest: original destination of first included packet
508 * @first_crc: checksum of first included packet
509 * @first_ttvn: tt-version number of first included packet
510 * @second_ttl: ttl of second packet
511 * @second_dest: second receiver of this coded packet
512 * @second_source: original source of second included packet
513 * @second_orig_dest: original destination of second included packet
514 * @second_crc: checksum of second included packet
515 * @second_ttvn: tt version number of second included packet
516 * @coded_len: length of network coded part of the payload
517 */
518struct batadv_coded_packet {
519 __u8 packet_type;
520 __u8 version; /* batman version field */
521 __u8 ttl;
522 __u8 first_ttvn;
523 /* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */
524 __u8 first_source[ETH_ALEN];
525 __u8 first_orig_dest[ETH_ALEN];
526 __be32 first_crc;
527 __u8 second_ttl;
528 __u8 second_ttvn;
529 __u8 second_dest[ETH_ALEN];
530 __u8 second_source[ETH_ALEN];
531 __u8 second_orig_dest[ETH_ALEN];
532 __be32 second_crc;
533 __be16 coded_len;
534};
535
536#pragma pack()
537
538/**
539 * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
540 * @packet_type: batman-adv packet type, part of the general header
541 * @version: batman-adv protocol version, part of the general header
542 * @ttl: time to live for this packet, part of the general header
543 * @reserved: reserved field (for packet alignment)
544 * @src: address of the source
545 * @dst: address of the destination
546 * @tvlv_len: length of tvlv data following the unicast tvlv header
547 * @align: 2 bytes to align the header to a 4 byte boundary
548 */
549struct batadv_unicast_tvlv_packet {
550 __u8 packet_type;
551 __u8 version; /* batman version field */
552 __u8 ttl;
553 __u8 reserved;
554 __u8 dst[ETH_ALEN];
555 __u8 src[ETH_ALEN];
556 __be16 tvlv_len;
557 __u16 align;
558};
559
560/**
561 * struct batadv_tvlv_hdr - base tvlv header struct
562 * @type: tvlv container type (see batadv_tvlv_type)
563 * @version: tvlv container version
564 * @len: tvlv container length
565 */
566struct batadv_tvlv_hdr {
567 __u8 type;
568 __u8 version;
569 __be16 len;
570};
571
572/**
573 * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
574 * container
575 * @bandwidth_down: advertised uplink download bandwidth
576 * @bandwidth_up: advertised uplink upload bandwidth
577 */
578struct batadv_tvlv_gateway_data {
579 __be32 bandwidth_down;
580 __be32 bandwidth_up;
581};
582
583/**
584 * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
585 * @flags: translation table flags (see batadv_tt_data_flags)
586 * @ttvn: translation table version number
587 * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
588 * one batadv_tvlv_tt_vlan_data object per announced vlan
589 */
590struct batadv_tvlv_tt_data {
591 __u8 flags;
592 __u8 ttvn;
593 __be16 num_vlan;
594};
595
596/**
597 * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
598 * the tt tvlv container
599 * @crc: crc32 checksum of the entries belonging to this vlan
600 * @vid: vlan identifier
601 * @reserved: unused, useful for alignment purposes
602 */
603struct batadv_tvlv_tt_vlan_data {
604 __be32 crc;
605 __be16 vid;
606 __u16 reserved;
607};
608
609/**
610 * struct batadv_tvlv_tt_change - translation table diff data
611 * @flags: status indicators concerning the non-mesh client (see
612 * batadv_tt_client_flags)
613 * @reserved: reserved field - useful for alignment purposes only
614 * @addr: mac address of non-mesh client that triggered this tt change
615 * @vid: VLAN identifier
616 */
617struct batadv_tvlv_tt_change {
618 __u8 flags;
619 __u8 reserved[3];
620 __u8 addr[ETH_ALEN];
621 __be16 vid;
622};
623
624/**
625 * struct batadv_tvlv_roam_adv - roaming advertisement
626 * @client: mac address of roaming client
627 * @vid: VLAN identifier
628 */
629struct batadv_tvlv_roam_adv {
630 __u8 client[ETH_ALEN];
631 __be16 vid;
632};
633
634/**
635 * struct batadv_tvlv_mcast_data - payload of a multicast tvlv
636 * @flags: multicast flags announced by the orig node
637 * @reserved: reserved field
638 */
639struct batadv_tvlv_mcast_data {
640 __u8 flags;
641 __u8 reserved[3];
642};
643
644#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
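
The packing rule spelled out before the #pragma pack(2) block above can be checked at build time from userspace. The following is only a sketch: it assumes the header is installed as <linux/batadv_packet.h> and a C11 compiler providing static_assert.

/* Illustrative build-time check of the rule described above: every header
 * inside the pack(2) region must be "4 byte boundary + 2 bytes" long so the
 * payload behind the inner ethernet header stays 4-byte aligned.
 * Assumes <linux/batadv_packet.h> is installed and C11 static_assert.
 */
#include <assert.h>
#include <linux/batadv_packet.h>

static_assert(sizeof(struct batadv_unicast_packet) % 4 == 2,
	      "unicast header breaks payload alignment");
static_assert(sizeof(struct batadv_unicast_4addr_packet) % 4 == 2,
	      "4addr header breaks payload alignment");
static_assert(sizeof(struct batadv_bcast_packet) % 4 == 2,
	      "bcast header breaks payload alignment");
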
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index efd641c8a5d6..ae00c99cbed0 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,18 +1,25 @@
1/* SPDX-License-Identifier: MIT */
1/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors: 2/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
2 * 3 *
3 * Matthias Schiffer 4 * Matthias Schiffer
4 * 5 *
5 * Permission to use, copy, modify, and/or distribute this software for any 6 * Permission is hereby granted, free of charge, to any person obtaining a
6 * purpose with or without fee is hereby granted, provided that the above 7 * copy of this software and associated documentation files (the "Software"),
7 * copyright notice and this permission notice appear in all copies. 8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
8 * 12 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * The above copyright notice and this permission notice shall be included in
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * all copies or substantial portions of the Software.
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 *
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
16 */ 23 */
17 24
18#ifndef _UAPI_LINUX_BATMAN_ADV_H_ 25#ifndef _UAPI_LINUX_BATMAN_ADV_H_
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4c223ab30293..db6bdc375126 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
17#define BPF_ALU64 0x07 /* alu mode in double word width */ 17#define BPF_ALU64 0x07 /* alu mode in double word width */
18 18
19/* ld/ldx fields */ 19/* ld/ldx fields */
20#define BPF_DW 0x18 /* double word */ 20#define BPF_DW 0x18 /* double word (64-bit) */
21#define BPF_XADD 0xc0 /* exclusive add */ 21#define BPF_XADD 0xc0 /* exclusive add */
22 22
23/* alu/jmp fields */ 23/* alu/jmp fields */
@@ -197,8 +197,14 @@ enum bpf_attach_type {
197 */ 197 */
198#define BPF_F_STRICT_ALIGNMENT (1U << 0) 198#define BPF_F_STRICT_ALIGNMENT (1U << 0)
199 199
200/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
200#define BPF_PSEUDO_MAP_FD 1 201#define BPF_PSEUDO_MAP_FD 1
201 202
203/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
204 * offset to another bpf function
205 */
206#define BPF_PSEUDO_CALL 1
207
202/* flags for BPF_MAP_UPDATE_ELEM command */ 208/* flags for BPF_MAP_UPDATE_ELEM command */
203#define BPF_ANY 0 /* create new element or update existing */ 209#define BPF_ANY 0 /* create new element or update existing */
204#define BPF_NOEXIST 1 /* create new element if it didn't exist */ 210#define BPF_NOEXIST 1 /* create new element if it didn't exist */
@@ -239,6 +245,7 @@ union bpf_attr {
239 * BPF_F_NUMA_NODE is set). 245 * BPF_F_NUMA_NODE is set).
240 */ 246 */
241 char map_name[BPF_OBJ_NAME_LEN]; 247 char map_name[BPF_OBJ_NAME_LEN];
248 __u32 map_ifindex; /* ifindex of netdev to create on */
242 }; 249 };
243 250
244 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ 251 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -635,6 +642,14 @@ union bpf_attr {
635 * @optlen: length of optval in bytes 642 * @optlen: length of optval in bytes
636 * Return: 0 or negative error 643 * Return: 0 or negative error
637 * 644 *
645 * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
646 * Set callback flags for sock_ops
647 * @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
648 * @flags: flags value
649 * Return: 0 for no error
650 * -EINVAL if there is no full tcp socket
651 * bits in flags that are not supported by current kernel
652 *
638 * int bpf_skb_adjust_room(skb, len_diff, mode, flags) 653 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
639 * Grow or shrink room in sk_buff. 654 * Grow or shrink room in sk_buff.
640 * @skb: pointer to skb 655 * @skb: pointer to skb
@@ -677,6 +692,10 @@ union bpf_attr {
677 * @buf: buf to fill 692 * @buf: buf to fill
678 * @buf_size: size of the buf 693 * @buf_size: size of the buf
679 * Return : 0 on success or negative error code 694 * Return : 0 on success or negative error code
695 *
696 * int bpf_override_return(pt_regs, rc)
697 * @pt_regs: pointer to struct pt_regs
698 * @rc: the return value to set
680 */ 699 */
681#define __BPF_FUNC_MAPPER(FN) \ 700#define __BPF_FUNC_MAPPER(FN) \
682 FN(unspec), \ 701 FN(unspec), \
@@ -736,7 +755,9 @@ union bpf_attr {
736 FN(xdp_adjust_meta), \ 755 FN(xdp_adjust_meta), \
737 FN(perf_event_read_value), \ 756 FN(perf_event_read_value), \
738 FN(perf_prog_read_value), \ 757 FN(perf_prog_read_value), \
739 FN(getsockopt), 758 FN(getsockopt), \
759 FN(override_return), \
760 FN(sock_ops_cb_flags_set),
740 761
741/* integer value in 'imm' field of BPF_CALL instruction selects which helper 762/* integer value in 'imm' field of BPF_CALL instruction selects which helper
742 * function eBPF program intends to call 763 * function eBPF program intends to call
@@ -888,6 +909,9 @@ struct xdp_md {
888 __u32 data; 909 __u32 data;
889 __u32 data_end; 910 __u32 data_end;
890 __u32 data_meta; 911 __u32 data_meta;
 912 /* Accesses below go through struct xdp_rxq_info */
913 __u32 ingress_ifindex; /* rxq->dev->ifindex */
914 __u32 rx_queue_index; /* rxq->queue_index */
891}; 915};
892 916
893enum sk_action { 917enum sk_action {
@@ -910,6 +934,9 @@ struct bpf_prog_info {
910 __u32 nr_map_ids; 934 __u32 nr_map_ids;
911 __aligned_u64 map_ids; 935 __aligned_u64 map_ids;
912 char name[BPF_OBJ_NAME_LEN]; 936 char name[BPF_OBJ_NAME_LEN];
937 __u32 ifindex;
938 __u64 netns_dev;
939 __u64 netns_ino;
913} __attribute__((aligned(8))); 940} __attribute__((aligned(8)));
914 941
915struct bpf_map_info { 942struct bpf_map_info {
@@ -920,6 +947,9 @@ struct bpf_map_info {
920 __u32 max_entries; 947 __u32 max_entries;
921 __u32 map_flags; 948 __u32 map_flags;
922 char name[BPF_OBJ_NAME_LEN]; 949 char name[BPF_OBJ_NAME_LEN];
950 __u32 ifindex;
951 __u64 netns_dev;
952 __u64 netns_ino;
923} __attribute__((aligned(8))); 953} __attribute__((aligned(8)));
924 954
925/* User bpf_sock_ops struct to access socket values and specify request ops 955/* User bpf_sock_ops struct to access socket values and specify request ops
@@ -931,8 +961,9 @@ struct bpf_map_info {
931struct bpf_sock_ops { 961struct bpf_sock_ops {
932 __u32 op; 962 __u32 op;
933 union { 963 union {
934 __u32 reply; 964 __u32 args[4]; /* Optionally passed to bpf program */
935 __u32 replylong[4]; 965 __u32 reply; /* Returned by bpf program */
966 __u32 replylong[4]; /* Optionally returned by bpf prog */
936 }; 967 };
937 __u32 family; 968 __u32 family;
938 __u32 remote_ip4; /* Stored in network byte order */ 969 __u32 remote_ip4; /* Stored in network byte order */
@@ -941,8 +972,45 @@ struct bpf_sock_ops {
941 __u32 local_ip6[4]; /* Stored in network byte order */ 972 __u32 local_ip6[4]; /* Stored in network byte order */
942 __u32 remote_port; /* Stored in network byte order */ 973 __u32 remote_port; /* Stored in network byte order */
943 __u32 local_port; /* stored in host byte order */ 974 __u32 local_port; /* stored in host byte order */
975 __u32 is_fullsock; /* Some TCP fields are only valid if
976 * there is a full socket. If not, the
977 * fields read as zero.
978 */
979 __u32 snd_cwnd;
980 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
981 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
982 __u32 state;
983 __u32 rtt_min;
984 __u32 snd_ssthresh;
985 __u32 rcv_nxt;
986 __u32 snd_nxt;
987 __u32 snd_una;
988 __u32 mss_cache;
989 __u32 ecn_flags;
990 __u32 rate_delivered;
991 __u32 rate_interval_us;
992 __u32 packets_out;
993 __u32 retrans_out;
994 __u32 total_retrans;
995 __u32 segs_in;
996 __u32 data_segs_in;
997 __u32 segs_out;
998 __u32 data_segs_out;
999 __u32 lost_out;
1000 __u32 sacked_out;
1001 __u32 sk_txhash;
1002 __u64 bytes_received;
1003 __u64 bytes_acked;
944}; 1004};
945 1005
1006/* Definitions for bpf_sock_ops_cb_flags */
1007#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
1008#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
1009#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
1010#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
1011 * supported cb flags
1012 */
1013
946/* List of known BPF sock_ops operators. 1014/* List of known BPF sock_ops operators.
947 * New entries can only be added at the end 1015 * New entries can only be added at the end
948 */ 1016 */
@@ -976,6 +1044,43 @@ enum {
976 * a congestion threshold. RTTs above 1044 * a congestion threshold. RTTs above
977 * this indicate congestion 1045 * this indicate congestion
978 */ 1046 */
1047 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
1048 * Arg1: value of icsk_retransmits
1049 * Arg2: value of icsk_rto
1050 * Arg3: whether RTO has expired
1051 */
1052 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
1053 * Arg1: sequence number of 1st byte
1054 * Arg2: # segments
1055 * Arg3: return value of
1056 * tcp_transmit_skb (0 => success)
1057 */
1058 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
1059 * Arg1: old_state
1060 * Arg2: new_state
1061 */
1062};
1063
1064/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
1065 * changes between the TCP and BPF versions. Ideally this should never happen.
1066 * If it does, we need to add code to convert them before calling
1067 * the BPF sock_ops function.
1068 */
1069enum {
1070 BPF_TCP_ESTABLISHED = 1,
1071 BPF_TCP_SYN_SENT,
1072 BPF_TCP_SYN_RECV,
1073 BPF_TCP_FIN_WAIT1,
1074 BPF_TCP_FIN_WAIT2,
1075 BPF_TCP_TIME_WAIT,
1076 BPF_TCP_CLOSE,
1077 BPF_TCP_CLOSE_WAIT,
1078 BPF_TCP_LAST_ACK,
1079 BPF_TCP_LISTEN,
1080 BPF_TCP_CLOSING, /* Now a valid state */
1081 BPF_TCP_NEW_SYN_RECV,
1082
1083 BPF_TCP_MAX_STATES /* Leave at the end! */
979}; 1084};
980 1085
981#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ 1086#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
@@ -995,7 +1100,8 @@ struct bpf_perf_event_value {
995#define BPF_DEVCG_DEV_CHAR (1ULL << 1) 1100#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
996 1101
997struct bpf_cgroup_dev_ctx { 1102struct bpf_cgroup_dev_ctx {
998 __u32 access_type; /* (access << 16) | type */ 1103 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
1104 __u32 access_type;
999 __u32 major; 1105 __u32 major;
1000 __u32 minor; 1106 __u32 minor;
1001}; 1107};
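
A rough idea of how the sock_ops additions above fit together, sketched as a minimal BPF-C program: it enables the RTO and state-change callbacks with the new helper and inspects the BPF_TCP_* state passed in args[]. The SEC() macro and the function-pointer style helper declaration are toolchain conventions assumed here, not part of this patch.

/* Minimal sketch, not a tested program: enable per-socket callbacks with
 * the new bpf_sock_ops_cb_flags_set() helper and watch TCP state changes.
 * SEC() and the helper declaration follow the usual BPF toolchain idiom
 * and are assumptions of this sketch.
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

static long (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *ops, int flags) =
	(void *) BPF_FUNC_sock_ops_cb_flags_set;

SEC("sockops")
int watch_tcp(struct bpf_sock_ops *ops)
{
	switch (ops->op) {
	case BPF_SOCK_OPS_TCP_CONNECT_CB:
		/* ask for RTO and state-change callbacks on this socket */
		bpf_sock_ops_cb_flags_set(ops, BPF_SOCK_OPS_RTO_CB_FLAG |
					       BPF_SOCK_OPS_STATE_CB_FLAG);
		break;
	case BPF_SOCK_OPS_STATE_CB:
		/* Arg1/Arg2 arrive in args[]: old and new BPF_TCP_* state */
		if (ops->args[1] == BPF_TCP_CLOSE) {
			/* connection fully closed; counters such as
			 * ops->total_retrans could be read here
			 */
		}
		break;
	}
	return 1;
}
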
diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h
index 18be90725ab0..ee97668bdadb 100644
--- a/include/uapi/linux/bpf_common.h
+++ b/include/uapi/linux/bpf_common.h
@@ -15,9 +15,10 @@
15 15
16/* ld/ldx fields */ 16/* ld/ldx fields */
17#define BPF_SIZE(code) ((code) & 0x18) 17#define BPF_SIZE(code) ((code) & 0x18)
18#define BPF_W 0x00 18#define BPF_W 0x00 /* 32-bit */
19#define BPF_H 0x08 19#define BPF_H 0x08 /* 16-bit */
20#define BPF_B 0x10 20#define BPF_B 0x10 /* 8-bit */
21/* eBPF BPF_DW 0x18 64-bit */
21#define BPF_MODE(code) ((code) & 0xe0) 22#define BPF_MODE(code) ((code) & 0xe0)
22#define BPF_IMM 0x00 23#define BPF_IMM 0x00
23#define BPF_ABS 0x20 24#define BPF_ABS 0x20
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ce615b75e855..c8d99b9ca550 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -33,7 +33,12 @@ struct btrfs_ioctl_vol_args {
33 char name[BTRFS_PATH_NAME_MAX + 1]; 33 char name[BTRFS_PATH_NAME_MAX + 1];
34}; 34};
35 35
36#define BTRFS_DEVICE_PATH_NAME_MAX 1024 36#define BTRFS_DEVICE_PATH_NAME_MAX 1024
37#define BTRFS_SUBVOL_NAME_MAX 4039
38
39#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
40#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
41#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
37 42
38#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3) 43#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
39 44
@@ -101,11 +106,7 @@ struct btrfs_ioctl_qgroup_limit_args {
101 * - BTRFS_IOC_SUBVOL_GETFLAGS 106 * - BTRFS_IOC_SUBVOL_GETFLAGS
102 * - BTRFS_IOC_SUBVOL_SETFLAGS 107 * - BTRFS_IOC_SUBVOL_SETFLAGS
103 */ 108 */
104#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
105#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
106#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
107 109
108#define BTRFS_SUBVOL_NAME_MAX 4039
109struct btrfs_ioctl_vol_args_v2 { 110struct btrfs_ioctl_vol_args_v2 {
110 __s64 fd; 111 __s64 fd;
111 __u64 transid; 112 __u64 transid;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 6d6e5da51527..aff1356c2bb8 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -456,6 +456,8 @@ struct btrfs_free_space_header {
456 456
457#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) 457#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
458#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) 458#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
459#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
460#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
459 461
460 462
461/* 463/*
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 96710e76d5ce..9f56fad4785b 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -132,6 +132,7 @@ enum {
132 IFLA_CAN_TERMINATION_CONST, 132 IFLA_CAN_TERMINATION_CONST,
133 IFLA_CAN_BITRATE_CONST, 133 IFLA_CAN_BITRATE_CONST,
134 IFLA_CAN_DATA_BITRATE_CONST, 134 IFLA_CAN_DATA_BITRATE_CONST,
135 IFLA_CAN_BITRATE_MAX,
135 __IFLA_CAN_MAX 136 __IFLA_CAN_MAX
136}; 137};
137 138
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 6665df69e26a..1df65a4c2044 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -70,6 +70,13 @@ enum devlink_command {
70 DEVLINK_CMD_DPIPE_ENTRIES_GET, 70 DEVLINK_CMD_DPIPE_ENTRIES_GET,
71 DEVLINK_CMD_DPIPE_HEADERS_GET, 71 DEVLINK_CMD_DPIPE_HEADERS_GET,
72 DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET, 72 DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
73 DEVLINK_CMD_RESOURCE_SET,
74 DEVLINK_CMD_RESOURCE_DUMP,
75
76 /* Hot driver reload, makes configuration changes take place. The
77 * devlink instance is not released during the process.
78 */
79 DEVLINK_CMD_RELOAD,
73 80
74 /* add new commands above here */ 81 /* add new commands above here */
75 __DEVLINK_CMD_MAX, 82 __DEVLINK_CMD_MAX,
@@ -202,6 +209,20 @@ enum devlink_attr {
202 DEVLINK_ATTR_PAD, 209 DEVLINK_ATTR_PAD,
203 210
204 DEVLINK_ATTR_ESWITCH_ENCAP_MODE, /* u8 */ 211 DEVLINK_ATTR_ESWITCH_ENCAP_MODE, /* u8 */
212 DEVLINK_ATTR_RESOURCE_LIST, /* nested */
213 DEVLINK_ATTR_RESOURCE, /* nested */
214 DEVLINK_ATTR_RESOURCE_NAME, /* string */
215 DEVLINK_ATTR_RESOURCE_ID, /* u64 */
216 DEVLINK_ATTR_RESOURCE_SIZE, /* u64 */
217 DEVLINK_ATTR_RESOURCE_SIZE_NEW, /* u64 */
218 DEVLINK_ATTR_RESOURCE_SIZE_VALID, /* u8 */
219 DEVLINK_ATTR_RESOURCE_SIZE_MIN, /* u64 */
220 DEVLINK_ATTR_RESOURCE_SIZE_MAX, /* u64 */
221 DEVLINK_ATTR_RESOURCE_SIZE_GRAN, /* u64 */
222 DEVLINK_ATTR_RESOURCE_UNIT, /* u8 */
223 DEVLINK_ATTR_RESOURCE_OCC, /* u64 */
224 DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */
225 DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */
205 226
206 /* add new attributes above here, update the policy in devlink.c */ 227 /* add new attributes above here, update the policy in devlink.c */
207 228
@@ -245,4 +266,8 @@ enum devlink_dpipe_header_id {
245 DEVLINK_DPIPE_HEADER_IPV6, 266 DEVLINK_DPIPE_HEADER_IPV6,
246}; 267};
247 268
269enum devlink_resource_unit {
270 DEVLINK_RESOURCE_UNIT_ENTRY,
271};
272
248#endif /* _UAPI_LINUX_DEVLINK_H_ */ 273#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h
index c10f1324b4ca..5f3c5a918f00 100644
--- a/include/uapi/linux/dvb/dmx.h
+++ b/include/uapi/linux/dvb/dmx.h
@@ -211,6 +211,61 @@ struct dmx_stc {
211 __u64 stc; 211 __u64 stc;
212}; 212};
213 213
214/**
215 * struct dmx_buffer - dmx buffer info
216 *
217 * @index: id number of the buffer
218 * @bytesused: number of bytes occupied by data in the buffer (payload);
219 * @offset: for buffers with memory == DMX_MEMORY_MMAP;
220 * offset from the start of the device memory for this plane,
221 * (or a "cookie" that should be passed to mmap() as offset)
222 * @length: size in bytes of the buffer
223 *
224 * Contains data exchanged by application and driver using one of the streaming
225 * I/O methods.
226 */
227struct dmx_buffer {
228 __u32 index;
229 __u32 bytesused;
230 __u32 offset;
231 __u32 length;
232};
233
234/**
235 * struct dmx_requestbuffers - request dmx buffer information
236 *
237 * @count: number of requested buffers,
238 * @size: size in bytes of the requested buffer
239 *
240 * Contains data used for requesting a dmx buffer.
241 * All reserved fields must be set to zero.
242 */
243struct dmx_requestbuffers {
244 __u32 count;
245 __u32 size;
246};
247
248/**
249 * struct dmx_exportbuffer - export of dmx buffer as DMABUF file descriptor
250 *
251 * @index: id number of the buffer
252 * @flags: flags for newly created file, currently only O_CLOEXEC is
253 * supported, refer to manual of open syscall for more details
254 * @fd: file descriptor associated with DMABUF (set by driver)
255 *
256 * Contains data used for exporting a dmx buffer as DMABUF file descriptor.
257 * The buffer is identified by a 'cookie' returned by DMX_QUERYBUF
258 * (identical to the cookie used to mmap() the buffer to userspace). All
259 * reserved fields must be set to zero. The field reserved0 is expected to
260 * become a structure 'type' allowing an alternative layout of the structure
261 * content. Therefore this field should not be used for any other extensions.
262 */
263struct dmx_exportbuffer {
264 __u32 index;
265 __u32 flags;
266 __s32 fd;
267};
268
214#define DMX_START _IO('o', 41) 269#define DMX_START _IO('o', 41)
215#define DMX_STOP _IO('o', 42) 270#define DMX_STOP _IO('o', 42)
216#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params) 271#define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params)
@@ -231,4 +286,10 @@ typedef struct dmx_filter dmx_filter_t;
231 286
232#endif 287#endif
233 288
234#endif /* _UAPI_DVBDMX_H_ */ 289#define DMX_REQBUFS _IOWR('o', 60, struct dmx_requestbuffers)
290#define DMX_QUERYBUF _IOWR('o', 61, struct dmx_buffer)
291#define DMX_EXPBUF _IOWR('o', 62, struct dmx_exportbuffer)
292#define DMX_QBUF _IOWR('o', 63, struct dmx_buffer)
293#define DMX_DQBUF _IOWR('o', 64, struct dmx_buffer)
294
295#endif /* _DVBDMX_H_ */
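
The new dmx_* structures and the DMX_REQBUFS/DMX_QUERYBUF/DMX_EXPBUF/DMX_QBUF/DMX_DQBUF ioctls mirror the videobuf2 mmap streaming flow. A hedged userspace sketch of that flow follows; the device node and buffer sizing are illustrative, a demux filter still has to be configured separately (e.g. with the pre-existing DMX_SET_PES_FILTER), and error handling is omitted.

/* Hedged sketch of the new demux streaming I/O: request buffers, mmap
 * them via the QUERYBUF "cookie" offset, then cycle QBUF/DQBUF.
 * Device path and counts are illustrative; error handling omitted.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dvb/dmx.h>

void demux_stream(void)
{
	int fd = open("/dev/dvb/adapter0/demux0", O_RDWR);
	struct dmx_requestbuffers req = { .count = 4, .size = 188 * 1024 };
	struct dmx_buffer buf = { 0 };
	void *plane[4];
	__u32 i;

	ioctl(fd, DMX_REQBUFS, &req);
	for (i = 0; i < req.count && i < 4; i++) {
		buf.index = i;
		ioctl(fd, DMX_QUERYBUF, &buf);	/* fills offset and length */
		plane[i] = mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
				fd, buf.offset);
		ioctl(fd, DMX_QBUF, &buf);	/* hand the buffer to the driver */
	}
	for (;;) {
		ioctl(fd, DMX_DQBUF, &buf);	/* returns a filled buffer */
		/* buf.bytesused bytes of TS data at plane[buf.index] */
		ioctl(fd, DMX_QBUF, &buf);	/* recycle it */
	}
}
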
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index b297b65845d6..4f9b4551c534 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -547,7 +547,10 @@ enum fe_interleaving {
547#define DTV_STAT_ERROR_BLOCK_COUNT 68 547#define DTV_STAT_ERROR_BLOCK_COUNT 68
548#define DTV_STAT_TOTAL_BLOCK_COUNT 69 548#define DTV_STAT_TOTAL_BLOCK_COUNT 69
549 549
550#define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT 550/* Physical layer scrambling */
551#define DTV_SCRAMBLING_SEQUENCE_INDEX 70
552
553#define DTV_MAX_COMMAND DTV_SCRAMBLING_SEQUENCE_INDEX
551 554
552/** 555/**
553 * enum fe_pilot - Type of pilot tone 556 * enum fe_pilot - Type of pilot tone
@@ -756,16 +759,15 @@ enum fecap_scale_params {
756/** 759/**
757 * struct dtv_stats - Used for reading a DTV status property 760 * struct dtv_stats - Used for reading a DTV status property
758 * 761 *
759 * @scale: Filled with enum fecap_scale_params - the scale 762 * @scale:
760 * in usage for that parameter 763 * Filled with enum fecap_scale_params - the scale in usage
761 * 764 * for that parameter
762 * The ``{unnamed_union}`` may have either one of the values below:
763 * 765 *
764 * %svalue 766 * @svalue:
765 * integer value of the measure, for %FE_SCALE_DECIBEL, 767 * integer value of the measure, for %FE_SCALE_DECIBEL,
766 * used for dB measures. The unit is 0.001 dB. 768 * used for dB measures. The unit is 0.001 dB.
767 * 769 *
768 * %uvalue 770 * @uvalue:
769 * unsigned integer value of the measure, used when @scale is 771 * unsigned integer value of the measure, used when @scale is
770 * either %FE_SCALE_RELATIVE or %FE_SCALE_COUNTER. 772 * either %FE_SCALE_RELATIVE or %FE_SCALE_COUNTER.
771 * 773 *
@@ -828,19 +830,19 @@ struct dtv_fe_stats {
828/** 830/**
829 * struct dtv_property - store one of frontend command and its value 831 * struct dtv_property - store one of frontend command and its value
830 * 832 *
831 * @cmd: Digital TV command. 833 * @cmd: Digital TV command.
832 * @reserved: Not used. 834 * @reserved: Not used.
833 * @u: Union with the values for the command. 835 * @u: Union with the values for the command.
 834 * @result: Unused 836 * @u.data: An unsigned 32-bit integer with the command value.
835 * 837 * @u.buffer: Struct to store bigger properties.
836 * The @u union may have either one of the values below: 838 * Currently unused.
839 * @u.buffer.data: an unsigned 32-bits array.
840 * @u.buffer.len: number of elements of the buffer.
841 * @u.buffer.reserved1: Reserved.
842 * @u.buffer.reserved2: Reserved.
843 * @u.st: a &struct dtv_fe_stats array of statistics.
844 * @result: Currently unused.
837 * 845 *
838 * %data
839 * an unsigned 32-bits number.
840 * %st
841 * a &struct dtv_fe_stats array of statistics.
842 * %buffer
843 * a buffer of up to 32 characters (currently unused).
844 */ 846 */
845struct dtv_property { 847struct dtv_property {
846 __u32 cmd; 848 __u32 cmd;
diff --git a/include/uapi/linux/dvb/version.h b/include/uapi/linux/dvb/version.h
index 02e32ea83984..2c5cffe6d2a0 100644
--- a/include/uapi/linux/dvb/version.h
+++ b/include/uapi/linux/dvb/version.h
@@ -25,6 +25,6 @@
25#define _DVBVERSION_H_ 25#define _DVBVERSION_H_
26 26
27#define DVB_API_VERSION 5 27#define DVB_API_VERSION 5
28#define DVB_API_VERSION_MINOR 10 28#define DVB_API_VERSION_MINOR 11
29 29
30#endif /*_DVBVERSION_H_*/ 30#endif /*_DVBVERSION_H_*/
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index 4d51f98182bb..df3d7028c807 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -83,11 +83,11 @@ typedef enum {
83#define VIDEO_CMD_CONTINUE (3) 83#define VIDEO_CMD_CONTINUE (3)
84 84
85/* Flags for VIDEO_CMD_FREEZE */ 85/* Flags for VIDEO_CMD_FREEZE */
86#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0) 86#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0)
87 87
88/* Flags for VIDEO_CMD_STOP */ 88/* Flags for VIDEO_CMD_STOP */
89#define VIDEO_CMD_STOP_TO_BLACK (1 << 0) 89#define VIDEO_CMD_STOP_TO_BLACK (1 << 0)
90#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1) 90#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1)
91 91
92/* Play input formats: */ 92/* Play input formats: */
93/* The decoder has no special format requirements */ 93/* The decoder has no special format requirements */
@@ -124,8 +124,8 @@ struct video_command {
124/* FIELD_UNKNOWN can be used if the hardware does not know whether 124/* FIELD_UNKNOWN can be used if the hardware does not know whether
125 the Vsync is for an odd, even or progressive (i.e. non-interlaced) 125 the Vsync is for an odd, even or progressive (i.e. non-interlaced)
126 field. */ 126 field. */
127#define VIDEO_VSYNC_FIELD_UNKNOWN (0) 127#define VIDEO_VSYNC_FIELD_UNKNOWN (0)
128#define VIDEO_VSYNC_FIELD_ODD (1) 128#define VIDEO_VSYNC_FIELD_ODD (1)
129#define VIDEO_VSYNC_FIELD_EVEN (2) 129#define VIDEO_VSYNC_FIELD_EVEN (2)
130#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3) 130#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3)
131 131
@@ -133,8 +133,8 @@ struct video_event {
133 __s32 type; 133 __s32 type;
134#define VIDEO_EVENT_SIZE_CHANGED 1 134#define VIDEO_EVENT_SIZE_CHANGED 1
135#define VIDEO_EVENT_FRAME_RATE_CHANGED 2 135#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
136#define VIDEO_EVENT_DECODER_STOPPED 3 136#define VIDEO_EVENT_DECODER_STOPPED 3
137#define VIDEO_EVENT_VSYNC 4 137#define VIDEO_EVENT_VSYNC 4
138 /* unused, make sure to use atomic time for y2038 if it ever gets used */ 138 /* unused, make sure to use atomic time for y2038 if it ever gets used */
139 long timestamp; 139 long timestamp;
140 union { 140 union {
@@ -268,9 +268,9 @@ typedef __u16 video_attributes_t;
268#define VIDEO_GET_PTS _IOR('o', 57, __u64) 268#define VIDEO_GET_PTS _IOR('o', 57, __u64)
269 269
270/* Read the number of displayed frames since the decoder was started */ 270/* Read the number of displayed frames since the decoder was started */
271#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64) 271#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64)
272 272
273#define VIDEO_COMMAND _IOWR('o', 59, struct video_command) 273#define VIDEO_COMMAND _IOWR('o', 59, struct video_command)
274#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command) 274#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command)
275 275
276#endif /* _UAPI_DVBVIDEO_H_ */ 276#endif /* _UAPI_DVBVIDEO_H_ */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index bb6836986200..3bf73fb58045 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -396,6 +396,7 @@ typedef struct elf64_shdr {
396#define NT_PPC_TM_CTAR 0x10d /* TM checkpointed Target Address Register */ 396#define NT_PPC_TM_CTAR 0x10d /* TM checkpointed Target Address Register */
397#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */ 397#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
398#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */ 398#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
399#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */
399#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ 400#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
400#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ 401#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
401#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */ 402#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
diff --git a/include/uapi/linux/erspan.h b/include/uapi/linux/erspan.h
new file mode 100644
index 000000000000..841573019ae1
--- /dev/null
+++ b/include/uapi/linux/erspan.h
@@ -0,0 +1,52 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * ERSPAN Tunnel Metadata
4 *
5 * Copyright (c) 2018 VMware
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation.
10 *
11 * Userspace API for metadata mode ERSPAN tunnel
12 */
13#ifndef _UAPI_ERSPAN_H
14#define _UAPI_ERSPAN_H
15
16#include <linux/types.h> /* For __beXX in userspace */
17#include <asm/byteorder.h>
18
19/* ERSPAN version 2 metadata header */
20struct erspan_md2 {
21 __be32 timestamp;
22 __be16 sgt; /* security group tag */
23#if defined(__LITTLE_ENDIAN_BITFIELD)
24 __u8 hwid_upper:2,
25 ft:5,
26 p:1;
27 __u8 o:1,
28 gra:2,
29 dir:1,
30 hwid:4;
31#elif defined(__BIG_ENDIAN_BITFIELD)
32 __u8 p:1,
33 ft:5,
34 hwid_upper:2;
35 __u8 hwid:4,
36 dir:1,
37 gra:2,
38 o:1;
39#else
40#error "Please fix <asm/byteorder.h>"
41#endif
42};
43
44struct erspan_metadata {
45 int version;
46 union {
47 __be32 index; /* Version 1 (type II)*/
48 struct erspan_md2 md2; /* Version 2 (type III) */
49 } u;
50};
51
52#endif /* _UAPI_ERSPAN_H */
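
For the new metadata header, a small hedged sketch of how a version 2 (type III) erspan_metadata might be populated; the direction and hardware-id encodings follow the bitfields declared above, and all values are illustrative.

/* Hedged sketch: fill the new erspan_metadata for an ERSPAN version 2
 * (type III) session. Values are illustrative only.
 */
#include <string.h>
#include <arpa/inet.h>
#include <linux/erspan.h>

static void fill_erspan_v2(struct erspan_metadata *md, int egress, unsigned int hwid)
{
	memset(md, 0, sizeof(*md));
	md->version = 2;
	md->u.md2.dir = egress ? 1 : 0;			/* mirror direction */
	md->u.md2.hwid = hwid & 0xf;			/* low 4 bits of hw id */
	md->u.md2.hwid_upper = (hwid >> 4) & 0x3;	/* top 2 bits of hw id */
	md->u.md2.sgt = htons(0);			/* security group tag */
}
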
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index ac71559314e7..44a0b675a6bc 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1686,6 +1686,7 @@ enum ethtool_reset_flags {
1686 ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */ 1686 ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */
1687 ETH_RESET_RAM = 1 << 7, /* RAM shared between 1687 ETH_RESET_RAM = 1 << 7, /* RAM shared between
1688 * multiple components */ 1688 * multiple components */
1689 ETH_RESET_AP = 1 << 8, /* Application processor */
1689 1690
1690 ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to 1691 ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to
1691 * this interface */ 1692 * this interface */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 4199f8acbce5..d2a8313fabd7 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t;
377/* per-IO, return -EAGAIN if operation would block */ 377/* per-IO, return -EAGAIN if operation would block */
378#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008) 378#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
379 379
380/* per-IO O_APPEND */
381#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
382
380/* mask of flags supported by the kernel */ 383/* mask of flags supported by the kernel */
381#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT) 384#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
385 RWF_APPEND)
382 386
383#endif /* _UAPI_LINUX_FS_H */ 387#endif /* _UAPI_LINUX_FS_H */
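
The new RWF_APPEND flag gives per-IO O_APPEND semantics. A hedged sketch with pwritev2() follows; the fallback #define simply mirrors the value added in the hunk above for libcs that do not expose it yet.

/* Hedged sketch of a per-IO append using the new flag: the data lands at
 * end of file even though the descriptor was not opened with O_APPEND.
 * Requires a libc exposing pwritev2(); the fallback define mirrors the
 * value introduced above.
 */
#define _GNU_SOURCE
#include <string.h>
#include <sys/uio.h>

#ifndef RWF_APPEND
#define RWF_APPEND 0x00000010
#endif

static ssize_t append_record(int fd, const char *line)
{
	struct iovec iov = {
		.iov_base = (void *)line,
		.iov_len  = strlen(line),
	};

	/* offset -1: use and update the current file position */
	return pwritev2(fd, &iov, 1, -1, RWF_APPEND);
}
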
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 5156bad77b47..2dc10a034de1 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -187,10 +187,19 @@ struct gfs2_rgrp {
187 __be32 rg_flags; 187 __be32 rg_flags;
188 __be32 rg_free; 188 __be32 rg_free;
189 __be32 rg_dinodes; 189 __be32 rg_dinodes;
190 __be32 __pad; 190 union {
191 __be32 __pad;
192 __be32 rg_skip; /* Distance to the next rgrp in fs blocks */
193 };
191 __be64 rg_igeneration; 194 __be64 rg_igeneration;
192 195 /* The following 3 fields are duplicated from gfs2_rindex to reduce
193 __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */ 196 reliance on the rindex */
197 __be64 rg_data0; /* First data location */
198 __be32 rg_data; /* Number of data blocks in rgrp */
199 __be32 rg_bitbytes; /* Number of bytes in data bitmaps */
200 __be32 rg_crc; /* crc32 of the structure with this field 0 */
201
202 __u8 rg_reserved[60]; /* Several fields from gfs1 now reserved */
194}; 203};
195 204
196/* 205/*
@@ -394,7 +403,36 @@ struct gfs2_ea_header {
394 * Log header structure 403 * Log header structure
395 */ 404 */
396 405
397#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */ 406#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
407#define GFS2_LOG_HEAD_FLUSH_NORMAL 0x00000002 /* normal log flush */
408#define GFS2_LOG_HEAD_FLUSH_SYNC 0x00000004 /* Sync log flush */
409#define GFS2_LOG_HEAD_FLUSH_SHUTDOWN 0x00000008 /* Shutdown log flush */
410#define GFS2_LOG_HEAD_FLUSH_FREEZE 0x00000010 /* Freeze flush */
411#define GFS2_LOG_HEAD_RECOVERY 0x00000020 /* Journal recovery */
412#define GFS2_LOG_HEAD_USERSPACE 0x80000000 /* Written by gfs2-utils */
413
414/* Log flush callers */
415#define GFS2_LFC_SHUTDOWN 0x00000100
416#define GFS2_LFC_JDATA_WPAGES 0x00000200
417#define GFS2_LFC_SET_FLAGS 0x00000400
418#define GFS2_LFC_AIL_EMPTY_GL 0x00000800
419#define GFS2_LFC_AIL_FLUSH 0x00001000
420#define GFS2_LFC_RGRP_GO_SYNC 0x00002000
421#define GFS2_LFC_INODE_GO_SYNC 0x00004000
422#define GFS2_LFC_INODE_GO_INVAL 0x00008000
423#define GFS2_LFC_FREEZE_GO_SYNC 0x00010000
424#define GFS2_LFC_KILL_SB 0x00020000
425#define GFS2_LFC_DO_SYNC 0x00040000
426#define GFS2_LFC_INPLACE_RESERVE 0x00080000
427#define GFS2_LFC_WRITE_INODE 0x00100000
428#define GFS2_LFC_MAKE_FS_RO 0x00200000
429#define GFS2_LFC_SYNC_FS 0x00400000
430#define GFS2_LFC_EVICT_INODE 0x00800000
431#define GFS2_LFC_TRANS_END 0x01000000
432#define GFS2_LFC_LOGD_JFLUSH_REQD 0x02000000
433#define GFS2_LFC_LOGD_AIL_FLUSH_REQD 0x04000000
434
435#define LH_V1_SIZE (offsetofend(struct gfs2_log_header, lh_hash))
398 436
399struct gfs2_log_header { 437struct gfs2_log_header {
400 struct gfs2_meta_header lh_header; 438 struct gfs2_meta_header lh_header;
@@ -403,7 +441,21 @@ struct gfs2_log_header {
403 __be32 lh_flags; /* GFS2_LOG_HEAD_... */ 441 __be32 lh_flags; /* GFS2_LOG_HEAD_... */
404 __be32 lh_tail; /* Block number of log tail */ 442 __be32 lh_tail; /* Block number of log tail */
405 __be32 lh_blkno; 443 __be32 lh_blkno;
406 __be32 lh_hash; 444 __be32 lh_hash; /* crc up to here with this field 0 */
445
446 /* Version 2 additional fields start here */
447 __be32 lh_crc; /* crc32c from lh_nsec to end of block */
448 __be32 lh_nsec; /* Nanoseconds of timestamp */
449 __be64 lh_sec; /* Seconds of timestamp */
450 __be64 lh_addr; /* Block addr of this log header (absolute) */
451 __be64 lh_jinode; /* Journal inode number */
452 __be64 lh_statfs_addr; /* Local statfs inode number */
453 __be64 lh_quota_addr; /* Local quota change inode number */
454
455 /* Statfs local changes (i.e. diff from global statfs) */
456 __be64 lh_local_total;
457 __be64 lh_local_free;
458 __be64 lh_local_dinodes;
407}; 459};
408 460
409/* 461/*
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index fe648032d6b9..f71a1751cacf 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -72,6 +72,9 @@ struct i2c_msg {
72#define I2C_M_RD 0x0001 /* read data, from slave to master */ 72#define I2C_M_RD 0x0001 /* read data, from slave to master */
73 /* I2C_M_RD is guaranteed to be 0x0001! */ 73 /* I2C_M_RD is guaranteed to be 0x0001! */
74#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ 74#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
75#define I2C_M_DMA_SAFE 0x0200 /* the buffer of this message is DMA safe */
76 /* makes only sense in kernelspace */
77 /* userspace buffers are copied anyway */
75#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ 78#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */
76#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ 79#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
77#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ 80#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 144de4d2f385..f8cb5760ea4f 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -48,6 +48,7 @@
48#define ETH_P_PUP 0x0200 /* Xerox PUP packet */ 48#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
49#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */ 49#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
50#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */ 50#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
51#define ETH_P_ERSPAN2 0x22EB /* ERSPAN version 2 (type III) */
51#define ETH_P_IP 0x0800 /* Internet Protocol packet */ 52#define ETH_P_IP 0x0800 /* Internet Protocol packet */
52#define ETH_P_X25 0x0805 /* CCITT X.25 */ 53#define ETH_P_X25 0x0805 /* CCITT X.25 */
53#define ETH_P_ARP 0x0806 /* Address Resolution packet */ 54#define ETH_P_ARP 0x0806 /* Address Resolution packet */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 19fc02660e0c..6d9447700e18 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -161,6 +161,9 @@ enum {
161 IFLA_EVENT, 161 IFLA_EVENT,
162 IFLA_NEW_NETNSID, 162 IFLA_NEW_NETNSID,
163 IFLA_IF_NETNSID, 163 IFLA_IF_NETNSID,
164 IFLA_CARRIER_UP_COUNT,
165 IFLA_CARRIER_DOWN_COUNT,
166 IFLA_NEW_IFINDEX,
164 __IFLA_MAX 167 __IFLA_MAX
165}; 168};
166 169
@@ -732,6 +735,8 @@ enum {
732 IFLA_VF_STATS_BROADCAST, 735 IFLA_VF_STATS_BROADCAST,
733 IFLA_VF_STATS_MULTICAST, 736 IFLA_VF_STATS_MULTICAST,
734 IFLA_VF_STATS_PAD, 737 IFLA_VF_STATS_PAD,
738 IFLA_VF_STATS_RX_DROPPED,
739 IFLA_VF_STATS_TX_DROPPED,
735 __IFLA_VF_STATS_MAX, 740 __IFLA_VF_STATS_MAX,
736}; 741};
737 742
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 719d243471f4..98e4d5d7c45c 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,8 +22,13 @@
22 22
23#define MACSEC_KEYID_LEN 16 23#define MACSEC_KEYID_LEN 16
24 24
25#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL 25/* cipher IDs as per IEEE802.1AEbn-2011 */
26#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 26#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
27#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
28
29/* deprecated cipher ID for GCM-AES-128 */
30#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
31#define MACSEC_DEFAULT_CIPHER_ALT MACSEC_CIPHER_ID_GCM_AES_128
27 32
28#define MACSEC_MIN_ICV_LEN 8 33#define MACSEC_MIN_ICV_LEN 8
29#define MACSEC_MAX_ICV_LEN 32 34#define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 030d3e6d6029..ee432cd3018c 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -57,6 +57,8 @@
57 */ 57 */
58#define TUNSETVNETBE _IOW('T', 222, int) 58#define TUNSETVNETBE _IOW('T', 222, int)
59#define TUNGETVNETBE _IOR('T', 223, int) 59#define TUNGETVNETBE _IOR('T', 223, int)
60#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
61#define TUNSETFILTEREBPF _IOR('T', 225, int)
60 62
61/* TUNSETIFF ifr flags */ 63/* TUNSETIFF ifr flags */
62#define IFF_TUN 0x0001 64#define IFF_TUN 0x0001
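
The two new tun ioctls take a pointer to the file descriptor of an already-loaded eBPF program; a hedged one-line sketch (prog_fd is assumed to come from a prior bpf(BPF_PROG_LOAD) call):

/* Hedged sketch: install an eBPF steering program on a tun device.
 * prog_fd is assumed to be a previously loaded socket-filter type program.
 */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_set_steering(int tun_fd, int prog_fd)
{
	return ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
}
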
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index e68dadbd6d45..1b3d148c4560 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -137,6 +137,9 @@ enum {
137 IFLA_GRE_IGNORE_DF, 137 IFLA_GRE_IGNORE_DF,
138 IFLA_GRE_FWMARK, 138 IFLA_GRE_FWMARK,
139 IFLA_GRE_ERSPAN_INDEX, 139 IFLA_GRE_ERSPAN_INDEX,
140 IFLA_GRE_ERSPAN_VER,
141 IFLA_GRE_ERSPAN_DIR,
142 IFLA_GRE_ERSPAN_HWID,
140 __IFLA_GRE_MAX, 143 __IFLA_GRE_MAX,
141}; 144};
142 145
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 817d807e9481..14565d703291 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -92,6 +92,8 @@ enum {
92 INET_DIAG_BC_D_COND, 92 INET_DIAG_BC_D_COND,
93 INET_DIAG_BC_DEV_COND, /* u32 ifindex */ 93 INET_DIAG_BC_DEV_COND, /* u32 ifindex */
94 INET_DIAG_BC_MARK_COND, 94 INET_DIAG_BC_MARK_COND,
95 INET_DIAG_BC_S_EQ,
96 INET_DIAG_BC_D_EQ,
95}; 97};
96 98
97struct inet_diag_hostcond { 99struct inet_diag_hostcond {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 061fa62958a2..53fbae27b280 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -594,6 +594,7 @@
594#define BTN_DPAD_RIGHT 0x223 594#define BTN_DPAD_RIGHT 0x223
595 595
596#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ 596#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
597#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
597 598
598#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */ 599#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
599#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */ 600#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 8c5a0bf6ee35..7288a7c573cc 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -21,10 +21,21 @@
21 21
22/* 22/*
23 * The event structure itself 23 * The event structure itself
24 * Note that __USE_TIME_BITS64 is defined by libc based on
25 * application's request to use 64 bit time_t.
24 */ 26 */
25 27
26struct input_event { 28struct input_event {
 29#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
27 struct timeval time; 30 struct timeval time;
31#define input_event_sec time.tv_sec
32#define input_event_usec time.tv_usec
33#else
34 __kernel_ulong_t __sec;
35 __kernel_ulong_t __usec;
36#define input_event_sec __sec
37#define input_event_usec __usec
38#endif
28 __u16 type; 39 __u16 type;
29 __u16 code; 40 __u16 code;
30 __s32 value; 41 __s32 value;
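
The new input_event_sec/input_event_usec names let the same source compile whether libc maps the old timeval or the new __sec/__usec pair. A hedged evdev reader sketch (the device node is illustrative):

/* Hedged sketch of an evdev reader using the new accessor macros, so the
 * same code builds with either 32-bit or 64-bit time_t userspace.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int dump_events(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0)
		return -1;
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("%lld.%06lld type %u code %u value %d\n",
		       (long long)ev.input_event_sec,
		       (long long)ev.input_event_usec,
		       ev.type, ev.code, ev.value);
	return 0;
}
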
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 6e80501368ae..f4cab5b3ba9a 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -58,7 +58,8 @@ struct kfd_ioctl_create_queue_args {
58 __u64 eop_buffer_address; /* to KFD */ 58 __u64 eop_buffer_address; /* to KFD */
59 __u64 eop_buffer_size; /* to KFD */ 59 __u64 eop_buffer_size; /* to KFD */
60 __u64 ctx_save_restore_address; /* to KFD */ 60 __u64 ctx_save_restore_address; /* to KFD */
61 __u64 ctx_save_restore_size; /* to KFD */ 61 __u32 ctx_save_restore_size; /* to KFD */
62 __u32 ctl_stack_size; /* to KFD */
62}; 63};
63 64
64struct kfd_ioctl_destroy_queue_args { 65struct kfd_ioctl_destroy_queue_args {
@@ -261,6 +262,13 @@ struct kfd_ioctl_get_tile_config_args {
261 */ 262 */
262}; 263};
263 264
265struct kfd_ioctl_set_trap_handler_args {
266 uint64_t tba_addr; /* to KFD */
267 uint64_t tma_addr; /* to KFD */
268 uint32_t gpu_id; /* to KFD */
269 uint32_t pad;
270};
271
264#define AMDKFD_IOCTL_BASE 'K' 272#define AMDKFD_IOCTL_BASE 'K'
265#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) 273#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
266#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) 274#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -321,7 +329,10 @@ struct kfd_ioctl_get_tile_config_args {
321#define AMDKFD_IOC_GET_TILE_CONFIG \ 329#define AMDKFD_IOC_GET_TILE_CONFIG \
322 AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args) 330 AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
323 331
332#define AMDKFD_IOC_SET_TRAP_HANDLER \
333 AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
334
324#define AMDKFD_COMMAND_START 0x01 335#define AMDKFD_COMMAND_START 0x01
325#define AMDKFD_COMMAND_END 0x13 336#define AMDKFD_COMMAND_END 0x14
326 337
327#endif 338#endif
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index d84ce5c1c9aa..7d570c7bd117 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -65,7 +65,7 @@ struct sockaddr_l2tpip6 {
65 * TUNNEL_MODIFY - CONN_ID, udpcsum 65 * TUNNEL_MODIFY - CONN_ID, udpcsum
66 * TUNNEL_GETSTATS - CONN_ID, (stats) 66 * TUNNEL_GETSTATS - CONN_ID, (stats)
67 * TUNNEL_GET - CONN_ID, (...) 67 * TUNNEL_GET - CONN_ID, (...)
68 * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec 68 * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
69 * SESSION_DELETE - SESSION_ID 69 * SESSION_DELETE - SESSION_ID
70 * SESSION_MODIFY - SESSION_ID, data_seq 70 * SESSION_MODIFY - SESSION_ID, data_seq
71 * SESSION_GET - SESSION_ID, (...) 71 * SESSION_GET - SESSION_ID, (...)
@@ -94,10 +94,10 @@ enum {
94 L2TP_ATTR_NONE, /* no data */ 94 L2TP_ATTR_NONE, /* no data */
95 L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */ 95 L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
96 L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */ 96 L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
97 L2TP_ATTR_OFFSET, /* u16 */ 97 L2TP_ATTR_OFFSET, /* u16 (not used) */
98 L2TP_ATTR_DATA_SEQ, /* u16 */ 98 L2TP_ATTR_DATA_SEQ, /* u16 */
99 L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */ 99 L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
100 L2TP_ATTR_L2SPEC_LEN, /* u8, enum l2tp_l2spec_type */ 100 L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */
101 L2TP_ATTR_PROTO_VERSION, /* u8 */ 101 L2TP_ATTR_PROTO_VERSION, /* u8 */
102 L2TP_ATTR_IFNAME, /* string */ 102 L2TP_ATTR_IFNAME, /* string */
103 L2TP_ATTR_CONN_ID, /* u32 */ 103 L2TP_ATTR_CONN_ID, /* u32 */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 42d1a434af29..f9a1be7fc696 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -75,14 +75,23 @@ struct nvm_ioctl_create_simple {
75 __u32 lun_end; 75 __u32 lun_end;
76}; 76};
77 77
78struct nvm_ioctl_create_extended {
79 __u16 lun_begin;
80 __u16 lun_end;
81 __u16 op;
82 __u16 rsv;
83};
84
78enum { 85enum {
79 NVM_CONFIG_TYPE_SIMPLE = 0, 86 NVM_CONFIG_TYPE_SIMPLE = 0,
87 NVM_CONFIG_TYPE_EXTENDED = 1,
80}; 88};
81 89
82struct nvm_ioctl_create_conf { 90struct nvm_ioctl_create_conf {
83 __u32 type; 91 __u32 type;
84 union { 92 union {
85 struct nvm_ioctl_create_simple s; 93 struct nvm_ioctl_create_simple s;
94 struct nvm_ioctl_create_extended e;
86 }; 95 };
87}; 96};
88 97
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index c3aef4316fbf..4fe580d36e41 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -47,12 +47,14 @@
47#define LIRC_MODE_RAW 0x00000001 47#define LIRC_MODE_RAW 0x00000001
48#define LIRC_MODE_PULSE 0x00000002 48#define LIRC_MODE_PULSE 0x00000002
49#define LIRC_MODE_MODE2 0x00000004 49#define LIRC_MODE_MODE2 0x00000004
50#define LIRC_MODE_SCANCODE 0x00000008
50#define LIRC_MODE_LIRCCODE 0x00000010 51#define LIRC_MODE_LIRCCODE 0x00000010
51 52
52 53
53#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) 54#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW)
54#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) 55#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE)
55#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) 56#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2)
57#define LIRC_CAN_SEND_SCANCODE LIRC_MODE2SEND(LIRC_MODE_SCANCODE)
56#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) 58#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE)
57 59
58#define LIRC_CAN_SEND_MASK 0x0000003f 60#define LIRC_CAN_SEND_MASK 0x0000003f
@@ -64,6 +66,7 @@
64#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) 66#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW)
65#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) 67#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE)
66#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) 68#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2)
69#define LIRC_CAN_REC_SCANCODE LIRC_MODE2REC(LIRC_MODE_SCANCODE)
67#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) 70#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE)
68 71
69#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) 72#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK)
@@ -131,4 +134,83 @@
131 134
132#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32) 135#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
133 136
137/*
138 * struct lirc_scancode - decoded scancode with protocol for use with
139 * LIRC_MODE_SCANCODE
140 *
141 * @timestamp: Timestamp in nanoseconds using CLOCK_MONOTONIC when IR
142 * was decoded.
143 * @flags: should be 0 for transmit. When receiving scancodes,
144 * LIRC_SCANCODE_FLAG_TOGGLE or LIRC_SCANCODE_FLAG_REPEAT can be set
145 * depending on the protocol
146 * @rc_proto: see enum rc_proto
147 * @keycode: the translated keycode. Set to 0 for transmit.
148 * @scancode: the scancode received or to be sent
149 */
150struct lirc_scancode {
151 __u64 timestamp;
152 __u16 flags;
153 __u16 rc_proto;
154 __u32 keycode;
155 __u64 scancode;
156};
157
158/* Set if the toggle bit of rc-5 or rc-6 is enabled */
159#define LIRC_SCANCODE_FLAG_TOGGLE 1
160/* Set if this is a nec or sanyo repeat */
161#define LIRC_SCANCODE_FLAG_REPEAT 2
162
163/**
164 * enum rc_proto - the Remote Controller protocol
165 *
166 * @RC_PROTO_UNKNOWN: Protocol not known
167 * @RC_PROTO_OTHER: Protocol known but proprietary
168 * @RC_PROTO_RC5: Philips RC5 protocol
169 * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol
170 * @RC_PROTO_RC5_SZ: StreamZap variant of RC5
171 * @RC_PROTO_JVC: JVC protocol
172 * @RC_PROTO_SONY12: Sony 12 bit protocol
173 * @RC_PROTO_SONY15: Sony 15 bit protocol
174 * @RC_PROTO_SONY20: Sony 20 bit protocol
175 * @RC_PROTO_NEC: NEC protocol
176 * @RC_PROTO_NECX: Extended NEC protocol
177 * @RC_PROTO_NEC32: NEC 32 bit protocol
178 * @RC_PROTO_SANYO: Sanyo protocol
179 * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard
180 * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse
181 * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol
182 * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol
183 * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol
184 * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol
185 * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
186 * @RC_PROTO_SHARP: Sharp protocol
187 * @RC_PROTO_XMP: XMP protocol
188 * @RC_PROTO_CEC: CEC protocol
189 */
190enum rc_proto {
191 RC_PROTO_UNKNOWN = 0,
192 RC_PROTO_OTHER = 1,
193 RC_PROTO_RC5 = 2,
194 RC_PROTO_RC5X_20 = 3,
195 RC_PROTO_RC5_SZ = 4,
196 RC_PROTO_JVC = 5,
197 RC_PROTO_SONY12 = 6,
198 RC_PROTO_SONY15 = 7,
199 RC_PROTO_SONY20 = 8,
200 RC_PROTO_NEC = 9,
201 RC_PROTO_NECX = 10,
202 RC_PROTO_NEC32 = 11,
203 RC_PROTO_SANYO = 12,
204 RC_PROTO_MCIR2_KBD = 13,
205 RC_PROTO_MCIR2_MSE = 14,
206 RC_PROTO_RC6_0 = 15,
207 RC_PROTO_RC6_6A_20 = 16,
208 RC_PROTO_RC6_6A_24 = 17,
209 RC_PROTO_RC6_6A_32 = 18,
210 RC_PROTO_RC6_MCE = 19,
211 RC_PROTO_SHARP = 20,
212 RC_PROTO_XMP = 21,
213 RC_PROTO_CEC = 22,
214};
215
134#endif 216#endif
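
Usage sketch for the scancode mode added above: switch a lirc character device into LIRC_MODE_SCANCODE and read decoded struct lirc_scancode records. The /dev/lirc0 path is an assumption, the driver must advertise LIRC_CAN_REC_SCANCODE, and error handling is abbreviated.

/* Minimal sketch: put a lirc chardev in scancode mode and print decodes. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lirc.h>

int main(void)
{
	__u32 mode = LIRC_MODE_SCANCODE;
	struct lirc_scancode sc;
	int fd = open("/dev/lirc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, LIRC_SET_REC_MODE, &mode))
		return 1;

	while (read(fd, &sc, sizeof(sc)) == sizeof(sc)) {
		printf("proto %u scancode 0x%llx keycode %u%s\n",
		       sc.rc_proto, (unsigned long long)sc.scancode, sc.keycode,
		       (sc.flags & LIRC_SCANCODE_FLAG_REPEAT) ? " (repeat)" : "");
	}
	close(fd);
	return 0;
}
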
diff --git a/include/uapi/linux/lp.h b/include/uapi/linux/lp.h
index dafcfe4e4834..8589a27037d7 100644
--- a/include/uapi/linux/lp.h
+++ b/include/uapi/linux/lp.h
@@ -8,6 +8,8 @@
8#ifndef _UAPI_LINUX_LP_H 8#ifndef _UAPI_LINUX_LP_H
9#define _UAPI_LINUX_LP_H 9#define _UAPI_LINUX_LP_H
10 10
11#include <linux/types.h>
12#include <linux/ioctl.h>
11 13
12/* 14/*
13 * Per POSIX guidelines, this module reserves the LP and lp prefixes 15 * Per POSIX guidelines, this module reserves the LP and lp prefixes
@@ -88,7 +90,15 @@
88#define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */ 90#define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */
89#endif 91#endif
90#define LPGETFLAGS 0x060e /* get status flags */ 92#define LPGETFLAGS 0x060e /* get status flags */
91#define LPSETTIMEOUT 0x060f /* set parport timeout */ 93#define LPSETTIMEOUT_OLD 0x060f /* set parport timeout */
94#define LPSETTIMEOUT_NEW \
95 _IOW(0x6, 0xf, __s64[2]) /* set parport timeout */
96#if __BITS_PER_LONG == 64
97#define LPSETTIMEOUT LPSETTIMEOUT_OLD
98#else
99#define LPSETTIMEOUT (sizeof(time_t) > sizeof(__kernel_long_t) ? \
100 LPSETTIMEOUT_NEW : LPSETTIMEOUT_OLD)
101#endif
92 102
93/* timeout for printk'ing a timeout, in jiffies (100ths of a second). 103/* timeout for printk'ing a timeout, in jiffies (100ths of a second).
94 This is also used for re-checking error conditions if LP_ABORT is 104 This is also used for re-checking error conditions if LP_ABORT is
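
Usage sketch for the reworked timeout ioctl above, assuming /dev/lp0 and a kernel that carries this change. LPSETTIMEOUT_NEW always takes two __s64 values (seconds, microseconds), which sidesteps the time_t-width selection the LPSETTIMEOUT macro performs for legacy callers.

/* Sketch: set a 10 second parport timeout using the new command. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lp.h>
#include <linux/types.h>

int main(void)
{
	__s64 timeout[2] = { 10, 0 };	/* seconds, microseconds */
	int fd = open("/dev/lp0", O_WRONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, LPSETTIMEOUT_NEW, timeout))
		perror("LPSETTIMEOUT_NEW");
	close(fd);
	return 0;
}
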
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 4e01ad7ffe98..5891d7614c8c 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -31,7 +31,7 @@
31 * enum membarrier_cmd - membarrier system call command 31 * enum membarrier_cmd - membarrier system call command
32 * @MEMBARRIER_CMD_QUERY: Query the set of supported commands. It returns 32 * @MEMBARRIER_CMD_QUERY: Query the set of supported commands. It returns
33 * a bitmask of valid commands. 33 * a bitmask of valid commands.
34 * @MEMBARRIER_CMD_SHARED: Execute a memory barrier on all running threads. 34 * @MEMBARRIER_CMD_GLOBAL: Execute a memory barrier on all running threads.
35 * Upon return from system call, the caller thread 35 * Upon return from system call, the caller thread
36 * is ensured that all running threads have passed 36 * is ensured that all running threads have passed
37 * through a state where all memory accesses to 37 * through a state where all memory accesses to
@@ -40,6 +40,28 @@
40 * (non-running threads are de facto in such a 40 * (non-running threads are de facto in such a
41 * state). This covers threads from all processes 41 * state). This covers threads from all processes
42 * running on the system. This command returns 0. 42 * running on the system. This command returns 0.
43 * @MEMBARRIER_CMD_GLOBAL_EXPEDITED:
44 * Execute a memory barrier on all running threads
45 * of all processes which previously registered
46 * with MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
47 * Upon return from system call, the caller thread
48 * is ensured that all running threads have passed
49 * through a state where all memory accesses to
50 * user-space addresses match program order between
51 * entry to and return from the system call
52 * (non-running threads are de facto in such a
53 * state). This only covers threads from processes
54 * which registered with
55 * MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED.
56 * This command returns 0. Given that
57 * registration is about the intent to receive
58 * the barriers, it is valid to invoke
59 * MEMBARRIER_CMD_GLOBAL_EXPEDITED from a
60 * non-registered process.
61 * @MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
62 * Register the process intent to receive
63 * MEMBARRIER_CMD_GLOBAL_EXPEDITED memory
64 * barriers. Always returns 0.
43 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED: 65 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED:
44 * Execute a memory barrier on each running 66 * Execute a memory barrier on each running
45 * thread belonging to the same process as the current 67 * thread belonging to the same process as the current
@@ -51,7 +73,7 @@
51 * to and return from the system call 73 * to and return from the system call
52 * (non-running threads are de facto in such a 74 * (non-running threads are de facto in such a
53 * state). This only covers threads from the 75 * state). This only covers threads from the
54 * same processes as the caller thread. This 76 * same process as the caller thread. This
55 * command returns 0 on success. The 77 * command returns 0 on success. The
56 * "expedited" commands complete faster than 78 * "expedited" commands complete faster than
57 * the non-expedited ones, they never block, 79 * the non-expedited ones, they never block,
@@ -64,18 +86,54 @@
64 * Register the process intent to use 86 * Register the process intent to use
65 * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always 87 * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
66 * returns 0. 88 * returns 0.
89 * @MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
 90 * In addition to the memory ordering
 91 * guarantees described in
 92 * MEMBARRIER_CMD_PRIVATE_EXPEDITED, ensure
 93 * that, upon return from the system call,
 94 * the caller's running sibling threads have
 95 * executed a core serializing
96 * instruction. (architectures are required to
97 * guarantee that non-running threads issue
98 * core serializing instructions before they
99 * resume user-space execution). This only
100 * covers threads from the same process as the
101 * caller thread. This command returns 0 on
102 * success. The "expedited" commands complete
103 * faster than the non-expedited ones, they
104 * never block, but have the downside of
105 * causing extra overhead. If this command is
106 * not implemented by an architecture, -EINVAL
107 * is returned. A process needs to register its
108 * intent to use the private expedited sync
109 * core command prior to using it, otherwise
110 * this command returns -EPERM.
111 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
112 * Register the process intent to use
113 * MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE.
114 * If this command is not implemented by an
115 * architecture, -EINVAL is returned.
116 * Returns 0 on success.
117 * @MEMBARRIER_CMD_SHARED:
118 * Alias to MEMBARRIER_CMD_GLOBAL. Provided for
119 * header backward compatibility.
67 * 120 *
68 * Command to be passed to the membarrier system call. The commands need to 121 * Command to be passed to the membarrier system call. The commands need to
69 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to 122 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
70 * the value 0. 123 * the value 0.
71 */ 124 */
72enum membarrier_cmd { 125enum membarrier_cmd {
73 MEMBARRIER_CMD_QUERY = 0, 126 MEMBARRIER_CMD_QUERY = 0,
74 MEMBARRIER_CMD_SHARED = (1 << 0), 127 MEMBARRIER_CMD_GLOBAL = (1 << 0),
75 /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */ 128 MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1),
76 /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */ 129 MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2),
77 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), 130 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
78 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4), 131 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
132 MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5),
133 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6),
134
135 /* Alias for header backward compatibility. */
136 MEMBARRIER_CMD_SHARED = MEMBARRIER_CMD_GLOBAL,
79}; 137};
80 138
81#endif /* _UAPI_LINUX_MEMBARRIER_H */ 139#endif /* _UAPI_LINUX_MEMBARRIER_H */
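
A usage sketch of the new global-expedited pair above: register intent once, then issue expedited global barriers. membarrier(2) has no glibc wrapper here, so it is invoked via syscall(); a kernel built with CONFIG_MEMBARRIER is assumed.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/membarrier.h>

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	if (membarrier(MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0))
		perror("register");
	if (membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0))
		perror("barrier");
	return 0;
}
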
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 3f03567631cb..7e27070b9440 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -15,54 +15,6 @@
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17 17
18struct nd_cmd_smart {
19 __u32 status;
20 __u8 data[128];
21} __packed;
22
23#define ND_SMART_HEALTH_VALID (1 << 0)
24#define ND_SMART_SPARES_VALID (1 << 1)
25#define ND_SMART_USED_VALID (1 << 2)
26#define ND_SMART_TEMP_VALID (1 << 3)
27#define ND_SMART_CTEMP_VALID (1 << 4)
28#define ND_SMART_ALARM_VALID (1 << 9)
29#define ND_SMART_SHUTDOWN_VALID (1 << 10)
30#define ND_SMART_VENDOR_VALID (1 << 11)
31#define ND_SMART_SPARE_TRIP (1 << 0)
32#define ND_SMART_TEMP_TRIP (1 << 1)
33#define ND_SMART_CTEMP_TRIP (1 << 2)
34#define ND_SMART_NON_CRITICAL_HEALTH (1 << 0)
35#define ND_SMART_CRITICAL_HEALTH (1 << 1)
36#define ND_SMART_FATAL_HEALTH (1 << 2)
37
38struct nd_smart_payload {
39 __u32 flags;
40 __u8 reserved0[4];
41 __u8 health;
42 __u8 spares;
43 __u8 life_used;
44 __u8 alarm_flags;
45 __u16 temperature;
46 __u16 ctrl_temperature;
47 __u8 reserved1[15];
48 __u8 shutdown_state;
49 __u32 vendor_size;
50 __u8 vendor_data[92];
51} __packed;
52
53struct nd_cmd_smart_threshold {
54 __u32 status;
55 __u8 data[8];
56} __packed;
57
58struct nd_smart_threshold_payload {
59 __u8 alarm_control;
60 __u8 reserved0;
61 __u16 temperature;
62 __u8 spares;
63 __u8 reserved[3];
64} __packed;
65
66struct nd_cmd_dimm_flags { 18struct nd_cmd_dimm_flags {
67 __u32 status; 19 __u32 status;
68 __u32 flags; 20 __u32 flags;
@@ -211,12 +163,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd)
211 163
212#define ND_IOCTL 'N' 164#define ND_IOCTL 'N'
213 165
214#define ND_IOCTL_SMART _IOWR(ND_IOCTL, ND_CMD_SMART,\
215 struct nd_cmd_smart)
216
217#define ND_IOCTL_SMART_THRESHOLD _IOWR(ND_IOCTL, ND_CMD_SMART_THRESHOLD,\
218 struct nd_cmd_smart_threshold)
219
220#define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS,\ 166#define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS,\
221 struct nd_cmd_dimm_flags) 167 struct nd_cmd_dimm_flags)
222 168
@@ -263,7 +209,7 @@ enum nd_driver_flags {
263}; 209};
264 210
265enum { 211enum {
266 ND_MIN_NAMESPACE_SIZE = 0x00400000, 212 ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
267}; 213};
268 214
269enum ars_masks { 215enum ars_masks {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 57ccfb32e87f..9574bd40870b 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -101,12 +101,16 @@ enum ip_conntrack_status {
101 IPS_HELPER_BIT = 13, 101 IPS_HELPER_BIT = 13,
102 IPS_HELPER = (1 << IPS_HELPER_BIT), 102 IPS_HELPER = (1 << IPS_HELPER_BIT),
103 103
104 /* Conntrack has been offloaded to flow table. */
105 IPS_OFFLOAD_BIT = 14,
106 IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT),
107
104 /* Be careful here, modifying these bits can make things messy, 108 /* Be careful here, modifying these bits can make things messy,
105 * so don't let users modify them directly. 109 * so don't let users modify them directly.
106 */ 110 */
107 IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK | 111 IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
108 IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | 112 IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
109 IPS_SEQ_ADJUST | IPS_TEMPLATE), 113 IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
110 114
111 __IPS_MAX_BIT = 14, 115 __IPS_MAX_BIT = 14,
112}; 116};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a3ee277b17a1..66dceee0ae30 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -92,6 +92,9 @@ enum nft_verdicts {
92 * @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes) 92 * @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes)
93 * @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes) 93 * @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes)
94 * @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes) 94 * @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes)
95 * @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes)
96 * @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
97 * @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
95 */ 98 */
96enum nf_tables_msg_types { 99enum nf_tables_msg_types {
97 NFT_MSG_NEWTABLE, 100 NFT_MSG_NEWTABLE,
@@ -116,6 +119,9 @@ enum nf_tables_msg_types {
116 NFT_MSG_GETOBJ, 119 NFT_MSG_GETOBJ,
117 NFT_MSG_DELOBJ, 120 NFT_MSG_DELOBJ,
118 NFT_MSG_GETOBJ_RESET, 121 NFT_MSG_GETOBJ_RESET,
122 NFT_MSG_NEWFLOWTABLE,
123 NFT_MSG_GETFLOWTABLE,
124 NFT_MSG_DELFLOWTABLE,
119 NFT_MSG_MAX, 125 NFT_MSG_MAX,
120}; 126};
121 127
@@ -168,6 +174,8 @@ enum nft_table_attributes {
168 NFTA_TABLE_NAME, 174 NFTA_TABLE_NAME,
169 NFTA_TABLE_FLAGS, 175 NFTA_TABLE_FLAGS,
170 NFTA_TABLE_USE, 176 NFTA_TABLE_USE,
177 NFTA_TABLE_HANDLE,
178 NFTA_TABLE_PAD,
171 __NFTA_TABLE_MAX 179 __NFTA_TABLE_MAX
172}; 180};
173#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1) 181#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
@@ -311,6 +319,7 @@ enum nft_set_desc_attributes {
311 * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32) 319 * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32)
312 * @NFTA_SET_USERDATA: user data (NLA_BINARY) 320 * @NFTA_SET_USERDATA: user data (NLA_BINARY)
313 * @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*) 321 * @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
322 * @NFTA_SET_HANDLE: set handle (NLA_U64)
314 */ 323 */
315enum nft_set_attributes { 324enum nft_set_attributes {
316 NFTA_SET_UNSPEC, 325 NFTA_SET_UNSPEC,
@@ -329,6 +338,7 @@ enum nft_set_attributes {
329 NFTA_SET_USERDATA, 338 NFTA_SET_USERDATA,
330 NFTA_SET_PAD, 339 NFTA_SET_PAD,
331 NFTA_SET_OBJ_TYPE, 340 NFTA_SET_OBJ_TYPE,
341 NFTA_SET_HANDLE,
332 __NFTA_SET_MAX 342 __NFTA_SET_MAX
333}; 343};
334#define NFTA_SET_MAX (__NFTA_SET_MAX - 1) 344#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -777,6 +787,7 @@ enum nft_exthdr_attributes {
777 * @NFT_META_OIFGROUP: packet output interface group 787 * @NFT_META_OIFGROUP: packet output interface group
778 * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) 788 * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
779 * @NFT_META_PRANDOM: a 32bit pseudo-random number 789 * @NFT_META_PRANDOM: a 32bit pseudo-random number
790 * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp)
780 */ 791 */
781enum nft_meta_keys { 792enum nft_meta_keys {
782 NFT_META_LEN, 793 NFT_META_LEN,
@@ -804,6 +815,7 @@ enum nft_meta_keys {
804 NFT_META_OIFGROUP, 815 NFT_META_OIFGROUP,
805 NFT_META_CGROUP, 816 NFT_META_CGROUP,
806 NFT_META_PRANDOM, 817 NFT_META_PRANDOM,
818 NFT_META_SECPATH,
807}; 819};
808 820
809/** 821/**
@@ -949,6 +961,17 @@ enum nft_ct_attributes {
949}; 961};
950#define NFTA_CT_MAX (__NFTA_CT_MAX - 1) 962#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
951 963
964/**
965 * enum nft_flow_attributes - ct offload expression attributes
966 * @NFTA_FLOW_TABLE_NAME: flow table name (NLA_STRING)
967 */
968enum nft_offload_attributes {
969 NFTA_FLOW_UNSPEC,
970 NFTA_FLOW_TABLE_NAME,
971 __NFTA_FLOW_MAX,
972};
973#define NFTA_FLOW_MAX (__NFTA_FLOW_MAX - 1)
974
952enum nft_limit_type { 975enum nft_limit_type {
953 NFT_LIMIT_PKTS, 976 NFT_LIMIT_PKTS,
954 NFT_LIMIT_PKT_BYTES 977 NFT_LIMIT_PKT_BYTES
@@ -1295,6 +1318,7 @@ enum nft_ct_helper_attributes {
1295 * @NFTA_OBJ_TYPE: stateful object type (NLA_U32) 1318 * @NFTA_OBJ_TYPE: stateful object type (NLA_U32)
1296 * @NFTA_OBJ_DATA: stateful object data (NLA_NESTED) 1319 * @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
1297 * @NFTA_OBJ_USE: number of references to this expression (NLA_U32) 1320 * @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
1321 * @NFTA_OBJ_HANDLE: object handle (NLA_U64)
1298 */ 1322 */
1299enum nft_object_attributes { 1323enum nft_object_attributes {
1300 NFTA_OBJ_UNSPEC, 1324 NFTA_OBJ_UNSPEC,
@@ -1303,11 +1327,63 @@ enum nft_object_attributes {
1303 NFTA_OBJ_TYPE, 1327 NFTA_OBJ_TYPE,
1304 NFTA_OBJ_DATA, 1328 NFTA_OBJ_DATA,
1305 NFTA_OBJ_USE, 1329 NFTA_OBJ_USE,
1330 NFTA_OBJ_HANDLE,
1331 NFTA_OBJ_PAD,
1306 __NFTA_OBJ_MAX 1332 __NFTA_OBJ_MAX
1307}; 1333};
1308#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1) 1334#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
1309 1335
1310/** 1336/**
1337 * enum nft_flowtable_attributes - nf_tables flow table netlink attributes
1338 *
1339 * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
1340 * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
1341 * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
1342 * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
1343 * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
1344 */
1345enum nft_flowtable_attributes {
1346 NFTA_FLOWTABLE_UNSPEC,
1347 NFTA_FLOWTABLE_TABLE,
1348 NFTA_FLOWTABLE_NAME,
1349 NFTA_FLOWTABLE_HOOK,
1350 NFTA_FLOWTABLE_USE,
1351 NFTA_FLOWTABLE_HANDLE,
1352 NFTA_FLOWTABLE_PAD,
1353 __NFTA_FLOWTABLE_MAX
1354};
1355#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
1356
1357/**
1358 * enum nft_flowtable_hook_attributes - nf_tables flow table hook netlink attributes
1359 *
1360 * @NFTA_FLOWTABLE_HOOK_NUM: netfilter hook number (NLA_U32)
1361 * @NFTA_FLOWTABLE_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
1362 * @NFTA_FLOWTABLE_HOOK_DEVS: input devices this flow table is bound to (NLA_NESTED)
1363 */
1364enum nft_flowtable_hook_attributes {
1365 NFTA_FLOWTABLE_HOOK_UNSPEC,
1366 NFTA_FLOWTABLE_HOOK_NUM,
1367 NFTA_FLOWTABLE_HOOK_PRIORITY,
1368 NFTA_FLOWTABLE_HOOK_DEVS,
1369 __NFTA_FLOWTABLE_HOOK_MAX
1370};
1371#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
1372
1373/**
1374 * enum nft_device_attributes - nf_tables device netlink attributes
1375 *
1376 * @NFTA_DEVICE_NAME: name of this device (NLA_STRING)
1377 */
1378enum nft_devices_attributes {
1379 NFTA_DEVICE_UNSPEC,
1380 NFTA_DEVICE_NAME,
1381 __NFTA_DEVICE_MAX
1382};
1383#define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1)
1384
1385
1386/**
1311 * enum nft_trace_attributes - nf_tables trace netlink attributes 1387 * enum nft_trace_attributes - nf_tables trace netlink attributes
1312 * 1388 *
1313 * @NFTA_TRACE_TABLE: name of the table (NLA_STRING) 1389 * @NFTA_TRACE_TABLE: name of the table (NLA_STRING)
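
A sketch of how the new flowtable attributes fit together, assuming libmnl is available: build the payload of an NFT_MSG_NEWFLOWTABLE request with NFTA_FLOWTABLE_* and a nested NFTA_FLOWTABLE_HOOK carrying hook number, priority and a NFTA_FLOWTABLE_HOOK_DEVS device list. nf_tables expects this message inside an nfnetlink batch, which is omitted; the "filter", "ft0" and "eth0" names are made up.

#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>

static struct nlmsghdr *build_newflowtable(char *buf)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nfgenmsg *nfg;
	struct nlattr *hook, *devs;

	nlh->nlmsg_type = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWFLOWTABLE;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = NFPROTO_INET;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	mnl_attr_put_strz(nlh, NFTA_FLOWTABLE_TABLE, "filter");
	mnl_attr_put_strz(nlh, NFTA_FLOWTABLE_NAME, "ft0");

	hook = mnl_attr_nest_start(nlh, NFTA_FLOWTABLE_HOOK);
	mnl_attr_put_u32(nlh, NFTA_FLOWTABLE_HOOK_NUM, htonl(NF_NETDEV_INGRESS));
	mnl_attr_put_u32(nlh, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(0));
	devs = mnl_attr_nest_start(nlh, NFTA_FLOWTABLE_HOOK_DEVS);
	mnl_attr_put_strz(nlh, NFTA_DEVICE_NAME, "eth0");
	mnl_attr_nest_end(nlh, devs);
	mnl_attr_nest_end(nlh, hook);

	return nlh;
}
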
diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h
index 07e5e9d47882..d4d1943dcd11 100644
--- a/include/uapi/linux/netfilter/xt_connlimit.h
+++ b/include/uapi/linux/netfilter/xt_connlimit.h
@@ -27,7 +27,7 @@ struct xt_connlimit_info {
27 __u32 flags; 27 __u32 flags;
28 28
29 /* Used internally by the kernel */ 29 /* Used internally by the kernel */
30 struct xt_connlimit_data *data __attribute__((aligned(8))); 30 struct nf_conncount_data *data __attribute__((aligned(8)));
31}; 31};
32 32
33#endif /* _XT_CONNLIMIT_H */ 33#endif /* _XT_CONNLIMIT_H */
diff --git a/include/uapi/linux/netfilter_arp.h b/include/uapi/linux/netfilter_arp.h
index 81b6a4cbcb72..791dfc5ae907 100644
--- a/include/uapi/linux/netfilter_arp.h
+++ b/include/uapi/linux/netfilter_arp.h
@@ -15,6 +15,9 @@
15#define NF_ARP_IN 0 15#define NF_ARP_IN 0
16#define NF_ARP_OUT 1 16#define NF_ARP_OUT 1
17#define NF_ARP_FORWARD 2 17#define NF_ARP_FORWARD 2
18
19#ifndef __KERNEL__
18#define NF_ARP_NUMHOOKS 3 20#define NF_ARP_NUMHOOKS 3
21#endif
19 22
20#endif /* __LINUX_ARP_NETFILTER_H */ 23#endif /* __LINUX_ARP_NETFILTER_H */
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 9089c38f6abe..61f1c7dfd033 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -24,6 +24,9 @@
24#define NFC_DN_IF_IN 0x0004 24#define NFC_DN_IF_IN 0x0004
25/* Output device. */ 25/* Output device. */
26#define NFC_DN_IF_OUT 0x0008 26#define NFC_DN_IF_OUT 0x0008
27
28/* kernel define is in netfilter_defs.h */
29#define NF_DN_NUMHOOKS 7
27#endif /* ! __KERNEL__ */ 30#endif /* ! __KERNEL__ */
28 31
29/* DECnet Hooks */ 32/* DECnet Hooks */
@@ -41,7 +44,6 @@
41#define NF_DN_HELLO 5 44#define NF_DN_HELLO 5
42/* Input Routing Packets */ 45/* Input Routing Packets */
43#define NF_DN_ROUTE 6 46#define NF_DN_ROUTE 6
44#define NF_DN_NUMHOOKS 7
45 47
46enum nf_dn_hook_priorities { 48enum nf_dn_hook_priorities {
47 NF_DN_PRI_FIRST = INT_MIN, 49 NF_DN_PRI_FIRST = INT_MIN,
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index e6b1a84f5dd3..c3b060775e13 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -57,6 +57,7 @@
57 57
58enum nf_ip_hook_priorities { 58enum nf_ip_hook_priorities {
59 NF_IP_PRI_FIRST = INT_MIN, 59 NF_IP_PRI_FIRST = INT_MIN,
60 NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
60 NF_IP_PRI_CONNTRACK_DEFRAG = -400, 61 NF_IP_PRI_CONNTRACK_DEFRAG = -400,
61 NF_IP_PRI_RAW = -300, 62 NF_IP_PRI_RAW = -300,
62 NF_IP_PRI_SELINUX_FIRST = -225, 63 NF_IP_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index 2f9724611cc2..dc624fd24d25 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -62,6 +62,7 @@
62 62
63enum nf_ip6_hook_priorities { 63enum nf_ip6_hook_priorities {
64 NF_IP6_PRI_FIRST = INT_MIN, 64 NF_IP6_PRI_FIRST = INT_MIN,
65 NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
65 NF_IP6_PRI_CONNTRACK_DEFRAG = -400, 66 NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
66 NF_IP6_PRI_RAW = -300, 67 NF_IP6_PRI_RAW = -300,
67 NF_IP6_PRI_SELINUX_FIRST = -225, 68 NF_IP6_PRI_SELINUX_FIRST = -225,
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
new file mode 100644
index 000000000000..f3cc0ef514a7
--- /dev/null
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
@@ -0,0 +1,57 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _IP6T_SRH_H
3#define _IP6T_SRH_H
4
5#include <linux/types.h>
6#include <linux/netfilter.h>
7
8/* Values for "mt_flags" field in struct ip6t_srh */
9#define IP6T_SRH_NEXTHDR 0x0001
10#define IP6T_SRH_LEN_EQ 0x0002
11#define IP6T_SRH_LEN_GT 0x0004
12#define IP6T_SRH_LEN_LT 0x0008
13#define IP6T_SRH_SEGS_EQ 0x0010
14#define IP6T_SRH_SEGS_GT 0x0020
15#define IP6T_SRH_SEGS_LT 0x0040
16#define IP6T_SRH_LAST_EQ 0x0080
17#define IP6T_SRH_LAST_GT 0x0100
18#define IP6T_SRH_LAST_LT 0x0200
19#define IP6T_SRH_TAG 0x0400
20#define IP6T_SRH_MASK 0x07FF
21
22/* Values for "mt_invflags" field in struct ip6t_srh */
23#define IP6T_SRH_INV_NEXTHDR 0x0001
24#define IP6T_SRH_INV_LEN_EQ 0x0002
25#define IP6T_SRH_INV_LEN_GT 0x0004
26#define IP6T_SRH_INV_LEN_LT 0x0008
27#define IP6T_SRH_INV_SEGS_EQ 0x0010
28#define IP6T_SRH_INV_SEGS_GT 0x0020
29#define IP6T_SRH_INV_SEGS_LT 0x0040
30#define IP6T_SRH_INV_LAST_EQ 0x0080
31#define IP6T_SRH_INV_LAST_GT 0x0100
32#define IP6T_SRH_INV_LAST_LT 0x0200
33#define IP6T_SRH_INV_TAG 0x0400
34#define IP6T_SRH_INV_MASK 0x07FF
35
36/**
37 * struct ip6t_srh - SRH match options
38 * @ next_hdr: Next header field of SRH
39 * @ hdr_len: Extension header length field of SRH
40 * @ segs_left: Segments left field of SRH
41 * @ last_entry: Last entry field of SRH
42 * @ tag: Tag field of SRH
43 * @ mt_flags: match options
44 * @ mt_invflags: Invert the sense of match options
45 */
46
47struct ip6t_srh {
48 __u8 next_hdr;
49 __u8 hdr_len;
50 __u8 segs_left;
51 __u8 last_entry;
52 __u16 tag;
53 __u16 mt_flags;
54 __u16 mt_invflags;
55};
56
57#endif /*_IP6T_SRH_H*/
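
A small sketch of how the new SRH match structure is filled: match packets whose segment routing header has more than 2 segments left and whose next header is not UDP. The values are chosen only to show how mt_flags and mt_invflags combine.

#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6t_srh.h>

static const struct ip6t_srh srh_match = {
	.next_hdr	= IPPROTO_UDP,
	.segs_left	= 2,
	.mt_flags	= IP6T_SRH_NEXTHDR | IP6T_SRH_SEGS_GT,
	.mt_invflags	= IP6T_SRH_INV_NEXTHDR,
};
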
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 057d22a48416..946cb62d64b0 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -12,6 +12,7 @@
12 12
13#define NFS_PROGRAM 100003 13#define NFS_PROGRAM 100003
14#define NFS_PORT 2049 14#define NFS_PORT 2049
15#define NFS_RDMA_PORT 20049
15#define NFS_MAXDATA 8192 16#define NFS_MAXDATA 8192
16#define NFS_MAXPATHLEN 1024 17#define NFS_MAXPATHLEN 1024
17#define NFS_MAXNAMLEN 255 18#define NFS_MAXNAMLEN 255
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f882fe1f9709..c587a61c32bf 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3862,6 +3862,9 @@ enum nl80211_bss_scan_width {
3862 * @NL80211_BSS_PARENT_BSSID. (u64). 3862 * @NL80211_BSS_PARENT_BSSID. (u64).
3863 * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF 3863 * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
3864 * is set. 3864 * is set.
3865 * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
3866 * Contains a nested array of signal strength attributes (u8, dBm),
3867 * using the nesting index as the antenna number.
3865 * @__NL80211_BSS_AFTER_LAST: internal 3868 * @__NL80211_BSS_AFTER_LAST: internal
3866 * @NL80211_BSS_MAX: highest BSS attribute 3869 * @NL80211_BSS_MAX: highest BSS attribute
3867 */ 3870 */
@@ -3885,6 +3888,7 @@ enum nl80211_bss {
3885 NL80211_BSS_PAD, 3888 NL80211_BSS_PAD,
3886 NL80211_BSS_PARENT_TSF, 3889 NL80211_BSS_PARENT_TSF,
3887 NL80211_BSS_PARENT_BSSID, 3890 NL80211_BSS_PARENT_BSSID,
3891 NL80211_BSS_CHAIN_SIGNAL,
3888 3892
3889 /* keep last */ 3893 /* keep last */
3890 __NL80211_BSS_AFTER_LAST, 3894 __NL80211_BSS_AFTER_LAST,
diff --git a/include/uapi/linux/nubus.h b/include/uapi/linux/nubus.h
index f3776cc80f4d..48031e7858f1 100644
--- a/include/uapi/linux/nubus.h
+++ b/include/uapi/linux/nubus.h
@@ -221,27 +221,4 @@ enum nubus_display_res_id {
221 NUBUS_RESID_SIXTHMODE = 0x0085 221 NUBUS_RESID_SIXTHMODE = 0x0085
222}; 222};
223 223
224struct nubus_dir
225{
226 unsigned char *base;
227 unsigned char *ptr;
228 int done;
229 int mask;
230};
231
232struct nubus_dirent
233{
234 unsigned char *base;
235 unsigned char type;
236 __u32 data; /* Actually 24bits used */
237 int mask;
238};
239
240
241/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
242static inline void *nubus_slot_addr(int slot)
243{
244 return (void *)(0xF0000000|(slot<<24));
245}
246
247#endif /* _UAPILINUX_NUBUS_H */ 224#endif /* _UAPILINUX_NUBUS_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index dcfab5e3b55c..713e56ce681f 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -363,6 +363,7 @@ enum ovs_tunnel_key_attr {
363 OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ 363 OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
364 OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ 364 OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
365 OVS_TUNNEL_KEY_ATTR_PAD, 365 OVS_TUNNEL_KEY_ATTR_PAD,
366 OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* struct erspan_metadata */
366 __OVS_TUNNEL_KEY_ATTR_MAX 367 __OVS_TUNNEL_KEY_ATTR_MAX
367}; 368};
368 369
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 70c2b2ade048..0c79eac5e9b8 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -622,15 +622,19 @@
622 * safely. 622 * safely.
623 */ 623 */
624#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ 624#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
625#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
625#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */ 626#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
626#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */ 627#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
627#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* Atomic 64-bit compare */ 628#define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */
629#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
630#define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */
628#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */ 631#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
629#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ 632#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
630#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ 633#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
631#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ 634#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
632#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ 635#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
633#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ 636#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
637#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
634#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ 638#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
635#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ 639#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
636#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */ 640#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
@@ -966,26 +970,28 @@
966 970
967/* Downstream Port Containment */ 971/* Downstream Port Containment */
968#define PCI_EXP_DPC_CAP 4 /* DPC Capability */ 972#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
969#define PCI_EXP_DPC_IRQ 0x1f /* DPC Interrupt Message Number */ 973#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
970#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */ 974#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
971#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */ 975#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
972#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */ 976#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x0080 /* Software Triggering Supported */
973#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0xF00 /* RP PIO log size */ 977#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
974#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ 978#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
975 979
976#define PCI_EXP_DPC_CTL 6 /* DPC control */ 980#define PCI_EXP_DPC_CTL 6 /* DPC control */
977#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x02 /* Enable trigger on ERR_NONFATAL message */ 981#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
978#define PCI_EXP_DPC_CTL_INT_EN 0x08 /* DPC Interrupt Enable */ 982#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
979 983
980#define PCI_EXP_DPC_STATUS 8 /* DPC Status */ 984#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
981#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */ 985#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
982#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */ 986#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
983#define PCI_EXP_DPC_RP_BUSY 0x10 /* Root Port Busy */ 987#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
988#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
989#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
984 990
985#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ 991#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
986 992
987#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */ 993#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
988#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO MASK */ 994#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
989#define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */ 995#define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */
990#define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO SysError */ 996#define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO SysError */
991#define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */ 997#define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */
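
A kernel-side sketch using the Completion Timeout Disable bits added above: probe DEVCAP2 for support and, if present, set the disable bit in DEVCTL2. The pcie_capability_* accessors are the usual kernel helpers; dev is assumed to be a PCIe device.

#include <linux/errno.h>
#include <linux/pci.h>

static int disable_completion_timeout(struct pci_dev *dev)
{
	u32 cap;
	int ret;

	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
	if (ret)
		return ret;
	if (!(cap & PCI_EXP_DEVCAP2_COMP_TMOUT_DIS))
		return -EOPNOTSUPP;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
					PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
}
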
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b9a4953018ed..e0739a1aa4b2 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
418 __u16 __reserved_2; /* align to __u64 */ 418 __u16 __reserved_2; /* align to __u64 */
419}; 419};
420 420
421/*
422 * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
423 * to query bpf programs attached to the same perf tracepoint
424 * as the given perf event.
425 */
426struct perf_event_query_bpf {
427 /*
428 * The below ids array length
429 */
430 __u32 ids_len;
431 /*
432 * Set by the kernel to indicate the number of
433 * available programs
434 */
435 __u32 prog_cnt;
436 /*
437 * User provided buffer to store program ids
438 */
439 __u32 ids[0];
440};
441
421#define perf_flags(attr) (*(&(attr)->read_format + 1)) 442#define perf_flags(attr) (*(&(attr)->read_format + 1))
422 443
423/* 444/*
@@ -433,6 +454,7 @@ struct perf_event_attr {
433#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) 454#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
434#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) 455#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
435#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) 456#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
457#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
436 458
437enum perf_event_ioc_flags { 459enum perf_event_ioc_flags {
438 PERF_IOC_FLAG_GROUP = 1U << 0, 460 PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -612,9 +634,12 @@ struct perf_event_mmap_page {
612 */ 634 */
613#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12) 635#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
614/* 636/*
615 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on 637 * Following PERF_RECORD_MISC_* are used on different
616 * different events so can reuse the same bit position. 638 * events, so can reuse the same bit position:
617 * Ditto PERF_RECORD_MISC_SWITCH_OUT. 639 *
640 * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
641 * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
642 * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
618 */ 643 */
619#define PERF_RECORD_MISC_MMAP_DATA (1 << 13) 644#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
620#define PERF_RECORD_MISC_COMM_EXEC (1 << 13) 645#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
@@ -864,6 +889,7 @@ enum perf_event_type {
864 * struct perf_event_header header; 889 * struct perf_event_header header;
865 * u32 pid; 890 * u32 pid;
866 * u32 tid; 891 * u32 tid;
892 * struct sample_id sample_id;
867 * }; 893 * };
868 */ 894 */
869 PERF_RECORD_ITRACE_START = 12, 895 PERF_RECORD_ITRACE_START = 12,
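
Usage sketch for the new query command above: ask which BPF programs are attached to the tracepoint behind an existing perf event fd. The fd is assumed to come from perf_event_open(2), and the buffer size of 16 ids is arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int query_attached_bpf(int perf_fd)
{
	__u32 want = 16, i;
	struct perf_event_query_bpf *q;

	q = calloc(1, sizeof(*q) + want * sizeof(__u32));
	if (!q)
		return -1;
	q->ids_len = want;

	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q)) {
		free(q);		/* -E2BIG suggests ids_len was too small */
		return -1;
	}
	for (i = 0; i < q->prog_cnt && i < want; i++)
		printf("attached bpf prog id %u\n", q->ids[i]);
	free(q);
	return 0;
}
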
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 760e52a9640f..b3bcabe380da 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -88,6 +88,9 @@
88 (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) 88 (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
89#define PSCI_VERSION_MINOR(ver) \ 89#define PSCI_VERSION_MINOR(ver) \
90 ((ver) & PSCI_VERSION_MINOR_MASK) 90 ((ver) & PSCI_VERSION_MINOR_MASK)
91#define PSCI_VERSION(maj, min) \
92 ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
93 ((min) & PSCI_VERSION_MINOR_MASK))
91 94
92/* PSCI features decoding (>=1.0) */ 95/* PSCI features decoding (>=1.0) */
93#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1 96#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
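
Worked example for the new PSCI_VERSION() helper: composing version 1.1 and pulling the fields back out round-trips through the existing major/minor masks.

#include <stdio.h>
#include <linux/psci.h>

int main(void)
{
	unsigned int ver = PSCI_VERSION(1, 1);	/* 0x00010001 */

	printf("major %u minor %u\n",
	       PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
	return 0;
}
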
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index e3939e00980b..e46d82b91166 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -66,6 +66,12 @@ struct ptrace_peeksiginfo_args {
66#define PTRACE_SETSIGMASK 0x420b 66#define PTRACE_SETSIGMASK 0x420b
67 67
68#define PTRACE_SECCOMP_GET_FILTER 0x420c 68#define PTRACE_SECCOMP_GET_FILTER 0x420c
69#define PTRACE_SECCOMP_GET_METADATA 0x420d
70
71struct seccomp_metadata {
72 unsigned long filter_off; /* Input: which filter */
73 unsigned int flags; /* Output: filter's flags */
74};
69 75
70/* Read signals from a shared (process wide) queue */ 76/* Read signals from a shared (process wide) queue */
71#define PTRACE_PEEKSIGINFO_SHARED (1 << 0) 77#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
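
A tracer-side sketch of the new request: walk the seccomp filters of a stopped tracee with PTRACE_SECCOMP_GET_METADATA, which takes the structure size in the addr argument and returns the bytes copied. The tracee is assumed to be ptrace-attached and stopped already.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <linux/ptrace.h>

static void dump_seccomp_filters(pid_t pid)
{
	struct seccomp_metadata md;
	unsigned long i;

	for (i = 0; ; i++) {
		md.filter_off = i;
		if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid,
			   sizeof(md), &md) < 0)
			break;	/* ENOENT once filter_off passes the last filter */
		printf("filter %lu flags %#lx\n", i, (unsigned long)md.flags);
	}
}
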
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 843e29aa3cac..9b15005955fa 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -541,9 +541,19 @@ struct tcmsg {
541 int tcm_ifindex; 541 int tcm_ifindex;
542 __u32 tcm_handle; 542 __u32 tcm_handle;
543 __u32 tcm_parent; 543 __u32 tcm_parent;
544/* tcm_block_index is used instead of tcm_parent
545 * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK
546 */
547#define tcm_block_index tcm_parent
544 __u32 tcm_info; 548 __u32 tcm_info;
545}; 549};
546 550
551/* For manipulation of filters in shared block, tcm_ifindex is set to
552 * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased to tcm_block_index
553 * which is the block index.
554 */
555#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
556
547enum { 557enum {
548 TCA_UNSPEC, 558 TCA_UNSPEC,
549 TCA_KIND, 559 TCA_KIND,
@@ -558,6 +568,8 @@ enum {
558 TCA_DUMP_INVISIBLE, 568 TCA_DUMP_INVISIBLE,
559 TCA_CHAIN, 569 TCA_CHAIN,
560 TCA_HW_OFFLOAD, 570 TCA_HW_OFFLOAD,
571 TCA_INGRESS_BLOCK,
572 TCA_EGRESS_BLOCK,
561 __TCA_MAX 573 __TCA_MAX
562}; 574};
563 575
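
A sketch of how a filter request addresses a shared block rather than a device with the additions above: set tcm_ifindex to TCM_IFINDEX_MAGIC_BLOCK, and the field that used to be tcm_parent then carries the block index through the tcm_block_index alias. Only the header fill is shown; attributes and the netlink send are omitted.

#include <string.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

static void fill_block_tcmsg(struct tcmsg *tcm, __u32 block_index)
{
	memset(tcm, 0, sizeof(*tcm));
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
	tcm->tcm_block_index = block_index;	/* aliases tcm_parent */
}
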
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 30a9e51bbb1e..22627f80063e 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -49,5 +49,10 @@
49 */ 49 */
50#define SCHED_FLAG_RESET_ON_FORK 0x01 50#define SCHED_FLAG_RESET_ON_FORK 0x01
51#define SCHED_FLAG_RECLAIM 0x02 51#define SCHED_FLAG_RECLAIM 0x02
52#define SCHED_FLAG_DL_OVERRUN 0x04
53
54#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
55 SCHED_FLAG_RECLAIM | \
56 SCHED_FLAG_DL_OVERRUN)
52 57
53#endif /* _UAPI_LINUX_SCHED_H */ 58#endif /* _UAPI_LINUX_SCHED_H */
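
Usage sketch for the new SCHED_FLAG_DL_OVERRUN flag: run the calling thread under SCHED_DEADLINE and request a SIGXCPU when its runtime is overrun. sched_setattr(2) has no glibc wrapper, struct sched_attr is declared locally to mirror the kernel layout, and the call needs the appropriate privileges.

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/sched.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_DL_OVERRUN,
		.sched_runtime	=  10 * 1000 * 1000,	/* 10 ms */
		.sched_deadline	=  30 * 1000 * 1000,	/* 30 ms */
		.sched_period	= 100 * 1000 * 1000,	/* 100 ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		perror("sched_setattr");
	return 0;
}
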
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d9adab32dbee..4c4db14786bd 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -125,6 +125,7 @@ typedef __s32 sctp_assoc_t;
125#define SCTP_SOCKOPT_PEELOFF_FLAGS 122 125#define SCTP_SOCKOPT_PEELOFF_FLAGS 122
126#define SCTP_STREAM_SCHEDULER 123 126#define SCTP_STREAM_SCHEDULER 123
127#define SCTP_STREAM_SCHEDULER_VALUE 124 127#define SCTP_STREAM_SCHEDULER_VALUE 124
128#define SCTP_INTERLEAVING_SUPPORTED 125
128 129
129/* PR-SCTP policies */ 130/* PR-SCTP policies */
130#define SCTP_PR_SCTP_NONE 0x0000 131#define SCTP_PR_SCTP_NONE 0x0000
@@ -459,6 +460,8 @@ struct sctp_pdapi_event {
459 __u32 pdapi_length; 460 __u32 pdapi_length;
460 __u32 pdapi_indication; 461 __u32 pdapi_indication;
461 sctp_assoc_t pdapi_assoc_id; 462 sctp_assoc_t pdapi_assoc_id;
463 __u32 pdapi_stream;
464 __u32 pdapi_seq;
462}; 465};
463 466
464enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, }; 467enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
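
Usage sketch for the new socket option above: ask a fresh SCTP socket to negotiate user message interleaving (I-DATA chunks) via SCTP_INTERLEAVING_SUPPORTED, carried in the usual struct sctp_assoc_value. The kernel may additionally require fragment interleaving to be at level 2 before it accepts this.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

int main(void)
{
	struct sctp_assoc_value val = { .assoc_id = 0, .assoc_value = 1 };
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
		       &val, sizeof(val)))
		perror("SCTP_INTERLEAVING_SUPPORTED");
	return 0;
}
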
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index 75df44373034..4f4daf8db954 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -88,7 +88,8 @@ struct switchtec_ioctl_event_summary {
88#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26 88#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26
89#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27 89#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
90#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28 90#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
91#define SWITCHTEC_IOCTL_MAX_EVENTS 29 91#define SWITCHTEC_IOCTL_EVENT_GFMS 29
92#define SWITCHTEC_IOCTL_MAX_EVENTS 30
92 93
93#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1 94#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
94#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2 95#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 688782e90140..4b9eb064d7e7 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -50,6 +50,7 @@
50 50
51#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */ 51#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
52#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */ 52#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */
53#define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */
53 54
54/* 55/*
55 * TEE Implementation ID 56 * TEE Implementation ID
@@ -154,6 +155,13 @@ struct tee_ioctl_buf_data {
154 */ 155 */
155#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff 156#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
156 157
158/* Meta parameter carrying extra information about the message. */
159#define TEE_IOCTL_PARAM_ATTR_META 0x100
160
161/* Mask of all known attr bits */
162#define TEE_IOCTL_PARAM_ATTR_MASK \
163 (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META)
164
157/* 165/*
158 * Matches TEEC_LOGIN_* in GP TEE Client API 166 * Matches TEEC_LOGIN_* in GP TEE Client API
159 * Are only defined for GP compliant TEEs 167 * Are only defined for GP compliant TEEs
@@ -332,6 +340,35 @@ struct tee_iocl_supp_send_arg {
332#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \ 340#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
333 struct tee_ioctl_buf_data) 341 struct tee_ioctl_buf_data)
334 342
343/**
344 * struct tee_ioctl_shm_register_data - Shared memory register argument
345 * @addr: [in] Start address of shared memory to register
346 * @length: [in/out] Length of shared memory to register
347 * @flags: [in/out] Flags to/from registration.
348 * @id: [out] Identifier of the shared memory
349 *
350 * The flags field should currently be zero as input. Updated by the call
351 * with actual flags as defined by TEE_IOCTL_SHM_* above.
352 * This structure is used as argument for TEE_IOC_SHM_REGISTER below.
353 */
354struct tee_ioctl_shm_register_data {
355 __u64 addr;
356 __u64 length;
357 __u32 flags;
358 __s32 id;
359};
360
361/**
362 * TEE_IOC_SHM_REGISTER - Register shared memory argument
363 *
364 * Registers shared memory between the user space process and secure OS.
365 *
366 * Returns a file descriptor on success or < 0 on failure
367 *
 368 * The shared memory is unregistered when the descriptor is closed.
369 */
370#define TEE_IOC_SHM_REGISTER _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 9, \
371 struct tee_ioctl_shm_register_data)
335/* 372/*
336 * Five syscalls are used when communicating with the TEE driver. 373 * Five syscalls are used when communicating with the TEE driver.
337 * open(): opens the device associated with the driver 374 * open(): opens the device associated with the driver
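
Usage sketch for the new registration ioctl above: hand one page of this process to the TEE driver with TEE_IOC_SHM_REGISTER and keep the returned descriptor open for as long as the registration should live. The /dev/tee0 path and a driver advertising TEE_GEN_CAP_REG_MEM are assumptions.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_shm_register_data data = { 0 };
	long page = sysconf(_SC_PAGESIZE);
	void *buf = NULL;
	int fd, shm_fd;

	if (posix_memalign(&buf, page, page))
		return 1;
	fd = open("/dev/tee0", O_RDWR);
	if (fd < 0)
		return 1;

	data.addr = (uintptr_t)buf;
	data.length = page;
	shm_fd = ioctl(fd, TEE_IOC_SHM_REGISTER, &data);
	if (shm_fd < 0)
		perror("TEE_IOC_SHM_REGISTER");
	else
		printf("registered shm id %d\n", data.id);

	/* The registration is dropped once shm_fd is closed. */
	return 0;
}
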
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 35f79d1f8c3a..14bacc7e6cef 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -117,10 +117,9 @@ static inline unsigned int tipc_node(__u32 addr)
117/* 117/*
118 * Publication scopes when binding port names and port name sequences 118 * Publication scopes when binding port names and port name sequences
119 */ 119 */
120 120#define TIPC_ZONE_SCOPE 1
121#define TIPC_ZONE_SCOPE 1 121#define TIPC_CLUSTER_SCOPE 2
122#define TIPC_CLUSTER_SCOPE 2 122#define TIPC_NODE_SCOPE 3
123#define TIPC_NODE_SCOPE 3
124 123
125/* 124/*
126 * Limiting values for messages 125 * Limiting values for messages
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index e3d1d0c78f3c..cd4f0b897a48 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -49,5 +49,11 @@ typedef __u32 __bitwise __wsum;
49#define __aligned_be64 __be64 __attribute__((aligned(8))) 49#define __aligned_be64 __be64 __attribute__((aligned(8)))
50#define __aligned_le64 __le64 __attribute__((aligned(8))) 50#define __aligned_le64 __le64 __attribute__((aligned(8)))
51 51
52#ifdef __CHECK_POLL
53typedef unsigned __bitwise __poll_t;
54#else
55typedef unsigned __poll_t;
56#endif
57
52#endif /* __ASSEMBLY__ */ 58#endif /* __ASSEMBLY__ */
53#endif /* _UAPI_LINUX_TYPES_H */ 59#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index c4c79aa331bd..d5a5caec8fbc 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -1077,9 +1077,9 @@ struct usb_ptm_cap_descriptor {
1077#define USB_DT_USB_PTM_ID_SIZE 3 1077#define USB_DT_USB_PTM_ID_SIZE 3
1078/* 1078/*
1079 * The size of the descriptor for the Sublink Speed Attribute Count 1079 * The size of the descriptor for the Sublink Speed Attribute Count
1080 * (SSAC) specified in bmAttributes[4:0]. 1080 * (SSAC) specified in bmAttributes[4:0]. SSAC is zero-based
1081 */ 1081 */
1082#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4) 1082#define USB_DT_USB_SSP_CAP_SIZE(ssac) (12 + (ssac + 1) * 4)
1083 1083
1084/*-------------------------------------------------------------------------*/ 1084/*-------------------------------------------------------------------------*/
1085 1085
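
As a quick check of the reworked macro above: the SuperSpeedPlus capability descriptor is a 12-byte fixed part plus one 4-byte Sublink Speed Attribute for each of the SSAC + 1 attributes (SSAC in bmAttributes[4:0] is zero-based), so SSAC = 3 gives USB_DT_USB_SSP_CAP_SIZE(3) = 12 + (3 + 1) * 4 = 28 bytes. Numerically this matches the old 16 + ssac * 4 expression; the rewrite only makes the zero-based attribute count explicit.
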
diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h
index 70ed5338d447..964e87217be4 100644
--- a/include/uapi/linux/usbdevice_fs.h
+++ b/include/uapi/linux/usbdevice_fs.h
@@ -79,7 +79,7 @@ struct usbdevfs_connectinfo {
79#define USBDEVFS_URB_SHORT_NOT_OK 0x01 79#define USBDEVFS_URB_SHORT_NOT_OK 0x01
80#define USBDEVFS_URB_ISO_ASAP 0x02 80#define USBDEVFS_URB_ISO_ASAP 0x02
81#define USBDEVFS_URB_BULK_CONTINUATION 0x04 81#define USBDEVFS_URB_BULK_CONTINUATION 0x04
82#define USBDEVFS_URB_NO_FSBR 0x20 82#define USBDEVFS_URB_NO_FSBR 0x20 /* Not used */
83#define USBDEVFS_URB_ZERO_PACKET 0x40 83#define USBDEVFS_URB_ZERO_PACKET 0x40
84#define USBDEVFS_URB_NO_INTERRUPT 0x80 84#define USBDEVFS_URB_NO_INTERRUPT 0x80
85 85
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index 5c04130bb524..e5a7eecef7c3 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -19,7 +19,6 @@
19#define _UAPI_LINUX_UUID_H_ 19#define _UAPI_LINUX_UUID_H_
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/string.h>
23 22
24typedef struct { 23typedef struct {
25 __u8 b[16]; 24 __u8 b[16];
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index e80b4655d8cd..020714d2c5bd 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -68,4 +68,30 @@ struct uvc_xu_control_query {
68#define UVCIOC_CTRL_MAP _IOWR('u', 0x20, struct uvc_xu_control_mapping) 68#define UVCIOC_CTRL_MAP _IOWR('u', 0x20, struct uvc_xu_control_mapping)
69#define UVCIOC_CTRL_QUERY _IOWR('u', 0x21, struct uvc_xu_control_query) 69#define UVCIOC_CTRL_QUERY _IOWR('u', 0x21, struct uvc_xu_control_query)
70 70
71/*
72 * Metadata node
73 */
74
75/**
76 * struct uvc_meta_buf - metadata buffer building block
77 * @ns - system timestamp of the payload in nanoseconds
78 * @sof - USB Frame Number
79 * @length - length of the payload header
80 * @flags - payload header flags
81 * @buf - optional device-specific header data
82 *
83 * UVC metadata nodes fill buffers with possibly multiple instances of this
84 * struct. The first two fields are added by the driver, they can be used for
85 * clock synchronisation. The rest is an exact copy of a UVC payload header.
86 * Only complete objects with complete buffers are included. Therefore it's
 87 * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large.
88 */
89struct uvc_meta_buf {
90 __u64 ns;
91 __u16 sof;
92 __u8 length;
93 __u8 flags;
94 __u8 buf[];
95} __packed;
96
71#endif 97#endif
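
A parsing sketch for the metadata format described above: walk a buffer dequeued from a UVC metadata video node, where each block is the driver-added timestamp and SOF followed by meta->length bytes of UVC payload header (whose first two bytes repeat length and flags). bytesused would come from the dequeued v4l2_buffer.

#include <stdio.h>
#include <stddef.h>
#include <linux/uvcvideo.h>

static void parse_meta(const void *data, size_t bytesused)
{
	const unsigned char *p = data;
	const unsigned char *end = p + bytesused;

	while ((size_t)(end - p) >= offsetof(struct uvc_meta_buf, buf)) {
		const struct uvc_meta_buf *meta = (const void *)p;
		size_t block = offsetof(struct uvc_meta_buf, length) + meta->length;

		if (block < offsetof(struct uvc_meta_buf, buf) ||
		    block > (size_t)(end - p))
			break;
		printf("ts %llu ns, SOF %u, %u header bytes\n",
		       (unsigned long long)meta->ns, meta->sof, meta->length);
		p += block;
	}
}
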
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a692623e0236..cbbb750d87d1 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -67,8 +67,8 @@
67/* User-class control IDs */ 67/* User-class control IDs */
68 68
69#define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900) 69#define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
70#define V4L2_CID_USER_BASE V4L2_CID_BASE 70#define V4L2_CID_USER_BASE V4L2_CID_BASE
71#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1) 71#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1)
72#define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0) 72#define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0)
73#define V4L2_CID_CONTRAST (V4L2_CID_BASE+1) 73#define V4L2_CID_CONTRAST (V4L2_CID_BASE+1)
74#define V4L2_CID_SATURATION (V4L2_CID_BASE+2) 74#define V4L2_CID_SATURATION (V4L2_CID_BASE+2)
@@ -102,7 +102,7 @@ enum v4l2_power_line_frequency {
102#define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25) 102#define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25)
103#define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26) 103#define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26)
104#define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27) 104#define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27)
105#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) 105#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
106#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) 106#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29)
107#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) 107#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
108#define V4L2_CID_COLORFX (V4L2_CID_BASE+31) 108#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
@@ -194,11 +194,11 @@ enum v4l2_colorfx {
194/* The MPEG controls are applicable to all codec controls 194/* The MPEG controls are applicable to all codec controls
195 * and the 'MPEG' part of the define is historical */ 195 * and the 'MPEG' part of the define is historical */
196 196
197#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) 197#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
198#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1) 198#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1)
199 199
200/* MPEG streams, specific to multiplexed streams */ 200/* MPEG streams, specific to multiplexed streams */
201#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0) 201#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0)
202enum v4l2_mpeg_stream_type { 202enum v4l2_mpeg_stream_type {
203 V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */ 203 V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */
204 V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */ 204 V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */
@@ -207,26 +207,26 @@ enum v4l2_mpeg_stream_type {
207 V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */ 207 V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */
208 V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */ 208 V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
209}; 209};
210#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1) 210#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1)
211#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2) 211#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2)
212#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3) 212#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3)
213#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4) 213#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4)
214#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5) 214#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5)
215#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6) 215#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6)
216#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7) 216#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7)
217enum v4l2_mpeg_stream_vbi_fmt { 217enum v4l2_mpeg_stream_vbi_fmt {
218 V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */ 218 V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
219 V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */ 219 V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
220}; 220};
221 221
222/* MPEG audio controls specific to multiplexed streams */ 222/* MPEG audio controls specific to multiplexed streams */
223#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100) 223#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100)
224enum v4l2_mpeg_audio_sampling_freq { 224enum v4l2_mpeg_audio_sampling_freq {
225 V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0, 225 V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
226 V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1, 226 V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
227 V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2, 227 V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
228}; 228};
229#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101) 229#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101)
230enum v4l2_mpeg_audio_encoding { 230enum v4l2_mpeg_audio_encoding {
231 V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0, 231 V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
232 V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1, 232 V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
@@ -234,7 +234,7 @@ enum v4l2_mpeg_audio_encoding {
234 V4L2_MPEG_AUDIO_ENCODING_AAC = 3, 234 V4L2_MPEG_AUDIO_ENCODING_AAC = 3,
235 V4L2_MPEG_AUDIO_ENCODING_AC3 = 4, 235 V4L2_MPEG_AUDIO_ENCODING_AC3 = 4,
236}; 236};
237#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102) 237#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102)
238enum v4l2_mpeg_audio_l1_bitrate { 238enum v4l2_mpeg_audio_l1_bitrate {
239 V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0, 239 V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0,
240 V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1, 240 V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1,
@@ -251,7 +251,7 @@ enum v4l2_mpeg_audio_l1_bitrate {
251 V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12, 251 V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
252 V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13, 252 V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
253}; 253};
254#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103) 254#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103)
255enum v4l2_mpeg_audio_l2_bitrate { 255enum v4l2_mpeg_audio_l2_bitrate {
256 V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0, 256 V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0,
257 V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1, 257 V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1,
@@ -268,7 +268,7 @@ enum v4l2_mpeg_audio_l2_bitrate {
268 V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12, 268 V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
269 V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13, 269 V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
270}; 270};
271#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104) 271#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104)
272enum v4l2_mpeg_audio_l3_bitrate { 272enum v4l2_mpeg_audio_l3_bitrate {
273 V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0, 273 V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0,
274 V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1, 274 V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1,
@@ -285,32 +285,32 @@ enum v4l2_mpeg_audio_l3_bitrate {
285 V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12, 285 V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
286 V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13, 286 V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
287}; 287};
288#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105) 288#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105)
289enum v4l2_mpeg_audio_mode { 289enum v4l2_mpeg_audio_mode {
290 V4L2_MPEG_AUDIO_MODE_STEREO = 0, 290 V4L2_MPEG_AUDIO_MODE_STEREO = 0,
291 V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1, 291 V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
292 V4L2_MPEG_AUDIO_MODE_DUAL = 2, 292 V4L2_MPEG_AUDIO_MODE_DUAL = 2,
293 V4L2_MPEG_AUDIO_MODE_MONO = 3, 293 V4L2_MPEG_AUDIO_MODE_MONO = 3,
294}; 294};
295#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106) 295#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106)
296enum v4l2_mpeg_audio_mode_extension { 296enum v4l2_mpeg_audio_mode_extension {
297 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0, 297 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0,
298 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1, 298 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1,
299 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2, 299 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
300 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3, 300 V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
301}; 301};
302#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107) 302#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107)
303enum v4l2_mpeg_audio_emphasis { 303enum v4l2_mpeg_audio_emphasis {
304 V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0, 304 V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0,
305 V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1, 305 V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
306 V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2, 306 V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2,
307}; 307};
308#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108) 308#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108)
309enum v4l2_mpeg_audio_crc { 309enum v4l2_mpeg_audio_crc {
310 V4L2_MPEG_AUDIO_CRC_NONE = 0, 310 V4L2_MPEG_AUDIO_CRC_NONE = 0,
311 V4L2_MPEG_AUDIO_CRC_CRC16 = 1, 311 V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
312}; 312};
313#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109) 313#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109)
314#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110) 314#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110)
315#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111) 315#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111)
316enum v4l2_mpeg_audio_ac3_bitrate { 316enum v4l2_mpeg_audio_ac3_bitrate {
@@ -346,33 +346,33 @@ enum v4l2_mpeg_audio_dec_playback {
346#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113) 346#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113)
347 347
348/* MPEG video controls specific to multiplexed streams */ 348/* MPEG video controls specific to multiplexed streams */
349#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200) 349#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200)
350enum v4l2_mpeg_video_encoding { 350enum v4l2_mpeg_video_encoding {
351 V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0, 351 V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0,
352 V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1, 352 V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1,
353 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2, 353 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
354}; 354};
355#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201) 355#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201)
356enum v4l2_mpeg_video_aspect { 356enum v4l2_mpeg_video_aspect {
357 V4L2_MPEG_VIDEO_ASPECT_1x1 = 0, 357 V4L2_MPEG_VIDEO_ASPECT_1x1 = 0,
358 V4L2_MPEG_VIDEO_ASPECT_4x3 = 1, 358 V4L2_MPEG_VIDEO_ASPECT_4x3 = 1,
359 V4L2_MPEG_VIDEO_ASPECT_16x9 = 2, 359 V4L2_MPEG_VIDEO_ASPECT_16x9 = 2,
360 V4L2_MPEG_VIDEO_ASPECT_221x100 = 3, 360 V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
361}; 361};
362#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202) 362#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202)
363#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203) 363#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203)
364#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204) 364#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204)
365#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205) 365#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205)
366#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206) 366#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206)
367enum v4l2_mpeg_video_bitrate_mode { 367enum v4l2_mpeg_video_bitrate_mode {
368 V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0, 368 V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
369 V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1, 369 V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
370}; 370};
371#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207) 371#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207)
372#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208) 372#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208)
373#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209) 373#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209)
374#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) 374#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
375#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) 375#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
376#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212) 376#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212)
377#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213) 377#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213)
378#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214) 378#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214)
@@ -590,14 +590,14 @@ enum v4l2_vp8_golden_frame_sel {
590#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511) 590#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511)
591 591
592/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ 592/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
593#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) 593#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
594#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) 594#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
595enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { 595enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
596 V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0, 596 V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
597 V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1, 597 V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1,
598}; 598};
599#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1) 599#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1)
600#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2) 600#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2)
601enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type { 601enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
602 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0, 602 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0,
603 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, 603 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
@@ -605,18 +605,18 @@ enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
605 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3, 605 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3,
606 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4, 606 V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
607}; 607};
608#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3) 608#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3)
609enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type { 609enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
610 V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0, 610 V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0,
611 V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, 611 V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
612}; 612};
613#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4) 613#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4)
614enum v4l2_mpeg_cx2341x_video_temporal_filter_mode { 614enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
615 V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0, 615 V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
616 V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1, 616 V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1,
617}; 617};
618#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5) 618#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5)
619#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6) 619#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6)
620enum v4l2_mpeg_cx2341x_video_median_filter_type { 620enum v4l2_mpeg_cx2341x_video_median_filter_type {
621 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0, 621 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0,
622 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1, 622 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1,
@@ -624,11 +624,11 @@ enum v4l2_mpeg_cx2341x_video_median_filter_type {
624 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3, 624 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
625 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4, 625 V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4,
626}; 626};
627#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7) 627#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7)
628#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8) 628#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8)
629#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9) 629#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9)
630#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10) 630#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10)
631#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11) 631#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11)
632 632
633/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */ 633/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
634#define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100) 634#define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100)
@@ -660,8 +660,8 @@ enum v4l2_mpeg_mfc51_video_force_frame_type {
660 660
661/* Camera class control IDs */ 661/* Camera class control IDs */
662 662
663#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900) 663#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
664#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1) 664#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
665 665
666#define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1) 666#define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1)
667enum v4l2_exposure_auto_type { 667enum v4l2_exposure_auto_type {
diff --git a/include/uapi/linux/vbox_err.h b/include/uapi/linux/vbox_err.h
new file mode 100644
index 000000000000..7eae536ff1e6
--- /dev/null
+++ b/include/uapi/linux/vbox_err.h
@@ -0,0 +1,151 @@
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2017 Oracle Corporation */
3
4#ifndef __UAPI_VBOX_ERR_H__
5#define __UAPI_VBOX_ERR_H__
6
7#define VINF_SUCCESS 0
8#define VERR_GENERAL_FAILURE (-1)
9#define VERR_INVALID_PARAMETER (-2)
10#define VERR_INVALID_MAGIC (-3)
11#define VERR_INVALID_HANDLE (-4)
12#define VERR_LOCK_FAILED (-5)
13#define VERR_INVALID_POINTER (-6)
14#define VERR_IDT_FAILED (-7)
15#define VERR_NO_MEMORY (-8)
16#define VERR_ALREADY_LOADED (-9)
17#define VERR_PERMISSION_DENIED (-10)
18#define VERR_VERSION_MISMATCH (-11)
19#define VERR_NOT_IMPLEMENTED (-12)
20#define VERR_INVALID_FLAGS (-13)
21
22#define VERR_NOT_EQUAL (-18)
23#define VERR_NOT_SYMLINK (-19)
24#define VERR_NO_TMP_MEMORY (-20)
25#define VERR_INVALID_FMODE (-21)
26#define VERR_WRONG_ORDER (-22)
27#define VERR_NO_TLS_FOR_SELF (-23)
28#define VERR_FAILED_TO_SET_SELF_TLS (-24)
29#define VERR_NO_CONT_MEMORY (-26)
30#define VERR_NO_PAGE_MEMORY (-27)
31#define VERR_THREAD_IS_DEAD (-29)
32#define VERR_THREAD_NOT_WAITABLE (-30)
33#define VERR_PAGE_TABLE_NOT_PRESENT (-31)
34#define VERR_INVALID_CONTEXT (-32)
35#define VERR_TIMER_BUSY (-33)
36#define VERR_ADDRESS_CONFLICT (-34)
37#define VERR_UNRESOLVED_ERROR (-35)
38#define VERR_INVALID_FUNCTION (-36)
39#define VERR_NOT_SUPPORTED (-37)
40#define VERR_ACCESS_DENIED (-38)
41#define VERR_INTERRUPTED (-39)
42#define VERR_TIMEOUT (-40)
43#define VERR_BUFFER_OVERFLOW (-41)
44#define VERR_TOO_MUCH_DATA (-42)
45#define VERR_MAX_THRDS_REACHED (-43)
46#define VERR_MAX_PROCS_REACHED (-44)
47#define VERR_SIGNAL_REFUSED (-45)
48#define VERR_SIGNAL_PENDING (-46)
49#define VERR_SIGNAL_INVALID (-47)
50#define VERR_STATE_CHANGED (-48)
51#define VERR_INVALID_UUID_FORMAT (-49)
52#define VERR_PROCESS_NOT_FOUND (-50)
53#define VERR_PROCESS_RUNNING (-51)
54#define VERR_TRY_AGAIN (-52)
55#define VERR_PARSE_ERROR (-53)
56#define VERR_OUT_OF_RANGE (-54)
57#define VERR_NUMBER_TOO_BIG (-55)
58#define VERR_NO_DIGITS (-56)
59#define VERR_NEGATIVE_UNSIGNED (-57)
60#define VERR_NO_TRANSLATION (-58)
61
62#define VERR_NOT_FOUND (-78)
63#define VERR_INVALID_STATE (-79)
64#define VERR_OUT_OF_RESOURCES (-80)
65
66#define VERR_FILE_NOT_FOUND (-102)
67#define VERR_PATH_NOT_FOUND (-103)
68#define VERR_INVALID_NAME (-104)
69#define VERR_ALREADY_EXISTS (-105)
70#define VERR_TOO_MANY_OPEN_FILES (-106)
71#define VERR_SEEK (-107)
72#define VERR_NEGATIVE_SEEK (-108)
73#define VERR_SEEK_ON_DEVICE (-109)
74#define VERR_EOF (-110)
75#define VERR_READ_ERROR (-111)
76#define VERR_WRITE_ERROR (-112)
77#define VERR_WRITE_PROTECT (-113)
78#define VERR_SHARING_VIOLATION (-114)
79#define VERR_FILE_LOCK_FAILED (-115)
80#define VERR_FILE_LOCK_VIOLATION (-116)
81#define VERR_CANT_CREATE (-117)
82#define VERR_CANT_DELETE_DIRECTORY (-118)
83#define VERR_NOT_SAME_DEVICE (-119)
84#define VERR_FILENAME_TOO_LONG (-120)
85#define VERR_MEDIA_NOT_PRESENT (-121)
86#define VERR_MEDIA_NOT_RECOGNIZED (-122)
87#define VERR_FILE_NOT_LOCKED (-123)
88#define VERR_FILE_LOCK_LOST (-124)
89#define VERR_DIR_NOT_EMPTY (-125)
90#define VERR_NOT_A_DIRECTORY (-126)
91#define VERR_IS_A_DIRECTORY (-127)
92#define VERR_FILE_TOO_BIG (-128)
93
94#define VERR_NET_IO_ERROR (-400)
95#define VERR_NET_OUT_OF_RESOURCES (-401)
96#define VERR_NET_HOST_NOT_FOUND (-402)
97#define VERR_NET_PATH_NOT_FOUND (-403)
98#define VERR_NET_PRINT_ERROR (-404)
99#define VERR_NET_NO_NETWORK (-405)
100#define VERR_NET_NOT_UNIQUE_NAME (-406)
101
102#define VERR_NET_IN_PROGRESS (-436)
103#define VERR_NET_ALREADY_IN_PROGRESS (-437)
104#define VERR_NET_NOT_SOCKET (-438)
105#define VERR_NET_DEST_ADDRESS_REQUIRED (-439)
106#define VERR_NET_MSG_SIZE (-440)
107#define VERR_NET_PROTOCOL_TYPE (-441)
108#define VERR_NET_PROTOCOL_NOT_AVAILABLE (-442)
109#define VERR_NET_PROTOCOL_NOT_SUPPORTED (-443)
110#define VERR_NET_SOCKET_TYPE_NOT_SUPPORTED (-444)
111#define VERR_NET_OPERATION_NOT_SUPPORTED (-445)
112#define VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED (-446)
113#define VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED (-447)
114#define VERR_NET_ADDRESS_IN_USE (-448)
115#define VERR_NET_ADDRESS_NOT_AVAILABLE (-449)
116#define VERR_NET_DOWN (-450)
117#define VERR_NET_UNREACHABLE (-451)
118#define VERR_NET_CONNECTION_RESET (-452)
119#define VERR_NET_CONNECTION_ABORTED (-453)
120#define VERR_NET_CONNECTION_RESET_BY_PEER (-454)
121#define VERR_NET_NO_BUFFER_SPACE (-455)
122#define VERR_NET_ALREADY_CONNECTED (-456)
123#define VERR_NET_NOT_CONNECTED (-457)
124#define VERR_NET_SHUTDOWN (-458)
125#define VERR_NET_TOO_MANY_REFERENCES (-459)
126#define VERR_NET_CONNECTION_TIMED_OUT (-460)
127#define VERR_NET_CONNECTION_REFUSED (-461)
128#define VERR_NET_HOST_DOWN (-464)
129#define VERR_NET_HOST_UNREACHABLE (-465)
130#define VERR_NET_PROTOCOL_ERROR (-466)
131#define VERR_NET_INCOMPLETE_TX_PACKET (-467)
132
133/* misc. unsorted codes */
134#define VERR_RESOURCE_BUSY (-138)
135#define VERR_DISK_FULL (-152)
136#define VERR_TOO_MANY_SYMLINKS (-156)
137#define VERR_NO_MORE_FILES (-201)
138#define VERR_INTERNAL_ERROR (-225)
139#define VERR_INTERNAL_ERROR_2 (-226)
140#define VERR_INTERNAL_ERROR_3 (-227)
141#define VERR_INTERNAL_ERROR_4 (-228)
142#define VERR_DEV_IO_ERROR (-250)
143#define VERR_IO_BAD_LENGTH (-255)
144#define VERR_BROKEN_PIPE (-301)
145#define VERR_NO_DATA (-304)
146#define VERR_SEM_DESTROYED (-363)
147#define VERR_DEADLOCK (-365)
148#define VERR_BAD_EXE_FORMAT (-608)
149#define VINF_HGCM_ASYNC_EXECUTE (2903)
150
151#endif
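The status codes above follow the VirtualBox convention: zero and positive values (VINF_*) indicate success or informational status, while negative values (VERR_*) are errors. Below is a minimal sketch of how guest-side code might translate a handful of these codes into Linux errno values; the vbg_status_to_errno() name and the particular mapping chosen are illustrative assumptions, not part of this header.

	/* Sketch only: map a few VBox status codes onto errno values. */
	#include <errno.h>
	#include <linux/vbox_err.h>

	static int vbg_status_to_errno(int rc)	/* hypothetical helper */
	{
		if (rc >= 0)			/* VINF_SUCCESS and other informational codes */
			return 0;

		switch (rc) {
		case VERR_NO_MEMORY:		return -ENOMEM;
		case VERR_PERMISSION_DENIED:	return -EPERM;
		case VERR_NOT_SUPPORTED:	return -EOPNOTSUPP;
		case VERR_TIMEOUT:		return -ETIMEDOUT;
		case VERR_INTERRUPTED:		return -EINTR;
		default:			return -EIO;
		}
	}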
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
new file mode 100644
index 000000000000..0e68024f36c7
--- /dev/null
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -0,0 +1,226 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2/*
3 * Virtual Device for Guest <-> VMM/Host communication, type definitions
4 * which are also used for the vboxguest ioctl interface / by vboxsf
5 *
6 * Copyright (C) 2006-2016 Oracle Corporation
7 */
8
9#ifndef __UAPI_VBOX_VMMDEV_TYPES_H__
10#define __UAPI_VBOX_VMMDEV_TYPES_H__
11
12#include <asm/bitsperlong.h>
13#include <linux/types.h>
14
15/*
 16 * We cannot use Linux's compiletime_assert here because it can only be used
 17 * inside a function. Use a typedef to a char array with a negative size instead.
18 */
19#define VMMDEV_ASSERT_SIZE(type, size) \
20 typedef char type ## _asrt_size[1 - 2*!!(sizeof(struct type) != (size))]
21
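The macro above turns a size mismatch into a compile error: the typedef'd char array gets size 1 when the sizes match and size -1 when they do not. A small sketch under the assumption of the common ABI where two __u32 members occupy 8 bytes; struct demo is a made-up example used only to show the mechanism.

	#include <linux/vbox_vmmdev_types.h>

	struct demo {			/* hypothetical 8-byte structure */
		__u32 a;
		__u32 b;
	};
	VMMDEV_ASSERT_SIZE(demo, 8);	/* compiles: char array of size 1 */
	/* VMMDEV_ASSERT_SIZE(demo, 12);   would not compile: char array of size -1 */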
22/** enum vmmdev_request_type - VMMDev request types. */
23enum vmmdev_request_type {
24 VMMDEVREQ_INVALID_REQUEST = 0,
25 VMMDEVREQ_GET_MOUSE_STATUS = 1,
26 VMMDEVREQ_SET_MOUSE_STATUS = 2,
27 VMMDEVREQ_SET_POINTER_SHAPE = 3,
28 VMMDEVREQ_GET_HOST_VERSION = 4,
29 VMMDEVREQ_IDLE = 5,
30 VMMDEVREQ_GET_HOST_TIME = 10,
31 VMMDEVREQ_GET_HYPERVISOR_INFO = 20,
32 VMMDEVREQ_SET_HYPERVISOR_INFO = 21,
33 VMMDEVREQ_REGISTER_PATCH_MEMORY = 22, /* since version 3.0.6 */
34 VMMDEVREQ_DEREGISTER_PATCH_MEMORY = 23, /* since version 3.0.6 */
35 VMMDEVREQ_SET_POWER_STATUS = 30,
36 VMMDEVREQ_ACKNOWLEDGE_EVENTS = 41,
37 VMMDEVREQ_CTL_GUEST_FILTER_MASK = 42,
38 VMMDEVREQ_REPORT_GUEST_INFO = 50,
39 VMMDEVREQ_REPORT_GUEST_INFO2 = 58, /* since version 3.2.0 */
40 VMMDEVREQ_REPORT_GUEST_STATUS = 59, /* since version 3.2.8 */
41 VMMDEVREQ_REPORT_GUEST_USER_STATE = 74, /* since version 4.3 */
42 /* Retrieve a display resize request sent by the host, deprecated. */
43 VMMDEVREQ_GET_DISPLAY_CHANGE_REQ = 51,
44 VMMDEVREQ_VIDEMODE_SUPPORTED = 52,
45 VMMDEVREQ_GET_HEIGHT_REDUCTION = 53,
46 /**
47 * @VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
48 * Retrieve a display resize request sent by the host.
49 *
50 * Queries a display resize request sent from the host. If the
 51 * event_ack member is set to true and there is an unqueried request
 52 * available for one of the virtual displays, then that request will
 53 * be returned. If several displays have unqueried requests, the lowest
54 * numbered display will be chosen first. Only the most recent unseen
55 * request for each display is remembered.
56 * If event_ack is set to false, the last host request queried with
 57 * event_ack set is resent, or failing that the most recent one received
 58 * from the host. If no host request was ever received, all zeros
59 * are returned.
60 */
61 VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2 = 54,
62 VMMDEVREQ_REPORT_GUEST_CAPABILITIES = 55,
63 VMMDEVREQ_SET_GUEST_CAPABILITIES = 56,
64 VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */
65 VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */
66 VMMDEVREQ_HGCM_CONNECT = 60,
67 VMMDEVREQ_HGCM_DISCONNECT = 61,
68 VMMDEVREQ_HGCM_CALL32 = 62,
69 VMMDEVREQ_HGCM_CALL64 = 63,
70 VMMDEVREQ_HGCM_CANCEL = 64,
71 VMMDEVREQ_HGCM_CANCEL2 = 65,
72 VMMDEVREQ_VIDEO_ACCEL_ENABLE = 70,
73 VMMDEVREQ_VIDEO_ACCEL_FLUSH = 71,
74 VMMDEVREQ_VIDEO_SET_VISIBLE_REGION = 72,
75 VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ = 73,
76 VMMDEVREQ_QUERY_CREDENTIALS = 100,
77 VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT = 101,
78 VMMDEVREQ_REPORT_GUEST_STATS = 110,
79 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ = 111,
80 VMMDEVREQ_GET_STATISTICS_CHANGE_REQ = 112,
81 VMMDEVREQ_CHANGE_MEMBALLOON = 113,
82 VMMDEVREQ_GET_VRDPCHANGE_REQ = 150,
83 VMMDEVREQ_LOG_STRING = 200,
84 VMMDEVREQ_GET_CPU_HOTPLUG_REQ = 210,
85 VMMDEVREQ_SET_CPU_HOTPLUG_STATUS = 211,
86 VMMDEVREQ_REGISTER_SHARED_MODULE = 212,
87 VMMDEVREQ_UNREGISTER_SHARED_MODULE = 213,
88 VMMDEVREQ_CHECK_SHARED_MODULES = 214,
89 VMMDEVREQ_GET_PAGE_SHARING_STATUS = 215,
90 VMMDEVREQ_DEBUG_IS_PAGE_SHARED = 216,
91 VMMDEVREQ_GET_SESSION_ID = 217, /* since version 3.2.8 */
92 VMMDEVREQ_WRITE_COREDUMP = 218,
93 VMMDEVREQ_GUEST_HEARTBEAT = 219,
94 VMMDEVREQ_HEARTBEAT_CONFIGURE = 220,
95 /* Ensure the enum is a 32 bit data-type */
96 VMMDEVREQ_SIZEHACK = 0x7fffffff
97};
98
99#if __BITS_PER_LONG == 64
100#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL64
101#else
102#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
103#endif
104
105/** HGCM service location types. */
106enum vmmdev_hgcm_service_location_type {
107 VMMDEV_HGCM_LOC_INVALID = 0,
108 VMMDEV_HGCM_LOC_LOCALHOST = 1,
109 VMMDEV_HGCM_LOC_LOCALHOST_EXISTING = 2,
110 /* Ensure the enum is a 32 bit data-type */
111 VMMDEV_HGCM_LOC_SIZEHACK = 0x7fffffff
112};
113
114/** HGCM host service location. */
115struct vmmdev_hgcm_service_location_localhost {
116 /** Service name */
117 char service_name[128];
118};
119VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location_localhost, 128);
120
121/** HGCM service location. */
122struct vmmdev_hgcm_service_location {
123 /** Type of the location. */
124 enum vmmdev_hgcm_service_location_type type;
125
126 union {
127 struct vmmdev_hgcm_service_location_localhost localhost;
128 } u;
129};
130VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location, 128 + 4);
131
132/** HGCM function parameter type. */
133enum vmmdev_hgcm_function_parameter_type {
134 VMMDEV_HGCM_PARM_TYPE_INVALID = 0,
135 VMMDEV_HGCM_PARM_TYPE_32BIT = 1,
136 VMMDEV_HGCM_PARM_TYPE_64BIT = 2,
 137 /** Deprecated, doesn't work; use PAGELIST. */
138 VMMDEV_HGCM_PARM_TYPE_PHYSADDR = 3,
139 /** In and Out, user-memory */
140 VMMDEV_HGCM_PARM_TYPE_LINADDR = 4,
141 /** In, user-memory (read; host<-guest) */
142 VMMDEV_HGCM_PARM_TYPE_LINADDR_IN = 5,
143 /** Out, user-memory (write; host->guest) */
144 VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT = 6,
145 /** In and Out, kernel-memory */
146 VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL = 7,
147 /** In, kernel-memory (read; host<-guest) */
148 VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN = 8,
149 /** Out, kernel-memory (write; host->guest) */
150 VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT = 9,
151 /** Physical addresses of locked pages for a buffer. */
152 VMMDEV_HGCM_PARM_TYPE_PAGELIST = 10,
153 /* Ensure the enum is a 32 bit data-type */
154 VMMDEV_HGCM_PARM_TYPE_SIZEHACK = 0x7fffffff
155};
156
157/** HGCM function parameter, 32-bit client. */
158struct vmmdev_hgcm_function_parameter32 {
159 enum vmmdev_hgcm_function_parameter_type type;
160 union {
161 __u32 value32;
162 __u64 value64;
163 struct {
164 __u32 size;
165 union {
166 __u32 phys_addr;
167 __u32 linear_addr;
168 } u;
169 } pointer;
170 struct {
171 /** Size of the buffer described by the page list. */
172 __u32 size;
173 /** Relative to the request header. */
174 __u32 offset;
175 } page_list;
176 } u;
177} __packed;
178VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter32, 4 + 8);
179
180/** HGCM function parameter, 64-bit client. */
181struct vmmdev_hgcm_function_parameter64 {
182 enum vmmdev_hgcm_function_parameter_type type;
183 union {
184 __u32 value32;
185 __u64 value64;
186 struct {
187 __u32 size;
188 union {
189 __u64 phys_addr;
190 __u64 linear_addr;
191 } u;
192 } __packed pointer;
193 struct {
194 /** Size of the buffer described by the page list. */
195 __u32 size;
196 /** Relative to the request header. */
197 __u32 offset;
198 } page_list;
199 } __packed u;
200} __packed;
201VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter64, 4 + 12);
202
203#if __BITS_PER_LONG == 64
204#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter64
205#else
206#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter32
207#endif
208
209#define VMMDEV_HGCM_F_PARM_DIRECTION_NONE 0x00000000U
210#define VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST 0x00000001U
211#define VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST 0x00000002U
212#define VMMDEV_HGCM_F_PARM_DIRECTION_BOTH 0x00000003U
213
214/**
215 * struct vmmdev_hgcm_pagelist - VMMDEV_HGCM_PARM_TYPE_PAGELIST parameters
216 * point to this structure to actually describe the buffer.
217 */
218struct vmmdev_hgcm_pagelist {
219 __u32 flags; /** VMMDEV_HGCM_F_PARM_*. */
220 __u16 offset_first_page; /** Data offset in the first page. */
221 __u16 page_count; /** Number of pages. */
222 __u64 pages[1]; /** Page addresses. */
223};
224VMMDEV_ASSERT_SIZE(vmmdev_hgcm_pagelist, 4 + 2 + 2 + 8);
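Because pages[] is declared with a single element, callers size the structure for the real page count at allocation time. A minimal sketch of that size computation; pagelist_size() is an illustrative helper name.

	#include <stddef.h>
	#include <linux/vbox_vmmdev_types.h>

	/* Sketch: bytes needed for a page list describing n pages. */
	static size_t pagelist_size(__u16 n)
	{
		return offsetof(struct vmmdev_hgcm_pagelist, pages) +
		       n * sizeof(__u64);
	}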
225
226#endif
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
new file mode 100644
index 000000000000..612f0c7d3558
--- /dev/null
+++ b/include/uapi/linux/vboxguest.h
@@ -0,0 +1,330 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2/*
3 * VBoxGuest - VirtualBox Guest Additions Driver Interface.
4 *
5 * Copyright (C) 2006-2016 Oracle Corporation
6 */
7
8#ifndef __UAPI_VBOXGUEST_H__
9#define __UAPI_VBOXGUEST_H__
10
11#include <asm/bitsperlong.h>
12#include <linux/ioctl.h>
13#include <linux/vbox_err.h>
14#include <linux/vbox_vmmdev_types.h>
15
16/* Version of vbg_ioctl_hdr structure. */
17#define VBG_IOCTL_HDR_VERSION 0x10001
18/* Default request type. Use this for non-VMMDev requests. */
19#define VBG_IOCTL_HDR_TYPE_DEFAULT 0
20
21/**
22 * Common ioctl header.
23 *
24 * This is a mirror of vmmdev_request_header to prevent duplicating data and
25 * needing to verify things multiple times.
26 */
27struct vbg_ioctl_hdr {
28 /** IN: The request input size, and output size if size_out is zero. */
29 __u32 size_in;
30 /** IN: Structure version (VBG_IOCTL_HDR_VERSION) */
31 __u32 version;
32 /** IN: The VMMDev request type or VBG_IOCTL_HDR_TYPE_DEFAULT. */
33 __u32 type;
34 /**
35 * OUT: The VBox status code of the operation, out direction only.
36 * This is a VINF_ or VERR_ value as defined in vbox_err.h.
37 */
38 __s32 rc;
39 /** IN: Output size. Set to zero to use size_in as output size. */
40 __u32 size_out;
41 /** Reserved, MBZ. */
42 __u32 reserved;
43};
44VMMDEV_ASSERT_SIZE(vbg_ioctl_hdr, 24);
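Every ioctl structure below embeds this header as its first member; size_in covers the header plus the request payload, and size_out only needs to be set when the output size differs from the input size. A minimal sketch of filling the header from userspace follows; the vbg_hdr_init() helper name is an assumption for illustration.

	#include <string.h>
	#include <linux/vboxguest.h>

	/* Sketch: fill the common header for a fixed-size request. */
	static void vbg_hdr_init(struct vbg_ioctl_hdr *hdr, __u32 size, __u32 type)
	{
		memset(hdr, 0, sizeof(*hdr));
		hdr->size_in = size;		/* covers header + payload         */
		hdr->version = VBG_IOCTL_HDR_VERSION;
		hdr->type    = type;		/* VBG_IOCTL_HDR_TYPE_DEFAULT etc. */
		/* size_out stays 0, so size_in is also used as the output size */
	}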
45
46
47/*
48 * The VBoxGuest I/O control version.
49 *
 50 * As usual, the high word contains the major version and changes to it
 51 * signify incompatible changes.
 52 *
 53 * The lower word is the minor version number; it is increased when new
 54 * functions are added or existing ones are changed in a backwards-compatible manner.
55 */
56#define VBG_IOC_VERSION 0x00010000u
57
58/**
59 * VBG_IOCTL_DRIVER_VERSION_INFO data structure
60 *
61 * Note VBG_IOCTL_DRIVER_VERSION_INFO may switch the session to a backwards
 62 * compatible interface version if req_version indicates older client code.
63 */
64struct vbg_ioctl_driver_version_info {
65 /** The header. */
66 struct vbg_ioctl_hdr hdr;
67 union {
68 struct {
69 /** Requested interface version (VBG_IOC_VERSION). */
70 __u32 req_version;
71 /**
72 * Minimum interface version number (typically the
73 * major version part of VBG_IOC_VERSION).
74 */
75 __u32 min_version;
76 /** Reserved, MBZ. */
77 __u32 reserved1;
78 /** Reserved, MBZ. */
79 __u32 reserved2;
80 } in;
81 struct {
82 /** Version for this session (typ. VBG_IOC_VERSION). */
83 __u32 session_version;
84 /** Version of the IDC interface (VBG_IOC_VERSION). */
85 __u32 driver_version;
86 /** The SVN revision of the driver, or 0. */
87 __u32 driver_revision;
88 /** Reserved \#1 (zero until defined). */
89 __u32 reserved1;
90 /** Reserved \#2 (zero until defined). */
91 __u32 reserved2;
92 } out;
93 } u;
94};
95VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20);
96
97#define VBG_IOCTL_DRIVER_VERSION_INFO \
98 _IOWR('V', 0, struct vbg_ioctl_driver_version_info)
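A sketch of the version handshake a client might perform right after opening the device; the /dev/vboxguest path and the bare-bones error handling are assumptions for illustration.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/vboxguest.h>

	int main(void)
	{
		struct vbg_ioctl_driver_version_info info;
		int fd = open("/dev/vboxguest", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return 1;

		memset(&info, 0, sizeof(info));
		info.hdr.size_in = sizeof(info);
		info.hdr.version = VBG_IOCTL_HDR_VERSION;
		info.u.in.req_version = VBG_IOC_VERSION;
		info.u.in.min_version = VBG_IOC_VERSION & 0xffff0000u; /* major part */

		if (ioctl(fd, VBG_IOCTL_DRIVER_VERSION_INFO, &info) == 0 &&
		    info.hdr.rc >= 0)
			printf("session version 0x%x\n", info.u.out.session_version);
		return 0;
	}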
99
100
101/* IOCTL to perform a VMM Device request less than 1KB in size. */
102#define VBG_IOCTL_VMMDEV_REQUEST(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 2, s)
103
104
105/* IOCTL to perform a VMM Device request larger than 1KB. */
106#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
107
108
109/** VBG_IOCTL_HGCM_CONNECT data structure. */
110struct vbg_ioctl_hgcm_connect {
111 struct vbg_ioctl_hdr hdr;
112 union {
113 struct {
114 struct vmmdev_hgcm_service_location loc;
115 } in;
116 struct {
117 __u32 client_id;
118 } out;
119 } u;
120};
121VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_connect, 24 + 132);
122
123#define VBG_IOCTL_HGCM_CONNECT \
124 _IOWR('V', 4, struct vbg_ioctl_hgcm_connect)
125
126
127/** VBG_IOCTL_HGCM_DISCONNECT data structure. */
128struct vbg_ioctl_hgcm_disconnect {
129 struct vbg_ioctl_hdr hdr;
130 union {
131 struct {
132 __u32 client_id;
133 } in;
134 } u;
135};
136VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_disconnect, 24 + 4);
137
138#define VBG_IOCTL_HGCM_DISCONNECT \
139 _IOWR('V', 5, struct vbg_ioctl_hgcm_disconnect)
140
141
142/** VBG_IOCTL_HGCM_CALL data structure. */
143struct vbg_ioctl_hgcm_call {
144 /** The header. */
145 struct vbg_ioctl_hdr hdr;
146 /** Input: The id of the caller. */
147 __u32 client_id;
148 /** Input: Function number. */
149 __u32 function;
150 /**
151 * Input: How long to wait (milliseconds) for completion before
152 * cancelling the call. Set to -1 to wait indefinitely.
153 */
154 __u32 timeout_ms;
 156 /** Interruptible flag, ignored for userspace calls. */
156 __u8 interruptible;
157 /** Explicit padding, MBZ. */
158 __u8 reserved;
159 /**
160 * Input: How many parameters following this structure.
161 *
162 * The parameters are either HGCMFunctionParameter64 or 32,
163 * depending on whether we're receiving a 64-bit or 32-bit request.
164 *
165 * The current maximum is 61 parameters (given a 1KB max request size,
166 * and a 64-bit parameter size of 16 bytes).
167 */
168 __u16 parm_count;
169 /*
170 * Parameters follow in form:
171 * struct hgcm_function_parameter<32|64> parms[parm_count]
172 */
173};
174VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_call, 24 + 16);
175
176#define VBG_IOCTL_HGCM_CALL_32(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 6, s)
177#define VBG_IOCTL_HGCM_CALL_64(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 7, s)
178#if __BITS_PER_LONG == 64
179#define VBG_IOCTL_HGCM_CALL(s) VBG_IOCTL_HGCM_CALL_64(s)
180#else
181#define VBG_IOCTL_HGCM_CALL(s) VBG_IOCTL_HGCM_CALL_32(s)
182#endif
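HGCM call requests are variable length: parm_count parameters of the appropriate width immediately follow struct vbg_ioctl_hgcm_call, and hdr.size_in has to cover both parts. The sketch below builds such a request with two 32-bit parameters; the client_id, function number and parameter values are made up, and build_call() is an illustrative helper, not part of the ABI.

	#include <stdlib.h>
	#include <linux/vboxguest.h>

	/* Sketch: build an HGCM call with two 32-bit parameters appended. */
	static struct vbg_ioctl_hgcm_call *build_call(__u32 client_id)
	{
		size_t size = sizeof(struct vbg_ioctl_hgcm_call) +
			      2 * sizeof(struct vmmdev_hgcm_function_parameter);
		struct vbg_ioctl_hgcm_call *call = calloc(1, size);
		struct vmmdev_hgcm_function_parameter *parms;

		if (!call)
			return NULL;

		call->hdr.size_in = size;	/* header + fixed part + parameters */
		call->hdr.version = VBG_IOCTL_HDR_VERSION;
		call->client_id   = client_id;
		call->function    = 1;		/* hypothetical function number */
		call->timeout_ms  = ~0U;	/* wait indefinitely            */
		call->parm_count  = 2;

		parms = (struct vmmdev_hgcm_function_parameter *)(call + 1);
		parms[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
		parms[0].u.value32 = 42;	/* example values */
		parms[1].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
		parms[1].u.value32 = 7;
		return call;	/* pass to ioctl(fd, VBG_IOCTL_HGCM_CALL(size), call) */
	}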
183
184
185/** VBG_IOCTL_LOG data structure. */
186struct vbg_ioctl_log {
187 /** The header. */
188 struct vbg_ioctl_hdr hdr;
189 union {
190 struct {
191 /**
 192 * The log message; this may be zero-terminated. If it
 193 * is not zero-terminated, the length is determined
 194 * from the input size.
195 */
196 char msg[1];
197 } in;
198 } u;
199};
200
201#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
202
203
204/** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */
205struct vbg_ioctl_wait_for_events {
206 /** The header. */
207 struct vbg_ioctl_hdr hdr;
208 union {
209 struct {
210 /** Timeout in milliseconds. */
211 __u32 timeout_ms;
212 /** Events to wait for. */
213 __u32 events;
214 } in;
215 struct {
216 /** Events that occurred. */
217 __u32 events;
218 } out;
219 } u;
220};
221VMMDEV_ASSERT_SIZE(vbg_ioctl_wait_for_events, 24 + 8);
222
223#define VBG_IOCTL_WAIT_FOR_EVENTS \
224 _IOWR('V', 10, struct vbg_ioctl_wait_for_events)
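A sketch of how a caller might block on VBG_IOCTL_WAIT_FOR_EVENTS; the event mask values (VMMDEV_EVENT_*) live outside this header, so the mask is simply passed through, and wait_events() is an illustrative helper name.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/vboxguest.h>

	/* Sketch: block up to timeout_ms for any of the requested event bits. */
	static int wait_events(int fd, __u32 events, __u32 timeout_ms, __u32 *out)
	{
		struct vbg_ioctl_wait_for_events req;

		memset(&req, 0, sizeof(req));
		req.hdr.size_in = sizeof(req);
		req.hdr.version = VBG_IOCTL_HDR_VERSION;
		req.u.in.timeout_ms = timeout_ms;
		req.u.in.events = events;	/* VMMDEV_EVENT_* mask, defined elsewhere */

		if (ioctl(fd, VBG_IOCTL_WAIT_FOR_EVENTS, &req) < 0 || req.hdr.rc < 0)
			return -1;
		*out = req.u.out.events;	/* events that actually occurred */
		return 0;
	}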
225
226
227/*
228 * IOCTL to VBoxGuest to interrupt (cancel) any pending
229 * VBG_IOCTL_WAIT_FOR_EVENTS and return.
230 *
231 * Handled inside the vboxguest driver and not seen by the host at all.
232 * After calling this, VBG_IOCTL_WAIT_FOR_EVENTS should no longer be called in
 233 * the same session. Any VBG_IOCTL_WAIT_FOR_EVENTS calls in the same session
 234 * made after calling this will exit directly with -EINTR.
235 */
236#define VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS \
237 _IOWR('V', 11, struct vbg_ioctl_hdr)
238
239
240/** VBG_IOCTL_CHANGE_FILTER_MASK data structure. */
241struct vbg_ioctl_change_filter {
242 /** The header. */
243 struct vbg_ioctl_hdr hdr;
244 union {
245 struct {
246 /** Flags to set. */
247 __u32 or_mask;
248 /** Flags to remove. */
249 __u32 not_mask;
250 } in;
251 } u;
252};
253VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8);
254
255/* IOCTL to VBoxGuest to control the event filter mask. */
256#define VBG_IOCTL_CHANGE_FILTER_MASK \
257 _IOWR('V', 12, struct vbg_ioctl_change_filter)
258
259
260/** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */
261struct vbg_ioctl_set_guest_caps {
262 /** The header. */
263 struct vbg_ioctl_hdr hdr;
264 union {
265 struct {
266 /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */
267 __u32 or_mask;
268 /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */
269 __u32 not_mask;
270 } in;
271 struct {
272 /** Capabilities held by the session after the call. */
273 __u32 session_caps;
274 /** Capabilities for all the sessions after the call. */
275 __u32 global_caps;
276 } out;
277 } u;
278};
279VMMDEV_ASSERT_SIZE(vbg_ioctl_set_guest_caps, 24 + 8);
280
281#define VBG_IOCTL_CHANGE_GUEST_CAPABILITIES \
282 _IOWR('V', 14, struct vbg_ioctl_set_guest_caps)
283
284
285/** VBG_IOCTL_CHECK_BALLOON data structure. */
286struct vbg_ioctl_check_balloon {
287 /** The header. */
288 struct vbg_ioctl_hdr hdr;
289 union {
290 struct {
291 /** The size of the balloon in chunks of 1MB. */
292 __u32 balloon_chunks;
293 /**
294 * false = handled in R0, no further action required.
295 * true = allocate balloon memory in R3.
296 */
297 __u8 handle_in_r3;
298 /** Explicit padding, MBZ. */
299 __u8 padding[3];
300 } out;
301 } u;
302};
303VMMDEV_ASSERT_SIZE(vbg_ioctl_check_balloon, 24 + 8);
304
305/*
306 * IOCTL to check memory ballooning.
307 *
308 * The guest kernel module will ask the host for the current size of the
 309 * balloon and adjust the size, or it will set handle_in_r3 = true, in which
 310 * case R3 is responsible for allocating memory and calling VBG_IOCTL_CHANGE_BALLOON.
311 */
312#define VBG_IOCTL_CHECK_BALLOON \
313 _IOWR('V', 17, struct vbg_ioctl_check_balloon)
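A sketch of the query side of the balloon protocol described above; check_balloon() is an illustrative helper name and error handling is kept minimal.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/vboxguest.h>

	/* Sketch: ask the driver whether R3 must (de)allocate balloon memory. */
	static int check_balloon(int fd, __u32 *chunks, int *handle_in_r3)
	{
		struct vbg_ioctl_check_balloon req;

		memset(&req, 0, sizeof(req));
		req.hdr.size_in = sizeof(req);
		req.hdr.version = VBG_IOCTL_HDR_VERSION;

		if (ioctl(fd, VBG_IOCTL_CHECK_BALLOON, &req) < 0 || req.hdr.rc < 0)
			return -1;
		*chunks = req.u.out.balloon_chunks;	/* target size in 1MB chunks */
		*handle_in_r3 = req.u.out.handle_in_r3;
		return 0;
	}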
314
315
316/** VBG_IOCTL_WRITE_CORE_DUMP data structure. */
317struct vbg_ioctl_write_coredump {
318 struct vbg_ioctl_hdr hdr;
319 union {
320 struct {
321 __u32 flags; /** Flags (reserved, MBZ). */
322 } in;
323 } u;
324};
325VMMDEV_ASSERT_SIZE(vbg_ioctl_write_coredump, 24 + 4);
326
327#define VBG_IOCTL_WRITE_CORE_DUMP \
328 _IOWR('V', 19, struct vbg_ioctl_write_coredump)
329
330#endif
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index e3301dbd27d4..c74372163ed2 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -301,6 +301,16 @@ struct vfio_region_info_cap_type {
301#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2) 301#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
302#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3) 302#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
303 303
304/*
305 * The MSIX mappable capability informs userspace that the MSIX data of a BAR
306 * can be mmapped, which allows direct access to non-MSIX registers that happen
307 * to be within the same system page.
308 *
309 * Even though userspace gets direct access to the MSIX data, the existing
310 * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration.
311 */
312#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
313
304/** 314/**
305 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9, 315 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
306 * struct vfio_irq_info) 316 * struct vfio_irq_info)
@@ -503,6 +513,68 @@ struct vfio_pci_hot_reset {
503 513
504#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13) 514#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
505 515
516/**
517 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
518 * struct vfio_device_query_gfx_plane)
519 *
520 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
521 *
522 * flags supported:
523 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
524 * to ask if the mdev supports dma-buf: returns 0 if supported,
525 * -EINVAL if dma-buf is not supported.
526 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
527 * to ask if the mdev supports a region: returns 0 if supported,
528 * -EINVAL if a region is not supported.
529 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
530 * with each call to query the plane info.
531 * - Others are invalid and return -EINVAL.
532 *
533 * Note:
534 * 1. The plane could be disabled by the guest. In that case, success will be
535 * returned with zero-initialized drm_format, size, width and height
536 * fields.
537 * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information is available.
538 *
539 * Return: 0 on success, -errno on other failure.
540 */
541struct vfio_device_gfx_plane_info {
542 __u32 argsz;
543 __u32 flags;
544#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
545#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
546#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
547 /* in */
548 __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
549 /* out */
550 __u32 drm_format; /* drm format of plane */
551 __u64 drm_format_mod; /* tiled mode */
552 __u32 width; /* width of plane */
553 __u32 height; /* height of plane */
554 __u32 stride; /* stride of plane */
555 __u32 size; /* size of plane in bytes, aligned to page size */
556 __u32 x_pos; /* horizontal position of cursor plane */
557 __u32 y_pos; /* vertical position of cursor plane */
558 __u32 x_hot; /* horizontal position of cursor hotspot */
559 __u32 y_hot; /* vertical position of cursor hotspot */
560 union {
561 __u32 region_index; /* region index */
562 __u32 dmabuf_id; /* dma-buf id */
563 };
564};
565
566#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
567
568/**
569 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
570 *
571 * Return a new dma-buf file descriptor for an exposed guest framebuffer
572 * described by the provided dmabuf_id. The dmabuf_id is returned from VFIO_
573 * DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
574 */
575
576#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
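A sketch of the probe/query/export sequence described above, assuming a dma-buf capable mdev; the numeric plane type (1, which is DRM_PLANE_TYPE_PRIMARY in the DRM uapi) and the get_gfx_dmabuf() helper name are assumptions for illustration.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	/* Sketch: probe dma-buf support, query the primary plane, export an fd. */
	static int get_gfx_dmabuf(int device_fd)
	{
		struct vfio_device_gfx_plane_info plane;

		memset(&plane, 0, sizeof(plane));
		plane.argsz = sizeof(plane);
		plane.flags = VFIO_GFX_PLANE_TYPE_PROBE | VFIO_GFX_PLANE_TYPE_DMABUF;
		if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
			return -1;		/* mdev has no dma-buf support */

		memset(&plane, 0, sizeof(plane));
		plane.argsz = sizeof(plane);
		plane.flags = VFIO_GFX_PLANE_TYPE_DMABUF;
		plane.drm_plane_type = 1;	/* DRM_PLANE_TYPE_PRIMARY (assumed) */
		if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane))
			return -1;

		/* Returns a new dma-buf fd for the framebuffer described above. */
		return ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);
	}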
577
506/* -------- API for Type1 VFIO IOMMU -------- */ 578/* -------- API for Type1 VFIO IOMMU -------- */
507 579
508/** 580/**
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 1c095b5a99c5..982718965180 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -107,14 +107,14 @@ enum v4l2_field {
107 transmitted first */ 107 transmitted first */
108}; 108};
109#define V4L2_FIELD_HAS_TOP(field) \ 109#define V4L2_FIELD_HAS_TOP(field) \
110 ((field) == V4L2_FIELD_TOP ||\ 110 ((field) == V4L2_FIELD_TOP ||\
111 (field) == V4L2_FIELD_INTERLACED ||\ 111 (field) == V4L2_FIELD_INTERLACED ||\
112 (field) == V4L2_FIELD_INTERLACED_TB ||\ 112 (field) == V4L2_FIELD_INTERLACED_TB ||\
113 (field) == V4L2_FIELD_INTERLACED_BT ||\ 113 (field) == V4L2_FIELD_INTERLACED_BT ||\
114 (field) == V4L2_FIELD_SEQ_TB ||\ 114 (field) == V4L2_FIELD_SEQ_TB ||\
115 (field) == V4L2_FIELD_SEQ_BT) 115 (field) == V4L2_FIELD_SEQ_BT)
116#define V4L2_FIELD_HAS_BOTTOM(field) \ 116#define V4L2_FIELD_HAS_BOTTOM(field) \
117 ((field) == V4L2_FIELD_BOTTOM ||\ 117 ((field) == V4L2_FIELD_BOTTOM ||\
118 (field) == V4L2_FIELD_INTERLACED ||\ 118 (field) == V4L2_FIELD_INTERLACED ||\
119 (field) == V4L2_FIELD_INTERLACED_TB ||\ 119 (field) == V4L2_FIELD_INTERLACED_TB ||\
120 (field) == V4L2_FIELD_INTERLACED_BT ||\ 120 (field) == V4L2_FIELD_INTERLACED_BT ||\
@@ -467,12 +467,12 @@ struct v4l2_capability {
467 * V I D E O I M A G E F O R M A T 467 * V I D E O I M A G E F O R M A T
468 */ 468 */
469struct v4l2_pix_format { 469struct v4l2_pix_format {
470 __u32 width; 470 __u32 width;
471 __u32 height; 471 __u32 height;
472 __u32 pixelformat; 472 __u32 pixelformat;
473 __u32 field; /* enum v4l2_field */ 473 __u32 field; /* enum v4l2_field */
474 __u32 bytesperline; /* for padding, zero if unused */ 474 __u32 bytesperline; /* for padding, zero if unused */
475 __u32 sizeimage; 475 __u32 sizeimage;
476 __u32 colorspace; /* enum v4l2_colorspace */ 476 __u32 colorspace; /* enum v4l2_colorspace */
477 __u32 priv; /* private data, depends on pixelformat */ 477 __u32 priv; /* private data, depends on pixelformat */
478 __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */ 478 __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
@@ -669,6 +669,12 @@ struct v4l2_pix_format {
669#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */ 669#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
670#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */ 670#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
671 671
672/* 10-bit raw Bayer packed, 32 bytes for every 25 pixels, last 6 LSB bits unused */
673#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
674#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */
675#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
676#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */
677
672/* SDR formats - used only for Software Defined Radio devices */ 678/* SDR formats - used only for Software Defined Radio devices */
673#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */ 679#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
674#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */ 680#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
@@ -688,6 +694,7 @@ struct v4l2_pix_format {
688/* Meta-data formats */ 694/* Meta-data formats */
689#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */ 695#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
690#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */ 696#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
697#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
691 698
692/* priv field value to indicates that subsequent fields are valid. */ 699/* priv field value to indicates that subsequent fields are valid. */
693#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe 700#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -1166,7 +1173,7 @@ typedef __u64 v4l2_std_id;
1166 V4L2_STD_NTSC_M_JP |\ 1173 V4L2_STD_NTSC_M_JP |\
1167 V4L2_STD_NTSC_M_KR) 1174 V4L2_STD_NTSC_M_KR)
1168/* Secam macros */ 1175/* Secam macros */
1169#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\ 1176#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
1170 V4L2_STD_SECAM_K |\ 1177 V4L2_STD_SECAM_K |\
1171 V4L2_STD_SECAM_K1) 1178 V4L2_STD_SECAM_K1)
1172/* All Secam Standards */ 1179/* All Secam Standards */
@@ -1247,7 +1254,7 @@ struct v4l2_standard {
1247}; 1254};
1248 1255
1249/* 1256/*
1250 * D V B T T I M I N G S 1257 * D V B T T I M I N G S
1251 */ 1258 */
1252 1259
1253/** struct v4l2_bt_timings - BT.656/BT.1120 timing data 1260/** struct v4l2_bt_timings - BT.656/BT.1120 timing data
@@ -1588,7 +1595,7 @@ struct v4l2_ext_controls {
1588 struct v4l2_ext_control *controls; 1595 struct v4l2_ext_control *controls;
1589}; 1596};
1590 1597
1591#define V4L2_CTRL_ID_MASK (0x0fffffff) 1598#define V4L2_CTRL_ID_MASK (0x0fffffff)
1592#ifndef __KERNEL__ 1599#ifndef __KERNEL__
1593#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL) 1600#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
1594#endif 1601#endif
@@ -1660,11 +1667,11 @@ struct v4l2_querymenu {
1660/* Control flags */ 1667/* Control flags */
1661#define V4L2_CTRL_FLAG_DISABLED 0x0001 1668#define V4L2_CTRL_FLAG_DISABLED 0x0001
1662#define V4L2_CTRL_FLAG_GRABBED 0x0002 1669#define V4L2_CTRL_FLAG_GRABBED 0x0002
1663#define V4L2_CTRL_FLAG_READ_ONLY 0x0004 1670#define V4L2_CTRL_FLAG_READ_ONLY 0x0004
1664#define V4L2_CTRL_FLAG_UPDATE 0x0008 1671#define V4L2_CTRL_FLAG_UPDATE 0x0008
1665#define V4L2_CTRL_FLAG_INACTIVE 0x0010 1672#define V4L2_CTRL_FLAG_INACTIVE 0x0010
1666#define V4L2_CTRL_FLAG_SLIDER 0x0020 1673#define V4L2_CTRL_FLAG_SLIDER 0x0020
1667#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040 1674#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
1668#define V4L2_CTRL_FLAG_VOLATILE 0x0080 1675#define V4L2_CTRL_FLAG_VOLATILE 0x0080
1669#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100 1676#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
1670#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200 1677#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
@@ -1778,21 +1785,21 @@ struct v4l2_hw_freq_seek {
1778 */ 1785 */
1779 1786
1780struct v4l2_rds_data { 1787struct v4l2_rds_data {
1781 __u8 lsb; 1788 __u8 lsb;
1782 __u8 msb; 1789 __u8 msb;
1783 __u8 block; 1790 __u8 block;
1784} __attribute__ ((packed)); 1791} __attribute__ ((packed));
1785 1792
1786#define V4L2_RDS_BLOCK_MSK 0x7 1793#define V4L2_RDS_BLOCK_MSK 0x7
1787#define V4L2_RDS_BLOCK_A 0 1794#define V4L2_RDS_BLOCK_A 0
1788#define V4L2_RDS_BLOCK_B 1 1795#define V4L2_RDS_BLOCK_B 1
1789#define V4L2_RDS_BLOCK_C 2 1796#define V4L2_RDS_BLOCK_C 2
1790#define V4L2_RDS_BLOCK_D 3 1797#define V4L2_RDS_BLOCK_D 3
1791#define V4L2_RDS_BLOCK_C_ALT 4 1798#define V4L2_RDS_BLOCK_C_ALT 4
1792#define V4L2_RDS_BLOCK_INVALID 7 1799#define V4L2_RDS_BLOCK_INVALID 7
1793 1800
1794#define V4L2_RDS_BLOCK_CORRECTED 0x40 1801#define V4L2_RDS_BLOCK_CORRECTED 0x40
1795#define V4L2_RDS_BLOCK_ERROR 0x80 1802#define V4L2_RDS_BLOCK_ERROR 0x80
1796 1803
1797/* 1804/*
1798 * A U D I O 1805 * A U D I O
@@ -2348,8 +2355,8 @@ struct v4l2_create_buffers {
2348#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop) 2355#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop)
2349#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression) 2356#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression)
2350#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression) 2357#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression)
2351#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) 2358#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
2352#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) 2359#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
2353#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio) 2360#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio)
2354#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout) 2361#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout)
2355#define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */ 2362#define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */
@@ -2370,8 +2377,8 @@ struct v4l2_create_buffers {
2370 * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined. 2377 * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
2371 * You must be root to use these ioctls. Never use these in applications! 2378 * You must be root to use these ioctls. Never use these in applications!
2372 */ 2379 */
2373#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register) 2380#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
2374#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register) 2381#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
2375 2382
2376#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) 2383#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
2377#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) 2384#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 343d7ddefe04..4e8b8304b793 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -52,7 +52,8 @@ struct virtio_balloon_config {
52#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */ 52#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
53#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ 53#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
54#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */ 54#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */
55#define VIRTIO_BALLOON_S_NR 7 55#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
56#define VIRTIO_BALLOON_S_NR 8
56 57
57/* 58/*
58 * Memory statistics structure. 59 * Memory statistics structure.
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index fc353b518288..5de6ed37695b 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -57,6 +57,8 @@
57 * Steering */ 57 * Steering */
58#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ 58#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
59 59
60#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */
61
60#ifndef VIRTIO_NET_NO_LEGACY 62#ifndef VIRTIO_NET_NO_LEGACY
61#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ 63#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
62#endif /* VIRTIO_NET_NO_LEGACY */ 64#endif /* VIRTIO_NET_NO_LEGACY */
@@ -76,6 +78,17 @@ struct virtio_net_config {
76 __u16 max_virtqueue_pairs; 78 __u16 max_virtqueue_pairs;
77 /* Default maximum transmit unit advice */ 79 /* Default maximum transmit unit advice */
78 __u16 mtu; 80 __u16 mtu;
81 /*
 82 * speed, in units of 1 Mbit/s. All values 0 to INT_MAX are legal.
83 * Any other value stands for unknown.
84 */
85 __u32 speed;
86 /*
87 * 0x00 - half duplex
88 * 0x01 - full duplex
89 * Any other value stands for unknown.
90 */
91 __u8 duplex;
79} __attribute__((packed)); 92} __attribute__((packed));
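A sketch of how a guest might report the new fields once VIRTIO_NET_F_SPEED_DUPLEX has been negotiated, following the encoding in the comment above; report_link() is an illustrative helper, not part of the virtio interface.

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch: interpret the speed/duplex config fields. */
	static void report_link(uint32_t speed, uint8_t duplex)
	{
		if (speed <= INT32_MAX)
			printf("link speed: %u Mb/s\n", (unsigned int)speed);
		else
			printf("link speed: unknown\n");

		printf("duplex: %s\n", duplex == 0x01 ? "full" :
				       duplex == 0x00 ? "half" : "unknown");
	}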
80 93
81/* 94/*
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 49e8fd08855a..56376d3907d8 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -20,20 +20,22 @@ struct cxl_ioctl_start_work {
20 __u64 work_element_descriptor; 20 __u64 work_element_descriptor;
21 __u64 amr; 21 __u64 amr;
22 __s16 num_interrupts; 22 __s16 num_interrupts;
23 __s16 reserved1; 23 __u16 tid;
24 __s32 reserved2; 24 __s32 reserved1;
25 __u64 reserved2;
25 __u64 reserved3; 26 __u64 reserved3;
26 __u64 reserved4; 27 __u64 reserved4;
27 __u64 reserved5; 28 __u64 reserved5;
28 __u64 reserved6;
29}; 29};
30 30
31#define CXL_START_WORK_AMR 0x0000000000000001ULL 31#define CXL_START_WORK_AMR 0x0000000000000001ULL
32#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL 32#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
33#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL 33#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL
34#define CXL_START_WORK_TID 0x0000000000000008ULL
34#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\ 35#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
35 CXL_START_WORK_NUM_IRQS |\ 36 CXL_START_WORK_NUM_IRQS |\
36 CXL_START_WORK_ERR_FF) 37 CXL_START_WORK_ERR_FF |\
38 CXL_START_WORK_TID)
37 39
38 40
39/* Possible modes that an afu can be in */ 41/* Possible modes that an afu can be in */
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h
new file mode 100644
index 000000000000..4b0b0b756f3e
--- /dev/null
+++ b/include/uapi/misc/ocxl.h
@@ -0,0 +1,49 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/* Copyright 2017 IBM Corp. */
3#ifndef _UAPI_MISC_OCXL_H
4#define _UAPI_MISC_OCXL_H
5
6#include <linux/types.h>
7#include <linux/ioctl.h>
8
9enum ocxl_event_type {
10 OCXL_AFU_EVENT_XSL_FAULT_ERROR = 0,
11};
12
13#define OCXL_KERNEL_EVENT_FLAG_LAST 0x0001 /* This is the last event pending */
14
15struct ocxl_kernel_event_header {
16 __u16 type;
17 __u16 flags;
18 __u32 reserved;
19};
20
21struct ocxl_kernel_event_xsl_fault_error {
22 __u64 addr;
23 __u64 dsisr;
24 __u64 count;
25 __u64 reserved;
26};
27
28struct ocxl_ioctl_attach {
29 __u64 amr;
30 __u64 reserved1;
31 __u64 reserved2;
32 __u64 reserved3;
33};
34
35struct ocxl_ioctl_irq_fd {
36 __u64 irq_offset;
37 __s32 eventfd;
38 __u32 reserved;
39};
40
41/* ioctl numbers */
42#define OCXL_MAGIC 0xCA
43/* AFU devices */
44#define OCXL_IOCTL_ATTACH _IOW(OCXL_MAGIC, 0x10, struct ocxl_ioctl_attach)
45#define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64)
46#define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64)
47#define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd)
48
49#endif /* _UAPI_MISC_OCXL_H */
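A sketch of the typical userspace sequence with this interface: attach a context, allocate an AFU interrupt and wire it to an eventfd. The device path is supplied by the caller (OpenCAPI AFU nodes are assumed to live under /dev/ocxl/), and setup_afu() with amr = 0 and minimal error handling is illustrative only.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <misc/ocxl.h>

	/* Sketch: attach to an AFU and bind one AFU interrupt to an eventfd. */
	static int setup_afu(const char *path)
	{
		struct ocxl_ioctl_attach attach;
		struct ocxl_ioctl_irq_fd irq_fd;
		__u64 irq_offset;
		int fd = open(path, O_RDWR);

		if (fd < 0)
			return -1;

		memset(&attach, 0, sizeof(attach));	/* amr = 0 */
		if (ioctl(fd, OCXL_IOCTL_ATTACH, &attach))
			return -1;

		if (ioctl(fd, OCXL_IOCTL_IRQ_ALLOC, &irq_offset))
			return -1;

		memset(&irq_fd, 0, sizeof(irq_fd));
		irq_fd.irq_offset = irq_offset;
		irq_fd.eventfd = eventfd(0, 0);
		if (irq_fd.eventfd < 0 || ioctl(fd, OCXL_IOCTL_IRQ_SET_FD, &irq_fd))
			return -1;

		return fd;	/* context attached, interrupt wired to the eventfd */
	}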
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 398a514ee446..db54115be044 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -82,6 +82,15 @@ struct bnxt_re_qp_resp {
82 __u32 rsvd; 82 __u32 rsvd;
83}; 83};
84 84
85struct bnxt_re_srq_req {
86 __u64 srqva;
87 __u64 srq_handle;
88};
89
90struct bnxt_re_srq_resp {
91 __u32 srqid;
92};
93
85enum bnxt_re_shpg_offt { 94enum bnxt_re_shpg_offt {
86 BNXT_RE_BEG_RESV_OFFT = 0x00, 95 BNXT_RE_BEG_RESV_OFFT = 0x00,
87 BNXT_RE_AVID_OFFT = 0x10, 96 BNXT_RE_AVID_OFFT = 0x10,
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7e11bb8651b6..04d0e67b1312 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -402,13 +402,18 @@ struct ib_uverbs_create_cq {
402 __u64 driver_data[0]; 402 __u64 driver_data[0];
403}; 403};
404 404
405enum ib_uverbs_ex_create_cq_flags {
406 IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
407 IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
408};
409
405struct ib_uverbs_ex_create_cq { 410struct ib_uverbs_ex_create_cq {
406 __u64 user_handle; 411 __u64 user_handle;
407 __u32 cqe; 412 __u32 cqe;
408 __u32 comp_vector; 413 __u32 comp_vector;
409 __s32 comp_channel; 414 __s32 comp_channel;
410 __u32 comp_mask; 415 __u32 comp_mask;
411 __u32 flags; 416 __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */
412 __u32 reserved; 417 __u32 reserved;
413}; 418};
414 419
@@ -449,7 +454,7 @@ struct ib_uverbs_wc {
449 __u32 vendor_err; 454 __u32 vendor_err;
450 __u32 byte_len; 455 __u32 byte_len;
451 union { 456 union {
452 __u32 imm_data; 457 __be32 imm_data;
453 __u32 invalidate_rkey; 458 __u32 invalidate_rkey;
454 } ex; 459 } ex;
455 __u32 qp_num; 460 __u32 qp_num;
@@ -765,7 +770,7 @@ struct ib_uverbs_send_wr {
765 __u32 opcode; 770 __u32 opcode;
766 __u32 send_flags; 771 __u32 send_flags;
767 union { 772 union {
768 __u32 imm_data; 773 __be32 imm_data;
769 __u32 invalidate_rkey; 774 __u32 invalidate_rkey;
770 } ex; 775 } ex;
771 union { 776 union {
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 224b52b6279c..7f9c37346613 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -97,8 +97,8 @@ struct mlx4_ib_create_srq_resp {
97}; 97};
98 98
99struct mlx4_ib_create_qp_rss { 99struct mlx4_ib_create_qp_rss {
100 __u64 rx_hash_fields_mask; 100 __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
101 __u8 rx_hash_function; 101 __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
102 __u8 reserved[7]; 102 __u8 reserved[7];
103 __u8 rx_hash_key[40]; 103 __u8 rx_hash_key[40];
104 __u32 comp_mask; 104 __u32 comp_mask;
@@ -152,7 +152,8 @@ enum mlx4_ib_rx_hash_fields {
152 MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4, 152 MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
153 MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5, 153 MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
154 MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6, 154 MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
155 MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7 155 MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7,
156 MLX4_IB_RX_HASH_INNER = 1ULL << 31,
156}; 157};
157 158
158#endif /* MLX4_ABI_USER_H */ 159#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a33e0517d3fd..1111aa4e7c1e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -41,6 +41,9 @@ enum {
 	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
 	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
 	MLX5_QP_FLAG_TUNNEL_OFFLOADS	= 1 << 2,
+	MLX5_QP_FLAG_BFREG_INDEX	= 1 << 3,
+	MLX5_QP_FLAG_TYPE_DCT		= 1 << 4,
+	MLX5_QP_FLAG_TYPE_DCI		= 1 << 5,
 };
 
 enum {
@@ -121,10 +124,12 @@ struct mlx5_ib_alloc_ucontext_resp {
 	__u8	cqe_version;
 	__u8	cmds_supp_uhw;
 	__u8	eth_min_inline;
-	__u8	reserved2;
+	__u8	clock_info_versions;
 	__u64	hca_core_clock_offset;
 	__u32	log_uar_size;
 	__u32	num_uars_per_page;
+	__u32	num_dyn_bfregs;
+	__u32	reserved3;
 };
 
 struct mlx5_ib_alloc_pd_resp {
@@ -280,8 +285,11 @@ struct mlx5_ib_create_qp {
 	__u32	rq_wqe_shift;
 	__u32	flags;
 	__u32	uidx;
-	__u32	reserved0;
-	__u64	sq_buf_addr;
+	__u32	bfreg_index;
+	union {
+		__u64	sq_buf_addr;
+		__u64	access_key;
+	};
 };
 
 /* RX Hash function flags */
@@ -307,7 +315,7 @@ enum mlx5_rx_hash_fields {
 	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
 	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7,
 	/* Save bits for future fields */
-	MLX5_RX_HASH_INNER		= 1 << 31
+	MLX5_RX_HASH_INNER		= (1UL << 31),
 };
 
 struct mlx5_ib_create_qp_rss {
@@ -354,6 +362,11 @@ struct mlx5_ib_create_ah_resp {
 	__u8	reserved[6];
 };
 
+struct mlx5_ib_modify_qp_resp {
+	__u32	response_length;
+	__u32	dctn;
+};
+
 struct mlx5_ib_create_wq_resp {
 	__u32	response_length;
 	__u32	reserved;
@@ -368,4 +381,36 @@ struct mlx5_ib_modify_wq {
 	__u32	comp_mask;
 	__u32	reserved;
 };
+
+struct mlx5_ib_clock_info {
+	__u32 sign;
+	__u32 resv;
+	__u64 nsec;
+	__u64 cycles;
+	__u64 frac;
+	__u32 mult;
+	__u32 shift;
+	__u64 mask;
+	__u64 overflow_period;
+};
+
+enum mlx5_ib_mmap_cmd {
+	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
+	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
+	MLX5_IB_MMAP_WC_PAGE			= 2,
+	MLX5_IB_MMAP_NC_PAGE			= 3,
+	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
+	MLX5_IB_MMAP_CORE_CLOCK			= 5,
+	MLX5_IB_MMAP_ALLOC_WC			= 6,
+	MLX5_IB_MMAP_CLOCK_INFO			= 7,
+};
+
+enum {
+	MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
+};
+
+/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
+enum {
+	MLX5_IB_CLOCK_INFO_V1 = 0,
+};
 #endif /* MLX5_ABI_USER_H */
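
The clock-info page exposed through MLX5_IB_MMAP_CLOCK_INFO is rewritten by the kernel while readers may be looking at it; the sign word together with MLX5_IB_CLOCK_INFO_KERNEL_UPDATING acts as a small sequence lock. Below is a plausible lockless-read sketch; the retry protocol is inferred from those names, memory barriers are omitted for brevity, and the math that turns the raw cycles/mult/shift/frac fields into nanoseconds is deliberately not shown.

#include <rdma/mlx5-abi.h>

/* Seqlock-style snapshot of the shared clock-info page (a sketch, not the
 * libmlx5 implementation). */
static struct mlx5_ib_clock_info read_clock_info(const struct mlx5_ib_clock_info *ci)
{
	struct mlx5_ib_clock_info snap;
	__u32 sign;

	do {
		do {
			sign = ci->sign;	/* wait out an in-progress update */
		} while (sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

		snap = *ci;			/* copy all fields */
	} while (ci->sign != sign);		/* kernel wrote meanwhile: retry */

	return snap;
}
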
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index cc002e316d09..4c77e2a7b07e 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -227,14 +227,16 @@ enum rdma_nldev_command {
 	RDMA_NLDEV_CMD_UNSPEC,
 
 	RDMA_NLDEV_CMD_GET, /* can dump */
-	RDMA_NLDEV_CMD_SET,
-	RDMA_NLDEV_CMD_NEW,
-	RDMA_NLDEV_CMD_DEL,
 
-	RDMA_NLDEV_CMD_PORT_GET, /* can dump */
-	RDMA_NLDEV_CMD_PORT_SET,
-	RDMA_NLDEV_CMD_PORT_NEW,
-	RDMA_NLDEV_CMD_PORT_DEL,
+	/* 2 - 4 are free to use */
+
+	RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */
+
+	/* 6 - 8 are free to use */
+
+	RDMA_NLDEV_CMD_RES_GET = 9, /* can dump */
+
+	RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
 
 	RDMA_NLDEV_NUM_OPS
 };
@@ -303,6 +305,51 @@ enum rdma_nldev_attr {
 
 	RDMA_NLDEV_ATTR_DEV_NODE_TYPE,		/* u8 */
 
+	RDMA_NLDEV_ATTR_RES_SUMMARY,		/* nested table */
+	RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY,	/* nested table */
+	RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME,	/* string */
+	RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR,	/* u64 */
+
+	RDMA_NLDEV_ATTR_RES_QP,			/* nested table */
+	RDMA_NLDEV_ATTR_RES_QP_ENTRY,		/* nested table */
+	/*
+	 * Local QPN
+	 */
+	RDMA_NLDEV_ATTR_RES_LQPN,		/* u32 */
+	/*
+	 * Remote QPN,
+	 * Applicable for RC and UC only IBTA 11.2.5.3 QUERY QUEUE PAIR
+	 */
+	RDMA_NLDEV_ATTR_RES_RQPN,		/* u32 */
+	/*
+	 * Receive Queue PSN,
+	 * Applicable for RC and UC only 11.2.5.3 QUERY QUEUE PAIR
+	 */
+	RDMA_NLDEV_ATTR_RES_RQ_PSN,		/* u32 */
+	/*
+	 * Send Queue PSN
+	 */
+	RDMA_NLDEV_ATTR_RES_SQ_PSN,		/* u32 */
+	RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,	/* u8 */
+	/*
+	 * QP types as visible to RDMA/core, the reserved QPT
+	 * are not exported through this interface.
+	 */
+	RDMA_NLDEV_ATTR_RES_TYPE,		/* u8 */
+	RDMA_NLDEV_ATTR_RES_STATE,		/* u8 */
+	/*
+	 * Process ID which created object,
+	 * in case of kernel origin, PID won't exist.
+	 */
+	RDMA_NLDEV_ATTR_RES_PID,		/* u32 */
+	/*
+	 * The name of process created following resource.
+	 * It will exist only for kernel objects.
+	 * For user created objects, the user is supposed
+	 * to read /proc/PID/comm file.
+	 */
+	RDMA_NLDEV_ATTR_RES_KERN_NAME,		/* string */
+
 	RDMA_NLDEV_ATTR_MAX
 };
 #endif /* _UAPI_RDMA_NETLINK_H */
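
With RDMA_NLDEV_CMD_PORT_GET pinned to 5 and RDMA_NLDEV_CMD_RES_GET to 9, the removed SET/NEW/DEL values (2-4 and 6-8) stay reserved, so the surviving dump commands keep their numbers on the wire. A hedged sketch of addressing the new QP-resource dump follows; RDMA_NL_GET_TYPE() and RDMA_NL_NLDEV are assumed to be provided by the same header, and parsing of the nested RDMA_NLDEV_ATTR_RES_QP reply is omitted.

#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>
#include <string.h>

/* Hypothetical: fill a netlink header asking the nldev subsystem to dump
 * all tracked QPs (RDMA_NLDEV_CMD_RES_QP_GET is marked "can dump"). */
static void fill_res_qp_dump(struct nlmsghdr *nlh)
{
	memset(nlh, 0, sizeof(*nlh));
	nlh->nlmsg_len   = NLMSG_LENGTH(0);
	nlh->nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					    RDMA_NLDEV_CMD_RES_QP_GET);
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
}
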
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index aaa352f2f110..02ca0d0f1eb7 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -52,12 +52,14 @@
 #define PVRDMA_UVERBS_ABI_VERSION	3		/* ABI Version. */
 #define PVRDMA_UAR_HANDLE_MASK		0x00FFFFFF	/* Bottom 24 bits. */
 #define PVRDMA_UAR_QP_OFFSET		0		/* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND		BIT(30)		/* Send bit. */
-#define PVRDMA_UAR_QP_RECV		BIT(31)		/* Recv bit. */
+#define PVRDMA_UAR_QP_SEND		(1 << 30)	/* Send bit. */
+#define PVRDMA_UAR_QP_RECV		(1 << 31)	/* Recv bit. */
 #define PVRDMA_UAR_CQ_OFFSET		4		/* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL		BIT(29)		/* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM		BIT(30)		/* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL		BIT(31)		/* Poll bit. */
+#define PVRDMA_UAR_CQ_ARM_SOL		(1 << 29)	/* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM		(1 << 30)	/* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL		(1 << 31)	/* Poll bit. */
+#define PVRDMA_UAR_SRQ_OFFSET		8		/* SRQ doorbell. */
+#define PVRDMA_UAR_SRQ_RECV		(1 << 30)	/* Recv bit. */
 
 enum pvrdma_wr_opcode {
 	PVRDMA_WR_RDMA_WRITE,
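
BIT() is a kernel-internal helper (linux/bits.h) that is not available to userspace, so an exported uapi header has to spell the doorbell bits out as plain shifts; the change also adds an SRQ doorbell offset and recv bit. A hypothetical sketch of how such constants might combine into a doorbell write; the handle/bit layout is an illustration only, not a statement of the pvrdma device ABI.

#include <rdma/vmw_pvrdma-abi.h>

/* Hypothetical SRQ doorbell: poke the UAR page at the SRQ offset with the
 * recv bit plus a masked handle.  Purely illustrative. */
static void ring_srq_doorbell(volatile __u32 *uar, __u32 srq_handle)
{
	uar[PVRDMA_UAR_SRQ_OFFSET / sizeof(__u32)] =
		PVRDMA_UAR_SRQ_RECV | (srq_handle & PVRDMA_UAR_HANDLE_MASK);
}
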
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
deleted file mode 100644
index 78957c9626f5..000000000000
--- a/include/video/exynos5433_decon.h
+++ /dev/null
@@ -1,209 +0,0 @@
1/*
2 * Copyright (C) 2014 Samsung Electronics Co.Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundationr
7 */
8
9#ifndef EXYNOS_REGS_DECON_H
10#define EXYNOS_REGS_DECON_H
11
12/* Exynos543X DECON */
13#define DECON_VIDCON0 0x0000
14#define DECON_VIDOUTCON0 0x0010
15#define DECON_WINCONx(n) (0x0020 + ((n) * 4))
16#define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4))
17#define DECON_SHADOWCON 0x00A0
18#define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20))
19#define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20))
20#define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20))
21#define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20))
22#define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20))
23#define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10))
24#define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10))
25#define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10))
26#define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10))
27#define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10))
28#define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10))
29#define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4))
30#define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4))
31#define DECON_VIDINTCON0 0x0220
32#define DECON_VIDINTCON1 0x0224
33#define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8))
34#define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8))
35#define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4))
36#define DECON_WINxMAP(n) (0x0270 + ((n) * 4))
37#define DECON_QOSLUT07_00 0x02C0
38#define DECON_QOSLUT15_08 0x02C4
39#define DECON_QOSCTRL 0x02C8
40#define DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4))
41#define DECON_BLENDCON 0x0310
42#define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4))
43#define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4))
44#define DECON_FRAMEFIFO_REG7 0x051C
45#define DECON_FRAMEFIFO_REG8 0x0520
46#define DECON_FRAMEFIFO_STATUS 0x0524
47#define DECON_CMU 0x1404
48#define DECON_UPDATE 0x1410
49#define DECON_CRFMID 0x1414
50#define DECON_UPDATE_SCHEME 0x1438
51#define DECON_VIDCON1 0x2000
52#define DECON_VIDCON2 0x2004
53#define DECON_VIDCON3 0x2008
54#define DECON_VIDCON4 0x200C
55#define DECON_VIDTCON2 0x2028
56#define DECON_FRAME_SIZE 0x2038
57#define DECON_LINECNT_OP_THRESHOLD 0x203C
58#define DECON_TRIGCON 0x2040
59#define DECON_TRIGSKIP 0x2050
60#define DECON_CRCRDATA 0x20B0
61#define DECON_CRCCTRL 0x20B4
62
63/* Exynos5430 DECON */
64#define DECON_VIDTCON0 0x2020
65#define DECON_VIDTCON1 0x2024
66
67/* Exynos5433 DECON */
68#define DECON_VIDTCON00 0x2010
69#define DECON_VIDTCON01 0x2014
70#define DECON_VIDTCON10 0x2018
71#define DECON_VIDTCON11 0x201C
72
73/* Exynos543X DECON Internal */
74#define DECON_W013DSTREOCON 0x0320
75#define DECON_W233DSTREOCON 0x0324
76#define DECON_FRAMEFIFO_REG0 0x0500
77#define DECON_ENHANCER_CTRL 0x2100
78
79/* Exynos543X DECON TV */
80#define DECON_VCLKCON0 0x0014
81#define DECON_VIDINTCON2 0x0228
82#define DECON_VIDINTCON3 0x022C
83
84/* VIDCON0 */
85#define VIDCON0_SWRESET (1 << 28)
86#define VIDCON0_CLKVALUP (1 << 14)
87#define VIDCON0_VLCKFREE (1 << 5)
88#define VIDCON0_STOP_STATUS (1 << 2)
89#define VIDCON0_ENVID (1 << 1)
90#define VIDCON0_ENVID_F (1 << 0)
91
92/* VIDOUTCON0 */
93#define VIDOUT_INTERLACE_FIELD_F (1 << 29)
94#define VIDOUT_INTERLACE_EN_F (1 << 28)
95#define VIDOUT_LCD_ON (1 << 24)
96#define VIDOUT_IF_F_MASK (0x3 << 20)
97#define VIDOUT_RGB_IF (0x0 << 20)
98#define VIDOUT_COMMAND_IF (0x2 << 20)
99
100/* WINCONx */
101#define WINCONx_HAWSWP_F (1 << 16)
102#define WINCONx_WSWP_F (1 << 15)
103#define WINCONx_BURSTLEN_MASK (0x3 << 10)
104#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
105#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
106#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
107#define WINCONx_BLD_PIX_F (1 << 6)
108#define WINCONx_BPPMODE_MASK (0xf << 2)
109#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
110#define WINCONx_BPPMODE_16BPP_A1555 (0x6 << 2)
111#define WINCONx_BPPMODE_16BPP_I1555 (0x7 << 2)
112#define WINCONx_BPPMODE_24BPP_888 (0xb << 2)
113#define WINCONx_BPPMODE_24BPP_A1887 (0xc << 2)
114#define WINCONx_BPPMODE_25BPP_A1888 (0xd << 2)
115#define WINCONx_BPPMODE_32BPP_A8888 (0xd << 2)
116#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
117#define WINCONx_ALPHA_SEL_F (1 << 1)
118#define WINCONx_ENWIN_F (1 << 0)
119
120/* SHADOWCON */
121#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
122#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
123
124/* VIDOSDxD */
125#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
126#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
127#define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0)
128
129/* VIDINTCON0 */
130#define VIDINTCON0_FRAMEDONE (1 << 17)
131#define VIDINTCON0_FRAMESEL_BP (0 << 15)
132#define VIDINTCON0_FRAMESEL_VS (1 << 15)
133#define VIDINTCON0_FRAMESEL_AC (2 << 15)
134#define VIDINTCON0_FRAMESEL_FP (3 << 15)
135#define VIDINTCON0_INTFRMEN (1 << 12)
136#define VIDINTCON0_INTEN (1 << 0)
137
138/* VIDINTCON1 */
139#define VIDINTCON1_INTFRMDONEPEND (1 << 2)
140#define VIDINTCON1_INTFRMPEND (1 << 1)
141#define VIDINTCON1_INTFIFOPEND (1 << 0)
142
143/* DECON_CMU */
144#define CMU_CLKGAGE_MODE_SFR_F (1 << 1)
145#define CMU_CLKGAGE_MODE_MEM_F (1 << 0)
146
147/* DECON_UPDATE */
148#define STANDALONE_UPDATE_F (1 << 0)
149
150/* DECON_VIDCON1 */
151#define VIDCON1_LINECNT_MASK (0x0fff << 16)
152#define VIDCON1_I80_ACTIVE (1 << 15)
153#define VIDCON1_VSTATUS_MASK (0x3 << 13)
154#define VIDCON1_VSTATUS_VS (0 << 13)
155#define VIDCON1_VSTATUS_BP (1 << 13)
156#define VIDCON1_VSTATUS_AC (2 << 13)
157#define VIDCON1_VSTATUS_FP (3 << 13)
158#define VIDCON1_VCLK_MASK (0x3 << 9)
159#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
160#define VIDCON1_VCLK_HOLD (0x0 << 9)
161#define VIDCON1_VCLK_RUN (0x1 << 9)
162
163
164/* DECON_VIDTCON00 */
165#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16)
166#define VIDTCON00_VFPD_F(x) ((x) & 0xfff)
167
168/* DECON_VIDTCON01 */
169#define VIDTCON01_VSPW_F(x) (((x) & 0xfff) << 16)
170
171/* DECON_VIDTCON10 */
172#define VIDTCON10_HBPD_F(x) (((x) & 0xfff) << 16)
173#define VIDTCON10_HFPD_F(x) ((x) & 0xfff)
174
175/* DECON_VIDTCON11 */
176#define VIDTCON11_HSPW_F(x) (((x) & 0xfff) << 16)
177
178/* DECON_VIDTCON2 */
179#define VIDTCON2_LINEVAL(x) (((x) & 0xfff) << 16)
180#define VIDTCON2_HOZVAL(x) ((x) & 0xfff)
181
182/* TRIGCON */
183#define TRIGCON_TRIGEN_PER_F (1 << 31)
184#define TRIGCON_TRIGEN_F (1 << 30)
185#define TRIGCON_TE_AUTO_MASK (1 << 29)
186#define TRIGCON_WB_SWTRIGCMD (1 << 28)
187#define TRIGCON_SWTRIGCMD_W4BUF (1 << 26)
188#define TRIGCON_TRIGMODE_W4BUF (1 << 25)
189#define TRIGCON_SWTRIGCMD_W3BUF (1 << 21)
190#define TRIGCON_TRIGMODE_W3BUF (1 << 20)
191#define TRIGCON_SWTRIGCMD_W2BUF (1 << 16)
192#define TRIGCON_TRIGMODE_W2BUF (1 << 15)
193#define TRIGCON_SWTRIGCMD_W1BUF (1 << 11)
194#define TRIGCON_TRIGMODE_W1BUF (1 << 10)
195#define TRIGCON_SWTRIGCMD_W0BUF (1 << 6)
196#define TRIGCON_TRIGMODE_W0BUF (1 << 5)
197#define TRIGCON_HWTRIGMASK (1 << 4)
198#define TRIGCON_HWTRIGEN (1 << 3)
199#define TRIGCON_HWTRIG_INV (1 << 2)
200#define TRIGCON_SWTRIGCMD (1 << 1)
201#define TRIGCON_SWTRIGEN (1 << 0)
202
203/* DECON_CRCCTRL */
204#define CRCCTRL_CRCCLKEN (0x1 << 2)
205#define CRCCTRL_CRCSTART_F (0x1 << 1)
206#define CRCCTRL_CRCEN (0x1 << 0)
207#define CRCCTRL_MASK (0x7)
208
209#endif /* EXYNOS_REGS_DECON_H */
diff --git a/include/video/exynos7_decon.h b/include/video/exynos7_decon.h
deleted file mode 100644
index a62b11b613f6..000000000000
--- a/include/video/exynos7_decon.h
+++ /dev/null
@@ -1,349 +0,0 @@
1/* include/video/exynos7_decon.h
2 *
3 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
4 * Author: Ajay Kumar <ajaykumar.rs@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/* VIDCON0 */
13#define VIDCON0 0x00
14
15#define VIDCON0_SWRESET (1 << 28)
16#define VIDCON0_DECON_STOP_STATUS (1 << 2)
17#define VIDCON0_ENVID (1 << 1)
18#define VIDCON0_ENVID_F (1 << 0)
19
20/* VIDOUTCON0 */
21#define VIDOUTCON0 0x4
22
23#define VIDOUTCON0_DUAL_MASK (0x3 << 24)
24#define VIDOUTCON0_DUAL_ON (0x3 << 24)
25#define VIDOUTCON0_DISP_IF_1_ON (0x2 << 24)
26#define VIDOUTCON0_DISP_IF_0_ON (0x1 << 24)
27#define VIDOUTCON0_DUAL_OFF (0x0 << 24)
28#define VIDOUTCON0_IF_SHIFT 23
29#define VIDOUTCON0_IF_MASK (0x1 << 23)
30#define VIDOUTCON0_RGBIF (0x0 << 23)
31#define VIDOUTCON0_I80IF (0x1 << 23)
32
33/* VIDCON3 */
34#define VIDCON3 0x8
35
36/* VIDCON4 */
37#define VIDCON4 0xC
38#define VIDCON4_FIFOCNT_START_EN (1 << 0)
39
40/* VCLKCON0 */
41#define VCLKCON0 0x10
42#define VCLKCON0_CLKVALUP (1 << 8)
43#define VCLKCON0_VCLKFREE (1 << 0)
44
45/* VCLKCON */
46#define VCLKCON1 0x14
47#define VCLKCON1_CLKVAL_NUM_VCLK(val) (((val) & 0xff) << 0)
48#define VCLKCON2 0x18
49
50/* SHADOWCON */
51#define SHADOWCON 0x30
52
53#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win)))
54
55/* WINCONx */
56#define WINCON(_win) (0x50 + ((_win) * 4))
57
58#define WINCONx_BUFSTATUS (0x3 << 30)
59#define WINCONx_BUFSEL_MASK (0x3 << 28)
60#define WINCONx_BUFSEL_SHIFT 28
61#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18)
62#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18)
63#define WINCONx_BURSTLEN_16WORD (0x0 << 11)
64#define WINCONx_BURSTLEN_8WORD (0x1 << 11)
65#define WINCONx_BURSTLEN_MASK (0x1 << 11)
66#define WINCONx_BURSTLEN_SHIFT 11
67#define WINCONx_BLD_PLANE (0 << 8)
68#define WINCONx_BLD_PIX (1 << 8)
69#define WINCONx_ALPHA_MUL (1 << 7)
70
71#define WINCONx_BPPMODE_MASK (0xf << 2)
72#define WINCONx_BPPMODE_SHIFT 2
73#define WINCONx_BPPMODE_16BPP_565 (0x8 << 2)
74#define WINCONx_BPPMODE_24BPP_BGRx (0x7 << 2)
75#define WINCONx_BPPMODE_24BPP_RGBx (0x6 << 2)
76#define WINCONx_BPPMODE_24BPP_xBGR (0x5 << 2)
77#define WINCONx_BPPMODE_24BPP_xRGB (0x4 << 2)
78#define WINCONx_BPPMODE_32BPP_BGRA (0x3 << 2)
79#define WINCONx_BPPMODE_32BPP_RGBA (0x2 << 2)
80#define WINCONx_BPPMODE_32BPP_ABGR (0x1 << 2)
81#define WINCONx_BPPMODE_32BPP_ARGB (0x0 << 2)
82#define WINCONx_ALPHA_SEL (1 << 1)
83#define WINCONx_ENWIN (1 << 0)
84
85#define WINCON1_ALPHA_MUL_F (1 << 7)
86#define WINCON2_ALPHA_MUL_F (1 << 7)
87#define WINCON3_ALPHA_MUL_F (1 << 7)
88#define WINCON4_ALPHA_MUL_F (1 << 7)
89
90/* VIDOSDxH: The height for the OSD image(READ ONLY)*/
91#define VIDOSD_H(_x) (0x80 + ((_x) * 4))
92
93/* Frame buffer start addresses: VIDWxxADD0n */
94#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10))
95#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10))
96#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10))
97
98#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8))
99#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8))
100#define VIDW_OFFSET_X(_win) (0x0170 + ((_win) * 8))
101#define VIDW_OFFSET_Y(_win) (0x0174 + ((_win) * 8))
102#define VIDW_BLKOFFSET(_win) (0x01B0 + ((_win) * 4))
103#define VIDW_BLKSIZE(win) (0x0200 + ((_win) * 4))
104
105/* Interrupt controls register */
106#define VIDINTCON2 0x228
107
108#define VIDINTCON1_INTEXTRA1_EN (1 << 1)
109#define VIDINTCON1_INTEXTRA0_EN (1 << 0)
110
111/* Interrupt controls and status register */
112#define VIDINTCON3 0x22C
113
114#define VIDINTCON1_INTEXTRA1_PEND (1 << 1)
115#define VIDINTCON1_INTEXTRA0_PEND (1 << 0)
116
117/* VIDOSDxA ~ VIDOSDxE */
118#define VIDOSD_BASE 0x230
119
120#define OSD_STRIDE 0x20
121
122#define VIDOSD_A(_win) (VIDOSD_BASE + \
123 ((_win) * OSD_STRIDE) + 0x00)
124#define VIDOSD_B(_win) (VIDOSD_BASE + \
125 ((_win) * OSD_STRIDE) + 0x04)
126#define VIDOSD_C(_win) (VIDOSD_BASE + \
127 ((_win) * OSD_STRIDE) + 0x08)
128#define VIDOSD_D(_win) (VIDOSD_BASE + \
129 ((_win) * OSD_STRIDE) + 0x0C)
130#define VIDOSD_E(_win) (VIDOSD_BASE + \
131 ((_win) * OSD_STRIDE) + 0x10)
132
133#define VIDOSDxA_TOPLEFT_X_MASK (0x1fff << 13)
134#define VIDOSDxA_TOPLEFT_X_SHIFT 13
135#define VIDOSDxA_TOPLEFT_X_LIMIT 0x1fff
136#define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x1fff) << 13)
137
138#define VIDOSDxA_TOPLEFT_Y_MASK (0x1fff << 0)
139#define VIDOSDxA_TOPLEFT_Y_SHIFT 0
140#define VIDOSDxA_TOPLEFT_Y_LIMIT 0x1fff
141#define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x1fff) << 0)
142
143#define VIDOSDxB_BOTRIGHT_X_MASK (0x1fff << 13)
144#define VIDOSDxB_BOTRIGHT_X_SHIFT 13
145#define VIDOSDxB_BOTRIGHT_X_LIMIT 0x1fff
146#define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x1fff) << 13)
147
148#define VIDOSDxB_BOTRIGHT_Y_MASK (0x1fff << 0)
149#define VIDOSDxB_BOTRIGHT_Y_SHIFT 0
150#define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x1fff
151#define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x1fff) << 0)
152
153#define VIDOSDxC_ALPHA0_R_F(_x) (((_x) & 0xFF) << 16)
154#define VIDOSDxC_ALPHA0_G_F(_x) (((_x) & 0xFF) << 8)
155#define VIDOSDxC_ALPHA0_B_F(_x) (((_x) & 0xFF) << 0)
156
157#define VIDOSDxD_ALPHA1_R_F(_x) (((_x) & 0xFF) << 16)
158#define VIDOSDxD_ALPHA1_G_F(_x) (((_x) & 0xFF) << 8)
159#define VIDOSDxD_ALPHA1_B_F(_x) (((_x) & 0xFF) >> 0)
160
161/* Window MAP (Color map) */
162#define WINxMAP(_win) (0x340 + ((_win) * 4))
163
164#define WINxMAP_MAP (1 << 24)
165#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0)
166#define WINxMAP_MAP_COLOUR_SHIFT 0
167#define WINxMAP_MAP_COLOUR_LIMIT 0xffffff
168#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0)
169
170/* Window colour-key control registers */
171#define WKEYCON 0x370
172
173#define WKEYCON0 0x00
174#define WKEYCON1 0x04
175#define WxKEYCON0_KEYBL_EN (1 << 26)
176#define WxKEYCON0_KEYEN_F (1 << 25)
177#define WxKEYCON0_DIRCON (1 << 24)
178#define WxKEYCON0_COMPKEY_MASK (0xffffff << 0)
179#define WxKEYCON0_COMPKEY_SHIFT 0
180#define WxKEYCON0_COMPKEY_LIMIT 0xffffff
181#define WxKEYCON0_COMPKEY(_x) ((_x) << 0)
182#define WxKEYCON1_COLVAL_MASK (0xffffff << 0)
183#define WxKEYCON1_COLVAL_SHIFT 0
184#define WxKEYCON1_COLVAL_LIMIT 0xffffff
185#define WxKEYCON1_COLVAL(_x) ((_x) << 0)
186
187/* color key control register for hardware window 1 ~ 4. */
188#define WKEYCON0_BASE(x) ((WKEYCON + WKEYCON0) + ((x - 1) * 8))
189/* color key value register for hardware window 1 ~ 4. */
190#define WKEYCON1_BASE(x) ((WKEYCON + WKEYCON1) + ((x - 1) * 8))
191
192/* Window KEY Alpha value */
193#define WxKEYALPHA(_win) (0x3A0 + (((_win) - 1) * 0x4))
194
195#define Wx_KEYALPHA_R_F_SHIFT 16
196#define Wx_KEYALPHA_G_F_SHIFT 8
197#define Wx_KEYALPHA_B_F_SHIFT 0
198
199/* Blending equation */
200#define BLENDE(_win) (0x03C0 + ((_win) * 4))
201#define BLENDE_COEF_ZERO 0x0
202#define BLENDE_COEF_ONE 0x1
203#define BLENDE_COEF_ALPHA_A 0x2
204#define BLENDE_COEF_ONE_MINUS_ALPHA_A 0x3
205#define BLENDE_COEF_ALPHA_B 0x4
206#define BLENDE_COEF_ONE_MINUS_ALPHA_B 0x5
207#define BLENDE_COEF_ALPHA0 0x6
208#define BLENDE_COEF_A 0xA
209#define BLENDE_COEF_ONE_MINUS_A 0xB
210#define BLENDE_COEF_B 0xC
211#define BLENDE_COEF_ONE_MINUS_B 0xD
212#define BLENDE_Q_FUNC(_v) ((_v) << 18)
213#define BLENDE_P_FUNC(_v) ((_v) << 12)
214#define BLENDE_B_FUNC(_v) ((_v) << 6)
215#define BLENDE_A_FUNC(_v) ((_v) << 0)
216
217/* Blending equation control */
218#define BLENDCON 0x3D8
219#define BLENDCON_NEW_MASK (1 << 0)
220#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
221#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
222
223/* Interrupt control register */
224#define VIDINTCON0 0x500
225
226#define VIDINTCON0_WAKEUP_MASK (0x3f << 26)
227#define VIDINTCON0_INTEXTRAEN (1 << 21)
228
229#define VIDINTCON0_FRAMESEL0_SHIFT 15
230#define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15)
231#define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15)
232#define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15)
233#define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15)
234#define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15)
235
236#define VIDINTCON0_INT_FRAME (1 << 11)
237
238#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 3)
239#define VIDINTCON0_FIFOLEVEL_SHIFT 3
240#define VIDINTCON0_FIFOLEVEL_EMPTY (0x0 << 3)
241#define VIDINTCON0_FIFOLEVEL_TO25PC (0x1 << 3)
242#define VIDINTCON0_FIFOLEVEL_TO50PC (0x2 << 3)
243#define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 3)
244
245#define VIDINTCON0_FIFOSEL_MAIN_EN (1 << 1)
246#define VIDINTCON0_INT_FIFO (1 << 1)
247
248#define VIDINTCON0_INT_ENABLE (1 << 0)
249
250/* Interrupt controls and status register */
251#define VIDINTCON1 0x504
252
253#define VIDINTCON1_INT_EXTRA (1 << 3)
254#define VIDINTCON1_INT_I80 (1 << 2)
255#define VIDINTCON1_INT_FRAME (1 << 1)
256#define VIDINTCON1_INT_FIFO (1 << 0)
257
258/* VIDCON1 */
259#define VIDCON1(_x) (0x0600 + ((_x) * 0x50))
260#define VIDCON1_LINECNT_GET(_v) (((_v) >> 17) & 0x1fff)
261#define VIDCON1_VCLK_MASK (0x3 << 9)
262#define VIDCON1_VCLK_HOLD (0x0 << 9)
263#define VIDCON1_VCLK_RUN (0x1 << 9)
264#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
265#define VIDCON1_RGB_ORDER_O_MASK (0x7 << 4)
266#define VIDCON1_RGB_ORDER_O_RGB (0x0 << 4)
267#define VIDCON1_RGB_ORDER_O_GBR (0x1 << 4)
268#define VIDCON1_RGB_ORDER_O_BRG (0x2 << 4)
269#define VIDCON1_RGB_ORDER_O_BGR (0x4 << 4)
270#define VIDCON1_RGB_ORDER_O_RBG (0x5 << 4)
271#define VIDCON1_RGB_ORDER_O_GRB (0x6 << 4)
272
273/* VIDTCON0 */
274#define VIDTCON0 0x610
275
276#define VIDTCON0_VBPD_MASK (0xffff << 16)
277#define VIDTCON0_VBPD_SHIFT 16
278#define VIDTCON0_VBPD_LIMIT 0xffff
279#define VIDTCON0_VBPD(_x) ((_x) << 16)
280
281#define VIDTCON0_VFPD_MASK (0xffff << 0)
282#define VIDTCON0_VFPD_SHIFT 0
283#define VIDTCON0_VFPD_LIMIT 0xffff
284#define VIDTCON0_VFPD(_x) ((_x) << 0)
285
286/* VIDTCON1 */
287#define VIDTCON1 0x614
288
289#define VIDTCON1_VSPW_MASK (0xffff << 16)
290#define VIDTCON1_VSPW_SHIFT 16
291#define VIDTCON1_VSPW_LIMIT 0xffff
292#define VIDTCON1_VSPW(_x) ((_x) << 16)
293
294/* VIDTCON2 */
295#define VIDTCON2 0x618
296
297#define VIDTCON2_HBPD_MASK (0xffff << 16)
298#define VIDTCON2_HBPD_SHIFT 16
299#define VIDTCON2_HBPD_LIMIT 0xffff
300#define VIDTCON2_HBPD(_x) ((_x) << 16)
301
302#define VIDTCON2_HFPD_MASK (0xffff << 0)
303#define VIDTCON2_HFPD_SHIFT 0
304#define VIDTCON2_HFPD_LIMIT 0xffff
305#define VIDTCON2_HFPD(_x) ((_x) << 0)
306
307/* VIDTCON3 */
308#define VIDTCON3 0x61C
309
310#define VIDTCON3_HSPW_MASK (0xffff << 16)
311#define VIDTCON3_HSPW_SHIFT 16
312#define VIDTCON3_HSPW_LIMIT 0xffff
313#define VIDTCON3_HSPW(_x) ((_x) << 16)
314
315/* VIDTCON4 */
316#define VIDTCON4 0x620
317
318#define VIDTCON4_LINEVAL_MASK (0xfff << 16)
319#define VIDTCON4_LINEVAL_SHIFT 16
320#define VIDTCON4_LINEVAL_LIMIT 0xfff
321#define VIDTCON4_LINEVAL(_x) (((_x) & 0xfff) << 16)
322
323#define VIDTCON4_HOZVAL_MASK (0xfff << 0)
324#define VIDTCON4_HOZVAL_SHIFT 0
325#define VIDTCON4_HOZVAL_LIMIT 0xfff
326#define VIDTCON4_HOZVAL(_x) (((_x) & 0xfff) << 0)
327
328/* LINECNT OP THRSHOLD*/
329#define LINECNT_OP_THRESHOLD 0x630
330
331/* CRCCTRL */
332#define CRCCTRL 0x6C8
333#define CRCCTRL_CRCCLKEN (0x1 << 2)
334#define CRCCTRL_CRCSTART_F (0x1 << 1)
335#define CRCCTRL_CRCEN (0x1 << 0)
336
337/* DECON_CMU */
338#define DECON_CMU 0x704
339
340#define DECON_CMU_ALL_CLKGATE_ENABLE 0x3
341#define DECON_CMU_SE_CLKGATE_ENABLE (0x1 << 2)
342#define DECON_CMU_SFR_CLKGATE_ENABLE (0x1 << 1)
343#define DECON_CMU_MEM_CLKGATE_ENABLE (0x1 << 0)
344
345/* DECON_UPDATE */
346#define DECON_UPDATE 0x710
347
348#define DECON_UPDATE_SLAVE_SYNC (1 << 4)
349#define DECON_UPDATE_STANDALONE_F (1 << 0)
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index ce4c07688b13..abbad94e14a1 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -344,7 +344,7 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan);
 int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
 			      unsigned int axi_id, unsigned int width,
 			      unsigned int height, unsigned int stride,
-			      u32 format, unsigned long *eba);
+			      u32 format, uint64_t modifier, unsigned long *eba);
 
 /*
  * IPU CMOS Sensor Interface (csi) functions
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 1252a7a89bc0..0cabe6b09095 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -19,7 +19,7 @@ struct dloarea {
 
 struct urb_node {
 	struct list_head entry;
-	struct dlfb_data *dev;
+	struct dlfb_data *dlfb;
 	struct delayed_work release_urb_work;
 	struct urb *urb;
 };
@@ -35,7 +35,6 @@ struct urb_list {
 
 struct dlfb_data {
 	struct usb_device *udev;
-	struct device *gdev; /* &udev->dev */
 	struct fb_info *info;
 	struct urb_list urbs;
 	struct kref kref;