Diffstat (limited to 'include')
-rw-r--r--include/acpi/acbuffer.h1
-rw-r--r--include/acpi/acconfig.h4
-rw-r--r--include/acpi/acexcep.h7
-rw-r--r--include/acpi/acoutput.h21
-rw-r--r--include/acpi/acpi_bus.h4
-rw-r--r--include/acpi/acpi_drivers.h4
-rw-r--r--include/acpi/acpiosxf.h6
-rw-r--r--include/acpi/acpixf.h16
-rw-r--r--include/acpi/actbl2.h17
-rw-r--r--include/acpi/actypes.h13
-rw-r--r--include/acpi/platform/acenv.h19
-rw-r--r--include/acpi/platform/acenvex.h3
-rw-r--r--include/acpi/platform/acmsvcex.h54
-rw-r--r--include/acpi/platform/acwinex.h49
-rw-r--r--include/acpi/processor.h59
-rw-r--r--include/asm-generic/atomic-long.h263
-rw-r--r--include/asm-generic/atomic.h11
-rw-r--r--include/asm-generic/atomic64.h4
-rw-r--r--include/asm-generic/barrier.h4
-rw-r--r--include/asm-generic/dma-mapping-common.h118
-rw-r--r--include/asm-generic/early_ioremap.h8
-rw-r--r--include/asm-generic/fixmap.h3
-rw-r--r--include/asm-generic/io.h30
-rw-r--r--include/asm-generic/memory_model.h6
-rw-r--r--include/asm-generic/pci_iomap.h14
-rw-r--r--include/asm-generic/preempt.h5
-rw-r--r--include/asm-generic/qrwlock.h78
-rw-r--r--include/asm-generic/rtc.h29
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/crypto/aead.h172
-rw-r--r--include/crypto/algapi.h3
-rw-r--r--include/crypto/chacha20.h25
-rw-r--r--include/crypto/hash.h5
-rw-r--r--include/crypto/internal/aead.h72
-rw-r--r--include/crypto/internal/geniv.h9
-rw-r--r--include/crypto/internal/skcipher.h15
-rw-r--r--include/crypto/pkcs7.h13
-rw-r--r--include/crypto/poly1305.h41
-rw-r--r--include/crypto/public_key.h18
-rw-r--r--include/crypto/scatterwalk.h10
-rw-r--r--include/crypto/skcipher.h391
-rw-r--r--include/drm/bridge/dw_hdmi.h7
-rw-r--r--include/drm/drmP.h57
-rw-r--r--include/drm/drm_atomic.h3
-rw-r--r--include/drm/drm_atomic_helper.h4
-rw-r--r--include/drm/drm_crtc.h83
-rw-r--r--include/drm/drm_crtc_helper.h8
-rw-r--r--include/drm/drm_dp_helper.h3
-rw-r--r--include/drm/drm_fb_helper.h212
-rw-r--r--include/drm/drm_modeset_lock.h1
-rw-r--r--include/drm/drm_plane_helper.h45
-rw-r--r--include/drm/i915_component.h11
-rw-r--r--include/drm/intel-gtt.h4
-rw-r--r--include/dt-bindings/clock/exynos3250.h1
-rw-r--r--include/dt-bindings/clock/exynos5250.h1
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h5
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h240
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h3
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h2
-rw-r--r--include/dt-bindings/clock/r8a7793-clock.h164
-rw-r--r--include/dt-bindings/clock/rk3066a-cru.h5
-rw-r--r--include/dt-bindings/clock/rk3188-cru-common.h5
-rw-r--r--include/dt-bindings/clock/rk3188-cru.h5
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h5
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h384
-rw-r--r--include/dt-bindings/clock/zx296702-clock.h17
-rw-r--r--include/dt-bindings/dma/axi-dmac.h48
-rw-r--r--include/dt-bindings/dma/jz4780-dma.h49
-rw-r--r--include/dt-bindings/i2c/i2c.h18
-rw-r--r--include/dt-bindings/leds/leds-ns2.h8
-rw-r--r--include/dt-bindings/media/c8sectpfe.h12
-rw-r--r--include/dt-bindings/memory/tegra210-mc.h36
-rw-r--r--include/dt-bindings/mfd/st-lpc.h1
-rw-r--r--include/dt-bindings/pinctrl/am43xx.h1
-rw-r--r--include/dt-bindings/pinctrl/dra.h20
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h51
-rw-r--r--include/dt-bindings/power/mt8173-power.h15
-rw-r--r--include/dt-bindings/reset/altr,rst-mgr-a10.h110
-rw-r--r--include/dt-bindings/reset/stih407-resets.h (renamed from include/dt-bindings/reset-controller/stih407-resets.h)0
-rw-r--r--include/dt-bindings/reset/stih415-resets.h (renamed from include/dt-bindings/reset-controller/stih415-resets.h)0
-rw-r--r--include/dt-bindings/reset/stih416-resets.h (renamed from include/dt-bindings/reset-controller/stih416-resets.h)0
-rw-r--r--include/dt-bindings/reset/tegra124-car.h12
-rw-r--r--include/keys/system_keyring.h7
-rw-r--r--include/kvm/arm_arch_timer.h7
-rw-r--r--include/kvm/arm_vgic.h39
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/asn1_ber_bytecode.h16
-rw-r--r--include/linux/atmel_serial.h240
-rw-r--r--include/linux/atomic.h361
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/average.h61
-rw-r--r--include/linux/backing-dev.h26
-rw-r--r--include/linux/basic_mmio_gpio.h1
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h1
-rw-r--r--include/linux/bio.h38
-rw-r--r--include/linux/bitmap.h2
-rw-r--r--include/linux/bitops.h6
-rw-r--r--include/linux/blk-cgroup.h340
-rw-r--r--include/linux/blk_types.h15
-rw-r--r--include/linux/blkdev.h44
-rw-r--r--include/linux/bpf.h12
-rw-r--r--include/linux/cgroup-defs.h15
-rw-r--r--include/linux/cgroup.h24
-rw-r--r--include/linux/cgroup_subsys.h30
-rw-r--r--include/linux/clk-provider.h89
-rw-r--r--include/linux/clk/clk-conf.h2
-rw-r--r--include/linux/clk/shmobile.h12
-rw-r--r--include/linux/clk/tegra.h3
-rw-r--r--include/linux/clk/ti.h160
-rw-r--r--include/linux/clockchips.h3
-rw-r--r--include/linux/compiler.h7
-rw-r--r--include/linux/context_tracking.h15
-rw-r--r--include/linux/context_tracking_state.h1
-rw-r--r--include/linux/coresight.h21
-rw-r--r--include/linux/cpufeature.h7
-rw-r--r--include/linux/cpufreq.h28
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/cred.h8
-rw-r--r--include/linux/crypto.h54
-rw-r--r--include/linux/dax.h39
-rw-r--r--include/linux/debugfs.h20
-rw-r--r--include/linux/device-mapper.h4
-rw-r--r--include/linux/device.h28
-rw-r--r--include/linux/dmaengine.h75
-rw-r--r--include/linux/dmapool.h6
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/extcon.h7
-rw-r--r--include/linux/f2fs_fs.h16
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fdtable.h4
-rw-r--r--include/linux/filter.h17
-rw-r--r--include/linux/fs.h57
-rw-r--r--include/linux/fsl_devices.h20
-rw-r--r--include/linux/fsl_ifc.h50
-rw-r--r--include/linux/fsnotify_backend.h59
-rw-r--r--include/linux/genalloc.h6
-rw-r--r--include/linux/genhd.h33
-rw-r--r--include/linux/gfp.h31
-rw-r--r--include/linux/gpio/consumer.h82
-rw-r--r--include/linux/gpio/driver.h37
-rw-r--r--include/linux/gpio/machine.h1
-rw-r--r--include/linux/huge_mm.h20
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/i2c.h19
-rw-r--r--include/linux/ieee80211.h2
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/common/st_sensors.h2
-rw-r--r--include/linux/iio/consumer.h2
-rw-r--r--include/linux/iio/iio.h17
-rw-r--r--include/linux/iio/sysfs.h3
-rw-r--r--include/linux/iio/trigger.h3
-rw-r--r--include/linux/iio/triggered_buffer.h4
-rw-r--r--include/linux/init_task.h10
-rw-r--r--include/linux/input/touchscreen.h11
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/io-mapping.h2
-rw-r--r--include/linux/io.h33
-rw-r--r--include/linux/ipmi_smi.h7
-rw-r--r--include/linux/ipv6.h5
-rw-r--r--include/linux/irq.h19
-rw-r--r--include/linux/irqchip/arm-gic-v3.h13
-rw-r--r--include/linux/irqchip/arm-gic.h10
-rw-r--r--include/linux/irqchip/mips-gic.h14
-rw-r--r--include/linux/irqdesc.h8
-rw-r--r--include/linux/irqdomain.h26
-rw-r--r--include/linux/jbd.h1047
-rw-r--r--include/linux/jbd2.h44
-rw-r--r--include/linux/jbd_common.h46
-rw-r--r--include/linux/jiffies.h35
-rw-r--r--include/linux/jump_label.h261
-rw-r--r--include/linux/kasan.h10
-rw-r--r--include/linux/kernfs.h4
-rw-r--r--include/linux/kexec.h18
-rw-r--r--include/linux/klist.h1
-rw-r--r--include/linux/kmod.h2
-rw-r--r--include/linux/kprobes.h2
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/kvm_host.h25
-rw-r--r--include/linux/libnvdimm.h4
-rw-r--r--include/linux/list.h5
-rw-r--r--include/linux/llist.h2
-rw-r--r--include/linux/lsm_audit.h7
-rw-r--r--include/linux/lsm_hooks.h6
-rw-r--r--include/linux/mailbox_controller.h7
-rw-r--r--include/linux/mei_cl_bus.h15
-rw-r--r--include/linux/memblock.h4
-rw-r--r--include/linux/memcontrol.h392
-rw-r--r--include/linux/memory_hotplug.h5
-rw-r--r--include/linux/mfd/88pm80x.h162
-rw-r--r--include/linux/mfd/arizona/core.h3
-rw-r--r--include/linux/mfd/arizona/pdata.h14
-rw-r--r--include/linux/mfd/arizona/registers.h257
-rw-r--r--include/linux/mfd/axp20x.h67
-rw-r--r--include/linux/mfd/da9062/core.h50
-rw-r--r--include/linux/mfd/da9062/registers.h1108
-rw-r--r--include/linux/mfd/da9063/core.h1
-rw-r--r--include/linux/mfd/lpc_ich.h6
-rw-r--r--include/linux/mfd/max77693-common.h49
-rw-r--r--include/linux/mfd/max77693-private.h134
-rw-r--r--include/linux/mfd/max77843-private.h174
-rw-r--r--include/linux/mfd/mt6397/core.h1
-rw-r--r--include/linux/mfd/palmas.h7
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h8
-rw-r--r--include/linux/microchipphy.h73
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cq.h3
-rw-r--r--include/linux/mlx4/device.h8
-rw-r--r--include/linux/mlx4/driver.h1
-rw-r--r--include/linux/mlx4/qp.h3
-rw-r--r--include/linux/mlx5/device.h21
-rw-r--r--include/linux/mlx5/driver.h30
-rw-r--r--include/linux/mlx5/mlx5_ifc.h24
-rw-r--r--include/linux/mm.h58
-rw-r--r--include/linux/mm_types.h14
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/dw_mmc.h9
-rw-r--r--include/linux/mmc/host.h3
-rw-r--r--include/linux/mmu_notifier.h46
-rw-r--r--include/linux/mmzone.h31
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/mpls_iptunnel.h6
-rw-r--r--include/linux/msi.h109
-rw-r--r--include/linux/mtd/map.h2
-rw-r--r--include/linux/net.h8
-rw-r--r--include/linux/netdevice.h176
-rw-r--r--include/linux/netfilter.h44
-rw-r--r--include/linux/netfilter/nf_conntrack_zones_common.h23
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h3
-rw-r--r--include/linux/netfilter/x_tables.h8
-rw-r--r--include/linux/netfilter_bridge.h12
-rw-r--r--include/linux/netfilter_ipv6.h18
-rw-r--r--include/linux/netlink.h13
-rw-r--r--include/linux/nfs4.h18
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_fs_sb.h5
-rw-r--r--include/linux/nfs_xdr.h8
-rw-r--r--include/linux/nmi.h21
-rw-r--r--include/linux/nvme.h22
-rw-r--r--include/linux/nvmem-consumer.h157
-rw-r--r--include/linux/nvmem-provider.h47
-rw-r--r--include/linux/of.h3
-rw-r--r--include/linux/of_gpio.h4
-rw-r--r--include/linux/of_irq.h1
-rw-r--r--include/linux/of_platform.h9
-rw-r--r--include/linux/oid_registry.h7
-rw-r--r--include/linux/oom.h38
-rw-r--r--include/linux/page-flags.h11
-rw-r--r--include/linux/page-isolation.h5
-rw-r--r--include/linux/page_ext.h4
-rw-r--r--include/linux/page_idle.h110
-rw-r--r--include/linux/pci-ats.h49
-rw-r--r--include/linux/pci.h68
-rw-r--r--include/linux/pci_ids.h9
-rw-r--r--include/linux/percpu-defs.h6
-rw-r--r--include/linux/percpu-rwsem.h20
-rw-r--r--include/linux/perf/arm_pmu.h154
-rw-r--r--include/linux/perf_event.h10
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/phy_fixed.h8
-rw-r--r--include/linux/platform_data/atmel.h12
-rw-r--r--include/linux/platform_data/atmel_mxt_ts.h (renamed from include/linux/i2c/atmel_mxt_ts.h)12
-rw-r--r--include/linux/platform_data/clk-ux500.h12
-rw-r--r--include/linux/platform_data/gpio-em.h11
-rw-r--r--include/linux/platform_data/i2c-mux-reg.h44
-rw-r--r--include/linux/platform_data/itco_wdt.h19
-rw-r--r--include/linux/platform_data/leds-kirkwood-ns2.h14
-rw-r--r--include/linux/platform_data/lp855x.h2
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/platform_data/pixcir_i2c_ts.h (renamed from include/linux/input/pixcir_ts.h)1
-rw-r--r--include/linux/platform_data/spi-davinci.h1
-rw-r--r--include/linux/platform_data/spi-mt65xx.h20
-rw-r--r--include/linux/platform_data/st_nci.h29
-rw-r--r--include/linux/platform_data/video-ep93xx.h8
-rw-r--r--include/linux/platform_data/zforce_ts.h3
-rw-r--r--include/linux/pm_domain.h9
-rw-r--r--include/linux/pm_opp.h36
-rw-r--r--include/linux/pm_qos.h5
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/pmem.h115
-rw-r--r--include/linux/poison.h11
-rw-r--r--include/linux/preempt.h19
-rw-r--r--include/linux/printk.h14
-rw-r--r--include/linux/property.h4
-rw-r--r--include/linux/proportions.h2
-rw-r--r--include/linux/psci.h52
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/pwm.h99
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/quotaops.h5
-rw-r--r--include/linux/rcupdate.h144
-rw-r--r--include/linux/rcutiny.h10
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regmap.h385
-rw-r--r--include/linux/regulator/consumer.h16
-rw-r--r--include/linux/regulator/da9211.h19
-rw-r--r--include/linux/regulator/driver.h1
-rw-r--r--include/linux/regulator/machine.h1
-rw-r--r--include/linux/regulator/mt6311.h29
-rw-r--r--include/linux/reset.h14
-rw-r--r--include/linux/rmap.h3
-rw-r--r--include/linux/scatterlist.h9
-rw-r--r--include/linux/sched.h121
-rw-r--r--include/linux/seccomp.h2
-rw-r--r--include/linux/seq_file.h39
-rw-r--r--include/linux/serial_8250.h7
-rw-r--r--include/linux/serio.h2
-rw-r--r--include/linux/shdma-base.h5
-rw-r--r--include/linux/skbuff.h153
-rw-r--r--include/linux/slab.h10
-rw-r--r--include/linux/smpboot.h11
-rw-r--r--include/linux/soc/dove/pmu.h6
-rw-r--r--include/linux/soc/mediatek/infracfg.h26
-rw-r--r--include/linux/soc/qcom/smd-rpm.h35
-rw-r--r--include/linux/soc/qcom/smd.h46
-rw-r--r--include/linux/soc/qcom/smem.h11
-rw-r--r--include/linux/spi/spi.h64
-rw-r--r--include/linux/spinlock.h40
-rw-r--r--include/linux/stmmac.h22
-rw-r--r--include/linux/stop_machine.h28
-rw-r--r--include/linux/string_helpers.h14
-rw-r--r--include/linux/sunrpc/addr.h27
-rw-r--r--include/linux/sunrpc/auth.h8
-rw-r--r--include/linux/sunrpc/cache.h9
-rw-r--r--include/linux/sunrpc/svc.h68
-rw-r--r--include/linux/sunrpc/svc_rdma.h92
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/swap.h23
-rw-r--r--include/linux/swapops.h37
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/ti_wilink_st.h1
-rw-r--r--include/linux/tick.h25
-rw-r--r--include/linux/time64.h35
-rw-r--r--include/linux/timekeeping.h9
-rw-r--r--include/linux/trace_events.h7
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/linux/tty_driver.h2
-rw-r--r--include/linux/types.h3
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/uprobes.h17
-rw-r--r--include/linux/usb/chipidea.h15
-rw-r--r--include/linux/usb/composite.h2
-rw-r--r--include/linux/usb/gadget.h198
-rw-r--r--include/linux/usb/hcd.h6
-rw-r--r--include/linux/usb/msm_hsusb.h9
-rw-r--r--include/linux/usb/of.h7
-rw-r--r--include/linux/usb/otg.h15
-rw-r--r--include/linux/userfaultfd_k.h85
-rw-r--r--include/linux/verify_pefile.h6
-rw-r--r--include/linux/wait.h5
-rw-r--r--include/linux/watchdog.h8
-rw-r--r--include/linux/workqueue.h6
-rw-r--r--include/linux/zbud.h2
-rw-r--r--include/linux/zpool.h6
-rw-r--r--include/linux/zsmalloc.h6
-rw-r--r--include/media/media-devnode.h4
-rw-r--r--include/media/omap3isp.h158
-rw-r--r--include/media/rc-core.h6
-rw-r--r--include/media/rc-map.h38
-rw-r--r--include/media/tc358743.h131
-rw-r--r--include/media/v4l2-async.h8
-rw-r--r--include/media/v4l2-ctrls.h1018
-rw-r--r--include/media/v4l2-dv-timings.h141
-rw-r--r--include/media/v4l2-event.h47
-rw-r--r--include/media/v4l2-flash-led-class.h12
-rw-r--r--include/media/v4l2-mediabus.h4
-rw-r--r--include/media/v4l2-mem2mem.h20
-rw-r--r--include/media/v4l2-subdev.h376
-rw-r--r--include/media/videobuf-core.h2
-rw-r--r--include/media/videobuf2-core.h10
-rw-r--r--include/media/videobuf2-memops.h3
-rw-r--r--include/misc/cxl.h10
-rw-r--r--include/net/6lowpan.h23
-rw-r--r--include/net/act_api.h16
-rw-r--r--include/net/addrconf.h35
-rw-r--r--include/net/bluetooth/hci_core.h31
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/bond_options.h1
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/cfg80211.h3
-rw-r--r--include/net/cfg802154.h10
-rw-r--r--include/net/checksum.h8
-rw-r--r--include/net/cls_cgroup.h29
-rw-r--r--include/net/dsa.h33
-rw-r--r--include/net/dst.h29
-rw-r--r--include/net/dst_metadata.h108
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow.h29
-rw-r--r--include/net/flow_dissector.h67
-rw-r--r--include/net/geneve.h35
-rw-r--r--include/net/gre.h92
-rw-r--r--include/net/gro_cells.h18
-rw-r--r--include/net/inet_hashtables.h4
-rw-r--r--include/net/inet_timewait_sock.h8
-rw-r--r--include/net/inetpeer.h118
-rw-r--r--include/net/ip.h31
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip_fib.h5
-rw-r--r--include/net/ip_tunnels.h145
-rw-r--r--include/net/ip_vs.h23
-rw-r--r--include/net/ipv6.h76
-rw-r--r--include/net/lwtunnel.h175
-rw-r--r--include/net/mac80211.h73
-rw-r--r--include/net/mac802154.h17
-rw-r--r--include/net/mpls_iptunnel.h29
-rw-r--r--include/net/ndisc.h3
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/br_netfilter.h2
-rw-r--r--include/net/netfilter/ipv4/nf_dup_ipv4.h7
-rw-r--r--include/net/netfilter/ipv6/nf_dup_ipv6.h7
-rw-r--r--include/net/netfilter/nf_conntrack.h11
-rw-r--r--include/net/netfilter/nf_conntrack_core.h3
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h11
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h4
-rw-r--r--include/net/netfilter/nf_conntrack_zones.h86
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netfilter/nft_dup.h9
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/netfilter.h1
-rw-r--r--include/net/nfc/nci_core.h3
-rw-r--r--include/net/nfc/nfc.h41
-rw-r--r--include/net/nl802154.h4
-rw-r--r--include/net/pkt_sched.h4
-rw-r--r--include/net/route.h7
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/sch_generic.h32
-rw-r--r--include/net/sock.h51
-rw-r--r--include/net/switchdev.h10
-rw-r--r--include/net/tc_act/tc_bpf.h2
-rw-r--r--include/net/tc_act/tc_gact.h7
-rw-r--r--include/net/tc_act/tc_mirred.h2
-rw-r--r--include/net/tcp.h24
-rw-r--r--include/net/timewait_sock.h3
-rw-r--r--include/net/udp_tunnel.h7
-rw-r--r--include/net/vrf.h178
-rw-r--r--include/net/vxlan.h90
-rw-r--r--include/net/xfrm.h7
-rw-r--r--include/rdma/ib_cm.h25
-rw-r--r--include/rdma/ib_mad.h82
-rw-r--r--include/rdma/ib_pack.h2
-rw-r--r--include/rdma/ib_smi.h47
-rw-r--r--include/rdma/ib_verbs.h249
-rw-r--r--include/rdma/opa_port_info.h433
-rw-r--r--include/rdma/opa_smi.h47
-rw-r--r--include/rdma/rdma_netlink.h7
-rw-r--r--include/scsi/scsi_device.h3
-rw-r--r--include/scsi/scsi_transport_iscsi.h1
-rw-r--r--include/soc/tegra/fuse.h6
-rw-r--r--include/soc/tegra/mc.h10
-rw-r--r--include/soc/tegra/pmc.h5
-rw-r--r--include/sound/ac97_codec.h2
-rw-r--r--include/sound/hda_i915.h9
-rw-r--r--include/sound/hda_register.h4
-rw-r--r--include/sound/hdaudio.h19
-rw-r--r--include/sound/hdaudio_ext.h71
-rw-r--r--include/sound/rcar_snd.h14
-rw-r--r--include/sound/rt298.h20
-rw-r--r--include/sound/soc-dapm.h84
-rw-r--r--include/sound/soc-topology.h13
-rw-r--r--include/sound/soc.h29
-rw-r--r--include/trace/events/asoc.h53
-rw-r--r--include/trace/events/ext3.h866
-rw-r--r--include/trace/events/f2fs.h12
-rw-r--r--include/trace/events/fib.h113
-rw-r--r--include/trace/events/jbd.h194
-rw-r--r--include/trace/events/kvm.h30
-rw-r--r--include/trace/events/rcu.h1
-rw-r--r--include/trace/events/sched.h30
-rw-r--r--include/trace/events/spmi.h135
-rw-r--r--include/trace/events/sunrpc.h21
-rw-r--r--include/trace/events/task.h2
-rw-r--r--include/trace/events/tlb.h3
-rw-r--r--include/trace/events/v4l2.h257
-rw-r--r--include/trace/events/writeback.h180
-rw-r--r--include/uapi/drm/drm_fourcc.h7
-rw-r--r--include/uapi/drm/i915_drm.h16
-rw-r--r--include/uapi/drm/vmwgfx_drm.h38
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/audit.h8
-rw-r--r--include/uapi/linux/bpf.h29
-rw-r--r--include/uapi/linux/dlm_device.h2
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/elf-em.h3
-rw-r--r--include/uapi/linux/ethtool.h5
-rw-r--r--include/uapi/linux/fib_rules.h2
-rw-r--r--include/uapi/linux/gsmmux.h1
-rw-r--r--include/uapi/linux/if_bridge.h1
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h16
-rw-r--r--include/uapi/linux/if_packet.h3
-rw-r--r--include/uapi/linux/if_tunnel.h1
-rw-r--r--include/uapi/linux/ila.h15
-rw-r--r--include/uapi/linux/ip_vs.h5
-rw-r--r--include/uapi/linux/ipv6.h3
-rw-r--r--include/uapi/linux/kernel-page-flags.h1
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/lwtunnel.h47
-rw-r--r--include/uapi/linux/mei.h19
-rw-r--r--include/uapi/linux/mpls.h2
-rw-r--r--include/uapi/linux/mpls_iptunnel.h28
-rw-r--r--include/uapi/linux/ndctl.h12
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h23
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h1
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cttimeout.h2
-rw-r--r--include/uapi/linux/netfilter/xt_CT.h8
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h4
-rw-r--r--include/uapi/linux/netlink.h1
-rw-r--r--include/uapi/linux/nfs4.h2
-rw-r--r--include/uapi/linux/nfsacl.h1
-rw-r--r--include/uapi/linux/nvme.h1
-rw-r--r--include/uapi/linux/openvswitch.h60
-rw-r--r--include/uapi/linux/perf_event.h35
-rw-r--r--include/uapi/linux/prctl.h7
-rw-r--r--include/uapi/linux/ptrace.h6
-rw-r--r--include/uapi/linux/rtnetlink.h13
-rw-r--r--include/uapi/linux/securebits.h11
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/toshiba.h32
-rw-r--r--include/uapi/linux/usb/ch9.h12
-rw-r--r--include/uapi/linux/userfaultfd.h169
-rw-r--r--include/uapi/linux/v4l2-controls.h4
-rw-r--r--include/uapi/linux/vsp1.h2
-rw-r--r--include/uapi/misc/cxl.h4
-rw-r--r--include/uapi/rdma/Kbuild1
-rw-r--r--include/uapi/rdma/hfi/Kbuild2
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h427
-rw-r--r--include/uapi/rdma/rdma_netlink.h82
-rw-r--r--include/uapi/scsi/Kbuild1
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h174
-rw-r--r--include/uapi/xen/privcmd.h4
-rw-r--r--include/video/kyro.h4
-rw-r--r--include/video/samsung_fimd.h1
-rw-r--r--include/video/vga.h2
-rw-r--r--include/xen/events.h1
-rw-r--r--include/xen/interface/io/netif.h8
-rw-r--r--include/xen/interface/platform.h18
-rw-r--r--include/xen/interface/xen.h37
-rw-r--r--include/xen/interface/xenpmu.h94
-rw-r--r--include/xen/page.h8
-rw-r--r--include/xen/xen-ops.h10
544 files changed, 15909 insertions, 6608 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 6b040f4ddfab..fcf9080eae85 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -147,6 +147,7 @@ struct acpi_pld_info {
  * (Intended for BIOS use only)
  */
 #define ACPI_PLD_REV1_BUFFER_SIZE 16 /* For Revision 1 of the buffer (From ACPI spec) */
+#define ACPI_PLD_REV2_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */
 #define ACPI_PLD_BUFFER_SIZE 20 /* For Revision 2 of the buffer (From ACPI spec) */
 
 /* First 32-bit dword, bits 0:32 */
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 03aacfb3e98b..e11611ca72a4 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -136,10 +136,6 @@
 
 #define ACPI_ROOT_TABLE_SIZE_INCREMENT 4
 
-/* Maximum number of While() loop iterations before forced abort */
-
-#define ACPI_MAX_LOOP_ITERATIONS 0xFFFF
-
 /* Maximum sleep allowed via Sleep() operator */
 
 #define ACPI_MAX_SLEEP 2000 /* 2000 millisec == two seconds */
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 11c3a011dcbf..9f20eb4acaa6 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -192,8 +192,9 @@ struct acpi_exception_info {
 #define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
 #define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
 #define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021)
+#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
 
-#define AE_CODE_AML_MAX 0x0021
+#define AE_CODE_AML_MAX 0x0022
 
 /*
  * Internal exceptions used for control
@@ -355,7 +356,9 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
 	EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
 		  "A memory, I/O, or PCI configuration address is invalid"),
 	EXCEP_TXT("AE_AML_INFINITE_LOOP",
-		  "An apparent infinite AML While loop, method was aborted")
+		  "An apparent infinite AML While loop, method was aborted"),
+	EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
+		  "A namespace node is uninitialized or unresolved")
 };
 
 static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index f56de8c5d844..908d4f9c348c 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -88,7 +88,8 @@
 #define ACPI_LV_DEBUG_OBJECT 0x00000002
 #define ACPI_LV_INFO 0x00000004
 #define ACPI_LV_REPAIR 0x00000008
-#define ACPI_LV_ALL_EXCEPTIONS 0x0000000F
+#define ACPI_LV_TRACE_POINT 0x00000010
+#define ACPI_LV_ALL_EXCEPTIONS 0x0000001F
 
 /* Trace verbosity level 1 [Standard Trace Level] */
 
@@ -147,6 +148,7 @@
 #define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
 #define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
 #define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR)
+#define ACPI_DB_TRACE_POINT ACPI_DEBUG_LEVEL (ACPI_LV_TRACE_POINT)
 #define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
 
 /* Trace level -- also used in the global "DebugLevel" */
@@ -182,6 +184,20 @@
 #define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
 #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
+/*
+ * Global trace flags
+ */
+#define ACPI_TRACE_ENABLED ((u32) 4)
+#define ACPI_TRACE_ONESHOT ((u32) 2)
+#define ACPI_TRACE_OPCODE ((u32) 1)
+
+/* Defaults for trace debugging level/layer */
+
+#define ACPI_TRACE_LEVEL_ALL ACPI_LV_ALL
+#define ACPI_TRACE_LAYER_ALL 0x000001FF
+#define ACPI_TRACE_LEVEL_DEFAULT ACPI_LV_TRACE_POINT
+#define ACPI_TRACE_LAYER_DEFAULT ACPI_EXECUTER
+
 #if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
 /*
  * The module name is used primarily for error and debug messages.
@@ -432,6 +448,8 @@
 #define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
 #define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
 
+#define ACPI_TRACE_POINT(a, b, c, d) acpi_trace_point (a, b, c, d)
+
 #else /* ACPI_DEBUG_OUTPUT */
 /*
  * This is the non-debug case -- make everything go away,
@@ -453,6 +471,7 @@
 #define ACPI_DUMP_PATHNAME(a, b, c, d)
 #define ACPI_DUMP_BUFFER(a, b)
 #define ACPI_IS_DEBUG_ENABLED(level, component) 0
+#define ACPI_TRACE_POINT(a, b, c, d)
 
 /* Return macros must have a return statement at the minimum */
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 83061cac719b..5ba8fb64f664 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -16,10 +16,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index ea6428b7dacb..29c691265b49 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -16,10 +16,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d02df0a49d98..a54ad1cc990c 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -430,4 +430,10 @@ long acpi_os_get_file_offset(ACPI_FILE file);
 acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
 #endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
+void
+acpi_os_trace_point(acpi_trace_event_type type,
+		    u8 begin, u8 *aml, char *pathname);
+#endif
+
 #endif /* __ACPIOSXF_H__ */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e8ec18a4a634..c33eeabde160 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION 0x20150619
+#define ACPI_CA_VERSION 0x20150818
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -251,7 +251,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
  * traced each time it is executed.
  */
 ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0);
-ACPI_INIT_GLOBAL(acpi_name, acpi_gbl_trace_method_name, 0);
+ACPI_INIT_GLOBAL(const char *, acpi_gbl_trace_method_name, NULL);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_level, ACPI_TRACE_LEVEL_DEFAULT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT);
 
 /*
  * Runtime configuration of debug output control masks. We want the debug
@@ -504,7 +506,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
 					    acpi_object_handler handler,
 					    void **data))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-			    acpi_debug_trace(char *name, u32 debug_level,
+			    acpi_debug_trace(const char *name, u32 debug_level,
 					     u32 debug_layer, u32 flags))
 
 /*
@@ -907,9 +909,17 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(6)
 					       const char *module_name,
 					       u32 component_id,
 					       const char *format, ...))
+
+ACPI_DBG_DEPENDENT_RETURN_VOID(void
+			       acpi_trace_point(acpi_trace_event_type type,
+						u8 begin,
+						u8 *aml, char *pathname))
 ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
 			       void ACPI_INTERNAL_VAR_XFACE
 			       acpi_log_error(const char *format, ...))
+acpi_status acpi_initialize_debugger(void);
+
+void acpi_terminate_debugger(void);
 
 /*
  * Divergences
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a948fc586b9b..6e28f544b7b2 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1186,20 +1186,29 @@ enum acpi_spmi_interface_types {
  * December 19, 2014
  *
  * NOTE: There are two versions of the table with the same signature --
- * the client version and the server version.
+ * the client version and the server version. The common platform_class
+ * field is used to differentiate the two types of tables.
  *
  ******************************************************************************/
 
-struct acpi_table_tcpa_client {
+struct acpi_table_tcpa_hdr {
 	struct acpi_table_header header;	/* Common ACPI table header */
 	u16 platform_class;
+};
+
+/*
+ * Values for platform_class above.
+ * This is how the client and server subtables are differentiated
+ */
+#define ACPI_TCPA_CLIENT_TABLE 0
+#define ACPI_TCPA_SERVER_TABLE 1
+
+struct acpi_table_tcpa_client {
 	u32 minimum_log_length;	/* Minimum length for the event log area */
 	u64 log_address;	/* Address of the event log area */
 };
 
 struct acpi_table_tcpa_server {
-	struct acpi_table_header header;	/* Common ACPI table header */
-	u16 platform_class;
 	u16 reserved;
 	u64 minimum_log_length;	/* Minimum length for the event log area */
 	u64 log_address;	/* Address of the event log area */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index c2a41d223162..f914958c4adb 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -662,6 +662,7 @@ typedef u32 acpi_object_type;
 #define ACPI_TYPE_DEBUG_OBJECT 0x10
 
 #define ACPI_TYPE_EXTERNAL_MAX 0x10
+#define ACPI_NUM_TYPES (ACPI_TYPE_EXTERNAL_MAX + 1)
 
 /*
  * These are object types that do not map directly to the ACPI
@@ -683,6 +684,7 @@ typedef u32 acpi_object_type;
 #define ACPI_TYPE_LOCAL_SCOPE 0x1B /* 1 Name, multiple object_list Nodes */
 
 #define ACPI_TYPE_NS_NODE_MAX 0x1B /* Last typecode used within a NS Node */
+#define ACPI_TOTAL_TYPES (ACPI_TYPE_NS_NODE_MAX + 1)
 
 /*
  * These are special object types that never appear in
@@ -985,7 +987,8 @@ struct acpi_buffer {
  */
 #define ACPI_FULL_PATHNAME 0
 #define ACPI_SINGLE_NAME 1
-#define ACPI_NAME_TYPE_MAX 1
+#define ACPI_FULL_PATHNAME_NO_TRAILING 2
+#define ACPI_NAME_TYPE_MAX 2
 
 /*
  * Predefined Namespace items
@@ -1246,6 +1249,14 @@ struct acpi_memory_list {
 #endif
 };
 
+/* Definitions of trace event types */
+
+typedef enum {
+	ACPI_TRACE_AML_METHOD,
+	ACPI_TRACE_AML_OPCODE,
+	ACPI_TRACE_AML_REGION
+} acpi_trace_event_type;
+
 /* Definitions of _OSI support */
 
 #define ACPI_VENDOR_STRINGS 0x01
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 3cedd43943f4..ec00e2bb029e 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -70,13 +70,14 @@
 
 #ifdef ACPI_ASL_COMPILER
 #define ACPI_APPLICATION
-#define ACPI_DISASSEMBLER
 #define ACPI_DEBUG_OUTPUT
 #define ACPI_CONSTANT_EVAL_ONLY
 #define ACPI_LARGE_NAMESPACE_NODE
 #define ACPI_DATA_TABLE_DISASSEMBLY
 #define ACPI_SINGLE_THREADED
 #define ACPI_32BIT_PHYSICAL_ADDRESS
+
+#define ACPI_DISASSEMBLER 1
 #endif
 
 /* acpi_exec configuration. Multithreaded with full AML debugger */
@@ -89,8 +90,8 @@
 #endif
 
 /*
- * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example configuration.
- * All single threaded.
+ * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
+ * configuration. All single threaded.
  */
 #if (defined ACPI_BIN_APP) || \
 	(defined ACPI_DUMP_APP) || \
@@ -123,7 +124,7 @@
 #define ACPI_USE_NATIVE_RSDP_POINTER
 #endif
 
-/* acpi_dump configuration. Native mapping used if provied by OSPMs */
+/* acpi_dump configuration. Native mapping used if provided by the host */
 
 #ifdef ACPI_DUMP_APP
 #define ACPI_USE_NATIVE_MEMORY_MAPPING
@@ -151,12 +152,12 @@
 #define ACPI_USE_LOCAL_CACHE
 #endif
 
-/* Common debug support */
+/* Common debug/disassembler support */
 
 #ifdef ACPI_FULL_DEBUG
-#define ACPI_DEBUGGER
 #define ACPI_DEBUG_OUTPUT
-#define ACPI_DISASSEMBLER
+#define ACPI_DEBUGGER 1
+#define ACPI_DISASSEMBLER 1
 #endif
 
 
@@ -323,8 +324,8 @@
  * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
  * the standard header files may be used.
  *
- * The ACPICA subsystem only uses low level C library functions that do not call
- * operating system services and may therefore be inlined in the code.
+ * The ACPICA subsystem only uses low level C library functions that do not
+ * call operating system services and may therefore be inlined in the code.
  *
  * It may be necessary to tailor these include files to the target
  * generation environment.
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 0a7dc8e583b1..2f296cb5f7e2 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,6 +56,9 @@
 #if defined(_LINUX) || defined(__linux__)
 #include <acpi/platform/aclinuxex.h>
 
+#elif defined(WIN32)
+#include "acwinex.h"
+
 #elif defined(_AED_EFI)
 #include "acefiex.h"
 
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
new file mode 100644
index 000000000000..b64797488775
--- /dev/null
+++ b/include/acpi/platform/acmsvcex.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Name: acmsvcex.h - Extra VC specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACMSVCEX_H__
+#define __ACMSVCEX_H__
+
+/* Debug support. */
+
+#ifdef _DEBUG
+#define _CRTDBG_MAP_ALLOC /* Enables specific file/lineno for leaks */
+#include <crtdbg.h>
+#endif
+
+#endif /* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
new file mode 100644
index 000000000000..6ed1d713509b
--- /dev/null
+++ b/include/acpi/platform/acwinex.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Name: acwinex.h - Extra OS specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACWINEX_H__
+#define __ACWINEX_H__
+
+/* Windows uses VC */
+
+#endif /* __ACWINEX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 4188a4d3b597..ff5f135f16b1 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -228,10 +228,7 @@ extern int acpi_processor_preregister_performance(struct
 
 extern int acpi_processor_register_performance(struct acpi_processor_performance
 					       *performance, unsigned int cpu);
-extern void acpi_processor_unregister_performance(struct
-						   acpi_processor_performance
-						   *performance,
-						   unsigned int cpu);
+extern void acpi_processor_unregister_performance(unsigned int cpu);
 
 /* note: this locks both the calling module and the processor module
    if a _PPC object exists, rmmod is disallowed then */
@@ -318,6 +315,7 @@ int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 void acpi_processor_set_pdc(acpi_handle handle);
 
 /* in processor_throttling.c */
+#ifdef CONFIG_ACPI_CPU_FREQ_PSS
 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
 extern int acpi_processor_set_throttling(struct acpi_processor *pr,
@@ -330,14 +328,59 @@ extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
 						unsigned long action);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
+#else
+static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+{
+	return 0;
+}
+
+static inline int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
+						int state, bool force)
+{
+	return -ENODEV;
+}
+
+static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+						    unsigned long action) {}
+
+static inline void acpi_processor_throttling_init(void) {}
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+
 /* in processor_idle.c */
+extern struct cpuidle_driver acpi_idle_driver;
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
 int acpi_processor_power_init(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-extern struct cpuidle_driver acpi_idle_driver;
+#else
+static inline int acpi_processor_power_init(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_power_exit(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
 
-#ifdef CONFIG_PM_SLEEP
+static inline int acpi_processor_hotplug(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
+#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE)
 void acpi_processor_syscore_init(void);
 void acpi_processor_syscore_exit(void);
 #else
@@ -348,7 +391,7 @@ static inline void acpi_processor_syscore_exit(void) {}
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
 void acpi_thermal_cpufreq_init(void);
 void acpi_thermal_cpufreq_exit(void);
 #else
@@ -360,6 +403,6 @@ static inline void acpi_thermal_cpufreq_exit(void)
 {
 	return;
 }
-#endif
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
 
 #endif
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index b7babf0206b8..a94cbebbc33d 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -23,236 +23,159 @@
 typedef atomic64_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x)	atomic64 ## x
 
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-	(atomic64_xchg((atomic64_t *)(v), (new)))
-
-#else /* BITS_PER_LONG == 64 */
+#else
 
 typedef atomic_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
+#define ATOMIC_LONG_PFX(x)	atomic ## x
+
+#endif
 
-	return (long)atomic_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	atomic_set(v, i);
-}
+#define ATOMIC_LONG_READ_OP(mo) \
+static inline long atomic_long_read##mo(atomic_long_t *l) \
+{ \
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+	\
+	return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
+}
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
+
+#undef ATOMIC_LONG_READ_OP
+
+#define ATOMIC_LONG_SET_OP(mo) \
+static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
+{ \
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+	\
+	ATOMIC_LONG_PFX(_set##mo)(v, i); \
+}
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
+{ \
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+	\
+	return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_xchg_relaxed(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+	(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 
 static inline void atomic_long_inc(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_inc(v);
+	ATOMIC_LONG_PFX(_inc)(v);
 }
 
 static inline void atomic_long_dec(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_dec(v);
+	ATOMIC_LONG_PFX(_dec)(v);
 }
 
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_add(i, v);
+	ATOMIC_LONG_PFX(_add)(i, v);
 }
 
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_sub(i, v);
+	ATOMIC_LONG_PFX(_sub)(i, v);
 }
 
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_sub_and_test(i, v);
+	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
 }
 
 static inline int atomic_long_dec_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_dec_and_test(v);
+	return ATOMIC_LONG_PFX(_dec_and_test)(v);
 }
 
 static inline int atomic_long_inc_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_inc_and_test(v);
+	return ATOMIC_LONG_PFX(_inc_and_test)(v);
 }
 
 static inline int atomic_long_add_negative(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_sub_return(i, v);
+	return ATOMIC_LONG_PFX(_add_negative)(i, v);
 }
 
 static inline long atomic_long_inc_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_inc_return(v);
+	return (long)ATOMIC_LONG_PFX(_inc_return)(v);
 }
 
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_dec_return(v);
+	return (long)ATOMIC_LONG_PFX(_dec_return)(v);
240} 169}
241 170
242static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 171static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
243{ 172{
244 atomic_t *v = (atomic_t *)l; 173 ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
245 174
246 return (long)atomic_add_unless(v, a, u); 175 return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
247} 176}
248 177
249#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) 178#define atomic_long_inc_not_zero(l) \
250 179 ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
251#define atomic_long_cmpxchg(l, old, new) \
252 (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
253#define atomic_long_xchg(v, new) \
254 (atomic_xchg((atomic_t *)(v), (new)))
255
256#endif /* BITS_PER_LONG == 64 */
257 180
258#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ 181#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
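
The ATOMIC_LONG_PFX() template above generates the atomic_t and atomic64_t flavours of every helper from a single definition. As an illustration only (not part of the patch itself), on a BITS_PER_LONG == 64 build ATOMIC_LONG_READ_OP(_acquire) expands to roughly:

	static inline long atomic_long_read_acquire(atomic_long_t *l)
	{
		atomic64_t *v = (atomic64_t *)l;

		return (long)atomic64_read_acquire(v);
	}
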
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1973ad2b13f4..d4d7e337fdcb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -98,15 +98,16 @@ ATOMIC_OP_RETURN(add, +)
98ATOMIC_OP_RETURN(sub, -) 98ATOMIC_OP_RETURN(sub, -)
99#endif 99#endif
100 100
101#ifndef atomic_clear_mask 101#ifndef atomic_and
102ATOMIC_OP(and, &) 102ATOMIC_OP(and, &)
103#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
104#endif 103#endif
105 104
106#ifndef atomic_set_mask 105#ifndef atomic_or
107#define CONFIG_ARCH_HAS_ATOMIC_OR
108ATOMIC_OP(or, |) 106ATOMIC_OP(or, |)
109#define atomic_set_mask(i, v) atomic_or((i), (v)) 107#endif
108
109#ifndef atomic_xor
110ATOMIC_OP(xor, ^)
110#endif 111#endif
111 112
112#undef ATOMIC_OP_RETURN 113#undef ATOMIC_OP_RETURN
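
With atomic_clear_mask()/atomic_set_mask() removed, callers use the bitwise ops directly; the replacements follow straight from the deleted defines. A sketch, where st and FLAG_PENDING are made-up names:

	atomic_or(FLAG_PENDING, &st->flags);	/* was atomic_set_mask(FLAG_PENDING, &st->flags) */
	atomic_and(~FLAG_PENDING, &st->flags);	/* was atomic_clear_mask(FLAG_PENDING, &st->flags) */
	atomic_xor(FLAG_PENDING, &st->flags);	/* toggle, newly available */
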
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 30ad9c86cebb..d48e78ccad3d 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v);
32ATOMIC64_OPS(add) 32ATOMIC64_OPS(add)
33ATOMIC64_OPS(sub) 33ATOMIC64_OPS(sub)
34 34
35ATOMIC64_OP(and)
36ATOMIC64_OP(or)
37ATOMIC64_OP(xor)
38
35#undef ATOMIC64_OPS 39#undef ATOMIC64_OPS
36#undef ATOMIC64_OP_RETURN 40#undef ATOMIC64_OP_RETURN
37#undef ATOMIC64_OP 41#undef ATOMIC64_OP
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 55e3abc2d027..b42afada1280 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -108,12 +108,12 @@
108do { \ 108do { \
109 compiletime_assert_atomic_type(*p); \ 109 compiletime_assert_atomic_type(*p); \
110 smp_mb(); \ 110 smp_mb(); \
111 ACCESS_ONCE(*p) = (v); \ 111 WRITE_ONCE(*p, v); \
112} while (0) 112} while (0)
113 113
114#define smp_load_acquire(p) \ 114#define smp_load_acquire(p) \
115({ \ 115({ \
116 typeof(*p) ___p1 = ACCESS_ONCE(*p); \ 116 typeof(*p) ___p1 = READ_ONCE(*p); \
117 compiletime_assert_atomic_type(*p); \ 117 compiletime_assert_atomic_type(*p); \
118 smp_mb(); \ 118 smp_mb(); \
119 ___p1; \ 119 ___p1; \
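
Switching to WRITE_ONCE()/READ_ONCE() does not change how the barriers are used; the usual publish/consume pairing still looks like the sketch below, where data, ready, compute() and consume() are stand-ins:

	/* producer */
	data = compute();
	smp_store_release(&ready, 1);	/* orders the payload before the flag */

	/* consumer */
	if (smp_load_acquire(&ready))	/* orders the flag before the payload read */
		consume(data);
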
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 940d5ec122c9..b1bc954eccf3 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -6,6 +6,7 @@
6#include <linux/scatterlist.h> 6#include <linux/scatterlist.h>
7#include <linux/dma-debug.h> 7#include <linux/dma-debug.h>
8#include <linux/dma-attrs.h> 8#include <linux/dma-attrs.h>
9#include <asm-generic/dma-coherent.h>
9 10
10static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, 11static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
11 size_t size, 12 size_t size,
@@ -237,4 +238,121 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
237 238
238#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) 239#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
239 240
241#ifndef arch_dma_alloc_attrs
242#define arch_dma_alloc_attrs(dev, flag) (true)
243#endif
244
245static inline void *dma_alloc_attrs(struct device *dev, size_t size,
246 dma_addr_t *dma_handle, gfp_t flag,
247 struct dma_attrs *attrs)
248{
249 struct dma_map_ops *ops = get_dma_ops(dev);
250 void *cpu_addr;
251
252 BUG_ON(!ops);
253
254 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
255 return cpu_addr;
256
257 if (!arch_dma_alloc_attrs(&dev, &flag))
258 return NULL;
259 if (!ops->alloc)
260 return NULL;
261
262 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
263 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
264 return cpu_addr;
265}
266
267static inline void dma_free_attrs(struct device *dev, size_t size,
268 void *cpu_addr, dma_addr_t dma_handle,
269 struct dma_attrs *attrs)
270{
271 struct dma_map_ops *ops = get_dma_ops(dev);
272
273 BUG_ON(!ops);
274 WARN_ON(irqs_disabled());
275
276 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
277 return;
278
279 if (!ops->free)
280 return;
281
282 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
283 ops->free(dev, size, cpu_addr, dma_handle, attrs);
284}
285
286static inline void *dma_alloc_coherent(struct device *dev, size_t size,
287 dma_addr_t *dma_handle, gfp_t flag)
288{
289 return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
290}
291
292static inline void dma_free_coherent(struct device *dev, size_t size,
293 void *cpu_addr, dma_addr_t dma_handle)
294{
295 return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
296}
297
298static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
299 dma_addr_t *dma_handle, gfp_t gfp)
300{
301 DEFINE_DMA_ATTRS(attrs);
302
303 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
304 return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
305}
306
307static inline void dma_free_noncoherent(struct device *dev, size_t size,
308 void *cpu_addr, dma_addr_t dma_handle)
309{
310 DEFINE_DMA_ATTRS(attrs);
311
312 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
313 dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
314}
315
316static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
317{
318 debug_dma_mapping_error(dev, dma_addr);
319
320 if (get_dma_ops(dev)->mapping_error)
321 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
322
323#ifdef DMA_ERROR_CODE
324 return dma_addr == DMA_ERROR_CODE;
325#else
326 return 0;
327#endif
328}
329
330#ifndef HAVE_ARCH_DMA_SUPPORTED
331static inline int dma_supported(struct device *dev, u64 mask)
332{
333 struct dma_map_ops *ops = get_dma_ops(dev);
334
335 if (!ops)
336 return 0;
337 if (!ops->dma_supported)
338 return 1;
339 return ops->dma_supported(dev, mask);
340}
341#endif
342
343#ifndef HAVE_ARCH_DMA_SET_MASK
344static inline int dma_set_mask(struct device *dev, u64 mask)
345{
346 struct dma_map_ops *ops = get_dma_ops(dev);
347
348 if (ops->set_dma_mask)
349 return ops->set_dma_mask(dev, mask);
350
351 if (!dev->dma_mask || !dma_supported(dev, mask))
352 return -EIO;
353 *dev->dma_mask = mask;
354 return 0;
355}
356#endif
357
240#endif 358#endif
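
With dma_alloc_attrs()/dma_free_attrs() and friends consolidated here, the caller-visible API is unchanged. A minimal usage sketch, where dev and BUF_SIZE are placeholders and error paths are trimmed:

	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, BUF_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* program dma_handle into the device, touch the buffer via cpu_addr */

	dma_free_coherent(dev, BUF_SIZE, cpu_addr, dma_handle);
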
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index a5de55c04fb2..734ad4db388c 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -11,6 +11,8 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
11 unsigned long size); 11 unsigned long size);
12extern void *early_memremap(resource_size_t phys_addr, 12extern void *early_memremap(resource_size_t phys_addr,
13 unsigned long size); 13 unsigned long size);
14extern void *early_memremap_ro(resource_size_t phys_addr,
15 unsigned long size);
14extern void early_iounmap(void __iomem *addr, unsigned long size); 16extern void early_iounmap(void __iomem *addr, unsigned long size);
15extern void early_memunmap(void *addr, unsigned long size); 17extern void early_memunmap(void *addr, unsigned long size);
16 18
@@ -33,6 +35,12 @@ extern void early_ioremap_setup(void);
33 */ 35 */
34extern void early_ioremap_reset(void); 36extern void early_ioremap_reset(void);
35 37
38/*
39 * Early copy from unmapped memory to kernel mapped memory.
40 */
41extern void copy_from_early_mem(void *dest, phys_addr_t src,
42 unsigned long size);
43
36#else 44#else
37static inline void early_ioremap_init(void) { } 45static inline void early_ioremap_init(void) { }
38static inline void early_ioremap_setup(void) { } 46static inline void early_ioremap_setup(void) { }
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index f23174fb9ec4..1cbb8338edf3 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -46,6 +46,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
46#ifndef FIXMAP_PAGE_NORMAL 46#ifndef FIXMAP_PAGE_NORMAL
47#define FIXMAP_PAGE_NORMAL PAGE_KERNEL 47#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
48#endif 48#endif
49#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
50#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
51#endif
49#ifndef FIXMAP_PAGE_NOCACHE 52#ifndef FIXMAP_PAGE_NOCACHE
50#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE 53#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
51#endif 54#endif
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index f56094cfdeff..eed3bbe88c8a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -736,6 +736,35 @@ static inline void *phys_to_virt(unsigned long address)
736} 736}
737#endif 737#endif
738 738
739/**
740 * DOC: ioremap() and ioremap_*() variants
741 *
 742 * If you have an IOMMU, your architecture is expected to have both ioremap()
 743 * and iounmap() implemented; otherwise the asm-generic helpers will provide a
 744 * direct mapping.
 745 *
 746 * There are several ioremap_*() call variants. If you have no IOMMU, all of
 747 * them naturally default to direct mapping, and you can override these
 748 * defaults. If you have an IOMMU you are highly encouraged to provide your
 749 * own ioremap variant implementation, as there currently is no safe,
 750 * architecture-agnostic default. To avoid possible improper behaviour, the
 751 * default asm-generic ioremap_*() variants all return NULL when an IOMMU is
 752 * available. If you have defined your own ioremap_*() variant you must then
 753 * declare it as defined to itself to avoid the default NULL return.
754 */
755
756#ifdef CONFIG_MMU
757
758#ifndef ioremap_uc
759#define ioremap_uc ioremap_uc
760static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
761{
762 return NULL;
763}
764#endif
765
766#else /* !CONFIG_MMU */
767
739/* 768/*
740 * Change "struct page" to physical address. 769 * Change "struct page" to physical address.
741 * 770 *
@@ -743,7 +772,6 @@ static inline void *phys_to_virt(unsigned long address)
743 * you'll need to provide your own definitions. 772 * you'll need to provide your own definitions.
744 */ 773 */
745 774
746#ifndef CONFIG_MMU
747#ifndef ioremap 775#ifndef ioremap
748#define ioremap ioremap 776#define ioremap ioremap
749static inline void __iomem *ioremap(phys_addr_t offset, size_t size) 777static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
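
The DOC comment's rule about declaring an ioremap_*() variant "as defined to itself" comes down to the following pattern in an architecture's asm/io.h; this is a sketch of the shape x86 uses for its ioremap_uc(), not part of this patch:

	void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
	#define ioremap_uc ioremap_uc
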
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 14909b0b9cae..f20f407ce45d 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -69,6 +69,12 @@
69}) 69})
70#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ 70#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
71 71
72/*
73 * Convert a physical address to a Page Frame Number and back
74 */
75#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
76#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
77
72#define page_to_pfn __page_to_pfn 78#define page_to_pfn __page_to_pfn
73#define pfn_to_page __pfn_to_page 79#define pfn_to_page __pfn_to_page
74 80
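
A quick worked example of the new helpers, assuming PAGE_SHIFT is 12 (4 KiB pages):

	/* __phys_to_pfn(0x12345000) == 0x12345000 >> 12 == 0x12345 */
	/* __pfn_to_phys(0x12345)    == 0x12345 << 12    == 0x12345000 */
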
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index 7389c87116a0..b1e17fcee2d0 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -15,9 +15,13 @@ struct pci_dev;
15#ifdef CONFIG_PCI 15#ifdef CONFIG_PCI
16/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ 16/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
17extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 17extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
18extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
18extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, 19extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
19 unsigned long offset, 20 unsigned long offset,
20 unsigned long maxlen); 21 unsigned long maxlen);
22extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
23 unsigned long offset,
24 unsigned long maxlen);
21/* Create a virtual mapping cookie for a port on a given PCI device. 25/* Create a virtual mapping cookie for a port on a given PCI device.
22 * Do not call this directly, it exists to make it easier for architectures 26 * Do not call this directly, it exists to make it easier for architectures
23 * to override */ 27 * to override */
@@ -34,12 +38,22 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon
34 return NULL; 38 return NULL;
35} 39}
36 40
41static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
42{
43 return NULL;
44}
37static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, 45static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
38 unsigned long offset, 46 unsigned long offset,
39 unsigned long maxlen) 47 unsigned long maxlen)
40{ 48{
41 return NULL; 49 return NULL;
42} 50}
51static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
52 unsigned long offset,
53 unsigned long maxlen)
54{
55 return NULL;
56}
43#endif 57#endif
44 58
45#endif /* __ASM_GENERIC_IO_H */ 59#endif /* __ASM_GENERIC_IO_H */
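
A short usage sketch for the new write-combining helpers; pdev, the BAR number and the framebuffer use case are placeholders:

	void __iomem *fb;

	fb = pci_iomap_wc(pdev, 2, 0);	/* map all of BAR 2 write-combined */
	if (!fb)
		return -ENOMEM;

	/* streaming writes to fb ... */

	pci_iounmap(pdev, fb);
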
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d0a7a4753db2..0bec580a4885 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
71/* 71/*
72 * Returns true when we need to resched and can (barring IRQ state). 72 * Returns true when we need to resched and can (barring IRQ state).
73 */ 73 */
74static __always_inline bool should_resched(void) 74static __always_inline bool should_resched(int preempt_offset)
75{ 75{
76 return unlikely(!preempt_count() && tif_need_resched()); 76 return unlikely(preempt_count() == preempt_offset &&
77 tif_need_resched());
77} 78}
78 79
79#ifdef CONFIG_PREEMPT 80#ifdef CONFIG_PREEMPT
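
The new preempt_offset argument lets a caller that intentionally holds preemption disabled (for example via one held spinlock) still detect a pending reschedule. A rough sketch of the cond_resched_lock()-style pattern; lock is hypothetical and PREEMPT_LOCK_OFFSET is the amount one held spinlock adds to preempt_count on PREEMPT kernels:

	if (should_resched(PREEMPT_LOCK_OFFSET)) {
		spin_unlock(lock);
		schedule();
		spin_lock(lock);
	}
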
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54bf983..54a8e65e18b6 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
36/* 36/*
37 * External function declarations 37 * External function declarations
38 */ 38 */
39extern void queue_read_lock_slowpath(struct qrwlock *lock); 39extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
40extern void queue_write_lock_slowpath(struct qrwlock *lock); 40extern void queued_write_lock_slowpath(struct qrwlock *lock);
41 41
42/** 42/**
43 * queue_read_can_lock- would read_trylock() succeed? 43 * queued_read_can_lock- would read_trylock() succeed?
44 * @lock: Pointer to queue rwlock structure 44 * @lock: Pointer to queue rwlock structure
45 */ 45 */
46static inline int queue_read_can_lock(struct qrwlock *lock) 46static inline int queued_read_can_lock(struct qrwlock *lock)
47{ 47{
48 return !(atomic_read(&lock->cnts) & _QW_WMASK); 48 return !(atomic_read(&lock->cnts) & _QW_WMASK);
49} 49}
50 50
51/** 51/**
52 * queue_write_can_lock- would write_trylock() succeed? 52 * queued_write_can_lock- would write_trylock() succeed?
53 * @lock: Pointer to queue rwlock structure 53 * @lock: Pointer to queue rwlock structure
54 */ 54 */
55static inline int queue_write_can_lock(struct qrwlock *lock) 55static inline int queued_write_can_lock(struct qrwlock *lock)
56{ 56{
57 return !atomic_read(&lock->cnts); 57 return !atomic_read(&lock->cnts);
58} 58}
59 59
60/** 60/**
61 * queue_read_trylock - try to acquire read lock of a queue rwlock 61 * queued_read_trylock - try to acquire read lock of a queue rwlock
62 * @lock : Pointer to queue rwlock structure 62 * @lock : Pointer to queue rwlock structure
63 * Return: 1 if lock acquired, 0 if failed 63 * Return: 1 if lock acquired, 0 if failed
64 */ 64 */
65static inline int queue_read_trylock(struct qrwlock *lock) 65static inline int queued_read_trylock(struct qrwlock *lock)
66{ 66{
67 u32 cnts; 67 u32 cnts;
68 68
69 cnts = atomic_read(&lock->cnts); 69 cnts = atomic_read(&lock->cnts);
70 if (likely(!(cnts & _QW_WMASK))) { 70 if (likely(!(cnts & _QW_WMASK))) {
71 cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts); 71 cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
72 if (likely(!(cnts & _QW_WMASK))) 72 if (likely(!(cnts & _QW_WMASK)))
73 return 1; 73 return 1;
74 atomic_sub(_QR_BIAS, &lock->cnts); 74 atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@ static inline int queue_read_trylock(struct qrwlock *lock)
77} 77}
78 78
79/** 79/**
80 * queue_write_trylock - try to acquire write lock of a queue rwlock 80 * queued_write_trylock - try to acquire write lock of a queue rwlock
81 * @lock : Pointer to queue rwlock structure 81 * @lock : Pointer to queue rwlock structure
82 * Return: 1 if lock acquired, 0 if failed 82 * Return: 1 if lock acquired, 0 if failed
83 */ 83 */
84static inline int queue_write_trylock(struct qrwlock *lock) 84static inline int queued_write_trylock(struct qrwlock *lock)
85{ 85{
86 u32 cnts; 86 u32 cnts;
87 87
@@ -89,78 +89,70 @@ static inline int queue_write_trylock(struct qrwlock *lock)
89 if (unlikely(cnts)) 89 if (unlikely(cnts))
90 return 0; 90 return 0;
91 91
92 return likely(atomic_cmpxchg(&lock->cnts, 92 return likely(atomic_cmpxchg_acquire(&lock->cnts,
93 cnts, cnts | _QW_LOCKED) == cnts); 93 cnts, cnts | _QW_LOCKED) == cnts);
94} 94}
95/** 95/**
96 * queue_read_lock - acquire read lock of a queue rwlock 96 * queued_read_lock - acquire read lock of a queue rwlock
97 * @lock: Pointer to queue rwlock structure 97 * @lock: Pointer to queue rwlock structure
98 */ 98 */
99static inline void queue_read_lock(struct qrwlock *lock) 99static inline void queued_read_lock(struct qrwlock *lock)
100{ 100{
101 u32 cnts; 101 u32 cnts;
102 102
103 cnts = atomic_add_return(_QR_BIAS, &lock->cnts); 103 cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
104 if (likely(!(cnts & _QW_WMASK))) 104 if (likely(!(cnts & _QW_WMASK)))
105 return; 105 return;
106 106
107 /* The slowpath will decrement the reader count, if necessary. */ 107 /* The slowpath will decrement the reader count, if necessary. */
108 queue_read_lock_slowpath(lock); 108 queued_read_lock_slowpath(lock, cnts);
109} 109}
110 110
111/** 111/**
112 * queue_write_lock - acquire write lock of a queue rwlock 112 * queued_write_lock - acquire write lock of a queue rwlock
113 * @lock : Pointer to queue rwlock structure 113 * @lock : Pointer to queue rwlock structure
114 */ 114 */
115static inline void queue_write_lock(struct qrwlock *lock) 115static inline void queued_write_lock(struct qrwlock *lock)
116{ 116{
117 /* Optimize for the unfair lock case where the fair flag is 0. */ 117 /* Optimize for the unfair lock case where the fair flag is 0. */
118 if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) 118 if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
119 return; 119 return;
120 120
121 queue_write_lock_slowpath(lock); 121 queued_write_lock_slowpath(lock);
122} 122}
123 123
124/** 124/**
125 * queue_read_unlock - release read lock of a queue rwlock 125 * queued_read_unlock - release read lock of a queue rwlock
126 * @lock : Pointer to queue rwlock structure 126 * @lock : Pointer to queue rwlock structure
127 */ 127 */
128static inline void queue_read_unlock(struct qrwlock *lock) 128static inline void queued_read_unlock(struct qrwlock *lock)
129{ 129{
130 /* 130 /*
131 * Atomically decrement the reader count 131 * Atomically decrement the reader count
132 */ 132 */
133 smp_mb__before_atomic(); 133 (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
134 atomic_sub(_QR_BIAS, &lock->cnts);
135} 134}
136 135
137#ifndef queue_write_unlock
138/** 136/**
139 * queue_write_unlock - release write lock of a queue rwlock 137 * queued_write_unlock - release write lock of a queue rwlock
140 * @lock : Pointer to queue rwlock structure 138 * @lock : Pointer to queue rwlock structure
141 */ 139 */
142static inline void queue_write_unlock(struct qrwlock *lock) 140static inline void queued_write_unlock(struct qrwlock *lock)
143{ 141{
144 /* 142 smp_store_release((u8 *)&lock->cnts, 0);
145 * If the writer field is atomic, it can be cleared directly.
146 * Otherwise, an atomic subtraction will be used to clear it.
147 */
148 smp_mb__before_atomic();
149 atomic_sub(_QW_LOCKED, &lock->cnts);
150} 143}
151#endif
152 144
153/* 145/*
154 * Remapping rwlock architecture specific functions to the corresponding 146 * Remapping rwlock architecture specific functions to the corresponding
155 * queue rwlock functions. 147 * queue rwlock functions.
156 */ 148 */
157#define arch_read_can_lock(l) queue_read_can_lock(l) 149#define arch_read_can_lock(l) queued_read_can_lock(l)
158#define arch_write_can_lock(l) queue_write_can_lock(l) 150#define arch_write_can_lock(l) queued_write_can_lock(l)
159#define arch_read_lock(l) queue_read_lock(l) 151#define arch_read_lock(l) queued_read_lock(l)
160#define arch_write_lock(l) queue_write_lock(l) 152#define arch_write_lock(l) queued_write_lock(l)
161#define arch_read_trylock(l) queue_read_trylock(l) 153#define arch_read_trylock(l) queued_read_trylock(l)
162#define arch_write_trylock(l) queue_write_trylock(l) 154#define arch_write_trylock(l) queued_write_trylock(l)
163#define arch_read_unlock(l) queue_read_unlock(l) 155#define arch_read_unlock(l) queued_read_unlock(l)
164#define arch_write_unlock(l) queue_write_unlock(l) 156#define arch_write_unlock(l) queued_write_unlock(l)
165 157
166#endif /* __ASM_GENERIC_QRWLOCK_H */ 158#endif /* __ASM_GENERIC_QRWLOCK_H */
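
The queued_*() helpers sit behind the generic rwlock API on architectures that use the queued rwlock implementation, so the rename is invisible to ordinary callers. A small usage sketch with a stand-in lock:

	static DEFINE_RWLOCK(my_lock);

	read_lock(&my_lock);	/* -> arch_read_lock() -> queued_read_lock() */
	/* reader-side critical section */
	read_unlock(&my_lock);	/* -> queued_read_unlock(), now a release operation */
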
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index fa86f240c874..4e3b6558331e 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -16,6 +16,9 @@
16#include <linux/rtc.h> 16#include <linux/rtc.h>
17#include <linux/bcd.h> 17#include <linux/bcd.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#ifdef CONFIG_ACPI
20#include <linux/acpi.h>
21#endif
19 22
20#define RTC_PIE 0x40 /* periodic interrupt enable */ 23#define RTC_PIE 0x40 /* periodic interrupt enable */
21#define RTC_AIE 0x20 /* alarm interrupt enable */ 24#define RTC_AIE 0x20 /* alarm interrupt enable */
@@ -46,6 +49,7 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
46{ 49{
47 unsigned char ctrl; 50 unsigned char ctrl;
48 unsigned long flags; 51 unsigned long flags;
52 unsigned char century = 0;
49 53
50#ifdef CONFIG_MACH_DECSTATION 54#ifdef CONFIG_MACH_DECSTATION
51 unsigned int real_year; 55 unsigned int real_year;
@@ -79,6 +83,11 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
79#ifdef CONFIG_MACH_DECSTATION 83#ifdef CONFIG_MACH_DECSTATION
80 real_year = CMOS_READ(RTC_DEC_YEAR); 84 real_year = CMOS_READ(RTC_DEC_YEAR);
81#endif 85#endif
86#ifdef CONFIG_ACPI
87 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
88 acpi_gbl_FADT.century)
89 century = CMOS_READ(acpi_gbl_FADT.century);
90#endif
82 ctrl = CMOS_READ(RTC_CONTROL); 91 ctrl = CMOS_READ(RTC_CONTROL);
83 spin_unlock_irqrestore(&rtc_lock, flags); 92 spin_unlock_irqrestore(&rtc_lock, flags);
84 93
@@ -90,12 +99,16 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
90 time->tm_mday = bcd2bin(time->tm_mday); 99 time->tm_mday = bcd2bin(time->tm_mday);
91 time->tm_mon = bcd2bin(time->tm_mon); 100 time->tm_mon = bcd2bin(time->tm_mon);
92 time->tm_year = bcd2bin(time->tm_year); 101 time->tm_year = bcd2bin(time->tm_year);
102 century = bcd2bin(century);
93 } 103 }
94 104
95#ifdef CONFIG_MACH_DECSTATION 105#ifdef CONFIG_MACH_DECSTATION
96 time->tm_year += real_year - 72; 106 time->tm_year += real_year - 72;
97#endif 107#endif
98 108
109 if (century)
110 time->tm_year += (century - 19) * 100;
111
99 /* 112 /*
100 * Account for differences between how the RTC uses the values 113 * Account for differences between how the RTC uses the values
101 * and how they are defined in a struct rtc_time; 114 * and how they are defined in a struct rtc_time;
@@ -122,6 +135,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
122#ifdef CONFIG_MACH_DECSTATION 135#ifdef CONFIG_MACH_DECSTATION
123 unsigned int real_yrs, leap_yr; 136 unsigned int real_yrs, leap_yr;
124#endif 137#endif
138 unsigned char century = 0;
125 139
126 yrs = time->tm_year; 140 yrs = time->tm_year;
127 mon = time->tm_mon + 1; /* tm_mon starts at zero */ 141 mon = time->tm_mon + 1; /* tm_mon starts at zero */
@@ -150,6 +164,15 @@ static inline int __set_rtc_time(struct rtc_time *time)
150 yrs = 73; 164 yrs = 73;
151 } 165 }
152#endif 166#endif
167
168#ifdef CONFIG_ACPI
169 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
170 acpi_gbl_FADT.century) {
171 century = (yrs + 1900) / 100;
172 yrs %= 100;
173 }
174#endif
175
153 /* These limits and adjustments are independent of 176 /* These limits and adjustments are independent of
154 * whether the chip is in binary mode or not. 177 * whether the chip is in binary mode or not.
155 */ 178 */
@@ -169,6 +192,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
169 day = bin2bcd(day); 192 day = bin2bcd(day);
170 mon = bin2bcd(mon); 193 mon = bin2bcd(mon);
171 yrs = bin2bcd(yrs); 194 yrs = bin2bcd(yrs);
195 century = bin2bcd(century);
172 } 196 }
173 197
174 save_control = CMOS_READ(RTC_CONTROL); 198 save_control = CMOS_READ(RTC_CONTROL);
@@ -185,6 +209,11 @@ static inline int __set_rtc_time(struct rtc_time *time)
185 CMOS_WRITE(hrs, RTC_HOURS); 209 CMOS_WRITE(hrs, RTC_HOURS);
186 CMOS_WRITE(min, RTC_MINUTES); 210 CMOS_WRITE(min, RTC_MINUTES);
187 CMOS_WRITE(sec, RTC_SECONDS); 211 CMOS_WRITE(sec, RTC_SECONDS);
212#ifdef CONFIG_ACPI
213 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
214 acpi_gbl_FADT.century)
215 CMOS_WRITE(century, acpi_gbl_FADT.century);
216#endif
188 217
189 CMOS_WRITE(save_control, RTC_CONTROL); 218 CMOS_WRITE(save_control, RTC_CONTROL);
190 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 219 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
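
A worked example of the century handling, assuming the FADT advertises a century register: storing 2015 means tm_year == 115, so century = (115 + 1900) / 100 = 20 and yrs = 115 % 100 = 15 are written to CMOS; on the way back, tm_year = 15 + (20 - 19) * 100 = 115, i.e. 2015 again.
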
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8bd374d3cf21..1781e54ea6d3 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -412,12 +412,10 @@
412 * during second ld run in second ld pass when generating System.map */ 412 * during second ld run in second ld pass when generating System.map */
413#define TEXT_TEXT \ 413#define TEXT_TEXT \
414 ALIGN_FUNCTION(); \ 414 ALIGN_FUNCTION(); \
415 *(.text.hot) \ 415 *(.text.hot .text .text.fixup .text.unlikely) \
416 *(.text .text.fixup) \
417 *(.ref.text) \ 416 *(.ref.text) \
418 MEM_KEEP(init.text) \ 417 MEM_KEEP(init.text) \
419 MEM_KEEP(exit.text) \ 418 MEM_KEEP(exit.text) \
420 *(.text.unlikely)
421 419
422 420
 423/* sched.text is aligned to function alignment to ensure we have the same 421
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 7169ad04acc0..077cae1e6b51 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * AEAD: Authenticated Encryption with Associated Data 2 * AEAD: Authenticated Encryption with Associated Data
3 * 3 *
4 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free 7 * under the terms of the GNU General Public License as published by the Free
@@ -45,16 +45,40 @@
45 * a breach in the integrity of the message. In essence, that -EBADMSG error 45 * a breach in the integrity of the message. In essence, that -EBADMSG error
46 * code is the key bonus an AEAD cipher has over "standard" block chaining 46 * code is the key bonus an AEAD cipher has over "standard" block chaining
47 * modes. 47 * modes.
48 *
49 * Memory Structure:
50 *
51 * To support the needs of the most prominent user of AEAD ciphers, namely
52 * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
53 * to.
54 *
55 * The scatter list pointing to the input data must contain:
56 *
57 * * for RFC4106 ciphers, the concatenation of
58 * associated authentication data || IV || plaintext or ciphertext. Note, the
59 * same IV (buffer) is also set with the aead_request_set_crypt call. Note,
60 * the API call of aead_request_set_ad must provide the length of the AAD and
61 * the IV. The API call of aead_request_set_crypt only points to the size of
62 * the input plaintext or ciphertext.
63 *
64 * * for "normal" AEAD ciphers, the concatenation of
65 * associated authentication data || plaintext or ciphertext.
66 *
 67 * It is important to note that if multiple scatter gather list entries form
 68 * the input data mentioned above, the first entry must not point to a NULL
 69 * buffer. If the AAD buffer can ever be NULL, the calling code must take
 70 * precautions to ensure that the first scatter gather list entry does not
 71 * end up pointing to a NULL buffer.
48 */ 72 */
49 73
74struct crypto_aead;
75
50/** 76/**
51 * struct aead_request - AEAD request 77 * struct aead_request - AEAD request
52 * @base: Common attributes for async crypto requests 78 * @base: Common attributes for async crypto requests
53 * @old: Boolean whether the old or new AEAD API is used
54 * @assoclen: Length in bytes of associated data for authentication 79 * @assoclen: Length in bytes of associated data for authentication
55 * @cryptlen: Length of data to be encrypted or decrypted 80 * @cryptlen: Length of data to be encrypted or decrypted
56 * @iv: Initialisation vector 81 * @iv: Initialisation vector
57 * @assoc: Associated data
58 * @src: Source data 82 * @src: Source data
59 * @dst: Destination data 83 * @dst: Destination data
60 * @__ctx: Start of private context data 84 * @__ctx: Start of private context data
@@ -62,14 +86,11 @@
62struct aead_request { 86struct aead_request {
63 struct crypto_async_request base; 87 struct crypto_async_request base;
64 88
65 bool old;
66
67 unsigned int assoclen; 89 unsigned int assoclen;
68 unsigned int cryptlen; 90 unsigned int cryptlen;
69 91
70 u8 *iv; 92 u8 *iv;
71 93
72 struct scatterlist *assoc;
73 struct scatterlist *src; 94 struct scatterlist *src;
74 struct scatterlist *dst; 95 struct scatterlist *dst;
75 96
@@ -77,19 +98,6 @@ struct aead_request {
77}; 98};
78 99
79/** 100/**
80 * struct aead_givcrypt_request - AEAD request with IV generation
81 * @seq: Sequence number for IV generation
82 * @giv: Space for generated IV
83 * @areq: The AEAD request itself
84 */
85struct aead_givcrypt_request {
86 u64 seq;
87 u8 *giv;
88
89 struct aead_request areq;
90};
91
92/**
93 * struct aead_alg - AEAD cipher definition 101 * struct aead_alg - AEAD cipher definition
94 * @maxauthsize: Set the maximum authentication tag size supported by the 102 * @maxauthsize: Set the maximum authentication tag size supported by the
95 * transformation. A transformation may support smaller tag sizes. 103 * transformation. A transformation may support smaller tag sizes.
@@ -141,16 +149,6 @@ struct aead_alg {
141}; 149};
142 150
143struct crypto_aead { 151struct crypto_aead {
144 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
145 unsigned int keylen);
146 int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
147 int (*encrypt)(struct aead_request *req);
148 int (*decrypt)(struct aead_request *req);
149 int (*givencrypt)(struct aead_givcrypt_request *req);
150 int (*givdecrypt)(struct aead_givcrypt_request *req);
151
152 struct crypto_aead *child;
153
154 unsigned int authsize; 152 unsigned int authsize;
155 unsigned int reqsize; 153 unsigned int reqsize;
156 154
@@ -192,16 +190,6 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
192 crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm)); 190 crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
193} 191}
194 192
195static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
196{
197 return tfm;
198}
199
200static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
201{
202 return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
203}
204
205static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) 193static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
206{ 194{
207 return container_of(crypto_aead_tfm(tfm)->__crt_alg, 195 return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -210,8 +198,7 @@ static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
210 198
211static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg) 199static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
212{ 200{
213 return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize : 201 return alg->ivsize;
214 alg->ivsize;
215} 202}
216 203
217/** 204/**
@@ -337,7 +324,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
337 */ 324 */
338static inline int crypto_aead_encrypt(struct aead_request *req) 325static inline int crypto_aead_encrypt(struct aead_request *req)
339{ 326{
340 return crypto_aead_reqtfm(req)->encrypt(req); 327 return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
341} 328}
342 329
343/** 330/**
@@ -364,10 +351,12 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
364 */ 351 */
365static inline int crypto_aead_decrypt(struct aead_request *req) 352static inline int crypto_aead_decrypt(struct aead_request *req)
366{ 353{
367 if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req))) 354 struct crypto_aead *aead = crypto_aead_reqtfm(req);
355
356 if (req->cryptlen < crypto_aead_authsize(aead))
368 return -EINVAL; 357 return -EINVAL;
369 358
370 return crypto_aead_reqtfm(req)->decrypt(req); 359 return crypto_aead_alg(aead)->decrypt(req);
371} 360}
372 361
373/** 362/**
@@ -387,7 +376,10 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
387 * 376 *
388 * Return: number of bytes 377 * Return: number of bytes
389 */ 378 */
390unsigned int crypto_aead_reqsize(struct crypto_aead *tfm); 379static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
380{
381 return tfm->reqsize;
382}
391 383
392/** 384/**
393 * aead_request_set_tfm() - update cipher handle reference in request 385 * aead_request_set_tfm() - update cipher handle reference in request
@@ -400,7 +392,7 @@ unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
400static inline void aead_request_set_tfm(struct aead_request *req, 392static inline void aead_request_set_tfm(struct aead_request *req,
401 struct crypto_aead *tfm) 393 struct crypto_aead *tfm)
402{ 394{
403 req->base.tfm = crypto_aead_tfm(tfm->child); 395 req->base.tfm = crypto_aead_tfm(tfm);
404} 396}
405 397
406/** 398/**
@@ -526,23 +518,6 @@ static inline void aead_request_set_crypt(struct aead_request *req,
526} 518}
527 519
528/** 520/**
529 * aead_request_set_assoc() - set the associated data scatter / gather list
530 * @req: request handle
531 * @assoc: associated data scatter / gather list
532 * @assoclen: number of bytes to process from @assoc
533 *
534 * Obsolete, do not use.
535 */
536static inline void aead_request_set_assoc(struct aead_request *req,
537 struct scatterlist *assoc,
538 unsigned int assoclen)
539{
540 req->assoc = assoc;
541 req->assoclen = assoclen;
542 req->old = true;
543}
544
545/**
546 * aead_request_set_ad - set associated data information 521 * aead_request_set_ad - set associated data information
547 * @req: request handle 522 * @req: request handle
548 * @assoclen: number of bytes in associated data 523 * @assoclen: number of bytes in associated data
@@ -554,77 +529,6 @@ static inline void aead_request_set_ad(struct aead_request *req,
554 unsigned int assoclen) 529 unsigned int assoclen)
555{ 530{
556 req->assoclen = assoclen; 531 req->assoclen = assoclen;
557 req->old = false;
558}
559
560static inline struct crypto_aead *aead_givcrypt_reqtfm(
561 struct aead_givcrypt_request *req)
562{
563 return crypto_aead_reqtfm(&req->areq);
564}
565
566static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
567{
568 return aead_givcrypt_reqtfm(req)->givencrypt(req);
569};
570
571static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
572{
573 return aead_givcrypt_reqtfm(req)->givdecrypt(req);
574};
575
576static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
577 struct crypto_aead *tfm)
578{
579 req->areq.base.tfm = crypto_aead_tfm(tfm);
580}
581
582static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
583 struct crypto_aead *tfm, gfp_t gfp)
584{
585 struct aead_givcrypt_request *req;
586
587 req = kmalloc(sizeof(struct aead_givcrypt_request) +
588 crypto_aead_reqsize(tfm), gfp);
589
590 if (likely(req))
591 aead_givcrypt_set_tfm(req, tfm);
592
593 return req;
594}
595
596static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
597{
598 kfree(req);
599}
600
601static inline void aead_givcrypt_set_callback(
602 struct aead_givcrypt_request *req, u32 flags,
603 crypto_completion_t compl, void *data)
604{
605 aead_request_set_callback(&req->areq, flags, compl, data);
606}
607
608static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
609 struct scatterlist *src,
610 struct scatterlist *dst,
611 unsigned int nbytes, void *iv)
612{
613 aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
614}
615
616static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
617 struct scatterlist *assoc,
618 unsigned int assoclen)
619{
620 aead_request_set_assoc(&req->areq, assoc, assoclen);
621}
622
623static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
624 u8 *giv, u64 seq)
625{
626 req->giv = giv;
627 req->seq = seq;
628} 532}
629 533
630#endif /* _CRYPTO_AEAD_H */ 534#endif /* _CRYPTO_AEAD_H */
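
To make the single-scatterlist convention above concrete, here is a hedged sketch of an encryption call with the new API. It assumes a "gcm(aes)" transform with AES-128 and a 16-byte tag; key, iv, buf, assoclen and ptlen are placeholders, and error/async-completion handling is omitted:

	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	crypto_aead_setkey(tfm, key, 16);
	crypto_aead_setauthsize(tfm, 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	/* buf = AAD || plaintext, with 16 spare bytes for the tag */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_aead_encrypt(req);	/* -EINPROGRESS is possible for async tfms */

	aead_request_free(req);
	crypto_free_aead(tfm);
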
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index d4ebf6e9af6a..c9fe145f7dd3 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19 19
20struct crypto_aead; 20struct crypto_aead;
21struct crypto_instance;
21struct module; 22struct module;
22struct rtattr; 23struct rtattr;
23struct seq_file; 24struct seq_file;
@@ -30,6 +31,7 @@ struct crypto_type {
30 void (*show)(struct seq_file *m, struct crypto_alg *alg); 31 void (*show)(struct seq_file *m, struct crypto_alg *alg);
31 int (*report)(struct sk_buff *skb, struct crypto_alg *alg); 32 int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
32 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); 33 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
34 void (*free)(struct crypto_instance *inst);
33 35
34 unsigned int type; 36 unsigned int type;
35 unsigned int maskclear; 37 unsigned int maskclear;
@@ -180,7 +182,6 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
180void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); 182void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
181int crypto_enqueue_request(struct crypto_queue *queue, 183int crypto_enqueue_request(struct crypto_queue *queue,
182 struct crypto_async_request *request); 184 struct crypto_async_request *request);
183void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
184struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); 185struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
185int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); 186int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
186 187
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
new file mode 100644
index 000000000000..274bbaeeed0f
--- /dev/null
+++ b/include/crypto/chacha20.h
@@ -0,0 +1,25 @@
1/*
2 * Common values for the ChaCha20 algorithm
3 */
4
5#ifndef _CRYPTO_CHACHA20_H
6#define _CRYPTO_CHACHA20_H
7
8#include <linux/types.h>
9#include <linux/crypto.h>
10
11#define CHACHA20_IV_SIZE 16
12#define CHACHA20_KEY_SIZE 32
13#define CHACHA20_BLOCK_SIZE 64
14
15struct chacha20_ctx {
16 u32 key[8];
17};
18
19void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
20int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
21 unsigned int keysize);
22int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
23 struct scatterlist *src, unsigned int nbytes);
24
25#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 57c8a6ee33c2..8e920b44c0ac 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -63,6 +63,11 @@ struct ahash_request {
63 void *__ctx[] CRYPTO_MINALIGN_ATTR; 63 void *__ctx[] CRYPTO_MINALIGN_ATTR;
64}; 64};
65 65
66#define AHASH_REQUEST_ON_STACK(name, ahash) \
67 char __##name##_desc[sizeof(struct ahash_request) + \
68 crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
69 struct ahash_request *name = (void *)__##name##_desc
70
66/** 71/**
67 * struct ahash_alg - asynchronous message digest definition 72 * struct ahash_alg - asynchronous message digest definition
68 * @init: Initialize the transformation context. Intended only to initialize the 73 * @init: Initialize the transformation context. Intended only to initialize the
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 4b2547186519..5554cdd8d6c1 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * AEAD: Authenticated Encryption with Associated Data 2 * AEAD: Authenticated Encryption with Associated Data
3 * 3 *
4 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free 7 * under the terms of the GNU General Public License as published by the Free
@@ -21,6 +21,7 @@
21struct rtattr; 21struct rtattr;
22 22
23struct aead_instance { 23struct aead_instance {
24 void (*free)(struct aead_instance *inst);
24 union { 25 union {
25 struct { 26 struct {
26 char head[offsetof(struct aead_alg, base)]; 27 char head[offsetof(struct aead_alg, base)];
@@ -34,20 +35,15 @@ struct crypto_aead_spawn {
34 struct crypto_spawn base; 35 struct crypto_spawn base;
35}; 36};
36 37
37extern const struct crypto_type crypto_aead_type; 38struct aead_queue {
38extern const struct crypto_type crypto_nivaead_type; 39 struct crypto_queue base;
40};
39 41
40static inline void *crypto_aead_ctx(struct crypto_aead *tfm) 42static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
41{ 43{
42 return crypto_tfm_ctx(&tfm->base); 44 return crypto_tfm_ctx(&tfm->base);
43} 45}
44 46
45static inline struct crypto_instance *crypto_aead_alg_instance(
46 struct crypto_aead *aead)
47{
48 return crypto_tfm_alg_instance(&aead->base);
49}
50
51static inline struct crypto_instance *aead_crypto_instance( 47static inline struct crypto_instance *aead_crypto_instance(
52 struct aead_instance *inst) 48 struct aead_instance *inst)
53{ 49{
@@ -61,7 +57,7 @@ static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
61 57
62static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead) 58static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
63{ 59{
64 return aead_instance(crypto_aead_alg_instance(aead)); 60 return aead_instance(crypto_tfm_alg_instance(&aead->base));
65} 61}
66 62
67static inline void *aead_instance_ctx(struct aead_instance *inst) 63static inline void *aead_instance_ctx(struct aead_instance *inst)
@@ -90,8 +86,6 @@ static inline void crypto_set_aead_spawn(
90 crypto_set_spawn(&spawn->base, inst); 86 crypto_set_spawn(&spawn->base, inst);
91} 87}
92 88
93struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
94
95int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name, 89int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
96 u32 type, u32 mask); 90 u32 type, u32 mask);
97 91
@@ -100,12 +94,6 @@ static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
100 crypto_drop_spawn(&spawn->base); 94 crypto_drop_spawn(&spawn->base);
101} 95}
102 96
103static inline struct crypto_alg *crypto_aead_spawn_alg(
104 struct crypto_aead_spawn *spawn)
105{
106 return spawn->base.alg;
107}
108
109static inline struct aead_alg *crypto_spawn_aead_alg( 97static inline struct aead_alg *crypto_spawn_aead_alg(
110 struct crypto_aead_spawn *spawn) 98 struct crypto_aead_spawn *spawn)
111{ 99{
@@ -118,43 +106,51 @@ static inline struct crypto_aead *crypto_spawn_aead(
118 return crypto_spawn_tfm2(&spawn->base); 106 return crypto_spawn_tfm2(&spawn->base);
119} 107}
120 108
121struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, 109static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
122 struct rtattr **tb, u32 type, u32 mask); 110 unsigned int reqsize)
123void aead_geniv_free(struct aead_instance *inst); 111{
124int aead_geniv_init(struct crypto_tfm *tfm); 112 aead->reqsize = reqsize;
125void aead_geniv_exit(struct crypto_tfm *tfm); 113}
126 114
127static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv) 115static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
128{ 116{
129 return geniv->child; 117 return alg->maxauthsize;
130} 118}
131 119
132static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req) 120static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
133{ 121{
134 return aead_request_ctx(&req->areq); 122 return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
135} 123}
136 124
137static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req, 125static inline void aead_init_queue(struct aead_queue *queue,
138 int err) 126 unsigned int max_qlen)
139{ 127{
140 aead_request_complete(&req->areq, err); 128 crypto_init_queue(&queue->base, max_qlen);
141} 129}
142 130
143static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, 131static inline int aead_enqueue_request(struct aead_queue *queue,
144 unsigned int reqsize) 132 struct aead_request *request)
145{ 133{
146 crypto_aead_crt(aead)->reqsize = reqsize; 134 return crypto_enqueue_request(&queue->base, &request->base);
147} 135}
148 136
149static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg) 137static inline struct aead_request *aead_dequeue_request(
138 struct aead_queue *queue)
150{ 139{
151 return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize : 140 struct crypto_async_request *req;
152 alg->maxauthsize; 141
142 req = crypto_dequeue_request(&queue->base);
143
144 return req ? container_of(req, struct aead_request, base) : NULL;
153} 145}
154 146
155static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead) 147static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
156{ 148{
157 return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead)); 149 struct crypto_async_request *req;
150
151 req = crypto_get_backlog(&queue->base);
152
153 return req ? container_of(req, struct aead_request, base) : NULL;
158} 154}
159 155
160int crypto_register_aead(struct aead_alg *alg); 156int crypto_register_aead(struct aead_alg *alg);
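
crypto_aead_set_reqsize() now pokes the tfm directly instead of going through the removed crt layer; implementations typically call it from their init hook. A sketch with made-up driver types:

	struct my_aead_req_ctx {		/* hypothetical per-request state */
		u8 iv[16];
	};

	static int my_aead_init(struct crypto_aead *tfm)
	{
		crypto_aead_set_reqsize(tfm, sizeof(struct my_aead_req_ctx));
		return 0;
	}
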
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 9ca9b871aba5..59333635e712 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,10 +15,19 @@
15 15
16#include <crypto/internal/aead.h> 16#include <crypto/internal/aead.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/types.h>
18 19
19struct aead_geniv_ctx { 20struct aead_geniv_ctx {
20 spinlock_t lock; 21 spinlock_t lock;
21 struct crypto_aead *child; 22 struct crypto_aead *child;
23 struct crypto_blkcipher *null;
24 u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
22}; 25};
23 26
27struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
28 struct rtattr **tb, u32 type, u32 mask);
29void aead_geniv_free(struct aead_instance *inst);
30int aead_init_geniv(struct crypto_aead *tfm);
31void aead_exit_geniv(struct crypto_aead *tfm);
32
24#endif /* _CRYPTO_INTERNAL_GENIV_H */ 33#endif /* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index b3a46c515d1b..2cf7a61ece59 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -107,5 +107,20 @@ static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
107 return req->base.flags; 107 return req->base.flags;
108} 108}
109 109
110static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
111{
112 return crypto_tfm_ctx(&tfm->base);
113}
114
115static inline void *skcipher_request_ctx(struct skcipher_request *req)
116{
117 return req->__ctx;
118}
119
120static inline u32 skcipher_request_flags(struct skcipher_request *req)
121{
122 return req->base.flags;
123}
124
110#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ 125#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
111 126
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 691c79172a26..441aff9b5aa7 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -9,6 +9,11 @@
9 * 2 of the Licence, or (at your option) any later version. 9 * 2 of the Licence, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _CRYPTO_PKCS7_H
13#define _CRYPTO_PKCS7_H
14
15#include <crypto/public_key.h>
16
12struct key; 17struct key;
13struct pkcs7_message; 18struct pkcs7_message;
14 19
@@ -33,4 +38,10 @@ extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
33/* 38/*
34 * pkcs7_verify.c 39 * pkcs7_verify.c
35 */ 40 */
36extern int pkcs7_verify(struct pkcs7_message *pkcs7); 41extern int pkcs7_verify(struct pkcs7_message *pkcs7,
42 enum key_being_used_for usage);
43
44extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
45 const void *data, size_t datalen);
46
47#endif /* _CRYPTO_PKCS7_H */
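
pkcs7_verify() now takes the intended key usage so verification policy can differ per caller. A hedged sketch built on the header's parse/free helpers; data and datalen are placeholders:

	struct pkcs7_message *pkcs7;
	int ret;

	pkcs7 = pkcs7_parse_message(data, datalen);
	if (IS_ERR(pkcs7))
		return PTR_ERR(pkcs7);

	ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);

	pkcs7_free_message(pkcs7);
	return ret;
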
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
new file mode 100644
index 000000000000..894df59b74e4
--- /dev/null
+++ b/include/crypto/poly1305.h
@@ -0,0 +1,41 @@
1/*
2 * Common values for the Poly1305 algorithm
3 */
4
5#ifndef _CRYPTO_POLY1305_H
6#define _CRYPTO_POLY1305_H
7
8#include <linux/types.h>
9#include <linux/crypto.h>
10
11#define POLY1305_BLOCK_SIZE 16
12#define POLY1305_KEY_SIZE 32
13#define POLY1305_DIGEST_SIZE 16
14
15struct poly1305_desc_ctx {
16 /* key */
17 u32 r[5];
18 /* finalize key */
19 u32 s[4];
20 /* accumulator */
21 u32 h[5];
22 /* partial buffer */
23 u8 buf[POLY1305_BLOCK_SIZE];
24 /* bytes used in partial buffer */
25 unsigned int buflen;
26 /* r key has been set */
27 bool rset;
28 /* s key has been set */
29 bool sset;
30};
31
32int crypto_poly1305_init(struct shash_desc *desc);
33int crypto_poly1305_setkey(struct crypto_shash *tfm,
34 const u8 *key, unsigned int keylen);
35unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
36 const u8 *src, unsigned int srclen);
37int crypto_poly1305_update(struct shash_desc *desc,
38 const u8 *src, unsigned int srclen);
39int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
40
41#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 54add2069901..067c242b1e15 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -33,12 +33,27 @@ extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST];
33enum pkey_id_type { 33enum pkey_id_type {
34 PKEY_ID_PGP, /* OpenPGP generated key ID */ 34 PKEY_ID_PGP, /* OpenPGP generated key ID */
35 PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */ 35 PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */
36 PKEY_ID_PKCS7, /* Signature in PKCS#7 message */
36 PKEY_ID_TYPE__LAST 37 PKEY_ID_TYPE__LAST
37}; 38};
38 39
39extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST]; 40extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
40 41
41/* 42/*
43 * The use to which an asymmetric key is being put.
44 */
45enum key_being_used_for {
46 VERIFYING_MODULE_SIGNATURE,
47 VERIFYING_FIRMWARE_SIGNATURE,
48 VERIFYING_KEXEC_PE_SIGNATURE,
49 VERIFYING_KEY_SIGNATURE,
50 VERIFYING_KEY_SELF_SIGNATURE,
51 VERIFYING_UNSPECIFIED_SIGNATURE,
52 NR__KEY_BEING_USED_FOR
53};
54extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
55
56/*
42 * Cryptographic data for the public-key subtype of the asymmetric key type. 57 * Cryptographic data for the public-key subtype of the asymmetric key type.
43 * 58 *
44 * Note that this may include private part of the key as well as the public 59 * Note that this may include private part of the key as well as the public
@@ -101,7 +116,8 @@ extern int verify_signature(const struct key *key,
101 116
102struct asymmetric_key_id; 117struct asymmetric_key_id;
103extern struct key *x509_request_asymmetric_key(struct key *keyring, 118extern struct key *x509_request_asymmetric_key(struct key *keyring,
104 const struct asymmetric_key_id *kid, 119 const struct asymmetric_key_id *id,
120 const struct asymmetric_key_id *skid,
105 bool partial); 121 bool partial);
106 122
107#endif /* _LINUX_PUBLIC_KEY_H */ 123#endif /* _LINUX_PUBLIC_KEY_H */
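The exported name table lets callers report the intended usage in human-readable form; a minimal sketch (hypothetical helper, not from this patch):

#include <crypto/public_key.h>
#include <linux/printk.h>

static void example_log_usage(enum key_being_used_for usage)
{
	/* key_being_used_for[] maps the enum value to a printable string. */
	pr_info("verifying signature for %s\n", key_being_used_for[usage]);
}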
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 96670e7e7c14..35f99b68d037 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,14 +25,6 @@
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27 27
28static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
29 struct scatterlist *sg2)
30{
31 sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
32 sg1[num - 1].page_link &= ~0x02;
33 sg1[num - 1].page_link |= 0x01;
34}
35
36static inline void scatterwalk_crypto_chain(struct scatterlist *head, 28static inline void scatterwalk_crypto_chain(struct scatterlist *head,
37 struct scatterlist *sg, 29 struct scatterlist *sg,
38 int chain, int num) 30 int chain, int num)
@@ -43,7 +35,7 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
43 } 35 }
44 36
45 if (sg) 37 if (sg)
46 scatterwalk_sg_chain(head, num, sg); 38 sg_chain(head, num, sg);
47 else 39 else
48 sg_mark_end(head); 40 sg_mark_end(head);
49} 41}
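The open-coded chaining helper is dropped in favour of the generic sg_chain(); chaining an extra scatterlist now looks roughly like this (hypothetical sketch, not from this patch):

#include <linux/scatterlist.h>

static void example_chain_sg(struct scatterlist head[2],
			     struct scatterlist *extra,
			     void *buf, unsigned int len)
{
	sg_init_table(head, 2);
	sg_set_buf(&head[0], buf, len);
	/* The last entry of "head" becomes a link to the "extra" list. */
	sg_chain(head, 2, extra);
}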
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 07d245f073d1..d8dd41fb034f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Symmetric key ciphers. 2 * Symmetric key ciphers.
3 * 3 *
4 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free 7 * under the terms of the GNU General Public License as published by the Free
@@ -18,6 +18,28 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19 19
20/** 20/**
21 * struct skcipher_request - Symmetric key cipher request
22 * @cryptlen: Number of bytes to encrypt or decrypt
23 * @iv: Initialisation Vector
24 * @src: Source SG list
25 * @dst: Destination SG list
26 * @base: Underlying async request
27 * @__ctx: Start of private context data
28 */
29struct skcipher_request {
30 unsigned int cryptlen;
31
32 u8 *iv;
33
34 struct scatterlist *src;
35 struct scatterlist *dst;
36
37 struct crypto_async_request base;
38
39 void *__ctx[] CRYPTO_MINALIGN_ATTR;
40};
41
42/**
21 * struct skcipher_givcrypt_request - Crypto request with IV generation 43 * struct skcipher_givcrypt_request - Crypto request with IV generation
22 * @seq: Sequence number for IV generation 44 * @seq: Sequence number for IV generation
23 * @giv: Space for generated IV 45 * @giv: Space for generated IV
@@ -30,6 +52,23 @@ struct skcipher_givcrypt_request {
30 struct ablkcipher_request creq; 52 struct ablkcipher_request creq;
31}; 53};
32 54
55struct crypto_skcipher {
56 int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
57 unsigned int keylen);
58 int (*encrypt)(struct skcipher_request *req);
59 int (*decrypt)(struct skcipher_request *req);
60
61 unsigned int ivsize;
62 unsigned int reqsize;
63
64 struct crypto_tfm base;
65};
66
67#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
68 char __##name##_desc[sizeof(struct skcipher_request) + \
69 crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
70 struct skcipher_request *name = (void *)__##name##_desc
71
33static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( 72static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
34 struct skcipher_givcrypt_request *req) 73 struct skcipher_givcrypt_request *req)
35{ 74{
@@ -106,5 +145,355 @@ static inline void skcipher_givcrypt_set_giv(
106 req->seq = seq; 145 req->seq = seq;
107} 146}
108 147
148/**
149 * DOC: Symmetric Key Cipher API
150 *
151 * Symmetric key cipher API is used with the ciphers of type
152 * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
153 *
154 * Asynchronous cipher operations imply that the function invocation for a
155 * cipher request returns immediately, before the operation has completed.
156 * The cipher request is scheduled as a separate kernel thread and therefore
157 * load-balanced on the different CPUs via the process scheduler. To allow
158 * the kernel crypto API to inform the caller about the completion of a cipher
159 * request, the caller must provide a callback function. That function is
160 * invoked with the cipher handle when the request completes.
161 *
162 * To support the asynchronous operation, more information than just the
163 * cipher handle must be supplied to the kernel crypto API. That additional
164 * information is given by filling in the skcipher_request data structure.
165 *
166 * For the symmetric key cipher API, the state is maintained with the tfm
167 * cipher handle. A single tfm can be used across multiple calls and in
168 * parallel. For asynchronous block cipher calls, context data supplied and
169 * only used by the caller can be referenced in the request data structure,
170 * in addition to the IV used for the cipher request. Maintaining such
171 * state information is important for a crypto driver implementer,
172 * because when the callback function is invoked upon completion of the
173 * cipher operation, that callback function may need some information about
174 * which operation just finished if multiple were issued in parallel. This
175 * state information is unused by the kernel crypto API.
176 */
177
178static inline struct crypto_skcipher *__crypto_skcipher_cast(
179 struct crypto_tfm *tfm)
180{
181 return container_of(tfm, struct crypto_skcipher, base);
182}
183
184/**
185 * crypto_alloc_skcipher() - allocate symmetric key cipher handle
186 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
187 * skcipher cipher
188 * @type: specifies the type of the cipher
189 * @mask: specifies the mask for the cipher
190 *
191 * Allocate a cipher handle for an skcipher. The returned struct
192 * crypto_skcipher is the cipher handle that is required for any subsequent
193 * API invocation for that skcipher.
194 *
195 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
196 * of an error, PTR_ERR() returns the error code.
197 */
198struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
199 u32 type, u32 mask);
200
201static inline struct crypto_tfm *crypto_skcipher_tfm(
202 struct crypto_skcipher *tfm)
203{
204 return &tfm->base;
205}
206
207/**
208 * crypto_free_skcipher() - zeroize and free cipher handle
209 * @tfm: cipher handle to be freed
210 */
211static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
212{
213 crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
214}
215
216/**
217 * crypto_has_skcipher() - Search for the availability of an skcipher.
218 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
219 * skcipher
220 * @type: specifies the type of the cipher
221 * @mask: specifies the mask for the cipher
222 *
223 * Return: true when the skcipher is known to the kernel crypto API; false
224 * otherwise
225 */
226static inline int crypto_has_skcipher(const char *alg_name, u32 type,
227 u32 mask)
228{
229 return crypto_has_alg(alg_name, crypto_skcipher_type(type),
230 crypto_skcipher_mask(mask));
231}
232
233/**
234 * crypto_skcipher_ivsize() - obtain IV size
235 * @tfm: cipher handle
236 *
237 * The size of the IV for the skcipher referenced by the cipher handle is
238 * returned. This IV size may be zero if the cipher does not need an IV.
239 *
240 * Return: IV size in bytes
241 */
242static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
243{
244 return tfm->ivsize;
245}
246
247/**
248 * crypto_skcipher_blocksize() - obtain block size of cipher
249 * @tfm: cipher handle
250 *
251 * The block size for the skcipher referenced with the cipher handle is
252 * returned. The caller may use that information to allocate appropriate
253 * memory for the data returned by the encryption or decryption operation.
254 *
255 * Return: block size of cipher
256 */
257static inline unsigned int crypto_skcipher_blocksize(
258 struct crypto_skcipher *tfm)
259{
260 return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
261}
262
263static inline unsigned int crypto_skcipher_alignmask(
264 struct crypto_skcipher *tfm)
265{
266 return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
267}
268
269static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
270{
271 return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
272}
273
274static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
275 u32 flags)
276{
277 crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
278}
279
280static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
281 u32 flags)
282{
283 crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
284}
285
286/**
287 * crypto_skcipher_setkey() - set key for cipher
288 * @tfm: cipher handle
289 * @key: buffer holding the key
290 * @keylen: length of the key in bytes
291 *
292 * The caller provided key is set for the skcipher referenced by the cipher
293 * handle.
294 *
295 * Note, the key length selects the cipher variant. Many block ciphers come
296 * in several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
297 * providing a 16 byte key for an AES cipher handle, AES-128
298 * is performed.
299 *
300 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
301 */
302static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
303 const u8 *key, unsigned int keylen)
304{
305 return tfm->setkey(tfm, key, keylen);
306}
307
308/**
309 * crypto_skcipher_reqtfm() - obtain cipher handle from request
310 * @req: skcipher_request out of which the cipher handle is to be obtained
311 *
312 * Return the crypto_skcipher handle that was registered with the given
313 * skcipher_request data structure.
314 *
315 * Return: crypto_skcipher handle
316 */
317static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
318 struct skcipher_request *req)
319{
320 return __crypto_skcipher_cast(req->base.tfm);
321}
322
323/**
324 * crypto_skcipher_encrypt() - encrypt plaintext
325 * @req: reference to the skcipher_request handle that holds all information
326 * needed to perform the cipher operation
327 *
328 * Encrypt plaintext data using the skcipher_request handle. That data
329 * structure and how it is filled with data is discussed with the
330 * skcipher_request_* functions.
331 *
332 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
333 */
334static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
335{
336 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
337
338 return tfm->encrypt(req);
339}
340
341/**
342 * crypto_skcipher_decrypt() - decrypt ciphertext
343 * @req: reference to the skcipher_request handle that holds all information
344 * needed to perform the cipher operation
345 *
346 * Decrypt ciphertext data using the skcipher_request handle. That data
347 * structure and how it is filled with data is discussed with the
348 * skcipher_request_* functions.
349 *
350 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
351 */
352static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
353{
354 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
355
356 return tfm->decrypt(req);
357}
358
359/**
360 * DOC: Symmetric Key Cipher Request Handle
361 *
362 * The skcipher_request data structure contains all pointers to data
363 * required for the symmetric key cipher operation. This includes the cipher
364 * handle (which can be used by multiple skcipher_request instances), pointer
365 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
366 * as a handle to the skcipher_request_* API calls in a similar way as the
367 * skcipher handle does for the crypto_skcipher_* API calls.
368 */
369
370/**
371 * crypto_skcipher_reqsize() - obtain size of the request data structure
372 * @tfm: cipher handle
373 *
374 * Return: number of bytes
375 */
376static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
377{
378 return tfm->reqsize;
379}
380
381/**
382 * skcipher_request_set_tfm() - update cipher handle reference in request
383 * @req: request handle to be modified
384 * @tfm: cipher handle that shall be added to the request handle
385 *
386 * Allow the caller to replace the existing skcipher handle in the request
387 * data structure with a different one.
388 */
389static inline void skcipher_request_set_tfm(struct skcipher_request *req,
390 struct crypto_skcipher *tfm)
391{
392 req->base.tfm = crypto_skcipher_tfm(tfm);
393}
394
395static inline struct skcipher_request *skcipher_request_cast(
396 struct crypto_async_request *req)
397{
398 return container_of(req, struct skcipher_request, base);
399}
400
401/**
402 * skcipher_request_alloc() - allocate request data structure
403 * @tfm: cipher handle to be registered with the request
404 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
405 *
406 * Allocate the request data structure that must be used with the skcipher
407 * encrypt and decrypt API calls. During the allocation, the provided skcipher
408 * handle is registered in the request data structure.
409 *
410 * Return: allocated request handle in case of success, or NULL if the
411 * allocation failed
412 */
413static inline struct skcipher_request *skcipher_request_alloc(
414 struct crypto_skcipher *tfm, gfp_t gfp)
415{
416 struct skcipher_request *req;
417
418 req = kmalloc(sizeof(struct skcipher_request) +
419 crypto_skcipher_reqsize(tfm), gfp);
420
421 if (likely(req))
422 skcipher_request_set_tfm(req, tfm);
423
424 return req;
425}
426
427/**
428 * skcipher_request_free() - zeroize and free request data structure
429 * @req: request data structure cipher handle to be freed
430 */
431static inline void skcipher_request_free(struct skcipher_request *req)
432{
433 kzfree(req);
434}
435
436/**
437 * skcipher_request_set_callback() - set asynchronous callback function
438 * @req: request handle
439 * @flags: specify zero or an ORing of the flags
440 * CRYPTO_TFM_REQ_MAY_BACKLOG: the request queue may backlog and
441 * increase the wait queue beyond the initial maximum size;
442 * CRYPTO_TFM_REQ_MAY_SLEEP: the request processing may sleep
443 * @compl: callback function pointer to be registered with the request handle
444 * @data: The data pointer refers to memory that is not used by the kernel
445 * crypto API, but provided to the callback function for it to use. Here,
446 * the caller can provide a reference to memory the callback function can
447 * operate on. As the callback function is invoked asynchronously to the
448 * related functionality, it may need to access data structures of the
449 * related functionality which can be referenced using this pointer. The
450 * callback function can access the memory via the "data" field in the
451 * crypto_async_request data structure provided to the callback function.
452 *
453 * This function allows setting the callback function that is triggered once the
454 * cipher operation completes.
455 *
456 * The callback function is registered with the skcipher_request handle and
457 * must comply with the following template
458 *
459 * void callback_function(struct crypto_async_request *req, int error)
460 */
461static inline void skcipher_request_set_callback(struct skcipher_request *req,
462 u32 flags,
463 crypto_completion_t compl,
464 void *data)
465{
466 req->base.complete = compl;
467 req->base.data = data;
468 req->base.flags = flags;
469}
470
471/**
472 * skcipher_request_set_crypt() - set data buffers
473 * @req: request handle
474 * @src: source scatter / gather list
475 * @dst: destination scatter / gather list
476 * @cryptlen: number of bytes to process from @src
477 * @iv: IV for the cipher operation which must comply with the IV size defined
478 * by crypto_skcipher_ivsize
479 *
480 * This function allows setting of the source data and destination data
481 * scatter / gather lists.
482 *
483 * For encryption, the source is treated as the plaintext and the
484 * destination is the ciphertext. For a decryption operation, the use is
485 * reversed - the source is the ciphertext and the destination is the plaintext.
486 */
487static inline void skcipher_request_set_crypt(
488 struct skcipher_request *req,
489 struct scatterlist *src, struct scatterlist *dst,
490 unsigned int cryptlen, void *iv)
491{
492 req->src = src;
493 req->dst = dst;
494 req->cryptlen = cryptlen;
495 req->iv = iv;
496}
497
109#endif /* _CRYPTO_SKCIPHER_H */ 498#endif /* _CRYPTO_SKCIPHER_H */
110 499
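Putting the pieces together, a synchronous one-shot encryption with the new interface could look roughly like the sketch below (hypothetical helper, not from this patch; for "cbc(aes)" the buffer length must be a multiple of the block size):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, void *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int ret;

	/* Ask for a synchronous implementation so no callback is needed. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (!ret) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);
		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		ret = crypto_skcipher_encrypt(req);
	}

	crypto_free_skcipher(tfm);
	return ret;
}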
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index de13bfc35634..bae79f3c4d28 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -12,6 +12,8 @@
12 12
13#include <drm/drmP.h> 13#include <drm/drmP.h>
14 14
15struct dw_hdmi;
16
15enum { 17enum {
16 DW_HDMI_RES_8, 18 DW_HDMI_RES_8,
17 DW_HDMI_RES_10, 19 DW_HDMI_RES_10,
@@ -59,4 +61,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
59 void *data, struct drm_encoder *encoder, 61 void *data, struct drm_encoder *encoder,
60 struct resource *iores, int irq, 62 struct resource *iores, int irq,
61 const struct dw_hdmi_plat_data *plat_data); 63 const struct dw_hdmi_plat_data *plat_data);
64
65void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
66void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
67void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
68
62#endif /* __IMX_HDMI_H__ */ 69#endif /* __IMX_HDMI_H__ */
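A rough sketch of how an audio driver might drive the new entry points (hypothetical helpers; the hdmi pointer is assumed to come from the platform glue):

#include <drm/bridge/dw_hdmi.h>

static void example_hdmi_audio_start(struct dw_hdmi *hdmi, unsigned int rate)
{
	/* Program the sample rate first, then enable audio output. */
	dw_hdmi_set_sample_rate(hdmi, rate);
	dw_hdmi_audio_enable(hdmi);
}

static void example_hdmi_audio_stop(struct dw_hdmi *hdmi)
{
	dw_hdmi_audio_disable(hdmi);
}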
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5aa519711e0b..8b5ce7c5d9bb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -137,17 +137,18 @@ void drm_err(const char *format, ...);
137/*@{*/ 137/*@{*/
138 138
139/* driver capabilities and requirements mask */ 139/* driver capabilities and requirements mask */
140#define DRIVER_USE_AGP 0x1 140#define DRIVER_USE_AGP 0x1
141#define DRIVER_PCI_DMA 0x8 141#define DRIVER_PCI_DMA 0x8
142#define DRIVER_SG 0x10 142#define DRIVER_SG 0x10
143#define DRIVER_HAVE_DMA 0x20 143#define DRIVER_HAVE_DMA 0x20
144#define DRIVER_HAVE_IRQ 0x40 144#define DRIVER_HAVE_IRQ 0x40
145#define DRIVER_IRQ_SHARED 0x80 145#define DRIVER_IRQ_SHARED 0x80
146#define DRIVER_GEM 0x1000 146#define DRIVER_GEM 0x1000
147#define DRIVER_MODESET 0x2000 147#define DRIVER_MODESET 0x2000
148#define DRIVER_PRIME 0x4000 148#define DRIVER_PRIME 0x4000
149#define DRIVER_RENDER 0x8000 149#define DRIVER_RENDER 0x8000
150#define DRIVER_ATOMIC 0x10000 150#define DRIVER_ATOMIC 0x10000
151#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
151 152
152/***********************************************************************/ 153/***********************************************************************/
153/** \name Macros to make printk easier */ 154/** \name Macros to make printk easier */
@@ -675,13 +676,12 @@ struct drm_minor {
675 676
676 /* currently active master for this node. Protected by master_mutex */ 677 /* currently active master for this node. Protected by master_mutex */
677 struct drm_master *master; 678 struct drm_master *master;
678 struct drm_mode_group mode_group;
679}; 679};
680 680
681 681
682struct drm_pending_vblank_event { 682struct drm_pending_vblank_event {
683 struct drm_pending_event base; 683 struct drm_pending_event base;
684 int pipe; 684 unsigned int pipe;
685 struct drm_event_vblank event; 685 struct drm_event_vblank event;
686}; 686};
687 687
@@ -700,7 +700,7 @@ struct drm_vblank_crtc {
700 /* for wraparound handling */ 700 /* for wraparound handling */
701 u32 last_wait; /* Last vblank seqno waited per CRTC */ 701 u32 last_wait; /* Last vblank seqno waited per CRTC */
702 unsigned int inmodeset; /* Display driver is setting mode */ 702 unsigned int inmodeset; /* Display driver is setting mode */
703 int crtc; /* crtc index */ 703 unsigned int pipe; /* crtc index */
704 bool enabled; /* so we don't call enable more than 704 bool enabled; /* so we don't call enable more than
705 once per disable */ 705 once per disable */
706}; 706};
@@ -887,6 +887,7 @@ static inline bool drm_is_primary_client(const struct drm_file *file_priv)
887/*@{*/ 887/*@{*/
888 888
889 /* Driver support (drm_drv.h) */ 889 /* Driver support (drm_drv.h) */
890extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
890extern long drm_ioctl(struct file *filp, 891extern long drm_ioctl(struct file *filp,
891 unsigned int cmd, unsigned long arg); 892 unsigned int cmd, unsigned long arg);
892extern long drm_compat_ioctl(struct file *filp, 893extern long drm_compat_ioctl(struct file *filp,
@@ -920,34 +921,34 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
920extern int drm_irq_install(struct drm_device *dev, int irq); 921extern int drm_irq_install(struct drm_device *dev, int irq);
921extern int drm_irq_uninstall(struct drm_device *dev); 922extern int drm_irq_uninstall(struct drm_device *dev);
922 923
923extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 924extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
924extern int drm_wait_vblank(struct drm_device *dev, void *data, 925extern int drm_wait_vblank(struct drm_device *dev, void *data,
925 struct drm_file *filp); 926 struct drm_file *filp);
926extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 927extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
927extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc); 928extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
928extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, 929extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
929 struct timeval *vblanktime); 930 struct timeval *vblanktime);
930extern void drm_send_vblank_event(struct drm_device *dev, int crtc, 931extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
931 struct drm_pending_vblank_event *e); 932 struct drm_pending_vblank_event *e);
932extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 933extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
933 struct drm_pending_vblank_event *e); 934 struct drm_pending_vblank_event *e);
934extern bool drm_handle_vblank(struct drm_device *dev, int crtc); 935extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
935extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); 936extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
936extern int drm_vblank_get(struct drm_device *dev, int crtc); 937extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
937extern void drm_vblank_put(struct drm_device *dev, int crtc); 938extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
938extern int drm_crtc_vblank_get(struct drm_crtc *crtc); 939extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
939extern void drm_crtc_vblank_put(struct drm_crtc *crtc); 940extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
940extern void drm_wait_one_vblank(struct drm_device *dev, int crtc); 941extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
941extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); 942extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
942extern void drm_vblank_off(struct drm_device *dev, int crtc); 943extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
943extern void drm_vblank_on(struct drm_device *dev, int crtc); 944extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
944extern void drm_crtc_vblank_off(struct drm_crtc *crtc); 945extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
945extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); 946extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
946extern void drm_crtc_vblank_on(struct drm_crtc *crtc); 947extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
947extern void drm_vblank_cleanup(struct drm_device *dev); 948extern void drm_vblank_cleanup(struct drm_device *dev);
948 949
949extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 950extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
950 int crtc, int *max_error, 951 unsigned int pipe, int *max_error,
951 struct timeval *vblank_time, 952 struct timeval *vblank_time,
952 unsigned flags, 953 unsigned flags,
953 const struct drm_crtc *refcrtc, 954 const struct drm_crtc *refcrtc,
@@ -968,8 +969,8 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc
968} 969}
969 970
970/* Modesetting support */ 971/* Modesetting support */
971extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 972extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
972extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 973extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
973 974
974 /* Stub support (drm_stub.h) */ 975 /* Stub support (drm_stub.h) */
975extern struct drm_master *drm_master_get(struct drm_master *master); 976extern struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 8a3a913320eb..e67aeac2aee0 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -166,7 +166,8 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
166static inline bool 166static inline bool
167drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) 167drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
168{ 168{
169 return state->mode_changed || state->active_changed; 169 return state->mode_changed || state->active_changed ||
170 state->connectors_changed;
170} 171}
171 172
172 173
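Since connector routing changes now also count as a modeset, a driver atomic_check hook might use the predicate roughly like this (hypothetical sketch, not from this patch):

#include <drm/drmP.h>
#include <drm/drm_atomic.h>

static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_crtc_state *state)
{
	if (drm_atomic_crtc_needs_modeset(state)) {
		/* Mode, active state or connector routing changed. */
		DRM_DEBUG_KMS("[CRTC:%u] full modeset required\n",
			      crtc->base.id);
	}
	return 0;
}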
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index cc1fee8a12d0..11266d147a29 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -87,8 +87,8 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
87 struct drm_framebuffer *fb, 87 struct drm_framebuffer *fb,
88 struct drm_pending_vblank_event *event, 88 struct drm_pending_vblank_event *event,
89 uint32_t flags); 89 uint32_t flags);
90void drm_atomic_helper_connector_dpms(struct drm_connector *connector, 90int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
91 int mode); 91 int mode);
92 92
93/* default implementations for state handling */ 93/* default implementations for state handling */
94void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); 94void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3b4d8a4a23fb..faaeff7db684 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -255,12 +255,13 @@ struct drm_atomic_state;
255 * @crtc: backpointer to the CRTC 255 * @crtc: backpointer to the CRTC
256 * @enable: whether the CRTC should be enabled, gates all other state 256 * @enable: whether the CRTC should be enabled, gates all other state
257 * @active: whether the CRTC is actively displaying (used for DPMS) 257 * @active: whether the CRTC is actively displaying (used for DPMS)
258 * @mode_changed: for use by helpers and drivers when computing state updates 258 * @planes_changed: planes on this crtc are updated
259 * @active_changed: for use by helpers and drivers when computing state updates 259 * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
260 * @active_changed: crtc_state->active has been toggled.
261 * @connectors_changed: connectors to this crtc have been updated
260 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes 262 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
261 * @last_vblank_count: for helpers and drivers to capture the vblank of the 263 * @last_vblank_count: for helpers and drivers to capture the vblank of the
262 * update to ensure framebuffer cleanup isn't done too early 264 * update to ensure framebuffer cleanup isn't done too early
263 * @planes_changed: for use by helpers and drivers when computing state updates
264 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings 265 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
265 * @mode: current mode timings 266 * @mode: current mode timings
266 * @event: optional pointer to a DRM event to signal upon completion of the 267 * @event: optional pointer to a DRM event to signal upon completion of the
@@ -283,6 +284,7 @@ struct drm_crtc_state {
283 bool planes_changed : 1; 284 bool planes_changed : 1;
284 bool mode_changed : 1; 285 bool mode_changed : 1;
285 bool active_changed : 1; 286 bool active_changed : 1;
287 bool connectors_changed : 1;
286 288
287 /* attached planes bitmask: 289 /* attached planes bitmask:
288 * WARNING: transitional helpers do not maintain plane_mask so 290 * WARNING: transitional helpers do not maintain plane_mask so
@@ -525,7 +527,7 @@ struct drm_connector_state {
525 * etc. 527 * etc.
526 */ 528 */
527struct drm_connector_funcs { 529struct drm_connector_funcs {
528 void (*dpms)(struct drm_connector *connector, int mode); 530 int (*dpms)(struct drm_connector *connector, int mode);
529 void (*save)(struct drm_connector *connector); 531 void (*save)(struct drm_connector *connector);
530 void (*restore)(struct drm_connector *connector); 532 void (*restore)(struct drm_connector *connector);
531 void (*reset)(struct drm_connector *connector); 533 void (*reset)(struct drm_connector *connector);
@@ -861,7 +863,7 @@ struct drm_plane {
861 863
862 uint32_t possible_crtcs; 864 uint32_t possible_crtcs;
863 uint32_t *format_types; 865 uint32_t *format_types;
864 uint32_t format_count; 866 unsigned int format_count;
865 bool format_default; 867 bool format_default;
866 868
867 struct drm_crtc *crtc; 869 struct drm_crtc *crtc;
@@ -1016,29 +1018,6 @@ struct drm_mode_config_funcs {
1016}; 1018};
1017 1019
1018/** 1020/**
1019 * struct drm_mode_group - group of mode setting resources for potential sub-grouping
1020 * @num_crtcs: CRTC count
1021 * @num_encoders: encoder count
1022 * @num_connectors: connector count
1023 * @num_bridges: bridge count
1024 * @id_list: list of KMS object IDs in this group
1025 *
1026 * Currently this simply tracks the global mode setting state. But in the
1027 * future it could allow groups of objects to be set aside into independent
1028 * control groups for use by different user level processes (e.g. two X servers
1029 * running simultaneously on different heads, each with their own mode
1030 * configuration and freedom of mode setting).
1031 */
1032struct drm_mode_group {
1033 uint32_t num_crtcs;
1034 uint32_t num_encoders;
1035 uint32_t num_connectors;
1036
1037 /* list of object IDs for this group */
1038 uint32_t *id_list;
1039};
1040
1041/**
1042 * struct drm_mode_config - Mode configuration control structure 1021 * struct drm_mode_config - Mode configuration control structure
1043 * @mutex: mutex protecting KMS related lists and structures 1022 * @mutex: mutex protecting KMS related lists and structures
1044 * @connection_mutex: ww mutex protecting connector state and routing 1023 * @connection_mutex: ww mutex protecting connector state and routing
@@ -1289,13 +1268,13 @@ extern int drm_universal_plane_init(struct drm_device *dev,
1289 unsigned long possible_crtcs, 1268 unsigned long possible_crtcs,
1290 const struct drm_plane_funcs *funcs, 1269 const struct drm_plane_funcs *funcs,
1291 const uint32_t *formats, 1270 const uint32_t *formats,
1292 uint32_t format_count, 1271 unsigned int format_count,
1293 enum drm_plane_type type); 1272 enum drm_plane_type type);
1294extern int drm_plane_init(struct drm_device *dev, 1273extern int drm_plane_init(struct drm_device *dev,
1295 struct drm_plane *plane, 1274 struct drm_plane *plane,
1296 unsigned long possible_crtcs, 1275 unsigned long possible_crtcs,
1297 const struct drm_plane_funcs *funcs, 1276 const struct drm_plane_funcs *funcs,
1298 const uint32_t *formats, uint32_t format_count, 1277 const uint32_t *formats, unsigned int format_count,
1299 bool is_primary); 1278 bool is_primary);
1300extern void drm_plane_cleanup(struct drm_plane *plane); 1279extern void drm_plane_cleanup(struct drm_plane *plane);
1301extern unsigned int drm_plane_index(struct drm_plane *plane); 1280extern unsigned int drm_plane_index(struct drm_plane *plane);
@@ -1322,9 +1301,6 @@ extern const char *drm_get_tv_select_name(int val);
1322extern void drm_fb_release(struct drm_file *file_priv); 1301extern void drm_fb_release(struct drm_file *file_priv);
1323extern void drm_property_destroy_user_blobs(struct drm_device *dev, 1302extern void drm_property_destroy_user_blobs(struct drm_device *dev,
1324 struct drm_file *file_priv); 1303 struct drm_file *file_priv);
1325extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
1326extern void drm_mode_group_destroy(struct drm_mode_group *group);
1327extern void drm_reinit_primary_mode_group(struct drm_device *dev);
1328extern bool drm_probe_ddc(struct i2c_adapter *adapter); 1304extern bool drm_probe_ddc(struct i2c_adapter *adapter);
1329extern struct edid *drm_get_edid(struct drm_connector *connector, 1305extern struct edid *drm_get_edid(struct drm_connector *connector,
1330 struct i2c_adapter *adapter); 1306 struct i2c_adapter *adapter);
@@ -1577,8 +1553,45 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev,
1577} 1553}
1578 1554
1579/* Plane list iterator for legacy (overlay only) planes. */ 1555/* Plane list iterator for legacy (overlay only) planes. */
1580#define drm_for_each_legacy_plane(plane, planelist) \ 1556#define drm_for_each_legacy_plane(plane, dev) \
1581 list_for_each_entry(plane, planelist, head) \ 1557 list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
1582 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1558 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
1583 1559
1560#define drm_for_each_plane(plane, dev) \
1561 list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
1562
1563#define drm_for_each_crtc(crtc, dev) \
1564 list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
1565
1566static inline void
1567assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
1568{
1569 /*
1570 * The connector hotadd/remove code currently grabs both locks when
1571 * updating lists. Hence readers need only hold either of them to be
1572 * safe and the check amounts to
1573 *
1574 * WARN_ON(not_holding(A) && not_holding(B)).
1575 */
1576 WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
1577 !drm_modeset_is_locked(&mode_config->connection_mutex));
1578}
1579
1580#define drm_for_each_connector(connector, dev) \
1581 for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
1582 connector = list_first_entry(&(dev)->mode_config.connector_list, \
1583 struct drm_connector, head); \
1584 &connector->head != (&(dev)->mode_config.connector_list); \
1585 connector = list_next_entry(connector, head))
1586
1587#define drm_for_each_encoder(encoder, dev) \
1588 list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
1589
1590#define drm_for_each_fb(fb, dev) \
1591 for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
1592 fb = list_first_entry(&(dev)->mode_config.fb_list, \
1593 struct drm_framebuffer, head); \
1594 &fb->head != (&(dev)->mode_config.fb_list); \
1595 fb = list_next_entry(fb, head))
1596
1584#endif /* __DRM_CRTC_H__ */ 1597#endif /* __DRM_CRTC_H__ */
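The new iterators replace open-coded list walks; a minimal sketch of their use (hypothetical helper, not from this patch):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void example_dump_kms_objects(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_connector *connector;

	drm_for_each_crtc(crtc, dev)
		DRM_DEBUG_KMS("CRTC %u\n", crtc->base.id);

	/* Connector walks must hold mode_config.mutex or connection_mutex. */
	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev)
		DRM_DEBUG_KMS("connector %u\n", connector->base.id);
	mutex_unlock(&dev->mode_config.mutex);
}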
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 918aa68b5199..2a747a91fded 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -108,8 +108,10 @@ struct drm_crtc_helper_funcs {
108 /* atomic helpers */ 108 /* atomic helpers */
109 int (*atomic_check)(struct drm_crtc *crtc, 109 int (*atomic_check)(struct drm_crtc *crtc,
110 struct drm_crtc_state *state); 110 struct drm_crtc_state *state);
111 void (*atomic_begin)(struct drm_crtc *crtc); 111 void (*atomic_begin)(struct drm_crtc *crtc,
112 void (*atomic_flush)(struct drm_crtc *crtc); 112 struct drm_crtc_state *old_crtc_state);
113 void (*atomic_flush)(struct drm_crtc *crtc,
114 struct drm_crtc_state *old_crtc_state);
113}; 115};
114 116
115/** 117/**
@@ -190,7 +192,7 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
190extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); 192extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
191extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); 193extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
192 194
193extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); 195extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
194 196
195extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); 197extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
196 198
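With the dpms hooks now returning an error code, a legacy (non-atomic) driver can keep pointing at the helper unchanged; a minimal sketch of a connector funcs table (hypothetical, not from this patch):

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static const struct drm_connector_funcs example_connector_funcs = {
	/* drm_helper_connector_dpms() now propagates failures to callers. */
	.dpms = drm_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
};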
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2e86f642fc33..499e9f625aef 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -420,7 +420,7 @@
420 420
421#define DP_TEST_SINK_MISC 0x246 421#define DP_TEST_SINK_MISC 0x246
422# define DP_TEST_CRC_SUPPORTED (1 << 5) 422# define DP_TEST_CRC_SUPPORTED (1 << 5)
423# define DP_TEST_COUNT_MASK 0x7 423# define DP_TEST_COUNT_MASK 0xf
424 424
425#define DP_TEST_RESPONSE 0x260 425#define DP_TEST_RESPONSE 0x260
426# define DP_TEST_ACK (1 << 0) 426# define DP_TEST_ACK (1 << 0)
@@ -578,6 +578,7 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
578u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], 578u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
579 int lane); 579 int lane);
580 580
581#define DP_BRANCH_OUI_HEADER_SIZE 0xc
581#define DP_RECEIVER_CAP_SIZE 0xf 582#define DP_RECEIVER_CAP_SIZE 0xf
582#define EDP_PSR_RECEIVER_CAP_SIZE 2 583#define EDP_PSR_RECEIVER_CAP_SIZE 2
583 584
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 0dfd94def593..dbab4622b58f 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -122,6 +122,7 @@ struct drm_fb_helper {
122 bool delayed_hotplug; 122 bool delayed_hotplug;
123}; 123};
124 124
125#ifdef CONFIG_DRM_FBDEV_EMULATION
125void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, 126void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
126 const struct drm_fb_helper_funcs *funcs); 127 const struct drm_fb_helper_funcs *funcs);
127int drm_fb_helper_init(struct drm_device *dev, 128int drm_fb_helper_init(struct drm_device *dev,
@@ -136,11 +137,38 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
136 struct fb_info *info); 137 struct fb_info *info);
137 138
138bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); 139bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
140
141struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
142void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
143void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
139void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 144void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
140 uint32_t fb_width, uint32_t fb_height); 145 uint32_t fb_width, uint32_t fb_height);
141void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 146void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
142 uint32_t depth); 147 uint32_t depth);
143 148
149void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
150
151ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
152 size_t count, loff_t *ppos);
153ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
154 size_t count, loff_t *ppos);
155
156void drm_fb_helper_sys_fillrect(struct fb_info *info,
157 const struct fb_fillrect *rect);
158void drm_fb_helper_sys_copyarea(struct fb_info *info,
159 const struct fb_copyarea *area);
160void drm_fb_helper_sys_imageblit(struct fb_info *info,
161 const struct fb_image *image);
162
163void drm_fb_helper_cfb_fillrect(struct fb_info *info,
164 const struct fb_fillrect *rect);
165void drm_fb_helper_cfb_copyarea(struct fb_info *info,
166 const struct fb_copyarea *area);
167void drm_fb_helper_cfb_imageblit(struct fb_info *info,
168 const struct fb_image *image);
169
170void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
171
144int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); 172int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
145 173
146int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); 174int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
@@ -158,4 +186,188 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
158int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); 186int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
159int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, 187int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
160 struct drm_connector *connector); 188 struct drm_connector *connector);
189#else
190static inline void drm_fb_helper_prepare(struct drm_device *dev,
191 struct drm_fb_helper *helper,
192 const struct drm_fb_helper_funcs *funcs)
193{
194}
195
196static inline int drm_fb_helper_init(struct drm_device *dev,
197 struct drm_fb_helper *helper, int crtc_count,
198 int max_conn)
199{
200 return 0;
201}
202
203static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
204{
205}
206
207static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
208{
209 return 0;
210}
211
212static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
213 struct fb_info *info)
214{
215 return 0;
216}
217
218static inline int drm_fb_helper_set_par(struct fb_info *info)
219{
220 return 0;
221}
222
223static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
224 struct fb_info *info)
225{
226 return 0;
227}
228
229static inline bool
230drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
231{
232 return true;
233}
234
235static inline struct fb_info *
236drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
237{
238 return NULL;
239}
240
241static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
242{
243}
244static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
245{
246}
247
248static inline void drm_fb_helper_fill_var(struct fb_info *info,
249 struct drm_fb_helper *fb_helper,
250 uint32_t fb_width, uint32_t fb_height)
251{
252}
253
254static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
255 uint32_t depth)
256{
257}
258
259static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
260 struct fb_info *info)
261{
262 return 0;
263}
264
265static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
266{
267}
268
269static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
270 char __user *buf, size_t count,
271 loff_t *ppos)
272{
273 return -ENODEV;
274}
275
276static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
277 const char __user *buf,
278 size_t count, loff_t *ppos)
279{
280 return -ENODEV;
281}
282
283static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
284 const struct fb_fillrect *rect)
285{
286}
287
288static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
289 const struct fb_copyarea *area)
290{
291}
292
293static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
294 const struct fb_image *image)
295{
296}
297
298static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
299 const struct fb_fillrect *rect)
300{
301}
302
303static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
304 const struct fb_copyarea *area)
305{
306}
307
308static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
309 const struct fb_image *image)
310{
311}
312
313static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
314 int state)
315{
316}
317
318static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
319{
320 return 0;
321}
322
323static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
324 int bpp_sel)
325{
326 return 0;
327}
328
329static inline int
330drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
331{
332 return 0;
333}
334
335static inline int drm_fb_helper_debug_enter(struct fb_info *info)
336{
337 return 0;
338}
339
340static inline int drm_fb_helper_debug_leave(struct fb_info *info)
341{
342 return 0;
343}
344
345static inline struct drm_display_mode *
346drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
347 int width, int height)
348{
349 return NULL;
350}
351
352static inline struct drm_display_mode *
353drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
354 int width, int height)
355{
356 return NULL;
357}
358
359static inline int
360drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
361 struct drm_connector *connector)
362{
363 return 0;
364}
365
366static inline int
367drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
368 struct drm_connector *connector)
369{
370 return 0;
371}
372#endif
161#endif 373#endif
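A typical fbdev emulation backend can route its fb_ops through these wrappers, and the stubs above keep such code compiling when CONFIG_DRM_FBDEV_EMULATION is disabled; a minimal sketch (hypothetical, not from this patch):

#include <linux/fb.h>
#include <linux/module.h>
#include <drm/drm_fb_helper.h>

static struct fb_ops example_fbdev_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
	/* I/O-memory framebuffer: use the cfb_* variants. */
	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
};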
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 70595ff565ba..5dd18bfdf601 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,7 +130,6 @@ struct drm_crtc;
130struct drm_plane; 130struct drm_plane;
131 131
132void drm_modeset_lock_all(struct drm_device *dev); 132void drm_modeset_lock_all(struct drm_device *dev);
133int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
134void drm_modeset_unlock_all(struct drm_device *dev); 133void drm_modeset_unlock_all(struct drm_device *dev);
135void drm_modeset_lock_crtc(struct drm_crtc *crtc, 134void drm_modeset_lock_crtc(struct drm_crtc *crtc,
136 struct drm_plane *plane); 135 struct drm_plane *plane);
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 96e16283afb9..dda401bf910e 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,9 +43,8 @@
43 * planes. 43 * planes.
44 */ 44 */
45 45
46extern int drm_crtc_init(struct drm_device *dev, 46int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
47 struct drm_crtc *crtc, 47 const struct drm_crtc_funcs *funcs);
48 const struct drm_crtc_funcs *funcs);
49 48
50/** 49/**
51 * drm_plane_helper_funcs - helper operations for CRTCs 50 * drm_plane_helper_funcs - helper operations for CRTCs
@@ -79,26 +78,26 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
79 plane->helper_private = funcs; 78 plane->helper_private = funcs;
80} 79}
81 80
82extern int drm_plane_helper_check_update(struct drm_plane *plane, 81int drm_plane_helper_check_update(struct drm_plane *plane,
83 struct drm_crtc *crtc, 82 struct drm_crtc *crtc,
84 struct drm_framebuffer *fb, 83 struct drm_framebuffer *fb,
85 struct drm_rect *src, 84 struct drm_rect *src,
86 struct drm_rect *dest, 85 struct drm_rect *dest,
87 const struct drm_rect *clip, 86 const struct drm_rect *clip,
88 int min_scale, 87 int min_scale,
89 int max_scale, 88 int max_scale,
90 bool can_position, 89 bool can_position,
91 bool can_update_disabled, 90 bool can_update_disabled,
92 bool *visible); 91 bool *visible);
93extern int drm_primary_helper_update(struct drm_plane *plane, 92int drm_primary_helper_update(struct drm_plane *plane,
94 struct drm_crtc *crtc, 93 struct drm_crtc *crtc,
95 struct drm_framebuffer *fb, 94 struct drm_framebuffer *fb,
96 int crtc_x, int crtc_y, 95 int crtc_x, int crtc_y,
97 unsigned int crtc_w, unsigned int crtc_h, 96 unsigned int crtc_w, unsigned int crtc_h,
98 uint32_t src_x, uint32_t src_y, 97 uint32_t src_x, uint32_t src_y,
99 uint32_t src_w, uint32_t src_h); 98 uint32_t src_w, uint32_t src_h);
100extern int drm_primary_helper_disable(struct drm_plane *plane); 99int drm_primary_helper_disable(struct drm_plane *plane);
101extern void drm_primary_helper_destroy(struct drm_plane *plane); 100void drm_primary_helper_destroy(struct drm_plane *plane);
102extern const struct drm_plane_funcs drm_primary_helper_funcs; 101extern const struct drm_plane_funcs drm_primary_helper_funcs;
103 102
104int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, 103int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index c9a8b64aa33b..b2d56dd483d9 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -34,6 +34,17 @@ struct i915_audio_component {
34 void (*codec_wake_override)(struct device *, bool enable); 34 void (*codec_wake_override)(struct device *, bool enable);
35 int (*get_cdclk_freq)(struct device *); 35 int (*get_cdclk_freq)(struct device *);
36 } *ops; 36 } *ops;
37
38 const struct i915_audio_component_audio_ops {
39 void *audio_ptr;
40 /**
41 * Call from i915 driver, notifying the HDA driver that
42 * pin sense and/or ELD information has changed.
43 * @audio_ptr: HDA driver object
44 * @port: Which port has changed (PORTA / PORTB / PORTC etc)
45 */
46 void (*pin_eld_notify)(void *audio_ptr, int port);
47 } *audio_ops;
37}; 48};
38 49
39#endif /* _I915_COMPONENT_H_ */ 50#endif /* _I915_COMPONENT_H_ */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index b08bdade6002..9e9bddaa58a5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,8 +3,8 @@
3#ifndef _DRM_INTEL_GTT_H 3#ifndef _DRM_INTEL_GTT_H
4#define _DRM_INTEL_GTT_H 4#define _DRM_INTEL_GTT_H
5 5
6void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, 6void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
7 phys_addr_t *mappable_base, unsigned long *mappable_end); 7 phys_addr_t *mappable_base, u64 *mappable_end);
8 8
9int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, 9int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
10 struct agp_bridge_data *bridge); 10 struct agp_bridge_data *bridge);
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index aab088d30199..63d01c15d2b3 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -31,6 +31,7 @@
31#define CLK_FOUT_VPLL 4 31#define CLK_FOUT_VPLL 4
32#define CLK_FOUT_UPLL 5 32#define CLK_FOUT_UPLL 5
33#define CLK_FOUT_MPLL 6 33#define CLK_FOUT_MPLL 6
34#define CLK_ARM_CLK 7
34 35
35/* Muxes */ 36/* Muxes */
36#define CLK_MOUT_MPLL_USER_L 16 37#define CLK_MOUT_MPLL_USER_L 16
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index 4273891dc78e..8183d1c237d9 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -21,6 +21,7 @@
21#define CLK_FOUT_CPLL 6 21#define CLK_FOUT_CPLL 6
22#define CLK_FOUT_EPLL 7 22#define CLK_FOUT_EPLL 7
23#define CLK_FOUT_VPLL 8 23#define CLK_FOUT_VPLL 8
24#define CLK_ARM_CLK 9
24 25
25/* gate for special clocks (sclk) */ 26/* gate for special clocks (sclk) */
26#define CLK_SCLK_CAM_BAYER 128 27#define CLK_SCLK_CAM_BAYER 128
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 8780868458a0..8de173ff19f3 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -251,6 +251,9 @@
251#define IMX6QDL_CLK_VIDEO_27M 238 251#define IMX6QDL_CLK_VIDEO_27M 238
252#define IMX6QDL_CLK_MIPI_CORE_CFG 239 252#define IMX6QDL_CLK_MIPI_CORE_CFG 239
253#define IMX6QDL_CLK_MIPI_IPG 240 253#define IMX6QDL_CLK_MIPI_IPG 240
254#define IMX6QDL_CLK_END 241 254#define IMX6QDL_CLK_CAAM_MEM 241
255#define IMX6QDL_CLK_CAAM_ACLK 242
256#define IMX6QDL_CLK_CAAM_IPG 243
257#define IMX6QDL_CLK_END 244
255 258
256#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ 259#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
new file mode 100644
index 000000000000..c343894ce603
--- /dev/null
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -0,0 +1,240 @@
1/*
2 * Copyright (C) 2015 Freescale Semiconductor, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#ifndef __DT_BINDINGS_CLOCK_IMX6UL_H
11#define __DT_BINDINGS_CLOCK_IMX6UL_H
12
13#define IMX6UL_CLK_DUMMY 0
14#define IMX6UL_CLK_CKIL 1
15#define IMX6UL_CLK_CKIH 2
16#define IMX6UL_CLK_OSC 3
17#define IMX6UL_PLL1_BYPASS_SRC 4
18#define IMX6UL_PLL2_BYPASS_SRC 5
19#define IMX6UL_PLL3_BYPASS_SRC 6
20#define IMX6UL_PLL4_BYPASS_SRC 7
21#define IMX6UL_PLL5_BYPASS_SRC 8
22#define IMX6UL_PLL6_BYPASS_SRC 9
23#define IMX6UL_PLL7_BYPASS_SRC 10
24#define IMX6UL_CLK_PLL1 11
25#define IMX6UL_CLK_PLL2 12
26#define IMX6UL_CLK_PLL3 13
27#define IMX6UL_CLK_PLL4 14
28#define IMX6UL_CLK_PLL5 15
29#define IMX6UL_CLK_PLL6 16
30#define IMX6UL_CLK_PLL7 17
31#define IMX6UL_PLL1_BYPASS 18
32#define IMX6UL_PLL2_BYPASS 19
33#define IMX6UL_PLL3_BYPASS 20
34#define IMX6UL_PLL4_BYPASS 21
35#define IMX6UL_PLL5_BYPASS 22
36#define IMX6UL_PLL6_BYPASS 23
37#define IMX6UL_PLL7_BYPASS 24
38#define IMX6UL_CLK_PLL1_SYS 25
39#define IMX6UL_CLK_PLL2_BUS 26
40#define IMX6UL_CLK_PLL3_USB_OTG 27
41#define IMX6UL_CLK_PLL4_AUDIO 28
42#define IMX6UL_CLK_PLL5_VIDEO 29
43#define IMX6UL_CLK_PLL6_ENET 30
44#define IMX6UL_CLK_PLL7_USB_HOST 31
45#define IMX6UL_CLK_USBPHY1 32
46#define IMX6UL_CLK_USBPHY2 33
47#define IMX6UL_CLK_USBPHY1_GATE 34
48#define IMX6UL_CLK_USBPHY2_GATE 35
49#define IMX6UL_CLK_PLL2_PFD0 36
50#define IMX6UL_CLK_PLL2_PFD1 37
51#define IMX6UL_CLK_PLL2_PFD2 38
52#define IMX6UL_CLK_PLL2_PFD3 39
53#define IMX6UL_CLK_PLL3_PFD0 40
54#define IMX6UL_CLK_PLL3_PFD1 41
55#define IMX6UL_CLK_PLL3_PFD2 42
56#define IMX6UL_CLK_PLL3_PFD3 43
57#define IMX6UL_CLK_ENET_REF 44
58#define IMX6UL_CLK_ENET2_REF 45
59#define IMX6UL_CLK_ENET2_REF_125M 46
60#define IMX6UL_CLK_ENET_PTP_REF 47
61#define IMX6UL_CLK_ENET_PTP 48
62#define IMX6UL_CLK_PLL4_POST_DIV 49
63#define IMX6UL_CLK_PLL4_AUDIO_DIV 50
64#define IMX6UL_CLK_PLL5_POST_DIV 51
65#define IMX6UL_CLK_PLL5_VIDEO_DIV 52
66#define IMX6UL_CLK_PLL2_198M 53
67#define IMX6UL_CLK_PLL3_80M 54
68#define IMX6UL_CLK_PLL3_60M 55
69#define IMX6UL_CLK_STEP 56
70#define IMX6UL_CLK_PLL1_SW 57
71#define IMX6UL_CLK_AXI_ALT_SEL 58
72#define IMX6UL_CLK_AXI_SEL 59
73#define IMX6UL_CLK_PERIPH_PRE 60
74#define IMX6UL_CLK_PERIPH2_PRE 61
75#define IMX6UL_CLK_PERIPH_CLK2_SEL 62
76#define IMX6UL_CLK_PERIPH2_CLK2_SEL 63
77#define IMX6UL_CLK_USDHC1_SEL 64
78#define IMX6UL_CLK_USDHC2_SEL 65
79#define IMX6UL_CLK_BCH_SEL 66
80#define IMX6UL_CLK_GPMI_SEL 67
81#define IMX6UL_CLK_EIM_SLOW_SEL 68
82#define IMX6UL_CLK_SPDIF_SEL 69
83#define IMX6UL_CLK_SAI1_SEL 70
84#define IMX6UL_CLK_SAI2_SEL 71
85#define IMX6UL_CLK_SAI3_SEL 72
86#define IMX6UL_CLK_LCDIF_PRE_SEL 73
87#define IMX6UL_CLK_SIM_PRE_SEL 74
88#define IMX6UL_CLK_LDB_DI0_SEL 75
89#define IMX6UL_CLK_LDB_DI1_SEL 76
90#define IMX6UL_CLK_ENFC_SEL 77
91#define IMX6UL_CLK_CAN_SEL 78
92#define IMX6UL_CLK_ECSPI_SEL 79
93#define IMX6UL_CLK_UART_SEL 80
94#define IMX6UL_CLK_QSPI1_SEL 81
95#define IMX6UL_CLK_PERCLK_SEL 82
96#define IMX6UL_CLK_LCDIF_SEL 83
97#define IMX6UL_CLK_SIM_SEL 84
98#define IMX6UL_CLK_PERIPH 85
99#define IMX6UL_CLK_PERIPH2 86
100#define IMX6UL_CLK_LDB_DI0_DIV_3_5 87
101#define IMX6UL_CLK_LDB_DI0_DIV_7 88
102#define IMX6UL_CLK_LDB_DI1_DIV_3_5 89
103#define IMX6UL_CLK_LDB_DI1_DIV_7 90
104#define IMX6UL_CLK_LDB_DI0_DIV_SEL 91
105#define IMX6UL_CLK_LDB_DI1_DIV_SEL 92
106#define IMX6UL_CLK_ARM 93
107#define IMX6UL_CLK_PERIPH_CLK2 94
108#define IMX6UL_CLK_PERIPH2_CLK2 95
109#define IMX6UL_CLK_AHB 96
110#define IMX6UL_CLK_MMDC_PODF 97
111#define IMX6UL_CLK_AXI_PODF 98
112#define IMX6UL_CLK_PERCLK 99
113#define IMX6UL_CLK_IPG 100
114#define IMX6UL_CLK_USDHC1_PODF 101
115#define IMX6UL_CLK_USDHC2_PODF 102
116#define IMX6UL_CLK_BCH_PODF 103
117#define IMX6UL_CLK_GPMI_PODF 104
118#define IMX6UL_CLK_EIM_SLOW_PODF 105
119#define IMX6UL_CLK_SPDIF_PRED 106
120#define IMX6UL_CLK_SPDIF_PODF 107
121#define IMX6UL_CLK_SAI1_PRED 108
122#define IMX6UL_CLK_SAI1_PODF 109
123#define IMX6UL_CLK_SAI2_PRED 110
124#define IMX6UL_CLK_SAI2_PODF 111
125#define IMX6UL_CLK_SAI3_PRED 112
126#define IMX6UL_CLK_SAI3_PODF 113
127#define IMX6UL_CLK_LCDIF_PRED 114
128#define IMX6UL_CLK_LCDIF_PODF 115
129#define IMX6UL_CLK_SIM_PODF 116
130#define IMX6UL_CLK_QSPI1_PDOF 117
131#define IMX6UL_CLK_ENFC_PRED 118
132#define IMX6UL_CLK_ENFC_PODF 119
133#define IMX6UL_CLK_CAN_PODF 120
134#define IMX6UL_CLK_ECSPI_PODF 121
135#define IMX6UL_CLK_UART_PODF 122
136#define IMX6UL_CLK_ADC1 123
137#define IMX6UL_CLK_ADC2 124
138#define IMX6UL_CLK_AIPSTZ1 125
139#define IMX6UL_CLK_AIPSTZ2 126
140#define IMX6UL_CLK_AIPSTZ3 127
141#define IMX6UL_CLK_APBHDMA 128
142#define IMX6UL_CLK_ASRC_IPG 129
143#define IMX6UL_CLK_ASRC_MEM 130
144#define IMX6UL_CLK_GPMI_BCH_APB 131
145#define IMX6UL_CLK_GPMI_BCH 132
146#define IMX6UL_CLK_GPMI_IO 133
147#define IMX6UL_CLK_GPMI_APB 134
148#define IMX6UL_CLK_CAAM_MEM 135
149#define IMX6UL_CLK_CAAM_ACLK 136
150#define IMX6UL_CLK_CAAM_IPG 137
151#define IMX6UL_CLK_CSI 138
152#define IMX6UL_CLK_ECSPI1 139
153#define IMX6UL_CLK_ECSPI2 140
154#define IMX6UL_CLK_ECSPI3 141
155#define IMX6UL_CLK_ECSPI4 142
156#define IMX6UL_CLK_EIM 143
157#define IMX6UL_CLK_ENET 144
158#define IMX6UL_CLK_ENET_AHB 145
159#define IMX6UL_CLK_EPIT1 146
160#define IMX6UL_CLK_EPIT2 147
161#define IMX6UL_CLK_CAN1_IPG 148
162#define IMX6UL_CLK_CAN1_SERIAL 149
163#define IMX6UL_CLK_CAN2_IPG 150
164#define IMX6UL_CLK_CAN2_SERIAL 151
165#define IMX6UL_CLK_GPT1_BUS 152
166#define IMX6UL_CLK_GPT1_SERIAL 153
167#define IMX6UL_CLK_GPT2_BUS 154
168#define IMX6UL_CLK_GPT2_SERIAL 155
169#define IMX6UL_CLK_I2C1 156
170#define IMX6UL_CLK_I2C2 157
171#define IMX6UL_CLK_I2C3 158
172#define IMX6UL_CLK_I2C4 159
173#define IMX6UL_CLK_IOMUXC 160
174#define IMX6UL_CLK_LCDIF_APB 161
175#define IMX6UL_CLK_LCDIF_PIX 162
176#define IMX6UL_CLK_MMDC_P0_FAST 163
177#define IMX6UL_CLK_MMDC_P0_IPG 164
178#define IMX6UL_CLK_OCOTP 165
179#define IMX6UL_CLK_OCRAM 166
180#define IMX6UL_CLK_PWM1 167
181#define IMX6UL_CLK_PWM2 168
182#define IMX6UL_CLK_PWM3 169
183#define IMX6UL_CLK_PWM4 170
184#define IMX6UL_CLK_PWM5 171
185#define IMX6UL_CLK_PWM6 172
186#define IMX6UL_CLK_PWM7 173
187#define IMX6UL_CLK_PWM8 174
188#define IMX6UL_CLK_PXP 175
189#define IMX6UL_CLK_QSPI 176
190#define IMX6UL_CLK_ROM 177
191#define IMX6UL_CLK_SAI1 178
192#define IMX6UL_CLK_SAI1_IPG 179
193#define IMX6UL_CLK_SAI2 180
194#define IMX6UL_CLK_SAI2_IPG 181
195#define IMX6UL_CLK_SAI3 182
196#define IMX6UL_CLK_SAI3_IPG 183
197#define IMX6UL_CLK_SDMA 184
198#define IMX6UL_CLK_SIM 185
199#define IMX6UL_CLK_SIM_S 186
200#define IMX6UL_CLK_SPBA 187
201#define IMX6UL_CLK_SPDIF 188
202#define IMX6UL_CLK_UART1_IPG 189
203#define IMX6UL_CLK_UART1_SERIAL 190
204#define IMX6UL_CLK_UART2_IPG 191
205#define IMX6UL_CLK_UART2_SERIAL 192
206#define IMX6UL_CLK_UART3_IPG 193
207#define IMX6UL_CLK_UART3_SERIAL 194
208#define IMX6UL_CLK_UART4_IPG 195
209#define IMX6UL_CLK_UART4_SERIAL 196
210#define IMX6UL_CLK_UART5_IPG 197
211#define IMX6UL_CLK_UART5_SERIAL 198
212#define IMX6UL_CLK_UART6_IPG 199
213#define IMX6UL_CLK_UART6_SERIAL 200
214#define IMX6UL_CLK_UART7_IPG 201
215#define IMX6UL_CLK_UART7_SERIAL 202
216#define IMX6UL_CLK_UART8_IPG 203
217#define IMX6UL_CLK_UART8_SERIAL 204
218#define IMX6UL_CLK_USBOH3 205
219#define IMX6UL_CLK_USDHC1 206
220#define IMX6UL_CLK_USDHC2 207
221#define IMX6UL_CLK_WDOG1 208
222#define IMX6UL_CLK_WDOG2 209
223#define IMX6UL_CLK_WDOG3 210
224#define IMX6UL_CLK_LDB_DI0 211
225#define IMX6UL_CLK_AXI 212
226#define IMX6UL_CLK_SPDIF_GCLK 213
227#define IMX6UL_CLK_GPT_3M 214
228#define IMX6UL_CLK_SIM2 215
229#define IMX6UL_CLK_SIM1 216
230#define IMX6UL_CLK_IPP_DI0 217
231#define IMX6UL_CLK_IPP_DI1 218
232#define IMX6UL_CA7_SECONDARY_SEL 219
233#define IMX6UL_CLK_PER_BCH 220
234#define IMX6UL_CLK_CSI_SEL 221
235#define IMX6UL_CLK_CSI_PODF 222
236#define IMX6UL_CLK_PLL3_120M 223
237
238#define IMX6UL_CLK_END 224
239
240#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
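
A minimal consumer sketch for the new i.MX6UL clock IDs, assuming the usual &clks clock-controller label and the "ipg"/"per" clock-names of the i.MX UART binding; the address is a placeholder:

#include <dt-bindings/clock/imx6ul-clock.h>

uart1: serial@2020000 {
        compatible = "fsl,imx6ul-uart", "fsl,imx6q-uart";
        reg = <0x02020000 0x4000>;              /* placeholder */
        clocks = <&clks IMX6UL_CLK_UART1_IPG>,
                 <&clks IMX6UL_CLK_UART1_SERIAL>;
        clock-names = "ipg", "per";
};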
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index ff7ca3584e16..7b1ad8922eec 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -108,6 +108,7 @@
108#define R8A7790_CLK_VIN2 9 108#define R8A7790_CLK_VIN2 9
109#define R8A7790_CLK_VIN1 10 109#define R8A7790_CLK_VIN1 10
110#define R8A7790_CLK_VIN0 11 110#define R8A7790_CLK_VIN0 11
111#define R8A7790_CLK_ETHERAVB 12
111#define R8A7790_CLK_ETHER 13 112#define R8A7790_CLK_ETHER 13
112#define R8A7790_CLK_SATA1 14 113#define R8A7790_CLK_SATA1 14
113#define R8A7790_CLK_SATA0 15 114#define R8A7790_CLK_SATA0 15
@@ -143,6 +144,8 @@
143#define R8A7790_CLK_SCU_ALL 17 144#define R8A7790_CLK_SCU_ALL 17
144#define R8A7790_CLK_SCU_DVC1 18 145#define R8A7790_CLK_SCU_DVC1 18
145#define R8A7790_CLK_SCU_DVC0 19 146#define R8A7790_CLK_SCU_DVC0 19
147#define R8A7790_CLK_SCU_CTU1_MIX1 20
148#define R8A7790_CLK_SCU_CTU0_MIX0 21
146#define R8A7790_CLK_SCU_SRC9 22 149#define R8A7790_CLK_SCU_SRC9 22
147#define R8A7790_CLK_SCU_SRC8 23 150#define R8A7790_CLK_SCU_SRC8 23
148#define R8A7790_CLK_SCU_SRC7 24 151#define R8A7790_CLK_SCU_SRC7 24
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index 402268384b99..dd09b73c4aaf 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -141,6 +141,8 @@
141#define R8A7791_CLK_SCU_ALL 17 141#define R8A7791_CLK_SCU_ALL 17
142#define R8A7791_CLK_SCU_DVC1 18 142#define R8A7791_CLK_SCU_DVC1 18
143#define R8A7791_CLK_SCU_DVC0 19 143#define R8A7791_CLK_SCU_DVC0 19
144#define R8A7791_CLK_SCU_CTU1_MIX1 20
145#define R8A7791_CLK_SCU_CTU0_MIX0 21
144#define R8A7791_CLK_SCU_SRC9 22 146#define R8A7791_CLK_SCU_SRC9 22
145#define R8A7791_CLK_SCU_SRC8 23 147#define R8A7791_CLK_SCU_SRC8 23
146#define R8A7791_CLK_SCU_SRC7 24 148#define R8A7791_CLK_SCU_SRC7 24
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
new file mode 100644
index 000000000000..1579e07f96a3
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -0,0 +1,164 @@
1/*
2 * r8a7793 clock definition
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__
17#define __DT_BINDINGS_CLOCK_R8A7793_H__
18
19/* CPG */
20#define R8A7793_CLK_MAIN 0
21#define R8A7793_CLK_PLL0 1
22#define R8A7793_CLK_PLL1 2
23#define R8A7793_CLK_PLL3 3
24#define R8A7793_CLK_LB 4
25#define R8A7793_CLK_QSPI 5
26#define R8A7793_CLK_SDH 6
27#define R8A7793_CLK_SD0 7
28#define R8A7793_CLK_Z 8
29#define R8A7793_CLK_RCAN 9
30#define R8A7793_CLK_ADSP 10
31
32/* MSTP0 */
33#define R8A7793_CLK_MSIOF0 0
34
35/* MSTP1 */
36#define R8A7793_CLK_VCP0 1
37#define R8A7793_CLK_VPC0 3
38#define R8A7793_CLK_SSP1 9
39#define R8A7793_CLK_TMU1 11
40#define R8A7793_CLK_3DG 12
41#define R8A7793_CLK_2DDMAC 15
42#define R8A7793_CLK_FDP1_1 18
43#define R8A7793_CLK_FDP1_0 19
44#define R8A7793_CLK_TMU3 21
45#define R8A7793_CLK_TMU2 22
46#define R8A7793_CLK_CMT0 24
47#define R8A7793_CLK_TMU0 25
48#define R8A7793_CLK_VSP1_DU1 27
49#define R8A7793_CLK_VSP1_DU0 28
50#define R8A7793_CLK_VSP1_S 31
51
52/* MSTP2 */
53#define R8A7793_CLK_SCIFA2 2
54#define R8A7793_CLK_SCIFA1 3
55#define R8A7793_CLK_SCIFA0 4
56#define R8A7793_CLK_MSIOF2 5
57#define R8A7793_CLK_SCIFB0 6
58#define R8A7793_CLK_SCIFB1 7
59#define R8A7793_CLK_MSIOF1 8
60#define R8A7793_CLK_SCIFB2 16
61#define R8A7793_CLK_SYS_DMAC1 18
62#define R8A7793_CLK_SYS_DMAC0 19
63
64/* MSTP3 */
65#define R8A7793_CLK_TPU0 4
66#define R8A7793_CLK_SDHI2 11
67#define R8A7793_CLK_SDHI1 12
68#define R8A7793_CLK_SDHI0 14
69#define R8A7793_CLK_MMCIF0 15
70#define R8A7793_CLK_IIC0 18
71#define R8A7793_CLK_PCIEC 19
72#define R8A7793_CLK_IIC1 23
73#define R8A7793_CLK_SSUSB 28
74#define R8A7793_CLK_CMT1 29
75#define R8A7793_CLK_USBDMAC0 30
76#define R8A7793_CLK_USBDMAC1 31
77
78/* MSTP4 */
79#define R8A7793_CLK_IRQC 7
80
81/* MSTP5 */
82#define R8A7793_CLK_AUDIO_DMAC1 1
83#define R8A7793_CLK_AUDIO_DMAC0 2
84#define R8A7793_CLK_ADSP_MOD 6
85#define R8A7793_CLK_THERMAL 22
86#define R8A7793_CLK_PWM 23
87
88/* MSTP7 */
89#define R8A7793_CLK_EHCI 3
90#define R8A7793_CLK_HSUSB 4
91#define R8A7793_CLK_HSCIF2 13
92#define R8A7793_CLK_SCIF5 14
93#define R8A7793_CLK_SCIF4 15
94#define R8A7793_CLK_HSCIF1 16
95#define R8A7793_CLK_HSCIF0 17
96#define R8A7793_CLK_SCIF3 18
97#define R8A7793_CLK_SCIF2 19
98#define R8A7793_CLK_SCIF1 20
99#define R8A7793_CLK_SCIF0 21
100#define R8A7793_CLK_DU1 23
101#define R8A7793_CLK_DU0 24
102#define R8A7793_CLK_LVDS0 26
103
104/* MSTP8 */
105#define R8A7793_CLK_IPMMU_SGX 0
106#define R8A7793_CLK_VIN2 9
107#define R8A7793_CLK_VIN1 10
108#define R8A7793_CLK_VIN0 11
109#define R8A7793_CLK_ETHER 13
110#define R8A7793_CLK_SATA1 14
111#define R8A7793_CLK_SATA0 15
112
113/* MSTP9 */
114#define R8A7793_CLK_GPIO7 4
115#define R8A7793_CLK_GPIO6 5
116#define R8A7793_CLK_GPIO5 7
117#define R8A7793_CLK_GPIO4 8
118#define R8A7793_CLK_GPIO3 9
119#define R8A7793_CLK_GPIO2 10
120#define R8A7793_CLK_GPIO1 11
121#define R8A7793_CLK_GPIO0 12
122#define R8A7793_CLK_RCAN1 15
123#define R8A7793_CLK_RCAN0 16
124#define R8A7793_CLK_QSPI_MOD 17
125#define R8A7793_CLK_I2C5 25
126#define R8A7793_CLK_IICDVFS 26
127#define R8A7793_CLK_I2C4 27
128#define R8A7793_CLK_I2C3 28
129#define R8A7793_CLK_I2C2 29
130#define R8A7793_CLK_I2C1 30
131#define R8A7793_CLK_I2C0 31
132
133/* MSTP10 */
134#define R8A7793_CLK_SSI_ALL 5
135#define R8A7793_CLK_SSI9 6
136#define R8A7793_CLK_SSI8 7
137#define R8A7793_CLK_SSI7 8
138#define R8A7793_CLK_SSI6 9
139#define R8A7793_CLK_SSI5 10
140#define R8A7793_CLK_SSI4 11
141#define R8A7793_CLK_SSI3 12
142#define R8A7793_CLK_SSI2 13
143#define R8A7793_CLK_SSI1 14
144#define R8A7793_CLK_SSI0 15
145#define R8A7793_CLK_SCU_ALL 17
146#define R8A7793_CLK_SCU_DVC1 18
147#define R8A7793_CLK_SCU_DVC0 19
148#define R8A7793_CLK_SCU_SRC9 22
149#define R8A7793_CLK_SCU_SRC8 23
150#define R8A7793_CLK_SCU_SRC7 24
151#define R8A7793_CLK_SCU_SRC6 25
152#define R8A7793_CLK_SCU_SRC5 26
153#define R8A7793_CLK_SCU_SRC4 27
154#define R8A7793_CLK_SCU_SRC3 28
155#define R8A7793_CLK_SCU_SRC2 29
156#define R8A7793_CLK_SCU_SRC1 30
157#define R8A7793_CLK_SCU_SRC0 31
158
159/* MSTP11 */
160#define R8A7793_CLK_SCIFA3 6
161#define R8A7793_CLK_SCIFA4 7
162#define R8A7793_CLK_SCIFA5 8
163
164#endif /* __DT_BINDINGS_CLOCK_R8A7793_H__ */
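
A minimal consumer sketch for the new r8a7793 MSTP clock IDs, assuming the usual R-Car Gen2 per-register MSTP clock nodes (labelled &mstp7_clks here) and a placeholder SCIF node:

#include <dt-bindings/clock/r8a7793-clock.h>

scif0: serial@e6e60000 {
        compatible = "renesas,scif-r8a7793", "renesas,scif";
        reg = <0xe6e60000 64>;                          /* placeholder */
        clocks = <&mstp7_clks R8A7793_CLK_SCIF0>;       /* MSTP7 bit 21 */
};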
diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h
index bc1ed1dbd855..d3a9824ef646 100644
--- a/include/dt-bindings/clock/rk3066a-cru.h
+++ b/include/dt-bindings/clock/rk3066a-cru.h
@@ -13,6 +13,9 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H
17#define _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H
18
16#include <dt-bindings/clock/rk3188-cru-common.h> 19#include <dt-bindings/clock/rk3188-cru-common.h>
17 20
18/* soft-reset indices */ 21/* soft-reset indices */
@@ -33,3 +36,5 @@
33#define SRST_HDMI 96 36#define SRST_HDMI 96
34#define SRST_HDMI_APB 97 37#define SRST_HDMI_APB 97
35#define SRST_CIF1 111 38#define SRST_CIF1 111
39
40#endif
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index 6a370503c954..8df77a7c030b 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -13,6 +13,9 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H
17#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H
18
16/* core clocks from */ 19/* core clocks from */
17#define PLL_APLL 1 20#define PLL_APLL 1
18#define PLL_DPLL 2 21#define PLL_DPLL 2
@@ -248,3 +251,5 @@
248#define SRST_PTM1_ATB 141 251#define SRST_PTM1_ATB 141
249#define SRST_CTM 142 252#define SRST_CTM 142
250#define SRST_TS 143 253#define SRST_TS 143
254
255#endif
diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h
index 9fac8edd3f9d..9f2e631f2651 100644
--- a/include/dt-bindings/clock/rk3188-cru.h
+++ b/include/dt-bindings/clock/rk3188-cru.h
@@ -13,6 +13,9 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H
17#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H
18
16#include <dt-bindings/clock/rk3188-cru-common.h> 19#include <dt-bindings/clock/rk3188-cru-common.h>
17 20
18/* soft-reset indices */ 21/* soft-reset indices */
@@ -49,3 +52,5 @@
49#define SRST_GPU_BRIDGE 121 52#define SRST_GPU_BRIDGE 121
50#define SRST_CTI3 123 53#define SRST_CTI3 123
51#define SRST_CTI3_APB 124 54#define SRST_CTI3_APB 124
55
56#endif
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index dea419708d73..c719aacef14f 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -13,6 +13,9 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H
17#define _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H
18
16/* core clocks */ 19/* core clocks */
17#define PLL_APLL 1 20#define PLL_APLL 1
18#define PLL_DPLL 2 21#define PLL_DPLL 2
@@ -376,3 +379,5 @@
376#define SRST_TSP_CLKIN0 189 379#define SRST_TSP_CLKIN0 189
377#define SRST_TSP_CLKIN1 190 380#define SRST_TSP_CLKIN1 190
378#define SRST_TSP_27M 191 381#define SRST_TSP_27M 191
382
383#endif
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
new file mode 100644
index 000000000000..9c5dd9ba2f6c
--- /dev/null
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -0,0 +1,384 @@
1/*
2 * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H
16#define _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H
17
18/* core clocks */
19#define PLL_APLLB 1
20#define PLL_APLLL 2
21#define PLL_DPLL 3
22#define PLL_CPLL 4
23#define PLL_GPLL 5
24#define PLL_NPLL 6
25#define ARMCLKB 7
26#define ARMCLKL 8
27
28/* sclk gates (special clocks) */
29#define SCLK_GPU_CORE 64
30#define SCLK_SPI0 65
31#define SCLK_SPI1 66
32#define SCLK_SPI2 67
33#define SCLK_SDMMC 68
34#define SCLK_SDIO0 69
35#define SCLK_EMMC 71
36#define SCLK_TSADC 72
37#define SCLK_SARADC 73
38#define SCLK_NANDC0 75
39#define SCLK_UART0 77
40#define SCLK_UART1 78
41#define SCLK_UART2 79
42#define SCLK_UART3 80
43#define SCLK_UART4 81
44#define SCLK_I2S_8CH 82
45#define SCLK_SPDIF_8CH 83
46#define SCLK_I2S_2CH 84
47#define SCLK_TIMER0 85
48#define SCLK_TIMER1 86
49#define SCLK_TIMER2 87
50#define SCLK_TIMER3 88
51#define SCLK_TIMER4 89
52#define SCLK_TIMER5 90
53#define SCLK_TIMER6 91
54#define SCLK_OTGPHY0 93
55#define SCLK_OTG_ADP 96
56#define SCLK_HSICPHY480M 97
57#define SCLK_HSICPHY12M 98
58#define SCLK_MACREF 99
59#define SCLK_VOP0_PWM 100
60#define SCLK_MAC_RX 102
61#define SCLK_MAC_TX 103
62#define SCLK_EDP_24M 104
63#define SCLK_EDP 105
64#define SCLK_RGA 106
65#define SCLK_ISP 107
66#define SCLK_HDCP 108
67#define SCLK_HDMI_HDCP 109
68#define SCLK_HDMI_CEC 110
69#define SCLK_HEVC_CABAC 111
70#define SCLK_HEVC_CORE 112
71#define SCLK_I2S_8CH_OUT 113
72#define SCLK_SDMMC_DRV 114
73#define SCLK_SDIO0_DRV 115
74#define SCLK_EMMC_DRV 117
75#define SCLK_SDMMC_SAMPLE 118
76#define SCLK_SDIO0_SAMPLE 119
77#define SCLK_EMMC_SAMPLE 121
78#define SCLK_USBPHY480M 122
79#define SCLK_PVTM_CORE 123
80#define SCLK_PVTM_GPU 124
81#define SCLK_PVTM_PMU 125
82#define SCLK_SFC 126
83#define SCLK_MAC 127
84#define SCLK_MACREF_OUT 128
85
86#define DCLK_VOP 190
87#define MCLK_CRYPTO 191
88
89/* aclk gates */
90#define ACLK_GPU_MEM 192
91#define ACLK_GPU_CFG 193
92#define ACLK_DMAC_BUS 194
93#define ACLK_DMAC_PERI 195
94#define ACLK_PERI_MMU 196
95#define ACLK_GMAC 197
96#define ACLK_VOP 198
97#define ACLK_VOP_IEP 199
98#define ACLK_RGA 200
99#define ACLK_HDCP 201
100#define ACLK_IEP 202
101#define ACLK_VIO0_NOC 203
102#define ACLK_VIP 204
103#define ACLK_ISP 205
104#define ACLK_VIO1_NOC 206
105#define ACLK_VIDEO 208
106#define ACLK_BUS 209
107#define ACLK_PERI 210
108
109/* pclk gates */
110#define PCLK_GPIO0 320
111#define PCLK_GPIO1 321
112#define PCLK_GPIO2 322
113#define PCLK_GPIO3 323
114#define PCLK_PMUGRF 324
115#define PCLK_MAILBOX 325
116#define PCLK_GRF 329
117#define PCLK_SGRF 330
118#define PCLK_PMU 331
119#define PCLK_I2C0 332
120#define PCLK_I2C1 333
121#define PCLK_I2C2 334
122#define PCLK_I2C3 335
123#define PCLK_I2C4 336
124#define PCLK_I2C5 337
125#define PCLK_SPI0 338
126#define PCLK_SPI1 339
127#define PCLK_SPI2 340
128#define PCLK_UART0 341
129#define PCLK_UART1 342
130#define PCLK_UART2 343
131#define PCLK_UART3 344
132#define PCLK_UART4 345
133#define PCLK_TSADC 346
134#define PCLK_SARADC 347
135#define PCLK_SIM 348
136#define PCLK_GMAC 349
137#define PCLK_PWM0 350
138#define PCLK_PWM1 351
139#define PCLK_TIMER0 353
140#define PCLK_TIMER1 354
141#define PCLK_EDP_CTRL 355
142#define PCLK_MIPI_DSI0 356
143#define PCLK_MIPI_CSI 358
144#define PCLK_HDCP 359
145#define PCLK_HDMI_CTRL 360
146#define PCLK_VIO_H2P 361
147#define PCLK_BUS 362
148#define PCLK_PERI 363
149#define PCLK_DDRUPCTL 364
150#define PCLK_DDRPHY 365
151#define PCLK_ISP 366
152#define PCLK_VIP 367
153#define PCLK_WDT 368
154
155/* hclk gates */
156#define HCLK_SFC 448
157#define HCLK_OTG0 449
158#define HCLK_HOST0 450
159#define HCLK_HOST1 451
160#define HCLK_HSIC 452
161#define HCLK_NANDC0 453
162#define HCLK_TSP 455
163#define HCLK_SDMMC 456
164#define HCLK_SDIO0 457
165#define HCLK_EMMC 459
166#define HCLK_HSADC 460
167#define HCLK_CRYPTO 461
168#define HCLK_I2S_2CH 462
169#define HCLK_I2S_8CH 463
170#define HCLK_SPDIF 464
171#define HCLK_VOP 465
172#define HCLK_ROM 467
173#define HCLK_IEP 468
174#define HCLK_ISP 469
175#define HCLK_RGA 470
176#define HCLK_VIO_AHB_ARBI 471
177#define HCLK_VIO_NOC 472
178#define HCLK_VIP 473
179#define HCLK_VIO_H2P 474
180#define HCLK_VIO_HDCPMMU 475
181#define HCLK_VIDEO 476
182#define HCLK_BUS 477
183#define HCLK_PERI 478
184
185#define CLK_NR_CLKS (HCLK_PERI + 1)
186
187/* soft-reset indices */
188#define SRST_CORE_B0 0
189#define SRST_CORE_B1 1
190#define SRST_CORE_B2 2
191#define SRST_CORE_B3 3
192#define SRST_CORE_B0_PO 4
193#define SRST_CORE_B1_PO 5
194#define SRST_CORE_B2_PO 6
195#define SRST_CORE_B3_PO 7
196#define SRST_L2_B 8
197#define SRST_ADB_B 9
198#define SRST_PD_CORE_B_NIU 10
199#define SRST_PDBUS_STRSYS 11
200#define SRST_SOCDBG_B 14
201#define SRST_CORE_B_DBG 15
202
203#define SRST_DMAC1 18
204#define SRST_INTMEM 19
205#define SRST_ROM 20
206#define SRST_SPDIF8CH 21
207#define SRST_I2S8CH 23
208#define SRST_MAILBOX 24
209#define SRST_I2S2CH 25
210#define SRST_EFUSE_256 26
211#define SRST_MCU_SYS 28
212#define SRST_MCU_PO 29
213#define SRST_MCU_NOC 30
214#define SRST_EFUSE 31
215
216#define SRST_GPIO0 32
217#define SRST_GPIO1 33
218#define SRST_GPIO2 34
219#define SRST_GPIO3 35
220#define SRST_GPIO4 36
221#define SRST_PMUGRF 41
222#define SRST_I2C0 42
223#define SRST_I2C1 43
224#define SRST_I2C2 44
225#define SRST_I2C3 45
226#define SRST_I2C4 46
227#define SRST_I2C5 47
228
229#define SRST_DWPWM 48
230#define SRST_MMC_PERI 49
231#define SRST_PERIPH_MMU 50
232#define SRST_GRF 55
233#define SRST_PMU 56
234#define SRST_PERIPH_AXI 57
235#define SRST_PERIPH_AHB 58
236#define SRST_PERIPH_APB 59
237#define SRST_PERIPH_NIU 60
238#define SRST_PDPERI_AHB_ARBI 61
239#define SRST_EMEM 62
240#define SRST_USB_PERI 63
241
242#define SRST_DMAC2 64
243#define SRST_MAC 66
244#define SRST_GPS 67
245#define SRST_RKPWM 69
246#define SRST_USBHOST0 72
247#define SRST_HSIC 73
248#define SRST_HSIC_AUX 74
249#define SRST_HSIC_PHY 75
250#define SRST_HSADC 76
251#define SRST_NANDC0 77
252#define SRST_SFC 79
253
254#define SRST_SPI0 83
255#define SRST_SPI1 84
256#define SRST_SPI2 85
257#define SRST_SARADC 87
258#define SRST_PDALIVE_NIU 88
259#define SRST_PDPMU_INTMEM 89
260#define SRST_PDPMU_NIU 90
261#define SRST_SGRF 91
262
263#define SRST_VIO_ARBI 96
264#define SRST_RGA_NIU 97
265#define SRST_VIO0_NIU_AXI 98
266#define SRST_VIO_NIU_AHB 99
267#define SRST_LCDC0_AXI 100
268#define SRST_LCDC0_AHB 101
269#define SRST_LCDC0_DCLK 102
270#define SRST_VIP 104
271#define SRST_RGA_CORE 105
272#define SRST_IEP_AXI 106
273#define SRST_IEP_AHB 107
274#define SRST_RGA_AXI 108
275#define SRST_RGA_AHB 109
276#define SRST_ISP 110
277#define SRST_EDP_24M 111
278
279#define SRST_VIDEO_AXI 112
280#define SRST_VIDEO_AHB 113
281#define SRST_MIPIDPHYTX 114
282#define SRST_MIPIDSI0 115
283#define SRST_MIPIDPHYRX 116
284#define SRST_MIPICSI 117
285#define SRST_GPU 120
286#define SRST_HDMI 121
287#define SRST_EDP 122
288#define SRST_PMU_PVTM 123
289#define SRST_CORE_PVTM 124
290#define SRST_GPU_PVTM 125
291#define SRST_GPU_SYS 126
292#define SRST_GPU_MEM_NIU 127
293
294#define SRST_MMC0 128
295#define SRST_SDIO0 129
296#define SRST_EMMC 131
297#define SRST_USBOTG_AHB 132
298#define SRST_USBOTG_PHY 133
299#define SRST_USBOTG_CON 134
300#define SRST_USBHOST0_AHB 135
301#define SRST_USBHOST0_PHY 136
302#define SRST_USBHOST0_CON 137
303#define SRST_USBOTG_UTMI 138
304#define SRST_USBHOST1_UTMI 139
305#define SRST_USB_ADP 141
306
307#define SRST_CORESIGHT 144
308#define SRST_PD_CORE_AHB_NOC 145
309#define SRST_PD_CORE_APB_NOC 146
310#define SRST_GIC 148
311#define SRST_LCDC_PWM0 149
312#define SRST_RGA_H2P_BRG 153
313#define SRST_VIDEO 154
314#define SRST_GPU_CFG_NIU 157
315#define SRST_TSADC 159
316
317#define SRST_DDRPHY0 160
318#define SRST_DDRPHY0_APB 161
319#define SRST_DDRCTRL0 162
320#define SRST_DDRCTRL0_APB 163
321#define SRST_VIDEO_NIU 165
322#define SRST_VIDEO_NIU_AHB 167
323#define SRST_DDRMSCH0 170
324#define SRST_PDBUS_AHB 173
325#define SRST_CRYPTO 174
326
327#define SRST_UART0 179
328#define SRST_UART1 180
329#define SRST_UART2 181
330#define SRST_UART3 182
331#define SRST_UART4 183
332#define SRST_SIMC 186
333#define SRST_TSP 188
334#define SRST_TSP_CLKIN0 189
335
336#define SRST_CORE_L0 192
337#define SRST_CORE_L1 193
338#define SRST_CORE_L2 194
339#define SRST_CORE_L3 195
340#define SRST_CORE_L0_PO 195
341#define SRST_CORE_L1_PO 197
342#define SRST_CORE_L2_PO 198
343#define SRST_CORE_L3_PO 199
344#define SRST_L2_L 200
345#define SRST_ADB_L 201
346#define SRST_PD_CORE_L_NIU 202
347#define SRST_CCI_SYS 203
348#define SRST_CCI_DDR 204
349#define SRST_CCI 205
350#define SRST_SOCDBG_L 206
351#define SRST_CORE_L_DBG 207
352
353#define SRST_CORE_B0_NC 208
354#define SRST_CORE_B0_PO_NC 209
355#define SRST_L2_B_NC 210
356#define SRST_ADB_B_NC 211
357#define SRST_PD_CORE_B_NIU_NC 212
358#define SRST_PDBUS_STRSYS_NC 213
359#define SRST_CORE_L0_NC 214
360#define SRST_CORE_L0_PO_NC 215
361#define SRST_L2_L_NC 216
362#define SRST_ADB_L_NC 217
363#define SRST_PD_CORE_L_NIU_NC 218
364#define SRST_CCI_SYS_NC 219
365#define SRST_CCI_DDR_NC 220
366#define SRST_CCI_NC 221
367#define SRST_TRACE_NC 222
368
369#define SRST_TIMER00 224
370#define SRST_TIMER01 225
371#define SRST_TIMER02 226
372#define SRST_TIMER03 227
373#define SRST_TIMER04 228
374#define SRST_TIMER05 229
375#define SRST_TIMER10 230
376#define SRST_TIMER11 231
377#define SRST_TIMER12 232
378#define SRST_TIMER13 233
379#define SRST_TIMER14 234
380#define SRST_TIMER15 235
381#define SRST_TIMER0_APB 236
382#define SRST_TIMER1_APB 237
383
384#endif
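
A minimal consumer sketch covering both halves of this header (clock IDs and soft-reset indices), assuming the clock controller is labelled &cru and the dw-apb-uart "baudclk"/"apb_pclk" clock-names; the address is a placeholder:

#include <dt-bindings/clock/rk3368-cru.h>

uart0: serial@ff180000 {
        compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
        reg = <0x0 0xff180000 0x0 0x100>;       /* placeholder */
        clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
        clock-names = "baudclk", "apb_pclk";
        resets = <&cru SRST_UART0>;
};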
diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h
index e683dbb7e7c5..26ee564b0e68 100644
--- a/include/dt-bindings/clock/zx296702-clock.h
+++ b/include/dt-bindings/clock/zx296702-clock.h
@@ -153,7 +153,16 @@
153#define ZX296702_I2S0_WCLK 9 153#define ZX296702_I2S0_WCLK 9
154#define ZX296702_I2S0_PCLK 10 154#define ZX296702_I2S0_PCLK 10
155#define ZX296702_I2S0_DIV 11 155#define ZX296702_I2S0_DIV 11
156#define ZX296702_LSP0CLK_END 12 156#define ZX296702_I2S1_WCLK_MUX 12
157#define ZX296702_I2S1_WCLK 13
158#define ZX296702_I2S1_PCLK 14
159#define ZX296702_I2S1_DIV 15
160#define ZX296702_I2S2_WCLK_MUX 16
161#define ZX296702_I2S2_WCLK 17
162#define ZX296702_I2S2_PCLK 18
163#define ZX296702_I2S2_DIV 19
164#define ZX296702_GPIO_CLK 20
165#define ZX296702_LSP0CLK_END 21
157 166
158#define ZX296702_UART0_WCLK_MUX 0 167#define ZX296702_UART0_WCLK_MUX 0
159#define ZX296702_UART0_WCLK 1 168#define ZX296702_UART0_WCLK 1
@@ -165,6 +174,10 @@
165#define ZX296702_SDMMC0_WCLK_DIV 7 174#define ZX296702_SDMMC0_WCLK_DIV 7
166#define ZX296702_SDMMC0_WCLK 8 175#define ZX296702_SDMMC0_WCLK 8
167#define ZX296702_SDMMC0_PCLK 9 176#define ZX296702_SDMMC0_PCLK 9
168#define ZX296702_LSP1CLK_END 10 177#define ZX296702_SPDIF1_WCLK_MUX 10
178#define ZX296702_SPDIF1_WCLK 11
179#define ZX296702_SPDIF1_PCLK 12
180#define ZX296702_SPDIF1_DIV 13
181#define ZX296702_LSP1CLK_END 14
169 182
170#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */ 183#endif /* __DT_BINDINGS_CLOCK_ZX296702_H */
diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h
new file mode 100644
index 000000000000..ad9e6ecb9c2f
--- /dev/null
+++ b/include/dt-bindings/dma/axi-dmac.h
@@ -0,0 +1,48 @@
1/*
2 * This file is dual-licensed: you can use it either under the terms
3 * of the GPL or the X11 license, at your option. Note that this dual
4 * licensing only applies to this file, and not this project as a
5 * whole.
6 *
7 * a) This file is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This file is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * Or, alternatively,
18 *
19 * b) Permission is hereby granted, free of charge, to any person
20 * obtaining a copy of this software and associated documentation
21 * files (the "Software"), to deal in the Software without
22 * restriction, including without limitation the rights to use,
23 * copy, modify, merge, publish, distribute, sublicense, and/or
24 * sell copies of the Software, and to permit persons to whom the
25 * Software is furnished to do so, subject to the following
26 * conditions:
27 *
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
33 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
35 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
36 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
37 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
38 * OTHER DEALINGS IN THE SOFTWARE.
39 */
40
41#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__
42#define __DT_BINDINGS_DMA_AXI_DMAC_H__
43
44#define AXI_DMAC_BUS_TYPE_AXI_MM 0
45#define AXI_DMAC_BUS_TYPE_AXI_STREAM 1
46#define AXI_DMAC_BUS_TYPE_FIFO 2
47
48#endif
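
A sketch of how the bus-type constants feed the adi,axi-dmac per-channel description; the property names and node layout follow that binding as recalled and should be treated as assumptions:

#include <dt-bindings/dma/axi-dmac.h>

dma-controller@7c420000 {
        compatible = "adi,axi-dmac-1.00.a";
        reg = <0x7c420000 0x10000>;             /* placeholder */
        #dma-cells = <1>;

        adi,channels {
                #address-cells = <1>;
                #size-cells = <0>;

                dma-channel@0 {
                        reg = <0>;
                        adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
                        adi,source-bus-width = <32>;
                        adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
                        adi,destination-bus-width = <64>;
                };
        };
};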
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
deleted file mode 100644
index df017fdfb44e..000000000000
--- a/include/dt-bindings/dma/jz4780-dma.h
+++ /dev/null
@@ -1,49 +0,0 @@
1#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
2#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
3
4/*
5 * Request type numbers for the JZ4780 DMA controller (written to the DRTn
6 * register for the channel).
7 */
8#define JZ4780_DMA_I2S1_TX 0x4
9#define JZ4780_DMA_I2S1_RX 0x5
10#define JZ4780_DMA_I2S0_TX 0x6
11#define JZ4780_DMA_I2S0_RX 0x7
12#define JZ4780_DMA_AUTO 0x8
13#define JZ4780_DMA_SADC_RX 0x9
14#define JZ4780_DMA_UART4_TX 0xc
15#define JZ4780_DMA_UART4_RX 0xd
16#define JZ4780_DMA_UART3_TX 0xe
17#define JZ4780_DMA_UART3_RX 0xf
18#define JZ4780_DMA_UART2_TX 0x10
19#define JZ4780_DMA_UART2_RX 0x11
20#define JZ4780_DMA_UART1_TX 0x12
21#define JZ4780_DMA_UART1_RX 0x13
22#define JZ4780_DMA_UART0_TX 0x14
23#define JZ4780_DMA_UART0_RX 0x15
24#define JZ4780_DMA_SSI0_TX 0x16
25#define JZ4780_DMA_SSI0_RX 0x17
26#define JZ4780_DMA_SSI1_TX 0x18
27#define JZ4780_DMA_SSI1_RX 0x19
28#define JZ4780_DMA_MSC0_TX 0x1a
29#define JZ4780_DMA_MSC0_RX 0x1b
30#define JZ4780_DMA_MSC1_TX 0x1c
31#define JZ4780_DMA_MSC1_RX 0x1d
32#define JZ4780_DMA_MSC2_TX 0x1e
33#define JZ4780_DMA_MSC2_RX 0x1f
34#define JZ4780_DMA_PCM0_TX 0x20
35#define JZ4780_DMA_PCM0_RX 0x21
36#define JZ4780_DMA_SMB0_TX 0x24
37#define JZ4780_DMA_SMB0_RX 0x25
38#define JZ4780_DMA_SMB1_TX 0x26
39#define JZ4780_DMA_SMB1_RX 0x27
40#define JZ4780_DMA_SMB2_TX 0x28
41#define JZ4780_DMA_SMB2_RX 0x29
42#define JZ4780_DMA_SMB3_TX 0x2a
43#define JZ4780_DMA_SMB3_RX 0x2b
44#define JZ4780_DMA_SMB4_TX 0x2c
45#define JZ4780_DMA_SMB4_RX 0x2d
46#define JZ4780_DMA_DES_TX 0x2e
47#define JZ4780_DMA_DES_RX 0x2f
48
49#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
diff --git a/include/dt-bindings/i2c/i2c.h b/include/dt-bindings/i2c/i2c.h
new file mode 100644
index 000000000000..1d5da81d90f1
--- /dev/null
+++ b/include/dt-bindings/i2c/i2c.h
@@ -0,0 +1,18 @@
1/*
2 * This header provides constants for I2C bindings
3 *
4 * Copyright (C) 2015 by Sang Engineering
5 * Copyright (C) 2015 by Renesas Electronics Corporation
6 *
7 * Wolfram Sang <wsa@sang-engineering.com>
8 *
9 * GPLv2 only
10 */
11
12#ifndef _DT_BINDINGS_I2C_I2C_H
13#define _DT_BINDINGS_I2C_I2C_H
14
15#define I2C_TEN_BIT_ADDRESS (1 << 31)
16#define I2C_OWN_SLAVE_ADDRESS (1 << 30)
17
18#endif
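
Both flags are OR'd into an I2C child node's reg value. A minimal sketch; the first child's compatible is a placeholder, the second uses the kernel's I2C slave-eeprom backend:

#include <dt-bindings/i2c/i2c.h>

&i2c0 {
        sensor@120 {
                compatible = "vendor,example-sensor";   /* placeholder */
                reg = <(0x120 | I2C_TEN_BIT_ADDRESS)>;  /* 10-bit client address */
        };

        slave-eeprom@64 {
                compatible = "linux,slave-24c02";
                reg = <(0x64 | I2C_OWN_SLAVE_ADDRESS)>; /* address the bus itself answers on */
        };
};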
diff --git a/include/dt-bindings/leds/leds-ns2.h b/include/dt-bindings/leds/leds-ns2.h
new file mode 100644
index 000000000000..491c5f974a92
--- /dev/null
+++ b/include/dt-bindings/leds/leds-ns2.h
@@ -0,0 +1,8 @@
1#ifndef _DT_BINDINGS_LEDS_NS2_H
2#define _DT_BINDINGS_LEDS_NS2_H
3
4#define NS_V2_LED_OFF 0
5#define NS_V2_LED_ON 1
6#define NS_V2_LED_SATA 2
7
8#endif
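
A sketch of the leds-ns2 modes-map property these constants feed, written as <mode cmd-level slow-level> triplets; the GPIO numbers and the exact map are assumptions recalled from the binding example:

#include <dt-bindings/leds/leds-ns2.h>

ns2-leds {
        compatible = "lacie,ns2-leds";

        blue-sata {
                label = "ns2:blue:sata";
                cmd-gpio = <&gpio0 30 0>;       /* placeholder */
                slow-gpio = <&gpio0 29 0>;      /* placeholder */
                modes-map = <NS_V2_LED_OFF  1 0
                             NS_V2_LED_ON   0 1
                             NS_V2_LED_ON   1 1
                             NS_V2_LED_SATA 0 0>;
        };
};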
diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h
new file mode 100644
index 000000000000..a0b5c7be683c
--- /dev/null
+++ b/include/dt-bindings/media/c8sectpfe.h
@@ -0,0 +1,12 @@
1#ifndef __DT_C8SECTPFE_H
2#define __DT_C8SECTPFE_H
3
4#define STV0367_TDA18212_NIMA_1 0
5#define STV0367_TDA18212_NIMA_2 1
6#define STV0367_TDA18212_NIMB_1 2
7#define STV0367_TDA18212_NIMB_2 3
8
9#define STV0903_6110_LNB24_NIMA 4
10#define STV0903_6110_LNB24_NIMB 5
11
12#endif /* __DT_C8SECTPFE_H */
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
new file mode 100644
index 000000000000..d1731bc14dbc
--- /dev/null
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -0,0 +1,36 @@
1#ifndef DT_BINDINGS_MEMORY_TEGRA210_MC_H
2#define DT_BINDINGS_MEMORY_TEGRA210_MC_H
3
4#define TEGRA_SWGROUP_PTC 0
5#define TEGRA_SWGROUP_DC 1
6#define TEGRA_SWGROUP_DCB 2
7#define TEGRA_SWGROUP_AFI 3
8#define TEGRA_SWGROUP_AVPC 4
9#define TEGRA_SWGROUP_HDA 5
10#define TEGRA_SWGROUP_HC 6
11#define TEGRA_SWGROUP_NVENC 7
12#define TEGRA_SWGROUP_PPCS 8
13#define TEGRA_SWGROUP_SATA 9
14#define TEGRA_SWGROUP_MPCORE 10
15#define TEGRA_SWGROUP_ISP2 11
16#define TEGRA_SWGROUP_XUSB_HOST 12
17#define TEGRA_SWGROUP_XUSB_DEV 13
18#define TEGRA_SWGROUP_ISP2B 14
19#define TEGRA_SWGROUP_TSEC 15
20#define TEGRA_SWGROUP_A9AVP 16
21#define TEGRA_SWGROUP_GPU 17
22#define TEGRA_SWGROUP_SDMMC1A 18
23#define TEGRA_SWGROUP_SDMMC2A 19
24#define TEGRA_SWGROUP_SDMMC3A 20
25#define TEGRA_SWGROUP_SDMMC4A 21
26#define TEGRA_SWGROUP_VIC 22
27#define TEGRA_SWGROUP_VI 23
28#define TEGRA_SWGROUP_NVDEC 24
29#define TEGRA_SWGROUP_APE 25
30#define TEGRA_SWGROUP_NVJPG 26
31#define TEGRA_SWGROUP_SE 27
32#define TEGRA_SWGROUP_AXIAP 28
33#define TEGRA_SWGROUP_ETR 29
34#define TEGRA_SWGROUP_TSECB 30
35
36#endif
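
These SWGROUP IDs are what Tegra SMMU consumers pass in their iommus specifier. A minimal sketch, assuming the memory controller is labelled &mc and a placeholder SDMMC node:

#include <dt-bindings/memory/tegra210-mc.h>

sdhci@700b0000 {
        compatible = "nvidia,tegra210-sdhci";   /* placeholder */
        reg = <0x0 0x700b0000 0x0 0x200>;       /* placeholder */
        iommus = <&mc TEGRA_SWGROUP_SDMMC1A>;
};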
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
index e3e6c75d8822..d05894afa7e7 100644
--- a/include/dt-bindings/mfd/st-lpc.h
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -11,5 +11,6 @@
11 11
12#define ST_LPC_MODE_RTC 0 12#define ST_LPC_MODE_RTC 0
13#define ST_LPC_MODE_WDT 1 13#define ST_LPC_MODE_WDT 1
14#define ST_LPC_MODE_CLKSRC 2
14 15
15#endif /* __DT_BINDINGS_ST_LPC_H__ */ 16#endif /* __DT_BINDINGS_ST_LPC_H__ */
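
The new mode selects the LPC block's clocksource personality via the binding's st,lpc-mode property. A sketch; the compatible string and address are assumptions:

#include <dt-bindings/mfd/st-lpc.h>

lpc@fde05000 {
        compatible = "st,stih407-lpc";          /* assumption */
        reg = <0xfde05000 0x1000>;              /* placeholder */
        st,lpc-mode = <ST_LPC_MODE_CLKSRC>;
};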
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index b00bbc9c60b4..774dc1e843c5 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -14,6 +14,7 @@
14#define MUX_MODE6 6 14#define MUX_MODE6 6
15#define MUX_MODE7 7 15#define MUX_MODE7 7
16#define MUX_MODE8 8 16#define MUX_MODE8 8
17#define MUX_MODE9 9
17 18
18#define PULL_DISABLE (1 << 16) 19#define PULL_DISABLE (1 << 16)
19#define PULL_UP (1 << 17) 20#define PULL_UP (1 << 17)
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 7448edff4723..4379e29f0460 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,6 +30,26 @@
30#define MUX_MODE14 0xe 30#define MUX_MODE14 0xe
31#define MUX_MODE15 0xf 31#define MUX_MODE15 0xf
32 32
33/* Certain pins need virtual mode, but note: they may glitch */
34#define MUX_VIRTUAL_MODE0 (MODE_SELECT | (0x0 << 4))
35#define MUX_VIRTUAL_MODE1 (MODE_SELECT | (0x1 << 4))
36#define MUX_VIRTUAL_MODE2 (MODE_SELECT | (0x2 << 4))
37#define MUX_VIRTUAL_MODE3 (MODE_SELECT | (0x3 << 4))
38#define MUX_VIRTUAL_MODE4 (MODE_SELECT | (0x4 << 4))
39#define MUX_VIRTUAL_MODE5 (MODE_SELECT | (0x5 << 4))
40#define MUX_VIRTUAL_MODE6 (MODE_SELECT | (0x6 << 4))
41#define MUX_VIRTUAL_MODE7 (MODE_SELECT | (0x7 << 4))
42#define MUX_VIRTUAL_MODE8 (MODE_SELECT | (0x8 << 4))
43#define MUX_VIRTUAL_MODE9 (MODE_SELECT | (0x9 << 4))
44#define MUX_VIRTUAL_MODE10 (MODE_SELECT | (0xa << 4))
45#define MUX_VIRTUAL_MODE11 (MODE_SELECT | (0xb << 4))
46#define MUX_VIRTUAL_MODE12 (MODE_SELECT | (0xc << 4))
47#define MUX_VIRTUAL_MODE13 (MODE_SELECT | (0xd << 4))
48#define MUX_VIRTUAL_MODE14 (MODE_SELECT | (0xe << 4))
49#define MUX_VIRTUAL_MODE15 (MODE_SELECT | (0xf << 4))
50
51#define MODE_SELECT (1 << 8)
52
33#define PULL_ENA (0 << 16) 53#define PULL_ENA (0 << 16)
34#define PULL_DIS (1 << 16) 54#define PULL_DIS (1 << 16)
35#define PULL_UP (1 << 17) 55#define PULL_UP (1 << 17)
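
The virtual-mode values are OR'd into pinctrl-single,pins entries alongside the regular mux mode for the DRA7 pads that need them. A sketch; the register offset, pull flags and node names are placeholders:

#include <dt-bindings/pinctrl/dra.h>

&dra7_pmx_core {
        mmc1_pins_hs: pinmux_mmc1_hs_pins {
                pinctrl-single,pins = <
                        0x354 (PIN_INPUT_PULLUP | MUX_VIRTUAL_MODE11 | MUX_MODE0)  /* placeholder pad */
                >;
        };
};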
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index c10205491f8d..a15c1704d0ec 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -7,6 +7,47 @@
7#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H 7#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
8 8
9/* power-source */ 9/* power-source */
10
11/* Digital Input/Output: level [PM8058] */
12#define PM8058_MPP_VPH 0
13#define PM8058_MPP_S3 1
14#define PM8058_MPP_L2 2
15#define PM8058_MPP_L3 3
16
17/* Digital Input/Output: level [PM8901] */
18#define PM8901_MPP_MSMIO 0
19#define PM8901_MPP_DIG 1
20#define PM8901_MPP_L5 2
21#define PM8901_MPP_S4 3
22#define PM8901_MPP_VPH 4
23
24/* Digital Input/Output: level [PM8921] */
25#define PM8921_MPP_S4 1
26#define PM8921_MPP_L15 3
27#define PM8921_MPP_L17 4
28#define PM8921_MPP_VPH 7
29
30/* Digital Input/Output: level [PM8821] */
31#define PM8821_MPP_1P8 0
32#define PM8821_MPP_VPH 7
33
34/* Digital Input/Output: level [PM8018] */
35#define PM8018_MPP_L4 0
36#define PM8018_MPP_L14 1
37#define PM8018_MPP_S3 2
38#define PM8018_MPP_L6 3
39#define PM8018_MPP_L2 4
40#define PM8018_MPP_L5 5
41#define PM8018_MPP_VPH 7
42
43/* Digital Input/Output: level [PM8038] */
44#define PM8038_MPP_L20 0
45#define PM8038_MPP_L11 1
46#define PM8038_MPP_L5 2
47#define PM8038_MPP_L15 3
48#define PM8038_MPP_L17 4
49#define PM8038_MPP_VPH 7
50
10#define PM8841_MPP_VPH 0 51#define PM8841_MPP_VPH 0
11#define PM8841_MPP_S3 2 52#define PM8841_MPP_S3 2
12 53
@@ -37,6 +78,16 @@
37#define PMIC_MPP_AMUX_ROUTE_ABUS3 6 78#define PMIC_MPP_AMUX_ROUTE_ABUS3 6
38#define PMIC_MPP_AMUX_ROUTE_ABUS4 7 79#define PMIC_MPP_AMUX_ROUTE_ABUS4 7
39 80
81/* Analog Output: level */
82#define PMIC_MPP_AOUT_LVL_1V25 0
83#define PMIC_MPP_AOUT_LVL_1V25_2 1
84#define PMIC_MPP_AOUT_LVL_0V625 2
85#define PMIC_MPP_AOUT_LVL_0V3125 3
86#define PMIC_MPP_AOUT_LVL_MPP 4
87#define PMIC_MPP_AOUT_LVL_ABUS1 5
88#define PMIC_MPP_AOUT_LVL_ABUS2 6
89#define PMIC_MPP_AOUT_LVL_ABUS3 7
90
40/* To be used with "function" */ 91/* To be used with "function" */
41#define PMIC_MPP_FUNC_NORMAL "normal" 92#define PMIC_MPP_FUNC_NORMAL "normal"
42#define PMIC_MPP_FUNC_PAIRED "paired" 93#define PMIC_MPP_FUNC_PAIRED "paired"
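
The new level constants plug into an MPP pin-configuration node's power-source property, alongside the "normal"/"paired" function strings defined above. A sketch, assuming a PM8921 MPP controller labelled &pm8921_mpps:

#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>

&pm8921_mpps {
        mpp7_cfg: mpp7 {
                pins = "mpp7";
                function = PMIC_MPP_FUNC_NORMAL;
                output-low;
                power-source = <PM8921_MPP_S4>;
        };
};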
diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h
new file mode 100644
index 000000000000..b34cee95aa89
--- /dev/null
+++ b/include/dt-bindings/power/mt8173-power.h
@@ -0,0 +1,15 @@
1#ifndef _DT_BINDINGS_POWER_MT8183_POWER_H
2#define _DT_BINDINGS_POWER_MT8183_POWER_H
3
4#define MT8173_POWER_DOMAIN_VDEC 0
5#define MT8173_POWER_DOMAIN_VENC 1
6#define MT8173_POWER_DOMAIN_ISP 2
7#define MT8173_POWER_DOMAIN_MM 3
8#define MT8173_POWER_DOMAIN_VENC_LT 4
9#define MT8173_POWER_DOMAIN_AUDIO 5
10#define MT8173_POWER_DOMAIN_USB 6
11#define MT8173_POWER_DOMAIN_MFG_ASYNC 7
12#define MT8173_POWER_DOMAIN_MFG_2D 8
13#define MT8173_POWER_DOMAIN_MFG 9
14
15#endif /* _DT_BINDINGS_POWER_MT8183_POWER_H */
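
Consumers reference these domains through the standard power-domains specifier pointing at the SCPSYS power controller. A sketch; the &scpsys label and the encoder node are assumptions:

#include <dt-bindings/power/mt8173-power.h>

vcodec_enc: vcodec@18002000 {
        compatible = "mediatek,mt8173-vcodec-enc";      /* placeholder */
        reg = <0 0x18002000 0 0x1000>;                  /* placeholder */
        power-domains = <&scpsys MT8173_POWER_DOMAIN_VENC>;
};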
diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10.h b/include/dt-bindings/reset/altr,rst-mgr-a10.h
new file mode 100644
index 000000000000..acb0bbf4f9f5
--- /dev/null
+++ b/include/dt-bindings/reset/altr,rst-mgr-a10.h
@@ -0,0 +1,110 @@
1/*
2 * Copyright (c) 2014, Steffen Trumtrar <s.trumtrar@pengutronix.de>
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H
15#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H
16
17/* MPUMODRST */
18#define CPU0_RESET 0
19#define CPU1_RESET 1
20#define WDS_RESET 2
21#define SCUPER_RESET 3
22
23/* PER0MODRST */
24#define EMAC0_RESET 32
25#define EMAC1_RESET 33
26#define EMAC2_RESET 34
27#define USB0_RESET 35
28#define USB1_RESET 36
29#define NAND_RESET 37
30#define QSPI_RESET 38
31#define SDMMC_RESET 39
32#define EMAC0_OCP_RESET 40
33#define EMAC1_OCP_RESET 41
34#define EMAC2_OCP_RESET 42
35#define USB0_OCP_RESET 43
36#define USB1_OCP_RESET 44
37#define NAND_OCP_RESET 45
38#define QSPI_OCP_RESET 46
39#define SDMMC_OCP_RESET 47
40#define DMA_RESET 48
41#define SPIM0_RESET 49
42#define SPIM1_RESET 50
43#define SPIS0_RESET 51
44#define SPIS1_RESET 52
45#define DMA_OCP_RESET 53
46#define EMAC_PTP_RESET 54
47/* 55 is empty*/
48#define DMAIF0_RESET 56
49#define DMAIF1_RESET 57
50#define DMAIF2_RESET 58
51#define DMAIF3_RESET 59
52#define DMAIF4_RESET 60
53#define DMAIF5_RESET 61
54#define DMAIF6_RESET 62
55#define DMAIF7_RESET 63
56
57/* PER1MODRST */
58#define L4WD0_RESET 64
59#define L4WD1_RESET 65
60#define L4SYSTIMER0_RESET 66
61#define L4SYSTIMER1_RESET 67
62#define SPTIMER0_RESET 68
63#define SPTIMER1_RESET 69
64/* 70-71 is reserved */
65#define I2C0_RESET 72
66#define I2C1_RESET 73
67#define I2C2_RESET 74
68#define I2C3_RESET 75
69#define I2C4_RESET 76
70/* 77-79 is reserved */
71#define UART0_RESET 80
72#define UART1_RESET 81
73/* 82-87 is reserved */
74#define GPIO0_RESET 88
75#define GPIO1_RESET 89
76#define GPIO2_RESET 90
77
78/* BRGMODRST */
79#define HPS2FPGA_RESET 96
80#define LWHPS2FPGA_RESET 97
81#define FPGA2HPS_RESET 98
82#define F2SSDRAM0_RESET 99
83#define F2SSDRAM1_RESET 100
84#define F2SSDRAM2_RESET 101
85#define DDRSCH_RESET 102
86
87/* SYSMODRST*/
88#define ROM_RESET 128
89#define OCRAM_RESET 129
90/* 130 is reserved */
91#define FPGAMGR_RESET 131
92#define S2F_RESET 132
93#define SYSDBG_RESET 133
94#define OCRAM_OCP_RESET 134
95
96/* COLDMODRST */
97#define CLKMGRCOLD_RESET 160
98/* 161-162 is reserved */
99#define S2FCOLD_RESET 163
100#define TIMESTAMPCOLD_RESET 164
101#define TAPCOLD_RESET 165
102#define HMCCOLD_RESET 166
103#define IOMGRCOLD_RESET 167
104
105/* NRSTMODRST */
106#define NRSTPINOE_RESET 192
107
108/* DBGMODRST */
109#define DBG_RESET 224
110#endif
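
Peripheral nodes consume these indices through the common reset binding against the Arria10 reset manager. A sketch, assuming the reset manager is labelled &rst and a placeholder EMAC0 (stmmac) node:

#include <dt-bindings/reset/altr,rst-mgr-a10.h>

ethernet@ff800000 {
        compatible = "altr,socfpga-stmmac", "snps,dwmac";       /* placeholder */
        reg = <0xff800000 0x2000>;                              /* placeholder */
        resets = <&rst EMAC0_RESET>;
        reset-names = "stmmaceth";
};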
diff --git a/include/dt-bindings/reset-controller/stih407-resets.h b/include/dt-bindings/reset/stih407-resets.h
index 02d4328fe479..02d4328fe479 100644
--- a/include/dt-bindings/reset-controller/stih407-resets.h
+++ b/include/dt-bindings/reset/stih407-resets.h
diff --git a/include/dt-bindings/reset-controller/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h
index c2329fe29cf6..c2329fe29cf6 100644
--- a/include/dt-bindings/reset-controller/stih415-resets.h
+++ b/include/dt-bindings/reset/stih415-resets.h
diff --git a/include/dt-bindings/reset-controller/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h
index fcf9af1ac0b2..fcf9af1ac0b2 100644
--- a/include/dt-bindings/reset-controller/stih416-resets.h
+++ b/include/dt-bindings/reset/stih416-resets.h
diff --git a/include/dt-bindings/reset/tegra124-car.h b/include/dt-bindings/reset/tegra124-car.h
new file mode 100644
index 000000000000..070e4f6e7486
--- /dev/null
+++ b/include/dt-bindings/reset/tegra124-car.h
@@ -0,0 +1,12 @@
1/*
2 * This header provides Tegra124-specific constants for binding
3 * nvidia,tegra124-car.
4 */
5
6#ifndef _DT_BINDINGS_RESET_TEGRA124_CAR_H
7#define _DT_BINDINGS_RESET_TEGRA124_CAR_H
8
9#define TEGRA124_RESET(x) (6 * 32 + (x))
10#define TEGRA124_RST_DFLL_DVCO TEGRA124_RESET(0)
11
12#endif /* _DT_BINDINGS_RESET_TEGRA124_CAR_H */
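
The DVCO reset is picked up by the Tegra124 DFLL clock node through the standard resets property against the CAR; the node address and &tegra_car label are the usual ones but should be read as assumptions:

#include <dt-bindings/reset/tegra124-car.h>

dfll: clock@70110000 {
        compatible = "nvidia,tegra124-dfll";
        reg = <0 0x70110000 0 0x100>;           /* placeholder */
        resets = <&tegra_car TEGRA124_RST_DFLL_DVCO>;
        reset-names = "dvco";
};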
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 72665eb80692..b20cd885c1fd 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -15,6 +15,7 @@
15#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING 15#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
16 16
17#include <linux/key.h> 17#include <linux/key.h>
18#include <crypto/public_key.h>
18 19
19extern struct key *system_trusted_keyring; 20extern struct key *system_trusted_keyring;
20static inline struct key *get_system_trusted_keyring(void) 21static inline struct key *get_system_trusted_keyring(void)
@@ -28,4 +29,10 @@ static inline struct key *get_system_trusted_keyring(void)
28} 29}
29#endif 30#endif
30 31
32#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
33extern int system_verify_data(const void *data, unsigned long len,
34 const void *raw_pkcs7, size_t pkcs7_len,
35 enum key_being_used_for usage);
36#endif
37
31#endif /* _KEYS_SYSTEM_KEYRING_H */ 38#endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index e5966758c093..e1e4d7c38dda 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -52,13 +52,16 @@ struct arch_timer_cpu {
52 52
53 /* Timer IRQ */ 53 /* Timer IRQ */
54 const struct kvm_irq_level *irq; 54 const struct kvm_irq_level *irq;
55
56 /* VGIC mapping */
57 struct irq_phys_map *map;
55}; 58};
56 59
57int kvm_timer_hyp_init(void); 60int kvm_timer_hyp_init(void);
58void kvm_timer_enable(struct kvm *kvm); 61void kvm_timer_enable(struct kvm *kvm);
59void kvm_timer_init(struct kvm *kvm); 62void kvm_timer_init(struct kvm *kvm);
60void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, 63int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
61 const struct kvm_irq_level *irq); 64 const struct kvm_irq_level *irq);
62void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); 65void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
63void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); 66void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
64void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); 67void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 133ea00aa83b..d901f1a47be6 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -95,11 +95,15 @@ enum vgic_type {
95#define LR_STATE_ACTIVE (1 << 1) 95#define LR_STATE_ACTIVE (1 << 1)
96#define LR_STATE_MASK (3 << 0) 96#define LR_STATE_MASK (3 << 0)
97#define LR_EOI_INT (1 << 2) 97#define LR_EOI_INT (1 << 2)
98#define LR_HW (1 << 3)
98 99
99struct vgic_lr { 100struct vgic_lr {
100 u16 irq; 101 unsigned irq:10;
101 u8 source; 102 union {
102 u8 state; 103 unsigned hwirq:10;
104 unsigned source:3;
105 };
106 unsigned state:4;
103}; 107};
104 108
105struct vgic_vmcr { 109struct vgic_vmcr {
@@ -155,6 +159,19 @@ struct vgic_io_device {
155 struct kvm_io_device dev; 159 struct kvm_io_device dev;
156}; 160};
157 161
162struct irq_phys_map {
163 u32 virt_irq;
164 u32 phys_irq;
165 u32 irq;
166 bool active;
167};
168
169struct irq_phys_map_entry {
170 struct list_head entry;
171 struct rcu_head rcu;
172 struct irq_phys_map map;
173};
174
158struct vgic_dist { 175struct vgic_dist {
159 spinlock_t lock; 176 spinlock_t lock;
160 bool in_kernel; 177 bool in_kernel;
@@ -252,6 +269,10 @@ struct vgic_dist {
252 struct vgic_vm_ops vm_ops; 269 struct vgic_vm_ops vm_ops;
253 struct vgic_io_device dist_iodev; 270 struct vgic_io_device dist_iodev;
254 struct vgic_io_device *redist_iodevs; 271 struct vgic_io_device *redist_iodevs;
272
273 /* Virtual irq to hwirq mapping */
274 spinlock_t irq_phys_map_lock;
275 struct list_head irq_phys_map_list;
255}; 276};
256 277
257struct vgic_v2_cpu_if { 278struct vgic_v2_cpu_if {
@@ -303,6 +324,9 @@ struct vgic_cpu {
303 struct vgic_v2_cpu_if vgic_v2; 324 struct vgic_v2_cpu_if vgic_v2;
304 struct vgic_v3_cpu_if vgic_v3; 325 struct vgic_v3_cpu_if vgic_v3;
305 }; 326 };
327
328 /* Protected by the distributor's irq_phys_map_lock */
329 struct list_head irq_phys_map_list;
306}; 330};
307 331
308#define LR_EMPTY 0xff 332#define LR_EMPTY 0xff
@@ -317,16 +341,25 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
317int kvm_vgic_hyp_init(void); 341int kvm_vgic_hyp_init(void);
318int kvm_vgic_map_resources(struct kvm *kvm); 342int kvm_vgic_map_resources(struct kvm *kvm);
319int kvm_vgic_get_max_vcpus(void); 343int kvm_vgic_get_max_vcpus(void);
344void kvm_vgic_early_init(struct kvm *kvm);
320int kvm_vgic_create(struct kvm *kvm, u32 type); 345int kvm_vgic_create(struct kvm *kvm, u32 type);
321void kvm_vgic_destroy(struct kvm *kvm); 346void kvm_vgic_destroy(struct kvm *kvm);
347void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
322void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); 348void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
323void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); 349void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
324void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); 350void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
325int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, 351int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
326 bool level); 352 bool level);
353int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
354 struct irq_phys_map *map, bool level);
327void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); 355void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
328int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); 356int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
329int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); 357int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
358struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
359 int virt_irq, int irq);
360int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
361bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
362void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
330 363
331#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 364#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
332#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) 365#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d2445fa9999f..7235c4851460 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,10 +15,6 @@
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */ 19 */
24 20
@@ -221,7 +217,7 @@ struct pci_dev;
221 217
222int acpi_pci_irq_enable (struct pci_dev *dev); 218int acpi_pci_irq_enable (struct pci_dev *dev);
223void acpi_penalize_isa_irq(int irq, int active); 219void acpi_penalize_isa_irq(int irq, int active);
224 220void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
225void acpi_pci_irq_disable (struct pci_dev *dev); 221void acpi_pci_irq_disable (struct pci_dev *dev);
226 222
227extern int ec_read(u8 addr, u8 *val); 223extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
index 945d44ae529c..ab3a6c002f7b 100644
--- a/include/linux/asn1_ber_bytecode.h
+++ b/include/linux/asn1_ber_bytecode.h
@@ -45,23 +45,27 @@ enum asn1_opcode {
45 ASN1_OP_MATCH_JUMP = 0x04, 45 ASN1_OP_MATCH_JUMP = 0x04,
46 ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05, 46 ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05,
47 ASN1_OP_MATCH_ANY = 0x08, 47 ASN1_OP_MATCH_ANY = 0x08,
48 ASN1_OP_MATCH_ANY_OR_SKIP = 0x09,
48 ASN1_OP_MATCH_ANY_ACT = 0x0a, 49 ASN1_OP_MATCH_ANY_ACT = 0x0a,
50 ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 0x0b,
49 /* Everything before here matches unconditionally */ 51 /* Everything before here matches unconditionally */
50 52
51 ASN1_OP_COND_MATCH_OR_SKIP = 0x11, 53 ASN1_OP_COND_MATCH_OR_SKIP = 0x11,
52 ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13, 54 ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13,
53 ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15, 55 ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15,
54 ASN1_OP_COND_MATCH_ANY = 0x18, 56 ASN1_OP_COND_MATCH_ANY = 0x18,
57 ASN1_OP_COND_MATCH_ANY_OR_SKIP = 0x19,
55 ASN1_OP_COND_MATCH_ANY_ACT = 0x1a, 58 ASN1_OP_COND_MATCH_ANY_ACT = 0x1a,
59 ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 0x1b,
56 60
57 /* Everything before here will want a tag from the data */ 61 /* Everything before here will want a tag from the data */
58#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT 62#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP
59 63
60 /* These are here to help fill up space */ 64 /* These are here to help fill up space */
61 ASN1_OP_COND_FAIL = 0x1b, 65 ASN1_OP_COND_FAIL = 0x1c,
62 ASN1_OP_COMPLETE = 0x1c, 66 ASN1_OP_COMPLETE = 0x1d,
63 ASN1_OP_ACT = 0x1d, 67 ASN1_OP_ACT = 0x1e,
64 ASN1_OP_RETURN = 0x1e, 68 ASN1_OP_MAYBE_ACT = 0x1f,
65 69
66 /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */ 70 /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
67 ASN1_OP_END_SEQ = 0x20, 71 ASN1_OP_END_SEQ = 0x20,
@@ -76,6 +80,8 @@ enum asn1_opcode {
76#define ASN1_OP_END__OF 0x02 80#define ASN1_OP_END__OF 0x02
77#define ASN1_OP_END__ACT 0x04 81#define ASN1_OP_END__ACT 0x04
78 82
83 ASN1_OP_RETURN = 0x28,
84
79 ASN1_OP__NR 85 ASN1_OP__NR
80}; 86};
81 87
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index 00beddf6be20..ee696d7e8a43 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -16,115 +16,151 @@
16#ifndef ATMEL_SERIAL_H 16#ifndef ATMEL_SERIAL_H
17#define ATMEL_SERIAL_H 17#define ATMEL_SERIAL_H
18 18
19#define ATMEL_US_CR 0x00 /* Control Register */ 19#define ATMEL_US_CR 0x00 /* Control Register */
20#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */ 20#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
21#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */ 21#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
22#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */ 22#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
23#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */ 23#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
24#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */ 24#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
25#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */ 25#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
26#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */ 26#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
27#define ATMEL_US_STTBRK (1 << 9) /* Start Break */ 27#define ATMEL_US_STTBRK BIT(9) /* Start Break */
28#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */ 28#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
29#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */ 29#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
30#define ATMEL_US_SENDA (1 << 12) /* Send Address */ 30#define ATMEL_US_SENDA BIT(12) /* Send Address */
31#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */ 31#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
32#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */ 32#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
33#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */ 33#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
34#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */ 34#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
35#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */ 35#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
36#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */ 36#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
37#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */ 37#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
38#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
39#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
40#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
41#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
42#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
38 43
39#define ATMEL_US_MR 0x04 /* Mode Register */ 44#define ATMEL_US_MR 0x04 /* Mode Register */
40#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */ 45#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
41#define ATMEL_US_USMODE_NORMAL 0 46#define ATMEL_US_USMODE_NORMAL 0
42#define ATMEL_US_USMODE_RS485 1 47#define ATMEL_US_USMODE_RS485 1
43#define ATMEL_US_USMODE_HWHS 2 48#define ATMEL_US_USMODE_HWHS 2
44#define ATMEL_US_USMODE_MODEM 3 49#define ATMEL_US_USMODE_MODEM 3
45#define ATMEL_US_USMODE_ISO7816_T0 4 50#define ATMEL_US_USMODE_ISO7816_T0 4
46#define ATMEL_US_USMODE_ISO7816_T1 6 51#define ATMEL_US_USMODE_ISO7816_T1 6
47#define ATMEL_US_USMODE_IRDA 8 52#define ATMEL_US_USMODE_IRDA 8
48#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */ 53#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
49#define ATMEL_US_USCLKS_MCK (0 << 4) 54#define ATMEL_US_USCLKS_MCK (0 << 4)
50#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4) 55#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
51#define ATMEL_US_USCLKS_SCK (3 << 4) 56#define ATMEL_US_USCLKS_SCK (3 << 4)
52#define ATMEL_US_CHRL (3 << 6) /* Character Length */ 57#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
53#define ATMEL_US_CHRL_5 (0 << 6) 58#define ATMEL_US_CHRL_5 (0 << 6)
54#define ATMEL_US_CHRL_6 (1 << 6) 59#define ATMEL_US_CHRL_6 (1 << 6)
55#define ATMEL_US_CHRL_7 (2 << 6) 60#define ATMEL_US_CHRL_7 (2 << 6)
56#define ATMEL_US_CHRL_8 (3 << 6) 61#define ATMEL_US_CHRL_8 (3 << 6)
57#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */ 62#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
58#define ATMEL_US_PAR (7 << 9) /* Parity Type */ 63#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
59#define ATMEL_US_PAR_EVEN (0 << 9) 64#define ATMEL_US_PAR_EVEN (0 << 9)
60#define ATMEL_US_PAR_ODD (1 << 9) 65#define ATMEL_US_PAR_ODD (1 << 9)
61#define ATMEL_US_PAR_SPACE (2 << 9) 66#define ATMEL_US_PAR_SPACE (2 << 9)
62#define ATMEL_US_PAR_MARK (3 << 9) 67#define ATMEL_US_PAR_MARK (3 << 9)
63#define ATMEL_US_PAR_NONE (4 << 9) 68#define ATMEL_US_PAR_NONE (4 << 9)
64#define ATMEL_US_PAR_MULTI_DROP (6 << 9) 69#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
65#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */ 70#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
66#define ATMEL_US_NBSTOP_1 (0 << 12) 71#define ATMEL_US_NBSTOP_1 (0 << 12)
67#define ATMEL_US_NBSTOP_1_5 (1 << 12) 72#define ATMEL_US_NBSTOP_1_5 (1 << 12)
68#define ATMEL_US_NBSTOP_2 (2 << 12) 73#define ATMEL_US_NBSTOP_2 (2 << 12)
69#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */ 74#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
70#define ATMEL_US_CHMODE_NORMAL (0 << 14) 75#define ATMEL_US_CHMODE_NORMAL (0 << 14)
71#define ATMEL_US_CHMODE_ECHO (1 << 14) 76#define ATMEL_US_CHMODE_ECHO (1 << 14)
72#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14) 77#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
73#define ATMEL_US_CHMODE_REM_LOOP (3 << 14) 78#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
74#define ATMEL_US_MSBF (1 << 16) /* Bit Order */ 79#define ATMEL_US_MSBF BIT(16) /* Bit Order */
75#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */ 80#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
76#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */ 81#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
77#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */ 82#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
78#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */ 83#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
79#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */ 84#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
80#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */ 85#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
81#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */ 86#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
82 87
83#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */ 88#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
84#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */ 89#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
85#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */ 90#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
86#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */ 91#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
87#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */ 92#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
88#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */ 93#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
89#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */ 94#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
90#define ATMEL_US_FRAME (1 << 6) /* Framing Error */ 95#define ATMEL_US_FRAME BIT(6) /* Framing Error */
91#define ATMEL_US_PARE (1 << 7) /* Parity Error */ 96#define ATMEL_US_PARE BIT(7) /* Parity Error */
92#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */ 97#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
93#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */ 98#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
94#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */ 99#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
95#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */ 100#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
96#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */ 101#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
97#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */ 102#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
98#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */ 103#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
99#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */ 104#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
100#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */ 105#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
101#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */ 106#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
102#define ATMEL_US_RI (1 << 20) /* RI */ 107#define ATMEL_US_RI BIT(20) /* RI */
103#define ATMEL_US_DSR (1 << 21) /* DSR */ 108#define ATMEL_US_DSR BIT(21) /* DSR */
104#define ATMEL_US_DCD (1 << 22) /* DCD */ 109#define ATMEL_US_DCD BIT(22) /* DCD */
105#define ATMEL_US_CTS (1 << 23) /* CTS */ 110#define ATMEL_US_CTS BIT(23) /* CTS */
106 111
107#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */ 112#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
108#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */ 113#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
109#define ATMEL_US_CSR 0x14 /* Channel Status Register */ 114#define ATMEL_US_CSR 0x14 /* Channel Status Register */
110#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */ 115#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
111#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */ 116#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
112#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */ 117#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
113 118
114#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */ 119#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
115#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */ 120#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
116 121
117#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */ 122#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
118#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */ 123#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
119 124
120#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */ 125#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
121#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */ 126#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
122 127
123#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */ 128#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
124#define ATMEL_US_NER 0x44 /* Number of Errors Register */ 129#define ATMEL_US_NER 0x44 /* Number of Errors Register */
125#define ATMEL_US_IF 0x4c /* IrDA Filter Register */ 130#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
126 131
 127#define ATMEL_US_NAME 0xf0 /* Ip Name */ 132#define ATMEL_US_CMPR 0x90 /* Comparison Register */
128#define ATMEL_US_VERSION 0xfc /* Ip Version */ 133#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
134#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
135#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
136#define ATMEL_US_ONE_DATA 0x0
137#define ATMEL_US_TWO_DATA 0x1
138#define ATMEL_US_FOUR_DATA 0x2
139#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
140#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
141#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
142#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
143
144#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
145#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
146#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
147
148#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
149#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
150#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
151#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
152#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
153#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
154#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
155#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
156#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
157#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
158#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
159#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
160#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
161#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
162
163#define ATMEL_US_NAME 0xf0 /* Ip Name */
164#define ATMEL_US_VERSION 0xfc /* Ip Version */
129 165
130#endif 166#endif
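
Editor's note: the switch from open-coded shifts to BIT()/GENMASK() above is a readability change — BIT(n) is the single-bit mask 1UL << n and GENMASK(h, l) covers bits l through h, so GENMASK(3, 0) matches the old (0xf << 0). A minimal sketch (hypothetical helper, not part of the patch) of composing a mode-register value from the masks defined in this header:

#include <linux/bitops.h>
#include <linux/atmel_serial.h>

/* Hypothetical helper: ATMEL_US_MR value for an asynchronous 8N1 setup. */
static inline u32 atmel_us_mr_8n1(void)
{
	return ATMEL_US_USMODE_NORMAL |		/* plain UART mode */
	       ATMEL_US_USCLKS_MCK |		/* baud clock from MCK */
	       ATMEL_US_CHRL_8 |		/* 8 data bits */
	       ATMEL_US_PAR_NONE |		/* no parity */
	       ATMEL_US_NBSTOP_1;		/* one stop bit */
}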
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..00a5763e850e 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
2#ifndef _LINUX_ATOMIC_H 2#ifndef _LINUX_ATOMIC_H
3#define _LINUX_ATOMIC_H 3#define _LINUX_ATOMIC_H
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <asm/barrier.h>
6
7/*
8 * Relaxed variants of xchg, cmpxchg and some atomic operations.
9 *
10 * We support four variants:
11 *
12 * - Fully ordered: The default implementation, no suffix required.
13 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
14 * - Release: Provides RELEASE semantics, _release suffix.
15 * - Relaxed: No ordering guarantees, _relaxed suffix.
16 *
17 * For compound atomics performing both a load and a store, ACQUIRE
18 * semantics apply only to the load and RELEASE semantics only to the
19 * store portion of the operation. Note that a failed cmpxchg_acquire
20 * does -not- imply any memory ordering constraints.
21 *
22 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
23 */
24
25#ifndef atomic_read_acquire
26#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
27#endif
28
29#ifndef atomic_set_release
30#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
31#endif
32
33/*
34 * The idea here is to build acquire/release variants by adding explicit
35 * barriers on top of the relaxed variant. In the case where the relaxed
36 * variant is already fully ordered, no additional barriers are needed.
37 */
38#define __atomic_op_acquire(op, args...) \
39({ \
40 typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
41 smp_mb__after_atomic(); \
42 __ret; \
43})
44
45#define __atomic_op_release(op, args...) \
46({ \
47 smp_mb__before_atomic(); \
48 op##_relaxed(args); \
49})
50
51#define __atomic_op_fence(op, args...) \
52({ \
53 typeof(op##_relaxed(args)) __ret; \
54 smp_mb__before_atomic(); \
55 __ret = op##_relaxed(args); \
56 smp_mb__after_atomic(); \
57 __ret; \
58})
59
60/* atomic_add_return_relaxed */
61#ifndef atomic_add_return_relaxed
62#define atomic_add_return_relaxed atomic_add_return
63#define atomic_add_return_acquire atomic_add_return
64#define atomic_add_return_release atomic_add_return
65
66#else /* atomic_add_return_relaxed */
67
68#ifndef atomic_add_return_acquire
69#define atomic_add_return_acquire(...) \
70 __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
71#endif
72
73#ifndef atomic_add_return_release
74#define atomic_add_return_release(...) \
75 __atomic_op_release(atomic_add_return, __VA_ARGS__)
76#endif
77
78#ifndef atomic_add_return
79#define atomic_add_return(...) \
80 __atomic_op_fence(atomic_add_return, __VA_ARGS__)
81#endif
82#endif /* atomic_add_return_relaxed */
83
84/* atomic_sub_return_relaxed */
85#ifndef atomic_sub_return_relaxed
86#define atomic_sub_return_relaxed atomic_sub_return
87#define atomic_sub_return_acquire atomic_sub_return
88#define atomic_sub_return_release atomic_sub_return
89
90#else /* atomic_sub_return_relaxed */
91
92#ifndef atomic_sub_return_acquire
93#define atomic_sub_return_acquire(...) \
94 __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
95#endif
96
97#ifndef atomic_sub_return_release
98#define atomic_sub_return_release(...) \
99 __atomic_op_release(atomic_sub_return, __VA_ARGS__)
100#endif
101
102#ifndef atomic_sub_return
103#define atomic_sub_return(...) \
104 __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
105#endif
106#endif /* atomic_sub_return_relaxed */
107
108/* atomic_xchg_relaxed */
109#ifndef atomic_xchg_relaxed
110#define atomic_xchg_relaxed atomic_xchg
111#define atomic_xchg_acquire atomic_xchg
112#define atomic_xchg_release atomic_xchg
113
114#else /* atomic_xchg_relaxed */
115
116#ifndef atomic_xchg_acquire
117#define atomic_xchg_acquire(...) \
118 __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
119#endif
120
121#ifndef atomic_xchg_release
122#define atomic_xchg_release(...) \
123 __atomic_op_release(atomic_xchg, __VA_ARGS__)
124#endif
125
126#ifndef atomic_xchg
127#define atomic_xchg(...) \
128 __atomic_op_fence(atomic_xchg, __VA_ARGS__)
129#endif
130#endif /* atomic_xchg_relaxed */
131
132/* atomic_cmpxchg_relaxed */
133#ifndef atomic_cmpxchg_relaxed
134#define atomic_cmpxchg_relaxed atomic_cmpxchg
135#define atomic_cmpxchg_acquire atomic_cmpxchg
136#define atomic_cmpxchg_release atomic_cmpxchg
137
138#else /* atomic_cmpxchg_relaxed */
139
140#ifndef atomic_cmpxchg_acquire
141#define atomic_cmpxchg_acquire(...) \
142 __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
143#endif
144
145#ifndef atomic_cmpxchg_release
146#define atomic_cmpxchg_release(...) \
147 __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
148#endif
149
150#ifndef atomic_cmpxchg
151#define atomic_cmpxchg(...) \
152 __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
153#endif
154#endif /* atomic_cmpxchg_relaxed */
155
156#ifndef atomic64_read_acquire
157#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
158#endif
159
160#ifndef atomic64_set_release
161#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
162#endif
163
164/* atomic64_add_return_relaxed */
165#ifndef atomic64_add_return_relaxed
166#define atomic64_add_return_relaxed atomic64_add_return
167#define atomic64_add_return_acquire atomic64_add_return
168#define atomic64_add_return_release atomic64_add_return
169
170#else /* atomic64_add_return_relaxed */
171
172#ifndef atomic64_add_return_acquire
173#define atomic64_add_return_acquire(...) \
174 __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
175#endif
176
177#ifndef atomic64_add_return_release
178#define atomic64_add_return_release(...) \
179 __atomic_op_release(atomic64_add_return, __VA_ARGS__)
180#endif
181
182#ifndef atomic64_add_return
183#define atomic64_add_return(...) \
184 __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
185#endif
186#endif /* atomic64_add_return_relaxed */
187
188/* atomic64_sub_return_relaxed */
189#ifndef atomic64_sub_return_relaxed
190#define atomic64_sub_return_relaxed atomic64_sub_return
191#define atomic64_sub_return_acquire atomic64_sub_return
192#define atomic64_sub_return_release atomic64_sub_return
193
194#else /* atomic64_sub_return_relaxed */
195
196#ifndef atomic64_sub_return_acquire
197#define atomic64_sub_return_acquire(...) \
198 __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
199#endif
200
201#ifndef atomic64_sub_return_release
202#define atomic64_sub_return_release(...) \
203 __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
204#endif
205
206#ifndef atomic64_sub_return
207#define atomic64_sub_return(...) \
208 __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
209#endif
210#endif /* atomic64_sub_return_relaxed */
211
212/* atomic64_xchg_relaxed */
213#ifndef atomic64_xchg_relaxed
214#define atomic64_xchg_relaxed atomic64_xchg
215#define atomic64_xchg_acquire atomic64_xchg
216#define atomic64_xchg_release atomic64_xchg
217
218#else /* atomic64_xchg_relaxed */
219
220#ifndef atomic64_xchg_acquire
221#define atomic64_xchg_acquire(...) \
222 __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
223#endif
224
225#ifndef atomic64_xchg_release
226#define atomic64_xchg_release(...) \
227 __atomic_op_release(atomic64_xchg, __VA_ARGS__)
228#endif
229
230#ifndef atomic64_xchg
231#define atomic64_xchg(...) \
232 __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
233#endif
234#endif /* atomic64_xchg_relaxed */
235
236/* atomic64_cmpxchg_relaxed */
237#ifndef atomic64_cmpxchg_relaxed
238#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
239#define atomic64_cmpxchg_acquire atomic64_cmpxchg
240#define atomic64_cmpxchg_release atomic64_cmpxchg
241
242#else /* atomic64_cmpxchg_relaxed */
243
244#ifndef atomic64_cmpxchg_acquire
245#define atomic64_cmpxchg_acquire(...) \
246 __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
247#endif
248
249#ifndef atomic64_cmpxchg_release
250#define atomic64_cmpxchg_release(...) \
251 __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
252#endif
253
254#ifndef atomic64_cmpxchg
255#define atomic64_cmpxchg(...) \
256 __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
257#endif
258#endif /* atomic64_cmpxchg_relaxed */
259
260/* cmpxchg_relaxed */
261#ifndef cmpxchg_relaxed
262#define cmpxchg_relaxed cmpxchg
263#define cmpxchg_acquire cmpxchg
264#define cmpxchg_release cmpxchg
265
266#else /* cmpxchg_relaxed */
267
268#ifndef cmpxchg_acquire
269#define cmpxchg_acquire(...) \
270 __atomic_op_acquire(cmpxchg, __VA_ARGS__)
271#endif
272
273#ifndef cmpxchg_release
274#define cmpxchg_release(...) \
275 __atomic_op_release(cmpxchg, __VA_ARGS__)
276#endif
277
278#ifndef cmpxchg
279#define cmpxchg(...) \
280 __atomic_op_fence(cmpxchg, __VA_ARGS__)
281#endif
282#endif /* cmpxchg_relaxed */
283
284/* cmpxchg64_relaxed */
285#ifndef cmpxchg64_relaxed
286#define cmpxchg64_relaxed cmpxchg64
287#define cmpxchg64_acquire cmpxchg64
288#define cmpxchg64_release cmpxchg64
289
290#else /* cmpxchg64_relaxed */
291
292#ifndef cmpxchg64_acquire
293#define cmpxchg64_acquire(...) \
294 __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
295#endif
296
297#ifndef cmpxchg64_release
298#define cmpxchg64_release(...) \
299 __atomic_op_release(cmpxchg64, __VA_ARGS__)
300#endif
301
302#ifndef cmpxchg64
303#define cmpxchg64(...) \
304 __atomic_op_fence(cmpxchg64, __VA_ARGS__)
305#endif
306#endif /* cmpxchg64_relaxed */
307
308/* xchg_relaxed */
309#ifndef xchg_relaxed
310#define xchg_relaxed xchg
311#define xchg_acquire xchg
312#define xchg_release xchg
313
314#else /* xchg_relaxed */
315
316#ifndef xchg_acquire
317#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
318#endif
319
320#ifndef xchg_release
321#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
322#endif
323
324#ifndef xchg
325#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
326#endif
327#endif /* xchg_relaxed */
5 328
6/** 329/**
7 * atomic_add_unless - add unless the number is already a given value 330 * atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
28#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 351#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
29#endif 352#endif
30 353
354#ifndef atomic_andnot
355static inline void atomic_andnot(int i, atomic_t *v)
356{
357 atomic_and(~i, v);
358}
359#endif
360
361static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
362{
363 atomic_andnot(mask, v);
364}
365
366static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
367{
368 atomic_or(mask, v);
369}
370
31/** 371/**
32 * atomic_inc_not_zero_hint - increment if not null 372 * atomic_inc_not_zero_hint - increment if not null
33 * @v: pointer of type atomic_t 373 * @v: pointer of type atomic_t
@@ -111,21 +451,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
111} 451}
112#endif 452#endif
113 453
114#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
115static inline void atomic_or(int i, atomic_t *v)
116{
117 int old;
118 int new;
119
120 do {
121 old = atomic_read(v);
122 new = old | i;
123 } while (atomic_cmpxchg(v, old, new) != old);
124}
125#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
126
127#include <asm-generic/atomic-long.h> 454#include <asm-generic/atomic-long.h>
128#ifdef CONFIG_GENERIC_ATOMIC64 455#ifdef CONFIG_GENERIC_ATOMIC64
129#include <asm-generic/atomic64.h> 456#include <asm-generic/atomic64.h>
130#endif 457#endif
458
459#ifndef atomic64_andnot
460static inline void atomic64_andnot(long long i, atomic64_t *v)
461{
462 atomic64_and(~i, v);
463}
464#endif
465
131#endif /* _LINUX_ATOMIC_H */ 466#endif /* _LINUX_ATOMIC_H */
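
Editor's note: the block added above builds the _acquire and _release forms from a _relaxed primitive plus smp_mb__before_atomic()/smp_mb__after_atomic(), and falls back to the fully ordered operation when an architecture provides no relaxed variant. A minimal message-passing sketch (hypothetical producer/consumer, not from the patch) showing where the two orderings are actually needed:

#include <linux/atomic.h>
#include <linux/errno.h>

/* Hypothetical publish/consume pair built on the new helpers. */
static void publish(atomic_t *ready, int *data, int val)
{
	*data = val;				/* write the payload first */
	atomic_set_release(ready, 1);		/* RELEASE: payload visible before the flag */
}

static int consume(atomic_t *ready, const int *data)
{
	if (!atomic_read_acquire(ready))	/* ACQUIRE: pairs with the release above */
		return -EAGAIN;
	return *data;				/* guaranteed to observe the payload */
}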
diff --git a/include/linux/audit.h b/include/linux/audit.h
index c2e7e3a83965..b2abc996c25d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -27,6 +27,9 @@
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <uapi/linux/audit.h> 28#include <uapi/linux/audit.h>
29 29
30#define AUDIT_INO_UNSET ((unsigned long)-1)
31#define AUDIT_DEV_UNSET ((dev_t)-1)
32
30struct audit_sig_info { 33struct audit_sig_info {
31 uid_t uid; 34 uid_t uid;
32 pid_t pid; 35 pid_t pid;
@@ -59,6 +62,7 @@ struct audit_krule {
59 struct audit_field *inode_f; /* quick access to an inode field */ 62 struct audit_field *inode_f; /* quick access to an inode field */
60 struct audit_watch *watch; /* associated watch */ 63 struct audit_watch *watch; /* associated watch */
61 struct audit_tree *tree; /* associated watched tree */ 64 struct audit_tree *tree; /* associated watched tree */
65 struct audit_fsnotify_mark *exe;
62 struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ 66 struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
63 struct list_head list; /* for AUDIT_LIST* purposes only */ 67 struct list_head list; /* for AUDIT_LIST* purposes only */
64 u64 prio; 68 u64 prio;
diff --git a/include/linux/average.h b/include/linux/average.h
index c6028fd742c1..d04aa58280de 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -3,28 +3,43 @@
3 3
4/* Exponentially weighted moving average (EWMA) */ 4/* Exponentially weighted moving average (EWMA) */
5 5
6/* For more documentation see lib/average.c */ 6#define DECLARE_EWMA(name, _factor, _weight) \
7 7 struct ewma_##name { \
8struct ewma { 8 unsigned long internal; \
9 unsigned long internal; 9 }; \
10 unsigned long factor; 10 static inline void ewma_##name##_init(struct ewma_##name *e) \
11 unsigned long weight; 11 { \
12}; 12 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
13 13 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
14extern void ewma_init(struct ewma *avg, unsigned long factor, 14 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
15 unsigned long weight); 15 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
16 16 e->internal = 0; \
17extern struct ewma *ewma_add(struct ewma *avg, unsigned long val); 17 } \
18 18 static inline unsigned long \
19/** 19 ewma_##name##_read(struct ewma_##name *e) \
20 * ewma_read() - Get average value 20 { \
21 * @avg: Average structure 21 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
22 * 22 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
23 * Returns the average value held in @avg. 23 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
24 */ 24 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
25static inline unsigned long ewma_read(const struct ewma *avg) 25 return e->internal >> ilog2(_factor); \
26{ 26 } \
27 return avg->internal >> avg->factor; 27 static inline void ewma_##name##_add(struct ewma_##name *e, \
28} 28 unsigned long val) \
29 { \
30 unsigned long internal = ACCESS_ONCE(e->internal); \
31 unsigned long weight = ilog2(_weight); \
32 unsigned long factor = ilog2(_factor); \
33 \
34 BUILD_BUG_ON(!__builtin_constant_p(_factor)); \
35 BUILD_BUG_ON(!__builtin_constant_p(_weight)); \
36 BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \
37 BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \
38 \
39 ACCESS_ONCE(e->internal) = internal ? \
40 (((internal << weight) - internal) + \
41 (val << factor)) >> weight : \
42 (val << factor); \
43 }
29 44
30#endif /* _LINUX_AVERAGE_H */ 45#endif /* _LINUX_AVERAGE_H */
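
Editor's note: DECLARE_EWMA() now generates a dedicated type plus init/add/read helpers per user, with the factor and weight baked in at compile time (both must be powers of two). A minimal usage sketch, with a hypothetical name and parameters chosen only for illustration:

#include <linux/average.h>

/* Hypothetical average: precision factor 16, decay weight 8. */
DECLARE_EWMA(sig_strength, 16, 8)

struct ap_stats {
	struct ewma_sig_strength avg_rssi;
};

static void ap_stats_init(struct ap_stats *s)
{
	ewma_sig_strength_init(&s->avg_rssi);
}

static void ap_stats_sample(struct ap_stats *s, unsigned long rssi)
{
	ewma_sig_strength_add(&s->avg_rssi, rssi);	/* fold in one sample */
}

static unsigned long ap_stats_avg(struct ap_stats *s)
{
	return ewma_sig_strength_read(&s->avg_rssi);	/* scaled back by the factor */
}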
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0fe9df983ab7..5a5d79ee256f 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -286,7 +286,7 @@ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi
286 * %current's blkcg equals the effective blkcg of its memcg. No 286 * %current's blkcg equals the effective blkcg of its memcg. No
287 * need to use the relatively expensive cgroup_get_e_css(). 287 * need to use the relatively expensive cgroup_get_e_css().
288 */ 288 */
289 if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id))) 289 if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
290 return wb; 290 return wb;
291 return NULL; 291 return NULL;
292} 292}
@@ -402,7 +402,7 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
402} 402}
403 403
404struct wb_iter { 404struct wb_iter {
405 int start_blkcg_id; 405 int start_memcg_id;
406 struct radix_tree_iter tree_iter; 406 struct radix_tree_iter tree_iter;
407 void **slot; 407 void **slot;
408}; 408};
@@ -414,9 +414,9 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
414 414
415 WARN_ON_ONCE(!rcu_read_lock_held()); 415 WARN_ON_ONCE(!rcu_read_lock_held());
416 416
417 if (iter->start_blkcg_id >= 0) { 417 if (iter->start_memcg_id >= 0) {
418 iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id); 418 iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
419 iter->start_blkcg_id = -1; 419 iter->start_memcg_id = -1;
420 } else { 420 } else {
421 iter->slot = radix_tree_next_slot(iter->slot, titer, 0); 421 iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
422 } 422 }
@@ -430,30 +430,30 @@ static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
430 430
431static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, 431static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
432 struct backing_dev_info *bdi, 432 struct backing_dev_info *bdi,
433 int start_blkcg_id) 433 int start_memcg_id)
434{ 434{
435 iter->start_blkcg_id = start_blkcg_id; 435 iter->start_memcg_id = start_memcg_id;
436 436
437 if (start_blkcg_id) 437 if (start_memcg_id)
438 return __wb_iter_next(iter, bdi); 438 return __wb_iter_next(iter, bdi);
439 else 439 else
440 return &bdi->wb; 440 return &bdi->wb;
441} 441}
442 442
443/** 443/**
444 * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order 444 * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
445 * @wb_cur: cursor struct bdi_writeback pointer 445 * @wb_cur: cursor struct bdi_writeback pointer
446 * @bdi: bdi to walk wb's of 446 * @bdi: bdi to walk wb's of
447 * @iter: pointer to struct wb_iter to be used as iteration buffer 447 * @iter: pointer to struct wb_iter to be used as iteration buffer
448 * @start_blkcg_id: blkcg ID to start iteration from 448 * @start_memcg_id: memcg ID to start iteration from
449 * 449 *
450 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending 450 * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
451 * blkcg ID order starting from @start_blkcg_id. @iter is struct wb_iter 451 * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter
452 * to be used as temp storage during iteration. rcu_read_lock() must be 452 * to be used as temp storage during iteration. rcu_read_lock() must be
453 * held throughout iteration. 453 * held throughout iteration.
454 */ 454 */
455#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ 455#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \
456 for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id); \ 456 for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \
457 (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) 457 (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
458 458
459#else /* CONFIG_CGROUP_WRITEBACK */ 459#else /* CONFIG_CGROUP_WRITEBACK */
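
Editor's note: apart from the blkcg-to-memcg rename, bdi_for_each_wb() keeps its shape — it visits bdi->wb first and then the per-memcg writeback domains in ascending memcg ID order, under the RCU read lock as the comment requires. A minimal iteration sketch (hypothetical counting helper, illustration only):

#include <linux/backing-dev.h>

/* Hypothetical helper: count the writeback domains of @bdi. */
static unsigned long count_wbs(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	struct wb_iter iter;
	unsigned long nr = 0;

	rcu_read_lock();
	bdi_for_each_wb(wb, bdi, &iter, 0)	/* start at memcg ID 0, i.e. bdi->wb */
		nr++;
	rcu_read_unlock();

	return nr;
}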
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 14eea946e640..ed3768f4ecc7 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -75,5 +75,6 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
75#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */ 75#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
76#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) 76#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
77#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ 77#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
78#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
78 79
79#endif /* __BASIC_MMIO_GPIO_H */ 80#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf65ca2..cf038431a5cc 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@ struct bcma_drv_cc {
640 spinlock_t gpio_lock; 640 spinlock_t gpio_lock;
641#ifdef CONFIG_BCMA_DRIVER_GPIO 641#ifdef CONFIG_BCMA_DRIVER_GPIO
642 struct gpio_chip gpio; 642 struct gpio_chip gpio;
643 struct irq_domain *irq_domain;
644#endif 643#endif
645}; 644};
646 645
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5e963a6d7c14..b9b6e046b52e 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -187,17 +187,6 @@ static inline void *bio_data(struct bio *bio)
187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) 187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
188 188
189/* 189/*
190 * Check if adding a bio_vec after bprv with offset would create a gap in
191 * the SG list. Most drivers don't care about this, but some do.
192 */
193static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
194{
195 return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
196}
197
198#define bio_io_error(bio) bio_endio((bio), -EIO)
199
200/*
201 * drivers should _never_ use the all version - the bio may have been split 190 * drivers should _never_ use the all version - the bio may have been split
202 * before it got to the driver and the driver won't own all of it 191 * before it got to the driver and the driver won't own all of it
203 */ 192 */
@@ -306,6 +295,21 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
306 atomic_set(&bio->__bi_cnt, count); 295 atomic_set(&bio->__bi_cnt, count);
307} 296}
308 297
298static inline bool bio_flagged(struct bio *bio, unsigned int bit)
299{
300 return (bio->bi_flags & (1U << bit)) != 0;
301}
302
303static inline void bio_set_flag(struct bio *bio, unsigned int bit)
304{
305 bio->bi_flags |= (1U << bit);
306}
307
308static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
309{
310 bio->bi_flags &= ~(1U << bit);
311}
312
309enum bip_flags { 313enum bip_flags {
310 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ 314 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
311 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ 315 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -426,7 +430,14 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
426 430
427} 431}
428 432
429extern void bio_endio(struct bio *, int); 433extern void bio_endio(struct bio *);
434
435static inline void bio_io_error(struct bio *bio)
436{
437 bio->bi_error = -EIO;
438 bio_endio(bio);
439}
440
430struct request_queue; 441struct request_queue;
431extern int bio_phys_segments(struct request_queue *, struct bio *); 442extern int bio_phys_segments(struct request_queue *, struct bio *);
432 443
@@ -440,7 +451,6 @@ void bio_chain(struct bio *, struct bio *);
440extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); 451extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
441extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 452extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
442 unsigned int, unsigned int); 453 unsigned int, unsigned int);
443extern int bio_get_nr_vecs(struct block_device *);
444struct rq_map_data; 454struct rq_map_data;
445extern struct bio *bio_map_user_iov(struct request_queue *, 455extern struct bio *bio_map_user_iov(struct request_queue *,
446 const struct iov_iter *, gfp_t); 456 const struct iov_iter *, gfp_t);
@@ -717,7 +727,7 @@ extern void bio_integrity_free(struct bio *);
717extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); 727extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
718extern bool bio_integrity_enabled(struct bio *bio); 728extern bool bio_integrity_enabled(struct bio *bio);
719extern int bio_integrity_prep(struct bio *); 729extern int bio_integrity_prep(struct bio *);
720extern void bio_integrity_endio(struct bio *, int); 730extern void bio_integrity_endio(struct bio *);
721extern void bio_integrity_advance(struct bio *, unsigned int); 731extern void bio_integrity_advance(struct bio *, unsigned int);
722extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); 732extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
723extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); 733extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
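
Editor's note: with this rework bio_endio() takes no error argument any more — errors travel in bio->bi_error, bio_io_error() becomes a small inline wrapper, and bi_flags is manipulated through bio_set_flag()/bio_clear_flag()/bio_flagged(). A minimal completion-path sketch (hypothetical stacking driver, names illustrative) of the new convention:

#include <linux/bio.h>

/* Hypothetical endio callback of a driver that clones bios. */
static void my_clone_endio(struct bio *clone)
{
	struct bio *parent = clone->bi_private;

	if (clone->bi_error)
		parent->bi_error = clone->bi_error;	/* propagate the error code */

	bio_put(clone);
	bio_endio(parent);			/* complete the parent, no error argument */
}

static void my_reject_bio(struct bio *bio)
{
	bio_io_error(bio);			/* sets bi_error = -EIO and completes */
}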
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index ea17cca9e685..9653fdb76a42 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -295,7 +295,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
295 return find_first_zero_bit(src, nbits) == nbits; 295 return find_first_zero_bit(src, nbits) == nbits;
296} 296}
297 297
298static inline int bitmap_weight(const unsigned long *src, unsigned int nbits) 298static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
299{ 299{
300 if (small_const_nbits(nbits)) 300 if (small_const_nbits(nbits))
301 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); 301 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 297f5bda4fdf..e63553386ae7 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w);
57 (bit) < (size); \ 57 (bit) < (size); \
58 (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) 58 (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
59 59
60static __inline__ int get_bitmask_order(unsigned int count) 60static inline int get_bitmask_order(unsigned int count)
61{ 61{
62 int order; 62 int order;
63 63
@@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
65 return order; /* We could be slightly more clever with -1 here... */ 65 return order; /* We could be slightly more clever with -1 here... */
66} 66}
67 67
68static __inline__ int get_count_order(unsigned int count) 68static inline int get_count_order(unsigned int count)
69{ 69{
70 int order; 70 int order;
71 71
@@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count)
75 return order; 75 return order;
76} 76}
77 77
78static inline unsigned long hweight_long(unsigned long w) 78static __always_inline unsigned long hweight_long(unsigned long w)
79{ 79{
80 return sizeof(w) == 4 ? hweight32(w) : hweight64(w); 80 return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
81} 81}
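
Editor's note: marking hweight_long() __always_inline lets the sizeof() selection and a constant argument collapse at compile time. A trivial sketch (hypothetical constant, illustration only):

#include <linux/bitops.h>

/* Hypothetical: the popcount of a constant mask folds to 8 at compile time. */
static inline unsigned int nr_enabled_lanes(void)
{
	return hweight_long(0xf0f0UL);
}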
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1b62d768c7df..0a5cc7a1109b 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,12 +14,15 @@
14 */ 14 */
15 15
16#include <linux/cgroup.h> 16#include <linux/cgroup.h>
17#include <linux/u64_stats_sync.h> 17#include <linux/percpu_counter.h>
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <linux/radix-tree.h> 19#include <linux/radix-tree.h>
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/atomic.h> 21#include <linux/atomic.h>
22 22
23/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
24#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
25
23/* Max limits for throttle policy */ 26/* Max limits for throttle policy */
24#define THROTL_IOPS_MAX UINT_MAX 27#define THROTL_IOPS_MAX UINT_MAX
25 28
@@ -45,7 +48,7 @@ struct blkcg {
45 struct blkcg_gq *blkg_hint; 48 struct blkcg_gq *blkg_hint;
46 struct hlist_head blkg_list; 49 struct hlist_head blkg_list;
47 50
48 struct blkcg_policy_data *pd[BLKCG_MAX_POLS]; 51 struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
49 52
50 struct list_head all_blkcgs_node; 53 struct list_head all_blkcgs_node;
51#ifdef CONFIG_CGROUP_WRITEBACK 54#ifdef CONFIG_CGROUP_WRITEBACK
@@ -53,14 +56,19 @@ struct blkcg {
53#endif 56#endif
54}; 57};
55 58
59/*
60 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
61 * recursive. Used to carry stats of dead children, and, for blkg_rwstat,
62 * to carry result values from read and sum operations.
63 */
56struct blkg_stat { 64struct blkg_stat {
57 struct u64_stats_sync syncp; 65 struct percpu_counter cpu_cnt;
58 uint64_t cnt; 66 atomic64_t aux_cnt;
59}; 67};
60 68
61struct blkg_rwstat { 69struct blkg_rwstat {
62 struct u64_stats_sync syncp; 70 struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
63 uint64_t cnt[BLKG_RWSTAT_NR]; 71 atomic64_t aux_cnt[BLKG_RWSTAT_NR];
64}; 72};
65 73
66/* 74/*
@@ -68,32 +76,28 @@ struct blkg_rwstat {
68 * request_queue (q). This is used by blkcg policies which need to track 76 * request_queue (q). This is used by blkcg policies which need to track
69 * information per blkcg - q pair. 77 * information per blkcg - q pair.
70 * 78 *
71 * There can be multiple active blkcg policies and each has its private 79 * There can be multiple active blkcg policies and each blkg:policy pair is
72 * data on each blkg, the size of which is determined by 80 * represented by a blkg_policy_data which is allocated and freed by each
73 * blkcg_policy->pd_size. blkcg core allocates and frees such areas 81 * policy's pd_alloc/free_fn() methods. A policy can allocate private data
74 * together with blkg and invokes pd_init/exit_fn() methods. 82 * area by allocating larger data structure which embeds blkg_policy_data
75 * 83 * at the beginning.
76 * Such private data must embed struct blkg_policy_data (pd) at the
77 * beginning and pd_size can't be smaller than pd.
78 */ 84 */
79struct blkg_policy_data { 85struct blkg_policy_data {
80 /* the blkg and policy id this per-policy data belongs to */ 86 /* the blkg and policy id this per-policy data belongs to */
81 struct blkcg_gq *blkg; 87 struct blkcg_gq *blkg;
82 int plid; 88 int plid;
83
84 /* used during policy activation */
85 struct list_head alloc_node;
86}; 89};
87 90
88/* 91/*
89 * Policies that need to keep per-blkcg data which is independent 92 * Policies that need to keep per-blkcg data which is independent from any
90 * from any request_queue associated to it must specify its size 93 * request_queue associated to it should implement cpd_alloc/free_fn()
91 * with the cpd_size field of the blkcg_policy structure and 94 * methods. A policy can allocate private data area by allocating larger
92 * embed a blkcg_policy_data in it. cpd_init() is invoked to let 95 * data structure which embeds blkcg_policy_data at the beginning.
93 * each policy handle per-blkcg data. 96 * cpd_init() is invoked to let each policy handle per-blkcg data.
94 */ 97 */
95struct blkcg_policy_data { 98struct blkcg_policy_data {
96 /* the policy id this per-policy data belongs to */ 99 /* the blkcg and policy id this per-policy data belongs to */
100 struct blkcg *blkcg;
97 int plid; 101 int plid;
98}; 102};
99 103
@@ -123,40 +127,50 @@ struct blkcg_gq {
123 /* is this blkg online? protected by both blkcg and q locks */ 127 /* is this blkg online? protected by both blkcg and q locks */
124 bool online; 128 bool online;
125 129
130 struct blkg_rwstat stat_bytes;
131 struct blkg_rwstat stat_ios;
132
126 struct blkg_policy_data *pd[BLKCG_MAX_POLS]; 133 struct blkg_policy_data *pd[BLKCG_MAX_POLS];
127 134
128 struct rcu_head rcu_head; 135 struct rcu_head rcu_head;
129}; 136};
130 137
131typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg); 138typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
132typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg); 139typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
133typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg); 140typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
134typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg); 141typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
135typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg); 142typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
136typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg); 143typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
144typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
145typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
146typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
147typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
137 148
138struct blkcg_policy { 149struct blkcg_policy {
139 int plid; 150 int plid;
140 /* policy specific private data size */
141 size_t pd_size;
142 /* policy specific per-blkcg data size */
143 size_t cpd_size;
144 /* cgroup files for the policy */ 151 /* cgroup files for the policy */
145 struct cftype *cftypes; 152 struct cftype *dfl_cftypes;
153 struct cftype *legacy_cftypes;
146 154
147 /* operations */ 155 /* operations */
156 blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
148 blkcg_pol_init_cpd_fn *cpd_init_fn; 157 blkcg_pol_init_cpd_fn *cpd_init_fn;
158 blkcg_pol_free_cpd_fn *cpd_free_fn;
159 blkcg_pol_bind_cpd_fn *cpd_bind_fn;
160
161 blkcg_pol_alloc_pd_fn *pd_alloc_fn;
149 blkcg_pol_init_pd_fn *pd_init_fn; 162 blkcg_pol_init_pd_fn *pd_init_fn;
150 blkcg_pol_online_pd_fn *pd_online_fn; 163 blkcg_pol_online_pd_fn *pd_online_fn;
151 blkcg_pol_offline_pd_fn *pd_offline_fn; 164 blkcg_pol_offline_pd_fn *pd_offline_fn;
152 blkcg_pol_exit_pd_fn *pd_exit_fn; 165 blkcg_pol_free_pd_fn *pd_free_fn;
153 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; 166 blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
154}; 167};
155 168
156extern struct blkcg blkcg_root; 169extern struct blkcg blkcg_root;
157extern struct cgroup_subsys_state * const blkcg_root_css; 170extern struct cgroup_subsys_state * const blkcg_root_css;
158 171
159struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q); 172struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
173 struct request_queue *q, bool update_hint);
160struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, 174struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
161 struct request_queue *q); 175 struct request_queue *q);
162int blkcg_init_queue(struct request_queue *q); 176int blkcg_init_queue(struct request_queue *q);
@@ -171,6 +185,7 @@ int blkcg_activate_policy(struct request_queue *q,
171void blkcg_deactivate_policy(struct request_queue *q, 185void blkcg_deactivate_policy(struct request_queue *q,
172 const struct blkcg_policy *pol); 186 const struct blkcg_policy *pol);
173 187
188const char *blkg_dev_name(struct blkcg_gq *blkg);
174void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, 189void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
175 u64 (*prfill)(struct seq_file *, 190 u64 (*prfill)(struct seq_file *,
176 struct blkg_policy_data *, int), 191 struct blkg_policy_data *, int),
@@ -182,19 +197,24 @@ u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
182u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); 197u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
183u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, 198u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
184 int off); 199 int off);
200int blkg_print_stat_bytes(struct seq_file *sf, void *v);
201int blkg_print_stat_ios(struct seq_file *sf, void *v);
202int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
203int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
185 204
186u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off); 205u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
187struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd, 206 struct blkcg_policy *pol, int off);
188 int off); 207struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
208 struct blkcg_policy *pol, int off);
189 209
190struct blkg_conf_ctx { 210struct blkg_conf_ctx {
191 struct gendisk *disk; 211 struct gendisk *disk;
192 struct blkcg_gq *blkg; 212 struct blkcg_gq *blkg;
193 u64 v; 213 char *body;
194}; 214};
195 215
196int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, 216int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
197 const char *input, struct blkg_conf_ctx *ctx); 217 char *input, struct blkg_conf_ctx *ctx);
198void blkg_conf_finish(struct blkg_conf_ctx *ctx); 218void blkg_conf_finish(struct blkg_conf_ctx *ctx);
199 219
200 220
@@ -205,7 +225,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
205 225
206static inline struct blkcg *task_blkcg(struct task_struct *tsk) 226static inline struct blkcg *task_blkcg(struct task_struct *tsk)
207{ 227{
208 return css_to_blkcg(task_css(tsk, blkio_cgrp_id)); 228 return css_to_blkcg(task_css(tsk, io_cgrp_id));
209} 229}
210 230
211static inline struct blkcg *bio_blkcg(struct bio *bio) 231static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -218,7 +238,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
218static inline struct cgroup_subsys_state * 238static inline struct cgroup_subsys_state *
219task_get_blkcg_css(struct task_struct *task) 239task_get_blkcg_css(struct task_struct *task)
220{ 240{
221 return task_get_css(task, blkio_cgrp_id); 241 return task_get_css(task, io_cgrp_id);
222} 242}
223 243
224/** 244/**
@@ -233,6 +253,52 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
233} 253}
234 254
235/** 255/**
256 * __blkg_lookup - internal version of blkg_lookup()
257 * @blkcg: blkcg of interest
258 * @q: request_queue of interest
259 * @update_hint: whether to update lookup hint with the result or not
260 *
261 * This is internal version and shouldn't be used by policy
262 * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
263 * @q's bypass state. If @update_hint is %true, the caller should be
264 * holding @q->queue_lock and lookup hint is updated on success.
265 */
266static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
267 struct request_queue *q,
268 bool update_hint)
269{
270 struct blkcg_gq *blkg;
271
272 if (blkcg == &blkcg_root)
273 return q->root_blkg;
274
275 blkg = rcu_dereference(blkcg->blkg_hint);
276 if (blkg && blkg->q == q)
277 return blkg;
278
279 return blkg_lookup_slowpath(blkcg, q, update_hint);
280}
281
282/**
283 * blkg_lookup - lookup blkg for the specified blkcg - q pair
284 * @blkcg: blkcg of interest
285 * @q: request_queue of interest
286 *
287 * Lookup blkg for the @blkcg - @q pair. This function should be called
288 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
289 * - see blk_queue_bypass_start() for details.
290 */
291static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
292 struct request_queue *q)
293{
294 WARN_ON_ONCE(!rcu_read_lock_held());
295
296 if (unlikely(blk_queue_bypass(q)))
297 return NULL;
298 return __blkg_lookup(blkcg, q, false);
299}
300
301/**
236 * blkg_to_pdata - get policy private data 302 * blkg_to_pdata - get policy private data
237 * @blkg: blkg of interest 303 * @blkg: blkg of interest
238 * @pol: policy of interest 304 * @pol: policy of interest
@@ -248,7 +314,7 @@ static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
248static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, 314static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
249 struct blkcg_policy *pol) 315 struct blkcg_policy *pol)
250{ 316{
251 return blkcg ? blkcg->pd[pol->plid] : NULL; 317 return blkcg ? blkcg->cpd[pol->plid] : NULL;
252} 318}
253 319
254/** 320/**
@@ -262,6 +328,11 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
262 return pd ? pd->blkg : NULL; 328 return pd ? pd->blkg : NULL;
263} 329}
264 330
331static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
332{
333 return cpd ? cpd->blkcg : NULL;
334}
335
265/** 336/**
266 * blkg_path - format cgroup path of blkg 337 * blkg_path - format cgroup path of blkg
267 * @blkg: blkg of interest 338 * @blkg: blkg of interest
@@ -309,9 +380,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
309 call_rcu(&blkg->rcu_head, __blkg_release_rcu); 380 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
310} 381}
311 382
312struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
313 bool update_hint);
314
315/** 383/**
316 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants 384 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
317 * @d_blkg: loop cursor pointing to the current descendant 385 * @d_blkg: loop cursor pointing to the current descendant
@@ -373,8 +441,8 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
373 * or if either the blkcg or queue is going away. Fall back to 441 * or if either the blkcg or queue is going away. Fall back to
374 * root_rl in such cases. 442 * root_rl in such cases.
375 */ 443 */
376 blkg = blkg_lookup_create(blkcg, q); 444 blkg = blkg_lookup(blkcg, q);
377 if (unlikely(IS_ERR(blkg))) 445 if (unlikely(!blkg))
378 goto root_rl; 446 goto root_rl;
379 447
380 blkg_get(blkg); 448 blkg_get(blkg);
@@ -394,8 +462,7 @@ root_rl:
394 */ 462 */
395static inline void blk_put_rl(struct request_list *rl) 463static inline void blk_put_rl(struct request_list *rl)
396{ 464{
397 /* root_rl may not have blkg set */ 465 if (rl->blkg->blkcg != &blkcg_root)
398 if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
399 blkg_put(rl->blkg); 466 blkg_put(rl->blkg);
400} 467}
401 468
@@ -433,9 +500,21 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
433#define blk_queue_for_each_rl(rl, q) \ 500#define blk_queue_for_each_rl(rl, q) \
434 for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) 501 for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
435 502
436static inline void blkg_stat_init(struct blkg_stat *stat) 503static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
437{ 504{
438 u64_stats_init(&stat->syncp); 505 int ret;
506
507 ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
508 if (ret)
509 return ret;
510
511 atomic64_set(&stat->aux_cnt, 0);
512 return 0;
513}
514
515static inline void blkg_stat_exit(struct blkg_stat *stat)
516{
517 percpu_counter_destroy(&stat->cpu_cnt);
439} 518}
440 519
441/** 520/**
@@ -443,34 +522,21 @@ static inline void blkg_stat_init(struct blkg_stat *stat)
443 * @stat: target blkg_stat 522 * @stat: target blkg_stat
444 * @val: value to add 523 * @val: value to add
445 * 524 *
446 * Add @val to @stat. The caller is responsible for synchronizing calls to 525 * Add @val to @stat. The caller must ensure that IRQ on the same CPU
447 * this function. 526 * don't re-enter this function for the same counter.
448 */ 527 */
449static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) 528static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
450{ 529{
451 u64_stats_update_begin(&stat->syncp); 530 __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
452 stat->cnt += val;
453 u64_stats_update_end(&stat->syncp);
454} 531}
455 532
456/** 533/**
457 * blkg_stat_read - read the current value of a blkg_stat 534 * blkg_stat_read - read the current value of a blkg_stat
458 * @stat: blkg_stat to read 535 * @stat: blkg_stat to read
459 *
460 * Read the current value of @stat. This function can be called without
 461 * synchronization and takes care of u64 atomicity.
462 */ 536 */
463static inline uint64_t blkg_stat_read(struct blkg_stat *stat) 537static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
464{ 538{
465 unsigned int start; 539 return percpu_counter_sum_positive(&stat->cpu_cnt);
466 uint64_t v;
467
468 do {
469 start = u64_stats_fetch_begin_irq(&stat->syncp);
470 v = stat->cnt;
471 } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
472
473 return v;
474} 540}
475 541
476/** 542/**
@@ -479,24 +545,46 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
479 */ 545 */
480static inline void blkg_stat_reset(struct blkg_stat *stat) 546static inline void blkg_stat_reset(struct blkg_stat *stat)
481{ 547{
482 stat->cnt = 0; 548 percpu_counter_set(&stat->cpu_cnt, 0);
549 atomic64_set(&stat->aux_cnt, 0);
483} 550}
484 551
485/** 552/**
486 * blkg_stat_merge - merge a blkg_stat into another 553 * blkg_stat_add_aux - add a blkg_stat into another's aux count
487 * @to: the destination blkg_stat 554 * @to: the destination blkg_stat
488 * @from: the source 555 * @from: the source
489 * 556 *
490 * Add @from's count to @to. 557 * Add @from's count including the aux one to @to's aux count.
491 */ 558 */
492static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from) 559static inline void blkg_stat_add_aux(struct blkg_stat *to,
560 struct blkg_stat *from)
493{ 561{
494 blkg_stat_add(to, blkg_stat_read(from)); 562 atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
563 &to->aux_cnt);
495} 564}
496 565
497static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat) 566static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
498{ 567{
499 u64_stats_init(&rwstat->syncp); 568 int i, ret;
569
570 for (i = 0; i < BLKG_RWSTAT_NR; i++) {
571 ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
572 if (ret) {
573 while (--i >= 0)
574 percpu_counter_destroy(&rwstat->cpu_cnt[i]);
575 return ret;
576 }
577 atomic64_set(&rwstat->aux_cnt[i], 0);
578 }
579 return 0;
580}
581
582static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
583{
584 int i;
585
586 for (i = 0; i < BLKG_RWSTAT_NR; i++)
587 percpu_counter_destroy(&rwstat->cpu_cnt[i]);
500} 588}
501 589
502/** 590/**
@@ -511,39 +599,38 @@ static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
511static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, 599static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
512 int rw, uint64_t val) 600 int rw, uint64_t val)
513{ 601{
514 u64_stats_update_begin(&rwstat->syncp); 602 struct percpu_counter *cnt;
515 603
516 if (rw & REQ_WRITE) 604 if (rw & REQ_WRITE)
517 rwstat->cnt[BLKG_RWSTAT_WRITE] += val; 605 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
518 else 606 else
519 rwstat->cnt[BLKG_RWSTAT_READ] += val; 607 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
608
609 __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
610
520 if (rw & REQ_SYNC) 611 if (rw & REQ_SYNC)
521 rwstat->cnt[BLKG_RWSTAT_SYNC] += val; 612 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
522 else 613 else
523 rwstat->cnt[BLKG_RWSTAT_ASYNC] += val; 614 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
524 615
525 u64_stats_update_end(&rwstat->syncp); 616 __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
526} 617}
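Each call still increments exactly two of the four buckets: READ or WRITE depending on REQ_WRITE, and SYNC or ASYNC depending on REQ_SYNC, only now through batched per-CPU counters. Two illustrative calls (the rwstat pointer is hypothetical):

/* A synchronous 4 KiB write: bumps BLKG_RWSTAT_WRITE and BLKG_RWSTAT_SYNC. */
blkg_rwstat_add(rwstat, REQ_WRITE | REQ_SYNC, 4096);

/* A plain 512-byte read (no flags set): bumps BLKG_RWSTAT_READ and BLKG_RWSTAT_ASYNC. */
blkg_rwstat_add(rwstat, 0, 512);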
527 618
528/** 619/**
529 * blkg_rwstat_read - read the current values of a blkg_rwstat 620 * blkg_rwstat_read - read the current values of a blkg_rwstat
530 * @rwstat: blkg_rwstat to read 621 * @rwstat: blkg_rwstat to read
531 * 622 *
532 * Read the current snapshot of @rwstat and return it as the return value. 623 * Read the current snapshot of @rwstat and return it in the aux counts.
533 * This function can be called without synchronization and takes care of
534 * u64 atomicity.
535 */ 624 */
536static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) 625static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
537{ 626{
538 unsigned int start; 627 struct blkg_rwstat result;
539 struct blkg_rwstat tmp; 628 int i;
540
541 do {
542 start = u64_stats_fetch_begin_irq(&rwstat->syncp);
543 tmp = *rwstat;
544 } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
545 629
546 return tmp; 630 for (i = 0; i < BLKG_RWSTAT_NR; i++)
631 atomic64_set(&result.aux_cnt[i],
632 percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
633 return result;
547} 634}
548 635
549/** 636/**
@@ -558,7 +645,8 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
558{ 645{
559 struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); 646 struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
560 647
561 return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; 648 return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
649 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
562} 650}
563 651
564/** 652/**
@@ -567,26 +655,71 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
567 */ 655 */
568static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) 656static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
569{ 657{
570 memset(rwstat->cnt, 0, sizeof(rwstat->cnt)); 658 int i;
659
660 for (i = 0; i < BLKG_RWSTAT_NR; i++) {
661 percpu_counter_set(&rwstat->cpu_cnt[i], 0);
662 atomic64_set(&rwstat->aux_cnt[i], 0);
663 }
571} 664}
572 665
573/** 666/**
574 * blkg_rwstat_merge - merge a blkg_rwstat into another 667 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
575 * @to: the destination blkg_rwstat 668 * @to: the destination blkg_rwstat
576 * @from: the source 669 * @from: the source
577 * 670 *
578 * Add @from's counts to @to. 671 * Add @from's count including the aux one to @to's aux count.
579 */ 672 */
580static inline void blkg_rwstat_merge(struct blkg_rwstat *to, 673static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
581 struct blkg_rwstat *from) 674 struct blkg_rwstat *from)
582{ 675{
583 struct blkg_rwstat v = blkg_rwstat_read(from); 676 struct blkg_rwstat v = blkg_rwstat_read(from);
584 int i; 677 int i;
585 678
586 u64_stats_update_begin(&to->syncp);
587 for (i = 0; i < BLKG_RWSTAT_NR; i++) 679 for (i = 0; i < BLKG_RWSTAT_NR; i++)
588 to->cnt[i] += v.cnt[i]; 680 atomic64_add(atomic64_read(&v.aux_cnt[i]) +
589 u64_stats_update_end(&to->syncp); 681 atomic64_read(&from->aux_cnt[i]),
682 &to->aux_cnt[i]);
683}
684
685#ifdef CONFIG_BLK_DEV_THROTTLING
686extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
687 struct bio *bio);
688#else
689static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
690 struct bio *bio) { return false; }
691#endif
692
693static inline bool blkcg_bio_issue_check(struct request_queue *q,
694 struct bio *bio)
695{
696 struct blkcg *blkcg;
697 struct blkcg_gq *blkg;
698 bool throtl = false;
699
700 rcu_read_lock();
701 blkcg = bio_blkcg(bio);
702
703 blkg = blkg_lookup(blkcg, q);
704 if (unlikely(!blkg)) {
705 spin_lock_irq(q->queue_lock);
706 blkg = blkg_lookup_create(blkcg, q);
707 if (IS_ERR(blkg))
708 blkg = NULL;
709 spin_unlock_irq(q->queue_lock);
710 }
711
712 throtl = blk_throtl_bio(q, blkg, bio);
713
714 if (!throtl) {
715 blkg = blkg ?: q->root_blkg;
716 blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags,
717 bio->bi_iter.bi_size);
718 blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1);
719 }
720
721 rcu_read_unlock();
722 return !throtl;
590} 723}
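blkcg_bio_issue_check() is meant to be called once per bio on the submission path; a false return means blk-throttle has taken ownership of the bio and it must not be dispatched. A hedged caller sketch; my_submit_path() and the dispatch step are illustrative, only the helper itself comes from this header.

static void my_submit_path(struct request_queue *q, struct bio *bio)
{
	/* Account the bio against its blkcg and run the throttler. */
	if (!blkcg_bio_issue_check(q, bio))
		return;		/* throttled: do not dispatch */

	q->make_request_fn(q, bio);	/* illustrative dispatch step */
}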
591 724
592#else /* CONFIG_BLK_CGROUP */ 725#else /* CONFIG_BLK_CGROUP */
@@ -642,6 +775,9 @@ static inline void blk_put_rl(struct request_list *rl) { }
642static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } 775static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
643static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } 776static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
644 777
778static inline bool blkcg_bio_issue_check(struct request_queue *q,
779 struct bio *bio) { return true; }
780
645#define blk_queue_for_each_rl(rl, q) \ 781#define blk_queue_for_each_rl(rl, q) \
646 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) 782 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
647 783
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7303b3405520..e8130138f29d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,7 +14,7 @@ struct page;
14struct block_device; 14struct block_device;
15struct io_context; 15struct io_context;
16struct cgroup_subsys_state; 16struct cgroup_subsys_state;
17typedef void (bio_end_io_t) (struct bio *, int); 17typedef void (bio_end_io_t) (struct bio *);
18typedef void (bio_destructor_t) (struct bio *); 18typedef void (bio_destructor_t) (struct bio *);
19 19
20/* 20/*
@@ -46,7 +46,8 @@ struct bvec_iter {
46struct bio { 46struct bio {
47 struct bio *bi_next; /* request queue link */ 47 struct bio *bi_next; /* request queue link */
48 struct block_device *bi_bdev; 48 struct block_device *bi_bdev;
49 unsigned long bi_flags; /* status, command, etc */ 49 unsigned int bi_flags; /* status, command, etc */
50 int bi_error;
50 unsigned long bi_rw; /* bottom bits READ/WRITE, 51 unsigned long bi_rw; /* bottom bits READ/WRITE,
51 * top bits priority 52 * top bits priority
52 */ 53 */
@@ -111,16 +112,14 @@ struct bio {
111/* 112/*
112 * bio flags 113 * bio flags
113 */ 114 */
114#define BIO_UPTODATE 0 /* ok after I/O completion */
115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */ 115#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
116#define BIO_CLONED 2 /* doesn't own data */ 116#define BIO_CLONED 2 /* doesn't own data */
117#define BIO_BOUNCED 3 /* bio is a bounce bio */ 117#define BIO_BOUNCED 3 /* bio is a bounce bio */
118#define BIO_USER_MAPPED 4 /* contains user pages */ 118#define BIO_USER_MAPPED 4 /* contains user pages */
119#define BIO_NULL_MAPPED 5 /* contains invalid user pages */ 119#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
120#define BIO_QUIET 6 /* Make BIO Quiet */ 120#define BIO_QUIET 6 /* Make BIO Quiet */
121#define BIO_SNAP_STABLE 7 /* bio data must be snapshotted during write */ 121#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
122#define BIO_CHAIN 8 /* chained bio, ->bi_remaining in effect */ 122#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
123#define BIO_REFFED 9 /* bio has elevated ->bi_cnt */
124 123
125/* 124/*
126 * Flags starting here get preserved by bio_reset() - this includes 125 * Flags starting here get preserved by bio_reset() - this includes
@@ -129,14 +128,12 @@ struct bio {
129#define BIO_RESET_BITS 13 128#define BIO_RESET_BITS 13
130#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */ 129#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
131 130
132#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
133
134/* 131/*
135 * top 4 bits of bio flags indicate the pool this bio came from 132 * top 4 bits of bio flags indicate the pool this bio came from
136 */ 133 */
137#define BIO_POOL_BITS (4) 134#define BIO_POOL_BITS (4)
138#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) 135#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
139#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) 136#define BIO_POOL_OFFSET (32 - BIO_POOL_BITS)
140#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) 137#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
141#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) 138#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
142 139
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d4068c17d0df..708923b9b623 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
213typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 213typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
214 214
215struct bio_vec; 215struct bio_vec;
216struct bvec_merge_data {
217 struct block_device *bi_bdev;
218 sector_t bi_sector;
219 unsigned bi_size;
220 unsigned long bi_rw;
221};
222typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
223 struct bio_vec *);
224typedef void (softirq_done_fn)(struct request *); 216typedef void (softirq_done_fn)(struct request *);
225typedef int (dma_drain_needed_fn)(struct request *); 217typedef int (dma_drain_needed_fn)(struct request *);
226typedef int (lld_busy_fn) (struct request_queue *q); 218typedef int (lld_busy_fn) (struct request_queue *q);
@@ -258,6 +250,7 @@ struct blk_queue_tag {
258struct queue_limits { 250struct queue_limits {
259 unsigned long bounce_pfn; 251 unsigned long bounce_pfn;
260 unsigned long seg_boundary_mask; 252 unsigned long seg_boundary_mask;
253 unsigned long virt_boundary_mask;
261 254
262 unsigned int max_hw_sectors; 255 unsigned int max_hw_sectors;
263 unsigned int chunk_sectors; 256 unsigned int chunk_sectors;
@@ -268,6 +261,7 @@ struct queue_limits {
268 unsigned int io_min; 261 unsigned int io_min;
269 unsigned int io_opt; 262 unsigned int io_opt;
270 unsigned int max_discard_sectors; 263 unsigned int max_discard_sectors;
264 unsigned int max_hw_discard_sectors;
271 unsigned int max_write_same_sectors; 265 unsigned int max_write_same_sectors;
272 unsigned int discard_granularity; 266 unsigned int discard_granularity;
273 unsigned int discard_alignment; 267 unsigned int discard_alignment;
@@ -305,7 +299,6 @@ struct request_queue {
305 make_request_fn *make_request_fn; 299 make_request_fn *make_request_fn;
306 prep_rq_fn *prep_rq_fn; 300 prep_rq_fn *prep_rq_fn;
307 unprep_rq_fn *unprep_rq_fn; 301 unprep_rq_fn *unprep_rq_fn;
308 merge_bvec_fn *merge_bvec_fn;
309 softirq_done_fn *softirq_done_fn; 302 softirq_done_fn *softirq_done_fn;
310 rq_timed_out_fn *rq_timed_out_fn; 303 rq_timed_out_fn *rq_timed_out_fn;
311 dma_drain_needed_fn *dma_drain_needed; 304 dma_drain_needed_fn *dma_drain_needed;
@@ -462,6 +455,7 @@ struct request_queue {
462 455
463 struct blk_mq_tag_set *tag_set; 456 struct blk_mq_tag_set *tag_set;
464 struct list_head tag_set_list; 457 struct list_head tag_set_list;
458 struct bio_set *bio_split;
465}; 459};
466 460
467#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 461#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -486,7 +480,6 @@ struct request_queue {
486#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 480#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
487#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ 481#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
488#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ 482#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
489#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
490 483
491#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 484#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
492 (1 << QUEUE_FLAG_STACKABLE) | \ 485 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -782,6 +775,8 @@ extern void blk_rq_unprep_clone(struct request *rq);
782extern int blk_insert_cloned_request(struct request_queue *q, 775extern int blk_insert_cloned_request(struct request_queue *q,
783 struct request *rq); 776 struct request *rq);
784extern void blk_delay_queue(struct request_queue *, unsigned long); 777extern void blk_delay_queue(struct request_queue *, unsigned long);
778extern void blk_queue_split(struct request_queue *, struct bio **,
779 struct bio_set *);
785extern void blk_recount_segments(struct request_queue *, struct bio *); 780extern void blk_recount_segments(struct request_queue *, struct bio *);
786extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); 781extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
787extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, 782extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
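With merge_bvec_fn gone, bio-based and stacking drivers are expected to accept arbitrarily large bios and let the core split them, using the blk_queue_split() declared above together with the per-queue bio_split pool added to struct request_queue further down. A hedged sketch of a driver's make_request function; everything except blk_queue_split() and q->bio_split is illustrative.

static void my_make_request(struct request_queue *q, struct bio *bio)
{
	/*
	 * Split the bio against q's limits if needed; the front piece is
	 * handed back in 'bio' and any remainder is resubmitted internally.
	 */
	blk_queue_split(q, &bio, q->bio_split);

	/* ... process the now size-conforming bio ... */
}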
@@ -986,9 +981,9 @@ extern int blk_queue_dma_drain(struct request_queue *q,
986 void *buf, unsigned int size); 981 void *buf, unsigned int size);
987extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); 982extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
988extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 983extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
984extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
989extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 985extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
990extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); 986extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
991extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
992extern void blk_queue_dma_alignment(struct request_queue *, int); 987extern void blk_queue_dma_alignment(struct request_queue *, int);
993extern void blk_queue_update_dma_alignment(struct request_queue *, int); 988extern void blk_queue_update_dma_alignment(struct request_queue *, int);
994extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 989extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1138,6 +1133,7 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
1138enum blk_default_limits { 1133enum blk_default_limits {
1139 BLK_MAX_SEGMENTS = 128, 1134 BLK_MAX_SEGMENTS = 128,
1140 BLK_SAFE_MAX_SECTORS = 255, 1135 BLK_SAFE_MAX_SECTORS = 255,
1136 BLK_DEF_MAX_SECTORS = 2560,
1141 BLK_MAX_SEGMENT_SIZE = 65536, 1137 BLK_MAX_SEGMENT_SIZE = 65536,
1142 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1138 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1143}; 1139};
@@ -1154,6 +1150,11 @@ static inline unsigned long queue_segment_boundary(struct request_queue *q)
1154 return q->limits.seg_boundary_mask; 1150 return q->limits.seg_boundary_mask;
1155} 1151}
1156 1152
1153static inline unsigned long queue_virt_boundary(struct request_queue *q)
1154{
1155 return q->limits.virt_boundary_mask;
1156}
1157
1157static inline unsigned int queue_max_sectors(struct request_queue *q) 1158static inline unsigned int queue_max_sectors(struct request_queue *q)
1158{ 1159{
1159 return q->limits.max_sectors; 1160 return q->limits.max_sectors;
@@ -1354,6 +1355,19 @@ static inline void put_dev_sector(Sector p)
1354 page_cache_release(p.v); 1355 page_cache_release(p.v);
1355} 1356}
1356 1357
1358/*
1359 * Check if adding a bio_vec after bprv with offset would create a gap in
1360 * the SG list. Most drivers don't care about this, but some do.
1361 */
1362static inline bool bvec_gap_to_prev(struct request_queue *q,
1363 struct bio_vec *bprv, unsigned int offset)
1364{
1365 if (!queue_virt_boundary(q))
1366 return false;
1367 return offset ||
1368 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1369}
1370
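The check is pure mask arithmetic: with a virt boundary mask of 0xfff (4 KiB), a gap exists whenever the previous vector does not end exactly on a boundary or the next one does not start at offset zero. A small self-contained illustration of the same logic in plain C; the mask value and the sample vectors are made up for the example.

#include <assert.h>
#include <stdbool.h>

/* Same arithmetic as bvec_gap_to_prev(), lifted out of the kernel types. */
static bool gap_to_prev(unsigned long boundary_mask,
			unsigned int prev_offset, unsigned int prev_len,
			unsigned int next_offset)
{
	if (!boundary_mask)
		return false;		/* queue imposes no virt boundary */
	return next_offset || ((prev_offset + prev_len) & boundary_mask);
}

int main(void)
{
	const unsigned long mask = 0xfff;	/* 4 KiB virt boundary */

	/* previous vector ends on a boundary, next starts at 0: no gap */
	assert(!gap_to_prev(mask, 0, 4096, 0));

	/* previous vector ends mid-page: the SG element would have a hole */
	assert(gap_to_prev(mask, 0, 512, 0));

	/* next vector starts at a non-zero offset: also a gap */
	assert(gap_to_prev(mask, 0, 4096, 256));
	return 0;
}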
1357struct work_struct; 1371struct work_struct;
1358int kblockd_schedule_work(struct work_struct *work); 1372int kblockd_schedule_work(struct work_struct *work);
1359int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); 1373int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1555,8 +1569,8 @@ struct block_device_operations {
1555 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); 1569 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
1556 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1570 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1557 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1571 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1558 long (*direct_access)(struct block_device *, sector_t, 1572 long (*direct_access)(struct block_device *, sector_t, void __pmem **,
1559 void **, unsigned long *pfn, long size); 1573 unsigned long *pfn);
1560 unsigned int (*check_events) (struct gendisk *disk, 1574 unsigned int (*check_events) (struct gendisk *disk,
1561 unsigned int clearing); 1575 unsigned int clearing);
1562 /* ->media_changed() is DEPRECATED, use ->check_events() instead */ 1576 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1574,8 +1588,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1574extern int bdev_read_page(struct block_device *, sector_t, struct page *); 1588extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1575extern int bdev_write_page(struct block_device *, sector_t, struct page *, 1589extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1576 struct writeback_control *); 1590 struct writeback_control *);
1577extern long bdev_direct_access(struct block_device *, sector_t, void **addr, 1591extern long bdev_direct_access(struct block_device *, sector_t,
1578 unsigned long *pfn, long size); 1592 void __pmem **addr, unsigned long *pfn, long size);
1579#else /* CONFIG_BLOCK */ 1593#else /* CONFIG_BLOCK */
1580 1594
1581struct block_device; 1595struct block_device;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476a0d48..f57d7fed9ec3 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
10#include <uapi/linux/bpf.h> 10#include <uapi/linux/bpf.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/file.h> 12#include <linux/file.h>
13#include <linux/perf_event.h>
13 14
14struct bpf_map; 15struct bpf_map;
15 16
@@ -24,6 +25,10 @@ struct bpf_map_ops {
24 void *(*map_lookup_elem)(struct bpf_map *map, void *key); 25 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
25 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); 26 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
26 int (*map_delete_elem)(struct bpf_map *map, void *key); 27 int (*map_delete_elem)(struct bpf_map *map, void *key);
28
29 /* funcs called by prog_array and perf_event_array map */
30 void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
31 void (*map_fd_put_ptr) (void *ptr);
27}; 32};
28 33
29struct bpf_map { 34struct bpf_map {
@@ -142,13 +147,13 @@ struct bpf_array {
142 bool owner_jited; 147 bool owner_jited;
143 union { 148 union {
144 char value[0] __aligned(8); 149 char value[0] __aligned(8);
145 struct bpf_prog *prog[0] __aligned(8); 150 void *ptrs[0] __aligned(8);
146 }; 151 };
147}; 152};
148#define MAX_TAIL_CALL_CNT 32 153#define MAX_TAIL_CALL_CNT 32
149 154
150u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); 155u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
151void bpf_prog_array_map_clear(struct bpf_map *map); 156void bpf_fd_array_map_clear(struct bpf_map *map);
152bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 157bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
153const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 158const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
154 159
@@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
185extern const struct bpf_func_proto bpf_map_update_elem_proto; 190extern const struct bpf_func_proto bpf_map_update_elem_proto;
186extern const struct bpf_func_proto bpf_map_delete_elem_proto; 191extern const struct bpf_func_proto bpf_map_delete_elem_proto;
187 192
193extern const struct bpf_func_proto bpf_perf_event_read_proto;
188extern const struct bpf_func_proto bpf_get_prandom_u32_proto; 194extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
189extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; 195extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
190extern const struct bpf_func_proto bpf_tail_call_proto; 196extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
192extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; 198extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
193extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; 199extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
194extern const struct bpf_func_proto bpf_get_current_comm_proto; 200extern const struct bpf_func_proto bpf_get_current_comm_proto;
201extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
202extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
195 203
196#endif /* _LINUX_BPF_H */ 204#endif /* _LINUX_BPF_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 93755a629299..4d8fcf2187dc 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -34,12 +34,17 @@ struct seq_file;
34 34
35/* define the enumeration of all cgroup subsystems */ 35/* define the enumeration of all cgroup subsystems */
36#define SUBSYS(_x) _x ## _cgrp_id, 36#define SUBSYS(_x) _x ## _cgrp_id,
37#define SUBSYS_TAG(_t) CGROUP_ ## _t, \
38 __unused_tag_ ## _t = CGROUP_ ## _t - 1,
37enum cgroup_subsys_id { 39enum cgroup_subsys_id {
38#include <linux/cgroup_subsys.h> 40#include <linux/cgroup_subsys.h>
39 CGROUP_SUBSYS_COUNT, 41 CGROUP_SUBSYS_COUNT,
40}; 42};
43#undef SUBSYS_TAG
41#undef SUBSYS 44#undef SUBSYS
42 45
46#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START)
47
43/* bits in struct cgroup_subsys_state flags field */ 48/* bits in struct cgroup_subsys_state flags field */
44enum { 49enum {
45 CSS_NO_REF = (1 << 0), /* no reference counting for this css */ 50 CSS_NO_REF = (1 << 0), /* no reference counting for this css */
@@ -318,7 +323,7 @@ struct cftype {
318 * end of cftype array. 323 * end of cftype array.
319 */ 324 */
320 char name[MAX_CFTYPE_NAME]; 325 char name[MAX_CFTYPE_NAME];
321 int private; 326 unsigned long private;
322 /* 327 /*
323 * If not 0, file mode is set to this value, otherwise it will 328 * If not 0, file mode is set to this value, otherwise it will
324 * be figured out automatically 329 * be figured out automatically
@@ -406,7 +411,9 @@ struct cgroup_subsys {
406 struct cgroup_taskset *tset); 411 struct cgroup_taskset *tset);
407 void (*attach)(struct cgroup_subsys_state *css, 412 void (*attach)(struct cgroup_subsys_state *css,
408 struct cgroup_taskset *tset); 413 struct cgroup_taskset *tset);
409 void (*fork)(struct task_struct *task); 414 int (*can_fork)(struct task_struct *task, void **priv_p);
415 void (*cancel_fork)(struct task_struct *task, void *priv);
416 void (*fork)(struct task_struct *task, void *priv);
410 void (*exit)(struct cgroup_subsys_state *css, 417 void (*exit)(struct cgroup_subsys_state *css,
411 struct cgroup_subsys_state *old_css, 418 struct cgroup_subsys_state *old_css,
412 struct task_struct *task); 419 struct task_struct *task);
@@ -434,6 +441,9 @@ struct cgroup_subsys {
434 int id; 441 int id;
435 const char *name; 442 const char *name;
436 443
444 /* optional, initialized automatically during boot if not set */
445 const char *legacy_name;
446
437 /* link to parent, protected by cgroup_lock() */ 447 /* link to parent, protected by cgroup_lock() */
438 struct cgroup_root *root; 448 struct cgroup_root *root;
439 449
@@ -491,6 +501,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
491 501
492#else /* CONFIG_CGROUPS */ 502#else /* CONFIG_CGROUPS */
493 503
504#define CGROUP_CANFORK_COUNT 0
494#define CGROUP_SUBSYS_COUNT 0 505#define CGROUP_SUBSYS_COUNT 0
495 506
496static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} 507static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a593e299162e..eb7ca55f72ef 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -22,6 +22,15 @@
22 22
23#ifdef CONFIG_CGROUPS 23#ifdef CONFIG_CGROUPS
24 24
25/*
26 * All weight knobs on the default hierarchy should use the following min,
27 * default and max values. The default value is the logarithmic center of
28 * MIN and MAX and allows 100x to be expressed in both directions.
29 */
30#define CGROUP_WEIGHT_MIN 1
31#define CGROUP_WEIGHT_DFL 100
32#define CGROUP_WEIGHT_MAX 10000
33
25/* a css_task_iter should be treated as an opaque object */ 34/* a css_task_iter should be treated as an opaque object */
26struct css_task_iter { 35struct css_task_iter {
27 struct cgroup_subsys *ss; 36 struct cgroup_subsys *ss;
@@ -62,7 +71,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
62 struct pid *pid, struct task_struct *tsk); 71 struct pid *pid, struct task_struct *tsk);
63 72
64void cgroup_fork(struct task_struct *p); 73void cgroup_fork(struct task_struct *p);
65void cgroup_post_fork(struct task_struct *p); 74extern int cgroup_can_fork(struct task_struct *p,
75 void *ss_priv[CGROUP_CANFORK_COUNT]);
76extern void cgroup_cancel_fork(struct task_struct *p,
77 void *ss_priv[CGROUP_CANFORK_COUNT]);
78extern void cgroup_post_fork(struct task_struct *p,
79 void *old_ss_priv[CGROUP_CANFORK_COUNT]);
66void cgroup_exit(struct task_struct *p); 80void cgroup_exit(struct task_struct *p);
67 81
68int cgroup_init_early(void); 82int cgroup_init_early(void);
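The three-call interface lets a controller veto a fork before the child becomes visible: cgroup_can_fork() may fail (this is what the new pids controller hooks), cgroup_cancel_fork() undoes its side effects if a later step of the fork fails, and cgroup_post_fork() commits. A hedged sketch of the expected ordering in a copy_process()-style path; the function, its arguments and the single failure point are illustrative.

static int my_fork_cgroup_steps(struct task_struct *child, int later_error)
{
	void *ss_priv[CGROUP_CANFORK_COUNT] = {};
	int ret;

	cgroup_fork(child);

	ret = cgroup_can_fork(child, ss_priv);	/* e.g. pids.max check */
	if (ret)
		return ret;			/* nothing to undo yet */

	if (later_error) {			/* any failure after can_fork */
		cgroup_cancel_fork(child, ss_priv);
		return later_error;
	}

	cgroup_post_fork(child, ss_priv);	/* commit: child is now charged */
	return 0;
}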
@@ -524,7 +538,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
524 struct dentry *dentry) { return -EINVAL; } 538 struct dentry *dentry) { return -EINVAL; }
525 539
526static inline void cgroup_fork(struct task_struct *p) {} 540static inline void cgroup_fork(struct task_struct *p) {}
527static inline void cgroup_post_fork(struct task_struct *p) {} 541static inline int cgroup_can_fork(struct task_struct *p,
542 void *ss_priv[CGROUP_CANFORK_COUNT])
543{ return 0; }
544static inline void cgroup_cancel_fork(struct task_struct *p,
545 void *ss_priv[CGROUP_CANFORK_COUNT]) {}
546static inline void cgroup_post_fork(struct task_struct *p,
547 void *ss_priv[CGROUP_CANFORK_COUNT]) {}
528static inline void cgroup_exit(struct task_struct *p) {} 548static inline void cgroup_exit(struct task_struct *p) {}
529 549
530static inline int cgroup_init_early(void) { return 0; } 550static inline int cgroup_init_early(void) { return 0; }
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e4a96fb14403..1a96fdaa33d5 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -3,6 +3,17 @@
3 * 3 *
4 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. 4 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
5 */ 5 */
6
7/*
8 * This file *must* be included with SUBSYS() defined.
9 * SUBSYS_TAG() is a noop if undefined.
10 */
11
12#ifndef SUBSYS_TAG
13#define __TMP_SUBSYS_TAG
14#define SUBSYS_TAG(_x)
15#endif
16
6#if IS_ENABLED(CONFIG_CPUSETS) 17#if IS_ENABLED(CONFIG_CPUSETS)
7SUBSYS(cpuset) 18SUBSYS(cpuset)
8#endif 19#endif
@@ -16,7 +27,7 @@ SUBSYS(cpuacct)
16#endif 27#endif
17 28
18#if IS_ENABLED(CONFIG_BLK_CGROUP) 29#if IS_ENABLED(CONFIG_BLK_CGROUP)
19SUBSYS(blkio) 30SUBSYS(io)
20#endif 31#endif
21 32
22#if IS_ENABLED(CONFIG_MEMCG) 33#if IS_ENABLED(CONFIG_MEMCG)
@@ -48,11 +59,28 @@ SUBSYS(hugetlb)
48#endif 59#endif
49 60
50/* 61/*
62 * Subsystems that implement the can_fork() family of callbacks.
63 */
64SUBSYS_TAG(CANFORK_START)
65
66#if IS_ENABLED(CONFIG_CGROUP_PIDS)
67SUBSYS(pids)
68#endif
69
70SUBSYS_TAG(CANFORK_END)
71
72/*
51 * The following subsystems are not supported on the default hierarchy. 73 * The following subsystems are not supported on the default hierarchy.
52 */ 74 */
53#if IS_ENABLED(CONFIG_CGROUP_DEBUG) 75#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
54SUBSYS(debug) 76SUBSYS(debug)
55#endif 77#endif
78
79#ifdef __TMP_SUBSYS_TAG
80#undef __TMP_SUBSYS_TAG
81#undef SUBSYS_TAG
82#endif
83
56/* 84/*
57 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. 85 * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
58 */ 86 */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 78842f46f152..3ecc07d0da77 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -11,7 +11,6 @@
11#ifndef __LINUX_CLK_PROVIDER_H 11#ifndef __LINUX_CLK_PROVIDER_H
12#define __LINUX_CLK_PROVIDER_H 12#define __LINUX_CLK_PROVIDER_H
13 13
14#include <linux/clk.h>
15#include <linux/io.h> 14#include <linux/io.h>
16#include <linux/of.h> 15#include <linux/of.h>
17 16
@@ -33,11 +32,34 @@
33#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ 32#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
34#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ 33#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */
35 34
35struct clk;
36struct clk_hw; 36struct clk_hw;
37struct clk_core; 37struct clk_core;
38struct dentry; 38struct dentry;
39 39
40/** 40/**
41 * struct clk_rate_request - Structure encoding the clk constraints that
42 * a clock user might require.
43 *
44 * @rate: Requested clock rate. This field will be adjusted by
45 * clock drivers according to hardware capabilities.
46 * @min_rate: Minimum rate imposed by clk users.
47 * @max_rate: Maximum rate imposed by clk users.
48 * @best_parent_rate: The best parent rate a parent can provide to fulfill the
49 * requested constraints.
50 * @best_parent_hw: The most appropriate parent clock that fulfills the
51 * requested constraints.
52 *
53 */
54struct clk_rate_request {
55 unsigned long rate;
56 unsigned long min_rate;
57 unsigned long max_rate;
58 unsigned long best_parent_rate;
59 struct clk_hw *best_parent_hw;
60};
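A determine_rate implementation now reads its constraints from, and writes its decision back into, this single structure instead of taking five loose parameters. A hedged sketch of a trivial callback that always proposes parent 0, using only the clk_hw helpers declared later in this patch; the clamping policy and the function name are illustrative.

static int my_clk_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent_by_index(hw, 0);

	if (!parent)
		return -EINVAL;

	/* Honour the min/max constraints imposed by clock consumers. */
	if (req->rate < req->min_rate)
		req->rate = req->min_rate;
	if (req->rate > req->max_rate)
		req->rate = req->max_rate;

	/* Report which parent and parent rate would satisfy the request. */
	req->best_parent_hw = parent;
	req->best_parent_rate = clk_hw_get_rate(parent);
	return 0;
}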
61
62/**
41 * struct clk_ops - Callback operations for hardware clocks; these are to 63 * struct clk_ops - Callback operations for hardware clocks; these are to
42 * be provided by the clock implementation, and will be called by drivers 64 * be provided by the clock implementation, and will be called by drivers
43 * through the clk_* api. 65 * through the clk_* api.
@@ -176,12 +198,8 @@ struct clk_ops {
176 unsigned long parent_rate); 198 unsigned long parent_rate);
177 long (*round_rate)(struct clk_hw *hw, unsigned long rate, 199 long (*round_rate)(struct clk_hw *hw, unsigned long rate,
178 unsigned long *parent_rate); 200 unsigned long *parent_rate);
179 long (*determine_rate)(struct clk_hw *hw, 201 int (*determine_rate)(struct clk_hw *hw,
180 unsigned long rate, 202 struct clk_rate_request *req);
181 unsigned long min_rate,
182 unsigned long max_rate,
183 unsigned long *best_parent_rate,
184 struct clk_hw **best_parent_hw);
185 int (*set_parent)(struct clk_hw *hw, u8 index); 203 int (*set_parent)(struct clk_hw *hw, u8 index);
186 u8 (*get_parent)(struct clk_hw *hw); 204 u8 (*get_parent)(struct clk_hw *hw);
187 int (*set_rate)(struct clk_hw *hw, unsigned long rate, 205 int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -343,6 +361,9 @@ struct clk_div_table {
343 * to the closest integer instead of the up one. 361 * to the closest integer instead of the up one.
344 * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should 362 * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
345 * not be changed by the clock framework. 363 * not be changed by the clock framework.
364 * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
365 * except when the value read from the register is zero, the divisor is
366 * 2^width of the field.
346 */ 367 */
347struct clk_divider { 368struct clk_divider {
348 struct clk_hw hw; 369 struct clk_hw hw;
@@ -360,6 +381,7 @@ struct clk_divider {
360#define CLK_DIVIDER_HIWORD_MASK BIT(3) 381#define CLK_DIVIDER_HIWORD_MASK BIT(3)
361#define CLK_DIVIDER_ROUND_CLOSEST BIT(4) 382#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
362#define CLK_DIVIDER_READ_ONLY BIT(5) 383#define CLK_DIVIDER_READ_ONLY BIT(5)
384#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
363 385
364extern const struct clk_ops clk_divider_ops; 386extern const struct clk_ops clk_divider_ops;
365 387
@@ -550,6 +572,23 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
550void of_gpio_clk_gate_setup(struct device_node *node); 572void of_gpio_clk_gate_setup(struct device_node *node);
551 573
552/** 574/**
575 * struct clk_gpio_mux - gpio controlled clock multiplexer
576 *
577 * @hw: see struct clk_gpio
578 * @gpiod: gpio descriptor to select the parent of this clock multiplexer
579 *
580 * Clock with a gpio control for selecting the parent clock.
581 * Implements .get_parent, .set_parent and .determine_rate
582 */
583
584extern const struct clk_ops clk_gpio_mux_ops;
585struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
586 const char * const *parent_names, u8 num_parents, unsigned gpio,
587 bool active_low, unsigned long flags);
588
589void of_gpio_mux_clk_setup(struct device_node *node);
590
591/**
553 * clk_register - allocate a new clock, register it and return an opaque cookie 592 * clk_register - allocate a new clock, register it and return an opaque cookie
554 * @dev: device that is registering this clock 593 * @dev: device that is registering this clock
555 * @hw: link to hardware-specific clock data 594 * @hw: link to hardware-specific clock data
@@ -568,31 +607,27 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
568 607
569/* helper functions */ 608/* helper functions */
570const char *__clk_get_name(struct clk *clk); 609const char *__clk_get_name(struct clk *clk);
610const char *clk_hw_get_name(const struct clk_hw *hw);
571struct clk_hw *__clk_get_hw(struct clk *clk); 611struct clk_hw *__clk_get_hw(struct clk *clk);
572u8 __clk_get_num_parents(struct clk *clk); 612unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
573struct clk *__clk_get_parent(struct clk *clk); 613struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
574struct clk *clk_get_parent_by_index(struct clk *clk, u8 index); 614struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
615 unsigned int index);
575unsigned int __clk_get_enable_count(struct clk *clk); 616unsigned int __clk_get_enable_count(struct clk *clk);
576unsigned long __clk_get_rate(struct clk *clk); 617unsigned long clk_hw_get_rate(const struct clk_hw *hw);
577unsigned long __clk_get_flags(struct clk *clk); 618unsigned long __clk_get_flags(struct clk *clk);
578bool __clk_is_prepared(struct clk *clk); 619unsigned long clk_hw_get_flags(const struct clk_hw *hw);
620bool clk_hw_is_prepared(const struct clk_hw *hw);
579bool __clk_is_enabled(struct clk *clk); 621bool __clk_is_enabled(struct clk *clk);
580struct clk *__clk_lookup(const char *name); 622struct clk *__clk_lookup(const char *name);
581long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 623int __clk_mux_determine_rate(struct clk_hw *hw,
582 unsigned long min_rate, 624 struct clk_rate_request *req);
583 unsigned long max_rate, 625int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
584 unsigned long *best_parent_rate, 626int __clk_mux_determine_rate_closest(struct clk_hw *hw,
585 struct clk_hw **best_parent_p); 627 struct clk_rate_request *req);
586unsigned long __clk_determine_rate(struct clk_hw *core,
587 unsigned long rate,
588 unsigned long min_rate,
589 unsigned long max_rate);
590long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
591 unsigned long min_rate,
592 unsigned long max_rate,
593 unsigned long *best_parent_rate,
594 struct clk_hw **best_parent_p);
595void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); 628void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
629void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
630 unsigned long max_rate);
596 631
597static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) 632static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
598{ 633{
@@ -603,7 +638,7 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
603/* 638/*
604 * FIXME clock api without lock protection 639 * FIXME clock api without lock protection
605 */ 640 */
606unsigned long __clk_round_rate(struct clk *clk, unsigned long rate); 641unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
607 642
608struct of_device_id; 643struct of_device_id;
609 644
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index f3050e15f833..e0c362363c38 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -7,6 +7,8 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <linux/types.h>
11
10struct device_node; 12struct device_node;
11 13
12#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) 14#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 63a8159c4e64..cb19cc1865ca 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,8 +16,20 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18 18
19struct device;
20struct device_node;
21struct generic_pm_domain;
22
19void r8a7778_clocks_init(u32 mode); 23void r8a7778_clocks_init(u32 mode);
20void r8a7779_clocks_init(u32 mode); 24void r8a7779_clocks_init(u32 mode);
21void rcar_gen2_clocks_init(u32 mode); 25void rcar_gen2_clocks_init(u32 mode);
22 26
27#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
28void cpg_mstp_add_clk_domain(struct device_node *np);
29int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
30void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
31#else
32static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
33#endif
34
23#endif 35#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 19c4208f4752..57bf7aab4516 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -17,7 +17,8 @@
17#ifndef __LINUX_CLK_TEGRA_H_ 17#ifndef __LINUX_CLK_TEGRA_H_
18#define __LINUX_CLK_TEGRA_H_ 18#define __LINUX_CLK_TEGRA_H_
19 19
20#include <linux/clk.h> 20#include <linux/types.h>
21#include <linux/bug.h>
21 22
22/* 23/*
23 * Tegra CPU clock and reset control ops 24 * Tegra CPU clock and reset control ops
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 79b76e13d904..223be696df27 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -188,33 +188,6 @@ struct clk_hw_omap {
188/* DPLL Type and DCO Selection Flags */ 188/* DPLL Type and DCO Selection Flags */
189#define DPLL_J_TYPE 0x1 189#define DPLL_J_TYPE 0x1
190 190
191/* Composite clock component types */
192enum {
193 CLK_COMPONENT_TYPE_GATE = 0,
194 CLK_COMPONENT_TYPE_DIVIDER,
195 CLK_COMPONENT_TYPE_MUX,
196 CLK_COMPONENT_TYPE_MAX,
197};
198
199/**
200 * struct ti_dt_clk - OMAP DT clock alias declarations
201 * @lk: clock lookup definition
202 * @node_name: clock DT node to map to
203 */
204struct ti_dt_clk {
205 struct clk_lookup lk;
206 char *node_name;
207};
208
209#define DT_CLK(dev, con, name) \
210 { \
211 .lk = { \
212 .dev_id = dev, \
213 .con_id = con, \
214 }, \
215 .node_name = name, \
216 }
217
218/* Static memmap indices */ 191/* Static memmap indices */
219enum { 192enum {
220 TI_CLKM_CM = 0, 193 TI_CLKM_CM = 0,
@@ -225,8 +198,6 @@ enum {
225 CLK_MAX_MEMMAPS 198 CLK_MAX_MEMMAPS
226}; 199};
227 200
228typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
229
230/** 201/**
231 * struct clk_omap_reg - OMAP register declaration 202 * struct clk_omap_reg - OMAP register declaration
232 * @offset: offset from the master IP module base address 203 * @offset: offset from the master IP module base address
@@ -238,98 +209,62 @@ struct clk_omap_reg {
238}; 209};
239 210
240/** 211/**
241 * struct ti_clk_ll_ops - low-level register access ops for a clock 212 * struct ti_clk_ll_ops - low-level ops for clocks
242 * @clk_readl: pointer to register read function 213 * @clk_readl: pointer to register read function
243 * @clk_writel: pointer to register write function 214 * @clk_writel: pointer to register write function
215 * @clkdm_clk_enable: pointer to clockdomain enable function
216 * @clkdm_clk_disable: pointer to clockdomain disable function
217 * @cm_wait_module_ready: pointer to CM module wait ready function
218 * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
244 * 219 *
245 * Low-level register access ops are generally used by the basic clock types 220 * Low-level ops are generally used by the basic clock types (clk-gate,
246 * (clk-gate, clk-mux, clk-divider etc.) to provide support for various 221 * clk-mux, clk-divider etc.) to provide support for various low-level
247 * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be 222 * hadrware interfaces (direct MMIO, regmap etc.), and is initialized
248 * used by other hardware-specific clock drivers if needed. 223 * by board code. Low-level ops also contain some other platform specific
224 * operations not provided directly by clock drivers.
249 */ 225 */
250struct ti_clk_ll_ops { 226struct ti_clk_ll_ops {
251 u32 (*clk_readl)(void __iomem *reg); 227 u32 (*clk_readl)(void __iomem *reg);
252 void (*clk_writel)(u32 val, void __iomem *reg); 228 void (*clk_writel)(u32 val, void __iomem *reg);
229 int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
230 int (*clkdm_clk_disable)(struct clockdomain *clkdm,
231 struct clk *clk);
232 int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
233 u8 idlest_shift);
234 int (*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
235 u8 *idlest_reg_id);
253}; 236};
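Board or SoC code is now expected to hand these callbacks to the TI clock driver through ti_clk_setup_ll_ops(), declared further down in this hunk. A hedged sketch of the simplest MMIO-only registration; the struct and function names, and the use of plain readl()/writel(), are assumptions for illustration, and the clockdomain/CM hooks are left unset.

static u32 my_clk_readl(void __iomem *reg)
{
	return readl(reg);
}

static void my_clk_writel(u32 val, void __iomem *reg)
{
	writel(val, reg);
}

static struct ti_clk_ll_ops my_clk_ll_ops = {
	.clk_readl	= my_clk_readl,
	.clk_writel	= my_clk_writel,
	/* clkdm_clk_enable/disable and CM hooks left NULL in this sketch */
};

static void __init my_board_clk_init(void)
{
	if (ti_clk_setup_ll_ops(&my_clk_ll_ops))
		pr_err("failed to register TI clock low-level ops\n");
	/* ... then the usual DT clock init, e.g. omap4xxx_dt_clk_init() ... */
}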
254 237
255extern struct ti_clk_ll_ops *ti_clk_ll_ops;
256
257extern const struct clk_ops ti_clk_divider_ops;
258extern const struct clk_ops ti_clk_mux_ops;
259
260#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw) 238#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
261 239
262void omap2_init_clk_hw_omap_clocks(struct clk *clk);
263int omap3_noncore_dpll_enable(struct clk_hw *hw);
264void omap3_noncore_dpll_disable(struct clk_hw *hw);
265int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
266int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
267 unsigned long parent_rate);
268int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
269 unsigned long rate,
270 unsigned long parent_rate,
271 u8 index);
272long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
273 unsigned long rate,
274 unsigned long min_rate,
275 unsigned long max_rate,
276 unsigned long *best_parent_rate,
277 struct clk_hw **best_parent_clk);
278unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
279 unsigned long parent_rate);
280long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
281 unsigned long target_rate,
282 unsigned long *parent_rate);
283long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
284 unsigned long rate,
285 unsigned long min_rate,
286 unsigned long max_rate,
287 unsigned long *best_parent_rate,
288 struct clk_hw **best_parent_clk);
289u8 omap2_init_dpll_parent(struct clk_hw *hw);
290unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
291long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
292 unsigned long *parent_rate);
293void omap2_init_clk_clkdm(struct clk_hw *clk); 240void omap2_init_clk_clkdm(struct clk_hw *clk);
294unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
295 unsigned long parent_rate);
296int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
297 unsigned long parent_rate);
298long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
299 unsigned long *prate);
300int omap2_clkops_enable_clkdm(struct clk_hw *hw);
301void omap2_clkops_disable_clkdm(struct clk_hw *hw);
302int omap2_clk_disable_autoidle_all(void); 241int omap2_clk_disable_autoidle_all(void);
303void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); 242int omap2_clk_enable_autoidle_all(void);
304int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, 243int omap2_clk_allow_idle(struct clk *clk);
305 unsigned long parent_rate); 244int omap2_clk_deny_idle(struct clk *clk);
306int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
307 unsigned long parent_rate, u8 index);
308int omap2_dflt_clk_enable(struct clk_hw *hw);
309void omap2_dflt_clk_disable(struct clk_hw *hw);
310int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
311void omap3_clk_lock_dpll5(void);
312unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, 245unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
313 unsigned long parent_rate); 246 unsigned long parent_rate);
314int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate, 247int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
315 unsigned long parent_rate); 248 unsigned long parent_rate);
316void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); 249void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
317void omap2xxx_clkt_vps_init(void); 250void omap2xxx_clkt_vps_init(void);
251unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
318 252
319void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
320void ti_dt_clocks_register(struct ti_dt_clk *oclks);
321void ti_dt_clk_init_provider(struct device_node *np, int index);
322void ti_dt_clk_init_retry_clks(void); 253void ti_dt_clk_init_retry_clks(void);
323void ti_dt_clockdomains_setup(void); 254void ti_dt_clockdomains_setup(void);
324int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, 255int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
325 ti_of_clk_init_cb_t func); 256
326int of_ti_clk_autoidle_setup(struct device_node *node); 257struct regmap;
327int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type); 258
259int omap2_clk_provider_init(struct device_node *parent, int index,
260 struct regmap *syscon, void __iomem *mem);
261void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
328 262
329int omap3430_dt_clk_init(void); 263int omap3430_dt_clk_init(void);
330int omap3630_dt_clk_init(void); 264int omap3630_dt_clk_init(void);
331int am35xx_dt_clk_init(void); 265int am35xx_dt_clk_init(void);
332int ti81xx_dt_clk_init(void); 266int dm814x_dt_clk_init(void);
267int dm816x_dt_clk_init(void);
333int omap4xxx_dt_clk_init(void); 268int omap4xxx_dt_clk_init(void);
334int omap5xxx_dt_clk_init(void); 269int omap5xxx_dt_clk_init(void);
335int dra7xx_dt_clk_init(void); 270int dra7xx_dt_clk_init(void);
@@ -338,27 +273,24 @@ int am43xx_dt_clk_init(void);
338int omap2420_dt_clk_init(void); 273int omap2420_dt_clk_init(void);
339int omap2430_dt_clk_init(void); 274int omap2430_dt_clk_init(void);
340 275
341#ifdef CONFIG_OF 276struct ti_clk_features {
342void of_ti_clk_allow_autoidle_all(void); 277 u32 flags;
343void of_ti_clk_deny_autoidle_all(void); 278 long fint_min;
344#else 279 long fint_max;
345static inline void of_ti_clk_allow_autoidle_all(void) { } 280 long fint_band1_max;
346static inline void of_ti_clk_deny_autoidle_all(void) { } 281 long fint_band2_min;
347#endif 282 u8 dpll_bypass_vals;
283 u8 cm_idlest_val;
284};
285
286#define TI_CLK_DPLL_HAS_FREQSEL BIT(0)
287#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
288#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
289
290void ti_clk_setup_features(struct ti_clk_features *features);
291const struct ti_clk_features *ti_clk_get_features(void);
348 292
349extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; 293extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
350extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
351extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
352extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
353extern const struct clk_hw_omap_ops clkhwops_wait;
354extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
355extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
356extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
357extern const struct clk_hw_omap_ops clkhwops_iclk;
358extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
359extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
360extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
361extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
362 294
363#ifdef CONFIG_ATAGS 295#ifdef CONFIG_ATAGS
364int omap3430_clk_legacy_init(void); 296int omap3430_clk_legacy_init(void);
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 597a1e836f22..31ce435981fe 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -234,13 +234,10 @@ static inline int tick_check_broadcast_expired(void) { return 0; }
234static inline void tick_setup_hrtimer_broadcast(void) { } 234static inline void tick_setup_hrtimer_broadcast(void) { }
235# endif 235# endif
236 236
237extern int clockevents_notify(unsigned long reason, void *arg);
238
239#else /* !CONFIG_GENERIC_CLOCKEVENTS: */ 237#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
240 238
241static inline void clockevents_suspend(void) { } 239static inline void clockevents_suspend(void) { }
242static inline void clockevents_resume(void) { } 240static inline void clockevents_resume(void) { }
243static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
244static inline int tick_check_broadcast_expired(void) { return 0; } 241static inline int tick_check_broadcast_expired(void) { return 0; }
245static inline void tick_setup_hrtimer_broadcast(void) { } 242static inline void tick_setup_hrtimer_broadcast(void) { }
246 243
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae7c0a4..c836eb2dc44d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -252,7 +252,12 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
252 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 252 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
253 253
254#define WRITE_ONCE(x, val) \ 254#define WRITE_ONCE(x, val) \
255 ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) 255({ \
256 union { typeof(x) __val; char __c[1]; } __u = \
257 { .__val = (__force typeof(x)) (val) }; \
258 __write_once_size(&(x), __u.__c, sizeof(x)); \
259 __u.__val; \
260})
256 261
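The multi-line layout is cosmetic; the substantive change is the (__force typeof(x)) cast on the stored value, which appears intended to keep sparse from complaining when the assignment needs a type or address-space conversion. Usage stays the usual pairing with READ_ONCE(); a minimal hedged example with a hypothetical shared flag.

static int my_flag;	/* shared between two contexts; hypothetical */

/* Writer: publish the new value as a single, non-torn store. */
static void my_publish(int v)
{
	WRITE_ONCE(my_flag, v);
}

/* Reader: force a fresh load instead of reusing a cached register value. */
static int my_snapshot(void)
{
	return READ_ONCE(my_flag);
}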
257/** 262/**
258 * READ_ONCE_CTRL - Read a value heading a control dependency 263 * READ_ONCE_CTRL - Read a value heading a control dependency
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index b96bd299966f..008fc67d0d96 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -49,13 +49,28 @@ static inline void exception_exit(enum ctx_state prev_ctx)
49 } 49 }
50} 50}
51 51
52
53/**
54 * ct_state() - return the current context tracking state if known
55 *
56 * Returns the current cpu's context tracking state if context tracking
57 * is enabled. If context tracking is disabled, returns
58 * CONTEXT_DISABLED. This should be used primarily for debugging.
59 */
60static inline enum ctx_state ct_state(void)
61{
62 return context_tracking_is_enabled() ?
63 this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
64}
52#else 65#else
53static inline void user_enter(void) { } 66static inline void user_enter(void) { }
54static inline void user_exit(void) { } 67static inline void user_exit(void) { }
55static inline enum ctx_state exception_enter(void) { return 0; } 68static inline enum ctx_state exception_enter(void) { return 0; }
56static inline void exception_exit(enum ctx_state prev_ctx) { } 69static inline void exception_exit(enum ctx_state prev_ctx) { }
70static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
57#endif /* !CONFIG_CONTEXT_TRACKING */ 71#endif /* !CONFIG_CONTEXT_TRACKING */
58 72
73#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
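When context tracking is compiled out, ct_state() returns CONTEXT_DISABLED and CT_WARN_ON() short-circuits on context_tracking_is_enabled(), so the assertion is free. A hedged example of the kind of check an entry path might place; the wrapper function is illustrative.

/* Assert that the context-tracking state was switched to kernel mode. */
static void my_syscall_entry_check(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
}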
59 74
60#ifdef CONFIG_CONTEXT_TRACKING_FORCE 75#ifdef CONFIG_CONTEXT_TRACKING_FORCE
61extern void context_tracking_init(void); 76extern void context_tracking_init(void);
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 678ecdf90cf6..ee956c528fab 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -14,6 +14,7 @@ struct context_tracking {
14 bool active; 14 bool active;
15 int recursion; 15 int recursion;
16 enum ctx_state { 16 enum ctx_state {
17 CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
17 CONTEXT_KERNEL = 0, 18 CONTEXT_KERNEL = 0,
18 CONTEXT_USER, 19 CONTEXT_USER,
19 CONTEXT_GUEST, 20 CONTEXT_GUEST,
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 3486b9082adb..c69e1b932809 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
14#define _LINUX_CORESIGHT_H 14#define _LINUX_CORESIGHT_H
15 15
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/sched.h>
17 18
18/* Peripheral id registers (0xFD0-0xFEC) */ 19/* Peripheral id registers (0xFD0-0xFEC) */
19#define CORESIGHT_PERIPHIDR4 0xfd0 20#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -248,4 +249,24 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data(
248 struct device *dev, struct device_node *node) { return NULL; } 249 struct device *dev, struct device_node *node) { return NULL; }
249#endif 250#endif
250 251
252#ifdef CONFIG_PID_NS
253static inline unsigned long
254coresight_vpid_to_pid(unsigned long vpid)
255{
256 struct task_struct *task = NULL;
257 unsigned long pid = 0;
258
259 rcu_read_lock();
260 task = find_task_by_vpid(vpid);
261 if (task)
262 pid = task_pid_nr(task);
263 rcu_read_unlock();
264
265 return pid;
266}
267#else
268static inline unsigned long
269coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
270#endif
271
251#endif 272#endif
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index c4d4eb8ac9fe..986c06c88d81 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -11,6 +11,7 @@
11 11
12#ifdef CONFIG_GENERIC_CPU_AUTOPROBE 12#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
13 13
14#include <linux/init.h>
14#include <linux/mod_devicetable.h> 15#include <linux/mod_devicetable.h>
15#include <asm/cpufeature.h> 16#include <asm/cpufeature.h>
16 17
@@ -43,16 +44,16 @@
43 * For a list of legal values for 'feature', please consult the file 44 * For a list of legal values for 'feature', please consult the file
44 * 'asm/cpufeature.h' of your favorite architecture. 45 * 'asm/cpufeature.h' of your favorite architecture.
45 */ 46 */
46#define module_cpu_feature_match(x, __init) \ 47#define module_cpu_feature_match(x, __initfunc) \
47static struct cpu_feature const cpu_feature_match_ ## x[] = \ 48static struct cpu_feature const cpu_feature_match_ ## x[] = \
48 { { .feature = cpu_feature(x) }, { } }; \ 49 { { .feature = cpu_feature(x) }, { } }; \
49MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ 50MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
50 \ 51 \
51static int cpu_feature_match_ ## x ## _init(void) \ 52static int __init cpu_feature_match_ ## x ## _init(void) \
52{ \ 53{ \
53 if (!cpu_have_feature(cpu_feature(x))) \ 54 if (!cpu_have_feature(cpu_feature(x))) \
54 return -ENODEV; \ 55 return -ENODEV; \
55 return __init(); \ 56 return __initfunc(); \
56} \ 57} \
57module_init(cpu_feature_match_ ## x ## _init) 58module_init(cpu_feature_match_ ## x ## _init)
58 59
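Renaming the macro parameter from __init to __initfunc frees the __init token inside the macro body, so the generated wrapper can itself be placed in the init section. Callers are unchanged; a hedged example for a module that should only load when the CPU advertises a given feature (the feature name and init function are illustrative).

static int __init my_aes_driver_init(void)
{
	/* register the accelerated implementation here */
	return 0;
}

/* Auto-load only on CPUs that report the AES feature bit. */
module_cpu_feature_match(AES, my_aes_driver_init);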
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index bde1e567b3a9..430efcbea48e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
51 unsigned int transition_latency; 51 unsigned int transition_latency;
52}; 52};
53 53
54struct cpufreq_real_policy { 54struct cpufreq_user_policy {
55 unsigned int min; /* in kHz */ 55 unsigned int min; /* in kHz */
56 unsigned int max; /* in kHz */ 56 unsigned int max; /* in kHz */
57 unsigned int policy; /* see above */
58 struct cpufreq_governor *governor; /* see below */
59}; 57};
60 58
61struct cpufreq_policy { 59struct cpufreq_policy {
@@ -88,7 +86,7 @@ struct cpufreq_policy {
88 struct work_struct update; /* if update_policy() needs to be 86 struct work_struct update; /* if update_policy() needs to be
89 * called, but you're in IRQ context */ 87 * called, but you're in IRQ context */
90 88
91 struct cpufreq_real_policy user_policy; 89 struct cpufreq_user_policy user_policy;
92 struct cpufreq_frequency_table *freq_table; 90 struct cpufreq_frequency_table *freq_table;
93 91
94 struct list_head policy_list; 92 struct list_head policy_list;
@@ -369,11 +367,10 @@ static inline void cpufreq_resume(void) {}
369 367
370/* Policy Notifiers */ 368/* Policy Notifiers */
371#define CPUFREQ_ADJUST (0) 369#define CPUFREQ_ADJUST (0)
372#define CPUFREQ_INCOMPATIBLE (1) 370#define CPUFREQ_NOTIFY (1)
373#define CPUFREQ_NOTIFY (2) 371#define CPUFREQ_START (2)
374#define CPUFREQ_START (3) 372#define CPUFREQ_CREATE_POLICY (3)
375#define CPUFREQ_CREATE_POLICY (4) 373#define CPUFREQ_REMOVE_POLICY (4)
376#define CPUFREQ_REMOVE_POLICY (5)
377 374
378#ifdef CONFIG_CPU_FREQ 375#ifdef CONFIG_CPU_FREQ
379int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); 376int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -578,6 +575,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
578int cpufreq_boost_trigger_state(int state); 575int cpufreq_boost_trigger_state(int state);
579int cpufreq_boost_supported(void); 576int cpufreq_boost_supported(void);
580int cpufreq_boost_enabled(void); 577int cpufreq_boost_enabled(void);
578int cpufreq_enable_boost_support(void);
579bool policy_has_boost_freq(struct cpufreq_policy *policy);
581#else 580#else
582static inline int cpufreq_boost_trigger_state(int state) 581static inline int cpufreq_boost_trigger_state(int state)
583{ 582{
@@ -591,12 +590,23 @@ static inline int cpufreq_boost_enabled(void)
591{ 590{
592 return 0; 591 return 0;
593} 592}
593
594static inline int cpufreq_enable_boost_support(void)
595{
596 return -EINVAL;
597}
598
599static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
600{
601 return false;
602}
594#endif 603#endif
595/* the following funtion is for cpufreq core use only */ 604/* the following funtion is for cpufreq core use only */
596struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); 605struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
597 606
598/* the following are really really optional */ 607/* the following are really really optional */
599extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; 608extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
609extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
600extern struct freq_attr *cpufreq_generic_attr[]; 610extern struct freq_attr *cpufreq_generic_attr[];
601int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, 611int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
602 struct cpufreq_frequency_table *table); 612 struct cpufreq_frequency_table *table);
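[Editor's note] policy_has_boost_freq() and cpufreq_enable_boost_support() let a driver enable boost handling only when its frequency table actually contains boost entries, with cpufreq_freq_attr_scaling_boost_freqs exposing them in sysfs. A driver-side sketch (function and array names are illustrative):

#include <linux/cpufreq.h>

static struct freq_attr *example_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* filled with the boost attribute when applicable */
	NULL,
};

static int example_policy_init(struct cpufreq_policy *policy)
{
	/* ... after policy->freq_table has been set up ... */
	if (policy_has_boost_freq(policy)) {
		example_cpufreq_attr[1] =
			&cpufreq_freq_attr_scaling_boost_freqs;
		return cpufreq_enable_boost_support();
	}

	return 0;
}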
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index d075d34279df..786ad32631a6 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -84,7 +84,6 @@ struct cpuidle_device {
84 struct list_head device_list; 84 struct list_head device_list;
85 85
86#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 86#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
87 int safe_state_index;
88 cpumask_t coupled_cpus; 87 cpumask_t coupled_cpus;
89 struct cpuidle_coupled *coupled; 88 struct cpuidle_coupled *coupled;
90#endif 89#endif
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8b6c083e68a7..8d70e1361ecd 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -137,6 +137,7 @@ struct cred {
137 kernel_cap_t cap_permitted; /* caps we're permitted */ 137 kernel_cap_t cap_permitted; /* caps we're permitted */
138 kernel_cap_t cap_effective; /* caps we can actually use */ 138 kernel_cap_t cap_effective; /* caps we can actually use */
139 kernel_cap_t cap_bset; /* capability bounding set */ 139 kernel_cap_t cap_bset; /* capability bounding set */
140 kernel_cap_t cap_ambient; /* Ambient capability set */
140#ifdef CONFIG_KEYS 141#ifdef CONFIG_KEYS
141 unsigned char jit_keyring; /* default keyring to attach requested 142 unsigned char jit_keyring; /* default keyring to attach requested
142 * keys to */ 143 * keys to */
@@ -212,6 +213,13 @@ static inline void validate_process_creds(void)
212} 213}
213#endif 214#endif
214 215
216static inline bool cap_ambient_invariant_ok(const struct cred *cred)
217{
218 return cap_issubset(cred->cap_ambient,
219 cap_intersect(cred->cap_permitted,
220 cred->cap_inheritable));
221}
222
215/** 223/**
216 * get_new_cred - Get a reference on a new set of credentials 224 * get_new_cred - Get a reference on a new set of credentials
217 * @cred: The new credentials to reference 225 * @cred: The new credentials to reference
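[Editor's note] cap_ambient_invariant_ok() encodes the rule that the new ambient set must stay within both the permitted and inheritable sets. A sketch of checking it before committing modified credentials (the wrapper itself is illustrative; abort_creds()/commit_creds() are the existing cred API):

#include <linux/cred.h>
#include <linux/bug.h>

static int example_commit_new_creds(struct cred *new)
{
	if (WARN_ON(!cap_ambient_invariant_ok(new))) {
		abort_creds(new);
		return -EPERM;
	}

	return commit_creds(new);
}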
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 81ef938b0a8e..e71cb70a1ac2 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -102,12 +102,6 @@
102#define CRYPTO_ALG_INTERNAL 0x00002000 102#define CRYPTO_ALG_INTERNAL 0x00002000
103 103
104/* 104/*
105 * Temporary flag used to prevent legacy AEAD implementations from
106 * being used by user-space.
107 */
108#define CRYPTO_ALG_AEAD_NEW 0x00004000
109
110/*
111 * Transform masks and values (for crt_flags). 105 * Transform masks and values (for crt_flags).
112 */ 106 */
113#define CRYPTO_TFM_REQ_MASK 0x000fff00 107#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -142,13 +136,10 @@
142struct scatterlist; 136struct scatterlist;
143struct crypto_ablkcipher; 137struct crypto_ablkcipher;
144struct crypto_async_request; 138struct crypto_async_request;
145struct crypto_aead;
146struct crypto_blkcipher; 139struct crypto_blkcipher;
147struct crypto_hash; 140struct crypto_hash;
148struct crypto_tfm; 141struct crypto_tfm;
149struct crypto_type; 142struct crypto_type;
150struct aead_request;
151struct aead_givcrypt_request;
152struct skcipher_givcrypt_request; 143struct skcipher_givcrypt_request;
153 144
154typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); 145typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -275,47 +266,6 @@ struct ablkcipher_alg {
275}; 266};
276 267
277/** 268/**
278 * struct old_aead_alg - AEAD cipher definition
279 * @maxauthsize: Set the maximum authentication tag size supported by the
280 * transformation. A transformation may support smaller tag sizes.
281 * As the authentication tag is a message digest to ensure the
282 * integrity of the encrypted data, a consumer typically wants the
283 * largest authentication tag possible as defined by this
284 * variable.
285 * @setauthsize: Set authentication size for the AEAD transformation. This
286 * function is used to specify the consumer requested size of the
287 * authentication tag to be either generated by the transformation
288 * during encryption or the size of the authentication tag to be
289 * supplied during the decryption operation. This function is also
290 * responsible for checking the authentication tag size for
291 * validity.
292 * @setkey: see struct ablkcipher_alg
293 * @encrypt: see struct ablkcipher_alg
294 * @decrypt: see struct ablkcipher_alg
295 * @givencrypt: see struct ablkcipher_alg
296 * @givdecrypt: see struct ablkcipher_alg
297 * @geniv: see struct ablkcipher_alg
298 * @ivsize: see struct ablkcipher_alg
299 *
300 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
301 * mandatory and must be filled.
302 */
303struct old_aead_alg {
304 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
305 unsigned int keylen);
306 int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
307 int (*encrypt)(struct aead_request *req);
308 int (*decrypt)(struct aead_request *req);
309 int (*givencrypt)(struct aead_givcrypt_request *req);
310 int (*givdecrypt)(struct aead_givcrypt_request *req);
311
312 const char *geniv;
313
314 unsigned int ivsize;
315 unsigned int maxauthsize;
316};
317
318/**
319 * struct blkcipher_alg - synchronous block cipher definition 269 * struct blkcipher_alg - synchronous block cipher definition
320 * @min_keysize: see struct ablkcipher_alg 270 * @min_keysize: see struct ablkcipher_alg
321 * @max_keysize: see struct ablkcipher_alg 271 * @max_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
409 359
410 360
411#define cra_ablkcipher cra_u.ablkcipher 361#define cra_ablkcipher cra_u.ablkcipher
412#define cra_aead cra_u.aead
413#define cra_blkcipher cra_u.blkcipher 362#define cra_blkcipher cra_u.blkcipher
414#define cra_cipher cra_u.cipher 363#define cra_cipher cra_u.cipher
415#define cra_compress cra_u.compress 364#define cra_compress cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
460 * struct crypto_type, which implements callbacks common for all 409 * struct crypto_type, which implements callbacks common for all
461 * transformation types. There are multiple options: 410 * transformation types. There are multiple options:
462 * &crypto_blkcipher_type, &crypto_ablkcipher_type, 411 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
463 * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. 412 * &crypto_ahash_type, &crypto_rng_type.
464 * This field might be empty. In that case, there are no common 413 * This field might be empty. In that case, there are no common
465 * callbacks. This is the case for: cipher, compress, shash. 414 * callbacks. This is the case for: cipher, compress, shash.
466 * @cra_u: Callbacks implementing the transformation. This is a union of 415 * @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
508 457
509 union { 458 union {
510 struct ablkcipher_alg ablkcipher; 459 struct ablkcipher_alg ablkcipher;
511 struct old_aead_alg aead;
512 struct blkcipher_alg blkcipher; 460 struct blkcipher_alg blkcipher;
513 struct cipher_alg cipher; 461 struct cipher_alg cipher;
514 struct compress_alg compress; 462 struct compress_alg compress;
diff --git a/include/linux/dax.h b/include/linux/dax.h
new file mode 100644
index 000000000000..b415e521528d
--- /dev/null
+++ b/include/linux/dax.h
@@ -0,0 +1,39 @@
1#ifndef _LINUX_DAX_H
2#define _LINUX_DAX_H
3
4#include <linux/fs.h>
5#include <linux/mm.h>
6#include <asm/pgtable.h>
7
8ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
9 get_block_t, dio_iodone_t, int flags);
10int dax_clear_blocks(struct inode *, sector_t block, long size);
11int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
12int dax_truncate_page(struct inode *, loff_t from, get_block_t);
13int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
14 dax_iodone_t);
15int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
16 dax_iodone_t);
17#ifdef CONFIG_TRANSPARENT_HUGEPAGE
18int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
19 unsigned int flags, get_block_t, dax_iodone_t);
20int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
21 unsigned int flags, get_block_t, dax_iodone_t);
22#else
23static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
24 pmd_t *pmd, unsigned int flags, get_block_t gb,
25 dax_iodone_t di)
26{
27 return VM_FAULT_FALLBACK;
28}
29#define __dax_pmd_fault dax_pmd_fault
30#endif
31int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
32#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
33#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
34
35static inline bool vma_is_dax(struct vm_area_struct *vma)
36{
37 return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
38}
39#endif
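[Editor's note] The DAX entry points move out of fs.h into this dedicated header. A sketch of how a filesystem might wire its mmap fault path through them, assuming a placeholder myfs_get_block() block-mapping routine and no I/O completion callback (hence the NULL):

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, myfs_get_block, NULL);
}

static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.page_mkwrite	= myfs_dax_fault,	/* dax_mkwrite() expands to the same call */
	.pfn_mkwrite	= dax_pfn_mkwrite,
};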
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 420311bcee38..9beb636b97eb 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -116,6 +116,12 @@ struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
116 116
117bool debugfs_initialized(void); 117bool debugfs_initialized(void);
118 118
119ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
120 size_t count, loff_t *ppos);
121
122ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
123 size_t count, loff_t *ppos);
124
119#else 125#else
120 126
121#include <linux/err.h> 127#include <linux/err.h>
@@ -282,6 +288,20 @@ static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
282 return ERR_PTR(-ENODEV); 288 return ERR_PTR(-ENODEV);
283} 289}
284 290
291static inline ssize_t debugfs_read_file_bool(struct file *file,
292 char __user *user_buf,
293 size_t count, loff_t *ppos)
294{
295 return -ENODEV;
296}
297
298static inline ssize_t debugfs_write_file_bool(struct file *file,
299 const char __user *user_buf,
300 size_t count, loff_t *ppos)
301{
302 return -ENODEV;
303}
304
285#endif 305#endif
286 306
287#endif 307#endif
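[Editor's note] Exporting debugfs_read_file_bool()/debugfs_write_file_bool() lets callers build their own fops on top of the standard Y/N parsing, e.g. to add a side effect when the value changes. A sketch with illustrative names; the file's private_data must point at the backing value, which simple_open() takes from the data argument of debugfs_create_file():

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/printk.h>

static ssize_t example_bool_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	ssize_t ret = debugfs_write_file_bool(file, buf, count, ppos);

	if (ret > 0)
		pr_info("example: toggle updated via debugfs\n");

	return ret;
}

static const struct file_operations example_bool_fops = {
	.read	= debugfs_read_file_bool,
	.write	= example_bool_write,
	.open	= simple_open,
	.llseek	= default_llseek,
};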
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 51cc1deb7af3..76d23fa8c7d3 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -82,9 +82,6 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
82typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, 82typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
83 unsigned long arg); 83 unsigned long arg);
84 84
85typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
86 struct bio_vec *biovec, int max_size);
87
88/* 85/*
89 * These iteration functions are typically used to check (and combine) 86 * These iteration functions are typically used to check (and combine)
90 * properties of underlying devices. 87 * properties of underlying devices.
@@ -160,7 +157,6 @@ struct target_type {
160 dm_status_fn status; 157 dm_status_fn status;
161 dm_message_fn message; 158 dm_message_fn message;
162 dm_ioctl_fn ioctl; 159 dm_ioctl_fn ioctl;
163 dm_merge_fn merge;
164 dm_busy_fn busy; 160 dm_busy_fn busy;
165 dm_iterate_devices_fn iterate_devices; 161 dm_iterate_devices_fn iterate_devices;
166 dm_io_hints_fn io_hints; 162 dm_io_hints_fn io_hints;
diff --git a/include/linux/device.h b/include/linux/device.h
index a2b4ea70a946..5d7bc6349930 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -341,7 +341,7 @@ struct subsys_interface {
341 struct bus_type *subsys; 341 struct bus_type *subsys;
342 struct list_head node; 342 struct list_head node;
343 int (*add_dev)(struct device *dev, struct subsys_interface *sif); 343 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
344 int (*remove_dev)(struct device *dev, struct subsys_interface *sif); 344 void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
345}; 345};
346 346
347int subsys_interface_register(struct subsys_interface *sif); 347int subsys_interface_register(struct subsys_interface *sif);
@@ -714,6 +714,8 @@ struct device_dma_parameters {
714 * along with subsystem-level and driver-level callbacks. 714 * along with subsystem-level and driver-level callbacks.
715 * @pins: For device pin management. 715 * @pins: For device pin management.
716 * See Documentation/pinctrl.txt for details. 716 * See Documentation/pinctrl.txt for details.
717 * @msi_list: Hosts MSI descriptors
718 * @msi_domain: The generic MSI domain this device is using.
717 * @numa_node: NUMA node this device is close to. 719 * @numa_node: NUMA node this device is close to.
718 * @dma_mask: Dma mask (if dma'ble device). 720 * @dma_mask: Dma mask (if dma'ble device).
719 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all 721 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -774,9 +776,15 @@ struct device {
774 struct dev_pm_info power; 776 struct dev_pm_info power;
775 struct dev_pm_domain *pm_domain; 777 struct dev_pm_domain *pm_domain;
776 778
779#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
780 struct irq_domain *msi_domain;
781#endif
777#ifdef CONFIG_PINCTRL 782#ifdef CONFIG_PINCTRL
778 struct dev_pin_info *pins; 783 struct dev_pin_info *pins;
779#endif 784#endif
785#ifdef CONFIG_GENERIC_MSI_IRQ
786 struct list_head msi_list;
787#endif
780 788
781#ifdef CONFIG_NUMA 789#ifdef CONFIG_NUMA
782 int numa_node; /* NUMA node this device is close to */ 790 int numa_node; /* NUMA node this device is close to */
@@ -861,6 +869,22 @@ static inline void set_dev_node(struct device *dev, int node)
861} 869}
862#endif 870#endif
863 871
872static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
873{
874#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
875 return dev->msi_domain;
876#else
877 return NULL;
878#endif
879}
880
881static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
882{
883#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
884 dev->msi_domain = d;
885#endif
886}
887
864static inline void *dev_get_drvdata(const struct device *dev) 888static inline void *dev_get_drvdata(const struct device *dev)
865{ 889{
866 return dev->driver_data; 890 return dev->driver_data;
@@ -959,6 +983,8 @@ extern int __must_check device_add(struct device *dev);
959extern void device_del(struct device *dev); 983extern void device_del(struct device *dev);
960extern int device_for_each_child(struct device *dev, void *data, 984extern int device_for_each_child(struct device *dev, void *data,
961 int (*fn)(struct device *dev, void *data)); 985 int (*fn)(struct device *dev, void *data));
986extern int device_for_each_child_reverse(struct device *dev, void *data,
987 int (*fn)(struct device *dev, void *data));
962extern struct device *device_find_child(struct device *dev, void *data, 988extern struct device *device_find_child(struct device *dev, void *data,
963 int (*match)(struct device *dev, void *data)); 989 int (*match)(struct device *dev, void *data));
964extern int device_rename(struct device *dev, const char *new_name); 990extern int device_rename(struct device *dev, const char *new_name);
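[Editor's note] dev_get_msi_domain()/dev_set_msi_domain() give bus code a uniform way to attach an MSI irqdomain to a device; they compile away to NULL/no-op when CONFIG_GENERIC_MSI_IRQ_DOMAIN is off. A bus-side sketch (where the domain comes from is up to the bus; the parameter here is a placeholder):

#include <linux/device.h>
#include <linux/irqdomain.h>

static void example_bus_setup_msi(struct device *dev,
				  struct irq_domain *bus_msi_domain)
{
	/* Respect a domain set earlier, e.g. from firmware description. */
	if (!dev_get_msi_domain(dev))
		dev_set_msi_domain(dev, bus_msi_domain);
}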
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2f5eb419976..7ea9184eaa13 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,6 +66,7 @@ enum dma_transaction_type {
66 DMA_XOR_VAL, 66 DMA_XOR_VAL,
67 DMA_PQ_VAL, 67 DMA_PQ_VAL,
68 DMA_MEMSET, 68 DMA_MEMSET,
69 DMA_MEMSET_SG,
69 DMA_INTERRUPT, 70 DMA_INTERRUPT,
70 DMA_SG, 71 DMA_SG,
71 DMA_PRIVATE, 72 DMA_PRIVATE,
@@ -183,6 +184,8 @@ struct dma_interleaved_template {
183 * operation it continues the calculation with new sources 184 * operation it continues the calculation with new sources
184 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend 185 * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
185 * on the result of this operation 186 * on the result of this operation
187 * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
188 * cleared or freed
186 */ 189 */
187enum dma_ctrl_flags { 190enum dma_ctrl_flags {
188 DMA_PREP_INTERRUPT = (1 << 0), 191 DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@ enum dma_ctrl_flags {
191 DMA_PREP_PQ_DISABLE_Q = (1 << 3), 194 DMA_PREP_PQ_DISABLE_Q = (1 << 3),
192 DMA_PREP_CONTINUE = (1 << 4), 195 DMA_PREP_CONTINUE = (1 << 4),
193 DMA_PREP_FENCE = (1 << 5), 196 DMA_PREP_FENCE = (1 << 5),
197 DMA_CTRL_REUSE = (1 << 6),
194}; 198};
195 199
196/** 200/**
@@ -400,6 +404,8 @@ enum dma_residue_granularity {
400 * @cmd_pause: true, if pause and thereby resume is supported 404 * @cmd_pause: true, if pause and thereby resume is supported
401 * @cmd_terminate: true, if terminate cmd is supported 405 * @cmd_terminate: true, if terminate cmd is supported
402 * @residue_granularity: granularity of the reported transfer residue 406 * @residue_granularity: granularity of the reported transfer residue
407 * @descriptor_reuse: if a descriptor can be reused by client and
408 * resubmitted multiple times
403 */ 409 */
404struct dma_slave_caps { 410struct dma_slave_caps {
405 u32 src_addr_widths; 411 u32 src_addr_widths;
@@ -408,6 +414,7 @@ struct dma_slave_caps {
408 bool cmd_pause; 414 bool cmd_pause;
409 bool cmd_terminate; 415 bool cmd_terminate;
410 enum dma_residue_granularity residue_granularity; 416 enum dma_residue_granularity residue_granularity;
417 bool descriptor_reuse;
411}; 418};
412 419
413static inline const char *dma_chan_name(struct dma_chan *chan) 420static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@ struct dma_async_tx_descriptor {
467 dma_addr_t phys; 474 dma_addr_t phys;
468 struct dma_chan *chan; 475 struct dma_chan *chan;
469 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 476 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
477 int (*desc_free)(struct dma_async_tx_descriptor *tx);
470 dma_async_tx_callback callback; 478 dma_async_tx_callback callback;
471 void *callback_param; 479 void *callback_param;
472 struct dmaengine_unmap_data *unmap; 480 struct dmaengine_unmap_data *unmap;
@@ -585,6 +593,20 @@ struct dma_tx_state {
585}; 593};
586 594
587/** 595/**
596 * enum dmaengine_alignment - defines alignment of the DMA async tx
597 * buffers
598 */
599enum dmaengine_alignment {
600 DMAENGINE_ALIGN_1_BYTE = 0,
601 DMAENGINE_ALIGN_2_BYTES = 1,
602 DMAENGINE_ALIGN_4_BYTES = 2,
603 DMAENGINE_ALIGN_8_BYTES = 3,
604 DMAENGINE_ALIGN_16_BYTES = 4,
605 DMAENGINE_ALIGN_32_BYTES = 5,
606 DMAENGINE_ALIGN_64_BYTES = 6,
607};
608
609/**
588 * struct dma_device - info on the entity supplying DMA services 610 * struct dma_device - info on the entity supplying DMA services
589 * @chancnt: how many DMA channels are supported 611 * @chancnt: how many DMA channels are supported
590 * @privatecnt: how many DMA channels are requested by dma_request_channel 612 * @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -616,6 +638,7 @@ struct dma_tx_state {
616 * @device_prep_dma_pq: prepares a pq operation 638 * @device_prep_dma_pq: prepares a pq operation
617 * @device_prep_dma_pq_val: prepares a pqzero_sum operation 639 * @device_prep_dma_pq_val: prepares a pqzero_sum operation
618 * @device_prep_dma_memset: prepares a memset operation 640 * @device_prep_dma_memset: prepares a memset operation
641 * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
619 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 642 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
620 * @device_prep_slave_sg: prepares a slave dma operation 643 * @device_prep_slave_sg: prepares a slave dma operation
621 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. 644 * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@ struct dma_device {
645 dma_cap_mask_t cap_mask; 668 dma_cap_mask_t cap_mask;
646 unsigned short max_xor; 669 unsigned short max_xor;
647 unsigned short max_pq; 670 unsigned short max_pq;
648 u8 copy_align; 671 enum dmaengine_alignment copy_align;
649 u8 xor_align; 672 enum dmaengine_alignment xor_align;
650 u8 pq_align; 673 enum dmaengine_alignment pq_align;
651 u8 fill_align; 674 enum dmaengine_alignment fill_align;
652 #define DMA_HAS_PQ_CONTINUE (1 << 15) 675 #define DMA_HAS_PQ_CONTINUE (1 << 15)
653 676
654 int dev_id; 677 int dev_id;
@@ -682,6 +705,9 @@ struct dma_device {
682 struct dma_async_tx_descriptor *(*device_prep_dma_memset)( 705 struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
683 struct dma_chan *chan, dma_addr_t dest, int value, size_t len, 706 struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
684 unsigned long flags); 707 unsigned long flags);
708 struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
709 struct dma_chan *chan, struct scatterlist *sg,
710 unsigned int nents, int value, unsigned long flags);
685 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 711 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
686 struct dma_chan *chan, unsigned long flags); 712 struct dma_chan *chan, unsigned long flags);
687 struct dma_async_tx_descriptor *(*device_prep_dma_sg)( 713 struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
833 return desc->tx_submit(desc); 859 return desc->tx_submit(desc);
834} 860}
835 861
836static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) 862static inline bool dmaengine_check_align(enum dmaengine_alignment align,
863 size_t off1, size_t off2, size_t len)
837{ 864{
838 size_t mask; 865 size_t mask;
839 866
@@ -1155,6 +1182,39 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
1155} 1182}
1156#endif 1183#endif
1157 1184
1185static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
1186{
1187 struct dma_slave_caps caps;
1188
1189 dma_get_slave_caps(tx->chan, &caps);
1190
1191 if (caps.descriptor_reuse) {
1192 tx->flags |= DMA_CTRL_REUSE;
1193 return 0;
1194 } else {
1195 return -EPERM;
1196 }
1197}
1198
1199static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
1200{
1201 tx->flags &= ~DMA_CTRL_REUSE;
1202}
1203
1204static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
1205{
1206 return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
1207}
1208
1209static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
1210{
1211 /* this is supported for reusable desc, so check that */
1212 if (dmaengine_desc_test_reuse(desc))
1213 return desc->desc_free(desc);
1214 else
1215 return -EPERM;
1216}
1217
1158/* --- DMA device --- */ 1218/* --- DMA device --- */
1159 1219
1160int dma_async_device_register(struct dma_device *device); 1220int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
1169static inline struct dma_chan 1229static inline struct dma_chan
1170*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, 1230*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
1171 dma_filter_fn fn, void *fn_param, 1231 dma_filter_fn fn, void *fn_param,
1172 struct device *dev, char *name) 1232 struct device *dev, const char *name)
1173{ 1233{
1174 struct dma_chan *chan; 1234 struct dma_chan *chan;
1175 1235
@@ -1177,6 +1237,9 @@ static inline struct dma_chan
1177 if (chan) 1237 if (chan)
1178 return chan; 1238 return chan;
1179 1239
1240 if (!fn || !fn_param)
1241 return NULL;
1242
1180 return __dma_request_channel(mask, fn, fn_param); 1243 return __dma_request_channel(mask, fn, fn_param);
1181} 1244}
1182#endif /* DMAENGINE_H */ 1245#endif /* DMAENGINE_H */
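[Editor's note] DMA_CTRL_REUSE and the dmaengine_desc_*_reuse()/dmaengine_desc_free() helpers let a client keep resubmitting one prepared descriptor, provided the channel reports descriptor_reuse in its slave caps. A client-side sketch:

#include <linux/dmaengine.h>
#include <linux/printk.h>

static void example_submit(struct dma_chan *chan,
			   struct dma_async_tx_descriptor *txd)
{
	/* Opt in to reuse; -EPERM simply means "prepare a fresh descriptor
	 * next time", not a fatal error. */
	if (dmaengine_desc_set_reuse(txd))
		pr_debug("example: channel cannot reuse descriptors\n");

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
}

static void example_teardown(struct dma_async_tx_descriptor *txd)
{
	/* Reusable descriptors must be released explicitly. */
	if (dmaengine_desc_test_reuse(txd))
		dmaengine_desc_free(txd);
}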
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index e1043f79122f..53ba737505df 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -24,6 +24,12 @@ void dma_pool_destroy(struct dma_pool *pool);
24void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 24void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
25 dma_addr_t *handle); 25 dma_addr_t *handle);
26 26
27static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
28 dma_addr_t *handle)
29{
30 return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
31}
32
27void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); 33void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
28 34
29/* 35/*
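[Editor's note] dma_pool_zalloc() is a thin wrapper that ORs in __GFP_ZERO, replacing the common dma_pool_alloc() + memset() pair. For example:

#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *example_alloc_desc(struct dma_pool *pool, dma_addr_t *phys)
{
	/* Equivalent to dma_pool_alloc(pool, GFP_KERNEL, phys) followed by
	 * zeroing the returned block. */
	return dma_pool_zalloc(pool, GFP_KERNEL, phys);
}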
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9012f8775208..eb049c622208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
76 76
77#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 77#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
78 return (((*(const u32 *)addr) ^ (*(const u32 *)b)) | 78 return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
79 ((a[2] ^ b[2]) & m)) == 0; 79 (__force int)((a[2] ^ b[2]) & m)) == 0;
80#else 80#else
81 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; 81 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
82#endif 82#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b16d929fa75f..c0f8c4fc5d45 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -27,8 +27,6 @@
27#define __LINUX_EXTCON_H__ 27#define __LINUX_EXTCON_H__
28 28
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/notifier.h>
31#include <linux/sysfs.h>
32 30
33/* 31/*
34 * Define the unique id of supported external connectors 32 * Define the unique id of supported external connectors
@@ -77,8 +75,6 @@ struct extcon_cable;
77 * be attached simulataneously. {0x7, 0} is equivalent to 75 * be attached simulataneously. {0x7, 0} is equivalent to
78 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there 76 * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
79 * can be no simultaneous connections. 77 * can be no simultaneous connections.
80 * @print_state: An optional callback to override the method to print the
81 * status of the extcon device.
82 * @dev: Device of this extcon. 78 * @dev: Device of this extcon.
83 * @state: Attach/detach state of this extcon. Do not provide at 79 * @state: Attach/detach state of this extcon. Do not provide at
84 * register-time. 80 * register-time.
@@ -102,9 +98,6 @@ struct extcon_dev {
102 const unsigned int *supported_cable; 98 const unsigned int *supported_cable;
103 const u32 *mutually_exclusive; 99 const u32 *mutually_exclusive;
104 100
105 /* Optional callbacks to override class functions */
106 ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
107
108 /* Internal data. Please do not set. */ 101 /* Internal data. Please do not set. */
109 struct device dev; 102 struct device dev;
110 struct raw_notifier_head *nh; 103 struct raw_notifier_head *nh;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a21ffd..25c6324a0dd0 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@ typedef __le32 f2fs_hash_t;
417 417
418#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS) 418#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
419 419
420/* the number of dentry in a block */
421#define NR_DENTRY_IN_BLOCK 214
422
423/* MAX level for dir lookup */ 420/* MAX level for dir lookup */
424#define MAX_DIR_HASH_DEPTH 63 421#define MAX_DIR_HASH_DEPTH 63
425 422
426/* MAX buckets in one level of dir */ 423/* MAX buckets in one level of dir */
427#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1)) 424#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
428 425
426/*
427 * space utilization of regular dentry and inline dentry
428 * regular dentry inline dentry
429 * bitmap 1 * 27 = 27 1 * 23 = 23
430 * reserved 1 * 3 = 3 1 * 7 = 7
431 * dentry 11 * 214 = 2354 11 * 182 = 2002
432 * filename 8 * 214 = 1712 8 * 182 = 1456
433 * total 4096 3488
434 *
435 * Note: there are more reserved space in inline dentry than in regular
436 * dentry, when converting inline dentry we should handle this carefully.
437 */
438#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
429#define SIZE_OF_DIR_ENTRY 11 /* by byte */ 439#define SIZE_OF_DIR_ENTRY 11 /* by byte */
430#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ 440#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
431 BITS_PER_BYTE) 441 BITS_PER_BYTE)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 043f3283b71c..bc9afa74ee11 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -788,7 +788,7 @@ struct dmt_videomode {
788 788
789extern const char *fb_mode_option; 789extern const char *fb_mode_option;
790extern const struct fb_videomode vesa_modes[]; 790extern const struct fb_videomode vesa_modes[];
791extern const struct fb_videomode cea_modes[64]; 791extern const struct fb_videomode cea_modes[65];
792extern const struct dmt_videomode dmt_modes[]; 792extern const struct dmt_videomode dmt_modes[];
793 793
794struct fb_modelist { 794struct fb_modelist {
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index fbb88740634a..674e3e226465 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -86,8 +86,8 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
86 86
87static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd) 87static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
88{ 88{
89 rcu_lockdep_assert(rcu_read_lock_held() || 89 RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
90 lockdep_is_held(&files->file_lock), 90 !lockdep_is_held(&files->file_lock),
91 "suspicious rcu_dereference_check() usage"); 91 "suspicious rcu_dereference_check() usage");
92 return __fcheck_files(files, fd); 92 return __fcheck_files(files, fd);
93} 93}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6ea983..fa2cab985e57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <linux/printk.h> 13#include <linux/printk.h>
14#include <linux/workqueue.h> 14#include <linux/workqueue.h>
15#include <linux/sched.h>
15 16
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
17 18
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
354 offsetof(struct bpf_prog, insns[proglen])); 355 offsetof(struct bpf_prog, insns[proglen]));
355} 356}
356 357
358static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
359{
360 /* When classic BPF programs have been loaded and the arch
361 * does not have a classic BPF JIT (anymore), they have been
362 * converted via bpf_migrate_filter() to eBPF and thus always
363 * have an unspec program type.
364 */
365 return prog->type == BPF_PROG_TYPE_UNSPEC;
366}
367
357#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) 368#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
358 369
359#ifdef CONFIG_DEBUG_SET_MODULE_RONX 370#ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
411 422
412u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 423u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
413void bpf_int_jit_compile(struct bpf_prog *fp); 424void bpf_int_jit_compile(struct bpf_prog *fp);
425bool bpf_helper_changes_skb_data(void *func);
414 426
415#ifdef CONFIG_BPF_JIT 427#ifdef CONFIG_BPF_JIT
416typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); 428typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
427static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, 439static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
428 u32 pass, void *image) 440 u32 pass, void *image)
429{ 441{
430 pr_err("flen=%u proglen=%u pass=%u image=%pK\n", 442 pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
431 flen, proglen, pass, image); 443 proglen, pass, image, current->comm, task_pid_nr(current));
444
432 if (image) 445 if (image)
433 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 446 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
434 16, 1, image, proglen, false); 447 16, 1, image, proglen, false);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 84b783f277f7..72d8a844c692 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1,7 +1,6 @@
1#ifndef _LINUX_FS_H 1#ifndef _LINUX_FS_H
2#define _LINUX_FS_H 2#define _LINUX_FS_H
3 3
4
5#include <linux/linkage.h> 4#include <linux/linkage.h>
6#include <linux/wait.h> 5#include <linux/wait.h>
7#include <linux/kdev_t.h> 6#include <linux/kdev_t.h>
@@ -30,6 +29,8 @@
30#include <linux/lockdep.h> 29#include <linux/lockdep.h>
31#include <linux/percpu-rwsem.h> 30#include <linux/percpu-rwsem.h>
32#include <linux/blk_types.h> 31#include <linux/blk_types.h>
32#include <linux/workqueue.h>
33#include <linux/percpu-rwsem.h>
33 34
34#include <asm/byteorder.h> 35#include <asm/byteorder.h>
35#include <uapi/linux/fs.h> 36#include <uapi/linux/fs.h>
@@ -51,7 +52,6 @@ struct swap_info_struct;
51struct seq_file; 52struct seq_file;
52struct workqueue_struct; 53struct workqueue_struct;
53struct iov_iter; 54struct iov_iter;
54struct vm_fault;
55 55
56extern void __init inode_init(void); 56extern void __init inode_init(void);
57extern void __init inode_init_early(void); 57extern void __init inode_init_early(void);
@@ -636,7 +636,7 @@ struct inode {
636 unsigned long dirtied_time_when; 636 unsigned long dirtied_time_when;
637 637
638 struct hlist_node i_hash; 638 struct hlist_node i_hash;
639 struct list_head i_wb_list; /* backing dev IO list */ 639 struct list_head i_io_list; /* backing dev IO list */
640#ifdef CONFIG_CGROUP_WRITEBACK 640#ifdef CONFIG_CGROUP_WRITEBACK
641 struct bdi_writeback *i_wb; /* the associated cgroup wb */ 641 struct bdi_writeback *i_wb; /* the associated cgroup wb */
642 642
@@ -943,12 +943,18 @@ struct lock_manager_operations {
943 943
944struct lock_manager { 944struct lock_manager {
945 struct list_head list; 945 struct list_head list;
946 /*
947 * NFSv4 and up also want opens blocked during the grace period;
948 * NLM doesn't care:
949 */
950 bool block_opens;
946}; 951};
947 952
948struct net; 953struct net;
949void locks_start_grace(struct net *, struct lock_manager *); 954void locks_start_grace(struct net *, struct lock_manager *);
950void locks_end_grace(struct lock_manager *); 955void locks_end_grace(struct lock_manager *);
951int locks_in_grace(struct net *); 956int locks_in_grace(struct net *);
957int opens_in_grace(struct net *);
952 958
953/* that will die - we need it for nfs_lock_info */ 959/* that will die - we need it for nfs_lock_info */
954#include <linux/nfs_fs_i.h> 960#include <linux/nfs_fs_i.h>
@@ -1260,6 +1266,7 @@ struct mm_struct;
1260 1266
1261/* sb->s_iflags */ 1267/* sb->s_iflags */
1262#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ 1268#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
1269#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
1263 1270
1264/* Possible states of 'frozen' field */ 1271/* Possible states of 'frozen' field */
1265enum { 1272enum {
@@ -1274,16 +1281,9 @@ enum {
1274#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) 1281#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
1275 1282
1276struct sb_writers { 1283struct sb_writers {
1277 /* Counters for counting writers at each level */ 1284 int frozen; /* Is sb frozen? */
1278 struct percpu_counter counter[SB_FREEZE_LEVELS]; 1285 wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
1279 wait_queue_head_t wait; /* queue for waiting for 1286 struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
1280 writers / faults to finish */
1281 int frozen; /* Is sb frozen? */
1282 wait_queue_head_t wait_unfrozen; /* queue for waiting for
1283 sb to be thawed */
1284#ifdef CONFIG_DEBUG_LOCK_ALLOC
1285 struct lockdep_map lock_map[SB_FREEZE_LEVELS];
1286#endif
1287}; 1287};
1288 1288
1289struct super_block { 1289struct super_block {
@@ -1309,7 +1309,6 @@ struct super_block {
1309#endif 1309#endif
1310 const struct xattr_handler **s_xattr; 1310 const struct xattr_handler **s_xattr;
1311 1311
1312 struct list_head s_inodes; /* all inodes */
1313 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ 1312 struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
1314 struct list_head s_mounts; /* list of mounts; _not_ for fs use */ 1313 struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1315 struct block_device *s_bdev; 1314 struct block_device *s_bdev;
@@ -1375,11 +1374,18 @@ struct super_block {
1375 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1374 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
1376 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1375 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
1377 struct rcu_head rcu; 1376 struct rcu_head rcu;
1377 struct work_struct destroy_work;
1378
1379 struct mutex s_sync_lock; /* sync serialisation lock */
1378 1380
1379 /* 1381 /*
1380 * Indicates how deep in a filesystem stack this SB is 1382 * Indicates how deep in a filesystem stack this SB is
1381 */ 1383 */
1382 int s_stack_depth; 1384 int s_stack_depth;
1385
1386 /* s_inode_list_lock protects s_inodes */
1387 spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
1388 struct list_head s_inodes; /* all inodes */
1383}; 1389};
1384 1390
1385extern struct timespec current_fs_time(struct super_block *sb); 1391extern struct timespec current_fs_time(struct super_block *sb);
@@ -1391,6 +1397,11 @@ extern struct timespec current_fs_time(struct super_block *sb);
1391void __sb_end_write(struct super_block *sb, int level); 1397void __sb_end_write(struct super_block *sb, int level);
1392int __sb_start_write(struct super_block *sb, int level, bool wait); 1398int __sb_start_write(struct super_block *sb, int level, bool wait);
1393 1399
1400#define __sb_writers_acquired(sb, lev) \
1401 percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
1402#define __sb_writers_release(sb, lev) \
1403 percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
1404
1394/** 1405/**
1395 * sb_end_write - drop write access to a superblock 1406 * sb_end_write - drop write access to a superblock
1396 * @sb: the super we wrote to 1407 * @sb: the super we wrote to
@@ -1611,7 +1622,6 @@ struct file_operations {
1611 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 1622 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1612 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 1623 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1613 int (*mmap) (struct file *, struct vm_area_struct *); 1624 int (*mmap) (struct file *, struct vm_area_struct *);
1614 int (*mremap)(struct file *, struct vm_area_struct *);
1615 int (*open) (struct inode *, struct file *); 1625 int (*open) (struct inode *, struct file *);
1616 int (*flush) (struct file *, fl_owner_t id); 1626 int (*flush) (struct file *, fl_owner_t id);
1617 int (*release) (struct inode *, struct file *); 1627 int (*release) (struct inode *, struct file *);
@@ -2608,7 +2618,7 @@ static inline void insert_inode_hash(struct inode *inode)
2608extern void __remove_inode_hash(struct inode *); 2618extern void __remove_inode_hash(struct inode *);
2609static inline void remove_inode_hash(struct inode *inode) 2619static inline void remove_inode_hash(struct inode *inode)
2610{ 2620{
2611 if (!inode_unhashed(inode)) 2621 if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
2612 __remove_inode_hash(inode); 2622 __remove_inode_hash(inode);
2613} 2623}
2614 2624
@@ -2667,19 +2677,6 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
2667extern int generic_file_open(struct inode * inode, struct file * filp); 2677extern int generic_file_open(struct inode * inode, struct file * filp);
2668extern int nonseekable_open(struct inode * inode, struct file * filp); 2678extern int nonseekable_open(struct inode * inode, struct file * filp);
2669 2679
2670ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
2671 get_block_t, dio_iodone_t, int flags);
2672int dax_clear_blocks(struct inode *, sector_t block, long size);
2673int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
2674int dax_truncate_page(struct inode *, loff_t from, get_block_t);
2675int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
2676 dax_iodone_t);
2677int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
2678 dax_iodone_t);
2679int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
2680#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
2681#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
2682
2683#ifdef CONFIG_BLOCK 2680#ifdef CONFIG_BLOCK
2684typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, 2681typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
2685 loff_t file_offset); 2682 loff_t file_offset);
@@ -3041,4 +3038,6 @@ static inline bool dir_relax(struct inode *inode)
3041 return !IS_DEADDIR(inode); 3038 return !IS_DEADDIR(inode);
3042} 3039}
3043 3040
3041extern bool path_noexec(const struct path *path);
3042
3044#endif /* _LINUX_FS_H */ 3043#endif /* _LINUX_FS_H */
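[Editor's note] With sb_writers now built on per-level percpu_rw_semaphores, the freeze-protection call pattern used by filesystem code is unchanged; the sketch below shows the usual pairing, which now acquires and releases the SB_FREEZE_WRITE rwsem under the covers:

#include <linux/fs.h>

static int example_modify_fs(struct super_block *sb)
{
	sb_start_write(sb);	/* blocks while the sb is frozen at this level */

	/* ... carry out the modification ... */

	sb_end_write(sb);
	return 0;
}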
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 2a2f56b292c1..f2912914141a 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -20,11 +20,6 @@
20#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI 20#define FSL_UTMI_PHY_DLY 10 /*As per P1010RM, delay for UTMI
21 PHY CLK to become stable - 10ms*/ 21 PHY CLK to become stable - 10ms*/
22#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */ 22#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */
23#define FSL_USB_VER_OLD 0
24#define FSL_USB_VER_1_6 1
25#define FSL_USB_VER_2_2 2
26#define FSL_USB_VER_2_4 3
27#define FSL_USB_VER_2_5 4
28 23
29#include <linux/types.h> 24#include <linux/types.h>
30 25
@@ -52,6 +47,15 @@
52 * 47 *
53 */ 48 */
54 49
50enum fsl_usb2_controller_ver {
51 FSL_USB_VER_NONE = -1,
52 FSL_USB_VER_OLD = 0,
53 FSL_USB_VER_1_6 = 1,
54 FSL_USB_VER_2_2 = 2,
55 FSL_USB_VER_2_4 = 3,
56 FSL_USB_VER_2_5 = 4,
57};
58
55enum fsl_usb2_operating_modes { 59enum fsl_usb2_operating_modes {
56 FSL_USB2_MPH_HOST, 60 FSL_USB2_MPH_HOST,
57 FSL_USB2_DR_HOST, 61 FSL_USB2_DR_HOST,
@@ -65,6 +69,7 @@ enum fsl_usb2_phy_modes {
65 FSL_USB2_PHY_UTMI, 69 FSL_USB2_PHY_UTMI,
66 FSL_USB2_PHY_UTMI_WIDE, 70 FSL_USB2_PHY_UTMI_WIDE,
67 FSL_USB2_PHY_SERIAL, 71 FSL_USB2_PHY_SERIAL,
72 FSL_USB2_PHY_UTMI_DUAL,
68}; 73};
69 74
70struct clk; 75struct clk;
@@ -72,7 +77,7 @@ struct platform_device;
72 77
73struct fsl_usb2_platform_data { 78struct fsl_usb2_platform_data {
74 /* board specific information */ 79 /* board specific information */
75 int controller_ver; 80 enum fsl_usb2_controller_ver controller_ver;
76 enum fsl_usb2_operating_modes operating_mode; 81 enum fsl_usb2_operating_modes operating_mode;
77 enum fsl_usb2_phy_modes phy_mode; 82 enum fsl_usb2_phy_modes phy_mode;
78 unsigned int port_enables; 83 unsigned int port_enables;
@@ -93,6 +98,9 @@ struct fsl_usb2_platform_data {
93 98
94 unsigned suspended:1; 99 unsigned suspended:1;
95 unsigned already_suspended:1; 100 unsigned already_suspended:1;
101 unsigned has_fsl_erratum_a007792:1;
102 unsigned has_fsl_erratum_a005275:1;
103 unsigned check_phy_clk_valid:1;
96 104
97 /* register save area for suspend/resume */ 105 /* register save area for suspend/resume */
98 u32 pm_command; 106 u32 pm_command;
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321eabbda..0023088b253b 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@ struct fsl_ifc_ctrl {
841 841
842 u32 nand_stat; 842 u32 nand_stat;
843 wait_queue_head_t nand_wait; 843 wait_queue_head_t nand_wait;
844 bool little_endian;
844}; 845};
845 846
846extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; 847extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
847 848
849static inline u32 ifc_in32(void __iomem *addr)
850{
851 u32 val;
852
853 if (fsl_ifc_ctrl_dev->little_endian)
854 val = ioread32(addr);
855 else
856 val = ioread32be(addr);
857
858 return val;
859}
860
861static inline u16 ifc_in16(void __iomem *addr)
862{
863 u16 val;
864
865 if (fsl_ifc_ctrl_dev->little_endian)
866 val = ioread16(addr);
867 else
868 val = ioread16be(addr);
869
870 return val;
871}
872
873static inline u8 ifc_in8(void __iomem *addr)
874{
875 return ioread8(addr);
876}
877
878static inline void ifc_out32(u32 val, void __iomem *addr)
879{
880 if (fsl_ifc_ctrl_dev->little_endian)
881 iowrite32(val, addr);
882 else
883 iowrite32be(val, addr);
884}
885
886static inline void ifc_out16(u16 val, void __iomem *addr)
887{
888 if (fsl_ifc_ctrl_dev->little_endian)
889 iowrite16(val, addr);
890 else
891 iowrite16be(val, addr);
892}
893
894static inline void ifc_out8(u8 val, void __iomem *addr)
895{
896 iowrite8(val, addr);
897}
848 898
849#endif /* __ASM_FSL_IFC_H */ 899#endif /* __ASM_FSL_IFC_H */
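[Editor's note] The ifc_in*()/ifc_out*() accessors hide whether the IFC register block is big- or little-endian behind the new little_endian flag, so drivers no longer hard-code ioread32be()/iowrite32be(). A driver-side sketch of a read-modify-write through them:

#include <linux/fsl_ifc.h>

static u32 example_ifc_set_bits(void __iomem *reg, u32 bits)
{
	u32 old = ifc_in32(reg);

	ifc_out32(old | bits, reg);
	return old;
}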
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 65a517dd32f7..533c4408529a 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -195,40 +195,49 @@ struct fsnotify_group {
195#define FSNOTIFY_EVENT_INODE 2 195#define FSNOTIFY_EVENT_INODE 2
196 196
197/* 197/*
198 * a mark is simply an object attached to an in core inode which allows an 198 * A mark is simply an object attached to an in core inode which allows an
199 * fsnotify listener to indicate they are either no longer interested in events 199 * fsnotify listener to indicate they are either no longer interested in events
200 * of a type matching mask or only interested in those events. 200 * of a type matching mask or only interested in those events.
201 * 201 *
202 * these are flushed when an inode is evicted from core and may be flushed 202 * These are flushed when an inode is evicted from core and may be flushed
203 * when the inode is modified (as seen by fsnotify_access). Some fsnotify users 203 * when the inode is modified (as seen by fsnotify_access). Some fsnotify
204 * (such as dnotify) will flush these when the open fd is closed and not at 204 * users (such as dnotify) will flush these when the open fd is closed and not
205 * inode eviction or modification. 205 * at inode eviction or modification.
206 *
207 * Text in brackets is showing the lock(s) protecting modifications of a
208 * particular entry. obj_lock means either inode->i_lock or
209 * mnt->mnt_root->d_lock depending on the mark type.
206 */ 210 */
207struct fsnotify_mark { 211struct fsnotify_mark {
208 __u32 mask; /* mask this mark is for */ 212 /* Mask this mark is for [mark->lock, group->mark_mutex] */
209 /* we hold ref for each i_list and g_list. also one ref for each 'thing' 213 __u32 mask;
214 /* We hold one for presence in g_list. Also one ref for each 'thing'
210 * in kernel that found and may be using this mark. */ 215 * in kernel that found and may be using this mark. */
211 atomic_t refcnt; /* active things looking at this mark */ 216 atomic_t refcnt;
212 struct fsnotify_group *group; /* group this mark is for */ 217 /* Group this mark is for. Set on mark creation, stable until last ref
213 struct list_head g_list; /* list of marks by group->i_fsnotify_marks 218 * is dropped */
214 * Also reused for queueing mark into 219 struct fsnotify_group *group;
215 * destroy_list when it's waiting for 220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing
216 * the end of SRCU period before it can 221 * mark into destroy_list when it's waiting for the end of SRCU period
217 * be freed */ 222 * before it can be freed. [group->mark_mutex] */
218 spinlock_t lock; /* protect group and inode */ 223 struct list_head g_list;
219 struct hlist_node obj_list; /* list of marks for inode / vfsmount */ 224 /* Protects inode / mnt pointers, flags, masks */
220 struct list_head free_list; /* tmp list used when freeing this mark */ 225 spinlock_t lock;
221 union { 226 /* List of marks for inode / vfsmount [obj_lock] */
227 struct hlist_node obj_list;
228 union { /* Object pointer [mark->lock, group->mark_mutex] */
222 struct inode *inode; /* inode this mark is associated with */ 229 struct inode *inode; /* inode this mark is associated with */
223 struct vfsmount *mnt; /* vfsmount this mark is associated with */ 230 struct vfsmount *mnt; /* vfsmount this mark is associated with */
224 }; 231 };
225 __u32 ignored_mask; /* events types to ignore */ 232 /* Events types to ignore [mark->lock, group->mark_mutex] */
233 __u32 ignored_mask;
226#define FSNOTIFY_MARK_FLAG_INODE 0x01 234#define FSNOTIFY_MARK_FLAG_INODE 0x01
227#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02 235#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
228#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04 236#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
229#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 237#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
230#define FSNOTIFY_MARK_FLAG_ALIVE 0x10 238#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
231 unsigned int flags; /* vfsmount or inode mark? */ 239#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
240 unsigned int flags; /* flags [mark->lock] */
232 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ 241 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
233}; 242};
234 243
@@ -345,8 +354,10 @@ extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_
345/* given a group and a mark, flag mark to be freed when all references are dropped */ 354/* given a group and a mark, flag mark to be freed when all references are dropped */
346extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, 355extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
347 struct fsnotify_group *group); 356 struct fsnotify_group *group);
348extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, 357/* detach mark from inode / mount list, group list, drop inode reference */
349 struct fsnotify_group *group); 358extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
359/* free mark */
360extern void fsnotify_free_mark(struct fsnotify_mark *mark);
350/* run all the marks in a group, and clear all of the vfsmount marks */ 361/* run all the marks in a group, and clear all of the vfsmount marks */
351extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); 362extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
352/* run all the marks in a group, and clear all of the inode marks */ 363/* run all the marks in a group, and clear all of the inode marks */
@@ -357,7 +368,7 @@ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, un
357extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); 368extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
358extern void fsnotify_get_mark(struct fsnotify_mark *mark); 369extern void fsnotify_get_mark(struct fsnotify_mark *mark);
359extern void fsnotify_put_mark(struct fsnotify_mark *mark); 370extern void fsnotify_put_mark(struct fsnotify_mark *mark);
360extern void fsnotify_unmount_inodes(struct list_head *list); 371extern void fsnotify_unmount_inodes(struct super_block *sb);
361 372
362/* put here because inotify does some weird stuff when destroying watches */ 373/* put here because inotify does some weird stuff when destroying watches */
363extern void fsnotify_init_event(struct fsnotify_event *event, 374extern void fsnotify_init_event(struct fsnotify_event *event,
@@ -393,7 +404,7 @@ static inline u32 fsnotify_get_cookie(void)
393 return 0; 404 return 0;
394} 405}
395 406
396static inline void fsnotify_unmount_inodes(struct list_head *list) 407static inline void fsnotify_unmount_inodes(struct super_block *sb)
397{} 408{}
398 409
399#endif /* CONFIG_FSNOTIFY */ 410#endif /* CONFIG_FSNOTIFY */
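[Editor's note] fsnotify_destroy_mark() is now split into fsnotify_detach_mark(), which must run under the group's mark_mutex, and fsnotify_free_mark(), which runs after the mutex is dropped. A rough sketch of the sequence a backend would follow, essentially what fsnotify_destroy_mark() does internally (handling of the caller's own reference is omitted):

#include <linux/fsnotify_backend.h>
#include <linux/mutex.h>

static void example_remove_mark(struct fsnotify_group *group,
				struct fsnotify_mark *mark)
{
	mutex_lock(&group->mark_mutex);
	fsnotify_detach_mark(mark);	/* unhook from inode/mount and group lists */
	mutex_unlock(&group->mark_mutex);

	fsnotify_free_mark(mark);	/* allow the mark to be freed after SRCU */
}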
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5383bb1394a1..7ff168d06967 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -59,6 +59,8 @@ struct gen_pool {
59 59
60 genpool_algo_t algo; /* allocation function */ 60 genpool_algo_t algo; /* allocation function */
61 void *data; 61 void *data;
62
63 const char *name;
62}; 64};
63 65
64/* 66/*
@@ -118,8 +120,8 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
118 unsigned long start, unsigned int nr, void *data); 120 unsigned long start, unsigned int nr, void *data);
119 121
120extern struct gen_pool *devm_gen_pool_create(struct device *dev, 122extern struct gen_pool *devm_gen_pool_create(struct device *dev,
121 int min_alloc_order, int nid); 123 int min_alloc_order, int nid, const char *name);
122extern struct gen_pool *gen_pool_get(struct device *dev); 124extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
123 125
124bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, 126bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
125 size_t size); 127 size_t size);
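With the name argument added above, a device can export several pools and a consumer can look one up by name. A minimal sketch; the "sram" label, allocation order and error handling are illustrative, not taken from this diff:

	struct gen_pool *pool;

	/* Provider: create a managed pool tagged "sram" (name assumed). */
	pool = devm_gen_pool_create(dev, ilog2(64), NUMA_NO_NODE, "sram");
	if (IS_ERR_OR_NULL(pool))
		return -ENOMEM;

	/* Consumer: find the same pool again by device and name. */
	pool = gen_pool_get(dev, "sram");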
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ec274e0f4ed2..2adbfa6d02bc 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -13,6 +13,7 @@
13#include <linux/kdev_t.h> 13#include <linux/kdev_t.h>
14#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/percpu-refcount.h>
16 17
17#ifdef CONFIG_BLOCK 18#ifdef CONFIG_BLOCK
18 19
@@ -124,7 +125,7 @@ struct hd_struct {
124#else 125#else
125 struct disk_stats dkstats; 126 struct disk_stats dkstats;
126#endif 127#endif
127 atomic_t ref; 128 struct percpu_ref ref;
128 struct rcu_head rcu_head; 129 struct rcu_head rcu_head;
129}; 130};
130 131
@@ -611,7 +612,7 @@ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
611 sector_t len, int flags, 612 sector_t len, int flags,
612 struct partition_meta_info 613 struct partition_meta_info
613 *info); 614 *info);
614extern void __delete_partition(struct hd_struct *); 615extern void __delete_partition(struct percpu_ref *);
615extern void delete_partition(struct gendisk *, int); 616extern void delete_partition(struct gendisk *, int);
616extern void printk_all_partitions(void); 617extern void printk_all_partitions(void);
617 618
@@ -640,27 +641,39 @@ extern ssize_t part_fail_store(struct device *dev,
640 const char *buf, size_t count); 641 const char *buf, size_t count);
641#endif /* CONFIG_FAIL_MAKE_REQUEST */ 642#endif /* CONFIG_FAIL_MAKE_REQUEST */
642 643
643static inline void hd_ref_init(struct hd_struct *part) 644static inline int hd_ref_init(struct hd_struct *part)
644{ 645{
645 atomic_set(&part->ref, 1); 646 if (percpu_ref_init(&part->ref, __delete_partition, 0,
646 smp_mb(); 647 GFP_KERNEL))
648 return -ENOMEM;
649 return 0;
647} 650}
648 651
649static inline void hd_struct_get(struct hd_struct *part) 652static inline void hd_struct_get(struct hd_struct *part)
650{ 653{
651 atomic_inc(&part->ref); 654 percpu_ref_get(&part->ref);
652 smp_mb__after_atomic();
653} 655}
654 656
655static inline int hd_struct_try_get(struct hd_struct *part) 657static inline int hd_struct_try_get(struct hd_struct *part)
656{ 658{
657 return atomic_inc_not_zero(&part->ref); 659 return percpu_ref_tryget_live(&part->ref);
658} 660}
659 661
660static inline void hd_struct_put(struct hd_struct *part) 662static inline void hd_struct_put(struct hd_struct *part)
661{ 663{
662 if (atomic_dec_and_test(&part->ref)) 664 percpu_ref_put(&part->ref);
663 __delete_partition(part); 665}
666
667static inline void hd_struct_kill(struct hd_struct *part)
668{
669 percpu_ref_kill(&part->ref);
670}
671
672static inline void hd_free_part(struct hd_struct *part)
673{
674 free_part_stats(part);
675 free_part_info(part);
676 percpu_ref_exit(&part->ref);
664} 677}
665 678
666/* 679/*
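A rough sketch of the partition reference lifecycle after the switch to percpu_ref above; the surrounding call sites are assumptions, not shown in this hunk:

	if (hd_ref_init(part))			/* can now fail with -ENOMEM */
		return -ENOMEM;

	if (hd_struct_try_get(part)) {		/* percpu_ref_tryget_live() */
		/* ... I/O against the partition ... */
		hd_struct_put(part);		/* drops a ref, never frees directly */
	}

	hd_struct_kill(part);			/* teardown: __delete_partition() now
						 * runs as the percpu_ref release */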
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ad35f300b9a4..f92cbd2f4450 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -63,7 +63,10 @@ struct vm_area_struct;
63 * but it is definitely preferable to use the flag rather than opencode endless 63 * but it is definitely preferable to use the flag rather than opencode endless
64 * loop around allocator. 64 * loop around allocator.
65 * 65 *
66 * __GFP_NORETRY: The VM implementation must not retry indefinitely. 66 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
67 * return NULL when direct reclaim and memory compaction have failed to allow
68 * the allocation to succeed. The OOM killer is not called with the current
69 * implementation.
67 * 70 *
68 * __GFP_MOVABLE: Flag that this page will be movable by the page migration 71 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
69 * mechanism or reclaimed 72 * mechanism or reclaimed
@@ -300,22 +303,31 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
300 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); 303 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
301} 304}
302 305
303static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 306/*
304 unsigned int order) 307 * Allocate pages, preferring the node given as nid. The node must be valid and
308 * online. For more general interface, see alloc_pages_node().
309 */
310static inline struct page *
311__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
305{ 312{
306 /* Unknown node is current node */ 313 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
307 if (nid < 0) 314 VM_WARN_ON(!node_online(nid));
308 nid = numa_node_id();
309 315
310 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); 316 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
311} 317}
312 318
313static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, 319/*
320 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
321 * prefer the current CPU's closest node. Otherwise node must be valid and
322 * online.
323 */
324static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
314 unsigned int order) 325 unsigned int order)
315{ 326{
316 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid)); 327 if (nid == NUMA_NO_NODE)
328 nid = numa_mem_id();
317 329
318 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); 330 return __alloc_pages_node(nid, gfp_mask, order);
319} 331}
320 332
321#ifdef CONFIG_NUMA 333#ifdef CONFIG_NUMA
@@ -354,7 +366,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
354 366
355void *alloc_pages_exact(size_t size, gfp_t gfp_mask); 367void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
356void free_pages_exact(void *virt, size_t size); 368void free_pages_exact(void *virt, size_t size);
357/* This is different from alloc_pages_exact_node !!! */
358void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); 369void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
359 370
360#define __get_free_page(gfp_mask) \ 371#define __get_free_page(gfp_mask) \
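A short sketch contrasting the two allocators after this change; nid is assumed to come from somewhere like dev_to_node():

	struct page *page;

	/* nid is known to be a valid, online node: skip the fallback check. */
	page = __alloc_pages_node(nid, GFP_KERNEL, 0);

	/* No preference: NUMA_NO_NODE now resolves to the local memory node. */
	page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);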
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index adac255aee86..14cac67c2012 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@ enum gpiod_flags {
47int gpiod_count(struct device *dev, const char *con_id); 47int gpiod_count(struct device *dev, const char *con_id);
48 48
49/* Acquire and dispose GPIOs */ 49/* Acquire and dispose GPIOs */
50struct gpio_desc *__must_check __gpiod_get(struct device *dev, 50struct gpio_desc *__must_check gpiod_get(struct device *dev,
51 const char *con_id, 51 const char *con_id,
52 enum gpiod_flags flags); 52 enum gpiod_flags flags);
53struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, 53struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
54 const char *con_id, 54 const char *con_id,
55 unsigned int idx, 55 unsigned int idx,
56 enum gpiod_flags flags); 56 enum gpiod_flags flags);
57struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev, 57struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
58 const char *con_id, 58 const char *con_id,
59 enum gpiod_flags flags); 59 enum gpiod_flags flags);
60struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, 60struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
61 const char *con_id, 61 const char *con_id,
62 unsigned int index, 62 unsigned int index,
63 enum gpiod_flags flags); 63 enum gpiod_flags flags);
@@ -70,18 +70,18 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
70void gpiod_put(struct gpio_desc *desc); 70void gpiod_put(struct gpio_desc *desc);
71void gpiod_put_array(struct gpio_descs *descs); 71void gpiod_put_array(struct gpio_descs *descs);
72 72
73struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev, 73struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
74 const char *con_id, 74 const char *con_id,
75 enum gpiod_flags flags); 75 enum gpiod_flags flags);
76struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, 76struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
77 const char *con_id, 77 const char *con_id,
78 unsigned int idx, 78 unsigned int idx,
79 enum gpiod_flags flags); 79 enum gpiod_flags flags);
80struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev, 80struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
81 const char *con_id, 81 const char *con_id,
82 enum gpiod_flags flags); 82 enum gpiod_flags flags);
83struct gpio_desc *__must_check 83struct gpio_desc *__must_check
84__devm_gpiod_get_index_optional(struct device *dev, const char *con_id, 84devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
85 unsigned int index, enum gpiod_flags flags); 85 unsigned int index, enum gpiod_flags flags);
86struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev, 86struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
87 const char *con_id, 87 const char *con_id,
@@ -146,31 +146,31 @@ static inline int gpiod_count(struct device *dev, const char *con_id)
146 return 0; 146 return 0;
147} 147}
148 148
149static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, 149static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
150 const char *con_id, 150 const char *con_id,
151 enum gpiod_flags flags) 151 enum gpiod_flags flags)
152{ 152{
153 return ERR_PTR(-ENOSYS); 153 return ERR_PTR(-ENOSYS);
154} 154}
155static inline struct gpio_desc *__must_check 155static inline struct gpio_desc *__must_check
156__gpiod_get_index(struct device *dev, 156gpiod_get_index(struct device *dev,
157 const char *con_id, 157 const char *con_id,
158 unsigned int idx, 158 unsigned int idx,
159 enum gpiod_flags flags) 159 enum gpiod_flags flags)
160{ 160{
161 return ERR_PTR(-ENOSYS); 161 return ERR_PTR(-ENOSYS);
162} 162}
163 163
164static inline struct gpio_desc *__must_check 164static inline struct gpio_desc *__must_check
165__gpiod_get_optional(struct device *dev, const char *con_id, 165gpiod_get_optional(struct device *dev, const char *con_id,
166 enum gpiod_flags flags) 166 enum gpiod_flags flags)
167{ 167{
168 return ERR_PTR(-ENOSYS); 168 return ERR_PTR(-ENOSYS);
169} 169}
170 170
171static inline struct gpio_desc *__must_check 171static inline struct gpio_desc *__must_check
172__gpiod_get_index_optional(struct device *dev, const char *con_id, 172gpiod_get_index_optional(struct device *dev, const char *con_id,
173 unsigned int index, enum gpiod_flags flags) 173 unsigned int index, enum gpiod_flags flags)
174{ 174{
175 return ERR_PTR(-ENOSYS); 175 return ERR_PTR(-ENOSYS);
176} 176}
@@ -206,7 +206,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
206} 206}
207 207
208static inline struct gpio_desc *__must_check 208static inline struct gpio_desc *__must_check
209__devm_gpiod_get(struct device *dev, 209devm_gpiod_get(struct device *dev,
210 const char *con_id, 210 const char *con_id,
211 enum gpiod_flags flags) 211 enum gpiod_flags flags)
212{ 212{
@@ -214,7 +214,7 @@ __devm_gpiod_get(struct device *dev,
214} 214}
215static inline 215static inline
216struct gpio_desc *__must_check 216struct gpio_desc *__must_check
217__devm_gpiod_get_index(struct device *dev, 217devm_gpiod_get_index(struct device *dev,
218 const char *con_id, 218 const char *con_id,
219 unsigned int idx, 219 unsigned int idx,
220 enum gpiod_flags flags) 220 enum gpiod_flags flags)
@@ -223,14 +223,14 @@ __devm_gpiod_get_index(struct device *dev,
223} 223}
224 224
225static inline struct gpio_desc *__must_check 225static inline struct gpio_desc *__must_check
226__devm_gpiod_get_optional(struct device *dev, const char *con_id, 226devm_gpiod_get_optional(struct device *dev, const char *con_id,
227 enum gpiod_flags flags) 227 enum gpiod_flags flags)
228{ 228{
229 return ERR_PTR(-ENOSYS); 229 return ERR_PTR(-ENOSYS);
230} 230}
231 231
232static inline struct gpio_desc *__must_check 232static inline struct gpio_desc *__must_check
233__devm_gpiod_get_index_optional(struct device *dev, const char *con_id, 233devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
234 unsigned int index, enum gpiod_flags flags) 234 unsigned int index, enum gpiod_flags flags)
235{ 235{
236 return ERR_PTR(-ENOSYS); 236 return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@ static inline struct gpio_desc *devm_get_gpiod_from_child(
424 424
425#endif /* CONFIG_GPIOLIB */ 425#endif /* CONFIG_GPIOLIB */
426 426
427/*
428 * Vararg-hacks! This is done to transition the kernel to always pass
429 * the options flags argument to the below functions. During a transition
430 * phase these vararg macros make both old-and-newstyle code compile,
431 * but when all calls to the elder API are removed, these should go away
432 * and the __gpiod_get() etc functions above be renamed just gpiod_get()
433 * etc.
434 */
435#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
436#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
437#define __gpiod_get_index(dev, con_id, index, flags, ...) \
438 __gpiod_get_index(dev, con_id, index, flags)
439#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
440#define __gpiod_get_optional(dev, con_id, flags, ...) \
441 __gpiod_get_optional(dev, con_id, flags)
442#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
443#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
444 __gpiod_get_index_optional(dev, con_id, index, flags)
445#define gpiod_get_index_optional(varargs...) \
446 __gpiod_get_index_optional(varargs, GPIOD_ASIS)
447#define __devm_gpiod_get(dev, con_id, flags, ...) \
448 __devm_gpiod_get(dev, con_id, flags)
449#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
450#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
451 __devm_gpiod_get_index(dev, con_id, index, flags)
452#define devm_gpiod_get_index(varargs...) \
453 __devm_gpiod_get_index(varargs, GPIOD_ASIS)
454#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
455 __devm_gpiod_get_optional(dev, con_id, flags)
456#define devm_gpiod_get_optional(varargs...) \
457 __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
458#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
459 __devm_gpiod_get_index_optional(dev, con_id, index, flags)
460#define devm_gpiod_get_index_optional(varargs...) \
461 __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
462
463#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) 427#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
464 428
465int gpiod_export(struct gpio_desc *desc, bool direction_may_change); 429int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
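With the vararg transition macros removed above, the flags argument is now mandatory at every call site. A minimal consumer sketch; the device pointer and "enable" con_id are illustrative:

	struct gpio_desc *desc;

	desc = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	gpiod_set_value(desc, 1);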
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c8393cd4d44f..1aed31c5ffba 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
6#include <linux/irq.h> 6#include <linux/irq.h>
7#include <linux/irqchip/chained_irq.h> 7#include <linux/irqchip/chained_irq.h>
8#include <linux/irqdomain.h> 8#include <linux/irqdomain.h>
9#include <linux/lockdep.h>
9#include <linux/pinctrl/pinctrl.h> 10#include <linux/pinctrl/pinctrl.h>
10 11
11struct device; 12struct device;
@@ -64,6 +65,17 @@ struct seq_file;
64 * registers. 65 * registers.
65 * @irq_not_threaded: flag must be set if @can_sleep is set but the 66 * @irq_not_threaded: flag must be set if @can_sleep is set but the
66 * IRQs don't need to be threaded 67 * IRQs don't need to be threaded
68 * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
69 * @irqdomain: Interrupt translation domain; responsible for mapping
70 * between GPIO hwirq number and linux irq number
71 * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
72 * @irq_handler: the irq handler to use (often a predefined irq core function)
73 * for GPIO IRQs, provided by GPIO driver
74 * @irq_default_type: default IRQ triggering type applied during GPIO driver
75 * initialization, provided by GPIO driver
76 * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
77 * provided by GPIO driver
78 * @lock_key: per GPIO IRQ chip lockdep class
67 * 79 *
68 * A gpio_chip can help platforms abstract various sources of GPIOs so 80 * A gpio_chip can help platforms abstract various sources of GPIOs so
69 * they can all be accessed through a common programing interface. 81 * they can all be accessed through a common programing interface.
@@ -126,6 +138,7 @@ struct gpio_chip {
126 irq_flow_handler_t irq_handler; 138 irq_flow_handler_t irq_handler;
127 unsigned int irq_default_type; 139 unsigned int irq_default_type;
128 int irq_parent; 140 int irq_parent;
141 struct lock_class_key *lock_key;
129#endif 142#endif
130 143
131#if defined(CONFIG_OF_GPIO) 144#if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
171 int parent_irq, 184 int parent_irq,
172 irq_flow_handler_t parent_handler); 185 irq_flow_handler_t parent_handler);
173 186
174int gpiochip_irqchip_add(struct gpio_chip *gpiochip, 187int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
175 struct irq_chip *irqchip, 188 struct irq_chip *irqchip,
176 unsigned int first_irq, 189 unsigned int first_irq,
177 irq_flow_handler_t handler, 190 irq_flow_handler_t handler,
178 unsigned int type); 191 unsigned int type,
192 struct lock_class_key *lock_key);
193
194#ifdef CONFIG_LOCKDEP
195#define gpiochip_irqchip_add(...) \
196( \
197 ({ \
198 static struct lock_class_key _key; \
199 _gpiochip_irqchip_add(__VA_ARGS__, &_key); \
200 }) \
201)
202#else
203#define gpiochip_irqchip_add(...) \
204 _gpiochip_irqchip_add(__VA_ARGS__, NULL)
205#endif
179 206
180#endif /* CONFIG_GPIOLIB_IRQCHIP */ 207#endif /* CONFIG_GPIOLIB_IRQCHIP */
181 208
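Driver call sites keep the existing five-argument form; when CONFIG_LOCKDEP is set, the wrapper above supplies a static per-chip lock class behind the scenes. A sketch with assumed driver names:

	ret = gpiochip_irqchip_add(&mychip->gc,		/* struct gpio_chip */
				   &mychip_irq_chip,	/* struct irq_chip (assumed) */
				   0,			/* first_irq */
				   handle_simple_irq,
				   IRQ_TYPE_NONE);
	if (ret)
		return ret;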
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index e2706140eaff..c0d712d22b07 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@ struct gpiod_lookup_table {
57} 57}
58 58
59void gpiod_add_lookup_table(struct gpiod_lookup_table *table); 59void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
60void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
60 61
61#endif /* __LINUX_GPIO_MACHINE_H */ 62#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f10b20f05159..ecb080d6ff42 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
33extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 33extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
34 unsigned long addr, pgprot_t newprot, 34 unsigned long addr, pgprot_t newprot,
35 int prot_numa); 35 int prot_numa);
36int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
37 unsigned long pfn, bool write);
36 38
37enum transparent_hugepage_flag { 39enum transparent_hugepage_flag {
38 TRANSPARENT_HUGEPAGE_FLAG, 40 TRANSPARENT_HUGEPAGE_FLAG,
@@ -122,7 +124,7 @@ extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
122#endif 124#endif
123extern int hugepage_madvise(struct vm_area_struct *vma, 125extern int hugepage_madvise(struct vm_area_struct *vma,
124 unsigned long *vm_flags, int advice); 126 unsigned long *vm_flags, int advice);
125extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, 127extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
126 unsigned long start, 128 unsigned long start,
127 unsigned long end, 129 unsigned long end,
128 long adjust_next); 130 long adjust_next);
@@ -138,15 +140,6 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
138 else 140 else
139 return 0; 141 return 0;
140} 142}
141static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
142 unsigned long start,
143 unsigned long end,
144 long adjust_next)
145{
146 if (!vma->anon_vma || vma->vm_ops)
147 return;
148 __vma_adjust_trans_huge(vma, start, end, adjust_next);
149}
150static inline int hpage_nr_pages(struct page *page) 143static inline int hpage_nr_pages(struct page *page)
151{ 144{
152 if (unlikely(PageTransHuge(page))) 145 if (unlikely(PageTransHuge(page)))
@@ -164,6 +157,13 @@ static inline bool is_huge_zero_page(struct page *page)
164 return ACCESS_ONCE(huge_zero_page) == page; 157 return ACCESS_ONCE(huge_zero_page) == page;
165} 158}
166 159
160static inline bool is_huge_zero_pmd(pmd_t pmd)
161{
162 return is_huge_zero_page(pmd_page(pmd));
163}
164
165struct page *get_huge_zero_page(void);
166
167#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 167#else /* CONFIG_TRANSPARENT_HUGEPAGE */
168#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 168#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
169#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) 169#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d891f949466a..5e35379f58a5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,9 @@ struct resv_map {
35 struct kref refs; 35 struct kref refs;
36 spinlock_t lock; 36 spinlock_t lock;
37 struct list_head regions; 37 struct list_head regions;
38 long adds_in_progress;
39 struct list_head region_cache;
40 long region_cache_count;
38}; 41};
39extern struct resv_map *resv_map_alloc(void); 42extern struct resv_map *resv_map_alloc(void);
40void resv_map_release(struct kref *ref); 43void resv_map_release(struct kref *ref);
@@ -80,11 +83,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
80int hugetlb_reserve_pages(struct inode *inode, long from, long to, 83int hugetlb_reserve_pages(struct inode *inode, long from, long to,
81 struct vm_area_struct *vma, 84 struct vm_area_struct *vma,
82 vm_flags_t vm_flags); 85 vm_flags_t vm_flags);
83void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); 86long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
87 long freed);
84int dequeue_hwpoisoned_huge_page(struct page *page); 88int dequeue_hwpoisoned_huge_page(struct page *page);
85bool isolate_huge_page(struct page *page, struct list_head *list); 89bool isolate_huge_page(struct page *page, struct list_head *list);
86void putback_active_hugepage(struct page *page); 90void putback_active_hugepage(struct page *page);
87void free_huge_page(struct page *page); 91void free_huge_page(struct page *page);
92void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
93extern struct mutex *hugetlb_fault_mutex_table;
94u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
95 struct vm_area_struct *vma,
96 struct address_space *mapping,
97 pgoff_t idx, unsigned long address);
88 98
89#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 99#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
90pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 100pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -320,9 +330,13 @@ struct huge_bootmem_page {
320#endif 330#endif
321}; 331};
322 332
333struct page *alloc_huge_page(struct vm_area_struct *vma,
334 unsigned long addr, int avoid_reserve);
323struct page *alloc_huge_page_node(struct hstate *h, int nid); 335struct page *alloc_huge_page_node(struct hstate *h, int nid);
324struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, 336struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
325 unsigned long addr, int avoid_reserve); 337 unsigned long addr, int avoid_reserve);
338int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
339 pgoff_t idx);
326 340
327/* arch callback */ 341/* arch callback */
328int __init alloc_bootmem_huge_page(struct hstate *h); 342int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -471,6 +485,7 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
471 485
472#else /* CONFIG_HUGETLB_PAGE */ 486#else /* CONFIG_HUGETLB_PAGE */
473struct hstate {}; 487struct hstate {};
488#define alloc_huge_page(v, a, r) NULL
474#define alloc_huge_page_node(h, nid) NULL 489#define alloc_huge_page_node(h, nid) NULL
475#define alloc_huge_page_noerr(v, a, r) NULL 490#define alloc_huge_page_noerr(v, a, r) NULL
476#define alloc_bootmem_huge_page(h) NULL 491#define alloc_bootmem_huge_page(h) NULL
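The newly exported fault mutex table is indexed via hugetlb_fault_mutex_hash(); a rough sketch of serializing against concurrent faults on the same index, with every variable here assumed rather than taken from this diff:

	u32 hash;

	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/* e.g. allocate with alloc_huge_page() and publish it through
	 * huge_add_to_page_cache() while the mutex is held */

	mutex_unlock(&hugetlb_fault_mutex_table[hash]);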
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 30d3a1f79450..54733d5b503e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -977,6 +977,11 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
977 const char *mod_name); 977 const char *mod_name);
978void vmbus_driver_unregister(struct hv_driver *hv_driver); 978void vmbus_driver_unregister(struct hv_driver *hv_driver);
979 979
980int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
981 resource_size_t min, resource_size_t max,
982 resource_size_t size, resource_size_t align,
983 bool fb_overlap_ok);
984
980/** 985/**
981 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device 986 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
982 * 987 *
@@ -1233,8 +1238,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
1233 1238
1234void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1239void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1235 1240
1236extern struct resource hyperv_mmio;
1237
1238/* 1241/*
1239 * Negotiated version with the Host. 1242 * Negotiated version with the Host.
1240 */ 1243 */
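A minimal sketch of claiming MMIO space through the vmbus_allocate_mmio() declaration above; the range, size and alignment values are placeholders and hdev is the driver's hv_device:

	struct resource *mmio;
	int ret;

	ret = vmbus_allocate_mmio(&mmio, hdev, 0, -1,
				  0x100000 /* size */, 0x1000 /* align */,
				  false /* fb_overlap_ok */);
	if (ret)
		return ret;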
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e83a738a3b87..768063baafbf 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -121,6 +121,9 @@ extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
121extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, 121extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
122 u8 command, u8 length, 122 u8 command, u8 length,
123 const u8 *values); 123 const u8 *values);
124extern s32
125i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
126 u8 command, u8 length, u8 *values);
124#endif /* I2C */ 127#endif /* I2C */
125 128
126/** 129/**
@@ -550,11 +553,12 @@ void i2c_lock_adapter(struct i2c_adapter *);
550void i2c_unlock_adapter(struct i2c_adapter *); 553void i2c_unlock_adapter(struct i2c_adapter *);
551 554
552/*flags for the client struct: */ 555/*flags for the client struct: */
553#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ 556#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
554#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ 557#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
555 /* Must equal I2C_M_TEN below */ 558 /* Must equal I2C_M_TEN below */
556#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ 559#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
557#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ 560#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
561#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
558 /* Must match I2C_M_STOP|IGNORE_NAK */ 562 /* Must match I2C_M_STOP|IGNORE_NAK */
559 563
560/* i2c adapter classes (bitmask) */ 564/* i2c adapter classes (bitmask) */
@@ -638,6 +642,8 @@ extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
638/* must call put_device() when done with returned i2c_adapter device */ 642/* must call put_device() when done with returned i2c_adapter device */
639extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node); 643extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
640 644
645/* must call i2c_put_adapter() when done with returned i2c_adapter device */
646struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
641#else 647#else
642 648
643static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) 649static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -649,6 +655,11 @@ static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node
649{ 655{
650 return NULL; 656 return NULL;
651} 657}
658
659static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
660{
661 return NULL;
662}
652#endif /* CONFIG_OF */ 663#endif /* CONFIG_OF */
653 664
654#endif /* _LINUX_I2C_H */ 665#endif /* _LINUX_I2C_H */
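The name of the new helper suggests it falls back to emulated transfers on adapters without native I2C block reads; a minimal sketch, with the register address 0x10 purely illustrative:

	u8 buf[4];
	int ret;

	ret = i2c_smbus_read_i2c_block_data_or_emulated(client, 0x10,
							sizeof(buf), buf);
	if (ret < 0)
		return ret;
	/* on success buf holds the register contents */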
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b9c7897dc566..cfa906f28b7a 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode {
2074#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) 2074#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
2075#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) 2075#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
2076 2076
2077#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5)
2077#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) 2078#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
2078#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
2079 2079
2080/* TDLS specific payload type in the LLC/SNAP header */ 2080/* TDLS specific payload type in the LLC/SNAP header */
2081#define WLAN_TDLS_SNAP_RFTYPE 0x2 2081#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 193ad488d3e2..908429216d9f 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,6 +37,7 @@ static inline struct igmpv3_query *
37 return (struct igmpv3_query *)skb_transport_header(skb); 37 return (struct igmpv3_query *)skb_transport_header(skb);
38} 38}
39 39
40extern int sysctl_igmp_llm_reports;
40extern int sysctl_igmp_max_memberships; 41extern int sysctl_igmp_max_memberships;
41extern int sysctl_igmp_max_msf; 42extern int sysctl_igmp_max_msf;
42extern int sysctl_igmp_qrv; 43extern int sysctl_igmp_qrv;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2c476acb87d9..3c17cd7fdf06 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -166,6 +166,7 @@ struct st_sensor_transfer_function {
166/** 166/**
167 * struct st_sensor_settings - ST specific sensor settings 167 * struct st_sensor_settings - ST specific sensor settings
168 * @wai: Contents of WhoAmI register. 168 * @wai: Contents of WhoAmI register.
169 * @wai_addr: The address of WhoAmI register.
169 * @sensors_supported: List of supported sensors by struct itself. 170 * @sensors_supported: List of supported sensors by struct itself.
170 * @ch: IIO channels for the sensor. 171 * @ch: IIO channels for the sensor.
171 * @odr: Output data rate register and ODR list available. 172 * @odr: Output data rate register and ODR list available.
@@ -179,6 +180,7 @@ struct st_sensor_transfer_function {
179 */ 180 */
180struct st_sensor_settings { 181struct st_sensor_settings {
181 u8 wai; 182 u8 wai;
183 u8 wai_addr;
182 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; 184 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
183 struct iio_chan_spec *ch; 185 struct iio_chan_spec *ch;
184 int num_ch; 186 int num_ch;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 26fb8f6342bb..fad58671c49e 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -100,7 +100,7 @@ void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
100 100
101/** 101/**
102 * iio_channel_cb_get_channels() - get access to the underlying channels. 102 * iio_channel_cb_get_channels() - get access to the underlying channels.
103 * @cb_buff: The callback buffer from whom we want the channel 103 * @cb_buffer: The callback buffer from whom we want the channel
104 * information. 104 * information.
105 * 105 *
106 * This function allows one to obtain information about the channels. 106 * This function allows one to obtain information about the channels.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f79148261d16..7bb7f673cb3f 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -645,6 +645,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
645#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL) 645#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
646 646
647/** 647/**
648 * IIO_RAD_TO_DEGREE() - Convert rad to degree
649 * @rad: A value in rad
650 *
651 * Returns the given value converted from rad to degree
652 */
653#define IIO_RAD_TO_DEGREE(rad) \
654 (((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
655
656/**
648 * IIO_G_TO_M_S_2() - Convert g to meter / second**2 657 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
649 * @g: A value in g 658 * @g: A value in g
650 * 659 *
@@ -652,4 +661,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
652 */ 661 */
653#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL) 662#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
654 663
664/**
665 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
666 * @ms2: A value in meter / second**2
667 *
668 * Returns the given value converted from meter / second**2 to g
669 */
670#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
671
655#endif /* _INDUSTRIAL_IO_H_ */ 672#endif /* _INDUSTRIAL_IO_H_ */
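The two macros added above reverse the existing conversions; a short sketch with illustrative variable names (these are integer macros, so the results are rounded):

	u64 angle_deg = IIO_RAD_TO_DEGREE(angle_rad);	/* rad -> degree */
	u64 accel_g   = IIO_M_S_2_TO_G(accel_ms2);	/* m/s^2 -> g */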
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 8a1d18640ab9..9cd8f747212f 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -18,7 +18,8 @@ struct iio_chan_spec;
18 * struct iio_dev_attr - iio specific device attribute 18 * struct iio_dev_attr - iio specific device attribute
19 * @dev_attr: underlying device attribute 19 * @dev_attr: underlying device attribute
20 * @address: associated register address 20 * @address: associated register address
21 * @l: list head for maintaining list of dynamically created attrs. 21 * @l: list head for maintaining list of dynamically created attrs
22 * @c: specification for the underlying channel
22 */ 23 */
23struct iio_dev_attr { 24struct iio_dev_attr {
24 struct device_attribute dev_attr; 25 struct device_attribute dev_attr;
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index fa76c79a52a1..1c9e028e0d4a 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -18,6 +18,9 @@ struct iio_subirq {
18 bool enabled; 18 bool enabled;
19}; 19};
20 20
21struct iio_dev;
22struct iio_trigger;
23
21/** 24/**
22 * struct iio_trigger_ops - operations structure for an iio_trigger. 25 * struct iio_trigger_ops - operations structure for an iio_trigger.
23 * @owner: used to monitor usage count of the trigger. 26 * @owner: used to monitor usage count of the trigger.
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index c378ebec605e..f72f70d5a97b 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -7,8 +7,8 @@ struct iio_dev;
7struct iio_buffer_setup_ops; 7struct iio_buffer_setup_ops;
8 8
9int iio_triggered_buffer_setup(struct iio_dev *indio_dev, 9int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
10 irqreturn_t (*pollfunc_bh)(int irq, void *p), 10 irqreturn_t (*h)(int irq, void *p),
11 irqreturn_t (*pollfunc_th)(int irq, void *p), 11 irqreturn_t (*thread)(int irq, void *p),
12 const struct iio_buffer_setup_ops *setup_ops); 12 const struct iio_buffer_setup_ops *setup_ops);
13void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); 13void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
14 14
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e8493fee8160..d0b380ee7d67 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,6 +32,14 @@ extern struct fs_struct init_fs;
32#define INIT_CPUSET_SEQ(tsk) 32#define INIT_CPUSET_SEQ(tsk)
33#endif 33#endif
34 34
35#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
36#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
37 .lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock), \
38},
39#else
40#define INIT_PREV_CPUTIME(x)
41#endif
42
35#define INIT_SIGNALS(sig) { \ 43#define INIT_SIGNALS(sig) { \
36 .nr_threads = 1, \ 44 .nr_threads = 1, \
37 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \ 45 .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
@@ -46,6 +54,7 @@ extern struct fs_struct init_fs;
46 .cputime_atomic = INIT_CPUTIME_ATOMIC, \ 54 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
47 .running = 0, \ 55 .running = 0, \
48 }, \ 56 }, \
57 INIT_PREV_CPUTIME(sig) \
49 .cred_guard_mutex = \ 58 .cred_guard_mutex = \
50 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 59 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
51} 60}
@@ -246,6 +255,7 @@ extern struct task_group root_task_group;
246 INIT_TASK_RCU_TASKS(tsk) \ 255 INIT_TASK_RCU_TASKS(tsk) \
247 INIT_CPUSET_SEQ(tsk) \ 256 INIT_CPUSET_SEQ(tsk) \
248 INIT_RT_MUTEXES(tsk) \ 257 INIT_RT_MUTEXES(tsk) \
258 INIT_PREV_CPUTIME(tsk) \
249 INIT_VTIME(tsk) \ 259 INIT_VTIME(tsk) \
250 INIT_NUMA_BALANCING(tsk) \ 260 INIT_NUMA_BALANCING(tsk) \
251 INIT_KASAN(tsk) \ 261 INIT_KASAN(tsk) \
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index eecc9ea6cd58..c91e1376132b 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -9,15 +9,8 @@
9#ifndef _TOUCHSCREEN_H 9#ifndef _TOUCHSCREEN_H
10#define _TOUCHSCREEN_H 10#define _TOUCHSCREEN_H
11 11
12#include <linux/input.h> 12struct input_dev;
13 13
14#ifdef CONFIG_OF 14void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
15void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
16#else
17static inline void touchscreen_parse_of_params(struct input_dev *dev,
18 bool multitouch)
19{
20}
21#endif
22 15
23#endif 16#endif
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d9a366d24e3b..6240063bdcac 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -344,7 +344,7 @@ struct intel_iommu {
344 344
345#ifdef CONFIG_INTEL_IOMMU 345#ifdef CONFIG_INTEL_IOMMU
346 unsigned long *domain_ids; /* bitmap of domains */ 346 unsigned long *domain_ids; /* bitmap of domains */
347 struct dmar_domain **domains; /* ptr to domains */ 347 struct dmar_domain ***domains; /* ptr to domains */
348 spinlock_t lock; /* protect context, domain ids */ 348 spinlock_t lock; /* protect context, domain ids */
349 struct root_entry *root_entry; /* virtual address */ 349 struct root_entry *root_entry; /* virtual address */
350 350
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index c27dde7215b5..e399029b68c5 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -21,7 +21,7 @@
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/bug.h> 23#include <linux/bug.h>
24#include <asm/io.h> 24#include <linux/io.h>
25#include <asm/page.h> 25#include <asm/page.h>
26 26
27/* 27/*
diff --git a/include/linux/io.h b/include/linux/io.h
index fb5a99800e77..de64c1e53612 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -20,10 +20,13 @@
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/bug.h>
24#include <linux/err.h>
23#include <asm/io.h> 25#include <asm/io.h>
24#include <asm/page.h> 26#include <asm/page.h>
25 27
26struct device; 28struct device;
29struct resource;
27 30
28__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); 31__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
29void __iowrite64_copy(void __iomem *to, const void *from, size_t count); 32void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -80,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr,
80 const unsigned char *signature, int length); 83 const unsigned char *signature, int length);
81void devm_ioremap_release(struct device *dev, void *res); 84void devm_ioremap_release(struct device *dev, void *res);
82 85
86void *devm_memremap(struct device *dev, resource_size_t offset,
87 size_t size, unsigned long flags);
88void devm_memunmap(struct device *dev, void *addr);
89
90void *__devm_memremap_pages(struct device *dev, struct resource *res);
91
92#ifdef CONFIG_ZONE_DEVICE
93void *devm_memremap_pages(struct device *dev, struct resource *res);
94#else
95static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
96{
97 /*
98 * Fail attempts to call devm_memremap_pages() without
99 * ZONE_DEVICE support enabled, this requires callers to fall
100 * back to plain devm_memremap() based on config
101 */
102 WARN_ON_ONCE(1);
103 return ERR_PTR(-ENXIO);
104}
105#endif
106
83/* 107/*
84 * Some systems do not have legacy ISA devices. 108 * Some systems do not have legacy ISA devices.
85 * /dev/port is not a valid interface on these systems. 109 * /dev/port is not a valid interface on these systems.
@@ -121,4 +145,13 @@ static inline int arch_phys_wc_index(int handle)
121#endif 145#endif
122#endif 146#endif
123 147
148enum {
149 /* See memremap() kernel-doc for usage description... */
150 MEMREMAP_WB = 1 << 0,
151 MEMREMAP_WT = 1 << 1,
152};
153
154void *memremap(resource_size_t offset, size_t size, unsigned long flags);
155void memunmap(void *addr);
156
124#endif /* _LINUX_IO_H */ 157#endif /* _LINUX_IO_H */
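A minimal sketch of the new memremap() interface declared above, assuming res describes an ordinary RAM-like region:

	void *virt;

	virt = memremap(res->start, resource_size(res), MEMREMAP_WB);
	if (!virt)
		return -ENOMEM;

	/* ... access virt as regular cacheable memory ... */

	memunmap(virt);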
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 0b1e569f5ff5..f8cea14485dd 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,11 @@ struct ipmi_smi_handlers {
115 implement it. */ 115 implement it. */
116 void (*set_need_watch)(void *send_info, bool enable); 116 void (*set_need_watch)(void *send_info, bool enable);
117 117
118 /*
119 * Called when flushing all pending messages.
120 */
121 void (*flush_messages)(void *send_info);
122
118 /* Called when the interface should go into "run to 123 /* Called when the interface should go into "run to
119 completion" mode. If this call sets the value to true, the 124 completion" mode. If this call sets the value to true, the
120 interface should make sure that all messages are flushed 125 interface should make sure that all messages are flushed
@@ -207,7 +212,7 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
207 upper layer until the start_processing() function in the handlers 212 upper layer until the start_processing() function in the handlers
208 is called, and the lower layer must get the interface from that 213 is called, and the lower layer must get the interface from that
209 call. */ 214 call. */
210int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 215int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
211 void *send_info, 216 void *send_info,
212 struct ipmi_device_id *device_id, 217 struct ipmi_device_id *device_id,
213 struct device *dev, 218 struct device *dev,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c60aa42..f1f32af6d9b9 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,7 +29,9 @@ struct ipv6_devconf {
29 __s32 max_desync_factor; 29 __s32 max_desync_factor;
30 __s32 max_addresses; 30 __s32 max_addresses;
31 __s32 accept_ra_defrtr; 31 __s32 accept_ra_defrtr;
32 __s32 accept_ra_min_hop_limit;
32 __s32 accept_ra_pinfo; 33 __s32 accept_ra_pinfo;
34 __s32 ignore_routes_with_linkdown;
33#ifdef CONFIG_IPV6_ROUTER_PREF 35#ifdef CONFIG_IPV6_ROUTER_PREF
34 __s32 accept_ra_rtr_pref; 36 __s32 accept_ra_rtr_pref;
35 __s32 rtr_probe_interval; 37 __s32 rtr_probe_interval;
@@ -57,6 +59,7 @@ struct ipv6_devconf {
57 bool initialized; 59 bool initialized;
58 struct in6_addr secret; 60 struct in6_addr secret;
59 } stable_secret; 61 } stable_secret;
62 __s32 use_oif_addrs_only;
60 void *sysctl; 63 void *sysctl;
61}; 64};
62 65
@@ -94,7 +97,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
94struct inet6_skb_parm { 97struct inet6_skb_parm {
95 int iif; 98 int iif;
96 __be16 ra; 99 __be16 ra;
97 __u16 hop;
98 __u16 dst0; 100 __u16 dst0;
99 __u16 srcrt; 101 __u16 srcrt;
100 __u16 dst1; 102 __u16 dst1;
@@ -111,6 +113,7 @@ struct inet6_skb_parm {
111#define IP6SKB_REROUTED 4 113#define IP6SKB_REROUTED 4
112#define IP6SKB_ROUTERALERT 8 114#define IP6SKB_ROUTERALERT 8
113#define IP6SKB_FRAGMENTED 16 115#define IP6SKB_FRAGMENTED 16
116#define IP6SKB_HOPBYHOP 32
114}; 117};
115 118
116#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) 119#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 51744bcf74ee..6f8b34066442 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -324,8 +324,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
324 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips 324 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
325 * @irq_cpu_online: configure an interrupt source for a secondary CPU 325 * @irq_cpu_online: configure an interrupt source for a secondary CPU
326 * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU 326 * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
327 * @irq_suspend: function called from core code on suspend once per chip 327 * @irq_suspend: function called from core code on suspend once per
328 * @irq_resume: function called from core code on resume once per chip 328 * chip, when one or more interrupts are installed
329 * @irq_resume: function called from core code on resume once per chip,
 330 * when one or more interrupts are installed
329 * @irq_pm_shutdown: function called from core code on shutdown once per chip 331 * @irq_pm_shutdown: function called from core code on shutdown once per chip
330 * @irq_calc_mask: Optional function to set irq_data.mask for special cases 332 * @irq_calc_mask: Optional function to set irq_data.mask for special cases
331 * @irq_print_chip: optional to print special chip info in show_interrupts 333 * @irq_print_chip: optional to print special chip info in show_interrupts
@@ -488,8 +490,7 @@ extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
488#endif 490#endif
489 491
490/* Handling of unhandled and spurious interrupts: */ 492/* Handling of unhandled and spurious interrupts: */
491extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 493extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
492 irqreturn_t action_ret);
493 494
494 495
495/* Enable/disable irq debugging output: */ 496/* Enable/disable irq debugging output: */
@@ -640,7 +641,7 @@ static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
640 return d ? d->msi_desc : NULL; 641 return d ? d->msi_desc : NULL;
641} 642}
642 643
643static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) 644static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
644{ 645{
645 return d->msi_desc; 646 return d->msi_desc;
646} 647}
@@ -762,6 +763,12 @@ struct irq_chip_type {
762 * @reg_base: Register base address (virtual) 763 * @reg_base: Register base address (virtual)
763 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) 764 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
764 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) 765 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
766 * @suspend: Function called from core code on suspend once per
 767 * chip; can be useful instead of irq_chip::irq_suspend to
768 * handle chip details even when no interrupts are in use
769 * @resume: Function called from core code on resume once per chip;
 770 * can be useful instead of irq_chip::irq_resume to handle
771 * chip details even when no interrupts are in use
765 * @irq_base: Interrupt base nr for this chip 772 * @irq_base: Interrupt base nr for this chip
766 * @irq_cnt: Number of interrupts handled by this chip 773 * @irq_cnt: Number of interrupts handled by this chip
767 * @mask_cache: Cached mask register shared between all chip types 774 * @mask_cache: Cached mask register shared between all chip types
@@ -788,6 +795,8 @@ struct irq_chip_generic {
788 void __iomem *reg_base; 795 void __iomem *reg_base;
789 u32 (*reg_readl)(void __iomem *addr); 796 u32 (*reg_readl)(void __iomem *addr);
790 void (*reg_writel)(u32 val, void __iomem *addr); 797 void (*reg_writel)(u32 val, void __iomem *addr);
798 void (*suspend)(struct irq_chip_generic *gc);
799 void (*resume)(struct irq_chip_generic *gc);
791 unsigned int irq_base; 800 unsigned int irq_base;
792 unsigned int irq_cnt; 801 unsigned int irq_cnt;
793 u32 mask_cache; 802 u32 mask_cache;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index ffbc034c8810..9eeeb9589acf 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -104,6 +104,8 @@
104#define GICR_SYNCR 0x00C0 104#define GICR_SYNCR 0x00C0
105#define GICR_MOVLPIR 0x0100 105#define GICR_MOVLPIR 0x0100
106#define GICR_MOVALLR 0x0110 106#define GICR_MOVALLR 0x0110
107#define GICR_ISACTIVER GICD_ISACTIVER
108#define GICR_ICACTIVER GICD_ICACTIVER
107#define GICR_IDREGS GICD_IDREGS 109#define GICR_IDREGS GICD_IDREGS
108#define GICR_PIDR2 GICD_PIDR2 110#define GICR_PIDR2 GICD_PIDR2
109 111
@@ -268,9 +270,12 @@
268 270
269#define ICH_LR_EOI (1UL << 41) 271#define ICH_LR_EOI (1UL << 41)
270#define ICH_LR_GROUP (1UL << 60) 272#define ICH_LR_GROUP (1UL << 60)
273#define ICH_LR_HW (1UL << 61)
271#define ICH_LR_STATE (3UL << 62) 274#define ICH_LR_STATE (3UL << 62)
272#define ICH_LR_PENDING_BIT (1UL << 62) 275#define ICH_LR_PENDING_BIT (1UL << 62)
273#define ICH_LR_ACTIVE_BIT (1UL << 63) 276#define ICH_LR_ACTIVE_BIT (1UL << 63)
277#define ICH_LR_PHYS_ID_SHIFT 32
278#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
274 279
275#define ICH_MISR_EOI (1 << 0) 280#define ICH_MISR_EOI (1 << 0)
276#define ICH_MISR_U (1 << 1) 281#define ICH_MISR_U (1 << 1)
@@ -288,6 +293,7 @@
288#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) 293#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
289 294
290#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) 295#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
296#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
291#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) 297#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
292#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) 298#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
293#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) 299#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
@@ -360,6 +366,7 @@
360#ifndef __ASSEMBLY__ 366#ifndef __ASSEMBLY__
361 367
362#include <linux/stringify.h> 368#include <linux/stringify.h>
369#include <asm/msi.h>
363 370
364/* 371/*
365 * We need a value to serve as a irq-type for LPIs. Choose one that will 372 * We need a value to serve as a irq-type for LPIs. Choose one that will
@@ -384,6 +391,12 @@ static inline void gic_write_eoir(u64 irq)
384 isb(); 391 isb();
385} 392}
386 393
394static inline void gic_write_dir(u64 irq)
395{
396 asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
397 isb();
398}
399
387struct irq_domain; 400struct irq_domain;
388int its_cpu_init(void); 401int its_cpu_init(void);
389int its_init(struct device_node *node, struct rdists *rdists, 402int its_init(struct device_node *node, struct rdists *rdists,
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9de976b4f9a7..b8901dfd9e95 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -20,9 +20,13 @@
20#define GIC_CPU_ALIAS_BINPOINT 0x1c 20#define GIC_CPU_ALIAS_BINPOINT 0x1c
21#define GIC_CPU_ACTIVEPRIO 0xd0 21#define GIC_CPU_ACTIVEPRIO 0xd0
22#define GIC_CPU_IDENT 0xfc 22#define GIC_CPU_IDENT 0xfc
23#define GIC_CPU_DEACTIVATE 0x1000
23 24
24#define GICC_ENABLE 0x1 25#define GICC_ENABLE 0x1
25#define GICC_INT_PRI_THRESHOLD 0xf0 26#define GICC_INT_PRI_THRESHOLD 0xf0
27
28#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
29
26#define GICC_IAR_INT_ID_MASK 0x3ff 30#define GICC_IAR_INT_ID_MASK 0x3ff
27#define GICC_INT_SPURIOUS 1023 31#define GICC_INT_SPURIOUS 1023
28#define GICC_DIS_BYPASS_MASK 0x1e0 32#define GICC_DIS_BYPASS_MASK 0x1e0
@@ -71,11 +75,12 @@
71 75
72#define GICH_LR_VIRTUALID (0x3ff << 0) 76#define GICH_LR_VIRTUALID (0x3ff << 0)
73#define GICH_LR_PHYSID_CPUID_SHIFT (10) 77#define GICH_LR_PHYSID_CPUID_SHIFT (10)
74#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT) 78#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
75#define GICH_LR_STATE (3 << 28) 79#define GICH_LR_STATE (3 << 28)
76#define GICH_LR_PENDING_BIT (1 << 28) 80#define GICH_LR_PENDING_BIT (1 << 28)
77#define GICH_LR_ACTIVE_BIT (1 << 29) 81#define GICH_LR_ACTIVE_BIT (1 << 29)
78#define GICH_LR_EOI (1 << 19) 82#define GICH_LR_EOI (1 << 19)
83#define GICH_LR_HW (1 << 31)
79 84
80#define GICH_VMCR_CTRL_SHIFT 0 85#define GICH_VMCR_CTRL_SHIFT 0
81#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) 86#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
@@ -95,11 +100,10 @@
95 100
96struct device_node; 101struct device_node;
97 102
98void gic_set_irqchip_flags(unsigned long flags);
99void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, 103void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
100 u32 offset, struct device_node *); 104 u32 offset, struct device_node *);
101void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 105void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
102void gic_cpu_if_down(void); 106int gic_cpu_if_down(unsigned int gic_nr);
103 107
104static inline void gic_init(unsigned int nr, int start, 108static inline void gic_init(unsigned int nr, int start,
105 void __iomem *dist , void __iomem *cpu) 109 void __iomem *dist , void __iomem *cpu)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 9b1ad3734911..4e6861605050 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -41,12 +41,20 @@
41 41
42/* Shared Global Counter */ 42/* Shared Global Counter */
43#define GIC_SH_COUNTER_31_00_OFS 0x0010 43#define GIC_SH_COUNTER_31_00_OFS 0x0010
44/* 64-bit counter register for CM3 */
45#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
44#define GIC_SH_COUNTER_63_32_OFS 0x0014 46#define GIC_SH_COUNTER_63_32_OFS 0x0014
45#define GIC_SH_REVISIONID_OFS 0x0020 47#define GIC_SH_REVISIONID_OFS 0x0020
46 48
47/* Convert an interrupt number to a byte offset/bit for multi-word registers */ 49/* Convert an interrupt number to a byte offset/bit for multi-word registers */
48#define GIC_INTR_OFS(intr) (((intr) / 32) * 4) 50#define GIC_INTR_OFS(intr) ({ \
49#define GIC_INTR_BIT(intr) ((intr) % 32) 51 unsigned bits = mips_cm_is64 ? 64 : 32; \
52 unsigned reg_idx = (intr) / bits; \
53 unsigned reg_width = bits / 8; \
54 \
55 reg_idx * reg_width; \
56})
57#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
50 58
51/* Polarity : Reset Value is always 0 */ 59/* Polarity : Reset Value is always 0 */
52#define GIC_SH_SET_POLARITY_OFS 0x0100 60#define GIC_SH_SET_POLARITY_OFS 0x0100
@@ -98,6 +106,8 @@
98#define GIC_VPE_WD_COUNT0_OFS 0x0094 106#define GIC_VPE_WD_COUNT0_OFS 0x0094
99#define GIC_VPE_WD_INITIAL0_OFS 0x0098 107#define GIC_VPE_WD_INITIAL0_OFS 0x0098
100#define GIC_VPE_COMPARE_LO_OFS 0x00a0 108#define GIC_VPE_COMPARE_LO_OFS 0x00a0
109/* 64-bit Compare register on CM3 */
110#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
101#define GIC_VPE_COMPARE_HI_OFS 0x00a4 111#define GIC_VPE_COMPARE_HI_OFS 0x00a4
102 112
103#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 113#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index fcea4e48e21f..5acfa26602e1 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -166,12 +166,16 @@ static inline int handle_domain_irq(struct irq_domain *domain,
166#endif 166#endif
167 167
168/* Test to see if a driver has successfully requested an irq */ 168/* Test to see if a driver has successfully requested an irq */
169static inline int irq_has_action(unsigned int irq) 169static inline int irq_desc_has_action(struct irq_desc *desc)
170{ 170{
171 struct irq_desc *desc = irq_to_desc(irq);
172 return desc->action != NULL; 171 return desc->action != NULL;
173} 172}
174 173
174static inline int irq_has_action(unsigned int irq)
175{
176 return irq_desc_has_action(irq_to_desc(irq));
177}
178
175/* caller has locked the irq_desc and both params are valid */ 179/* caller has locked the irq_desc and both params are valid */
176static inline void __irq_set_handler_locked(unsigned int irq, 180static inline void __irq_set_handler_locked(unsigned int irq,
177 irq_flow_handler_t handler) 181 irq_flow_handler_t handler)
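A rough kernel-context sketch of the resulting call pattern; report_irq_usage() is a hypothetical caller, not code from this patch. A path that has already resolved the descriptor can use irq_desc_has_action() directly, while irq-number-based callers keep the existing helper:

static void report_irq_usage(unsigned int irq, struct irq_desc *desc)
{
        /* Prefer the descriptor when we already hold it; fall back to the lookup. */
        if (desc ? irq_desc_has_action(desc) : irq_has_action(irq))
                pr_info("irq %u has a handler installed\n", irq);
}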
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 744ac0ec98eb..d3ca79236fb0 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -45,6 +45,20 @@ struct irq_data;
45/* Number of irqs reserved for a legacy isa controller */ 45/* Number of irqs reserved for a legacy isa controller */
46#define NUM_ISA_INTERRUPTS 16 46#define NUM_ISA_INTERRUPTS 16
47 47
48/*
49 * Should several domains have the same device node, but serve
50 * different purposes (for example one domain is for PCI/MSI, and the
51 * other for wired IRQs), they can be distinguished using a
52 * bus-specific token. Most domains are expected to only carry
53 * DOMAIN_BUS_ANY.
54 */
55enum irq_domain_bus_token {
56 DOMAIN_BUS_ANY = 0,
57 DOMAIN_BUS_PCI_MSI,
58 DOMAIN_BUS_PLATFORM_MSI,
59 DOMAIN_BUS_NEXUS,
60};
61
48/** 62/**
49 * struct irq_domain_ops - Methods for irq_domain objects 63 * struct irq_domain_ops - Methods for irq_domain objects
50 * @match: Match an interrupt controller device node to a host, returns 64 * @match: Match an interrupt controller device node to a host, returns
@@ -61,7 +75,8 @@ struct irq_data;
61 * to setup the irq_desc when returning from map(). 75 * to setup the irq_desc when returning from map().
62 */ 76 */
63struct irq_domain_ops { 77struct irq_domain_ops {
64 int (*match)(struct irq_domain *d, struct device_node *node); 78 int (*match)(struct irq_domain *d, struct device_node *node,
79 enum irq_domain_bus_token bus_token);
65 int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); 80 int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
66 void (*unmap)(struct irq_domain *d, unsigned int virq); 81 void (*unmap)(struct irq_domain *d, unsigned int virq);
67 int (*xlate)(struct irq_domain *d, struct device_node *node, 82 int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -116,6 +131,7 @@ struct irq_domain {
116 131
117 /* Optional data */ 132 /* Optional data */
118 struct device_node *of_node; 133 struct device_node *of_node;
134 enum irq_domain_bus_token bus_token;
119 struct irq_domain_chip_generic *gc; 135 struct irq_domain_chip_generic *gc;
120#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 136#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
121 struct irq_domain *parent; 137 struct irq_domain *parent;
@@ -161,9 +177,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
161 irq_hw_number_t first_hwirq, 177 irq_hw_number_t first_hwirq,
162 const struct irq_domain_ops *ops, 178 const struct irq_domain_ops *ops,
163 void *host_data); 179 void *host_data);
164extern struct irq_domain *irq_find_host(struct device_node *node); 180extern struct irq_domain *irq_find_matching_host(struct device_node *node,
181 enum irq_domain_bus_token bus_token);
165extern void irq_set_default_host(struct irq_domain *host); 182extern void irq_set_default_host(struct irq_domain *host);
166 183
184static inline struct irq_domain *irq_find_host(struct device_node *node)
185{
186 return irq_find_matching_host(node, DOMAIN_BUS_ANY);
187}
188
167/** 189/**
168 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. 190 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
169 * @of_node: pointer to interrupt controller's device tree node. 191 * @of_node: pointer to interrupt controller's device tree node.
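As a hedged illustration of the new bus token (the my_msi_* names are hypothetical, not taken from this patch): an MSI controller sharing a device node with a wired-IRQ domain can now claim only token-qualified lookups, and consumers can ask for that specific domain.

static int my_msi_domain_match(struct irq_domain *d, struct device_node *node,
                               enum irq_domain_bus_token bus_token)
{
        /* Claim the lookup only when the caller explicitly asks for PCI/MSI. */
        return d->of_node == node && bus_token == DOMAIN_BUS_PCI_MSI;
}

static struct irq_domain *my_find_msi_domain(struct device_node *np)
{
        /* Distinguishes this domain from a wired-IRQ domain on the same node. */
        return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
}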
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d32615280be9..000000000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
1/*
2 * linux/include/linux/jbd.h
3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>
5 *
6 * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
7 *
8 * This file is part of the Linux kernel and is made available under
9 * the terms of the GNU General Public License, version 2, or at your
10 * option, any later version, incorporated herein by reference.
11 *
12 * Definitions for transaction data structures for the buffer cache
13 * filesystem journaling support.
14 */
15
16#ifndef _LINUX_JBD_H
17#define _LINUX_JBD_H
18
19/* Allow this file to be included directly into e2fsprogs */
20#ifndef __KERNEL__
21#include "jfs_compat.h"
22#define JFS_DEBUG
23#define jfs_debug jbd_debug
24#else
25
26#include <linux/types.h>
27#include <linux/buffer_head.h>
28#include <linux/journal-head.h>
29#include <linux/stddef.h>
30#include <linux/mutex.h>
31#include <linux/timer.h>
32#include <linux/lockdep.h>
33#include <linux/slab.h>
34
35#define journal_oom_retry 1
36
37/*
38 * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
39 * certain classes of error which can occur due to failed IOs. Under
40 * normal use we want ext3 to continue after such errors, because
41 * hardware _can_ fail, but for debugging purposes when running tests on
42 * known-good hardware we may want to trap these errors.
43 */
44#undef JBD_PARANOID_IOFAIL
45
46/*
47 * The default maximum commit age, in seconds.
48 */
49#define JBD_DEFAULT_MAX_COMMIT_AGE 5
50
51#ifdef CONFIG_JBD_DEBUG
52/*
53 * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
54 * consistency checks. By default we don't do this unless
55 * CONFIG_JBD_DEBUG is on.
56 */
57#define JBD_EXPENSIVE_CHECKING
58extern u8 journal_enable_debug;
59
60void __jbd_debug(int level, const char *file, const char *func,
61 unsigned int line, const char *fmt, ...);
62
63#define jbd_debug(n, fmt, a...) \
64 __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
65#else
66#define jbd_debug(n, fmt, a...) /**/
67#endif
68
69static inline void *jbd_alloc(size_t size, gfp_t flags)
70{
71 return (void *)__get_free_pages(flags, get_order(size));
72}
73
74static inline void jbd_free(void *ptr, size_t size)
75{
76 free_pages((unsigned long)ptr, get_order(size));
77}
78
79#define JFS_MIN_JOURNAL_BLOCKS 1024
80
81
82/**
83 * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
84 *
85 * All filesystem modifications made by the process go
86 * through this handle. Recursive operations (such as quota operations)
87 * are gathered into a single update.
88 *
89 * The buffer credits field is used to account for journaled buffers
90 * being modified by the running process. To ensure that there is
91 * enough log space for all outstanding operations, we need to limit the
92 * number of outstanding buffers possible at any time. When the
93 * operation completes, any buffer credits not used are credited back to
94 * the transaction, so that at all times we know how many buffers the
95 * outstanding updates on a transaction might possibly touch.
96 *
97 * This is an opaque datatype.
98 **/
99typedef struct handle_s handle_t; /* Atomic operation type */
100
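Although this header is removed by the patch (jbd2 remains), the handle lifecycle described above maps onto the declarations further down in this file; a schematic, hedged ext3-style caller would look roughly like the following, with 'journal' and 'bh' assumed to exist and error handling trimmed.

static int example_update(journal_t *journal, struct buffer_head *bh)
{
        handle_t *handle = journal_start(journal, 1);   /* reserve credit for one buffer */

        if (IS_ERR(handle))
                return PTR_ERR(handle);
        journal_get_write_access(handle, bh);           /* declare intent to modify bh */
        /* ... modify the buffer contents ... */
        journal_dirty_metadata(handle, bh);             /* consumes one reserved credit */
        return journal_stop(handle);                    /* unused credits return to the transaction */
}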
101
102/**
103 * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
104 *
105 * journal_t is linked to from the fs superblock structure.
106 *
107 * We use the journal_t to keep track of all outstanding transaction
108 * activity on the filesystem, and to manage the state of the log
109 * writing process.
110 *
111 * This is an opaque datatype.
112 **/
113typedef struct journal_s journal_t; /* Journal control structure */
114#endif
115
116/*
117 * Internal structures used by the logging mechanism:
118 */
119
120#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
121
122/*
123 * On-disk structures
124 */
125
126/*
127 * Descriptor block types:
128 */
129
130#define JFS_DESCRIPTOR_BLOCK 1
131#define JFS_COMMIT_BLOCK 2
132#define JFS_SUPERBLOCK_V1 3
133#define JFS_SUPERBLOCK_V2 4
134#define JFS_REVOKE_BLOCK 5
135
136/*
137 * Standard header for all descriptor blocks:
138 */
139typedef struct journal_header_s
140{
141 __be32 h_magic;
142 __be32 h_blocktype;
143 __be32 h_sequence;
144} journal_header_t;
145
146
147/*
148 * The block tag: used to describe a single buffer in the journal
149 */
150typedef struct journal_block_tag_s
151{
152 __be32 t_blocknr; /* The on-disk block number */
153 __be32 t_flags; /* See below */
154} journal_block_tag_t;
155
156/*
157 * The revoke descriptor: used on disk to describe a series of blocks to
158 * be revoked from the log
159 */
160typedef struct journal_revoke_header_s
161{
162 journal_header_t r_header;
163 __be32 r_count; /* Count of bytes used in the block */
164} journal_revoke_header_t;
165
166
167/* Definitions for the journal tag flags word: */
168#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
169#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
170#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
171#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
172
173
174/*
175 * The journal superblock. All fields are in big-endian byte order.
176 */
177typedef struct journal_superblock_s
178{
179/* 0x0000 */
180 journal_header_t s_header;
181
182/* 0x000C */
183 /* Static information describing the journal */
184 __be32 s_blocksize; /* journal device blocksize */
185 __be32 s_maxlen; /* total blocks in journal file */
186 __be32 s_first; /* first block of log information */
187
188/* 0x0018 */
189 /* Dynamic information describing the current state of the log */
190 __be32 s_sequence; /* first commit ID expected in log */
191 __be32 s_start; /* blocknr of start of log */
192
193/* 0x0020 */
194 /* Error value, as set by journal_abort(). */
195 __be32 s_errno;
196
197/* 0x0024 */
198 /* Remaining fields are only valid in a version-2 superblock */
199 __be32 s_feature_compat; /* compatible feature set */
200 __be32 s_feature_incompat; /* incompatible feature set */
201 __be32 s_feature_ro_compat; /* readonly-compatible feature set */
202/* 0x0030 */
203 __u8 s_uuid[16]; /* 128-bit uuid for journal */
204
205/* 0x0040 */
206 __be32 s_nr_users; /* Nr of filesystems sharing log */
207
208 __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
209
210/* 0x0048 */
211 __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
212 __be32 s_max_trans_data; /* Limit of data blocks per trans. */
213
214/* 0x0050 */
215 __u32 s_padding[44];
216
217/* 0x0100 */
218 __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
219/* 0x0400 */
220} journal_superblock_t;
221
222#define JFS_HAS_COMPAT_FEATURE(j,mask) \
223 ((j)->j_format_version >= 2 && \
224 ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
225#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
226 ((j)->j_format_version >= 2 && \
227 ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
228#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
229 ((j)->j_format_version >= 2 && \
230 ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
231
232#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
233
234/* Features known to this kernel version: */
235#define JFS_KNOWN_COMPAT_FEATURES 0
236#define JFS_KNOWN_ROCOMPAT_FEATURES 0
237#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
238
239#ifdef __KERNEL__
240
241#include <linux/fs.h>
242#include <linux/sched.h>
243
244enum jbd_state_bits {
245 BH_JBD /* Has an attached ext3 journal_head */
246 = BH_PrivateStart,
247 BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
248 BH_Freed, /* Has been freed (truncated) */
249 BH_Revoked, /* Has been revoked from the log */
250 BH_RevokeValid, /* Revoked flag is valid */
251 BH_JBDDirty, /* Is dirty but journaled */
252 BH_State, /* Pins most journal_head state */
253 BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
254 BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
255 BH_JBDPrivateStart, /* First bit available for private use by FS */
256};
257
258BUFFER_FNS(JBD, jbd)
259BUFFER_FNS(JWrite, jwrite)
260BUFFER_FNS(JBDDirty, jbddirty)
261TAS_BUFFER_FNS(JBDDirty, jbddirty)
262BUFFER_FNS(Revoked, revoked)
263TAS_BUFFER_FNS(Revoked, revoked)
264BUFFER_FNS(RevokeValid, revokevalid)
265TAS_BUFFER_FNS(RevokeValid, revokevalid)
266BUFFER_FNS(Freed, freed)
267
268#include <linux/jbd_common.h>
269
270#define J_ASSERT(assert) BUG_ON(!(assert))
271
272#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
273#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
274
275#if defined(JBD_PARANOID_IOFAIL)
276#define J_EXPECT(expr, why...) J_ASSERT(expr)
277#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
278#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
279#else
280#define __journal_expect(expr, why...) \
281 ({ \
282 int val = (expr); \
283 if (!val) { \
284 printk(KERN_ERR \
285 "EXT3-fs unexpected failure: %s;\n",# expr); \
286 printk(KERN_ERR why "\n"); \
287 } \
288 val; \
289 })
290#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
291#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
292#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
293#endif
294
295struct jbd_revoke_table_s;
296
297/**
298 * struct handle_s - this is the concrete type associated with handle_t.
299 * @h_transaction: Which compound transaction is this update a part of?
300 * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
301 * @h_ref: Reference count on this handle
302 * @h_err: Field for caller's use to track errors through large fs operations
303 * @h_sync: flag for sync-on-close
304 * @h_jdata: flag to force data journaling
305 * @h_aborted: flag indicating fatal error on handle
306 * @h_lockdep_map: lockdep info for debugging lock problems
307 */
308struct handle_s
309{
310 /* Which compound transaction is this update a part of? */
311 transaction_t *h_transaction;
312
313 /* Number of remaining buffers we are allowed to dirty: */
314 int h_buffer_credits;
315
316 /* Reference count on this handle */
317 int h_ref;
318
319 /* Field for caller's use to track errors through large fs */
320 /* operations */
321 int h_err;
322
323 /* Flags [no locking] */
324 unsigned int h_sync: 1; /* sync-on-close */
325 unsigned int h_jdata: 1; /* force data journaling */
326 unsigned int h_aborted: 1; /* fatal error on handle */
327
328#ifdef CONFIG_DEBUG_LOCK_ALLOC
329 struct lockdep_map h_lockdep_map;
330#endif
331};
332
333
334/* The transaction_t type is the guts of the journaling mechanism. It
335 * tracks a compound transaction through its various states:
336 *
337 * RUNNING: accepting new updates
338 * LOCKED: Updates still running but we don't accept new ones
339 * RUNDOWN: Updates are tidying up but have finished requesting
340 * new buffers to modify (state not used for now)
341 * FLUSH: All updates complete, but we are still writing to disk
342 * COMMIT: All data on disk, writing commit record
343 * FINISHED: We still have to keep the transaction for checkpointing.
344 *
345 * The transaction keeps track of all of the buffers modified by a
346 * running transaction, and all of the buffers committed but not yet
347 * flushed to home for finished transactions.
348 */
349
350/*
351 * Lock ranking:
352 *
353 * j_list_lock
354 * ->jbd_lock_bh_journal_head() (This is "innermost")
355 *
356 * j_state_lock
357 * ->jbd_lock_bh_state()
358 *
359 * jbd_lock_bh_state()
360 * ->j_list_lock
361 *
362 * j_state_lock
363 * ->t_handle_lock
364 *
365 * j_state_lock
366 * ->j_list_lock (journal_unmap_buffer)
367 *
368 */
369
370struct transaction_s
371{
372 /* Pointer to the journal for this transaction. [no locking] */
373 journal_t *t_journal;
374
375 /* Sequence number for this transaction [no locking] */
376 tid_t t_tid;
377
378 /*
379 * Transaction's current state
380 * [no locking - only kjournald alters this]
381 * [j_list_lock] guards transition of a transaction into T_FINISHED
382 * state and subsequent call of __journal_drop_transaction()
383 * FIXME: needs barriers
384 * KLUDGE: [use j_state_lock]
385 */
386 enum {
387 T_RUNNING,
388 T_LOCKED,
389 T_FLUSH,
390 T_COMMIT,
391 T_COMMIT_RECORD,
392 T_FINISHED
393 } t_state;
394
395 /*
396 * Where in the log does this transaction's commit start? [no locking]
397 */
398 unsigned int t_log_start;
399
400 /* Number of buffers on the t_buffers list [j_list_lock] */
401 int t_nr_buffers;
402
403 /*
404 * Doubly-linked circular list of all buffers reserved but not yet
405 * modified by this transaction [j_list_lock]
406 */
407 struct journal_head *t_reserved_list;
408
409 /*
410 * Doubly-linked circular list of all buffers under writeout during
411 * commit [j_list_lock]
412 */
413 struct journal_head *t_locked_list;
414
415 /*
416 * Doubly-linked circular list of all metadata buffers owned by this
417 * transaction [j_list_lock]
418 */
419 struct journal_head *t_buffers;
420
421 /*
422 * Doubly-linked circular list of all data buffers still to be
423 * flushed before this transaction can be committed [j_list_lock]
424 */
425 struct journal_head *t_sync_datalist;
426
427 /*
428 * Doubly-linked circular list of all forget buffers (superseded
429 * buffers which we can un-checkpoint once this transaction commits)
430 * [j_list_lock]
431 */
432 struct journal_head *t_forget;
433
434 /*
435 * Doubly-linked circular list of all buffers still to be flushed before
436 * this transaction can be checkpointed. [j_list_lock]
437 */
438 struct journal_head *t_checkpoint_list;
439
440 /*
441 * Doubly-linked circular list of all buffers submitted for IO while
442 * checkpointing. [j_list_lock]
443 */
444 struct journal_head *t_checkpoint_io_list;
445
446 /*
447 * Doubly-linked circular list of temporary buffers currently undergoing
448 * IO in the log [j_list_lock]
449 */
450 struct journal_head *t_iobuf_list;
451
452 /*
453 * Doubly-linked circular list of metadata buffers being shadowed by log
454 * IO. The IO buffers on the iobuf list and the shadow buffers on this
455 * list match each other one for one at all times. [j_list_lock]
456 */
457 struct journal_head *t_shadow_list;
458
459 /*
460 * Doubly-linked circular list of control buffers being written to the
461 * log. [j_list_lock]
462 */
463 struct journal_head *t_log_list;
464
465 /*
466 * Protects info related to handles
467 */
468 spinlock_t t_handle_lock;
469
470 /*
471 * Number of outstanding updates running on this transaction
472 * [t_handle_lock]
473 */
474 int t_updates;
475
476 /*
477 * Number of buffers reserved for use by all handles in this transaction
478 * handle but not yet modified. [t_handle_lock]
479 */
480 int t_outstanding_credits;
481
482 /*
483 * Forward and backward links for the circular list of all transactions
484 * awaiting checkpoint. [j_list_lock]
485 */
486 transaction_t *t_cpnext, *t_cpprev;
487
488 /*
489 * When will the transaction expire (become due for commit), in jiffies?
490 * [no locking]
491 */
492 unsigned long t_expires;
493
494 /*
495 * When this transaction started, in nanoseconds [no locking]
496 */
497 ktime_t t_start_time;
498
499 /*
500 * How many handles used this transaction? [t_handle_lock]
501 */
502 int t_handle_count;
503};
504
505/**
506 * struct journal_s - this is the concrete type associated with journal_t.
507 * @j_flags: General journaling state flags
508 * @j_errno: Is there an outstanding uncleared error on the journal (from a
509 * prior abort)?
510 * @j_sb_buffer: First part of superblock buffer
511 * @j_superblock: Second part of superblock buffer
512 * @j_format_version: Version of the superblock format
513 * @j_state_lock: Protect the various scalars in the journal
514 * @j_barrier_count: Number of processes waiting to create a barrier lock
515 * @j_running_transaction: The current running transaction..
516 * @j_committing_transaction: the transaction we are pushing to disk
517 * @j_checkpoint_transactions: a linked circular list of all transactions
518 * waiting for checkpointing
519 * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
520 * to start committing, or for a barrier lock to be released
521 * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
522 * @j_wait_done_commit: Wait queue for waiting for commit to complete
523 * @j_wait_checkpoint: Wait queue to trigger checkpointing
524 * @j_wait_commit: Wait queue to trigger commit
525 * @j_wait_updates: Wait queue to wait for updates to complete
526 * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
527 * @j_head: Journal head - identifies the first unused block in the journal
528 * @j_tail: Journal tail - identifies the oldest still-used block in the
529 * journal.
530 * @j_free: Journal free - how many free blocks are there in the journal?
531 * @j_first: The block number of the first usable block
532 * @j_last: The block number one beyond the last usable block
533 * @j_dev: Device where we store the journal
534 * @j_blocksize: blocksize for the location where we store the journal.
535 * @j_blk_offset: starting block offset into the device where we store the
536 * journal
537 * @j_fs_dev: Device which holds the client fs. For internal journal this will
538 * be equal to j_dev
539 * @j_maxlen: Total maximum capacity of the journal region on disk.
540 * @j_list_lock: Protects the buffer lists and internal buffer state.
541 * @j_inode: Optional inode where we store the journal. If present, all journal
542 * block numbers are mapped into this inode via bmap().
543 * @j_tail_sequence: Sequence number of the oldest transaction in the log
544 * @j_transaction_sequence: Sequence number of the next transaction to grant
545 * @j_commit_sequence: Sequence number of the most recently committed
546 * transaction
547 * @j_commit_request: Sequence number of the most recent transaction wanting
548 * commit
549 * @j_commit_waited: Sequence number of the most recent transaction someone
550 * is waiting for to commit.
551 * @j_uuid: Uuid of client object.
552 * @j_task: Pointer to the current commit thread for this journal
553 * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
554 * single compound commit transaction
555 * @j_commit_interval: What is the maximum transaction lifetime before we begin
556 * a commit?
557 * @j_commit_timer: The timer used to wakeup the commit thread
558 * @j_revoke_lock: Protect the revoke table
559 * @j_revoke: The revoke table - maintains the list of revoked blocks in the
560 * current transaction.
561 * @j_revoke_table: alternate revoke tables for j_revoke
562 * @j_wbuf: array of buffer_heads for journal_commit_transaction
563 * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
564 * number that will fit in j_blocksize
565 * @j_last_sync_writer: most recent pid which did a synchronous write
566 * @j_average_commit_time: the average amount of time in nanoseconds it
567 * takes to commit a transaction to the disk.
568 * @j_private: An opaque pointer to fs-private information.
569 */
570
571struct journal_s
572{
573 /* General journaling state flags [j_state_lock] */
574 unsigned long j_flags;
575
576 /*
577 * Is there an outstanding uncleared error on the journal (from a prior
578 * abort)? [j_state_lock]
579 */
580 int j_errno;
581
582 /* The superblock buffer */
583 struct buffer_head *j_sb_buffer;
584 journal_superblock_t *j_superblock;
585
586 /* Version of the superblock format */
587 int j_format_version;
588
589 /*
590 * Protect the various scalars in the journal
591 */
592 spinlock_t j_state_lock;
593
594 /*
595 * Number of processes waiting to create a barrier lock [j_state_lock]
596 */
597 int j_barrier_count;
598
599 /*
600 * Transactions: The current running transaction...
601 * [j_state_lock] [caller holding open handle]
602 */
603 transaction_t *j_running_transaction;
604
605 /*
606 * the transaction we are pushing to disk
607 * [j_state_lock] [caller holding open handle]
608 */
609 transaction_t *j_committing_transaction;
610
611 /*
612 * ... and a linked circular list of all transactions waiting for
613 * checkpointing. [j_list_lock]
614 */
615 transaction_t *j_checkpoint_transactions;
616
617 /*
618 * Wait queue for waiting for a locked transaction to start committing,
619 * or for a barrier lock to be released
620 */
621 wait_queue_head_t j_wait_transaction_locked;
622
623 /* Wait queue for waiting for checkpointing to complete */
624 wait_queue_head_t j_wait_logspace;
625
626 /* Wait queue for waiting for commit to complete */
627 wait_queue_head_t j_wait_done_commit;
628
629 /* Wait queue to trigger checkpointing */
630 wait_queue_head_t j_wait_checkpoint;
631
632 /* Wait queue to trigger commit */
633 wait_queue_head_t j_wait_commit;
634
635 /* Wait queue to wait for updates to complete */
636 wait_queue_head_t j_wait_updates;
637
638 /* Semaphore for locking against concurrent checkpoints */
639 struct mutex j_checkpoint_mutex;
640
641 /*
642 * Journal head: identifies the first unused block in the journal.
643 * [j_state_lock]
644 */
645 unsigned int j_head;
646
647 /*
648 * Journal tail: identifies the oldest still-used block in the journal.
649 * [j_state_lock]
650 */
651 unsigned int j_tail;
652
653 /*
654 * Journal free: how many free blocks are there in the journal?
655 * [j_state_lock]
656 */
657 unsigned int j_free;
658
659 /*
660 * Journal start and end: the block numbers of the first usable block
661 * and one beyond the last usable block in the journal. [j_state_lock]
662 */
663 unsigned int j_first;
664 unsigned int j_last;
665
666 /*
667 * Device, blocksize and starting block offset for the location where we
668 * store the journal.
669 */
670 struct block_device *j_dev;
671 int j_blocksize;
672 unsigned int j_blk_offset;
673
674 /*
675 * Device which holds the client fs. For internal journal this will be
676 * equal to j_dev.
677 */
678 struct block_device *j_fs_dev;
679
680 /* Total maximum capacity of the journal region on disk. */
681 unsigned int j_maxlen;
682
683 /*
684 * Protects the buffer lists and internal buffer state.
685 */
686 spinlock_t j_list_lock;
687
688 /* Optional inode where we store the journal. If present, all */
689 /* journal block numbers are mapped into this inode via */
690 /* bmap(). */
691 struct inode *j_inode;
692
693 /*
694 * Sequence number of the oldest transaction in the log [j_state_lock]
695 */
696 tid_t j_tail_sequence;
697
698 /*
699 * Sequence number of the next transaction to grant [j_state_lock]
700 */
701 tid_t j_transaction_sequence;
702
703 /*
704 * Sequence number of the most recently committed transaction
705 * [j_state_lock].
706 */
707 tid_t j_commit_sequence;
708
709 /*
710 * Sequence number of the most recent transaction wanting commit
711 * [j_state_lock]
712 */
713 tid_t j_commit_request;
714
715 /*
716 * Sequence number of the most recent transaction someone is waiting
717 * for to commit.
718 * [j_state_lock]
719 */
720 tid_t j_commit_waited;
721
722 /*
723 * Journal uuid: identifies the object (filesystem, LVM volume etc)
724 * backed by this journal. This will eventually be replaced by an array
725 * of uuids, allowing us to index multiple devices within a single
726 * journal and to perform atomic updates across them.
727 */
728 __u8 j_uuid[16];
729
730 /* Pointer to the current commit thread for this journal */
731 struct task_struct *j_task;
732
733 /*
734 * Maximum number of metadata buffers to allow in a single compound
735 * commit transaction
736 */
737 int j_max_transaction_buffers;
738
739 /*
740 * What is the maximum transaction lifetime before we begin a commit?
741 */
742 unsigned long j_commit_interval;
743
744 /* The timer used to wakeup the commit thread: */
745 struct timer_list j_commit_timer;
746
747 /*
748 * The revoke table: maintains the list of revoked blocks in the
749 * current transaction. [j_revoke_lock]
750 */
751 spinlock_t j_revoke_lock;
752 struct jbd_revoke_table_s *j_revoke;
753 struct jbd_revoke_table_s *j_revoke_table[2];
754
755 /*
756 * array of bhs for journal_commit_transaction
757 */
758 struct buffer_head **j_wbuf;
759 int j_wbufsize;
760
761 /*
762 * this is the pid of the last person to run a synchronous operation
763 * through the journal.
764 */
765 pid_t j_last_sync_writer;
766
767 /*
768 * the average amount of time in nanoseconds it takes to commit a
769 * transaction to the disk. [j_state_lock]
770 */
771 u64 j_average_commit_time;
772
773 /*
774 * An opaque pointer to fs-private information. ext3 puts its
775 * superblock pointer here
776 */
777 void *j_private;
778};
779
780/*
781 * Journal flag definitions
782 */
783#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
784#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
785#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
786#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
787#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
788#define JFS_BARRIER 0x020 /* Use IDE barriers */
789#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
790 * data write error in ordered
791 * mode */
792
793/*
794 * Function declarations for the journaling transaction and buffer
795 * management
796 */
797
798/* Filing buffers */
799extern void journal_unfile_buffer(journal_t *, struct journal_head *);
800extern void __journal_unfile_buffer(struct journal_head *);
801extern void __journal_refile_buffer(struct journal_head *);
802extern void journal_refile_buffer(journal_t *, struct journal_head *);
803extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
804extern void __journal_free_buffer(struct journal_head *bh);
805extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
806extern void __journal_clean_data_list(transaction_t *transaction);
807
808/* Log buffer allocation */
809extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
810int journal_next_log_block(journal_t *, unsigned int *);
811
812/* Commit management */
813extern void journal_commit_transaction(journal_t *);
814
815/* Checkpoint list management */
816int __journal_clean_checkpoint_list(journal_t *journal);
817int __journal_remove_checkpoint(struct journal_head *);
818void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
819
820/* Buffer IO */
821extern int
822journal_write_metadata_buffer(transaction_t *transaction,
823 struct journal_head *jh_in,
824 struct journal_head **jh_out,
825 unsigned int blocknr);
826
827/* Transaction locking */
828extern void __wait_on_journal (journal_t *);
829
830/*
831 * Journal locking.
832 *
833 * We need to lock the journal during transaction state changes so that nobody
834 * ever tries to take a handle on the running transaction while we are in the
835 * middle of moving it to the commit phase. j_state_lock does this.
836 *
837 * Note that the locking is completely interrupt unsafe. We never touch
838 * journal structures from interrupts.
839 */
840
841static inline handle_t *journal_current_handle(void)
842{
843 return current->journal_info;
844}
845
846/* The journaling code user interface:
847 *
848 * Create and destroy handles
849 * Register buffer modifications against the current transaction.
850 */
851
852extern handle_t *journal_start(journal_t *, int nblocks);
853extern int journal_restart (handle_t *, int nblocks);
854extern int journal_extend (handle_t *, int nblocks);
855extern int journal_get_write_access(handle_t *, struct buffer_head *);
856extern int journal_get_create_access (handle_t *, struct buffer_head *);
857extern int journal_get_undo_access(handle_t *, struct buffer_head *);
858extern int journal_dirty_data (handle_t *, struct buffer_head *);
859extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
860extern void journal_release_buffer (handle_t *, struct buffer_head *);
861extern int journal_forget (handle_t *, struct buffer_head *);
862extern void journal_sync_buffer (struct buffer_head *);
863extern void journal_invalidatepage(journal_t *,
864 struct page *, unsigned int, unsigned int);
865extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
866extern int journal_stop(handle_t *);
867extern int journal_flush (journal_t *);
868extern void journal_lock_updates (journal_t *);
869extern void journal_unlock_updates (journal_t *);
870
871extern journal_t * journal_init_dev(struct block_device *bdev,
872 struct block_device *fs_dev,
873 int start, int len, int bsize);
874extern journal_t * journal_init_inode (struct inode *);
875extern int journal_update_format (journal_t *);
876extern int journal_check_used_features
877 (journal_t *, unsigned long, unsigned long, unsigned long);
878extern int journal_check_available_features
879 (journal_t *, unsigned long, unsigned long, unsigned long);
880extern int journal_set_features
881 (journal_t *, unsigned long, unsigned long, unsigned long);
882extern int journal_create (journal_t *);
883extern int journal_load (journal_t *journal);
884extern int journal_destroy (journal_t *);
885extern int journal_recover (journal_t *journal);
886extern int journal_wipe (journal_t *, int);
887extern int journal_skip_recovery (journal_t *);
888extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
889 int);
890extern void journal_abort (journal_t *, int);
891extern int journal_errno (journal_t *);
892extern void journal_ack_err (journal_t *);
893extern int journal_clear_err (journal_t *);
894extern int journal_bmap(journal_t *, unsigned int, unsigned int *);
895extern int journal_force_commit(journal_t *);
896
897/*
898 * journal_head management
899 */
900struct journal_head *journal_add_journal_head(struct buffer_head *bh);
901struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
902void journal_put_journal_head(struct journal_head *jh);
903
904/*
905 * handle management
906 */
907extern struct kmem_cache *jbd_handle_cache;
908
909static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
910{
911 return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
912}
913
914static inline void jbd_free_handle(handle_t *handle)
915{
916 kmem_cache_free(jbd_handle_cache, handle);
917}
918
919/* Primary revoke support */
920#define JOURNAL_REVOKE_DEFAULT_HASH 256
921extern int journal_init_revoke(journal_t *, int);
922extern void journal_destroy_revoke_caches(void);
923extern int journal_init_revoke_caches(void);
924
925extern void journal_destroy_revoke(journal_t *);
926extern int journal_revoke (handle_t *,
927 unsigned int, struct buffer_head *);
928extern int journal_cancel_revoke(handle_t *, struct journal_head *);
929extern void journal_write_revoke_records(journal_t *,
930 transaction_t *, int);
931
932/* Recovery revoke support */
933extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
934extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
935extern void journal_clear_revoke(journal_t *);
936extern void journal_switch_revoke_table(journal_t *journal);
937extern void journal_clear_buffer_revoked_flags(journal_t *journal);
938
939/*
940 * The log thread user interface:
941 *
942 * Request space in the current transaction, and force transaction commit
943 * transitions on demand.
944 */
945
946int __log_space_left(journal_t *); /* Called with journal locked */
947int log_start_commit(journal_t *journal, tid_t tid);
948int __log_start_commit(journal_t *journal, tid_t tid);
949int journal_start_commit(journal_t *journal, tid_t *tid);
950int journal_force_commit_nested(journal_t *journal);
951int log_wait_commit(journal_t *journal, tid_t tid);
952int log_do_checkpoint(journal_t *journal);
953int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
954
955void __log_wait_for_space(journal_t *journal);
956extern void __journal_drop_transaction(journal_t *, transaction_t *);
957extern int cleanup_journal_tail(journal_t *);
958
959/*
960 * is_journal_abort
961 *
962 * Simple test wrapper function to test the JFS_ABORT state flag. This
963 * bit, when set, indicates that we have had a fatal error somewhere,
964 * either inside the journaling layer or indicated to us by the client
965 * (eg. ext3), and that we should not commit any further
966 * transactions.
967 */
968
969static inline int is_journal_aborted(journal_t *journal)
970{
971 return journal->j_flags & JFS_ABORT;
972}
973
974static inline int is_handle_aborted(handle_t *handle)
975{
976 if (handle->h_aborted)
977 return 1;
978 return is_journal_aborted(handle->h_transaction->t_journal);
979}
980
981static inline void journal_abort_handle(handle_t *handle)
982{
983 handle->h_aborted = 1;
984}
985
986#endif /* __KERNEL__ */
987
988/* Comparison functions for transaction IDs: perform comparisons using
989 * modulo arithmetic so that they work over sequence number wraps. */
990
991static inline int tid_gt(tid_t x, tid_t y)
992{
993 int difference = (x - y);
994 return (difference > 0);
995}
996
997static inline int tid_geq(tid_t x, tid_t y)
998{
999 int difference = (x - y);
1000 return (difference >= 0);
1001}
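The modulo trick is worth a concrete check (standalone sketch, assuming the usual 32-bit unsigned tid_t): a small tid issued just after the sequence counter wraps still compares as newer than a large tid issued just before the wrap.

#include <stdio.h>

typedef unsigned int tid_t;

static int tid_gt(tid_t x, tid_t y)
{
        int difference = (x - y);       /* unsigned subtraction wraps, then is read as signed */

        return (difference > 0);
}

int main(void)
{
        printf("%d\n", tid_gt(5U, 0xfffffffeU));        /* 1: 5 was issued after the wrap */
        printf("%d\n", tid_gt(0xfffffffeU, 5U));        /* 0 */
        return 0;
}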
1002
1003extern int journal_blocks_per_page(struct inode *inode);
1004
1005/*
1006 * Return the minimum number of blocks which must be free in the journal
1007 * before a new transaction may be started. Must be called under j_state_lock.
1008 */
1009static inline int jbd_space_needed(journal_t *journal)
1010{
1011 int nblocks = journal->j_max_transaction_buffers;
1012 if (journal->j_committing_transaction)
1013 nblocks += journal->j_committing_transaction->
1014 t_outstanding_credits;
1015 return nblocks;
1016}
1017
1018/*
1019 * Definitions which augment the buffer_head layer
1020 */
1021
1022/* journaling buffer types */
1023#define BJ_None 0 /* Not journaled */
1024#define BJ_SyncData 1 /* Normal data: flush before commit */
1025#define BJ_Metadata 2 /* Normal journaled metadata */
1026#define BJ_Forget 3 /* Buffer superseded by this transaction */
1027#define BJ_IO 4 /* Buffer is for temporary IO use */
1028#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
1029#define BJ_LogCtl 6 /* Buffer contains log descriptors */
1030#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
1031#define BJ_Locked 8 /* Locked for I/O during commit */
1032#define BJ_Types 9
1033
1034extern int jbd_blocks_per_page(struct inode *inode);
1035
1036#ifdef __KERNEL__
1037
1038#define buffer_trace_init(bh) do {} while (0)
1039#define print_buffer_fields(bh) do {} while (0)
1040#define print_buffer_trace(bh) do {} while (0)
1041#define BUFFER_TRACE(bh, info) do {} while (0)
1042#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
1043#define JBUFFER_TRACE(jh, info) do {} while (0)
1044
1045#endif /* __KERNEL__ */
1046
1047#endif /* _LINUX_JBD_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index edb640ae9a94..df07e78487d5 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/bit_spinlock.h>
32#include <crypto/hash.h> 33#include <crypto/hash.h>
33#endif 34#endif
34 35
@@ -336,7 +337,45 @@ BUFFER_FNS(Freed, freed)
336BUFFER_FNS(Shadow, shadow) 337BUFFER_FNS(Shadow, shadow)
337BUFFER_FNS(Verified, verified) 338BUFFER_FNS(Verified, verified)
338 339
339#include <linux/jbd_common.h> 340static inline struct buffer_head *jh2bh(struct journal_head *jh)
341{
342 return jh->b_bh;
343}
344
345static inline struct journal_head *bh2jh(struct buffer_head *bh)
346{
347 return bh->b_private;
348}
349
350static inline void jbd_lock_bh_state(struct buffer_head *bh)
351{
352 bit_spin_lock(BH_State, &bh->b_state);
353}
354
355static inline int jbd_trylock_bh_state(struct buffer_head *bh)
356{
357 return bit_spin_trylock(BH_State, &bh->b_state);
358}
359
360static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
361{
362 return bit_spin_is_locked(BH_State, &bh->b_state);
363}
364
365static inline void jbd_unlock_bh_state(struct buffer_head *bh)
366{
367 bit_spin_unlock(BH_State, &bh->b_state);
368}
369
370static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
371{
372 bit_spin_lock(BH_JournalHead, &bh->b_state);
373}
374
375static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
376{
377 bit_spin_unlock(BH_JournalHead, &bh->b_state);
378}
340 379
341#define J_ASSERT(assert) BUG_ON(!(assert)) 380#define J_ASSERT(assert) BUG_ON(!(assert))
342 381
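A kernel-context sketch of how the relocated helpers are typically used (hypothetical caller, not part of this patch): journal_head state reads and updates on a buffer are bracketed by the BH_State bit spinlock.

static bool example_jh_is_journaled(struct buffer_head *bh)
{
        bool ret;

        jbd_lock_bh_state(bh);          /* spins on the BH_State bit in bh->b_state */
        ret = buffer_jbd(bh) && bh2jh(bh)->b_transaction != NULL;
        jbd_unlock_bh_state(bh);
        return ret;
}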
@@ -1042,8 +1081,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1042extern void jbd2_journal_commit_transaction(journal_t *); 1081extern void jbd2_journal_commit_transaction(journal_t *);
1043 1082
1044/* Checkpoint list management */ 1083/* Checkpoint list management */
1045void __jbd2_journal_clean_checkpoint_list(journal_t *journal); 1084void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
1046int __jbd2_journal_remove_checkpoint(struct journal_head *); 1085int __jbd2_journal_remove_checkpoint(struct journal_head *);
1086void jbd2_journal_destroy_checkpoint(journal_t *journal);
1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); 1087void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
1048 1088
1049 1089
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc53432355f..000000000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
1#ifndef _LINUX_JBD_STATE_H
2#define _LINUX_JBD_STATE_H
3
4#include <linux/bit_spinlock.h>
5
6static inline struct buffer_head *jh2bh(struct journal_head *jh)
7{
8 return jh->b_bh;
9}
10
11static inline struct journal_head *bh2jh(struct buffer_head *bh)
12{
13 return bh->b_private;
14}
15
16static inline void jbd_lock_bh_state(struct buffer_head *bh)
17{
18 bit_spin_lock(BH_State, &bh->b_state);
19}
20
21static inline int jbd_trylock_bh_state(struct buffer_head *bh)
22{
23 return bit_spin_trylock(BH_State, &bh->b_state);
24}
25
26static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
27{
28 return bit_spin_is_locked(BH_State, &bh->b_state);
29}
30
31static inline void jbd_unlock_bh_state(struct buffer_head *bh)
32{
33 bit_spin_unlock(BH_State, &bh->b_state);
34}
35
36static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
37{
38 bit_spin_lock(BH_JournalHead, &bh->b_state);
39}
40
41static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
42{
43 bit_spin_unlock(BH_JournalHead, &bh->b_state);
44}
45
46#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 535fd3bb1ba8..5fdc55312334 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -351,7 +351,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
351 * directly here and from __msecs_to_jiffies() in the case where 351 * directly here and from __msecs_to_jiffies() in the case where
352 * constant folding is not possible. 352 * constant folding is not possible.
353 */ 353 */
354static inline unsigned long msecs_to_jiffies(const unsigned int m) 354static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
355{ 355{
356 if (__builtin_constant_p(m)) { 356 if (__builtin_constant_p(m)) {
357 if ((int)m < 0) 357 if ((int)m < 0)
@@ -363,18 +363,11 @@ static inline unsigned long msecs_to_jiffies(const unsigned int m)
363} 363}
364 364
365extern unsigned long __usecs_to_jiffies(const unsigned int u); 365extern unsigned long __usecs_to_jiffies(const unsigned int u);
366#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) 366#if !(USEC_PER_SEC % HZ)
367static inline unsigned long _usecs_to_jiffies(const unsigned int u) 367static inline unsigned long _usecs_to_jiffies(const unsigned int u)
368{ 368{
369 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); 369 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
370} 370}
371#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
372static inline unsigned long _usecs_to_jiffies(const unsigned int u)
373{
374 return u * (HZ / USEC_PER_SEC);
375}
376static inline unsigned long _usecs_to_jiffies(const unsigned int u)
377{
378#else 371#else
379static inline unsigned long _usecs_to_jiffies(const unsigned int u) 372static inline unsigned long _usecs_to_jiffies(const unsigned int u)
380{ 373{
@@ -405,7 +398,7 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
405 * directly here and from __usecs_to_jiffies() in the case where 398 * directly here and from __usecs_to_jiffies() in the case where
406 * constant folding is not possible. 399 * constant folding is not possible.
407 */ 400 */
408static inline unsigned long usecs_to_jiffies(const unsigned int u) 401static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
409{ 402{
410 if (__builtin_constant_p(u)) { 403 if (__builtin_constant_p(u)) {
411 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) 404 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
@@ -416,9 +409,25 @@ static inline unsigned long usecs_to_jiffies(const unsigned int u)
416 } 409 }
417} 410}
418 411
419extern unsigned long timespec_to_jiffies(const struct timespec *value); 412extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
420extern void jiffies_to_timespec(const unsigned long jiffies, 413extern void jiffies_to_timespec64(const unsigned long jiffies,
421 struct timespec *value); 414 struct timespec64 *value);
415static inline unsigned long timespec_to_jiffies(const struct timespec *value)
416{
417 struct timespec64 ts = timespec_to_timespec64(*value);
418
419 return timespec64_to_jiffies(&ts);
420}
421
422static inline void jiffies_to_timespec(const unsigned long jiffies,
423 struct timespec *value)
424{
425 struct timespec64 ts;
426
427 jiffies_to_timespec64(jiffies, &ts);
428 *value = timespec64_to_timespec(ts);
429}
430
422extern unsigned long timeval_to_jiffies(const struct timeval *value); 431extern unsigned long timeval_to_jiffies(const struct timeval *value);
423extern void jiffies_to_timeval(const unsigned long jiffies, 432extern void jiffies_to_timeval(const unsigned long jiffies,
424 struct timeval *value); 433 struct timeval *value);
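The surviving _usecs_to_jiffies() branch rounds up so a non-zero delay never collapses to zero jiffies; a standalone sketch of that arithmetic, with HZ=1000 assumed purely for the example (any HZ that divides USEC_PER_SEC behaves the same way):

#include <stdio.h>

#define USEC_PER_SEC    1000000UL
#define HZ              1000UL          /* assumed for the example */

static unsigned long usecs_to_jiffies_roundup(unsigned long u)
{
        return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}

int main(void)
{
        printf("%lu\n", usecs_to_jiffies_roundup(1));           /* 1: never rounds down to 0 */
        printf("%lu\n", usecs_to_jiffies_roundup(1000));        /* 1 */
        printf("%lu\n", usecs_to_jiffies_roundup(1001));        /* 2 */
        return 0;
}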
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473f226b..7f653e8f6690 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,52 @@
7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com> 7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
9 * 9 *
10 * DEPRECATED API:
11 *
12 * The use of 'struct static_key' directly, is now DEPRECATED. In addition
13 * static_key_{true,false}() is also DEPRECATED. IE DO NOT use the following:
14 *
15 * struct static_key false = STATIC_KEY_INIT_FALSE;
16 * struct static_key true = STATIC_KEY_INIT_TRUE;
17 * static_key_true()
18 * static_key_false()
19 *
20 * The updated API replacements are:
21 *
22 * DEFINE_STATIC_KEY_TRUE(key);
23 * DEFINE_STATIC_KEY_FALSE(key);
24 * static_branch_likely()
25 * static_branch_unlikely()
26 *
10 * Jump labels provide an interface to generate dynamic branches using 27 * Jump labels provide an interface to generate dynamic branches using
11 * self-modifying code. Assuming toolchain and architecture support, the result 28 * self-modifying code. Assuming toolchain and architecture support, if we
12 * of a "if (static_key_false(&key))" statement is an unconditional branch (which 29 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
13 * defaults to false - and the true block is placed out of line). 30 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
31 * (which defaults to false - and the true block is placed out of line).
32 * Similarly, we can define an initially true key via
33 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
34 * "if (static_branch_unlikely(&key))", in which case we will generate an
35 * unconditional branch to the out-of-line true branch. Keys that are
36 * initially true or false can be used in both static_branch_unlikely()
37 * and static_branch_likely() statements.
38 *
39 * At runtime we can change the branch target by setting the key
40 * to true via a call to static_branch_enable(), or false using
41 * static_branch_disable(). If the direction of the branch is switched by
42 * these calls then we run-time modify the branch target via a
43 * no-op -> jump or jump -> no-op conversion. For example, for an
44 * initially false key that is used in an "if (static_branch_unlikely(&key))"
45 * statement, setting the key to true requires us to patch in a jump
46 * to the out-of-line true branch.
14 * 47 *
15 * However at runtime we can change the branch target using 48 * In addition to static_branch_{enable,disable}, we can also reference count
16 * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key 49 * the key or branch direction via static_branch_{inc,dec}. Thus,
17 * object, and for as long as there are references all branches referring to 50 * static_branch_inc() can be thought of as a 'make more true' and
18 * that particular key will point to the (out of line) true block. 51 * static_branch_dec() as a 'make more false'. The inc()/dec()
52 * interface is meant to be used exclusively for a given key; do not mix it
53 * with static_branch_{enable,disable}() on the same key.
19 * 54 *
20 * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions 55 * Since this relies on modifying code, the branch modifying functions
21 * must be considered absolute slow paths (machine wide synchronization etc.). 56 * must be considered absolute slow paths (machine wide synchronization etc.).
22 * OTOH, since the affected branches are unconditional, their runtime overhead 57 * OTOH, since the affected branches are unconditional, their runtime overhead
23 * will be absolutely minimal, esp. in the default (off) case where the total 58 * will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +64,10 @@
29 * cause significant performance degradation. Struct static_key_deferred and 64 * cause significant performance degradation. Struct static_key_deferred and
30 * static_key_slow_dec_deferred() provide for this. 65 * static_key_slow_dec_deferred() provide for this.
31 * 66 *
32 * Lacking toolchain and or architecture support, jump labels fall back to a simple 67 * Lacking toolchain and or architecture support, static keys fall back to a
33 * conditional branch. 68 * simple conditional branch.
34 *
35 * struct static_key my_key = STATIC_KEY_INIT_TRUE;
36 *
37 * if (static_key_true(&my_key)) {
38 * }
39 * 69 *
40 * will result in the true case being in-line and starts the key with a single 70 * Additional babbling in: Documentation/static-keys.txt
41 * reference. Mixing static_key_true() and static_key_false() on the same key is not
42 * allowed.
43 *
44 * Not initializing the key (static data is initialized to 0s anyway) is the
45 * same as using STATIC_KEY_INIT_FALSE.
46 */ 71 */
47 72
48#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) 73#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
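To make the updated API described above concrete, a hedged kernel-context sketch follows; the key name and the fast-path code are hypothetical, not from this patch.

/* Off by default: the unlikely() side costs a NOP until the key is enabled. */
static DEFINE_STATIC_KEY_FALSE(my_debug_key);

static inline void my_fast_path(void)
{
        if (static_branch_unlikely(&my_debug_key))
                pr_info("debug path taken\n");          /* out of line until enabled */
}

static void my_debug_set(bool on)
{
        if (on)
                static_branch_enable(&my_debug_key);    /* patches NOP -> JMP */
        else
                static_branch_disable(&my_debug_key);   /* patches JMP -> NOP */
}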
@@ -86,8 +111,8 @@ struct static_key {
86#ifndef __ASSEMBLY__ 111#ifndef __ASSEMBLY__
87 112
88enum jump_label_type { 113enum jump_label_type {
89 JUMP_LABEL_DISABLE = 0, 114 JUMP_LABEL_NOP = 0,
90 JUMP_LABEL_ENABLE, 115 JUMP_LABEL_JMP,
91}; 116};
92 117
93struct module; 118struct module;
@@ -101,33 +126,18 @@ static inline int static_key_count(struct static_key *key)
101 126
102#ifdef HAVE_JUMP_LABEL 127#ifdef HAVE_JUMP_LABEL
103 128
104#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL 129#define JUMP_TYPE_FALSE 0UL
105#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL 130#define JUMP_TYPE_TRUE 1UL
106#define JUMP_LABEL_TYPE_MASK 1UL 131#define JUMP_TYPE_MASK 1UL
107
108static
109inline struct jump_entry *jump_label_get_entries(struct static_key *key)
110{
111 return (struct jump_entry *)((unsigned long)key->entries
112 & ~JUMP_LABEL_TYPE_MASK);
113}
114
115static inline bool jump_label_get_branch_default(struct static_key *key)
116{
117 if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
118 JUMP_LABEL_TYPE_TRUE_BRANCH)
119 return true;
120 return false;
121}
122 132
123static __always_inline bool static_key_false(struct static_key *key) 133static __always_inline bool static_key_false(struct static_key *key)
124{ 134{
125 return arch_static_branch(key); 135 return arch_static_branch(key, false);
126} 136}
127 137
128static __always_inline bool static_key_true(struct static_key *key) 138static __always_inline bool static_key_true(struct static_key *key)
129{ 139{
130 return !static_key_false(key); 140 return !arch_static_branch(key, true);
131} 141}
132 142
133extern struct jump_entry __start___jump_table[]; 143extern struct jump_entry __start___jump_table[];
@@ -145,12 +155,12 @@ extern void static_key_slow_inc(struct static_key *key);
145extern void static_key_slow_dec(struct static_key *key); 155extern void static_key_slow_dec(struct static_key *key);
146extern void jump_label_apply_nops(struct module *mod); 156extern void jump_label_apply_nops(struct module *mod);
147 157
148#define STATIC_KEY_INIT_TRUE ((struct static_key) \ 158#define STATIC_KEY_INIT_TRUE \
149 { .enabled = ATOMIC_INIT(1), \ 159 { .enabled = ATOMIC_INIT(1), \
150 .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH }) 160 .entries = (void *)JUMP_TYPE_TRUE }
151#define STATIC_KEY_INIT_FALSE ((struct static_key) \ 161#define STATIC_KEY_INIT_FALSE \
152 { .enabled = ATOMIC_INIT(0), \ 162 { .enabled = ATOMIC_INIT(0), \
153 .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH }) 163 .entries = (void *)JUMP_TYPE_FALSE }
154 164
155#else /* !HAVE_JUMP_LABEL */ 165#else /* !HAVE_JUMP_LABEL */
156 166
@@ -198,10 +208,8 @@ static inline int jump_label_apply_nops(struct module *mod)
198 return 0; 208 return 0;
199} 209}
200 210
201#define STATIC_KEY_INIT_TRUE ((struct static_key) \ 211#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
202 { .enabled = ATOMIC_INIT(1) }) 212#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
203#define STATIC_KEY_INIT_FALSE ((struct static_key) \
204 { .enabled = ATOMIC_INIT(0) })
205 213
206#endif /* HAVE_JUMP_LABEL */ 214#endif /* HAVE_JUMP_LABEL */
207 215
@@ -213,6 +221,157 @@ static inline bool static_key_enabled(struct static_key *key)
213 return static_key_count(key) > 0; 221 return static_key_count(key) > 0;
214} 222}
215 223
224static inline void static_key_enable(struct static_key *key)
225{
226 int count = static_key_count(key);
227
228 WARN_ON_ONCE(count < 0 || count > 1);
229
230 if (!count)
231 static_key_slow_inc(key);
232}
233
234static inline void static_key_disable(struct static_key *key)
235{
236 int count = static_key_count(key);
237
238 WARN_ON_ONCE(count < 0 || count > 1);
239
240 if (count)
241 static_key_slow_dec(key);
242}
243
244/* -------------------------------------------------------------------------- */
245
246/*
247 * Two type wrappers around static_key, such that we can use compile time
248 * type differentiation to emit the right code.
249 *
250 * All the below code is macros in order to play type games.
251 */
252
253struct static_key_true {
254 struct static_key key;
255};
256
257struct static_key_false {
258 struct static_key key;
259};
260
261#define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, }
262#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
263
264#define DEFINE_STATIC_KEY_TRUE(name) \
265 struct static_key_true name = STATIC_KEY_TRUE_INIT
266
267#define DEFINE_STATIC_KEY_FALSE(name) \
268 struct static_key_false name = STATIC_KEY_FALSE_INIT
269
270#ifdef HAVE_JUMP_LABEL
271
272/*
273 * Combine the right initial value (type) with the right branch order
274 * to generate the desired result.
275 *
276 *
277 * type\branch| likely (1) | unlikely (0)
278 * -----------+-----------------------+------------------
279 * | |
280 * true (1) | ... | ...
281 * | NOP | JMP L
282 * | <br-stmts> | 1: ...
283 * | L: ... |
284 * | |
285 * | | L: <br-stmts>
286 * | | jmp 1b
287 * | |
288 * -----------+-----------------------+------------------
289 * | |
290 * false (0) | ... | ...
291 * | JMP L | NOP
292 * | <br-stmts> | 1: ...
293 * | L: ... |
294 * | |
295 * | | L: <br-stmts>
296 * | | jmp 1b
297 * | |
298 * -----------+-----------------------+------------------
299 *
300 * The initial value is encoded in the LSB of static_key::entries,
301 * type: 0 = false, 1 = true.
302 *
303 * The branch type is encoded in the LSB of jump_entry::key,
304 * branch: 0 = unlikely, 1 = likely.
305 *
306 * This gives the following logic table:
307 *
 308 * enabled type branch instruction
309 * -----------------------------+-----------
310 * 0 0 0 | NOP
311 * 0 0 1 | JMP
312 * 0 1 0 | NOP
313 * 0 1 1 | JMP
314 *
315 * 1 0 0 | JMP
316 * 1 0 1 | NOP
317 * 1 1 0 | JMP
318 * 1 1 1 | NOP
319 *
320 * Which gives the following functions:
321 *
322 * dynamic: instruction = enabled ^ branch
323 * static: instruction = type ^ branch
324 *
325 * See jump_label_type() / jump_label_init_type().
326 */
327
328extern bool ____wrong_branch_error(void);
329
330#define static_branch_likely(x) \
331({ \
332 bool branch; \
333 if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
334 branch = !arch_static_branch(&(x)->key, true); \
335 else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
336 branch = !arch_static_branch_jump(&(x)->key, true); \
337 else \
338 branch = ____wrong_branch_error(); \
339 branch; \
340})
341
342#define static_branch_unlikely(x) \
343({ \
344 bool branch; \
345 if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \
346 branch = arch_static_branch_jump(&(x)->key, false); \
347 else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
348 branch = arch_static_branch(&(x)->key, false); \
349 else \
350 branch = ____wrong_branch_error(); \
351 branch; \
352})
353
354#else /* !HAVE_JUMP_LABEL */
355
356#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
357#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
358
359#endif /* HAVE_JUMP_LABEL */
360
361/*
362 * Advanced usage; refcount, branch is enabled when: count != 0
363 */
364
365#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
366#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
367
368/*
369 * Normal usage; boolean enable/disable.
370 */
371
372#define static_branch_enable(x) static_key_enable(&(x)->key)
373#define static_branch_disable(x) static_key_disable(&(x)->key)
374
216#endif /* _LINUX_JUMP_LABEL_H */ 375#endif /* _LINUX_JUMP_LABEL_H */
217 376
218#endif /* __ASSEMBLY__ */ 377#endif /* __ASSEMBLY__ */
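To make the truth table in the jump_label.h comment above concrete, here is a minimal userspace sketch (plain C, not kernel code) that derives the same JMP/NOP choices from the two XOR rules quoted in the comment; the strings only stand in for the instruction patched at the branch site.

/*
 * Sketch of the jump-label logic table: the instruction initially emitted
 * at a static branch site is type ^ branch, and at runtime it is
 * re-patched to enabled ^ branch (1 = JMP to out-of-line code, 0 = NOP).
 */
#include <stdio.h>

static const char *insn(int bit)
{
	return bit ? "JMP" : "NOP";
}

int main(void)
{
	int enabled, type, branch;

	printf("enabled type branch | dynamic static\n");
	for (enabled = 0; enabled <= 1; enabled++)
		for (type = 0; type <= 1; type++)
			for (branch = 0; branch <= 1; branch++)
				printf("   %d      %d     %d    |   %s     %s\n",
				       enabled, type, branch,
				       insn(enabled ^ branch), /* runtime patching */
				       insn(type ^ branch));   /* initial image */
	return 0;
}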
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5486d777b706..4b9f85c963d0 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -10,11 +10,19 @@ struct vm_struct;
10#ifdef CONFIG_KASAN 10#ifdef CONFIG_KASAN
11 11
12#define KASAN_SHADOW_SCALE_SHIFT 3 12#define KASAN_SHADOW_SCALE_SHIFT 3
13#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
14 13
15#include <asm/kasan.h> 14#include <asm/kasan.h>
15#include <asm/pgtable.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17 17
18extern unsigned char kasan_zero_page[PAGE_SIZE];
19extern pte_t kasan_zero_pte[PTRS_PER_PTE];
20extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
21extern pud_t kasan_zero_pud[PTRS_PER_PUD];
22
23void kasan_populate_zero_shadow(const void *shadow_start,
24 const void *shadow_end);
25
18static inline void *kasan_mem_to_shadow(const void *addr) 26static inline void *kasan_mem_to_shadow(const void *addr)
19{ 27{
20 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT) 28 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
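The kasan_mem_to_shadow() helper above maps every 8 bytes of address space to one shadow byte. A small userspace sketch of that arithmetic follows, using the historical x86-64 offset purely as an example value; the real constant now comes from <asm/kasan.h> rather than the removed generic define.

/*
 * Illustrative only, not kernel code: shadow = (addr >> 3) + offset.
 */
#include <stdio.h>
#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET      0xdffffc0000000000ULL /* example x86-64 value */

static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t addr = 0xffff880000000000ULL; /* sample kernel address */

	printf("addr   %#llx\n", (unsigned long long)addr);
	printf("shadow %#llx\n", (unsigned long long)mem_to_shadow(addr));
	return 0;
}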
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 123be25ea15a..5d4e9c4b821d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -266,6 +266,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
266} 266}
267 267
268int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen); 268int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
269size_t kernfs_path_len(struct kernfs_node *kn);
269char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, 270char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
270 size_t buflen); 271 size_t buflen);
271void pr_cont_kernfs_name(struct kernfs_node *kn); 272void pr_cont_kernfs_name(struct kernfs_node *kn);
@@ -332,6 +333,9 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
332static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) 333static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
333{ return -ENOSYS; } 334{ return -ENOSYS; }
334 335
336static inline size_t kernfs_path_len(struct kernfs_node *kn)
337{ return 0; }
338
335static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf, 339static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
336 size_t buflen) 340 size_t buflen)
337{ return NULL; } 341{ return NULL; }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e804306ef5e8..d140b1e9faa7 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -16,7 +16,7 @@
16 16
17#include <uapi/linux/kexec.h> 17#include <uapi/linux/kexec.h>
18 18
19#ifdef CONFIG_KEXEC 19#ifdef CONFIG_KEXEC_CORE
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/linkage.h> 21#include <linux/linkage.h>
22#include <linux/compat.h> 22#include <linux/compat.h>
@@ -318,12 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
318size_t crash_get_memory_size(void); 318size_t crash_get_memory_size(void);
319void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); 319void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
320 320
321#else /* !CONFIG_KEXEC */ 321int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
322 unsigned long buf_len);
323void * __weak arch_kexec_kernel_image_load(struct kimage *image);
324int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
325int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
326 unsigned long buf_len);
327int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
328 Elf_Shdr *sechdrs, unsigned int relsec);
329int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
330 unsigned int relsec);
331
332#else /* !CONFIG_KEXEC_CORE */
322struct pt_regs; 333struct pt_regs;
323struct task_struct; 334struct task_struct;
324static inline void crash_kexec(struct pt_regs *regs) { } 335static inline void crash_kexec(struct pt_regs *regs) { }
325static inline int kexec_should_crash(struct task_struct *p) { return 0; } 336static inline int kexec_should_crash(struct task_struct *p) { return 0; }
326#endif /* CONFIG_KEXEC */ 337#define kexec_in_progress false
338#endif /* CONFIG_KEXEC_CORE */
327 339
328#endif /* !defined(__ASSEMBLY__) */ 340#endif /* !defined(__ASSEMBLY__) */
329 341
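The arch_kexec_*() declarations above rely on weak symbols: the generic kexec_file code supplies __weak defaults that an architecture may override with strong definitions in its own object files. A minimal userspace sketch of that linker pattern (GCC/Clang attribute syntax; the hook name is illustrative only):

#include <stdio.h>

/* Generic fallback; an arch-specific file could provide a strong override. */
int __attribute__((weak)) arch_example_hook(void *buf, unsigned long len)
{
	(void)buf;
	(void)len;
	return -1; /* "not supported" by default */
}

int main(void)
{
	printf("hook returned %d\n", arch_example_hook(NULL, 0));
	return 0;
}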
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 61e5b723ae73..953f283f8451 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i);
63extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, 63extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
64 struct klist_node *n); 64 struct klist_node *n);
65extern void klist_iter_exit(struct klist_iter *i); 65extern void klist_iter_exit(struct klist_iter *i);
66extern struct klist_node *klist_prev(struct klist_iter *i);
66extern struct klist_node *klist_next(struct klist_iter *i); 67extern struct klist_node *klist_next(struct klist_iter *i);
67 68
68#endif 69#endif
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 0555cc66a15b..fcfd2bf14d3f 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -85,8 +85,6 @@ enum umh_disable_depth {
85 UMH_DISABLED, 85 UMH_DISABLED,
86}; 86};
87 87
88extern void usermodehelper_init(void);
89
90extern int __usermodehelper_disable(enum umh_disable_depth depth); 88extern int __usermodehelper_disable(enum umh_disable_depth depth);
91extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); 89extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
92 90
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1ab54754a86d..8f6849084248 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern void show_registers(struct pt_regs *regs);
267extern void kprobes_inc_nmissed_count(struct kprobe *p); 267extern void kprobes_inc_nmissed_count(struct kprobe *p);
268extern bool arch_within_kprobe_blacklist(unsigned long addr); 268extern bool arch_within_kprobe_blacklist(unsigned long addr);
269 269
270extern bool within_kprobe_blacklist(unsigned long addr);
271
270struct kprobe_insn_cache { 272struct kprobe_insn_cache {
271 struct mutex mutex; 273 struct mutex mutex;
272 void *(*alloc)(void); /* allocate insn page */ 274 void *(*alloc)(void); /* allocate insn page */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 13d55206ccf6..e691b6a23f72 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -11,7 +11,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
11 const char namefmt[], ...); 11 const char namefmt[], ...);
12 12
13#define kthread_create(threadfn, data, namefmt, arg...) \ 13#define kthread_create(threadfn, data, namefmt, arg...) \
14 kthread_create_on_node(threadfn, data, -1, namefmt, ##arg) 14 kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
15 15
16 16
17struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), 17struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
@@ -38,6 +38,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
38}) 38})
39 39
40void kthread_bind(struct task_struct *k, unsigned int cpu); 40void kthread_bind(struct task_struct *k, unsigned int cpu);
41void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
41int kthread_stop(struct task_struct *k); 42int kthread_stop(struct task_struct *k);
42bool kthread_should_stop(void); 43bool kthread_should_stop(void);
43bool kthread_should_park(void); 44bool kthread_should_park(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 05e99b8ef465..1bef9e21e725 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -139,6 +139,7 @@ static inline bool is_error_page(struct page *page)
139#define KVM_REQ_DISABLE_IBS 24 139#define KVM_REQ_DISABLE_IBS 24
140#define KVM_REQ_APIC_PAGE_RELOAD 25 140#define KVM_REQ_APIC_PAGE_RELOAD 25
141#define KVM_REQ_SMI 26 141#define KVM_REQ_SMI 26
142#define KVM_REQ_HV_CRASH 27
142 143
143#define KVM_USERSPACE_IRQ_SOURCE_ID 0 144#define KVM_USERSPACE_IRQ_SOURCE_ID 0
144#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 145#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -241,6 +242,7 @@ struct kvm_vcpu {
241 int sigset_active; 242 int sigset_active;
242 sigset_t sigset; 243 sigset_t sigset;
243 struct kvm_vcpu_stat stat; 244 struct kvm_vcpu_stat stat;
245 unsigned int halt_poll_ns;
244 246
245#ifdef CONFIG_HAS_IOMEM 247#ifdef CONFIG_HAS_IOMEM
246 int mmio_needed; 248 int mmio_needed;
@@ -363,9 +365,6 @@ struct kvm {
363 struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM]; 365 struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
364 struct srcu_struct srcu; 366 struct srcu_struct srcu;
365 struct srcu_struct irq_srcu; 367 struct srcu_struct irq_srcu;
366#ifdef CONFIG_KVM_APIC_ARCHITECTURE
367 u32 bsp_vcpu_id;
368#endif
369 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; 368 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
370 atomic_t online_vcpus; 369 atomic_t online_vcpus;
371 int last_boosted_vcpu; 370 int last_boosted_vcpu;
@@ -424,8 +423,15 @@ struct kvm {
424#define vcpu_unimpl(vcpu, fmt, ...) \ 423#define vcpu_unimpl(vcpu, fmt, ...) \
425 kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) 424 kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
426 425
426#define vcpu_debug(vcpu, fmt, ...) \
427 kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
428
427static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) 429static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
428{ 430{
431 /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
432 * the caller has read kvm->online_vcpus before (as is the case
433 * for kvm_for_each_vcpu, for example).
434 */
429 smp_rmb(); 435 smp_rmb();
430 return kvm->vcpus[i]; 436 return kvm->vcpus[i];
431} 437}
@@ -1055,22 +1061,9 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
1055#endif /* CONFIG_HAVE_KVM_EVENTFD */ 1061#endif /* CONFIG_HAVE_KVM_EVENTFD */
1056 1062
1057#ifdef CONFIG_KVM_APIC_ARCHITECTURE 1063#ifdef CONFIG_KVM_APIC_ARCHITECTURE
1058static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
1059{
1060 return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
1061}
1062
1063static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
1064{
1065 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
1066}
1067
1068bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); 1064bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
1069
1070#else 1065#else
1071
1072static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } 1066static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
1073
1074#endif 1067#endif
1075 1068
1076static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) 1069static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
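The new comment in kvm_get_vcpu() describes a release/acquire pairing: the creator fills the vcpu slot before bumping online_vcpus, and readers order their loads the other way around. Below is a userspace sketch of the same publish/consume pattern using C11 atomics and pthreads; the names are illustrative, not the KVM API. Build with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_SLOTS 8

static int slots[MAX_SLOTS];
static atomic_int online; /* number of published slots, starts at 0 */

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < MAX_SLOTS; i++) {
		slots[i] = 100 + i;                        /* initialise slot */
		atomic_store_explicit(&online, i + 1,
				      memory_order_release); /* then publish */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int n;

	pthread_create(&tid, NULL, producer, NULL);

	/* Consumer: acquire-load the count, then the slots are safe to read. */
	do {
		n = atomic_load_explicit(&online, memory_order_acquire);
	} while (n < MAX_SLOTS);

	for (int i = 0; i < n; i++)
		printf("slot %d = %d\n", i, slots[i]);

	pthread_join(tid, NULL);
	return 0;
}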
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 75e3af01ee32..3f021dc5da8c 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -31,6 +31,9 @@ enum {
31 ND_CMD_ARS_STATUS_MAX = SZ_4K, 31 ND_CMD_ARS_STATUS_MAX = SZ_4K,
32 ND_MAX_MAPPINGS = 32, 32 ND_MAX_MAPPINGS = 32,
33 33
34 /* region flag indicating to direct-map persistent memory by default */
35 ND_REGION_PAGEMAP = 0,
36
34 /* mark newly adjusted resources as requiring a label update */ 37 /* mark newly adjusted resources as requiring a label update */
35 DPA_RESOURCE_ADJUSTED = 1 << 0, 38 DPA_RESOURCE_ADJUSTED = 1 << 0,
36}; 39};
@@ -91,6 +94,7 @@ struct nd_region_desc {
91 void *provider_data; 94 void *provider_data;
92 int num_lanes; 95 int num_lanes;
93 int numa_node; 96 int numa_node;
97 unsigned long flags;
94}; 98};
95 99
96struct nvdimm_bus; 100struct nvdimm_bus;
diff --git a/include/linux/list.h b/include/linux/list.h
index feb773c76ee0..3e3e64a61002 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -672,6 +672,11 @@ static inline void hlist_add_fake(struct hlist_node *n)
672 n->pprev = &n->next; 672 n->pprev = &n->next;
673} 673}
674 674
675static inline bool hlist_fake(struct hlist_node *h)
676{
677 return h->pprev == &h->next;
678}
679
675/* 680/*
676 * Move a list from one list head to another. Fixup the pprev 681 * Move a list from one list head to another. Fixup the pprev
677 * reference of the first entry if it exists. 682 * reference of the first entry if it exists.
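The hlist_fake() helper added above simply tests for the self-referential layout that hlist_add_fake() creates. A self-contained sketch reusing the same two definitions:

#include <stdbool.h>
#include <stdio.h>

struct hlist_node {
	struct hlist_node *next, **pprev;
};

static void hlist_add_fake(struct hlist_node *n)
{
	n->pprev = &n->next; /* node pretends to be on a list of its own */
}

static bool hlist_fake(struct hlist_node *h)
{
	return h->pprev == &h->next;
}

int main(void)
{
	struct hlist_node n = { .next = NULL, .pprev = NULL };

	printf("before: %s\n", hlist_fake(&n) ? "fake" : "real");
	hlist_add_fake(&n);
	printf("after:  %s\n", hlist_fake(&n) ? "fake" : "real");
	return 0;
}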
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0bc095..fd4ca0b4fe0f 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
55 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 55 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
56 */ 56 */
57 57
58#include <linux/atomic.h>
58#include <linux/kernel.h> 59#include <linux/kernel.h>
59#include <asm/cmpxchg.h>
60 60
61struct llist_head { 61struct llist_head {
62 struct llist_node *first; 62 struct llist_node *first;
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 1cc89e9df480..ffb9c9da4f39 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -40,6 +40,11 @@ struct lsm_network_audit {
40 } fam; 40 } fam;
41}; 41};
42 42
43struct lsm_ioctlop_audit {
44 struct path path;
45 u16 cmd;
46};
47
43/* Auxiliary data to use in generating the audit record. */ 48/* Auxiliary data to use in generating the audit record. */
44struct common_audit_data { 49struct common_audit_data {
45 char type; 50 char type;
@@ -53,6 +58,7 @@ struct common_audit_data {
53#define LSM_AUDIT_DATA_KMOD 8 58#define LSM_AUDIT_DATA_KMOD 8
54#define LSM_AUDIT_DATA_INODE 9 59#define LSM_AUDIT_DATA_INODE 9
55#define LSM_AUDIT_DATA_DENTRY 10 60#define LSM_AUDIT_DATA_DENTRY 10
61#define LSM_AUDIT_DATA_IOCTL_OP 11
56 union { 62 union {
57 struct path path; 63 struct path path;
58 struct dentry *dentry; 64 struct dentry *dentry;
@@ -68,6 +74,7 @@ struct common_audit_data {
68 } key_struct; 74 } key_struct;
69#endif 75#endif
70 char *kmod_name; 76 char *kmod_name;
77 struct lsm_ioctlop_audit *op;
71 } u; 78 } u;
72 /* this union contains LSM specific data */ 79 /* this union contains LSM specific data */
73 union { 80 union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 9429f054c323..ec3a6bab29de 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1881,8 +1881,10 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
1881 1881
1882extern int __init security_module_enable(const char *module); 1882extern int __init security_module_enable(const char *module);
1883extern void __init capability_add_hooks(void); 1883extern void __init capability_add_hooks(void);
1884#ifdef CONFIG_SECURITY_YAMA_STACKED 1884#ifdef CONFIG_SECURITY_YAMA
1885void __init yama_add_hooks(void); 1885extern void __init yama_add_hooks(void);
1886#else
1887static inline void __init yama_add_hooks(void) { }
1886#endif 1888#endif
1887 1889
1888#endif /* ! __LINUX_LSM_HOOKS_H */ 1890#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 68c42454439b..74deadb42d76 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -9,7 +9,7 @@
9 9
10#include <linux/of.h> 10#include <linux/of.h>
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/timer.h> 12#include <linux/hrtimer.h>
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/completion.h> 14#include <linux/completion.h>
15 15
@@ -67,7 +67,8 @@ struct mbox_chan_ops {
67 * @txpoll_period: If 'txdone_poll' is in effect, the API polls for 67 * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
 68 * last TX's status after this many milliseconds 68 * last TX's status after this many milliseconds
69 * @of_xlate: Controller driver specific mapping of channel via DT 69 * @of_xlate: Controller driver specific mapping of channel via DT
70 * @poll: API private. Used to poll for TXDONE on all channels. 70 * @poll_hrt: API private. hrtimer used to poll for TXDONE on all
71 * channels.
71 * @node: API private. To hook into list of controllers. 72 * @node: API private. To hook into list of controllers.
72 */ 73 */
73struct mbox_controller { 74struct mbox_controller {
@@ -81,7 +82,7 @@ struct mbox_controller {
81 struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, 82 struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
82 const struct of_phandle_args *sp); 83 const struct of_phandle_args *sp);
83 /* Internal to API */ 84 /* Internal to API */
84 struct timer_list poll; 85 struct hrtimer poll_hrt;
85 struct list_head node; 86 struct list_head node;
86}; 87};
87 88
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index a16b1f9c1aca..0962b2ca628a 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -6,6 +6,7 @@
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7 7
8struct mei_cl_device; 8struct mei_cl_device;
9struct mei_device;
9 10
10typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, 11typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
11 u32 events, void *context); 12 u32 events, void *context);
@@ -17,6 +18,8 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
17 * Drivers for MEI devices will get an mei_cl_device pointer 18 * Drivers for MEI devices will get an mei_cl_device pointer
18 * when being probed and shall use it for doing ME bus I/O. 19 * when being probed and shall use it for doing ME bus I/O.
19 * 20 *
21 * @bus_list: device on the bus list
22 * @bus: parent mei device
20 * @dev: linux driver model device pointer 23 * @dev: linux driver model device pointer
21 * @me_cl: me client 24 * @me_cl: me client
22 * @cl: mei client 25 * @cl: mei client
@@ -25,10 +28,16 @@ typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
25 * @event_cb: Drivers register this callback to get asynchronous ME 28 * @event_cb: Drivers register this callback to get asynchronous ME
26 * events (e.g. Rx buffer pending) notifications. 29 * events (e.g. Rx buffer pending) notifications.
27 * @event_context: event callback run context 30 * @event_context: event callback run context
31 * @events_mask: Events bit mask requested by driver.
28 * @events: Events bitmask sent to the driver. 32 * @events: Events bitmask sent to the driver.
33 *
 34 * @do_match: whether the device can be matched with a driver
35 * @is_added: device is already scanned
29 * @priv_data: client private data 36 * @priv_data: client private data
30 */ 37 */
31struct mei_cl_device { 38struct mei_cl_device {
39 struct list_head bus_list;
40 struct mei_device *bus;
32 struct device dev; 41 struct device dev;
33 42
34 struct mei_me_client *me_cl; 43 struct mei_me_client *me_cl;
@@ -38,8 +47,12 @@ struct mei_cl_device {
38 struct work_struct event_work; 47 struct work_struct event_work;
39 mei_cl_event_cb_t event_cb; 48 mei_cl_event_cb_t event_cb;
40 void *event_context; 49 void *event_context;
50 unsigned long events_mask;
41 unsigned long events; 51 unsigned long events;
42 52
53 unsigned int do_match:1;
54 unsigned int is_added:1;
55
43 void *priv_data; 56 void *priv_data;
44}; 57};
45 58
@@ -65,10 +78,12 @@ ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
65ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); 78ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
66 79
67int mei_cl_register_event_cb(struct mei_cl_device *device, 80int mei_cl_register_event_cb(struct mei_cl_device *device,
81 unsigned long event_mask,
68 mei_cl_event_cb_t read_cb, void *context); 82 mei_cl_event_cb_t read_cb, void *context);
69 83
70#define MEI_CL_EVENT_RX 0 84#define MEI_CL_EVENT_RX 0
71#define MEI_CL_EVENT_TX 1 85#define MEI_CL_EVENT_TX 1
86#define MEI_CL_EVENT_NOTIF 2
72 87
73void *mei_cl_get_drvdata(const struct mei_cl_device *device); 88void *mei_cl_get_drvdata(const struct mei_cl_device *device);
74void mei_cl_set_drvdata(struct mei_cl_device *device, void *data); 89void mei_cl_set_drvdata(struct mei_cl_device *device, void *data);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index cc4b01972060..c518eb589260 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -77,6 +77,8 @@ int memblock_remove(phys_addr_t base, phys_addr_t size);
77int memblock_free(phys_addr_t base, phys_addr_t size); 77int memblock_free(phys_addr_t base, phys_addr_t size);
78int memblock_reserve(phys_addr_t base, phys_addr_t size); 78int memblock_reserve(phys_addr_t base, phys_addr_t size);
79void memblock_trim_memory(phys_addr_t align); 79void memblock_trim_memory(phys_addr_t align);
80bool memblock_overlaps_region(struct memblock_type *type,
81 phys_addr_t base, phys_addr_t size);
80int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); 82int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
81int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); 83int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
82int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); 84int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
@@ -323,7 +325,7 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
323int memblock_is_memory(phys_addr_t addr); 325int memblock_is_memory(phys_addr_t addr);
324int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); 326int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
325int memblock_is_reserved(phys_addr_t addr); 327int memblock_is_reserved(phys_addr_t addr);
326int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); 328bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
327 329
328extern void __memblock_dump_all(void); 330extern void __memblock_dump_all(void);
329 331
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73b02b0a8f60..ad800e62cb7a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -23,6 +23,11 @@
23#include <linux/vm_event_item.h> 23#include <linux/vm_event_item.h>
24#include <linux/hardirq.h> 24#include <linux/hardirq.h>
25#include <linux/jump_label.h> 25#include <linux/jump_label.h>
26#include <linux/page_counter.h>
27#include <linux/vmpressure.h>
28#include <linux/eventfd.h>
29#include <linux/mmzone.h>
30#include <linux/writeback.h>
26 31
27struct mem_cgroup; 32struct mem_cgroup;
28struct page; 33struct page;
@@ -67,12 +72,221 @@ enum mem_cgroup_events_index {
67 MEMCG_NR_EVENTS, 72 MEMCG_NR_EVENTS,
68}; 73};
69 74
75/*
76 * Per memcg event counter is incremented at every pagein/pageout. With THP,
77 * it will be incremated by the number of pages. This counter is used for
78 * for trigger some periodic events. This is straightforward and better
79 * than using jiffies etc. to handle periodic memcg event.
80 */
81enum mem_cgroup_events_target {
82 MEM_CGROUP_TARGET_THRESH,
83 MEM_CGROUP_TARGET_SOFTLIMIT,
84 MEM_CGROUP_TARGET_NUMAINFO,
85 MEM_CGROUP_NTARGETS,
86};
87
88/*
89 * Bits in struct cg_proto.flags
90 */
91enum cg_proto_flags {
92 /* Currently active and new sockets should be assigned to cgroups */
93 MEMCG_SOCK_ACTIVE,
94 /* It was ever activated; we must disarm static keys on destruction */
95 MEMCG_SOCK_ACTIVATED,
96};
97
98struct cg_proto {
99 struct page_counter memory_allocated; /* Current allocated memory. */
100 struct percpu_counter sockets_allocated; /* Current number of sockets. */
101 int memory_pressure;
102 long sysctl_mem[3];
103 unsigned long flags;
104 /*
105 * memcg field is used to find which memcg we belong directly
106 * Each memcg struct can hold more than one cg_proto, so container_of
 107 * won't really cut it.
108 *
109 * The elegant solution would be having an inverse function to
110 * proto_cgroup in struct proto, but that means polluting the structure
111 * for everybody, instead of just for memcg users.
112 */
113 struct mem_cgroup *memcg;
114};
115
70#ifdef CONFIG_MEMCG 116#ifdef CONFIG_MEMCG
117struct mem_cgroup_stat_cpu {
118 long count[MEM_CGROUP_STAT_NSTATS];
119 unsigned long events[MEMCG_NR_EVENTS];
120 unsigned long nr_page_events;
121 unsigned long targets[MEM_CGROUP_NTARGETS];
122};
123
124struct mem_cgroup_reclaim_iter {
125 struct mem_cgroup *position;
126 /* scan generation, increased every round-trip */
127 unsigned int generation;
128};
129
130/*
131 * per-zone information in memory controller.
132 */
133struct mem_cgroup_per_zone {
134 struct lruvec lruvec;
135 unsigned long lru_size[NR_LRU_LISTS];
136
137 struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
138
139 struct rb_node tree_node; /* RB tree node */
140 unsigned long usage_in_excess;/* Set to the value by which */
141 /* the soft limit is exceeded*/
142 bool on_tree;
143 struct mem_cgroup *memcg; /* Back pointer, we cannot */
144 /* use container_of */
145};
146
147struct mem_cgroup_per_node {
148 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
149};
150
151struct mem_cgroup_threshold {
152 struct eventfd_ctx *eventfd;
153 unsigned long threshold;
154};
155
156/* For threshold */
157struct mem_cgroup_threshold_ary {
158 /* An array index points to threshold just below or equal to usage. */
159 int current_threshold;
160 /* Size of entries[] */
161 unsigned int size;
162 /* Array of thresholds */
163 struct mem_cgroup_threshold entries[0];
164};
165
166struct mem_cgroup_thresholds {
167 /* Primary thresholds array */
168 struct mem_cgroup_threshold_ary *primary;
169 /*
170 * Spare threshold array.
171 * This is needed to make mem_cgroup_unregister_event() "never fail".
172 * It must be able to store at least primary->size - 1 entries.
173 */
174 struct mem_cgroup_threshold_ary *spare;
175};
176
177/*
178 * The memory controller data structure. The memory controller controls both
179 * page cache and RSS per cgroup. We would eventually like to provide
180 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
181 * to help the administrator determine what knobs to tune.
182 */
183struct mem_cgroup {
184 struct cgroup_subsys_state css;
185
186 /* Accounted resources */
187 struct page_counter memory;
188 struct page_counter memsw;
189 struct page_counter kmem;
190
191 /* Normal memory consumption range */
192 unsigned long low;
193 unsigned long high;
194
195 unsigned long soft_limit;
196
197 /* vmpressure notifications */
198 struct vmpressure vmpressure;
199
200 /* css_online() has been completed */
201 int initialized;
202
203 /*
204 * Should the accounting and control be hierarchical, per subtree?
205 */
206 bool use_hierarchy;
207
208 /* protected by memcg_oom_lock */
209 bool oom_lock;
210 int under_oom;
211
212 int swappiness;
213 /* OOM-Killer disable */
214 int oom_kill_disable;
215
216 /* protect arrays of thresholds */
217 struct mutex thresholds_lock;
218
219 /* thresholds for memory usage. RCU-protected */
220 struct mem_cgroup_thresholds thresholds;
221
222 /* thresholds for mem+swap usage. RCU-protected */
223 struct mem_cgroup_thresholds memsw_thresholds;
224
225 /* For oom notifier event fd */
226 struct list_head oom_notify;
227
228 /*
229 * Should we move charges of a task when a task is moved into this
230 * mem_cgroup ? And what type of charges should we move ?
231 */
232 unsigned long move_charge_at_immigrate;
233 /*
234 * set > 0 if pages under this cgroup are moving to other cgroup.
235 */
236 atomic_t moving_account;
237 /* taken only while moving_account > 0 */
238 spinlock_t move_lock;
239 struct task_struct *move_lock_task;
240 unsigned long move_lock_flags;
241 /*
242 * percpu counter.
243 */
244 struct mem_cgroup_stat_cpu __percpu *stat;
245 spinlock_t pcp_counter_lock;
246
247#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
248 struct cg_proto tcp_mem;
249#endif
250#if defined(CONFIG_MEMCG_KMEM)
251 /* Index in the kmem_cache->memcg_params.memcg_caches array */
252 int kmemcg_id;
253 bool kmem_acct_activated;
254 bool kmem_acct_active;
255#endif
256
257 int last_scanned_node;
258#if MAX_NUMNODES > 1
259 nodemask_t scan_nodes;
260 atomic_t numainfo_events;
261 atomic_t numainfo_updating;
262#endif
263
264#ifdef CONFIG_CGROUP_WRITEBACK
265 struct list_head cgwb_list;
266 struct wb_domain cgwb_domain;
267#endif
268
269 /* List of events which userspace want to receive */
270 struct list_head event_list;
271 spinlock_t event_list_lock;
272
273 struct mem_cgroup_per_node *nodeinfo[0];
274 /* WARNING: nodeinfo must be the last member here */
275};
71extern struct cgroup_subsys_state *mem_cgroup_root_css; 276extern struct cgroup_subsys_state *mem_cgroup_root_css;
72 277
73void mem_cgroup_events(struct mem_cgroup *memcg, 278/**
279 * mem_cgroup_events - count memory events against a cgroup
280 * @memcg: the memory cgroup
281 * @idx: the event index
282 * @nr: the number of events to account for
283 */
284static inline void mem_cgroup_events(struct mem_cgroup *memcg,
74 enum mem_cgroup_events_index idx, 285 enum mem_cgroup_events_index idx,
75 unsigned int nr); 286 unsigned int nr)
287{
288 this_cpu_add(memcg->stat->events[idx], nr);
289}
76 290
77bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); 291bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
78 292
@@ -90,15 +304,29 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
90struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); 304struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
91struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); 305struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
92 306
93bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
94 struct mem_cgroup *root);
95bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); 307bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
308struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
309struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
96 310
97extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 311static inline
98extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 312struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
313 return css ? container_of(css, struct mem_cgroup, css) : NULL;
314}
99 315
100extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); 316struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
101extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); 317 struct mem_cgroup *,
318 struct mem_cgroup_reclaim_cookie *);
319void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
320
321static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
322 struct mem_cgroup *root)
323{
324 if (root == memcg)
325 return true;
326 if (!root->use_hierarchy)
327 return false;
328 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
329}
102 330
103static inline bool mm_match_cgroup(struct mm_struct *mm, 331static inline bool mm_match_cgroup(struct mm_struct *mm,
104 struct mem_cgroup *memcg) 332 struct mem_cgroup *memcg)
@@ -114,24 +342,68 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
114 return match; 342 return match;
115} 343}
116 344
117extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); 345struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
118extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); 346ino_t page_cgroup_ino(struct page *page);
119 347
120struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, 348static inline bool mem_cgroup_disabled(void)
121 struct mem_cgroup *, 349{
122 struct mem_cgroup_reclaim_cookie *); 350 if (memory_cgrp_subsys.disabled)
123void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); 351 return true;
352 return false;
353}
124 354
125/* 355/*
126 * For memory reclaim. 356 * For memory reclaim.
127 */ 357 */
128int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
129bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
130int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); 358int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
131unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list); 359
132void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int); 360void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
133extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 361 int nr_pages);
134 struct task_struct *p); 362
363static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
364{
365 struct mem_cgroup_per_zone *mz;
366 struct mem_cgroup *memcg;
367
368 if (mem_cgroup_disabled())
369 return true;
370
371 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
372 memcg = mz->memcg;
373
374 return !!(memcg->css.flags & CSS_ONLINE);
375}
376
377static inline
378unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
379{
380 struct mem_cgroup_per_zone *mz;
381
382 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
383 return mz->lru_size[lru];
384}
385
386static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
387{
388 unsigned long inactive_ratio;
389 unsigned long inactive;
390 unsigned long active;
391 unsigned long gb;
392
393 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
394 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
395
396 gb = (inactive + active) >> (30 - PAGE_SHIFT);
397 if (gb)
398 inactive_ratio = int_sqrt(10 * gb);
399 else
400 inactive_ratio = 1;
401
402 return inactive * inactive_ratio < active;
403}
404
405void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
406 struct task_struct *p);
135 407
136static inline void mem_cgroup_oom_enable(void) 408static inline void mem_cgroup_oom_enable(void)
137{ 409{
@@ -156,18 +428,26 @@ bool mem_cgroup_oom_synchronize(bool wait);
156extern int do_swap_account; 428extern int do_swap_account;
157#endif 429#endif
158 430
159static inline bool mem_cgroup_disabled(void)
160{
161 if (memory_cgrp_subsys.disabled)
162 return true;
163 return false;
164}
165
166struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page); 431struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
167void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
168 enum mem_cgroup_stat_index idx, int val);
169void mem_cgroup_end_page_stat(struct mem_cgroup *memcg); 432void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
170 433
434/**
435 * mem_cgroup_update_page_stat - update page state statistics
436 * @memcg: memcg to account against
437 * @idx: page state item to account
438 * @val: number of pages (positive or negative)
439 *
440 * See mem_cgroup_begin_page_stat() for locking requirements.
441 */
442static inline void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
443 enum mem_cgroup_stat_index idx, int val)
444{
445 VM_BUG_ON(!rcu_read_lock_held());
446
447 if (memcg)
448 this_cpu_add(memcg->stat->count[idx], val);
449}
450
171static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg, 451static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
172 enum mem_cgroup_stat_index idx) 452 enum mem_cgroup_stat_index idx)
173{ 453{
@@ -184,13 +464,31 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
184 gfp_t gfp_mask, 464 gfp_t gfp_mask,
185 unsigned long *total_scanned); 465 unsigned long *total_scanned);
186 466
187void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
188static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, 467static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
189 enum vm_event_item idx) 468 enum vm_event_item idx)
190{ 469{
470 struct mem_cgroup *memcg;
471
191 if (mem_cgroup_disabled()) 472 if (mem_cgroup_disabled())
192 return; 473 return;
193 __mem_cgroup_count_vm_event(mm, idx); 474
475 rcu_read_lock();
476 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
477 if (unlikely(!memcg))
478 goto out;
479
480 switch (idx) {
481 case PGFAULT:
482 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
483 break;
484 case PGMAJFAULT:
485 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
486 break;
487 default:
488 BUG();
489 }
490out:
491 rcu_read_unlock();
194} 492}
195#ifdef CONFIG_TRANSPARENT_HUGEPAGE 493#ifdef CONFIG_TRANSPARENT_HUGEPAGE
196void mem_cgroup_split_huge_fixup(struct page *head); 494void mem_cgroup_split_huge_fixup(struct page *head);
@@ -199,8 +497,6 @@ void mem_cgroup_split_huge_fixup(struct page *head);
199#else /* CONFIG_MEMCG */ 497#else /* CONFIG_MEMCG */
200struct mem_cgroup; 498struct mem_cgroup;
201 499
202#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
203
204static inline void mem_cgroup_events(struct mem_cgroup *memcg, 500static inline void mem_cgroup_events(struct mem_cgroup *memcg,
205 enum mem_cgroup_events_index idx, 501 enum mem_cgroup_events_index idx,
206 unsigned int nr) 502 unsigned int nr)
@@ -258,11 +554,6 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
258 return &zone->lruvec; 554 return &zone->lruvec;
259} 555}
260 556
261static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
262{
263 return NULL;
264}
265
266static inline bool mm_match_cgroup(struct mm_struct *mm, 557static inline bool mm_match_cgroup(struct mm_struct *mm,
267 struct mem_cgroup *memcg) 558 struct mem_cgroup *memcg)
268{ 559{
@@ -275,12 +566,6 @@ static inline bool task_in_mem_cgroup(struct task_struct *task,
275 return true; 566 return true;
276} 567}
277 568
278static inline struct cgroup_subsys_state
279 *mem_cgroup_css(struct mem_cgroup *memcg)
280{
281 return NULL;
282}
283
284static inline struct mem_cgroup * 569static inline struct mem_cgroup *
285mem_cgroup_iter(struct mem_cgroup *root, 570mem_cgroup_iter(struct mem_cgroup *root,
286 struct mem_cgroup *prev, 571 struct mem_cgroup *prev,
@@ -428,8 +713,8 @@ static inline void sock_release_memcg(struct sock *sk)
428extern struct static_key memcg_kmem_enabled_key; 713extern struct static_key memcg_kmem_enabled_key;
429 714
430extern int memcg_nr_cache_ids; 715extern int memcg_nr_cache_ids;
431extern void memcg_get_cache_ids(void); 716void memcg_get_cache_ids(void);
432extern void memcg_put_cache_ids(void); 717void memcg_put_cache_ids(void);
433 718
434/* 719/*
435 * Helper macro to loop through all memcg-specific caches. Callers must still 720 * Helper macro to loop through all memcg-specific caches. Callers must still
@@ -444,7 +729,10 @@ static inline bool memcg_kmem_enabled(void)
444 return static_key_false(&memcg_kmem_enabled_key); 729 return static_key_false(&memcg_kmem_enabled_key);
445} 730}
446 731
447bool memcg_kmem_is_active(struct mem_cgroup *memcg); 732static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
733{
734 return memcg->kmem_acct_active;
735}
448 736
449/* 737/*
450 * In general, we'll do everything in our power to not incur in any overhead 738 * In general, we'll do everything in our power to not incur in any overhead
@@ -463,7 +751,15 @@ void __memcg_kmem_commit_charge(struct page *page,
463 struct mem_cgroup *memcg, int order); 751 struct mem_cgroup *memcg, int order);
464void __memcg_kmem_uncharge_pages(struct page *page, int order); 752void __memcg_kmem_uncharge_pages(struct page *page, int order);
465 753
466int memcg_cache_id(struct mem_cgroup *memcg); 754/*
 755 * helper for accessing a memcg's index. It will be used as an index in the
756 * child cache array in kmem_cache, and also to derive its name. This function
757 * will return -1 when this is not a kmem-limited memcg.
758 */
759static inline int memcg_cache_id(struct mem_cgroup *memcg)
760{
761 return memcg ? memcg->kmemcg_id : -1;
762}
467 763
468struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); 764struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
469void __memcg_kmem_put_cache(struct kmem_cache *cachep); 765void __memcg_kmem_put_cache(struct kmem_cache *cachep);
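The mem_cgroup_inactive_anon_is_low() helper moved inline above scales the target inactive:active ratio with the square root of the group's anonymous memory in gigabytes. A userspace sketch of that heuristic, assuming 4 KiB pages and substituting libm's sqrt() for the kernel's int_sqrt() (build with -lm):

#include <math.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assume 4 KiB pages for the example */

static unsigned long inactive_ratio(unsigned long inactive, unsigned long active)
{
	/* total size in GiB; ratio = sqrt(10 * GiB), minimum 1 */
	unsigned long gb = (inactive + active) >> (30 - PAGE_SHIFT);

	return gb ? (unsigned long)sqrt(10.0 * gb) : 1;
}

int main(void)
{
	/* total anonymous memory in pages: 256 MiB, 1 GiB, 16 GiB */
	unsigned long totals[] = { 1UL << 16, 1UL << 18, 1UL << 22 };

	for (unsigned i = 0; i < sizeof(totals) / sizeof(totals[0]); i++) {
		unsigned long total = totals[i];
		unsigned long active = total / 2, inactive = total - active;

		printf("%6lu MiB total -> ratio %lu (low if inactive*ratio < active)\n",
		       total >> (20 - PAGE_SHIFT),
		       inactive_ratio(inactive, active));
	}
	return 0;
}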
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 6ffa0ac7f7d6..8f60e899b33c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -266,8 +266,9 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
266extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 266extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
267 void *arg, int (*func)(struct memory_block *, void *)); 267 void *arg, int (*func)(struct memory_block *, void *));
268extern int add_memory(int nid, u64 start, u64 size); 268extern int add_memory(int nid, u64 start, u64 size);
269extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default); 269extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
270extern int arch_add_memory(int nid, u64 start, u64 size); 270 bool for_device);
271extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
271extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); 272extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
272extern bool is_memblock_offlined(struct memory_block *mem); 273extern bool is_memblock_offlined(struct memory_block *mem);
273extern void remove_memory(int nid, u64 start, u64 size); 274extern void remove_memory(int nid, u64 start, u64 size);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 97cb283cc8e1..8fcad63fab55 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -60,60 +60,60 @@ enum {
 60/* page 0 basic: slave addr 0x60 */ 60/* page 0 basic: slave addr 0x60 */
61 61
62#define PM800_STATUS_1 (0x01) 62#define PM800_STATUS_1 (0x01)
63#define PM800_ONKEY_STS1 (1 << 0) 63#define PM800_ONKEY_STS1 BIT(0)
64#define PM800_EXTON_STS1 (1 << 1) 64#define PM800_EXTON_STS1 BIT(1)
65#define PM800_CHG_STS1 (1 << 2) 65#define PM800_CHG_STS1 BIT(2)
66#define PM800_BAT_STS1 (1 << 3) 66#define PM800_BAT_STS1 BIT(3)
67#define PM800_VBUS_STS1 (1 << 4) 67#define PM800_VBUS_STS1 BIT(4)
68#define PM800_LDO_PGOOD_STS1 (1 << 5) 68#define PM800_LDO_PGOOD_STS1 BIT(5)
69#define PM800_BUCK_PGOOD_STS1 (1 << 6) 69#define PM800_BUCK_PGOOD_STS1 BIT(6)
70 70
71#define PM800_STATUS_2 (0x02) 71#define PM800_STATUS_2 (0x02)
72#define PM800_RTC_ALARM_STS2 (1 << 0) 72#define PM800_RTC_ALARM_STS2 BIT(0)
73 73
74/* Wakeup Registers */ 74/* Wakeup Registers */
75#define PM800_WAKEUP1 (0x0D) 75#define PM800_WAKEUP1 (0x0D)
76 76
77#define PM800_WAKEUP2 (0x0E) 77#define PM800_WAKEUP2 (0x0E)
78#define PM800_WAKEUP2_INV_INT (1 << 0) 78#define PM800_WAKEUP2_INV_INT BIT(0)
79#define PM800_WAKEUP2_INT_CLEAR (1 << 1) 79#define PM800_WAKEUP2_INT_CLEAR BIT(1)
80#define PM800_WAKEUP2_INT_MASK (1 << 2) 80#define PM800_WAKEUP2_INT_MASK BIT(2)
81 81
82#define PM800_POWER_UP_LOG (0x10) 82#define PM800_POWER_UP_LOG (0x10)
83 83
 84/* Reference and low power registers */ 84/* Reference and low power registers */
85#define PM800_LOW_POWER1 (0x20) 85#define PM800_LOW_POWER1 (0x20)
86#define PM800_LOW_POWER2 (0x21) 86#define PM800_LOW_POWER2 (0x21)
87#define PM800_LOW_POWER_CONFIG3 (0x22) 87#define PM800_LOW_POWER_CONFIG3 (0x22)
88#define PM800_LOW_POWER_CONFIG4 (0x23) 88#define PM800_LOW_POWER_CONFIG4 (0x23)
89 89
90/* GPIO register */ 90/* GPIO register */
91#define PM800_GPIO_0_1_CNTRL (0x30) 91#define PM800_GPIO_0_1_CNTRL (0x30)
92#define PM800_GPIO0_VAL (1 << 0) 92#define PM800_GPIO0_VAL BIT(0)
93#define PM800_GPIO0_GPIO_MODE(x) (x << 1) 93#define PM800_GPIO0_GPIO_MODE(x) (x << 1)
94#define PM800_GPIO1_VAL (1 << 4) 94#define PM800_GPIO1_VAL BIT(4)
95#define PM800_GPIO1_GPIO_MODE(x) (x << 5) 95#define PM800_GPIO1_GPIO_MODE(x) (x << 5)
96 96
97#define PM800_GPIO_2_3_CNTRL (0x31) 97#define PM800_GPIO_2_3_CNTRL (0x31)
98#define PM800_GPIO2_VAL (1 << 0) 98#define PM800_GPIO2_VAL BIT(0)
99#define PM800_GPIO2_GPIO_MODE(x) (x << 1) 99#define PM800_GPIO2_GPIO_MODE(x) (x << 1)
100#define PM800_GPIO3_VAL (1 << 4) 100#define PM800_GPIO3_VAL BIT(4)
101#define PM800_GPIO3_GPIO_MODE(x) (x << 5) 101#define PM800_GPIO3_GPIO_MODE(x) (x << 5)
102#define PM800_GPIO3_MODE_MASK 0x1F 102#define PM800_GPIO3_MODE_MASK 0x1F
103#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6) 103#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6)
104 104
105#define PM800_GPIO_4_CNTRL (0x32) 105#define PM800_GPIO_4_CNTRL (0x32)
106#define PM800_GPIO4_VAL (1 << 0) 106#define PM800_GPIO4_VAL BIT(0)
107#define PM800_GPIO4_GPIO_MODE(x) (x << 1) 107#define PM800_GPIO4_GPIO_MODE(x) (x << 1)
108 108
109#define PM800_HEADSET_CNTRL (0x38) 109#define PM800_HEADSET_CNTRL (0x38)
110#define PM800_HEADSET_DET_EN (1 << 7) 110#define PM800_HEADSET_DET_EN BIT(7)
111#define PM800_HSDET_SLP (1 << 1) 111#define PM800_HSDET_SLP BIT(1)
112/* PWM register */ 112/* PWM register */
113#define PM800_PWM1 (0x40) 113#define PM800_PWM1 (0x40)
114#define PM800_PWM2 (0x41) 114#define PM800_PWM2 (0x41)
115#define PM800_PWM3 (0x42) 115#define PM800_PWM3 (0x42)
116#define PM800_PWM4 (0x43) 116#define PM800_PWM4 (0x43)
117 117
118/* RTC Registers */ 118/* RTC Registers */
119#define PM800_RTC_CONTROL (0xD0) 119#define PM800_RTC_CONTROL (0xD0)
@@ -123,55 +123,55 @@ enum {
123#define PM800_RTC_MISC4 (0xE4) 123#define PM800_RTC_MISC4 (0xE4)
124#define PM800_RTC_MISC5 (0xE7) 124#define PM800_RTC_MISC5 (0xE7)
125/* bit definitions of RTC Register 1 (0xD0) */ 125/* bit definitions of RTC Register 1 (0xD0) */
126#define PM800_ALARM1_EN (1 << 0) 126#define PM800_ALARM1_EN BIT(0)
127#define PM800_ALARM_WAKEUP (1 << 4) 127#define PM800_ALARM_WAKEUP BIT(4)
128#define PM800_ALARM (1 << 5) 128#define PM800_ALARM BIT(5)
129#define PM800_RTC1_USE_XO (1 << 7) 129#define PM800_RTC1_USE_XO BIT(7)
130 130
131/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */ 131/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
132 132
133/* buck registers */ 133/* buck registers */
134#define PM800_SLEEP_BUCK1 (0x30) 134#define PM800_SLEEP_BUCK1 (0x30)
135 135
136/* BUCK Sleep Mode Register 1: BUCK[1..4] */ 136/* BUCK Sleep Mode Register 1: BUCK[1..4] */
137#define PM800_BUCK_SLP1 (0x5A) 137#define PM800_BUCK_SLP1 (0x5A)
138#define PM800_BUCK1_SLP1_SHIFT 0 138#define PM800_BUCK1_SLP1_SHIFT 0
139#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT) 139#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
140 140
 141/* page 2 GPADC: slave addr 0x02 */ 141/* page 2 GPADC: slave addr 0x02 */
142#define PM800_GPADC_MEAS_EN1 (0x01) 142#define PM800_GPADC_MEAS_EN1 (0x01)
143#define PM800_MEAS_EN1_VBAT (1 << 2) 143#define PM800_MEAS_EN1_VBAT BIT(2)
144#define PM800_GPADC_MEAS_EN2 (0x02) 144#define PM800_GPADC_MEAS_EN2 (0x02)
145#define PM800_MEAS_EN2_RFTMP (1 << 0) 145#define PM800_MEAS_EN2_RFTMP BIT(0)
146#define PM800_MEAS_GP0_EN (1 << 2) 146#define PM800_MEAS_GP0_EN BIT(2)
147#define PM800_MEAS_GP1_EN (1 << 3) 147#define PM800_MEAS_GP1_EN BIT(3)
148#define PM800_MEAS_GP2_EN (1 << 4) 148#define PM800_MEAS_GP2_EN BIT(4)
149#define PM800_MEAS_GP3_EN (1 << 5) 149#define PM800_MEAS_GP3_EN BIT(5)
150#define PM800_MEAS_GP4_EN (1 << 6) 150#define PM800_MEAS_GP4_EN BIT(6)
151 151
152#define PM800_GPADC_MISC_CONFIG1 (0x05) 152#define PM800_GPADC_MISC_CONFIG1 (0x05)
153#define PM800_GPADC_MISC_CONFIG2 (0x06) 153#define PM800_GPADC_MISC_CONFIG2 (0x06)
154#define PM800_GPADC_MISC_GPFSM_EN (1 << 0) 154#define PM800_GPADC_MISC_GPFSM_EN BIT(0)
155#define PM800_GPADC_SLOW_MODE(x) (x << 3) 155#define PM800_GPADC_SLOW_MODE(x) (x << 3)
156 156
157#define PM800_GPADC_MISC_CONFIG3 (0x09) 157#define PM800_GPADC_MISC_CONFIG3 (0x09)
158#define PM800_GPADC_MISC_CONFIG4 (0x0A) 158#define PM800_GPADC_MISC_CONFIG4 (0x0A)
159 159
160#define PM800_GPADC_PREBIAS1 (0x0F) 160#define PM800_GPADC_PREBIAS1 (0x0F)
161#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0) 161#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0)
162#define PM800_GPADC_PREBIAS2 (0x10) 162#define PM800_GPADC_PREBIAS2 (0x10)
163 163
164#define PM800_GP_BIAS_ENA1 (0x14) 164#define PM800_GP_BIAS_ENA1 (0x14)
165#define PM800_GPADC_GP_BIAS_EN0 (1 << 0) 165#define PM800_GPADC_GP_BIAS_EN0 BIT(0)
166#define PM800_GPADC_GP_BIAS_EN1 (1 << 1) 166#define PM800_GPADC_GP_BIAS_EN1 BIT(1)
167#define PM800_GPADC_GP_BIAS_EN2 (1 << 2) 167#define PM800_GPADC_GP_BIAS_EN2 BIT(2)
168#define PM800_GPADC_GP_BIAS_EN3 (1 << 3) 168#define PM800_GPADC_GP_BIAS_EN3 BIT(3)
169 169
170#define PM800_GP_BIAS_OUT1 (0x15) 170#define PM800_GP_BIAS_OUT1 (0x15)
171#define PM800_BIAS_OUT_GP0 (1 << 0) 171#define PM800_BIAS_OUT_GP0 BIT(0)
172#define PM800_BIAS_OUT_GP1 (1 << 1) 172#define PM800_BIAS_OUT_GP1 BIT(1)
173#define PM800_BIAS_OUT_GP2 (1 << 2) 173#define PM800_BIAS_OUT_GP2 BIT(2)
174#define PM800_BIAS_OUT_GP3 (1 << 3) 174#define PM800_BIAS_OUT_GP3 BIT(3)
175 175
176#define PM800_GPADC0_LOW_TH 0x20 176#define PM800_GPADC0_LOW_TH 0x20
177#define PM800_GPADC1_LOW_TH 0x21 177#define PM800_GPADC1_LOW_TH 0x21
@@ -222,37 +222,37 @@ enum {
222 222
223#define PM805_INT_STATUS1 (0x03) 223#define PM805_INT_STATUS1 (0x03)
224 224
225#define PM805_INT1_HP1_SHRT (1 << 0) 225#define PM805_INT1_HP1_SHRT BIT(0)
226#define PM805_INT1_HP2_SHRT (1 << 1) 226#define PM805_INT1_HP2_SHRT BIT(1)
227#define PM805_INT1_MIC_CONFLICT (1 << 2) 227#define PM805_INT1_MIC_CONFLICT BIT(2)
228#define PM805_INT1_CLIP_FAULT (1 << 3) 228#define PM805_INT1_CLIP_FAULT BIT(3)
229#define PM805_INT1_LDO_OFF (1 << 4) 229#define PM805_INT1_LDO_OFF BIT(4)
230#define PM805_INT1_SRC_DPLL_LOCK (1 << 5) 230#define PM805_INT1_SRC_DPLL_LOCK BIT(5)
231 231
232#define PM805_INT_STATUS2 (0x04) 232#define PM805_INT_STATUS2 (0x04)
233 233
234#define PM805_INT2_MIC_DET (1 << 0) 234#define PM805_INT2_MIC_DET BIT(0)
235#define PM805_INT2_SHRT_BTN_DET (1 << 1) 235#define PM805_INT2_SHRT_BTN_DET BIT(1)
236#define PM805_INT2_VOLM_BTN_DET (1 << 2) 236#define PM805_INT2_VOLM_BTN_DET BIT(2)
237#define PM805_INT2_VOLP_BTN_DET (1 << 3) 237#define PM805_INT2_VOLP_BTN_DET BIT(3)
238#define PM805_INT2_RAW_PLL_FAULT (1 << 4) 238#define PM805_INT2_RAW_PLL_FAULT BIT(4)
239#define PM805_INT2_FINE_PLL_FAULT (1 << 5) 239#define PM805_INT2_FINE_PLL_FAULT BIT(5)
240 240
241#define PM805_INT_MASK1 (0x05) 241#define PM805_INT_MASK1 (0x05)
242#define PM805_INT_MASK2 (0x06) 242#define PM805_INT_MASK2 (0x06)
243#define PM805_SHRT_BTN_DET (1 << 1) 243#define PM805_SHRT_BTN_DET BIT(1)
244 244
245/* number of status and int reg in a row */ 245/* number of status and int reg in a row */
246#define PM805_INT_REG_NUM (2) 246#define PM805_INT_REG_NUM (2)
247 247
248#define PM805_MIC_DET1 (0x07) 248#define PM805_MIC_DET1 (0x07)
249#define PM805_MIC_DET_EN_MIC_DET (1 << 0) 249#define PM805_MIC_DET_EN_MIC_DET BIT(0)
250#define PM805_MIC_DET2 (0x08) 250#define PM805_MIC_DET2 (0x08)
251#define PM805_MIC_DET_STATUS1 (0x09) 251#define PM805_MIC_DET_STATUS1 (0x09)
252 252
253#define PM805_MIC_DET_STATUS3 (0x0A) 253#define PM805_MIC_DET_STATUS3 (0x0A)
254#define PM805_AUTO_SEQ_STATUS1 (0x0B) 254#define PM805_AUTO_SEQ_STATUS1 (0x0B)
255#define PM805_AUTO_SEQ_STATUS2 (0x0C) 255#define PM805_AUTO_SEQ_STATUS2 (0x0C)
256 256
257#define PM805_ADC_SETTING1 (0x10) 257#define PM805_ADC_SETTING1 (0x10)
258#define PM805_ADC_SETTING2 (0x11) 258#define PM805_ADC_SETTING2 (0x11)
@@ -261,7 +261,7 @@ enum {
261#define PM805_ADC_GAIN2 (0x13) 261#define PM805_ADC_GAIN2 (0x13)
262#define PM805_DMIC_SETTING (0x15) 262#define PM805_DMIC_SETTING (0x15)
263#define PM805_DWS_SETTING (0x16) 263#define PM805_DWS_SETTING (0x16)
264#define PM805_MIC_CONFLICT_STS (0x17) 264#define PM805_MIC_CONFLICT_STS (0x17)
265 265
266#define PM805_PDM_SETTING1 (0x20) 266#define PM805_PDM_SETTING1 (0x20)
267#define PM805_PDM_SETTING2 (0x21) 267#define PM805_PDM_SETTING2 (0x21)
@@ -270,11 +270,11 @@ enum {
270#define PM805_PDM_CONTROL2 (0x24) 270#define PM805_PDM_CONTROL2 (0x24)
271#define PM805_PDM_CONTROL3 (0x25) 271#define PM805_PDM_CONTROL3 (0x25)
272 272
273#define PM805_HEADPHONE_SETTING (0x26) 273#define PM805_HEADPHONE_SETTING (0x26)
274#define PM805_HEADPHONE_GAIN_A2A (0x27) 274#define PM805_HEADPHONE_GAIN_A2A (0x27)
275#define PM805_HEADPHONE_SHORT_STATE (0x28) 275#define PM805_HEADPHONE_SHORT_STATE (0x28)
276#define PM805_EARPHONE_SETTING (0x29) 276#define PM805_EARPHONE_SETTING (0x29)
277#define PM805_AUTO_SEQ_SETTING (0x2A) 277#define PM805_AUTO_SEQ_SETTING (0x2A)
278 278
279struct pm80x_rtc_pdata { 279struct pm80x_rtc_pdata {
280 int vrtc; 280 int vrtc;
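The 88pm80x.h hunks above are a mechanical conversion from open-coded (1 << n) masks to BIT(n). A trivial sketch showing the two forms are equivalent; in the kernel BIT() expands to 1UL << (nr), and the local macro below mirrors that.

#include <assert.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

#define PM800_ONKEY_STS1_OLD (1 << 0) /* open-coded form being replaced */
#define PM800_ONKEY_STS1_NEW BIT(0)   /* new spelling, same value */

int main(void)
{
	assert(PM800_ONKEY_STS1_OLD == PM800_ONKEY_STS1_NEW);
	printf("BIT(0) = %#lx, BIT(7) = %#lx\n", BIT(0), BIT(7));
	return 0;
}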
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 2f434f4f79a1..79e607e2f081 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@ enum arizona_type {
25 WM5110 = 2, 25 WM5110 = 2,
26 WM8997 = 3, 26 WM8997 = 3,
27 WM8280 = 4, 27 WM8280 = 4,
28 WM8998 = 5,
29 WM1814 = 6,
28}; 30};
29 31
30#define ARIZONA_IRQ_GP1 0 32#define ARIZONA_IRQ_GP1 0
@@ -165,6 +167,7 @@ static inline int wm5102_patch(struct arizona *arizona)
165 167
166int wm5110_patch(struct arizona *arizona); 168int wm5110_patch(struct arizona *arizona);
167int wm8997_patch(struct arizona *arizona); 169int wm8997_patch(struct arizona *arizona);
170int wm8998_patch(struct arizona *arizona);
168 171
169extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop, 172extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
170 bool mandatory); 173 bool mandatory);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 43db4faad143..1dc385850ba2 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@ struct arizona_pdata {
101 * useful for systems where and I2S bus with multiple data 101 * useful for systems where and I2S bus with multiple data
102 * lines is mastered. 102 * lines is mastered.
103 */ 103 */
104 int max_channels_clocked[ARIZONA_MAX_AIF]; 104 unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
105 105
106 /** GPIO5 is used for jack detection */ 106 /** GPIO5 is used for jack detection */
107 bool jd_gpio5; 107 bool jd_gpio5;
@@ -125,22 +125,22 @@ struct arizona_pdata {
125 unsigned int hpdet_channel; 125 unsigned int hpdet_channel;
126 126
127 /** Extra debounce timeout used during initial mic detection (ms) */ 127 /** Extra debounce timeout used during initial mic detection (ms) */
128 int micd_detect_debounce; 128 unsigned int micd_detect_debounce;
129 129
130 /** GPIO for mic detection polarity */ 130 /** GPIO for mic detection polarity */
131 int micd_pol_gpio; 131 int micd_pol_gpio;
132 132
133 /** Mic detect ramp rate */ 133 /** Mic detect ramp rate */
134 int micd_bias_start_time; 134 unsigned int micd_bias_start_time;
135 135
136 /** Mic detect sample rate */ 136 /** Mic detect sample rate */
137 int micd_rate; 137 unsigned int micd_rate;
138 138
139 /** Mic detect debounce level */ 139 /** Mic detect debounce level */
140 int micd_dbtime; 140 unsigned int micd_dbtime;
141 141
142 /** Mic detect timeout (ms) */ 142 /** Mic detect timeout (ms) */
143 int micd_timeout; 143 unsigned int micd_timeout;
144 144
145 /** Force MICBIAS on for mic detect */ 145 /** Force MICBIAS on for mic detect */
146 bool micd_force_micbias; 146 bool micd_force_micbias;
@@ -162,6 +162,8 @@ struct arizona_pdata {
162 /** 162 /**
163 * Mode of input structures 163 * Mode of input structures
164 * One of the ARIZONA_INMODE_xxx values 164 * One of the ARIZONA_INMODE_xxx values
165 * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
166 * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
165 */ 167 */
166 int inmode[ARIZONA_MAX_INPUT]; 168 int inmode[ARIZONA_MAX_INPUT];
167 169
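The inmode documentation added above changes meaning per codec: the same four-element array describes IN1..IN4 on wm5102/wm5110/wm8280/wm8997 but IN1A/IN2A/IN1B/IN2B on wm8998. As a hedged illustration only (the board data name and chosen values are hypothetical, not part of this change), a machine file might fill the updated platform data roughly like this:

#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/pdata.h>

/* Hypothetical board data: the field names come from struct arizona_pdata
 * above; the values are illustrative, not taken from this patch. */
static struct arizona_pdata example_board_arizona_pdata = {
	.max_channels_clocked = { 2 },		/* cap AIF1 at two channels */
	.micd_detect_debounce = 500,		/* extra mic-detect debounce, ms */
	/* wm8998 ordering: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B */
	.inmode = { ARIZONA_INMODE_DIFF, ARIZONA_INMODE_DMIC,
		    ARIZONA_INMODE_SE, ARIZONA_INMODE_DIFF },
};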
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 3499d36e6067..fdd70b3c7418 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
39#define ARIZONA_PWM_DRIVE_3 0x32 39#define ARIZONA_PWM_DRIVE_3 0x32
40#define ARIZONA_WAKE_CONTROL 0x40 40#define ARIZONA_WAKE_CONTROL 0x40
41#define ARIZONA_SEQUENCE_CONTROL 0x41 41#define ARIZONA_SEQUENCE_CONTROL 0x41
42#define ARIZONA_SPARE_TRIGGERS 0x42
42#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61 43#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
43#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62 44#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
44#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63 45#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
@@ -139,6 +140,7 @@
139#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7 140#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7
140#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8 141#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8
141#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9 142#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9
143#define ARIZONA_MIC_DETECT_4 0x2AB
142#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3 144#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3
143#define ARIZONA_ISOLATION_CONTROL 0x2CB 145#define ARIZONA_ISOLATION_CONTROL 0x2CB
144#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3 146#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3
@@ -225,14 +227,18 @@
225#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E 227#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E
226#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F 228#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F
227#define ARIZONA_DRE_ENABLE 0x440 229#define ARIZONA_DRE_ENABLE 0x440
230#define ARIZONA_DRE_CONTROL_1 0x441
228#define ARIZONA_DRE_CONTROL_2 0x442 231#define ARIZONA_DRE_CONTROL_2 0x442
229#define ARIZONA_DRE_CONTROL_3 0x443 232#define ARIZONA_DRE_CONTROL_3 0x443
233#define ARIZONA_EDRE_ENABLE 0x448
230#define ARIZONA_DAC_AEC_CONTROL_1 0x450 234#define ARIZONA_DAC_AEC_CONTROL_1 0x450
235#define ARIZONA_DAC_AEC_CONTROL_2 0x451
231#define ARIZONA_NOISE_GATE_CONTROL 0x458 236#define ARIZONA_NOISE_GATE_CONTROL 0x458
232#define ARIZONA_PDM_SPK1_CTRL_1 0x490 237#define ARIZONA_PDM_SPK1_CTRL_1 0x490
233#define ARIZONA_PDM_SPK1_CTRL_2 0x491 238#define ARIZONA_PDM_SPK1_CTRL_2 0x491
234#define ARIZONA_PDM_SPK2_CTRL_1 0x492 239#define ARIZONA_PDM_SPK2_CTRL_1 0x492
235#define ARIZONA_PDM_SPK2_CTRL_2 0x493 240#define ARIZONA_PDM_SPK2_CTRL_2 0x493
241#define ARIZONA_HP_TEST_CTRL_13 0x49A
236#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 242#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
237#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 243#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
238#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 244#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
@@ -310,6 +316,10 @@
310#define ARIZONA_AIF3_TX_ENABLES 0x599 316#define ARIZONA_AIF3_TX_ENABLES 0x599
311#define ARIZONA_AIF3_RX_ENABLES 0x59A 317#define ARIZONA_AIF3_RX_ENABLES 0x59A
312#define ARIZONA_AIF3_FORCE_WRITE 0x59B 318#define ARIZONA_AIF3_FORCE_WRITE 0x59B
319#define ARIZONA_SPD1_TX_CONTROL 0x5C2
320#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
321#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
322#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
313#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3 323#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
314#define ARIZONA_SLIMBUS_RATES_1 0x5E5 324#define ARIZONA_SLIMBUS_RATES_1 0x5E5
315#define ARIZONA_SLIMBUS_RATES_2 0x5E6 325#define ARIZONA_SLIMBUS_RATES_2 0x5E6
@@ -643,6 +653,10 @@
643#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD 653#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
644#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE 654#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
645#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF 655#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
656#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE 0x800
657#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME 0x801
658#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE 0x808
659#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME 0x809
646#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880 660#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880
647#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881 661#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881
648#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882 662#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882
@@ -868,6 +882,7 @@
868#define ARIZONA_GPIO5_CTRL 0xC04 882#define ARIZONA_GPIO5_CTRL 0xC04
869#define ARIZONA_IRQ_CTRL_1 0xC0F 883#define ARIZONA_IRQ_CTRL_1 0xC0F
870#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10 884#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10
885#define ARIZONA_GP_SWITCH_1 0xC18
871#define ARIZONA_MISC_PAD_CTRL_1 0xC20 886#define ARIZONA_MISC_PAD_CTRL_1 0xC20
872#define ARIZONA_MISC_PAD_CTRL_2 0xC21 887#define ARIZONA_MISC_PAD_CTRL_2 0xC21
873#define ARIZONA_MISC_PAD_CTRL_3 0xC22 888#define ARIZONA_MISC_PAD_CTRL_3 0xC22
@@ -1169,6 +1184,13 @@
1169#define ARIZONA_DSP4_SCRATCH_1 0x1441 1184#define ARIZONA_DSP4_SCRATCH_1 0x1441
1170#define ARIZONA_DSP4_SCRATCH_2 0x1442 1185#define ARIZONA_DSP4_SCRATCH_2 0x1442
1171#define ARIZONA_DSP4_SCRATCH_3 0x1443 1186#define ARIZONA_DSP4_SCRATCH_3 0x1443
1187#define ARIZONA_FRF_COEFF_1 0x1700
1188#define ARIZONA_FRF_COEFF_2 0x1701
1189#define ARIZONA_FRF_COEFF_3 0x1702
1190#define ARIZONA_FRF_COEFF_4 0x1703
1191#define ARIZONA_V2_DAC_COMP_1 0x1704
1192#define ARIZONA_V2_DAC_COMP_2 0x1705
1193
1172 1194
1173/* 1195/*
1174 * Field Definitions. 1196 * Field Definitions.
@@ -1431,6 +1453,42 @@
1431#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */ 1453#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */
1432 1454
1433/* 1455/*
1456 * R66 (0x42) - Spare Triggers
1457 */
1458#define ARIZONA_WS_TRG8 0x0080 /* WS_TRG8 */
1459#define ARIZONA_WS_TRG8_MASK 0x0080 /* WS_TRG8 */
1460#define ARIZONA_WS_TRG8_SHIFT 7 /* WS_TRG8 */
1461#define ARIZONA_WS_TRG8_WIDTH 1 /* WS_TRG8 */
1462#define ARIZONA_WS_TRG7 0x0040 /* WS_TRG7 */
1463#define ARIZONA_WS_TRG7_MASK 0x0040 /* WS_TRG7 */
1464#define ARIZONA_WS_TRG7_SHIFT 6 /* WS_TRG7 */
1465#define ARIZONA_WS_TRG7_WIDTH 1 /* WS_TRG7 */
1466#define ARIZONA_WS_TRG6 0x0020 /* WS_TRG6 */
1467#define ARIZONA_WS_TRG6_MASK 0x0020 /* WS_TRG6 */
1468#define ARIZONA_WS_TRG6_SHIFT 5 /* WS_TRG6 */
1469#define ARIZONA_WS_TRG6_WIDTH 1 /* WS_TRG6 */
1470#define ARIZONA_WS_TRG5 0x0010 /* WS_TRG5 */
1471#define ARIZONA_WS_TRG5_MASK 0x0010 /* WS_TRG5 */
1472#define ARIZONA_WS_TRG5_SHIFT 4 /* WS_TRG5 */
1473#define ARIZONA_WS_TRG5_WIDTH 1 /* WS_TRG5 */
1474#define ARIZONA_WS_TRG4 0x0008 /* WS_TRG4 */
1475#define ARIZONA_WS_TRG4_MASK 0x0008 /* WS_TRG4 */
1476#define ARIZONA_WS_TRG4_SHIFT 3 /* WS_TRG4 */
1477#define ARIZONA_WS_TRG4_WIDTH 1 /* WS_TRG4 */
1478#define ARIZONA_WS_TRG3 0x0004 /* WS_TRG3 */
1479#define ARIZONA_WS_TRG3_MASK 0x0004 /* WS_TRG3 */
1480#define ARIZONA_WS_TRG3_SHIFT 2 /* WS_TRG3 */
1481#define ARIZONA_WS_TRG3_WIDTH 1 /* WS_TRG3 */
1482#define ARIZONA_WS_TRG2 0x0002 /* WS_TRG2 */
1483#define ARIZONA_WS_TRG2_MASK 0x0002 /* WS_TRG2 */
1484#define ARIZONA_WS_TRG2_SHIFT 1 /* WS_TRG2 */
1485#define ARIZONA_WS_TRG2_WIDTH 1 /* WS_TRG2 */
1486#define ARIZONA_WS_TRG1 0x0001 /* WS_TRG1 */
1487#define ARIZONA_WS_TRG1_MASK 0x0001 /* WS_TRG1 */
1488#define ARIZONA_WS_TRG1_SHIFT 0 /* WS_TRG1 */
1489#define ARIZONA_WS_TRG1_WIDTH 1 /* WS_TRG1 */
1490
1491/*
1434 * R97 (0x61) - Sample Rate Sequence Select 1 1492 * R97 (0x61) - Sample Rate Sequence Select 1
1435 */ 1493 */
1436#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */ 1494#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2325,6 +2383,9 @@
2325#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */ 2383#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */
2326#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */ 2384#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */
2327#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */ 2385#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */
2386#define WM8998_HP_RATE_MASK 0x0006 /* HP_RATE - [2:1] */
2387#define WM8998_HP_RATE_SHIFT 1 /* HP_RATE - [2:1] */
2388#define WM8998_HP_RATE_WIDTH 2 /* HP_RATE - [2:1] */
2328#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */ 2389#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */
2329#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */ 2390#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */
2330#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */ 2391#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */
@@ -2413,6 +2474,16 @@
2413#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */ 2474#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
2414 2475
2415/* 2476/*
2477 * R683 (0x2AB) - Mic Detect 4
2478 */
2479#define ARIZONA_MICDET_ADCVAL_DIFF_MASK 0xFF00 /* MICDET_ADCVAL_DIFF - [15:8] */
2480#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT 8 /* MICDET_ADCVAL_DIFF - [15:8] */
2481#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH 8 /* MICDET_ADCVAL_DIFF - [15:8] */
 2482#define ARIZONA_MICDET_ADCVAL_MASK 0x007F /* MICDET_ADCVAL - [6:0] */
 2483#define ARIZONA_MICDET_ADCVAL_SHIFT 0 /* MICDET_ADCVAL - [6:0] */
 2484#define ARIZONA_MICDET_ADCVAL_WIDTH 7 /* MICDET_ADCVAL - [6:0] */
2485
2486/*
2416 * R707 (0x2C3) - Mic noise mix control 1 2487 * R707 (0x2C3) - Mic noise mix control 1
2417 */ 2488 */
2418#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */ 2489#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
@@ -2528,6 +2599,12 @@
2528/* 2599/*
2529 * R785 (0x311) - ADC Digital Volume 1L 2600 * R785 (0x311) - ADC Digital Volume 1L
2530 */ 2601 */
2602#define ARIZONA_IN1L_SRC_MASK 0x4000 /* IN1L_SRC - [14] */
2603#define ARIZONA_IN1L_SRC_SHIFT 14 /* IN1L_SRC - [14] */
2604#define ARIZONA_IN1L_SRC_WIDTH 1 /* IN1L_SRC - [14] */
2605#define ARIZONA_IN1L_SRC_SE_MASK 0x2000 /* IN1L_SRC - [13] */
2606#define ARIZONA_IN1L_SRC_SE_SHIFT 13 /* IN1L_SRC - [13] */
2607#define ARIZONA_IN1L_SRC_SE_WIDTH 1 /* IN1L_SRC - [13] */
2531#define ARIZONA_IN_VU 0x0200 /* IN_VU */ 2608#define ARIZONA_IN_VU 0x0200 /* IN_VU */
2532#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ 2609#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
2533#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ 2610#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2560,6 +2637,12 @@
2560/* 2637/*
2561 * R789 (0x315) - ADC Digital Volume 1R 2638 * R789 (0x315) - ADC Digital Volume 1R
2562 */ 2639 */
2640#define ARIZONA_IN1R_SRC_MASK 0x4000 /* IN1R_SRC - [14] */
2641#define ARIZONA_IN1R_SRC_SHIFT 14 /* IN1R_SRC - [14] */
2642#define ARIZONA_IN1R_SRC_WIDTH 1 /* IN1R_SRC - [14] */
2643#define ARIZONA_IN1R_SRC_SE_MASK 0x2000 /* IN1R_SRC - [13] */
2644#define ARIZONA_IN1R_SRC_SE_SHIFT 13 /* IN1R_SRC - [13] */
2645#define ARIZONA_IN1R_SRC_SE_WIDTH 1 /* IN1R_SRC - [13] */
2563#define ARIZONA_IN_VU 0x0200 /* IN_VU */ 2646#define ARIZONA_IN_VU 0x0200 /* IN_VU */
2564#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ 2647#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
2565#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ 2648#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -2604,6 +2687,12 @@
2604/* 2687/*
2605 * R793 (0x319) - ADC Digital Volume 2L 2688 * R793 (0x319) - ADC Digital Volume 2L
2606 */ 2689 */
2690#define ARIZONA_IN2L_SRC_MASK 0x4000 /* IN2L_SRC - [14] */
2691#define ARIZONA_IN2L_SRC_SHIFT 14 /* IN2L_SRC - [14] */
2692#define ARIZONA_IN2L_SRC_WIDTH 1 /* IN2L_SRC - [14] */
2693#define ARIZONA_IN2L_SRC_SE_MASK 0x2000 /* IN2L_SRC - [13] */
2694#define ARIZONA_IN2L_SRC_SE_SHIFT 13 /* IN2L_SRC - [13] */
2695#define ARIZONA_IN2L_SRC_SE_WIDTH 1 /* IN2L_SRC - [13] */
2607#define ARIZONA_IN_VU 0x0200 /* IN_VU */ 2696#define ARIZONA_IN_VU 0x0200 /* IN_VU */
2608#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */ 2697#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
2609#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */ 2698#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
@@ -3412,11 +3501,45 @@
3412#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */ 3501#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
3413 3502
3414/* 3503/*
3504 * R1088 (0x440) - DRE Enable (WM8998)
3505 */
3506#define WM8998_DRE3L_ENA 0x0020 /* DRE3L_ENA */
3507#define WM8998_DRE3L_ENA_MASK 0x0020 /* DRE3L_ENA */
3508#define WM8998_DRE3L_ENA_SHIFT 5 /* DRE3L_ENA */
3509#define WM8998_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */
3510#define WM8998_DRE2L_ENA 0x0008 /* DRE2L_ENA */
3511#define WM8998_DRE2L_ENA_MASK 0x0008 /* DRE2L_ENA */
3512#define WM8998_DRE2L_ENA_SHIFT 3 /* DRE2L_ENA */
3513#define WM8998_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */
3514#define WM8998_DRE2R_ENA 0x0004 /* DRE2R_ENA */
3515#define WM8998_DRE2R_ENA_MASK 0x0004 /* DRE2R_ENA */
3516#define WM8998_DRE2R_ENA_SHIFT 2 /* DRE2R_ENA */
3517#define WM8998_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */
3518#define WM8998_DRE1L_ENA 0x0002 /* DRE1L_ENA */
3519#define WM8998_DRE1L_ENA_MASK 0x0002 /* DRE1L_ENA */
3520#define WM8998_DRE1L_ENA_SHIFT 1 /* DRE1L_ENA */
3521#define WM8998_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
3522#define WM8998_DRE1R_ENA 0x0001 /* DRE1R_ENA */
3523#define WM8998_DRE1R_ENA_MASK 0x0001 /* DRE1R_ENA */
3524#define WM8998_DRE1R_ENA_SHIFT 0 /* DRE1R_ENA */
3525#define WM8998_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */
3526
3527/*
3528 * R1089 (0x441) - DRE Control 1
3529 */
3530#define ARIZONA_DRE_ENV_TC_FAST_MASK 0x0F00 /* DRE_ENV_TC_FAST - [11:8] */
3531#define ARIZONA_DRE_ENV_TC_FAST_SHIFT 8 /* DRE_ENV_TC_FAST - [11:8] */
3532#define ARIZONA_DRE_ENV_TC_FAST_WIDTH 4 /* DRE_ENV_TC_FAST - [11:8] */
3533
3534/*
3415 * R1090 (0x442) - DRE Control 2 3535 * R1090 (0x442) - DRE Control 2
3416 */ 3536 */
3417#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */ 3537#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */
3418#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */ 3538#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */
3419#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */ 3539#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */
3540#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK 0x000F /* DRE_ALOG_VOL_DELAY - [3:0] */
3541#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT 0 /* DRE_ALOG_VOL_DELAY - [3:0] */
3542#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH 4 /* DRE_ALOG_VOL_DELAY - [3:0] */
3420 3543
3421/* 3544/*
3422 * R1091 (0x443) - DRE Control 3 3545 * R1091 (0x443) - DRE Control 3
@@ -3428,6 +3551,49 @@
3428#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */ 3551#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */
3429#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */ 3552#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */
3430 3553
 3554/* R1096 (0x448) - EDRE Enable
3555 */
3556#define ARIZONA_EDRE_OUT4L_THR2_ENA 0x0200 /* EDRE_OUT4L_THR2_ENA */
3557#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK 0x0200 /* EDRE_OUT4L_THR2_ENA */
3558#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT 9 /* EDRE_OUT4L_THR2_ENA */
3559#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH 1 /* EDRE_OUT4L_THR2_ENA */
3560#define ARIZONA_EDRE_OUT4R_THR2_ENA 0x0100 /* EDRE_OUT4R_THR2_ENA */
3561#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK 0x0100 /* EDRE_OUT4R_THR2_ENA */
3562#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT 8 /* EDRE_OUT4R_THR2_ENA */
3563#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH 1 /* EDRE_OUT4R_THR2_ENA */
3564#define ARIZONA_EDRE_OUT4L_THR1_ENA 0x0080 /* EDRE_OUT4L_THR1_ENA */
3565#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK 0x0080 /* EDRE_OUT4L_THR1_ENA */
3566#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT 7 /* EDRE_OUT4L_THR1_ENA */
3567#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH 1 /* EDRE_OUT4L_THR1_ENA */
3568#define ARIZONA_EDRE_OUT4R_THR1_ENA 0x0040 /* EDRE_OUT4R_THR1_ENA */
3569#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK 0x0040 /* EDRE_OUT4R_THR1_ENA */
3570#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT 6 /* EDRE_OUT4R_THR1_ENA */
3571#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH 1 /* EDRE_OUT4R_THR1_ENA */
3572#define ARIZONA_EDRE_OUT3L_THR1_ENA 0x0020 /* EDRE_OUT3L_THR1_ENA */
3573#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK 0x0020 /* EDRE_OUT3L_THR1_ENA */
3574#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT 5 /* EDRE_OUT3L_THR1_ENA */
3575#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH 1 /* EDRE_OUT3L_THR1_ENA */
3576#define ARIZONA_EDRE_OUT3R_THR1_ENA 0x0010 /* EDRE_OUT3R_THR1_ENA */
3577#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK 0x0010 /* EDRE_OUT3R_THR1_ENA */
3578#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT 4 /* EDRE_OUT3R_THR1_ENA */
3579#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH 1 /* EDRE_OUT3R_THR1_ENA */
3580#define ARIZONA_EDRE_OUT2L_THR1_ENA 0x0008 /* EDRE_OUT2L_THR1_ENA */
3581#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK 0x0008 /* EDRE_OUT2L_THR1_ENA */
3582#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT 3 /* EDRE_OUT2L_THR1_ENA */
3583#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH 1 /* EDRE_OUT2L_THR1_ENA */
3584#define ARIZONA_EDRE_OUT2R_THR1_ENA 0x0004 /* EDRE_OUT2R_THR1_ENA */
3585#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK 0x0004 /* EDRE_OUT2R_THR1_ENA */
3586#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT 2 /* EDRE_OUT2R_THR1_ENA */
3587#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH 1 /* EDRE_OUT2R_THR1_ENA */
3588#define ARIZONA_EDRE_OUT1L_THR1_ENA 0x0002 /* EDRE_OUT1L_THR1_ENA */
3589#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK 0x0002 /* EDRE_OUT1L_THR1_ENA */
3590#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT 1 /* EDRE_OUT1L_THR1_ENA */
3591#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH 1 /* EDRE_OUT1L_THR1_ENA */
3592#define ARIZONA_EDRE_OUT1R_THR1_ENA 0x0001 /* EDRE_OUT1R_THR1_ENA */
3593#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK 0x0001 /* EDRE_OUT1R_THR1_ENA */
3594#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT 0 /* EDRE_OUT1R_THR1_ENA */
3595#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH 1 /* EDRE_OUT1R_THR1_ENA */
3596
3431/* 3597/*
3432 * R1104 (0x450) - DAC AEC Control 1 3598 * R1104 (0x450) - DAC AEC Control 1
3433 */ 3599 */
@@ -4308,6 +4474,86 @@
4308#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */ 4474#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */
4309 4475
4310/* 4476/*
4477 * R1474 (0x5C2) - SPD1 TX Control
4478 */
4479#define ARIZONA_SPD1_VAL2 0x2000 /* SPD1_VAL2 */
4480#define ARIZONA_SPD1_VAL2_MASK 0x2000 /* SPD1_VAL2 */
4481#define ARIZONA_SPD1_VAL2_SHIFT 13 /* SPD1_VAL2 */
4482#define ARIZONA_SPD1_VAL2_WIDTH 1 /* SPD1_VAL2 */
4483#define ARIZONA_SPD1_VAL1 0x1000 /* SPD1_VAL1 */
4484#define ARIZONA_SPD1_VAL1_MASK 0x1000 /* SPD1_VAL1 */
4485#define ARIZONA_SPD1_VAL1_SHIFT 12 /* SPD1_VAL1 */
4486#define ARIZONA_SPD1_VAL1_WIDTH 1 /* SPD1_VAL1 */
4487#define ARIZONA_SPD1_RATE_MASK 0x00F0 /* SPD1_RATE */
4488#define ARIZONA_SPD1_RATE_SHIFT 4 /* SPD1_RATE */
4489#define ARIZONA_SPD1_RATE_WIDTH 4 /* SPD1_RATE */
4490#define ARIZONA_SPD1_ENA 0x0001 /* SPD1_ENA */
4491#define ARIZONA_SPD1_ENA_MASK 0x0001 /* SPD1_ENA */
4492#define ARIZONA_SPD1_ENA_SHIFT 0 /* SPD1_ENA */
4493#define ARIZONA_SPD1_ENA_WIDTH 1 /* SPD1_ENA */
4494
4495/*
4496 * R1475 (0x5C3) - SPD1 TX Channel Status 1
4497 */
4498#define ARIZONA_SPD1_CATCODE_MASK 0xFF00 /* SPD1_CATCODE */
4499#define ARIZONA_SPD1_CATCODE_SHIFT 8 /* SPD1_CATCODE */
4500#define ARIZONA_SPD1_CATCODE_WIDTH 8 /* SPD1_CATCODE */
4501#define ARIZONA_SPD1_CHSTMODE_MASK 0x00C0 /* SPD1_CHSTMODE */
4502#define ARIZONA_SPD1_CHSTMODE_SHIFT 6 /* SPD1_CHSTMODE */
4503#define ARIZONA_SPD1_CHSTMODE_WIDTH 2 /* SPD1_CHSTMODE */
4504#define ARIZONA_SPD1_PREEMPH_MASK 0x0038 /* SPD1_PREEMPH */
4505#define ARIZONA_SPD1_PREEMPH_SHIFT 3 /* SPD1_PREEMPH */
4506#define ARIZONA_SPD1_PREEMPH_WIDTH 3 /* SPD1_PREEMPH */
4507#define ARIZONA_SPD1_NOCOPY 0x0004 /* SPD1_NOCOPY */
4508#define ARIZONA_SPD1_NOCOPY_MASK 0x0004 /* SPD1_NOCOPY */
4509#define ARIZONA_SPD1_NOCOPY_SHIFT 2 /* SPD1_NOCOPY */
4510#define ARIZONA_SPD1_NOCOPY_WIDTH 1 /* SPD1_NOCOPY */
4511#define ARIZONA_SPD1_NOAUDIO 0x0002 /* SPD1_NOAUDIO */
4512#define ARIZONA_SPD1_NOAUDIO_MASK 0x0002 /* SPD1_NOAUDIO */
4513#define ARIZONA_SPD1_NOAUDIO_SHIFT 1 /* SPD1_NOAUDIO */
4514#define ARIZONA_SPD1_NOAUDIO_WIDTH 1 /* SPD1_NOAUDIO */
4515#define ARIZONA_SPD1_PRO 0x0001 /* SPD1_PRO */
4516#define ARIZONA_SPD1_PRO_MASK 0x0001 /* SPD1_PRO */
4517#define ARIZONA_SPD1_PRO_SHIFT 0 /* SPD1_PRO */
4518#define ARIZONA_SPD1_PRO_WIDTH 1 /* SPD1_PRO */
4519
4520/*
 4521 * R1476 (0x5C4) - SPD1 TX Channel Status 2
4522 */
4523#define ARIZONA_SPD1_FREQ_MASK 0xF000 /* SPD1_FREQ */
4524#define ARIZONA_SPD1_FREQ_SHIFT 12 /* SPD1_FREQ */
4525#define ARIZONA_SPD1_FREQ_WIDTH 4 /* SPD1_FREQ */
4526#define ARIZONA_SPD1_CHNUM2_MASK 0x0F00 /* SPD1_CHNUM2 */
4527#define ARIZONA_SPD1_CHNUM2_SHIFT 8 /* SPD1_CHNUM2 */
4528#define ARIZONA_SPD1_CHNUM2_WIDTH 4 /* SPD1_CHNUM2 */
4529#define ARIZONA_SPD1_CHNUM1_MASK 0x00F0 /* SPD1_CHNUM1 */
4530#define ARIZONA_SPD1_CHNUM1_SHIFT 4 /* SPD1_CHNUM1 */
4531#define ARIZONA_SPD1_CHNUM1_WIDTH 4 /* SPD1_CHNUM1 */
4532#define ARIZONA_SPD1_SRCNUM_MASK 0x000F /* SPD1_SRCNUM */
4533#define ARIZONA_SPD1_SRCNUM_SHIFT 0 /* SPD1_SRCNUM */
4534#define ARIZONA_SPD1_SRCNUM_WIDTH 4 /* SPD1_SRCNUM */
4535
4536/*
 4537 * R1477 (0x5C5) - SPD1 TX Channel Status 3
4538 */
4539#define ARIZONA_SPD1_ORGSAMP_MASK 0x0F00 /* SPD1_ORGSAMP */
4540#define ARIZONA_SPD1_ORGSAMP_SHIFT 8 /* SPD1_ORGSAMP */
4541#define ARIZONA_SPD1_ORGSAMP_WIDTH 4 /* SPD1_ORGSAMP */
4542#define ARIZONA_SPD1_TXWL_MASK 0x00E0 /* SPD1_TXWL */
4543#define ARIZONA_SPD1_TXWL_SHIFT 5 /* SPD1_TXWL */
4544#define ARIZONA_SPD1_TXWL_WIDTH 3 /* SPD1_TXWL */
4545#define ARIZONA_SPD1_MAXWL 0x0010 /* SPD1_MAXWL */
4546#define ARIZONA_SPD1_MAXWL_MASK 0x0010 /* SPD1_MAXWL */
4547#define ARIZONA_SPD1_MAXWL_SHIFT 4 /* SPD1_MAXWL */
4548#define ARIZONA_SPD1_MAXWL_WIDTH 1 /* SPD1_MAXWL */
4549#define ARIZONA_SPD1_CS31_30_MASK 0x000C /* SPD1_CS31_30 */
4550#define ARIZONA_SPD1_CS31_30_SHIFT 2 /* SPD1_CS31_30 */
4551#define ARIZONA_SPD1_CS31_30_WIDTH 2 /* SPD1_CS31_30 */
4552#define ARIZONA_SPD1_CLKACU_MASK 0x0003 /* SPD1_CLKACU */
 4553#define ARIZONA_SPD1_CLKACU_SHIFT 0 /* SPD1_CLKACU */
 4554#define ARIZONA_SPD1_CLKACU_WIDTH 2 /* SPD1_CLKACU */
4555
4556/*
4311 * R1507 (0x5E3) - SLIMbus Framer Ref Gear 4557 * R1507 (0x5E3) - SLIMbus Framer Ref Gear
4312 */ 4558 */
4313#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */ 4559#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */
@@ -4562,6 +4808,13 @@
4562#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */ 4808#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */
4563 4809
4564/* 4810/*
4811 * R3096 (0xC18) - GP Switch 1
4812 */
4813#define ARIZONA_SW1_MODE_MASK 0x0003 /* SW1_MODE - [1:0] */
4814#define ARIZONA_SW1_MODE_SHIFT 0 /* SW1_MODE - [1:0] */
4815#define ARIZONA_SW1_MODE_WIDTH 2 /* SW1_MODE - [1:0] */
4816
4817/*
4565 * R3104 (0xC20) - Misc Pad Ctrl 1 4818 * R3104 (0xC20) - Misc Pad Ctrl 1
4566 */ 4819 */
4567#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */ 4820#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */
@@ -6301,6 +6554,10 @@
6301/* 6554/*
6302 * R3366 (0xD26) - Interrupt Raw Status 8 6555 * R3366 (0xD26) - Interrupt Raw Status 8
6303 */ 6556 */
6557#define ARIZONA_SPDIF_OVERCLOCKED_STS 0x8000 /* SPDIF_OVERCLOCKED_STS */
6558#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK 0x8000 /* SPDIF_OVERCLOCKED_STS */
6559#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT 15 /* SPDIF_OVERCLOCKED_STS */
6560#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH 1 /* SPDIF_OVERCLOCKED_STS */
6304#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */ 6561#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */
6305#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */ 6562#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */
6306#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */ 6563#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */
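All of the bitfield macros added above follow this header's existing _MASK/_SHIFT/_WIDTH convention, so they are consumed exactly like the older ones. A minimal sketch, assuming the caller already holds the device's regmap handle (the helper name here is illustrative, not from this patch):

#include <linux/mfd/arizona/registers.h>
#include <linux/regmap.h>

/* Sketch only: read the newly added Mic Detect 4 register and extract
 * the MICDET_ADCVAL field with the macros defined above. */
static int example_read_micdet_adcval(struct regmap *map, unsigned int *adcval)
{
	unsigned int reg;
	int ret;

	ret = regmap_read(map, ARIZONA_MIC_DETECT_4, &reg);
	if (ret)
		return ret;

	*adcval = (reg & ARIZONA_MICDET_ADCVAL_MASK) >> ARIZONA_MICDET_ADCVAL_SHIFT;
	return 0;
}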
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index c2aa853fb412..cc8ad1e1a307 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,7 +12,8 @@
12#define __LINUX_MFD_AXP20X_H 12#define __LINUX_MFD_AXP20X_H
13 13
14enum { 14enum {
15 AXP202_ID = 0, 15 AXP152_ID = 0,
16 AXP202_ID,
16 AXP209_ID, 17 AXP209_ID,
17 AXP221_ID, 18 AXP221_ID,
18 AXP288_ID, 19 AXP288_ID,
@@ -22,6 +23,24 @@ enum {
22#define AXP20X_DATACACHE(m) (0x04 + (m)) 23#define AXP20X_DATACACHE(m) (0x04 + (m))
23 24
24/* Power supply */ 25/* Power supply */
26#define AXP152_PWR_OP_MODE 0x01
27#define AXP152_LDO3456_DC1234_CTRL 0x12
28#define AXP152_ALDO_OP_MODE 0x13
29#define AXP152_LDO0_CTRL 0x15
30#define AXP152_DCDC2_V_OUT 0x23
31#define AXP152_DCDC2_V_SCAL 0x25
32#define AXP152_DCDC1_V_OUT 0x26
33#define AXP152_DCDC3_V_OUT 0x27
34#define AXP152_ALDO12_V_OUT 0x28
35#define AXP152_DLDO1_V_OUT 0x29
36#define AXP152_DLDO2_V_OUT 0x2a
37#define AXP152_DCDC4_V_OUT 0x2b
38#define AXP152_V_OFF 0x31
39#define AXP152_OFF_CTRL 0x32
40#define AXP152_PEK_KEY 0x36
41#define AXP152_DCDC_FREQ 0x37
42#define AXP152_DCDC_MODE 0x80
43
25#define AXP20X_PWR_INPUT_STATUS 0x00 44#define AXP20X_PWR_INPUT_STATUS 0x00
26#define AXP20X_PWR_OP_MODE 0x01 45#define AXP20X_PWR_OP_MODE 0x01
27#define AXP20X_USB_OTG_STATUS 0x02 46#define AXP20X_USB_OTG_STATUS 0x02
@@ -69,6 +88,13 @@ enum {
69#define AXP22X_CHRG_CTRL3 0x35 88#define AXP22X_CHRG_CTRL3 0x35
70 89
71/* Interrupt */ 90/* Interrupt */
91#define AXP152_IRQ1_EN 0x40
92#define AXP152_IRQ2_EN 0x41
93#define AXP152_IRQ3_EN 0x42
94#define AXP152_IRQ1_STATE 0x48
95#define AXP152_IRQ2_STATE 0x49
96#define AXP152_IRQ3_STATE 0x4a
97
72#define AXP20X_IRQ1_EN 0x40 98#define AXP20X_IRQ1_EN 0x40
73#define AXP20X_IRQ2_EN 0x41 99#define AXP20X_IRQ2_EN 0x41
74#define AXP20X_IRQ3_EN 0x42 100#define AXP20X_IRQ3_EN 0x42
@@ -127,6 +153,19 @@ enum {
127#define AXP22X_PWREN_CTRL2 0x8d 153#define AXP22X_PWREN_CTRL2 0x8d
128 154
129/* GPIO */ 155/* GPIO */
156#define AXP152_GPIO0_CTRL 0x90
157#define AXP152_GPIO1_CTRL 0x91
158#define AXP152_GPIO2_CTRL 0x92
159#define AXP152_GPIO3_CTRL 0x93
160#define AXP152_LDOGPIO2_V_OUT 0x96
161#define AXP152_GPIO_INPUT 0x97
162#define AXP152_PWM0_FREQ_X 0x98
163#define AXP152_PWM0_FREQ_Y 0x99
164#define AXP152_PWM0_DUTY_CYCLE 0x9a
165#define AXP152_PWM1_FREQ_X 0x9b
166#define AXP152_PWM1_FREQ_Y 0x9c
167#define AXP152_PWM1_DUTY_CYCLE 0x9d
168
130#define AXP20X_GPIO0_CTRL 0x90 169#define AXP20X_GPIO0_CTRL 0x90
131#define AXP20X_LDO5_V_OUT 0x91 170#define AXP20X_LDO5_V_OUT 0x91
132#define AXP20X_GPIO1_CTRL 0x92 171#define AXP20X_GPIO1_CTRL 0x92
@@ -151,6 +190,12 @@ enum {
151#define AXP20X_CC_CTRL 0xb8 190#define AXP20X_CC_CTRL 0xb8
152#define AXP20X_FG_RES 0xb9 191#define AXP20X_FG_RES 0xb9
153 192
193/* OCV */
194#define AXP20X_RDC_H 0xba
195#define AXP20X_RDC_L 0xbb
196#define AXP20X_OCV(m) (0xc0 + (m))
197#define AXP20X_OCV_MAX 0xf
198
154/* AXP22X specific registers */ 199/* AXP22X specific registers */
155#define AXP22X_BATLOW_THRES1 0xe6 200#define AXP22X_BATLOW_THRES1 0xe6
156 201
@@ -218,6 +263,26 @@ enum {
218 263
219/* IRQs */ 264/* IRQs */
220enum { 265enum {
266 AXP152_IRQ_LDO0IN_CONNECT = 1,
267 AXP152_IRQ_LDO0IN_REMOVAL,
268 AXP152_IRQ_ALDO0IN_CONNECT,
269 AXP152_IRQ_ALDO0IN_REMOVAL,
270 AXP152_IRQ_DCDC1_V_LOW,
271 AXP152_IRQ_DCDC2_V_LOW,
272 AXP152_IRQ_DCDC3_V_LOW,
273 AXP152_IRQ_DCDC4_V_LOW,
274 AXP152_IRQ_PEK_SHORT,
275 AXP152_IRQ_PEK_LONG,
276 AXP152_IRQ_TIMER,
277 AXP152_IRQ_PEK_RIS_EDGE,
278 AXP152_IRQ_PEK_FAL_EDGE,
279 AXP152_IRQ_GPIO3_INPUT,
280 AXP152_IRQ_GPIO2_INPUT,
281 AXP152_IRQ_GPIO1_INPUT,
282 AXP152_IRQ_GPIO0_INPUT,
283};
284
285enum {
221 AXP20X_IRQ_ACIN_OVER_V = 1, 286 AXP20X_IRQ_ACIN_OVER_V = 1,
222 AXP20X_IRQ_ACIN_PLUGIN, 287 AXP20X_IRQ_ACIN_PLUGIN,
223 AXP20X_IRQ_ACIN_REMOVAL, 288 AXP20X_IRQ_ACIN_REMOVAL,
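The new AXP20X_OCV(m)/AXP20X_OCV_MAX helpers describe a contiguous block of OCV curve registers starting at 0xc0. A minimal sketch of walking that block, assuming the PMIC's regmap has already been set up by the core driver (the function name is illustrative):

#include <linux/mfd/axp20x.h>
#include <linux/printk.h>
#include <linux/regmap.h>

/* Sketch only: dump the sixteen OCV curve registers (0xc0..0xcf)
 * through the new AXP20X_OCV() helper; "map" is assumed to be the
 * regmap created by the axp20x core driver. */
static void example_dump_ocv_curve(struct regmap *map)
{
	unsigned int i, val;

	for (i = 0; i <= AXP20X_OCV_MAX; i++) {
		if (regmap_read(map, AXP20X_OCV(i), &val))
			break;
		pr_debug("OCV[%u] = 0x%02x\n", i, val);
	}
}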
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
new file mode 100644
index 000000000000..376ba84366a0
--- /dev/null
+++ b/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2015 Dialog Semiconductor Ltd.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __MFD_DA9062_CORE_H__
16#define __MFD_DA9062_CORE_H__
17
18#include <linux/interrupt.h>
19#include <linux/mfd/da9062/registers.h>
20
21/* Interrupts */
22enum da9062_irqs {
23 /* IRQ A */
24 DA9062_IRQ_ONKEY,
25 DA9062_IRQ_ALARM,
26 DA9062_IRQ_TICK,
27 DA9062_IRQ_WDG_WARN,
28 DA9062_IRQ_SEQ_RDY,
 29 /* IRQ B */
30 DA9062_IRQ_TEMP,
31 DA9062_IRQ_LDO_LIM,
32 DA9062_IRQ_DVC_RDY,
33 DA9062_IRQ_VDD_WARN,
34 /* IRQ C */
35 DA9062_IRQ_GPI0,
36 DA9062_IRQ_GPI1,
37 DA9062_IRQ_GPI2,
38 DA9062_IRQ_GPI3,
39 DA9062_IRQ_GPI4,
40
41 DA9062_NUM_IRQ,
42};
43
44struct da9062 {
45 struct device *dev;
46 struct regmap *regmap;
47 struct regmap_irq_chip_data *regmap_irq;
48};
49
50#endif /* __MFD_DA9062_CORE_H__ */
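Since struct da9062 exposes the regmap_irq chip data directly, child drivers presumably translate the enum da9062_irqs values into virtual IRQ numbers themselves. A minimal sketch under that assumption (the function name is illustrative):

#include <linux/mfd/da9062/core.h>
#include <linux/regmap.h>

/* Sketch only: resolve the RTC alarm line from enum da9062_irqs into a
 * Linux virq, assuming "chip" was initialised by the da9062 core MFD. */
static int example_da9062_alarm_virq(struct da9062 *chip)
{
	return regmap_irq_get_virq(chip->regmap_irq, DA9062_IRQ_ALARM);
}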
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
new file mode 100644
index 000000000000..97790d1b02c5
--- /dev/null
+++ b/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
1/*
 2 * registers.h - Register definitions for DA9062
3 * Copyright (C) 2015 Dialog Semiconductor Ltd.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DA9062_H__
17#define __DA9062_H__
18
19#define DA9062_PMIC_DEVICE_ID 0x62
20#define DA9062_PMIC_VARIANT_MRC_AA 0x01
21
22#define DA9062_I2C_PAGE_SEL_SHIFT 1
23
24/*
25 * Registers
26 */
27
28#define DA9062AA_PAGE_CON 0x000
29#define DA9062AA_STATUS_A 0x001
30#define DA9062AA_STATUS_B 0x002
31#define DA9062AA_STATUS_D 0x004
32#define DA9062AA_FAULT_LOG 0x005
33#define DA9062AA_EVENT_A 0x006
34#define DA9062AA_EVENT_B 0x007
35#define DA9062AA_EVENT_C 0x008
36#define DA9062AA_IRQ_MASK_A 0x00A
37#define DA9062AA_IRQ_MASK_B 0x00B
38#define DA9062AA_IRQ_MASK_C 0x00C
39#define DA9062AA_CONTROL_A 0x00E
40#define DA9062AA_CONTROL_B 0x00F
41#define DA9062AA_CONTROL_C 0x010
42#define DA9062AA_CONTROL_D 0x011
43#define DA9062AA_CONTROL_E 0x012
44#define DA9062AA_CONTROL_F 0x013
45#define DA9062AA_PD_DIS 0x014
46#define DA9062AA_GPIO_0_1 0x015
47#define DA9062AA_GPIO_2_3 0x016
48#define DA9062AA_GPIO_4 0x017
49#define DA9062AA_GPIO_WKUP_MODE 0x01C
50#define DA9062AA_GPIO_MODE0_4 0x01D
51#define DA9062AA_GPIO_OUT0_2 0x01E
52#define DA9062AA_GPIO_OUT3_4 0x01F
53#define DA9062AA_BUCK2_CONT 0x020
54#define DA9062AA_BUCK1_CONT 0x021
55#define DA9062AA_BUCK4_CONT 0x022
56#define DA9062AA_BUCK3_CONT 0x024
57#define DA9062AA_LDO1_CONT 0x026
58#define DA9062AA_LDO2_CONT 0x027
59#define DA9062AA_LDO3_CONT 0x028
60#define DA9062AA_LDO4_CONT 0x029
61#define DA9062AA_DVC_1 0x032
62#define DA9062AA_COUNT_S 0x040
63#define DA9062AA_COUNT_MI 0x041
64#define DA9062AA_COUNT_H 0x042
65#define DA9062AA_COUNT_D 0x043
66#define DA9062AA_COUNT_MO 0x044
67#define DA9062AA_COUNT_Y 0x045
68#define DA9062AA_ALARM_S 0x046
69#define DA9062AA_ALARM_MI 0x047
70#define DA9062AA_ALARM_H 0x048
71#define DA9062AA_ALARM_D 0x049
72#define DA9062AA_ALARM_MO 0x04A
73#define DA9062AA_ALARM_Y 0x04B
74#define DA9062AA_SECOND_A 0x04C
75#define DA9062AA_SECOND_B 0x04D
76#define DA9062AA_SECOND_C 0x04E
77#define DA9062AA_SECOND_D 0x04F
78#define DA9062AA_SEQ 0x081
79#define DA9062AA_SEQ_TIMER 0x082
80#define DA9062AA_ID_2_1 0x083
81#define DA9062AA_ID_4_3 0x084
82#define DA9062AA_ID_12_11 0x088
83#define DA9062AA_ID_14_13 0x089
84#define DA9062AA_ID_16_15 0x08A
85#define DA9062AA_ID_22_21 0x08D
86#define DA9062AA_ID_24_23 0x08E
87#define DA9062AA_ID_26_25 0x08F
88#define DA9062AA_ID_28_27 0x090
89#define DA9062AA_ID_30_29 0x091
90#define DA9062AA_ID_32_31 0x092
91#define DA9062AA_SEQ_A 0x095
92#define DA9062AA_SEQ_B 0x096
93#define DA9062AA_WAIT 0x097
94#define DA9062AA_EN_32K 0x098
95#define DA9062AA_RESET 0x099
96#define DA9062AA_BUCK_ILIM_A 0x09A
97#define DA9062AA_BUCK_ILIM_B 0x09B
98#define DA9062AA_BUCK_ILIM_C 0x09C
99#define DA9062AA_BUCK2_CFG 0x09D
100#define DA9062AA_BUCK1_CFG 0x09E
101#define DA9062AA_BUCK4_CFG 0x09F
102#define DA9062AA_BUCK3_CFG 0x0A0
103#define DA9062AA_VBUCK2_A 0x0A3
104#define DA9062AA_VBUCK1_A 0x0A4
105#define DA9062AA_VBUCK4_A 0x0A5
106#define DA9062AA_VBUCK3_A 0x0A7
107#define DA9062AA_VLDO1_A 0x0A9
108#define DA9062AA_VLDO2_A 0x0AA
109#define DA9062AA_VLDO3_A 0x0AB
110#define DA9062AA_VLDO4_A 0x0AC
111#define DA9062AA_VBUCK2_B 0x0B4
112#define DA9062AA_VBUCK1_B 0x0B5
113#define DA9062AA_VBUCK4_B 0x0B6
114#define DA9062AA_VBUCK3_B 0x0B8
115#define DA9062AA_VLDO1_B 0x0BA
116#define DA9062AA_VLDO2_B 0x0BB
117#define DA9062AA_VLDO3_B 0x0BC
118#define DA9062AA_VLDO4_B 0x0BD
119#define DA9062AA_BBAT_CONT 0x0C5
120#define DA9062AA_INTERFACE 0x105
121#define DA9062AA_CONFIG_A 0x106
122#define DA9062AA_CONFIG_B 0x107
123#define DA9062AA_CONFIG_C 0x108
124#define DA9062AA_CONFIG_D 0x109
125#define DA9062AA_CONFIG_E 0x10A
126#define DA9062AA_CONFIG_G 0x10C
127#define DA9062AA_CONFIG_H 0x10D
128#define DA9062AA_CONFIG_I 0x10E
129#define DA9062AA_CONFIG_J 0x10F
130#define DA9062AA_CONFIG_K 0x110
131#define DA9062AA_CONFIG_M 0x112
132#define DA9062AA_TRIM_CLDR 0x120
133#define DA9062AA_GP_ID_0 0x121
134#define DA9062AA_GP_ID_1 0x122
135#define DA9062AA_GP_ID_2 0x123
136#define DA9062AA_GP_ID_3 0x124
137#define DA9062AA_GP_ID_4 0x125
138#define DA9062AA_GP_ID_5 0x126
139#define DA9062AA_GP_ID_6 0x127
140#define DA9062AA_GP_ID_7 0x128
141#define DA9062AA_GP_ID_8 0x129
142#define DA9062AA_GP_ID_9 0x12A
143#define DA9062AA_GP_ID_10 0x12B
144#define DA9062AA_GP_ID_11 0x12C
145#define DA9062AA_GP_ID_12 0x12D
146#define DA9062AA_GP_ID_13 0x12E
147#define DA9062AA_GP_ID_14 0x12F
148#define DA9062AA_GP_ID_15 0x130
149#define DA9062AA_GP_ID_16 0x131
150#define DA9062AA_GP_ID_17 0x132
151#define DA9062AA_GP_ID_18 0x133
152#define DA9062AA_GP_ID_19 0x134
153#define DA9062AA_DEVICE_ID 0x181
154#define DA9062AA_VARIANT_ID 0x182
155#define DA9062AA_CUSTOMER_ID 0x183
156#define DA9062AA_CONFIG_ID 0x184
157
158/*
159 * Bit fields
160 */
161
162/* DA9062AA_PAGE_CON = 0x000 */
163#define DA9062AA_PAGE_SHIFT 0
164#define DA9062AA_PAGE_MASK 0x3f
165#define DA9062AA_WRITE_MODE_SHIFT 6
166#define DA9062AA_WRITE_MODE_MASK BIT(6)
167#define DA9062AA_REVERT_SHIFT 7
168#define DA9062AA_REVERT_MASK BIT(7)
169
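The register addresses above run past a single 8-bit page (up to DA9062AA_CONFIG_ID at 0x184), and PAGE_CON at 0x000 is the page selector, so the core driver is presumably expected to map the device through a paged regmap range. A hedged sketch of that kind of configuration; the window length and selector usage are illustrative assumptions, not taken from this header:

#include <linux/mfd/da9062/registers.h>
#include <linux/regmap.h>

/* Sketch only: describe the 0x000-0x184 register space as one paged
 * range selected through PAGE_CON; the window/selector values below
 * are assumptions for illustration. */
static const struct regmap_range_cfg example_da9062_range_cfg[] = {
	{
		.range_min	= DA9062AA_PAGE_CON,
		.range_max	= DA9062AA_CONFIG_ID,
		.selector_reg	= DA9062AA_PAGE_CON,
		.selector_mask	= DA9062AA_PAGE_MASK,
		.selector_shift	= DA9062_I2C_PAGE_SEL_SHIFT,
		.window_start	= 0,
		.window_len	= 256,
	},
};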
170/* DA9062AA_STATUS_A = 0x001 */
171#define DA9062AA_NONKEY_SHIFT 0
172#define DA9062AA_NONKEY_MASK 0x01
173#define DA9062AA_DVC_BUSY_SHIFT 2
174#define DA9062AA_DVC_BUSY_MASK BIT(2)
175
176/* DA9062AA_STATUS_B = 0x002 */
177#define DA9062AA_GPI0_SHIFT 0
178#define DA9062AA_GPI0_MASK 0x01
179#define DA9062AA_GPI1_SHIFT 1
180#define DA9062AA_GPI1_MASK BIT(1)
181#define DA9062AA_GPI2_SHIFT 2
182#define DA9062AA_GPI2_MASK BIT(2)
183#define DA9062AA_GPI3_SHIFT 3
184#define DA9062AA_GPI3_MASK BIT(3)
185#define DA9062AA_GPI4_SHIFT 4
186#define DA9062AA_GPI4_MASK BIT(4)
187
188/* DA9062AA_STATUS_D = 0x004 */
189#define DA9062AA_LDO1_ILIM_SHIFT 0
190#define DA9062AA_LDO1_ILIM_MASK 0x01
191#define DA9062AA_LDO2_ILIM_SHIFT 1
192#define DA9062AA_LDO2_ILIM_MASK BIT(1)
193#define DA9062AA_LDO3_ILIM_SHIFT 2
194#define DA9062AA_LDO3_ILIM_MASK BIT(2)
195#define DA9062AA_LDO4_ILIM_SHIFT 3
196#define DA9062AA_LDO4_ILIM_MASK BIT(3)
197
198/* DA9062AA_FAULT_LOG = 0x005 */
199#define DA9062AA_TWD_ERROR_SHIFT 0
200#define DA9062AA_TWD_ERROR_MASK 0x01
201#define DA9062AA_POR_SHIFT 1
202#define DA9062AA_POR_MASK BIT(1)
203#define DA9062AA_VDD_FAULT_SHIFT 2
204#define DA9062AA_VDD_FAULT_MASK BIT(2)
205#define DA9062AA_VDD_START_SHIFT 3
206#define DA9062AA_VDD_START_MASK BIT(3)
207#define DA9062AA_TEMP_CRIT_SHIFT 4
208#define DA9062AA_TEMP_CRIT_MASK BIT(4)
209#define DA9062AA_KEY_RESET_SHIFT 5
210#define DA9062AA_KEY_RESET_MASK BIT(5)
211#define DA9062AA_NSHUTDOWN_SHIFT 6
212#define DA9062AA_NSHUTDOWN_MASK BIT(6)
213#define DA9062AA_WAIT_SHUT_SHIFT 7
214#define DA9062AA_WAIT_SHUT_MASK BIT(7)
215
216/* DA9062AA_EVENT_A = 0x006 */
217#define DA9062AA_E_NONKEY_SHIFT 0
218#define DA9062AA_E_NONKEY_MASK 0x01
219#define DA9062AA_E_ALARM_SHIFT 1
220#define DA9062AA_E_ALARM_MASK BIT(1)
221#define DA9062AA_E_TICK_SHIFT 2
222#define DA9062AA_E_TICK_MASK BIT(2)
223#define DA9062AA_E_WDG_WARN_SHIFT 3
224#define DA9062AA_E_WDG_WARN_MASK BIT(3)
225#define DA9062AA_E_SEQ_RDY_SHIFT 4
226#define DA9062AA_E_SEQ_RDY_MASK BIT(4)
227#define DA9062AA_EVENTS_B_SHIFT 5
228#define DA9062AA_EVENTS_B_MASK BIT(5)
229#define DA9062AA_EVENTS_C_SHIFT 6
230#define DA9062AA_EVENTS_C_MASK BIT(6)
231
232/* DA9062AA_EVENT_B = 0x007 */
233#define DA9062AA_E_TEMP_SHIFT 1
234#define DA9062AA_E_TEMP_MASK BIT(1)
235#define DA9062AA_E_LDO_LIM_SHIFT 3
236#define DA9062AA_E_LDO_LIM_MASK BIT(3)
237#define DA9062AA_E_DVC_RDY_SHIFT 5
238#define DA9062AA_E_DVC_RDY_MASK BIT(5)
239#define DA9062AA_E_VDD_WARN_SHIFT 7
240#define DA9062AA_E_VDD_WARN_MASK BIT(7)
241
242/* DA9062AA_EVENT_C = 0x008 */
243#define DA9062AA_E_GPI0_SHIFT 0
244#define DA9062AA_E_GPI0_MASK 0x01
245#define DA9062AA_E_GPI1_SHIFT 1
246#define DA9062AA_E_GPI1_MASK BIT(1)
247#define DA9062AA_E_GPI2_SHIFT 2
248#define DA9062AA_E_GPI2_MASK BIT(2)
249#define DA9062AA_E_GPI3_SHIFT 3
250#define DA9062AA_E_GPI3_MASK BIT(3)
251#define DA9062AA_E_GPI4_SHIFT 4
252#define DA9062AA_E_GPI4_MASK BIT(4)
253
254/* DA9062AA_IRQ_MASK_A = 0x00A */
255#define DA9062AA_M_NONKEY_SHIFT 0
256#define DA9062AA_M_NONKEY_MASK 0x01
257#define DA9062AA_M_ALARM_SHIFT 1
258#define DA9062AA_M_ALARM_MASK BIT(1)
259#define DA9062AA_M_TICK_SHIFT 2
260#define DA9062AA_M_TICK_MASK BIT(2)
261#define DA9062AA_M_WDG_WARN_SHIFT 3
262#define DA9062AA_M_WDG_WARN_MASK BIT(3)
263#define DA9062AA_M_SEQ_RDY_SHIFT 4
264#define DA9062AA_M_SEQ_RDY_MASK BIT(4)
265
266/* DA9062AA_IRQ_MASK_B = 0x00B */
267#define DA9062AA_M_TEMP_SHIFT 1
268#define DA9062AA_M_TEMP_MASK BIT(1)
269#define DA9062AA_M_LDO_LIM_SHIFT 3
270#define DA9062AA_M_LDO_LIM_MASK BIT(3)
271#define DA9062AA_M_DVC_RDY_SHIFT 5
272#define DA9062AA_M_DVC_RDY_MASK BIT(5)
273#define DA9062AA_M_VDD_WARN_SHIFT 7
274#define DA9062AA_M_VDD_WARN_MASK BIT(7)
275
276/* DA9062AA_IRQ_MASK_C = 0x00C */
277#define DA9062AA_M_GPI0_SHIFT 0
278#define DA9062AA_M_GPI0_MASK 0x01
279#define DA9062AA_M_GPI1_SHIFT 1
280#define DA9062AA_M_GPI1_MASK BIT(1)
281#define DA9062AA_M_GPI2_SHIFT 2
282#define DA9062AA_M_GPI2_MASK BIT(2)
283#define DA9062AA_M_GPI3_SHIFT 3
284#define DA9062AA_M_GPI3_MASK BIT(3)
285#define DA9062AA_M_GPI4_SHIFT 4
286#define DA9062AA_M_GPI4_MASK BIT(4)
287
288/* DA9062AA_CONTROL_A = 0x00E */
289#define DA9062AA_SYSTEM_EN_SHIFT 0
290#define DA9062AA_SYSTEM_EN_MASK 0x01
291#define DA9062AA_POWER_EN_SHIFT 1
292#define DA9062AA_POWER_EN_MASK BIT(1)
293#define DA9062AA_POWER1_EN_SHIFT 2
294#define DA9062AA_POWER1_EN_MASK BIT(2)
295#define DA9062AA_STANDBY_SHIFT 3
296#define DA9062AA_STANDBY_MASK BIT(3)
297#define DA9062AA_M_SYSTEM_EN_SHIFT 4
298#define DA9062AA_M_SYSTEM_EN_MASK BIT(4)
299#define DA9062AA_M_POWER_EN_SHIFT 5
300#define DA9062AA_M_POWER_EN_MASK BIT(5)
301#define DA9062AA_M_POWER1_EN_SHIFT 6
302#define DA9062AA_M_POWER1_EN_MASK BIT(6)
303
304/* DA9062AA_CONTROL_B = 0x00F */
305#define DA9062AA_WATCHDOG_PD_SHIFT 1
306#define DA9062AA_WATCHDOG_PD_MASK BIT(1)
307#define DA9062AA_FREEZE_EN_SHIFT 2
308#define DA9062AA_FREEZE_EN_MASK BIT(2)
309#define DA9062AA_NRES_MODE_SHIFT 3
310#define DA9062AA_NRES_MODE_MASK BIT(3)
311#define DA9062AA_NONKEY_LOCK_SHIFT 4
312#define DA9062AA_NONKEY_LOCK_MASK BIT(4)
313#define DA9062AA_NFREEZE_SHIFT 5
314#define DA9062AA_NFREEZE_MASK (0x03 << 5)
315#define DA9062AA_BUCK_SLOWSTART_SHIFT 7
316#define DA9062AA_BUCK_SLOWSTART_MASK BIT(7)
317
318/* DA9062AA_CONTROL_C = 0x010 */
319#define DA9062AA_DEBOUNCING_SHIFT 0
320#define DA9062AA_DEBOUNCING_MASK 0x07
321#define DA9062AA_AUTO_BOOT_SHIFT 3
322#define DA9062AA_AUTO_BOOT_MASK BIT(3)
323#define DA9062AA_OTPREAD_EN_SHIFT 4
324#define DA9062AA_OTPREAD_EN_MASK BIT(4)
325#define DA9062AA_SLEW_RATE_SHIFT 5
326#define DA9062AA_SLEW_RATE_MASK (0x03 << 5)
327#define DA9062AA_DEF_SUPPLY_SHIFT 7
328#define DA9062AA_DEF_SUPPLY_MASK BIT(7)
329
330/* DA9062AA_CONTROL_D = 0x011 */
331#define DA9062AA_TWDSCALE_SHIFT 0
332#define DA9062AA_TWDSCALE_MASK 0x07
333
334/* DA9062AA_CONTROL_E = 0x012 */
335#define DA9062AA_RTC_MODE_PD_SHIFT 0
336#define DA9062AA_RTC_MODE_PD_MASK 0x01
337#define DA9062AA_RTC_MODE_SD_SHIFT 1
338#define DA9062AA_RTC_MODE_SD_MASK BIT(1)
339#define DA9062AA_RTC_EN_SHIFT 2
340#define DA9062AA_RTC_EN_MASK BIT(2)
341#define DA9062AA_V_LOCK_SHIFT 7
342#define DA9062AA_V_LOCK_MASK BIT(7)
343
344/* DA9062AA_CONTROL_F = 0x013 */
345#define DA9062AA_WATCHDOG_SHIFT 0
346#define DA9062AA_WATCHDOG_MASK 0x01
347#define DA9062AA_SHUTDOWN_SHIFT 1
348#define DA9062AA_SHUTDOWN_MASK BIT(1)
349#define DA9062AA_WAKE_UP_SHIFT 2
350#define DA9062AA_WAKE_UP_MASK BIT(2)
351
352/* DA9062AA_PD_DIS = 0x014 */
353#define DA9062AA_GPI_DIS_SHIFT 0
354#define DA9062AA_GPI_DIS_MASK 0x01
355#define DA9062AA_PMIF_DIS_SHIFT 2
356#define DA9062AA_PMIF_DIS_MASK BIT(2)
357#define DA9062AA_CLDR_PAUSE_SHIFT 4
358#define DA9062AA_CLDR_PAUSE_MASK BIT(4)
359#define DA9062AA_BBAT_DIS_SHIFT 5
360#define DA9062AA_BBAT_DIS_MASK BIT(5)
361#define DA9062AA_OUT32K_PAUSE_SHIFT 6
362#define DA9062AA_OUT32K_PAUSE_MASK BIT(6)
363#define DA9062AA_PMCONT_DIS_SHIFT 7
364#define DA9062AA_PMCONT_DIS_MASK BIT(7)
365
366/* DA9062AA_GPIO_0_1 = 0x015 */
367#define DA9062AA_GPIO0_PIN_SHIFT 0
368#define DA9062AA_GPIO0_PIN_MASK 0x03
369#define DA9062AA_GPIO0_TYPE_SHIFT 2
370#define DA9062AA_GPIO0_TYPE_MASK BIT(2)
371#define DA9062AA_GPIO0_WEN_SHIFT 3
372#define DA9062AA_GPIO0_WEN_MASK BIT(3)
373#define DA9062AA_GPIO1_PIN_SHIFT 4
374#define DA9062AA_GPIO1_PIN_MASK (0x03 << 4)
375#define DA9062AA_GPIO1_TYPE_SHIFT 6
376#define DA9062AA_GPIO1_TYPE_MASK BIT(6)
377#define DA9062AA_GPIO1_WEN_SHIFT 7
378#define DA9062AA_GPIO1_WEN_MASK BIT(7)
379
380/* DA9062AA_GPIO_2_3 = 0x016 */
381#define DA9062AA_GPIO2_PIN_SHIFT 0
382#define DA9062AA_GPIO2_PIN_MASK 0x03
383#define DA9062AA_GPIO2_TYPE_SHIFT 2
384#define DA9062AA_GPIO2_TYPE_MASK BIT(2)
385#define DA9062AA_GPIO2_WEN_SHIFT 3
386#define DA9062AA_GPIO2_WEN_MASK BIT(3)
387#define DA9062AA_GPIO3_PIN_SHIFT 4
388#define DA9062AA_GPIO3_PIN_MASK (0x03 << 4)
389#define DA9062AA_GPIO3_TYPE_SHIFT 6
390#define DA9062AA_GPIO3_TYPE_MASK BIT(6)
391#define DA9062AA_GPIO3_WEN_SHIFT 7
392#define DA9062AA_GPIO3_WEN_MASK BIT(7)
393
394/* DA9062AA_GPIO_4 = 0x017 */
395#define DA9062AA_GPIO4_PIN_SHIFT 0
396#define DA9062AA_GPIO4_PIN_MASK 0x03
397#define DA9062AA_GPIO4_TYPE_SHIFT 2
398#define DA9062AA_GPIO4_TYPE_MASK BIT(2)
399#define DA9062AA_GPIO4_WEN_SHIFT 3
400#define DA9062AA_GPIO4_WEN_MASK BIT(3)
401
402/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
403#define DA9062AA_GPIO0_WKUP_MODE_SHIFT 0
404#define DA9062AA_GPIO0_WKUP_MODE_MASK 0x01
405#define DA9062AA_GPIO1_WKUP_MODE_SHIFT 1
406#define DA9062AA_GPIO1_WKUP_MODE_MASK BIT(1)
407#define DA9062AA_GPIO2_WKUP_MODE_SHIFT 2
408#define DA9062AA_GPIO2_WKUP_MODE_MASK BIT(2)
409#define DA9062AA_GPIO3_WKUP_MODE_SHIFT 3
410#define DA9062AA_GPIO3_WKUP_MODE_MASK BIT(3)
411#define DA9062AA_GPIO4_WKUP_MODE_SHIFT 4
412#define DA9062AA_GPIO4_WKUP_MODE_MASK BIT(4)
413
414/* DA9062AA_GPIO_MODE0_4 = 0x01D */
415#define DA9062AA_GPIO0_MODE_SHIFT 0
416#define DA9062AA_GPIO0_MODE_MASK 0x01
417#define DA9062AA_GPIO1_MODE_SHIFT 1
418#define DA9062AA_GPIO1_MODE_MASK BIT(1)
419#define DA9062AA_GPIO2_MODE_SHIFT 2
420#define DA9062AA_GPIO2_MODE_MASK BIT(2)
421#define DA9062AA_GPIO3_MODE_SHIFT 3
422#define DA9062AA_GPIO3_MODE_MASK BIT(3)
423#define DA9062AA_GPIO4_MODE_SHIFT 4
424#define DA9062AA_GPIO4_MODE_MASK BIT(4)
425
426/* DA9062AA_GPIO_OUT0_2 = 0x01E */
427#define DA9062AA_GPIO0_OUT_SHIFT 0
428#define DA9062AA_GPIO0_OUT_MASK 0x07
429#define DA9062AA_GPIO1_OUT_SHIFT 3
430#define DA9062AA_GPIO1_OUT_MASK (0x07 << 3)
431#define DA9062AA_GPIO2_OUT_SHIFT 6
432#define DA9062AA_GPIO2_OUT_MASK (0x03 << 6)
433
434/* DA9062AA_GPIO_OUT3_4 = 0x01F */
435#define DA9062AA_GPIO3_OUT_SHIFT 0
436#define DA9062AA_GPIO3_OUT_MASK 0x07
437#define DA9062AA_GPIO4_OUT_SHIFT 3
438#define DA9062AA_GPIO4_OUT_MASK (0x03 << 3)
439
440/* DA9062AA_BUCK2_CONT = 0x020 */
441#define DA9062AA_BUCK2_EN_SHIFT 0
442#define DA9062AA_BUCK2_EN_MASK 0x01
443#define DA9062AA_BUCK2_GPI_SHIFT 1
444#define DA9062AA_BUCK2_GPI_MASK (0x03 << 1)
445#define DA9062AA_BUCK2_CONF_SHIFT 3
446#define DA9062AA_BUCK2_CONF_MASK BIT(3)
447#define DA9062AA_VBUCK2_GPI_SHIFT 5
448#define DA9062AA_VBUCK2_GPI_MASK (0x03 << 5)
449
450/* DA9062AA_BUCK1_CONT = 0x021 */
451#define DA9062AA_BUCK1_EN_SHIFT 0
452#define DA9062AA_BUCK1_EN_MASK 0x01
453#define DA9062AA_BUCK1_GPI_SHIFT 1
454#define DA9062AA_BUCK1_GPI_MASK (0x03 << 1)
455#define DA9062AA_BUCK1_CONF_SHIFT 3
456#define DA9062AA_BUCK1_CONF_MASK BIT(3)
457#define DA9062AA_VBUCK1_GPI_SHIFT 5
458#define DA9062AA_VBUCK1_GPI_MASK (0x03 << 5)
459
460/* DA9062AA_BUCK4_CONT = 0x022 */
461#define DA9062AA_BUCK4_EN_SHIFT 0
462#define DA9062AA_BUCK4_EN_MASK 0x01
463#define DA9062AA_BUCK4_GPI_SHIFT 1
464#define DA9062AA_BUCK4_GPI_MASK (0x03 << 1)
465#define DA9062AA_BUCK4_CONF_SHIFT 3
466#define DA9062AA_BUCK4_CONF_MASK BIT(3)
467#define DA9062AA_VBUCK4_GPI_SHIFT 5
468#define DA9062AA_VBUCK4_GPI_MASK (0x03 << 5)
469
470/* DA9062AA_BUCK3_CONT = 0x024 */
471#define DA9062AA_BUCK3_EN_SHIFT 0
472#define DA9062AA_BUCK3_EN_MASK 0x01
473#define DA9062AA_BUCK3_GPI_SHIFT 1
474#define DA9062AA_BUCK3_GPI_MASK (0x03 << 1)
475#define DA9062AA_BUCK3_CONF_SHIFT 3
476#define DA9062AA_BUCK3_CONF_MASK BIT(3)
477#define DA9062AA_VBUCK3_GPI_SHIFT 5
478#define DA9062AA_VBUCK3_GPI_MASK (0x03 << 5)
479
480/* DA9062AA_LDO1_CONT = 0x026 */
481#define DA9062AA_LDO1_EN_SHIFT 0
482#define DA9062AA_LDO1_EN_MASK 0x01
483#define DA9062AA_LDO1_GPI_SHIFT 1
484#define DA9062AA_LDO1_GPI_MASK (0x03 << 1)
485#define DA9062AA_LDO1_PD_DIS_SHIFT 3
486#define DA9062AA_LDO1_PD_DIS_MASK BIT(3)
487#define DA9062AA_VLDO1_GPI_SHIFT 5
488#define DA9062AA_VLDO1_GPI_MASK (0x03 << 5)
489#define DA9062AA_LDO1_CONF_SHIFT 7
490#define DA9062AA_LDO1_CONF_MASK BIT(7)
491
492/* DA9062AA_LDO2_CONT = 0x027 */
493#define DA9062AA_LDO2_EN_SHIFT 0
494#define DA9062AA_LDO2_EN_MASK 0x01
495#define DA9062AA_LDO2_GPI_SHIFT 1
496#define DA9062AA_LDO2_GPI_MASK (0x03 << 1)
497#define DA9062AA_LDO2_PD_DIS_SHIFT 3
498#define DA9062AA_LDO2_PD_DIS_MASK BIT(3)
499#define DA9062AA_VLDO2_GPI_SHIFT 5
500#define DA9062AA_VLDO2_GPI_MASK (0x03 << 5)
501#define DA9062AA_LDO2_CONF_SHIFT 7
502#define DA9062AA_LDO2_CONF_MASK BIT(7)
503
504/* DA9062AA_LDO3_CONT = 0x028 */
505#define DA9062AA_LDO3_EN_SHIFT 0
506#define DA9062AA_LDO3_EN_MASK 0x01
507#define DA9062AA_LDO3_GPI_SHIFT 1
508#define DA9062AA_LDO3_GPI_MASK (0x03 << 1)
509#define DA9062AA_LDO3_PD_DIS_SHIFT 3
510#define DA9062AA_LDO3_PD_DIS_MASK BIT(3)
511#define DA9062AA_VLDO3_GPI_SHIFT 5
512#define DA9062AA_VLDO3_GPI_MASK (0x03 << 5)
513#define DA9062AA_LDO3_CONF_SHIFT 7
514#define DA9062AA_LDO3_CONF_MASK BIT(7)
515
516/* DA9062AA_LDO4_CONT = 0x029 */
517#define DA9062AA_LDO4_EN_SHIFT 0
518#define DA9062AA_LDO4_EN_MASK 0x01
519#define DA9062AA_LDO4_GPI_SHIFT 1
520#define DA9062AA_LDO4_GPI_MASK (0x03 << 1)
521#define DA9062AA_LDO4_PD_DIS_SHIFT 3
522#define DA9062AA_LDO4_PD_DIS_MASK BIT(3)
523#define DA9062AA_VLDO4_GPI_SHIFT 5
524#define DA9062AA_VLDO4_GPI_MASK (0x03 << 5)
525#define DA9062AA_LDO4_CONF_SHIFT 7
526#define DA9062AA_LDO4_CONF_MASK BIT(7)
527
528/* DA9062AA_DVC_1 = 0x032 */
529#define DA9062AA_VBUCK1_SEL_SHIFT 0
530#define DA9062AA_VBUCK1_SEL_MASK 0x01
531#define DA9062AA_VBUCK2_SEL_SHIFT 1
532#define DA9062AA_VBUCK2_SEL_MASK BIT(1)
533#define DA9062AA_VBUCK4_SEL_SHIFT 2
534#define DA9062AA_VBUCK4_SEL_MASK BIT(2)
535#define DA9062AA_VBUCK3_SEL_SHIFT 3
536#define DA9062AA_VBUCK3_SEL_MASK BIT(3)
537#define DA9062AA_VLDO1_SEL_SHIFT 4
538#define DA9062AA_VLDO1_SEL_MASK BIT(4)
539#define DA9062AA_VLDO2_SEL_SHIFT 5
540#define DA9062AA_VLDO2_SEL_MASK BIT(5)
541#define DA9062AA_VLDO3_SEL_SHIFT 6
542#define DA9062AA_VLDO3_SEL_MASK BIT(6)
543#define DA9062AA_VLDO4_SEL_SHIFT 7
544#define DA9062AA_VLDO4_SEL_MASK BIT(7)
545
546/* DA9062AA_COUNT_S = 0x040 */
547#define DA9062AA_COUNT_SEC_SHIFT 0
548#define DA9062AA_COUNT_SEC_MASK 0x3f
549#define DA9062AA_RTC_READ_SHIFT 7
550#define DA9062AA_RTC_READ_MASK BIT(7)
551
552/* DA9062AA_COUNT_MI = 0x041 */
553#define DA9062AA_COUNT_MIN_SHIFT 0
554#define DA9062AA_COUNT_MIN_MASK 0x3f
555
556/* DA9062AA_COUNT_H = 0x042 */
557#define DA9062AA_COUNT_HOUR_SHIFT 0
558#define DA9062AA_COUNT_HOUR_MASK 0x1f
559
560/* DA9062AA_COUNT_D = 0x043 */
561#define DA9062AA_COUNT_DAY_SHIFT 0
562#define DA9062AA_COUNT_DAY_MASK 0x1f
563
564/* DA9062AA_COUNT_MO = 0x044 */
565#define DA9062AA_COUNT_MONTH_SHIFT 0
566#define DA9062AA_COUNT_MONTH_MASK 0x0f
567
568/* DA9062AA_COUNT_Y = 0x045 */
569#define DA9062AA_COUNT_YEAR_SHIFT 0
570#define DA9062AA_COUNT_YEAR_MASK 0x3f
571#define DA9062AA_MONITOR_SHIFT 6
572#define DA9062AA_MONITOR_MASK BIT(6)
573
574/* DA9062AA_ALARM_S = 0x046 */
575#define DA9062AA_ALARM_SEC_SHIFT 0
576#define DA9062AA_ALARM_SEC_MASK 0x3f
577#define DA9062AA_ALARM_STATUS_SHIFT 6
578#define DA9062AA_ALARM_STATUS_MASK (0x03 << 6)
579
580/* DA9062AA_ALARM_MI = 0x047 */
581#define DA9062AA_ALARM_MIN_SHIFT 0
582#define DA9062AA_ALARM_MIN_MASK 0x3f
583
584/* DA9062AA_ALARM_H = 0x048 */
585#define DA9062AA_ALARM_HOUR_SHIFT 0
586#define DA9062AA_ALARM_HOUR_MASK 0x1f
587
588/* DA9062AA_ALARM_D = 0x049 */
589#define DA9062AA_ALARM_DAY_SHIFT 0
590#define DA9062AA_ALARM_DAY_MASK 0x1f
591
592/* DA9062AA_ALARM_MO = 0x04A */
593#define DA9062AA_ALARM_MONTH_SHIFT 0
594#define DA9062AA_ALARM_MONTH_MASK 0x0f
595#define DA9062AA_TICK_TYPE_SHIFT 4
596#define DA9062AA_TICK_TYPE_MASK BIT(4)
597#define DA9062AA_TICK_WAKE_SHIFT 5
598#define DA9062AA_TICK_WAKE_MASK BIT(5)
599
600/* DA9062AA_ALARM_Y = 0x04B */
601#define DA9062AA_ALARM_YEAR_SHIFT 0
602#define DA9062AA_ALARM_YEAR_MASK 0x3f
603#define DA9062AA_ALARM_ON_SHIFT 6
604#define DA9062AA_ALARM_ON_MASK BIT(6)
605#define DA9062AA_TICK_ON_SHIFT 7
606#define DA9062AA_TICK_ON_MASK BIT(7)
607
608/* DA9062AA_SECOND_A = 0x04C */
609#define DA9062AA_SECONDS_A_SHIFT 0
610#define DA9062AA_SECONDS_A_MASK 0xff
611
612/* DA9062AA_SECOND_B = 0x04D */
613#define DA9062AA_SECONDS_B_SHIFT 0
614#define DA9062AA_SECONDS_B_MASK 0xff
615
616/* DA9062AA_SECOND_C = 0x04E */
617#define DA9062AA_SECONDS_C_SHIFT 0
618#define DA9062AA_SECONDS_C_MASK 0xff
619
620/* DA9062AA_SECOND_D = 0x04F */
621#define DA9062AA_SECONDS_D_SHIFT 0
622#define DA9062AA_SECONDS_D_MASK 0xff
623
624/* DA9062AA_SEQ = 0x081 */
625#define DA9062AA_SEQ_POINTER_SHIFT 0
626#define DA9062AA_SEQ_POINTER_MASK 0x0f
627#define DA9062AA_NXT_SEQ_START_SHIFT 4
628#define DA9062AA_NXT_SEQ_START_MASK (0x0f << 4)
629
630/* DA9062AA_SEQ_TIMER = 0x082 */
631#define DA9062AA_SEQ_TIME_SHIFT 0
632#define DA9062AA_SEQ_TIME_MASK 0x0f
633#define DA9062AA_SEQ_DUMMY_SHIFT 4
634#define DA9062AA_SEQ_DUMMY_MASK (0x0f << 4)
635
636/* DA9062AA_ID_2_1 = 0x083 */
637#define DA9062AA_LDO1_STEP_SHIFT 0
638#define DA9062AA_LDO1_STEP_MASK 0x0f
639#define DA9062AA_LDO2_STEP_SHIFT 4
640#define DA9062AA_LDO2_STEP_MASK (0x0f << 4)
641
642/* DA9062AA_ID_4_3 = 0x084 */
643#define DA9062AA_LDO3_STEP_SHIFT 0
644#define DA9062AA_LDO3_STEP_MASK 0x0f
645#define DA9062AA_LDO4_STEP_SHIFT 4
646#define DA9062AA_LDO4_STEP_MASK (0x0f << 4)
647
648/* DA9062AA_ID_12_11 = 0x088 */
649#define DA9062AA_PD_DIS_STEP_SHIFT 4
650#define DA9062AA_PD_DIS_STEP_MASK (0x0f << 4)
651
652/* DA9062AA_ID_14_13 = 0x089 */
653#define DA9062AA_BUCK1_STEP_SHIFT 0
654#define DA9062AA_BUCK1_STEP_MASK 0x0f
655#define DA9062AA_BUCK2_STEP_SHIFT 4
656#define DA9062AA_BUCK2_STEP_MASK (0x0f << 4)
657
658/* DA9062AA_ID_16_15 = 0x08A */
659#define DA9062AA_BUCK4_STEP_SHIFT 0
660#define DA9062AA_BUCK4_STEP_MASK 0x0f
661#define DA9062AA_BUCK3_STEP_SHIFT 4
662#define DA9062AA_BUCK3_STEP_MASK (0x0f << 4)
663
664/* DA9062AA_ID_22_21 = 0x08D */
665#define DA9062AA_GP_RISE1_STEP_SHIFT 0
666#define DA9062AA_GP_RISE1_STEP_MASK 0x0f
667#define DA9062AA_GP_FALL1_STEP_SHIFT 4
668#define DA9062AA_GP_FALL1_STEP_MASK (0x0f << 4)
669
670/* DA9062AA_ID_24_23 = 0x08E */
671#define DA9062AA_GP_RISE2_STEP_SHIFT 0
672#define DA9062AA_GP_RISE2_STEP_MASK 0x0f
673#define DA9062AA_GP_FALL2_STEP_SHIFT 4
674#define DA9062AA_GP_FALL2_STEP_MASK (0x0f << 4)
675
676/* DA9062AA_ID_26_25 = 0x08F */
677#define DA9062AA_GP_RISE3_STEP_SHIFT 0
678#define DA9062AA_GP_RISE3_STEP_MASK 0x0f
679#define DA9062AA_GP_FALL3_STEP_SHIFT 4
680#define DA9062AA_GP_FALL3_STEP_MASK (0x0f << 4)
681
682/* DA9062AA_ID_28_27 = 0x090 */
683#define DA9062AA_GP_RISE4_STEP_SHIFT 0
684#define DA9062AA_GP_RISE4_STEP_MASK 0x0f
685#define DA9062AA_GP_FALL4_STEP_SHIFT 4
686#define DA9062AA_GP_FALL4_STEP_MASK (0x0f << 4)
687
688/* DA9062AA_ID_30_29 = 0x091 */
689#define DA9062AA_GP_RISE5_STEP_SHIFT 0
690#define DA9062AA_GP_RISE5_STEP_MASK 0x0f
691#define DA9062AA_GP_FALL5_STEP_SHIFT 4
692#define DA9062AA_GP_FALL5_STEP_MASK (0x0f << 4)
693
694/* DA9062AA_ID_32_31 = 0x092 */
695#define DA9062AA_WAIT_STEP_SHIFT 0
696#define DA9062AA_WAIT_STEP_MASK 0x0f
697#define DA9062AA_EN32K_STEP_SHIFT 4
698#define DA9062AA_EN32K_STEP_MASK (0x0f << 4)
699
700/* DA9062AA_SEQ_A = 0x095 */
701#define DA9062AA_SYSTEM_END_SHIFT 0
702#define DA9062AA_SYSTEM_END_MASK 0x0f
703#define DA9062AA_POWER_END_SHIFT 4
704#define DA9062AA_POWER_END_MASK (0x0f << 4)
705
706/* DA9062AA_SEQ_B = 0x096 */
707#define DA9062AA_MAX_COUNT_SHIFT 0
708#define DA9062AA_MAX_COUNT_MASK 0x0f
709#define DA9062AA_PART_DOWN_SHIFT 4
710#define DA9062AA_PART_DOWN_MASK (0x0f << 4)
711
712/* DA9062AA_WAIT = 0x097 */
713#define DA9062AA_WAIT_TIME_SHIFT 0
714#define DA9062AA_WAIT_TIME_MASK 0x0f
715#define DA9062AA_WAIT_MODE_SHIFT 4
716#define DA9062AA_WAIT_MODE_MASK BIT(4)
717#define DA9062AA_TIME_OUT_SHIFT 5
718#define DA9062AA_TIME_OUT_MASK BIT(5)
719#define DA9062AA_WAIT_DIR_SHIFT 6
720#define DA9062AA_WAIT_DIR_MASK (0x03 << 6)
721
722/* DA9062AA_EN_32K = 0x098 */
723#define DA9062AA_STABILISATION_TIME_SHIFT 0
724#define DA9062AA_STABILISATION_TIME_MASK 0x07
725#define DA9062AA_CRYSTAL_SHIFT 3
726#define DA9062AA_CRYSTAL_MASK BIT(3)
727#define DA9062AA_DELAY_MODE_SHIFT 4
728#define DA9062AA_DELAY_MODE_MASK BIT(4)
729#define DA9062AA_OUT_CLOCK_SHIFT 5
730#define DA9062AA_OUT_CLOCK_MASK BIT(5)
731#define DA9062AA_RTC_CLOCK_SHIFT 6
732#define DA9062AA_RTC_CLOCK_MASK BIT(6)
733#define DA9062AA_EN_32KOUT_SHIFT 7
734#define DA9062AA_EN_32KOUT_MASK BIT(7)
735
736/* DA9062AA_RESET = 0x099 */
737#define DA9062AA_RESET_TIMER_SHIFT 0
738#define DA9062AA_RESET_TIMER_MASK 0x3f
739#define DA9062AA_RESET_EVENT_SHIFT 6
740#define DA9062AA_RESET_EVENT_MASK (0x03 << 6)
741
742/* DA9062AA_BUCK_ILIM_A = 0x09A */
743#define DA9062AA_BUCK3_ILIM_SHIFT 0
744#define DA9062AA_BUCK3_ILIM_MASK 0x0f
745
746/* DA9062AA_BUCK_ILIM_B = 0x09B */
747#define DA9062AA_BUCK4_ILIM_SHIFT 0
748#define DA9062AA_BUCK4_ILIM_MASK 0x0f
749
750/* DA9062AA_BUCK_ILIM_C = 0x09C */
751#define DA9062AA_BUCK1_ILIM_SHIFT 0
752#define DA9062AA_BUCK1_ILIM_MASK 0x0f
753#define DA9062AA_BUCK2_ILIM_SHIFT 4
754#define DA9062AA_BUCK2_ILIM_MASK (0x0f << 4)
755
756/* DA9062AA_BUCK2_CFG = 0x09D */
757#define DA9062AA_BUCK2_PD_DIS_SHIFT 5
758#define DA9062AA_BUCK2_PD_DIS_MASK BIT(5)
759#define DA9062AA_BUCK2_MODE_SHIFT 6
760#define DA9062AA_BUCK2_MODE_MASK (0x03 << 6)
761
762/* DA9062AA_BUCK1_CFG = 0x09E */
763#define DA9062AA_BUCK1_PD_DIS_SHIFT 5
764#define DA9062AA_BUCK1_PD_DIS_MASK BIT(5)
765#define DA9062AA_BUCK1_MODE_SHIFT 6
766#define DA9062AA_BUCK1_MODE_MASK (0x03 << 6)
767
768/* DA9062AA_BUCK4_CFG = 0x09F */
769#define DA9062AA_BUCK4_VTTR_EN_SHIFT 3
770#define DA9062AA_BUCK4_VTTR_EN_MASK BIT(3)
771#define DA9062AA_BUCK4_VTT_EN_SHIFT 4
772#define DA9062AA_BUCK4_VTT_EN_MASK BIT(4)
773#define DA9062AA_BUCK4_PD_DIS_SHIFT 5
774#define DA9062AA_BUCK4_PD_DIS_MASK BIT(5)
775#define DA9062AA_BUCK4_MODE_SHIFT 6
776#define DA9062AA_BUCK4_MODE_MASK (0x03 << 6)
777
778/* DA9062AA_BUCK3_CFG = 0x0A0 */
779#define DA9062AA_BUCK3_PD_DIS_SHIFT 5
780#define DA9062AA_BUCK3_PD_DIS_MASK BIT(5)
781#define DA9062AA_BUCK3_MODE_SHIFT 6
782#define DA9062AA_BUCK3_MODE_MASK (0x03 << 6)
783
784/* DA9062AA_VBUCK2_A = 0x0A3 */
785#define DA9062AA_VBUCK2_A_SHIFT 0
786#define DA9062AA_VBUCK2_A_MASK 0x7f
787#define DA9062AA_BUCK2_SL_A_SHIFT 7
788#define DA9062AA_BUCK2_SL_A_MASK BIT(7)
789
790/* DA9062AA_VBUCK1_A = 0x0A4 */
791#define DA9062AA_VBUCK1_A_SHIFT 0
792#define DA9062AA_VBUCK1_A_MASK 0x7f
793#define DA9062AA_BUCK1_SL_A_SHIFT 7
794#define DA9062AA_BUCK1_SL_A_MASK BIT(7)
795
796/* DA9062AA_VBUCK4_A = 0x0A5 */
797#define DA9062AA_VBUCK4_A_SHIFT 0
798#define DA9062AA_VBUCK4_A_MASK 0x7f
799#define DA9062AA_BUCK4_SL_A_SHIFT 7
800#define DA9062AA_BUCK4_SL_A_MASK BIT(7)
801
802/* DA9062AA_VBUCK3_A = 0x0A7 */
803#define DA9062AA_VBUCK3_A_SHIFT 0
804#define DA9062AA_VBUCK3_A_MASK 0x7f
805#define DA9062AA_BUCK3_SL_A_SHIFT 7
806#define DA9062AA_BUCK3_SL_A_MASK BIT(7)
807
808/* DA9062AA_VLDO1_A = 0x0A9 */
809#define DA9062AA_VLDO1_A_SHIFT 0
810#define DA9062AA_VLDO1_A_MASK 0x3f
811#define DA9062AA_LDO1_SL_A_SHIFT 7
812#define DA9062AA_LDO1_SL_A_MASK BIT(7)
813
814/* DA9062AA_VLDO2_A = 0x0AA */
815#define DA9062AA_VLDO2_A_SHIFT 0
816#define DA9062AA_VLDO2_A_MASK 0x3f
817#define DA9062AA_LDO2_SL_A_SHIFT 7
818#define DA9062AA_LDO2_SL_A_MASK BIT(7)
819
820/* DA9062AA_VLDO3_A = 0x0AB */
821#define DA9062AA_VLDO3_A_SHIFT 0
822#define DA9062AA_VLDO3_A_MASK 0x3f
823#define DA9062AA_LDO3_SL_A_SHIFT 7
824#define DA9062AA_LDO3_SL_A_MASK BIT(7)
825
826/* DA9062AA_VLDO4_A = 0x0AC */
827#define DA9062AA_VLDO4_A_SHIFT 0
828#define DA9062AA_VLDO4_A_MASK 0x3f
829#define DA9062AA_LDO4_SL_A_SHIFT 7
830#define DA9062AA_LDO4_SL_A_MASK BIT(7)
831
832/* DA9062AA_VBUCK2_B = 0x0B4 */
833#define DA9062AA_VBUCK2_B_SHIFT 0
834#define DA9062AA_VBUCK2_B_MASK 0x7f
835#define DA9062AA_BUCK2_SL_B_SHIFT 7
836#define DA9062AA_BUCK2_SL_B_MASK BIT(7)
837
838/* DA9062AA_VBUCK1_B = 0x0B5 */
839#define DA9062AA_VBUCK1_B_SHIFT 0
840#define DA9062AA_VBUCK1_B_MASK 0x7f
841#define DA9062AA_BUCK1_SL_B_SHIFT 7
842#define DA9062AA_BUCK1_SL_B_MASK BIT(7)
843
844/* DA9062AA_VBUCK4_B = 0x0B6 */
845#define DA9062AA_VBUCK4_B_SHIFT 0
846#define DA9062AA_VBUCK4_B_MASK 0x7f
847#define DA9062AA_BUCK4_SL_B_SHIFT 7
848#define DA9062AA_BUCK4_SL_B_MASK BIT(7)
849
850/* DA9062AA_VBUCK3_B = 0x0B8 */
851#define DA9062AA_VBUCK3_B_SHIFT 0
852#define DA9062AA_VBUCK3_B_MASK 0x7f
853#define DA9062AA_BUCK3_SL_B_SHIFT 7
854#define DA9062AA_BUCK3_SL_B_MASK BIT(7)
855
856/* DA9062AA_VLDO1_B = 0x0BA */
857#define DA9062AA_VLDO1_B_SHIFT 0
858#define DA9062AA_VLDO1_B_MASK 0x3f
859#define DA9062AA_LDO1_SL_B_SHIFT 7
860#define DA9062AA_LDO1_SL_B_MASK BIT(7)
861
862/* DA9062AA_VLDO2_B = 0x0BB */
863#define DA9062AA_VLDO2_B_SHIFT 0
864#define DA9062AA_VLDO2_B_MASK 0x3f
865#define DA9062AA_LDO2_SL_B_SHIFT 7
866#define DA9062AA_LDO2_SL_B_MASK BIT(7)
867
868/* DA9062AA_VLDO3_B = 0x0BC */
869#define DA9062AA_VLDO3_B_SHIFT 0
870#define DA9062AA_VLDO3_B_MASK 0x3f
871#define DA9062AA_LDO3_SL_B_SHIFT 7
872#define DA9062AA_LDO3_SL_B_MASK BIT(7)
873
874/* DA9062AA_VLDO4_B = 0x0BD */
875#define DA9062AA_VLDO4_B_SHIFT 0
876#define DA9062AA_VLDO4_B_MASK 0x3f
877#define DA9062AA_LDO4_SL_B_SHIFT 7
878#define DA9062AA_LDO4_SL_B_MASK BIT(7)
879
880/* DA9062AA_BBAT_CONT = 0x0C5 */
881#define DA9062AA_BCHG_VSET_SHIFT 0
882#define DA9062AA_BCHG_VSET_MASK 0x0f
883#define DA9062AA_BCHG_ISET_SHIFT 4
884#define DA9062AA_BCHG_ISET_MASK (0x0f << 4)
885
886/* DA9062AA_INTERFACE = 0x105 */
887#define DA9062AA_IF_BASE_ADDR_SHIFT 4
888#define DA9062AA_IF_BASE_ADDR_MASK (0x0f << 4)
889
890/* DA9062AA_CONFIG_A = 0x106 */
891#define DA9062AA_PM_I_V_SHIFT 0
892#define DA9062AA_PM_I_V_MASK 0x01
893#define DA9062AA_PM_O_TYPE_SHIFT 2
894#define DA9062AA_PM_O_TYPE_MASK BIT(2)
895#define DA9062AA_IRQ_TYPE_SHIFT 3
896#define DA9062AA_IRQ_TYPE_MASK BIT(3)
897#define DA9062AA_PM_IF_V_SHIFT 4
898#define DA9062AA_PM_IF_V_MASK BIT(4)
899#define DA9062AA_PM_IF_FMP_SHIFT 5
900#define DA9062AA_PM_IF_FMP_MASK BIT(5)
901#define DA9062AA_PM_IF_HSM_SHIFT 6
902#define DA9062AA_PM_IF_HSM_MASK BIT(6)
903
904/* DA9062AA_CONFIG_B = 0x107 */
905#define DA9062AA_VDD_FAULT_ADJ_SHIFT 0
906#define DA9062AA_VDD_FAULT_ADJ_MASK 0x0f
907#define DA9062AA_VDD_HYST_ADJ_SHIFT 4
908#define DA9062AA_VDD_HYST_ADJ_MASK (0x07 << 4)
909
910/* DA9062AA_CONFIG_C = 0x108 */
911#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT 2
912#define DA9062AA_BUCK_ACTV_DISCHRG_MASK BIT(2)
913#define DA9062AA_BUCK1_CLK_INV_SHIFT 3
914#define DA9062AA_BUCK1_CLK_INV_MASK BIT(3)
915#define DA9062AA_BUCK4_CLK_INV_SHIFT 4
916#define DA9062AA_BUCK4_CLK_INV_MASK BIT(4)
917#define DA9062AA_BUCK3_CLK_INV_SHIFT 6
918#define DA9062AA_BUCK3_CLK_INV_MASK BIT(6)
919
920/* DA9062AA_CONFIG_D = 0x109 */
921#define DA9062AA_GPI_V_SHIFT 0
922#define DA9062AA_GPI_V_MASK 0x01
923#define DA9062AA_NIRQ_MODE_SHIFT 1
924#define DA9062AA_NIRQ_MODE_MASK BIT(1)
925#define DA9062AA_SYSTEM_EN_RD_SHIFT 2
926#define DA9062AA_SYSTEM_EN_RD_MASK BIT(2)
927#define DA9062AA_FORCE_RESET_SHIFT 5
928#define DA9062AA_FORCE_RESET_MASK BIT(5)
929
930/* DA9062AA_CONFIG_E = 0x10A */
931#define DA9062AA_BUCK1_AUTO_SHIFT 0
932#define DA9062AA_BUCK1_AUTO_MASK 0x01
933#define DA9062AA_BUCK2_AUTO_SHIFT 1
934#define DA9062AA_BUCK2_AUTO_MASK BIT(1)
935#define DA9062AA_BUCK4_AUTO_SHIFT 2
936#define DA9062AA_BUCK4_AUTO_MASK BIT(2)
937#define DA9062AA_BUCK3_AUTO_SHIFT 4
938#define DA9062AA_BUCK3_AUTO_MASK BIT(4)
939
940/* DA9062AA_CONFIG_G = 0x10C */
941#define DA9062AA_LDO1_AUTO_SHIFT 0
942#define DA9062AA_LDO1_AUTO_MASK 0x01
943#define DA9062AA_LDO2_AUTO_SHIFT 1
944#define DA9062AA_LDO2_AUTO_MASK BIT(1)
945#define DA9062AA_LDO3_AUTO_SHIFT 2
946#define DA9062AA_LDO3_AUTO_MASK BIT(2)
947#define DA9062AA_LDO4_AUTO_SHIFT 3
948#define DA9062AA_LDO4_AUTO_MASK BIT(3)
949
950/* DA9062AA_CONFIG_H = 0x10D */
951#define DA9062AA_BUCK1_2_MERGE_SHIFT 3
952#define DA9062AA_BUCK1_2_MERGE_MASK BIT(3)
953#define DA9062AA_BUCK2_OD_SHIFT 5
954#define DA9062AA_BUCK2_OD_MASK BIT(5)
955#define DA9062AA_BUCK1_OD_SHIFT 6
956#define DA9062AA_BUCK1_OD_MASK BIT(6)
957
958/* DA9062AA_CONFIG_I = 0x10E */
959#define DA9062AA_NONKEY_PIN_SHIFT 0
960#define DA9062AA_NONKEY_PIN_MASK 0x03
961#define DA9062AA_nONKEY_SD_SHIFT 2
962#define DA9062AA_nONKEY_SD_MASK BIT(2)
963#define DA9062AA_WATCHDOG_SD_SHIFT 3
964#define DA9062AA_WATCHDOG_SD_MASK BIT(3)
965#define DA9062AA_KEY_SD_MODE_SHIFT 4
966#define DA9062AA_KEY_SD_MODE_MASK BIT(4)
967#define DA9062AA_HOST_SD_MODE_SHIFT 5
968#define DA9062AA_HOST_SD_MODE_MASK BIT(5)
969#define DA9062AA_INT_SD_MODE_SHIFT 6
970#define DA9062AA_INT_SD_MODE_MASK BIT(6)
971#define DA9062AA_LDO_SD_SHIFT 7
972#define DA9062AA_LDO_SD_MASK BIT(7)
973
974/* DA9062AA_CONFIG_J = 0x10F */
975#define DA9062AA_KEY_DELAY_SHIFT 0
976#define DA9062AA_KEY_DELAY_MASK 0x03
977#define DA9062AA_SHUT_DELAY_SHIFT 2
978#define DA9062AA_SHUT_DELAY_MASK (0x03 << 2)
979#define DA9062AA_RESET_DURATION_SHIFT 4
980#define DA9062AA_RESET_DURATION_MASK (0x03 << 4)
981#define DA9062AA_TWOWIRE_TO_SHIFT 6
982#define DA9062AA_TWOWIRE_TO_MASK BIT(6)
983#define DA9062AA_IF_RESET_SHIFT 7
984#define DA9062AA_IF_RESET_MASK BIT(7)
985
986/* DA9062AA_CONFIG_K = 0x110 */
987#define DA9062AA_GPIO0_PUPD_SHIFT 0
988#define DA9062AA_GPIO0_PUPD_MASK 0x01
989#define DA9062AA_GPIO1_PUPD_SHIFT 1
990#define DA9062AA_GPIO1_PUPD_MASK BIT(1)
991#define DA9062AA_GPIO2_PUPD_SHIFT 2
992#define DA9062AA_GPIO2_PUPD_MASK BIT(2)
993#define DA9062AA_GPIO3_PUPD_SHIFT 3
994#define DA9062AA_GPIO3_PUPD_MASK BIT(3)
995#define DA9062AA_GPIO4_PUPD_SHIFT 4
996#define DA9062AA_GPIO4_PUPD_MASK BIT(4)
997
998/* DA9062AA_CONFIG_M = 0x112 */
999#define DA9062AA_NSHUTDOWN_PU_SHIFT 1
1000#define DA9062AA_NSHUTDOWN_PU_MASK BIT(1)
1001#define DA9062AA_WDG_MODE_SHIFT 3
1002#define DA9062AA_WDG_MODE_MASK BIT(3)
1003#define DA9062AA_OSC_FRQ_SHIFT 4
1004#define DA9062AA_OSC_FRQ_MASK (0x0f << 4)
1005
1006/* DA9062AA_TRIM_CLDR = 0x120 */
1007#define DA9062AA_TRIM_CLDR_SHIFT 0
1008#define DA9062AA_TRIM_CLDR_MASK 0xff
1009
1010/* DA9062AA_GP_ID_0 = 0x121 */
1011#define DA9062AA_GP_0_SHIFT 0
1012#define DA9062AA_GP_0_MASK 0xff
1013
1014/* DA9062AA_GP_ID_1 = 0x122 */
1015#define DA9062AA_GP_1_SHIFT 0
1016#define DA9062AA_GP_1_MASK 0xff
1017
1018/* DA9062AA_GP_ID_2 = 0x123 */
1019#define DA9062AA_GP_2_SHIFT 0
1020#define DA9062AA_GP_2_MASK 0xff
1021
1022/* DA9062AA_GP_ID_3 = 0x124 */
1023#define DA9062AA_GP_3_SHIFT 0
1024#define DA9062AA_GP_3_MASK 0xff
1025
1026/* DA9062AA_GP_ID_4 = 0x125 */
1027#define DA9062AA_GP_4_SHIFT 0
1028#define DA9062AA_GP_4_MASK 0xff
1029
1030/* DA9062AA_GP_ID_5 = 0x126 */
1031#define DA9062AA_GP_5_SHIFT 0
1032#define DA9062AA_GP_5_MASK 0xff
1033
1034/* DA9062AA_GP_ID_6 = 0x127 */
1035#define DA9062AA_GP_6_SHIFT 0
1036#define DA9062AA_GP_6_MASK 0xff
1037
1038/* DA9062AA_GP_ID_7 = 0x128 */
1039#define DA9062AA_GP_7_SHIFT 0
1040#define DA9062AA_GP_7_MASK 0xff
1041
1042/* DA9062AA_GP_ID_8 = 0x129 */
1043#define DA9062AA_GP_8_SHIFT 0
1044#define DA9062AA_GP_8_MASK 0xff
1045
1046/* DA9062AA_GP_ID_9 = 0x12A */
1047#define DA9062AA_GP_9_SHIFT 0
1048#define DA9062AA_GP_9_MASK 0xff
1049
1050/* DA9062AA_GP_ID_10 = 0x12B */
1051#define DA9062AA_GP_10_SHIFT 0
1052#define DA9062AA_GP_10_MASK 0xff
1053
1054/* DA9062AA_GP_ID_11 = 0x12C */
1055#define DA9062AA_GP_11_SHIFT 0
1056#define DA9062AA_GP_11_MASK 0xff
1057
1058/* DA9062AA_GP_ID_12 = 0x12D */
1059#define DA9062AA_GP_12_SHIFT 0
1060#define DA9062AA_GP_12_MASK 0xff
1061
1062/* DA9062AA_GP_ID_13 = 0x12E */
1063#define DA9062AA_GP_13_SHIFT 0
1064#define DA9062AA_GP_13_MASK 0xff
1065
1066/* DA9062AA_GP_ID_14 = 0x12F */
1067#define DA9062AA_GP_14_SHIFT 0
1068#define DA9062AA_GP_14_MASK 0xff
1069
1070/* DA9062AA_GP_ID_15 = 0x130 */
1071#define DA9062AA_GP_15_SHIFT 0
1072#define DA9062AA_GP_15_MASK 0xff
1073
1074/* DA9062AA_GP_ID_16 = 0x131 */
1075#define DA9062AA_GP_16_SHIFT 0
1076#define DA9062AA_GP_16_MASK 0xff
1077
1078/* DA9062AA_GP_ID_17 = 0x132 */
1079#define DA9062AA_GP_17_SHIFT 0
1080#define DA9062AA_GP_17_MASK 0xff
1081
1082/* DA9062AA_GP_ID_18 = 0x133 */
1083#define DA9062AA_GP_18_SHIFT 0
1084#define DA9062AA_GP_18_MASK 0xff
1085
1086/* DA9062AA_GP_ID_19 = 0x134 */
1087#define DA9062AA_GP_19_SHIFT 0
1088#define DA9062AA_GP_19_MASK 0xff
1089
1090/* DA9062AA_DEVICE_ID = 0x181 */
1091#define DA9062AA_DEV_ID_SHIFT 0
1092#define DA9062AA_DEV_ID_MASK 0xff
1093
1094/* DA9062AA_VARIANT_ID = 0x182 */
1095#define DA9062AA_VRC_SHIFT 0
1096#define DA9062AA_VRC_MASK 0x0f
1097#define DA9062AA_MRC_SHIFT 4
1098#define DA9062AA_MRC_MASK (0x0f << 4)
1099
1100/* DA9062AA_CUSTOMER_ID = 0x183 */
1101#define DA9062AA_CUST_ID_SHIFT 0
1102#define DA9062AA_CUST_ID_MASK 0xff
1103
1104/* DA9062AA_CONFIG_ID = 0x184 */
1105#define DA9062AA_CONFIG_REV_SHIFT 0
1106#define DA9062AA_CONFIG_REV_MASK 0xff
1107
1108#endif /* __DA9062_H__ */
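
[Editor's note, not part of the patch] The *_SHIFT/*_MASK pairs above are laid out for regmap-style read-modify-write accesses. A minimal sketch of how a sub-driver might use them, assuming the register-address macros (e.g. DA9062AA_CONFIG_A for 0x106) defined earlier in this header and a regmap handle supplied by the MFD core; the helper name is illustrative only.

#include <linux/regmap.h>

/* Illustrative helper: set or clear the IRQ_TYPE bit of CONFIG_A
 * without disturbing the neighbouring bits of the register. */
static int da9062_example_set_irq_type(struct regmap *regmap, bool set)
{
	return regmap_update_bits(regmap, DA9062AA_CONFIG_A,
				  DA9062AA_IRQ_TYPE_MASK,
				  set ? DA9062AA_IRQ_TYPE_MASK : 0);
}
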
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 79f4d822ba13..621af82123c6 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@ enum da9063_irqs {
51 DA9063_IRQ_COMP_1V2, 51 DA9063_IRQ_COMP_1V2,
52 DA9063_IRQ_LDO_LIM, 52 DA9063_IRQ_LDO_LIM,
53 DA9063_IRQ_REG_UVOV, 53 DA9063_IRQ_REG_UVOV,
54 DA9063_IRQ_DVC_RDY,
54 DA9063_IRQ_VDD_MON, 55 DA9063_IRQ_VDD_MON,
55 DA9063_IRQ_WARN, 56 DA9063_IRQ_WARN,
56 DA9063_IRQ_GPI0, 57 DA9063_IRQ_GPI0,
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 8feac782fa83..2b300b44f994 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
20#ifndef LPC_ICH_H 20#ifndef LPC_ICH_H
21#define LPC_ICH_H 21#define LPC_ICH_H
22 22
23/* Watchdog resources */
24#define ICH_RES_IO_TCO 0
25#define ICH_RES_IO_SMI 1
26#define ICH_RES_MEM_OFF 2
27#define ICH_RES_MEM_GCS_PMC 0
28
29/* GPIO resources */ 23/* GPIO resources */
30#define ICH_RES_GPIO 0 24#define ICH_RES_GPIO 0
31#define ICH_RES_GPE0 1 25#define ICH_RES_GPE0 1
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
new file mode 100644
index 000000000000..095b121aa725
--- /dev/null
+++ b/include/linux/mfd/max77693-common.h
@@ -0,0 +1,49 @@
1/*
2 * Common data shared between Maxim 77693 and 77843 drivers
3 *
4 * Copyright (C) 2015 Samsung Electronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __LINUX_MFD_MAX77693_COMMON_H
13#define __LINUX_MFD_MAX77693_COMMON_H
14
15enum max77693_types {
16 TYPE_MAX77693_UNKNOWN,
17 TYPE_MAX77693,
18 TYPE_MAX77843,
19
20 TYPE_MAX77693_NUM,
21};
22
23/*
24 * Shared also with max77843.
25 */
26struct max77693_dev {
27 struct device *dev;
28 struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
29 struct i2c_client *i2c_muic; /* 0x4A , MUIC */
30 struct i2c_client *i2c_haptic; /* MAX77693: 0x90 , Haptic */
31 struct i2c_client *i2c_chg; /* MAX77843: 0xD2, Charger */
32
33 enum max77693_types type;
34
35 struct regmap *regmap;
36 struct regmap *regmap_muic;
37 struct regmap *regmap_haptic; /* Only MAX77693 */
38 struct regmap *regmap_chg; /* Only MAX77843 */
39
40 struct regmap_irq_chip_data *irq_data_led;
41 struct regmap_irq_chip_data *irq_data_topsys;
42 struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */
43 struct regmap_irq_chip_data *irq_data_muic;
44
45 int irq;
46};
47
48
49#endif /* __LINUX_MFD_MAX77693_COMMON_H */
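
[Editor's note, not part of the patch] A hedged illustration of why the shared structure carries per-variant regmaps: a sub-driver used by both chips can branch on the new type field to pick the right map. The helper name is hypothetical and assumes the usual regmap includes.

static struct regmap *
max77693_example_chg_regmap(struct max77693_dev *max77693)
{
	switch (max77693->type) {
	case TYPE_MAX77693:
		return max77693->regmap;	/* charger on the 0xCC PMIC client */
	case TYPE_MAX77843:
		return max77693->regmap_chg;	/* dedicated 0xD2 charger client */
	default:
		return NULL;
	}
}
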
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 51633ea6f910..3c7a63b98ad6 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -310,30 +310,30 @@ enum max77693_muic_reg {
310#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT) 310#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
311 311
312/* MAX77693 MUIC - STATUS1~3 Register */ 312/* MAX77693 MUIC - STATUS1~3 Register */
313#define STATUS1_ADC_SHIFT (0) 313#define MAX77693_STATUS1_ADC_SHIFT 0
314#define STATUS1_ADCLOW_SHIFT (5) 314#define MAX77693_STATUS1_ADCLOW_SHIFT 5
315#define STATUS1_ADCERR_SHIFT (6) 315#define MAX77693_STATUS1_ADCERR_SHIFT 6
316#define STATUS1_ADC1K_SHIFT (7) 316#define MAX77693_STATUS1_ADC1K_SHIFT 7
317#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) 317#define MAX77693_STATUS1_ADC_MASK (0x1f << MAX77693_STATUS1_ADC_SHIFT)
318#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT) 318#define MAX77693_STATUS1_ADCLOW_MASK BIT(MAX77693_STATUS1_ADCLOW_SHIFT)
319#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT) 319#define MAX77693_STATUS1_ADCERR_MASK BIT(MAX77693_STATUS1_ADCERR_SHIFT)
320#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT) 320#define MAX77693_STATUS1_ADC1K_MASK BIT(MAX77693_STATUS1_ADC1K_SHIFT)
321 321
322#define STATUS2_CHGTYP_SHIFT (0) 322#define MAX77693_STATUS2_CHGTYP_SHIFT 0
323#define STATUS2_CHGDETRUN_SHIFT (3) 323#define MAX77693_STATUS2_CHGDETRUN_SHIFT 3
324#define STATUS2_DCDTMR_SHIFT (4) 324#define MAX77693_STATUS2_DCDTMR_SHIFT 4
325#define STATUS2_DXOVP_SHIFT (5) 325#define MAX77693_STATUS2_DXOVP_SHIFT 5
326#define STATUS2_VBVOLT_SHIFT (6) 326#define MAX77693_STATUS2_VBVOLT_SHIFT 6
327#define STATUS2_VIDRM_SHIFT (7) 327#define MAX77693_STATUS2_VIDRM_SHIFT 7
328#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) 328#define MAX77693_STATUS2_CHGTYP_MASK (0x7 << MAX77693_STATUS2_CHGTYP_SHIFT)
329#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT) 329#define MAX77693_STATUS2_CHGDETRUN_MASK BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT)
330#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT) 330#define MAX77693_STATUS2_DCDTMR_MASK BIT(MAX77693_STATUS2_DCDTMR_SHIFT)
331#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT) 331#define MAX77693_STATUS2_DXOVP_MASK BIT(MAX77693_STATUS2_DXOVP_SHIFT)
332#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT) 332#define MAX77693_STATUS2_VBVOLT_MASK BIT(MAX77693_STATUS2_VBVOLT_SHIFT)
333#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT) 333#define MAX77693_STATUS2_VIDRM_MASK BIT(MAX77693_STATUS2_VIDRM_SHIFT)
334 334
335#define STATUS3_OVP_SHIFT (2) 335#define MAX77693_STATUS3_OVP_SHIFT 2
336#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT) 336#define MAX77693_STATUS3_OVP_MASK BIT(MAX77693_STATUS3_OVP_SHIFT)
337 337
338/* MAX77693 CDETCTRL1~2 register */ 338/* MAX77693 CDETCTRL1~2 register */
339#define CDETCTRL1_CHGDETEN_SHIFT (0) 339#define CDETCTRL1_CHGDETEN_SHIFT (0)
@@ -362,38 +362,38 @@ enum max77693_muic_reg {
362#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT) 362#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
363#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT) 363#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
364#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK) 364#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
365#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \ 365#define MAX77693_CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
366 | (1 << COMN1SW_SHIFT)) 366 | (1 << COMN1SW_SHIFT))
367#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \ 367#define MAX77693_CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
368 | (2 << COMN1SW_SHIFT)) 368 | (2 << COMN1SW_SHIFT))
369#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \ 369#define MAX77693_CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
370 | (3 << COMN1SW_SHIFT)) 370 | (3 << COMN1SW_SHIFT))
371#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \ 371#define MAX77693_CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
372 | (0 << COMN1SW_SHIFT)) 372 | (0 << COMN1SW_SHIFT))
373 373
374#define CONTROL2_LOWPWR_SHIFT (0) 374#define MAX77693_CONTROL2_LOWPWR_SHIFT 0
375#define CONTROL2_ADCEN_SHIFT (1) 375#define MAX77693_CONTROL2_ADCEN_SHIFT 1
376#define CONTROL2_CPEN_SHIFT (2) 376#define MAX77693_CONTROL2_CPEN_SHIFT 2
377#define CONTROL2_SFOUTASRT_SHIFT (3) 377#define MAX77693_CONTROL2_SFOUTASRT_SHIFT 3
378#define CONTROL2_SFOUTORD_SHIFT (4) 378#define MAX77693_CONTROL2_SFOUTORD_SHIFT 4
379#define CONTROL2_ACCDET_SHIFT (5) 379#define MAX77693_CONTROL2_ACCDET_SHIFT 5
380#define CONTROL2_USBCPINT_SHIFT (6) 380#define MAX77693_CONTROL2_USBCPINT_SHIFT 6
381#define CONTROL2_RCPS_SHIFT (7) 381#define MAX77693_CONTROL2_RCPS_SHIFT 7
382#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT) 382#define MAX77693_CONTROL2_LOWPWR_MASK BIT(MAX77693_CONTROL2_LOWPWR_SHIFT)
383#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT) 383#define MAX77693_CONTROL2_ADCEN_MASK BIT(MAX77693_CONTROL2_ADCEN_SHIFT)
384#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT) 384#define MAX77693_CONTROL2_CPEN_MASK BIT(MAX77693_CONTROL2_CPEN_SHIFT)
385#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT) 385#define MAX77693_CONTROL2_SFOUTASRT_MASK BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT)
386#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT) 386#define MAX77693_CONTROL2_SFOUTORD_MASK BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT)
387#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT) 387#define MAX77693_CONTROL2_ACCDET_MASK BIT(MAX77693_CONTROL2_ACCDET_SHIFT)
388#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT) 388#define MAX77693_CONTROL2_USBCPINT_MASK BIT(MAX77693_CONTROL2_USBCPINT_SHIFT)
389#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT) 389#define MAX77693_CONTROL2_RCPS_MASK BIT(MAX77693_CONTROL2_RCPS_SHIFT)
390 390
391#define CONTROL3_JIGSET_SHIFT (0) 391#define MAX77693_CONTROL3_JIGSET_SHIFT 0
392#define CONTROL3_BTLDSET_SHIFT (2) 392#define MAX77693_CONTROL3_BTLDSET_SHIFT 2
393#define CONTROL3_ADCDBSET_SHIFT (4) 393#define MAX77693_CONTROL3_ADCDBSET_SHIFT 4
394#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT) 394#define MAX77693_CONTROL3_JIGSET_MASK (0x3 << MAX77693_CONTROL3_JIGSET_SHIFT)
395#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT) 395#define MAX77693_CONTROL3_BTLDSET_MASK (0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT)
396#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT) 396#define MAX77693_CONTROL3_ADCDBSET_MASK (0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT)
397 397
398/* Slave addr = 0x90: Haptic */ 398/* Slave addr = 0x90: Haptic */
399enum max77693_haptic_reg { 399enum max77693_haptic_reg {
@@ -529,36 +529,4 @@ enum max77693_irq_muic {
529 MAX77693_MUIC_IRQ_NR, 529 MAX77693_MUIC_IRQ_NR,
530}; 530};
531 531
532struct max77693_dev {
533 struct device *dev;
534 struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
535 struct i2c_client *muic; /* 0x4A , MUIC */
536 struct i2c_client *haptic; /* 0x90 , Haptic */
537
538 int type;
539
540 struct regmap *regmap;
541 struct regmap *regmap_muic;
542 struct regmap *regmap_haptic;
543
544 struct regmap_irq_chip_data *irq_data_led;
545 struct regmap_irq_chip_data *irq_data_topsys;
546 struct regmap_irq_chip_data *irq_data_charger;
547 struct regmap_irq_chip_data *irq_data_muic;
548
549 int irq;
550 int irq_gpio;
551 struct mutex irqlock;
552 int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
553 int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
554};
555
556enum max77693_types {
557 TYPE_MAX77693,
558};
559
560extern int max77693_irq_init(struct max77693_dev *max77686);
561extern void max77693_irq_exit(struct max77693_dev *max77686);
562extern int max77693_irq_resume(struct max77693_dev *max77686);
563
564#endif /* __LINUX_MFD_MAX77693_PRIV_H */ 532#endif /* __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 7178ace8379e..c19303b0ccfd 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -318,62 +318,62 @@ enum max77843_irq_muic {
318 MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK) 318 MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
319 319
320/* MAX77843 STATUS register*/ 320/* MAX77843 STATUS register*/
321#define STATUS1_ADC_SHIFT 0 321#define MAX77843_MUIC_STATUS1_ADC_SHIFT 0
322#define STATUS1_ADCERROR_SHIFT 6 322#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT 6
323#define STATUS1_ADC1K_SHIFT 7 323#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT 7
324#define STATUS2_CHGTYP_SHIFT 0 324#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT 0
325#define STATUS2_CHGDETRUN_SHIFT 3 325#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT 3
326#define STATUS2_DCDTMR_SHIFT 4 326#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT 4
327#define STATUS2_DXOVP_SHIFT 5 327#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT 5
328#define STATUS2_VBVOLT_SHIFT 6 328#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT 6
329#define STATUS3_VBADC_SHIFT 0 329#define MAX77843_MUIC_STATUS3_VBADC_SHIFT 0
330#define STATUS3_VDNMON_SHIFT 4 330#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT 4
331#define STATUS3_DNRES_SHIFT 5 331#define MAX77843_MUIC_STATUS3_DNRES_SHIFT 5
332#define STATUS3_MPNACK_SHIFT 6 332#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT 6
333 333
334#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT) 334#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT)
335#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT) 335#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT)
336#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT) 336#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT)
337#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) 337#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT)
338#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) 338#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT)
339#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) 339#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT)
340#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT) 340#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT)
341#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) 341#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT)
342#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT) 342#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT)
343#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT) 343#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT)
344#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT) 344#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT)
345#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT) 345#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT)
346 346
347/* MAX77843 CONTROL register */ 347/* MAX77843 CONTROL register */
348#define CONTROL1_COMP1SW_SHIFT 0 348#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
349#define CONTROL1_COMP2SW_SHIFT 3 349#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
350#define CONTROL1_IDBEN_SHIFT 7 350#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
351#define CONTROL2_LOWPWR_SHIFT 0 351#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
352#define CONTROL2_ADCEN_SHIFT 1 352#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
353#define CONTROL2_CPEN_SHIFT 2 353#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT 2
354#define CONTROL2_ACC_DET_SHIFT 5 354#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT 5
355#define CONTROL2_USBCPINT_SHIFT 6 355#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT 6
356#define CONTROL2_RCPS_SHIFT 7 356#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT 7
357#define CONTROL3_JIGSET_SHIFT 0 357#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT 0
358#define CONTROL4_ADCDBSET_SHIFT 0 358#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT 0
359#define CONTROL4_USBAUTO_SHIFT 4 359#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT 4
360#define CONTROL4_FCTAUTO_SHIFT 5 360#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT 5
361#define CONTROL4_ADCMODE_SHIFT 6 361#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT 6
362 362
363#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT) 363#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
364#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT) 364#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
365#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT) 365#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
366#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT) 366#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
367#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT) 367#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
368#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT) 368#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
369#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT) 369#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT)
370#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT) 370#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT)
371#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT) 371#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT)
372#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT) 372#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT)
373#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT) 373#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT)
374#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT) 374#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT)
375#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT) 375#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)
376#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT) 376#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT)
377 377
378/* MAX77843 switch port */ 378/* MAX77843 switch port */
379#define COM_OPEN 0 379#define COM_OPEN 0
@@ -383,38 +383,38 @@ enum max77843_irq_muic {
383#define COM_AUX_USB 4 383#define COM_AUX_USB 4
384#define COM_AUX_UART 5 384#define COM_AUX_UART 5
385 385
386#define CONTROL1_COM_SW \ 386#define MAX77843_MUIC_CONTROL1_COM_SW \
387 ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \ 387 ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
388 MAX77843_MUIC_CONTROL1_COMP2SW_MASK)) 388 MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
389 389
390#define CONTROL1_SW_OPEN \ 390#define MAX77843_MUIC_CONTROL1_SW_OPEN \
391 ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \ 391 ((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
392 COM_OPEN << CONTROL1_COMP2SW_SHIFT)) 392 COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
393#define CONTROL1_SW_USB \ 393#define MAX77843_MUIC_CONTROL1_SW_USB \
394 ((COM_USB << CONTROL1_COMP1SW_SHIFT | \ 394 ((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
395 COM_USB << CONTROL1_COMP2SW_SHIFT)) 395 COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
396#define CONTROL1_SW_AUDIO \ 396#define MAX77843_MUIC_CONTROL1_SW_AUDIO \
397 ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \ 397 ((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
398 COM_AUDIO << CONTROL1_COMP2SW_SHIFT)) 398 COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
399#define CONTROL1_SW_UART \ 399#define MAX77843_MUIC_CONTROL1_SW_UART \
400 ((COM_UART << CONTROL1_COMP1SW_SHIFT | \ 400 ((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
401 COM_UART << CONTROL1_COMP2SW_SHIFT)) 401 COM_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
402#define CONTROL1_SW_AUX_USB \ 402#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \
403 ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \ 403 ((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
404 COM_AUX_USB << CONTROL1_COMP2SW_SHIFT)) 404 COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
405#define CONTROL1_SW_AUX_UART \ 405#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \
406 ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \ 406 ((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
407 COM_AUX_UART << CONTROL1_COMP2SW_SHIFT)) 407 COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
408 408
409#define MAX77843_DISABLE 0 409#define MAX77843_DISABLE 0
410#define MAX77843_ENABLE 1 410#define MAX77843_ENABLE 1
411 411
412#define CONTROL4_AUTO_DISABLE \ 412#define CONTROL4_AUTO_DISABLE \
413 ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \ 413 ((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
414 (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT)) 414 (MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
415#define CONTROL4_AUTO_ENABLE \ 415#define CONTROL4_AUTO_ENABLE \
416 ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \ 416 ((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
417 (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT)) 417 (MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
418 418
419/* MAX77843 SAFEOUT LDO Control register */ 419/* MAX77843 SAFEOUT LDO Control register */
420#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0 420#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
@@ -431,24 +431,4 @@ enum max77843_irq_muic {
431#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \ 431#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
432 (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT) 432 (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
433 433
434struct max77843 {
435 struct device *dev;
436
437 struct i2c_client *i2c;
438 struct i2c_client *i2c_chg;
439 struct i2c_client *i2c_fuel;
440 struct i2c_client *i2c_muic;
441
442 struct regmap *regmap;
443 struct regmap *regmap_chg;
444 struct regmap *regmap_fuel;
445 struct regmap *regmap_muic;
446
447 struct regmap_irq_chip_data *irq_data;
448 struct regmap_irq_chip_data *irq_data_chg;
449 struct regmap_irq_chip_data *irq_data_fuel;
450 struct regmap_irq_chip_data *irq_data_muic;
451
452 int irq;
453};
454#endif /* __MAX77843_H__ */ 434#endif /* __MAX77843_H__ */
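
[Editor's note, not part of the patch] A small sketch of the renamed MUIC macros in use; status1 would come from a regmap_read() of the MUIC STATUS1 register, and the helper itself is illustrative only.

static inline u8 max77843_example_muic_adc(unsigned int status1)
{
	return (status1 & MAX77843_MUIC_STATUS1_ADC_MASK) >>
		MAX77843_MUIC_STATUS1_ADC_SHIFT;
}
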
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index cf5265b0d1c1..45b8e8aa1fbf 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@ struct mt6397_chip {
57 int irq; 57 int irq;
58 struct irq_domain *irq_domain; 58 struct irq_domain *irq_domain;
59 struct mutex irqlock; 59 struct mutex irqlock;
60 u16 wake_mask[2];
60 u16 irq_masks_cur[2]; 61 u16 irq_masks_cur[2];
61 u16 irq_masks_cache[2]; 62 u16 irq_masks_cache[2];
62}; 63};
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index bb270bd03eed..13e1d96935ed 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -21,6 +21,7 @@
21#include <linux/regmap.h> 21#include <linux/regmap.h>
22#include <linux/regulator/driver.h> 22#include <linux/regulator/driver.h>
23#include <linux/extcon.h> 23#include <linux/extcon.h>
24#include <linux/of_gpio.h>
24#include <linux/usb/phy_companion.h> 25#include <linux/usb/phy_companion.h>
25 26
26#define PALMAS_NUM_CLIENTS 3 27#define PALMAS_NUM_CLIENTS 3
@@ -551,10 +552,16 @@ struct palmas_usb {
551 int vbus_otg_irq; 552 int vbus_otg_irq;
552 int vbus_irq; 553 int vbus_irq;
553 554
555 int gpio_id_irq;
556 struct gpio_desc *id_gpiod;
557 unsigned long sw_debounce_jiffies;
558 struct delayed_work wq_detectid;
559
554 enum palmas_usb_state linkstat; 560 enum palmas_usb_state linkstat;
555 int wakeup; 561 int wakeup;
556 bool enable_vbus_detection; 562 bool enable_vbus_detection;
557 bool enable_id_detection; 563 bool enable_id_detection;
564 bool enable_gpio_id_detection;
558}; 565};
559 566
560#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator) 567#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d16f4c82c568..558a485d03ab 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -435,4 +435,12 @@
435#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) 435#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
436#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) 436#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
437 437
438/* For imx6ul iomux gpr register field define */
439#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
440#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
441#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
442#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
443#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
444#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17)
445
438#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ 446#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
new file mode 100644
index 000000000000..eb492d47f717
--- /dev/null
+++ b/include/linux/microchipphy.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _MICROCHIPPHY_H
19#define _MICROCHIPPHY_H
20
21#define LAN88XX_INT_MASK (0x19)
22#define LAN88XX_INT_MASK_MDINTPIN_EN_ (0x8000)
23#define LAN88XX_INT_MASK_SPEED_CHANGE_ (0x4000)
24#define LAN88XX_INT_MASK_LINK_CHANGE_ (0x2000)
25#define LAN88XX_INT_MASK_FDX_CHANGE_ (0x1000)
26#define LAN88XX_INT_MASK_AUTONEG_ERR_ (0x0800)
27#define LAN88XX_INT_MASK_AUTONEG_DONE_ (0x0400)
28#define LAN88XX_INT_MASK_POE_DETECT_ (0x0200)
29#define LAN88XX_INT_MASK_SYMBOL_ERR_ (0x0100)
30#define LAN88XX_INT_MASK_FAST_LINK_FAIL_ (0x0080)
31#define LAN88XX_INT_MASK_WOL_EVENT_ (0x0040)
32#define LAN88XX_INT_MASK_EXTENDED_INT_ (0x0020)
33#define LAN88XX_INT_MASK_RESERVED_ (0x0010)
34#define LAN88XX_INT_MASK_FALSE_CARRIER_ (0x0008)
35#define LAN88XX_INT_MASK_LINK_SPEED_DS_ (0x0004)
36#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_ (0x0002)
37#define LAN88XX_INT_MASK_RX__ER_ (0x0001)
38
39#define LAN88XX_INT_STS (0x1A)
40#define LAN88XX_INT_STS_INT_ACTIVE_ (0x8000)
41#define LAN88XX_INT_STS_SPEED_CHANGE_ (0x4000)
42#define LAN88XX_INT_STS_LINK_CHANGE_ (0x2000)
43#define LAN88XX_INT_STS_FDX_CHANGE_ (0x1000)
44#define LAN88XX_INT_STS_AUTONEG_ERR_ (0x0800)
45#define LAN88XX_INT_STS_AUTONEG_DONE_ (0x0400)
46#define LAN88XX_INT_STS_POE_DETECT_ (0x0200)
47#define LAN88XX_INT_STS_SYMBOL_ERR_ (0x0100)
48#define LAN88XX_INT_STS_FAST_LINK_FAIL_ (0x0080)
49#define LAN88XX_INT_STS_WOL_EVENT_ (0x0040)
50#define LAN88XX_INT_STS_EXTENDED_INT_ (0x0020)
51#define LAN88XX_INT_STS_RESERVED_ (0x0010)
52#define LAN88XX_INT_STS_FALSE_CARRIER_ (0x0008)
53#define LAN88XX_INT_STS_LINK_SPEED_DS_ (0x0004)
54#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_ (0x0002)
55#define LAN88XX_INT_STS_RX_ER_ (0x0001)
56
57#define LAN88XX_EXT_PAGE_ACCESS (0x1F)
58#define LAN88XX_EXT_PAGE_SPACE_0 (0x0000)
59#define LAN88XX_EXT_PAGE_SPACE_1 (0x0001)
60#define LAN88XX_EXT_PAGE_SPACE_2 (0x0002)
61
62/* Extended Register Page 1 space */
63#define LAN88XX_EXT_MODE_CTRL (0x13)
64#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_ (0x000C)
65#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000)
66#define LAN88XX_EXT_MODE_CTRL_MDI_ (0x0008)
67#define LAN88XX_EXT_MODE_CTRL_MDI_X_ (0x000C)
68
69/* MMD 3 Registers */
70#define LAN88XX_MMD3_CHIP_ID (32877)
71#define LAN88XX_MMD3_CHIP_REV (32878)
72
73#endif /* _MICROCHIPPHY_H */
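
[Editor's note, not part of the header] A hedged sketch of how a phylib driver might consume these registers. The function is illustrative, and the idea that reading INT_STS clears the latched events is an assumption about the hardware, not something the header states.

#include <linux/phy.h>

static int lan88xx_example_ack_interrupt(struct phy_device *phydev)
{
	int rc = phy_read(phydev, LAN88XX_INT_STS);

	return rc < 0 ? rc : 0;
}
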
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 819077c32690..81f6e427ba6b 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -67,7 +67,7 @@ struct miscdevice {
 };
 
 extern int misc_register(struct miscdevice *misc);
-extern int misc_deregister(struct miscdevice *misc);
+extern void misc_deregister(struct miscdevice *misc);
 
 #define MODULE_ALIAS_MISCDEV(minor) \
 	MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
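
[Editor's note, not part of the patch] With the void prototype a remove path simply calls the function; the old int return was never useful to propagate. Illustrative only; my_misc stands for a module-level struct miscdevice.

static void example_driver_exit(void)
{
	misc_deregister(&my_misc);	/* no return value left to check */
}
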
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12a1163..09cebe528488 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
 
 enum {
 	MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
-	MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+	MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
+	MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
 	MLX4_CQE_L2_TUNNEL = 1 << 27,
 	MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
 	MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1ce3b4a..baad4cb8e9b0 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -79,7 +79,8 @@ enum {
 
 enum {
 	MLX4_MAX_PORTS = 2,
-	MLX4_MAX_PORT_PKEYS = 128
+	MLX4_MAX_PORT_PKEYS = 128,
+	MLX4_MAX_PORT_GIDS = 128
 };
 
 /* base qkey for use in sriov tunnel-qp/proxy-qp communication.
@@ -211,6 +212,8 @@ enum {
211 MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26, 212 MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
212 MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27, 213 MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
213 MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, 214 MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
215 MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29,
216 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
214}; 217};
215 218
216enum { 219enum {
@@ -581,6 +584,7 @@ struct mlx4_caps {
581 u64 phys_port_id[MLX4_MAX_PORTS + 1]; 584 u64 phys_port_id[MLX4_MAX_PORTS + 1];
582 int tunnel_offload_mode; 585 int tunnel_offload_mode;
583 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; 586 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
587 u8 phv_bit[MLX4_MAX_PORTS + 1];
584 u8 alloc_res_qp_mask; 588 u8 alloc_res_qp_mask;
585 u32 dmfs_high_rate_qpn_base; 589 u32 dmfs_high_rate_qpn_base;
586 u32 dmfs_high_rate_qpn_range; 590 u32 dmfs_high_rate_qpn_range;
@@ -1332,6 +1336,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
1332int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, 1336int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
1333 u8 ignore_fcs_value); 1337 u8 ignore_fcs_value);
1334int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable); 1338int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
1339int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
1340int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
1335int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); 1341int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
1336int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 1342int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1337int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1343int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 9553a73d2049..5a06d969338e 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -59,6 +59,7 @@ struct mlx4_interface {
59 void (*event) (struct mlx4_dev *dev, void *context, 59 void (*event) (struct mlx4_dev *dev, void *context,
60 enum mlx4_dev_event event, unsigned long param); 60 enum mlx4_dev_event event, unsigned long param);
61 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); 61 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
62 void (*activate)(struct mlx4_dev *dev, void *context);
62 struct list_head list; 63 struct list_head list;
63 enum mlx4_protocol protocol; 64 enum mlx4_protocol protocol;
64 int flags; 65 int flags;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539e5456..de45a51b3f04 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@ enum {
 	MLX4_WQE_CTRL_SOLICITED = 1 << 1,
 	MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
 	MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
-	MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+	MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
+	MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
 	MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
 	MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
 };
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9e2097..8eb3b19af2a4 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -402,6 +402,17 @@ struct mlx5_cmd_teardown_hca_mbox_out {
402 u8 rsvd[8]; 402 u8 rsvd[8];
403}; 403};
404 404
405struct mlx5_cmd_query_special_contexts_mbox_in {
406 struct mlx5_inbox_hdr hdr;
407 u8 rsvd[8];
408};
409
410struct mlx5_cmd_query_special_contexts_mbox_out {
411 struct mlx5_outbox_hdr hdr;
412 __be32 dump_fill_mkey;
413 __be32 resd_lkey;
414};
415
405struct mlx5_cmd_layout { 416struct mlx5_cmd_layout {
406 u8 type; 417 u8 type;
407 u8 rsvd0[3]; 418 u8 rsvd0[3];
@@ -1182,6 +1193,16 @@ enum {
1182 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, 1193 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
1183}; 1194};
1184 1195
1196enum {
1197 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
1198 MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
1199 MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
1200 MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
1201 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1202 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
1203 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
1204};
1205
1185static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) 1206static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1186{ 1207{
1187 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) 1208 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88c2429..27b53f9a24ad 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,8 @@ enum {
103 MLX5_REG_PMTU = 0x5003, 103 MLX5_REG_PMTU = 0x5003,
104 MLX5_REG_PTYS = 0x5004, 104 MLX5_REG_PTYS = 0x5004,
105 MLX5_REG_PAOS = 0x5006, 105 MLX5_REG_PAOS = 0x5006,
106 MLX5_REG_PFCC = 0x5007,
107 MLX5_REG_PPCNT = 0x5008,
106 MLX5_REG_PMAOS = 0x5012, 108 MLX5_REG_PMAOS = 0x5012,
107 MLX5_REG_PUDE = 0x5009, 109 MLX5_REG_PUDE = 0x5009,
108 MLX5_REG_PMPE = 0x5010, 110 MLX5_REG_PMPE = 0x5010,
@@ -151,8 +153,8 @@ enum mlx5_dev_event {
 };
 
 enum mlx5_port_status {
-	MLX5_PORT_UP = 1 << 1,
-	MLX5_PORT_DOWN = 1 << 2,
+	MLX5_PORT_UP = 1,
+	MLX5_PORT_DOWN = 2,
 };
 
 struct mlx5_uuar_info {
@@ -380,7 +382,7 @@ struct mlx5_uar {
 	u32 index;
 	struct list_head bf_list;
 	unsigned free_bf_bmap;
-	void __iomem *wc_map;
+	void __iomem *bf_map;
 	void __iomem *map;
 };
 
@@ -435,6 +437,8 @@ struct mlx5_priv {
435 struct mlx5_uuar_info uuari; 437 struct mlx5_uuar_info uuari;
436 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); 438 MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
437 439
440 struct io_mapping *bf_mapping;
441
438 /* pages stuff */ 442 /* pages stuff */
439 struct workqueue_struct *pg_wq; 443 struct workqueue_struct *pg_wq;
440 struct rb_root page_root; 444 struct rb_root page_root;
@@ -463,6 +467,10 @@ struct mlx5_priv {
463 /* end: mr staff */ 467 /* end: mr staff */
464 468
465 /* start: alloc staff */ 469 /* start: alloc staff */
470 /* protect buffer alocation according to numa node */
471 struct mutex alloc_mutex;
472 int numa_node;
473
466 struct mutex pgdir_mutex; 474 struct mutex pgdir_mutex;
467 struct list_head pgdir_list; 475 struct list_head pgdir_list;
468 /* end: alloc staff */ 476 /* end: alloc staff */
@@ -672,6 +680,8 @@ void mlx5_health_cleanup(void);
672void __init mlx5_health_init(void); 680void __init mlx5_health_init(void);
673void mlx5_start_health_poll(struct mlx5_core_dev *dev); 681void mlx5_start_health_poll(struct mlx5_core_dev *dev);
674void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 682void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
683int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
684 struct mlx5_buf *buf, int node);
675int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); 685int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
676void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); 686void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
677struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, 687struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -752,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
 			       u8 local_port);
 int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
 			int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-			 enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+			       enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status);
 
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
 void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
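
[Editor's note, not part of the patch] A minimal sketch of the renamed helper in use; mdev is whatever struct mlx5_core_dev pointer the caller already holds.

static int example_port_up(struct mlx5_core_dev *mdev)
{
	return mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
}
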
@@ -764,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
764int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, 775int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
765 u8 *vl_hw_cap, u8 local_port); 776 u8 *vl_hw_cap, u8 local_port);
766 777
778int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
779int mlx5_query_port_pause(struct mlx5_core_dev *dev,
780 u32 *rx_pause, u32 *tx_pause);
781
767int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 782int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
768void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 783void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
769int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 784int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -773,6 +788,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
773int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); 788int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
774void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); 789void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
775int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); 790int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
791int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
792 int node);
776void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); 793void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
777 794
778const char *mlx5_command_str(int command); 795const char *mlx5_command_str(int command);
@@ -828,6 +845,7 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
828int mlx5_register_interface(struct mlx5_interface *intf); 845int mlx5_register_interface(struct mlx5_interface *intf);
829void mlx5_unregister_interface(struct mlx5_interface *intf); 846void mlx5_unregister_interface(struct mlx5_interface *intf);
830int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 847int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
848int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
831 849
832struct mlx5_profile { 850struct mlx5_profile {
833 u64 mask; 851 u64 mask;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fee041c..dd2097455a2e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@ enum {
 };
 
 enum {
-	MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
-	MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
-	MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+	MLX5_RX_HASH_FN_NONE = 0x0,
+	MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+	MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
 };
 
 enum {
@@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits {
4050 struct mlx5_ifc_tisc_bits ctx; 4050 struct mlx5_ifc_tisc_bits ctx;
4051}; 4051};
4052 4052
4053struct mlx5_ifc_modify_tir_bitmask_bits {
4054 u8 reserved[0x20];
4055
4056 u8 reserved1[0x1f];
4057 u8 lro[0x1];
4058};
4059
4053struct mlx5_ifc_modify_tir_out_bits { 4060struct mlx5_ifc_modify_tir_out_bits {
4054 u8 status[0x8]; 4061 u8 status[0x8];
4055 u8 reserved_0[0x18]; 4062 u8 reserved_0[0x18];
@@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits {
 
 	u8 reserved_3[0x20];
 
-	u8 modify_bitmask[0x40];
+	struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
 
 	u8 reserved_4[0x40];
 
@@ -4116,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits {
4116 u8 reserved_1[0x40]; 4123 u8 reserved_1[0x40];
4117}; 4124};
4118 4125
4126struct mlx5_ifc_rqt_bitmask_bits {
4127 u8 reserved[0x20];
4128
4129 u8 reserved1[0x1f];
4130 u8 rqn_list[0x1];
4131};
4132
4119struct mlx5_ifc_modify_rqt_in_bits { 4133struct mlx5_ifc_modify_rqt_in_bits {
4120 u8 opcode[0x10]; 4134 u8 opcode[0x10];
4121 u8 reserved_0[0x10]; 4135 u8 reserved_0[0x10];
@@ -4128,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits {
 
 	u8 reserved_3[0x20];
 
-	u8 modify_bitmask[0x40];
+	struct mlx5_ifc_rqt_bitmask_bits bitmask;
 
 	u8 reserved_4[0x40];
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf6f117fcf4d..fda728e3c27d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -124,8 +124,10 @@ extern unsigned int kobjsize(const void *objp);
124#define VM_MAYSHARE 0x00000080 124#define VM_MAYSHARE 0x00000080
125 125
126#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 126#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
127#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
127#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 128#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
128#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 129#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
130#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
129 131
130#define VM_LOCKED 0x00002000 132#define VM_LOCKED 0x00002000
131#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ 133#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
@@ -245,7 +247,10 @@ struct vm_fault {
245struct vm_operations_struct { 247struct vm_operations_struct {
246 void (*open)(struct vm_area_struct * area); 248 void (*open)(struct vm_area_struct * area);
247 void (*close)(struct vm_area_struct * area); 249 void (*close)(struct vm_area_struct * area);
250 int (*mremap)(struct vm_area_struct * area);
248 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); 251 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
252 int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
253 pmd_t *, unsigned int flags);
249 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); 254 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
250 255
251 /* notification that a previously read-only page is about to become 256 /* notification that a previously read-only page is about to become
@@ -304,18 +309,6 @@ struct inode;
304#define page_private(page) ((page)->private) 309#define page_private(page) ((page)->private)
305#define set_page_private(page, v) ((page)->private = (v)) 310#define set_page_private(page, v) ((page)->private = (v))
306 311
307/* It's valid only if the page is free path or free_list */
308static inline void set_freepage_migratetype(struct page *page, int migratetype)
309{
310 page->index = migratetype;
311}
312
313/* It's valid only if the page is free path or free_list */
314static inline int get_freepage_migratetype(struct page *page)
315{
316 return page->index;
317}
318
319/* 312/*
320 * FIXME: take this include out, include page-flags.h in 313 * FIXME: take this include out, include page-flags.h in
321 * files which need it (119 of them) 314 * files which need it (119 of them)
@@ -356,20 +349,15 @@ static inline int get_page_unless_zero(struct page *page)
356 return atomic_inc_not_zero(&page->_count); 349 return atomic_inc_not_zero(&page->_count);
357} 350}
358 351
359/*
360 * Try to drop a ref unless the page has a refcount of one, return false if
361 * that is the case.
362 * This is to make sure that the refcount won't become zero after this drop.
363 * This can be called when MMU is off so it must not access
364 * any of the virtual mappings.
365 */
366static inline int put_page_unless_one(struct page *page)
367{
368 return atomic_add_unless(&page->_count, -1, 1);
369}
370
371extern int page_is_ram(unsigned long pfn); 352extern int page_is_ram(unsigned long pfn);
372extern int region_is_ram(resource_size_t phys_addr, unsigned long size); 353
354enum {
355 REGION_INTERSECTS,
356 REGION_DISJOINT,
357 REGION_MIXED,
358};
359
360int region_intersects(resource_size_t offset, size_t size, const char *type);
373 361
374/* Support for virtually mapped pages */ 362/* Support for virtually mapped pages */
375struct page *vmalloc_to_page(const void *addr); 363struct page *vmalloc_to_page(const void *addr);
@@ -1257,6 +1245,11 @@ static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1257 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); 1245 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1258} 1246}
1259 1247
1248static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1249{
1250 return !vma->vm_ops;
1251}
1252
1260static inline int stack_guard_page_start(struct vm_area_struct *vma, 1253static inline int stack_guard_page_start(struct vm_area_struct *vma,
1261 unsigned long addr) 1254 unsigned long addr)
1262{ 1255{
@@ -1833,7 +1826,7 @@ extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1833extern struct vm_area_struct *vma_merge(struct mm_struct *, 1826extern struct vm_area_struct *vma_merge(struct mm_struct *,
1834 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 1827 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1835 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 1828 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1836 struct mempolicy *); 1829 struct mempolicy *, struct vm_userfaultfd_ctx);
1837extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 1830extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1838extern int split_vma(struct mm_struct *, 1831extern int split_vma(struct mm_struct *,
1839 struct vm_area_struct *, unsigned long addr, int new_below); 1832 struct vm_area_struct *, unsigned long addr, int new_below);
@@ -1880,11 +1873,19 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
1880 1873
1881extern unsigned long mmap_region(struct file *file, unsigned long addr, 1874extern unsigned long mmap_region(struct file *file, unsigned long addr,
1882 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); 1875 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1883extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 1876extern unsigned long do_mmap(struct file *file, unsigned long addr,
1884 unsigned long len, unsigned long prot, unsigned long flags, 1877 unsigned long len, unsigned long prot, unsigned long flags,
1885 unsigned long pgoff, unsigned long *populate); 1878 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
1886extern int do_munmap(struct mm_struct *, unsigned long, size_t); 1879extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1887 1880
1881static inline unsigned long
1882do_mmap_pgoff(struct file *file, unsigned long addr,
1883 unsigned long len, unsigned long prot, unsigned long flags,
1884 unsigned long pgoff, unsigned long *populate)
1885{
1886 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
1887}
1888
1888#ifdef CONFIG_MMU 1889#ifdef CONFIG_MMU
1889extern int __mm_populate(unsigned long addr, unsigned long len, 1890extern int __mm_populate(unsigned long addr, unsigned long len,
1890 int ignore_errors); 1891 int ignore_errors);
@@ -2183,6 +2184,7 @@ extern int memory_failure(unsigned long pfn, int trapno, int flags);
2183extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2184extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2184extern int unpoison_memory(unsigned long pfn); 2185extern int unpoison_memory(unsigned long pfn);
2185extern int get_hwpoison_page(struct page *page); 2186extern int get_hwpoison_page(struct page *page);
2187extern void put_hwpoison_page(struct page *page);
2186extern int sysctl_memory_failure_early_kill; 2188extern int sysctl_memory_failure_early_kill;
2187extern int sysctl_memory_failure_recovery; 2189extern int sysctl_memory_failure_recovery;
2188extern void shake_page(struct page *p, int access); 2190extern void shake_page(struct page *p, int access);
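Editorial note: the mm.h hunks above drop the freepage-migratetype and put_page_unless_one() helpers, add vma_is_anonymous() and put_hwpoison_page(), thread a vm_userfaultfd_ctx argument through vma_merge(), demote do_mmap_pgoff() to an inline wrapper over the new do_mmap(), and replace region_is_ram() with a three-way region_intersects() query. Below is a minimal sketch of a caller of that new query, assuming a hypothetical driver that must not claim a range backed by System RAM; only the REGION_* enum and the prototype come from the hunk, the function name is illustrative.

#include <linux/ioport.h>	/* struct resource, resource_size() */
#include <linux/mm.h>		/* region_intersects(), REGION_* */

/* Refuse to claim a physical range that overlaps System RAM.
 * REGION_DISJOINT means no byte of [offset, offset + size) has the
 * named type; REGION_INTERSECTS and REGION_MIXED both indicate overlap.
 */
static int demo_claim_mmio_range(struct resource *res)
{
	int ret = region_intersects(res->start, resource_size(res),
				    "System RAM");

	if (ret != REGION_DISJOINT)
		return -EBUSY;	/* partially or fully backed by RAM */

	return 0;		/* safe to remap as device memory */
}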
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 15549578d559..3d6baa7d4534 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -235,7 +235,7 @@ struct page_frag_cache {
235 bool pfmemalloc; 235 bool pfmemalloc;
236}; 236};
237 237
238typedef unsigned long __nocast vm_flags_t; 238typedef unsigned long vm_flags_t;
239 239
240/* 240/*
241 * A region containing a mapping of a non-memory backed file under NOMMU 241 * A region containing a mapping of a non-memory backed file under NOMMU
@@ -256,6 +256,16 @@ struct vm_region {
256 * this region */ 256 * this region */
257}; 257};
258 258
259#ifdef CONFIG_USERFAULTFD
260#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
261struct vm_userfaultfd_ctx {
262 struct userfaultfd_ctx *ctx;
263};
264#else /* CONFIG_USERFAULTFD */
265#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
266struct vm_userfaultfd_ctx {};
267#endif /* CONFIG_USERFAULTFD */
268
259/* 269/*
260 * This struct defines a memory VMM memory area. There is one of these 270 * This struct defines a memory VMM memory area. There is one of these
261 * per VM-area/task. A VM area is any part of the process virtual memory 271 * per VM-area/task. A VM area is any part of the process virtual memory
@@ -322,6 +332,7 @@ struct vm_area_struct {
322#ifdef CONFIG_NUMA 332#ifdef CONFIG_NUMA
323 struct mempolicy *vm_policy; /* NUMA policy for the VMA */ 333 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
324#endif 334#endif
335 struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
325}; 336};
326 337
327struct core_thread { 338struct core_thread {
@@ -543,6 +554,7 @@ enum tlb_flush_reason {
543 TLB_REMOTE_SHOOTDOWN, 554 TLB_REMOTE_SHOOTDOWN,
544 TLB_LOCAL_SHOOTDOWN, 555 TLB_LOCAL_SHOOTDOWN,
545 TLB_LOCAL_MM_SHOOTDOWN, 556 TLB_LOCAL_MM_SHOOTDOWN,
557 TLB_REMOTE_SEND_IPI,
546 NR_TLB_FLUSH_REASONS, 558 NR_TLB_FLUSH_REASONS,
547}; 559};
548 560
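Editorial note: mm_types.h drops the __nocast annotation from vm_flags_t, adds TLB_REMOTE_SEND_IPI to the flush reasons, and embeds a struct vm_userfaultfd_ctx in every vm_area_struct. With CONFIG_USERFAULTFD=n the struct is empty, so it can be stored and passed by value at no cost. A minimal sketch of the initialization pattern follows; the function name is illustrative only.

#include <linux/mm_types.h>

/* A VMA starts out with no userfaultfd context attached; the same
 * value is what callers that do not care about userfaultfd hand to
 * the extended vma_merge() in mm.h above.
 */
static void demo_init_vma_uffd(struct vm_area_struct *vma)
{
	vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}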
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 4d3776d25925..fdd0779ccdfa 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -279,10 +279,13 @@ struct mmc_card {
279#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ 279#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
280#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ 280#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
281#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ 281#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
282#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
283
282 284
283 unsigned int erase_size; /* erase size in sectors */ 285 unsigned int erase_size; /* erase size in sectors */
284 unsigned int erase_shift; /* if erase unit is power 2 */ 286 unsigned int erase_shift; /* if erase unit is power 2 */
285 unsigned int pref_erase; /* in sectors */ 287 unsigned int pref_erase; /* in sectors */
288 unsigned int eg_boundary; /* don't cross erase-group boundaries */
286 u8 erased_byte; /* value of erased bytes */ 289 u8 erased_byte; /* value of erased bytes */
287 290
288 u32 raw_cid[4]; /* raw card CID */ 291 u32 raw_cid[4]; /* raw card CID */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 5be97676f1fa..134c57422740 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -98,6 +98,7 @@ struct mmc_data;
98 * @irq_flags: The flags to be passed to request_irq. 98 * @irq_flags: The flags to be passed to request_irq.
99 * @irq: The irq value to be passed to request_irq. 99 * @irq: The irq value to be passed to request_irq.
100 * @sdio_id0: Number of slot0 in the SDIO interrupt registers. 100 * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
101 * @dto_timer: Timer for broken data transfer over scheme.
101 * 102 *
102 * Locking 103 * Locking
103 * ======= 104 * =======
@@ -153,11 +154,7 @@ struct dw_mci {
153 dma_addr_t sg_dma; 154 dma_addr_t sg_dma;
154 void *sg_cpu; 155 void *sg_cpu;
155 const struct dw_mci_dma_ops *dma_ops; 156 const struct dw_mci_dma_ops *dma_ops;
156#ifdef CONFIG_MMC_DW_IDMAC
157 unsigned int ring_size; 157 unsigned int ring_size;
158#else
159 struct dw_mci_dma_data *dma_data;
160#endif
161 u32 cmd_status; 158 u32 cmd_status;
162 u32 data_status; 159 u32 data_status;
163 u32 stop_cmdr; 160 u32 stop_cmdr;
@@ -204,6 +201,7 @@ struct dw_mci {
204 int sdio_id0; 201 int sdio_id0;
205 202
206 struct timer_list cmd11_timer; 203 struct timer_list cmd11_timer;
204 struct timer_list dto_timer;
207}; 205};
208 206
209/* DMA ops for Internal/External DMAC interface */ 207/* DMA ops for Internal/External DMAC interface */
@@ -226,6 +224,8 @@ struct dw_mci_dma_ops {
226#define DW_MCI_QUIRK_HIGHSPEED BIT(2) 224#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
227/* Unreliable card detection */ 225/* Unreliable card detection */
228#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) 226#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
227/* Timer for broken data transfer over scheme */
228#define DW_MCI_QUIRK_BROKEN_DTO BIT(4)
229 229
230struct dma_pdata; 230struct dma_pdata;
231 231
@@ -259,7 +259,6 @@ struct dw_mci_board {
259 259
260 struct dw_mci_dma_ops *dma_ops; 260 struct dw_mci_dma_ops *dma_ops;
261 struct dma_pdata *data; 261 struct dma_pdata *data;
262 struct block_settings *blk_settings;
263}; 262};
264 263
265#endif /* LINUX_MMC_DW_MMC_H */ 264#endif /* LINUX_MMC_DW_MMC_H */
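Editorial note: dw_mmc.h pairs the new DW_MCI_QUIRK_BROKEN_DTO quirk bit with a dto_timer in struct dw_mci so the driver can emulate the data-transfer-over interrupt in software. A hedged sketch of how the pair might be used, assuming this generation's host->quirks field carries the DW_MCI_QUIRK_* bits; the timeout value and function name are illustrative.

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/mmc/dw_mmc.h>

#define DEMO_DTO_TIMEOUT_MS	100	/* illustrative timeout */

/* Arm the software data-timeout only on controllers whose DTO
 * interrupt is unreliable, i.e. when the platform set the new quirk.
 */
static void demo_start_dto_timer(struct dw_mci *host)
{
	if (!(host->quirks & DW_MCI_QUIRK_BROKEN_DTO))
		return;

	mod_timer(&host->dto_timer,
		  jiffies + msecs_to_jiffies(DEMO_DTO_TIMEOUT_MS));
}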
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1369e54faeb7..83b81fd865f3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -412,7 +412,8 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
412{ 412{
413 host->ops->enable_sdio_irq(host, 0); 413 host->ops->enable_sdio_irq(host, 0);
414 host->sdio_irq_pending = true; 414 host->sdio_irq_pending = true;
415 wake_up_process(host->sdio_irq_thread); 415 if (host->sdio_irq_thread)
416 wake_up_process(host->sdio_irq_thread);
416} 417}
417 418
418void sdio_run_irqs(struct mmc_host *host); 419void sdio_run_irqs(struct mmc_host *host);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 61cd67f4d788..a1a210d59961 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -66,6 +66,16 @@ struct mmu_notifier_ops {
66 unsigned long end); 66 unsigned long end);
67 67
68 /* 68 /*
69 * clear_young is a lightweight version of clear_flush_young. Like the
70 * latter, it is supposed to test-and-clear the young/accessed bitflag
71 * in the secondary pte, but it may omit flushing the secondary tlb.
72 */
73 int (*clear_young)(struct mmu_notifier *mn,
74 struct mm_struct *mm,
75 unsigned long start,
76 unsigned long end);
77
78 /*
69 * test_young is called to check the young/accessed bitflag in 79 * test_young is called to check the young/accessed bitflag in
70 * the secondary pte. This is used to know if the page is 80 * the secondary pte. This is used to know if the page is
71 * frequently used without actually clearing the flag or tearing 81 * frequently used without actually clearing the flag or tearing
@@ -203,6 +213,9 @@ extern void __mmu_notifier_release(struct mm_struct *mm);
203extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, 213extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
204 unsigned long start, 214 unsigned long start,
205 unsigned long end); 215 unsigned long end);
216extern int __mmu_notifier_clear_young(struct mm_struct *mm,
217 unsigned long start,
218 unsigned long end);
206extern int __mmu_notifier_test_young(struct mm_struct *mm, 219extern int __mmu_notifier_test_young(struct mm_struct *mm,
207 unsigned long address); 220 unsigned long address);
208extern void __mmu_notifier_change_pte(struct mm_struct *mm, 221extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -231,6 +244,15 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
231 return 0; 244 return 0;
232} 245}
233 246
247static inline int mmu_notifier_clear_young(struct mm_struct *mm,
248 unsigned long start,
249 unsigned long end)
250{
251 if (mm_has_notifiers(mm))
252 return __mmu_notifier_clear_young(mm, start, end);
253 return 0;
254}
255
234static inline int mmu_notifier_test_young(struct mm_struct *mm, 256static inline int mmu_notifier_test_young(struct mm_struct *mm,
235 unsigned long address) 257 unsigned long address)
236{ 258{
@@ -311,6 +333,28 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
311 __young; \ 333 __young; \
312}) 334})
313 335
336#define ptep_clear_young_notify(__vma, __address, __ptep) \
337({ \
338 int __young; \
339 struct vm_area_struct *___vma = __vma; \
340 unsigned long ___address = __address; \
341 __young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
342 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
343 ___address + PAGE_SIZE); \
344 __young; \
345})
346
347#define pmdp_clear_young_notify(__vma, __address, __pmdp) \
348({ \
349 int __young; \
350 struct vm_area_struct *___vma = __vma; \
351 unsigned long ___address = __address; \
352 __young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
353 __young |= mmu_notifier_clear_young(___vma->vm_mm, ___address, \
354 ___address + PMD_SIZE); \
355 __young; \
356})
357
314#define ptep_clear_flush_notify(__vma, __address, __ptep) \ 358#define ptep_clear_flush_notify(__vma, __address, __ptep) \
315({ \ 359({ \
316 unsigned long ___addr = __address & PAGE_MASK; \ 360 unsigned long ___addr = __address & PAGE_MASK; \
@@ -427,6 +471,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
427 471
428#define ptep_clear_flush_young_notify ptep_clear_flush_young 472#define ptep_clear_flush_young_notify ptep_clear_flush_young
429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young 473#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
474#define ptep_clear_young_notify ptep_test_and_clear_young
475#define pmdp_clear_young_notify pmdp_test_and_clear_young
430#define ptep_clear_flush_notify ptep_clear_flush 476#define ptep_clear_flush_notify ptep_clear_flush
431#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush 477#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
432#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear 478#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
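Editorial note: the mmu_notifier.h hunks add a ->clear_young() callback plus the ptep/pmdp_clear_young_notify() wrappers, a lighter-weight variant of ->clear_flush_young() that may skip the secondary TLB flush. A minimal sketch of a secondary-MMU user wiring up the new op; the ops table is deliberately left empty apart from the new hook and the body is a placeholder.

#include <linux/mmu_notifier.h>

/* Test-and-clear accessed bits in the secondary page tables for
 * [start, end).  Unlike ->clear_flush_young(), the secondary TLB
 * flush may be omitted; return non-zero if any entry was young.
 */
static int demo_clear_young(struct mmu_notifier *mn, struct mm_struct *mm,
			    unsigned long start, unsigned long end)
{
	return 0;	/* no secondary MMU state in this sketch */
}

static const struct mmu_notifier_ops demo_mmu_ops = {
	.clear_young = demo_clear_young,
};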
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 754c25966a0a..d94347737292 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -319,7 +319,11 @@ enum zone_type {
319 ZONE_HIGHMEM, 319 ZONE_HIGHMEM,
320#endif 320#endif
321 ZONE_MOVABLE, 321 ZONE_MOVABLE,
322#ifdef CONFIG_ZONE_DEVICE
323 ZONE_DEVICE,
324#endif
322 __MAX_NR_ZONES 325 __MAX_NR_ZONES
326
323}; 327};
324 328
325#ifndef __GENERATING_BOUNDS_H 329#ifndef __GENERATING_BOUNDS_H
@@ -690,14 +694,6 @@ struct zonelist {
690#endif 694#endif
691}; 695};
692 696
693#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
694struct node_active_region {
695 unsigned long start_pfn;
696 unsigned long end_pfn;
697 int nid;
698};
699#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
700
701#ifndef CONFIG_DISCONTIGMEM 697#ifndef CONFIG_DISCONTIGMEM
702/* The array of struct pages - for discontigmem use pgdat->lmem_map */ 698/* The array of struct pages - for discontigmem use pgdat->lmem_map */
703extern struct page *mem_map; 699extern struct page *mem_map;
@@ -794,6 +790,25 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
794 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; 790 return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
795} 791}
796 792
793static inline int zone_id(const struct zone *zone)
794{
795 struct pglist_data *pgdat = zone->zone_pgdat;
796
797 return zone - pgdat->node_zones;
798}
799
800#ifdef CONFIG_ZONE_DEVICE
801static inline bool is_dev_zone(const struct zone *zone)
802{
803 return zone_id(zone) == ZONE_DEVICE;
804}
805#else
806static inline bool is_dev_zone(const struct zone *zone)
807{
808 return false;
809}
810#endif
811
797#include <linux/memory_hotplug.h> 812#include <linux/memory_hotplug.h>
798 813
799extern struct mutex zonelists_mutex; 814extern struct mutex zonelists_mutex;
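Editorial note: mmzone.h introduces the optional ZONE_DEVICE zone together with zone_id() and is_dev_zone(), the latter compiling to a constant false when CONFIG_ZONE_DEVICE is off. A one-line sketch of how a pfn walker might use it without any #ifdef of its own; the function name is illustrative.

#include <linux/mm.h>		/* page_zone() */
#include <linux/mmzone.h>

static bool demo_page_is_device_backed(struct page *page)
{
	return is_dev_zone(page_zone(page));
}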
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 34f25b7bf642..688997a24aad 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -253,7 +253,7 @@ struct pcmcia_device_id {
253 253
254 __u32 prod_id_hash[4]; 254 __u32 prod_id_hash[4];
255 255
256 /* not matched against in kernelspace*/ 256 /* not matched against in kernelspace */
257 const char * prod_id[4]; 257 const char * prod_id[4];
258 258
259 /* not matched against */ 259 /* not matched against */
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..ef29eb2d6dfd
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
1#ifndef _LINUX_MPLS_IPTUNNEL_H
2#define _LINUX_MPLS_IPTUNNEL_H
3
4#include <uapi/linux/mpls_iptunnel.h>
5
6#endif /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ac4a68ffae2..ad939d0ba816 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,38 +14,85 @@ extern int pci_msi_ignore_mask;
14/* Helper functions */ 14/* Helper functions */
15struct irq_data; 15struct irq_data;
16struct msi_desc; 16struct msi_desc;
17struct pci_dev;
18struct platform_msi_priv_data;
17void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
18void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 20void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
19 21
22typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
23 struct msi_msg *msg);
24
25/**
26 * platform_msi_desc - Platform device specific msi descriptor data
27 * @msi_priv_data: Pointer to platform private data
28 * @msi_index: The index of the MSI descriptor for multi MSI
29 */
30struct platform_msi_desc {
31 struct platform_msi_priv_data *msi_priv_data;
32 u16 msi_index;
33};
34
35/**
36 * struct msi_desc - Descriptor structure for MSI based interrupts
37 * @list: List head for management
38 * @irq: The base interrupt number
39 * @nvec_used: The number of vectors used
40 * @dev: Pointer to the device which uses this descriptor
41 * @msg: The last set MSI message cached for reuse
42 *
43 * @masked: [PCI MSI/X] Mask bits
44 * @is_msix: [PCI MSI/X] True if MSI-X
45 * @multiple: [PCI MSI/X] log2 num of messages allocated
46 * @multi_cap: [PCI MSI/X] log2 num of messages supported
47 * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
48 * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
49 * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
50 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
51 * @mask_pos: [PCI MSI] Mask register position
52 * @mask_base: [PCI MSI-X] Mask register base address
53 * @platform: [platform] Platform device specific msi descriptor data
54 */
20struct msi_desc { 55struct msi_desc {
21 struct { 56 /* Shared device/bus type independent data */
22 __u8 is_msix : 1; 57 struct list_head list;
23 __u8 multiple: 3; /* log2 num of messages allocated */ 58 unsigned int irq;
24 __u8 multi_cap : 3; /* log2 num of messages supported */ 59 unsigned int nvec_used;
25 __u8 maskbit : 1; /* mask-pending bit supported ? */ 60 struct device *dev;
26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 61 struct msi_msg msg;
27 __u16 entry_nr; /* specific enabled entry */
28 unsigned default_irq; /* default pre-assigned irq */
29 } msi_attrib;
30
31 u32 masked; /* mask bits */
32 unsigned int irq;
33 unsigned int nvec_used; /* number of messages */
34 struct list_head list;
35 62
36 union { 63 union {
37 void __iomem *mask_base; 64 /* PCI MSI/X specific data */
38 u8 mask_pos; 65 struct {
39 }; 66 u32 masked;
40 struct pci_dev *dev; 67 struct {
68 __u8 is_msix : 1;
69 __u8 multiple : 3;
70 __u8 multi_cap : 3;
71 __u8 maskbit : 1;
72 __u8 is_64 : 1;
73 __u16 entry_nr;
74 unsigned default_irq;
75 } msi_attrib;
76 union {
77 u8 mask_pos;
78 void __iomem *mask_base;
79 };
80 };
41 81
42 /* Last set MSI message */ 82 /*
43 struct msi_msg msg; 83 * Non PCI variants add their data structure here. New
84 * entries need to use a named structure. We want
85 * proper name spaces for this. The PCI part is
86 * anonymous for now as it would require an immediate
87 * tree wide cleanup.
88 */
89 struct platform_msi_desc platform;
90 };
44}; 91};
45 92
46/* Helpers to hide struct msi_desc implementation details */ 93/* Helpers to hide struct msi_desc implementation details */
47#define msi_desc_to_dev(desc) (&(desc)->dev.dev) 94#define msi_desc_to_dev(desc) ((desc)->dev)
48#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list) 95#define dev_to_msi_list(dev) (&(dev)->msi_list)
49#define first_msi_entry(dev) \ 96#define first_msi_entry(dev) \
50 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) 97 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
51#define for_each_msi_entry(desc, dev) \ 98#define for_each_msi_entry(desc, dev) \
@@ -56,12 +103,17 @@ struct msi_desc {
56#define for_each_pci_msi_entry(desc, pdev) \ 103#define for_each_pci_msi_entry(desc, pdev) \
57 for_each_msi_entry((desc), &(pdev)->dev) 104 for_each_msi_entry((desc), &(pdev)->dev)
58 105
59static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) 106struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
107void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
108#else /* CONFIG_PCI_MSI */
109static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
60{ 110{
61 return desc->dev; 111 return NULL;
62} 112}
63#endif /* CONFIG_PCI_MSI */ 113#endif /* CONFIG_PCI_MSI */
64 114
115struct msi_desc *alloc_msi_entry(struct device *dev);
116void free_msi_entry(struct msi_desc *entry);
65void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 117void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
66void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 118void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
67void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); 119void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -108,9 +160,6 @@ struct msi_controller {
108 struct device *dev; 160 struct device *dev;
109 struct device_node *of_node; 161 struct device_node *of_node;
110 struct list_head list; 162 struct list_head list;
111#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
112 struct irq_domain *domain;
113#endif
114 163
115 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, 164 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
116 struct msi_desc *desc); 165 struct msi_desc *desc);
@@ -221,6 +270,12 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
221void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); 270void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
222struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); 271struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
223 272
273struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
274 struct msi_domain_info *info,
275 struct irq_domain *parent);
276int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
277 irq_write_msi_msg_t write_msi_msg);
278void platform_msi_domain_free_irqs(struct device *dev);
224#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ 279#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
225 280
226#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN 281#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
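Editorial note: msi.h generalizes struct msi_desc away from PCI and exposes a platform-MSI path: a driver supplies an irq_write_msi_msg_t callback and asks platform_msi_domain_alloc_irqs() for vectors. A hedged sketch of that flow; the vector count and the (omitted) doorbell programming are illustrative, only the prototypes and desc->platform.msi_index come from the hunks.

#include <linux/msi.h>

#define DEMO_NVEC	4	/* illustrative vector count */

/* irq_write_msi_msg_t callback: program the device's own doorbell
 * registers from msg->address_lo/address_hi/data for the vector
 * identified by desc->platform.msi_index.  Device specific and
 * omitted in this sketch.
 */
static void demo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

static int demo_setup_platform_msi(struct device *dev)
{
	return platform_msi_domain_alloc_irqs(dev, DEMO_NVEC,
					      demo_write_msi_msg);
}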
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 29975c73a953..366cf77953b5 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -27,9 +27,9 @@
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/bug.h> 28#include <linux/bug.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/io.h>
30 31
31#include <asm/unaligned.h> 32#include <asm/unaligned.h>
32#include <asm/io.h>
33#include <asm/barrier.h> 33#include <asm/barrier.h>
34 34
35#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 35#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa06852771..049d4b03c4c4 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@ do { \
239 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) 239 net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
240#define net_info_ratelimited(fmt, ...) \ 240#define net_info_ratelimited(fmt, ...) \
241 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) 241 net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
242#if defined(DEBUG)
242#define net_dbg_ratelimited(fmt, ...) \ 243#define net_dbg_ratelimited(fmt, ...) \
243 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) 244 net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
245#else
246#define net_dbg_ratelimited(fmt, ...) \
247 do { \
248 if (0) \
249 no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
250 } while (0)
251#endif
244 252
245bool __net_get_random_once(void *buf, int nbytes, bool *done, 253bool __net_get_random_once(void *buf, int nbytes, bool *done,
246 struct static_key *done_key); 254 struct static_key *done_key);
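Editorial note: net.h now compiles net_dbg_ratelimited() down to nothing unless DEBUG is defined, while the if (0) no_printk() body keeps the format string and its arguments type-checked. A small usage sketch; the function name is illustrative.

#include <linux/net.h>
#include <linux/netdevice.h>

static void demo_report_drop(struct net_device *dev, int err)
{
	net_dbg_ratelimited("%s: dropping frame, err=%d\n", dev->name, err);
}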
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979dfd6a9..88a00694eda5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -766,6 +766,13 @@ struct netdev_phys_item_id {
766 unsigned char id_len; 766 unsigned char id_len;
767}; 767};
768 768
769static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
770 struct netdev_phys_item_id *b)
771{
772 return a->id_len == b->id_len &&
773 memcmp(a->id, b->id, a->id_len) == 0;
774}
775
769typedef u16 (*select_queue_fallback_t)(struct net_device *dev, 776typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
770 struct sk_buff *skb); 777 struct sk_buff *skb);
771 778
@@ -1041,6 +1048,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1041 * TX queue. 1048 * TX queue.
1042 * int (*ndo_get_iflink)(const struct net_device *dev); 1049 * int (*ndo_get_iflink)(const struct net_device *dev);
1043 * Called to get the iflink value of this device. 1050 * Called to get the iflink value of this device.
1051 * void (*ndo_change_proto_down)(struct net_device *dev,
1052 * bool proto_down);
1053 * This function is used to pass protocol port error state information
1054 * to the switch driver. The switch driver can react to the proto_down
1055 * by doing a phys down on the associated switch port.
1056 *
1044 */ 1057 */
1045struct net_device_ops { 1058struct net_device_ops {
1046 int (*ndo_init)(struct net_device *dev); 1059 int (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@ struct net_device_ops {
1211 int queue_index, 1224 int queue_index,
1212 u32 maxrate); 1225 u32 maxrate);
1213 int (*ndo_get_iflink)(const struct net_device *dev); 1226 int (*ndo_get_iflink)(const struct net_device *dev);
1227 int (*ndo_change_proto_down)(struct net_device *dev,
1228 bool proto_down);
1214}; 1229};
1215 1230
1216/** 1231/**
@@ -1225,13 +1240,8 @@ struct net_device_ops {
1225 * 1240 *
1226 * @IFF_802_1Q_VLAN: 802.1Q VLAN device 1241 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1227 * @IFF_EBRIDGE: Ethernet bridging device 1242 * @IFF_EBRIDGE: Ethernet bridging device
1228 * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
1229 * @IFF_MASTER_8023AD: bonding master, 802.3ad
1230 * @IFF_MASTER_ALB: bonding master, balance-alb
1231 * @IFF_BONDING: bonding master or slave 1243 * @IFF_BONDING: bonding master or slave
1232 * @IFF_SLAVE_NEEDARP: need ARPs for validation
1233 * @IFF_ISATAP: ISATAP interface (RFC4214) 1244 * @IFF_ISATAP: ISATAP interface (RFC4214)
1234 * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
1235 * @IFF_WAN_HDLC: WAN HDLC device 1245 * @IFF_WAN_HDLC: WAN HDLC device
1236 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to 1246 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1237 * release skb->dst 1247 * release skb->dst
@@ -1247,44 +1257,40 @@ struct net_device_ops {
1247 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address 1257 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1248 * change when it's running 1258 * change when it's running
1249 * @IFF_MACVLAN: Macvlan device 1259 * @IFF_MACVLAN: Macvlan device
1260 * @IFF_VRF_MASTER: device is a VRF master
1261 * @IFF_NO_QUEUE: device can run without qdisc attached
1262 * @IFF_OPENVSWITCH: device is a Open vSwitch master
1250 */ 1263 */
1251enum netdev_priv_flags { 1264enum netdev_priv_flags {
1252 IFF_802_1Q_VLAN = 1<<0, 1265 IFF_802_1Q_VLAN = 1<<0,
1253 IFF_EBRIDGE = 1<<1, 1266 IFF_EBRIDGE = 1<<1,
1254 IFF_SLAVE_INACTIVE = 1<<2, 1267 IFF_BONDING = 1<<2,
1255 IFF_MASTER_8023AD = 1<<3, 1268 IFF_ISATAP = 1<<3,
1256 IFF_MASTER_ALB = 1<<4, 1269 IFF_WAN_HDLC = 1<<4,
1257 IFF_BONDING = 1<<5, 1270 IFF_XMIT_DST_RELEASE = 1<<5,
1258 IFF_SLAVE_NEEDARP = 1<<6, 1271 IFF_DONT_BRIDGE = 1<<6,
1259 IFF_ISATAP = 1<<7, 1272 IFF_DISABLE_NETPOLL = 1<<7,
1260 IFF_MASTER_ARPMON = 1<<8, 1273 IFF_MACVLAN_PORT = 1<<8,
1261 IFF_WAN_HDLC = 1<<9, 1274 IFF_BRIDGE_PORT = 1<<9,
1262 IFF_XMIT_DST_RELEASE = 1<<10, 1275 IFF_OVS_DATAPATH = 1<<10,
1263 IFF_DONT_BRIDGE = 1<<11, 1276 IFF_TX_SKB_SHARING = 1<<11,
1264 IFF_DISABLE_NETPOLL = 1<<12, 1277 IFF_UNICAST_FLT = 1<<12,
1265 IFF_MACVLAN_PORT = 1<<13, 1278 IFF_TEAM_PORT = 1<<13,
1266 IFF_BRIDGE_PORT = 1<<14, 1279 IFF_SUPP_NOFCS = 1<<14,
1267 IFF_OVS_DATAPATH = 1<<15, 1280 IFF_LIVE_ADDR_CHANGE = 1<<15,
1268 IFF_TX_SKB_SHARING = 1<<16, 1281 IFF_MACVLAN = 1<<16,
1269 IFF_UNICAST_FLT = 1<<17, 1282 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1270 IFF_TEAM_PORT = 1<<18, 1283 IFF_IPVLAN_MASTER = 1<<18,
1271 IFF_SUPP_NOFCS = 1<<19, 1284 IFF_IPVLAN_SLAVE = 1<<19,
1272 IFF_LIVE_ADDR_CHANGE = 1<<20, 1285 IFF_VRF_MASTER = 1<<20,
1273 IFF_MACVLAN = 1<<21, 1286 IFF_NO_QUEUE = 1<<21,
1274 IFF_XMIT_DST_RELEASE_PERM = 1<<22, 1287 IFF_OPENVSWITCH = 1<<22,
1275 IFF_IPVLAN_MASTER = 1<<23,
1276 IFF_IPVLAN_SLAVE = 1<<24,
1277}; 1288};
1278 1289
1279#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1290#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1280#define IFF_EBRIDGE IFF_EBRIDGE 1291#define IFF_EBRIDGE IFF_EBRIDGE
1281#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
1282#define IFF_MASTER_8023AD IFF_MASTER_8023AD
1283#define IFF_MASTER_ALB IFF_MASTER_ALB
1284#define IFF_BONDING IFF_BONDING 1292#define IFF_BONDING IFF_BONDING
1285#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
1286#define IFF_ISATAP IFF_ISATAP 1293#define IFF_ISATAP IFF_ISATAP
1287#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
1288#define IFF_WAN_HDLC IFF_WAN_HDLC 1294#define IFF_WAN_HDLC IFF_WAN_HDLC
1289#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE 1295#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1290#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE 1296#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
@@ -1301,6 +1307,9 @@ enum netdev_priv_flags {
1301#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM 1307#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1302#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER 1308#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1303#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE 1309#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1310#define IFF_VRF_MASTER IFF_VRF_MASTER
1311#define IFF_NO_QUEUE IFF_NO_QUEUE
1312#define IFF_OPENVSWITCH IFF_OPENVSWITCH
1304 1313
1305/** 1314/**
1306 * struct net_device - The DEVICE structure. 1315 * struct net_device - The DEVICE structure.
@@ -1417,6 +1426,7 @@ enum netdev_priv_flags {
1417 * @dn_ptr: DECnet specific data 1426 * @dn_ptr: DECnet specific data
1418 * @ip6_ptr: IPv6 specific data 1427 * @ip6_ptr: IPv6 specific data
1419 * @ax25_ptr: AX.25 specific data 1428 * @ax25_ptr: AX.25 specific data
1429 * @vrf_ptr: VRF specific data
1420 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering 1430 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1421 * 1431 *
1422 * @last_rx: Time of last Rx 1432 * @last_rx: Time of last Rx
@@ -1448,6 +1458,8 @@ enum netdev_priv_flags {
1448 * 1458 *
1449 * @xps_maps: XXX: need comments on this one 1459 * @xps_maps: XXX: need comments on this one
1450 * 1460 *
1461 * @offload_fwd_mark: Offload device fwding mark
1462 *
1451 * @trans_start: Time (in jiffies) of last Tx 1463 * @trans_start: Time (in jiffies) of last Tx
1452 * @watchdog_timeo: Represents the timeout that is used by 1464 * @watchdog_timeo: Represents the timeout that is used by
1453 * the watchdog ( see dev_watchdog() ) 1465 * the watchdog ( see dev_watchdog() )
@@ -1502,6 +1514,10 @@ enum netdev_priv_flags {
1502 * 1514 *
1503 * @qdisc_tx_busylock: XXX: need comments on this one 1515 * @qdisc_tx_busylock: XXX: need comments on this one
1504 * 1516 *
1517 * @proto_down: protocol port state information can be sent to the
1518 * switch driver and used to set the phys state of the
1519 * switch port.
1520 *
1505 * FIXME: cleanup struct net_device such that network protocol info 1521 * FIXME: cleanup struct net_device such that network protocol info
1506 * moves out. 1522 * moves out.
1507 */ 1523 */
@@ -1629,6 +1645,7 @@ struct net_device {
1629 struct dn_dev __rcu *dn_ptr; 1645 struct dn_dev __rcu *dn_ptr;
1630 struct inet6_dev __rcu *ip6_ptr; 1646 struct inet6_dev __rcu *ip6_ptr;
1631 void *ax25_ptr; 1647 void *ax25_ptr;
1648 struct net_vrf_dev __rcu *vrf_ptr;
1632 struct wireless_dev *ieee80211_ptr; 1649 struct wireless_dev *ieee80211_ptr;
1633 struct wpan_dev *ieee802154_ptr; 1650 struct wpan_dev *ieee802154_ptr;
1634#if IS_ENABLED(CONFIG_MPLS_ROUTING) 1651#if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1685,6 +1702,10 @@ struct net_device {
1685 struct xps_dev_maps __rcu *xps_maps; 1702 struct xps_dev_maps __rcu *xps_maps;
1686#endif 1703#endif
1687 1704
1705#ifdef CONFIG_NET_SWITCHDEV
1706 u32 offload_fwd_mark;
1707#endif
1708
1688 /* These may be needed for future network-power-down code. */ 1709 /* These may be needed for future network-power-down code. */
1689 1710
1690 /* 1711 /*
@@ -1762,6 +1783,7 @@ struct net_device {
1762#endif 1783#endif
1763 struct phy_device *phydev; 1784 struct phy_device *phydev;
1764 struct lock_class_key *qdisc_tx_busylock; 1785 struct lock_class_key *qdisc_tx_busylock;
1786 bool proto_down;
1765}; 1787};
1766#define to_net_dev(d) container_of(d, struct net_device, dev) 1788#define to_net_dev(d) container_of(d, struct net_device, dev)
1767 1789
@@ -2093,6 +2115,13 @@ struct netdev_notifier_change_info {
2093 unsigned int flags_changed; 2115 unsigned int flags_changed;
2094}; 2116};
2095 2117
2118struct netdev_notifier_changeupper_info {
2119 struct netdev_notifier_info info; /* must be first */
2120 struct net_device *upper_dev; /* new upper dev */
2121 bool master; /* is upper dev master */
 2122 bool linking; /* is the notification for link or unlink */
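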
2123};
2124
2096static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 2125static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2097 struct net_device *dev) 2126 struct net_device *dev)
2098{ 2127{
@@ -2277,8 +2306,7 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2277 2306
2278static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) 2307static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
2279{ 2308{
2280 return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) == 2309 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
2281 skb_gro_offset(skb));
2282} 2310}
2283 2311
2284static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, 2312static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2374,37 +2402,58 @@ static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
2374 grc->delta = 0; 2402 grc->delta = 0;
2375} 2403}
2376 2404
2377static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, 2405static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
2378 int start, int offset, 2406 unsigned int off, size_t hdrlen,
2379 struct gro_remcsum *grc, 2407 int start, int offset,
2380 bool nopartial) 2408 struct gro_remcsum *grc,
2409 bool nopartial)
2381{ 2410{
2382 __wsum delta; 2411 __wsum delta;
2412 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
2383 2413
2384 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); 2414 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
2385 2415
2386 if (!nopartial) { 2416 if (!nopartial) {
2387 NAPI_GRO_CB(skb)->gro_remcsum_start = 2417 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
2388 ((unsigned char *)ptr + start) - skb->head; 2418 return ptr;
2389 return; 2419 }
2420
2421 ptr = skb_gro_header_fast(skb, off);
2422 if (skb_gro_header_hard(skb, off + plen)) {
2423 ptr = skb_gro_header_slow(skb, off + plen, off);
2424 if (!ptr)
2425 return NULL;
2390 } 2426 }
2391 2427
2392 delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset); 2428 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
2429 start, offset);
2393 2430
2394 /* Adjust skb->csum since we changed the packet */ 2431 /* Adjust skb->csum since we changed the packet */
2395 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); 2432 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
2396 2433
2397 grc->offset = (ptr + offset) - (void *)skb->head; 2434 grc->offset = off + hdrlen + offset;
2398 grc->delta = delta; 2435 grc->delta = delta;
2436
2437 return ptr;
2399} 2438}
2400 2439
2401static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, 2440static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
2402 struct gro_remcsum *grc) 2441 struct gro_remcsum *grc)
2403{ 2442{
2443 void *ptr;
2444 size_t plen = grc->offset + sizeof(u16);
2445
2404 if (!grc->delta) 2446 if (!grc->delta)
2405 return; 2447 return;
2406 2448
2407 remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta); 2449 ptr = skb_gro_header_fast(skb, grc->offset);
2450 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
2451 ptr = skb_gro_header_slow(skb, plen, grc->offset);
2452 if (!ptr)
2453 return;
2454 }
2455
2456 remcsum_unadjust((__sum16 *)ptr, grc->delta);
2408} 2457}
2409 2458
2410static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2459static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
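Editorial note: the hunk above changes skb_gro_remcsum_process() from returning void to returning the header pointer: it now receives the gro offset of the tunnel header and its length, may pull more header bytes itself, and hands back the possibly relocated pointer, or NULL on failure. A hedged sketch of a tunnel gro_receive helper adapting to that; the header length and function name are illustrative, and start/offset locate the remote checksum inside the encapsulated packet.

#include <linux/netdevice.h>

#define DEMO_TUNNEL_HDRLEN	8	/* illustrative tunnel header size */

static void *demo_tunnel_remcsum(struct sk_buff *skb, void *hdr,
				 unsigned int off, int start, int offset,
				 struct gro_remcsum *grc, bool nopartial)
{
	/* Returns NULL if the larger header pull failed; callers must
	 * use the returned pointer from here on.
	 */
	return skb_gro_remcsum_process(skb, hdr, off, DEMO_TUNNEL_HDRLEN,
				       start, offset, grc, nopartial);
}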
@@ -2982,6 +3031,7 @@ int dev_get_phys_port_id(struct net_device *dev,
2982 struct netdev_phys_item_id *ppid); 3031 struct netdev_phys_item_id *ppid);
2983int dev_get_phys_port_name(struct net_device *dev, 3032int dev_get_phys_port_name(struct net_device *dev,
2984 char *name, size_t len); 3033 char *name, size_t len);
3034int dev_change_proto_down(struct net_device *dev, bool proto_down);
2985struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); 3035struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
2986struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 3036struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2987 struct netdev_queue *txq, int *ret); 3037 struct netdev_queue *txq, int *ret);
@@ -3781,6 +3831,42 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
3781 return dev->priv_flags & IFF_SUPP_NOFCS; 3831 return dev->priv_flags & IFF_SUPP_NOFCS;
3782} 3832}
3783 3833
3834static inline bool netif_is_vrf(const struct net_device *dev)
3835{
3836 return dev->priv_flags & IFF_VRF_MASTER;
3837}
3838
3839static inline bool netif_is_bridge_master(const struct net_device *dev)
3840{
3841 return dev->priv_flags & IFF_EBRIDGE;
3842}
3843
3844static inline bool netif_is_ovs_master(const struct net_device *dev)
3845{
3846 return dev->priv_flags & IFF_OPENVSWITCH;
3847}
3848
3849static inline bool netif_index_is_vrf(struct net *net, int ifindex)
3850{
3851 bool rc = false;
3852
3853#if IS_ENABLED(CONFIG_NET_VRF)
3854 struct net_device *dev;
3855
3856 if (ifindex == 0)
3857 return false;
3858
3859 rcu_read_lock();
3860
3861 dev = dev_get_by_index_rcu(net, ifindex);
3862 if (dev)
3863 rc = netif_is_vrf(dev);
3864
3865 rcu_read_unlock();
3866#endif
3867 return rc;
3868}
3869
3784/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ 3870/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
3785static inline void netif_keep_dst(struct net_device *dev) 3871static inline void netif_keep_dst(struct net_device *dev)
3786{ 3872{
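Editorial note: the netdevice.h changes also add the VRF, no-queue and Open vSwitch private flags, the netif_is_vrf()/netif_index_is_vrf() helpers, the changeupper notifier info, and a proto_down path from the core (dev_change_proto_down()) into a new ndo_change_proto_down() driver hook. A hedged sketch of the driver side of that hook; demo_port_set_link() is hypothetical and stands in for whatever programs the physical switch port.

#include <linux/netdevice.h>

static int demo_port_set_link(struct net_device *dev, bool up)
{
	return 0;	/* program the switch port here */
}

/* Record the protocol's error state and mirror it onto the port. */
static int demo_change_proto_down(struct net_device *dev, bool proto_down)
{
	dev->proto_down = proto_down;
	return demo_port_set_link(dev, !proto_down);
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_change_proto_down	= demo_change_proto_down,
};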
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050dfd9f23..36a652531791 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/static_key.h> 12#include <linux/static_key.h>
13#include <linux/netfilter_defs.h> 13#include <linux/netfilter_defs.h>
14#include <linux/netdevice.h>
15#include <net/net_namespace.h>
14 16
15#ifdef CONFIG_NETFILTER 17#ifdef CONFIG_NETFILTER
16static inline int NF_DROP_GETERR(int verdict) 18static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@ struct nf_sockopt_ops {
118}; 120};
119 121
120/* Function to register/unregister hook points. */ 122/* Function to register/unregister hook points. */
123int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
124void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
125int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
126 unsigned int n);
127void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
128 unsigned int n);
129
121int nf_register_hook(struct nf_hook_ops *reg); 130int nf_register_hook(struct nf_hook_ops *reg);
122void nf_unregister_hook(struct nf_hook_ops *reg); 131void nf_unregister_hook(struct nf_hook_ops *reg);
123int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n); 132int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
128int nf_register_sockopt(struct nf_sockopt_ops *reg); 137int nf_register_sockopt(struct nf_sockopt_ops *reg);
129void nf_unregister_sockopt(struct nf_sockopt_ops *reg); 138void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
130 139
131extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
132
133#ifdef HAVE_JUMP_LABEL 140#ifdef HAVE_JUMP_LABEL
134extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 141extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
135 142
136static inline bool nf_hook_list_active(struct list_head *nf_hook_list, 143static inline bool nf_hook_list_active(struct list_head *hook_list,
137 u_int8_t pf, unsigned int hook) 144 u_int8_t pf, unsigned int hook)
138{ 145{
139 if (__builtin_constant_p(pf) && 146 if (__builtin_constant_p(pf) &&
140 __builtin_constant_p(hook)) 147 __builtin_constant_p(hook))
141 return static_key_false(&nf_hooks_needed[pf][hook]); 148 return static_key_false(&nf_hooks_needed[pf][hook]);
142 149
143 return !list_empty(nf_hook_list); 150 return !list_empty(hook_list);
144} 151}
145#else 152#else
146static inline bool nf_hook_list_active(struct list_head *nf_hook_list, 153static inline bool nf_hook_list_active(struct list_head *hook_list,
147 u_int8_t pf, unsigned int hook) 154 u_int8_t pf, unsigned int hook)
148{ 155{
149 return !list_empty(nf_hook_list); 156 return !list_empty(hook_list);
150} 157}
151#endif 158#endif
152 159
153static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
154{
155 return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
156}
157
158int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); 160int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
159 161
160/** 162/**
@@ -172,10 +174,13 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
172 int (*okfn)(struct sock *, struct sk_buff *), 174 int (*okfn)(struct sock *, struct sk_buff *),
173 int thresh) 175 int thresh)
174{ 176{
175 if (nf_hooks_active(pf, hook)) { 177 struct net *net = dev_net(indev ? indev : outdev);
178 struct list_head *hook_list = &net->nf.hooks[pf][hook];
179
180 if (nf_hook_list_active(hook_list, pf, hook)) {
176 struct nf_hook_state state; 181 struct nf_hook_state state;
177 182
178 nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh, 183 nf_hook_state_init(&state, hook_list, hook, thresh,
179 pf, indev, outdev, sk, okfn); 184 pf, indev, outdev, sk, okfn);
180 return nf_hook_slow(skb, &state); 185 return nf_hook_slow(skb, &state);
181 } 186 }
@@ -363,6 +368,8 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
363#endif /*CONFIG_NETFILTER*/ 368#endif /*CONFIG_NETFILTER*/
364 369
365#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 370#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
371#include <linux/netfilter/nf_conntrack_zones_common.h>
372
366extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; 373extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
367void nf_ct_attach(struct sk_buff *, const struct sk_buff *); 374void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
368extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; 375extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
@@ -385,4 +392,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
385static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} 392static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
386#endif 393#endif
387 394
395/**
396 * nf_skb_duplicated - TEE target has sent a packet
397 *
398 * When a xtables target sends a packet, the OUTPUT and POSTROUTING
399 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
400 *
401 * This is used by xtables TEE target to prevent the duplicated skb from
402 * being duplicated again.
403 */
404DECLARE_PER_CPU(bool, nf_skb_duplicated);
405
388#endif /*__LINUX_NETFILTER_H*/ 406#endif /*__LINUX_NETFILTER_H*/
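Editorial note: netfilter.h moves hook registration onto per-network-namespace lists: nf_register_net_hook(s)() take a struct net, and nf_hook_thresh() now looks the list up via dev_net() instead of the removed global nf_hooks[] array. Below is a hedged sketch of a module registering its hooks per namespace; the hook function (using the nf_hookfn prototype of this kernel generation) and the pernet wiring are assumptions, while the registration calls come from the hunk.

#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int demo_hook_fn(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* inspect or mangle skb here */
}

static struct nf_hook_ops demo_hook_ops[] = {
	{
		.hook		= demo_hook_fn,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_FILTER,
	},
};

static int __net_init demo_net_init(struct net *net)
{
	return nf_register_net_hooks(net, demo_hook_ops,
				     ARRAY_SIZE(demo_hook_ops));
}

static void __net_exit demo_net_exit(struct net *net)
{
	nf_unregister_net_hooks(net, demo_hook_ops, ARRAY_SIZE(demo_hook_ops));
}

/* wired up with register_pernet_subsys() from the module init */
static struct pernet_operations demo_pernet_ops = {
	.init	= demo_net_init,
	.exit	= demo_net_exit,
};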
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 000000000000..5d7cf36d4766
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
1#ifndef _NF_CONNTRACK_ZONES_COMMON_H
2#define _NF_CONNTRACK_ZONES_COMMON_H
3
4#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
5
6#define NF_CT_DEFAULT_ZONE_ID 0
7
8#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL)
9#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY)
10
11#define NF_CT_DEFAULT_ZONE_DIR (NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
12
13#define NF_CT_FLAG_MARK 1
14
15struct nf_conntrack_zone {
16 u16 id;
17 u8 flags;
18 u8 dir;
19};
20
21extern const struct nf_conntrack_zone nf_ct_zone_dflt;
22
23#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
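Editorial note: the new nf_conntrack_zones_common.h gives conntrack zones a direction mask and an optional flag saying the zone id comes from the packet mark. A minimal sketch of a zone definition using those constants; the id value is illustrative.

#include <linux/netfilter/nf_conntrack_zones_common.h>

/* A zone applied only in the original direction, id taken from the mark. */
static const struct nf_conntrack_zone demo_zone = {
	.id	= 7,
	.flags	= NF_CT_FLAG_MARK,
	.dir	= NF_CT_ZONE_DIR_ORIG,
};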
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 6ec975748742..80ca889b164e 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
2#define _NFNL_ACCT_H_ 2#define _NFNL_ACCT_H_
3 3
4#include <uapi/linux/netfilter/nfnetlink_acct.h> 4#include <uapi/linux/netfilter/nfnetlink_acct.h>
5#include <net/net_namespace.h>
5 6
6enum { 7enum {
7 NFACCT_NO_QUOTA = -1, 8 NFACCT_NO_QUOTA = -1,
@@ -11,7 +12,7 @@ enum {
11 12
12struct nf_acct; 13struct nf_acct;
13 14
14struct nf_acct *nfnl_acct_find_get(const char *filter_name); 15struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
15void nfnl_acct_put(struct nf_acct *acct); 16void nfnl_acct_put(struct nf_acct *acct);
16void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); 17void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
17extern int nfnl_acct_overquota(const struct sk_buff *skb, 18extern int nfnl_acct_overquota(const struct sk_buff *skb,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a5667f..b006b719183f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
3 3
4 4
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/static_key.h>
6#include <uapi/linux/netfilter/x_tables.h> 7#include <uapi/linux/netfilter/x_tables.h>
7 8
8/** 9/**
@@ -222,7 +223,6 @@ struct xt_table_info {
222 * @stacksize jumps (number of user chains) can possibly be made. 223 * @stacksize jumps (number of user chains) can possibly be made.
223 */ 224 */
224 unsigned int stacksize; 225 unsigned int stacksize;
225 unsigned int __percpu *stackptr;
226 void ***jumpstack; 226 void ***jumpstack;
227 227
228 unsigned char entries[0] __aligned(8); 228 unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
281 */ 281 */
282DECLARE_PER_CPU(seqcount_t, xt_recseq); 282DECLARE_PER_CPU(seqcount_t, xt_recseq);
283 283
284/* xt_tee_enabled - true if x_tables needs to handle reentrancy
285 *
286 * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
287 */
288extern struct static_key xt_tee_enabled;
289
284/** 290/**
285 * xt_write_recseq_begin - start of a write section 291 * xt_write_recseq_begin - start of a write section
286 * 292 *
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc686323..2437b8a5d7a9 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@ enum nf_br_hook_priorities {
17 17
18#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 18#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
19 19
20#define BRNF_BRIDGED_DNAT 0x02
21#define BRNF_NF_BRIDGE_PREROUTING 0x08
22
23int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); 20int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
24 21
25static inline void br_drop_fake_rtable(struct sk_buff *skb) 22static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
63{ 60{
64 return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL; 61 return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
65} 62}
63
64static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
65{
66 return skb->nf_bridge && skb->nf_bridge->in_prerouting;
67}
66#else 68#else
67#define br_drop_fake_rtable(skb) do { } while (0) 69#define br_drop_fake_rtable(skb) do { } while (0)
70static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
71{
72 return false;
73}
68#endif /* CONFIG_BRIDGE_NETFILTER */ 74#endif /* CONFIG_BRIDGE_NETFILTER */
69 75
70#endif 76#endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 8b7d28f3aada..771574677e83 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
9 9
10#include <uapi/linux/netfilter_ipv6.h> 10#include <uapi/linux/netfilter_ipv6.h>
11 11
12
13#ifdef CONFIG_NETFILTER
14int ip6_route_me_harder(struct sk_buff *skb);
15__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
16 unsigned int dataoff, u_int8_t protocol);
17
18int ipv6_netfilter_init(void);
19void ipv6_netfilter_fini(void);
20
21/* 12/*
22 * Hook functions for ipv6 to allow xt_* modules to be built-in even 13 * Hook functions for ipv6 to allow xt_* modules to be built-in even
23 * if IPv6 is a module. 14 * if IPv6 is a module.
@@ -30,6 +21,14 @@ struct nf_ipv6_ops {
30 int (*output)(struct sock *, struct sk_buff *)); 21 int (*output)(struct sock *, struct sk_buff *));
31}; 22};
32 23
24#ifdef CONFIG_NETFILTER
25int ip6_route_me_harder(struct sk_buff *skb);
26__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
27 unsigned int dataoff, u_int8_t protocol);
28
29int ipv6_netfilter_init(void);
30void ipv6_netfilter_fini(void);
31
33extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; 32extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
34static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) 33static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
35{ 34{
@@ -39,6 +38,7 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
39#else /* CONFIG_NETFILTER */ 38#else /* CONFIG_NETFILTER */
40static inline int ipv6_netfilter_init(void) { return 0; } 39static inline int ipv6_netfilter_init(void) { return 0; }
41static inline void ipv6_netfilter_fini(void) { return; } 40static inline void ipv6_netfilter_fini(void) { return; }
41static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
42#endif /* CONFIG_NETFILTER */ 42#endif /* CONFIG_NETFILTER */
43 43
44#endif /*__LINUX_IP6_NETFILTER_H*/ 44#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9120edb650a0..639e9b8b0e4d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -68,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
68extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); 68extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
69extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 69extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
70extern int netlink_has_listeners(struct sock *sk, unsigned int group); 70extern int netlink_has_listeners(struct sock *sk, unsigned int group);
71extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size, 71
72 u32 dst_portid, gfp_t gfp_mask); 72extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
73 unsigned int ldiff, u32 dst_portid,
74 gfp_t gfp_mask);
75static inline struct sk_buff *
76netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
77 gfp_t gfp_mask)
78{
79 return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
80}
81
73extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); 82extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
74extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, 83extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
75 __u32 group, gfp_t allocation); 84 __u32 group, gfp_t allocation);
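Editorial note: netlink.h keeps netlink_alloc_skb() as an inline wrapper that forwards to the new __netlink_alloc_skb() with ldiff = 0, so existing callers are untouched and only mmap-ring-aware users pass a headroom difference. A small usage sketch, assuming nlmsg_total_size() from net/netlink.h; the function name is illustrative.

#include <linux/netlink.h>
#include <net/netlink.h>	/* nlmsg_total_size() */

static struct sk_buff *demo_alloc_reply(struct sock *ssk, u32 dst_portid,
					int payload)
{
	return netlink_alloc_skb(ssk, nlmsg_total_size(payload), dst_portid,
				 GFP_KERNEL);
}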
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b8e72aad919c..00121f298269 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -547,6 +547,24 @@ enum pnfs_notify_deviceid_type4 {
547 NOTIFY_DEVICEID4_DELETE = 1 << 2, 547 NOTIFY_DEVICEID4_DELETE = 1 << 2,
548}; 548};
549 549
550enum pnfs_block_volume_type {
551 PNFS_BLOCK_VOLUME_SIMPLE = 0,
552 PNFS_BLOCK_VOLUME_SLICE = 1,
553 PNFS_BLOCK_VOLUME_CONCAT = 2,
554 PNFS_BLOCK_VOLUME_STRIPE = 3,
555};
556
557enum pnfs_block_extent_state {
558 PNFS_BLOCK_READWRITE_DATA = 0,
559 PNFS_BLOCK_READ_DATA = 1,
560 PNFS_BLOCK_INVALID_DATA = 2,
561 PNFS_BLOCK_NONE_DATA = 3,
562};
563
564/* on the wire size of a block layout extent */
565#define PNFS_BLOCK_EXTENT_SIZE \
566 (7 * sizeof(__be32) + NFS4_DEVICEID4_SIZE)
567
550#define NFL4_UFLG_MASK 0x0000003F 568#define NFL4_UFLG_MASK 0x0000003F
551#define NFL4_UFLG_DENSE 0x00000001 569#define NFL4_UFLG_DENSE 0x00000001
552#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002 570#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
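Editorial note: nfs4.h adds the pNFS block-layout volume and extent-state enums plus PNFS_BLOCK_EXTENT_SIZE. The constant is the on-the-wire size of one extent: the 16-byte device ID plus three 64-bit fields (file offset, length, storage offset) and a 32-bit state, i.e. 7 * 4 + 16 = 44 bytes. A hedged sketch of sizing an encode buffer from it; the leading list-count word and the function name are assumptions.

#include <linux/nfs4.h>

static size_t demo_extent_list_len(unsigned int nr_extents)
{
	return sizeof(__be32) /* extent count */ +
	       (size_t)nr_extents * PNFS_BLOCK_EXTENT_SIZE;
}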
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 874b77228fb9..c0e961474a52 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -353,7 +353,6 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
353extern void nfs_access_set_mask(struct nfs_access_entry *, u32); 353extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
354extern int nfs_permission(struct inode *, int); 354extern int nfs_permission(struct inode *, int);
355extern int nfs_open(struct inode *, struct file *); 355extern int nfs_open(struct inode *, struct file *);
356extern int nfs_release(struct inode *, struct file *);
357extern int nfs_attribute_timeout(struct inode *inode); 356extern int nfs_attribute_timeout(struct inode *inode);
358extern int nfs_attribute_cache_expired(struct inode *inode); 357extern int nfs_attribute_cache_expired(struct inode *inode);
359extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); 358extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
@@ -371,6 +370,7 @@ extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struc
371extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); 370extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
372extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); 371extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
373extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); 372extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
373extern void nfs_file_clear_open_context(struct file *flip);
374extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); 374extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
375extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); 375extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
376extern u64 nfs_compat_user_ino64(u64 fileid); 376extern u64 nfs_compat_user_ino64(u64 fileid);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20bc8e51b161..570a7df2775b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -173,6 +173,11 @@ struct nfs_server {
173 set of attributes supported 173 set of attributes supported
174 on this filesystem excluding 174 on this filesystem excluding
175 the label support bit. */ 175 the label support bit. */
176 u32 exclcreat_bitmask[3];
177 /* V4 bitmask representing the
178 set of attributes supported
179 on this filesystem for the
180 exclusive create. */
176 u32 cache_consistency_bitmask[3]; 181 u32 cache_consistency_bitmask[3];
177 /* V4 bitmask representing the subset 182 /* V4 bitmask representing the subset
178 of change attribute, size, ctime 183 of change attribute, size, ctime
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7bbe50504211..52faf7e96c65 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,7 +379,7 @@ struct nfs_openargs {
379 struct stateowner_id id; 379 struct stateowner_id id;
380 union { 380 union {
381 struct { 381 struct {
382 struct iattr * attrs; /* UNCHECKED, GUARDED */ 382 struct iattr * attrs; /* UNCHECKED, GUARDED, EXCLUSIVE4_1 */
383 nfs4_verifier verifier; /* EXCLUSIVE */ 383 nfs4_verifier verifier; /* EXCLUSIVE */
384 }; 384 };
385 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ 385 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
@@ -389,7 +389,7 @@ struct nfs_openargs {
389 const struct nfs_server *server; /* Needed for ID mapping */ 389 const struct nfs_server *server; /* Needed for ID mapping */
390 const u32 * bitmask; 390 const u32 * bitmask;
391 const u32 * open_bitmap; 391 const u32 * open_bitmap;
392 __u32 claim; 392 enum open_claim_type4 claim;
393 enum createmode4 createmode; 393 enum createmode4 createmode;
394 const struct nfs4_label *label; 394 const struct nfs4_label *label;
395}; 395};
@@ -406,8 +406,8 @@ struct nfs_openres {
406 const struct nfs_server *server; 406 const struct nfs_server *server;
407 fmode_t delegation_type; 407 fmode_t delegation_type;
408 nfs4_stateid delegation; 408 nfs4_stateid delegation;
409 unsigned long pagemod_limit;
409 __u32 do_recall; 410 __u32 do_recall;
410 __u64 maxsize;
411 __u32 attrset[NFS4_BITMAP_SIZE]; 411 __u32 attrset[NFS4_BITMAP_SIZE];
412 struct nfs4_string *owner; 412 struct nfs4_string *owner;
413 struct nfs4_string *group_owner; 413 struct nfs4_string *group_owner;
@@ -1057,11 +1057,13 @@ struct nfs4_statfs_res {
1057struct nfs4_server_caps_arg { 1057struct nfs4_server_caps_arg {
1058 struct nfs4_sequence_args seq_args; 1058 struct nfs4_sequence_args seq_args;
1059 struct nfs_fh *fhandle; 1059 struct nfs_fh *fhandle;
1060 const u32 * bitmask;
1060}; 1061};
1061 1062
1062struct nfs4_server_caps_res { 1063struct nfs4_server_caps_res {
1063 struct nfs4_sequence_res seq_res; 1064 struct nfs4_sequence_res seq_res;
1064 u32 attr_bitmask[3]; 1065 u32 attr_bitmask[3];
1066 u32 exclcreat_bitmask[3];
1065 u32 acl_bitmask; 1067 u32 acl_bitmask;
1066 u32 has_links; 1068 u32 has_links;
1067 u32 has_symlinks; 1069 u32 has_symlinks;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f94da0e65dea..78488e099ce7 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,9 +27,7 @@ static inline void touch_nmi_watchdog(void)
27#if defined(CONFIG_HARDLOCKUP_DETECTOR) 27#if defined(CONFIG_HARDLOCKUP_DETECTOR)
28extern void hardlockup_detector_disable(void); 28extern void hardlockup_detector_disable(void);
29#else 29#else
30static inline void hardlockup_detector_disable(void) 30static inline void hardlockup_detector_disable(void) {}
31{
32}
33#endif 31#endif
34 32
35/* 33/*
@@ -49,6 +47,12 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
49 arch_trigger_all_cpu_backtrace(false); 47 arch_trigger_all_cpu_backtrace(false);
50 return true; 48 return true;
51} 49}
50
51/* generic implementation */
52void nmi_trigger_all_cpu_backtrace(bool include_self,
53 void (*raise)(cpumask_t *mask));
54bool nmi_cpu_backtrace(struct pt_regs *regs);
55
52#else 56#else
53static inline bool trigger_all_cpu_backtrace(void) 57static inline bool trigger_all_cpu_backtrace(void)
54{ 58{
@@ -80,6 +84,17 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
80 void __user *, size_t *, loff_t *); 84 void __user *, size_t *, loff_t *);
81extern int proc_watchdog_cpumask(struct ctl_table *, int, 85extern int proc_watchdog_cpumask(struct ctl_table *, int,
82 void __user *, size_t *, loff_t *); 86 void __user *, size_t *, loff_t *);
87extern int lockup_detector_suspend(void);
88extern void lockup_detector_resume(void);
89#else
90static inline int lockup_detector_suspend(void)
91{
92 return 0;
93}
94
95static inline void lockup_detector_resume(void)
96{
97}
83#endif 98#endif
84 99
85#ifdef CONFIG_HAVE_ACPI_APEI_NMI 100#ifdef CONFIG_HAVE_ACPI_APEI_NMI
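
nmi_trigger_all_cpu_backtrace() factors the common all-CPU backtrace machinery out of the architectures: an arch supplies only the callback that raises an NMI/IPI on the requested CPUs, and calls nmi_cpu_backtrace() from its handler. A rough sketch of the glue; the callback and the handler wiring are purely illustrative:

static void example_raise_backtrace_ipi(cpumask_t *mask)
{
        /* arch-specific: send an NMI (or IRQ) to every CPU in @mask */
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
        nmi_trigger_all_cpu_backtrace(include_self, example_raise_backtrace_ipi);
}

/*
 * The arch NMI handler then calls nmi_cpu_backtrace(regs); it returns true
 * when this CPU had been asked to dump its stack and the dump was emitted.
 */
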
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c0d94ed8ce9a..b5812c395351 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -28,18 +28,32 @@ struct nvme_bar {
28 __u32 cc; /* Controller Configuration */ 28 __u32 cc; /* Controller Configuration */
29 __u32 rsvd1; /* Reserved */ 29 __u32 rsvd1; /* Reserved */
30 __u32 csts; /* Controller Status */ 30 __u32 csts; /* Controller Status */
31 __u32 rsvd2; /* Reserved */ 31 __u32 nssr; /* Subsystem Reset */
32 __u32 aqa; /* Admin Queue Attributes */ 32 __u32 aqa; /* Admin Queue Attributes */
33 __u64 asq; /* Admin SQ Base Address */ 33 __u64 asq; /* Admin SQ Base Address */
34 __u64 acq; /* Admin CQ Base Address */ 34 __u64 acq; /* Admin CQ Base Address */
35 __u32 cmbloc; /* Controller Memory Buffer Location */
36 __u32 cmbsz; /* Controller Memory Buffer Size */
35}; 37};
36 38
37#define NVME_CAP_MQES(cap) ((cap) & 0xffff) 39#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
38#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) 40#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
39#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) 41#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
42#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
40#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) 43#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
41#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) 44#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
42 45
46#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
47#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
48#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
49#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
50
51#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
52#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
53#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
54#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
55#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
56
43enum { 57enum {
44 NVME_CC_ENABLE = 1 << 0, 58 NVME_CC_ENABLE = 1 << 0,
45 NVME_CC_CSS_NVM = 0 << 4, 59 NVME_CC_CSS_NVM = 0 << 4,
@@ -55,6 +69,7 @@ enum {
55 NVME_CC_IOCQES = 4 << 20, 69 NVME_CC_IOCQES = 4 << 20,
56 NVME_CSTS_RDY = 1 << 0, 70 NVME_CSTS_RDY = 1 << 0,
57 NVME_CSTS_CFS = 1 << 1, 71 NVME_CSTS_CFS = 1 << 1,
72 NVME_CSTS_NSSRO = 1 << 4,
58 NVME_CSTS_SHST_NORMAL = 0 << 2, 73 NVME_CSTS_SHST_NORMAL = 0 << 2,
59 NVME_CSTS_SHST_OCCUR = 1 << 2, 74 NVME_CSTS_SHST_OCCUR = 1 << 2,
60 NVME_CSTS_SHST_CMPLT = 2 << 2, 75 NVME_CSTS_SHST_CMPLT = 2 << 2,
@@ -97,9 +112,14 @@ struct nvme_dev {
97 char serial[20]; 112 char serial[20];
98 char model[40]; 113 char model[40];
99 char firmware_rev[8]; 114 char firmware_rev[8];
115 bool subsystem;
100 u32 max_hw_sectors; 116 u32 max_hw_sectors;
101 u32 stripe_size; 117 u32 stripe_size;
102 u32 page_size; 118 u32 page_size;
119 void __iomem *cmb;
120 dma_addr_t cmb_dma_addr;
121 u64 cmb_size;
122 u32 cmbsz;
103 u16 oncs; 123 u16 oncs;
104 u16 abort_limit; 124 u16 abort_limit;
105 u8 event_limit; 125 u8 event_limit;
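
The CMBLOC/CMBSZ macros decode the Controller Memory Buffer registers added to struct nvme_bar above. A sketch of how a driver might compute the buffer's location and size; the helper name is hypothetical, and the size unit follows the CMBSZ definition (4KB scaled by 16^SZU):

static void example_decode_cmb(struct nvme_bar __iomem *bar)
{
        u32 cmbloc = readl(&bar->cmbloc);
        u32 cmbsz  = readl(&bar->cmbsz);
        u64 unit   = (u64)1 << (12 + 4 * NVME_CMB_SZU(cmbsz));
        u64 size   = unit * NVME_CMB_SZ(cmbsz);
        u64 offset = unit * NVME_CMB_OFST(cmbloc);
        int bir    = NVME_CMB_BIR(cmbloc);      /* BAR holding the CMB */

        /*
         * The buffer sits at pci_resource_start(pdev, bir) + offset and is
         * 'size' bytes long; NVME_CMB_SQS(), NVME_CMB_CQS() etc. report
         * which queue types may be placed in it.
         */
        pr_info("CMB in BAR %d, offset %#llx, size %#llx\n", bir, offset, size);
}
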
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
new file mode 100644
index 000000000000..9bb77d3ed6e0
--- /dev/null
+++ b/include/linux/nvmem-consumer.h
@@ -0,0 +1,157 @@
1/*
2 * nvmem framework consumer.
3 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#ifndef _LINUX_NVMEM_CONSUMER_H
13#define _LINUX_NVMEM_CONSUMER_H
14
15struct device;
16struct device_node;
17/* consumer cookie */
18struct nvmem_cell;
19struct nvmem_device;
20
21struct nvmem_cell_info {
22 const char *name;
23 unsigned int offset;
24 unsigned int bytes;
25 unsigned int bit_offset;
26 unsigned int nbits;
27};
28
29#if IS_ENABLED(CONFIG_NVMEM)
30
31/* Cell based interface */
32struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
33struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
34void nvmem_cell_put(struct nvmem_cell *cell);
35void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
36void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
37int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
38
39/* direct nvmem device read/write interface */
40struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
41struct nvmem_device *devm_nvmem_device_get(struct device *dev,
42 const char *name);
43void nvmem_device_put(struct nvmem_device *nvmem);
44void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
45int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
46 size_t bytes, void *buf);
47int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
48 size_t bytes, void *buf);
49ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
50 struct nvmem_cell_info *info, void *buf);
51int nvmem_device_cell_write(struct nvmem_device *nvmem,
52 struct nvmem_cell_info *info, void *buf);
53
54#else
55
56static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
57 const char *name)
58{
59 return ERR_PTR(-ENOSYS);
60}
61
62static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
63 const char *name)
64{
65 return ERR_PTR(-ENOSYS);
66}
67
68static inline void devm_nvmem_cell_put(struct device *dev,
69 struct nvmem_cell *cell)
70{
71
72}
73static inline void nvmem_cell_put(struct nvmem_cell *cell)
74{
75}
76
 77static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
78{
79 return ERR_PTR(-ENOSYS);
80}
81
82static inline int nvmem_cell_write(struct nvmem_cell *cell,
 83                                   void *buf, size_t len)
84{
85 return -ENOSYS;
86}
87
88static inline struct nvmem_device *nvmem_device_get(struct device *dev,
89 const char *name)
90{
91 return ERR_PTR(-ENOSYS);
92}
93
94static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
95 const char *name)
96{
97 return ERR_PTR(-ENOSYS);
98}
99
100static inline void nvmem_device_put(struct nvmem_device *nvmem)
101{
102}
103
104static inline void devm_nvmem_device_put(struct device *dev,
105 struct nvmem_device *nvmem)
106{
107}
108
109static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
110 struct nvmem_cell_info *info,
111 void *buf)
112{
113 return -ENOSYS;
114}
115
116static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
117 struct nvmem_cell_info *info,
118 void *buf)
119{
120 return -ENOSYS;
121}
122
123static inline int nvmem_device_read(struct nvmem_device *nvmem,
124 unsigned int offset, size_t bytes,
125 void *buf)
126{
127 return -ENOSYS;
128}
129
130static inline int nvmem_device_write(struct nvmem_device *nvmem,
131 unsigned int offset, size_t bytes,
132 void *buf)
133{
134 return -ENOSYS;
135}
136#endif /* CONFIG_NVMEM */
137
138#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
139struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
140 const char *name);
141struct nvmem_device *of_nvmem_device_get(struct device_node *np,
142 const char *name);
143#else
144static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
145 const char *name)
146{
147 return ERR_PTR(-ENOSYS);
148}
149
150static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
151 const char *name)
152{
153 return ERR_PTR(-ENOSYS);
154}
155#endif /* CONFIG_NVMEM && CONFIG_OF */
156
157#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */
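
A minimal consumer-side sketch for the new cell interface, assuming a cell named "mac-address" is wired up to the device (via DT or the provider's cell table); the surrounding driver is hypothetical and error handling is kept short:

static int example_read_mac(struct device *dev)
{
        struct nvmem_cell *cell;
        void *buf;
        size_t len;

        cell = nvmem_cell_get(dev, "mac-address");
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);      /* kmalloc'd copy of the cell */
        nvmem_cell_put(cell);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        /* ... consume the len bytes at buf ... */
        kfree(buf);
        return 0;
}
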
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
new file mode 100644
index 000000000000..0b68caff1b3c
--- /dev/null
+++ b/include/linux/nvmem-provider.h
@@ -0,0 +1,47 @@
1/*
2 * nvmem framework provider.
3 *
4 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
5 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#ifndef _LINUX_NVMEM_PROVIDER_H
13#define _LINUX_NVMEM_PROVIDER_H
14
15struct nvmem_device;
16struct nvmem_cell_info;
17
18struct nvmem_config {
19 struct device *dev;
20 const char *name;
21 int id;
22 struct module *owner;
23 const struct nvmem_cell_info *cells;
24 int ncells;
25 bool read_only;
26};
27
28#if IS_ENABLED(CONFIG_NVMEM)
29
30struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
31int nvmem_unregister(struct nvmem_device *nvmem);
32
33#else
34
35static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
36{
37 return ERR_PTR(-ENOSYS);
38}
39
40static inline int nvmem_unregister(struct nvmem_device *nvmem)
41{
42 return -ENOSYS;
43}
44
45#endif /* CONFIG_NVMEM */
46
47#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
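
On the provider side, a driver fills a struct nvmem_config and registers it; in the initial core the storage itself is reached through the parent device's regmap. A sketch with illustrative values only:

static const struct nvmem_cell_info example_cells[] = {
        { .name = "mac-address", .offset = 0x40, .bytes = 6 },
};

static struct nvmem_config example_config = {
        .name           = "example-eeprom",
        .id             = -1,
        .owner          = THIS_MODULE,
        .cells          = example_cells,
        .ncells         = ARRAY_SIZE(example_cells),
        .read_only      = true,
};

/*
 * In probe():   example_config.dev = dev;
 *               nvmem = nvmem_register(&example_config);
 * In remove():  nvmem_unregister(nvmem);
 */
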
diff --git a/include/linux/of.h b/include/linux/of.h
index edc068d19c79..2194b8ca41f9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,8 @@ static inline bool is_of_node(struct fwnode_handle *fwnode)
136 136
137static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) 137static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
138{ 138{
139 return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; 139 return is_of_node(fwnode) ?
140 container_of(fwnode, struct device_node, fwnode) : NULL;
140} 141}
141 142
142static inline bool of_have_populated_dt(void) 143static inline bool of_have_populated_dt(void)
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 69dbe312b11b..f3191828f037 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -54,7 +54,7 @@ extern int of_mm_gpiochip_add(struct device_node *np,
54 struct of_mm_gpio_chip *mm_gc); 54 struct of_mm_gpio_chip *mm_gc);
55extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); 55extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
56 56
57extern void of_gpiochip_add(struct gpio_chip *gc); 57extern int of_gpiochip_add(struct gpio_chip *gc);
58extern void of_gpiochip_remove(struct gpio_chip *gc); 58extern void of_gpiochip_remove(struct gpio_chip *gc);
59extern int of_gpio_simple_xlate(struct gpio_chip *gc, 59extern int of_gpio_simple_xlate(struct gpio_chip *gc,
60 const struct of_phandle_args *gpiospec, 60 const struct of_phandle_args *gpiospec,
@@ -76,7 +76,7 @@ static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
76 return -ENOSYS; 76 return -ENOSYS;
77} 77}
78 78
79static inline void of_gpiochip_add(struct gpio_chip *gc) { } 79static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
80static inline void of_gpiochip_remove(struct gpio_chip *gc) { } 80static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
81 81
82#endif /* CONFIG_OF_GPIO */ 82#endif /* CONFIG_OF_GPIO */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d884929a7747..4bcbd586a672 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -74,6 +74,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
74 */ 74 */
75extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); 75extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
76extern struct device_node *of_irq_find_parent(struct device_node *child); 76extern struct device_node *of_irq_find_parent(struct device_node *child);
77extern void of_msi_configure(struct device *dev, struct device_node *np);
77 78
78#else /* !CONFIG_OF */ 79#else /* !CONFIG_OF */
79static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 80static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 611a691145c4..956a1006aefc 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -72,6 +72,9 @@ extern int of_platform_populate(struct device_node *root,
72 const struct of_device_id *matches, 72 const struct of_device_id *matches,
73 const struct of_dev_auxdata *lookup, 73 const struct of_dev_auxdata *lookup,
74 struct device *parent); 74 struct device *parent);
75extern int of_platform_default_populate(struct device_node *root,
76 const struct of_dev_auxdata *lookup,
77 struct device *parent);
75extern void of_platform_depopulate(struct device *parent); 78extern void of_platform_depopulate(struct device *parent);
76#else 79#else
77static inline int of_platform_populate(struct device_node *root, 80static inline int of_platform_populate(struct device_node *root,
@@ -81,6 +84,12 @@ static inline int of_platform_populate(struct device_node *root,
81{ 84{
82 return -ENODEV; 85 return -ENODEV;
83} 86}
87static inline int of_platform_default_populate(struct device_node *root,
88 const struct of_dev_auxdata *lookup,
89 struct device *parent)
90{
91 return -ENODEV;
92}
84static inline void of_platform_depopulate(struct device *parent) { } 93static inline void of_platform_depopulate(struct device *parent) { }
85#endif 94#endif
86 95
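
of_platform_default_populate() is a convenience wrapper that populates devices using the default bus match table. A sketch of the typical call from platform init code, passing NULL for the root node and parent just as with of_platform_populate():

static int __init example_platform_init(void)
{
        return of_platform_default_populate(NULL, NULL, NULL);
}
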
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index c2bbf672b84e..d2fa9ca42e9a 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -41,7 +41,7 @@ enum OID {
41 OID_signed_data, /* 1.2.840.113549.1.7.2 */ 41 OID_signed_data, /* 1.2.840.113549.1.7.2 */
42 /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */ 42 /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */
43 OID_email_address, /* 1.2.840.113549.1.9.1 */ 43 OID_email_address, /* 1.2.840.113549.1.9.1 */
44 OID_content_type, /* 1.2.840.113549.1.9.3 */ 44 OID_contentType, /* 1.2.840.113549.1.9.3 */
45 OID_messageDigest, /* 1.2.840.113549.1.9.4 */ 45 OID_messageDigest, /* 1.2.840.113549.1.9.4 */
46 OID_signingTime, /* 1.2.840.113549.1.9.5 */ 46 OID_signingTime, /* 1.2.840.113549.1.9.5 */
47 OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */ 47 OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
@@ -54,6 +54,8 @@ enum OID {
54 54
55 /* Microsoft Authenticode & Software Publishing */ 55 /* Microsoft Authenticode & Software Publishing */
56 OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ 56 OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
57 OID_msStatementType, /* 1.3.6.1.4.1.311.2.1.11 */
58 OID_msSpOpusInfo, /* 1.3.6.1.4.1.311.2.1.12 */
57 OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */ 59 OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */
58 OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ 60 OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
59 OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ 61 OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
@@ -61,6 +63,9 @@ enum OID {
61 OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ 63 OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
62 OID_sha1, /* 1.3.14.3.2.26 */ 64 OID_sha1, /* 1.3.14.3.2.26 */
63 OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ 65 OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
66 OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
67 OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
68 OID_sha224, /* 2.16.840.1.101.3.4.2.4 */
64 69
65 /* Distinguished Name attribute IDs [RFC 2256] */ 70 /* Distinguished Name attribute IDs [RFC 2256] */
66 OID_commonName, /* 2.5.4.3 */ 71 OID_commonName, /* 2.5.4.3 */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7deecb7bca5e..03e6257321f0 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -13,6 +13,27 @@ struct mem_cgroup;
13struct task_struct; 13struct task_struct;
14 14
15/* 15/*
16 * Details of the page allocation that triggered the oom killer that are used to
17 * determine what should be killed.
18 */
19struct oom_control {
20 /* Used to determine cpuset */
21 struct zonelist *zonelist;
22
23 /* Used to determine mempolicy */
24 nodemask_t *nodemask;
25
26 /* Used to determine cpuset and node locality requirement */
27 const gfp_t gfp_mask;
28
29 /*
30 * order == -1 means the oom kill is required by sysrq, otherwise only
31 * for display purposes.
32 */
33 const int order;
34};
35
36/*
16 * Types of limitations to the nodes from which allocations may occur 37 * Types of limitations to the nodes from which allocations may occur
17 */ 38 */
18enum oom_constraint { 39enum oom_constraint {
@@ -57,21 +78,18 @@ extern unsigned long oom_badness(struct task_struct *p,
57 78
58extern int oom_kills_count(void); 79extern int oom_kills_count(void);
59extern void note_oom_kill(void); 80extern void note_oom_kill(void);
60extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 81extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
61 unsigned int points, unsigned long totalpages, 82 unsigned int points, unsigned long totalpages,
62 struct mem_cgroup *memcg, nodemask_t *nodemask, 83 struct mem_cgroup *memcg, const char *message);
63 const char *message);
64 84
65extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, 85extern void check_panic_on_oom(struct oom_control *oc,
66 int order, const nodemask_t *nodemask, 86 enum oom_constraint constraint,
67 struct mem_cgroup *memcg); 87 struct mem_cgroup *memcg);
68 88
69extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task, 89extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
70 unsigned long totalpages, const nodemask_t *nodemask, 90 struct task_struct *task, unsigned long totalpages);
71 bool force_kill);
72 91
73extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, 92extern bool out_of_memory(struct oom_control *oc);
74 int order, nodemask_t *mask, bool force_kill);
75 93
76extern void exit_oom_victim(void); 94extern void exit_oom_victim(void);
77 95
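
struct oom_control bundles the allocation context that used to be passed as separate arguments to out_of_memory() and friends. A sketch of the new calling convention, mirroring what an allocation-path caller might do; the wrapper itself is illustrative:

static bool example_invoke_oom(struct zonelist *zonelist, nodemask_t *nodemask,
                               gfp_t gfp_mask, int order)
{
        struct oom_control oc = {
                .zonelist = zonelist,
                .nodemask = nodemask,
                .gfp_mask = gfp_mask,
                .order    = order,
        };

        return out_of_memory(&oc);      /* true when the OOM killer made progress */
}
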
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 41c93844fb1d..416509e26d6d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -109,6 +109,10 @@ enum pageflags {
109#ifdef CONFIG_TRANSPARENT_HUGEPAGE 109#ifdef CONFIG_TRANSPARENT_HUGEPAGE
110 PG_compound_lock, 110 PG_compound_lock,
111#endif 111#endif
112#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
113 PG_young,
114 PG_idle,
115#endif
112 __NR_PAGEFLAGS, 116 __NR_PAGEFLAGS,
113 117
114 /* Filesystems */ 118 /* Filesystems */
@@ -289,6 +293,13 @@ PAGEFLAG_FALSE(HWPoison)
289#define __PG_HWPOISON 0 293#define __PG_HWPOISON 0
290#endif 294#endif
291 295
296#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
297TESTPAGEFLAG(Young, young)
298SETPAGEFLAG(Young, young)
299TESTCLEARFLAG(Young, young)
300PAGEFLAG(Idle, idle)
301#endif
302
292/* 303/*
293 * On an anonymous page mapped into a user virtual memory area, 304 * On an anonymous page mapped into a user virtual memory area,
294 * page->mapping points to its anon_vma, not to a struct address_space; 305 * page->mapping points to its anon_vma, not to a struct address_space;
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 2dc1e1697b45..047d64706f2a 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -65,11 +65,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
65int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, 65int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
66 bool skip_hwpoisoned_pages); 66 bool skip_hwpoisoned_pages);
67 67
68/*
69 * Internal functions. Changes pageblock's migrate type.
70 */
71int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
72void unset_migratetype_isolate(struct page *page, unsigned migratetype);
73struct page *alloc_migrate_target(struct page *page, unsigned long private, 68struct page *alloc_migrate_target(struct page *page, unsigned long private,
74 int **resultp); 69 int **resultp);
75 70
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index c42981cd99aa..17f118a82854 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -26,6 +26,10 @@ enum page_ext_flags {
26 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ 26 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
27 PAGE_EXT_DEBUG_GUARD, 27 PAGE_EXT_DEBUG_GUARD,
28 PAGE_EXT_OWNER, 28 PAGE_EXT_OWNER,
29#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
30 PAGE_EXT_YOUNG,
31 PAGE_EXT_IDLE,
32#endif
29}; 33};
30 34
31/* 35/*
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
new file mode 100644
index 000000000000..bf268fa92c5b
--- /dev/null
+++ b/include/linux/page_idle.h
@@ -0,0 +1,110 @@
1#ifndef _LINUX_MM_PAGE_IDLE_H
2#define _LINUX_MM_PAGE_IDLE_H
3
4#include <linux/bitops.h>
5#include <linux/page-flags.h>
6#include <linux/page_ext.h>
7
8#ifdef CONFIG_IDLE_PAGE_TRACKING
9
10#ifdef CONFIG_64BIT
11static inline bool page_is_young(struct page *page)
12{
13 return PageYoung(page);
14}
15
16static inline void set_page_young(struct page *page)
17{
18 SetPageYoung(page);
19}
20
21static inline bool test_and_clear_page_young(struct page *page)
22{
23 return TestClearPageYoung(page);
24}
25
26static inline bool page_is_idle(struct page *page)
27{
28 return PageIdle(page);
29}
30
31static inline void set_page_idle(struct page *page)
32{
33 SetPageIdle(page);
34}
35
36static inline void clear_page_idle(struct page *page)
37{
38 ClearPageIdle(page);
39}
40#else /* !CONFIG_64BIT */
41/*
42 * If there is not enough space to store Idle and Young bits in page flags, use
43 * page ext flags instead.
44 */
45extern struct page_ext_operations page_idle_ops;
46
47static inline bool page_is_young(struct page *page)
48{
49 return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
50}
51
52static inline void set_page_young(struct page *page)
53{
54 set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
55}
56
57static inline bool test_and_clear_page_young(struct page *page)
58{
59 return test_and_clear_bit(PAGE_EXT_YOUNG,
60 &lookup_page_ext(page)->flags);
61}
62
63static inline bool page_is_idle(struct page *page)
64{
65 return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
66}
67
68static inline void set_page_idle(struct page *page)
69{
70 set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
71}
72
73static inline void clear_page_idle(struct page *page)
74{
75 clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
76}
77#endif /* CONFIG_64BIT */
78
79#else /* !CONFIG_IDLE_PAGE_TRACKING */
80
81static inline bool page_is_young(struct page *page)
82{
83 return false;
84}
85
86static inline void set_page_young(struct page *page)
87{
88}
89
90static inline bool test_and_clear_page_young(struct page *page)
91{
92 return false;
93}
94
95static inline bool page_is_idle(struct page *page)
96{
97 return false;
98}
99
100static inline void set_page_idle(struct page *page)
101{
102}
103
104static inline void clear_page_idle(struct page *page)
105{
106}
107
108#endif /* CONFIG_IDLE_PAGE_TRACKING */
109
110#endif /* _LINUX_MM_PAGE_IDLE_H */
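
These helpers hide where the Idle/Young bits live (page flags on 64-bit, page_ext otherwise). A rough sketch of the sampling pattern they are meant for; the function is illustrative only:

static bool example_page_was_referenced(struct page *page)
{
        /* young set, or idle cleared, means the page was touched */
        bool referenced = page_is_young(page) || !page_is_idle(page);

        /* drop the young bit and re-arm idle for the next sampling pass */
        test_and_clear_page_young(page);
        set_page_idle(page);

        return referenced;
}
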
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 72031785fe1d..57e0b8250947 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -3,55 +3,6 @@
3 3
4#include <linux/pci.h> 4#include <linux/pci.h>
5 5
6/* Address Translation Service */
7struct pci_ats {
8 int pos; /* capability position */
9 int stu; /* Smallest Translation Unit */
10 int qdep; /* Invalidate Queue Depth */
11 int ref_cnt; /* Physical Function reference count */
12 unsigned int is_enabled:1; /* Enable bit is set */
13};
14
15#ifdef CONFIG_PCI_ATS
16
17int pci_enable_ats(struct pci_dev *dev, int ps);
18void pci_disable_ats(struct pci_dev *dev);
19int pci_ats_queue_depth(struct pci_dev *dev);
20
21/**
22 * pci_ats_enabled - query the ATS status
23 * @dev: the PCI device
24 *
25 * Returns 1 if ATS capability is enabled, or 0 if not.
26 */
27static inline int pci_ats_enabled(struct pci_dev *dev)
28{
29 return dev->ats && dev->ats->is_enabled;
30}
31
32#else /* CONFIG_PCI_ATS */
33
34static inline int pci_enable_ats(struct pci_dev *dev, int ps)
35{
36 return -ENODEV;
37}
38
39static inline void pci_disable_ats(struct pci_dev *dev)
40{
41}
42
43static inline int pci_ats_queue_depth(struct pci_dev *dev)
44{
45 return -ENODEV;
46}
47
48static inline int pci_ats_enabled(struct pci_dev *dev)
49{
50 return 0;
51}
52
53#endif /* CONFIG_PCI_ATS */
54
55#ifdef CONFIG_PCI_PRI 6#ifdef CONFIG_PCI_PRI
56 7
57int pci_enable_pri(struct pci_dev *pdev, u32 reqs); 8int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 860c751810fc..e90eb22de628 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -180,6 +180,8 @@ enum pci_dev_flags {
180 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6), 180 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
181 /* Do not use PM reset even if device advertises NoSoftRst- */ 181 /* Do not use PM reset even if device advertises NoSoftRst- */
182 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), 182 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
183 /* Get VPD from function 0 VPD */
184 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
183}; 185};
184 186
185enum pci_irq_reroute_variant { 187enum pci_irq_reroute_variant {
@@ -343,6 +345,7 @@ struct pci_dev {
343 unsigned int msi_enabled:1; 345 unsigned int msi_enabled:1;
344 unsigned int msix_enabled:1; 346 unsigned int msix_enabled:1;
345 unsigned int ari_enabled:1; /* ARI forwarding */ 347 unsigned int ari_enabled:1; /* ARI forwarding */
348 unsigned int ats_enabled:1; /* Address Translation Service */
346 unsigned int is_managed:1; 349 unsigned int is_managed:1;
347 unsigned int needs_freset:1; /* Dev requires fundamental reset */ 350 unsigned int needs_freset:1; /* Dev requires fundamental reset */
348 unsigned int state_saved:1; 351 unsigned int state_saved:1;
@@ -366,7 +369,6 @@ struct pci_dev {
366 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 369 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
367 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 370 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
368#ifdef CONFIG_PCI_MSI 371#ifdef CONFIG_PCI_MSI
369 struct list_head msi_list;
370 const struct attribute_group **msi_irq_groups; 372 const struct attribute_group **msi_irq_groups;
371#endif 373#endif
372 struct pci_vpd *vpd; 374 struct pci_vpd *vpd;
@@ -375,7 +377,9 @@ struct pci_dev {
375 struct pci_sriov *sriov; /* SR-IOV capability related */ 377 struct pci_sriov *sriov; /* SR-IOV capability related */
376 struct pci_dev *physfn; /* the PF this VF is associated with */ 378 struct pci_dev *physfn; /* the PF this VF is associated with */
377 }; 379 };
378 struct pci_ats *ats; /* Address Translation Service */ 380 u16 ats_cap; /* ATS Capability offset */
381 u8 ats_stu; /* ATS Smallest Translation Unit */
382 atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
379#endif 383#endif
380 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ 384 phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
381 size_t romlen; /* Length of ROM if it's not from the BAR */ 385 size_t romlen; /* Length of ROM if it's not from the BAR */
@@ -446,7 +450,8 @@ struct pci_bus {
446 struct list_head children; /* list of child buses */ 450 struct list_head children; /* list of child buses */
447 struct list_head devices; /* list of devices on this bus */ 451 struct list_head devices; /* list of devices on this bus */
448 struct pci_dev *self; /* bridge device as seen by parent */ 452 struct pci_dev *self; /* bridge device as seen by parent */
449 struct list_head slots; /* list of slots on this bus */ 453 struct list_head slots; /* list of slots on this bus;
454 protected by pci_slot_mutex */
450 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; 455 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
451 struct list_head resources; /* address space routed to this bus */ 456 struct list_head resources; /* address space routed to this bus */
452 struct resource busn_res; /* bus numbers routed to this bus */ 457 struct resource busn_res; /* bus numbers routed to this bus */
@@ -738,10 +743,11 @@ struct pci_driver {
738void pcie_bus_configure_settings(struct pci_bus *bus); 743void pcie_bus_configure_settings(struct pci_bus *bus);
739 744
740enum pcie_bus_config_types { 745enum pcie_bus_config_types {
741 PCIE_BUS_TUNE_OFF, 746 PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */
742 PCIE_BUS_SAFE, 747 PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */
743 PCIE_BUS_PERFORMANCE, 748 PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */
744 PCIE_BUS_PEER2PEER, 749 PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */
750 PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */
745}; 751};
746 752
747extern enum pcie_bus_config_types pcie_bus_config; 753extern enum pcie_bus_config_types pcie_bus_config;
@@ -787,6 +793,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
787int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); 793int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
788int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); 794int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
789void pci_bus_release_busn_res(struct pci_bus *b); 795void pci_bus_release_busn_res(struct pci_bus *b);
796struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
797 struct pci_ops *ops, void *sysdata,
798 struct list_head *resources,
799 struct msi_controller *msi);
790struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, 800struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
791 struct pci_ops *ops, void *sysdata, 801 struct pci_ops *ops, void *sysdata,
792 struct list_head *resources); 802 struct list_head *resources);
@@ -797,6 +807,11 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
797 const char *name, 807 const char *name,
798 struct hotplug_slot *hotplug); 808 struct hotplug_slot *hotplug);
799void pci_destroy_slot(struct pci_slot *slot); 809void pci_destroy_slot(struct pci_slot *slot);
810#ifdef CONFIG_SYSFS
811void pci_dev_assign_slot(struct pci_dev *dev);
812#else
813static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
814#endif
800int pci_scan_slot(struct pci_bus *bus, int devfn); 815int pci_scan_slot(struct pci_bus *bus, int devfn);
801struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); 816struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
802void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); 817void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -963,6 +978,23 @@ static inline int pci_is_managed(struct pci_dev *pdev)
963 return pdev->is_managed; 978 return pdev->is_managed;
964} 979}
965 980
981static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
982{
983 pdev->irq = irq;
984 pdev->irq_managed = 1;
985}
986
987static inline void pci_reset_managed_irq(struct pci_dev *pdev)
988{
989 pdev->irq = 0;
990 pdev->irq_managed = 0;
991}
992
993static inline bool pci_has_managed_irq(struct pci_dev *pdev)
994{
995 return pdev->irq_managed && pdev->irq > 0;
996}
997
966void pci_disable_device(struct pci_dev *dev); 998void pci_disable_device(struct pci_dev *dev);
967 999
968extern unsigned int pcibios_max_latency; 1000extern unsigned int pcibios_max_latency;
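
The managed-IRQ helpers pair pdev->irq with the irq_managed flag so arch code can tell an IRQ it allocated apart from one set up by firmware. A sketch of how IRQ setup and teardown paths might use them; the functions are illustrative:

static int example_setup_irq(struct pci_dev *pdev, unsigned int irq)
{
        if (pci_has_managed_irq(pdev))
                return pdev->irq;               /* already allocated */

        pci_set_managed_irq(pdev, irq);         /* record irq, mark managed */
        return irq;
}

static void example_teardown_irq(struct pci_dev *pdev)
{
        if (pci_has_managed_irq(pdev))
                pci_reset_managed_irq(pdev);    /* clear irq and the flag */
}
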
@@ -1195,6 +1227,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1195 dma_pool_create(name, &pdev->dev, size, align, allocation) 1227 dma_pool_create(name, &pdev->dev, size, align, allocation)
1196#define pci_pool_destroy(pool) dma_pool_destroy(pool) 1228#define pci_pool_destroy(pool) dma_pool_destroy(pool)
1197#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) 1229#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
1230#define pci_pool_zalloc(pool, flags, handle) \
1231 dma_pool_zalloc(pool, flags, handle)
1198#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) 1232#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
1199 1233
1200struct msix_entry { 1234struct msix_entry {
@@ -1295,6 +1329,19 @@ int ht_create_irq(struct pci_dev *dev, int idx);
1295void ht_destroy_irq(unsigned int irq); 1329void ht_destroy_irq(unsigned int irq);
1296#endif /* CONFIG_HT_IRQ */ 1330#endif /* CONFIG_HT_IRQ */
1297 1331
1332#ifdef CONFIG_PCI_ATS
1333/* Address Translation Service */
1334void pci_ats_init(struct pci_dev *dev);
1335int pci_enable_ats(struct pci_dev *dev, int ps);
1336void pci_disable_ats(struct pci_dev *dev);
1337int pci_ats_queue_depth(struct pci_dev *dev);
1338#else
1339static inline void pci_ats_init(struct pci_dev *d) { }
1340static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
1341static inline void pci_disable_ats(struct pci_dev *d) { }
1342static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
1343#endif
1344
1298void pci_cfg_access_lock(struct pci_dev *dev); 1345void pci_cfg_access_lock(struct pci_dev *dev);
1299bool pci_cfg_access_trylock(struct pci_dev *dev); 1346bool pci_cfg_access_trylock(struct pci_dev *dev);
1300void pci_cfg_access_unlock(struct pci_dev *dev); 1347void pci_cfg_access_unlock(struct pci_dev *dev);
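
With struct pci_ats folded into struct pci_dev, the ATS API moves here and is keyed off CONFIG_PCI_ATS. A sketch of an IOMMU driver enabling ATS for a device; PAGE_SHIFT as the smallest translation unit mirrors common usage, and the function is illustrative:

static int example_enable_ats(struct pci_dev *pdev)
{
        int ret = pci_enable_ats(pdev, PAGE_SHIFT);

        if (ret)
                return ret;

        /*
         * Program the IOMMU for translated requests from pdev, sizing its
         * invalidation queue with pci_ats_queue_depth(pdev); undo with
         * pci_disable_ats(pdev) on detach.
         */
        return 0;
}
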
@@ -1646,6 +1693,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
1646int pcibios_add_device(struct pci_dev *dev); 1693int pcibios_add_device(struct pci_dev *dev);
1647void pcibios_release_device(struct pci_dev *dev); 1694void pcibios_release_device(struct pci_dev *dev);
1648void pcibios_penalize_isa_irq(int irq, int active); 1695void pcibios_penalize_isa_irq(int irq, int active);
1696int pcibios_alloc_irq(struct pci_dev *dev);
1697void pcibios_free_irq(struct pci_dev *dev);
1649 1698
1650#ifdef CONFIG_HIBERNATE_CALLBACKS 1699#ifdef CONFIG_HIBERNATE_CALLBACKS
1651extern struct dev_pm_ops pcibios_pm_ops; 1700extern struct dev_pm_ops pcibios_pm_ops;
@@ -1662,6 +1711,7 @@ static inline void pci_mmcfg_late_init(void) { }
1662int pci_ext_cfg_avail(void); 1711int pci_ext_cfg_avail(void);
1663 1712
1664void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 1713void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
1714void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
1665 1715
1666#ifdef CONFIG_PCI_IOV 1716#ifdef CONFIG_PCI_IOV
1667int pci_iov_virtfn_bus(struct pci_dev *dev, int id); 1717int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
@@ -1843,10 +1893,12 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
1843/* PCI <-> OF binding helpers */ 1893/* PCI <-> OF binding helpers */
1844#ifdef CONFIG_OF 1894#ifdef CONFIG_OF
1845struct device_node; 1895struct device_node;
1896struct irq_domain;
1846void pci_set_of_node(struct pci_dev *dev); 1897void pci_set_of_node(struct pci_dev *dev);
1847void pci_release_of_node(struct pci_dev *dev); 1898void pci_release_of_node(struct pci_dev *dev);
1848void pci_set_bus_of_node(struct pci_bus *bus); 1899void pci_set_bus_of_node(struct pci_bus *bus);
1849void pci_release_bus_of_node(struct pci_bus *bus); 1900void pci_release_bus_of_node(struct pci_bus *bus);
1901struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
1850 1902
1851/* Arch may override this (weak) */ 1903/* Arch may override this (weak) */
1852struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 1904struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1869,6 +1921,8 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
1869static inline void pci_release_bus_of_node(struct pci_bus *bus) { } 1921static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
1870static inline struct device_node * 1922static inline struct device_node *
1871pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } 1923pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
1924static inline struct irq_domain *
1925pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
1872#endif /* CONFIG_OF */ 1926#endif /* CONFIG_OF */
1873 1927
1874#ifdef CONFIG_EEH 1928#ifdef CONFIG_EEH
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fcff8f865341..d9ba49cedc5d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2332,6 +2332,15 @@
2332 2332
2333#define PCI_VENDOR_ID_CAVIUM 0x177d 2333#define PCI_VENDOR_ID_CAVIUM 0x177d
2334 2334
2335#define PCI_VENDOR_ID_TECHWELL 0x1797
2336#define PCI_DEVICE_ID_TECHWELL_6800 0x6800
2337#define PCI_DEVICE_ID_TECHWELL_6801 0x6801
2338#define PCI_DEVICE_ID_TECHWELL_6804 0x6804
2339#define PCI_DEVICE_ID_TECHWELL_6816_1 0x6810
2340#define PCI_DEVICE_ID_TECHWELL_6816_2 0x6811
2341#define PCI_DEVICE_ID_TECHWELL_6816_3 0x6812
2342#define PCI_DEVICE_ID_TECHWELL_6816_4 0x6813
2343
2335#define PCI_VENDOR_ID_BELKIN 0x1799 2344#define PCI_VENDOR_ID_BELKIN 0x1799
2336#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f 2345#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
2337 2346
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57f3a1c550dc..8f16299ca068 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -488,10 +488,8 @@ do { \
488#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) 488#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
489 489
490/* 490/*
491 * Operations with implied preemption protection. These operations can be 491 * Operations with implied preemption/interrupt protection. These
492 * used without worrying about preemption. Note that interrupts may still 492 * operations can be used without worrying about preemption or interrupt.
493 * occur while an operation is in progress and if the interrupt modifies
494 * the variable too then RMW actions may not be reliable.
495 */ 493 */
496#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) 494#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
497#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) 495#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 3e88c9a7d57f..834c4e52cb2d 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -16,6 +16,7 @@ struct percpu_rw_semaphore {
16}; 16};
17 17
18extern void percpu_down_read(struct percpu_rw_semaphore *); 18extern void percpu_down_read(struct percpu_rw_semaphore *);
19extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
19extern void percpu_up_read(struct percpu_rw_semaphore *); 20extern void percpu_up_read(struct percpu_rw_semaphore *);
20 21
21extern void percpu_down_write(struct percpu_rw_semaphore *); 22extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -31,4 +32,23 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
31 __percpu_init_rwsem(brw, #brw, &rwsem_key); \ 32 __percpu_init_rwsem(brw, #brw, &rwsem_key); \
32}) 33})
33 34
35
36#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
37
38static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
39 bool read, unsigned long ip)
40{
41 lock_release(&sem->rw_sem.dep_map, 1, ip);
42#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
43 if (!read)
44 sem->rw_sem.owner = NULL;
45#endif
46}
47
48static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
49 bool read, unsigned long ip)
50{
51 lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
52}
53
34#endif 54#endif
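
percpu_rwsem_release()/percpu_rwsem_acquire() only adjust lockdep state; they cover the case where a percpu rwsem stays held while ownership moves to another context. A sketch, with the wrapper functions purely illustrative:

static void example_hand_off(struct percpu_rw_semaphore *sem)
{
        /*
         * The semaphore remains write-locked but this context returns to
         * userspace, so tell lockdep we no longer hold it here.
         */
        percpu_rwsem_release(sem, false, _RET_IP_);
}

static void example_take_back_and_unlock(struct percpu_rw_semaphore *sem)
{
        /* re-establish the lockdep state before dropping the lock for real */
        percpu_rwsem_acquire(sem, false, _RET_IP_);
        percpu_up_write(sem);
}
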
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 000000000000..bfa673bb822d
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
1/*
2 * linux/arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
15#include <linux/interrupt.h>
16#include <linux/perf_event.h>
17
18#include <asm/cputype.h>
19
20/*
21 * struct arm_pmu_platdata - ARM PMU platform data
22 *
23 * @handle_irq: an optional handler which will be called from the
24 * interrupt and passed the address of the low level handler,
25 * and can be used to implement any platform specific handling
26 * before or after calling it.
27 */
28struct arm_pmu_platdata {
29 irqreturn_t (*handle_irq)(int irq, void *dev,
30 irq_handler_t pmu_handler);
31};
32
33#ifdef CONFIG_ARM_PMU
34
35/*
36 * The ARMv7 CPU PMU supports up to 32 event counters.
37 */
38#define ARMPMU_MAX_HWEVENTS 32
39
40#define HW_OP_UNSUPPORTED 0xFFFF
41#define C(_x) PERF_COUNT_HW_CACHE_##_x
42#define CACHE_OP_UNSUPPORTED 0xFFFF
43
44#define PERF_MAP_ALL_UNSUPPORTED \
45 [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
46
47#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
48[0 ... C(MAX) - 1] = { \
49 [0 ... C(OP_MAX) - 1] = { \
50 [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
51 }, \
52}
53
54/* The events for a given PMU register set. */
55struct pmu_hw_events {
56 /*
57 * The events that are active on the PMU for the given index.
58 */
59 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
60
61 /*
62 * A 1 bit for an index indicates that the counter is being used for
63 * an event. A 0 means that the counter can be used.
64 */
65 DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
66
67 /*
68 * Hardware lock to serialize accesses to PMU registers. Needed for the
69 * read/modify/write sequences.
70 */
71 raw_spinlock_t pmu_lock;
72
73 /*
74 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
75 * already have to allocate this struct per cpu.
76 */
77 struct arm_pmu *percpu_pmu;
78};
79
80struct arm_pmu {
81 struct pmu pmu;
82 cpumask_t active_irqs;
83 cpumask_t supported_cpus;
84 int *irq_affinity;
85 char *name;
86 irqreturn_t (*handle_irq)(int irq_num, void *dev);
87 void (*enable)(struct perf_event *event);
88 void (*disable)(struct perf_event *event);
89 int (*get_event_idx)(struct pmu_hw_events *hw_events,
90 struct perf_event *event);
91 void (*clear_event_idx)(struct pmu_hw_events *hw_events,
92 struct perf_event *event);
93 int (*set_event_filter)(struct hw_perf_event *evt,
94 struct perf_event_attr *attr);
95 u32 (*read_counter)(struct perf_event *event);
96 void (*write_counter)(struct perf_event *event, u32 val);
97 void (*start)(struct arm_pmu *);
98 void (*stop)(struct arm_pmu *);
99 void (*reset)(void *);
100 int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
101 void (*free_irq)(struct arm_pmu *);
102 int (*map_event)(struct perf_event *event);
103 int num_events;
104 atomic_t active_events;
105 struct mutex reserve_mutex;
106 u64 max_period;
107 struct platform_device *plat_device;
108 struct pmu_hw_events __percpu *hw_events;
109 struct notifier_block hotplug_nb;
110};
111
112#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
113
114int armpmu_register(struct arm_pmu *armpmu, int type);
115
116u64 armpmu_event_update(struct perf_event *event);
117
118int armpmu_event_set_period(struct perf_event *event);
119
120int armpmu_map_event(struct perf_event *event,
121 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
122 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
123 [PERF_COUNT_HW_CACHE_OP_MAX]
124 [PERF_COUNT_HW_CACHE_RESULT_MAX],
125 u32 raw_event_mask);
126
127struct pmu_probe_info {
128 unsigned int cpuid;
129 unsigned int mask;
130 int (*init)(struct arm_pmu *);
131};
132
133#define PMU_PROBE(_cpuid, _mask, _fn) \
134{ \
135 .cpuid = (_cpuid), \
136 .mask = (_mask), \
137 .init = (_fn), \
138}
139
140#define ARM_PMU_PROBE(_cpuid, _fn) \
141 PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
142
143#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
144
145#define XSCALE_PMU_PROBE(_version, _fn) \
146 PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
147
148int arm_pmu_device_probe(struct platform_device *pdev,
149 const struct of_device_id *of_table,
150 const struct pmu_probe_info *probe_table);
151
152#endif /* CONFIG_ARM_PMU */
153
154#endif /* __ARM_PMU_H__ */
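
This header is the former arch/arm pmu.h moved to a shared location. A sketch of how a CPU PMU driver plugs into arm_pmu_device_probe(); the compatible string, cpuid entry and init callback are illustrative:

static int example_pmu_init(struct arm_pmu *pmu)
{
        pmu->name = "example_pmu";
        /* fill in handle_irq, enable, disable, map_event, num_events, ... */
        return 0;
}

static const struct of_device_id example_pmu_of_ids[] = {
        { .compatible = "example,pmu", .data = example_pmu_init },
        { /* sentinel */ }
};

static const struct pmu_probe_info example_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, example_pmu_init),
        { /* sentinel */ }
};

static int example_pmu_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, example_pmu_of_ids,
                                    example_pmu_probe_table);
}
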
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809433b3..092a0e8a479a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child);
641extern void perf_event_exit_task(struct task_struct *child); 641extern void perf_event_exit_task(struct task_struct *child);
642extern void perf_event_free_task(struct task_struct *task); 642extern void perf_event_free_task(struct task_struct *task);
643extern void perf_event_delayed_put(struct task_struct *task); 643extern void perf_event_delayed_put(struct task_struct *task);
644extern struct perf_event *perf_event_get(unsigned int fd);
645extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
644extern void perf_event_print_debug(void); 646extern void perf_event_print_debug(void);
645extern void perf_pmu_disable(struct pmu *pmu); 647extern void perf_pmu_disable(struct pmu *pmu);
646extern void perf_pmu_enable(struct pmu *pmu); 648extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
659 void *context); 661 void *context);
660extern void perf_pmu_migrate_context(struct pmu *pmu, 662extern void perf_pmu_migrate_context(struct pmu *pmu,
661 int src_cpu, int dst_cpu); 663 int src_cpu, int dst_cpu);
664extern u64 perf_event_read_local(struct perf_event *event);
662extern u64 perf_event_read_value(struct perf_event *event, 665extern u64 perf_event_read_value(struct perf_event *event,
663 u64 *enabled, u64 *running); 666 u64 *enabled, u64 *running);
664 667
@@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
979static inline void perf_event_exit_task(struct task_struct *child) { } 982static inline void perf_event_exit_task(struct task_struct *child) { }
980static inline void perf_event_free_task(struct task_struct *task) { } 983static inline void perf_event_free_task(struct task_struct *task) { }
981static inline void perf_event_delayed_put(struct task_struct *task) { } 984static inline void perf_event_delayed_put(struct task_struct *task) { }
985static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
986static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
987{
988 return ERR_PTR(-EINVAL);
989}
990static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
982static inline void perf_event_print_debug(void) { } 991static inline void perf_event_print_debug(void) { }
983static inline int perf_event_task_disable(void) { return -EINVAL; } 992static inline int perf_event_task_disable(void) { return -EINVAL; }
984static inline int perf_event_task_enable(void) { return -EINVAL; } 993static inline int perf_event_task_enable(void) { return -EINVAL; }
@@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event) { }
1011static inline void perf_event_disable(struct perf_event *event) { } 1020static inline void perf_event_disable(struct perf_event *event) { }
1012static inline int __perf_event_disable(void *info) { return -1; } 1021static inline int __perf_event_disable(void *info) { return -1; }
1013static inline void perf_event_task_tick(void) { } 1022static inline void perf_event_task_tick(void) { }
1023static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
1014#endif 1024#endif
1015 1025
1016#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL) 1026#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
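
perf_event_get() resolves a perf event from a user-supplied file descriptor and perf_event_read_local() reads its count without an IPI, for events bound to the current CPU or task; the pairing was added for in-kernel users such as BPF. A sketch of a consumer; error handling is abbreviated and the caller is hypothetical:

static u64 example_read_counter(unsigned int fd)
{
        struct perf_event *event = perf_event_get(fd);
        u64 count;

        if (IS_ERR(event))
                return 0;

        count = perf_event_read_local(event);   /* current CPU/task only */

        /* drop the reference taken by perf_event_get() */
        perf_event_release_kernel(event);
        return count;
}
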
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f84b8dd..962387a192f1 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -330,6 +330,7 @@ struct phy_c45_device_ids {
330 * c45_ids: 802.3-c45 Device Identifiers if is_c45. 330 * c45_ids: 802.3-c45 Device Identifiers if is_c45.
331 * is_c45: Set to true if this phy uses clause 45 addressing. 331 * is_c45: Set to true if this phy uses clause 45 addressing.
332 * is_internal: Set to true if this phy is internal to a MAC. 332 * is_internal: Set to true if this phy is internal to a MAC.
333 * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
333 * has_fixups: Set to true if this phy has fixups/quirks. 334 * has_fixups: Set to true if this phy has fixups/quirks.
334 * suspended: Set to true if this phy has been suspended successfully. 335 * suspended: Set to true if this phy has been suspended successfully.
335 * state: state of the PHY for management purposes 336 * state: state of the PHY for management purposes
@@ -368,6 +369,7 @@ struct phy_device {
368 struct phy_c45_device_ids c45_ids; 369 struct phy_c45_device_ids c45_ids;
369 bool is_c45; 370 bool is_c45;
370 bool is_internal; 371 bool is_internal;
372 bool is_pseudo_fixed_link;
371 bool has_fixups; 373 bool has_fixups;
372 bool suspended; 374 bool suspended;
373 375
@@ -424,6 +426,8 @@ struct phy_device {
424 426
425 struct net_device *attached_dev; 427 struct net_device *attached_dev;
426 428
429 u8 mdix;
430
427 void (*adjust_link)(struct net_device *dev); 431 void (*adjust_link)(struct net_device *dev);
428}; 432};
429#define to_phy_device(d) container_of(d, struct phy_device, dev) 433#define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -686,6 +690,16 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
686{ 690{
687 return phydev->interface >= PHY_INTERFACE_MODE_RGMII && 691 return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
688 phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; 692 phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
693}
694
695/*
696 * phy_is_pseudo_fixed_link - Convenience function for testing if this
697 * PHY is the CPU port facing side of an Ethernet switch, or similar.
698 * @phydev: the phy_device struct
699 */
700static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
701{
702 return phydev->is_pseudo_fixed_link;
689} 703}
690 704
691/** 705/**
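
The phy.h hunks above add an is_pseudo_fixed_link flag plus the phy_is_pseudo_fixed_link() helper. A minimal sketch of how a MAC driver might use it; the helper name and return type come from the hunk, everything else is illustrative.

#include <linux/phy.h>

/* Hedged sketch: the CPU-facing port of an Ethernet switch is registered
 * as a pseudo fixed link, so its speed/duplex never renegotiate and the
 * MAC driver can skip its usual link-polling work for it. */
static bool example_phy_needs_link_polling(struct phy_device *phydev)
{
	if (phy_is_pseudo_fixed_link(phydev))
		return false;	/* switch CPU port: nothing to renegotiate */

	return true;
}
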
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index fe5732d53eda..2400d2ea4f34 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@ struct device_node;
13 13
14#if IS_ENABLED(CONFIG_FIXED_PHY) 14#if IS_ENABLED(CONFIG_FIXED_PHY)
15extern int fixed_phy_add(unsigned int irq, int phy_id, 15extern int fixed_phy_add(unsigned int irq, int phy_id,
16 struct fixed_phy_status *status); 16 struct fixed_phy_status *status,
17 int link_gpio);
17extern struct phy_device *fixed_phy_register(unsigned int irq, 18extern struct phy_device *fixed_phy_register(unsigned int irq,
18 struct fixed_phy_status *status, 19 struct fixed_phy_status *status,
20 int link_gpio,
19 struct device_node *np); 21 struct device_node *np);
20extern void fixed_phy_del(int phy_addr); 22extern void fixed_phy_del(int phy_addr);
21extern int fixed_phy_set_link_update(struct phy_device *phydev, 23extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@ extern int fixed_phy_update_state(struct phy_device *phydev,
26 const struct fixed_phy_status *changed); 28 const struct fixed_phy_status *changed);
27#else 29#else
28static inline int fixed_phy_add(unsigned int irq, int phy_id, 30static inline int fixed_phy_add(unsigned int irq, int phy_id,
29 struct fixed_phy_status *status) 31 struct fixed_phy_status *status,
32 int link_gpio)
30{ 33{
31 return -ENODEV; 34 return -ENODEV;
32} 35}
33static inline struct phy_device *fixed_phy_register(unsigned int irq, 36static inline struct phy_device *fixed_phy_register(unsigned int irq,
34 struct fixed_phy_status *status, 37 struct fixed_phy_status *status,
38 int gpio_link,
35 struct device_node *np) 39 struct device_node *np)
36{ 40{
37 return ERR_PTR(-ENODEV); 41 return ERR_PTR(-ENODEV);
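
fixed_phy_add() and fixed_phy_register() gain a link_gpio argument above. A hedged registration sketch; PHY_POLL and the fixed_phy_status field names are assumed from the existing phylib headers, and the speed/duplex values are illustrative.

#include <linux/phy.h>
#include <linux/phy_fixed.h>

/* Hedged sketch: register a 1 Gbit/s full-duplex fixed link whose state
 * is reported through a GPIO; pass -1 as link_gpio when no GPIO exists. */
static struct phy_device *example_register_fixed_link(struct device_node *np,
						      int link_gpio)
{
	struct fixed_phy_status status = {
		.link	= 1,
		.speed	= 1000,
		.duplex	= DUPLEX_FULL,
	};

	/* PHY_POLL: no interrupt line, let phylib poll the status. */
	return fixed_phy_register(PHY_POLL, &status, link_gpio, np);
}
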
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 4b452c6a2f7b..527a85c61924 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -46,18 +46,6 @@ struct at91_cf_data {
46#define AT91_IDE_SWAP_A0_A2 0x02 46#define AT91_IDE_SWAP_A0_A2 0x02
47}; 47};
48 48
49 /* USB Host */
50#define AT91_MAX_USBH_PORTS 3
51struct at91_usbh_data {
52 int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
53 int overcurrent_pin[AT91_MAX_USBH_PORTS];
54 u8 ports; /* number of ports on root hub */
55 u8 overcurrent_supported;
56 u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
57 u8 overcurrent_status[AT91_MAX_USBH_PORTS];
58 u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
59};
60
61 /* NAND / SmartMedia */ 49 /* NAND / SmartMedia */
62struct atmel_nand_data { 50struct atmel_nand_data {
63 int enable_pin; /* chip enable */ 51 int enable_pin; /* chip enable */
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
index 02bf6ea31701..695035a8d7fb 100644
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ b/include/linux/platform_data/atmel_mxt_ts.h
@@ -10,16 +10,22 @@
10 * option) any later version. 10 * option) any later version.
11 */ 11 */
12 12
13#ifndef __LINUX_ATMEL_MXT_TS_H 13#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
14#define __LINUX_ATMEL_MXT_TS_H 14#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17 17
18enum mxt_suspend_mode {
19 MXT_SUSPEND_DEEP_SLEEP = 0,
20 MXT_SUSPEND_T9_CTRL = 1,
21};
22
18/* The platform data for the Atmel maXTouch touchscreen driver */ 23/* The platform data for the Atmel maXTouch touchscreen driver */
19struct mxt_platform_data { 24struct mxt_platform_data {
20 unsigned long irqflags; 25 unsigned long irqflags;
21 u8 t19_num_keys; 26 u8 t19_num_keys;
22 const unsigned int *t19_keymap; 27 const unsigned int *t19_keymap;
28 enum mxt_suspend_mode suspend_mode;
23}; 29};
24 30
25#endif /* __LINUX_ATMEL_MXT_TS_H */ 31#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
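
Besides moving the header, the change above introduces enum mxt_suspend_mode and a suspend_mode field. A hypothetical board-file snippet; the key codes and IRQ flag are illustrative, only the structure layout comes from the header.

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_data/atmel_mxt_ts.h>

/* Hedged sketch: keep the touch object (T9) controlled across suspend
 * instead of putting the controller into deep sleep. */
static const unsigned int example_mxt_t19_keys[] = { KEY_HOME, KEY_BACK };

static const struct mxt_platform_data example_mxt_pdata = {
	.irqflags	= IRQF_TRIGGER_FALLING,
	.t19_num_keys	= ARRAY_SIZE(example_mxt_t19_keys),
	.t19_keymap	= example_mxt_t19_keys,
	.suspend_mode	= MXT_SUSPEND_T9_CTRL,
};
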
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 97baf831e071..3af0da1f3be5 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,14 +10,8 @@
10#ifndef __CLK_UX500_H 10#ifndef __CLK_UX500_H
11#define __CLK_UX500_H 11#define __CLK_UX500_H
12 12
13void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, 13void u8500_clk_init(void);
14 u32 clkrst5_base, u32 clkrst6_base); 14void u9540_clk_init(void);
15 15void u8540_clk_init(void);
16void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
17 u32 clkrst5_base, u32 clkrst6_base);
18void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
19 u32 clkrst5_base, u32 clkrst6_base);
20void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
21 u32 clkrst5_base, u32 clkrst6_base);
22 16
23#endif /* __CLK_UX500_H */ 17#endif /* __CLK_UX500_H */
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519d2dcd..000000000000
--- a/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef __GPIO_EM_H__
2#define __GPIO_EM_H__
3
4struct gpio_em_config {
5 unsigned int gpio_base;
6 unsigned int irq_base;
7 unsigned int number_of_pins;
8 const char *pctl_name;
9};
10
11#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
new file mode 100644
index 000000000000..c68712aadf43
--- /dev/null
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -0,0 +1,44 @@
1/*
2 * I2C multiplexer using a single register
3 *
4 * Copyright 2015 Freescale Semiconductor
5 * York Sun <yorksun@freescale.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#ifndef __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
14#define __LINUX_PLATFORM_DATA_I2C_MUX_REG_H
15
16/**
17 * struct i2c_mux_reg_platform_data - Platform-dependent data for i2c-mux-reg
18 * @parent: Parent I2C bus adapter number
19 * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
20 * @values: Array of value for each channel
21 * @n_values: Number of multiplexer channels
22 * @little_endian: Indicating if the register is in little endian
23 * @write_only: Reading the register is not allowed by hardware
24 * @classes: Optional I2C auto-detection classes
25 * @idle: Value to write to mux when idle
26 * @idle_in_use: indicate if idle value is in use
27 * @reg: Virtual address of the register to switch channel
28 * @reg_size: register size in bytes
29 */
30struct i2c_mux_reg_platform_data {
31 int parent;
32 int base_nr;
33 const unsigned int *values;
34 int n_values;
35 bool little_endian;
36 bool write_only;
37 const unsigned int *classes;
38 u32 idle;
39 bool idle_in_use;
40 void __iomem *reg;
41 resource_size_t reg_size;
42};
43
44#endif /* __LINUX_PLATFORM_DATA_I2C_MUX_REG_H */
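
The new header describes the platform data consumed by the matching i2c-mux-reg driver. A hedged board-code sketch; the adapter number, channel values and register width are made up, only the field names come from the struct above.

#include <linux/kernel.h>
#include <linux/platform_data/i2c-mux-reg.h>

/* Hedged sketch: a 4-way mux hanging off adapter 0, driven by a single
 * write-only 8-bit register that board code ioremap()s later. */
static const unsigned int example_mux_values[] = { 0, 1, 2, 3 };

static struct i2c_mux_reg_platform_data example_mux_pdata = {
	.parent		= 0,			/* upstream adapter number */
	.base_nr	= 0,			/* dynamic child bus numbers */
	.values		= example_mux_values,
	.n_values	= ARRAY_SIZE(example_mux_values),
	.write_only	= true,
	.idle		= 0,
	.idle_in_use	= true,			/* park on channel 0 when idle */
	.reg_size	= 1,			/* one byte wide */
	/* .reg is filled in once the control register is ioremap()ed */
};
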
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 000000000000..f16542c77ff7
--- /dev/null
+++ b/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
1/*
2 * Platform data for the Intel TCO Watchdog
3 */
4
5#ifndef _ITCO_WDT_H_
6#define _ITCO_WDT_H_
7
8/* Watchdog resources */
9#define ICH_RES_IO_TCO 0
10#define ICH_RES_IO_SMI 1
11#define ICH_RES_MEM_OFF 2
12#define ICH_RES_MEM_GCS_PMC 0
13
14struct itco_wdt_platform_data {
15 char name[32];
16 unsigned int version;
17};
18
19#endif /* _ITCO_WDT_H_ */
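
A short, hypothetical example of the data a chipset driver would attach to the iTCO watchdog platform device; the name and TCO version are illustrative.

#include <linux/platform_data/itco_wdt.h>

static struct itco_wdt_platform_data example_itco_pdata = {
	.name		= "Example PCH",	/* shown in the watchdog identity */
	.version	= 2,			/* TCO register layout revision */
};
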
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
index 6a9fed57f346..eb8a6860e816 100644
--- a/include/linux/platform_data/leds-kirkwood-ns2.h
+++ b/include/linux/platform_data/leds-kirkwood-ns2.h
@@ -9,11 +9,25 @@
9#ifndef __LEDS_KIRKWOOD_NS2_H 9#ifndef __LEDS_KIRKWOOD_NS2_H
10#define __LEDS_KIRKWOOD_NS2_H 10#define __LEDS_KIRKWOOD_NS2_H
11 11
12enum ns2_led_modes {
13 NS_V2_LED_OFF,
14 NS_V2_LED_ON,
15 NS_V2_LED_SATA,
16};
17
18struct ns2_led_modval {
19 enum ns2_led_modes mode;
20 int cmd_level;
21 int slow_level;
22};
23
12struct ns2_led { 24struct ns2_led {
13 const char *name; 25 const char *name;
14 const char *default_trigger; 26 const char *default_trigger;
15 unsigned cmd; 27 unsigned cmd;
16 unsigned slow; 28 unsigned slow;
29 int num_modes;
30 struct ns2_led_modval *modval;
17}; 31};
18 32
19struct ns2_led_platform_data { 33struct ns2_led_platform_data {
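
The ns2 LED platform data gains a per-mode GPIO level table above. A hedged sketch of one LED description; the GPIO numbers and levels are board specific and made up here.

#include <linux/kernel.h>
#include <linux/platform_data/leds-kirkwood-ns2.h>

static struct ns2_led_modval example_ns2_modval[] = {
	{ NS_V2_LED_OFF,  0, 0 },	/* mode, cmd level, slow level */
	{ NS_V2_LED_ON,   0, 1 },
	{ NS_V2_LED_SATA, 1, 0 },
};

static struct ns2_led example_ns2_led = {
	.name		= "example:blue:sata",
	.cmd		= 30,			/* command GPIO (made up) */
	.slow		= 29,			/* slow-blink GPIO (made up) */
	.num_modes	= ARRAY_SIZE(example_ns2_modval),
	.modval		= example_ns2_modval,
};
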
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 9c7fd1efe495..1b2ba24e4e03 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@ struct lp855x_rom_data {
136 Only valid when mode is PWM_BASED. 136 Only valid when mode is PWM_BASED.
137 * @size_program : total size of lp855x_rom_data 137 * @size_program : total size of lp855x_rom_data
138 * @rom_data : list of new eeprom/eprom registers 138 * @rom_data : list of new eeprom/eprom registers
139 * @supply : regulator that supplies 3V input
140 */ 139 */
141struct lp855x_platform_data { 140struct lp855x_platform_data {
142 const char *name; 141 const char *name;
@@ -145,7 +144,6 @@ struct lp855x_platform_data {
145 unsigned int period_ns; 144 unsigned int period_ns;
146 int size_program; 145 int size_program;
147 struct lp855x_rom_data *rom_data; 146 struct lp855x_rom_data *rom_data;
148 struct regulator *supply;
149}; 147};
150 148
151#endif 149#endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index e1571efa3f2b..95ccab3f454a 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -45,5 +45,6 @@ struct esdhc_platform_data {
45 int max_bus_width; 45 int max_bus_width;
46 bool support_vsel; 46 bool support_vsel;
47 unsigned int delay_line; 47 unsigned int delay_line;
48 unsigned int tuning_step; /* The delay cell steps in tuning procedure */
48}; 49};
49#endif /* __ASM_ARCH_IMX_ESDHC_H */ 50#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
index 7bae83b7c396..646af6f8b838 100644
--- a/include/linux/input/pixcir_ts.h
+++ b/include/linux/platform_data/pixcir_i2c_ts.h
@@ -57,7 +57,6 @@ struct pixcir_i2c_chip_data {
57struct pixcir_ts_platform_data { 57struct pixcir_ts_platform_data {
58 int x_max; 58 int x_max;
59 int y_max; 59 int y_max;
60 int gpio_attb; /* GPIO connected to ATTB line */
61 struct pixcir_i2c_chip_data chip; 60 struct pixcir_i2c_chip_data chip;
62}; 61};
63 62
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index 8dc2fa47a2aa..f4edcb03c40c 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -49,6 +49,7 @@ struct davinci_spi_platform_data {
49 u8 num_chipselect; 49 u8 num_chipselect;
50 u8 intr_line; 50 u8 intr_line;
51 u8 *chip_sel; 51 u8 *chip_sel;
52 u8 prescaler_limit;
52 bool cshold_bug; 53 bool cshold_bug;
53 enum dma_event_q dma_event_q; 54 enum dma_event_q dma_event_q;
54}; 55};
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
new file mode 100644
index 000000000000..54b04483976c
--- /dev/null
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -0,0 +1,20 @@
1/*
2 * MTK SPI bus driver definitions
3 *
4 * Copyright (c) 2015 MediaTek Inc.
5 * Author: Leilk Liu <leilk.liu@mediatek.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H
13#define ____LINUX_PLATFORM_DATA_SPI_MTK_H
14
15/* Board specific platform_data */
16struct mtk_chip_config {
17 u32 tx_mlsb;
18 u32 rx_mlsb;
19};
20#endif
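
A tiny, hypothetical per-slave configuration for the new MTK SPI header; board code would typically hang such a struct off spi_board_info.controller_data.

#include <linux/types.h>
#include <linux/platform_data/spi-mt65xx.h>

/* Hedged sketch: transmit and receive most-significant bit first. */
static struct mtk_chip_config example_mtk_spi_cfg = {
	.tx_mlsb = 1,
	.rx_mlsb = 1,
};
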
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
deleted file mode 100644
index d9d400a297bd..000000000000
--- a/include/linux/platform_data/st_nci.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Driver include for ST NCI NFC chip family.
3 *
4 * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _ST_NCI_H_
20#define _ST_NCI_H_
21
22#define ST_NCI_DRIVER_NAME "st_nci"
23
24struct st_nci_nfc_platform_data {
25 unsigned int gpio_reset;
26 unsigned int irq_polarity;
27};
28
29#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h
index 92fc2b2232e7..699ac4109366 100644
--- a/include/linux/platform_data/video-ep93xx.h
+++ b/include/linux/platform_data/video-ep93xx.h
@@ -2,11 +2,8 @@
2#define __VIDEO_EP93XX_H 2#define __VIDEO_EP93XX_H
3 3
4struct platform_device; 4struct platform_device;
5struct fb_videomode;
6struct fb_info; 5struct fb_info;
7 6
8#define EP93XXFB_USE_MODEDB 0
9
10/* VideoAttributes flags */ 7/* VideoAttributes flags */
11#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0) 8#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0)
12#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1) 9#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1)
@@ -38,12 +35,7 @@ struct fb_info;
38 EP93XXFB_PIXEL_DATA_ENABLE) 35 EP93XXFB_PIXEL_DATA_ENABLE)
39 36
40struct ep93xxfb_mach_info { 37struct ep93xxfb_mach_info {
41 unsigned int num_modes;
42 const struct fb_videomode *modes;
43 const struct fb_videomode *default_mode;
44 int bpp;
45 unsigned int flags; 38 unsigned int flags;
46
47 int (*setup)(struct platform_device *pdev); 39 int (*setup)(struct platform_device *pdev);
48 void (*teardown)(struct platform_device *pdev); 40 void (*teardown)(struct platform_device *pdev);
49 void (*blank)(int blank_mode, struct fb_info *info); 41 void (*blank)(int blank_mode, struct fb_info *info);
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 0472ab2f6ede..7bdece8ef33e 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
16#define _LINUX_INPUT_ZFORCE_TS_H 16#define _LINUX_INPUT_ZFORCE_TS_H
17 17
18struct zforce_ts_platdata { 18struct zforce_ts_platdata {
19 int gpio_int;
20 int gpio_rst;
21
22 unsigned int x_max; 19 unsigned int x_max;
23 unsigned int y_max; 20 unsigned int y_max;
24}; 21};
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 681ccb053f72..b1cf7e797892 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -22,9 +22,6 @@
22 22
23enum gpd_status { 23enum gpd_status {
24 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 24 GPD_STATE_ACTIVE = 0, /* PM domain is active */
25 GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
26 GPD_STATE_BUSY, /* Something is happening to the PM domain */
27 GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
28 GPD_STATE_POWER_OFF, /* PM domain is off */ 25 GPD_STATE_POWER_OFF, /* PM domain is off */
29}; 26};
30 27
@@ -59,9 +56,6 @@ struct generic_pm_domain {
59 unsigned int in_progress; /* Number of devices being suspended now */ 56 unsigned int in_progress; /* Number of devices being suspended now */
60 atomic_t sd_count; /* Number of subdomains with power "on" */ 57 atomic_t sd_count; /* Number of subdomains with power "on" */
61 enum gpd_status status; /* Current state of the domain */ 58 enum gpd_status status; /* Current state of the domain */
62 wait_queue_head_t status_wait_queue;
63 struct task_struct *poweroff_task; /* Powering off task */
64 unsigned int resume_count; /* Number of devices being resumed */
65 unsigned int device_count; /* Number of devices */ 59 unsigned int device_count; /* Number of devices */
66 unsigned int suspended_count; /* System suspend device counter */ 60 unsigned int suspended_count; /* System suspend device counter */
67 unsigned int prepared_count; /* Suspend counter of prepared devices */ 61 unsigned int prepared_count; /* Suspend counter of prepared devices */
@@ -113,7 +107,6 @@ struct generic_pm_domain_data {
113 struct pm_domain_data base; 107 struct pm_domain_data base;
114 struct gpd_timing_data td; 108 struct gpd_timing_data td;
115 struct notifier_block nb; 109 struct notifier_block nb;
116 int need_restore;
117}; 110};
118 111
119#ifdef CONFIG_PM_GENERIC_DOMAINS 112#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -228,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
228 return -ENOSYS; 221 return -ENOSYS;
229} 222}
230static inline void pm_genpd_poweroff_unused(void) {} 223static inline void pm_genpd_poweroff_unused(void) {}
231#define simple_qos_governor NULL
232#define pm_domain_always_on_gov NULL
233#endif 224#endif
234 225
235static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, 226static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cec2d4540914..cab7ba55bedb 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -30,7 +30,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
30 30
31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); 31unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
32 32
33bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
34
33int dev_pm_opp_get_opp_count(struct device *dev); 35int dev_pm_opp_get_opp_count(struct device *dev);
36unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
34 37
35struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 38struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
36 unsigned long freq, 39 unsigned long freq,
@@ -62,11 +65,21 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
62 return 0; 65 return 0;
63} 66}
64 67
68static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
69{
70 return false;
71}
72
65static inline int dev_pm_opp_get_opp_count(struct device *dev) 73static inline int dev_pm_opp_get_opp_count(struct device *dev)
66{ 74{
67 return 0; 75 return 0;
68} 76}
69 77
78static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
79{
80 return 0;
81}
82
70static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 83static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
71 unsigned long freq, bool available) 84 unsigned long freq, bool available)
72{ 85{
@@ -115,6 +128,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
115#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 128#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
116int of_init_opp_table(struct device *dev); 129int of_init_opp_table(struct device *dev);
117void of_free_opp_table(struct device *dev); 130void of_free_opp_table(struct device *dev);
131int of_cpumask_init_opp_table(cpumask_var_t cpumask);
132void of_cpumask_free_opp_table(cpumask_var_t cpumask);
133int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
134int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
118#else 135#else
119static inline int of_init_opp_table(struct device *dev) 136static inline int of_init_opp_table(struct device *dev)
120{ 137{
@@ -124,6 +141,25 @@ static inline int of_init_opp_table(struct device *dev)
124static inline void of_free_opp_table(struct device *dev) 141static inline void of_free_opp_table(struct device *dev)
125{ 142{
126} 143}
144
145static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
146{
147 return -ENOSYS;
148}
149
150static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
151{
152}
153
154static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
155{
156 return -ENOSYS;
157}
158
159static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
160{
161 return -ENOSYS;
162}
127#endif 163#endif
128 164
129#endif /* __LINUX_OPP_H__ */ 165#endif /* __LINUX_OPP_H__ */
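
dev_pm_opp_is_turbo() and dev_pm_opp_get_max_clock_latency() are new above. A hedged consumer sketch; dev_pm_opp_find_freq_floor() is the pre-existing lookup helper, the RCU locking convention follows the OPP library of this era, and error handling is trimmed.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Hedged sketch: pick the highest sustainable (non-turbo) frequency and
 * report the worst-case clock switching latency for the device. */
static unsigned long example_pick_sustainable_freq(struct device *dev)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;

	rcu_read_lock();			/* OPP tables are RCU protected */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (!IS_ERR(opp) && dev_pm_opp_is_turbo(opp)) {
		freq--;				/* step just below the turbo OPP */
		opp = dev_pm_opp_find_freq_floor(dev, &freq);
	}
	rcu_read_unlock();

	dev_dbg(dev, "max clock latency: %lu ns\n",
		dev_pm_opp_get_max_clock_latency(dev));

	return IS_ERR(opp) ? 0 : freq;
}
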
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 7b3ae0cffc05..0f65d36c2a75 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev);
161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); 161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); 162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); 163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
164int dev_pm_qos_expose_latency_tolerance(struct device *dev);
165void dev_pm_qos_hide_latency_tolerance(struct device *dev);
164 166
165static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) 167static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
166{ 168{
@@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
229 { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } 231 { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
230static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) 232static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
231 { return 0; } 233 { return 0; }
234static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
235 { return 0; }
236static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
232 237
233static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } 238static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
234static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } 239static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
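
dev_pm_qos_expose_latency_tolerance() and its hide counterpart are new above. A hedged probe/remove sketch; it assumes the device already provides a latency-tolerance handler in its power callbacks, which is what the exposed attribute ends up driving.

#include <linux/device.h>
#include <linux/pm_qos.h>

static int example_probe(struct device *dev)
{
	/* Expose the latency-tolerance attribute under the device's power/ directory. */
	return dev_pm_qos_expose_latency_tolerance(dev);
}

static void example_remove(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
}
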
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 30e84d48bfea..3bdbb4189780 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev)
98 return dev->power.runtime_status == RPM_SUSPENDED; 98 return dev->power.runtime_status == RPM_SUSPENDED;
99} 99}
100 100
101static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
102{
103 return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
104}
105
106static inline bool pm_runtime_enabled(struct device *dev) 101static inline bool pm_runtime_enabled(struct device *dev)
107{ 102{
108 return !dev->power.disable_depth; 103 return !dev->power.disable_depth;
@@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {}
164static inline bool pm_runtime_suspended(struct device *dev) { return false; } 159static inline bool pm_runtime_suspended(struct device *dev) { return false; }
165static inline bool pm_runtime_active(struct device *dev) { return true; } 160static inline bool pm_runtime_active(struct device *dev) { return true; }
166static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } 161static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
167static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
168static inline bool pm_runtime_enabled(struct device *dev) { return false; } 162static inline bool pm_runtime_enabled(struct device *dev) { return false; }
169 163
170static inline void pm_runtime_no_callbacks(struct device *dev) {} 164static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index d2114045a6c4..85f810b33917 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,28 +14,42 @@
14#define __PMEM_H__ 14#define __PMEM_H__
15 15
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/uio.h>
17 18
18#ifdef CONFIG_ARCH_HAS_PMEM_API 19#ifdef CONFIG_ARCH_HAS_PMEM_API
19#include <asm/cacheflush.h> 20#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
21#include <asm/pmem.h>
20#else 22#else
23#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
24/*
25 * These are simply here to enable compilation, all call sites gate
26 * calling these symbols with arch_has_pmem_api() and redirect to the
27 * implementation in asm/pmem.h.
28 */
29static inline bool __arch_has_wmb_pmem(void)
30{
31 return false;
32}
33
21static inline void arch_wmb_pmem(void) 34static inline void arch_wmb_pmem(void)
22{ 35{
23 BUG(); 36 BUG();
24} 37}
25 38
26static inline bool __arch_has_wmb_pmem(void) 39static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
40 size_t n)
27{ 41{
28 return false; 42 BUG();
29} 43}
30 44
31static inline void __pmem *arch_memremap_pmem(resource_size_t offset, 45static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
32 unsigned long size) 46 struct iov_iter *i)
33{ 47{
34 return NULL; 48 BUG();
49 return 0;
35} 50}
36 51
37static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, 52static inline void arch_clear_pmem(void __pmem *addr, size_t size)
38 size_t n)
39{ 53{
40 BUG(); 54 BUG();
41} 55}
@@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
43 57
44/* 58/*
45 * Architectures that define ARCH_HAS_PMEM_API must provide 59 * Architectures that define ARCH_HAS_PMEM_API must provide
46 * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(), 60 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
47 * arch_wmb_pmem(), and __arch_has_wmb_pmem(). 61 * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
48 */ 62 */
49
50static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) 63static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
51{ 64{
52 memcpy(dst, (void __force const *) src, size); 65 memcpy(dst, (void __force const *) src, size);
53} 66}
54 67
55static inline void memunmap_pmem(void __pmem *addr) 68static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
69{
70 devm_memunmap(dev, (void __force *) addr);
71}
72
73static inline bool arch_has_pmem_api(void)
56{ 74{
57 iounmap((void __force __iomem *) addr); 75 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
58} 76}
59 77
60/** 78/**
@@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr)
68 */ 86 */
69static inline bool arch_has_wmb_pmem(void) 87static inline bool arch_has_wmb_pmem(void)
70{ 88{
71 if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) 89 return arch_has_pmem_api() && __arch_has_wmb_pmem();
72 return __arch_has_wmb_pmem();
73 return false;
74}
75
76static inline bool arch_has_pmem_api(void)
77{
78 return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
79} 90}
80 91
81/* 92/*
@@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void)
85 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for 96 * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
86 * making data durable relative to i/o completion. 97 * making data durable relative to i/o completion.
87 */ 98 */
88static void default_memcpy_to_pmem(void __pmem *dst, const void *src, 99static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
89 size_t size) 100 size_t size)
90{ 101{
91 memcpy((void __force *) dst, src, size); 102 memcpy((void __force *) dst, src, size);
92} 103}
93 104
94static void __pmem *default_memremap_pmem(resource_size_t offset, 105static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
95 unsigned long size) 106 size_t bytes, struct iov_iter *i)
107{
108 return copy_from_iter_nocache((void __force *)addr, bytes, i);
109}
110
111static inline void default_clear_pmem(void __pmem *addr, size_t size)
96{ 112{
97 return (void __pmem __force *)ioremap_wt(offset, size); 113 if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
114 clear_page((void __force *)addr);
115 else
116 memset((void __force *)addr, 0, size);
98} 117}
99 118
100/** 119/**
@@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset,
109 * wmb_pmem() arrange for the data to be written through the 128 * wmb_pmem() arrange for the data to be written through the
110 * cache to persistent media. 129 * cache to persistent media.
111 */ 130 */
112static inline void __pmem *memremap_pmem(resource_size_t offset, 131static inline void __pmem *memremap_pmem(struct device *dev,
113 unsigned long size) 132 resource_size_t offset, unsigned long size)
114{ 133{
115 if (arch_has_pmem_api()) 134 return (void __pmem *) devm_memremap(dev, offset, size,
116 return arch_memremap_pmem(offset, size); 135 ARCH_MEMREMAP_PMEM);
117 return default_memremap_pmem(offset, size);
118} 136}
119 137
120/** 138/**
@@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
146 */ 164 */
147static inline void wmb_pmem(void) 165static inline void wmb_pmem(void)
148{ 166{
149 if (arch_has_pmem_api()) 167 if (arch_has_wmb_pmem())
150 arch_wmb_pmem(); 168 arch_wmb_pmem();
169 else
170 wmb();
171}
172
173/**
174 * copy_from_iter_pmem - copy data from an iterator to PMEM
175 * @addr: PMEM destination address
176 * @bytes: number of bytes to copy
177 * @i: iterator with source data
178 *
179 * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
180 * This function requires explicit ordering with a wmb_pmem() call.
181 */
182static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
183 struct iov_iter *i)
184{
185 if (arch_has_pmem_api())
186 return arch_copy_from_iter_pmem(addr, bytes, i);
187 return default_copy_from_iter_pmem(addr, bytes, i);
188}
189
190/**
191 * clear_pmem - zero a PMEM memory range
192 * @addr: virtual start address
193 * @size: number of bytes to zero
194 *
195 * Write zeros into the memory range starting at 'addr' for 'size' bytes.
196 * This function requires explicit ordering with a wmb_pmem() call.
197 */
198static inline void clear_pmem(void __pmem *addr, size_t size)
199{
200 if (arch_has_pmem_api())
201 arch_clear_pmem(addr, size);
202 else
203 default_clear_pmem(addr, size);
151} 204}
152#endif /* __PMEM_H__ */ 205#endif /* __PMEM_H__ */
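
The reworked pmem API above routes mappings through devm_memremap() and adds clear_pmem()/copy_from_iter_pmem(). A hedged consumer sketch using only the calls declared in this header; the resource values are placeholders and failure handling is abbreviated.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pmem.h>

static int example_pmem_write(struct device *dev, resource_size_t start,
			      unsigned long size, const void *buf, size_t len)
{
	void __pmem *virt;

	virt = memremap_pmem(dev, start, size);
	if (!virt)
		return -ENXIO;	/* exact failure convention follows devm_memremap() */

	clear_pmem(virt, PAGE_SIZE);		/* zero the first page */
	memcpy_to_pmem(virt, buf, len);		/* stage the payload */
	wmb_pmem();				/* make both writes durable */

	memunmap_pmem(dev, virt);
	return 0;
}
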
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 2110a81c5e2a..317e16de09e5 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -19,8 +19,8 @@
19 * under normal circumstances, used to verify that nobody uses 19 * under normal circumstances, used to verify that nobody uses
20 * non-initialized list entries. 20 * non-initialized list entries.
21 */ 21 */
22#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) 22#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
23#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) 23#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
24 24
25/********** include/linux/timer.h **********/ 25/********** include/linux/timer.h **********/
26/* 26/*
@@ -69,10 +69,6 @@
69#define ATM_POISON_FREE 0x12 69#define ATM_POISON_FREE 0x12
70#define ATM_POISON 0xdeadbeef 70#define ATM_POISON 0xdeadbeef
71 71
72/********** net/ **********/
73#define NEIGHBOR_DEAD 0xdeadbeef
74#define NETFILTER_LINK_POISON 0xdead57ac
75
76/********** kernel/mutexes **********/ 72/********** kernel/mutexes **********/
77#define MUTEX_DEBUG_INIT 0x11 73#define MUTEX_DEBUG_INIT 0x11
78#define MUTEX_DEBUG_FREE 0x22 74#define MUTEX_DEBUG_FREE 0x22
@@ -83,7 +79,4 @@
83/********** security/ **********/ 79/********** security/ **********/
84#define KEY_DESTROY 0xbd 80#define KEY_DESTROY 0xbd
85 81
86/********** sound/oss/ **********/
87#define OSS_POISON_FREE 0xAB
88
89#endif 82#endif
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f185173..bea8dd8ff5e0 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -84,13 +84,21 @@
84 */ 84 */
85#define in_nmi() (preempt_count() & NMI_MASK) 85#define in_nmi() (preempt_count() & NMI_MASK)
86 86
87/*
88 * The preempt_count offset after preempt_disable();
89 */
87#if defined(CONFIG_PREEMPT_COUNT) 90#if defined(CONFIG_PREEMPT_COUNT)
88# define PREEMPT_DISABLE_OFFSET 1 91# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
89#else 92#else
90# define PREEMPT_DISABLE_OFFSET 0 93# define PREEMPT_DISABLE_OFFSET 0
91#endif 94#endif
92 95
93/* 96/*
97 * The preempt_count offset after spin_lock()
98 */
99#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
100
101/*
94 * The preempt_count offset needed for things like: 102 * The preempt_count offset needed for things like:
95 * 103 *
96 * spin_lock_bh() 104 * spin_lock_bh()
@@ -103,7 +111,7 @@
103 * 111 *
104 * Work as expected. 112 * Work as expected.
105 */ 113 */
106#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET) 114#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
107 115
108/* 116/*
109 * Are we running in atomic context? WARNING: this macro cannot 117 * Are we running in atomic context? WARNING: this macro cannot
@@ -124,7 +132,8 @@
124#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 132#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
125extern void preempt_count_add(int val); 133extern void preempt_count_add(int val);
126extern void preempt_count_sub(int val); 134extern void preempt_count_sub(int val);
127#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); }) 135#define preempt_count_dec_and_test() \
136 ({ preempt_count_sub(1); should_resched(0); })
128#else 137#else
129#define preempt_count_add(val) __preempt_count_add(val) 138#define preempt_count_add(val) __preempt_count_add(val)
130#define preempt_count_sub(val) __preempt_count_sub(val) 139#define preempt_count_sub(val) __preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \
184 193
185#define preempt_check_resched() \ 194#define preempt_check_resched() \
186do { \ 195do { \
187 if (should_resched()) \ 196 if (should_resched(0)) \
188 __preempt_schedule(); \ 197 __preempt_schedule(); \
189} while (0) 198} while (0)
190 199
diff --git a/include/linux/printk.h b/include/linux/printk.h
index a6298b27ac99..9729565c25ff 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -404,10 +404,10 @@ do { \
404 static DEFINE_RATELIMIT_STATE(_rs, \ 404 static DEFINE_RATELIMIT_STATE(_rs, \
405 DEFAULT_RATELIMIT_INTERVAL, \ 405 DEFAULT_RATELIMIT_INTERVAL, \
406 DEFAULT_RATELIMIT_BURST); \ 406 DEFAULT_RATELIMIT_BURST); \
407 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ 407 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
408 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ 408 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
409 __ratelimit(&_rs)) \ 409 __ratelimit(&_rs)) \
410 __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ 410 __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
411} while (0) 411} while (0)
412#elif defined(DEBUG) 412#elif defined(DEBUG)
413#define pr_debug_ratelimited(fmt, ...) \ 413#define pr_debug_ratelimited(fmt, ...) \
@@ -456,11 +456,17 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
456 groupsize, buf, len, ascii) \ 456 groupsize, buf, len, ascii) \
457 dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ 457 dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
458 groupsize, buf, len, ascii) 458 groupsize, buf, len, ascii)
459#else 459#elif defined(DEBUG)
460#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ 460#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
461 groupsize, buf, len, ascii) \ 461 groupsize, buf, len, ascii) \
462 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ 462 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
463 groupsize, buf, len, ascii) 463 groupsize, buf, len, ascii)
464#endif /* defined(CONFIG_DYNAMIC_DEBUG) */ 464#else
465static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
466 int rowsize, int groupsize,
467 const void *buf, size_t len, bool ascii)
468{
469}
470#endif
465 471
466#endif 472#endif
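
Two behaviour changes above: pr_debug_ratelimited() now applies the driver's pr_fmt() prefix under dynamic debug, and print_hex_dump_debug() gains a no-op stub so callers need no #ifdef. A short usage sketch; the driver name in pr_fmt() is illustrative.

#define pr_fmt(fmt) "exampledrv: " fmt

#include <linux/printk.h>
#include <linux/types.h>

static void example_dump_frame(const void *frame, size_t len)
{
	/* Prefixed and rate limited when dynamic debug enables this site. */
	pr_debug_ratelimited("rx frame, %zu bytes\n", len);

	/* Compiles away entirely when neither DEBUG nor dynamic debug is set. */
	print_hex_dump_debug("exampledrv: ", DUMP_PREFIX_OFFSET,
			     16, 1, frame, len, false);
}
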
diff --git a/include/linux/property.h b/include/linux/property.h
index 76ebde9c11d4..a59c6ee566c2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -166,4 +166,8 @@ void device_add_property_set(struct device *dev, struct property_set *pset);
166 166
167bool device_dma_is_coherent(struct device *dev); 167bool device_dma_is_coherent(struct device *dev);
168 168
169int device_get_phy_mode(struct device *dev);
170
171void *device_get_mac_address(struct device *dev, char *addr, int alen);
172
169#endif /* _LINUX_PROPERTY_H_ */ 173#endif /* _LINUX_PROPERTY_H_ */
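
device_get_phy_mode() and device_get_mac_address() let network drivers read these properties without caring whether they came from DT or ACPI. A hedged probe-path sketch; the random-address fallback is a common driver convention, not part of this API.

#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/property.h>

static int example_get_link_config(struct device *dev, u8 *mac)
{
	int phy_mode = device_get_phy_mode(dev);

	if (phy_mode < 0)
		return phy_mode;	/* otherwise a PHY_INTERFACE_MODE_* value */

	if (!device_get_mac_address(dev, (char *)mac, ETH_ALEN))
		eth_random_addr(mac);	/* firmware provided no usable address */

	return phy_mode;
}
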
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 00e8e8fa7358..5440f64d2942 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -33,7 +33,7 @@ struct prop_global {
33/* 33/*
34 * global proportion descriptor 34 * global proportion descriptor
35 * 35 *
36 * this is needed to consitently flip prop_global structures. 36 * this is needed to consistently flip prop_global structures.
37 */ 37 */
38struct prop_descriptor { 38struct prop_descriptor {
39 int index; 39 int index;
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 000000000000..a682fcc91c33
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2015 ARM Limited
12 */
13
14#ifndef __LINUX_PSCI_H
15#define __LINUX_PSCI_H
16
17#include <linux/init.h>
18#include <linux/types.h>
19
20#define PSCI_POWER_STATE_TYPE_STANDBY 0
21#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
22
23bool psci_tos_resident_on(int cpu);
24
25struct psci_operations {
26 int (*cpu_suspend)(u32 state, unsigned long entry_point);
27 int (*cpu_off)(u32 state);
28 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
29 int (*migrate)(unsigned long cpuid);
30 int (*affinity_info)(unsigned long target_affinity,
31 unsigned long lowest_affinity_level);
32 int (*migrate_info_type)(void);
33};
34
35extern struct psci_operations psci_ops;
36
37#if defined(CONFIG_ARM_PSCI_FW)
38int __init psci_dt_init(void);
39#else
40static inline int psci_dt_init(void) { return 0; }
41#endif
42
43#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
44int __init psci_acpi_init(void);
45bool __init acpi_psci_present(void);
46bool __init acpi_psci_use_hvc(void);
47#else
48static inline int psci_acpi_init(void) { return 0; }
49static inline bool acpi_psci_present(void) { return false; }
50#endif
51
52#endif /* __LINUX_PSCI_H */
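
A hedged sketch of how arch code might use the new psci_tos_resident_on() helper together with the exported psci_ops; the policy shown (refusing to offline the Trusted OS CPU) is illustrative.

#include <linux/errno.h>
#include <linux/psci.h>

static int example_cpu_off_allowed(int cpu)
{
	if (!psci_ops.cpu_off)
		return -EOPNOTSUPP;	/* firmware does not implement CPU_OFF */

	if (psci_tos_resident_on(cpu))
		return -EPERM;		/* Trusted OS must be migrated first */

	return 0;
}
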
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 987a73a40ef8..061265f92876 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -34,6 +34,7 @@
34#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP) 34#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
35 35
36#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) 36#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
37#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
37 38
38/* single stepping state bits (used on ARM and PA-RISC) */ 39/* single stepping state bits (used on ARM and PA-RISC) */
39#define PT_SINGLESTEP_BIT 31 40#define PT_SINGLESTEP_BIT 31
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 36262d08a9da..d681f6875aef 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -79,26 +79,43 @@ enum {
79 PWMF_EXPORTED = 1 << 2, 79 PWMF_EXPORTED = 1 << 2,
80}; 80};
81 81
82/**
83 * struct pwm_device - PWM channel object
84 * @label: name of the PWM device
85 * @flags: flags associated with the PWM device
86 * @hwpwm: per-chip relative index of the PWM device
87 * @pwm: global index of the PWM device
88 * @chip: PWM chip providing this PWM device
89 * @chip_data: chip-private data associated with the PWM device
90 * @period: period of the PWM signal (in nanoseconds)
91 * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
92 * @polarity: polarity of the PWM signal
93 */
82struct pwm_device { 94struct pwm_device {
83 const char *label; 95 const char *label;
84 unsigned long flags; 96 unsigned long flags;
85 unsigned int hwpwm; 97 unsigned int hwpwm;
86 unsigned int pwm; 98 unsigned int pwm;
87 struct pwm_chip *chip; 99 struct pwm_chip *chip;
88 void *chip_data; 100 void *chip_data;
89 101
90 unsigned int period; /* in nanoseconds */ 102 unsigned int period;
91 unsigned int duty_cycle; /* in nanoseconds */ 103 unsigned int duty_cycle;
92 enum pwm_polarity polarity; 104 enum pwm_polarity polarity;
93}; 105};
94 106
107static inline bool pwm_is_enabled(const struct pwm_device *pwm)
108{
109 return test_bit(PWMF_ENABLED, &pwm->flags);
110}
111
95static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) 112static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
96{ 113{
97 if (pwm) 114 if (pwm)
98 pwm->period = period; 115 pwm->period = period;
99} 116}
100 117
101static inline unsigned int pwm_get_period(struct pwm_device *pwm) 118static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
102{ 119{
103 return pwm ? pwm->period : 0; 120 return pwm ? pwm->period : 0;
104} 121}
@@ -109,7 +126,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
109 pwm->duty_cycle = duty; 126 pwm->duty_cycle = duty;
110} 127}
111 128
112static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm) 129static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
113{ 130{
114 return pwm ? pwm->duty_cycle : 0; 131 return pwm ? pwm->duty_cycle : 0;
115} 132}
@@ -119,6 +136,11 @@ static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
119 */ 136 */
120int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); 137int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
121 138
139static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
140{
141 return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
142}
143
122/** 144/**
123 * struct pwm_ops - PWM controller operations 145 * struct pwm_ops - PWM controller operations
124 * @request: optional hook for requesting a PWM 146 * @request: optional hook for requesting a PWM
@@ -131,25 +153,18 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
131 * @owner: helps prevent removal of modules exporting active PWMs 153 * @owner: helps prevent removal of modules exporting active PWMs
132 */ 154 */
133struct pwm_ops { 155struct pwm_ops {
134 int (*request)(struct pwm_chip *chip, 156 int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
135 struct pwm_device *pwm); 157 void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
136 void (*free)(struct pwm_chip *chip, 158 int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
137 struct pwm_device *pwm); 159 int duty_ns, int period_ns);
138 int (*config)(struct pwm_chip *chip, 160 int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
139 struct pwm_device *pwm, 161 enum pwm_polarity polarity);
140 int duty_ns, int period_ns); 162 int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
141 int (*set_polarity)(struct pwm_chip *chip, 163 void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
142 struct pwm_device *pwm,
143 enum pwm_polarity polarity);
144 int (*enable)(struct pwm_chip *chip,
145 struct pwm_device *pwm);
146 void (*disable)(struct pwm_chip *chip,
147 struct pwm_device *pwm);
148#ifdef CONFIG_DEBUG_FS 164#ifdef CONFIG_DEBUG_FS
149 void (*dbg_show)(struct pwm_chip *chip, 165 void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
150 struct seq_file *s);
151#endif 166#endif
152 struct module *owner; 167 struct module *owner;
153}; 168};
154 169
155/** 170/**
@@ -160,22 +175,24 @@ struct pwm_ops {
160 * @base: number of first PWM controlled by this chip 175 * @base: number of first PWM controlled by this chip
161 * @npwm: number of PWMs controlled by this chip 176 * @npwm: number of PWMs controlled by this chip
162 * @pwms: array of PWM devices allocated by the framework 177 * @pwms: array of PWM devices allocated by the framework
178 * @of_xlate: request a PWM device given a device tree PWM specifier
179 * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
163 * @can_sleep: must be true if the .config(), .enable() or .disable() 180 * @can_sleep: must be true if the .config(), .enable() or .disable()
164 * operations may sleep 181 * operations may sleep
165 */ 182 */
166struct pwm_chip { 183struct pwm_chip {
167 struct device *dev; 184 struct device *dev;
168 struct list_head list; 185 struct list_head list;
169 const struct pwm_ops *ops; 186 const struct pwm_ops *ops;
170 int base; 187 int base;
171 unsigned int npwm; 188 unsigned int npwm;
172 189
173 struct pwm_device *pwms; 190 struct pwm_device *pwms;
174 191
175 struct pwm_device * (*of_xlate)(struct pwm_chip *pc, 192 struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
176 const struct of_phandle_args *args); 193 const struct of_phandle_args *args);
177 unsigned int of_pwm_n_cells; 194 unsigned int of_pwm_n_cells;
178 bool can_sleep; 195 bool can_sleep;
179}; 196};
180 197
181#if IS_ENABLED(CONFIG_PWM) 198#if IS_ENABLED(CONFIG_PWM)
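
pwm_is_enabled() and pwm_get_polarity() are new, and the getters now take const pointers. A small consumer sketch; obtaining the pwm_device via pwm_get()/devm_pwm_get() is assumed to have happened elsewhere.

#include <linux/printk.h>
#include <linux/pwm.h>

static void example_report_pwm(const struct pwm_device *pwm)
{
	pr_info("pwm: period=%u ns duty=%u ns polarity=%s %s\n",
		pwm_get_period(pwm), pwm_get_duty_cycle(pwm),
		pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED ?
			"inversed" : "normal",
		pwm_is_enabled(pwm) ? "enabled" : "disabled");
}
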
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 0485bab061fd..92273776bce6 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
197 QUARK_X1000_SSP, 197 QUARK_X1000_SSP,
198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ 198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
199 LPSS_BYT_SSP, 199 LPSS_BYT_SSP,
200 LPSS_SPT_SSP,
200}; 201};
201 202
202struct ssp_device { 203struct ssp_device {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca6601ff25..7a57c28eb5e7 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number);
43void inode_sub_rsv_space(struct inode *inode, qsize_t number); 43void inode_sub_rsv_space(struct inode *inode, qsize_t number);
44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); 44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
45 45
46void dquot_initialize(struct inode *inode); 46int dquot_initialize(struct inode *inode);
47void dquot_drop(struct inode *inode); 47void dquot_drop(struct inode *inode);
48struct dquot *dqget(struct super_block *sb, struct kqid qid); 48struct dquot *dqget(struct super_block *sb, struct kqid qid);
49static inline struct dquot *dqgrab(struct dquot *dquot) 49static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@ static inline int sb_has_quota_active(struct super_block *sb, int type)
200 return 0; 200 return 0;
201} 201}
202 202
203static inline void dquot_initialize(struct inode *inode) 203static inline int dquot_initialize(struct inode *inode)
204{ 204{
205 return 0;
205} 206}
206 207
207static inline void dquot_drop(struct inode *inode) 208static inline void dquot_drop(struct inode *inode)
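
dquot_initialize() now returns an error instead of failing silently, and the !CONFIG_QUOTA stub returns 0. A hedged sketch of the calling convention filesystems switch to; the two-inode pattern mirrors a typical unlink path.

#include <linux/fs.h>
#include <linux/quotaops.h>

static int example_unlink_prepare(struct inode *dir, struct inode *inode)
{
	int err;

	err = dquot_initialize(dir);	/* may fail, e.g. on quota file I/O errors */
	if (err)
		return err;

	return dquot_initialize(inode);
}
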
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4cf5f51b4c9c..ff476515f716 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -226,6 +226,37 @@ struct rcu_synchronize {
226}; 226};
227void wakeme_after_rcu(struct rcu_head *head); 227void wakeme_after_rcu(struct rcu_head *head);
228 228
229void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
230 struct rcu_synchronize *rs_array);
231
232#define _wait_rcu_gp(checktiny, ...) \
233do { \
234 call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
235 const int __n = ARRAY_SIZE(__crcu_array); \
236 struct rcu_synchronize __rs_array[__n]; \
237 \
238 __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
239} while (0)
240
241#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
242
243/**
244 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
245 * @...: List of call_rcu() functions for the flavors to wait on.
246 *
247 * This macro waits concurrently for multiple flavors of RCU grace periods.
248 * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
249 * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU
250 * domain requires you to write a wrapper function for that SRCU domain's
251 * call_srcu() function, supplying the corresponding srcu_struct.
252 *
253 * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
254 * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
255 * is automatically a grace period.
256 */
257#define synchronize_rcu_mult(...) \
258 _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
259
229/** 260/**
230 * call_rcu_tasks() - Queue an RCU for invocation task-based grace period 261 * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
231 * @head: structure to be used for queueing the RCU updates. 262 * @head: structure to be used for queueing the RCU updates.
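
The new synchronize_rcu_mult() waits for several RCU flavors in parallel; the call below comes straight from the kerneldoc example in the hunk above.

#include <linux/rcupdate.h>

static void example_flush_readers(void)
{
	/* One wait covers both an RCU and an RCU-bh grace period. */
	synchronize_rcu_mult(call_rcu, call_rcu_bh);
}
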
@@ -309,7 +340,7 @@ static inline void rcu_sysrq_end(void)
309} 340}
310#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ 341#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
311 342
312#ifdef CONFIG_RCU_USER_QS 343#ifdef CONFIG_NO_HZ_FULL
313void rcu_user_enter(void); 344void rcu_user_enter(void);
314void rcu_user_exit(void); 345void rcu_user_exit(void);
315#else 346#else
@@ -317,7 +348,7 @@ static inline void rcu_user_enter(void) { }
317static inline void rcu_user_exit(void) { } 348static inline void rcu_user_exit(void) { }
318static inline void rcu_user_hooks_switch(struct task_struct *prev, 349static inline void rcu_user_hooks_switch(struct task_struct *prev,
319 struct task_struct *next) { } 350 struct task_struct *next) { }
320#endif /* CONFIG_RCU_USER_QS */ 351#endif /* CONFIG_NO_HZ_FULL */
321 352
322#ifdef CONFIG_RCU_NOCB_CPU 353#ifdef CONFIG_RCU_NOCB_CPU
323void rcu_init_nohz(void); 354void rcu_init_nohz(void);
@@ -392,10 +423,6 @@ bool __rcu_is_watching(void);
392 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 423 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
393 */ 424 */
394 425
395typedef void call_rcu_func_t(struct rcu_head *head,
396 void (*func)(struct rcu_head *head));
397void wait_rcu_gp(call_rcu_func_t crf);
398
399#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) 426#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
400#include <linux/rcutree.h> 427#include <linux/rcutree.h>
401#elif defined(CONFIG_TINY_RCU) 428#elif defined(CONFIG_TINY_RCU)
@@ -469,46 +496,10 @@ int rcu_read_lock_bh_held(void);
469 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an 496 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
470 * RCU-sched read-side critical section. In absence of 497 * RCU-sched read-side critical section. In absence of
471 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side 498 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
472 * critical section unless it can prove otherwise. Note that disabling 499 * critical section unless it can prove otherwise.
473 * of preemption (including disabling irqs) counts as an RCU-sched
474 * read-side critical section. This is useful for debug checks in functions
475 * that required that they be called within an RCU-sched read-side
476 * critical section.
477 *
478 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
479 * and while lockdep is disabled.
480 *
481 * Note that if the CPU is in the idle loop from an RCU point of
482 * view (ie: that we are in the section between rcu_idle_enter() and
483 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
484 * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
485 * that are in such a section, considering these as in extended quiescent
486 * state, so such a CPU is effectively never in an RCU read-side critical
487 * section regardless of what RCU primitives it invokes. This state of
488 * affairs is required --- we need to keep an RCU-free window in idle
489 * where the CPU may possibly enter into low power mode. This way we can
490 * notice an extended quiescent state to other CPUs that started a grace
491 * period. Otherwise we would delay any grace period as long as we run in
492 * the idle task.
493 *
494 * Similarly, we avoid claiming an SRCU read lock held if the current
495 * CPU is offline.
496 */ 500 */
497#ifdef CONFIG_PREEMPT_COUNT 501#ifdef CONFIG_PREEMPT_COUNT
498static inline int rcu_read_lock_sched_held(void) 502int rcu_read_lock_sched_held(void);
499{
500 int lockdep_opinion = 0;
501
502 if (!debug_lockdep_rcu_enabled())
503 return 1;
504 if (!rcu_is_watching())
505 return 0;
506 if (!rcu_lockdep_current_cpu_online())
507 return 0;
508 if (debug_locks)
509 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
510 return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
511}
512#else /* #ifdef CONFIG_PREEMPT_COUNT */ 503#else /* #ifdef CONFIG_PREEMPT_COUNT */
513static inline int rcu_read_lock_sched_held(void) 504static inline int rcu_read_lock_sched_held(void)
514{ 505{
@@ -545,6 +536,11 @@ static inline int rcu_read_lock_sched_held(void)
545 536
546#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 537#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
547 538
539/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */
540static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
541{
542}
543
548#ifdef CONFIG_PROVE_RCU 544#ifdef CONFIG_PROVE_RCU
549 545
550/** 546/**
@@ -555,17 +551,32 @@ static inline int rcu_read_lock_sched_held(void)
555#define rcu_lockdep_assert(c, s) \ 551#define rcu_lockdep_assert(c, s) \
556 do { \ 552 do { \
557 static bool __section(.data.unlikely) __warned; \ 553 static bool __section(.data.unlikely) __warned; \
554 deprecate_rcu_lockdep_assert(); \
558 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ 555 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
559 __warned = true; \ 556 __warned = true; \
560 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ 557 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
561 } \ 558 } \
562 } while (0) 559 } while (0)
563 560
561/**
562 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
563 * @c: condition to check
564 * @s: informative message
565 */
566#define RCU_LOCKDEP_WARN(c, s) \
567 do { \
568 static bool __section(.data.unlikely) __warned; \
569 if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
570 __warned = true; \
571 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
572 } \
573 } while (0)
574
564#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) 575#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
565static inline void rcu_preempt_sleep_check(void) 576static inline void rcu_preempt_sleep_check(void)
566{ 577{
567 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), 578 RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
568 "Illegal context switch in RCU read-side critical section"); 579 "Illegal context switch in RCU read-side critical section");
569} 580}
570#else /* #ifdef CONFIG_PROVE_RCU */ 581#else /* #ifdef CONFIG_PROVE_RCU */
571static inline void rcu_preempt_sleep_check(void) 582static inline void rcu_preempt_sleep_check(void)
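
Note the inverted polarity: RCU_LOCKDEP_WARN(c, s) splats when c is true, whereas the deprecated rcu_lockdep_assert(c, s) splatted when c was false. A minimal conversion sketch for a function that must run under rcu_read_lock(); the function itself is hypothetical.

#include <linux/rcupdate.h>

static void example_requires_rcu_read_lock(void)
{
	/* Old form: rcu_lockdep_assert(rcu_read_lock_held(), "..."); */
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "example_requires_rcu_read_lock() needs rcu_read_lock()");

	/* ... dereference RCU-protected pointers here ... */
}
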
@@ -576,15 +587,16 @@ static inline void rcu_preempt_sleep_check(void)
576#define rcu_sleep_check() \ 587#define rcu_sleep_check() \
577 do { \ 588 do { \
578 rcu_preempt_sleep_check(); \ 589 rcu_preempt_sleep_check(); \
579 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ 590 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
580 "Illegal context switch in RCU-bh read-side critical section"); \ 591 "Illegal context switch in RCU-bh read-side critical section"); \
581 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ 592 RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
582 "Illegal context switch in RCU-sched read-side critical section"); \ 593 "Illegal context switch in RCU-sched read-side critical section"); \
583 } while (0) 594 } while (0)
584 595
585#else /* #ifdef CONFIG_PROVE_RCU */ 596#else /* #ifdef CONFIG_PROVE_RCU */
586 597
587#define rcu_lockdep_assert(c, s) do { } while (0) 598#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
599#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
588#define rcu_sleep_check() do { } while (0) 600#define rcu_sleep_check() do { } while (0)
589 601
590#endif /* #else #ifdef CONFIG_PROVE_RCU */ 602#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -615,13 +627,13 @@ static inline void rcu_preempt_sleep_check(void)
615({ \ 627({ \
616 /* Dependency order vs. p above. */ \ 628 /* Dependency order vs. p above. */ \
617 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ 629 typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
618 rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ 630 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
619 rcu_dereference_sparse(p, space); \ 631 rcu_dereference_sparse(p, space); \
620 ((typeof(*p) __force __kernel *)(________p1)); \ 632 ((typeof(*p) __force __kernel *)(________p1)); \
621}) 633})
622#define __rcu_dereference_protected(p, c, space) \ 634#define __rcu_dereference_protected(p, c, space) \
623({ \ 635({ \
624 rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \ 636 RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
625 rcu_dereference_sparse(p, space); \ 637 rcu_dereference_sparse(p, space); \
626 ((typeof(*p) __force __kernel *)(p)); \ 638 ((typeof(*p) __force __kernel *)(p)); \
627}) 639})
@@ -845,8 +857,8 @@ static inline void rcu_read_lock(void)
845 __rcu_read_lock(); 857 __rcu_read_lock();
846 __acquire(RCU); 858 __acquire(RCU);
847 rcu_lock_acquire(&rcu_lock_map); 859 rcu_lock_acquire(&rcu_lock_map);
848 rcu_lockdep_assert(rcu_is_watching(), 860 RCU_LOCKDEP_WARN(!rcu_is_watching(),
849 "rcu_read_lock() used illegally while idle"); 861 "rcu_read_lock() used illegally while idle");
850} 862}
851 863
852/* 864/*
@@ -896,8 +908,8 @@ static inline void rcu_read_lock(void)
896 */ 908 */
897static inline void rcu_read_unlock(void) 909static inline void rcu_read_unlock(void)
898{ 910{
899 rcu_lockdep_assert(rcu_is_watching(), 911 RCU_LOCKDEP_WARN(!rcu_is_watching(),
900 "rcu_read_unlock() used illegally while idle"); 912 "rcu_read_unlock() used illegally while idle");
901 __release(RCU); 913 __release(RCU);
902 __rcu_read_unlock(); 914 __rcu_read_unlock();
903 rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ 915 rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -925,8 +937,8 @@ static inline void rcu_read_lock_bh(void)
925 local_bh_disable(); 937 local_bh_disable();
926 __acquire(RCU_BH); 938 __acquire(RCU_BH);
927 rcu_lock_acquire(&rcu_bh_lock_map); 939 rcu_lock_acquire(&rcu_bh_lock_map);
928 rcu_lockdep_assert(rcu_is_watching(), 940 RCU_LOCKDEP_WARN(!rcu_is_watching(),
929 "rcu_read_lock_bh() used illegally while idle"); 941 "rcu_read_lock_bh() used illegally while idle");
930} 942}
931 943
932/* 944/*
@@ -936,8 +948,8 @@ static inline void rcu_read_lock_bh(void)
936 */ 948 */
937static inline void rcu_read_unlock_bh(void) 949static inline void rcu_read_unlock_bh(void)
938{ 950{
939 rcu_lockdep_assert(rcu_is_watching(), 951 RCU_LOCKDEP_WARN(!rcu_is_watching(),
940 "rcu_read_unlock_bh() used illegally while idle"); 952 "rcu_read_unlock_bh() used illegally while idle");
941 rcu_lock_release(&rcu_bh_lock_map); 953 rcu_lock_release(&rcu_bh_lock_map);
942 __release(RCU_BH); 954 __release(RCU_BH);
943 local_bh_enable(); 955 local_bh_enable();
@@ -961,8 +973,8 @@ static inline void rcu_read_lock_sched(void)
961 preempt_disable(); 973 preempt_disable();
962 __acquire(RCU_SCHED); 974 __acquire(RCU_SCHED);
963 rcu_lock_acquire(&rcu_sched_lock_map); 975 rcu_lock_acquire(&rcu_sched_lock_map);
964 rcu_lockdep_assert(rcu_is_watching(), 976 RCU_LOCKDEP_WARN(!rcu_is_watching(),
965 "rcu_read_lock_sched() used illegally while idle"); 977 "rcu_read_lock_sched() used illegally while idle");
966} 978}
967 979
968/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ 980/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -979,8 +991,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
979 */ 991 */
980static inline void rcu_read_unlock_sched(void) 992static inline void rcu_read_unlock_sched(void)
981{ 993{
982 rcu_lockdep_assert(rcu_is_watching(), 994 RCU_LOCKDEP_WARN(!rcu_is_watching(),
983 "rcu_read_unlock_sched() used illegally while idle"); 995 "rcu_read_unlock_sched() used illegally while idle");
984 rcu_lock_release(&rcu_sched_lock_map); 996 rcu_lock_release(&rcu_sched_lock_map);
985 __release(RCU_SCHED); 997 __release(RCU_SCHED);
986 preempt_enable(); 998 preempt_enable();
@@ -1031,7 +1043,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1031#define RCU_INIT_POINTER(p, v) \ 1043#define RCU_INIT_POINTER(p, v) \
1032 do { \ 1044 do { \
1033 rcu_dereference_sparse(p, __rcu); \ 1045 rcu_dereference_sparse(p, __rcu); \
1034 p = RCU_INITIALIZER(v); \ 1046 WRITE_ONCE(p, RCU_INITIALIZER(v)); \
1035 } while (0) 1047 } while (0)
1036 1048
1037/** 1049/**
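Note that the new macro inverts the sense of the condition: rcu_lockdep_assert(c, s) complained when c was false, while RCU_LOCKDEP_WARN(c, s) complains when c is true, which is why every converted call site in the hunks above negates its argument. A minimal caller-side sketch of the new style; gbl_lock, gbl_ptr and struct gbl_data are invented for illustration and are not part of this patch:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct gbl_data { int val; };			/* illustrative only */
static DEFINE_SPINLOCK(gbl_lock);
static struct gbl_data __rcu *gbl_ptr;

static struct gbl_data *get_gbl(void)
{
	/* Old form: rcu_lockdep_assert(rcu_read_lock_held() ||
	 *                              lockdep_is_held(&gbl_lock), "...");
	 */
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && !lockdep_is_held(&gbl_lock),
			 "get_gbl() needs rcu_read_lock() or gbl_lock");
	return rcu_dereference_check(gbl_ptr, lockdep_is_held(&gbl_lock));
}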
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3df6c1ec4e25..ff968b7af3a4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -37,6 +37,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
37 might_sleep(); 37 might_sleep();
38} 38}
39 39
40static inline unsigned long get_state_synchronize_sched(void)
41{
42 return 0;
43}
44
45static inline void cond_synchronize_sched(unsigned long oldstate)
46{
47 might_sleep();
48}
49
40static inline void rcu_barrier_bh(void) 50static inline void rcu_barrier_bh(void)
41{ 51{
42 wait_rcu_gp(call_rcu_bh); 52 wait_rcu_gp(call_rcu_bh);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 456879143f89..5abec82f325e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -76,6 +76,8 @@ void rcu_barrier_bh(void);
76void rcu_barrier_sched(void); 76void rcu_barrier_sched(void);
77unsigned long get_state_synchronize_rcu(void); 77unsigned long get_state_synchronize_rcu(void);
78void cond_synchronize_rcu(unsigned long oldstate); 78void cond_synchronize_rcu(unsigned long oldstate);
79unsigned long get_state_synchronize_sched(void);
80void cond_synchronize_sched(unsigned long oldstate);
79 81
80extern unsigned long rcutorture_testseq; 82extern unsigned long rcutorture_testseq;
81extern unsigned long rcutorture_vernum; 83extern unsigned long rcutorture_vernum;
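get_state_synchronize_sched() and cond_synchronize_sched() mirror the existing _rcu pair for the RCU-sched flavor (Tiny RCU reduces them to a constant and a might_sleep(), as in the rcutiny.h hunk above): the caller snapshots the grace-period state, does unrelated work, and then blocks only if a full sched grace period has not already elapsed. A hedged sketch; struct foo and the surrounding code are invented:

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct foo { struct list_head list; };		/* illustrative only */

static void retire_foo(struct foo *p)
{
	unsigned long gp_snap;

	list_del_rcu(&p->list);			/* unpublish; preempt-disabled readers may still run */
	gp_snap = get_state_synchronize_sched();
	/* ... other teardown work; a sched grace period may elapse on its own ... */
	cond_synchronize_sched(gp_snap);	/* falls back to synchronize_sched() only if needed */
	kfree(p);
}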
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea0f0b5..8fc0bfd8edc4 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -17,6 +17,7 @@
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/bug.h> 19#include <linux/bug.h>
20#include <linux/lockdep.h>
20 21
21struct module; 22struct module;
22struct device; 23struct device;
@@ -50,6 +51,20 @@ struct reg_default {
50 unsigned int def; 51 unsigned int def;
51}; 52};
52 53
54/**
55 * Register/value pairs for sequences of writes with an optional delay in
56 * microseconds to be applied after each write.
57 *
58 * @reg: Register address.
59 * @def: Register value.
60 * @delay_us: Delay to be applied after the register write in microseconds
61 */
62struct reg_sequence {
63 unsigned int reg;
64 unsigned int def;
65 unsigned int delay_us;
66};
67
53#ifdef CONFIG_REGMAP 68#ifdef CONFIG_REGMAP
54 69
55enum regmap_endian { 70enum regmap_endian {
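struct reg_sequence keeps the reg/def pair of struct reg_default but adds an optional per-write delay, and regmap_multi_reg_write(), regmap_multi_reg_write_bypassed() and regmap_register_patch() further down in this diff are converted to take it. A hedged init-sequence sketch; the device, register addresses and values are invented:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence mydev_init_seq[] = {	/* illustrative values */
	{ .reg = 0x00, .def = 0x01 },			/* soft reset */
	{ .reg = 0x01, .def = 0x80, .delay_us = 500 },	/* enable PLL, then let it settle */
	{ .reg = 0x02, .def = 0x3f },
};

static int mydev_hw_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, mydev_init_seq,
				      ARRAY_SIZE(mydev_init_seq));
}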
@@ -296,8 +311,12 @@ typedef void (*regmap_hw_free_context)(void *context);
296 * if not implemented on a given device. 311 * if not implemented on a given device.
297 * @async_write: Write operation which completes asynchronously, optional and 312 * @async_write: Write operation which completes asynchronously, optional and
298 * must serialise with respect to non-async I/O. 313 * must serialise with respect to non-async I/O.
314 * @reg_write: Write a single register value to the given register address. This
315 * write operation has to complete when returning from the function.
299 * @read: Read operation. Data is returned in the buffer used to transmit 316 * @read: Read operation. Data is returned in the buffer used to transmit
300 * data. 317 * data.
318 * @reg_read: Read a single register value from a given register address.
319 * @free_context: Free context.
301 * @async_alloc: Allocate a regmap_async() structure. 320 * @async_alloc: Allocate a regmap_async() structure.
302 * @read_flag_mask: Mask to be set in the top byte of the register when doing 321 * @read_flag_mask: Mask to be set in the top byte of the register when doing
303 * a read. 322 * a read.
@@ -307,7 +326,8 @@ typedef void (*regmap_hw_free_context)(void *context);
307 * @val_format_endian_default: Default endianness for formatted register 326 * @val_format_endian_default: Default endianness for formatted register
308 * values. Used when the regmap_config specifies DEFAULT. If this is 327 * values. Used when the regmap_config specifies DEFAULT. If this is
309 * DEFAULT, BIG is assumed. 328 * DEFAULT, BIG is assumed.
310 * @async_size: Size of struct used for async work. 329 * @max_raw_read: Max raw read size that can be used on the bus.
330 * @max_raw_write: Max raw write size that can be used on the bus.
311 */ 331 */
312struct regmap_bus { 332struct regmap_bus {
313 bool fast_io; 333 bool fast_io;
@@ -322,47 +342,186 @@ struct regmap_bus {
322 u8 read_flag_mask; 342 u8 read_flag_mask;
323 enum regmap_endian reg_format_endian_default; 343 enum regmap_endian reg_format_endian_default;
324 enum regmap_endian val_format_endian_default; 344 enum regmap_endian val_format_endian_default;
345 size_t max_raw_read;
346 size_t max_raw_write;
325}; 347};
326 348
327struct regmap *regmap_init(struct device *dev, 349/*
328 const struct regmap_bus *bus, 350 * __regmap_init functions.
329 void *bus_context, 351 *
330 const struct regmap_config *config); 352 * These functions take a lock key and name parameter, and should not be called
353 * directly. Instead, use the regmap_init macros that generate a key and name
354 * for each call.
355 */
356struct regmap *__regmap_init(struct device *dev,
357 const struct regmap_bus *bus,
358 void *bus_context,
359 const struct regmap_config *config,
360 struct lock_class_key *lock_key,
361 const char *lock_name);
362struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
363 const struct regmap_config *config,
364 struct lock_class_key *lock_key,
365 const char *lock_name);
366struct regmap *__regmap_init_spi(struct spi_device *dev,
367 const struct regmap_config *config,
368 struct lock_class_key *lock_key,
369 const char *lock_name);
370struct regmap *__regmap_init_spmi_base(struct spmi_device *dev,
371 const struct regmap_config *config,
372 struct lock_class_key *lock_key,
373 const char *lock_name);
374struct regmap *__regmap_init_spmi_ext(struct spmi_device *dev,
375 const struct regmap_config *config,
376 struct lock_class_key *lock_key,
377 const char *lock_name);
378struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
379 void __iomem *regs,
380 const struct regmap_config *config,
381 struct lock_class_key *lock_key,
382 const char *lock_name);
383struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
384 const struct regmap_config *config,
385 struct lock_class_key *lock_key,
386 const char *lock_name);
387
388struct regmap *__devm_regmap_init(struct device *dev,
389 const struct regmap_bus *bus,
390 void *bus_context,
391 const struct regmap_config *config,
392 struct lock_class_key *lock_key,
393 const char *lock_name);
394struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
395 const struct regmap_config *config,
396 struct lock_class_key *lock_key,
397 const char *lock_name);
398struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
399 const struct regmap_config *config,
400 struct lock_class_key *lock_key,
401 const char *lock_name);
402struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *dev,
403 const struct regmap_config *config,
404 struct lock_class_key *lock_key,
405 const char *lock_name);
406struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *dev,
407 const struct regmap_config *config,
408 struct lock_class_key *lock_key,
409 const char *lock_name);
410struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
411 const char *clk_id,
412 void __iomem *regs,
413 const struct regmap_config *config,
414 struct lock_class_key *lock_key,
415 const char *lock_name);
416struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
417 const struct regmap_config *config,
418 struct lock_class_key *lock_key,
419 const char *lock_name);
420
421/*
422 * Wrapper for regmap_init macros to include a unique lockdep key and name
423 * for each call. No-op if CONFIG_LOCKDEP is not set.
424 *
425 * @fn: Real function to call (in the form __[*_]regmap_init[_*])
426 * @name: Config variable name (#config in the calling macro)
427 **/
428#ifdef CONFIG_LOCKDEP
429#define __regmap_lockdep_wrapper(fn, name, ...) \
430( \
431 ({ \
432 static struct lock_class_key _key; \
433 fn(__VA_ARGS__, &_key, \
434 KBUILD_BASENAME ":" \
435 __stringify(__LINE__) ":" \
436 "(" name ")->lock"); \
437 }) \
438)
439#else
440#define __regmap_lockdep_wrapper(fn, name, ...) fn(__VA_ARGS__, NULL, NULL)
441#endif
442
443/**
444 * regmap_init(): Initialise register map
445 *
446 * @dev: Device that will be interacted with
447 * @bus: Bus-specific callbacks to use with device
448 * @bus_context: Data passed to bus-specific callbacks
449 * @config: Configuration for register map
450 *
451 * The return value will be an ERR_PTR() on error or a valid pointer to
452 * a struct regmap. This function should generally not be called
453 * directly, it should be called by bus-specific init functions.
454 */
455#define regmap_init(dev, bus, bus_context, config) \
456 __regmap_lockdep_wrapper(__regmap_init, #config, \
457 dev, bus, bus_context, config)
331int regmap_attach_dev(struct device *dev, struct regmap *map, 458int regmap_attach_dev(struct device *dev, struct regmap *map,
332 const struct regmap_config *config); 459 const struct regmap_config *config);
333struct regmap *regmap_init_i2c(struct i2c_client *i2c,
334 const struct regmap_config *config);
335struct regmap *regmap_init_spi(struct spi_device *dev,
336 const struct regmap_config *config);
337struct regmap *regmap_init_spmi_base(struct spmi_device *dev,
338 const struct regmap_config *config);
339struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
340 const struct regmap_config *config);
341struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
342 void __iomem *regs,
343 const struct regmap_config *config);
344struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
345 const struct regmap_config *config);
346
347struct regmap *devm_regmap_init(struct device *dev,
348 const struct regmap_bus *bus,
349 void *bus_context,
350 const struct regmap_config *config);
351struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
352 const struct regmap_config *config);
353struct regmap *devm_regmap_init_spi(struct spi_device *dev,
354 const struct regmap_config *config);
355struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev,
356 const struct regmap_config *config);
357struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
358 const struct regmap_config *config);
359struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
360 void __iomem *regs,
361 const struct regmap_config *config);
362struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
363 const struct regmap_config *config);
364 460
365bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); 461/**
462 * regmap_init_i2c(): Initialise register map
463 *
464 * @i2c: Device that will be interacted with
465 * @config: Configuration for register map
466 *
467 * The return value will be an ERR_PTR() on error or a valid pointer to
468 * a struct regmap.
469 */
470#define regmap_init_i2c(i2c, config) \
471 __regmap_lockdep_wrapper(__regmap_init_i2c, #config, \
472 i2c, config)
473
474/**
475 * regmap_init_spi(): Initialise register map
476 *
477 * @spi: Device that will be interacted with
478 * @config: Configuration for register map
479 *
480 * The return value will be an ERR_PTR() on error or a valid pointer to
481 * a struct regmap.
482 */
483#define regmap_init_spi(dev, config) \
484 __regmap_lockdep_wrapper(__regmap_init_spi, #config, \
485 dev, config)
486
487/**
488 * regmap_init_spmi_base(): Create regmap for the Base register space
489 * @sdev: SPMI device that will be interacted with
490 * @config: Configuration for register map
491 *
492 * The return value will be an ERR_PTR() on error or a valid pointer to
493 * a struct regmap.
494 */
495#define regmap_init_spmi_base(dev, config) \
496 __regmap_lockdep_wrapper(__regmap_init_spmi_base, #config, \
497 dev, config)
498
499/**
500 * regmap_init_spmi_ext(): Create regmap for Ext register space
501 * @sdev: Device that will be interacted with
502 * @config: Configuration for register map
503 *
504 * The return value will be an ERR_PTR() on error or a valid pointer to
505 * a struct regmap.
506 */
507#define regmap_init_spmi_ext(dev, config) \
508 __regmap_lockdep_wrapper(__regmap_init_spmi_ext, #config, \
509 dev, config)
510
511/**
512 * regmap_init_mmio_clk(): Initialise register map with register clock
513 *
514 * @dev: Device that will be interacted with
515 * @clk_id: register clock consumer ID
516 * @regs: Pointer to memory-mapped IO region
517 * @config: Configuration for register map
518 *
519 * The return value will be an ERR_PTR() on error or a valid pointer to
520 * a struct regmap.
521 */
522#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
523 __regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
524 dev, clk_id, regs, config)
366 525
367/** 526/**
368 * regmap_init_mmio(): Initialise register map 527 * regmap_init_mmio(): Initialise register map
@@ -374,12 +533,109 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
374 * The return value will be an ERR_PTR() on error or a valid pointer to 533 * The return value will be an ERR_PTR() on error or a valid pointer to
375 * a struct regmap. 534 * a struct regmap.
376 */ 535 */
377static inline struct regmap *regmap_init_mmio(struct device *dev, 536#define regmap_init_mmio(dev, regs, config) \
378 void __iomem *regs, 537 regmap_init_mmio_clk(dev, NULL, regs, config)
379 const struct regmap_config *config) 538
380{ 539/**
381 return regmap_init_mmio_clk(dev, NULL, regs, config); 540 * regmap_init_ac97(): Initialise AC'97 register map
382} 541 *
542 * @ac97: Device that will be interacted with
543 * @config: Configuration for register map
544 *
545 * The return value will be an ERR_PTR() on error or a valid pointer to
546 * a struct regmap.
547 */
548#define regmap_init_ac97(ac97, config) \
549 __regmap_lockdep_wrapper(__regmap_init_ac97, #config, \
550 ac97, config)
551bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
552
553/**
554 * devm_regmap_init(): Initialise managed register map
555 *
556 * @dev: Device that will be interacted with
557 * @bus: Bus-specific callbacks to use with device
558 * @bus_context: Data passed to bus-specific callbacks
559 * @config: Configuration for register map
560 *
561 * The return value will be an ERR_PTR() on error or a valid pointer
562 * to a struct regmap. This function should generally not be called
563 * directly, it should be called by bus-specific init functions. The
564 * map will be automatically freed by the device management code.
565 */
566#define devm_regmap_init(dev, bus, bus_context, config) \
567 __regmap_lockdep_wrapper(__devm_regmap_init, #config, \
568 dev, bus, bus_context, config)
569
570/**
571 * devm_regmap_init_i2c(): Initialise managed register map
572 *
573 * @i2c: Device that will be interacted with
574 * @config: Configuration for register map
575 *
576 * The return value will be an ERR_PTR() on error or a valid pointer
577 * to a struct regmap. The regmap will be automatically freed by the
578 * device management code.
579 */
580#define devm_regmap_init_i2c(i2c, config) \
581 __regmap_lockdep_wrapper(__devm_regmap_init_i2c, #config, \
582 i2c, config)
583
584/**
585 * devm_regmap_init_spi(): Initialise register map
586 *
587 * @spi: Device that will be interacted with
588 * @config: Configuration for register map
589 *
590 * The return value will be an ERR_PTR() on error or a valid pointer
591 * to a struct regmap. The map will be automatically freed by the
592 * device management code.
593 */
594#define devm_regmap_init_spi(dev, config) \
595 __regmap_lockdep_wrapper(__devm_regmap_init_spi, #config, \
596 dev, config)
597
598/**
599 * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
600 * @sdev: SPMI device that will be interacted with
601 * @config: Configuration for register map
602 *
603 * The return value will be an ERR_PTR() on error or a valid pointer
604 * to a struct regmap. The regmap will be automatically freed by the
605 * device management code.
606 */
607#define devm_regmap_init_spmi_base(dev, config) \
608 __regmap_lockdep_wrapper(__devm_regmap_init_spmi_base, #config, \
609 dev, config)
610
611/**
612 * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
613 * @sdev: SPMI device that will be interacted with
614 * @config: Configuration for register map
615 *
616 * The return value will be an ERR_PTR() on error or a valid pointer
617 * to a struct regmap. The regmap will be automatically freed by the
618 * device management code.
619 */
620#define devm_regmap_init_spmi_ext(dev, config) \
621 __regmap_lockdep_wrapper(__devm_regmap_init_spmi_ext, #config, \
622 dev, config)
623
624/**
625 * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
626 *
627 * @dev: Device that will be interacted with
628 * @clk_id: register clock consumer ID
629 * @regs: Pointer to memory-mapped IO region
630 * @config: Configuration for register map
631 *
632 * The return value will be an ERR_PTR() on error or a valid pointer
633 * to a struct regmap. The regmap will be automatically freed by the
634 * device management code.
635 */
636#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
637 __regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
638 dev, clk_id, regs, config)
383 639
384/** 640/**
385 * devm_regmap_init_mmio(): Initialise managed register map 641 * devm_regmap_init_mmio(): Initialise managed register map
@@ -392,12 +648,22 @@ static inline struct regmap *regmap_init_mmio(struct device *dev,
392 * to a struct regmap. The regmap will be automatically freed by the 648 * to a struct regmap. The regmap will be automatically freed by the
393 * device management code. 649 * device management code.
394 */ 650 */
395static inline struct regmap *devm_regmap_init_mmio(struct device *dev, 651#define devm_regmap_init_mmio(dev, regs, config) \
396 void __iomem *regs, 652 devm_regmap_init_mmio_clk(dev, NULL, regs, config)
397 const struct regmap_config *config) 653
398{ 654/**
399 return devm_regmap_init_mmio_clk(dev, NULL, regs, config); 655 * devm_regmap_init_ac97(): Initialise AC'97 register map
400} 656 *
657 * @ac97: Device that will be interacted with
658 * @config: Configuration for register map
659 *
660 * The return value will be an ERR_PTR() on error or a valid pointer
661 * to a struct regmap. The regmap will be automatically freed by the
662 * device management code.
663 */
664#define devm_regmap_init_ac97(ac97, config) \
665 __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
666 ac97, config)
401 667
402void regmap_exit(struct regmap *map); 668void regmap_exit(struct regmap *map);
403int regmap_reinit_cache(struct regmap *map, 669int regmap_reinit_cache(struct regmap *map,
@@ -410,10 +676,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
410 const void *val, size_t val_len); 676 const void *val, size_t val_len);
411int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 677int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
412 size_t val_count); 678 size_t val_count);
413int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, 679int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
414 int num_regs); 680 int num_regs);
415int regmap_multi_reg_write_bypassed(struct regmap *map, 681int regmap_multi_reg_write_bypassed(struct regmap *map,
416 const struct reg_default *regs, 682 const struct reg_sequence *regs,
417 int num_regs); 683 int num_regs);
418int regmap_raw_write_async(struct regmap *map, unsigned int reg, 684int regmap_raw_write_async(struct regmap *map, unsigned int reg,
419 const void *val, size_t val_len); 685 const void *val, size_t val_len);
@@ -424,6 +690,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
424 size_t val_count); 690 size_t val_count);
425int regmap_update_bits(struct regmap *map, unsigned int reg, 691int regmap_update_bits(struct regmap *map, unsigned int reg,
426 unsigned int mask, unsigned int val); 692 unsigned int mask, unsigned int val);
693int regmap_write_bits(struct regmap *map, unsigned int reg,
694 unsigned int mask, unsigned int val);
427int regmap_update_bits_async(struct regmap *map, unsigned int reg, 695int regmap_update_bits_async(struct regmap *map, unsigned int reg,
428 unsigned int mask, unsigned int val); 696 unsigned int mask, unsigned int val);
429int regmap_update_bits_check(struct regmap *map, unsigned int reg, 697int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -437,6 +705,8 @@ int regmap_get_max_register(struct regmap *map);
437int regmap_get_reg_stride(struct regmap *map); 705int regmap_get_reg_stride(struct regmap *map);
438int regmap_async_complete(struct regmap *map); 706int regmap_async_complete(struct regmap *map);
439bool regmap_can_raw_write(struct regmap *map); 707bool regmap_can_raw_write(struct regmap *map);
708size_t regmap_get_raw_read_max(struct regmap *map);
709size_t regmap_get_raw_write_max(struct regmap *map);
440 710
441int regcache_sync(struct regmap *map); 711int regcache_sync(struct regmap *map);
442int regcache_sync_region(struct regmap *map, unsigned int min, 712int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -450,7 +720,7 @@ void regcache_mark_dirty(struct regmap *map);
450bool regmap_check_range_table(struct regmap *map, unsigned int reg, 720bool regmap_check_range_table(struct regmap *map, unsigned int reg,
451 const struct regmap_access_table *table); 721 const struct regmap_access_table *table);
452 722
453int regmap_register_patch(struct regmap *map, const struct reg_default *regs, 723int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
454 int num_regs); 724 int num_regs);
455int regmap_parse_val(struct regmap *map, const void *buf, 725int regmap_parse_val(struct regmap *map, const void *buf,
456 unsigned int *val); 726 unsigned int *val);
@@ -503,6 +773,8 @@ int regmap_field_update_bits(struct regmap_field *field,
503 773
504int regmap_fields_write(struct regmap_field *field, unsigned int id, 774int regmap_fields_write(struct regmap_field *field, unsigned int id,
505 unsigned int val); 775 unsigned int val);
776int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
777 unsigned int val);
506int regmap_fields_read(struct regmap_field *field, unsigned int id, 778int regmap_fields_read(struct regmap_field *field, unsigned int id,
507 unsigned int *val); 779 unsigned int *val);
508int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, 780int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
@@ -645,6 +917,13 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
645 return -EINVAL; 917 return -EINVAL;
646} 918}
647 919
920static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
921 unsigned int mask, unsigned int val)
922{
923 WARN_ONCE(1, "regmap API is disabled");
924 return -EINVAL;
925}
926
648static inline int regmap_update_bits_async(struct regmap *map, 927static inline int regmap_update_bits_async(struct regmap *map,
649 unsigned int reg, 928 unsigned int reg,
650 unsigned int mask, unsigned int val) 929 unsigned int mask, unsigned int val)
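After this conversion the regmap_init*()/devm_regmap_init*() call sites look exactly as before; the macros merely route through __regmap_lockdep_wrapper() so that each map gets a lockdep class keyed to the call site and named after the config argument. A hedged I2C probe sketch that also uses the new regmap_write_bits(); the device name, registers and masks are invented:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config mydev_regmap_config = {	/* illustrative */
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
};

static int mydev_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
	struct regmap *map;

	/* Expands via __regmap_lockdep_wrapper(), giving this map a lock
	 * class of its own rather than sharing one across all regmaps. */
	map = devm_regmap_init_i2c(i2c, &mydev_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* regmap_write_bits() behaves like regmap_update_bits() but always
	 * performs the write, even if the new value matches the cached one. */
	return regmap_write_bits(map, 0x10, 0x01, 0x01);
}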
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a689ed62a5..9e0e76992be0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -550,8 +550,24 @@ static inline int regulator_count_voltages(struct regulator *regulator)
550{ 550{
551 return 0; 551 return 0;
552} 552}
553
554static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector)
555{
556 return -EINVAL;
557}
558
553#endif 559#endif
554 560
561static inline int regulator_set_voltage_triplet(struct regulator *regulator,
562 int min_uV, int target_uV,
563 int max_uV)
564{
565 if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
566 return 0;
567
568 return regulator_set_voltage(regulator, min_uV, max_uV);
569}
570
555static inline int regulator_set_voltage_tol(struct regulator *regulator, 571static inline int regulator_set_voltage_tol(struct regulator *regulator,
556 int new_uV, int tol_uV) 572 int new_uV, int tol_uV)
557{ 573{
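The new consumer helper first tries the narrower [target_uV, max_uV] window and only falls back to the full [min_uV, max_uV] range when that fails, which suits the min/target/max voltage triplets used by operating-point tables. A hedged sketch with invented voltages:

#include <linux/regulator/consumer.h>

/* Illustrative OPP voltages: 0.90 V minimum, 0.95 V target, 1.00 V maximum. */
static int mydev_set_opp_voltage(struct regulator *vdd)
{
	return regulator_set_voltage_triplet(vdd, 900000, 950000, 1000000);
}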
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5dd65acc2a69..a43a5ca1167b 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,16 +1,16 @@
1/* 1/*
2 * da9211.h - Regulator device driver for DA9211/DA9213 2 * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
3 * Copyright (C) 2014 Dialog Semiconductor Ltd. 3 * Copyright (C) 2015 Dialog Semiconductor Ltd.
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public 6 * modify it under the terms of the GNU General Public License
7 * License as published by the Free Software Foundation; either 7 * as published by the Free Software Foundation; either version 2
8 * version 2 of the License, or (at your option) any later version. 8 * of the License, or (at your option) any later version.
9 * 9 *
10 * This library is distributed in the hope that it will be useful, 10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * Library General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#ifndef __LINUX_REGULATOR_DA9211_H 16#ifndef __LINUX_REGULATOR_DA9211_H
@@ -23,6 +23,7 @@
23enum da9211_chip_id { 23enum da9211_chip_id {
24 DA9211, 24 DA9211,
25 DA9213, 25 DA9213,
26 DA9215,
26}; 27};
27 28
28struct da9211_pdata { 29struct da9211_pdata {
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4db9fbe4889d..45932228cbf5 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -148,6 +148,7 @@ struct regulator_ops {
148 int (*get_current_limit) (struct regulator_dev *); 148 int (*get_current_limit) (struct regulator_dev *);
149 149
150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); 150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
151 int (*set_over_current_protection) (struct regulator_dev *);
151 152
152 /* enable/disable regulator */ 153 /* enable/disable regulator */
153 int (*enable) (struct regulator_dev *); 154 int (*enable) (struct regulator_dev *);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b11be1260129..a1067d0b3991 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -147,6 +147,7 @@ struct regulation_constraints {
147 unsigned ramp_disable:1; /* disable ramp delay */ 147 unsigned ramp_disable:1; /* disable ramp delay */
148 unsigned soft_start:1; /* ramp voltage slowly */ 148 unsigned soft_start:1; /* ramp voltage slowly */
149 unsigned pull_down:1; /* pull down resistor when regulator off */ 149 unsigned pull_down:1; /* pull down resistor when regulator off */
150 unsigned over_current_protection:1; /* auto disable on over current */
150}; 151};
151 152
152/** 153/**
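The new over_current_protection constraint bit pairs with the set_over_current_protection() op added to struct regulator_ops above: when the constraint is set, the core asks the driver to enable automatic shutdown on over-current. A hedged board-data sketch; the supply name, limits and flags are invented:

#include <linux/regulator/machine.h>

static struct regulator_init_data board_vbuck1_data = {		/* illustrative */
	.constraints = {
		.min_uV = 600000,
		.max_uV = 1393750,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		.over_current_protection = true,
	},
};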
diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h
new file mode 100644
index 000000000000..8473259395b6
--- /dev/null
+++ b/include/linux/regulator/mt6311.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2015 MediaTek Inc.
3 * Author: Henry Chen <henryc.chen@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __LINUX_REGULATOR_MT6311_H
16#define __LINUX_REGULATOR_MT6311_H
17
18#define MT6311_MAX_REGULATORS 2
19
20enum {
21 MT6311_ID_VDVFS = 0,
22 MT6311_ID_VBIASN,
23};
24
25#define MT6311_E1_CID_CODE 0x10
26#define MT6311_E2_CID_CODE 0x20
27#define MT6311_E3_CID_CODE 0x30
28
29#endif /* __LINUX_REGULATOR_MT6311_H */
diff --git a/include/linux/reset.h b/include/linux/reset.h
index da5602bd77d7..7f65f9cff951 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -74,6 +74,20 @@ static inline int device_reset_optional(struct device *dev)
74 return -ENOSYS; 74 return -ENOSYS;
75} 75}
76 76
77static inline struct reset_control *__must_check reset_control_get(
78 struct device *dev, const char *id)
79{
80 WARN_ON(1);
81 return ERR_PTR(-EINVAL);
82}
83
84static inline struct reset_control *__must_check devm_reset_control_get(
85 struct device *dev, const char *id)
86{
87 WARN_ON(1);
88 return ERR_PTR(-EINVAL);
89}
90
77static inline struct reset_control *reset_control_get_optional( 91static inline struct reset_control *reset_control_get_optional(
78 struct device *dev, const char *id) 92 struct device *dev, const char *id)
79{ 93{
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c89c53a113a8..29446aeef36e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@ enum ttu_flags {
89 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */ 89 TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
90 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */ 90 TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
91 TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */ 91 TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
92 TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
93 * and caller guarantees they will
94 * do a final flush if necessary */
92}; 95};
93 96
94#ifdef CONFIG_MMU 97#ifdef CONFIG_MMU
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c820a7..556ec1ea2574 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
161static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, 161static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
162 struct scatterlist *sgl) 162 struct scatterlist *sgl)
163{ 163{
164#ifndef CONFIG_ARCH_HAS_SG_CHAIN
165 BUG();
166#endif
167
168 /* 164 /*
169 * offset and length are unused for chain entry. Clear them. 165 * offset and length are unused for chain entry. Clear them.
170 */ 166 */
@@ -251,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
251struct scatterlist *sg_last(struct scatterlist *s, unsigned int); 247struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
252void sg_init_table(struct scatterlist *, unsigned int); 248void sg_init_table(struct scatterlist *, unsigned int);
253void sg_init_one(struct scatterlist *, const void *, unsigned int); 249void sg_init_one(struct scatterlist *, const void *, unsigned int);
250int sg_split(struct scatterlist *in, const int in_mapped_nents,
251 const off_t skip, const int nb_splits,
252 const size_t *split_sizes,
253 struct scatterlist **out, int *out_mapped_nents,
254 gfp_t gfp_mask);
254 255
255typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); 256typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
256typedef void (sg_free_fn)(struct scatterlist *, unsigned int); 257typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
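sg_split() carves an existing scatterlist into several newly allocated ones at caller-supplied byte sizes; if the input was DMA-mapped, the mapped entries are split as well. A hedged sketch that peels a 4 KiB head off a mapped list; all names and the exact handling of mapped entries are assumptions on my part, not taken from this hunk beyond the prototype:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Split @sgl (assumed already dma_map_sg()'d into @mapped_nents entries,
 * @total_len bytes long) into a 4 KiB head and the remainder. The output
 * tables are allocated by sg_split() and freed by the caller with kfree(). */
static int split_off_head(struct scatterlist *sgl, int mapped_nents,
			  size_t total_len, struct scatterlist *out[2],
			  int out_mapped_nents[2])
{
	const size_t sizes[2] = { 4096, total_len - 4096 };

	return sg_split(sgl, mapped_nents, 0 /* skip */, 2 /* nb_splits */,
			sizes, out, out_mapped_nents, GFP_KERNEL);
}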
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04b5ada460b4..a4ab9daa387c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -530,39 +530,49 @@ struct cpu_itimer {
530}; 530};
531 531
532/** 532/**
533 * struct cputime - snapshot of system and user cputime 533 * struct prev_cputime - snapshot of system and user cputime
534 * @utime: time spent in user mode 534 * @utime: time spent in user mode
535 * @stime: time spent in system mode 535 * @stime: time spent in system mode
536 * @lock: protects the above two fields
536 * 537 *
537 * Gathers a generic snapshot of user and system time. 538 * Stores previous user/system time values such that we can guarantee
539 * monotonicity.
538 */ 540 */
539struct cputime { 541struct prev_cputime {
542#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
540 cputime_t utime; 543 cputime_t utime;
541 cputime_t stime; 544 cputime_t stime;
545 raw_spinlock_t lock;
546#endif
542}; 547};
543 548
549static inline void prev_cputime_init(struct prev_cputime *prev)
550{
551#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
552 prev->utime = prev->stime = 0;
553 raw_spin_lock_init(&prev->lock);
554#endif
555}
556
544/** 557/**
545 * struct task_cputime - collected CPU time counts 558 * struct task_cputime - collected CPU time counts
546 * @utime: time spent in user mode, in &cputime_t units 559 * @utime: time spent in user mode, in &cputime_t units
547 * @stime: time spent in kernel mode, in &cputime_t units 560 * @stime: time spent in kernel mode, in &cputime_t units
548 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds 561 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
549 * 562 *
550 * This is an extension of struct cputime that includes the total runtime 563 * This structure groups together three kinds of CPU time that are tracked for
551 * spent by the task from the scheduler point of view. 564 * threads and thread groups. Most things considering CPU time want to group
552 * 565 * these counts together and treat all three of them in parallel.
553 * As a result, this structure groups together three kinds of CPU time
554 * that are tracked for threads and thread groups. Most things considering
555 * CPU time want to group these counts together and treat all three
556 * of them in parallel.
557 */ 566 */
558struct task_cputime { 567struct task_cputime {
559 cputime_t utime; 568 cputime_t utime;
560 cputime_t stime; 569 cputime_t stime;
561 unsigned long long sum_exec_runtime; 570 unsigned long long sum_exec_runtime;
562}; 571};
572
563/* Alternate field names when used to cache expirations. */ 573/* Alternate field names when used to cache expirations. */
564#define prof_exp stime
565#define virt_exp utime 574#define virt_exp utime
575#define prof_exp stime
566#define sched_exp sum_exec_runtime 576#define sched_exp sum_exec_runtime
567 577
568#define INIT_CPUTIME \ 578#define INIT_CPUTIME \
@@ -715,9 +725,7 @@ struct signal_struct {
715 cputime_t utime, stime, cutime, cstime; 725 cputime_t utime, stime, cutime, cstime;
716 cputime_t gtime; 726 cputime_t gtime;
717 cputime_t cgtime; 727 cputime_t cgtime;
718#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 728 struct prev_cputime prev_cputime;
719 struct cputime prev_cputime;
720#endif
721 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 729 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
722 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; 730 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
723 unsigned long inblock, oublock, cinblock, coublock; 731 unsigned long inblock, oublock, cinblock, coublock;
@@ -1167,29 +1175,24 @@ struct load_weight {
1167 u32 inv_weight; 1175 u32 inv_weight;
1168}; 1176};
1169 1177
1178/*
1179 * The load_avg/util_avg accumulates an infinite geometric series.
1180 * 1) load_avg factors the amount of time that a sched_entity is
1181 * runnable on a rq into its weight. For cfs_rq, it is the aggregated
1182 * such weights of all runnable and blocked sched_entities.
1183 * 2) util_avg factors frequency scaling into the amount of time
1184 * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
1185 * For cfs_rq, it is the aggregated such times of all runnable and
1186 * blocked sched_entities.
1187 * The 64 bit load_sum can:
1188 * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
1189 * the highest weight (=88761) always runnable, we should not overflow
1190 * 2) for entity, support any load.weight always runnable
1191 */
1170struct sched_avg { 1192struct sched_avg {
1171 u64 last_runnable_update; 1193 u64 last_update_time, load_sum;
1172 s64 decay_count; 1194 u32 util_sum, period_contrib;
1173 /* 1195 unsigned long load_avg, util_avg;
1174 * utilization_avg_contrib describes the amount of time that a
1175 * sched_entity is running on a CPU. It is based on running_avg_sum
1176 * and is scaled in the range [0..SCHED_LOAD_SCALE].
1177 * load_avg_contrib described the amount of time that a sched_entity
1178 * is runnable on a rq. It is based on both runnable_avg_sum and the
1179 * weight of the task.
1180 */
1181 unsigned long load_avg_contrib, utilization_avg_contrib;
1182 /*
1183 * These sums represent an infinite geometric series and so are bound
1184 * above by 1024/(1-y). Thus we only need a u32 to store them for all
1185 * choices of y < 1-2^(-32)*1024.
1186 * running_avg_sum reflects the time that the sched_entity is
1187 * effectively running on the CPU.
1188 * runnable_avg_sum represents the amount of time a sched_entity is on
1189 * a runqueue which includes the running time that is monitored by
1190 * running_avg_sum.
1191 */
1192 u32 runnable_avg_sum, avg_period, running_avg_sum;
1193}; 1196};
1194 1197
1195#ifdef CONFIG_SCHEDSTATS 1198#ifdef CONFIG_SCHEDSTATS
@@ -1255,7 +1258,7 @@ struct sched_entity {
1255#endif 1258#endif
1256 1259
1257#ifdef CONFIG_SMP 1260#ifdef CONFIG_SMP
1258 /* Per-entity load-tracking */ 1261 /* Per entity load average tracking */
1259 struct sched_avg avg; 1262 struct sched_avg avg;
1260#endif 1263#endif
1261}; 1264};
@@ -1341,6 +1344,25 @@ enum perf_event_task_context {
1341 perf_nr_task_contexts, 1344 perf_nr_task_contexts,
1342}; 1345};
1343 1346
1347/* Track pages that require TLB flushes */
1348struct tlbflush_unmap_batch {
1349 /*
1350 * Each bit set is a CPU that potentially has a TLB entry for one of
1351 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
1352 */
1353 struct cpumask cpumask;
1354
1355 /* True if any bit in cpumask is set */
1356 bool flush_required;
1357
1358 /*
1359 * If true then the PTE was dirty when unmapped. The entry must be
1360 * flushed before IO is initiated or a stale TLB entry potentially
1361 * allows an update without redirtying the page.
1362 */
1363 bool writable;
1364};
1365
1344struct task_struct { 1366struct task_struct {
1345 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1367 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
1346 void *stack; 1368 void *stack;
@@ -1351,9 +1373,9 @@ struct task_struct {
1351#ifdef CONFIG_SMP 1373#ifdef CONFIG_SMP
1352 struct llist_node wake_entry; 1374 struct llist_node wake_entry;
1353 int on_cpu; 1375 int on_cpu;
1354 struct task_struct *last_wakee; 1376 unsigned int wakee_flips;
1355 unsigned long wakee_flips;
1356 unsigned long wakee_flip_decay_ts; 1377 unsigned long wakee_flip_decay_ts;
1378 struct task_struct *last_wakee;
1357 1379
1358 int wake_cpu; 1380 int wake_cpu;
1359#endif 1381#endif
@@ -1481,9 +1503,7 @@ struct task_struct {
1481 1503
1482 cputime_t utime, stime, utimescaled, stimescaled; 1504 cputime_t utime, stime, utimescaled, stimescaled;
1483 cputime_t gtime; 1505 cputime_t gtime;
1484#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE 1506 struct prev_cputime prev_cputime;
1485 struct cputime prev_cputime;
1486#endif
1487#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN 1507#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1488 seqlock_t vtime_seqlock; 1508 seqlock_t vtime_seqlock;
1489 unsigned long long vtime_snap; 1509 unsigned long long vtime_snap;
@@ -1699,6 +1719,10 @@ struct task_struct {
1699 unsigned long numa_pages_migrated; 1719 unsigned long numa_pages_migrated;
1700#endif /* CONFIG_NUMA_BALANCING */ 1720#endif /* CONFIG_NUMA_BALANCING */
1701 1721
1722#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1723 struct tlbflush_unmap_batch tlb_ubc;
1724#endif
1725
1702 struct rcu_head rcu; 1726 struct rcu_head rcu;
1703 1727
1704 /* 1728 /*
@@ -2214,13 +2238,6 @@ static inline void calc_load_enter_idle(void) { }
2214static inline void calc_load_exit_idle(void) { } 2238static inline void calc_load_exit_idle(void) { }
2215#endif /* CONFIG_NO_HZ_COMMON */ 2239#endif /* CONFIG_NO_HZ_COMMON */
2216 2240
2217#ifndef CONFIG_CPUMASK_OFFSTACK
2218static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2219{
2220 return set_cpus_allowed_ptr(p, &new_mask);
2221}
2222#endif
2223
2224/* 2241/*
2225 * Do not use outside of architecture code which knows its limitations. 2242 * Do not use outside of architecture code which knows its limitations.
2226 * 2243 *
@@ -2897,12 +2914,6 @@ extern int _cond_resched(void);
2897 2914
2898extern int __cond_resched_lock(spinlock_t *lock); 2915extern int __cond_resched_lock(spinlock_t *lock);
2899 2916
2900#ifdef CONFIG_PREEMPT_COUNT
2901#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2902#else
2903#define PREEMPT_LOCK_OFFSET 0
2904#endif
2905
2906#define cond_resched_lock(lock) ({ \ 2917#define cond_resched_lock(lock) ({ \
2907 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ 2918 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2908 __cond_resched_lock(lock); \ 2919 __cond_resched_lock(lock); \
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index a19ddacdac30..f4265039a94c 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -78,7 +78,7 @@ static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
78 78
79static inline int seccomp_mode(struct seccomp *s) 79static inline int seccomp_mode(struct seccomp *s)
80{ 80{
81 return 0; 81 return SECCOMP_MODE_DISABLED;
82} 82}
83#endif /* CONFIG_SECCOMP */ 83#endif /* CONFIG_SECCOMP */
84 84
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 912a7c482649..adeadbd6d7bf 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -122,6 +122,10 @@ int seq_write(struct seq_file *seq, const void *data, size_t len);
122__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); 122__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
123__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); 123__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
124 124
125void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
126 int rowsize, int groupsize, const void *buf, size_t len,
127 bool ascii);
128
125int seq_path(struct seq_file *, const struct path *, const char *); 129int seq_path(struct seq_file *, const struct path *, const char *);
126int seq_file_path(struct seq_file *, struct file *, const char *); 130int seq_file_path(struct seq_file *, struct file *, const char *);
127int seq_dentry(struct seq_file *, struct dentry *, const char *); 131int seq_dentry(struct seq_file *, struct dentry *, const char *);
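seq_hex_dump() is the seq_file counterpart of print_hex_dump(), taking the same prefix, rowsize and groupsize arguments. A hedged show-routine sketch; struct mydev and its fields are invented:

#include <linux/printk.h>
#include <linux/seq_file.h>

struct mydev {				/* illustrative only */
	const void *fw_buf;
	size_t fw_len;
};

static int mydev_fw_show(struct seq_file *m, void *v)
{
	const struct mydev *dev = m->private;

	seq_hex_dump(m, "fw: ", DUMP_PREFIX_OFFSET, 16, 1,
		     dev->fw_buf, dev->fw_len, true);
	return 0;
}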
@@ -149,6 +153,41 @@ static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
149#endif 153#endif
150} 154}
151 155
156/**
157 * seq_show_options - display mount options with appropriate escapes.
158 * @m: the seq_file handle
159 * @name: the mount option name
160 * @value: the mount option name's value, can be NULL
161 */
162static inline void seq_show_option(struct seq_file *m, const char *name,
163 const char *value)
164{
165 seq_putc(m, ',');
166 seq_escape(m, name, ",= \t\n\\");
167 if (value) {
168 seq_putc(m, '=');
169 seq_escape(m, value, ", \t\n\\");
170 }
171}
172
173/**
174 * seq_show_option_n - display mount options with appropriate escapes
175 * where @value must be a specific length.
176 * @m: the seq_file handle
177 * @name: the mount option name
178 * @value: the mount option name's value, cannot be NULL
179 * @length: the length of @value to display
180 *
181 * This is a macro since this uses "length" to define the size of the
182 * stack buffer.
183 */
184#define seq_show_option_n(m, name, value, length) { \
185 char val_buf[length + 1]; \
186 strncpy(val_buf, value, length); \
187 val_buf[length] = '\0'; \
188 seq_show_option(m, name, val_buf); \
189}
190
152#define SEQ_START_TOKEN ((void *)1) 191#define SEQ_START_TOKEN ((void *)1)
153/* 192/*
154 * Helpers for iteration over list_head-s in seq_files 193 * Helpers for iteration over list_head-s in seq_files
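These helpers escape separator characters (commas, whitespace, backslashes, and '=' in the option name) so that crafted option values cannot smuggle extra options into /proc/mounts output; seq_show_option_n() is the variant for values that are not NUL-terminated at the wanted length. A hedged .show_options sketch; example_fs and its superblock-info fields are invented:

#include <linux/fs.h>
#include <linux/seq_file.h>

struct example_fs_info {		/* illustrative only */
	const char *uuid;
	const char *label;
	size_t label_len;
};

static int example_fs_show_options(struct seq_file *m, struct dentry *root)
{
	struct example_fs_info *info = root->d_sb->s_fs_info;

	seq_show_option(m, "uuid", info->uuid);		/* emits ",uuid=<escaped>" */
	if (info->label)
		seq_show_option_n(m, "label", info->label, info->label_len);
	return 0;
}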
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index ba82c07feb95..faa0e0370ce7 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -136,8 +136,6 @@ void serial8250_resume_port(int line);
136 136
137extern int early_serial_setup(struct uart_port *port); 137extern int early_serial_setup(struct uart_port *port);
138 138
139extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
140extern void serial8250_early_out(struct uart_port *port, int offset, int value);
141extern int early_serial8250_setup(struct earlycon_device *device, 139extern int early_serial8250_setup(struct earlycon_device *device,
142 const char *options); 140 const char *options);
143extern void serial8250_do_set_termios(struct uart_port *port, 141extern void serial8250_do_set_termios(struct uart_port *port,
@@ -152,6 +150,11 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
152unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); 150unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
153void serial8250_tx_chars(struct uart_8250_port *up); 151void serial8250_tx_chars(struct uart_8250_port *up);
154unsigned int serial8250_modem_status(struct uart_8250_port *up); 152unsigned int serial8250_modem_status(struct uart_8250_port *up);
153void serial8250_init_port(struct uart_8250_port *up);
154void serial8250_set_defaults(struct uart_8250_port *up);
155void serial8250_console_write(struct uart_8250_port *up, const char *s,
156 unsigned int count);
157int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
155 158
156extern void serial8250_set_isa_configurator(void (*v) 159extern void serial8250_set_isa_configurator(void (*v)
157 (int port, struct uart_port *up, 160 (int port, struct uart_port *up,
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 9f779c7a2da4..df4ab5de1586 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,8 @@
18#include <linux/mod_devicetable.h> 18#include <linux/mod_devicetable.h>
19#include <uapi/linux/serio.h> 19#include <uapi/linux/serio.h>
20 20
21extern struct bus_type serio_bus;
22
21struct serio { 23struct serio {
22 void *port_data; 24 void *port_data;
23 25
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index dd0ba502ccb3..d927647e6350 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -128,7 +128,10 @@ void shdma_cleanup(struct shdma_dev *sdev);
128#if IS_ENABLED(CONFIG_SH_DMAE_BASE) 128#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
129bool shdma_chan_filter(struct dma_chan *chan, void *arg); 129bool shdma_chan_filter(struct dma_chan *chan, void *arg);
130#else 130#else
131#define shdma_chan_filter NULL 131static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
132{
133 return false;
134}
132#endif 135#endif
133 136
134#endif 137#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b88536487e6..2738d355cdf9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
37#include <net/flow_dissector.h> 37#include <net/flow_dissector.h>
38#include <linux/splice.h> 38#include <linux/splice.h>
39#include <linux/in6.h> 39#include <linux/in6.h>
40#include <net/flow.h>
40 41
41/* A. Checksumming of received packets by device. 42/* A. Checksumming of received packets by device.
42 * 43 *
@@ -173,17 +174,24 @@ struct nf_bridge_info {
173 BRNF_PROTO_8021Q, 174 BRNF_PROTO_8021Q,
174 BRNF_PROTO_PPPOE 175 BRNF_PROTO_PPPOE
175 } orig_proto:8; 176 } orig_proto:8;
176 bool pkt_otherhost; 177 u8 pkt_otherhost:1;
178 u8 in_prerouting:1;
179 u8 bridged_dnat:1;
177 __u16 frag_max_size; 180 __u16 frag_max_size;
178 unsigned int mask;
179 struct net_device *physindev; 181 struct net_device *physindev;
180 union { 182 union {
181 struct net_device *physoutdev; 183 /* prerouting: detect dnat in orig/reply direction */
182 char neigh_header[8];
183 };
184 union {
185 __be32 ipv4_daddr; 184 __be32 ipv4_daddr;
186 struct in6_addr ipv6_daddr; 185 struct in6_addr ipv6_daddr;
186
187 /* after prerouting + nat detected: store original source
188 * mac since neigh resolution overwrites it, only used while
189 * skb is out in neigh layer.
190 */
191 char neigh_header[8];
192
193 /* always valid & non-NULL from FORWARD on, for physdev match */
194 struct net_device *physoutdev;
187 }; 195 };
188}; 196};
189#endif 197#endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
506 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 514 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
507 * @napi_id: id of the NAPI struct this skb came from 515 * @napi_id: id of the NAPI struct this skb came from
508 * @secmark: security marking 516 * @secmark: security marking
517 * @offload_fwd_mark: fwding offload mark
509 * @mark: Generic packet mark 518 * @mark: Generic packet mark
510 * @vlan_proto: vlan encapsulation protocol 519 * @vlan_proto: vlan encapsulation protocol
511 * @vlan_tci: vlan tag control information 520 * @vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
650 unsigned int sender_cpu; 659 unsigned int sender_cpu;
651 }; 660 };
652#endif 661#endif
662 union {
653#ifdef CONFIG_NETWORK_SECMARK 663#ifdef CONFIG_NETWORK_SECMARK
654 __u32 secmark; 664 __u32 secmark;
665#endif
666#ifdef CONFIG_NET_SWITCHDEV
667 __u32 offload_fwd_mark;
655#endif 668#endif
669 };
670
656 union { 671 union {
657 __u32 mark; 672 __u32 mark;
658 __u32 reserved_tailroom; 673 __u32 reserved_tailroom;
@@ -922,14 +937,90 @@ enum pkt_hash_types {
922 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ 937 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
923}; 938};
924 939
925static inline void 940static inline void skb_clear_hash(struct sk_buff *skb)
926skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
927{ 941{
928 skb->l4_hash = (type == PKT_HASH_TYPE_L4); 942 skb->hash = 0;
929 skb->sw_hash = 0; 943 skb->sw_hash = 0;
944 skb->l4_hash = 0;
945}
946
947static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
948{
949 if (!skb->l4_hash)
950 skb_clear_hash(skb);
951}
952
953static inline void
954__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
955{
956 skb->l4_hash = is_l4;
957 skb->sw_hash = is_sw;
930 skb->hash = hash; 958 skb->hash = hash;
931} 959}
932 960
961static inline void
962skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
963{
964 /* Used by drivers to set hash from HW */
965 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
966}
967
968static inline void
969__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
970{
971 __skb_set_hash(skb, hash, true, is_l4);
972}
973
974void __skb_get_hash(struct sk_buff *skb);
975u32 skb_get_poff(const struct sk_buff *skb);
976u32 __skb_get_poff(const struct sk_buff *skb, void *data,
977 const struct flow_keys *keys, int hlen);
978__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
979 void *data, int hlen_proto);
980
981static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
982 int thoff, u8 ip_proto)
983{
984 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
985}
986
987void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
988 const struct flow_dissector_key *key,
989 unsigned int key_count);
990
991bool __skb_flow_dissect(const struct sk_buff *skb,
992 struct flow_dissector *flow_dissector,
993 void *target_container,
994 void *data, __be16 proto, int nhoff, int hlen,
995 unsigned int flags);
996
997static inline bool skb_flow_dissect(const struct sk_buff *skb,
998 struct flow_dissector *flow_dissector,
999 void *target_container, unsigned int flags)
1000{
1001 return __skb_flow_dissect(skb, flow_dissector, target_container,
1002 NULL, 0, 0, 0, flags);
1003}
1004
1005static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1006 struct flow_keys *flow,
1007 unsigned int flags)
1008{
1009 memset(flow, 0, sizeof(*flow));
1010 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1011 NULL, 0, 0, 0, flags);
1012}
1013
1014static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1015 void *data, __be16 proto,
1016 int nhoff, int hlen,
1017 unsigned int flags)
1018{
1019 memset(flow, 0, sizeof(*flow));
1020 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1021 data, proto, nhoff, hlen, flags);
1022}
1023
933static inline __u32 skb_get_hash(struct sk_buff *skb) 1024static inline __u32 skb_get_hash(struct sk_buff *skb)
934{ 1025{
935 if (!skb->l4_hash && !skb->sw_hash) 1026 if (!skb->l4_hash && !skb->sw_hash)
@@ -938,24 +1029,39 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
938 return skb->hash; 1029 return skb->hash;
939} 1030}
940 1031
941__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); 1032__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
942 1033
943static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) 1034static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
944{ 1035{
1036 if (!skb->l4_hash && !skb->sw_hash) {
1037 struct flow_keys keys;
1038 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1039
1040 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1041 }
1042
945 return skb->hash; 1043 return skb->hash;
946} 1044}
947 1045
948static inline void skb_clear_hash(struct sk_buff *skb) 1046__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
1047
1048static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
949{ 1049{
950 skb->hash = 0; 1050 if (!skb->l4_hash && !skb->sw_hash) {
951 skb->sw_hash = 0; 1051 struct flow_keys keys;
952 skb->l4_hash = 0; 1052 __u32 hash = __get_hash_from_flowi4(fl4, &keys);
1053
1054 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1055 }
1056
1057 return skb->hash;
953} 1058}
954 1059
955static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) 1060__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1061
1062static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
956{ 1063{
957 if (!skb->l4_hash) 1064 return skb->hash;
958 skb_clear_hash(skb);
959} 1065}
960 1066
961static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) 1067static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
@@ -1943,7 +2049,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
1943 2049
1944 if (skb_transport_header_was_set(skb)) 2050 if (skb_transport_header_was_set(skb))
1945 return; 2051 return;
1946 else if (skb_flow_dissect_flow_keys(skb, &keys)) 2052 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
1947 skb_set_transport_header(skb, keys.control.thoff); 2053 skb_set_transport_header(skb, keys.control.thoff);
1948 else 2054 else
1949 skb_set_transport_header(skb, offset_hint); 2055 skb_set_transport_header(skb, offset_hint);
@@ -2667,12 +2773,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
2667 skb_shinfo(skb)->frag_list = NULL; 2773 skb_shinfo(skb)->frag_list = NULL;
2668} 2774}
2669 2775
2670static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2671{
2672 frag->next = skb_shinfo(skb)->frag_list;
2673 skb_shinfo(skb)->frag_list = frag;
2674}
2675
2676#define skb_walk_frags(skb, iter) \ 2776#define skb_walk_frags(skb, iter) \
2677 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 2777 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2678 2778
@@ -3464,5 +3564,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
3464 skb_network_header(skb); 3564 skb_network_header(skb);
3465 return hdr_len + skb_gso_transport_seglen(skb); 3565 return hdr_len + skb_gso_transport_seglen(skb);
3466} 3566}
3567
3467#endif /* __KERNEL__ */ 3568#endif /* __KERNEL__ */
3468#endif /* _LINUX_SKBUFF_H */ 3569#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a99f0e5243e1..7e37d448ed91 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -290,6 +290,16 @@ void *__kmalloc(size_t size, gfp_t flags);
290void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); 290void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
291void kmem_cache_free(struct kmem_cache *, void *); 291void kmem_cache_free(struct kmem_cache *, void *);
292 292
293/*
294 * Bulk allocation and freeing operations. These are accelerated in an
295 * allocator specific way to avoid taking locks repeatedly or building
296 * metadata structures unnecessarily.
297 *
298 * Note that interrupts must be enabled when calling these functions.
299 */
300void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
301bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
302
293#ifdef CONFIG_NUMA 303#ifdef CONFIG_NUMA
294void *__kmalloc_node(size_t size, gfp_t flags, int node); 304void *__kmalloc_node(size_t size, gfp_t flags, int node);
295void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 305void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
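
The new bulk interface above amortizes per-object locking across a whole array of objects. A minimal sketch of the intended call pattern, assuming a caller-owned cache; the cache, object count, and function name are illustrative only, and interrupts must be enabled as the comment in the hunk requires.

#include <linux/errno.h>
#include <linux/slab.h>

#define HYPOTHETICAL_NR_OBJS 16

static int hypothetical_bulk_demo(struct kmem_cache *cache)
{
	void *objs[HYPOTHETICAL_NR_OBJS];

	/* Grab all objects in one call; returns false on failure. */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, HYPOTHETICAL_NR_OBJS, objs))
		return -ENOMEM;

	/* ... use the objects ... */

	/* Return them in one call as well. */
	kmem_cache_free_bulk(cache, HYPOTHETICAL_NR_OBJS, objs);
	return 0;
}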
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index da3c593f9845..e6109a6cd8f6 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -48,7 +48,16 @@ struct smp_hotplug_thread {
48 const char *thread_comm; 48 const char *thread_comm;
49}; 49};
50 50
51int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); 51int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
52 const struct cpumask *cpumask);
53
54static inline int
55smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
56{
57 return smpboot_register_percpu_thread_cpumask(plug_thread,
58 cpu_possible_mask);
59}
60
52void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); 61void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
53int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, 62int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
54 const struct cpumask *); 63 const struct cpumask *);
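
smpboot_register_percpu_thread() is now a thin wrapper over the cpumask variant. A minimal sketch of registering per-CPU threads on a restricted mask; the hotplug-thread fields and the mask passed in are placeholders, and a real user would fill in .store and .thread_fn.

#include <linux/cpumask.h>
#include <linux/smpboot.h>

static struct smp_hotplug_thread hypothetical_threads = {
	/* .store, .thread_fn, .setup, etc. filled in by the real user */
	.thread_comm = "hypo/%u",
};

static int hypothetical_start(const struct cpumask *allowed)
{
	/* Restrict the per-CPU threads to 'allowed' instead of all CPUs. */
	return smpboot_register_percpu_thread_cpumask(&hypothetical_threads,
						      allowed);
}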
diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h
new file mode 100644
index 000000000000..9c99f84bcc0e
--- /dev/null
+++ b/include/linux/soc/dove/pmu.h
@@ -0,0 +1,6 @@
1#ifndef LINUX_SOC_DOVE_PMU_H
2#define LINUX_SOC_DOVE_PMU_H
3
4int dove_init_pmu(void);
5
6#endif
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
new file mode 100644
index 000000000000..a5714e93fb34
--- /dev/null
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -0,0 +1,26 @@
1#ifndef __SOC_MEDIATEK_INFRACFG_H
2#define __SOC_MEDIATEK_INFRACFG_H
3
4#define MT8173_TOP_AXI_PROT_EN_MCI_M2 BIT(0)
5#define MT8173_TOP_AXI_PROT_EN_MM_M0 BIT(1)
6#define MT8173_TOP_AXI_PROT_EN_MM_M1 BIT(2)
7#define MT8173_TOP_AXI_PROT_EN_MMAPB_S BIT(6)
8#define MT8173_TOP_AXI_PROT_EN_L2C_M2 BIT(9)
9#define MT8173_TOP_AXI_PROT_EN_L2SS_SMI BIT(11)
10#define MT8173_TOP_AXI_PROT_EN_L2SS_ADD BIT(12)
11#define MT8173_TOP_AXI_PROT_EN_CCI_M2 BIT(13)
12#define MT8173_TOP_AXI_PROT_EN_MFG_S BIT(14)
13#define MT8173_TOP_AXI_PROT_EN_PERI_M0 BIT(15)
14#define MT8173_TOP_AXI_PROT_EN_PERI_M1 BIT(16)
15#define MT8173_TOP_AXI_PROT_EN_DEBUGSYS BIT(17)
16#define MT8173_TOP_AXI_PROT_EN_CQ_DMA BIT(18)
17#define MT8173_TOP_AXI_PROT_EN_GCPU BIT(19)
18#define MT8173_TOP_AXI_PROT_EN_IOMMU BIT(20)
19#define MT8173_TOP_AXI_PROT_EN_MFG_M0 BIT(21)
20#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
21#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
22
23int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
24int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
25
26#endif /* __SOC_MEDIATEK_INFRACFG_H */
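
A minimal sketch of how a power-domain driver might use the two bus-protection helpers declared above, assuming it already holds a regmap handle for the infracfg block; the function name and mask choice are illustrative.

#include <linux/regmap.h>
#include <linux/soc/mediatek/infracfg.h>

static int hypothetical_mm_power_cycle(struct regmap *infracfg)
{
	int ret;

	/* Block traffic from the MM masters before touching the domain. */
	ret = mtk_infracfg_set_bus_protection(infracfg,
			MT8173_TOP_AXI_PROT_EN_MM_M0 |
			MT8173_TOP_AXI_PROT_EN_MM_M1);
	if (ret)
		return ret;

	/* ... power the domain down and back up here ... */

	/* Re-open the bus once the domain is usable again. */
	return mtk_infracfg_clear_bus_protection(infracfg,
			MT8173_TOP_AXI_PROT_EN_MM_M0 |
			MT8173_TOP_AXI_PROT_EN_MM_M1);
}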
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
new file mode 100644
index 000000000000..2a53dcaeeeed
--- /dev/null
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -0,0 +1,35 @@
1#ifndef __QCOM_SMD_RPM_H__
2#define __QCOM_SMD_RPM_H__
3
4struct qcom_smd_rpm;
5
6#define QCOM_SMD_RPM_ACTIVE_STATE 0
7#define QCOM_SMD_RPM_SLEEP_STATE 1
8
9/*
10 * Constants used for addressing resources in the RPM.
11 */
12#define QCOM_SMD_RPM_BOOST 0x61747362
13#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
14#define QCOM_SMD_RPM_BUS_MASTER 0x73616d62
15#define QCOM_SMD_RPM_BUS_SLAVE 0x766c7362
16#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
17#define QCOM_SMD_RPM_LDOA 0x616f646c
18#define QCOM_SMD_RPM_LDOB 0x626F646C
19#define QCOM_SMD_RPM_MEM_CLK 0x326b6c63
20#define QCOM_SMD_RPM_MISC_CLK 0x306b6c63
21#define QCOM_SMD_RPM_NCPA 0x6170636E
22#define QCOM_SMD_RPM_NCPB 0x6270636E
23#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
24#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
25#define QCOM_SMD_RPM_SMPA 0x61706d73
26#define QCOM_SMD_RPM_SMPB 0x62706d73
27#define QCOM_SMD_RPM_SPDM 0x63707362
28#define QCOM_SMD_RPM_VSA 0x00617376
29
30int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
31 int state,
32 u32 resource_type, u32 resource_id,
33 void *buf, size_t count);
34
35#endif
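
A minimal sketch of a write to an RPM-managed resource through the interface above. The rpm handle, request layout, and key value are invented for illustration; real consumer drivers define their own message formats.

#include <linux/types.h>
#include <linux/soc/qcom/smd-rpm.h>

/* Hypothetical request layout; not a real RPM message format. */
struct hypothetical_rpm_req {
	u32 key;
	u32 nbytes;
	u32 value;
};

static int hypothetical_set_bus_clk(struct qcom_smd_rpm *rpm, u32 enable)
{
	struct hypothetical_rpm_req req = {
		.key = 0x62616e45,	/* made-up key */
		.nbytes = sizeof(u32),
		.value = enable,
	};

	/* Address bus clock 0 in the active set. */
	return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
				  QCOM_SMD_RPM_BUS_CLK, 0,
				  &req, sizeof(req));
}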
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
new file mode 100644
index 000000000000..d7e50aa6a4ac
--- /dev/null
+++ b/include/linux/soc/qcom/smd.h
@@ -0,0 +1,46 @@
1#ifndef __QCOM_SMD_H__
2#define __QCOM_SMD_H__
3
4#include <linux/device.h>
5#include <linux/mod_devicetable.h>
6
7struct qcom_smd;
8struct qcom_smd_channel;
9struct qcom_smd_lookup;
10
11/**
12 * struct qcom_smd_device - smd device struct
13 * @dev: the device struct
14 * @channel: handle to the smd channel for this device
15 */
16struct qcom_smd_device {
17 struct device dev;
18 struct qcom_smd_channel *channel;
19};
20
21/**
22 * struct qcom_smd_driver - smd driver struct
23 * @driver: underlying device driver
24 * @probe: invoked when the smd channel is found
25 * @remove: invoked when the smd channel is closed
26 * @callback: invoked when an inbound message is received on the channel,
27 * should return 0 on success or -EBUSY if the data cannot be
28 * consumed at this time
29 */
30struct qcom_smd_driver {
31 struct device_driver driver;
32 int (*probe)(struct qcom_smd_device *dev);
33 void (*remove)(struct qcom_smd_device *dev);
34 int (*callback)(struct qcom_smd_device *, const void *, size_t);
35};
36
37int qcom_smd_driver_register(struct qcom_smd_driver *drv);
38void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
39
40#define module_qcom_smd_driver(__smd_driver) \
41 module_driver(__smd_driver, qcom_smd_driver_register, \
42 qcom_smd_driver_unregister)
43
44int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
45
46#endif
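
A minimal sketch of a client driver built on the new SMD bus interface, wiring the three callbacks into module_qcom_smd_driver(); the driver name and echo behaviour are invented for illustration.

#include <linux/module.h>
#include <linux/soc/qcom/smd.h>

static int hypothetical_smd_probe(struct qcom_smd_device *sdev)
{
	dev_info(&sdev->dev, "channel opened\n");
	return 0;
}

static void hypothetical_smd_remove(struct qcom_smd_device *sdev)
{
	dev_info(&sdev->dev, "channel closed\n");
}

static int hypothetical_smd_callback(struct qcom_smd_device *sdev,
				     const void *data, size_t count)
{
	/* Echo every inbound message back on the same channel. */
	return qcom_smd_send(sdev->channel, data, count);
}

static struct qcom_smd_driver hypothetical_smd_driver = {
	.probe		= hypothetical_smd_probe,
	.remove		= hypothetical_smd_remove,
	.callback	= hypothetical_smd_callback,
	.driver		= {
		.name	= "hypothetical_smd",
	},
};
module_qcom_smd_driver(hypothetical_smd_driver);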
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
new file mode 100644
index 000000000000..bc9630d3aced
--- /dev/null
+++ b/include/linux/soc/qcom/smem.h
@@ -0,0 +1,11 @@
1#ifndef __QCOM_SMEM_H__
2#define __QCOM_SMEM_H__
3
4#define QCOM_SMEM_HOST_ANY -1
5
6int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
7int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
8
9int qcom_smem_get_free_space(unsigned host);
10
11#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d673072346f2..269e8afd3e2a 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,8 @@
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24 24
25struct dma_chan; 25struct dma_chan;
26struct spi_master;
27struct spi_transfer;
26 28
27/* 29/*
28 * INTERFACES between SPI master-side drivers and SPI infrastructure. 30 * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -31,6 +33,59 @@ struct dma_chan;
31extern struct bus_type spi_bus_type; 33extern struct bus_type spi_bus_type;
32 34
33/** 35/**
36 * struct spi_statistics - statistics for spi transfers
37 * @clock: lock protecting this structure
38 *
39 * @messages: number of spi-messages handled
40 * @transfers: number of spi_transfers handled
41 * @errors: number of errors during spi_transfer
42 * @timedout: number of timeouts during spi_transfer
43 *
44 * @spi_sync: number of times spi_sync is used
45 * @spi_sync_immediate:
46 * number of times spi_sync is executed immediately
47 * in calling context without queuing and scheduling
48 * @spi_async: number of times spi_async is used
49 *
50 * @bytes: number of bytes transferred to/from device
51 * @bytes_tx: number of bytes sent to device
52 * @bytes_rx: number of bytes received from device
53 *
54 */
55struct spi_statistics {
56 spinlock_t lock; /* lock for the whole structure */
57
58 unsigned long messages;
59 unsigned long transfers;
60 unsigned long errors;
61 unsigned long timedout;
62
63 unsigned long spi_sync;
64 unsigned long spi_sync_immediate;
65 unsigned long spi_async;
66
67 unsigned long long bytes;
68 unsigned long long bytes_rx;
69 unsigned long long bytes_tx;
70
71};
72
73void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
74 struct spi_transfer *xfer,
75 struct spi_master *master);
76
77#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
78 do { \
79 unsigned long flags; \
80 spin_lock_irqsave(&(stats)->lock, flags); \
81 (stats)->field += count; \
82 spin_unlock_irqrestore(&(stats)->lock, flags); \
83 } while (0)
84
85#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
86 SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
87
88/**
34 * struct spi_device - Master side proxy for an SPI slave device 89 * struct spi_device - Master side proxy for an SPI slave device
35 * @dev: Driver model representation of the device. 90 * @dev: Driver model representation of the device.
36 * @master: SPI controller used with the device. 91 * @master: SPI controller used with the device.
@@ -60,6 +115,8 @@ extern struct bus_type spi_bus_type;
60 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when 115 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
61 * when not using a GPIO line) 116 * when not using a GPIO line)
62 * 117 *
118 * @statistics: statistics for the spi_device
119 *
63 * A @spi_device is used to interchange data between an SPI slave 120 * A @spi_device is used to interchange data between an SPI slave
64 * (usually a discrete chip) and CPU memory. 121 * (usually a discrete chip) and CPU memory.
65 * 122 *
@@ -98,6 +155,9 @@ struct spi_device {
98 char modalias[SPI_NAME_SIZE]; 155 char modalias[SPI_NAME_SIZE];
99 int cs_gpio; /* chip select gpio */ 156 int cs_gpio; /* chip select gpio */
100 157
158 /* the statistics */
159 struct spi_statistics statistics;
160
101 /* 161 /*
102 * likely need more hooks for more protocol options affecting how 162 * likely need more hooks for more protocol options affecting how
103 * the controller talks to each chip, like: 163 * the controller talks to each chip, like:
@@ -296,6 +356,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
296 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 356 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
297 * number. Any individual value may be -ENOENT for CS lines that 357 * number. Any individual value may be -ENOENT for CS lines that
298 * are not GPIOs (driven by the SPI controller itself). 358 * are not GPIOs (driven by the SPI controller itself).
359 * @statistics: statistics for the spi_master
299 * @dma_tx: DMA transmit channel 360 * @dma_tx: DMA transmit channel
300 * @dma_rx: DMA receive channel 361 * @dma_rx: DMA receive channel
301 * @dummy_rx: dummy receive buffer for full-duplex devices 362 * @dummy_rx: dummy receive buffer for full-duplex devices
@@ -452,6 +513,9 @@ struct spi_master {
452 /* gpio chip select */ 513 /* gpio chip select */
453 int *cs_gpios; 514 int *cs_gpios;
454 515
516 /* statistics */
517 struct spi_statistics statistics;
518
455 /* DMA channels for use with core dmaengine helpers */ 519 /* DMA channels for use with core dmaengine helpers */
456 struct dma_chan *dma_tx; 520 struct dma_chan *dma_tx;
457 struct dma_chan *dma_rx; 521 struct dma_chan *dma_rx;
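
The statistics block added above is updated from both the controller and the per-device side. A minimal sketch of the two usage styles implied by the hunk, assuming the caller already holds the master, device, and transfer; the accounting function itself is hypothetical.

#include <linux/spi/spi.h>

static void hypothetical_account(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer, bool failed)
{
	/* Per-transfer byte and count accounting for both sets of counters. */
	spi_statistics_add_transfer_stats(&master->statistics, xfer, master);
	spi_statistics_add_transfer_stats(&spi->statistics, xfer, master);

	/* Simple event counters go through the locked helper macros. */
	if (failed) {
		SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, errors);
		SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, errors);
	}
}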
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0063b24b4f36..47dd0cebd204 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,16 +130,6 @@ do { \
130#define smp_mb__before_spinlock() smp_wmb() 130#define smp_mb__before_spinlock() smp_wmb()
131#endif 131#endif
132 132
133/*
134 * Place this after a lock-acquisition primitive to guarantee that
135 * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
136 * if the UNLOCK and LOCK are executed by the same CPU or if the
137 * UNLOCK and LOCK operate on the same lock variable.
138 */
139#ifndef smp_mb__after_unlock_lock
140#define smp_mb__after_unlock_lock() do { } while (0)
141#endif
142
143/** 133/**
144 * raw_spin_unlock_wait - wait until the spinlock gets unlocked 134 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
145 * @lock: the spinlock in question. 135 * @lock: the spinlock in question.
@@ -296,7 +286,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
296 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n 286 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
297 */ 287 */
298 288
299static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) 289static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
300{ 290{
301 return &lock->rlock; 291 return &lock->rlock;
302} 292}
@@ -307,17 +297,17 @@ do { \
307 raw_spin_lock_init(&(_lock)->rlock); \ 297 raw_spin_lock_init(&(_lock)->rlock); \
308} while (0) 298} while (0)
309 299
310static inline void spin_lock(spinlock_t *lock) 300static __always_inline void spin_lock(spinlock_t *lock)
311{ 301{
312 raw_spin_lock(&lock->rlock); 302 raw_spin_lock(&lock->rlock);
313} 303}
314 304
315static inline void spin_lock_bh(spinlock_t *lock) 305static __always_inline void spin_lock_bh(spinlock_t *lock)
316{ 306{
317 raw_spin_lock_bh(&lock->rlock); 307 raw_spin_lock_bh(&lock->rlock);
318} 308}
319 309
320static inline int spin_trylock(spinlock_t *lock) 310static __always_inline int spin_trylock(spinlock_t *lock)
321{ 311{
322 return raw_spin_trylock(&lock->rlock); 312 return raw_spin_trylock(&lock->rlock);
323} 313}
@@ -337,7 +327,7 @@ do { \
337 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ 327 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
338} while (0) 328} while (0)
339 329
340static inline void spin_lock_irq(spinlock_t *lock) 330static __always_inline void spin_lock_irq(spinlock_t *lock)
341{ 331{
342 raw_spin_lock_irq(&lock->rlock); 332 raw_spin_lock_irq(&lock->rlock);
343} 333}
@@ -352,32 +342,32 @@ do { \
352 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ 342 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
353} while (0) 343} while (0)
354 344
355static inline void spin_unlock(spinlock_t *lock) 345static __always_inline void spin_unlock(spinlock_t *lock)
356{ 346{
357 raw_spin_unlock(&lock->rlock); 347 raw_spin_unlock(&lock->rlock);
358} 348}
359 349
360static inline void spin_unlock_bh(spinlock_t *lock) 350static __always_inline void spin_unlock_bh(spinlock_t *lock)
361{ 351{
362 raw_spin_unlock_bh(&lock->rlock); 352 raw_spin_unlock_bh(&lock->rlock);
363} 353}
364 354
365static inline void spin_unlock_irq(spinlock_t *lock) 355static __always_inline void spin_unlock_irq(spinlock_t *lock)
366{ 356{
367 raw_spin_unlock_irq(&lock->rlock); 357 raw_spin_unlock_irq(&lock->rlock);
368} 358}
369 359
370static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 360static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
371{ 361{
372 raw_spin_unlock_irqrestore(&lock->rlock, flags); 362 raw_spin_unlock_irqrestore(&lock->rlock, flags);
373} 363}
374 364
375static inline int spin_trylock_bh(spinlock_t *lock) 365static __always_inline int spin_trylock_bh(spinlock_t *lock)
376{ 366{
377 return raw_spin_trylock_bh(&lock->rlock); 367 return raw_spin_trylock_bh(&lock->rlock);
378} 368}
379 369
380static inline int spin_trylock_irq(spinlock_t *lock) 370static __always_inline int spin_trylock_irq(spinlock_t *lock)
381{ 371{
382 return raw_spin_trylock_irq(&lock->rlock); 372 return raw_spin_trylock_irq(&lock->rlock);
383} 373}
@@ -387,22 +377,22 @@ static inline int spin_trylock_irq(spinlock_t *lock)
387 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ 377 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
388}) 378})
389 379
390static inline void spin_unlock_wait(spinlock_t *lock) 380static __always_inline void spin_unlock_wait(spinlock_t *lock)
391{ 381{
392 raw_spin_unlock_wait(&lock->rlock); 382 raw_spin_unlock_wait(&lock->rlock);
393} 383}
394 384
395static inline int spin_is_locked(spinlock_t *lock) 385static __always_inline int spin_is_locked(spinlock_t *lock)
396{ 386{
397 return raw_spin_is_locked(&lock->rlock); 387 return raw_spin_is_locked(&lock->rlock);
398} 388}
399 389
400static inline int spin_is_contended(spinlock_t *lock) 390static __always_inline int spin_is_contended(spinlock_t *lock)
401{ 391{
402 return raw_spin_is_contended(&lock->rlock); 392 return raw_spin_is_contended(&lock->rlock);
403} 393}
404 394
405static inline int spin_can_lock(spinlock_t *lock) 395static __always_inline int spin_can_lock(spinlock_t *lock)
406{ 396{
407 return raw_spin_can_lock(&lock->rlock); 397 return raw_spin_can_lock(&lock->rlock);
408} 398}
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c91eea..eead8ab93c0a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
119 int rx_fifo_size; 119 int rx_fifo_size;
120 void (*fix_mac_speed)(void *priv, unsigned int speed); 120 void (*fix_mac_speed)(void *priv, unsigned int speed);
121 void (*bus_setup)(void __iomem *ioaddr); 121 void (*bus_setup)(void __iomem *ioaddr);
122 void *(*setup)(struct platform_device *pdev);
123 void (*free)(struct platform_device *pdev, void *priv);
124 int (*init)(struct platform_device *pdev, void *priv); 122 int (*init)(struct platform_device *pdev, void *priv);
125 void (*exit)(struct platform_device *pdev, void *priv); 123 void (*exit)(struct platform_device *pdev, void *priv);
126 void *custom_cfg;
127 void *custom_data;
128 void *bsp_priv; 124 void *bsp_priv;
129}; 125};
130
131/* of_data for SoC glue layer device tree bindings */
132
133struct stmmac_of_data {
134 int has_gmac;
135 int enh_desc;
136 int tx_coe;
137 int rx_coe;
138 int bugged_jumbo;
139 int pmt;
140 int riwt_off;
141 void (*fix_mac_speed)(void *priv, unsigned int speed);
142 void (*bus_setup)(void __iomem *ioaddr);
143 void *(*setup)(struct platform_device *pdev);
144 void (*free)(struct platform_device *pdev, void *priv);
145 int (*init)(struct platform_device *pdev, void *priv);
146 void (*exit)(struct platform_device *pdev, void *priv);
147};
148#endif 126#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index d2abbdb8c6aa..414d924318ce 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -112,25 +112,13 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
112 * 112 *
113 * This can be thought of as a very heavy write lock, equivalent to 113 * This can be thought of as a very heavy write lock, equivalent to
114 * grabbing every spinlock in the kernel. */ 114 * grabbing every spinlock in the kernel. */
115int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); 115int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
116 116
117/** 117int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
118 * __stop_machine: freeze the machine on all CPUs and run this function
119 * @fn: the function to run
120 * @data: the data ptr for the @fn
121 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
122 *
123 * Description: This is a special version of the above, which assumes cpus
124 * won't come or go while it's being called. Used by hotplug cpu.
125 */
126int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
127
128int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
129 const struct cpumask *cpus); 118 const struct cpumask *cpus);
130
131#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 119#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
132 120
133static inline int __stop_machine(int (*fn)(void *), void *data, 121static inline int stop_machine(cpu_stop_fn_t fn, void *data,
134 const struct cpumask *cpus) 122 const struct cpumask *cpus)
135{ 123{
136 unsigned long flags; 124 unsigned long flags;
@@ -141,16 +129,10 @@ static inline int __stop_machine(int (*fn)(void *), void *data,
141 return ret; 129 return ret;
142} 130}
143 131
144static inline int stop_machine(int (*fn)(void *), void *data, 132static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
145 const struct cpumask *cpus)
146{
147 return __stop_machine(fn, data, cpus);
148}
149
150static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
151 const struct cpumask *cpus) 133 const struct cpumask *cpus)
152{ 134{
153 return __stop_machine(fn, data, cpus); 135 return stop_machine(fn, data, cpus);
154} 136}
155 137
156#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 138#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
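
With both entry points taking a cpu_stop_fn_t, callers look the same whether or not CONFIG_STOP_MACHINE is built in. A minimal sketch of the calling convention, with a made-up patching callback and payload.

#include <linux/stop_machine.h>

struct hypothetical_patch {
	void *addr;
	unsigned long insn;
};

/* Runs while every online CPU is held in stop_machine context. */
static int hypothetical_apply(void *data)
{
	struct hypothetical_patch *p = data;

	*(unsigned long *)p->addr = p->insn;
	return 0;
}

static int hypothetical_patch_text(struct hypothetical_patch *p)
{
	/* NULL cpumask: run the callback on any one online CPU. */
	return stop_machine(hypothetical_apply, p, NULL);
}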
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 71f711db4500..dabe643eb5fa 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -48,24 +48,24 @@ static inline int string_unescape_any_inplace(char *buf)
48#define ESCAPE_HEX 0x20 48#define ESCAPE_HEX 0x20
49 49
50int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, 50int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
51 unsigned int flags, const char *esc); 51 unsigned int flags, const char *only);
52 52
53static inline int string_escape_mem_any_np(const char *src, size_t isz, 53static inline int string_escape_mem_any_np(const char *src, size_t isz,
54 char *dst, size_t osz, const char *esc) 54 char *dst, size_t osz, const char *only)
55{ 55{
56 return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc); 56 return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, only);
57} 57}
58 58
59static inline int string_escape_str(const char *src, char *dst, size_t sz, 59static inline int string_escape_str(const char *src, char *dst, size_t sz,
60 unsigned int flags, const char *esc) 60 unsigned int flags, const char *only)
61{ 61{
62 return string_escape_mem(src, strlen(src), dst, sz, flags, esc); 62 return string_escape_mem(src, strlen(src), dst, sz, flags, only);
63} 63}
64 64
65static inline int string_escape_str_any_np(const char *src, char *dst, 65static inline int string_escape_str_any_np(const char *src, char *dst,
66 size_t sz, const char *esc) 66 size_t sz, const char *only)
67{ 67{
68 return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc); 68 return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
69} 69}
70 70
71#endif 71#endif
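
The escape helpers now take an "only" set restricting which characters are escaped. A minimal sketch using the any-non-printable wrapper with no restriction; the input string and buffer size are arbitrary, and the NUL termination is added by hand because the helper does not do it.

#include <linux/printk.h>
#include <linux/string_helpers.h>

static void hypothetical_escape_demo(void)
{
	char out[64];
	int len;

	/* Escape anything non-printable; NULL "only" means no restriction. */
	len = string_escape_str_any_np("line1\nline2\t", out,
				       sizeof(out) - 1, NULL);
	if (len < (int)sizeof(out)) {
		out[len] = '\0';	/* helper does not NUL-terminate */
		pr_info("escaped (%d bytes): %s\n", len, out);
	}
}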
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
index 07d8e53bedfc..5c9c6cd08d3b 100644
--- a/include/linux/sunrpc/addr.h
+++ b/include/linux/sunrpc/addr.h
@@ -46,8 +46,8 @@ static inline void rpc_set_port(struct sockaddr *sap,
46#define IPV6_SCOPE_DELIMITER '%' 46#define IPV6_SCOPE_DELIMITER '%'
47#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") 47#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
48 48
49static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1, 49static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
50 const struct sockaddr *sap2) 50 const struct sockaddr *sap2)
51{ 51{
52 const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1; 52 const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
53 const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2; 53 const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
@@ -67,8 +67,8 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
67} 67}
68 68
69#if IS_ENABLED(CONFIG_IPV6) 69#if IS_ENABLED(CONFIG_IPV6)
70static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 70static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
71 const struct sockaddr *sap2) 71 const struct sockaddr *sap2)
72{ 72{
73 const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1; 73 const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
74 const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; 74 const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
@@ -93,7 +93,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
93 return true; 93 return true;
94} 94}
95#else /* !(IS_ENABLED(CONFIG_IPV6) */ 95#else /* !(IS_ENABLED(CONFIG_IPV6) */
96static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1, 96static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
97 const struct sockaddr *sap2) 97 const struct sockaddr *sap2)
98{ 98{
99 return false; 99 return false;
@@ -122,15 +122,28 @@ static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
122 if (sap1->sa_family == sap2->sa_family) { 122 if (sap1->sa_family == sap2->sa_family) {
123 switch (sap1->sa_family) { 123 switch (sap1->sa_family) {
124 case AF_INET: 124 case AF_INET:
125 return __rpc_cmp_addr4(sap1, sap2); 125 return rpc_cmp_addr4(sap1, sap2);
126 case AF_INET6: 126 case AF_INET6:
127 return __rpc_cmp_addr6(sap1, sap2); 127 return rpc_cmp_addr6(sap1, sap2);
128 } 128 }
129 } 129 }
130 return false; 130 return false;
131} 131}
132 132
133/** 133/**
134 * rpc_cmp_addr_port - compare the address and port number of two sockaddrs.
135 * @sap1: first sockaddr
136 * @sap2: second sockaddr
137 */
138static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1,
139 const struct sockaddr *sap2)
140{
141 if (!rpc_cmp_addr(sap1, sap2))
142 return false;
143 return rpc_get_port(sap1) == rpc_get_port(sap2);
144}
145
146/**
134 * rpc_copy_addr - copy the address portion of one sockaddr to another 147 * rpc_copy_addr - copy the address portion of one sockaddr to another
135 * @dst: destination sockaddr 148 * @dst: destination sockaddr
136 * @src: source sockaddr 149 * @src: source sockaddr
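
rpc_cmp_addr_port() layers a port check on top of the existing address comparison. A minimal sketch of when each helper applies; both wrapper names are hypothetical.

#include <linux/sunrpc/addr.h>

/* True if 'a' and 'b' refer to the same host, ignoring the port. */
static bool hypothetical_same_host(const struct sockaddr *a,
				   const struct sockaddr *b)
{
	return rpc_cmp_addr(a, b);
}

/* True only if host and port both match, e.g. to identify an exact endpoint. */
static bool hypothetical_same_endpoint(const struct sockaddr *a,
				       const struct sockaddr *b)
{
	return rpc_cmp_addr_port(a, b);
}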
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index a7cbb570cc5c..1ecf13e148b8 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -18,9 +18,13 @@
18#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <linux/rcupdate.h> 19#include <linux/rcupdate.h>
20#include <linux/uidgid.h> 20#include <linux/uidgid.h>
21#include <linux/utsname.h>
21 22
22/* size of the nodename buffer */ 23/*
23#define UNX_MAXNODENAME 32 24 * Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
25 * but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
26 */
27#define UNX_MAXNODENAME __NEW_UTS_LEN
24 28
25struct rpcsec_gss_info; 29struct rpcsec_gss_info;
26 30
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 437ddb6c4aef..03d3b4c92d9f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -46,7 +46,7 @@
46 * 46 *
47 */ 47 */
48struct cache_head { 48struct cache_head {
49 struct cache_head * next; 49 struct hlist_node cache_list;
50 time_t expiry_time; /* After time time, don't use the data */ 50 time_t expiry_time; /* After time time, don't use the data */
51 time_t last_refresh; /* If CACHE_PENDING, this is when upcall 51 time_t last_refresh; /* If CACHE_PENDING, this is when upcall
52 * was sent, else this is when update was received 52 * was sent, else this is when update was received
@@ -73,7 +73,7 @@ struct cache_detail_pipefs {
73struct cache_detail { 73struct cache_detail {
74 struct module * owner; 74 struct module * owner;
75 int hash_size; 75 int hash_size;
76 struct cache_head ** hash_table; 76 struct hlist_head * hash_table;
77 rwlock_t hash_lock; 77 rwlock_t hash_lock;
78 78
79 atomic_t inuse; /* active user-space update or lookup */ 79 atomic_t inuse; /* active user-space update or lookup */
@@ -224,6 +224,11 @@ extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
224 umode_t, struct cache_detail *); 224 umode_t, struct cache_detail *);
225extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); 225extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
226 226
227/* Must store cache_detail in seq_file->private if using next three functions */
228extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
229extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
230extern void cache_seq_stop(struct seq_file *file, void *p);
231
227extern void qword_add(char **bpp, int *lp, char *str); 232extern void qword_add(char **bpp, int *lp, char *str);
228extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); 233extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
229extern int qword_get(char **bpp, char *dest, int bufsize); 234extern int qword_get(char **bpp, char *dest, int bufsize);
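
The three seq helpers exported above expect the cache_detail pointer in seq_file->private. A minimal sketch of wiring them into a seq_operations table with a hypothetical show routine; the open path that stores the cache_detail is omitted.

#include <linux/seq_file.h>
#include <linux/sunrpc/cache.h>

static int hypothetical_cache_show(struct seq_file *m, void *p)
{
	/* p is a struct cache_head (or SEQ_START_TOKEN for the header). */
	return 0;
}

static const struct seq_operations hypothetical_cache_seq_ops = {
	.start	= cache_seq_start,
	.next	= cache_seq_next,
	.stop	= cache_seq_stop,
	.show	= hypothetical_cache_show,
};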
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fae6fb947fc8..cc0fc712bb82 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -19,11 +19,6 @@
19#include <linux/wait.h> 19#include <linux/wait.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21 21
22/*
23 * This is the RPC server thread function prototype
24 */
25typedef int (*svc_thread_fn)(void *);
26
27/* statistics for svc_pool structures */ 22/* statistics for svc_pool structures */
28struct svc_pool_stats { 23struct svc_pool_stats {
29 atomic_long_t packets; 24 atomic_long_t packets;
@@ -54,6 +49,25 @@ struct svc_pool {
54 unsigned long sp_flags; 49 unsigned long sp_flags;
55} ____cacheline_aligned_in_smp; 50} ____cacheline_aligned_in_smp;
56 51
52struct svc_serv;
53
54struct svc_serv_ops {
55 /* Callback to use when last thread exits. */
56 void (*svo_shutdown)(struct svc_serv *, struct net *);
57
58 /* function for service threads to run */
59 int (*svo_function)(void *);
60
61 /* queue up a transport for servicing */
62 void (*svo_enqueue_xprt)(struct svc_xprt *);
63
64 /* set up thread (or whatever) execution context */
65 int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
66
67 /* optional module to count when adding threads (pooled svcs only) */
68 struct module *svo_module;
69};
70
57/* 71/*
58 * RPC service. 72 * RPC service.
59 * 73 *
@@ -85,16 +99,7 @@ struct svc_serv {
85 99
86 unsigned int sv_nrpools; /* number of thread pools */ 100 unsigned int sv_nrpools; /* number of thread pools */
87 struct svc_pool * sv_pools; /* array of thread pools */ 101 struct svc_pool * sv_pools; /* array of thread pools */
88 102 struct svc_serv_ops *sv_ops; /* server operations */
89 void (*sv_shutdown)(struct svc_serv *serv,
90 struct net *net);
91 /* Callback to use when last thread
92 * exits.
93 */
94
95 struct module * sv_module; /* optional module to count when
96 * adding threads */
97 svc_thread_fn sv_function; /* main function for threads */
98#if defined(CONFIG_SUNRPC_BACKCHANNEL) 103#if defined(CONFIG_SUNRPC_BACKCHANNEL)
99 struct list_head sv_cb_list; /* queue for callback requests 104 struct list_head sv_cb_list; /* queue for callback requests
100 * that arrive over the same 105 * that arrive over the same
@@ -423,19 +428,46 @@ struct svc_procedure {
423}; 428};
424 429
425/* 430/*
431 * Mode for mapping cpus to pools.
432 */
433enum {
434 SVC_POOL_AUTO = -1, /* choose one of the others */
435 SVC_POOL_GLOBAL, /* no mapping, just a single global pool
436 * (legacy & UP mode) */
437 SVC_POOL_PERCPU, /* one pool per cpu */
438 SVC_POOL_PERNODE /* one pool per numa node */
439};
440
441struct svc_pool_map {
442 int count; /* How many svc_servs use us */
443 int mode; /* Note: int not enum to avoid
444 * warnings about "enumeration value
445 * not handled in switch" */
446 unsigned int npools;
447 unsigned int *pool_to; /* maps pool id to cpu or node */
448 unsigned int *to_pool; /* maps cpu or node to pool id */
449};
450
451extern struct svc_pool_map svc_pool_map;
452
453/*
426 * Function prototypes. 454 * Function prototypes.
427 */ 455 */
428int svc_rpcb_setup(struct svc_serv *serv, struct net *net); 456int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
429void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); 457void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
430int svc_bind(struct svc_serv *serv, struct net *net); 458int svc_bind(struct svc_serv *serv, struct net *net);
431struct svc_serv *svc_create(struct svc_program *, unsigned int, 459struct svc_serv *svc_create(struct svc_program *, unsigned int,
432 void (*shutdown)(struct svc_serv *, struct net *net)); 460 struct svc_serv_ops *);
461struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
462 struct svc_pool *pool, int node);
433struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, 463struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
434 struct svc_pool *pool, int node); 464 struct svc_pool *pool, int node);
465void svc_rqst_free(struct svc_rqst *);
435void svc_exit_thread(struct svc_rqst *); 466void svc_exit_thread(struct svc_rqst *);
467unsigned int svc_pool_map_get(void);
468void svc_pool_map_put(void);
436struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, 469struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
437 void (*shutdown)(struct svc_serv *, struct net *net), 470 struct svc_serv_ops *);
438 svc_thread_fn, struct module *);
439int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); 471int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
440int svc_pool_stats_open(struct svc_serv *serv, struct file *file); 472int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
441void svc_destroy(struct svc_serv *); 473void svc_destroy(struct svc_serv *);
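
Service creation now takes a single svc_serv_ops instead of separate shutdown/function/module arguments. A minimal sketch of the new shape for a pooled service; the program, thread body, and shutdown handler are placeholders standing in for a real RPC service.

#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

extern struct svc_program hypothetical_program;	/* defined elsewhere */

static void hypothetical_shutdown(struct svc_serv *serv, struct net *net)
{
}

static int hypothetical_thread(void *data)
{
	/* The svc_recv()/svc_process() loop would live here. */
	return 0;
}

static struct svc_serv_ops hypothetical_sv_ops = {
	.svo_shutdown		= hypothetical_shutdown,
	.svo_function		= hypothetical_thread,
	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
	.svo_setup		= svc_set_num_threads,
	.svo_module		= THIS_MODULE,
};

static struct svc_serv *hypothetical_create(unsigned int bufsize)
{
	return svc_create_pooled(&hypothetical_program, bufsize,
				 &hypothetical_sv_ops);
}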
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cb94ee4181d4..7ccc961f33e9 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -132,6 +132,7 @@ struct svcxprt_rdma {
132 struct list_head sc_accept_q; /* Conn. waiting accept */ 132 struct list_head sc_accept_q; /* Conn. waiting accept */
133 int sc_ord; /* RDMA read limit */ 133 int sc_ord; /* RDMA read limit */
134 int sc_max_sge; 134 int sc_max_sge;
135 int sc_max_sge_rd; /* max sge for read target */
135 136
136 int sc_sq_depth; /* Depth of SQ */ 137 int sc_sq_depth; /* Depth of SQ */
137 atomic_t sc_sq_count; /* Number of SQ WR on queue */ 138 atomic_t sc_sq_count; /* Number of SQ WR on queue */
@@ -172,13 +173,6 @@ struct svcxprt_rdma {
172#define RDMAXPRT_SQ_PENDING 2 173#define RDMAXPRT_SQ_PENDING 2
173#define RDMAXPRT_CONN_PENDING 3 174#define RDMAXPRT_CONN_PENDING 3
174 175
175#define RPCRDMA_MAX_SVC_SEGS (64) /* server max scatter/gather */
176#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
177#define RPCRDMA_MAXPAYLOAD RPCSVC_MAXPAYLOAD
178#else
179#define RPCRDMA_MAXPAYLOAD (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
180#endif
181
182#define RPCRDMA_LISTEN_BACKLOG 10 176#define RPCRDMA_LISTEN_BACKLOG 10
183/* The default ORD value is based on two outstanding full-size writes with a 177/* The default ORD value is based on two outstanding full-size writes with a
184 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ 178 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
@@ -187,6 +181,8 @@ struct svcxprt_rdma {
187#define RPCRDMA_MAX_REQUESTS 32 181#define RPCRDMA_MAX_REQUESTS 32
188#define RPCRDMA_MAX_REQ_SIZE 4096 182#define RPCRDMA_MAX_REQ_SIZE 4096
189 183
184#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
185
190/* svc_rdma_marshal.c */ 186/* svc_rdma_marshal.c */
191extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); 187extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
192extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, 188extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -213,6 +209,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
213 209
214/* svc_rdma_sendto.c */ 210/* svc_rdma_sendto.c */
215extern int svc_rdma_sendto(struct svc_rqst *); 211extern int svc_rdma_sendto(struct svc_rqst *);
212extern struct rpcrdma_read_chunk *
213 svc_rdma_get_read_chunk(struct rpcrdma_msg *);
216 214
217/* svc_rdma_transport.c */ 215/* svc_rdma_transport.c */
218extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); 216extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
@@ -225,7 +223,6 @@ extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
225extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt); 223extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
226extern struct svc_rdma_req_map *svc_rdma_get_req_map(void); 224extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
227extern void svc_rdma_put_req_map(struct svc_rdma_req_map *); 225extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
228extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
229extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *); 226extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
230extern void svc_rdma_put_frmr(struct svcxprt_rdma *, 227extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
231 struct svc_rdma_fastreg_mr *); 228 struct svc_rdma_fastreg_mr *);
@@ -238,83 +235,4 @@ extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
238extern int svc_rdma_init(void); 235extern int svc_rdma_init(void);
239extern void svc_rdma_cleanup(void); 236extern void svc_rdma_cleanup(void);
240 237
241/*
242 * Returns the address of the first read chunk or <nul> if no read chunk is
243 * present
244 */
245static inline struct rpcrdma_read_chunk *
246svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
247{
248 struct rpcrdma_read_chunk *ch =
249 (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
250
251 if (ch->rc_discrim == 0)
252 return NULL;
253
254 return ch;
255}
256
257/*
258 * Returns the address of the first read write array element or <nul> if no
259 * write array list is present
260 */
261static inline struct rpcrdma_write_array *
262svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
263{
264 if (rmsgp->rm_body.rm_chunks[0] != 0
265 || rmsgp->rm_body.rm_chunks[1] == 0)
266 return NULL;
267
268 return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
269}
270
271/*
272 * Returns the address of the first reply array element or <nul> if no
273 * reply array is present
274 */
275static inline struct rpcrdma_write_array *
276svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
277{
278 struct rpcrdma_read_chunk *rch;
279 struct rpcrdma_write_array *wr_ary;
280 struct rpcrdma_write_array *rp_ary;
281
282 /* XXX: Need to fix when reply list may occur with read-list and/or
283 * write list */
284 if (rmsgp->rm_body.rm_chunks[0] != 0 ||
285 rmsgp->rm_body.rm_chunks[1] != 0)
286 return NULL;
287
288 rch = svc_rdma_get_read_chunk(rmsgp);
289 if (rch) {
290 while (rch->rc_discrim)
291 rch++;
292
293 /* The reply list follows an empty write array located
294 * at 'rc_position' here. The reply array is at rc_target.
295 */
296 rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
297
298 goto found_it;
299 }
300
301 wr_ary = svc_rdma_get_write_array(rmsgp);
302 if (wr_ary) {
303 rp_ary = (struct rpcrdma_write_array *)
304 &wr_ary->
305 wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
306
307 goto found_it;
308 }
309
310 /* No read list, no write list */
311 rp_ary = (struct rpcrdma_write_array *)
312 &rmsgp->rm_body.rm_chunks[2];
313
314 found_it:
315 if (rp_ary->wc_discrim == 0)
316 return NULL;
317
318 return rp_ary;
319}
320#endif 238#endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79f6f8f3dc0a..78512cfe1fe6 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -116,6 +116,7 @@ void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
116 struct svc_serv *); 116 struct svc_serv *);
117int svc_create_xprt(struct svc_serv *, const char *, struct net *, 117int svc_create_xprt(struct svc_serv *, const char *, struct net *,
118 const int, const unsigned short, int); 118 const int, const unsigned short, int);
119void svc_xprt_do_enqueue(struct svc_xprt *xprt);
119void svc_xprt_enqueue(struct svc_xprt *xprt); 120void svc_xprt_enqueue(struct svc_xprt *xprt);
120void svc_xprt_put(struct svc_xprt *xprt); 121void svc_xprt_put(struct svc_xprt *xprt);
121void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); 122void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b17613052cc3..b7b279b54504 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -49,7 +49,7 @@
49 * a single chunk type per message is supported currently. 49 * a single chunk type per message is supported currently.
50 */ 50 */
51#define RPCRDMA_MIN_SLOT_TABLE (2U) 51#define RPCRDMA_MIN_SLOT_TABLE (2U)
52#define RPCRDMA_DEF_SLOT_TABLE (32U) 52#define RPCRDMA_DEF_SLOT_TABLE (128U)
53#define RPCRDMA_MAX_SLOT_TABLE (256U) 53#define RPCRDMA_MAX_SLOT_TABLE (256U)
54 54
55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */ 55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 38874729dc5f..7ba7dccaf0e7 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,7 +351,15 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages);
351extern int kswapd_run(int nid); 351extern int kswapd_run(int nid);
352extern void kswapd_stop(int nid); 352extern void kswapd_stop(int nid);
353#ifdef CONFIG_MEMCG 353#ifdef CONFIG_MEMCG
354extern int mem_cgroup_swappiness(struct mem_cgroup *mem); 354static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
355{
356 /* root ? */
357 if (mem_cgroup_disabled() || !memcg->css.parent)
358 return vm_swappiness;
359
360 return memcg->swappiness;
361}
362
355#else 363#else
356static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) 364static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
357{ 365{
@@ -373,9 +381,9 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
373/* linux/mm/page_io.c */ 381/* linux/mm/page_io.c */
374extern int swap_readpage(struct page *); 382extern int swap_readpage(struct page *);
375extern int swap_writepage(struct page *page, struct writeback_control *wbc); 383extern int swap_writepage(struct page *page, struct writeback_control *wbc);
376extern void end_swap_bio_write(struct bio *bio, int err); 384extern void end_swap_bio_write(struct bio *bio);
377extern int __swap_writepage(struct page *page, struct writeback_control *wbc, 385extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
378 void (*end_write_func)(struct bio *, int)); 386 bio_end_io_t end_write_func);
379extern int swap_set_page_dirty(struct page *page); 387extern int swap_set_page_dirty(struct page *page);
380 388
381int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 389int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
@@ -398,6 +406,9 @@ extern void free_pages_and_swap_cache(struct page **, int);
398extern struct page *lookup_swap_cache(swp_entry_t); 406extern struct page *lookup_swap_cache(swp_entry_t);
399extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, 407extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
400 struct vm_area_struct *vma, unsigned long addr); 408 struct vm_area_struct *vma, unsigned long addr);
409extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
410 struct vm_area_struct *vma, unsigned long addr,
411 bool *new_page_allocated);
401extern struct page *swapin_readahead(swp_entry_t, gfp_t, 412extern struct page *swapin_readahead(swp_entry_t, gfp_t,
402 struct vm_area_struct *vma, unsigned long addr); 413 struct vm_area_struct *vma, unsigned long addr);
403 414
@@ -431,6 +442,7 @@ extern unsigned int count_swap_pages(int, int);
431extern sector_t map_swap_page(struct page *, struct block_device **); 442extern sector_t map_swap_page(struct page *, struct block_device **);
432extern sector_t swapdev_block(int, pgoff_t); 443extern sector_t swapdev_block(int, pgoff_t);
433extern int page_swapcount(struct page *); 444extern int page_swapcount(struct page *);
445extern int swp_swapcount(swp_entry_t entry);
434extern struct swap_info_struct *page_swap_info(struct page *); 446extern struct swap_info_struct *page_swap_info(struct page *);
435extern int reuse_swap_page(struct page *); 447extern int reuse_swap_page(struct page *);
436extern int try_to_free_swap(struct page *); 448extern int try_to_free_swap(struct page *);
@@ -522,6 +534,11 @@ static inline int page_swapcount(struct page *page)
522 return 0; 534 return 0;
523} 535}
524 536
537static inline int swp_swapcount(swp_entry_t entry)
538{
539 return 0;
540}
541
525#define reuse_swap_page(page) (page_mapcount(page) == 1) 542#define reuse_swap_page(page) (page_mapcount(page) == 1)
526 543
527static inline int try_to_free_swap(struct page *page) 544static inline int try_to_free_swap(struct page *page)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index cedf3d3c373f..5c3a5f3e7eec 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -164,6 +164,9 @@ static inline int is_write_migration_entry(swp_entry_t entry)
164#endif 164#endif
165 165
166#ifdef CONFIG_MEMORY_FAILURE 166#ifdef CONFIG_MEMORY_FAILURE
167
168extern atomic_long_t num_poisoned_pages __read_mostly;
169
167/* 170/*
168 * Support for hardware poisoned pages 171 * Support for hardware poisoned pages
169 */ 172 */
@@ -177,6 +180,31 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
177{ 180{
178 return swp_type(entry) == SWP_HWPOISON; 181 return swp_type(entry) == SWP_HWPOISON;
179} 182}
183
184static inline bool test_set_page_hwpoison(struct page *page)
185{
186 return TestSetPageHWPoison(page);
187}
188
189static inline void num_poisoned_pages_inc(void)
190{
191 atomic_long_inc(&num_poisoned_pages);
192}
193
194static inline void num_poisoned_pages_dec(void)
195{
196 atomic_long_dec(&num_poisoned_pages);
197}
198
199static inline void num_poisoned_pages_add(long num)
200{
201 atomic_long_add(num, &num_poisoned_pages);
202}
203
204static inline void num_poisoned_pages_sub(long num)
205{
206 atomic_long_sub(num, &num_poisoned_pages);
207}
180#else 208#else
181 209
182static inline swp_entry_t make_hwpoison_entry(struct page *page) 210static inline swp_entry_t make_hwpoison_entry(struct page *page)
@@ -188,6 +216,15 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
188{ 216{
189 return 0; 217 return 0;
190} 218}
219
220static inline bool test_set_page_hwpoison(struct page *page)
221{
222 return false;
223}
224
225static inline void num_poisoned_pages_inc(void)
226{
227}
191#endif 228#endif
192 229
193#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) 230#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b45c45b8c829..08001317aee7 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -810,6 +810,7 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
810asmlinkage long sys_eventfd(unsigned int count); 810asmlinkage long sys_eventfd(unsigned int count);
811asmlinkage long sys_eventfd2(unsigned int count, int flags); 811asmlinkage long sys_eventfd2(unsigned int count, int flags);
812asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); 812asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
813asmlinkage long sys_userfaultfd(int flags);
813asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); 814asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
814asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); 815asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
815asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, 816asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index c78dcfeaf25f..d4217eff489f 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,7 +86,6 @@ struct st_proto_s {
86extern long st_register(struct st_proto_s *); 86extern long st_register(struct st_proto_s *);
87extern long st_unregister(struct st_proto_s *); 87extern long st_unregister(struct st_proto_s *);
88 88
89extern struct ti_st_plat_data *dt_pdata;
90 89
91/* 90/*
92 * header information used by st_core.c 91 * header information used by st_core.c
diff --git a/include/linux/tick.h b/include/linux/tick.h
index edbfc9a5293e..48d901f83f92 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,22 +147,29 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
147 cpumask_or(mask, mask, tick_nohz_full_mask); 147 cpumask_or(mask, mask, tick_nohz_full_mask);
148} 148}
149 149
150extern void __tick_nohz_full_check(void);
151extern void tick_nohz_full_kick(void); 150extern void tick_nohz_full_kick(void);
152extern void tick_nohz_full_kick_cpu(int cpu); 151extern void tick_nohz_full_kick_cpu(int cpu);
153extern void tick_nohz_full_kick_all(void); 152extern void tick_nohz_full_kick_all(void);
154extern void __tick_nohz_task_switch(struct task_struct *tsk); 153extern void __tick_nohz_task_switch(void);
155#else 154#else
156static inline bool tick_nohz_full_enabled(void) { return false; } 155static inline bool tick_nohz_full_enabled(void) { return false; }
157static inline bool tick_nohz_full_cpu(int cpu) { return false; } 156static inline bool tick_nohz_full_cpu(int cpu) { return false; }
158static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } 157static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
159static inline void __tick_nohz_full_check(void) { }
160static inline void tick_nohz_full_kick_cpu(int cpu) { } 158static inline void tick_nohz_full_kick_cpu(int cpu) { }
161static inline void tick_nohz_full_kick(void) { } 159static inline void tick_nohz_full_kick(void) { }
162static inline void tick_nohz_full_kick_all(void) { } 160static inline void tick_nohz_full_kick_all(void) { }
163static inline void __tick_nohz_task_switch(struct task_struct *tsk) { } 161static inline void __tick_nohz_task_switch(void) { }
164#endif 162#endif
165 163
164static inline const struct cpumask *housekeeping_cpumask(void)
165{
166#ifdef CONFIG_NO_HZ_FULL
167 if (tick_nohz_full_enabled())
168 return housekeeping_mask;
169#endif
170 return cpu_possible_mask;
171}
172
166static inline bool is_housekeeping_cpu(int cpu) 173static inline bool is_housekeeping_cpu(int cpu)
167{ 174{
168#ifdef CONFIG_NO_HZ_FULL 175#ifdef CONFIG_NO_HZ_FULL
@@ -181,16 +188,10 @@ static inline void housekeeping_affine(struct task_struct *t)
181#endif 188#endif
182} 189}
183 190
184static inline void tick_nohz_full_check(void) 191static inline void tick_nohz_task_switch(void)
185{
186 if (tick_nohz_full_enabled())
187 __tick_nohz_full_check();
188}
189
190static inline void tick_nohz_task_switch(struct task_struct *tsk)
191{ 192{
192 if (tick_nohz_full_enabled()) 193 if (tick_nohz_full_enabled())
193 __tick_nohz_task_switch(tsk); 194 __tick_nohz_task_switch();
194} 195}
195 196
196#endif 197#endif
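Usage sketch (not part of the patch; the example_* name is an assumption): the new housekeeping_cpumask() helper gives callers the set of CPUs that still take the periodic tick, falling back to cpu_possible_mask when NO_HZ_FULL is off, as shown above.

#include <linux/cpumask.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/* Queue deferred bookkeeping on a CPU that keeps the periodic tick,
 * leaving nohz_full CPUs undisturbed on CONFIG_NO_HZ_FULL kernels. */
static void example_queue_on_housekeeping_cpu(struct work_struct *work)
{
	int cpu = cpumask_any(housekeeping_cpumask());

	schedule_work_on(cpu, work);
}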
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 77b5df2acd2a..367d5af899e8 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,11 +12,18 @@ typedef __s64 time64_t;
12 */ 12 */
13#if __BITS_PER_LONG == 64 13#if __BITS_PER_LONG == 64
14# define timespec64 timespec 14# define timespec64 timespec
15#define itimerspec64 itimerspec
15#else 16#else
16struct timespec64 { 17struct timespec64 {
17 time64_t tv_sec; /* seconds */ 18 time64_t tv_sec; /* seconds */
18 long tv_nsec; /* nanoseconds */ 19 long tv_nsec; /* nanoseconds */
19}; 20};
21
22struct itimerspec64 {
23 struct timespec64 it_interval;
24 struct timespec64 it_value;
25};
26
20#endif 27#endif
21 28
22/* Parameters used to convert the timespec values: */ 29/* Parameters used to convert the timespec values: */
@@ -45,6 +52,16 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
45 return ts; 52 return ts;
46} 53}
47 54
55static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
56{
57 return *its64;
58}
59
60static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
61{
62 return *its;
63}
64
48# define timespec64_equal timespec_equal 65# define timespec64_equal timespec_equal
49# define timespec64_compare timespec_compare 66# define timespec64_compare timespec_compare
50# define set_normalized_timespec64 set_normalized_timespec 67# define set_normalized_timespec64 set_normalized_timespec
@@ -77,6 +94,24 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
77 return ret; 94 return ret;
78} 95}
79 96
97static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
98{
99 struct itimerspec ret;
100
101 ret.it_interval = timespec64_to_timespec(its64->it_interval);
102 ret.it_value = timespec64_to_timespec(its64->it_value);
103 return ret;
104}
105
106static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
107{
108 struct itimerspec64 ret;
109
110 ret.it_interval = timespec_to_timespec64(its->it_interval);
111 ret.it_value = timespec_to_timespec64(its->it_value);
112 return ret;
113}
114
80static inline int timespec64_equal(const struct timespec64 *a, 115static inline int timespec64_equal(const struct timespec64 *a,
81 const struct timespec64 *b) 116 const struct timespec64 *b)
82{ 117{
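A hedged usage sketch for the new itimerspec64 conversion helpers (the example_* names and the internal-state layout are assumptions): a driver can keep its interval/value pair y2038-safe internally and convert only at the legacy ABI boundary.

#include <linux/time64.h>

static struct itimerspec64 example_state;	/* hypothetical internal state */

/* Legacy read path: narrow the 64-bit representation for old interfaces. */
static void example_get_setting(struct itimerspec *out)
{
	*out = itimerspec64_to_itimerspec(&example_state);
}

/* Write path: widen to timespec64-based bookkeeping as early as possible. */
static void example_set_setting(struct itimerspec *in)
{
	example_state = itimerspec_to_itimerspec64(in);
}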
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 6e191e4e6ab6..ba0ae09cbb21 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -18,10 +18,17 @@ extern int do_sys_settimeofday(const struct timespec *tv,
18 * Kernel time accessors 18 * Kernel time accessors
19 */ 19 */
20unsigned long get_seconds(void); 20unsigned long get_seconds(void);
21struct timespec current_kernel_time(void); 21struct timespec64 current_kernel_time64(void);
22/* does not take xtime_lock */ 22/* does not take xtime_lock */
23struct timespec __current_kernel_time(void); 23struct timespec __current_kernel_time(void);
24 24
25static inline struct timespec current_kernel_time(void)
26{
27 struct timespec64 now = current_kernel_time64();
28
29 return timespec64_to_timespec(now);
30}
31
25/* 32/*
26 * timespec based interfaces 33 * timespec based interfaces
27 */ 34 */
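A minimal sketch of the intended call pattern (function name assumed): new code can use the timespec64-returning accessor directly, while the inline wrapper above keeps existing current_kernel_time() callers building unchanged.

#include <linux/printk.h>
#include <linux/time.h>
#include <linux/timekeeping.h>

static void example_log_coarse_time(void)
{
	/* Coarse, tick-granular time; safe past 2038 on 32-bit builds. */
	struct timespec64 now = current_kernel_time64();

	pr_info("coarse time: %lld.%09ld\n",
		(long long)now.tv_sec, now.tv_nsec);
}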
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 1063c850dbab..ed27917cabc9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -243,6 +243,7 @@ enum {
243 TRACE_EVENT_FL_USE_CALL_FILTER_BIT, 243 TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
244 TRACE_EVENT_FL_TRACEPOINT_BIT, 244 TRACE_EVENT_FL_TRACEPOINT_BIT,
245 TRACE_EVENT_FL_KPROBE_BIT, 245 TRACE_EVENT_FL_KPROBE_BIT,
246 TRACE_EVENT_FL_UPROBE_BIT,
246}; 247};
247 248
248/* 249/*
@@ -257,6 +258,7 @@ enum {
257 * USE_CALL_FILTER - For trace internal events, don't use file filter 258 * USE_CALL_FILTER - For trace internal events, don't use file filter
258 * TRACEPOINT - Event is a tracepoint 259 * TRACEPOINT - Event is a tracepoint
259 * KPROBE - Event is a kprobe 260 * KPROBE - Event is a kprobe
261 * UPROBE - Event is a uprobe
260 */ 262 */
261enum { 263enum {
262 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), 264 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -267,8 +269,11 @@ enum {
267 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), 269 TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
268 TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), 270 TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
269 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), 271 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
272 TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
270}; 273};
271 274
275#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
276
272struct trace_event_call { 277struct trace_event_call {
273 struct list_head list; 278 struct list_head list;
274 struct trace_event_class *class; 279 struct trace_event_class *class;
@@ -542,7 +547,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
542 event_triggers_post_call(file, tt); 547 event_triggers_post_call(file, tt);
543} 548}
544 549
545#ifdef CONFIG_BPF_SYSCALL 550#ifdef CONFIG_BPF_EVENTS
546unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx); 551unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
547#else 552#else
548static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx) 553static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
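With the new UPROBE flag bit, code that treats kprobe and uprobe events alike can test both through TRACE_EVENT_FL_UKPROBE; a minimal sketch (the helper name is an assumption):

#include <linux/trace_events.h>

static bool example_is_kernel_or_user_probe(struct trace_event_call *call)
{
	/* True for both kprobe-based and uprobe-based events. */
	return (call->flags & TRACE_EVENT_FL_UKPROBE) != 0;
}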
diff --git a/include/linux/tty.h b/include/linux/tty.h
index ad6c8913aa3e..d072ded41678 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -709,4 +709,10 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {}
709static inline void proc_tty_unregister_driver(struct tty_driver *d) {} 709static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
710#endif 710#endif
711 711
712#define tty_debug(tty, f, args...) \
713 do { \
714 printk(KERN_DEBUG "%s: %s: " f, __func__, \
715 tty_name(tty), ##args); \
716 } while (0)
717
712#endif 718#endif
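The new tty_debug() macro prefixes messages with the calling function and the tty name, so call sites stay short; a hypothetical call site (function name assumed):

#include <linux/tty.h>

static void example_note_throttle(struct tty_struct *tty)
{
	/* Expands to printk(KERN_DEBUG "example_note_throttle: <tty>: ..."). */
	tty_debug(tty, "throttling, receive_room=%d\n", tty->receive_room);
}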
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 92e337c18839..161052477f77 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -296,7 +296,7 @@ struct tty_operations {
296struct tty_driver { 296struct tty_driver {
297 int magic; /* magic number for this structure */ 297 int magic; /* magic number for this structure */
298 struct kref kref; /* Reference management */ 298 struct kref kref; /* Reference management */
299 struct cdev *cdevs; 299 struct cdev **cdevs;
300 struct module *owner; 300 struct module *owner;
301 const char *driver_name; 301 const char *driver_name;
302 const char *name; 302 const char *name;
diff --git a/include/linux/types.h b/include/linux/types.h
index 8715287c3b1f..c314989d9158 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,6 +212,9 @@ struct callback_head {
212}; 212};
213#define rcu_head callback_head 213#define rcu_head callback_head
214 214
215typedef void (*rcu_callback_t)(struct rcu_head *head);
216typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
217
215/* clocksource cycle base type */ 218/* clocksource cycle base type */
216typedef u64 cycle_t; 219typedef u64 cycle_t;
217 220
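The rcu_callback_t and call_rcu_func_t typedefs let RCU-related wrappers spell out their parameters without repeating full function-pointer types; a sketch under assumed example_* names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_obj {
	struct rcu_head rcu;
	int payload;
};

/* An rcu_callback_t: frees the object once a grace period has elapsed. */
static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

/* Callers can pass call_rcu (or another flavor) as the call_rcu_func_t. */
static void example_defer_free(struct example_obj *obj, call_rcu_func_t crf)
{
	crf(&obj->rcu, example_free_rcu);
}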
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c138607..d6f2c2c5b043 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -129,4 +129,6 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
129extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); 129extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
130extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); 130extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
131 131
132extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
133
132#endif /* __LINUX_UACCESS_H__ */ 134#endif /* __LINUX_UACCESS_H__ */
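strncpy_from_unsafe() copies a NUL-terminated string from an address that may fault (useful from tracing contexts) instead of oopsing; a hedged sketch with an assumed helper name:

#include <linux/printk.h>
#include <linux/uaccess.h>

static void example_dump_string(const void *unsafe_ptr)
{
	char buf[64];
	long len = strncpy_from_unsafe(buf, unsafe_ptr, sizeof(buf));

	/* A positive return means buf now holds a NUL-terminated copy. */
	if (len > 0)
		pr_info("probed string: %s\n", buf);
}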
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 60beb5dc7977..0bdc72f36905 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -92,6 +92,22 @@ struct uprobe_task {
92 unsigned int depth; 92 unsigned int depth;
93}; 93};
94 94
95struct return_instance {
96 struct uprobe *uprobe;
97 unsigned long func;
98 unsigned long stack; /* stack pointer */
99 unsigned long orig_ret_vaddr; /* original return address */
100 bool chained; /* true, if instance is nested */
101
102 struct return_instance *next; /* keep as stack */
103};
104
105enum rp_check {
106 RP_CHECK_CALL,
107 RP_CHECK_CHAIN_CALL,
108 RP_CHECK_RET,
109};
110
95struct xol_area; 111struct xol_area;
96 112
97struct uprobes_state { 113struct uprobes_state {
@@ -128,6 +144,7 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 144extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 145extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); 146extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
147extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
131extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); 148extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
132extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 149extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
133 void *src, unsigned long len); 150 void *src, unsigned long len);
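arch_uretprobe_is_alive() lets an architecture decide whether a pending return_instance still corresponds to a live stack frame. A conservative fallback an arch might start from could look roughly like this; nothing beyond the declaration above is assumed to exist:

#include <linux/ptrace.h>
#include <linux/uprobes.h>

/* Conservative stub: never prune return instances. A real implementation
 * would typically compare ret->stack against the stack pointer in regs,
 * honoring the RP_CHECK_* context passed in. */
bool __weak arch_uretprobe_is_alive(struct return_instance *ret,
				    enum rp_check ctx, struct pt_regs *regs)
{
	return true;
}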
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ab94f78c4dd1..a41833cd184c 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -19,8 +19,11 @@ struct ci_hdrc_platform_data {
19 enum usb_phy_interface phy_mode; 19 enum usb_phy_interface phy_mode;
20 unsigned long flags; 20 unsigned long flags;
21#define CI_HDRC_REGS_SHARED BIT(0) 21#define CI_HDRC_REGS_SHARED BIT(0)
22#define CI_HDRC_DISABLE_DEVICE_STREAMING BIT(1)
22#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2) 23#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
23#define CI_HDRC_DISABLE_STREAMING BIT(3) 24#define CI_HDRC_DISABLE_HOST_STREAMING BIT(3)
25#define CI_HDRC_DISABLE_STREAMING (CI_HDRC_DISABLE_DEVICE_STREAMING | \
26 CI_HDRC_DISABLE_HOST_STREAMING)
24 /* 27 /*
25 * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, 28 * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
26 * but otg is not supported (no register otgsc). 29 * but otg is not supported (no register otgsc).
@@ -29,12 +32,22 @@ struct ci_hdrc_platform_data {
29#define CI_HDRC_IMX28_WRITE_FIX BIT(5) 32#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
30#define CI_HDRC_FORCE_FULLSPEED BIT(6) 33#define CI_HDRC_FORCE_FULLSPEED BIT(6)
31#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7) 34#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7)
35#define CI_HDRC_SET_NON_ZERO_TTHA BIT(8)
36#define CI_HDRC_OVERRIDE_AHB_BURST BIT(9)
37#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
38#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
32 enum usb_dr_mode dr_mode; 39 enum usb_dr_mode dr_mode;
33#define CI_HDRC_CONTROLLER_RESET_EVENT 0 40#define CI_HDRC_CONTROLLER_RESET_EVENT 0
34#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 41#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
35 void (*notify_event) (struct ci_hdrc *ci, unsigned event); 42 void (*notify_event) (struct ci_hdrc *ci, unsigned event);
36 struct regulator *reg_vbus; 43 struct regulator *reg_vbus;
44 struct usb_otg_caps ci_otg_caps;
37 bool tpl_support; 45 bool tpl_support;
46 /* interrupt threshold setting */
47 u32 itc_setting;
48 u32 ahb_burst_config;
49 u32 tx_burst_size;
50 u32 rx_burst_size;
38}; 51};
39 52
40/* Default offset of capability registers */ 53/* Default offset of capability registers */
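A hypothetical board-file sketch using the split streaming flags and the new burst/threshold fields; every value below is made up and would need to match the SoC documentation:

#include <linux/usb/chipidea.h>
#include <linux/usb/otg.h>

static struct ci_hdrc_platform_data example_ci_pdata = {
	.flags			= CI_HDRC_DISABLE_HOST_STREAMING |
				  CI_HDRC_OVERRIDE_AHB_BURST,
	.dr_mode		= USB_DR_MODE_OTG,
	.ahb_burst_config	= 0x0,	/* assumed burst setting */
	.itc_setting		= 1,	/* assumed interrupt threshold */
};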
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2511469a9904..1074b8921a5d 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -228,6 +228,8 @@ struct usb_function {
228 struct list_head list; 228 struct list_head list;
229 DECLARE_BITMAP(endpoints, 32); 229 DECLARE_BITMAP(endpoints, 32);
230 const struct usb_function_instance *fi; 230 const struct usb_function_instance *fi;
231
232 unsigned int bind_deactivated:1;
231}; 233};
232 234
233int usb_add_function(struct usb_configuration *, struct usb_function *); 235int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 4f3dfb7d0654..c14a69b36d27 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -141,10 +141,49 @@ struct usb_ep_ops {
141}; 141};
142 142
143/** 143/**
144 * struct usb_ep_caps - endpoint capabilities description
145 * @type_control:Endpoint supports control type (reserved for ep0).
146 * @type_iso:Endpoint supports isochronous transfers.
147 * @type_bulk:Endpoint supports bulk transfers.
148 * @type_int:Endpoint supports interrupt transfers.
149 * @dir_in:Endpoint supports IN direction.
150 * @dir_out:Endpoint supports OUT direction.
151 */
152struct usb_ep_caps {
153 unsigned type_control:1;
154 unsigned type_iso:1;
155 unsigned type_bulk:1;
156 unsigned type_int:1;
157 unsigned dir_in:1;
158 unsigned dir_out:1;
159};
160
161#define USB_EP_CAPS_TYPE_CONTROL 0x01
162#define USB_EP_CAPS_TYPE_ISO 0x02
163#define USB_EP_CAPS_TYPE_BULK 0x04
164#define USB_EP_CAPS_TYPE_INT 0x08
165#define USB_EP_CAPS_TYPE_ALL \
166 (USB_EP_CAPS_TYPE_ISO | USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
167#define USB_EP_CAPS_DIR_IN 0x01
168#define USB_EP_CAPS_DIR_OUT 0x02
169#define USB_EP_CAPS_DIR_ALL (USB_EP_CAPS_DIR_IN | USB_EP_CAPS_DIR_OUT)
170
171#define USB_EP_CAPS(_type, _dir) \
172 { \
173 .type_control = !!(_type & USB_EP_CAPS_TYPE_CONTROL), \
174 .type_iso = !!(_type & USB_EP_CAPS_TYPE_ISO), \
175 .type_bulk = !!(_type & USB_EP_CAPS_TYPE_BULK), \
176 .type_int = !!(_type & USB_EP_CAPS_TYPE_INT), \
177 .dir_in = !!(_dir & USB_EP_CAPS_DIR_IN), \
178 .dir_out = !!(_dir & USB_EP_CAPS_DIR_OUT), \
179 }
180
181/**
144 * struct usb_ep - device side representation of USB endpoint 182 * struct usb_ep - device side representation of USB endpoint
145 * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk" 183 * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
146 * @ops: Function pointers used to access hardware-specific operations. 184 * @ops: Function pointers used to access hardware-specific operations.
147 * @ep_list:the gadget's ep_list holds all of its endpoints 185 * @ep_list:the gadget's ep_list holds all of its endpoints
186 * @caps:The structure describing types and directions supported by the endpoint.
148 * @maxpacket:The maximum packet size used on this endpoint. The initial 187 * @maxpacket:The maximum packet size used on this endpoint. The initial
149 * value can sometimes be reduced (hardware allowing), according to 188 * value can sometimes be reduced (hardware allowing), according to
150 * the endpoint descriptor used to configure the endpoint. 189 * the endpoint descriptor used to configure the endpoint.
@@ -167,12 +206,15 @@ struct usb_ep_ops {
167 * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list, 206 * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
168 * and is accessed only in response to a driver setup() callback. 207 * and is accessed only in response to a driver setup() callback.
169 */ 208 */
209
170struct usb_ep { 210struct usb_ep {
171 void *driver_data; 211 void *driver_data;
172 212
173 const char *name; 213 const char *name;
174 const struct usb_ep_ops *ops; 214 const struct usb_ep_ops *ops;
175 struct list_head ep_list; 215 struct list_head ep_list;
216 struct usb_ep_caps caps;
217 bool claimed;
176 unsigned maxpacket:16; 218 unsigned maxpacket:16;
177 unsigned maxpacket_limit:16; 219 unsigned maxpacket_limit:16;
178 unsigned max_streams:16; 220 unsigned max_streams:16;
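A sketch of how a UDC driver might fill the new capability field for its endpoint table, so the core can match endpoints against descriptors instead of parsing endpoint names (the array and its layout are illustrative):

#include <linux/usb/gadget.h>

static const struct usb_ep_caps example_ep_caps[] = {
	/* ep0: control only, both directions. */
	USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL),
	/* epXin: any non-control transfer type, IN only. */
	USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN),
	/* epXout: any non-control transfer type, OUT only. */
	USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT),
};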
@@ -492,6 +534,9 @@ struct usb_gadget_ops {
492 int (*udc_start)(struct usb_gadget *, 534 int (*udc_start)(struct usb_gadget *,
493 struct usb_gadget_driver *); 535 struct usb_gadget_driver *);
494 int (*udc_stop)(struct usb_gadget *); 536 int (*udc_stop)(struct usb_gadget *);
537 struct usb_ep *(*match_ep)(struct usb_gadget *,
538 struct usb_endpoint_descriptor *,
539 struct usb_ss_ep_comp_descriptor *);
495}; 540};
496 541
497/** 542/**
@@ -511,6 +556,7 @@ struct usb_gadget_ops {
511 * @dev: Driver model state for this abstract device. 556 * @dev: Driver model state for this abstract device.
512 * @out_epnum: last used out ep number 557 * @out_epnum: last used out ep number
513 * @in_epnum: last used in ep number 558 * @in_epnum: last used in ep number
559 * @otg_caps: OTG capabilities of this gadget.
514 * @sg_supported: true if we can handle scatter-gather 560 * @sg_supported: true if we can handle scatter-gather
515 * @is_otg: True if the USB device port uses a Mini-AB jack, so that the 561 * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
516 * gadget driver must provide a USB OTG descriptor. 562 * gadget driver must provide a USB OTG descriptor.
@@ -526,6 +572,9 @@ struct usb_gadget_ops {
526 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to 572 * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
527 * MaxPacketSize. 573 * MaxPacketSize.
528 * @is_selfpowered: if the gadget is self-powered. 574 * @is_selfpowered: if the gadget is self-powered.
575 * @deactivated: True if gadget is deactivated - in deactivated state it cannot
576 * be connected.
577 * @connected: True if gadget is connected.
529 * 578 *
530 * Gadgets have a mostly-portable "gadget driver" implementing device 579 * Gadgets have a mostly-portable "gadget driver" implementing device
531 * functions, handling all usb configurations and interfaces. Gadget 580 * functions, handling all usb configurations and interfaces. Gadget
@@ -559,6 +608,7 @@ struct usb_gadget {
559 struct device dev; 608 struct device dev;
560 unsigned out_epnum; 609 unsigned out_epnum;
561 unsigned in_epnum; 610 unsigned in_epnum;
611 struct usb_otg_caps *otg_caps;
562 612
563 unsigned sg_supported:1; 613 unsigned sg_supported:1;
564 unsigned is_otg:1; 614 unsigned is_otg:1;
@@ -567,7 +617,12 @@ struct usb_gadget {
567 unsigned a_hnp_support:1; 617 unsigned a_hnp_support:1;
568 unsigned a_alt_hnp_support:1; 618 unsigned a_alt_hnp_support:1;
569 unsigned quirk_ep_out_aligned_size:1; 619 unsigned quirk_ep_out_aligned_size:1;
620 unsigned quirk_altset_not_supp:1;
621 unsigned quirk_stall_not_supp:1;
622 unsigned quirk_zlp_not_supp:1;
570 unsigned is_selfpowered:1; 623 unsigned is_selfpowered:1;
624 unsigned deactivated:1;
625 unsigned connected:1;
571}; 626};
572#define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) 627#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
573 628
@@ -584,7 +639,6 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
584#define gadget_for_each_ep(tmp, gadget) \ 639#define gadget_for_each_ep(tmp, gadget) \
585 list_for_each_entry(tmp, &(gadget)->ep_list, ep_list) 640 list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
586 641
587
588/** 642/**
589 * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget 643 * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
590 * requires quirk_ep_out_aligned_size, otherwise returns len. 644 * requires quirk_ep_out_aligned_size, otherwise returns len.
@@ -603,6 +657,34 @@ usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
603} 657}
604 658
605/** 659/**
660 * gadget_is_altset_supported - return true iff the hardware supports
661 * altsettings
662 * @g: controller to check for quirk
663 */
664static inline int gadget_is_altset_supported(struct usb_gadget *g)
665{
666 return !g->quirk_altset_not_supp;
667}
668
669/**
670 * gadget_is_stall_supported - return true iff the hardware supports stalling
671 * @g: controller to check for quirk
672 */
673static inline int gadget_is_stall_supported(struct usb_gadget *g)
674{
675 return !g->quirk_stall_not_supp;
676}
677
678/**
679 * gadget_is_zlp_supported - return true iff the hardware supports zlp
680 * @g: controller to check for quirk
681 */
682static inline int gadget_is_zlp_supported(struct usb_gadget *g)
683{
684 return !g->quirk_zlp_not_supp;
685}
686
687/**
606 * gadget_is_dualspeed - return true iff the hardware handles high speed 688 * gadget_is_dualspeed - return true iff the hardware handles high speed
607 * @g: controller that might support both high and full speeds 689 * @g: controller that might support both high and full speeds
608 */ 690 */
@@ -771,9 +853,24 @@ static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
771 */ 853 */
772static inline int usb_gadget_connect(struct usb_gadget *gadget) 854static inline int usb_gadget_connect(struct usb_gadget *gadget)
773{ 855{
856 int ret;
857
774 if (!gadget->ops->pullup) 858 if (!gadget->ops->pullup)
775 return -EOPNOTSUPP; 859 return -EOPNOTSUPP;
776 return gadget->ops->pullup(gadget, 1); 860
861 if (gadget->deactivated) {
862 /*
863 * If gadget is deactivated we only save new state.
864 * Gadget will be connected automatically after activation.
865 */
866 gadget->connected = true;
867 return 0;
868 }
869
870 ret = gadget->ops->pullup(gadget, 1);
871 if (!ret)
872 gadget->connected = 1;
873 return ret;
777} 874}
778 875
779/** 876/**
@@ -784,20 +881,88 @@ static inline int usb_gadget_connect(struct usb_gadget *gadget)
784 * as a disconnect (when a VBUS session is active). Not all systems 881 * as a disconnect (when a VBUS session is active). Not all systems
785 * support software pullup controls. 882 * support software pullup controls.
786 * 883 *
884 * Returns zero on success, else negative errno.
885 */
886static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
887{
888 int ret;
889
890 if (!gadget->ops->pullup)
891 return -EOPNOTSUPP;
892
893 if (gadget->deactivated) {
894 /*
895 * If gadget is deactivated we only save new state.
896 * Gadget will stay disconnected after activation.
897 */
898 gadget->connected = false;
899 return 0;
900 }
901
902 ret = gadget->ops->pullup(gadget, 0);
903 if (!ret)
904 gadget->connected = 0;
905 return ret;
906}
907
908/**
909 * usb_gadget_deactivate - deactivate function which is not ready to work
910 * @gadget: the peripheral being deactivated
911 *
787 * This routine may be used during the gadget driver bind() call to prevent 912 * This routine may be used during the gadget driver bind() call to prevent
788 * the peripheral from ever being visible to the USB host, unless later 913 * the peripheral from ever being visible to the USB host, unless later
789 * usb_gadget_connect() is called. For example, user mode components may 914 * usb_gadget_activate() is called. For example, user mode components may
790 * need to be activated before the system can talk to hosts. 915 * need to be activated before the system can talk to hosts.
791 * 916 *
792 * Returns zero on success, else negative errno. 917 * Returns zero on success, else negative errno.
793 */ 918 */
794static inline int usb_gadget_disconnect(struct usb_gadget *gadget) 919static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
795{ 920{
796 if (!gadget->ops->pullup) 921 int ret;
797 return -EOPNOTSUPP; 922
798 return gadget->ops->pullup(gadget, 0); 923 if (gadget->deactivated)
924 return 0;
925
926 if (gadget->connected) {
927 ret = usb_gadget_disconnect(gadget);
928 if (ret)
929 return ret;
930 /*
931 * If gadget was being connected before deactivation, we want
932 * to reconnect it in usb_gadget_activate().
933 */
934 gadget->connected = true;
935 }
936 gadget->deactivated = true;
937
938 return 0;
799} 939}
800 940
941/**
942 * usb_gadget_activate - activate function which is not ready to work
943 * @gadget: the peripheral being activated
944 *
945 * This routine activates gadget which was previously deactivated with
946 * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
947 *
948 * Returns zero on success, else negative errno.
949 */
950static inline int usb_gadget_activate(struct usb_gadget *gadget)
951{
952 if (!gadget->deactivated)
953 return 0;
954
955 gadget->deactivated = false;
956
957 /*
958 * If gadget has been connected before deactivation, or became connected
959 * while it was being deactivated, we call usb_gadget_connect().
960 */
961 if (gadget->connected)
962 return usb_gadget_connect(gadget);
963
964 return 0;
965}
801 966
802/*-------------------------------------------------------------------------*/ 967/*-------------------------------------------------------------------------*/
803 968
@@ -1002,6 +1167,10 @@ int usb_assign_descriptors(struct usb_function *f,
1002 struct usb_descriptor_header **ss); 1167 struct usb_descriptor_header **ss);
1003void usb_free_all_descriptors(struct usb_function *f); 1168void usb_free_all_descriptors(struct usb_function *f);
1004 1169
1170struct usb_descriptor_header *usb_otg_descriptor_alloc(
1171 struct usb_gadget *gadget);
1172int usb_otg_descriptor_init(struct usb_gadget *gadget,
1173 struct usb_descriptor_header *otg_desc);
1005/*-------------------------------------------------------------------------*/ 1174/*-------------------------------------------------------------------------*/
1006 1175
1007/* utility to simplify map/unmap of usb_requests to/from DMA */ 1176/* utility to simplify map/unmap of usb_requests to/from DMA */
@@ -1034,6 +1203,21 @@ extern void usb_gadget_giveback_request(struct usb_ep *ep,
1034 1203
1035/*-------------------------------------------------------------------------*/ 1204/*-------------------------------------------------------------------------*/
1036 1205
1206/* utility to find endpoint by name */
1207
1208extern struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g,
1209 const char *name);
1210
1211/*-------------------------------------------------------------------------*/
1212
1213/* utility to check if endpoint caps match descriptor needs */
1214
1215extern int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
1216 struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
1217 struct usb_ss_ep_comp_descriptor *ep_comp);
1218
1219/*-------------------------------------------------------------------------*/
1220
1037/* utility to update vbus status for udc core, it may be scheduled */ 1221/* utility to update vbus status for udc core, it may be scheduled */
1038extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status); 1222extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
1039 1223
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index c9aa7792de10..d2784c10bfe2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -564,9 +564,9 @@ extern void usb_ep0_reinit(struct usb_device *);
564 564
565/*-------------------------------------------------------------------------*/ 565/*-------------------------------------------------------------------------*/
566 566
567/* class requests from USB 3.0 hub spec, table 10-5 */ 567/* class requests from USB 3.1 hub spec, table 10-7 */
568#define SetHubDepth (0x3000 | HUB_SET_DEPTH) 568#define SetHubDepth (0x2000 | HUB_SET_DEPTH)
569#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT) 569#define GetPortErrorCount (0xa300 | HUB_GET_PORT_ERR_COUNT)
570 570
571/* 571/*
572 * Generic bandwidth allocation constants/support 572 * Generic bandwidth allocation constants/support
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index e55a1504266e..8c8f6854c993 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -128,7 +128,7 @@ struct msm_otg_platform_data {
128 */ 128 */
129struct msm_usb_cable { 129struct msm_usb_cable {
130 struct notifier_block nb; 130 struct notifier_block nb;
131 struct extcon_specific_cable_nb conn; 131 struct extcon_dev *extcon;
132}; 132};
133 133
134/** 134/**
@@ -155,6 +155,10 @@ struct msm_usb_cable {
155 * starting controller using usbcmd run/stop bit. 155 * starting controller using usbcmd run/stop bit.
156 * @vbus: VBUS signal state tracking, using extcon framework 156 * @vbus: VBUS signal state tracking, using extcon framework
157 * @id: ID signal state tracking, using extcon framework 157 * @id: ID signal state tracking, using extcon framework
158 * @switch_gpio: Descriptor for GPIO used to control external Dual
159 * SPDT USB Switch.
160 * @reboot: Used to inform the driver to route USB D+/D- line to Device
161 * connector
158 */ 162 */
159struct msm_otg { 163struct msm_otg {
160 struct usb_phy phy; 164 struct usb_phy phy;
@@ -188,6 +192,9 @@ struct msm_otg {
188 192
189 struct msm_usb_cable vbus; 193 struct msm_usb_cable vbus;
190 struct msm_usb_cable id; 194 struct msm_usb_cable id;
195
196 struct gpio_desc *switch_gpio;
197 struct notifier_block reboot;
191}; 198};
192 199
193#endif 200#endif
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index cfe0528cdbb1..8c5a818ec244 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -15,6 +15,8 @@
15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); 15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); 16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
17bool of_usb_host_tpl_support(struct device_node *np); 17bool of_usb_host_tpl_support(struct device_node *np);
18int of_usb_update_otg_caps(struct device_node *np,
19 struct usb_otg_caps *otg_caps);
18#else 20#else
19static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) 21static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
20{ 22{
@@ -30,6 +32,11 @@ static inline bool of_usb_host_tpl_support(struct device_node *np)
30{ 32{
31 return false; 33 return false;
32} 34}
35static inline int of_usb_update_otg_caps(struct device_node *np,
36 struct usb_otg_caps *otg_caps)
37{
38 return 0;
39}
33#endif 40#endif
34 41
35#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) 42#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 52661c5da690..bd1dcf816100 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -41,6 +41,21 @@ struct usb_otg {
41 41
42}; 42};
43 43
44/**
45 * struct usb_otg_caps - describes the otg capabilities of the device
46 * @otg_rev: The OTG revision number the device is compliant with,
47 * expressed in binary-coded decimal (e.g. 2.0 is 0200H).
48 * @hnp_support: Indicates if the device supports HNP.
49 * @srp_support: Indicates if the device supports SRP.
50 * @adp_support: Indicates if the device supports ADP.
51 */
52struct usb_otg_caps {
53 u16 otg_rev;
54 bool hnp_support;
55 bool srp_support;
56 bool adp_support;
57};
58
44extern const char *usb_otg_state_string(enum usb_otg_state state); 59extern const char *usb_otg_state_string(enum usb_otg_state state);
45 60
46/* Context: can sleep */ 61/* Context: can sleep */
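A sketch of a controller driver advertising its OTG abilities through the new structure (values are illustrative; device-tree trimming would go through of_usb_update_otg_caps() from usb/of.h):

#include <linux/usb/otg.h>

static struct usb_otg_caps example_otg_caps = {
	.otg_rev	= 0x0200,	/* OTG 2.0, binary-coded decimal */
	.hnp_support	= true,
	.srp_support	= true,
	.adp_support	= false,
};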
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
new file mode 100644
index 000000000000..587480ad41b7
--- /dev/null
+++ b/include/linux/userfaultfd_k.h
@@ -0,0 +1,85 @@
1/*
2 * include/linux/userfaultfd_k.h
3 *
4 * Copyright (C) 2015 Red Hat, Inc.
5 *
6 */
7
8#ifndef _LINUX_USERFAULTFD_K_H
9#define _LINUX_USERFAULTFD_K_H
10
11#ifdef CONFIG_USERFAULTFD
12
13#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
14
15#include <linux/fcntl.h>
16
17/*
18 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
19 * new flags, since they might collide with O_* ones. We want
20 * to re-use O_* flags that couldn't possibly have a meaning
21 * from userfaultfd, in order to leave a free define-space for
22 * shared O_* flags.
23 */
24#define UFFD_CLOEXEC O_CLOEXEC
25#define UFFD_NONBLOCK O_NONBLOCK
26
27#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
28#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
29
30extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
31 unsigned int flags, unsigned long reason);
32
33extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
34 unsigned long src_start, unsigned long len);
35extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
36 unsigned long dst_start,
37 unsigned long len);
38
39/* mm helpers */
40static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
41 struct vm_userfaultfd_ctx vm_ctx)
42{
43 return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
44}
45
46static inline bool userfaultfd_missing(struct vm_area_struct *vma)
47{
48 return vma->vm_flags & VM_UFFD_MISSING;
49}
50
51static inline bool userfaultfd_armed(struct vm_area_struct *vma)
52{
53 return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
54}
55
56#else /* CONFIG_USERFAULTFD */
57
58/* mm helpers */
59static inline int handle_userfault(struct vm_area_struct *vma,
60 unsigned long address,
61 unsigned int flags,
62 unsigned long reason)
63{
64 return VM_FAULT_SIGBUS;
65}
66
67static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
68 struct vm_userfaultfd_ctx vm_ctx)
69{
70 return true;
71}
72
73static inline bool userfaultfd_missing(struct vm_area_struct *vma)
74{
75 return false;
76}
77
78static inline bool userfaultfd_armed(struct vm_area_struct *vma)
79{
80 return false;
81}
82
83#endif /* CONFIG_USERFAULTFD */
84
85#endif /* _LINUX_USERFAULTFD_K_H */
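A minimal sketch of how a fault path can hand a missing-page fault to user space with the helpers above; the real callers live in mm/, and both the helper name and the fallback branch here are purely illustrative:

#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

static int example_handle_missing(struct vm_area_struct *vma,
				  unsigned long address, unsigned int flags)
{
	/* VMA registered for missing-page tracking: let user space fill it. */
	if (userfaultfd_missing(vma))
		return handle_userfault(vma, address, flags, VM_UFFD_MISSING);

	return VM_FAULT_SIGBUS;	/* assumed fallback for this sketch only */
}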
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
index ac34819214f9..da2049b5161c 100644
--- a/include/linux/verify_pefile.h
+++ b/include/linux/verify_pefile.h
@@ -12,7 +12,11 @@
12#ifndef _LINUX_VERIFY_PEFILE_H 12#ifndef _LINUX_VERIFY_PEFILE_H
13#define _LINUX_VERIFY_PEFILE_H 13#define _LINUX_VERIFY_PEFILE_H
14 14
15#include <crypto/public_key.h>
16
15extern int verify_pefile_signature(const void *pebuf, unsigned pelen, 17extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
16 struct key *trusted_keyring, bool *_trusted); 18 struct key *trusted_keyring,
19 enum key_being_used_for usage,
20 bool *_trusted);
17 21
18#endif /* _LINUX_VERIFY_PEFILE_H */ 22#endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f963a9..d3d077228d4c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,7 +147,8 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
147 147
148typedef int wait_bit_action_f(struct wait_bit_key *); 148typedef int wait_bit_action_f(struct wait_bit_key *);
149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); 150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
151 void *key);
151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 152void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
152void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); 153void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
153void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 154void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -179,7 +180,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
179#define wake_up_poll(x, m) \ 180#define wake_up_poll(x, m) \
180 __wake_up(x, TASK_NORMAL, 1, (void *) (m)) 181 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
181#define wake_up_locked_poll(x, m) \ 182#define wake_up_locked_poll(x, m) \
182 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) 183 __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
183#define wake_up_interruptible_poll(x, m) \ 184#define wake_up_interruptible_poll(x, m) \
184 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) 185 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
185#define wake_up_interruptible_sync_poll(x, m) \ 186#define wake_up_interruptible_sync_poll(x, m) \
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index f47feada5b42..d74a0e907b9e 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -140,12 +140,4 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
140extern int watchdog_register_device(struct watchdog_device *); 140extern int watchdog_register_device(struct watchdog_device *);
141extern void watchdog_unregister_device(struct watchdog_device *); 141extern void watchdog_unregister_device(struct watchdog_device *);
142 142
143#ifdef CONFIG_HARDLOCKUP_DETECTOR
144void watchdog_nmi_disable_all(void);
145void watchdog_nmi_enable_all(void);
146#else
147static inline void watchdog_nmi_disable_all(void) {}
148static inline void watchdog_nmi_enable_all(void) {}
149#endif
150
151#endif /* ifndef _LINUX_WATCHDOG_H */ 143#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 738b30b39b68..0197358f1e81 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -265,7 +265,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
265/** 265/**
266 * delayed_work_pending - Find out whether a delayable work item is currently 266 * delayed_work_pending - Find out whether a delayable work item is currently
267 * pending 267 * pending
268 * @work: The work item in question 268 * @w: The work item in question
269 */ 269 */
270#define delayed_work_pending(w) \ 270#define delayed_work_pending(w) \
271 work_pending(&(w)->work) 271 work_pending(&(w)->work)
@@ -366,7 +366,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
366 * @fmt: printf format for the name of the workqueue 366 * @fmt: printf format for the name of the workqueue
367 * @flags: WQ_* flags 367 * @flags: WQ_* flags
368 * @max_active: max in-flight work items, 0 for default 368 * @max_active: max in-flight work items, 0 for default
369 * @args: args for @fmt 369 * @args...: args for @fmt
370 * 370 *
371 * Allocate a workqueue with the specified parameters. For detailed 371 * Allocate a workqueue with the specified parameters. For detailed
372 * information on WQ_* flags, please refer to Documentation/workqueue.txt. 372 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
398 * alloc_ordered_workqueue - allocate an ordered workqueue 398 * alloc_ordered_workqueue - allocate an ordered workqueue
399 * @fmt: printf format for the name of the workqueue 399 * @fmt: printf format for the name of the workqueue
400 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) 400 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
401 * @args: args for @fmt 401 * @args...: args for @fmt
402 * 402 *
403 * Allocate an ordered workqueue. An ordered workqueue executes at 403 * Allocate an ordered workqueue. An ordered workqueue executes at
404 * most one work item at any given time in the queued order. They are 404 * most one work item at any given time in the queued order. They are
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
index f9d41a6e361f..e183a0a65ac1 100644
--- a/include/linux/zbud.h
+++ b/include/linux/zbud.h
@@ -9,7 +9,7 @@ struct zbud_ops {
9 int (*evict)(struct zbud_pool *pool, unsigned long handle); 9 int (*evict)(struct zbud_pool *pool, unsigned long handle);
10}; 10};
11 11
12struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops); 12struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
13void zbud_destroy_pool(struct zbud_pool *pool); 13void zbud_destroy_pool(struct zbud_pool *pool);
14int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, 14int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
15 unsigned long *handle); 15 unsigned long *handle);
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index d30eff3d84d5..42f8ec992452 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -36,8 +36,10 @@ enum zpool_mapmode {
36 ZPOOL_MM_DEFAULT = ZPOOL_MM_RW 36 ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
37}; 37};
38 38
39bool zpool_has_pool(char *type);
40
39struct zpool *zpool_create_pool(char *type, char *name, 41struct zpool *zpool_create_pool(char *type, char *name,
40 gfp_t gfp, struct zpool_ops *ops); 42 gfp_t gfp, const struct zpool_ops *ops);
41 43
42char *zpool_get_type(struct zpool *pool); 44char *zpool_get_type(struct zpool *pool);
43 45
@@ -81,7 +83,7 @@ struct zpool_driver {
81 atomic_t refcount; 83 atomic_t refcount;
82 struct list_head list; 84 struct list_head list;
83 85
84 void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops, 86 void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops,
85 struct zpool *zpool); 87 struct zpool *zpool);
86 void (*destroy)(void *pool); 88 void (*destroy)(void *pool);
87 89
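zpool_has_pool() lets a caller probe for a compressed-memory backend before committing to it; a hedged sketch (no eviction ops, helper name assumed):

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/zpool.h>

static struct zpool *example_make_pool(char *type, char *name)
{
	if (!zpool_has_pool(type)) {
		pr_err("zpool backend %s is not available\n", type);
		return NULL;
	}
	/* NULL ops: no evict callback for this illustrative pool. */
	return zpool_create_pool(type, name, GFP_KERNEL, NULL);
}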
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 1338190b5478..6398dfae53f1 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -34,6 +34,11 @@ enum zs_mapmode {
34 */ 34 */
35}; 35};
36 36
37struct zs_pool_stats {
38 /* How many pages were migrated (freed) */
39 unsigned long pages_compacted;
40};
41
37struct zs_pool; 42struct zs_pool;
38 43
39struct zs_pool *zs_create_pool(char *name, gfp_t flags); 44struct zs_pool *zs_create_pool(char *name, gfp_t flags);
@@ -49,4 +54,5 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
49unsigned long zs_get_total_pages(struct zs_pool *pool); 54unsigned long zs_get_total_pages(struct zs_pool *pool);
50unsigned long zs_compact(struct zs_pool *pool); 55unsigned long zs_compact(struct zs_pool *pool);
51 56
57void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
52#endif 58#endif
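A sketch pairing the new statistics hook with compaction, so a caller (for example a sysfs attribute) can report reclaimed pages; the function name is an assumption:

#include <linux/zsmalloc.h>

static unsigned long example_compact_and_report(struct zs_pool *pool)
{
	struct zs_pool_stats stats;

	zs_compact(pool);
	zs_pool_stats(pool, &stats);
	return stats.pages_compacted;	/* pages freed by migration so far */
}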
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 0dc7060f9625..17ddae32060d 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -53,9 +53,13 @@ struct media_file_operations {
53 53
54/** 54/**
55 * struct media_devnode - Media device node 55 * struct media_devnode - Media device node
56 * @fops: pointer to struct media_file_operations with media device ops
57 * @dev: struct device pointer for the media controller device
58 * @cdev: struct cdev pointer for the character device
56 * @parent: parent device 59 * @parent: parent device
57 * @minor: device node minor number 60 * @minor: device node minor number
58 * @flags: flags, combination of the MEDIA_FLAG_* constants 61 * @flags: flags, combination of the MEDIA_FLAG_* constants
62 * @release: release callback called at the end of media_devnode_release()
59 * 63 *
60 * This structure represents a media-related device node. 64 * This structure represents a media-related device node.
61 * 65 *
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
deleted file mode 100644
index 048f8f9117ef..000000000000
--- a/include/media/omap3isp.h
+++ /dev/null
@@ -1,158 +0,0 @@
1/*
2 * omap3isp.h
3 *
4 * TI OMAP3 ISP - Platform data
5 *
6 * Copyright (C) 2011 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#ifndef __MEDIA_OMAP3ISP_H__
27#define __MEDIA_OMAP3ISP_H__
28
29struct i2c_board_info;
30struct isp_device;
31
32enum isp_interface_type {
33 ISP_INTERFACE_PARALLEL,
34 ISP_INTERFACE_CSI2A_PHY2,
35 ISP_INTERFACE_CCP2B_PHY1,
36 ISP_INTERFACE_CCP2B_PHY2,
37 ISP_INTERFACE_CSI2C_PHY1,
38};
39
40enum {
41 ISP_LANE_SHIFT_0 = 0,
42 ISP_LANE_SHIFT_2 = 1,
43 ISP_LANE_SHIFT_4 = 2,
44 ISP_LANE_SHIFT_6 = 3,
45};
46
47/**
48 * struct isp_parallel_cfg - Parallel interface configuration
49 * @data_lane_shift: Data lane shifter
50 * ISP_LANE_SHIFT_0 - CAMEXT[13:0] -> CAM[13:0]
51 * ISP_LANE_SHIFT_2 - CAMEXT[13:2] -> CAM[11:0]
52 * ISP_LANE_SHIFT_4 - CAMEXT[13:4] -> CAM[9:0]
53 * ISP_LANE_SHIFT_6 - CAMEXT[13:6] -> CAM[7:0]
54 * @clk_pol: Pixel clock polarity
55 * 0 - Sample on rising edge, 1 - Sample on falling edge
56 * @hs_pol: Horizontal synchronization polarity
57 * 0 - Active high, 1 - Active low
58 * @vs_pol: Vertical synchronization polarity
59 * 0 - Active high, 1 - Active low
60 * @fld_pol: Field signal polarity
61 * 0 - Positive, 1 - Negative
62 * @data_pol: Data polarity
63 * 0 - Normal, 1 - One's complement
64 */
65struct isp_parallel_cfg {
66 unsigned int data_lane_shift:2;
67 unsigned int clk_pol:1;
68 unsigned int hs_pol:1;
69 unsigned int vs_pol:1;
70 unsigned int fld_pol:1;
71 unsigned int data_pol:1;
72};
73
74enum {
75 ISP_CCP2_PHY_DATA_CLOCK = 0,
76 ISP_CCP2_PHY_DATA_STROBE = 1,
77};
78
79enum {
80 ISP_CCP2_MODE_MIPI = 0,
81 ISP_CCP2_MODE_CCP2 = 1,
82};
83
84/**
85 * struct isp_csiphy_lane: CCP2/CSI2 lane position and polarity
86 * @pos: position of the lane
87 * @pol: polarity of the lane
88 */
89struct isp_csiphy_lane {
90 u8 pos;
91 u8 pol;
92};
93
94#define ISP_CSIPHY1_NUM_DATA_LANES 1
95#define ISP_CSIPHY2_NUM_DATA_LANES 2
96
97/**
98 * struct isp_csiphy_lanes_cfg - CCP2/CSI2 lane configuration
99 * @data: Configuration of one or two data lanes
100 * @clk: Clock lane configuration
101 */
102struct isp_csiphy_lanes_cfg {
103 struct isp_csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
104 struct isp_csiphy_lane clk;
105};
106
107/**
108 * struct isp_ccp2_cfg - CCP2 interface configuration
109 * @strobe_clk_pol: Strobe/clock polarity
110 * 0 - Non Inverted, 1 - Inverted
111 * @crc: Enable the cyclic redundancy check
112 * @ccp2_mode: Enable CCP2 compatibility mode
113 * ISP_CCP2_MODE_MIPI - MIPI-CSI1 mode
114 * ISP_CCP2_MODE_CCP2 - CCP2 mode
115 * @phy_layer: Physical layer selection
116 * ISP_CCP2_PHY_DATA_CLOCK - Data/clock physical layer
117 * ISP_CCP2_PHY_DATA_STROBE - Data/strobe physical layer
118 * @vpclk_div: Video port output clock control
119 */
120struct isp_ccp2_cfg {
121 unsigned int strobe_clk_pol:1;
122 unsigned int crc:1;
123 unsigned int ccp2_mode:1;
124 unsigned int phy_layer:1;
125 unsigned int vpclk_div:2;
126 struct isp_csiphy_lanes_cfg lanecfg;
127};
128
129/**
130 * struct isp_csi2_cfg - CSI2 interface configuration
131 * @crc: Enable the cyclic redundancy check
132 */
133struct isp_csi2_cfg {
134 unsigned crc:1;
135 struct isp_csiphy_lanes_cfg lanecfg;
136};
137
138struct isp_bus_cfg {
139 enum isp_interface_type interface;
140 union {
141 struct isp_parallel_cfg parallel;
142 struct isp_ccp2_cfg ccp2;
143 struct isp_csi2_cfg csi2;
144 } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
145};
146
147struct isp_platform_subdev {
148 struct i2c_board_info *board_info;
149 int i2c_adapter_id;
150 struct isp_bus_cfg *bus;
151};
152
153struct isp_platform_data {
154 struct isp_platform_subdev *subdevs;
155 void (*set_constraints)(struct isp_device *isp, bool enable);
156};
157
158#endif /* __MEDIA_OMAP3ISP_H__ */
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 644bdc61c387..ec921f6538c7 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -69,7 +69,7 @@ enum rc_filter_type {
69 * @rc_map: current scan/key table 69 * @rc_map: current scan/key table
70 * @lock: used to ensure we've filled in all protocol details before 70 * @lock: used to ensure we've filled in all protocol details before
71 * anyone can call show_protocols or store_protocols 71 * anyone can call show_protocols or store_protocols
72 * @devno: unique remote control device number 72 * @minor: unique minor remote control device number
73 * @raw: additional data for raw pulse/space devices 73 * @raw: additional data for raw pulse/space devices
74 * @input_dev: the input child device used to communicate events to userspace 74 * @input_dev: the input child device used to communicate events to userspace
75 * @driver_type: specifies if protocol decoding is done in hardware or software 75 * @driver_type: specifies if protocol decoding is done in hardware or software
@@ -110,7 +110,7 @@ enum rc_filter_type {
110 * @s_tx_mask: set transmitter mask (for devices with multiple tx outputs) 110 * @s_tx_mask: set transmitter mask (for devices with multiple tx outputs)
111 * @s_tx_carrier: set transmit carrier frequency 111 * @s_tx_carrier: set transmit carrier frequency
112 * @s_tx_duty_cycle: set transmit duty cycle (0% - 100%) 112 * @s_tx_duty_cycle: set transmit duty cycle (0% - 100%)
113 * @s_rx_carrier: inform driver about carrier it is expected to handle 113 * @s_rx_carrier_range: inform driver about carrier it is expected to handle
114 * @tx_ir: transmit IR 114 * @tx_ir: transmit IR
115 * @s_idle: enable/disable hardware idle mode, upon which, 115 * @s_idle: enable/disable hardware idle mode, upon which,
116 * device doesn't interrupt host until it sees IR pulses 116 * device doesn't interrupt host until it sees IR pulses
@@ -129,7 +129,7 @@ struct rc_dev {
129 const char *map_name; 129 const char *map_name;
130 struct rc_map rc_map; 130 struct rc_map rc_map;
131 struct mutex lock; 131 struct mutex lock;
132 unsigned long devno; 132 unsigned int minor;
133 struct ir_raw_event_ctrl *raw; 133 struct ir_raw_event_ctrl *raw;
134 struct input_dev *input_dev; 134 struct input_dev *input_dev;
135 enum rc_driver_type driver_type; 135 enum rc_driver_type driver_type;
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 27763d5bd261..7c4bbc4dfab4 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -14,30 +14,28 @@
14enum rc_type { 14enum rc_type {
15 RC_TYPE_UNKNOWN = 0, /* Protocol not known */ 15 RC_TYPE_UNKNOWN = 0, /* Protocol not known */
16 RC_TYPE_OTHER = 1, /* Protocol known but proprietary */ 16 RC_TYPE_OTHER = 1, /* Protocol known but proprietary */
17 RC_TYPE_LIRC = 2, /* Pass raw IR to lirc userspace */ 17 RC_TYPE_RC5 = 2, /* Philips RC5 protocol */
18 RC_TYPE_RC5 = 3, /* Philips RC5 protocol */ 18 RC_TYPE_RC5X = 3, /* Philips RC5x protocol */
19 RC_TYPE_RC5X = 4, /* Philips RC5x protocol */ 19 RC_TYPE_RC5_SZ = 4, /* StreamZap variant of RC5 */
20 RC_TYPE_RC5_SZ = 5, /* StreamZap variant of RC5 */ 20 RC_TYPE_JVC = 5, /* JVC protocol */
21 RC_TYPE_JVC = 6, /* JVC protocol */ 21 RC_TYPE_SONY12 = 6, /* Sony 12 bit protocol */
22 RC_TYPE_SONY12 = 7, /* Sony 12 bit protocol */ 22 RC_TYPE_SONY15 = 7, /* Sony 15 bit protocol */
23 RC_TYPE_SONY15 = 8, /* Sony 15 bit protocol */ 23 RC_TYPE_SONY20 = 8, /* Sony 20 bit protocol */
24 RC_TYPE_SONY20 = 9, /* Sony 20 bit protocol */ 24 RC_TYPE_NEC = 9, /* NEC protocol */
25 RC_TYPE_NEC = 10, /* NEC protocol */ 25 RC_TYPE_SANYO = 10, /* Sanyo protocol */
26 RC_TYPE_SANYO = 11, /* Sanyo protocol */ 26 RC_TYPE_MCE_KBD = 11, /* RC6-ish MCE keyboard/mouse */
27 RC_TYPE_MCE_KBD = 12, /* RC6-ish MCE keyboard/mouse */ 27 RC_TYPE_RC6_0 = 12, /* Philips RC6-0-16 protocol */
28 RC_TYPE_RC6_0 = 13, /* Philips RC6-0-16 protocol */ 28 RC_TYPE_RC6_6A_20 = 13, /* Philips RC6-6A-20 protocol */
29 RC_TYPE_RC6_6A_20 = 14, /* Philips RC6-6A-20 protocol */ 29 RC_TYPE_RC6_6A_24 = 14, /* Philips RC6-6A-24 protocol */
30 RC_TYPE_RC6_6A_24 = 15, /* Philips RC6-6A-24 protocol */ 30 RC_TYPE_RC6_6A_32 = 15, /* Philips RC6-6A-32 protocol */
31 RC_TYPE_RC6_6A_32 = 16, /* Philips RC6-6A-32 protocol */ 31 RC_TYPE_RC6_MCE = 16, /* MCE (Philips RC6-6A-32 subtype) protocol */
32 RC_TYPE_RC6_MCE = 17, /* MCE (Philips RC6-6A-32 subtype) protocol */ 32 RC_TYPE_SHARP = 17, /* Sharp protocol */
33 RC_TYPE_SHARP = 18, /* Sharp protocol */ 33 RC_TYPE_XMP = 18, /* XMP protocol */
34 RC_TYPE_XMP = 19, /* XMP protocol */
35}; 34};
36 35
37#define RC_BIT_NONE 0 36#define RC_BIT_NONE 0
38#define RC_BIT_UNKNOWN (1 << RC_TYPE_UNKNOWN) 37#define RC_BIT_UNKNOWN (1 << RC_TYPE_UNKNOWN)
39#define RC_BIT_OTHER (1 << RC_TYPE_OTHER) 38#define RC_BIT_OTHER (1 << RC_TYPE_OTHER)
40#define RC_BIT_LIRC (1 << RC_TYPE_LIRC)
41#define RC_BIT_RC5 (1 << RC_TYPE_RC5) 39#define RC_BIT_RC5 (1 << RC_TYPE_RC5)
42#define RC_BIT_RC5X (1 << RC_TYPE_RC5X) 40#define RC_BIT_RC5X (1 << RC_TYPE_RC5X)
43#define RC_BIT_RC5_SZ (1 << RC_TYPE_RC5_SZ) 41#define RC_BIT_RC5_SZ (1 << RC_TYPE_RC5_SZ)
@@ -56,7 +54,7 @@ enum rc_type {
56#define RC_BIT_SHARP (1 << RC_TYPE_SHARP) 54#define RC_BIT_SHARP (1 << RC_TYPE_SHARP)
57#define RC_BIT_XMP (1 << RC_TYPE_XMP) 55#define RC_BIT_XMP (1 << RC_TYPE_XMP)
58 56
59#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | RC_BIT_LIRC | \ 57#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | \
60 RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \ 58 RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
61 RC_BIT_JVC | \ 59 RC_BIT_JVC | \
62 RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \ 60 RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
diff --git a/include/media/tc358743.h b/include/media/tc358743.h
new file mode 100644
index 000000000000..4513f2f9cfbc
--- /dev/null
+++ b/include/media/tc358743.h
@@ -0,0 +1,131 @@
1/*
2 * tc358743 - Toshiba HDMI to CSI-2 bridge
3 *
4 * Copyright 2015 Cisco Systems, Inc. and/or its affiliates. All rights
5 * reserved.
6 *
7 * This program is free software; you may redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
12 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
13 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
14 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
15 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
16 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
17 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
18 * SOFTWARE.
19 *
20 */
21
22/*
23 * References (c = chapter, p = page):
24 * REF_01 - Toshiba, TC358743XBG (H2C), Functional Specification, Rev 0.60
25 * REF_02 - Toshiba, TC358743XBG_HDMI-CSI_Tv11p_nm.xls
26 */
27
28#ifndef _TC358743_
29#define _TC358743_
30
31enum tc358743_ddc5v_delays {
32 DDC5V_DELAY_0_MS,
33 DDC5V_DELAY_50_MS,
34 DDC5V_DELAY_100_MS,
35 DDC5V_DELAY_200_MS,
36};
37
38enum tc358743_hdmi_detection_delay {
39 HDMI_MODE_DELAY_0_MS,
40 HDMI_MODE_DELAY_25_MS,
41 HDMI_MODE_DELAY_50_MS,
42 HDMI_MODE_DELAY_100_MS,
43};
44
45struct tc358743_platform_data {
46 /* System clock connected to REFCLK (pin H5) */
47 u32 refclk_hz; /* 26 MHz, 27 MHz or 42 MHz */
48
49 /* DDC +5V debounce delay to avoid spurious interrupts when the cable
50 * is connected.
51 * Sets DDC5V_MODE in register DDC_CTL.
52 * Default: DDC5V_DELAY_0_MS
53 */
54 enum tc358743_ddc5v_delays ddc5v_delay;
55
56 bool enable_hdcp;
57
58 /*
59 * The FIFO size is 512x32, so Toshiba recommends setting the default FIFO
60 * level to somewhere in the middle (e.g. 300), so it can cover speed
61 * mismatches in input and output ports.
62 */
63 u16 fifo_level;
64
 65 /* Bps per lane is (refclk_hz / pll_prd) * pll_fbd */
66 u16 pll_prd;
67 u16 pll_fbd;
68
69 /* CSI
70 * Calculate CSI parameters with REF_02 for the highest resolution your
71 * CSI interface can handle. The driver will adjust the number of CSI
72 * lanes in use according to the pixel clock.
73 *
74 * The values in brackets are calculated with REF_02 when the number of
 75 * bps per lane is 823.5 MHz, and can serve as a starting point.
76 */
77 u32 lineinitcnt; /* (0x00001770) */
78 u32 lptxtimecnt; /* (0x00000005) */
79 u32 tclk_headercnt; /* (0x00001d04) */
80 u32 tclk_trailcnt; /* (0x00000000) */
81 u32 ths_headercnt; /* (0x00000505) */
82 u32 twakeup; /* (0x00004650) */
83 u32 tclk_postcnt; /* (0x00000000) */
84 u32 ths_trailcnt; /* (0x00000004) */
85 u32 hstxvregcnt; /* (0x00000005) */
86
87 /* DVI->HDMI detection delay to avoid unnecessary switching between DVI
88 * and HDMI mode.
89 * Sets HDMI_DET_V in register HDMI_DET.
90 * Default: HDMI_MODE_DELAY_0_MS
91 */
92 enum tc358743_hdmi_detection_delay hdmi_detection_delay;
93
94 /* Reset PHY automatically when TMDS clock goes from DC to AC.
95 * Sets PHY_AUTO_RST2 in register PHY_CTL2.
96 * Default: false
97 */
98 bool hdmi_phy_auto_reset_tmds_detected;
99
100 /* Reset PHY automatically when TMDS clock passes 21 MHz.
101 * Sets PHY_AUTO_RST3 in register PHY_CTL2.
102 * Default: false
103 */
104 bool hdmi_phy_auto_reset_tmds_in_range;
105
106 /* Reset PHY automatically when TMDS clock is detected.
107 * Sets PHY_AUTO_RST4 in register PHY_CTL2.
108 * Default: false
109 */
110 bool hdmi_phy_auto_reset_tmds_valid;
111
112 /* Reset HDMI PHY automatically when hsync period is out of range.
113 * Sets H_PI_RST in register HV_RST.
114 * Default: false
115 */
116 bool hdmi_phy_auto_reset_hsync_out_of_range;
117
118 /* Reset HDMI PHY automatically when vsync period is out of range.
119 * Sets V_PI_RST in register HV_RST.
120 * Default: false
121 */
122 bool hdmi_phy_auto_reset_vsync_out_of_range;
123};
124
125/* custom controls */
126/* Audio sample rate in Hz */
127#define TC358743_CID_AUDIO_SAMPLING_RATE (V4L2_CID_USER_TC358743_BASE + 0)
128/* Audio present status */
129#define TC358743_CID_AUDIO_PRESENT (V4L2_CID_USER_TC358743_BASE + 1)
130
131#endif
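The bracketed reference values in the comments above map directly onto a board file. The sketch below only illustrates how the fields fit together; the instance name, the 27 MHz REFCLK and the PLL dividers are assumptions chosen to match the quoted 823.5 Mbps-per-lane example, not values mandated by this header.

#include <media/tc358743.h>

static struct tc358743_platform_data example_tc358743_pdata = {
	.refclk_hz		= 27000000,	/* assumed 27 MHz REFCLK */
	.ddc5v_delay		= DDC5V_DELAY_100_MS,
	.enable_hdcp		= false,
	.fifo_level		= 300,		/* middle of the 512x32 FIFO */
	/* 27 MHz / 4 * 122 = 823.5 Mbps per lane, per the formula above */
	.pll_prd		= 4,
	.pll_fbd		= 122,
	.lineinitcnt		= 0x00001770,
	.lptxtimecnt		= 0x00000005,
	.tclk_headercnt		= 0x00001d04,
	.tclk_trailcnt		= 0x00000000,
	.ths_headercnt		= 0x00000505,
	.twakeup		= 0x00004650,
	.tclk_postcnt		= 0x00000000,
	.ths_trailcnt		= 0x00000004,
	.hstxvregcnt		= 0x00000005,
	.hdmi_detection_delay	= HDMI_MODE_DELAY_100_MS,
};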
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 768356917bea..1d6d7da4c45d 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -32,7 +32,8 @@ enum v4l2_async_match_type {
 
 /**
  * struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- * @bus_type:	subdevice bus type to select the appropriate matching method
+ *
+ * @match_type:	type of match that will be used
  * @match:	union of per-bus type matching data sets
  * @list:	used to link struct v4l2_async_subdev objects, waiting to be
  *		probed, to a notifier->waiting list
@@ -62,8 +63,9 @@ struct v4l2_async_subdev {
 };
 
 /**
- * v4l2_async_notifier - v4l2_device notifier data
- * @num_subdevs:number of subdevices
+ * struct v4l2_async_notifier - v4l2_device notifier data
+ *
+ * @num_subdevs: number of subdevices
  * @subdevs:	array of pointers to subdevice descriptors
  * @v4l2_dev:	pointer to struct v4l2_device
  * @waiting:	list of struct v4l2_async_subdev, waiting for their drivers
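As the reworked kernel-doc spells out, a bridge describes each sub-device it waits for by @match_type plus the corresponding member of the @match union. A minimal sketch assuming I2C matching; the adapter number and slave address are placeholders:

#include <media/v4l2-async.h>

static struct v4l2_async_subdev example_asd = {
	.match_type	= V4L2_ASYNC_MATCH_I2C,
	.match.i2c	= {
		.adapter_id	= 0,	/* illustrative bus number */
		.address	= 0x0f,	/* illustrative slave address */
	},
};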
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 911f3e542834..da6fe9802fee 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -36,7 +36,8 @@ struct v4l2_subscribed_event;
 struct v4l2_fh;
 struct poll_table_struct;
 
-/** union v4l2_ctrl_ptr - A pointer to a control value.
+/**
+ * union v4l2_ctrl_ptr - A pointer to a control value.
  * @p_s32:	Pointer to a 32-bit signed value.
  * @p_s64:	Pointer to a 64-bit signed value.
  * @p_u8:	Pointer to a 8-bit unsigned value.
@@ -55,30 +56,34 @@ union v4l2_ctrl_ptr {
 	void *p;
 };
 
-/** struct v4l2_ctrl_ops - The control operations that the driver has to provide.
+/**
+ * struct v4l2_ctrl_ops - The control operations that the driver has to provide.
  * @g_volatile_ctrl: Get a new value for this control. Generally only relevant
  *		for volatile (and usually read-only) controls such as a control
  *		that returns the current signal strength which changes
  *		continuously.
  *		If not set, then the currently cached value will be returned.
  * @try_ctrl:	Test whether the control's value is valid. Only relevant when
  *		the usual min/max/step checks are not sufficient.
  * @s_ctrl:	Actually set the new control value. s_ctrl is compulsory. The
  *		ctrl->handler->lock is held when these ops are called, so no
  *		one else can access controls owned by that handler.
  */
 struct v4l2_ctrl_ops {
 	int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl);
 	int (*try_ctrl)(struct v4l2_ctrl *ctrl);
 	int (*s_ctrl)(struct v4l2_ctrl *ctrl);
 };
 
-/** struct v4l2_ctrl_type_ops - The control type operations that the driver has to provide.
+/**
+ * struct v4l2_ctrl_type_ops - The control type operations that the driver
+ *	has to provide.
+ *
  * @equal: return true if both values are equal.
  * @init: initialize the value.
  * @log: log the value.
  * @validate: validate the value. Return 0 on success and a negative value otherwise.
  */
 struct v4l2_ctrl_type_ops {
 	bool (*equal)(const struct v4l2_ctrl *ctrl, u32 idx,
 		      union v4l2_ctrl_ptr ptr1,
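Of the three callbacks documented above, only s_ctrl is mandatory. A minimal sketch for a made-up driver; the foo_* names, the device struct and the register helper are invented for illustration:

#include <media/v4l2-ctrls.h>

static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct foo_dev *dev = container_of(ctrl->handler, struct foo_dev, hdl);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		/* ctrl->handler->lock is already held here */
		return foo_write_reg(dev, FOO_REG_BRIGHT, ctrl->val);
	default:
		return -EINVAL;
	}
}

static const struct v4l2_ctrl_ops foo_ctrl_ops = {
	.s_ctrl = foo_s_ctrl,
};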
@@ -92,74 +97,80 @@ struct v4l2_ctrl_type_ops {
92 97
93typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); 98typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
94 99
95/** struct v4l2_ctrl - The control structure. 100/**
96 * @node: The list node. 101 * struct v4l2_ctrl - The control structure.
97 * @ev_subs: The list of control event subscriptions. 102 * @node: The list node.
98 * @handler: The handler that owns the control. 103 * @ev_subs: The list of control event subscriptions.
99 * @cluster: Point to start of cluster array. 104 * @handler: The handler that owns the control.
100 * @ncontrols: Number of controls in cluster array. 105 * @cluster: Point to start of cluster array.
101 * @done: Internal flag: set for each processed control. 106 * @ncontrols: Number of controls in cluster array.
102 * @is_new: Set when the user specified a new value for this control. It 107 * @done: Internal flag: set for each processed control.
103 * is also set when called from v4l2_ctrl_handler_setup. Drivers 108 * @is_new: Set when the user specified a new value for this control. It
104 * should never set this flag. 109 * is also set when called from v4l2_ctrl_handler_setup. Drivers
105 * @has_changed: Set when the current value differs from the new value. Drivers 110 * should never set this flag.
106 * should never use this flag. 111 * @has_changed: Set when the current value differs from the new value. Drivers
107 * @is_private: If set, then this control is private to its handler and it 112 * should never use this flag.
108 * will not be added to any other handlers. Drivers can set 113 * @is_private: If set, then this control is private to its handler and it
109 * this flag. 114 * will not be added to any other handlers. Drivers can set
110 * @is_auto: If set, then this control selects whether the other cluster 115 * this flag.
111 * members are in 'automatic' mode or 'manual' mode. This is 116 * @is_auto: If set, then this control selects whether the other cluster
112 * used for autogain/gain type clusters. Drivers should never 117 * members are in 'automatic' mode or 'manual' mode. This is
113 * set this flag directly. 118 * used for autogain/gain type clusters. Drivers should never
114 * @is_int: If set, then this control has a simple integer value (i.e. it 119 * set this flag directly.
115 * uses ctrl->val). 120 * @is_int: If set, then this control has a simple integer value (i.e. it
116 * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING. 121 * uses ctrl->val).
117 * @is_ptr: If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES 122 * @is_string: If set, then this control has type V4L2_CTRL_TYPE_STRING.
118 * and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct 123 * @is_ptr: If set, then this control is an array and/or has type >= V4L2_CTRL_COMPOUND_TYPES
119 * v4l2_ext_control uses field p to point to the data. 124 * and/or has type V4L2_CTRL_TYPE_STRING. In other words, struct
120 * @is_array: If set, then this control contains an N-dimensional array. 125 * v4l2_ext_control uses field p to point to the data.
121 * @has_volatiles: If set, then one or more members of the cluster are volatile. 126 * @is_array: If set, then this control contains an N-dimensional array.
122 * Drivers should never touch this flag. 127 * @has_volatiles: If set, then one or more members of the cluster are volatile.
123 * @call_notify: If set, then call the handler's notify function whenever the 128 * Drivers should never touch this flag.
124 * control's value changes. 129 * @call_notify: If set, then call the handler's notify function whenever the
125 * @manual_mode_value: If the is_auto flag is set, then this is the value 130 * control's value changes.
126 * of the auto control that determines if that control is in 131 * @manual_mode_value: If the is_auto flag is set, then this is the value
127 * manual mode. So if the value of the auto control equals this 132 * of the auto control that determines if that control is in
128 * value, then the whole cluster is in manual mode. Drivers should 133 * manual mode. So if the value of the auto control equals this
129 * never set this flag directly. 134 * value, then the whole cluster is in manual mode. Drivers should
130 * @ops: The control ops. 135 * never set this flag directly.
131 * @type_ops: The control type ops. 136 * @ops: The control ops.
132 * @id: The control ID. 137 * @type_ops: The control type ops.
133 * @name: The control name. 138 * @id: The control ID.
134 * @type: The control type. 139 * @name: The control name.
135 * @minimum: The control's minimum value. 140 * @type: The control type.
136 * @maximum: The control's maximum value. 141 * @minimum: The control's minimum value.
137 * @default_value: The control's default value. 142 * @maximum: The control's maximum value.
138 * @step: The control's step value for non-menu controls. 143 * @default_value: The control's default value.
139 * @elems: The number of elements in the N-dimensional array. 144 * @step: The control's step value for non-menu controls.
140 * @elem_size: The size in bytes of the control. 145 * @elems: The number of elements in the N-dimensional array.
141 * @dims: The size of each dimension. 146 * @elem_size: The size in bytes of the control.
142 * @nr_of_dims:The number of dimensions in @dims. 147 * @dims: The size of each dimension.
143 * @menu_skip_mask: The control's skip mask for menu controls. This makes it 148 * @nr_of_dims:The number of dimensions in @dims.
144 * easy to skip menu items that are not valid. If bit X is set, 149 * @menu_skip_mask: The control's skip mask for menu controls. This makes it
145 * then menu item X is skipped. Of course, this only works for 150 * easy to skip menu items that are not valid. If bit X is set,
146 * menus with <= 32 menu items. There are no menus that come 151 * then menu item X is skipped. Of course, this only works for
147 * close to that number, so this is OK. Should we ever need more, 152 * menus with <= 32 menu items. There are no menus that come
148 * then this will have to be extended to a u64 or a bit array. 153 * close to that number, so this is OK. Should we ever need more,
149 * @qmenu: A const char * array for all menu items. Array entries that are 154 * then this will have to be extended to a u64 or a bit array.
150 * empty strings ("") correspond to non-existing menu items (this 155 * @qmenu: A const char * array for all menu items. Array entries that are
151 * is in addition to the menu_skip_mask above). The last entry 156 * empty strings ("") correspond to non-existing menu items (this
152 * must be NULL. 157 * is in addition to the menu_skip_mask above). The last entry
153 * @flags: The control's flags. 158 * must be NULL.
154 * @cur: The control's current value. 159 * @flags: The control's flags.
155 * @val: The control's new s32 value. 160 * @cur: The control's current value.
156 * @val64: The control's new s64 value. 161 * @val: The control's new s32 value.
157 * @priv: The control's private pointer. For use by the driver. It is 162 * @priv: The control's private pointer. For use by the driver. It is
158 * untouched by the control framework. Note that this pointer is 163 * untouched by the control framework. Note that this pointer is
159 * not freed when the control is deleted. Should this be needed 164 * not freed when the control is deleted. Should this be needed
160 * then a new internal bitfield can be added to tell the framework 165 * then a new internal bitfield can be added to tell the framework
161 * to free this pointer. 166 * to free this pointer.
162 */ 167 * @p_cur: The control's current value represented via an union with
168 * provides a standard way of accessing control types
169 * through a pointer.
170 * @p_new: The control's new value represented via an union with provides
171 * a standard way of accessing control types
172 * through a pointer.
173 */
163struct v4l2_ctrl { 174struct v4l2_ctrl {
164 /* Administrative fields */ 175 /* Administrative fields */
165 struct list_head node; 176 struct list_head node;
@@ -210,16 +221,17 @@ struct v4l2_ctrl {
 	union v4l2_ctrl_ptr p_cur;
 };
 
-/** struct v4l2_ctrl_ref - The control reference.
+/**
+ * struct v4l2_ctrl_ref - The control reference.
  * @node:	List node for the sorted list.
  * @next:	Single-link list node for the hash.
  * @ctrl:	The actual control information.
  * @helper:	Pointer to helper struct. Used internally in prepare_ext_ctrls().
  *
  * Each control handler has a list of these refs. The list_head is used to
  * keep a sorted-by-control-ID list of all controls, while the next pointer
  * is used to link the control in the hash's bucket.
  */
 struct v4l2_ctrl_ref {
 	struct list_head node;
 	struct v4l2_ctrl_ref *next;
@@ -227,25 +239,26 @@ struct v4l2_ctrl_ref {
 	struct v4l2_ctrl_helper *helper;
 };
 
-/** struct v4l2_ctrl_handler - The control handler keeps track of all the
+/**
+ * struct v4l2_ctrl_handler - The control handler keeps track of all the
  * controls: both the controls owned by the handler and those inherited
  * from other handlers.
  * @_lock:	Default for "lock".
  * @lock:	Lock to control access to this handler and its controls.
  *		May be replaced by the user right after init.
  * @ctrls:	The list of controls owned by this handler.
  * @ctrl_refs:	The list of control references.
  * @cached:	The last found control reference. It is common that the same
  *		control is needed multiple times, so this is a simple
  *		optimization.
  * @buckets:	Buckets for the hashing. Allows for quick control lookup.
  * @notify:	A notify callback that is called whenever the control changes value.
  *		Note that the handler's lock is held when the notify function
  *		is called!
  * @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
  * @nr_of_buckets: Total number of buckets in the array.
  * @error:	The error code of the first failed control addition.
  */
 struct v4l2_ctrl_handler {
 	struct mutex _lock;
 	struct mutex *lock;
@@ -259,32 +272,35 @@ struct v4l2_ctrl_handler {
 	int error;
 };
 
-/** struct v4l2_ctrl_config - Control configuration structure.
+/**
+ * struct v4l2_ctrl_config - Control configuration structure.
  * @ops:	The control ops.
  * @type_ops: The control type ops. Only needed for compound controls.
  * @id:	The control ID.
  * @name:	The control name.
  * @type:	The control type.
  * @min:	The control's minimum value.
  * @max:	The control's maximum value.
  * @step:	The control's step value for non-menu controls.
  * @def:	The control's default value.
  * @dims:	The size of each dimension.
  * @elem_size:	The size in bytes of the control.
  * @flags:	The control's flags.
  * @menu_skip_mask: The control's skip mask for menu controls. This makes it
  *		easy to skip menu items that are not valid. If bit X is set,
  *		then menu item X is skipped. Of course, this only works for
  *		menus with <= 64 menu items. There are no menus that come
  *		close to that number, so this is OK. Should we ever need more,
  *		then this will have to be extended to a bit array.
  * @qmenu:	A const char * array for all menu items. Array entries that are
  *		empty strings ("") correspond to non-existing menu items (this
  *		is in addition to the menu_skip_mask above). The last entry
  *		must be NULL.
+ * @qmenu_int:	A const s64 integer array for all menu items of the type
+ *		V4L2_CTRL_TYPE_INTEGER_MENU.
  * @is_private: If set, then this control is private to its handler and it
  *		will not be added to any other handlers.
  */
 struct v4l2_ctrl_config {
 	const struct v4l2_ctrl_ops *ops;
 	const struct v4l2_ctrl_type_ops *type_ops;
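Driver-private controls, such as the TC358743_CID_AUDIO_SAMPLING_RATE define earlier in this patch set, are typically described with this structure and registered through v4l2_ctrl_new_custom(). The ops pointer, the handler field, the limits and the flags below are illustrative placeholders:

static const struct v4l2_ctrl_config example_audio_rate_ctrl = {
	.ops	= &foo_ctrl_ops,	/* placeholder ops from the sketch above */
	.id	= TC358743_CID_AUDIO_SAMPLING_RATE,
	.name	= "Audio Sampling Rate",
	.type	= V4L2_CTRL_TYPE_INTEGER,
	.min	= 0,
	.max	= 768000,
	.step	= 1,
	.def	= 0,
	.flags	= V4L2_CTRL_FLAG_READ_ONLY,
};

/* hdl->error records the first failure, so the return value may be ignored */
v4l2_ctrl_new_custom(&state->hdl, &example_audio_rate_ctrl, NULL);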
@@ -304,42 +320,44 @@ struct v4l2_ctrl_config {
304 unsigned int is_private:1; 320 unsigned int is_private:1;
305}; 321};
306 322
307/** v4l2_ctrl_fill() - Fill in the control fields based on the control ID. 323/*
308 * 324 * v4l2_ctrl_fill() - Fill in the control fields based on the control ID.
309 * This works for all standard V4L2 controls. 325 *
310 * For non-standard controls it will only fill in the given arguments 326 * This works for all standard V4L2 controls.
311 * and @name will be NULL. 327 * For non-standard controls it will only fill in the given arguments
312 * 328 * and @name will be NULL.
313 * This function will overwrite the contents of @name, @type and @flags. 329 *
314 * The contents of @min, @max, @step and @def may be modified depending on 330 * This function will overwrite the contents of @name, @type and @flags.
315 * the type. 331 * The contents of @min, @max, @step and @def may be modified depending on
316 * 332 * the type.
317 * Do not use in drivers! It is used internally for backwards compatibility 333 *
318 * control handling only. Once all drivers are converted to use the new 334 * Do not use in drivers! It is used internally for backwards compatibility
319 * control framework this function will no longer be exported. 335 * control handling only. Once all drivers are converted to use the new
320 */ 336 * control framework this function will no longer be exported.
337 */
321void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, 338void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
322 s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags); 339 s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags);
323 340
324 341
325/** v4l2_ctrl_handler_init_class() - Initialize the control handler. 342/**
326 * @hdl: The control handler. 343 * v4l2_ctrl_handler_init_class() - Initialize the control handler.
327 * @nr_of_controls_hint: A hint of how many controls this handler is 344 * @hdl: The control handler.
328 * expected to refer to. This is the total number, so including 345 * @nr_of_controls_hint: A hint of how many controls this handler is
329 * any inherited controls. It doesn't have to be precise, but if 346 * expected to refer to. This is the total number, so including
330 * it is way off, then you either waste memory (too many buckets 347 * any inherited controls. It doesn't have to be precise, but if
331 * are allocated) or the control lookup becomes slower (not enough 348 * it is way off, then you either waste memory (too many buckets
332 * buckets are allocated, so there are more slow list lookups). 349 * are allocated) or the control lookup becomes slower (not enough
333 * It will always work, though. 350 * buckets are allocated, so there are more slow list lookups).
334 * @key: Used by the lock validator if CONFIG_LOCKDEP is set. 351 * It will always work, though.
335 * @name: Used by the lock validator if CONFIG_LOCKDEP is set. 352 * @key: Used by the lock validator if CONFIG_LOCKDEP is set.
336 * 353 * @name: Used by the lock validator if CONFIG_LOCKDEP is set.
337 * Returns an error if the buckets could not be allocated. This error will 354 *
338 * also be stored in @hdl->error. 355 * Returns an error if the buckets could not be allocated. This error will
339 * 356 * also be stored in @hdl->error.
340 * Never use this call directly, always use the v4l2_ctrl_handler_init 357 *
341 * macro that hides the @key and @name arguments. 358 * Never use this call directly, always use the v4l2_ctrl_handler_init
342 */ 359 * macro that hides the @key and @name arguments.
360 */
343int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, 361int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
344 unsigned nr_of_controls_hint, 362 unsigned nr_of_controls_hint,
345 struct lock_class_key *key, const char *name); 363 struct lock_class_key *key, const char *name);
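Taken together, the calls documented in this hunk form the usual probe-time lifecycle: initialize the handler, add controls, check the accumulated error, then push the defaults to the hardware. A condensed sketch for the hypothetical foo driver used above; the matching v4l2_ctrl_handler_free() call also belongs in the remove path:

static int foo_init_controls(struct foo_dev *dev)
{
	struct v4l2_ctrl_handler *hdl = &dev->hdl;

	v4l2_ctrl_handler_init(hdl, 4);		/* hint: about 4 controls */
	v4l2_ctrl_new_std(hdl, &foo_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}
	/* push the current (default) values into the hardware */
	return v4l2_ctrl_handler_setup(hdl);
}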
@@ -361,289 +379,326 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
361 v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL) 379 v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL)
362#endif 380#endif
363 381
364/** v4l2_ctrl_handler_free() - Free all controls owned by the handler and free 382/**
365 * the control list. 383 * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free
366 * @hdl: The control handler. 384 * the control list.
367 * 385 * @hdl: The control handler.
368 * Does nothing if @hdl == NULL. 386 *
369 */ 387 * Does nothing if @hdl == NULL.
388 */
370void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl); 389void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
371 390
372/** v4l2_ctrl_lock() - Helper function to lock the handler 391/**
373 * associated with the control. 392 * v4l2_ctrl_lock() - Helper function to lock the handler
374 * @ctrl: The control to lock. 393 * associated with the control.
375 */ 394 * @ctrl: The control to lock.
395 */
376static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl) 396static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl)
377{ 397{
378 mutex_lock(ctrl->handler->lock); 398 mutex_lock(ctrl->handler->lock);
379} 399}
380 400
381/** v4l2_ctrl_unlock() - Helper function to unlock the handler 401/**
382 * associated with the control. 402 * v4l2_ctrl_unlock() - Helper function to unlock the handler
383 * @ctrl: The control to unlock. 403 * associated with the control.
384 */ 404 * @ctrl: The control to unlock.
405 */
385static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl) 406static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl)
386{ 407{
387 mutex_unlock(ctrl->handler->lock); 408 mutex_unlock(ctrl->handler->lock);
388} 409}
389 410
390/** v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging 411/**
391 * to the handler to initialize the hardware to the current control values. 412 * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
392 * @hdl: The control handler. 413 * to the handler to initialize the hardware to the current control values.
393 * 414 * @hdl: The control handler.
394 * Button controls will be skipped, as are read-only controls. 415 *
395 * 416 * Button controls will be skipped, as are read-only controls.
396 * If @hdl == NULL, then this just returns 0. 417 *
397 */ 418 * If @hdl == NULL, then this just returns 0.
419 */
398int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); 420int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl);
399 421
400/** v4l2_ctrl_handler_log_status() - Log all controls owned by the handler. 422/**
401 * @hdl: The control handler. 423 * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler.
402 * @prefix: The prefix to use when logging the control values. If the 424 * @hdl: The control handler.
403 * prefix does not end with a space, then ": " will be added 425 * @prefix: The prefix to use when logging the control values. If the
404 * after the prefix. If @prefix == NULL, then no prefix will be 426 * prefix does not end with a space, then ": " will be added
405 * used. 427 * after the prefix. If @prefix == NULL, then no prefix will be
406 * 428 * used.
407 * For use with VIDIOC_LOG_STATUS. 429 *
408 * 430 * For use with VIDIOC_LOG_STATUS.
409 * Does nothing if @hdl == NULL. 431 *
410 */ 432 * Does nothing if @hdl == NULL.
433 */
411void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, 434void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
412 const char *prefix); 435 const char *prefix);
413 436
414/** v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2 437/**
415 * control. 438 * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2
416 * @hdl: The control handler. 439 * control.
417 * @cfg: The control's configuration data. 440 * @hdl: The control handler.
418 * @priv: The control's driver-specific private data. 441 * @cfg: The control's configuration data.
419 * 442 * @priv: The control's driver-specific private data.
420 * If the &v4l2_ctrl struct could not be allocated then NULL is returned 443 *
421 * and @hdl->error is set to the error code (if it wasn't set already). 444 * If the &v4l2_ctrl struct could not be allocated then NULL is returned
422 */ 445 * and @hdl->error is set to the error code (if it wasn't set already).
446 */
423struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, 447struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
424 const struct v4l2_ctrl_config *cfg, void *priv); 448 const struct v4l2_ctrl_config *cfg, void *priv);
425 449
426/** v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control. 450/**
427 * @hdl: The control handler. 451 * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu control.
428 * @ops: The control ops. 452 * @hdl: The control handler.
429 * @id: The control ID. 453 * @ops: The control ops.
430 * @min: The control's minimum value. 454 * @id: The control ID.
431 * @max: The control's maximum value. 455 * @min: The control's minimum value.
432 * @step: The control's step value 456 * @max: The control's maximum value.
433 * @def: The control's default value. 457 * @step: The control's step value
434 * 458 * @def: The control's default value.
435 * If the &v4l2_ctrl struct could not be allocated, or the control 459 *
436 * ID is not known, then NULL is returned and @hdl->error is set to the 460 * If the &v4l2_ctrl struct could not be allocated, or the control
437 * appropriate error code (if it wasn't set already). 461 * ID is not known, then NULL is returned and @hdl->error is set to the
438 * 462 * appropriate error code (if it wasn't set already).
439 * If @id refers to a menu control, then this function will return NULL. 463 *
440 * 464 * If @id refers to a menu control, then this function will return NULL.
441 * Use v4l2_ctrl_new_std_menu() when adding menu controls. 465 *
442 */ 466 * Use v4l2_ctrl_new_std_menu() when adding menu controls.
467 */
443struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, 468struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
444 const struct v4l2_ctrl_ops *ops, 469 const struct v4l2_ctrl_ops *ops,
445 u32 id, s64 min, s64 max, u64 step, s64 def); 470 u32 id, s64 min, s64 max, u64 step, s64 def);
446 471
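The difference between the two constructors is easiest to see side by side: a non-menu control takes a full min/max/step/default range, while a menu control takes only a maximum index, a skip mask and a default item. The IDs and ranges below are illustrative:

/* integer control: min 0, max 255, step 1, default 128 */
v4l2_ctrl_new_std(hdl, &foo_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 128);

/* menu control: items 0..3, no items skipped (mask 0), default item 0 */
v4l2_ctrl_new_std_menu(hdl, &foo_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY,
		       V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
		       V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);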
447/** v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control. 472/**
448 * @hdl: The control handler. 473 * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 menu control.
449 * @ops: The control ops. 474 * @hdl: The control handler.
450 * @id: The control ID. 475 * @ops: The control ops.
451 * @max: The control's maximum value. 476 * @id: The control ID.
452 * @mask: The control's skip mask for menu controls. This makes it 477 * @max: The control's maximum value.
453 * easy to skip menu items that are not valid. If bit X is set, 478 * @mask: The control's skip mask for menu controls. This makes it
454 * then menu item X is skipped. Of course, this only works for 479 * easy to skip menu items that are not valid. If bit X is set,
455 * menus with <= 64 menu items. There are no menus that come 480 * then menu item X is skipped. Of course, this only works for
456 * close to that number, so this is OK. Should we ever need more, 481 * menus with <= 64 menu items. There are no menus that come
457 * then this will have to be extended to a bit array. 482 * close to that number, so this is OK. Should we ever need more,
458 * @def: The control's default value. 483 * then this will have to be extended to a bit array.
459 * 484 * @def: The control's default value.
460 * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value 485 *
461 * determines which menu items are to be skipped. 486 * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value
462 * 487 * determines which menu items are to be skipped.
463 * If @id refers to a non-menu control, then this function will return NULL. 488 *
464 */ 489 * If @id refers to a non-menu control, then this function will return NULL.
490 */
465struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, 491struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
466 const struct v4l2_ctrl_ops *ops, 492 const struct v4l2_ctrl_ops *ops,
467 u32 id, u8 max, u64 mask, u8 def); 493 u32 id, u8 max, u64 mask, u8 def);
468 494
469/** v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control 495/**
470 * with driver specific menu. 496 * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control
471 * @hdl: The control handler. 497 * with driver specific menu.
472 * @ops: The control ops. 498 * @hdl: The control handler.
473 * @id: The control ID. 499 * @ops: The control ops.
474 * @max: The control's maximum value. 500 * @id: The control ID.
475 * @mask: The control's skip mask for menu controls. This makes it 501 * @max: The control's maximum value.
476 * easy to skip menu items that are not valid. If bit X is set, 502 * @mask: The control's skip mask for menu controls. This makes it
477 * then menu item X is skipped. Of course, this only works for 503 * easy to skip menu items that are not valid. If bit X is set,
478 * menus with <= 64 menu items. There are no menus that come 504 * then menu item X is skipped. Of course, this only works for
479 * close to that number, so this is OK. Should we ever need more, 505 * menus with <= 64 menu items. There are no menus that come
480 * then this will have to be extended to a bit array. 506 * close to that number, so this is OK. Should we ever need more,
481 * @def: The control's default value. 507 * then this will have to be extended to a bit array.
482 * @qmenu: The new menu. 508 * @def: The control's default value.
483 * 509 * @qmenu: The new menu.
484 * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific 510 *
485 * menu of this control. 511 * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver specific
486 * 512 * menu of this control.
487 */ 513 *
514 */
488struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl, 515struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
489 const struct v4l2_ctrl_ops *ops, u32 id, u8 max, 516 const struct v4l2_ctrl_ops *ops, u32 id, u8 max,
490 u64 mask, u8 def, const char * const *qmenu); 517 u64 mask, u8 def, const char * const *qmenu);
491 518
492/** v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control. 519/**
493 * @hdl: The control handler. 520 * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
494 * @ops: The control ops. 521 * @hdl: The control handler.
495 * @id: The control ID. 522 * @ops: The control ops.
496 * @max: The control's maximum value. 523 * @id: The control ID.
497 * @def: The control's default value. 524 * @max: The control's maximum value.
498 * @qmenu_int: The control's menu entries. 525 * @def: The control's default value.
499 * 526 * @qmenu_int: The control's menu entries.
500 * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionaly 527 *
501 * takes as an argument an array of integers determining the menu items. 528 * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionaly
502 * 529 * takes as an argument an array of integers determining the menu items.
503 * If @id refers to a non-integer-menu control, then this function will return NULL. 530 *
504 */ 531 * If @id refers to a non-integer-menu control, then this function will return NULL.
532 */
505struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, 533struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
506 const struct v4l2_ctrl_ops *ops, 534 const struct v4l2_ctrl_ops *ops,
507 u32 id, u8 max, u8 def, const s64 *qmenu_int); 535 u32 id, u8 max, u8 def, const s64 *qmenu_int);
508 536
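Integer menus carry the item values themselves as 64-bit integers, which is what the @qmenu_int array is for. A sketch using link frequencies, a common user of this control type; the frequencies are placeholders:

static const s64 example_link_freqs[] = { 148500000, 297000000 };

v4l2_ctrl_new_int_menu(hdl, &foo_ctrl_ops, V4L2_CID_LINK_FREQ,
		       ARRAY_SIZE(example_link_freqs) - 1, 0,
		       example_link_freqs);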
509/** v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler. 537/**
510 * @hdl: The control handler. 538 * v4l2_ctrl_add_ctrl() - Add a control from another handler to this handler.
511 * @ctrl: The control to add. 539 * @hdl: The control handler.
512 * 540 * @ctrl: The control to add.
513 * It will return NULL if it was unable to add the control reference. 541 *
514 * If the control already belonged to the handler, then it will do 542 * It will return NULL if it was unable to add the control reference.
515 * nothing and just return @ctrl. 543 * If the control already belonged to the handler, then it will do
516 */ 544 * nothing and just return @ctrl.
545 */
517struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl, 546struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
518 struct v4l2_ctrl *ctrl); 547 struct v4l2_ctrl *ctrl);
519 548
520/** v4l2_ctrl_add_handler() - Add all controls from handler @add to 549/**
521 * handler @hdl. 550 * v4l2_ctrl_add_handler() - Add all controls from handler @add to
522 * @hdl: The control handler. 551 * handler @hdl.
523 * @add: The control handler whose controls you want to add to 552 * @hdl: The control handler.
524 * the @hdl control handler. 553 * @add: The control handler whose controls you want to add to
525 * @filter: This function will filter which controls should be added. 554 * the @hdl control handler.
526 * 555 * @filter: This function will filter which controls should be added.
527 * Does nothing if either of the two handlers is a NULL pointer. 556 *
528 * If @filter is NULL, then all controls are added. Otherwise only those 557 * Does nothing if either of the two handlers is a NULL pointer.
529 * controls for which @filter returns true will be added. 558 * If @filter is NULL, then all controls are added. Otherwise only those
530 * In case of an error @hdl->error will be set to the error code (if it 559 * controls for which @filter returns true will be added.
531 * wasn't set already). 560 * In case of an error @hdl->error will be set to the error code (if it
532 */ 561 * wasn't set already).
562 */
533int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, 563int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
534 struct v4l2_ctrl_handler *add, 564 struct v4l2_ctrl_handler *add,
535 bool (*filter)(const struct v4l2_ctrl *ctrl)); 565 bool (*filter)(const struct v4l2_ctrl *ctrl));
536 566
537/** v4l2_ctrl_radio_filter() - Standard filter for radio controls. 567/**
538 * @ctrl: The control that is filtered. 568 * v4l2_ctrl_radio_filter() - Standard filter for radio controls.
539 * 569 * @ctrl: The control that is filtered.
540 * This will return true for any controls that are valid for radio device 570 *
541 * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM 571 * This will return true for any controls that are valid for radio device
542 * transmitter class controls. 572 * nodes. Those are all of the V4L2_CID_AUDIO_* user controls and all FM
543 * 573 * transmitter class controls.
544 * This function is to be used with v4l2_ctrl_add_handler(). 574 *
545 */ 575 * This function is to be used with v4l2_ctrl_add_handler().
576 */
546bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl); 577bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl);
547 578
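Inheriting controls from one handler into another is a single call once both handlers exist; @filter decides which controls cross over. A sketch with placeholder handler names:

/* take everything the sensor exposes */
v4l2_ctrl_add_handler(&bridge_hdl, &sensor_hdl, NULL);

/* or, for a radio device node, only the radio-relevant controls */
v4l2_ctrl_add_handler(&radio_hdl, &sensor_hdl, v4l2_ctrl_radio_filter);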
548/** v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster. 579/**
549 * @ncontrols: The number of controls in this cluster. 580 * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging to that cluster.
550 * @controls: The cluster control array of size @ncontrols. 581 * @ncontrols: The number of controls in this cluster.
551 */ 582 * @controls: The cluster control array of size @ncontrols.
583 */
552void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls); 584void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls);
553 585
554 586
555/** v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to 587/**
556 * that cluster and set it up for autofoo/foo-type handling. 588 * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging to
557 * @ncontrols: The number of controls in this cluster. 589 * that cluster and set it up for autofoo/foo-type handling.
558 * @controls: The cluster control array of size @ncontrols. The first control 590 * @ncontrols: The number of controls in this cluster.
559 * must be the 'auto' control (e.g. autogain, autoexposure, etc.) 591 * @controls: The cluster control array of size @ncontrols. The first control
560 * @manual_val: The value for the first control in the cluster that equals the 592 * must be the 'auto' control (e.g. autogain, autoexposure, etc.)
561 * manual setting. 593 * @manual_val: The value for the first control in the cluster that equals the
562 * @set_volatile: If true, then all controls except the first auto control will 594 * manual setting.
563 * be volatile. 595 * @set_volatile: If true, then all controls except the first auto control will
564 * 596 * be volatile.
565 * Use for control groups where one control selects some automatic feature and 597 *
566 * the other controls are only active whenever the automatic feature is turned 598 * Use for control groups where one control selects some automatic feature and
567 * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs 599 * the other controls are only active whenever the automatic feature is turned
568 * red and blue balance, etc. 600 * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs
569 * 601 * red and blue balance, etc.
570 * The behavior of such controls is as follows: 602 *
571 * 603 * The behavior of such controls is as follows:
572 * When the autofoo control is set to automatic, then any manual controls 604 *
573 * are set to inactive and any reads will call g_volatile_ctrl (if the control 605 * When the autofoo control is set to automatic, then any manual controls
574 * was marked volatile). 606 * are set to inactive and any reads will call g_volatile_ctrl (if the control
575 * 607 * was marked volatile).
576 * When the autofoo control is set to manual, then any manual controls will 608 *
577 * be marked active, and any reads will just return the current value without 609 * When the autofoo control is set to manual, then any manual controls will
578 * going through g_volatile_ctrl. 610 * be marked active, and any reads will just return the current value without
579 * 611 * going through g_volatile_ctrl.
580 * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag 612 *
581 * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s) 613 * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
582 * if autofoo is in auto mode. 614 * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
583 */ 615 * if autofoo is in auto mode.
616 */
584void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls, 617void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
585 u8 manual_val, bool set_volatile); 618 u8 manual_val, bool set_volatile);
586 619
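The autogain/gain pairing described here is the canonical user of this helper; the cluster members must sit in consecutive pointers, with the auto control first. A minimal sketch for the hypothetical foo driver:

struct foo_dev {
	struct v4l2_ctrl_handler hdl;
	/* cluster members must be consecutive, auto control first */
	struct v4l2_ctrl *autogain;
	struct v4l2_ctrl *gain;
};

static void foo_setup_gain_cluster(struct foo_dev *dev)
{
	dev->autogain = v4l2_ctrl_new_std(&dev->hdl, &foo_ctrl_ops,
					  V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	dev->gain = v4l2_ctrl_new_std(&dev->hdl, &foo_ctrl_ops,
				      V4L2_CID_GAIN, 0, 255, 1, 32);
	/* manual mode is autogain == 0; gain is volatile while autogain is on */
	v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
}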
587 620
588/** v4l2_ctrl_find() - Find a control with the given ID. 621/**
589 * @hdl: The control handler. 622 * v4l2_ctrl_find() - Find a control with the given ID.
590 * @id: The control ID to find. 623 * @hdl: The control handler.
591 * 624 * @id: The control ID to find.
592 * If @hdl == NULL this will return NULL as well. Will lock the handler so 625 *
593 * do not use from inside &v4l2_ctrl_ops. 626 * If @hdl == NULL this will return NULL as well. Will lock the handler so
594 */ 627 * do not use from inside &v4l2_ctrl_ops.
628 */
595struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); 629struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
596 630
597/** v4l2_ctrl_activate() - Make the control active or inactive. 631/**
598 * @ctrl: The control to (de)activate. 632 * v4l2_ctrl_activate() - Make the control active or inactive.
599 * @active: True if the control should become active. 633 * @ctrl: The control to (de)activate.
600 * 634 * @active: True if the control should become active.
601 * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically. 635 *
602 * Does nothing if @ctrl == NULL. 636 * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically.
603 * This will usually be called from within the s_ctrl op. 637 * Does nothing if @ctrl == NULL.
604 * The V4L2_EVENT_CTRL event will be generated afterwards. 638 * This will usually be called from within the s_ctrl op.
605 * 639 * The V4L2_EVENT_CTRL event will be generated afterwards.
606 * This function assumes that the control handler is locked. 640 *
607 */ 641 * This function assumes that the control handler is locked.
642 */
608void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active); 643void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
609 644
610/** v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed. 645/**
611 * @ctrl: The control to (de)activate. 646 * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
612 * @grabbed: True if the control should become grabbed. 647 * @ctrl: The control to (de)activate.
613 * 648 * @grabbed: True if the control should become grabbed.
614 * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. 649 *
615 * Does nothing if @ctrl == NULL. 650 * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
616 * The V4L2_EVENT_CTRL event will be generated afterwards. 651 * Does nothing if @ctrl == NULL.
617 * This will usually be called when starting or stopping streaming in the 652 * The V4L2_EVENT_CTRL event will be generated afterwards.
618 * driver. 653 * This will usually be called when starting or stopping streaming in the
619 * 654 * driver.
620 * This function assumes that the control handler is not locked and will 655 *
621 * take the lock itself. 656 * This function assumes that the control handler is not locked and will
622 */ 657 * take the lock itself.
658 */
623void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed); 659void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
624 660
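Grabbing is typically toggled around streaming so that format-related controls cannot change mid-stream. A sketch of a subdev s_stream hook; the helper names and control fields are invented:

static int foo_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct foo_dev *dev = to_foo(sd);

	/* refuse changes to format-related controls while streaming */
	v4l2_ctrl_grab(dev->link_freq, enable);
	v4l2_ctrl_grab(dev->pixel_rate, enable);

	return enable ? foo_start(dev) : foo_stop(dev);
}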
625 661
626/** __v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() */ 662/**
663 *__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range()
664 *
665 * @ctrl: The control to update.
666 * @min: The control's minimum value.
667 * @max: The control's maximum value.
668 * @step: The control's step value
669 * @def: The control's default value.
670 *
671 * Update the range of a control on the fly. This works for control types
672 * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
673 * @step value is interpreted as a menu_skip_mask.
674 *
675 * An error is returned if one of the range arguments is invalid for this
676 * control type.
677 *
678 * This function assumes that the control handler is not locked and will
679 * take the lock itself.
680 */
627int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, 681int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
628 s64 min, s64 max, u64 step, s64 def); 682 s64 min, s64 max, u64 step, s64 def);
629 683
630/** v4l2_ctrl_modify_range() - Update the range of a control. 684/**
631 * @ctrl: The control to update. 685 * v4l2_ctrl_modify_range() - Update the range of a control.
632 * @min: The control's minimum value. 686 * @ctrl: The control to update.
633 * @max: The control's maximum value. 687 * @min: The control's minimum value.
634 * @step: The control's step value 688 * @max: The control's maximum value.
635 * @def: The control's default value. 689 * @step: The control's step value
636 * 690 * @def: The control's default value.
637 * Update the range of a control on the fly. This works for control types 691 *
638 * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the 692 * Update the range of a control on the fly. This works for control types
639 * @step value is interpreted as a menu_skip_mask. 693 * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the
640 * 694 * @step value is interpreted as a menu_skip_mask.
641 * An error is returned if one of the range arguments is invalid for this 695 *
642 * control type. 696 * An error is returned if one of the range arguments is invalid for this
643 * 697 * control type.
644 * This function assumes that the control handler is not locked and will 698 *
645 * take the lock itself. 699 * This function assumes that the control handler is not locked and will
646 */ 700 * take the lock itself.
701 */
647static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, 702static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
648 s64 min, s64 max, u64 step, s64 def) 703 s64 min, s64 max, u64 step, s64 def)
649{ 704{
@@ -656,21 +711,23 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
656 return rval; 711 return rval;
657} 712}
658 713
659/** v4l2_ctrl_notify() - Function to set a notify callback for a control. 714/**
660 * @ctrl: The control. 715 * v4l2_ctrl_notify() - Function to set a notify callback for a control.
661 * @notify: The callback function. 716 * @ctrl: The control.
662 * @priv: The callback private handle, passed as argument to the callback. 717 * @notify: The callback function.
663 * 718 * @priv: The callback private handle, passed as argument to the callback.
664 * This function sets a callback function for the control. If @ctrl is NULL, 719 *
665 * then it will do nothing. If @notify is NULL, then the notify callback will 720 * This function sets a callback function for the control. If @ctrl is NULL,
666 * be removed. 721 * then it will do nothing. If @notify is NULL, then the notify callback will
667 * 722 * be removed.
668 * There can be only one notify. If another already exists, then a WARN_ON 723 *
669 * will be issued and the function will do nothing. 724 * There can be only one notify. If another already exists, then a WARN_ON
670 */ 725 * will be issued and the function will do nothing.
726 */
671void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv); 727void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv);
672 728
673/** v4l2_ctrl_get_name() - Get the name of the control 729/**
730 * v4l2_ctrl_get_name() - Get the name of the control
674 * @id: The control ID. 731 * @id: The control ID.
675 * 732 *
676 * This function returns the name of the given control ID or NULL if it isn't 733 * This function returns the name of the given control ID or NULL if it isn't
@@ -678,7 +735,8 @@ void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void
678 */ 735 */
679const char *v4l2_ctrl_get_name(u32 id); 736const char *v4l2_ctrl_get_name(u32 id);
680 737
681/** v4l2_ctrl_get_menu() - Get the menu string array of the control 738/**
739 * v4l2_ctrl_get_menu() - Get the menu string array of the control
682 * @id: The control ID. 740 * @id: The control ID.
683 * 741 *
684 * This function returns the NULL-terminated menu string array name of the 742 * This function returns the NULL-terminated menu string array name of the
@@ -686,7 +744,8 @@ const char *v4l2_ctrl_get_name(u32 id);
686 */ 744 */
687const char * const *v4l2_ctrl_get_menu(u32 id); 745const char * const *v4l2_ctrl_get_menu(u32 id);
688 746
689/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control 747/**
748 * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control
690 * @id: The control ID. 749 * @id: The control ID.
691 * @len: The size of the integer array. 750 * @len: The size of the integer array.
692 * 751 *
@@ -695,29 +754,41 @@ const char * const *v4l2_ctrl_get_menu(u32 id);
695 */ 754 */
696const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); 755const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
697 756
698/** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver. 757/**
699 * @ctrl: The control. 758 * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver.
700 * 759 * @ctrl: The control.
701 * This returns the control's value safely by going through the control 760 *
702 * framework. This function will lock the control's handler, so it cannot be 761 * This returns the control's value safely by going through the control
703 * used from within the &v4l2_ctrl_ops functions. 762 * framework. This function will lock the control's handler, so it cannot be
704 * 763 * used from within the &v4l2_ctrl_ops functions.
705 * This function is for integer type controls only. 764 *
706 */ 765 * This function is for integer type controls only.
766 */
707s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl); 767s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl);
708 768
709/** __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl(). */ 769/**
770 * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl().
771 * @ctrl: The control.
772 * @val: The new value.
773 *
774 * This set the control's new value safely by going through the control
775 * framework. This function will lock the control's handler, so it cannot be
776 * used from within the &v4l2_ctrl_ops functions.
777 *
778 * This function is for integer type controls only.
779 */
710int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val); 780int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val);
781
711/** v4l2_ctrl_s_ctrl() - Helper function to set the control's value from within a driver. 782/** v4l2_ctrl_s_ctrl() - Helper function to set the control's value from within a driver.
712 * @ctrl: The control. 783 * @ctrl: The control.
713 * @val: The new value. 784 * @val: The new value.
714 * 785 *
715 * This set the control's new value safely by going through the control 786 * This set the control's new value safely by going through the control
716 * framework. This function will lock the control's handler, so it cannot be 787 * framework. This function will lock the control's handler, so it cannot be
717 * used from within the &v4l2_ctrl_ops functions. 788 * used from within the &v4l2_ctrl_ops functions.
718 * 789 *
719 * This function is for integer type controls only. 790 * This function is for integer type controls only.
720 */ 791 */
721static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) 792static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
722{ 793{
723 int rval; 794 int rval;
@@ -729,30 +800,45 @@ static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
729 return rval; 800 return rval;
730} 801}
731 802
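Both helpers take the handler lock, so they are meant for driver code outside of the control ops, for instance work queues or status polling. A short sketch with an invented control field:

int bright;

/* push a new value from driver context (not from within v4l2_ctrl_ops) */
v4l2_ctrl_s_ctrl(dev->brightness, 128);

/* read back the value the framework currently holds */
bright = v4l2_ctrl_g_ctrl(dev->brightness);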
732/** v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value from within a driver. 803/**
733 * @ctrl: The control. 804 * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value
734 * 805 * from within a driver.
735 * This returns the control's value safely by going through the control 806 * @ctrl: The control.
736 * framework. This function will lock the control's handler, so it cannot be 807 *
737 * used from within the &v4l2_ctrl_ops functions. 808 * This returns the control's value safely by going through the control
738 * 809 * framework. This function will lock the control's handler, so it cannot be
739 * This function is for 64-bit integer type controls only. 810 * used from within the &v4l2_ctrl_ops functions.
740 */ 811 *
812 * This function is for 64-bit integer type controls only.
813 */
741s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl); 814s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl);
742 815
743/** __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). */ 816/**
817 * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64().
818 *
819 * @ctrl: The control.
820 * @val: The new value.
821 *
 822 * This sets the control's new value safely by going through the control
823 * framework. This function will lock the control's handler, so it cannot be
824 * used from within the &v4l2_ctrl_ops functions.
825 *
826 * This function is for 64-bit integer type controls only.
827 */
744int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val); 828int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val);
745 829
746/** v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value from within a driver. 830/** v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value
747 * @ctrl: The control. 831 * from within a driver.
748 * @val: The new value. 832 *
749 * 833 * @ctrl: The control.
750 * This set the control's new value safely by going through the control 834 * @val: The new value.
751 * framework. This function will lock the control's handler, so it cannot be 835 *
 752 * used from within the &v4l2_ctrl_ops functions. 836 * This sets the control's new value safely by going through the control
753 * 837 * framework. This function will lock the control's handler, so it cannot be
754 * This function is for 64-bit integer type controls only. 838 * used from within the &v4l2_ctrl_ops functions.
755 */ 839 *
840 * This function is for 64-bit integer type controls only.
841 */
756static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) 842static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
757{ 843{
758 int rval; 844 int rval;
@@ -764,19 +850,31 @@ static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
764 return rval; 850 return rval;
765} 851}
766 852
767/** __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). */ 853/** __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string().
854 *
855 * @ctrl: The control.
856 * @s: The new string.
857 *
 858 * This sets the control's new string safely by going through the control
859 * framework. This function will lock the control's handler, so it cannot be
860 * used from within the &v4l2_ctrl_ops functions.
861 *
862 * This function is for string type controls only.
863 */
768int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s); 864int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s);
769 865
770/** v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value from within a driver. 866/** v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value
771 * @ctrl: The control. 867 * from within a driver.
772 * @s: The new string. 868 *
773 * 869 * @ctrl: The control.
774 * This set the control's new string safely by going through the control 870 * @s: The new string.
775 * framework. This function will lock the control's handler, so it cannot be 871 *
776 * used from within the &v4l2_ctrl_ops functions. 872 * This set the control's new string safely by going through the control
777 * 873 * framework. This function will lock the control's handler, so it cannot be
778 * This function is for string type controls only. 874 * used from within the &v4l2_ctrl_ops functions.
779 */ 875 *
876 * This function is for string type controls only.
877 */
780static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) 878static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
781{ 879{
782 int rval; 880 int rval;
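The double-underscore variants documented above exist for call sites that already hold the control handler's lock, for instance when one control is updated from another control's s_ctrl() operation; the plain helpers would deadlock there because they take the lock themselves. A hedged sketch, with a hypothetical firmware-version string control:

#include <media/v4l2-ctrls.h>

/* Caller must already hold fw_ctrl->handler->lock, which is the case inside
 * v4l2_ctrl_ops callbacks that belong to the same handler. */
static void foo_report_fw_version(struct v4l2_ctrl *fw_ctrl, const char *version)
{
        __v4l2_ctrl_s_ctrl_string(fw_ctrl, version);
}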
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index eecd3102a618..b6130b50a0f1 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -23,11 +23,14 @@
23 23
24#include <linux/videodev2.h> 24#include <linux/videodev2.h>
25 25
26/** v4l2_dv_timings_presets: list of all dv_timings presets. 26/**
27 * v4l2_dv_timings_presets: list of all dv_timings presets.
27 */ 28 */
28extern const struct v4l2_dv_timings v4l2_dv_timings_presets[]; 29extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
29 30
30/** v4l2_check_dv_timings_fnc - timings check callback 31/**
32 * v4l2_check_dv_timings_fnc - timings check callback
33 *
31 * @t: the v4l2_dv_timings struct. 34 * @t: the v4l2_dv_timings struct.
32 * @handle: a handle from the driver. 35 * @handle: a handle from the driver.
33 * 36 *
@@ -35,86 +38,101 @@ extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
35 */ 38 */
36typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle); 39typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
37 40
38/** v4l2_valid_dv_timings() - are these timings valid? 41/**
39 * @t: the v4l2_dv_timings struct. 42 * v4l2_valid_dv_timings() - are these timings valid?
40 * @cap: the v4l2_dv_timings_cap capabilities. 43 *
41 * @fnc: callback to check if this timing is OK. May be NULL. 44 * @t: the v4l2_dv_timings struct.
42 * @fnc_handle: a handle that is passed on to @fnc. 45 * @cap: the v4l2_dv_timings_cap capabilities.
43 * 46 * @fnc: callback to check if this timing is OK. May be NULL.
44 * Returns true if the given dv_timings struct is supported by the 47 * @fnc_handle: a handle that is passed on to @fnc.
45 * hardware capabilities and the callback function (if non-NULL), returns 48 *
46 * false otherwise. 49 * Returns true if the given dv_timings struct is supported by the
47 */ 50 * hardware capabilities and the callback function (if non-NULL), returns
51 * false otherwise.
52 */
48bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, 53bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
49 const struct v4l2_dv_timings_cap *cap, 54 const struct v4l2_dv_timings_cap *cap,
50 v4l2_check_dv_timings_fnc fnc, 55 v4l2_check_dv_timings_fnc fnc,
51 void *fnc_handle); 56 void *fnc_handle);
52 57
53/** v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV timings based on capabilities 58/**
54 * @t: the v4l2_enum_dv_timings struct. 59 * v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV
55 * @cap: the v4l2_dv_timings_cap capabilities. 60 * timings based on capabilities
56 * @fnc: callback to check if this timing is OK. May be NULL. 61 *
57 * @fnc_handle: a handle that is passed on to @fnc. 62 * @t: the v4l2_enum_dv_timings struct.
58 * 63 * @cap: the v4l2_dv_timings_cap capabilities.
59 * This enumerates dv_timings using the full list of possible CEA-861 and DMT 64 * @fnc: callback to check if this timing is OK. May be NULL.
60 * timings, filtering out any timings that are not supported based on the 65 * @fnc_handle: a handle that is passed on to @fnc.
61 * hardware capabilities and the callback function (if non-NULL). 66 *
62 * 67 * This enumerates dv_timings using the full list of possible CEA-861 and DMT
63 * If a valid timing for the given index is found, it will fill in @t and 68 * timings, filtering out any timings that are not supported based on the
64 * return 0, otherwise it returns -EINVAL. 69 * hardware capabilities and the callback function (if non-NULL).
65 */ 70 *
71 * If a valid timing for the given index is found, it will fill in @t and
72 * return 0, otherwise it returns -EINVAL.
73 */
66int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t, 74int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
67 const struct v4l2_dv_timings_cap *cap, 75 const struct v4l2_dv_timings_cap *cap,
68 v4l2_check_dv_timings_fnc fnc, 76 v4l2_check_dv_timings_fnc fnc,
69 void *fnc_handle); 77 void *fnc_handle);
70 78
71/** v4l2_find_dv_timings_cap() - Find the closest timings struct 79/**
72 * @t: the v4l2_enum_dv_timings struct. 80 * v4l2_find_dv_timings_cap() - Find the closest timings struct
73 * @cap: the v4l2_dv_timings_cap capabilities. 81 *
74 * @pclock_delta: maximum delta between t->pixelclock and the timing struct 82 * @t: the v4l2_enum_dv_timings struct.
75 * under consideration. 83 * @cap: the v4l2_dv_timings_cap capabilities.
76 * @fnc: callback to check if a given timings struct is OK. May be NULL. 84 * @pclock_delta: maximum delta between t->pixelclock and the timing struct
77 * @fnc_handle: a handle that is passed on to @fnc. 85 * under consideration.
78 * 86 * @fnc: callback to check if a given timings struct is OK. May be NULL.
79 * This function tries to map the given timings to an entry in the 87 * @fnc_handle: a handle that is passed on to @fnc.
80 * full list of possible CEA-861 and DMT timings, filtering out any timings 88 *
81 * that are not supported based on the hardware capabilities and the callback 89 * This function tries to map the given timings to an entry in the
82 * function (if non-NULL). 90 * full list of possible CEA-861 and DMT timings, filtering out any timings
83 * 91 * that are not supported based on the hardware capabilities and the callback
84 * On success it will fill in @t with the found timings and it returns true. 92 * function (if non-NULL).
85 * On failure it will return false. 93 *
86 */ 94 * On success it will fill in @t with the found timings and it returns true.
95 * On failure it will return false.
96 */
87bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t, 97bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
88 const struct v4l2_dv_timings_cap *cap, 98 const struct v4l2_dv_timings_cap *cap,
89 unsigned pclock_delta, 99 unsigned pclock_delta,
90 v4l2_check_dv_timings_fnc fnc, 100 v4l2_check_dv_timings_fnc fnc,
91 void *fnc_handle); 101 void *fnc_handle);
92 102
93/** v4l2_match_dv_timings() - do two timings match? 103/**
94 * @measured: the measured timings data. 104 * v4l2_match_dv_timings() - do two timings match?
95 * @standard: the timings according to the standard. 105 *
96 * @pclock_delta: maximum delta in Hz between standard->pixelclock and 106 * @measured: the measured timings data.
97 * the measured timings. 107 * @standard: the timings according to the standard.
98 * 108 * @pclock_delta: maximum delta in Hz between standard->pixelclock and
99 * Returns true if the two timings match, returns false otherwise. 109 * the measured timings.
100 */ 110 *
111 * Returns true if the two timings match, returns false otherwise.
112 */
101bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured, 113bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured,
102 const struct v4l2_dv_timings *standard, 114 const struct v4l2_dv_timings *standard,
103 unsigned pclock_delta); 115 unsigned pclock_delta);
104 116
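Taken together, the helpers above cover the usual shape of a driver's DV timings handling: validate a request against the hardware limits, then optionally snap it to a known CEA-861/DMT entry. A rough sketch, assuming the driver keeps its limits in a static foo_timings_cap table (all foo_* names are invented):

#include <linux/errno.h>
#include <media/v4l2-dv-timings.h>

static const struct v4l2_dv_timings_cap foo_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
        /* .bt min/max limits omitted for brevity */
};

static int foo_s_dv_timings(struct v4l2_dv_timings *timings)
{
        if (!v4l2_valid_dv_timings(timings, &foo_timings_cap, NULL, NULL))
                return -EINVAL;

        /* Replace near-miss timings with the closest standard entry,
         * accepting up to 250 kHz of pixel clock deviation. */
        v4l2_find_dv_timings_cap(timings, &foo_timings_cap, 250000, NULL, NULL);
        return 0;
}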
105/** v4l2_print_dv_timings() - log the contents of a dv_timings struct 117/**
106 * @dev_prefix:device prefix for each log line. 118 * v4l2_print_dv_timings() - log the contents of a dv_timings struct
 107 * @prefix: additional prefix for each log line, may be NULL. 119 * @dev_prefix: device prefix for each log line.
108 * @t: the timings data. 120 * @prefix: additional prefix for each log line, may be NULL.
109 * @detailed: if true, give a detailed log. 121 * @t: the timings data.
110 */ 122 * @detailed: if true, give a detailed log.
123 */
111void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, 124void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
112 const struct v4l2_dv_timings *t, bool detailed); 125 const struct v4l2_dv_timings *t, bool detailed);
113 126
114/** v4l2_detect_cvt - detect if the given timings follow the CVT standard 127/**
128 * v4l2_detect_cvt - detect if the given timings follow the CVT standard
129 *
115 * @frame_height - the total height of the frame (including blanking) in lines. 130 * @frame_height - the total height of the frame (including blanking) in lines.
116 * @hfreq - the horizontal frequency in Hz. 131 * @hfreq - the horizontal frequency in Hz.
117 * @vsync - the height of the vertical sync in lines. 132 * @vsync - the height of the vertical sync in lines.
133 * @active_width - active width of image (does not include blanking). This
134 * information is needed only in case of version 2 of reduced blanking.
135 * In other cases, this parameter does not have any effect on timings.
118 * @polarities - the horizontal and vertical polarities (same as struct 136 * @polarities - the horizontal and vertical polarities (same as struct
119 * v4l2_bt_timings polarities). 137 * v4l2_bt_timings polarities).
120 * @interlaced - if this flag is true, it indicates interlaced format 138 * @interlaced - if this flag is true, it indicates interlaced format
@@ -125,9 +143,12 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
125 * in with the found CVT timings. 143 * in with the found CVT timings.
126 */ 144 */
127bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync, 145bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
128 u32 polarities, bool interlaced, struct v4l2_dv_timings *fmt); 146 unsigned active_width, u32 polarities, bool interlaced,
147 struct v4l2_dv_timings *fmt);
129 148
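Because the hunk above adds an active_width argument, callers that detect CVT from measured sync parameters now pass the measured active width as well; it only matters for CVT reduced blanking version 2. A hedged example with invented measurements:

#include <media/v4l2-dv-timings.h>

static bool foo_try_cvt(struct v4l2_dv_timings *timings)
{
        /* 1125 total lines, 67.5 kHz line rate, 5-line vsync, 1920 active
         * pixels, positive sync polarities, progressive scan. */
        return v4l2_detect_cvt(1125, 67500, 5, 1920,
                               V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL,
                               false, timings);
}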
130/** v4l2_detect_gtf - detect if the given timings follow the GTF standard 149/**
150 * v4l2_detect_gtf - detect if the given timings follow the GTF standard
151 *
131 * @frame_height - the total height of the frame (including blanking) in lines. 152 * @frame_height - the total height of the frame (including blanking) in lines.
132 * @hfreq - the horizontal frequency in Hz. 153 * @hfreq - the horizontal frequency in Hz.
133 * @vsync - the height of the vertical sync in lines. 154 * @vsync - the height of the vertical sync in lines.
@@ -149,8 +170,10 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
149 u32 polarities, bool interlaced, struct v4l2_fract aspect, 170 u32 polarities, bool interlaced, struct v4l2_fract aspect,
150 struct v4l2_dv_timings *fmt); 171 struct v4l2_dv_timings *fmt);
151 172
152/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes 173/**
174 * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
153 * 0x15 and 0x16 from the EDID. 175 * 0x15 and 0x16 from the EDID.
176 *
154 * @hor_landscape - byte 0x15 from the EDID. 177 * @hor_landscape - byte 0x15 from the EDID.
155 * @vert_portrait - byte 0x16 from the EDID. 178 * @vert_portrait - byte 0x16 from the EDID.
156 * 179 *
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 1ab9045e52e3..9792f906423b 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -68,10 +68,11 @@ struct v4l2_subdev;
68struct v4l2_subscribed_event; 68struct v4l2_subscribed_event;
69struct video_device; 69struct video_device;
70 70
71/** struct v4l2_kevent - Internal kernel event struct. 71/**
72 * @list: List node for the v4l2_fh->available list. 72 * struct v4l2_kevent - Internal kernel event struct.
73 * @sev: Pointer to parent v4l2_subscribed_event. 73 * @list: List node for the v4l2_fh->available list.
74 * @event: The event itself. 74 * @sev: Pointer to parent v4l2_subscribed_event.
75 * @event: The event itself.
75 */ 76 */
76struct v4l2_kevent { 77struct v4l2_kevent {
77 struct list_head list; 78 struct list_head list;
@@ -80,11 +81,12 @@ struct v4l2_kevent {
80}; 81};
81 82
82/** struct v4l2_subscribed_event_ops - Subscribed event operations. 83/** struct v4l2_subscribed_event_ops - Subscribed event operations.
83 * @add: Optional callback, called when a new listener is added 84 *
84 * @del: Optional callback, called when a listener stops listening 85 * @add: Optional callback, called when a new listener is added
85 * @replace: Optional callback that can replace event 'old' with event 'new'. 86 * @del: Optional callback, called when a listener stops listening
86 * @merge: Optional callback that can merge event 'old' into event 'new'. 87 * @replace: Optional callback that can replace event 'old' with event 'new'.
87 */ 88 * @merge: Optional callback that can merge event 'old' into event 'new'.
89 */
88struct v4l2_subscribed_event_ops { 90struct v4l2_subscribed_event_ops {
89 int (*add)(struct v4l2_subscribed_event *sev, unsigned elems); 91 int (*add)(struct v4l2_subscribed_event *sev, unsigned elems);
90 void (*del)(struct v4l2_subscribed_event *sev); 92 void (*del)(struct v4l2_subscribed_event *sev);
@@ -92,19 +94,20 @@ struct v4l2_subscribed_event_ops {
92 void (*merge)(const struct v4l2_event *old, struct v4l2_event *new); 94 void (*merge)(const struct v4l2_event *old, struct v4l2_event *new);
93}; 95};
94 96
95/** struct v4l2_subscribed_event - Internal struct representing a subscribed event. 97/**
96 * @list: List node for the v4l2_fh->subscribed list. 98 * struct v4l2_subscribed_event - Internal struct representing a subscribed event.
97 * @type: Event type. 99 * @list: List node for the v4l2_fh->subscribed list.
98 * @id: Associated object ID (e.g. control ID). 0 if there isn't any. 100 * @type: Event type.
99 * @flags: Copy of v4l2_event_subscription->flags. 101 * @id: Associated object ID (e.g. control ID). 0 if there isn't any.
100 * @fh: Filehandle that subscribed to this event. 102 * @flags: Copy of v4l2_event_subscription->flags.
101 * @node: List node that hooks into the object's event list (if there is one). 103 * @fh: Filehandle that subscribed to this event.
102 * @ops: v4l2_subscribed_event_ops 104 * @node: List node that hooks into the object's event list (if there is one).
103 * @elems: The number of elements in the events array. 105 * @ops: v4l2_subscribed_event_ops
104 * @first: The index of the events containing the oldest available event. 106 * @elems: The number of elements in the events array.
105 * @in_use: The number of queued events. 107 * @first: The index of the events containing the oldest available event.
106 * @events: An array of @elems events. 108 * @in_use: The number of queued events.
107 */ 109 * @events: An array of @elems events.
110 */
108struct v4l2_subscribed_event { 111struct v4l2_subscribed_event {
109 struct list_head list; 112 struct list_head list;
110 u32 type; 113 u32 type;
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 098236c083b8..3d184ab52274 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -48,13 +48,13 @@ struct v4l2_flash_ops {
48/** 48/**
49 * struct v4l2_flash_config - V4L2 Flash sub-device initialization data 49 * struct v4l2_flash_config - V4L2 Flash sub-device initialization data
50 * @dev_name: the name of the media entity, 50 * @dev_name: the name of the media entity,
51 unique in the system 51 * unique in the system
52 * @torch_intensity: constraints for the LED in torch mode 52 * @torch_intensity: constraints for the LED in torch mode
53 * @indicator_intensity: constraints for the indicator LED 53 * @indicator_intensity: constraints for the indicator LED
54 * @flash_faults: bitmask of flash faults that the LED flash class 54 * @flash_faults: bitmask of flash faults that the LED flash class
55 device can report; corresponding LED_FAULT* bit 55 * device can report; corresponding LED_FAULT* bit
56 definitions are available in the header file 56 * definitions are available in the header file
57 <linux/led-class-flash.h> 57 * <linux/led-class-flash.h>
58 * @has_external_strobe: external strobe capability 58 * @has_external_strobe: external strobe capability
59 */ 59 */
60struct v4l2_flash_config { 60struct v4l2_flash_config {
@@ -105,7 +105,7 @@ static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
105 * @fled_cdev: LED flash class device to wrap 105 * @fled_cdev: LED flash class device to wrap
106 * @iled_cdev: LED flash class device representing indicator LED associated 106 * @iled_cdev: LED flash class device representing indicator LED associated
107 * with fled_cdev, may be NULL 107 * with fled_cdev, may be NULL
108 * @flash_ops: V4L2 Flash device ops 108 * @ops: V4L2 Flash device ops
109 * @config: initialization data for V4L2 Flash sub-device 109 * @config: initialization data for V4L2 Flash sub-device
110 * 110 *
111 * Create V4L2 Flash sub-device wrapping given LED subsystem device. 111 * Create V4L2 Flash sub-device wrapping given LED subsystem device.
@@ -123,7 +123,7 @@ struct v4l2_flash *v4l2_flash_init(
123 123
124/** 124/**
125 * v4l2_flash_release - release V4L2 Flash sub-device 125 * v4l2_flash_release - release V4L2 Flash sub-device
126 * @flash: the V4L2 Flash sub-device to release 126 * @v4l2_flash: the V4L2 Flash sub-device to release
127 * 127 *
128 * Release V4L2 Flash sub-device. 128 * Release V4L2 Flash sub-device.
129 */ 129 */
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 73069e4c2796..34cc99e093ef 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -65,7 +65,7 @@
65 V4L2_MBUS_CSI2_CHANNEL_2 | V4L2_MBUS_CSI2_CHANNEL_3) 65 V4L2_MBUS_CSI2_CHANNEL_2 | V4L2_MBUS_CSI2_CHANNEL_3)
66 66
67/** 67/**
68 * v4l2_mbus_type - media bus type 68 * enum v4l2_mbus_type - media bus type
69 * @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync 69 * @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync
70 * @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can 70 * @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can
71 * also be used for BT.1120 71 * also be used for BT.1120
@@ -78,7 +78,7 @@ enum v4l2_mbus_type {
78}; 78};
79 79
80/** 80/**
81 * v4l2_mbus_config - media bus configuration 81 * struct v4l2_mbus_config - media bus configuration
82 * @type: in: interface type 82 * @type: in: interface type
83 * @flags: in / out: configuration flags, depending on @type 83 * @flags: in / out: configuration flags, depending on @type
84 */ 84 */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3bbd96da25c9..8849aaba6aa5 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -40,6 +40,10 @@
40 * v4l2_m2m_job_finish() (as if the transaction ended normally). 40 * v4l2_m2m_job_finish() (as if the transaction ended normally).
41 * This function does not have to (and will usually not) wait 41 * This function does not have to (and will usually not) wait
42 * until the device enters a state when it can be stopped. 42 * until the device enters a state when it can be stopped.
43 * @lock: optional. Define a driver's own lock callback, instead of using
44 * m2m_ctx->q_lock.
45 * @unlock: optional. Define a driver's own unlock callback, instead of
46 * using m2m_ctx->q_lock.
43 */ 47 */
44struct v4l2_m2m_ops { 48struct v4l2_m2m_ops {
45 void (*device_run)(void *priv); 49 void (*device_run)(void *priv);
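The new @lock/@unlock entries above describe optional hooks that let a driver substitute its own lock for m2m_ctx->q_lock while jobs are scheduled. A sketch of what that might look like; the foo_dev mutex and the stubbed device_run are illustrative only:

#include <linux/mutex.h>
#include <media/v4l2-mem2mem.h>

struct foo_dev {
        struct mutex dev_mutex;         /* serializes access to the hardware */
};

static void foo_device_run(void *priv)
{
        /* start the hardware for the currently scheduled job (omitted) */
}

static void foo_m2m_lock(void *priv)
{
        struct foo_dev *foo = priv;

        mutex_lock(&foo->dev_mutex);
}

static void foo_m2m_unlock(void *priv)
{
        struct foo_dev *foo = priv;

        mutex_unlock(&foo->dev_mutex);
}

static const struct v4l2_m2m_ops foo_m2m_ops = {
        .device_run     = foo_device_run,
        .lock           = foo_m2m_lock,
        .unlock         = foo_m2m_unlock,
};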
@@ -161,6 +165,8 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
161/** 165/**
162 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for 166 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
163 * use 167 * use
168 *
169 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
164 */ 170 */
165static inline 171static inline
166unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) 172unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -171,6 +177,8 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
171/** 177/**
172 * v4l2_m2m_num_src_bufs_ready() - return the number of destination buffers 178 * v4l2_m2m_num_src_bufs_ready() - return the number of destination buffers
173 * ready for use 179 * ready for use
180 *
181 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
174 */ 182 */
175static inline 183static inline
176unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) 184unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -183,6 +191,8 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
183/** 191/**
184 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready 192 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
185 * buffers 193 * buffers
194 *
195 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
186 */ 196 */
187static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx) 197static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
188{ 198{
@@ -192,6 +202,8 @@ static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
192/** 202/**
193 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of 203 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
194 * ready buffers 204 * ready buffers
205 *
206 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
195 */ 207 */
196static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) 208static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
197{ 209{
@@ -200,6 +212,8 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
200 212
201/** 213/**
202 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers 214 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
215 *
216 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
203 */ 217 */
204static inline 218static inline
205struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx) 219struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -209,6 +223,8 @@ struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
209 223
210/** 224/**
211 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers 225 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
226 *
227 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
212 */ 228 */
213static inline 229static inline
214struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx) 230struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -221,6 +237,8 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
221/** 237/**
222 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready 238 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
223 * buffers and return it 239 * buffers and return it
240 *
241 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
224 */ 242 */
225static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) 243static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
226{ 244{
@@ -230,6 +248,8 @@ static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
230/** 248/**
231 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of 249 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
232 * ready buffers and return it 250 * ready buffers and return it
251 *
252 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
233 */ 253 */
234static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) 254static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
235{ 255{
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 4e18318eb425..b273cf9ac047 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -44,6 +44,7 @@
44 44
45struct v4l2_device; 45struct v4l2_device;
46struct v4l2_ctrl_handler; 46struct v4l2_ctrl_handler;
47struct v4l2_event;
47struct v4l2_event_subscription; 48struct v4l2_event_subscription;
48struct v4l2_fh; 49struct v4l2_fh;
49struct v4l2_subdev; 50struct v4l2_subdev;
@@ -117,34 +118,67 @@ struct v4l2_subdev_io_pin_config {
117 u8 strength; /* Pin drive strength */ 118 u8 strength; /* Pin drive strength */
118}; 119};
119 120
120/* 121/**
121 s_io_pin_config: configure one or more chip I/O pins for chips that 122 * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs
122 multiplex different internal signal pads out to IO pins. This function 123 *
123 takes a pointer to an array of 'n' pin configuration entries, one for 124 * @log_status: callback for VIDIOC_LOG_STATUS ioctl handler code.
124 each pin being configured. This function could be called at times 125 *
125 other than just subdevice initialization. 126 * @s_io_pin_config: configure one or more chip I/O pins for chips that
126 127 * multiplex different internal signal pads out to IO pins. This function
127 init: initialize the sensor registers to some sort of reasonable default 128 * takes a pointer to an array of 'n' pin configuration entries, one for
128 values. Do not use for new drivers and should be removed in existing 129 * each pin being configured. This function could be called at times
129 drivers. 130 * other than just subdevice initialization.
130 131 *
131 load_fw: load firmware. 132 * @init: initialize the sensor registers to some sort of reasonable default
132 133 * values. Do not use for new drivers and should be removed in existing
133 reset: generic reset command. The argument selects which subsystems to 134 * drivers.
134 reset. Passing 0 will always reset the whole chip. Do not use for new 135 *
135 drivers without discussing this first on the linux-media mailinglist. 136 * @load_fw: load firmware.
136 There should be no reason normally to reset a device. 137 *
137 138 * @reset: generic reset command. The argument selects which subsystems to
138 s_gpio: set GPIO pins. Very simple right now, might need to be extended with 139 * reset. Passing 0 will always reset the whole chip. Do not use for new
139 a direction argument if needed. 140 * drivers without discussing this first on the linux-media mailinglist.
140 141 * There should be no reason normally to reset a device.
141 s_power: puts subdevice in power saving mode (on == 0) or normal operation 142 *
142 mode (on == 1). 143 * @s_gpio: set GPIO pins. Very simple right now, might need to be extended with
143 144 * a direction argument if needed.
144 interrupt_service_routine: Called by the bridge chip's interrupt service 145 *
 145 handler, when an interrupt status has be raised due to this subdev, 146 * @queryctrl: callback for VIDIOC_QUERYCTRL ioctl handler code.
146 so that this subdev can handle the details. It may schedule work to be 147 *
147 performed later. It must not sleep. *Called from an IRQ context*. 148 * @g_ctrl: callback for VIDIOC_G_CTRL ioctl handler code.
149 *
150 * @s_ctrl: callback for VIDIOC_S_CTRL ioctl handler code.
151 *
152 * @g_ext_ctrls: callback for VIDIOC_G_EXT_CTRLS ioctl handler code.
153 *
154 * @s_ext_ctrls: callback for VIDIOC_S_EXT_CTRLS ioctl handler code.
155 *
156 * @try_ext_ctrls: callback for VIDIOC_TRY_EXT_CTRLS ioctl handler code.
157 *
158 * @querymenu: callback for VIDIOC_QUERYMENU ioctl handler code.
159 *
160 * @ioctl: called at the end of ioctl() syscall handler at the V4L2 core.
 161 * used to provide support for private ioctls used by the driver.
162 *
 163 * @compat_ioctl32: called when a 32-bit application uses a 64-bit kernel,
164 * in order to fix data passed from/to userspace.
165 *
166 * @g_register: callback for VIDIOC_G_REGISTER ioctl handler code.
167 *
 168 * @s_register: callback for VIDIOC_S_REGISTER ioctl handler code.
169 *
170 * @s_power: puts subdevice in power saving mode (on == 0) or normal operation
171 * mode (on == 1).
172 *
173 * @interrupt_service_routine: Called by the bridge chip's interrupt service
 174 * handler, when an interrupt status has been raised due to this subdev,
175 * so that this subdev can handle the details. It may schedule work to be
176 * performed later. It must not sleep. *Called from an IRQ context*.
177 *
 178 * @subscribe_event: used by drivers to request that the control framework
 179 * notify them when the value of a control changes.
180 *
181 * @unsubscribe_event: remove event subscription from the control framework.
148 */ 182 */
149struct v4l2_subdev_core_ops { 183struct v4l2_subdev_core_ops {
150 int (*log_status)(struct v4l2_subdev *sd); 184 int (*log_status)(struct v4l2_subdev *sd);
@@ -179,18 +213,32 @@ struct v4l2_subdev_core_ops {
179 struct v4l2_event_subscription *sub); 213 struct v4l2_event_subscription *sub);
180}; 214};
181 215
182/* s_radio: v4l device was opened in radio mode. 216/**
 183 217 * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened in radio mode.
184 g_frequency: freq->type must be filled in. Normally done by video_ioctl2 218 *
185 or the bridge driver. 219 * @s_radio: callback for VIDIOC_S_RADIO ioctl handler code.
186 220 *
187 g_tuner: 221 * @s_frequency: callback for VIDIOC_S_FREQUENCY ioctl handler code.
188 s_tuner: vt->type must be filled in. Normally done by video_ioctl2 or the 222 *
189 bridge driver. 223 * @g_frequency: callback for VIDIOC_G_FREQUENCY ioctl handler code.
190 224 * freq->type must be filled in. Normally done by video_ioctl2
191 s_type_addr: sets tuner type and its I2C addr. 225 * or the bridge driver.
192 226 *
193 s_config: sets tda9887 specific stuff, like port1, port2 and qss 227 * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS ioctl handler code.
228 *
229 * @g_tuner: callback for VIDIOC_G_TUNER ioctl handler code.
230 *
231 * @s_tuner: callback for VIDIOC_S_TUNER ioctl handler code. vt->type must be
232 * filled in. Normally done by video_ioctl2 or the
233 * bridge driver.
234 *
235 * @g_modulator: callback for VIDIOC_G_MODULATOR ioctl handler code.
236 *
237 * @s_modulator: callback for VIDIOC_S_MODULATOR ioctl handler code.
238 *
239 * @s_type_addr: sets tuner type and its I2C addr.
240 *
241 * @s_config: sets tda9887 specific stuff, like port1, port2 and qss
194 */ 242 */
195struct v4l2_subdev_tuner_ops { 243struct v4l2_subdev_tuner_ops {
196 int (*s_radio)(struct v4l2_subdev *sd); 244 int (*s_radio)(struct v4l2_subdev *sd);
@@ -205,25 +253,31 @@ struct v4l2_subdev_tuner_ops {
205 int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config); 253 int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config);
206}; 254};
207 255
208/* s_clock_freq: set the frequency (in Hz) of the audio clock output. 256/**
209 Used to slave an audio processor to the video decoder, ensuring that 257 * struct v4l2_subdev_audio_ops - Callbacks used for audio-related settings
210 audio and video remain synchronized. Usual values for the frequency 258 *
211 are 48000, 44100 or 32000 Hz. If the frequency is not supported, then 259 * @s_clock_freq: set the frequency (in Hz) of the audio clock output.
212 -EINVAL is returned. 260 * Used to slave an audio processor to the video decoder, ensuring that
213 261 * audio and video remain synchronized. Usual values for the frequency
214 s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard 262 * are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
215 way to select I2S clock used by driving digital audio streams at some 263 * -EINVAL is returned.
216 board designs. Usual values for the frequency are 1024000 and 2048000. 264 *
217 If the frequency is not supported, then -EINVAL is returned. 265 * @s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
218 266 * way to select I2S clock used by driving digital audio streams at some
219 s_routing: used to define the input and/or output pins of an audio chip, 267 * board designs. Usual values for the frequency are 1024000 and 2048000.
220 and any additional configuration data. 268 * If the frequency is not supported, then -EINVAL is returned.
221 Never attempt to use user-level input IDs (e.g. Composite, S-Video, 269 *
222 Tuner) at this level. An i2c device shouldn't know about whether an 270 * @s_routing: used to define the input and/or output pins of an audio chip,
223 input pin is connected to a Composite connector, become on another 271 * and any additional configuration data.
224 board or platform it might be connected to something else entirely. 272 * Never attempt to use user-level input IDs (e.g. Composite, S-Video,
225 The calling driver is responsible for mapping a user-level input to 273 * Tuner) at this level. An i2c device shouldn't know about whether an
 226 the right pins on the i2c device. 274 * input pin is connected to a Composite connector, because on another
275 * board or platform it might be connected to something else entirely.
276 * The calling driver is responsible for mapping a user-level input to
277 * the right pins on the i2c device.
278 *
279 * @s_stream: used to notify the audio code that stream will start or has
280 * stopped.
227 */ 281 */
228struct v4l2_subdev_audio_ops { 282struct v4l2_subdev_audio_ops {
229 int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq); 283 int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
@@ -242,6 +296,7 @@ struct v4l2_subdev_audio_ops {
242 296
243/** 297/**
244 * struct v4l2_mbus_frame_desc_entry - media bus frame description structure 298 * struct v4l2_mbus_frame_desc_entry - media bus frame description structure
299 *
245 * @flags: V4L2_MBUS_FRAME_DESC_FL_* flags 300 * @flags: V4L2_MBUS_FRAME_DESC_FL_* flags
246 * @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set 301 * @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set
247 * @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB 302 * @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB
@@ -265,45 +320,73 @@ struct v4l2_mbus_frame_desc {
265 unsigned short num_entries; 320 unsigned short num_entries;
266}; 321};
267 322
268/* 323/**
269 s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by 324 * struct v4l2_subdev_video_ops - Callbacks used when v4l device was opened
270 video input devices. 325 * in video mode.
271 326 *
272 g_std_output: get current standard for video OUTPUT devices. This is ignored 327 * @s_routing: see s_routing in audio_ops, except this version is for video
273 by video input devices. 328 * devices.
274 329 *
275 g_tvnorms: get v4l2_std_id with all standards supported by the video 330 * @s_crystal_freq: sets the frequency of the crystal used to generate the
276 CAPTURE device. This is ignored by video output devices. 331 * clocks in Hz. An extra flags field allows device specific configuration
277 332 * regarding clock frequency dividers, etc. If not used, then set flags
278 g_tvnorms_output: get v4l2_std_id with all standards supported by the video 333 * to 0. If the frequency is not supported, then -EINVAL is returned.
279 OUTPUT device. This is ignored by video capture devices. 334 *
280 335 * @g_std: callback for VIDIOC_G_STD ioctl handler code.
281 s_crystal_freq: sets the frequency of the crystal used to generate the 336 *
282 clocks in Hz. An extra flags field allows device specific configuration 337 * @s_std: callback for VIDIOC_S_STD ioctl handler code.
283 regarding clock frequency dividers, etc. If not used, then set flags 338 *
284 to 0. If the frequency is not supported, then -EINVAL is returned. 339 * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
285 340 * video input devices.
286 g_input_status: get input status. Same as the status field in the v4l2_input 341 *
287 struct. 342 * @g_std_output: get current standard for video OUTPUT devices. This is ignored
288 343 * by video input devices.
289 s_routing: see s_routing in audio_ops, except this version is for video 344 *
290 devices. 345 * @querystd: callback for VIDIOC_QUERYSTD ioctl handler code.
291 346 *
292 s_dv_timings(): Set custom dv timings in the sub device. This is used 347 * @g_tvnorms: get v4l2_std_id with all standards supported by the video
293 when sub device is capable of setting detailed timing information 348 * CAPTURE device. This is ignored by video output devices.
294 in the hardware to generate/detect the video signal. 349 *
295 350 * @g_tvnorms_output: get v4l2_std_id with all standards supported by the video
296 g_dv_timings(): Get custom dv timings in the sub device. 351 * OUTPUT device. This is ignored by video capture devices.
297 352 *
298 g_mbus_config: get supported mediabus configurations 353 * @g_input_status: get input status. Same as the status field in the v4l2_input
299 354 * struct.
300 s_mbus_config: set a certain mediabus configuration. This operation is added 355 *
301 for compatibility with soc-camera drivers and should not be used by new 356 * @s_stream: used to notify the driver that a video stream will start or has
302 software. 357 * stopped.
303 358 *
304 s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev 359 * @cropcap: callback for VIDIOC_CROPCAP ioctl handler code.
305 can adjust @size to a lower value and must not write more data to the 360 *
306 buffer starting at @data than the original value of @size. 361 * @g_crop: callback for VIDIOC_G_CROP ioctl handler code.
362 *
363 * @s_crop: callback for VIDIOC_S_CROP ioctl handler code.
364 *
365 * @g_parm: callback for VIDIOC_G_PARM ioctl handler code.
366 *
367 * @s_parm: callback for VIDIOC_S_PARM ioctl handler code.
368 *
369 * @g_frame_interval: callback for VIDIOC_G_FRAMEINTERVAL ioctl handler code.
370 *
371 * @s_frame_interval: callback for VIDIOC_S_FRAMEINTERVAL ioctl handler code.
372 *
373 * @s_dv_timings: Set custom dv timings in the sub device. This is used
374 * when sub device is capable of setting detailed timing information
375 * in the hardware to generate/detect the video signal.
376 *
377 * @g_dv_timings: Get custom dv timings in the sub device.
378 *
379 * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS ioctl handler code.
380 *
381 * @g_mbus_config: get supported mediabus configurations
382 *
383 * @s_mbus_config: set a certain mediabus configuration. This operation is added
384 * for compatibility with soc-camera drivers and should not be used by new
385 * software.
386 *
387 * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
388 * can adjust @size to a lower value and must not write more data to the
389 * buffer starting at @data than the original value of @size.
307 */ 390 */
308struct v4l2_subdev_video_ops { 391struct v4l2_subdev_video_ops {
309 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config); 392 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
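Most of the callbacks documented above are optional; a simple sensor subdev often implements little more than s_stream. A minimal sketch (foo_s_stream is a placeholder and the register programming is omitted):

#include <media/v4l2-subdev.h>

static int foo_s_stream(struct v4l2_subdev *sd, int enable)
{
        /* program the sensor's streaming on/off registers here */
        return 0;
}

static const struct v4l2_subdev_video_ops foo_video_ops = {
        .s_stream = foo_s_stream,
};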
@@ -340,34 +423,39 @@ struct v4l2_subdev_video_ops {
340 unsigned int *size); 423 unsigned int *size);
341}; 424};
342 425
343/* 426/**
344 decode_vbi_line: video decoders that support sliced VBI need to implement 427 * struct v4l2_subdev_vbi_ops - Callbacks used when v4l device was opened
345 this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the 428 * in video mode via the vbi device node.
346 start of the VBI data that was generated by the decoder. The driver 429 *
347 then parses the sliced VBI data and sets the other fields in the 430 * @decode_vbi_line: video decoders that support sliced VBI need to implement
348 struct accordingly. The pointer p is updated to point to the start of 431 * this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
349 the payload which can be copied verbatim into the data field of the 432 * start of the VBI data that was generated by the decoder. The driver
350 v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the 433 * then parses the sliced VBI data and sets the other fields in the
351 type field is set to 0 on return. 434 * struct accordingly. The pointer p is updated to point to the start of
352 435 * the payload which can be copied verbatim into the data field of the
353 s_vbi_data: used to generate VBI signals on a video signal. 436 * v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
354 v4l2_sliced_vbi_data is filled with the data packets that should be 437 * type field is set to 0 on return.
355 output. Note that if you set the line field to 0, then that VBI signal 438 *
356 is disabled. If no valid VBI data was found, then the type field is 439 * @s_vbi_data: used to generate VBI signals on a video signal.
357 set to 0 on return. 440 * v4l2_sliced_vbi_data is filled with the data packets that should be
358 441 * output. Note that if you set the line field to 0, then that VBI signal
359 g_vbi_data: used to obtain the sliced VBI packet from a readback register. 442 * is disabled. If no valid VBI data was found, then the type field is
360 Not all video decoders support this. If no data is available because 443 * set to 0 on return.
361 the readback register contains invalid or erroneous data -EIO is 444 *
362 returned. Note that you must fill in the 'id' member and the 'field' 445 * @g_vbi_data: used to obtain the sliced VBI packet from a readback register.
363 member (to determine whether CC data from the first or second field 446 * Not all video decoders support this. If no data is available because
364 should be obtained). 447 * the readback register contains invalid or erroneous data -EIO is
365 448 * returned. Note that you must fill in the 'id' member and the 'field'
366 s_raw_fmt: setup the video encoder/decoder for raw VBI. 449 * member (to determine whether CC data from the first or second field
367 450 * should be obtained).
368 g_sliced_fmt: retrieve the current sliced VBI settings. 451 *
369 452 * @g_sliced_vbi_cap: callback for VIDIOC_SLICED_VBI_CAP ioctl handler code.
370 s_sliced_fmt: setup the sliced VBI settings. 453 *
454 * @s_raw_fmt: setup the video encoder/decoder for raw VBI.
455 *
456 * @g_sliced_fmt: retrieve the current sliced VBI settings.
457 *
458 * @s_sliced_fmt: setup the sliced VBI settings.
371 */ 459 */
372struct v4l2_subdev_vbi_ops { 460struct v4l2_subdev_vbi_ops {
373 int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line); 461 int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
@@ -480,8 +568,39 @@ struct v4l2_subdev_pad_config {
480 568
481/** 569/**
482 * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations 570 * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
571 *
572 * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
573 * code.
574 * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
575 * code.
576 *
577 * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL ioctl
578 * handler code.
579 *
580 * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT ioctl handler code.
581 *
582 * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT ioctl handler code.
583 *
584 * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION ioctl handler code.
585 *
586 * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION ioctl handler code.
587 *
588 * @get_edid: callback for VIDIOC_SUBDEV_G_EDID ioctl handler code.
589 *
590 * @set_edid: callback for VIDIOC_SUBDEV_S_EDID ioctl handler code.
591 *
592 * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP ioctl handler
593 * code.
594 *
595 * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS ioctl handler
596 * code.
597 *
598 * @link_validate: used by the media controller code to check if the links
 599 * that belong to a pipeline can be used for streaming.
600 *
483 * @get_frame_desc: get the current low level media bus frame parameters. 601 * @get_frame_desc: get the current low level media bus frame parameters.
484 * @get_frame_desc: set the low level media bus frame parameters, @fd array 602 *
603 * @set_frame_desc: set the low level media bus frame parameters, @fd array
485 * may be adjusted by the subdev driver to device capabilities. 604 * may be adjusted by the subdev driver to device capabilities.
486 */ 605 */
487struct v4l2_subdev_pad_ops { 606struct v4l2_subdev_pad_ops {
@@ -695,4 +814,7 @@ void v4l2_subdev_init(struct v4l2_subdev *sd,
695#define v4l2_subdev_has_op(sd, o, f) \ 814#define v4l2_subdev_has_op(sd, o, f) \
696 ((sd)->ops->o && (sd)->ops->o->f) 815 ((sd)->ops->o && (sd)->ops->o->f)
697 816
817void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
818 const struct v4l2_event *ev);
819
698#endif 820#endif
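The v4l2_subdev_notify_event() declaration added at the end of this header pairs with the new struct v4l2_event forward declaration above: a subdev driver hands a filled-in event to the core, for example when it detects a new resolution on its input. A hedged sketch (the trigger and the foo_ name are invented):

#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>

static void foo_signal_source_change(struct v4l2_subdev *sd)
{
        static const struct v4l2_event ev = {
                .type = V4L2_EVENT_SOURCE_CHANGE,
                .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
        };

        v4l2_subdev_notify_event(sd, &ev);
}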
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 8c6e825940e5..d760aa73ebbb 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -37,7 +37,7 @@ struct videobuf_queue;
37 * 37 *
38 * about the mmap helpers (videobuf_mmap_*): 38 * about the mmap helpers (videobuf_mmap_*):
39 * 39 *
40 * The mmaper function allows to map any subset of contingous buffers. 40 * The mmaper function allows to map any subset of contiguous buffers.
41 * This includes one mmap() call for all buffers (which the original 41 * This includes one mmap() call for all buffers (which the original
42 * video4linux API uses) as well as one mmap() for every single buffer 42 * video4linux API uses) as well as one mmap() for every single buffer
43 * (which v4l2 uses). 43 * (which v4l2 uses).
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index c192e1b46cdc..589b56c68400 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -364,7 +364,9 @@ struct v4l2_fh;
364 * start_streaming() can be called. Used when a DMA engine 364 * start_streaming() can be called. Used when a DMA engine
365 * cannot be started unless at least this number of buffers 365 * cannot be started unless at least this number of buffers
366 * have been queued into the driver. 366 * have been queued into the driver.
367 * 367 */
368/*
 369 * Private elements (won't appear in the DocBook):
368 * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped 370 * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
369 * @memory: current memory type used 371 * @memory: current memory type used
370 * @bufs: videobuf buffer structures 372 * @bufs: videobuf buffer structures
@@ -407,7 +409,7 @@ struct vb2_queue {
407 gfp_t gfp_flags; 409 gfp_t gfp_flags;
408 u32 min_buffers_needed; 410 u32 min_buffers_needed;
409 411
410/* private: internal use only */ 412 /* private: internal use only */
411 struct mutex mmap_lock; 413 struct mutex mmap_lock;
412 enum v4l2_memory memory; 414 enum v4l2_memory memory;
413 struct vb2_buffer *bufs[VIDEO_MAX_FRAME]; 415 struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
@@ -484,7 +486,8 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
484 loff_t *ppos, int nonblock); 486 loff_t *ppos, int nonblock);
485size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, 487size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
486 loff_t *ppos, int nonblock); 488 loff_t *ppos, int nonblock);
487/** 489
490/*
488 * vb2_thread_fnc - callback function for use with vb2_thread 491 * vb2_thread_fnc - callback function for use with vb2_thread
489 * 492 *
490 * This is called whenever a buffer is dequeued in the thread. 493 * This is called whenever a buffer is dequeued in the thread.
@@ -577,7 +580,6 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb,
577 * vb2_get_plane_payload() - get bytesused for the plane plane_no 580 * vb2_get_plane_payload() - get bytesused for the plane plane_no
578 * @vb: buffer for which plane payload should be set 581 * @vb: buffer for which plane payload should be set
579 * @plane_no: plane number for which payload should be set 582 * @plane_no: plane number for which payload should be set
580 * @size: payload in bytes
581 */ 583 */
582static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, 584static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb,
583 unsigned int plane_no) 585 unsigned int plane_no)
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index f05444ca8c0c..9f36641a6781 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -17,7 +17,8 @@
17#include <media/videobuf2-core.h> 17#include <media/videobuf2-core.h>
18 18
19/** 19/**
20 * vb2_vmarea_handler - common vma refcount tracking handler 20 * struct vb2_vmarea_handler - common vma refcount tracking handler
21 *
21 * @refcount: pointer to refcount entry in the buffer 22 * @refcount: pointer to refcount entry in the buffer
22 * @put: callback to function that decreases buffer refcount 23 * @put: callback to function that decreases buffer refcount
23 * @arg: argument for @put callback 24 * @arg: argument for @put callback
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7a6c1d6cc173..f2ffe5bd720d 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -200,4 +200,14 @@ unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
200ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count, 200ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
201 loff_t *off); 201 loff_t *off);
202 202
203/*
204 * For EEH, a driver may want to assert a PERST will reload the same image
205 * from flash into the FPGA.
206 *
207 * This is a property of the entire adapter, not a single AFU, so drivers
208 * should set this property with care!
209 */
210void cxl_perst_reloads_same_image(struct cxl_afu *afu,
211 bool perst_reloads_same_image);
212
203#endif /* _MISC_CXL_H */ 213#endif /* _MISC_CXL_H */
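A hedged sketch of how an AFU driver whose FPGA image is stable across PERST might use the new hook; the setup function and where it is called from are assumptions for illustration:

#include <misc/cxl.h>

static void foo_afu_setup_eeh(struct cxl_afu *afu)
{
        /*
         * Tell the EEH path that asserting PERST reloads the same image from
         * flash, so recovery can keep the current AFU configuration. This is
         * an adapter-wide property, so set it deliberately.
         */
        cxl_perst_reloads_same_image(afu, true);
}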
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index dc03d77ad23b..a2f59ec98d24 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -197,6 +197,27 @@
197#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */ 197#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
198#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */ 198#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
199 199
200#define LOWPAN_PRIV_SIZE(llpriv_size) \
201 (sizeof(struct lowpan_priv) + llpriv_size)
202
203enum lowpan_lltypes {
204 LOWPAN_LLTYPE_BTLE,
205 LOWPAN_LLTYPE_IEEE802154,
206};
207
208struct lowpan_priv {
209 enum lowpan_lltypes lltype;
210
211 /* must be last */
212 u8 priv[0] __aligned(sizeof(void *));
213};
214
215static inline
216struct lowpan_priv *lowpan_priv(const struct net_device *dev)
217{
218 return netdev_priv(dev);
219}
220
200#ifdef DEBUG 221#ifdef DEBUG
201/* print data in line */ 222/* print data in line */
202static inline void raw_dump_inline(const char *caller, char *msg, 223static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
372 return skb->len + uncomp_header - ret; 393 return skb->len + uncomp_header - ret;
373} 394}
374 395
396void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
397
375int 398int
376lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, 399lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
377 const u8 *saddr, const u8 saddr_type, 400 const u8 *saddr, const u8 saddr_type,
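The lowpan_priv/LOWPAN_PRIV_SIZE additions and the new lowpan_netdev_setup() hook suggest a split between generic 6LoWPAN state and link-layer private data: a link-layer driver sizes its netdev allocation with LOWPAN_PRIV_SIZE() and keeps its own state in the trailing priv[] area. A rough sketch under those assumptions (struct foo_ll_priv and its field are invented):

#include <linux/netdevice.h>
#include <net/6lowpan.h>

struct foo_ll_priv {
        u16 short_addr;                 /* link-layer specific state */
};

/* The net_device is assumed to have been allocated with
 * alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct foo_ll_priv)), ...). */
static void foo_lowpan_setup(struct net_device *ldev)
{
        struct foo_ll_priv *ll;

        /* generic 6LoWPAN initialization for this link-layer type */
        lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);

        /* link-layer private data lives right after struct lowpan_priv */
        ll = (struct foo_ll_priv *)lowpan_priv(ldev)->priv;
        ll->short_addr = 0xfffe;
}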
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 931738bc5bba..9d446f136607 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -21,6 +21,8 @@ struct tcf_common {
21 struct gnet_stats_rate_est64 tcfc_rate_est; 21 struct gnet_stats_rate_est64 tcfc_rate_est;
22 spinlock_t tcfc_lock; 22 spinlock_t tcfc_lock;
23 struct rcu_head tcfc_rcu; 23 struct rcu_head tcfc_rcu;
24 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
25 struct gnet_stats_queue __percpu *cpu_qstats;
24}; 26};
25#define tcf_head common.tcfc_head 27#define tcf_head common.tcfc_head
26#define tcf_index common.tcfc_index 28#define tcf_index common.tcfc_index
@@ -68,6 +70,17 @@ static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
68 kfree(hf->htab); 70 kfree(hf->htab);
69} 71}
70 72
73/* Update lastuse only if needed, to avoid dirtying a cache line.
74 * We use a temp variable to avoid fetching jiffies twice.
75 */
76static inline void tcf_lastuse_update(struct tcf_t *tm)
77{
78 unsigned long now = jiffies;
79
80 if (tm->lastuse != now)
81 tm->lastuse = now;
82}
83
71#ifdef CONFIG_NET_CLS_ACT 84#ifdef CONFIG_NET_CLS_ACT
72 85
73#define ACT_P_CREATED 1 86#define ACT_P_CREATED 1
@@ -98,11 +111,10 @@ struct tc_action_ops {
98}; 111};
99 112
100int tcf_hash_search(struct tc_action *a, u32 index); 113int tcf_hash_search(struct tc_action *a, u32 index);
101void tcf_hash_destroy(struct tc_action *a);
102u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); 114u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
103int tcf_hash_check(u32 index, struct tc_action *a, int bind); 115int tcf_hash_check(u32 index, struct tc_action *a, int bind);
104int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, 116int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
105 int size, int bind); 117 int size, int bind, bool cpustats);
106void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); 118void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
107void tcf_hash_insert(struct tc_action *a); 119void tcf_hash_insert(struct tc_action *a);
108 120
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index def59d3a34d5..b5474b1fcd83 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -91,6 +91,37 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
91void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr); 91void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
92void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); 92void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
93 93
94static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
95{
96 if (dev->addr_len != ETH_ALEN)
97 return -1;
98 memcpy(eui, dev->dev_addr, 3);
99 memcpy(eui + 5, dev->dev_addr + 3, 3);
100
101 /*
102 * The zSeries OSA network cards can be shared among various
103 * OS instances, but the OSA cards have only one MAC address.
104 * This leads to duplicate address conflicts in conjunction
105 * with IPv6 if more than one instance uses the same card.
106 *
107 * The driver for these cards can deliver a unique 16-bit
108 * identifier for each instance sharing the same card. It is
109 * placed instead of 0xFFFE in the interface identifier. The
110 * "u" bit of the interface identifier is not inverted in this
111 * case. Hence the resulting interface identifier has local
112 * scope according to RFC2373.
113 */
114 if (dev->dev_id) {
115 eui[3] = (dev->dev_id >> 8) & 0xFF;
116 eui[4] = dev->dev_id & 0xFF;
117 } else {
118 eui[3] = 0xFF;
119 eui[4] = 0xFE;
120 eui[0] ^= 2;
121 }
122 return 0;
123}
124
94static inline unsigned long addrconf_timeout_fixup(u32 timeout, 125static inline unsigned long addrconf_timeout_fixup(u32 timeout,
95 unsigned int unit) 126 unsigned int unit)
96{ 127{
@@ -158,8 +189,8 @@ struct ipv6_stub {
158 const struct in6_addr *addr); 189 const struct in6_addr *addr);
159 int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, 190 int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
160 const struct in6_addr *addr); 191 const struct in6_addr *addr);
161 int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst, 192 int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
162 struct flowi6 *fl6); 193 struct dst_entry **dst, struct flowi6 *fl6);
163 void (*udpv6_encap_enable)(void); 194 void (*udpv6_encap_enable)(void);
164 void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh, 195 void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
165 const struct in6_addr *daddr, 196 const struct in6_addr *daddr,
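addrconf_ifid_eui48() is now available from the header, so other callers can derive an IPv6 interface identifier from an Ethernet-style MAC (including the zSeries dev_id special case). A small sketch, not from the patch, with my_build_ifid() as a placeholder name:

#include <linux/errno.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

static int my_build_ifid(struct net_device *dev, struct in6_addr *addr)
{
        /* fill addr->s6_addr[8..15] with the EUI-64 interface identifier */
        if (addrconf_ifid_eui48(addr->s6_addr + 8, dev))
                return -EADDRNOTAVAIL;

        return 0;
}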
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3bd618d3e55d..9e1a59e01fa2 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -512,9 +512,11 @@ struct hci_conn_params {
512 HCI_AUTO_CONN_DIRECT, 512 HCI_AUTO_CONN_DIRECT,
513 HCI_AUTO_CONN_ALWAYS, 513 HCI_AUTO_CONN_ALWAYS,
514 HCI_AUTO_CONN_LINK_LOSS, 514 HCI_AUTO_CONN_LINK_LOSS,
515 HCI_AUTO_CONN_EXPLICIT,
515 } auto_connect; 516 } auto_connect;
516 517
517 struct hci_conn *conn; 518 struct hci_conn *conn;
519 bool explicit_connect;
518}; 520};
519 521
520extern struct list_head hci_dev_list; 522extern struct list_head hci_dev_list;
@@ -639,6 +641,7 @@ enum {
639 HCI_CONN_DROP, 641 HCI_CONN_DROP,
640 HCI_CONN_PARAM_REMOVAL_PEND, 642 HCI_CONN_PARAM_REMOVAL_PEND,
641 HCI_CONN_NEW_LINK_KEY, 643 HCI_CONN_NEW_LINK_KEY,
644 HCI_CONN_SCANNING,
642}; 645};
643 646
644static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) 647static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +811,26 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
808 return NULL; 811 return NULL;
809} 812}
810 813
814static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
815{
816 struct hci_conn_hash *h = &hdev->conn_hash;
817 struct hci_conn *c;
818
819 rcu_read_lock();
820
821 list_for_each_entry_rcu(c, &h->list, list) {
822 if (c->type == LE_LINK && c->state == BT_CONNECT &&
823 !test_bit(HCI_CONN_SCANNING, &c->flags)) {
824 rcu_read_unlock();
825 return c;
826 }
827 }
828
829 rcu_read_unlock();
830
831 return NULL;
832}
833
811int hci_disconnect(struct hci_conn *conn, __u8 reason); 834int hci_disconnect(struct hci_conn *conn, __u8 reason);
812bool hci_setup_sync(struct hci_conn *conn, __u16 handle); 835bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
813void hci_sco_setup(struct hci_conn *conn, __u8 status); 836void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +846,9 @@ void hci_chan_del(struct hci_chan *chan);
823void hci_chan_list_flush(struct hci_conn *conn); 846void hci_chan_list_flush(struct hci_conn *conn);
824struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); 847struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
825 848
849struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
850 u8 dst_type, u8 sec_level,
851 u16 conn_timeout, u8 role);
826struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, 852struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
827 u8 dst_type, u8 sec_level, u16 conn_timeout, 853 u8 dst_type, u8 sec_level, u16 conn_timeout,
828 u8 role); 854 u8 role);
@@ -988,6 +1014,9 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
988struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 1014struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
989 bdaddr_t *addr, 1015 bdaddr_t *addr,
990 u8 addr_type); 1016 u8 addr_type);
1017struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
1018 bdaddr_t *addr,
1019 u8 addr_type);
991 1020
992void hci_uuids_clear(struct hci_dev *hdev); 1021void hci_uuids_clear(struct hci_dev *hdev);
993 1022
@@ -1297,7 +1326,7 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
1297 if (max >= to_multiplier * 8) 1326 if (max >= to_multiplier * 8)
1298 return -EINVAL; 1327 return -EINVAL;
1299 1328
1300 max_latency = (to_multiplier * 8 / max) - 1; 1329 max_latency = (to_multiplier * 4 / max) - 1;
1301 if (latency > 499 || latency > max_latency) 1330 if (latency > 499 || latency > max_latency)
1302 return -EINVAL; 1331 return -EINVAL;
1303 1332
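hci_lookup_le_connect() returns the single LE connection currently in BT_CONNECT that is not merely in the background-scanning stage (skipped via the new HCI_CONN_SCANNING flag). The sketch below is illustrative only; my_le_connect_update is a made-up caller and simply logs the pending peer.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void my_le_connect_update(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        hci_dev_lock(hdev);

        conn = hci_lookup_le_connect(hdev);
        if (conn)
                BT_DBG("pending LE connect to %pMR", &conn->dst);

        hci_dev_unlock(hdev);
}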
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 2239a3753092..c98afc08cc26 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -55,6 +55,8 @@
55#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000) 55#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
56#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000) 56#define L2CAP_MOVE_TIMEOUT msecs_to_jiffies(4000)
57#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000) 57#define L2CAP_MOVE_ERTX_TIMEOUT msecs_to_jiffies(60000)
58#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
59#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
58 60
59#define L2CAP_A2MP_DEFAULT_MTU 670 61#define L2CAP_A2MP_DEFAULT_MTU 670
60 62
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index c28aca25320e..1797235cd590 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -66,6 +66,7 @@ enum {
66 BOND_OPT_AD_ACTOR_SYS_PRIO, 66 BOND_OPT_AD_ACTOR_SYS_PRIO,
67 BOND_OPT_AD_ACTOR_SYSTEM, 67 BOND_OPT_AD_ACTOR_SYSTEM,
68 BOND_OPT_AD_USER_PORT_KEY, 68 BOND_OPT_AD_USER_PORT_KEY,
69 BOND_OPT_NUM_PEER_NOTIF_ALIAS,
69 BOND_OPT_LAST 70 BOND_OPT_LAST
70}; 71};
71 72
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 20defc0353d1..c1740a2794a3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -310,6 +310,13 @@ static inline bool bond_uses_primary(struct bonding *bond)
310 return bond_mode_uses_primary(BOND_MODE(bond)); 310 return bond_mode_uses_primary(BOND_MODE(bond));
311} 311}
312 312
313static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
314{
315 struct slave *slave = rcu_dereference(bond->curr_active_slave);
316
317 return bond_uses_primary(bond) && slave ? slave->dev : NULL;
318}
319
313static inline bool bond_slave_is_up(struct slave *slave) 320static inline bool bond_slave_is_up(struct slave *slave)
314{ 321{
315 return netif_running(slave->dev) && netif_carrier_ok(slave->dev); 322 return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
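bond_option_active_slave_get_rcu() dereferences curr_active_slave under RCU, so callers must hold rcu_read_lock(). A minimal, hypothetical caller (my_bond_has_active_slave) for illustration, not taken from the patch:

#include <net/bonding.h>

static bool my_bond_has_active_slave(struct bonding *bond)
{
        struct net_device *slave_dev;
        bool ret;

        /* the helper reads curr_active_slave, so RCU must be held */
        rcu_read_lock();
        slave_dev = bond_option_active_slave_get_rcu(bond);
        ret = slave_dev != NULL;
        rcu_read_unlock();

        return ret;
}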
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 883fe1e7c5a1..f0889a247643 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2369,8 +2369,7 @@ struct cfg80211_qos_map {
2369 * method returns 0.) 2369 * method returns 0.)
2370 * 2370 *
2371 * @mgmt_frame_register: Notify driver that a management frame type was 2371 * @mgmt_frame_register: Notify driver that a management frame type was
2372 * registered. Note that this callback may not sleep, and cannot run 2372 * registered. The callback is allowed to sleep.
2373 * concurrently with itself.
2374 * 2373 *
2375 * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. 2374 * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
2376 * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may 2375 * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 290a9a69af07..76b1ffaea863 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -34,6 +34,8 @@ struct cfg802154_ops {
34 int type); 34 int type);
35 void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, 35 void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
36 struct net_device *dev); 36 struct net_device *dev);
37 int (*suspend)(struct wpan_phy *wpan_phy);
38 int (*resume)(struct wpan_phy *wpan_phy);
37 int (*add_virtual_intf)(struct wpan_phy *wpan_phy, 39 int (*add_virtual_intf)(struct wpan_phy *wpan_phy,
38 const char *name, 40 const char *name,
39 unsigned char name_assign_type, 41 unsigned char name_assign_type,
@@ -61,6 +63,8 @@ struct cfg802154_ops {
61 s8 max_frame_retries); 63 s8 max_frame_retries);
62 int (*set_lbt_mode)(struct wpan_phy *wpan_phy, 64 int (*set_lbt_mode)(struct wpan_phy *wpan_phy,
63 struct wpan_dev *wpan_dev, bool mode); 65 struct wpan_dev *wpan_dev, bool mode);
66 int (*set_ackreq_default)(struct wpan_phy *wpan_phy,
67 struct wpan_dev *wpan_dev, bool ackreq);
64}; 68};
65 69
66static inline bool 70static inline bool
@@ -171,6 +175,9 @@ struct wpan_dev {
171 struct list_head list; 175 struct list_head list;
172 struct net_device *netdev; 176 struct net_device *netdev;
173 177
178 /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
179 struct net_device *lowpan_dev;
180
174 u32 identifier; 181 u32 identifier;
175 182
176 /* MAC PIB */ 183 /* MAC PIB */
@@ -191,6 +198,9 @@ struct wpan_dev {
191 bool lbt; 198 bool lbt;
192 199
193 bool promiscuous_mode; 200 bool promiscuous_mode;
201
202 /* fallback for acknowledgment bit setting */
203 bool ackreq;
194}; 204};
195 205
196#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) 206#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 2d1d73cb773e..9fcaedf994ee 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -140,14 +140,16 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
140 140
141struct sk_buff; 141struct sk_buff;
142void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, 142void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
143 __be32 from, __be32 to, int pseudohdr); 143 __be32 from, __be32 to, bool pseudohdr);
144void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, 144void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
145 const __be32 *from, const __be32 *to, 145 const __be32 *from, const __be32 *to,
146 int pseudohdr); 146 bool pseudohdr);
147void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
148 __wsum diff, bool pseudohdr);
147 149
148static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, 150static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
149 __be16 from, __be16 to, 151 __be16 from, __be16 to,
150 int pseudohdr) 152 bool pseudohdr)
151{ 153{
152 inet_proto_csum_replace4(sum, skb, (__force __be32)from, 154 inet_proto_csum_replace4(sum, skb, (__force __be32)from,
153 (__force __be32)to, pseudohdr); 155 (__force __be32)to, pseudohdr);
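The pseudohdr parameter of the inet_proto_csum_replace*() helpers is now a bool. A small sketch, not from the patch, of a NAT-style port rewrite passing true because UDP checksums cover a pseudo header (my_rewrite_dport is a placeholder):

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/checksum.h>

static void my_rewrite_dport(struct sk_buff *skb, struct udphdr *uh,
                             __be16 new_port)
{
        /* UDP checksums cover a pseudo header, hence pseudohdr = true */
        inet_proto_csum_replace2(&uh->check, skb, uh->dest, new_port, true);
        uh->dest = new_port;
}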
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d39456e14..ccd6d8bffa4d 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
49 if (classid != sk->sk_classid) 49 if (classid != sk->sk_classid)
50 sk->sk_classid = classid; 50 sk->sk_classid = classid;
51} 51}
52
53static inline u32 task_get_classid(const struct sk_buff *skb)
54{
55 u32 classid = task_cls_state(current)->classid;
56
57 /* Due to the nature of the classifier it is required to ignore all
58 * packets originating from softirq context as accessing `current'
59 * would lead to false results.
60 *
61 * This test assumes that all callers of dev_queue_xmit() explicitly
62 * disable bh. Knowing this, it is possible to detect softirq based
63 * calls by looking at the number of nested bh disable calls because
64 * softirqs always disables bh.
65 */
66 if (in_serving_softirq()) {
67 /* If there is an sk_classid we'll use that. */
68 if (!skb->sk)
69 return 0;
70
71 classid = skb->sk->sk_classid;
72 }
73
74 return classid;
75}
52#else /* !CONFIG_CGROUP_NET_CLASSID */ 76#else /* !CONFIG_CGROUP_NET_CLASSID */
53static inline void sock_update_classid(struct sock *sk) 77static inline void sock_update_classid(struct sock *sk)
54{ 78{
55} 79}
80
81static inline u32 task_get_classid(const struct sk_buff *skb)
82{
83 return 0;
84}
56#endif /* CONFIG_CGROUP_NET_CLASSID */ 85#endif /* CONFIG_CGROUP_NET_CLASSID */
57#endif /* _NET_CLS_CGROUP_H */ 86#endif /* _NET_CLS_CGROUP_H */
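task_get_classid() centralises the softirq/current check that net_cls classifiers used to open-code. Illustrative use only; my_classify is not a real function and just forwards the result:

#include <linux/skbuff.h>
#include <net/cls_cgroup.h>

static u32 my_classify(const struct sk_buff *skb)
{
        /* 0 means "no usable classid", e.g. softirq context without a socket */
        return task_get_classid(skb);
}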
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fbca63ba8f73..b34d812bc5d0 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -171,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
171 return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); 171 return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
172} 172}
173 173
174static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
175{
176 return !!((ds->dsa_port_mask) & (1 << p));
177}
178
174static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) 179static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
175{ 180{
176 return ds->phys_port_mask & (1 << p) && ds->ports[p]; 181 return ds->phys_port_mask & (1 << p) && ds->ports[p];
@@ -296,12 +301,28 @@ struct dsa_switch_driver {
296 u32 br_port_mask); 301 u32 br_port_mask);
297 int (*port_stp_update)(struct dsa_switch *ds, int port, 302 int (*port_stp_update)(struct dsa_switch *ds, int port,
298 u8 state); 303 u8 state);
299 int (*fdb_add)(struct dsa_switch *ds, int port, 304
300 const unsigned char *addr, u16 vid); 305 /*
301 int (*fdb_del)(struct dsa_switch *ds, int port, 306 * VLAN support
302 const unsigned char *addr, u16 vid); 307 */
303 int (*fdb_getnext)(struct dsa_switch *ds, int port, 308 int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
304 unsigned char *addr, bool *is_static); 309 int (*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid);
310 int (*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
311 bool untagged);
312 int (*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
313 int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
314 unsigned long *ports, unsigned long *untagged);
315
316 /*
317 * Forwarding database
318 */
319 int (*port_fdb_add)(struct dsa_switch *ds, int port,
320 const unsigned char *addr, u16 vid);
321 int (*port_fdb_del)(struct dsa_switch *ds, int port,
322 const unsigned char *addr, u16 vid);
323 int (*port_fdb_getnext)(struct dsa_switch *ds, int port,
324 unsigned char *addr, u16 *vid,
325 bool *is_static);
305}; 326};
306 327
307void register_switch_driver(struct dsa_switch_driver *type); 328void register_switch_driver(struct dsa_switch_driver *type);
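dsa_is_dsa_port() complements the existing dsa_is_cpu_port() check for ports that interconnect switches. A one-line illustration (my_port_is_tagging is hypothetical, not from the patch):

#include <net/dsa.h>

static bool my_port_is_tagging(struct dsa_switch *ds, int port)
{
        /* DSA and CPU ports carry tagged frames between switches and host */
        return dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port);
}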
diff --git a/include/net/dst.h b/include/net/dst.h
index 2bc73f8a00a9..9261d928303d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -57,6 +57,7 @@ struct dst_entry {
57#define DST_FAKE_RTABLE 0x0040 57#define DST_FAKE_RTABLE 0x0040
58#define DST_XFRM_TUNNEL 0x0080 58#define DST_XFRM_TUNNEL 0x0080
59#define DST_XFRM_QUEUE 0x0100 59#define DST_XFRM_QUEUE 0x0100
60#define DST_METADATA 0x0200
60 61
61 unsigned short pending_confirm; 62 unsigned short pending_confirm;
62 63
@@ -83,12 +84,13 @@ struct dst_entry {
83 __u32 __pad2; 84 __u32 __pad2;
84#endif 85#endif
85 86
87#ifdef CONFIG_64BIT
88 struct lwtunnel_state *lwtstate;
86 /* 89 /*
87 * Align __refcnt to a 64 bytes alignment 90 * Align __refcnt to a 64 bytes alignment
88 * (L1_CACHE_SIZE would be too much) 91 * (L1_CACHE_SIZE would be too much)
89 */ 92 */
90#ifdef CONFIG_64BIT 93 long __pad_to_align_refcnt[1];
91 long __pad_to_align_refcnt[2];
92#endif 94#endif
93 /* 95 /*
94 * __refcnt wants to be on a different cache line from 96 * __refcnt wants to be on a different cache line from
@@ -97,6 +99,9 @@ struct dst_entry {
97 atomic_t __refcnt; /* client references */ 99 atomic_t __refcnt; /* client references */
98 int __use; 100 int __use;
99 unsigned long lastuse; 101 unsigned long lastuse;
102#ifndef CONFIG_64BIT
103 struct lwtunnel_state *lwtstate;
104#endif
100 union { 105 union {
101 struct dst_entry *next; 106 struct dst_entry *next;
102 struct rtable __rcu *rt_next; 107 struct rtable __rcu *rt_next;
@@ -202,6 +207,12 @@ static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
202 p[metric-1] = val; 207 p[metric-1] = val;
203} 208}
204 209
210/* Kernel-internal feature bits that are unallocated in user space. */
211#define DST_FEATURE_ECN_CA (1 << 31)
212
213#define DST_FEATURE_MASK (DST_FEATURE_ECN_CA)
214#define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
215
205static inline u32 216static inline u32
206dst_feature(const struct dst_entry *dst, u32 feature) 217dst_feature(const struct dst_entry *dst, u32 feature)
207{ 218{
@@ -284,13 +295,18 @@ static inline void skb_dst_drop(struct sk_buff *skb)
284 } 295 }
285} 296}
286 297
287static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb) 298static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
288{ 299{
289 nskb->_skb_refdst = oskb->_skb_refdst; 300 nskb->_skb_refdst = refdst;
290 if (!(nskb->_skb_refdst & SKB_DST_NOREF)) 301 if (!(nskb->_skb_refdst & SKB_DST_NOREF))
291 dst_clone(skb_dst(nskb)); 302 dst_clone(skb_dst(nskb));
292} 303}
293 304
305static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
306{
307 __skb_dst_copy(nskb, oskb->_skb_refdst);
308}
309
294/** 310/**
295 * skb_dst_force - makes sure skb dst is refcounted 311 * skb_dst_force - makes sure skb dst is refcounted
296 * @skb: buffer 312 * @skb: buffer
@@ -356,6 +372,9 @@ static inline int dst_discard(struct sk_buff *skb)
356} 372}
357void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, 373void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
358 int initial_obsolete, unsigned short flags); 374 int initial_obsolete, unsigned short flags);
375void dst_init(struct dst_entry *dst, struct dst_ops *ops,
376 struct net_device *dev, int initial_ref, int initial_obsolete,
377 unsigned short flags);
359void __dst_free(struct dst_entry *dst); 378void __dst_free(struct dst_entry *dst);
360struct dst_entry *dst_destroy(struct dst_entry *dst); 379struct dst_entry *dst_destroy(struct dst_entry *dst);
361 380
@@ -457,7 +476,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
457 return dst; 476 return dst;
458} 477}
459 478
460void dst_init(void); 479void dst_subsys_init(void);
461 480
462/* Flags for xfrm_lookup flags argument. */ 481/* Flags for xfrm_lookup flags argument. */
463enum { 482enum {
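DST_FEATURE_ECN_CA is a kernel-internal feature bit kept alongside the user-visible RTAX_FEATURE_* flags. The fragment below is my illustration, not code from the patch, showing how a caller might test it with the existing dst_feature() accessor:

#include <net/dst.h>

static bool my_dst_wants_ecn_ca(const struct dst_entry *dst)
{
        /* the RTAX_FEATURES metric carries this kernel-internal bit too */
        return dst_feature(dst, DST_FEATURE_ECN_CA) != 0;
}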
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644
index 000000000000..af9d5382f6cb
--- /dev/null
+++ b/include/net/dst_metadata.h
@@ -0,0 +1,108 @@
1#ifndef __NET_DST_METADATA_H
2#define __NET_DST_METADATA_H 1
3
4#include <linux/skbuff.h>
5#include <net/ip_tunnels.h>
6#include <net/dst.h>
7
8struct metadata_dst {
9 struct dst_entry dst;
10 union {
11 struct ip_tunnel_info tun_info;
12 } u;
13};
14
15static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
16{
17 struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
18
19 if (md_dst && md_dst->dst.flags & DST_METADATA)
20 return md_dst;
21
22 return NULL;
23}
24
25static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
26{
27 struct metadata_dst *md_dst = skb_metadata_dst(skb);
28 struct dst_entry *dst;
29
30 if (md_dst)
31 return &md_dst->u.tun_info;
32
33 dst = skb_dst(skb);
34 if (dst && dst->lwtstate)
35 return lwt_tun_info(dst->lwtstate);
36
37 return NULL;
38}
39
40static inline bool skb_valid_dst(const struct sk_buff *skb)
41{
42 struct dst_entry *dst = skb_dst(skb);
43
44 return dst && !(dst->flags & DST_METADATA);
45}
46
47struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
48struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
49
50static inline struct metadata_dst *tun_rx_dst(int md_size)
51{
52 struct metadata_dst *tun_dst;
53
54 tun_dst = metadata_dst_alloc(md_size, GFP_ATOMIC);
55 if (!tun_dst)
56 return NULL;
57
58 tun_dst->u.tun_info.options_len = 0;
59 tun_dst->u.tun_info.mode = 0;
60 return tun_dst;
61}
62
63static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
64 __be16 flags,
65 __be64 tunnel_id,
66 int md_size)
67{
68 const struct iphdr *iph = ip_hdr(skb);
69 struct metadata_dst *tun_dst;
70
71 tun_dst = tun_rx_dst(md_size);
72 if (!tun_dst)
73 return NULL;
74
75 ip_tunnel_key_init(&tun_dst->u.tun_info.key,
76 iph->saddr, iph->daddr, iph->tos, iph->ttl,
77 0, 0, tunnel_id, flags);
78 return tun_dst;
79}
80
81static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
82 __be16 flags,
83 __be64 tunnel_id,
84 int md_size)
85{
86 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
87 struct metadata_dst *tun_dst;
88 struct ip_tunnel_info *info;
89
90 tun_dst = tun_rx_dst(md_size);
91 if (!tun_dst)
92 return NULL;
93
94 info = &tun_dst->u.tun_info;
95 info->mode = IP_TUNNEL_INFO_IPV6;
96 info->key.tun_flags = flags;
97 info->key.tun_id = tunnel_id;
98 info->key.tp_src = 0;
99 info->key.tp_dst = 0;
100
101 info->key.u.ipv6.src = ip6h->saddr;
102 info->key.u.ipv6.dst = ip6h->daddr;
103 info->key.tos = ipv6_get_dsfield(ip6h);
104 info->key.ttl = ip6h->hop_limit;
105 return tun_dst;
106}
107
108#endif /* __NET_DST_METADATA_H */
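The new dst_metadata.h gives light-weight tunnels a way to hand the outer-header key to upper layers through the skb's dst. The receive-path sketch below is mine, not part of the file: my_tunnel_rx is hypothetical, and md_size is passed as 0 because no tunnel options are carried.

#include <linux/errno.h>
#include <net/dst_metadata.h>

static int my_tunnel_rx(struct sk_buff *skb, __be64 tun_id)
{
        struct metadata_dst *tun_dst;

        /* md_size == 0: no tunnel options to carry */
        tun_dst = ip_tun_rx_dst(skb, TUNNEL_KEY, tun_id, 0);
        if (!tun_dst)
                return -ENOMEM;

        /* attach the metadata dst so later consumers can read the key back */
        skb_dst_set(skb, &tun_dst->dst);

        return skb_tunnel_info(skb) ? 0 : -EINVAL;
}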
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 903a55efbffe..59160de702b6 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -19,6 +19,7 @@ struct fib_rule {
19 u8 action; 19 u8 action;
20 /* 3 bytes hole, try to use */ 20 /* 3 bytes hole, try to use */
21 u32 target; 21 u32 target;
22 __be64 tun_id;
22 struct fib_rule __rcu *ctarget; 23 struct fib_rule __rcu *ctarget;
23 struct net *fr_net; 24 struct net *fr_net;
24 25
@@ -65,7 +66,6 @@ struct fib_rules_ops {
65 struct nlattr **); 66 struct nlattr **);
66 int (*fill)(struct fib_rule *, struct sk_buff *, 67 int (*fill)(struct fib_rule *, struct sk_buff *,
67 struct fib_rule_hdr *); 68 struct fib_rule_hdr *);
68 u32 (*default_pref)(struct fib_rules_ops *ops);
69 size_t (*nlmsg_payload)(struct fib_rule *); 69 size_t (*nlmsg_payload)(struct fib_rule *);
70 70
71 /* Called after modifications to the rules set, must flush 71 /* Called after modifications to the rules set, must flush
@@ -117,5 +117,4 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
117 struct fib_lookup_arg *); 117 struct fib_lookup_arg *);
118int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, 118int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
119 u32 flags); 119 u32 flags);
120u32 fib_default_rule_pref(struct fib_rules_ops *ops);
121#endif 120#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 8109a159d1b3..acd6a096250e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -10,6 +10,7 @@
10#include <linux/socket.h> 10#include <linux/socket.h>
11#include <linux/in6.h> 11#include <linux/in6.h>
12#include <linux/atomic.h> 12#include <linux/atomic.h>
13#include <net/flow_dissector.h>
13 14
14/* 15/*
15 * ifindex generation is per-net namespace, and loopback is 16 * ifindex generation is per-net namespace, and loopback is
@@ -19,6 +20,10 @@
19 20
20#define LOOPBACK_IFINDEX 1 21#define LOOPBACK_IFINDEX 1
21 22
23struct flowi_tunnel {
24 __be64 tun_id;
25};
26
22struct flowi_common { 27struct flowi_common {
23 int flowic_oif; 28 int flowic_oif;
24 int flowic_iif; 29 int flowic_iif;
@@ -29,7 +34,9 @@ struct flowi_common {
29 __u8 flowic_flags; 34 __u8 flowic_flags;
30#define FLOWI_FLAG_ANYSRC 0x01 35#define FLOWI_FLAG_ANYSRC 0x01
31#define FLOWI_FLAG_KNOWN_NH 0x02 36#define FLOWI_FLAG_KNOWN_NH 0x02
37#define FLOWI_FLAG_VRFSRC 0x04
32 __u32 flowic_secid; 38 __u32 flowic_secid;
39 struct flowi_tunnel flowic_tun_key;
33}; 40};
34 41
35union flowi_uli { 42union flowi_uli {
@@ -66,6 +73,7 @@ struct flowi4 {
66#define flowi4_proto __fl_common.flowic_proto 73#define flowi4_proto __fl_common.flowic_proto
67#define flowi4_flags __fl_common.flowic_flags 74#define flowi4_flags __fl_common.flowic_flags
68#define flowi4_secid __fl_common.flowic_secid 75#define flowi4_secid __fl_common.flowic_secid
76#define flowi4_tun_key __fl_common.flowic_tun_key
69 77
70 /* (saddr,daddr) must be grouped, same order as in IP header */ 78 /* (saddr,daddr) must be grouped, same order as in IP header */
71 __be32 saddr; 79 __be32 saddr;
@@ -95,6 +103,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
95 fl4->flowi4_proto = proto; 103 fl4->flowi4_proto = proto;
96 fl4->flowi4_flags = flags; 104 fl4->flowi4_flags = flags;
97 fl4->flowi4_secid = 0; 105 fl4->flowi4_secid = 0;
106 fl4->flowi4_tun_key.tun_id = 0;
98 fl4->daddr = daddr; 107 fl4->daddr = daddr;
99 fl4->saddr = saddr; 108 fl4->saddr = saddr;
100 fl4->fl4_dport = dport; 109 fl4->fl4_dport = dport;
@@ -122,6 +131,7 @@ struct flowi6 {
122#define flowi6_proto __fl_common.flowic_proto 131#define flowi6_proto __fl_common.flowic_proto
123#define flowi6_flags __fl_common.flowic_flags 132#define flowi6_flags __fl_common.flowic_flags
124#define flowi6_secid __fl_common.flowic_secid 133#define flowi6_secid __fl_common.flowic_secid
134#define flowi6_tun_key __fl_common.flowic_tun_key
125 struct in6_addr daddr; 135 struct in6_addr daddr;
126 struct in6_addr saddr; 136 struct in6_addr saddr;
127 __be32 flowlabel; 137 __be32 flowlabel;
@@ -165,6 +175,7 @@ struct flowi {
165#define flowi_proto u.__fl_common.flowic_proto 175#define flowi_proto u.__fl_common.flowic_proto
166#define flowi_flags u.__fl_common.flowic_flags 176#define flowi_flags u.__fl_common.flowic_flags
167#define flowi_secid u.__fl_common.flowic_secid 177#define flowi_secid u.__fl_common.flowic_secid
178#define flowi_tun_key u.__fl_common.flowic_tun_key
168} __attribute__((__aligned__(BITS_PER_LONG/8))); 179} __attribute__((__aligned__(BITS_PER_LONG/8)));
169 180
170static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) 181static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
@@ -233,4 +244,22 @@ void flow_cache_flush(struct net *net);
233void flow_cache_flush_deferred(struct net *net); 244void flow_cache_flush_deferred(struct net *net);
234extern atomic_t flow_cache_genid; 245extern atomic_t flow_cache_genid;
235 246
247__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
248
249static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6)
250{
251 struct flow_keys keys;
252
253 return __get_hash_from_flowi6(fl6, &keys);
254}
255
256__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys);
257
258static inline __u32 get_hash_from_flowi4(const struct flowi4 *fl4)
259{
260 struct flow_keys keys;
261
262 return __get_hash_from_flowi4(fl4, &keys);
263}
264
236#endif 265#endif
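get_hash_from_flowi4()/get_hash_from_flowi6() wrap __get_hash_from_flowi*() with an on-stack struct flow_keys scratch area. A trivial, hypothetical bucket selection (my_flow4_bucket) as illustration, not from the patch:

#include <net/flow.h>

static u32 my_flow4_bucket(const struct flowi4 *fl4, u32 nbuckets)
{
        /* the wrapper hides the on-stack struct flow_keys scratch area */
        return get_hash_from_flowi4(fl4) % nbuckets;
}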
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 1a8c22419936..8c8548cf5888 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -2,7 +2,6 @@
2#define _NET_FLOW_DISSECTOR_H 2#define _NET_FLOW_DISSECTOR_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/skbuff.h>
6#include <linux/in6.h> 5#include <linux/in6.h>
7#include <uapi/linux/if_ether.h> 6#include <uapi/linux/if_ether.h>
8 7
@@ -13,8 +12,13 @@
13struct flow_dissector_key_control { 12struct flow_dissector_key_control {
14 u16 thoff; 13 u16 thoff;
15 u16 addr_type; 14 u16 addr_type;
15 u32 flags;
16}; 16};
17 17
18#define FLOW_DIS_IS_FRAGMENT BIT(0)
19#define FLOW_DIS_FIRST_FRAG BIT(1)
20#define FLOW_DIS_ENCAPSULATION BIT(2)
21
18/** 22/**
19 * struct flow_dissector_key_basic: 23 * struct flow_dissector_key_basic:
20 * @thoff: Transport header offset 24 * @thoff: Transport header offset
@@ -123,6 +127,11 @@ enum flow_dissector_key_id {
123 FLOW_DISSECTOR_KEY_MAX, 127 FLOW_DISSECTOR_KEY_MAX,
124}; 128};
125 129
130#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
131#define FLOW_DISSECTOR_F_STOP_AT_L3 BIT(1)
132#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(2)
133#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(3)
134
126struct flow_dissector_key { 135struct flow_dissector_key {
127 enum flow_dissector_key_id key_id; 136 enum flow_dissector_key_id key_id;
128 size_t offset; /* offset of struct flow_dissector_key_* 137 size_t offset; /* offset of struct flow_dissector_key_*
@@ -134,23 +143,6 @@ struct flow_dissector {
134 unsigned short int offset[FLOW_DISSECTOR_KEY_MAX]; 143 unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
135}; 144};
136 145
137void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
138 const struct flow_dissector_key *key,
139 unsigned int key_count);
140
141bool __skb_flow_dissect(const struct sk_buff *skb,
142 struct flow_dissector *flow_dissector,
143 void *target_container,
144 void *data, __be16 proto, int nhoff, int hlen);
145
146static inline bool skb_flow_dissect(const struct sk_buff *skb,
147 struct flow_dissector *flow_dissector,
148 void *target_container)
149{
150 return __skb_flow_dissect(skb, flow_dissector, target_container,
151 NULL, 0, 0, 0);
152}
153
154struct flow_keys { 146struct flow_keys {
155 struct flow_dissector_key_control control; 147 struct flow_dissector_key_control control;
156#define FLOW_KEYS_HASH_START_FIELD basic 148#define FLOW_KEYS_HASH_START_FIELD basic
@@ -170,38 +162,6 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow);
170extern struct flow_dissector flow_keys_dissector; 162extern struct flow_dissector flow_keys_dissector;
171extern struct flow_dissector flow_keys_buf_dissector; 163extern struct flow_dissector flow_keys_buf_dissector;
172 164
173static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
174 struct flow_keys *flow)
175{
176 memset(flow, 0, sizeof(*flow));
177 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
178 NULL, 0, 0, 0);
179}
180
181static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
182 void *data, __be16 proto,
183 int nhoff, int hlen)
184{
185 memset(flow, 0, sizeof(*flow));
186 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
187 data, proto, nhoff, hlen);
188}
189
190__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
191 void *data, int hlen_proto);
192
193static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
194 int thoff, u8 ip_proto)
195{
196 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
197}
198
199u32 flow_hash_from_keys(struct flow_keys *keys);
200void __skb_get_hash(struct sk_buff *skb);
201u32 skb_get_poff(const struct sk_buff *skb);
202u32 __skb_get_poff(const struct sk_buff *skb, void *data,
203 const struct flow_keys *keys, int hlen);
204
205/* struct flow_keys_digest: 165/* struct flow_keys_digest:
206 * 166 *
207 * This structure is used to hold a digest of the full flow keys. This is a 167 * This structure is used to hold a digest of the full flow keys. This is a
@@ -217,4 +177,11 @@ struct flow_keys_digest {
217void make_flow_keys_digest(struct flow_keys_digest *digest, 177void make_flow_keys_digest(struct flow_keys_digest *digest,
218 const struct flow_keys *flow); 178 const struct flow_keys *flow);
219 179
180static inline bool flow_keys_have_l4(struct flow_keys *keys)
181{
182 return (keys->ports.ports || keys->tags.flow_label);
183}
184
185u32 flow_hash_from_keys(struct flow_keys *keys);
186
220#endif 187#endif
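flow_keys_have_l4() tells whether the dissector actually found ports or a flow label. A short illustrative caller, not from the patch (my_l4_hash_or_zero is a placeholder):

#include <net/flow_dissector.h>

static u32 my_l4_hash_or_zero(struct flow_keys *keys)
{
        /* only trust the hash when ports or a flow label were dissected */
        if (!flow_keys_have_l4(keys))
                return 0;

        return flow_hash_from_keys(keys);
}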
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 2a0543a1899d..3106ed6eae0d 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,40 +62,9 @@ struct genevehdr {
62 struct geneve_opt options[]; 62 struct geneve_opt options[];
63}; 63};
64 64
65static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
66{
67 return (struct genevehdr *)(udp_hdr(skb) + 1);
68}
69
70#ifdef CONFIG_INET 65#ifdef CONFIG_INET
71struct geneve_sock; 66struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
72 67 u8 name_assign_type, u16 dst_port);
73typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
74
75struct geneve_sock {
76 struct list_head list;
77 geneve_rcv_t *rcv;
78 void *rcv_data;
79 struct socket *sock;
80 struct rcu_head rcu;
81 int refcnt;
82 struct udp_offload udp_offloads;
83};
84
85#define GENEVE_VER 0
86#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
87
88struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
89 geneve_rcv_t *rcv, void *data,
90 bool no_share, bool ipv6);
91
92void geneve_sock_release(struct geneve_sock *vs);
93
94int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
95 struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
96 __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
97 __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
98 bool csum, bool xnet);
99#endif /*ifdef CONFIG_INET */ 68#endif /*ifdef CONFIG_INET */
100 69
101#endif /*ifdef__NET_GENEVE_H */ 70#endif /*ifdef__NET_GENEVE_H */
diff --git a/include/net/gre.h b/include/net/gre.h
index b53182018743..97eafdc47eea 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -4,6 +4,12 @@
4#include <linux/skbuff.h> 4#include <linux/skbuff.h>
5#include <net/ip_tunnels.h> 5#include <net/ip_tunnels.h>
6 6
7struct gre_base_hdr {
8 __be16 flags;
9 __be16 protocol;
10};
11#define GRE_HEADER_SECTION 4
12
7#define GREPROTO_CISCO 0 13#define GREPROTO_CISCO 0
8#define GREPROTO_PPTP 1 14#define GREPROTO_PPTP 1
9#define GREPROTO_MAX 2 15#define GREPROTO_MAX 2
@@ -14,91 +20,9 @@ struct gre_protocol {
14 void (*err_handler)(struct sk_buff *skb, u32 info); 20 void (*err_handler)(struct sk_buff *skb, u32 info);
15}; 21};
16 22
17struct gre_base_hdr {
18 __be16 flags;
19 __be16 protocol;
20};
21#define GRE_HEADER_SECTION 4
22
23int gre_add_protocol(const struct gre_protocol *proto, u8 version); 23int gre_add_protocol(const struct gre_protocol *proto, u8 version);
24int gre_del_protocol(const struct gre_protocol *proto, u8 version); 24int gre_del_protocol(const struct gre_protocol *proto, u8 version);
25 25
26struct gre_cisco_protocol { 26struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
27 int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi); 27 u8 name_assign_type);
28 int (*err_handler)(struct sk_buff *skb, u32 info,
29 const struct tnl_ptk_info *tpi);
30 u8 priority;
31};
32
33int gre_cisco_register(struct gre_cisco_protocol *proto);
34int gre_cisco_unregister(struct gre_cisco_protocol *proto);
35
36void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
37 int hdr_len);
38
39static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
40 bool csum)
41{
42 return iptunnel_handle_offloads(skb, csum,
43 csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
44}
45
46
47static inline int ip_gre_calc_hlen(__be16 o_flags)
48{
49 int addend = 4;
50
51 if (o_flags&TUNNEL_CSUM)
52 addend += 4;
53 if (o_flags&TUNNEL_KEY)
54 addend += 4;
55 if (o_flags&TUNNEL_SEQ)
56 addend += 4;
57 return addend;
58}
59
60static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
61{
62 __be16 tflags = 0;
63
64 if (flags & GRE_CSUM)
65 tflags |= TUNNEL_CSUM;
66 if (flags & GRE_ROUTING)
67 tflags |= TUNNEL_ROUTING;
68 if (flags & GRE_KEY)
69 tflags |= TUNNEL_KEY;
70 if (flags & GRE_SEQ)
71 tflags |= TUNNEL_SEQ;
72 if (flags & GRE_STRICT)
73 tflags |= TUNNEL_STRICT;
74 if (flags & GRE_REC)
75 tflags |= TUNNEL_REC;
76 if (flags & GRE_VERSION)
77 tflags |= TUNNEL_VERSION;
78
79 return tflags;
80}
81
82static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
83{
84 __be16 flags = 0;
85
86 if (tflags & TUNNEL_CSUM)
87 flags |= GRE_CSUM;
88 if (tflags & TUNNEL_ROUTING)
89 flags |= GRE_ROUTING;
90 if (tflags & TUNNEL_KEY)
91 flags |= GRE_KEY;
92 if (tflags & TUNNEL_SEQ)
93 flags |= GRE_SEQ;
94 if (tflags & TUNNEL_STRICT)
95 flags |= GRE_STRICT;
96 if (tflags & TUNNEL_REC)
97 flags |= GRE_REC;
98 if (tflags & TUNNEL_VERSION)
99 flags |= GRE_VERSION;
100
101 return flags;
102}
103
104#endif 28#endif
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 0f712c0bc0bf..cf6c74550baa 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -32,37 +32,28 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
32 return; 32 return;
33 } 33 }
34 34
35 /* We run in BH context */
36 spin_lock(&cell->napi_skbs.lock);
37
38 __skb_queue_tail(&cell->napi_skbs, skb); 35 __skb_queue_tail(&cell->napi_skbs, skb);
39 if (skb_queue_len(&cell->napi_skbs) == 1) 36 if (skb_queue_len(&cell->napi_skbs) == 1)
40 napi_schedule(&cell->napi); 37 napi_schedule(&cell->napi);
41
42 spin_unlock(&cell->napi_skbs.lock);
43} 38}
44 39
45/* called unser BH context */ 40/* called under BH context */
46static inline int gro_cell_poll(struct napi_struct *napi, int budget) 41static inline int gro_cell_poll(struct napi_struct *napi, int budget)
47{ 42{
48 struct gro_cell *cell = container_of(napi, struct gro_cell, napi); 43 struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
49 struct sk_buff *skb; 44 struct sk_buff *skb;
50 int work_done = 0; 45 int work_done = 0;
51 46
52 spin_lock(&cell->napi_skbs.lock);
53 while (work_done < budget) { 47 while (work_done < budget) {
54 skb = __skb_dequeue(&cell->napi_skbs); 48 skb = __skb_dequeue(&cell->napi_skbs);
55 if (!skb) 49 if (!skb)
56 break; 50 break;
57 spin_unlock(&cell->napi_skbs.lock);
58 napi_gro_receive(napi, skb); 51 napi_gro_receive(napi, skb);
59 work_done++; 52 work_done++;
60 spin_lock(&cell->napi_skbs.lock);
61 } 53 }
62 54
63 if (work_done < budget) 55 if (work_done < budget)
64 napi_complete(napi); 56 napi_complete_done(napi, work_done);
65 spin_unlock(&cell->napi_skbs.lock);
66 return work_done; 57 return work_done;
67} 58}
68 59
@@ -77,7 +68,7 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
77 for_each_possible_cpu(i) { 68 for_each_possible_cpu(i) {
78 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); 69 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
79 70
80 skb_queue_head_init(&cell->napi_skbs); 71 __skb_queue_head_init(&cell->napi_skbs);
81 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); 72 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
82 napi_enable(&cell->napi); 73 napi_enable(&cell->napi);
83 } 74 }
@@ -92,8 +83,9 @@ static inline void gro_cells_destroy(struct gro_cells *gcells)
92 return; 83 return;
93 for_each_possible_cpu(i) { 84 for_each_possible_cpu(i) {
94 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); 85 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
86
95 netif_napi_del(&cell->napi); 87 netif_napi_del(&cell->napi);
96 skb_queue_purge(&cell->napi_skbs); 88 __skb_queue_purge(&cell->napi_skbs);
97 } 89 }
98 free_percpu(gcells->cells); 90 free_percpu(gcells->cells);
99 gcells->cells = NULL; 91 gcells->cells = NULL;
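With the gro_cells queues now touched only from BH context, the per-cell spinlock is gone. The sketch below shows the intended call pattern from a tunnel driver's receive path; my_tunnel and my_tunnel_gro_rx are hypothetical names, not part of the patch.

#include <net/gro_cells.h>

struct my_tunnel {
        struct gro_cells gro_cells;     /* set up with gro_cells_init() */
};

static void my_tunnel_gro_rx(struct my_tunnel *t, struct sk_buff *skb)
{
        /* runs in BH context, which is why the per-cell queue no longer
         * needs its own spinlock
         */
        gro_cells_receive(&t->gro_cells, skb);
}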
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b73c88a19dd4..b07d126694a7 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -205,8 +205,8 @@ void inet_put_port(struct sock *sk);
205 205
206void inet_hashinfo_init(struct inet_hashinfo *h); 206void inet_hashinfo_init(struct inet_hashinfo *h);
207 207
208int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw); 208void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
209int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw); 209void __inet_hash(struct sock *sk, struct sock *osk);
210void inet_hash(struct sock *sk); 210void inet_hash(struct sock *sk);
211void inet_unhash(struct sock *sk); 211void inet_unhash(struct sock *sk);
212 212
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 360c4802288d..879d6e5a973b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -100,10 +100,8 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
100void inet_twsk_free(struct inet_timewait_sock *tw); 100void inet_twsk_free(struct inet_timewait_sock *tw);
101void inet_twsk_put(struct inet_timewait_sock *tw); 101void inet_twsk_put(struct inet_timewait_sock *tw);
102 102
103int inet_twsk_unhash(struct inet_timewait_sock *tw); 103void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
104 104 struct inet_hashinfo *hashinfo);
105int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
106 struct inet_hashinfo *hashinfo);
107 105
108struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, 106struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
109 struct inet_timewait_death_row *dr, 107 struct inet_timewait_death_row *dr,
@@ -113,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
113 struct inet_hashinfo *hashinfo); 111 struct inet_hashinfo *hashinfo);
114 112
115void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); 113void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
116void inet_twsk_deschedule(struct inet_timewait_sock *tw); 114void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
117 115
118void inet_twsk_purge(struct inet_hashinfo *hashinfo, 116void inet_twsk_purge(struct inet_hashinfo *hashinfo,
119 struct inet_timewait_death_row *twdr, int family); 117 struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d5332ddcea3f..4a6009d4486b 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,16 +15,20 @@
15#include <net/ipv6.h> 15#include <net/ipv6.h>
16#include <linux/atomic.h> 16#include <linux/atomic.h>
17 17
18struct inetpeer_addr_base { 18/* IPv4 address key for cache lookups */
19 union { 19struct ipv4_addr_key {
20 __be32 a4; 20 __be32 addr;
21 __be32 a6[4]; 21 int vif;
22 struct in6_addr in6;
23 };
24}; 22};
25 23
24#define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32))
25
26struct inetpeer_addr { 26struct inetpeer_addr {
27 struct inetpeer_addr_base addr; 27 union {
28 struct ipv4_addr_key a4;
29 struct in6_addr a6;
30 u32 key[INETPEER_MAXKEYSZ];
31 };
28 __u16 family; 32 __u16 family;
29}; 33};
30 34
@@ -65,69 +69,33 @@ struct inet_peer_base {
65 int total; 69 int total;
66}; 70};
67 71
68#define INETPEER_BASE_BIT 0x1UL 72void inet_peer_base_init(struct inet_peer_base *);
69
70static inline struct inet_peer *inetpeer_ptr(unsigned long val)
71{
72 BUG_ON(val & INETPEER_BASE_BIT);
73 return (struct inet_peer *) val;
74}
75 73
76static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val) 74void inet_initpeers(void) __init;
77{
78 if (!(val & INETPEER_BASE_BIT))
79 return NULL;
80 val &= ~INETPEER_BASE_BIT;
81 return (struct inet_peer_base *) val;
82}
83 75
84static inline bool inetpeer_ptr_is_peer(unsigned long val) 76#define INETPEER_METRICS_NEW (~(u32) 0)
85{
86 return !(val & INETPEER_BASE_BIT);
87}
88 77
89static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer) 78static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
90{ 79{
91 /* This implicitly clears INETPEER_BASE_BIT */ 80 iaddr->a4.addr = ip;
92 *val = (unsigned long) peer; 81 iaddr->family = AF_INET;
93} 82}
94 83
95static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer) 84static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
96{ 85{
97 unsigned long val = (unsigned long) peer; 86 return iaddr->a4.addr;
98 unsigned long orig = *ptr;
99
100 if (!(orig & INETPEER_BASE_BIT) ||
101 cmpxchg(ptr, orig, val) != orig)
102 return false;
103 return true;
104} 87}
105 88
106static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base) 89static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
90 struct in6_addr *in6)
107{ 91{
108 *ptr = (unsigned long) base | INETPEER_BASE_BIT; 92 iaddr->a6 = *in6;
93 iaddr->family = AF_INET6;
109} 94}
110 95
111static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from) 96static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
112{ 97{
113 unsigned long val = *from; 98 return &iaddr->a6;
114
115 *to = val;
116 if (inetpeer_ptr_is_peer(val)) {
117 struct inet_peer *peer = inetpeer_ptr(val);
118 atomic_inc(&peer->refcnt);
119 }
120}
121
122void inet_peer_base_init(struct inet_peer_base *);
123
124void inet_initpeers(void) __init;
125
126#define INETPEER_METRICS_NEW (~(u32) 0)
127
128static inline bool inet_metrics_new(const struct inet_peer *p)
129{
130 return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
131} 99}
132 100
133/* can be called with or without local BH being disabled */ 101/* can be called with or without local BH being disabled */
@@ -137,11 +105,12 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
137 105
138static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base, 106static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
139 __be32 v4daddr, 107 __be32 v4daddr,
140 int create) 108 int vif, int create)
141{ 109{
142 struct inetpeer_addr daddr; 110 struct inetpeer_addr daddr;
143 111
144 daddr.addr.a4 = v4daddr; 112 daddr.a4.addr = v4daddr;
113 daddr.a4.vif = vif;
145 daddr.family = AF_INET; 114 daddr.family = AF_INET;
146 return inet_getpeer(base, &daddr, create); 115 return inet_getpeer(base, &daddr, create);
147} 116}
@@ -152,23 +121,36 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
152{ 121{
153 struct inetpeer_addr daddr; 122 struct inetpeer_addr daddr;
154 123
155 daddr.addr.in6 = *v6daddr; 124 daddr.a6 = *v6daddr;
156 daddr.family = AF_INET6; 125 daddr.family = AF_INET6;
157 return inet_getpeer(base, &daddr, create); 126 return inet_getpeer(base, &daddr, create);
158} 127}
159 128
129static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
130 const struct inetpeer_addr *b)
131{
132 int i, n;
133
134 if (a->family == AF_INET)
135 n = sizeof(a->a4) / sizeof(u32);
136 else
137 n = sizeof(a->a6) / sizeof(u32);
138
139 for (i = 0; i < n; i++) {
140 if (a->key[i] == b->key[i])
141 continue;
142 if (a->key[i] < b->key[i])
143 return -1;
144 return 1;
145 }
146
147 return 0;
148}
149
160/* can be called from BH context or outside */ 150/* can be called from BH context or outside */
161void inet_putpeer(struct inet_peer *p); 151void inet_putpeer(struct inet_peer *p);
162bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); 152bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
163 153
164void inetpeer_invalidate_tree(struct inet_peer_base *); 154void inetpeer_invalidate_tree(struct inet_peer_base *);
165 155
166/*
167 * temporary check to make sure we dont access rid, tcp_ts,
168 * tcp_ts_stamp if no refcount is taken on inet_peer
169 */
170static inline void inet_peer_refcheck(const struct inet_peer *p)
171{
172 WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
173}
174#endif /* _NET_INETPEER_H */ 156#endif /* _NET_INETPEER_H */
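inet_getpeer_v4() now takes a vif argument so the same IPv4 address can have distinct peer entries per L3 domain. A minimal, hypothetical wrapper for illustration (my_lookup_peer is not from the patch):

#include <net/inetpeer.h>

static struct inet_peer *my_lookup_peer(struct inet_peer_base *base,
                                        __be32 daddr, int vif)
{
        /* create == 1: allocate an entry if none exists yet; vif keeps
         * identical addresses seen in different L3 domains apart
         */
        return inet_getpeer_v4(base, daddr, vif, 1);
}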
diff --git a/include/net/ip.h b/include/net/ip.h
index d5fe9f2ab699..9b9ca2839399 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -202,10 +202,20 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
202#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd) 202#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
203#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd) 203#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
204 204
205u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
205unsigned long snmp_fold_field(void __percpu *mib, int offt); 206unsigned long snmp_fold_field(void __percpu *mib, int offt);
206#if BITS_PER_LONG==32 207#if BITS_PER_LONG==32
208u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
209 size_t syncp_offset);
207u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off); 210u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
208#else 211#else
212static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
213 size_t syncp_offset)
214{
215 return snmp_get_cpu_field(mib, cpu, offct);
216
217}
218
209static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off) 219static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
210{ 220{
211 return snmp_fold_field(mib, offt); 221 return snmp_fold_field(mib, offt);
@@ -370,22 +380,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
370 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 380 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
371} 381}
372 382
373static inline void inet_set_txhash(struct sock *sk)
374{
375 struct inet_sock *inet = inet_sk(sk);
376 struct flow_keys keys;
377
378 memset(&keys, 0, sizeof(keys));
379
380 keys.addrs.v4addrs.src = inet->inet_saddr;
381 keys.addrs.v4addrs.dst = inet->inet_daddr;
382 keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
383 keys.ports.src = inet->inet_sport;
384 keys.ports.dst = inet->inet_dport;
385
386 sk->sk_txhash = flow_hash_from_keys(&keys);
387}
388
389static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto) 383static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
390{ 384{
391 const struct iphdr *iph = skb_gro_network_header(skb); 385 const struct iphdr *iph = skb_gro_network_header(skb);
@@ -474,6 +468,11 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
474 468
475#endif 469#endif
476 470
471static inline unsigned int ipv4_addr_hash(__be32 ip)
472{
473 return (__force unsigned int) ip;
474}
475
477bool ip_call_ra_chain(struct sk_buff *skb); 476bool ip_call_ra_chain(struct sk_buff *skb);
478 477
479/* 478/*
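snmp_get_cpu_field() exposes the per-cpu value that snmp_fold_field() sums internally, which lets callers batch the folding differently. A sketch of an equivalent fold, not from the patch (my_fold_mib is a placeholder):

#include <linux/cpumask.h>
#include <net/ip.h>

static u64 my_fold_mib(void __percpu *mib, int offt)
{
        u64 sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += snmp_get_cpu_field(mib, cpu, offt);

        return sum;
}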
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3b76849c190f..063d30474cf6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -51,6 +51,8 @@ struct fib6_config {
51 struct nlattr *fc_mp; 51 struct nlattr *fc_mp;
52 52
53 struct nl_info fc_nlinfo; 53 struct nl_info fc_nlinfo;
54 struct nlattr *fc_encap;
55 u16 fc_encap_type;
54}; 56};
55 57
56struct fib6_node { 58struct fib6_node {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5fa643b4e891..a37d0432bebd 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -44,7 +44,9 @@ struct fib_config {
44 u32 fc_flow; 44 u32 fc_flow;
45 u32 fc_nlflags; 45 u32 fc_nlflags;
46 struct nl_info fc_nlinfo; 46 struct nl_info fc_nlinfo;
47 }; 47 struct nlattr *fc_encap;
48 u16 fc_encap_type;
49};
48 50
49struct fib_info; 51struct fib_info;
50struct rtable; 52struct rtable;
@@ -89,6 +91,7 @@ struct fib_nh {
89 struct rtable __rcu * __percpu *nh_pcpu_rth_output; 91 struct rtable __rcu * __percpu *nh_pcpu_rth_output;
90 struct rtable __rcu *nh_rth_input; 92 struct rtable __rcu *nh_rth_input;
91 struct fnhe_hash_bucket __rcu *nh_exceptions; 93 struct fnhe_hash_bucket __rcu *nh_exceptions;
94 struct lwtunnel_state *nh_lwtstate;
92}; 95};
93 96
94/* 97/*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index d8214cb88bbc..9a6a3ba888e8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -4,14 +4,15 @@
4#include <linux/if_tunnel.h> 4#include <linux/if_tunnel.h>
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/skbuff.h> 6#include <linux/skbuff.h>
7#include <linux/socket.h>
7#include <linux/types.h> 8#include <linux/types.h>
8#include <linux/u64_stats_sync.h> 9#include <linux/u64_stats_sync.h>
9#include <net/dsfield.h> 10#include <net/dsfield.h>
10#include <net/gro_cells.h> 11#include <net/gro_cells.h>
11#include <net/inet_ecn.h> 12#include <net/inet_ecn.h>
12#include <net/ip.h>
13#include <net/netns/generic.h> 13#include <net/netns/generic.h>
14#include <net/rtnetlink.h> 14#include <net/rtnetlink.h>
15#include <net/lwtunnel.h>
15 16
16#if IS_ENABLED(CONFIG_IPV6) 17#if IS_ENABLED(CONFIG_IPV6)
17#include <net/ipv6.h> 18#include <net/ipv6.h>
@@ -22,6 +23,44 @@
22/* Keep error state on tunnel for 30 sec */ 23/* Keep error state on tunnel for 30 sec */
23#define IPTUNNEL_ERR_TIMEO (30*HZ) 24#define IPTUNNEL_ERR_TIMEO (30*HZ)
24 25
26/* Used to memset ip_tunnel padding. */
27#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)
28
29/* Used to memset ipv4 address padding. */
30#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
31#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
32 (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
33 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
34
35struct ip_tunnel_key {
36 __be64 tun_id;
37 union {
38 struct {
39 __be32 src;
40 __be32 dst;
41 } ipv4;
42 struct {
43 struct in6_addr src;
44 struct in6_addr dst;
45 } ipv6;
46 } u;
47 __be16 tun_flags;
48 u8 tos; /* TOS for IPv4, TC for IPv6 */
49 u8 ttl; /* TTL for IPv4, HL for IPv6 */
50 __be16 tp_src;
51 __be16 tp_dst;
52};
53
54/* Flags for ip_tunnel_info mode. */
55#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
56#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
57
58struct ip_tunnel_info {
59 struct ip_tunnel_key key;
60 u8 options_len;
61 u8 mode;
62};
63
25/* 6rd prefix/relay information */ 64/* 6rd prefix/relay information */
26#ifdef CONFIG_IPV6_SIT_6RD 65#ifdef CONFIG_IPV6_SIT_6RD
27struct ip_tunnel_6rd_parm { 66struct ip_tunnel_6rd_parm {
@@ -33,8 +72,8 @@ struct ip_tunnel_6rd_parm {
33#endif 72#endif
34 73
35struct ip_tunnel_encap { 74struct ip_tunnel_encap {
36 __u16 type; 75 u16 type;
37 __u16 flags; 76 u16 flags;
38 __be16 sport; 77 __be16 sport;
39 __be16 dport; 78 __be16 dport;
40}; 79};
@@ -51,6 +90,8 @@ struct ip_tunnel_dst {
51 __be32 saddr; 90 __be32 saddr;
52}; 91};
53 92
93struct metadata_dst;
94
54struct ip_tunnel { 95struct ip_tunnel {
55 struct ip_tunnel __rcu *next; 96 struct ip_tunnel __rcu *next;
56 struct hlist_node hash_node; 97 struct hlist_node hash_node;
@@ -62,8 +103,8 @@ struct ip_tunnel {
62 * arrived */ 103 * arrived */
63 104
64 /* These four fields used only by GRE */ 105 /* These four fields used only by GRE */
65 __u32 i_seqno; /* The last seen seqno */ 106 u32 i_seqno; /* The last seen seqno */
66 __u32 o_seqno; /* The last output seqno */ 107 u32 o_seqno; /* The last output seqno */
67 int tun_hlen; /* Precalculated header length */ 108 int tun_hlen; /* Precalculated header length */
68 int mlink; 109 int mlink;
69 110
@@ -84,6 +125,7 @@ struct ip_tunnel {
84 unsigned int prl_count; /* # of entries in PRL */ 125 unsigned int prl_count; /* # of entries in PRL */
85 int ip_tnl_net_id; 126 int ip_tnl_net_id;
86 struct gro_cells gro_cells; 127 struct gro_cells gro_cells;
128 bool collect_md;
87}; 129};
88 130
89#define TUNNEL_CSUM __cpu_to_be16(0x01) 131#define TUNNEL_CSUM __cpu_to_be16(0x01)
@@ -118,6 +160,7 @@ struct tnl_ptk_info {
118struct ip_tunnel_net { 160struct ip_tunnel_net {
119 struct net_device *fb_tunnel_dev; 161 struct net_device *fb_tunnel_dev;
120 struct hlist_head tunnels[IP_TNL_HASH_SIZE]; 162 struct hlist_head tunnels[IP_TNL_HASH_SIZE];
163 struct ip_tunnel __rcu *collect_md_tun;
121}; 164};
122 165
123struct ip_tunnel_encap_ops { 166struct ip_tunnel_encap_ops {
@@ -136,6 +179,40 @@ int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
136int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, 179int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
137 unsigned int num); 180 unsigned int num);
138 181
182static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
183 __be32 saddr, __be32 daddr,
184 u8 tos, u8 ttl,
185 __be16 tp_src, __be16 tp_dst,
186 __be64 tun_id, __be16 tun_flags)
187{
188 key->tun_id = tun_id;
189 key->u.ipv4.src = saddr;
190 key->u.ipv4.dst = daddr;
191 memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
192 0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
193 key->tos = tos;
194 key->ttl = ttl;
195 key->tun_flags = tun_flags;
196
 197	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
198 * the upper tunnel are used.
 199	 * E.g.: for GRE over IPsec, the tp_src and tp_dst are zero.
200 */
201 key->tp_src = tp_src;
202 key->tp_dst = tp_dst;
203
204 /* Clear struct padding. */
205 if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
206 memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
207 0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
208}
209
210static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
211 *tun_info)
212{
213 return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
214}
215
139#ifdef CONFIG_INET 216#ifdef CONFIG_INET
140 217
141int ip_tunnel_init(struct net_device *dev); 218int ip_tunnel_init(struct net_device *dev);
@@ -163,7 +240,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
163 __be32 key); 240 __be32 key);
164 241
165int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, 242int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
166 const struct tnl_ptk_info *tpi, bool log_ecn_error); 243 const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
244 bool log_ecn_error);
167int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], 245int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
168 struct ip_tunnel_parm *p); 246 struct ip_tunnel_parm *p);
169int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], 247int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -196,8 +274,8 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
196 274
197int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); 275int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
198int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, 276int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
199 __be32 src, __be32 dst, __u8 proto, 277 __be32 src, __be32 dst, u8 proto,
200 __u8 tos, __u8 ttl, __be16 df, bool xnet); 278 u8 tos, u8 ttl, __be16 df, bool xnet);
201 279
202struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, 280struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
203 int gso_type_mask); 281 int gso_type_mask);
@@ -221,6 +299,57 @@ static inline void iptunnel_xmit_stats(int err,
221 } 299 }
222} 300}
223 301
302static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
303{
304 return info + 1;
305}
306
307static inline void ip_tunnel_info_opts_get(void *to,
308 const struct ip_tunnel_info *info)
309{
310 memcpy(to, info + 1, info->options_len);
311}
312
313static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
314 const void *from, int len)
315{
316 memcpy(ip_tunnel_info_opts(info), from, len);
317 info->options_len = len;
318}
319
320static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
321{
322 return (struct ip_tunnel_info *)lwtstate->data;
323}
324
325extern struct static_key ip_tunnel_metadata_cnt;
326
327/* Returns > 0 if metadata should be collected */
328static inline int ip_tunnel_collect_metadata(void)
329{
330 return static_key_false(&ip_tunnel_metadata_cnt);
331}
332
333void __init ip_tunnel_core_init(void);
334
335void ip_tunnel_need_metadata(void);
336void ip_tunnel_unneed_metadata(void);
337
338#else /* CONFIG_INET */
339
340static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
341{
342 return NULL;
343}
344
345static inline void ip_tunnel_need_metadata(void)
346{
347}
348
349static inline void ip_tunnel_unneed_metadata(void)
350{
351}
352
224#endif /* CONFIG_INET */ 353#endif /* CONFIG_INET */
225 354
226#endif /* __NET_IP_TUNNELS_H */ 355#endif /* __NET_IP_TUNNELS_H */
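
The new ip_tunnel_key/ip_tunnel_info pair is what metadata-mode (collect_md) tunnels carry around. A minimal sketch of filling a TX key and attaching an option blob, assuming the info structure is allocated with room for the options directly behind it (which is where ip_tunnel_info_opts() points); the my_* names, addresses and the 4789 port are illustrative only:

#include <linux/slab.h>
#include <net/ip_tunnels.h>

/* Hypothetical option blob carried behind the ip_tunnel_info. */
struct my_tun_opts {
	__be16 opt_class;
	u8 type;
	u8 data[4];
};

static struct ip_tunnel_info *my_build_tx_info(gfp_t gfp)
{
	struct my_tun_opts opts = { .opt_class = htons(0x0102), .type = 0x80 };
	struct ip_tunnel_info *info;

	/* Options live directly behind the info structure, which is what
	 * ip_tunnel_info_opts() returns, so allocate room for them here. */
	info = kzalloc(sizeof(*info) + sizeof(opts), gfp);
	if (!info)
		return NULL;

	/* IPv4 endpoints, TOS/TTL, UDP ports and the 64-bit tunnel id;
	 * ip_tunnel_key_init() also clears the documented padding. */
	ip_tunnel_key_init(&info->key,
			   htonl(0xc0a80001),		/* 192.168.0.1 */
			   htonl(0xc0a80002),		/* 192.168.0.2 */
			   0, 64,			/* tos, ttl */
			   0, htons(4789),		/* tp_src, tp_dst */
			   cpu_to_be64(42), TUNNEL_KEY);

	info->mode = IP_TUNNEL_INFO_TX;
	ip_tunnel_info_opts_set(info, &opts, sizeof(opts));

	return info;
}
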
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4e3731ee4eac..9b9ca87a4210 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -846,6 +846,17 @@ struct ipvs_master_sync_state {
846/* How much time to keep dests in trash */ 846/* How much time to keep dests in trash */
847#define IP_VS_DEST_TRASH_PERIOD (120 * HZ) 847#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
848 848
849struct ipvs_sync_daemon_cfg {
850 union nf_inet_addr mcast_group;
851 int syncid;
852 u16 sync_maxlen;
853 u16 mcast_port;
854 u8 mcast_af;
855 u8 mcast_ttl;
856 /* multicast interface name */
857 char mcast_ifn[IP_VS_IFNAME_MAXLEN];
858};
859
849/* IPVS in network namespace */ 860/* IPVS in network namespace */
850struct netns_ipvs { 861struct netns_ipvs {
851 int gen; /* Generation */ 862 int gen; /* Generation */
@@ -961,15 +972,10 @@ struct netns_ipvs {
961 spinlock_t sync_buff_lock; 972 spinlock_t sync_buff_lock;
962 struct task_struct **backup_threads; 973 struct task_struct **backup_threads;
963 int threads_mask; 974 int threads_mask;
964 int send_mesg_maxlen;
965 int recv_mesg_maxlen;
966 volatile int sync_state; 975 volatile int sync_state;
967 volatile int master_syncid;
968 volatile int backup_syncid;
969 struct mutex sync_mutex; 976 struct mutex sync_mutex;
970 /* multicast interface name */ 977 struct ipvs_sync_daemon_cfg mcfg; /* Master Configuration */
971 char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; 978 struct ipvs_sync_daemon_cfg bcfg; /* Backup Configuration */
972 char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
973 /* net name space ptr */ 979 /* net name space ptr */
974 struct net *net; /* Needed by timer routines */ 980 struct net *net; /* Needed by timer routines */
 975 /* Number of heterogeneous destinations, needed because heterogeneous 981
@@ -1408,7 +1414,8 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
1408/* IPVS sync daemon data and function prototypes 1414/* IPVS sync daemon data and function prototypes
1409 * (from ip_vs_sync.c) 1415 * (from ip_vs_sync.c)
1410 */ 1416 */
1411int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); 1417int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg,
1418 int state);
1412int stop_sync_thread(struct net *net, int state); 1419int stop_sync_thread(struct net *net, int state);
1413void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); 1420void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
1414 1421
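
start_sync_thread() now takes a filled-in ipvs_sync_daemon_cfg instead of loose interface/syncid arguments. A hedged sketch of a master-side caller; IP_VS_STATE_MASTER and the 224.0.0.81/8848 defaults come from the existing IPVS UAPI, while the interface name and syncid are arbitrary:

#include <linux/socket.h>
#include <linux/string.h>
#include <net/ip_vs.h>

static int my_start_master_sync(struct net *net)
{
	struct ipvs_sync_daemon_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.mcast_af = AF_INET;
	cfg.mcast_group.ip = htonl(0xe0000051);	/* 224.0.0.81, IPVS default group */
	cfg.mcast_port = 8848;			/* default sync port */
	cfg.mcast_ttl = 1;
	cfg.syncid = 7;
	strlcpy(cfg.mcast_ifn, "eth0", IP_VS_IFNAME_MAXLEN);

	return start_sync_thread(net, &cfg, IP_VS_STATE_MASTER);
}
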
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 82dbdb092a5d..711cca428cc8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -707,54 +707,69 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
707} 707}
708 708
709#if IS_ENABLED(CONFIG_IPV6) 709#if IS_ENABLED(CONFIG_IPV6)
710static inline void ip6_set_txhash(struct sock *sk)
711{
712 struct inet_sock *inet = inet_sk(sk);
713 struct ipv6_pinfo *np = inet6_sk(sk);
714 struct flow_keys keys;
715 710
716 memset(&keys, 0, sizeof(keys)); 711/* Sysctl settings for net ipv6.auto_flowlabels */
712#define IP6_AUTO_FLOW_LABEL_OFF 0
713#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
714#define IP6_AUTO_FLOW_LABEL_OPTIN 2
715#define IP6_AUTO_FLOW_LABEL_FORCED 3
717 716
718 memcpy(&keys.addrs.v6addrs.src, &np->saddr, 717#define IP6_AUTO_FLOW_LABEL_MAX IP6_AUTO_FLOW_LABEL_FORCED
719 sizeof(keys.addrs.v6addrs.src));
720 memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
721 sizeof(keys.addrs.v6addrs.dst));
722 keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
723 keys.ports.src = inet->inet_sport;
724 keys.ports.dst = inet->inet_dport;
725 718
726 sk->sk_txhash = flow_hash_from_keys(&keys); 719#define IP6_DEFAULT_AUTO_FLOW_LABELS IP6_AUTO_FLOW_LABEL_OPTOUT
727}
728 720
729static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, 721static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
730 __be32 flowlabel, bool autolabel) 722 __be32 flowlabel, bool autolabel,
723 struct flowi6 *fl6)
731{ 724{
732 if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) { 725 u32 hash;
733 u32 hash; 726
727 if (flowlabel ||
728 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
729 (!autolabel &&
730 net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
731 return flowlabel;
734 732
735 hash = skb_get_hash(skb); 733 hash = skb_get_hash_flowi6(skb, fl6);
736 734
737 /* Since this is being sent on the wire obfuscate hash a bit 735 /* Since this is being sent on the wire obfuscate hash a bit
 738 * to minimize possibility that any useful information to an 736
739 * attacker is leaked. Only lower 20 bits are relevant. 737 * attacker is leaked. Only lower 20 bits are relevant.
740 */ 738 */
 741 hash ^= hash >> 12; 739 hash = rol32(hash, 16);
742 740
743 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; 741 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
744 742
745 if (net->ipv6.sysctl.flowlabel_state_ranges) 743 if (net->ipv6.sysctl.flowlabel_state_ranges)
746 flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG; 744 flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
747 }
748 745
749 return flowlabel; 746 return flowlabel;
750} 747}
748
749static inline int ip6_default_np_autolabel(struct net *net)
750{
751 switch (net->ipv6.sysctl.auto_flowlabels) {
752 case IP6_AUTO_FLOW_LABEL_OFF:
753 case IP6_AUTO_FLOW_LABEL_OPTIN:
754 default:
755 return 0;
756 case IP6_AUTO_FLOW_LABEL_OPTOUT:
757 case IP6_AUTO_FLOW_LABEL_FORCED:
758 return 1;
759 }
760}
751#else 761#else
752static inline void ip6_set_txhash(struct sock *sk) { } 762static inline void ip6_set_txhash(struct sock *sk) { }
753static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, 763static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
754 __be32 flowlabel, bool autolabel) 764 __be32 flowlabel, bool autolabel,
765 struct flowi6 *fl6)
755{ 766{
756 return flowlabel; 767 return flowlabel;
757} 768}
769static inline int ip6_default_np_autolabel(struct net *net)
770{
771 return 0;
772}
758#endif 773#endif
759 774
760 775
@@ -832,7 +847,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
832 &inet6_sk(sk)->cork); 847 &inet6_sk(sk)->cork);
833} 848}
834 849
835int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); 850int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
851 struct flowi6 *fl6);
836struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, 852struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
837 const struct in6_addr *final_dst); 853 const struct in6_addr *final_dst);
838struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, 854struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
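
ip6_make_flowlabel() now receives the flowi6 so a label can be derived even without an skb hash, and it only auto-generates one when the auto_flowlabels sysctl mode and the per-socket setting allow it. A minimal sketch of the call a sender might make when writing the IPv6 header; np->autoflowlabel is assumed to hold the socket's opt-in/opt-out bit:

#include <net/ipv6.h>

static void my_fill_v6_flow_header(struct net *net, struct sock *sk,
				   struct sk_buff *skb, struct ipv6hdr *hdr,
				   struct flowi6 *fl6, u8 tclass)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	__be32 flowlabel;

	/* Keep an explicit label as-is, otherwise derive one from the
	 * flow keys (skb hash or fl6) depending on the sysctl mode. */
	flowlabel = ip6_make_flowlabel(net, skb, fl6->flowlabel,
				       np->autoflowlabel, fl6);

	ip6_flow_hdr(hdr, tclass, flowlabel);
}
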
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644
index 000000000000..fce0e35e74d0
--- /dev/null
+++ b/include/net/lwtunnel.h
@@ -0,0 +1,175 @@
1#ifndef __NET_LWTUNNEL_H
2#define __NET_LWTUNNEL_H 1
3
4#include <linux/lwtunnel.h>
5#include <linux/netdevice.h>
6#include <linux/skbuff.h>
7#include <linux/types.h>
8#include <net/route.h>
9
10#define LWTUNNEL_HASH_BITS 7
11#define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS)
12
13/* lw tunnel state flags */
14#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
15#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
16
17struct lwtunnel_state {
18 __u16 type;
19 __u16 flags;
20 atomic_t refcnt;
21 int (*orig_output)(struct sock *sk, struct sk_buff *skb);
22 int (*orig_input)(struct sk_buff *);
23 int len;
24 __u8 data[0];
25};
26
27struct lwtunnel_encap_ops {
28 int (*build_state)(struct net_device *dev, struct nlattr *encap,
29 unsigned int family, const void *cfg,
30 struct lwtunnel_state **ts);
31 int (*output)(struct sock *sk, struct sk_buff *skb);
32 int (*input)(struct sk_buff *skb);
33 int (*fill_encap)(struct sk_buff *skb,
34 struct lwtunnel_state *lwtstate);
35 int (*get_encap_size)(struct lwtunnel_state *lwtstate);
36 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
37};
38
39#ifdef CONFIG_LWTUNNEL
40static inline void lwtstate_free(struct lwtunnel_state *lws)
41{
42 kfree(lws);
43}
44
45static inline struct lwtunnel_state *
46lwtstate_get(struct lwtunnel_state *lws)
47{
48 if (lws)
49 atomic_inc(&lws->refcnt);
50
51 return lws;
52}
53
54static inline void lwtstate_put(struct lwtunnel_state *lws)
55{
56 if (!lws)
57 return;
58
59 if (atomic_dec_and_test(&lws->refcnt))
60 lwtstate_free(lws);
61}
62
63static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
64{
65 if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
66 return true;
67
68 return false;
69}
70
71static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
72{
73 if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
74 return true;
75
76 return false;
77}
78int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
79 unsigned int num);
80int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
81 unsigned int num);
82int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
83 struct nlattr *encap,
84 unsigned int family, const void *cfg,
85 struct lwtunnel_state **lws);
86int lwtunnel_fill_encap(struct sk_buff *skb,
87 struct lwtunnel_state *lwtstate);
88int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
89struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
90int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
91int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
92int lwtunnel_input(struct sk_buff *skb);
93
94#else
95
96static inline void lwtstate_free(struct lwtunnel_state *lws)
97{
98}
99
100static inline struct lwtunnel_state *
101lwtstate_get(struct lwtunnel_state *lws)
102{
103 return lws;
104}
105
106static inline void lwtstate_put(struct lwtunnel_state *lws)
107{
108}
109
110static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
111{
112 return false;
113}
114
115static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
116{
117 return false;
118}
119
120static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
121 unsigned int num)
122{
123 return -EOPNOTSUPP;
124
125}
126
127static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
128 unsigned int num)
129{
130 return -EOPNOTSUPP;
131}
132
133static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
134 struct nlattr *encap,
135 unsigned int family, const void *cfg,
136 struct lwtunnel_state **lws)
137{
138 return -EOPNOTSUPP;
139}
140
141static inline int lwtunnel_fill_encap(struct sk_buff *skb,
142 struct lwtunnel_state *lwtstate)
143{
144 return 0;
145}
146
147static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
148{
149 return 0;
150}
151
152static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
153{
154 return NULL;
155}
156
157static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
158 struct lwtunnel_state *b)
159{
160 return 0;
161}
162
163static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
164{
165 return -EOPNOTSUPP;
166}
167
168static inline int lwtunnel_input(struct sk_buff *skb)
169{
170 return -EOPNOTSUPP;
171}
172
173#endif
174
175#endif /* __NET_LWTUNNEL_H */
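
An encapsulation module drives the new lwtunnel core by registering a lwtunnel_encap_ops and building per-route state from the netlink attribute. A rough sketch of that pattern; the foo_* names, the LWTUNNEL_ENCAP_FOO type id and the single-u32 attribute layout are invented for illustration:

#include <linux/errno.h>
#include <net/lwtunnel.h>
#include <net/netlink.h>

#define LWTUNNEL_ENCAP_FOO	0x7f	/* placeholder type id, not in the UAPI */

/* Hypothetical per-route encap data kept in lwtstate->data. */
struct foo_encap {
	u32 id;
};

static int foo_build_state(struct net_device *dev, struct nlattr *encap,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts)
{
	struct lwtunnel_state *newts;
	struct foo_encap *foo;

	newts = lwtunnel_state_alloc(sizeof(*foo));
	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_FOO;
	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;

	foo = (struct foo_encap *)newts->data;
	foo->id = nla_get_u32(encap);	/* illustrative parsing of a u32 attr */

	*ts = newts;
	return 0;
}

static int foo_output(struct sock *sk, struct sk_buff *skb)
{
	/* ...push the foo encapsulation, then hand the skb back to the
	 * original dst output saved in lwtstate->orig_output... */
	return 0;
}

static const struct lwtunnel_encap_ops foo_encap_ops = {
	.build_state	= foo_build_state,
	.output		= foo_output,
};

static int foo_lwt_register(void)
{
	return lwtunnel_encap_add_ops(&foo_encap_ops, LWTUNNEL_ENCAP_FOO);
}
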
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 6b1077c2a63f..bfc569498bfa 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -477,7 +477,9 @@ struct ieee80211_event {
477 * @chandef: Channel definition for this BSS -- the hardware might be 477 * @chandef: Channel definition for this BSS -- the hardware might be
478 * configured a higher bandwidth than this BSS uses, for example. 478 * configured a higher bandwidth than this BSS uses, for example.
479 * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation. 479 * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
480 * This field is only valid when the channel type is one of the HT types. 480 * This field is only valid when the channel is a wide HT/VHT channel.
481 * Note that with TDLS this can be the case (channel is HT, protection must
482 * be used from this field) even when the BSS association isn't using HT.
481 * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value 483 * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
482 * implies disabled 484 * implies disabled
483 * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis 485 * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
@@ -973,6 +975,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
973 * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. 975 * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
974 * If this flag is set, the stack cannot do any replay detection 976 * If this flag is set, the stack cannot do any replay detection
975 * hence the driver or hardware will have to do that. 977 * hence the driver or hardware will have to do that.
978 * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
979 * flag indicates that the PN was verified for replay protection.
 980 * Note that this flag is currently only supported when the frame
 981 * is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set).
976 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on 982 * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
977 * the frame. 983 * the frame.
978 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on 984 * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -997,9 +1003,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
997 * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference 1003 * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
998 * number (@ampdu_reference) must be populated and be a distinct number for 1004 * number (@ampdu_reference) must be populated and be a distinct number for
999 * each A-MPDU 1005 * each A-MPDU
1000 * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
1001 * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
1002 * monitoring purposes only
1003 * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all 1006 * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
1004 * subframes of a single A-MPDU 1007 * subframes of a single A-MPDU
1005 * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU 1008 * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
@@ -1039,8 +1042,8 @@ enum mac80211_rx_flags {
1039 RX_FLAG_NO_SIGNAL_VAL = BIT(12), 1042 RX_FLAG_NO_SIGNAL_VAL = BIT(12),
1040 RX_FLAG_HT_GF = BIT(13), 1043 RX_FLAG_HT_GF = BIT(13),
1041 RX_FLAG_AMPDU_DETAILS = BIT(14), 1044 RX_FLAG_AMPDU_DETAILS = BIT(14),
1042 RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15), 1045 RX_FLAG_PN_VALIDATED = BIT(15),
1043 RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16), 1046 /* bit 16 free */
1044 RX_FLAG_AMPDU_LAST_KNOWN = BIT(17), 1047 RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
1045 RX_FLAG_AMPDU_IS_LAST = BIT(18), 1048 RX_FLAG_AMPDU_IS_LAST = BIT(18),
1046 RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19), 1049 RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
@@ -1491,8 +1494,10 @@ enum ieee80211_key_flags {
1491 * - Temporal Authenticator Rx MIC Key (64 bits) 1494 * - Temporal Authenticator Rx MIC Key (64 bits)
1492 * @icv_len: The ICV length for this key type 1495 * @icv_len: The ICV length for this key type
1493 * @iv_len: The IV length for this key type 1496 * @iv_len: The IV length for this key type
1497 * @drv_priv: pointer for driver use
1494 */ 1498 */
1495struct ieee80211_key_conf { 1499struct ieee80211_key_conf {
1500 void *drv_priv;
1496 atomic64_t tx_pn; 1501 atomic64_t tx_pn;
1497 u32 cipher; 1502 u32 cipher;
1498 u8 icv_len; 1503 u8 icv_len;
@@ -1675,7 +1680,6 @@ struct ieee80211_sta_rates {
1675 * @tdls: indicates whether the STA is a TDLS peer 1680 * @tdls: indicates whether the STA is a TDLS peer
1676 * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only 1681 * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
1677 * valid if the STA is a TDLS peer in the first place. 1682 * valid if the STA is a TDLS peer in the first place.
1678 * @mfp: indicates whether the STA uses management frame protection or not.
1679 * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) 1683 * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
1680 */ 1684 */
1681struct ieee80211_sta { 1685struct ieee80211_sta {
@@ -1693,7 +1697,6 @@ struct ieee80211_sta {
1693 struct ieee80211_sta_rates __rcu *rates; 1697 struct ieee80211_sta_rates __rcu *rates;
1694 bool tdls; 1698 bool tdls;
1695 bool tdls_initiator; 1699 bool tdls_initiator;
1696 bool mfp;
1697 1700
1698 struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; 1701 struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
1699 1702
@@ -1888,6 +1891,9 @@ struct ieee80211_txq {
1888 * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands 1891 * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
1889 * in one command, mac80211 doesn't have to run separate scans per band. 1892 * in one command, mac80211 doesn't have to run separate scans per band.
1890 * 1893 *
1894 * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
 1895 * than the BSS bandwidth for a TDLS link on the base channel.
1896 *
1891 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays 1897 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
1892 */ 1898 */
1893enum ieee80211_hw_flags { 1899enum ieee80211_hw_flags {
@@ -1920,6 +1926,7 @@ enum ieee80211_hw_flags {
1920 IEEE80211_HW_CHANCTX_STA_CSA, 1926 IEEE80211_HW_CHANCTX_STA_CSA,
1921 IEEE80211_HW_SUPPORTS_CLONED_SKBS, 1927 IEEE80211_HW_SUPPORTS_CLONED_SKBS,
1922 IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS, 1928 IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
1929 IEEE80211_HW_TDLS_WIDER_BW,
1923 1930
1924 /* keep last, obviously */ 1931 /* keep last, obviously */
1925 NUM_IEEE80211_HW_FLAGS 1932 NUM_IEEE80211_HW_FLAGS
@@ -3696,20 +3703,28 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
3696void ieee80211_restart_hw(struct ieee80211_hw *hw); 3703void ieee80211_restart_hw(struct ieee80211_hw *hw);
3697 3704
3698/** 3705/**
3699 * ieee80211_napi_add - initialize mac80211 NAPI context 3706 * ieee80211_rx_napi - receive frame from NAPI context
3700 * @hw: the hardware to initialize the NAPI context on 3707 *
3701 * @napi: the NAPI context to initialize 3708 * Use this function to hand received frames to mac80211. The receive
 3702 * @napi_dev: dummy NAPI netdevice, here to not waste the space if the 3709 * buffer in @skb must start with an IEEE 802.11 header. In case a
3703 * driver doesn't use NAPI 3710 * paged @skb is used, the driver is recommended to put the ieee80211
3704 * @poll: poll function 3711 * header of the frame on the linear part of the @skb to avoid memory
3705 * @weight: default weight 3712 * allocation and/or memcpy by the stack.
3713 *
3714 * This function may not be called in IRQ context. Calls to this function
3715 * for a single hardware must be synchronized against each other. Calls to
3716 * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
3717 * mixed for a single hardware. Must not run concurrently with
3718 * ieee80211_tx_status() or ieee80211_tx_status_ni().
3719 *
3720 * This function must be called with BHs disabled.
3706 * 3721 *
3707 * See also netif_napi_add(). 3722 * @hw: the hardware this frame came in on
3723 * @skb: the buffer to receive, owned by mac80211 after this call
3724 * @napi: the NAPI context
3708 */ 3725 */
3709void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi, 3726void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
3710 struct net_device *napi_dev, 3727 struct napi_struct *napi);
3711 int (*poll)(struct napi_struct *, int),
3712 int weight);
3713 3728
3714/** 3729/**
3715 * ieee80211_rx - receive frame 3730 * ieee80211_rx - receive frame
@@ -3731,7 +3746,10 @@ void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
3731 * @hw: the hardware this frame came in on 3746 * @hw: the hardware this frame came in on
3732 * @skb: the buffer to receive, owned by mac80211 after this call 3747 * @skb: the buffer to receive, owned by mac80211 after this call
3733 */ 3748 */
3734void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb); 3749static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
3750{
3751 ieee80211_rx_napi(hw, skb, NULL);
3752}
3735 3753
3736/** 3754/**
3737 * ieee80211_rx_irqsafe - receive frame 3755 * ieee80211_rx_irqsafe - receive frame
@@ -4315,19 +4333,6 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
4315 struct sk_buff *skb, u8 *p2k); 4333 struct sk_buff *skb, u8 *p2k);
4316 4334
4317/** 4335/**
4318 * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
4319 *
4320 * This function computes the two AES-CMAC sub-keys, based on the
4321 * previously installed master key.
4322 *
4323 * @keyconf: the parameter passed with the set key
4324 * @k1: a buffer to be filled with the 1st sub-key
4325 * @k2: a buffer to be filled with the 2nd sub-key
4326 */
4327void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
4328 u8 *k1, u8 *k2);
4329
4330/**
4331 * ieee80211_get_key_tx_seq - get key TX sequence counter 4336 * ieee80211_get_key_tx_seq - get key TX sequence counter
4332 * 4337 *
4333 * @keyconf: the parameter passed with the set key 4338 * @keyconf: the parameter passed with the set key
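
With ieee80211_napi_add() removed, the driver owns its NAPI context and passes it along when handing frames to mac80211; plain ieee80211_rx() is now just the NULL-napi case. A hedged sketch of a driver poll routine; the mydrv_* structure and ring-buffer helper are invented for illustration:

#include <linux/netdevice.h>
#include <net/mac80211.h>

/* Hypothetical driver context owning its own NAPI instance. */
struct mydrv_priv {
	struct ieee80211_hw *hw;
	struct napi_struct napi;
};

/* Assumed ring-buffer helper; a real driver would pull frames off its
 * RX ring here. */
static struct sk_buff *mydrv_rx_dequeue(struct mydrv_priv *priv)
{
	return NULL;
}

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = mydrv_rx_dequeue(priv)) != NULL) {
		/* NAPI poll runs with BHs disabled, as ieee80211_rx_napi()
		 * requires; passing the napi context lets mac80211 use GRO. */
		ieee80211_rx_napi(priv->hw, skb, napi);
		done++;
	}

	if (done < budget)
		napi_complete(napi);

	return done;
}
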
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index f534a46911dc..b7f99615224b 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -321,23 +321,6 @@ int ieee802154_register_hw(struct ieee802154_hw *hw);
321void ieee802154_unregister_hw(struct ieee802154_hw *hw); 321void ieee802154_unregister_hw(struct ieee802154_hw *hw);
322 322
323/** 323/**
324 * ieee802154_rx - receive frame
325 *
326 * Use this function to hand received frames to mac802154. The receive
327 * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
328 * paged @skb is used, the driver is recommended to put the ieee802154
329 * header of the frame on the linear part of the @skb to avoid memory
330 * allocation and/or memcpy by the stack.
331 *
332 * This function may not be called in IRQ context. Calls to this function
333 * for a single hardware must be synchronized against each other.
334 *
335 * @hw: the hardware this frame came in on
336 * @skb: the buffer to receive, owned by mac802154 after this call
337 */
338void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
339
340/**
341 * ieee802154_rx_irqsafe - receive frame 324 * ieee802154_rx_irqsafe - receive frame
342 * 325 *
343 * Like ieee802154_rx() but can be called in IRQ context 326 * Like ieee802154_rx() but can be called in IRQ context
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644
index 000000000000..4757997f76ed
--- /dev/null
+++ b/include/net/mpls_iptunnel.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2015 Cumulus Networks, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13
14#ifndef _NET_MPLS_IPTUNNEL_H
15#define _NET_MPLS_IPTUNNEL_H 1
16
17#define MAX_NEW_LABELS 2
18
19struct mpls_iptunnel_encap {
20 u32 label[MAX_NEW_LABELS];
21 u32 labels;
22};
23
24static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
25{
26 return (struct mpls_iptunnel_encap *)lwtstate->data;
27}
28
29#endif
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b3a7751251b4..aba5695fadb0 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -182,7 +182,8 @@ int ndisc_rcv(struct sk_buff *skb);
182 182
183void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, 183void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
184 const struct in6_addr *solicit, 184 const struct in6_addr *solicit,
185 const struct in6_addr *daddr, const struct in6_addr *saddr); 185 const struct in6_addr *daddr, const struct in6_addr *saddr,
186 struct sk_buff *oskb);
186 187
187void ndisc_send_rs(struct net_device *dev, 188void ndisc_send_rs(struct net_device *dev,
188 const struct in6_addr *saddr, const struct in6_addr *daddr); 189 const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index bd33e66f49aa..8b683841e574 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -125,6 +125,7 @@ struct neigh_statistics {
125 unsigned long forced_gc_runs; /* number of forced GC runs */ 125 unsigned long forced_gc_runs; /* number of forced GC runs */
126 126
127 unsigned long unres_discards; /* number of unresolved drops */ 127 unsigned long unres_discards; /* number of unresolved drops */
128 unsigned long table_fulls; /* times even gc couldn't help */
128}; 129};
129 130
130#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) 131#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e951453e0a23..2dcea635ecce 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -118,6 +118,9 @@ struct net {
118#endif 118#endif
119 struct sock *nfnl; 119 struct sock *nfnl;
120 struct sock *nfnl_stash; 120 struct sock *nfnl_stash;
121#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
122 struct list_head nfnl_acct_list;
123#endif
121#endif 124#endif
122#ifdef CONFIG_WEXT_CORE 125#ifdef CONFIG_WEXT_CORE
123 struct sk_buff_head wext_nlevents; 126 struct sk_buff_head wext_nlevents;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index bab824bde92c..d4c6b5f30acd 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -59,7 +59,7 @@ static inline unsigned int
59br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb, 59br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
60 const struct nf_hook_state *state) 60 const struct nf_hook_state *state)
61{ 61{
62 return NF_DROP; 62 return NF_ACCEPT;
63} 63}
64#endif 64#endif
65 65
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644
index 000000000000..42008f10dfc4
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -0,0 +1,7 @@
1#ifndef _NF_DUP_IPV4_H_
2#define _NF_DUP_IPV4_H_
3
4void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
5 const struct in_addr *gw, int oif);
6
7#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644
index 000000000000..ed6bd66fa5a0
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -0,0 +1,7 @@
1#ifndef _NF_DUP_IPV6_H_
2#define _NF_DUP_IPV6_H_
3
4void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
5 const struct in6_addr *gw, int oif);
6
7#endif /* _NF_DUP_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 37cd3911d5c5..e8ad46834df8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -250,8 +250,12 @@ void nf_ct_untracked_status_or(unsigned long bits);
250void nf_ct_iterate_cleanup(struct net *net, 250void nf_ct_iterate_cleanup(struct net *net,
251 int (*iter)(struct nf_conn *i, void *data), 251 int (*iter)(struct nf_conn *i, void *data),
252 void *data, u32 portid, int report); 252 void *data, u32 portid, int report);
253
254struct nf_conntrack_zone;
255
253void nf_conntrack_free(struct nf_conn *ct); 256void nf_conntrack_free(struct nf_conn *ct);
254struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, 257struct nf_conn *nf_conntrack_alloc(struct net *net,
258 const struct nf_conntrack_zone *zone,
255 const struct nf_conntrack_tuple *orig, 259 const struct nf_conntrack_tuple *orig,
256 const struct nf_conntrack_tuple *repl, 260 const struct nf_conntrack_tuple *repl,
257 gfp_t gfp); 261 gfp_t gfp);
@@ -291,7 +295,10 @@ extern unsigned int nf_conntrack_max;
291extern unsigned int nf_conntrack_hash_rnd; 295extern unsigned int nf_conntrack_hash_rnd;
292void init_nf_conntrack_hash_rnd(void); 296void init_nf_conntrack_hash_rnd(void);
293 297
294struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags); 298struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
299 const struct nf_conntrack_zone *zone,
300 gfp_t flags);
301void nf_ct_tmpl_free(struct nf_conn *tmpl);
295 302
296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 303#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 304#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index f2f0fa3bb150..c03f9c42b3cd 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -52,7 +52,8 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
52 52
53/* Find a connection corresponding to a tuple. */ 53/* Find a connection corresponding to a tuple. */
54struct nf_conntrack_tuple_hash * 54struct nf_conntrack_tuple_hash *
55nf_conntrack_find_get(struct net *net, u16 zone, 55nf_conntrack_find_get(struct net *net,
56 const struct nf_conntrack_zone *zone,
56 const struct nf_conntrack_tuple *tuple); 57 const struct nf_conntrack_tuple *tuple);
57 58
58int __nf_conntrack_confirm(struct sk_buff *skb); 59int __nf_conntrack_confirm(struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 3f3aecbc8632..dce56f09ac9a 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -4,7 +4,9 @@
4 4
5#ifndef _NF_CONNTRACK_EXPECT_H 5#ifndef _NF_CONNTRACK_EXPECT_H
6#define _NF_CONNTRACK_EXPECT_H 6#define _NF_CONNTRACK_EXPECT_H
7
7#include <net/netfilter/nf_conntrack.h> 8#include <net/netfilter/nf_conntrack.h>
9#include <net/netfilter/nf_conntrack_zones.h>
8 10
9extern unsigned int nf_ct_expect_hsize; 11extern unsigned int nf_ct_expect_hsize;
10extern unsigned int nf_ct_expect_max; 12extern unsigned int nf_ct_expect_max;
@@ -76,15 +78,18 @@ int nf_conntrack_expect_init(void);
76void nf_conntrack_expect_fini(void); 78void nf_conntrack_expect_fini(void);
77 79
78struct nf_conntrack_expect * 80struct nf_conntrack_expect *
79__nf_ct_expect_find(struct net *net, u16 zone, 81__nf_ct_expect_find(struct net *net,
82 const struct nf_conntrack_zone *zone,
80 const struct nf_conntrack_tuple *tuple); 83 const struct nf_conntrack_tuple *tuple);
81 84
82struct nf_conntrack_expect * 85struct nf_conntrack_expect *
83nf_ct_expect_find_get(struct net *net, u16 zone, 86nf_ct_expect_find_get(struct net *net,
87 const struct nf_conntrack_zone *zone,
84 const struct nf_conntrack_tuple *tuple); 88 const struct nf_conntrack_tuple *tuple);
85 89
86struct nf_conntrack_expect * 90struct nf_conntrack_expect *
87nf_ct_find_expectation(struct net *net, u16 zone, 91nf_ct_find_expectation(struct net *net,
92 const struct nf_conntrack_zone *zone,
88 const struct nf_conntrack_tuple *tuple); 93 const struct nf_conntrack_tuple *tuple);
89 94
90void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, 95void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index dec6336bf850..7e2b1d025f50 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -54,7 +54,11 @@ int nf_connlabels_replace(struct nf_conn *ct,
54#ifdef CONFIG_NF_CONNTRACK_LABELS 54#ifdef CONFIG_NF_CONNTRACK_LABELS
55int nf_conntrack_labels_init(void); 55int nf_conntrack_labels_init(void);
56void nf_conntrack_labels_fini(void); 56void nf_conntrack_labels_fini(void);
57int nf_connlabels_get(struct net *net, unsigned int n_bits);
58void nf_connlabels_put(struct net *net);
57#else 59#else
58static inline int nf_conntrack_labels_init(void) { return 0; } 60static inline int nf_conntrack_labels_init(void) { return 0; }
59static inline void nf_conntrack_labels_fini(void) {} 61static inline void nf_conntrack_labels_fini(void) {}
62static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
63static inline void nf_connlabels_put(struct net *net) {}
60#endif 64#endif
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
index 034efe8d45a5..4e32512cef32 100644
--- a/include/net/netfilter/nf_conntrack_zones.h
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -1,25 +1,89 @@
1#ifndef _NF_CONNTRACK_ZONES_H 1#ifndef _NF_CONNTRACK_ZONES_H
2#define _NF_CONNTRACK_ZONES_H 2#define _NF_CONNTRACK_ZONES_H
3 3
4#define NF_CT_DEFAULT_ZONE 0 4#include <linux/netfilter/nf_conntrack_zones_common.h>
5 5
6#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 6#if IS_ENABLED(CONFIG_NF_CONNTRACK)
7#include <net/netfilter/nf_conntrack_extend.h> 7#include <net/netfilter/nf_conntrack_extend.h>
8 8
9struct nf_conntrack_zone { 9static inline const struct nf_conntrack_zone *
10 u16 id; 10nf_ct_zone(const struct nf_conn *ct)
11}; 11{
12 const struct nf_conntrack_zone *nf_ct_zone = NULL;
13
14#ifdef CONFIG_NF_CONNTRACK_ZONES
15 nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
16#endif
17 return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
18}
19
20static inline const struct nf_conntrack_zone *
21nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
22{
23 zone->id = id;
24 zone->flags = flags;
25 zone->dir = dir;
26
27 return zone;
28}
29
30static inline const struct nf_conntrack_zone *
31nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
32 struct nf_conntrack_zone *tmp)
33{
34 const struct nf_conntrack_zone *zone;
35
36 if (!tmpl)
37 return &nf_ct_zone_dflt;
38
39 zone = nf_ct_zone(tmpl);
40 if (zone->flags & NF_CT_FLAG_MARK)
41 zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
42
43 return zone;
44}
12 45
13static inline u16 nf_ct_zone(const struct nf_conn *ct) 46static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
47 const struct nf_conntrack_zone *info)
14{ 48{
15#ifdef CONFIG_NF_CONNTRACK_ZONES 49#ifdef CONFIG_NF_CONNTRACK_ZONES
16 struct nf_conntrack_zone *nf_ct_zone; 50 struct nf_conntrack_zone *nf_ct_zone;
17 nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE); 51
18 if (nf_ct_zone) 52 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
19 return nf_ct_zone->id; 53 if (!nf_ct_zone)
54 return -ENOMEM;
55
56 nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
57 info->flags);
20#endif 58#endif
21 return NF_CT_DEFAULT_ZONE; 59 return 0;
22} 60}
23 61
24#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */ 62static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
63 enum ip_conntrack_dir dir)
64{
65 return zone->dir & (1 << dir);
66}
67
68static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
69 enum ip_conntrack_dir dir)
70{
71 return nf_ct_zone_matches_dir(zone, dir) ?
72 zone->id : NF_CT_DEFAULT_ZONE_ID;
73}
74
75static inline bool nf_ct_zone_equal(const struct nf_conn *a,
76 const struct nf_conntrack_zone *b,
77 enum ip_conntrack_dir dir)
78{
79 return nf_ct_zone_id(nf_ct_zone(a), dir) ==
80 nf_ct_zone_id(b, dir);
81}
82
83static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
84 const struct nf_conntrack_zone *b)
85{
86 return nf_ct_zone(a)->id == b->id;
87}
88#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
25#endif /* _NF_CONNTRACK_ZONES_H */ 89#endif /* _NF_CONNTRACK_ZONES_H */
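
Conntrack lookups now take a const struct nf_conntrack_zone * rather than a bare u16, and a template can ask for the zone id to be taken from skb->mark via NF_CT_FLAG_MARK. A minimal sketch of resolving the zone from a template before searching the hash; building the tuple itself is omitted:

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct nf_conntrack_tuple_hash *
my_zone_lookup(struct net *net, const struct nf_conn *tmpl,
	       const struct sk_buff *skb,
	       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_zone tmp;
	const struct nf_conntrack_zone *zone;

	/* Falls back to nf_ct_zone_dflt without a template and rewrites
	 * the id from skb->mark when the template has NF_CT_FLAG_MARK set. */
	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);

	return nf_conntrack_find_get(net, zone, tuple);
}
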
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2a246680a6c3..aa8bee72c9d3 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
125 125
126static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) 126static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
127{ 127{
128 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1; 128 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
129} 129}
130 130
131unsigned int nft_parse_register(const struct nlattr *attr); 131unsigned int nft_parse_register(const struct nlattr *attr);
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
new file mode 100644
index 000000000000..6b84cf6491a2
--- /dev/null
+++ b/include/net/netfilter/nft_dup.h
@@ -0,0 +1,9 @@
1#ifndef _NFT_DUP_H_
2#define _NFT_DUP_H_
3
4struct nft_dup_inet {
5 enum nft_registers sreg_addr:8;
6 enum nft_registers sreg_dev:8;
7};
8
9#endif /* _NFT_DUP_H_ */
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 8d93544a2d2b..c0368db6df54 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -31,6 +31,7 @@ struct netns_sysctl_ipv6 {
31 int auto_flowlabels; 31 int auto_flowlabels;
32 int icmpv6_time; 32 int icmpv6_time;
33 int anycast_src_echo_reply; 33 int anycast_src_echo_reply;
34 int ip_nonlocal_bind;
34 int fwmark_reflect; 35 int fwmark_reflect;
35 int idgen_retries; 36 int idgen_retries;
36 int idgen_delay; 37 int idgen_delay;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 532e4ba64f49..38aa4983e2a9 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -14,5 +14,6 @@ struct netns_nf {
14#ifdef CONFIG_SYSCTL 14#ifdef CONFIG_SYSCTL
15 struct ctl_table_header *nf_log_dir_header; 15 struct ctl_table_header *nf_log_dir_header;
16#endif 16#endif
17 struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
17}; 18};
18#endif 19#endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 01fc8c531115..d0d0f1e53bb9 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -79,6 +79,7 @@ struct nci_ops {
79 int (*close)(struct nci_dev *ndev); 79 int (*close)(struct nci_dev *ndev);
80 int (*send)(struct nci_dev *ndev, struct sk_buff *skb); 80 int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
81 int (*setup)(struct nci_dev *ndev); 81 int (*setup)(struct nci_dev *ndev);
82 int (*post_setup)(struct nci_dev *ndev);
82 int (*fw_download)(struct nci_dev *ndev, const char *firmware_name); 83 int (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
83 __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol); 84 __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
84 int (*discover_se)(struct nci_dev *ndev); 85 int (*discover_se)(struct nci_dev *ndev);
@@ -277,6 +278,8 @@ int nci_request(struct nci_dev *ndev,
277 unsigned long opt), 278 unsigned long opt),
278 unsigned long opt, __u32 timeout); 279 unsigned long opt, __u32 timeout);
279int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload); 280int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
281int nci_core_reset(struct nci_dev *ndev);
282int nci_core_init(struct nci_dev *ndev);
280 283
281int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); 284int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
282int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val); 285int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f9e58ae45f9c..30afc9a6718c 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -203,6 +203,7 @@ struct nfc_dev {
203 int n_vendor_cmds; 203 int n_vendor_cmds;
204 204
205 struct nfc_ops *ops; 205 struct nfc_ops *ops;
206 struct genl_info *cur_cmd_info;
206}; 207};
207#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) 208#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
208 209
@@ -318,4 +319,44 @@ static inline int nfc_set_vendor_cmds(struct nfc_dev *dev,
318 return 0; 319 return 0;
319} 320}
320 321
322struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
323 enum nfc_attrs attr,
324 u32 oui, u32 subcmd,
325 int approxlen);
326int nfc_vendor_cmd_reply(struct sk_buff *skb);
327
328/**
329 * nfc_vendor_cmd_alloc_reply_skb - allocate vendor command reply
330 * @dev: nfc device
331 * @oui: vendor oui
332 * @approxlen: an upper bound of the length of the data that will
333 * be put into the skb
334 *
335 * This function allocates and pre-fills an skb for a reply to
336 * a vendor command. Since it is intended for a reply, calling
337 * it outside of a vendor command's doit() operation is invalid.
338 *
339 * The returned skb is pre-filled with some identifying data in
340 * a way that any data that is put into the skb (with skb_put(),
341 * nla_put() or similar) will end up being within the
342 * %NFC_ATTR_VENDOR_DATA attribute, so all that needs to be done
343 * with the skb is adding data for the corresponding userspace tool
344 * which can then read that data out of the vendor data attribute.
345 * You must not modify the skb in any other way.
346 *
347 * When done, call nfc_vendor_cmd_reply() with the skb and return
348 * its error code as the result of the doit() operation.
349 *
350 * Return: An allocated and pre-filled skb. %NULL if any errors happen.
351 */
352static inline struct sk_buff *
353nfc_vendor_cmd_alloc_reply_skb(struct nfc_dev *dev,
354 u32 oui, u32 subcmd, int approxlen)
355{
356 return __nfc_alloc_vendor_cmd_reply_skb(dev,
357 NFC_ATTR_VENDOR_DATA,
358 oui,
359 subcmd, approxlen);
360}
361
321#endif /* __NET_NFC_H */ 362#endif /* __NET_NFC_H */
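
A vendor command handler is expected to allocate its reply with nfc_vendor_cmd_alloc_reply_skb(), add the payload, and return the result of nfc_vendor_cmd_reply(), as the kerneldoc above describes. A hedged sketch of such a handler; the OUI, subcommand id, payload and the doit() signature (assumed to match struct nfc_vendor_cmd) are illustrative:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/nfc/nfc.h>

#define MY_VENDOR_OUI			0x123456	/* assumed OUI */
#define MY_VENDOR_SUBCMD_FW_INFO	0x01		/* assumed subcommand id */

static int my_vendor_get_fw_info(struct nfc_dev *dev, void *data,
				 size_t data_len)
{
	u32 fw_version = 0x010203;	/* assumed payload */
	struct sk_buff *skb;

	skb = nfc_vendor_cmd_alloc_reply_skb(dev, MY_VENDOR_OUI,
					     MY_VENDOR_SUBCMD_FW_INFO,
					     sizeof(fw_version));
	if (!skb)
		return -ENOMEM;

	/* Anything put into the skb ends up inside NFC_ATTR_VENDOR_DATA. */
	memcpy(skb_put(skb, sizeof(fw_version)), &fw_version,
	       sizeof(fw_version));

	return nfc_vendor_cmd_reply(skb);
}
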
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b0ab530d28cd..cf2713d8b975 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -52,6 +52,8 @@ enum nl802154_commands {
52 52
53 NL802154_CMD_SET_LBT_MODE, 53 NL802154_CMD_SET_LBT_MODE,
54 54
55 NL802154_CMD_SET_ACKREQ_DEFAULT,
56
55 /* add new commands above here */ 57 /* add new commands above here */
56 58
57 /* used to define NL802154_CMD_MAX below */ 59 /* used to define NL802154_CMD_MAX below */
@@ -104,6 +106,8 @@ enum nl802154_attrs {
104 106
105 NL802154_ATTR_SUPPORTED_COMMANDS, 107 NL802154_ATTR_SUPPORTED_COMMANDS,
106 108
109 NL802154_ATTR_ACKREQ_DEFAULT,
110
107 /* add attributes here, update the policy in nl802154.c */ 111 /* add attributes here, update the policy in nl802154.c */
108 112
109 __NL802154_ATTR_AFTER_LAST, 113 __NL802154_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2342bf12cb78..401038d2f9b8 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -110,10 +110,8 @@ static inline void qdisc_run(struct Qdisc *q)
110 __qdisc_run(q); 110 __qdisc_run(q);
111} 111}
112 112
113int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
114 struct tcf_result *res);
115int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, 113int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
116 struct tcf_result *res); 114 struct tcf_result *res, bool compat_mode);
117 115
118static inline __be16 tc_skb_protocol(const struct sk_buff *skb) 116static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
119{ 117{
diff --git a/include/net/route.h b/include/net/route.h
index fe22d03afb6a..cc61cb95f059 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -188,8 +188,12 @@ void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
188void ip_rt_send_redirect(struct sk_buff *skb); 188void ip_rt_send_redirect(struct sk_buff *skb);
189 189
190unsigned int inet_addr_type(struct net *net, __be32 addr); 190unsigned int inet_addr_type(struct net *net, __be32 addr);
191unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
191unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, 192unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
192 __be32 addr); 193 __be32 addr);
194unsigned int inet_addr_type_dev_table(struct net *net,
195 const struct net_device *dev,
196 __be32 addr);
193void ip_rt_multicast_event(struct in_device *); 197void ip_rt_multicast_event(struct in_device *);
194int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 198int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
195void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 199void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
@@ -250,6 +254,9 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
250 if (inet_sk(sk)->transparent) 254 if (inet_sk(sk)->transparent)
251 flow_flags |= FLOWI_FLAG_ANYSRC; 255 flow_flags |= FLOWI_FLAG_ANYSRC;
252 256
257 if (netif_index_is_vrf(sock_net(sk), oif))
258 flow_flags |= FLOWI_FLAG_VRFSRC;
259
253 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, 260 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
254 protocol, flow_flags, dst, src, dport, sport); 261 protocol, flow_flags, dst, src, dport, sport);
255} 262}
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 343d922d15c2..18fdb98185ab 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -141,6 +141,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
141 unsigned char name_assign_type, 141 unsigned char name_assign_type,
142 const struct rtnl_link_ops *ops, 142 const struct rtnl_link_ops *ops,
143 struct nlattr *tb[]); 143 struct nlattr *tb[]);
144int rtnl_delete_link(struct net_device *dev);
144int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); 145int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
145 146
146int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len); 147int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2738f6f87908..444faa89a55f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -340,6 +340,7 @@ extern struct Qdisc noop_qdisc;
340extern struct Qdisc_ops noop_qdisc_ops; 340extern struct Qdisc_ops noop_qdisc_ops;
341extern struct Qdisc_ops pfifo_fast_ops; 341extern struct Qdisc_ops pfifo_fast_ops;
342extern struct Qdisc_ops mq_qdisc_ops; 342extern struct Qdisc_ops mq_qdisc_ops;
343extern struct Qdisc_ops noqueue_qdisc_ops;
343extern const struct Qdisc_ops *default_qdisc_ops; 344extern const struct Qdisc_ops *default_qdisc_ops;
344 345
345struct Qdisc_class_common { 346struct Qdisc_class_common {
@@ -513,17 +514,20 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
513 bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 514 bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
514} 515}
515 516
516static inline void qdisc_bstats_update_cpu(struct Qdisc *sch, 517static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
517 const struct sk_buff *skb) 518 const struct sk_buff *skb)
518{ 519{
519 struct gnet_stats_basic_cpu *bstats =
520 this_cpu_ptr(sch->cpu_bstats);
521
522 u64_stats_update_begin(&bstats->syncp); 520 u64_stats_update_begin(&bstats->syncp);
523 bstats_update(&bstats->bstats, skb); 521 bstats_update(&bstats->bstats, skb);
524 u64_stats_update_end(&bstats->syncp); 522 u64_stats_update_end(&bstats->syncp);
525} 523}
526 524
525static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
526 const struct sk_buff *skb)
527{
528 bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
529}
530
527static inline void qdisc_bstats_update(struct Qdisc *sch, 531static inline void qdisc_bstats_update(struct Qdisc *sch,
528 const struct sk_buff *skb) 532 const struct sk_buff *skb)
529{ 533{
@@ -547,16 +551,24 @@ static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
547 sch->qstats.drops += count; 551 sch->qstats.drops += count;
548} 552}
549 553
550static inline void qdisc_qstats_drop(struct Qdisc *sch) 554static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
551{ 555{
552 sch->qstats.drops++; 556 qstats->drops++;
553} 557}
554 558
555static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch) 559static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
556{ 560{
557 struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats); 561 qstats->overlimits++;
562}
558 563
559 qstats->drops++; 564static inline void qdisc_qstats_drop(struct Qdisc *sch)
565{
566 qstats_drop_inc(&sch->qstats);
567}
568
569static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
570{
571 qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
560} 572}
561 573
562static inline void qdisc_qstats_overlimit(struct Qdisc *sch) 574static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
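
The per-cpu statistics helpers are split so the raw gnet_stats updaters can be used outside a Qdisc, with qdisc_bstats_cpu_update() and qdisc_qstats_cpu_drop() as thin this_cpu_ptr() wrappers. A simplified sketch of an enqueue path for a qdisc keeping per-cpu counters; limit handling and backlog accounting are reduced to the bare minimum:

#include <net/sch_generic.h>

/* Assumes sch->cpu_bstats and sch->cpu_qstats were allocated at init time. */
static int my_pcpu_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) {
		/* Drop accounted on this CPU's queue stats. */
		qdisc_qstats_cpu_drop(sch);
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	__skb_queue_tail(&sch->q, skb);
	/* Bytes/packets accounted on this CPU's basic stats. */
	qdisc_bstats_cpu_update(sch, skb);

	return NET_XMIT_SUCCESS;
}
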
diff --git a/include/net/sock.h b/include/net/sock.h
index f21f0708ec59..7aa78440559a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -429,7 +429,9 @@ struct sock {
429 void *sk_security; 429 void *sk_security;
430#endif 430#endif
431 __u32 sk_mark; 431 __u32 sk_mark;
432#ifdef CONFIG_CGROUP_NET_CLASSID
432 u32 sk_classid; 433 u32 sk_classid;
434#endif
433 struct cg_proto *sk_cgrp; 435 struct cg_proto *sk_cgrp;
434 void (*sk_state_change)(struct sock *sk); 436 void (*sk_state_change)(struct sock *sk);
435 void (*sk_data_ready)(struct sock *sk); 437 void (*sk_data_ready)(struct sock *sk);
@@ -1040,42 +1042,9 @@ struct proto {
1040#endif 1042#endif
1041}; 1043};
1042 1044
1043/*
1044 * Bits in struct cg_proto.flags
1045 */
1046enum cg_proto_flags {
1047 /* Currently active and new sockets should be assigned to cgroups */
1048 MEMCG_SOCK_ACTIVE,
1049 /* It was ever activated; we must disarm static keys on destruction */
1050 MEMCG_SOCK_ACTIVATED,
1051};
1052
1053struct cg_proto {
1054 struct page_counter memory_allocated; /* Current allocated memory. */
1055 struct percpu_counter sockets_allocated; /* Current number of sockets. */
1056 int memory_pressure;
1057 long sysctl_mem[3];
1058 unsigned long flags;
1059 /*
1060 * memcg field is used to find which memcg we belong directly
1061 * Each memcg struct can hold more than one cg_proto, so container_of
1062 * won't really cut.
1063 *
1064 * The elegant solution would be having an inverse function to
1065 * proto_cgroup in struct proto, but that means polluting the structure
1066 * for everybody, instead of just for memcg users.
1067 */
1068 struct mem_cgroup *memcg;
1069};
1070
1071int proto_register(struct proto *prot, int alloc_slab); 1045int proto_register(struct proto *prot, int alloc_slab);
1072void proto_unregister(struct proto *prot); 1046void proto_unregister(struct proto *prot);
1073 1047
1074static inline bool memcg_proto_active(struct cg_proto *cg_proto)
1075{
1076 return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
1077}
1078
1079#ifdef SOCK_REFCNT_DEBUG 1048#ifdef SOCK_REFCNT_DEBUG
1080static inline void sk_refcnt_debug_inc(struct sock *sk) 1049static inline void sk_refcnt_debug_inc(struct sock *sk)
1081{ 1050{
@@ -1685,6 +1654,20 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
1685kuid_t sock_i_uid(struct sock *sk); 1654kuid_t sock_i_uid(struct sock *sk);
1686unsigned long sock_i_ino(struct sock *sk); 1655unsigned long sock_i_ino(struct sock *sk);
1687 1656
1657static inline void sk_set_txhash(struct sock *sk)
1658{
1659 sk->sk_txhash = prandom_u32();
1660
1661 if (unlikely(!sk->sk_txhash))
1662 sk->sk_txhash = 1;
1663}
1664
1665static inline void sk_rethink_txhash(struct sock *sk)
1666{
1667 if (sk->sk_txhash)
1668 sk_set_txhash(sk);
1669}
1670
1688static inline struct dst_entry * 1671static inline struct dst_entry *
1689__sk_dst_get(struct sock *sk) 1672__sk_dst_get(struct sock *sk)
1690{ 1673{
@@ -1709,6 +1692,8 @@ static inline void dst_negative_advice(struct sock *sk)
1709{ 1692{
1710 struct dst_entry *ndst, *dst = __sk_dst_get(sk); 1693 struct dst_entry *ndst, *dst = __sk_dst_get(sk);
1711 1694
1695 sk_rethink_txhash(sk);
1696
1712 if (dst && dst->ops->negative_advice) { 1697 if (dst && dst->ops->negative_advice) {
1713 ndst = dst->ops->negative_advice(dst); 1698 ndst = dst->ops->negative_advice(dst);
1714 1699
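sk_set_txhash() and sk_rethink_txhash() above give the stack a per-socket transmit hash that can be re-rolled when the current path looks unhealthy; dst_negative_advice() now does exactly that. A small sketch of the intended call pattern, with hypothetical caller names:

    static void toy_connect_done(struct sock *sk)
    {
            /* pick a random, never-zero hash once the connection 4-tuple is set */
            sk_set_txhash(sk);
    }

    static void toy_path_suspect(struct sock *sk)
    {
            /* re-roll only for sockets that ever had a hash; sk_txhash == 0
             * (never set) is left untouched by sk_rethink_txhash() */
            sk_rethink_txhash(sk);
    }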
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d5671f118bfc..319baab3b48e 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -72,6 +72,7 @@ struct switchdev_obj {
72 struct switchdev_obj_fdb { /* PORT_FDB */ 72 struct switchdev_obj_fdb { /* PORT_FDB */
73 const unsigned char *addr; 73 const unsigned char *addr;
74 u16 vid; 74 u16 vid;
75 u16 ndm_state;
75 } fdb; 76 } fdb;
76 } u; 77 } u;
77}; 78};
@@ -157,6 +158,9 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
157int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 158int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
158 struct net_device *dev, 159 struct net_device *dev,
159 struct net_device *filter_dev, int idx); 160 struct net_device *filter_dev, int idx);
161void switchdev_port_fwd_mark_set(struct net_device *dev,
162 struct net_device *group_dev,
163 bool joining);
160 164
161#else 165#else
162 166
@@ -271,6 +275,12 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
271 return -EOPNOTSUPP; 275 return -EOPNOTSUPP;
272} 276}
273 277
278static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
279 struct net_device *group_dev,
280 bool joining)
281{
282}
283
274#endif 284#endif
275 285
276#endif /* _LINUX_SWITCHDEV_H_ */ 286#endif /* _LINUX_SWITCHDEV_H_ */
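switchdev_port_fwd_mark_set() is only declared here; a hedged sketch of how a caller might invoke it when a switch port joins or leaves an upper device. The notifier context and device names are assumptions, not part of this header:

    static void toy_port_upper_change(struct net_device *port_dev,
                                      struct net_device *upper_dev,
                                      bool linking)
    {
            /* true on join, false on leave, per the 'joining' parameter */
            switchdev_port_fwd_mark_set(port_dev, upper_dev, linking);
    }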
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
index a152e9858b2c..958d69cfb19c 100644
--- a/include/net/tc_act/tc_bpf.h
+++ b/include/net/tc_act/tc_bpf.h
@@ -15,7 +15,7 @@
15 15
16struct tcf_bpf { 16struct tcf_bpf {
17 struct tcf_common common; 17 struct tcf_common common;
18 struct bpf_prog *filter; 18 struct bpf_prog __rcu *filter;
19 union { 19 union {
20 u32 bpf_fd; 20 u32 bpf_fd;
21 u16 bpf_num_ops; 21 u16 bpf_num_ops;
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9fc9b578908a..592a6bc02b0b 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -6,9 +6,10 @@
6struct tcf_gact { 6struct tcf_gact {
7 struct tcf_common common; 7 struct tcf_common common;
8#ifdef CONFIG_GACT_PROB 8#ifdef CONFIG_GACT_PROB
9 u16 tcfg_ptype; 9 u16 tcfg_ptype;
10 u16 tcfg_pval; 10 u16 tcfg_pval;
11 int tcfg_paction; 11 int tcfg_paction;
12 atomic_t packets;
12#endif 13#endif
13}; 14};
14#define to_gact(a) \ 15#define to_gact(a) \
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 4dd77a1c106b..dae96bae1c19 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,7 +8,7 @@ struct tcf_mirred {
8 int tcfm_eaction; 8 int tcfm_eaction;
9 int tcfm_ifindex; 9 int tcfm_ifindex;
10 int tcfm_ok_push; 10 int tcfm_ok_push;
11 struct net_device *tcfm_dev; 11 struct net_device __rcu *tcfm_dev;
12 struct list_head tcfm_list; 12 struct list_head tcfm_list;
13}; 13};
14#define to_mirred(a) \ 14#define to_mirred(a) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 950cfecaad3c..0cab28cd43a9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -281,6 +281,8 @@ extern unsigned int sysctl_tcp_notsent_lowat;
281extern int sysctl_tcp_min_tso_segs; 281extern int sysctl_tcp_min_tso_segs;
282extern int sysctl_tcp_autocorking; 282extern int sysctl_tcp_autocorking;
283extern int sysctl_tcp_invalid_ratelimit; 283extern int sysctl_tcp_invalid_ratelimit;
284extern int sysctl_tcp_pacing_ss_ratio;
285extern int sysctl_tcp_pacing_ca_ratio;
284 286
285extern atomic_long_t tcp_memory_allocated; 287extern atomic_long_t tcp_memory_allocated;
286extern struct percpu_counter tcp_sockets_allocated; 288extern struct percpu_counter tcp_sockets_allocated;
@@ -886,7 +888,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
886extern struct tcp_congestion_ops tcp_reno; 888extern struct tcp_congestion_ops tcp_reno;
887 889
888struct tcp_congestion_ops *tcp_ca_find_key(u32 key); 890struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
889u32 tcp_ca_get_key_by_name(const char *name); 891u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
890#ifdef CONFIG_INET 892#ifdef CONFIG_INET
891char *tcp_ca_get_name_by_key(u32 key, char *buffer); 893char *tcp_ca_get_name_by_key(u32 key, char *buffer);
892#else 894#else
@@ -989,6 +991,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
989 991
990#define TCP_INFINITE_SSTHRESH 0x7fffffff 992#define TCP_INFINITE_SSTHRESH 0x7fffffff
991 993
994static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
995{
996 return tp->snd_cwnd < tp->snd_ssthresh;
997}
998
992static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp) 999static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
993{ 1000{
994 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; 1001 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1072,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1065 const struct tcp_sock *tp = tcp_sk(sk); 1072 const struct tcp_sock *tp = tcp_sk(sk);
1066 1073
1067 /* If in slow start, ensure cwnd grows to twice what was ACKed. */ 1074 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
1068 if (tp->snd_cwnd <= tp->snd_ssthresh) 1075 if (tcp_in_slow_start(tp))
1069 return tp->snd_cwnd < 2 * tp->max_packets_out; 1076 return tp->snd_cwnd < 2 * tp->max_packets_out;
1070 1077
1071 return tp->is_cwnd_limited; 1078 return tp->is_cwnd_limited;
@@ -1160,6 +1167,19 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1160} 1167}
1161 1168
1162u32 tcp_default_init_rwnd(u32 mss); 1169u32 tcp_default_init_rwnd(u32 mss);
1170void tcp_cwnd_restart(struct sock *sk, s32 delta);
1171
1172static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1173{
1174 struct tcp_sock *tp = tcp_sk(sk);
1175 s32 delta;
1176
1177 if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
1178 return;
1179 delta = tcp_time_stamp - tp->lsndtime;
1180 if (delta > inet_csk(sk)->icsk_rto)
1181 tcp_cwnd_restart(sk, delta);
1182}
1163 1183
1164/* Determine a window scaling and initial window to offer. */ 1184/* Determine a window scaling and initial window to offer. */
1165void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, 1185void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
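tcp_in_slow_start() gives congestion-control modules a single predicate for "cwnd below ssthresh", and tcp_slow_start_after_idle_check() restarts cwnd after an idle period. A sketch of a cong_avoid hook using the predicate, mirroring the reno pattern; the function name is illustrative:

    static void toy_cong_avoid(struct sock *sk, u32 ack, u32 acked)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (!tcp_is_cwnd_limited(sk))
                    return;

            if (tcp_in_slow_start(tp)) {
                    acked = tcp_slow_start(tp, acked);
                    if (!acked)
                            return;
            }
            /* past ssthresh: additive increase */
            tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
    }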
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 68f0ecad6c6e..1a47946f95ba 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -33,9 +33,6 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
33 33
34static inline void twsk_destructor(struct sock *sk) 34static inline void twsk_destructor(struct sock *sk)
35{ 35{
36 BUG_ON(sk == NULL);
37 BUG_ON(sk->sk_prot == NULL);
38 BUG_ON(sk->sk_prot->twsk_prot == NULL);
39 if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) 36 if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
40 sk->sk_prot->twsk_prot->twsk_destructor(sk); 37 sk->sk_prot->twsk_prot->twsk_destructor(sk);
41} 38}
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index c491c1221606..cb2f89f20f5c 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -31,7 +31,8 @@ struct udp_port_cfg {
31 __be16 peer_udp_port; 31 __be16 peer_udp_port;
32 unsigned int use_udp_checksums:1, 32 unsigned int use_udp_checksums:1,
33 use_udp6_tx_checksums:1, 33 use_udp6_tx_checksums:1,
34 use_udp6_rx_checksums:1; 34 use_udp6_rx_checksums:1,
35 ipv6_v6only:1;
35}; 36};
36 37
37int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, 38int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
@@ -93,6 +94,10 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
93 94
94void udp_tunnel_sock_release(struct socket *sock); 95void udp_tunnel_sock_release(struct socket *sock);
95 96
97struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
98 __be16 flags, __be64 tunnel_id,
99 int md_size);
100
96static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, 101static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
97 bool udp_csum) 102 bool udp_csum)
98{ 103{
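The new ipv6_v6only bit lets a tunnel driver open an AF_INET6 socket that refuses v4-mapped traffic. A hedged sketch of filling struct udp_port_cfg with the new bit; the port number and function name are placeholders:

    static int toy_open_v6only(struct net *net, struct socket **sockp)
    {
            struct udp_port_cfg cfg = {
                    .family                 = AF_INET6,
                    .local_udp_port         = htons(4789),
                    .use_udp6_rx_checksums  = 1,
                    .ipv6_v6only            = 1,    /* no v4-mapped addresses */
            };

            return udp_sock_create(net, &cfg, sockp);
    }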
diff --git a/include/net/vrf.h b/include/net/vrf.h
new file mode 100644
index 000000000000..593e6094ddd4
--- /dev/null
+++ b/include/net/vrf.h
@@ -0,0 +1,178 @@
1/*
 2 * include/net/vrf.h - adds VRF device structure definitions
3 * Copyright (c) 2015 Cumulus Networks
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef __LINUX_NET_VRF_H
12#define __LINUX_NET_VRF_H
13
14struct net_vrf_dev {
15 struct rcu_head rcu;
16 int ifindex; /* ifindex of master dev */
17 u32 tb_id; /* table id for VRF */
18};
19
20struct slave {
21 struct list_head list;
22 struct net_device *dev;
23};
24
25struct slave_queue {
26 struct list_head all_slaves;
27};
28
29struct net_vrf {
30 struct slave_queue queue;
31 struct rtable *rth;
32 u32 tb_id;
33};
34
35
36#if IS_ENABLED(CONFIG_NET_VRF)
37/* called with rcu_read_lock() */
38static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
39{
40 struct net_vrf_dev *vrf_ptr;
41 int ifindex = 0;
42
43 if (!dev)
44 return 0;
45
46 if (netif_is_vrf(dev)) {
47 ifindex = dev->ifindex;
48 } else {
49 vrf_ptr = rcu_dereference(dev->vrf_ptr);
50 if (vrf_ptr)
51 ifindex = vrf_ptr->ifindex;
52 }
53
54 return ifindex;
55}
56
57static inline int vrf_master_ifindex(const struct net_device *dev)
58{
59 int ifindex;
60
61 rcu_read_lock();
62 ifindex = vrf_master_ifindex_rcu(dev);
63 rcu_read_unlock();
64
65 return ifindex;
66}
67
68/* called with rcu_read_lock */
69static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
70{
71 u32 tb_id = 0;
72
73 if (dev) {
74 struct net_vrf_dev *vrf_ptr;
75
76 vrf_ptr = rcu_dereference(dev->vrf_ptr);
77 if (vrf_ptr)
78 tb_id = vrf_ptr->tb_id;
79 }
80 return tb_id;
81}
82
83static inline u32 vrf_dev_table(const struct net_device *dev)
84{
85 u32 tb_id;
86
87 rcu_read_lock();
88 tb_id = vrf_dev_table_rcu(dev);
89 rcu_read_unlock();
90
91 return tb_id;
92}
93
94static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
95{
96 struct net_device *dev;
97 u32 tb_id = 0;
98
99 if (!ifindex)
100 return 0;
101
102 rcu_read_lock();
103
104 dev = dev_get_by_index_rcu(net, ifindex);
105 if (dev)
106 tb_id = vrf_dev_table_rcu(dev);
107
108 rcu_read_unlock();
109
110 return tb_id;
111}
112
113/* called with rtnl */
114static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
115{
116 u32 tb_id = 0;
117
118 if (dev) {
119 struct net_vrf_dev *vrf_ptr;
120
121 vrf_ptr = rtnl_dereference(dev->vrf_ptr);
122 if (vrf_ptr)
123 tb_id = vrf_ptr->tb_id;
124 }
125 return tb_id;
126}
127
128/* caller has already checked netif_is_vrf(dev) */
129static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
130{
131 struct rtable *rth = ERR_PTR(-ENETUNREACH);
132 struct net_vrf *vrf = netdev_priv(dev);
133
134 if (vrf) {
135 rth = vrf->rth;
136 atomic_inc(&rth->dst.__refcnt);
137 }
138 return rth;
139}
140
141#else
142static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
143{
144 return 0;
145}
146
147static inline int vrf_master_ifindex(const struct net_device *dev)
148{
149 return 0;
150}
151
152static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
153{
154 return 0;
155}
156
157static inline u32 vrf_dev_table(const struct net_device *dev)
158{
159 return 0;
160}
161
162static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
163{
164 return 0;
165}
166
167static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
168{
169 return 0;
170}
171
172static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
173{
174 return ERR_PTR(-ENETUNREACH);
175}
176#endif
177
178#endif /* __LINUX_NET_VRF_H */
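Because every helper in this header collapses to 0 when CONFIG_NET_VRF is off or the device has no VRF master, callers can treat a zero return as "no VRF" and fall back to the main table. A hedged sketch; the function name is illustrative:

    static u32 toy_pick_table(const struct net_device *dev)
    {
            u32 tb_id = vrf_dev_table(dev);

            return tb_id ? tb_id : RT_TABLE_MAIN;
    }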
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0082b5d33d7d..480a319b4c92 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -7,6 +7,7 @@
7#include <linux/skbuff.h> 7#include <linux/skbuff.h>
8#include <linux/netdevice.h> 8#include <linux/netdevice.h>
9#include <linux/udp.h> 9#include <linux/udp.h>
10#include <net/dst_metadata.h>
10 11
11#define VNI_HASH_BITS 10 12#define VNI_HASH_BITS 10
12#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) 13#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@ struct vxlanhdr {
94#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) 95#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8)
95#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) 96#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
96 97
98#define VNI_HASH_BITS 10
99#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
100#define FDB_HASH_BITS 8
101#define FDB_HASH_SIZE (1<<FDB_HASH_BITS)
102
97struct vxlan_metadata { 103struct vxlan_metadata {
98 __be32 vni;
99 u32 gbp; 104 u32 gbp;
100}; 105};
101 106
102struct vxlan_sock;
103typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
104 struct vxlan_metadata *md);
105
106/* per UDP socket information */ 107/* per UDP socket information */
107struct vxlan_sock { 108struct vxlan_sock {
108 struct hlist_node hlist; 109 struct hlist_node hlist;
109 vxlan_rcv_t *rcv;
110 void *data;
111 struct work_struct del_work; 110 struct work_struct del_work;
112 struct socket *sock; 111 struct socket *sock;
113 struct rcu_head rcu; 112 struct rcu_head rcu;
@@ -117,6 +116,58 @@ struct vxlan_sock {
117 u32 flags; 116 u32 flags;
118}; 117};
119 118
119union vxlan_addr {
120 struct sockaddr_in sin;
121 struct sockaddr_in6 sin6;
122 struct sockaddr sa;
123};
124
125struct vxlan_rdst {
126 union vxlan_addr remote_ip;
127 __be16 remote_port;
128 u32 remote_vni;
129 u32 remote_ifindex;
130 struct list_head list;
131 struct rcu_head rcu;
132};
133
134struct vxlan_config {
135 union vxlan_addr remote_ip;
136 union vxlan_addr saddr;
137 u32 vni;
138 int remote_ifindex;
139 int mtu;
140 __be16 dst_port;
141 __u16 port_min;
142 __u16 port_max;
143 __u8 tos;
144 __u8 ttl;
145 u32 flags;
146 unsigned long age_interval;
147 unsigned int addrmax;
148 bool no_share;
149};
150
151/* Pseudo network device */
152struct vxlan_dev {
153 struct hlist_node hlist; /* vni hash table */
154 struct list_head next; /* vxlan's per namespace list */
155 struct vxlan_sock *vn_sock; /* listening socket */
156 struct net_device *dev;
157 struct net *net; /* netns for packet i/o */
158 struct vxlan_rdst default_dst; /* default destination */
159 u32 flags; /* VXLAN_F_* in vxlan.h */
160
161 struct timer_list age_timer;
162 spinlock_t hash_lock;
163 unsigned int addrcnt;
164 struct gro_cells gro_cells;
165
166 struct vxlan_config cfg;
167
168 struct hlist_head fdb_head[FDB_HASH_SIZE];
169};
170
120#define VXLAN_F_LEARN 0x01 171#define VXLAN_F_LEARN 0x01
121#define VXLAN_F_PROXY 0x02 172#define VXLAN_F_PROXY 0x02
122#define VXLAN_F_RSC 0x04 173#define VXLAN_F_RSC 0x04
@@ -130,6 +181,7 @@ struct vxlan_sock {
130#define VXLAN_F_REMCSUM_RX 0x400 181#define VXLAN_F_REMCSUM_RX 0x400
131#define VXLAN_F_GBP 0x800 182#define VXLAN_F_GBP 0x800
132#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000 183#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
184#define VXLAN_F_COLLECT_METADATA 0x2000
133 185
134/* Flags that are used in the receive path. These flags must match in 186/* Flags that are used in the receive path. These flags must match in
135 * order for a socket to be shareable 187 * order for a socket to be shareable
@@ -137,18 +189,16 @@ struct vxlan_sock {
137#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \ 189#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
138 VXLAN_F_UDP_ZERO_CSUM6_RX | \ 190 VXLAN_F_UDP_ZERO_CSUM6_RX | \
139 VXLAN_F_REMCSUM_RX | \ 191 VXLAN_F_REMCSUM_RX | \
140 VXLAN_F_REMCSUM_NOPARTIAL) 192 VXLAN_F_REMCSUM_NOPARTIAL | \
141 193 VXLAN_F_COLLECT_METADATA)
142struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
143 vxlan_rcv_t *rcv, void *data,
144 bool no_share, u32 flags);
145 194
146void vxlan_sock_release(struct vxlan_sock *vs); 195struct net_device *vxlan_dev_create(struct net *net, const char *name,
196 u8 name_assign_type, struct vxlan_config *conf);
147 197
148int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, 198static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
149 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 199{
150 __be16 src_port, __be16 dst_port, struct vxlan_metadata *md, 200 return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
151 bool xnet, u32 vxflags); 201}
152 202
153static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, 203static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
154 netdev_features_t features) 204 netdev_features_t features)
@@ -191,4 +241,10 @@ static inline void vxlan_get_rx_port(struct net_device *netdev)
191{ 241{
192} 242}
193#endif 243#endif
244
245static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
246{
247 return vs->sock->sk->sk_family;
248}
249
194#endif 250#endif
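vxlan_dev_create() plus struct vxlan_config replace the old vxlan_sock_add()/vxlan_xmit_skb() callback interface for in-kernel users. A sketch of creating a metadata-collecting device through the new entry point; the VNI, port and device name are placeholders:

    static struct net_device *toy_vxlan_create(struct net *net)
    {
            struct vxlan_config conf = {
                    .dst_port       = htons(4789),
                    .vni            = 42,
                    .flags          = VXLAN_F_COLLECT_METADATA,
            };

            return vxlan_dev_create(net, "vxlan42", NET_NAME_USER, &conf);
    }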
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f0ee97eec24d..312e3fee9ccf 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -285,10 +285,13 @@ struct xfrm_policy_afinfo {
285 unsigned short family; 285 unsigned short family;
286 struct dst_ops *dst_ops; 286 struct dst_ops *dst_ops;
287 void (*garbage_collect)(struct net *net); 287 void (*garbage_collect)(struct net *net);
288 struct dst_entry *(*dst_lookup)(struct net *net, int tos, 288 struct dst_entry *(*dst_lookup)(struct net *net,
289 int tos, int oif,
289 const xfrm_address_t *saddr, 290 const xfrm_address_t *saddr,
290 const xfrm_address_t *daddr); 291 const xfrm_address_t *daddr);
291 int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr); 292 int (*get_saddr)(struct net *net, int oif,
293 xfrm_address_t *saddr,
294 xfrm_address_t *daddr);
292 void (*decode_session)(struct sk_buff *skb, 295 void (*decode_session)(struct sk_buff *skb,
293 struct flowi *fl, 296 struct flowi *fl,
294 int reverse); 297 int reverse);
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 39ed2d2fbd51..92a7d85917b4 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,14 +105,16 @@ enum ib_cm_data_size {
105 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, 105 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
106 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, 106 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
107 IB_CM_SIDR_REP_INFO_LENGTH = 72, 107 IB_CM_SIDR_REP_INFO_LENGTH = 72,
108 /* compare done u32 at a time */
109 IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
110}; 108};
111 109
112struct ib_cm_id; 110struct ib_cm_id;
113 111
114struct ib_cm_req_event_param { 112struct ib_cm_req_event_param {
115 struct ib_cm_id *listen_id; 113 struct ib_cm_id *listen_id;
114
115 /* P_Key that was used by the GMP's BTH header */
116 u16 bth_pkey;
117
116 u8 port; 118 u8 port;
117 119
118 struct ib_sa_path_rec *primary_path; 120 struct ib_sa_path_rec *primary_path;
@@ -223,6 +225,9 @@ struct ib_cm_apr_event_param {
223 225
224struct ib_cm_sidr_req_event_param { 226struct ib_cm_sidr_req_event_param {
225 struct ib_cm_id *listen_id; 227 struct ib_cm_id *listen_id;
228 __be64 service_id;
229 /* P_Key that was used by the GMP's BTH header */
230 u16 bth_pkey;
226 u8 port; 231 u8 port;
227 u16 pkey; 232 u16 pkey;
228}; 233};
@@ -337,11 +342,6 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
337#define IB_SDP_SERVICE_ID cpu_to_be64(0x0000000000010000ULL) 342#define IB_SDP_SERVICE_ID cpu_to_be64(0x0000000000010000ULL)
338#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) 343#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
339 344
340struct ib_cm_compare_data {
341 u32 data[IB_CM_COMPARE_SIZE];
342 u32 mask[IB_CM_COMPARE_SIZE];
343};
344
345/** 345/**
346 * ib_cm_listen - Initiates listening on the specified service ID for 346 * ib_cm_listen - Initiates listening on the specified service ID for
347 * connection and service ID resolution requests. 347 * connection and service ID resolution requests.
@@ -354,12 +354,13 @@ struct ib_cm_compare_data {
354 * range of service IDs. If set to 0, the service ID is matched 354 * range of service IDs. If set to 0, the service ID is matched
355 * exactly. This parameter is ignored if %service_id is set to 355 * exactly. This parameter is ignored if %service_id is set to
356 * IB_CM_ASSIGN_SERVICE_ID. 356 * IB_CM_ASSIGN_SERVICE_ID.
357 * @compare_data: This parameter is optional. It specifies data that must
358 * appear in the private data of a connection request for the specified
359 * listen request.
360 */ 357 */
361int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, 358int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
362 struct ib_cm_compare_data *compare_data); 359 __be64 service_mask);
360
361struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
362 ib_cm_handler cm_handler,
363 __be64 service_id);
363 364
364struct ib_cm_req_param { 365struct ib_cm_req_param {
365 struct ib_sa_path_rec *primary_path; 366 struct ib_sa_path_rec *primary_path;
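With compare_data gone, ib_cm_listen() takes only the service ID and mask, and private-data demultiplexing moves behind ib_cm_insert_listen(). A sketch of the two call forms; error handling is trimmed, and treating the returned ID as an ERR_PTR on failure is an assumption here:

    static int toy_listen(struct ib_cm_id *cm_id, struct ib_device *device,
                          ib_cm_handler handler, __be64 sid)
    {
            struct ib_cm_id *shared_id;
            int ret;

            /* exact-match listen: the old compare_data argument is simply gone */
            ret = ib_cm_listen(cm_id, sid, 0);
            if (ret)
                    return ret;

            /* shared listen: the CM core demultiplexes requests for sid */
            shared_id = ib_cm_insert_listen(device, handler, sid);
            return PTR_ERR_OR_ZERO(shared_id);
    }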
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index c8422d5a5a91..188df91d5851 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -127,6 +127,23 @@
127#define IB_DEFAULT_PKEY_PARTIAL 0x7FFF 127#define IB_DEFAULT_PKEY_PARTIAL 0x7FFF
128#define IB_DEFAULT_PKEY_FULL 0xFFFF 128#define IB_DEFAULT_PKEY_FULL 0xFFFF
129 129
130/*
131 * Generic trap/notice types
132 */
133#define IB_NOTICE_TYPE_FATAL 0x80
134#define IB_NOTICE_TYPE_URGENT 0x81
135#define IB_NOTICE_TYPE_SECURITY 0x82
136#define IB_NOTICE_TYPE_SM 0x83
137#define IB_NOTICE_TYPE_INFO 0x84
138
139/*
140 * Generic trap/notice producers
141 */
142#define IB_NOTICE_PROD_CA cpu_to_be16(1)
143#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
144#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
145#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
146
130enum { 147enum {
131 IB_MGMT_MAD_HDR = 24, 148 IB_MGMT_MAD_HDR = 24,
132 IB_MGMT_MAD_DATA = 232, 149 IB_MGMT_MAD_DATA = 232,
@@ -240,6 +257,70 @@ struct ib_class_port_info {
240 __be32 trap_qkey; 257 __be32 trap_qkey;
241}; 258};
242 259
260struct ib_mad_notice_attr {
261 u8 generic_type;
262 u8 prod_type_msb;
263 __be16 prod_type_lsb;
264 __be16 trap_num;
265 __be16 issuer_lid;
266 __be16 toggle_count;
267
268 union {
269 struct {
270 u8 details[54];
271 } raw_data;
272
273 struct {
274 __be16 reserved;
275 __be16 lid; /* where violation happened */
276 u8 port_num; /* where violation happened */
277 } __packed ntc_129_131;
278
279 struct {
280 __be16 reserved;
281 __be16 lid; /* LID where change occurred */
282 u8 reserved2;
283 u8 local_changes; /* low bit - local changes */
284 __be32 new_cap_mask; /* new capability mask */
285 u8 reserved3;
286 u8 change_flags; /* low 3 bits only */
287 } __packed ntc_144;
288
289 struct {
290 __be16 reserved;
291 __be16 lid; /* lid where sys guid changed */
292 __be16 reserved2;
293 __be64 new_sys_guid;
294 } __packed ntc_145;
295
296 struct {
297 __be16 reserved;
298 __be16 lid;
299 __be16 dr_slid;
300 u8 method;
301 u8 reserved2;
302 __be16 attr_id;
303 __be32 attr_mod;
304 __be64 mkey;
305 u8 reserved3;
306 u8 dr_trunc_hop;
307 u8 dr_rtn_path[30];
308 } __packed ntc_256;
309
310 struct {
311 __be16 reserved;
312 __be16 lid1;
313 __be16 lid2;
314 __be32 key;
315 __be32 sl_qp1; /* SL: high 4 bits */
316 __be32 qp2; /* high 8 bits reserved */
317 union ib_gid gid1;
318 union ib_gid gid2;
319 } __packed ntc_257_258;
320
321 } details;
322};
323
243/** 324/**
244 * ib_mad_send_buf - MAD data buffer and work request for sends. 325 * ib_mad_send_buf - MAD data buffer and work request for sends.
245 * @next: A pointer used to chain together MADs for posting. 326 * @next: A pointer used to chain together MADs for posting.
@@ -388,7 +469,6 @@ enum {
388struct ib_mad_agent { 469struct ib_mad_agent {
389 struct ib_device *device; 470 struct ib_device *device;
390 struct ib_qp *qp; 471 struct ib_qp *qp;
391 struct ib_mr *mr;
392 ib_mad_recv_handler recv_handler; 472 ib_mad_recv_handler recv_handler;
393 ib_mad_send_handler send_handler; 473 ib_mad_send_handler send_handler;
394 ib_mad_snoop_handler snoop_handler; 474 ib_mad_snoop_handler snoop_handler;
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b1f7592e02e4..709a5331e6b9 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -76,6 +76,8 @@ enum {
76 IB_OPCODE_UC = 0x20, 76 IB_OPCODE_UC = 0x20,
77 IB_OPCODE_RD = 0x40, 77 IB_OPCODE_RD = 0x40,
78 IB_OPCODE_UD = 0x60, 78 IB_OPCODE_UD = 0x60,
79 /* per IBTA 3.1 Table 38, A10.3.2 */
80 IB_OPCODE_CNP = 0x80,
79 81
80 /* operations -- just used to define real constants */ 82 /* operations -- just used to define real constants */
81 IB_OPCODE_SEND_FIRST = 0x00, 83 IB_OPCODE_SEND_FIRST = 0x00,
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index 98b9086d769a..b439e988408e 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -119,10 +119,57 @@ struct ib_port_info {
119 u8 link_roundtrip_latency[3]; 119 u8 link_roundtrip_latency[3];
120}; 120};
121 121
122struct ib_node_info {
123 u8 base_version;
124 u8 class_version;
125 u8 node_type;
126 u8 num_ports;
127 __be64 sys_guid;
128 __be64 node_guid;
129 __be64 port_guid;
130 __be16 partition_cap;
131 __be16 device_id;
132 __be32 revision;
133 u8 local_port_num;
134 u8 vendor_id[3];
135} __packed;
136
137struct ib_vl_weight_elem {
138 u8 vl; /* IB: VL is low 4 bits, upper 4 bits reserved */
139 /* OPA: VL is low 5 bits, upper 3 bits reserved */
140 u8 weight;
141};
142
122static inline u8 143static inline u8
123ib_get_smp_direction(struct ib_smp *smp) 144ib_get_smp_direction(struct ib_smp *smp)
124{ 145{
125 return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION); 146 return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
126} 147}
127 148
149/*
150 * SM Trap/Notice numbers
151 */
152#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
153#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
154#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
155#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
156#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
157#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
158#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
159#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
160
161/*
162 * Other local changes flags (trap 144).
163 */
164#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
165#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
166#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
167
168/*
 169 * M_Key violation flags in dr_trunc_hop (trap 256).
170 */
171#define IB_NOTICE_TRAP_DR_NOTICE 0x80
172#define IB_NOTICE_TRAP_DR_TRUNC 0x40
173
174
128#endif /* IB_SMI_H */ 175#endif /* IB_SMI_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index b0f898e3b2e7..7845fae6f2df 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -48,6 +48,7 @@
48#include <linux/rwsem.h> 48#include <linux/rwsem.h>
49#include <linux/scatterlist.h> 49#include <linux/scatterlist.h>
50#include <linux/workqueue.h> 50#include <linux/workqueue.h>
51#include <linux/socket.h>
51#include <uapi/linux/if_ether.h> 52#include <uapi/linux/if_ether.h>
52 53
53#include <linux/atomic.h> 54#include <linux/atomic.h>
@@ -64,6 +65,12 @@ union ib_gid {
64 } global; 65 } global;
65}; 66};
66 67
68extern union ib_gid zgid;
69
70struct ib_gid_attr {
71 struct net_device *ndev;
72};
73
67enum rdma_node_type { 74enum rdma_node_type {
68 /* IB values map to NodeInfo:NodeType. */ 75 /* IB values map to NodeInfo:NodeType. */
69 RDMA_NODE_IB_CA = 1, 76 RDMA_NODE_IB_CA = 1,
@@ -284,7 +291,7 @@ enum ib_port_cap_flags {
284 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 291 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
285 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 292 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
286 IB_PORT_CLIENT_REG_SUP = 1 << 25, 293 IB_PORT_CLIENT_REG_SUP = 1 << 25,
287 IB_PORT_IP_BASED_GIDS = 1 << 26 294 IB_PORT_IP_BASED_GIDS = 1 << 26,
288}; 295};
289 296
290enum ib_port_width { 297enum ib_port_width {
@@ -556,20 +563,18 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
556 */ 563 */
557__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 564__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
558 565
559enum ib_mr_create_flags {
560 IB_MR_SIGNATURE_EN = 1,
561};
562 566
563/** 567/**
564 * ib_mr_init_attr - Memory region init attributes passed to routine 568 * enum ib_mr_type - memory region type
565 * ib_create_mr. 569 * @IB_MR_TYPE_MEM_REG: memory region that is used for
566 * @max_reg_descriptors: max number of registration descriptors that 570 * normal registration
567 * may be used with registration work requests. 571 * @IB_MR_TYPE_SIGNATURE: memory region that is used for
568 * @flags: MR creation flags bit mask. 572 * signature operations (data-integrity
573 * capable regions)
569 */ 574 */
570struct ib_mr_init_attr { 575enum ib_mr_type {
571 int max_reg_descriptors; 576 IB_MR_TYPE_MEM_REG,
572 u32 flags; 577 IB_MR_TYPE_SIGNATURE,
573}; 578};
574 579
575/** 580/**
@@ -1252,9 +1257,11 @@ struct ib_udata {
1252}; 1257};
1253 1258
1254struct ib_pd { 1259struct ib_pd {
1260 u32 local_dma_lkey;
1255 struct ib_device *device; 1261 struct ib_device *device;
1256 struct ib_uobject *uobject; 1262 struct ib_uobject *uobject;
1257 atomic_t usecnt; /* count all resources */ 1263 atomic_t usecnt; /* count all resources */
1264 struct ib_mr *local_mr;
1258}; 1265};
1259 1266
1260struct ib_xrcd { 1267struct ib_xrcd {
@@ -1488,7 +1495,7 @@ struct ib_cache {
1488 rwlock_t lock; 1495 rwlock_t lock;
1489 struct ib_event_handler event_handler; 1496 struct ib_event_handler event_handler;
1490 struct ib_pkey_cache **pkey_cache; 1497 struct ib_pkey_cache **pkey_cache;
1491 struct ib_gid_cache **gid_cache; 1498 struct ib_gid_table **gid_cache;
1492 u8 *lmc_cache; 1499 u8 *lmc_cache;
1493}; 1500};
1494 1501
@@ -1550,6 +1557,8 @@ struct ib_device {
1550 1557
1551 spinlock_t client_data_lock; 1558 spinlock_t client_data_lock;
1552 struct list_head core_list; 1559 struct list_head core_list;
1560 /* Access to the client_data_list is protected by the client_data_lock
1561 * spinlock and the lists_rwsem read-write semaphore */
1553 struct list_head client_data_list; 1562 struct list_head client_data_list;
1554 1563
1555 struct ib_cache cache; 1564 struct ib_cache cache;
@@ -1572,9 +1581,47 @@ struct ib_device {
1572 struct ib_port_attr *port_attr); 1581 struct ib_port_attr *port_attr);
1573 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1582 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
1574 u8 port_num); 1583 u8 port_num);
1584 /* When calling get_netdev, the HW vendor's driver should return the
1585 * net device of device @device at port @port_num or NULL if such
1586 * a net device doesn't exist. The vendor driver should call dev_hold
1587 * on this net device. The HW vendor's device driver must guarantee
1588 * that this function returns NULL before the net device reaches
1589 * NETDEV_UNREGISTER_FINAL state.
1590 */
1591 struct net_device *(*get_netdev)(struct ib_device *device,
1592 u8 port_num);
1575 int (*query_gid)(struct ib_device *device, 1593 int (*query_gid)(struct ib_device *device,
1576 u8 port_num, int index, 1594 u8 port_num, int index,
1577 union ib_gid *gid); 1595 union ib_gid *gid);
1596 /* When calling add_gid, the HW vendor's driver should
1597 * add the gid of device @device at gid index @index of
1598 * port @port_num to be @gid. Meta-info of that gid (for example,
1599 * the network device related to this gid is available
1600 * at @attr. @context allows the HW vendor driver to store extra
1601 * information together with a GID entry. The HW vendor may allocate
1602 * memory to contain this information and store it in @context when a
1603 * new GID entry is written to. Params are consistent until the next
 1604 * call of add_gid or del_gid. The function should return 0 on
1605 * success or error otherwise. The function could be called
1606 * concurrently for different ports. This function is only called
1607 * when roce_gid_table is used.
1608 */
1609 int (*add_gid)(struct ib_device *device,
1610 u8 port_num,
1611 unsigned int index,
1612 const union ib_gid *gid,
1613 const struct ib_gid_attr *attr,
1614 void **context);
1615 /* When calling del_gid, the HW vendor's driver should delete the
1616 * gid of device @device at gid index @index of port @port_num.
1617 * Upon the deletion of a GID entry, the HW vendor must free any
1618 * allocated memory. The caller will clear @context afterwards.
1619 * This function is only called when roce_gid_table is used.
1620 */
1621 int (*del_gid)(struct ib_device *device,
1622 u8 port_num,
1623 unsigned int index,
1624 void **context);
1578 int (*query_pkey)(struct ib_device *device, 1625 int (*query_pkey)(struct ib_device *device,
1579 u8 port_num, u16 index, u16 *pkey); 1626 u8 port_num, u16 index, u16 *pkey);
1580 int (*modify_device)(struct ib_device *device, 1627 int (*modify_device)(struct ib_device *device,
@@ -1668,11 +1715,9 @@ struct ib_device {
1668 int (*query_mr)(struct ib_mr *mr, 1715 int (*query_mr)(struct ib_mr *mr,
1669 struct ib_mr_attr *mr_attr); 1716 struct ib_mr_attr *mr_attr);
1670 int (*dereg_mr)(struct ib_mr *mr); 1717 int (*dereg_mr)(struct ib_mr *mr);
1671 int (*destroy_mr)(struct ib_mr *mr); 1718 struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
1672 struct ib_mr * (*create_mr)(struct ib_pd *pd, 1719 enum ib_mr_type mr_type,
1673 struct ib_mr_init_attr *mr_init_attr); 1720 u32 max_num_sg);
1674 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd,
1675 int max_page_list_len);
1676 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, 1721 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
1677 int page_list_len); 1722 int page_list_len);
1678 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list); 1723 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
@@ -1724,6 +1769,7 @@ struct ib_device {
1724 int (*destroy_flow)(struct ib_flow *flow_id); 1769 int (*destroy_flow)(struct ib_flow *flow_id);
1725 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 1770 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
1726 struct ib_mr_status *mr_status); 1771 struct ib_mr_status *mr_status);
1772 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
1727 1773
1728 struct ib_dma_mapping_ops *dma_ops; 1774 struct ib_dma_mapping_ops *dma_ops;
1729 1775
@@ -1761,8 +1807,30 @@ struct ib_device {
1761struct ib_client { 1807struct ib_client {
1762 char *name; 1808 char *name;
1763 void (*add) (struct ib_device *); 1809 void (*add) (struct ib_device *);
1764 void (*remove)(struct ib_device *); 1810 void (*remove)(struct ib_device *, void *client_data);
1765 1811
1812 /* Returns the net_dev belonging to this ib_client and matching the
1813 * given parameters.
1814 * @dev: An RDMA device that the net_dev use for communication.
1815 * @port: A physical port number on the RDMA device.
1816 * @pkey: P_Key that the net_dev uses if applicable.
1817 * @gid: A GID that the net_dev uses to communicate.
1818 * @addr: An IP address the net_dev is configured with.
1819 * @client_data: The device's client data set by ib_set_client_data().
1820 *
1821 * An ib_client that implements a net_dev on top of RDMA devices
1822 * (such as IP over IB) should implement this callback, allowing the
1823 * rdma_cm module to find the right net_dev for a given request.
1824 *
1825 * The caller is responsible for calling dev_put on the returned
1826 * netdev. */
1827 struct net_device *(*get_net_dev_by_params)(
1828 struct ib_device *dev,
1829 u8 port,
1830 u16 pkey,
1831 const union ib_gid *gid,
1832 const struct sockaddr *addr,
1833 void *client_data);
1766 struct list_head list; 1834 struct list_head list;
1767}; 1835};
1768 1836
@@ -2071,34 +2139,6 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2071} 2139}
2072 2140
2073/** 2141/**
2074 * rdma_cap_read_multi_sge - Check if the port of device has the capability
2075 * RDMA Read Multiple Scatter-Gather Entries.
2076 * @device: Device to check
2077 * @port_num: Port number to check
2078 *
2079 * iWARP has a restriction that RDMA READ requests may only have a single
2080 * Scatter/Gather Entry (SGE) in the work request.
2081 *
2082 * NOTE: although the linux kernel currently assumes all devices are either
2083 * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and
2084 * WRITEs, according to Tom Talpey, this is not accurate. There are some
2085 * devices out there that support more than a single SGE on RDMA READ
2086 * requests, but do not support the same number of SGEs as they do on
2087 * RDMA WRITE requests. The linux kernel would need rearchitecting to
2088 * support these imbalanced READ/WRITE SGEs allowed devices. So, for now,
2089 * suffice with either the device supports the same READ/WRITE SGEs, or
2090 * it only gets one READ sge.
2091 *
2092 * Return: true for any device that allows more than one SGE in RDMA READ
2093 * requests.
2094 */
2095static inline bool rdma_cap_read_multi_sge(struct ib_device *device,
2096 u8 port_num)
2097{
2098 return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP);
2099}
2100
2101/**
2102 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2142 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2103 * 2143 *
2104 * @device: Device 2144 * @device: Device
@@ -2115,6 +2155,26 @@ static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_n
2115 return device->port_immutable[port_num].max_mad_size; 2155 return device->port_immutable[port_num].max_mad_size;
2116} 2156}
2117 2157
2158/**
2159 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2160 * @device: Device to check
2161 * @port_num: Port number to check
2162 *
2163 * RoCE GID table mechanism manages the various GIDs for a device.
2164 *
2165 * NOTE: if allocating the port's GID table has failed, this call will still
2166 * return true, but any RoCE GID table API will fail.
2167 *
2168 * Return: true if the port uses RoCE GID table mechanism in order to manage
2169 * its GIDs.
2170 */
2171static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2172 u8 port_num)
2173{
2174 return rdma_protocol_roce(device, port_num) &&
2175 device->add_gid && device->del_gid;
2176}
2177
2118int ib_query_gid(struct ib_device *device, 2178int ib_query_gid(struct ib_device *device,
2119 u8 port_num, int index, union ib_gid *gid); 2179 u8 port_num, int index, union ib_gid *gid);
2120 2180
@@ -2135,20 +2195,9 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2135int ib_find_pkey(struct ib_device *device, 2195int ib_find_pkey(struct ib_device *device,
2136 u8 port_num, u16 pkey, u16 *index); 2196 u8 port_num, u16 pkey, u16 *index);
2137 2197
2138/**
2139 * ib_alloc_pd - Allocates an unused protection domain.
2140 * @device: The device on which to allocate the protection domain.
2141 *
2142 * A protection domain object provides an association between QPs, shared
2143 * receive queues, address handles, memory regions, and memory windows.
2144 */
2145struct ib_pd *ib_alloc_pd(struct ib_device *device); 2198struct ib_pd *ib_alloc_pd(struct ib_device *device);
2146 2199
2147/** 2200void ib_dealloc_pd(struct ib_pd *pd);
2148 * ib_dealloc_pd - Deallocates a protection domain.
2149 * @pd: The protection domain to deallocate.
2150 */
2151int ib_dealloc_pd(struct ib_pd *pd);
2152 2201
2153/** 2202/**
2154 * ib_create_ah - Creates an address handle for the given address vector. 2203 * ib_create_ah - Creates an address handle for the given address vector.
@@ -2760,52 +2809,6 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
2760} 2809}
2761 2810
2762/** 2811/**
2763 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
2764 * by an HCA.
2765 * @pd: The protection domain associated assigned to the registered region.
2766 * @phys_buf_array: Specifies a list of physical buffers to use in the
2767 * memory region.
2768 * @num_phys_buf: Specifies the size of the phys_buf_array.
2769 * @mr_access_flags: Specifies the memory access rights.
2770 * @iova_start: The offset of the region's starting I/O virtual address.
2771 */
2772struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
2773 struct ib_phys_buf *phys_buf_array,
2774 int num_phys_buf,
2775 int mr_access_flags,
2776 u64 *iova_start);
2777
2778/**
2779 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
2780 * Conceptually, this call performs the functions deregister memory region
2781 * followed by register physical memory region. Where possible,
2782 * resources are reused instead of deallocated and reallocated.
2783 * @mr: The memory region to modify.
2784 * @mr_rereg_mask: A bit-mask used to indicate which of the following
2785 * properties of the memory region are being modified.
2786 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
2787 * the new protection domain to associated with the memory region,
2788 * otherwise, this parameter is ignored.
2789 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2790 * field specifies a list of physical buffers to use in the new
2791 * translation, otherwise, this parameter is ignored.
2792 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
2793 * field specifies the size of the phys_buf_array, otherwise, this
2794 * parameter is ignored.
2795 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
2796 * field specifies the new memory access rights, otherwise, this
2797 * parameter is ignored.
2798 * @iova_start: The offset of the region's starting I/O virtual address.
2799 */
2800int ib_rereg_phys_mr(struct ib_mr *mr,
2801 int mr_rereg_mask,
2802 struct ib_pd *pd,
2803 struct ib_phys_buf *phys_buf_array,
2804 int num_phys_buf,
2805 int mr_access_flags,
2806 u64 *iova_start);
2807
2808/**
2809 * ib_query_mr - Retrieves information about a specific memory region. 2812 * ib_query_mr - Retrieves information about a specific memory region.
2810 * @mr: The memory region to retrieve information about. 2813 * @mr: The memory region to retrieve information about.
2811 * @mr_attr: The attributes of the specified memory region. 2814 * @mr_attr: The attributes of the specified memory region.
@@ -2821,33 +2824,9 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
2821 */ 2824 */
2822int ib_dereg_mr(struct ib_mr *mr); 2825int ib_dereg_mr(struct ib_mr *mr);
2823 2826
2824 2827struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
2825/** 2828 enum ib_mr_type mr_type,
2826 * ib_create_mr - Allocates a memory region that may be used for 2829 u32 max_num_sg);
2827 * signature handover operations.
2828 * @pd: The protection domain associated with the region.
2829 * @mr_init_attr: memory region init attributes.
2830 */
2831struct ib_mr *ib_create_mr(struct ib_pd *pd,
2832 struct ib_mr_init_attr *mr_init_attr);
2833
2834/**
2835 * ib_destroy_mr - Destroys a memory region that was created using
2836 * ib_create_mr and removes it from HW translation tables.
2837 * @mr: The memory region to destroy.
2838 *
2839 * This function can fail, if the memory region has memory windows bound to it.
2840 */
2841int ib_destroy_mr(struct ib_mr *mr);
2842
2843/**
2844 * ib_alloc_fast_reg_mr - Allocates memory region usable with the
2845 * IB_WR_FAST_REG_MR send work request.
2846 * @pd: The protection domain associated with the region.
2847 * @max_page_list_len: requested max physical buffer list length to be
2848 * used with fast register work requests for this MR.
2849 */
2850struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
2851 2830
2852/** 2831/**
2853 * ib_alloc_fast_reg_page_list - Allocates a page list array 2832 * ib_alloc_fast_reg_page_list - Allocates a page list array
@@ -3040,4 +3019,8 @@ static inline int ib_check_mr_access(int flags)
3040int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3019int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3041 struct ib_mr_status *mr_status); 3020 struct ib_mr_status *mr_status);
3042 3021
3022struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3023 u16 pkey, const union ib_gid *gid,
3024 const struct sockaddr *addr);
3025
3043#endif /* IB_VERBS_H */ 3026#endif /* IB_VERBS_H */
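ib_create_mr(), ib_destroy_mr() and ib_alloc_fast_reg_mr() collapse into the single ib_alloc_mr()/ib_dereg_mr() pair shown above. A sketch of the replacement call; the SG count of 32 is a placeholder:

    static struct ib_mr *toy_alloc_reg_mr(struct ib_pd *pd)
    {
            /* was: ib_alloc_fast_reg_mr(pd, 32); signature-capable regions
             * now use IB_MR_TYPE_SIGNATURE with the same allocator */
            return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
    }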
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
new file mode 100644
index 000000000000..391dae1931c0
--- /dev/null
+++ b/include/rdma/opa_port_info.h
@@ -0,0 +1,433 @@
1/*
2 * Copyright (c) 2014 Intel Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#if !defined(OPA_PORT_INFO_H)
34#define OPA_PORT_INFO_H
35
36/* Temporary until HFI driver is updated */
37#ifndef USE_PI_LED_ENABLE
38#define USE_PI_LED_ENABLE 0
39#endif
40
41#define OPA_PORT_LINK_MODE_NOP 0 /* No change */
42#define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */
43
44#define OPA_PORT_PACKET_FORMAT_NOP 0 /* No change */
45#define OPA_PORT_PACKET_FORMAT_8B 1 /* Format 8B */
46#define OPA_PORT_PACKET_FORMAT_9B 2 /* Format 9B */
47#define OPA_PORT_PACKET_FORMAT_10B 4 /* Format 10B */
48#define OPA_PORT_PACKET_FORMAT_16B 8 /* Format 16B */
49
50#define OPA_PORT_LTP_CRC_MODE_NONE 0 /* No change */
51#define OPA_PORT_LTP_CRC_MODE_14 1 /* 14-bit LTP CRC mode (optional) */
52#define OPA_PORT_LTP_CRC_MODE_16 2 /* 16-bit LTP CRC mode */
53#define OPA_PORT_LTP_CRC_MODE_48 4 /* 48-bit LTP CRC mode (optional) */
54#define OPA_PORT_LTP_CRC_MODE_PER_LANE 8 /* 12/16-bit per lane LTP CRC mode */
55
56/* Link Down / Neighbor Link Down Reason; indicated as follows: */
57#define OPA_LINKDOWN_REASON_NONE 0 /* No specified reason */
58#define OPA_LINKDOWN_REASON_RCV_ERROR_0 1
59#define OPA_LINKDOWN_REASON_BAD_PKT_LEN 2
60#define OPA_LINKDOWN_REASON_PKT_TOO_LONG 3
61#define OPA_LINKDOWN_REASON_PKT_TOO_SHORT 4
62#define OPA_LINKDOWN_REASON_BAD_SLID 5
63#define OPA_LINKDOWN_REASON_BAD_DLID 6
64#define OPA_LINKDOWN_REASON_BAD_L2 7
65#define OPA_LINKDOWN_REASON_BAD_SC 8
66#define OPA_LINKDOWN_REASON_RCV_ERROR_8 9
67#define OPA_LINKDOWN_REASON_BAD_MID_TAIL 10
68#define OPA_LINKDOWN_REASON_RCV_ERROR_10 11
69#define OPA_LINKDOWN_REASON_PREEMPT_ERROR 12
70#define OPA_LINKDOWN_REASON_PREEMPT_VL15 13
71#define OPA_LINKDOWN_REASON_BAD_VL_MARKER 14
72#define OPA_LINKDOWN_REASON_RCV_ERROR_14 15
73#define OPA_LINKDOWN_REASON_RCV_ERROR_15 16
74#define OPA_LINKDOWN_REASON_BAD_HEAD_DIST 17
75#define OPA_LINKDOWN_REASON_BAD_TAIL_DIST 18
76#define OPA_LINKDOWN_REASON_BAD_CTRL_DIST 19
77#define OPA_LINKDOWN_REASON_BAD_CREDIT_ACK 20
78#define OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER 21
79#define OPA_LINKDOWN_REASON_BAD_PREEMPT 22
80#define OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT 23
81#define OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT 24
82#define OPA_LINKDOWN_REASON_RCV_ERROR_24 25
83#define OPA_LINKDOWN_REASON_RCV_ERROR_25 26
84#define OPA_LINKDOWN_REASON_RCV_ERROR_26 27
85#define OPA_LINKDOWN_REASON_RCV_ERROR_27 28
86#define OPA_LINKDOWN_REASON_RCV_ERROR_28 29
87#define OPA_LINKDOWN_REASON_RCV_ERROR_29 30
88#define OPA_LINKDOWN_REASON_RCV_ERROR_30 31
89#define OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN 32
90#define OPA_LINKDOWN_REASON_UNKNOWN 33
91/* 34 -reserved */
92#define OPA_LINKDOWN_REASON_REBOOT 35
93#define OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN 36
94/* 37-38 reserved */
95#define OPA_LINKDOWN_REASON_FM_BOUNCE 39
96#define OPA_LINKDOWN_REASON_SPEED_POLICY 40
97#define OPA_LINKDOWN_REASON_WIDTH_POLICY 41
98/* 42-48 reserved */
99#define OPA_LINKDOWN_REASON_DISCONNECTED 49
100#define OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED 50
101#define OPA_LINKDOWN_REASON_NOT_INSTALLED 51
102#define OPA_LINKDOWN_REASON_CHASSIS_CONFIG 52
103/* 53 reserved */
104#define OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED 54
105/* 55 reserved */
106#define OPA_LINKDOWN_REASON_POWER_POLICY 56
107#define OPA_LINKDOWN_REASON_LINKSPEED_POLICY 57
108#define OPA_LINKDOWN_REASON_LINKWIDTH_POLICY 58
109/* 59 reserved */
110#define OPA_LINKDOWN_REASON_SWITCH_MGMT 60
111#define OPA_LINKDOWN_REASON_SMA_DISABLED 61
112/* 62 reserved */
113#define OPA_LINKDOWN_REASON_TRANSIENT 63
114/* 64-255 reserved */
115
116/* OPA Link Init reason; indicated as follows: */
117/* 3-7; 11-15 reserved; 8-15 cleared on Polling->LinkUp */
118#define OPA_LINKINIT_REASON_NOP 0
119#define OPA_LINKINIT_REASON_LINKUP (1 << 4)
120#define OPA_LINKINIT_REASON_FLAPPING (2 << 4)
121#define OPA_LINKINIT_REASON_CLEAR (8 << 4)
122#define OPA_LINKINIT_OUTSIDE_POLICY (8 << 4)
123#define OPA_LINKINIT_QUARANTINED (9 << 4)
124#define OPA_LINKINIT_INSUFIC_CAPABILITY (10 << 4)
125
126#define OPA_LINK_SPEED_NOP 0x0000 /* Reserved (1-5 Gbps) */
127#define OPA_LINK_SPEED_12_5G 0x0001 /* 12.5 Gbps */
128#define OPA_LINK_SPEED_25G 0x0002 /* 25.78125? Gbps (EDR) */
129
130#define OPA_LINK_WIDTH_1X 0x0001
131#define OPA_LINK_WIDTH_2X 0x0002
132#define OPA_LINK_WIDTH_3X 0x0004
133#define OPA_LINK_WIDTH_4X 0x0008
134
135#define OPA_CAP_MASK3_IsSnoopSupported (1 << 7)
136#define OPA_CAP_MASK3_IsAsyncSC2VLSupported (1 << 6)
137#define OPA_CAP_MASK3_IsAddrRangeConfigSupported (1 << 5)
138#define OPA_CAP_MASK3_IsPassThroughSupported (1 << 4)
139#define OPA_CAP_MASK3_IsSharedSpaceSupported (1 << 3)
140/* reserved (1 << 2) */
141#define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1)
142#define OPA_CAP_MASK3_IsVLrSupported (1 << 0)
143
144/**
145 * new MTU values
146 */
147enum {
148 OPA_MTU_8192 = 6,
149 OPA_MTU_10240 = 7,
150};
151
152enum {
153 OPA_PORT_PHYS_CONF_DISCONNECTED = 0,
154 OPA_PORT_PHYS_CONF_STANDARD = 1,
155 OPA_PORT_PHYS_CONF_FIXED = 2,
156 OPA_PORT_PHYS_CONF_VARIABLE = 3,
157 OPA_PORT_PHYS_CONF_SI_PHOTO = 4
158};
159
160enum port_info_field_masks {
161 /* vl.cap */
162 OPA_PI_MASK_VL_CAP = 0x1F,
163 /* port_states.ledenable_offlinereason */
164 OPA_PI_MASK_OFFLINE_REASON = 0x0F,
165 OPA_PI_MASK_LED_ENABLE = 0x40,
166 /* port_states.unsleepstate_downdefstate */
167 OPA_PI_MASK_UNSLEEP_STATE = 0xF0,
168 OPA_PI_MASK_DOWNDEF_STATE = 0x0F,
169 /* port_states.portphysstate_portstate */
170 OPA_PI_MASK_PORT_PHYSICAL_STATE = 0xF0,
171 OPA_PI_MASK_PORT_STATE = 0x0F,
172 /* port_phys_conf */
173 OPA_PI_MASK_PORT_PHYSICAL_CONF = 0x0F,
174 /* collectivemask_multicastmask */
175 OPA_PI_MASK_COLLECT_MASK = 0x38,
176 OPA_PI_MASK_MULTICAST_MASK = 0x07,
177 /* mkeyprotect_lmc */
178 OPA_PI_MASK_MKEY_PROT_BIT = 0xC0,
179 OPA_PI_MASK_LMC = 0x0F,
180 /* smsl */
181 OPA_PI_MASK_SMSL = 0x1F,
182 /* partenforce_filterraw */
183 /* Filter Raw In/Out bits 1 and 2 were removed */
184 OPA_PI_MASK_LINKINIT_REASON = 0xF0,
185 OPA_PI_MASK_PARTITION_ENFORCE_IN = 0x08,
186 OPA_PI_MASK_PARTITION_ENFORCE_OUT = 0x04,
187 /* operational_vls */
188 OPA_PI_MASK_OPERATIONAL_VL = 0x1F,
189 /* sa_qp */
190 OPA_PI_MASK_SA_QP = 0x00FFFFFF,
191 /* sm_trap_qp */
192 OPA_PI_MASK_SM_TRAP_QP = 0x00FFFFFF,
193 /* localphy_overrun_errors */
194 OPA_PI_MASK_LOCAL_PHY_ERRORS = 0xF0,
195 OPA_PI_MASK_OVERRUN_ERRORS = 0x0F,
196 /* clientrereg_subnettimeout */
197 OPA_PI_MASK_CLIENT_REREGISTER = 0x80,
198 OPA_PI_MASK_SUBNET_TIMEOUT = 0x1F,
199 /* port_link_mode */
200 OPA_PI_MASK_PORT_LINK_SUPPORTED = (0x001F << 10),
201 OPA_PI_MASK_PORT_LINK_ENABLED = (0x001F << 5),
202 OPA_PI_MASK_PORT_LINK_ACTIVE = (0x001F << 0),
203 /* port_link_crc_mode */
204 OPA_PI_MASK_PORT_LINK_CRC_SUPPORTED = 0x0F00,
205 OPA_PI_MASK_PORT_LINK_CRC_ENABLED = 0x00F0,
206 OPA_PI_MASK_PORT_LINK_CRC_ACTIVE = 0x000F,
207 /* port_mode */
208 OPA_PI_MASK_PORT_MODE_SECURITY_CHECK = 0x0001,
209 OPA_PI_MASK_PORT_MODE_16B_TRAP_QUERY = 0x0002,
210 OPA_PI_MASK_PORT_MODE_PKEY_CONVERT = 0x0004,
211 OPA_PI_MASK_PORT_MODE_SC2SC_MAPPING = 0x0008,
212 OPA_PI_MASK_PORT_MODE_VL_MARKER = 0x0010,
213 OPA_PI_MASK_PORT_PASS_THROUGH = 0x0020,
214 OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE = 0x0040,
215 /* flit_control.interleave */
216 OPA_PI_MASK_INTERLEAVE_DIST_SUP = (0x0003 << 12),
217 OPA_PI_MASK_INTERLEAVE_DIST_ENABLE = (0x0003 << 10),
218 OPA_PI_MASK_INTERLEAVE_MAX_NEST_TX = (0x001F << 5),
219 OPA_PI_MASK_INTERLEAVE_MAX_NEST_RX = (0x001F << 0),
220
221 /* port_error_action */
222 OPA_PI_MASK_EX_BUFFER_OVERRUN = 0x80000000,
223 /* 7 bits reserved */
224 OPA_PI_MASK_FM_CFG_ERR_EXCEED_MULTICAST_LIMIT = 0x00800000,
225 OPA_PI_MASK_FM_CFG_BAD_CONTROL_FLIT = 0x00400000,
226 OPA_PI_MASK_FM_CFG_BAD_PREEMPT = 0x00200000,
227 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER = 0x00100000,
228 OPA_PI_MASK_FM_CFG_BAD_CRDT_ACK = 0x00080000,
229 OPA_PI_MASK_FM_CFG_BAD_CTRL_DIST = 0x00040000,
230 OPA_PI_MASK_FM_CFG_BAD_TAIL_DIST = 0x00020000,
231 OPA_PI_MASK_FM_CFG_BAD_HEAD_DIST = 0x00010000,
232 /* 2 bits reserved */
233 OPA_PI_MASK_PORT_RCV_BAD_VL_MARKER = 0x00002000,
234 OPA_PI_MASK_PORT_RCV_PREEMPT_VL15 = 0x00001000,
235 OPA_PI_MASK_PORT_RCV_PREEMPT_ERROR = 0x00000800,
236 /* 1 bit reserved */
237 OPA_PI_MASK_PORT_RCV_BAD_MidTail = 0x00000200,
238 /* 1 bit reserved */
239 OPA_PI_MASK_PORT_RCV_BAD_SC = 0x00000080,
240 OPA_PI_MASK_PORT_RCV_BAD_L2 = 0x00000040,
241 OPA_PI_MASK_PORT_RCV_BAD_DLID = 0x00000020,
242 OPA_PI_MASK_PORT_RCV_BAD_SLID = 0x00000010,
243 OPA_PI_MASK_PORT_RCV_PKTLEN_TOOSHORT = 0x00000008,
244 OPA_PI_MASK_PORT_RCV_PKTLEN_TOOLONG = 0x00000004,
245 OPA_PI_MASK_PORT_RCV_BAD_PKTLEN = 0x00000002,
246 OPA_PI_MASK_PORT_RCV_BAD_LT = 0x00000001,
247
248 /* pass_through.res_drctl */
249 OPA_PI_MASK_PASS_THROUGH_DR_CONTROL = 0x01,
250
251 /* buffer_units */
252 OPA_PI_MASK_BUF_UNIT_VL15_INIT = (0x00000FFF << 11),
253 OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE = (0x0000001F << 6),
254 OPA_PI_MASK_BUF_UNIT_CREDIT_ACK = (0x00000003 << 3),
255 OPA_PI_MASK_BUF_UNIT_BUF_ALLOC = (0x00000003 << 0),
256
257 /* neigh_mtu.pvlx_to_mtu */
258 OPA_PI_MASK_NEIGH_MTU_PVL0 = 0xF0,
259 OPA_PI_MASK_NEIGH_MTU_PVL1 = 0x0F,
260
261 /* neigh_mtu.vlstall_hoq_life */
262 OPA_PI_MASK_VL_STALL = (0x03 << 5),
263 OPA_PI_MASK_HOQ_LIFE = (0x1F << 0),
264
265 /* port_neigh_mode */
266 OPA_PI_MASK_NEIGH_MGMT_ALLOWED = (0x01 << 3),
267 OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS = (0x01 << 2),
268 OPA_PI_MASK_NEIGH_NODE_TYPE = (0x03 << 0),
269
270 /* resptime_value */
271 OPA_PI_MASK_RESPONSE_TIME_VALUE = 0x1F,
272
273 /* mtucap */
274 OPA_PI_MASK_MTU_CAP = 0x0F,
275};
276
277#if USE_PI_LED_ENABLE
278struct opa_port_states {
279 u8 reserved;
280 u8 ledenable_offlinereason; /* 1 res, 1 bit, 6 bits */
281 u8 reserved2;
282 u8 portphysstate_portstate; /* 4 bits, 4 bits */
283};
284#define PI_LED_ENABLE_SUP 1
285#else
286struct opa_port_states {
287 u8 reserved;
288 u8 offline_reason; /* 2 res, 6 bits */
289 u8 reserved2;
290 u8 portphysstate_portstate; /* 4 bits, 4 bits */
291};
292#define PI_LED_ENABLE_SUP 0
293#endif
294
295struct opa_port_state_info {
296 struct opa_port_states port_states;
297 u16 link_width_downgrade_tx_active;
298 u16 link_width_downgrade_rx_active;
299};
300
301struct opa_port_info {
302 __be32 lid;
303 __be32 flow_control_mask;
304
305 struct {
306 u8 res; /* was inittype */
307 u8 cap; /* 3 res, 5 bits */
308 __be16 high_limit;
309 __be16 preempt_limit;
310 u8 arb_high_cap;
311 u8 arb_low_cap;
312 } vl;
313
314 struct opa_port_states port_states;
315 u8 port_phys_conf; /* 4 res, 4 bits */
316 u8 collectivemask_multicastmask; /* 2 res, 3, 3 */
317 u8 mkeyprotect_lmc; /* 2 bits, 2 res, 4 bits */
318 u8 smsl; /* 3 res, 5 bits */
319
320 u8 partenforce_filterraw; /* bit fields */
321 u8 operational_vls; /* 3 res, 5 bits */
322 __be16 pkey_8b;
323 __be16 pkey_10b;
324 __be16 mkey_violations;
325
326 __be16 pkey_violations;
327 __be16 qkey_violations;
328 __be32 sm_trap_qp; /* 8 bits, 24 bits */
329
330 __be32 sa_qp; /* 8 bits, 24 bits */
331 u8 neigh_port_num;
332 u8 link_down_reason;
333 u8 neigh_link_down_reason;
334 u8 clientrereg_subnettimeout; /* 1 bit, 2 bits, 5 */
335
336 struct {
337 __be16 supported;
338 __be16 enabled;
339 __be16 active;
340 } link_speed;
341 struct {
342 __be16 supported;
343 __be16 enabled;
344 __be16 active;
345 } link_width;
346 struct {
347 __be16 supported;
348 __be16 enabled;
349 __be16 tx_active;
350 __be16 rx_active;
351 } link_width_downgrade;
352 __be16 port_link_mode; /* 1 res, 5 bits, 5 bits, 5 bits */
353 __be16 port_ltp_crc_mode; /* 4 res, 4 bits, 4 bits, 4 bits */
354
355 __be16 port_mode; /* 9 res, bit fields */
356 struct {
357 __be16 supported;
358 __be16 enabled;
359 } port_packet_format;
360 struct {
361 __be16 interleave; /* 2 res, 2,2,5,5 */
362 struct {
363 __be16 min_initial;
364 __be16 min_tail;
365 u8 large_pkt_limit;
366 u8 small_pkt_limit;
367 u8 max_small_pkt_limit;
368 u8 preemption_limit;
369 } preemption;
370 } flit_control;
371
372 __be32 reserved4;
373 __be32 port_error_action; /* bit field */
374
375 struct {
376 u8 egress_port;
377 u8 res_drctl; /* 7 res, 1 */
378 } pass_through;
379 __be16 mkey_lease_period;
380 __be32 buffer_units; /* 9 res, 12, 5, 3, 3 */
381
382 __be32 reserved5;
383 __be32 sm_lid;
384
385 __be64 mkey;
386
387 __be64 subnet_prefix;
388
389 struct {
390 u8 pvlx_to_mtu[OPA_MAX_VLS/2]; /* 4 bits, 4 bits */
391 } neigh_mtu;
392
393 struct {
394 u8 vlstall_hoqlife; /* 3 bits, 5 bits */
395 } xmit_q[OPA_MAX_VLS];
396
397 struct {
398 u8 addr[16];
399 } ipaddr_ipv6;
400
401 struct {
402 u8 addr[4];
403 } ipaddr_ipv4;
404
405 u32 reserved6;
406 u32 reserved7;
407 u32 reserved8;
408
409 __be64 neigh_node_guid;
410
411 __be32 ib_cap_mask;
412 __be16 reserved9; /* was ib_cap_mask2 */
413 __be16 opa_cap_mask;
414
415 __be32 reserved10; /* was link_roundtrip_latency */
416 __be16 overall_buffer_space;
417 __be16 reserved11; /* was max_credit_hint */
418
419 __be16 diag_code;
420 struct {
421 u8 buffer;
422 u8 wire;
423 } replay_depth;
424 u8 port_neigh_mode;
425 u8 mtucap; /* 4 res, 4 bits */
426
427 u8 resptimevalue; /* 3 res, 5 bits */
428 u8 local_port_num;
429 u8 reserved12;
430 u8 reserved13; /* was guid_cap */
431} __attribute__ ((packed));
432
433#endif /* OPA_PORT_INFO_H */
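
For illustration, a minimal, untested sketch of how the *_MASK constants above pair with struct opa_port_info: extracting the M_Key protect bits and the LMC from the shared mkeyprotect_lmc byte. The helper name is invented for this example.

#include <rdma/opa_port_info.h>

/* Hypothetical helper: split the packed mkeyprotect_lmc byte. */
static void example_decode_mkeyprotect_lmc(const struct opa_port_info *pi,
                                           u8 *mkey_prot, u8 *lmc)
{
        /* OPA_PI_MASK_MKEY_PROT_BIT covers the top two bits (0xC0) */
        *mkey_prot = (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
        /* OPA_PI_MASK_LMC covers the low nibble (0x0F) */
        *lmc = pi->mkeyprotect_lmc & OPA_PI_MASK_LMC;
}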
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index 29063e84c253..4a529ef47995 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -40,6 +40,10 @@
40#define OPA_SMP_DR_DATA_SIZE 1872 40#define OPA_SMP_DR_DATA_SIZE 1872
41#define OPA_SMP_MAX_PATH_HOPS 64 41#define OPA_SMP_MAX_PATH_HOPS 64
42 42
43#define OPA_MAX_VLS 32
44#define OPA_MAX_SLS 32
45#define OPA_MAX_SCS 32
46
43#define OPA_SMI_CLASS_VERSION 0x80 47#define OPA_SMI_CLASS_VERSION 0x80
44 48
45#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF) 49#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
@@ -73,6 +77,49 @@ struct opa_smp {
73} __packed; 77} __packed;
74 78
75 79
80/* Subnet management attributes */
81/* ... */
82#define OPA_ATTRIB_ID_NODE_DESCRIPTION cpu_to_be16(0x0010)
83#define OPA_ATTRIB_ID_NODE_INFO cpu_to_be16(0x0011)
84#define OPA_ATTRIB_ID_PORT_INFO cpu_to_be16(0x0015)
85#define OPA_ATTRIB_ID_PARTITION_TABLE cpu_to_be16(0x0016)
86#define OPA_ATTRIB_ID_SL_TO_SC_MAP cpu_to_be16(0x0017)
87#define OPA_ATTRIB_ID_VL_ARBITRATION cpu_to_be16(0x0018)
88#define OPA_ATTRIB_ID_SM_INFO cpu_to_be16(0x0020)
89#define OPA_ATTRIB_ID_CABLE_INFO cpu_to_be16(0x0032)
90#define OPA_ATTRIB_ID_AGGREGATE cpu_to_be16(0x0080)
91#define OPA_ATTRIB_ID_SC_TO_SL_MAP cpu_to_be16(0x0082)
92#define OPA_ATTRIB_ID_SC_TO_VLR_MAP cpu_to_be16(0x0083)
93#define OPA_ATTRIB_ID_SC_TO_VLT_MAP cpu_to_be16(0x0084)
94#define OPA_ATTRIB_ID_SC_TO_VLNT_MAP cpu_to_be16(0x0085)
95/* ... */
96#define OPA_ATTRIB_ID_PORT_STATE_INFO cpu_to_be16(0x0087)
97/* ... */
98#define OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE cpu_to_be16(0x008A)
99/* ... */
100
101struct opa_node_description {
102 u8 data[64];
103} __attribute__ ((packed));
104
105struct opa_node_info {
106 u8 base_version;
107 u8 class_version;
108 u8 node_type;
109 u8 num_ports;
110 __be32 reserved;
111 __be64 system_image_guid;
112 __be64 node_guid;
113 __be64 port_guid;
114 __be16 partition_cap;
115 __be16 device_id;
116 __be32 revision;
117 u8 local_port_num;
118 u8 vendor_id[3]; /* network byte order */
119} __attribute__ ((packed));
120
121#define OPA_PARTITION_TABLE_BLK_SIZE 32
122
76static inline u8 123static inline u8
77opa_get_smp_direction(struct opa_smp *smp) 124opa_get_smp_direction(struct opa_smp *smp)
78{ 125{
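
A small sketch, for illustration only, of how an SMA implementation might dispatch on the new attribute IDs; it assumes the attr_id field of struct opa_smp declared earlier in this header, and the helper name is made up.

#include <rdma/opa_smi.h>

/* Hypothetical dispatch check, assuming opa_smp exposes a __be16 attr_id. */
static bool example_is_port_info_query(const struct opa_smp *smp)
{
        return smp->attr_id == OPA_ATTRIB_ID_PORT_INFO;
}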
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 0790882e0c9b..585266144329 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -77,4 +77,11 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
77int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, 77int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
78 unsigned int group, gfp_t flags); 78 unsigned int group, gfp_t flags);
79 79
80/**
81 * ibnl_chk_listeners - Check if there are any listeners to the netlink group
82 * @group: the netlink group ID
83 * Returns 0 if the group has listeners, or a negative value if it has none.
84 */
85int ibnl_chk_listeners(unsigned int group);
86
80#endif /* _RDMA_NETLINK_H */ 87#endif /* _RDMA_NETLINK_H */
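
A minimal sketch of the intended use of ibnl_chk_listeners(): skip building a netlink message when nobody is subscribed to the group. The wrapper name is hypothetical.

#include <rdma/rdma_netlink.h>

/* Hypothetical wrapper: ibnl_chk_listeners() returns 0 when the group has
 * listeners and a negative value otherwise (see the kernel-doc above). */
static bool example_group_has_listeners(unsigned int group)
{
        return ibnl_chk_listeners(group) == 0;
}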
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index ae84b2214d40..50c2a363bc8f 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -57,9 +57,10 @@ enum scsi_device_event {
57 SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */ 57 SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */
58 SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */ 58 SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */
59 SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */ 59 SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */
60 SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */
60 61
61 SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE, 62 SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE,
62 SDEV_EVT_LAST = SDEV_EVT_LUN_CHANGE_REPORTED, 63 SDEV_EVT_LAST = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
63 64
64 SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1 65 SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
65}; 66};
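
A hedged sketch of how a device handler or LLD might raise the new ALUA event; it assumes the existing sdev_evt_send_simple() helper from this header, and the function name is a placeholder.

#include <scsi/scsi_device.h>

/* Hypothetical hook: report a 2A/06 "ALUA state changed" unit attention. */
static void example_report_alua_change(struct scsi_device *sdev)
{
        sdev_evt_send_simple(sdev, SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
                             GFP_KERNEL);
}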
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 2555ee5343fd..6183d20a01fb 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -241,6 +241,7 @@ struct iscsi_cls_session {
241 241
242 /* recovery fields */ 242 /* recovery fields */
243 int recovery_tmo; 243 int recovery_tmo;
244 bool recovery_tmo_sysfs_override;
244 struct delayed_work recovery_work; 245 struct delayed_work recovery_work;
245 246
246 unsigned int target_id; 247 unsigned int target_id;
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index b019e3465f11..961b821b6a46 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -22,6 +22,7 @@
22#define TEGRA114 0x35 22#define TEGRA114 0x35
23#define TEGRA124 0x40 23#define TEGRA124 0x40
24#define TEGRA132 0x13 24#define TEGRA132 0x13
25#define TEGRA210 0x21
25 26
26#define TEGRA_FUSE_SKU_CALIB_0 0xf0 27#define TEGRA_FUSE_SKU_CALIB_0 0xf0
27#define TEGRA30_FUSE_SATA_CALIB 0x124 28#define TEGRA30_FUSE_SATA_CALIB 0x124
@@ -47,10 +48,11 @@ struct tegra_sku_info {
47 int cpu_speedo_id; 48 int cpu_speedo_id;
48 int cpu_speedo_value; 49 int cpu_speedo_value;
49 int cpu_iddq_value; 50 int cpu_iddq_value;
50 int core_process_id; 51 int soc_process_id;
51 int soc_speedo_id; 52 int soc_speedo_id;
52 int gpu_speedo_id; 53 int soc_speedo_value;
53 int gpu_process_id; 54 int gpu_process_id;
55 int gpu_speedo_id;
54 int gpu_speedo_value; 56 int gpu_speedo_value;
55 enum tegra_revision revision; 57 enum tegra_revision revision;
56}; 58};
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 1ab2813273cd..44202ff897fd 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -51,11 +51,6 @@ struct tegra_smmu_swgroup {
51 unsigned int reg; 51 unsigned int reg;
52}; 52};
53 53
54struct tegra_smmu_ops {
55 void (*flush_dcache)(struct page *page, unsigned long offset,
56 size_t size);
57};
58
59struct tegra_smmu_soc { 54struct tegra_smmu_soc {
60 const struct tegra_mc_client *clients; 55 const struct tegra_mc_client *clients;
61 unsigned int num_clients; 56 unsigned int num_clients;
@@ -66,9 +61,8 @@ struct tegra_smmu_soc {
66 bool supports_round_robin_arbitration; 61 bool supports_round_robin_arbitration;
67 bool supports_request_limit; 62 bool supports_request_limit;
68 63
64 unsigned int num_tlb_lines;
69 unsigned int num_asids; 65 unsigned int num_asids;
70
71 const struct tegra_smmu_ops *ops;
72}; 66};
73 67
74struct tegra_mc; 68struct tegra_mc;
@@ -102,6 +96,8 @@ struct tegra_mc_soc {
102 unsigned int num_address_bits; 96 unsigned int num_address_bits;
103 unsigned int atom_size; 97 unsigned int atom_size;
104 98
99 u8 client_id_mask;
100
105 const struct tegra_smmu_soc *smmu; 101 const struct tegra_smmu_soc *smmu;
106}; 102};
107 103
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index f5c0de43a5fa..d18efe402ff1 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -67,6 +67,11 @@ int tegra_pmc_cpu_remove_clamping(int cpuid);
67#define TEGRA_POWERGATE_XUSBC 22 67#define TEGRA_POWERGATE_XUSBC 22
68#define TEGRA_POWERGATE_VIC 23 68#define TEGRA_POWERGATE_VIC 23
69#define TEGRA_POWERGATE_IRAM 24 69#define TEGRA_POWERGATE_IRAM 24
70#define TEGRA_POWERGATE_NVDEC 25
71#define TEGRA_POWERGATE_NVJPG 26
72#define TEGRA_POWERGATE_AUD 27
73#define TEGRA_POWERGATE_DFD 28
74#define TEGRA_POWERGATE_VE2 29
70 75
71#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D 76#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
72 77
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 0e9d75b49bed..74bc85473b58 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -584,6 +584,8 @@ static inline int snd_ac97_update_power(struct snd_ac97 *ac97, int reg,
584void snd_ac97_suspend(struct snd_ac97 *ac97); 584void snd_ac97_suspend(struct snd_ac97 *ac97);
585void snd_ac97_resume(struct snd_ac97 *ac97); 585void snd_ac97_resume(struct snd_ac97 *ac97);
586#endif 586#endif
587int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
588 unsigned int id_mask);
587 589
588/* quirk types */ 590/* quirk types */
589enum { 591enum {
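
A sketch of the new snd_ac97_reset() helper in a resume path, under the assumption that a warm reset is attempted before a cold one and the resulting vendor ID is checked against the given mask. The ID and mask values are placeholders, not taken from a real codec.

#include <sound/ac97_codec.h>

/* Hypothetical resume helper using the new reset API. */
static int example_ac97_resume(struct snd_ac97 *ac97)
{
        return snd_ac97_reset(ac97, true /* try_warm */,
                              0x12345600 /* placeholder id */,
                              0xffffff00 /* placeholder id_mask */);
}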
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index adb5ba5cbd9d..930b41e5acf4 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -4,14 +4,17 @@
4#ifndef __SOUND_HDA_I915_H 4#ifndef __SOUND_HDA_I915_H
5#define __SOUND_HDA_I915_H 5#define __SOUND_HDA_I915_H
6 6
7#include <drm/i915_component.h>
8
7#ifdef CONFIG_SND_HDA_I915 9#ifdef CONFIG_SND_HDA_I915
8int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); 10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
9int snd_hdac_display_power(struct hdac_bus *bus, bool enable); 11int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
10int snd_hdac_get_display_clk(struct hdac_bus *bus); 12int snd_hdac_get_display_clk(struct hdac_bus *bus);
11int snd_hdac_i915_init(struct hdac_bus *bus); 13int snd_hdac_i915_init(struct hdac_bus *bus);
12int snd_hdac_i915_exit(struct hdac_bus *bus); 14int snd_hdac_i915_exit(struct hdac_bus *bus);
15int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
13#else 16#else
14static int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable) 17static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
15{ 18{
16 return 0; 19 return 0;
17} 20}
@@ -31,6 +34,10 @@ static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
31{ 34{
32 return 0; 35 return 0;
33} 36}
37static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
38{
39 return -ENODEV;
40}
34#endif 41#endif
35 42
36#endif /* __SOUND_HDA_I915_H */ 43#endif /* __SOUND_HDA_I915_H */
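
A sketch of registering the new audio-ops notifier. It assumes that struct i915_audio_component_audio_ops (from drm/i915_component.h, now included above) provides a pin_eld_notify(void *audio_ptr, int port) callback; every "example_" name is a placeholder.

#include <sound/hdaudio.h>
#include <sound/hda_i915.h>

/* Assumed callback shape per i915_audio_component_audio_ops. */
static void example_pin_eld_notify(void *audio_ptr, int port)
{
        /* re-read the ELD / jack state for this port here */
}

static const struct i915_audio_component_audio_ops example_audio_ops = {
        .pin_eld_notify = example_pin_eld_notify,
};

static int example_bind_display(struct hdac_bus *bus)
{
        int err = snd_hdac_i915_init(bus);

        if (err < 0)
                return err;
        /* returns -ENODEV when CONFIG_SND_HDA_I915 is disabled */
        return snd_hdac_i915_register_notifier(&example_audio_ops);
}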
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ae995e523ff8..2ae8812d7b1a 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -160,6 +160,10 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
160#define AZX_SPB_BASE 0x08 160#define AZX_SPB_BASE 0x08
161/* Interval used to calculate the iterating register offset */ 161/* Interval used to calculate the iterating register offset */
162#define AZX_SPB_INTERVAL 0x08 162#define AZX_SPB_INTERVAL 0x08
163/* SPIB base */
164#define AZX_SPB_SPIB 0x00
165/* SPIB MAXFIFO base */
166#define AZX_SPB_MAXFIFO 0x04
163 167
164/* registers of Global Time Synchronization Capability Structure */ 168/* registers of Global Time Synchronization Capability Structure */
165#define AZX_GTS_CAP_ID 0x1 169#define AZX_GTS_CAP_ID 0x1
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 4caf1fde8a4f..49bc836fcd84 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -119,6 +119,7 @@ int snd_hdac_device_register(struct hdac_device *codec);
119void snd_hdac_device_unregister(struct hdac_device *codec); 119void snd_hdac_device_unregister(struct hdac_device *codec);
120 120
121int snd_hdac_refresh_widgets(struct hdac_device *codec); 121int snd_hdac_refresh_widgets(struct hdac_device *codec);
122int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec);
122 123
123unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid, 124unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
124 unsigned int verb, unsigned int parm); 125 unsigned int verb, unsigned int parm);
@@ -164,15 +165,15 @@ static inline int snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid,
164} 165}
165 166
166#ifdef CONFIG_PM 167#ifdef CONFIG_PM
167void snd_hdac_power_up(struct hdac_device *codec); 168int snd_hdac_power_up(struct hdac_device *codec);
168void snd_hdac_power_down(struct hdac_device *codec); 169int snd_hdac_power_down(struct hdac_device *codec);
169void snd_hdac_power_up_pm(struct hdac_device *codec); 170int snd_hdac_power_up_pm(struct hdac_device *codec);
170void snd_hdac_power_down_pm(struct hdac_device *codec); 171int snd_hdac_power_down_pm(struct hdac_device *codec);
171#else 172#else
172static inline void snd_hdac_power_up(struct hdac_device *codec) {} 173static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
173static inline void snd_hdac_power_down(struct hdac_device *codec) {} 174static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
174static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {} 175static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
175static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {} 176static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
176#endif 177#endif
177 178
178/* 179/*
@@ -437,6 +438,8 @@ void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
437struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus, 438struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
438 struct snd_pcm_substream *substream); 439 struct snd_pcm_substream *substream);
439void snd_hdac_stream_release(struct hdac_stream *azx_dev); 440void snd_hdac_stream_release(struct hdac_stream *azx_dev);
441struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
442 int dir, int stream_tag);
440 443
441int snd_hdac_stream_setup(struct hdac_stream *azx_dev); 444int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
442void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev); 445void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
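
Since snd_hdac_power_up()/snd_hdac_power_down() now return int, callers can propagate runtime-PM failures instead of ignoring them. A minimal, untested sketch; the wrapper name is invented, and snd_hdac_read_parm() is the existing accessor visible in the hunk context above.

#include <sound/hdaudio.h>

/* Sketch: fail the read if the codec cannot be powered up. */
static int example_read_parm_powered(struct hdac_device *codec, hda_nid_t nid,
                                     int parm)
{
        int err, val;

        err = snd_hdac_power_up(codec);
        if (err < 0)
                return err;
        val = snd_hdac_read_parm(codec, nid, parm);
        snd_hdac_power_down(codec);
        return val;
}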
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 0f89df1511dc..94210dcdb6ea 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -34,6 +34,7 @@ int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
34void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus); 34void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
35int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr); 35int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
36void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev); 36void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
37void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
37 38
38#define ebus_to_hbus(ebus) (&(ebus)->bus) 39#define ebus_to_hbus(ebus) (&(ebus)->bus)
39#define hbus_to_ebus(_bus) \ 40#define hbus_to_ebus(_bus) \
@@ -62,6 +63,8 @@ enum hdac_ext_stream_type {
62 * @hstream: hdac_stream 63 * @hstream: hdac_stream
63 * @pphc_addr: processing pipe host stream pointer 64 * @pphc_addr: processing pipe host stream pointer
64 * @pplc_addr: processing pipe link stream pointer 65 * @pplc_addr: processing pipe link stream pointer
66 * @spib_addr: software position in buffers stream pointer
67 * @fifo_addr: software position Max fifos stream pointer
65 * @decoupled: stream host and link is decoupled 68 * @decoupled: stream host and link is decoupled
66 * @link_locked: link is locked 69 * @link_locked: link is locked
67 * @link_prepared: link is prepared 70 * @link_prepared: link is prepared
@@ -73,6 +76,9 @@ struct hdac_ext_stream {
73 void __iomem *pphc_addr; 76 void __iomem *pphc_addr;
74 void __iomem *pplc_addr; 77 void __iomem *pplc_addr;
75 78
79 void __iomem *spib_addr;
80 void __iomem *fifo_addr;
81
76 bool decoupled:1; 82 bool decoupled:1;
77 bool link_locked:1; 83 bool link_locked:1;
78 bool link_prepared; 84 bool link_prepared;
@@ -99,6 +105,11 @@ void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
99 struct hdac_ext_stream *azx_dev, bool decouple); 105 struct hdac_ext_stream *azx_dev, bool decouple);
100void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus); 106void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
101 107
108int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
109 struct hdac_ext_stream *stream, u32 value);
110int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
111 struct hdac_ext_stream *stream);
112
102void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream); 113void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
103void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream); 114void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
104void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream); 115void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
@@ -115,6 +126,7 @@ struct hdac_ext_link {
115 126
116int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link); 127int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
117int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link); 128int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
129int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
118void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link, 130void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
119 int stream); 131 int stream);
120void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, 132void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
@@ -129,4 +141,63 @@ void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
129 writew(((readw(addr + reg) & ~(mask)) | (val)), \ 141 writew(((readw(addr + reg) & ~(mask)) | (val)), \
130 addr + reg) 142 addr + reg)
131 143
144
145struct hdac_ext_device;
146
147/* ops common to all codec drivers */
148struct hdac_ext_codec_ops {
149 int (*build_controls)(struct hdac_ext_device *dev);
150 int (*init)(struct hdac_ext_device *dev);
151 void (*free)(struct hdac_ext_device *dev);
152};
153
154struct hda_dai_map {
155 char *dai_name;
156 hda_nid_t nid;
157 u32 maxbps;
158};
159
160#define HDA_MAX_NIDS 16
161
162/**
163 * struct hdac_ext_device - HDAC Ext device
164 *
165 * @hdac: hdac core device
166 * @nid_list - the dai map which matches the dai-name with the nid
167 * @map_cur_idx - the idx in use in dai_map
168 * @ops - the hda codec ops common to all codec drivers
169 * @pvt_data - private data, for asoc contains asoc codec object
170 */
171struct hdac_ext_device {
172 struct hdac_device hdac;
173 struct hdac_ext_bus *ebus;
174
175 /* soc-dai to nid map */
176 struct hda_dai_map nid_list[HDA_MAX_NIDS];
177 unsigned int map_cur_idx;
178
179 /* codec ops */
180 struct hdac_ext_codec_ops ops;
181
182 void *private_data;
183};
184
185#define to_ehdac_device(dev) (container_of((dev), \
186 struct hdac_ext_device, hdac))
187/*
188 * HD-audio codec base driver
189 */
190struct hdac_ext_driver {
191 struct hdac_driver hdac;
192
193 int (*probe)(struct hdac_ext_device *dev);
194 int (*remove)(struct hdac_ext_device *dev);
195 void (*shutdown)(struct hdac_ext_device *dev);
196};
197
198int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
199void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
200
201#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
202
132#endif /* __SOUND_HDAUDIO_EXT_H */ 203#endif /* __SOUND_HDAUDIO_EXT_H */
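
A skeleton of a codec driver built on the new hdac_ext_driver/hdac_ext_device types and the snd_hda_ext_driver_register() entry point; all "example_" names are placeholders and real drivers would also fill in the embedded hdac driver fields.

#include <linux/module.h>
#include <sound/hdaudio_ext.h>

static int example_ext_probe(struct hdac_ext_device *edev)
{
        /* set up edev->ops and edev->private_data here */
        return 0;
}

static int example_ext_remove(struct hdac_ext_device *edev)
{
        return 0;
}

static struct hdac_ext_driver example_ext_driver = {
        .probe  = example_ext_probe,
        .remove = example_ext_remove,
};

static int __init example_ext_init(void)
{
        return snd_hda_ext_driver_register(&example_ext_driver);
}
module_init(example_ext_init);

static void __exit example_ext_exit(void)
{
        snd_hda_ext_driver_unregister(&example_ext_driver);
}
module_exit(example_ext_exit);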
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 4cecd0c175f6..bb7b2ebfee7b 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -61,6 +61,14 @@ struct rsnd_src_platform_info {
61/* 61/*
62 * flags 62 * flags
63 */ 63 */
64struct rsnd_ctu_platform_info {
65 u32 flags;
66};
67
68struct rsnd_mix_platform_info {
69 u32 flags;
70};
71
64struct rsnd_dvc_platform_info { 72struct rsnd_dvc_platform_info {
65 u32 flags; 73 u32 flags;
66}; 74};
@@ -68,6 +76,8 @@ struct rsnd_dvc_platform_info {
68struct rsnd_dai_path_info { 76struct rsnd_dai_path_info {
69 struct rsnd_ssi_platform_info *ssi; 77 struct rsnd_ssi_platform_info *ssi;
70 struct rsnd_src_platform_info *src; 78 struct rsnd_src_platform_info *src;
79 struct rsnd_ctu_platform_info *ctu;
80 struct rsnd_mix_platform_info *mix;
71 struct rsnd_dvc_platform_info *dvc; 81 struct rsnd_dvc_platform_info *dvc;
72}; 82};
73 83
@@ -93,6 +103,10 @@ struct rcar_snd_info {
93 int ssi_info_nr; 103 int ssi_info_nr;
94 struct rsnd_src_platform_info *src_info; 104 struct rsnd_src_platform_info *src_info;
95 int src_info_nr; 105 int src_info_nr;
106 struct rsnd_ctu_platform_info *ctu_info;
107 int ctu_info_nr;
108 struct rsnd_mix_platform_info *mix_info;
109 int mix_info_nr;
96 struct rsnd_dvc_platform_info *dvc_info; 110 struct rsnd_dvc_platform_info *dvc_info;
97 int dvc_info_nr; 111 int dvc_info_nr;
98 struct rsnd_dai_platform_info *dai_info; 112 struct rsnd_dai_platform_info *dai_info;
diff --git a/include/sound/rt298.h b/include/sound/rt298.h
new file mode 100644
index 000000000000..7fffeaa84f64
--- /dev/null
+++ b/include/sound/rt298.h
@@ -0,0 +1,20 @@
1/*
2 * linux/sound/rt298.h -- Platform data for RT298
3 *
4 * Copyright 2013 Realtek Microelectronics
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __LINUX_SND_RT298_H
12#define __LINUX_SND_RT298_H
13
14struct rt298_platform_data {
15 bool cbj_en; /* combo jack enable */
16 bool gpio2_en; /* GPIO2 enable */
17 bool suspend_power_off; /* power is off during suspend */
18};
19
20#endif
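
A board-file style sketch of passing rt298_platform_data to the codec; the I2C device name and the 0x1c address are assumptions for illustration only.

#include <linux/init.h>
#include <linux/i2c.h>
#include <sound/rt298.h>

static struct rt298_platform_data example_rt298_pdata = {
        .cbj_en            = true,
        .gpio2_en          = false,
        .suspend_power_off = false,
};

static struct i2c_board_info example_rt298_info __initdata = {
        I2C_BOARD_INFO("rt298", 0x1c),          /* assumed name/address */
        .platform_data = &example_rt298_pdata,
};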
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 37d95a898275..5abba037d245 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -397,6 +397,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
397 const struct snd_soc_dapm_route *route, int num); 397 const struct snd_soc_dapm_route *route, int num);
398int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, 398int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
399 const struct snd_soc_dapm_route *route, int num); 399 const struct snd_soc_dapm_route *route, int num);
400void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
400 401
401/* dapm events */ 402/* dapm events */
402void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, 403void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
@@ -511,9 +512,18 @@ struct snd_soc_dapm_route {
511struct snd_soc_dapm_path { 512struct snd_soc_dapm_path {
512 const char *name; 513 const char *name;
513 514
514 /* source (input) and sink (output) widgets */ 515 /*
515 struct snd_soc_dapm_widget *source; 516 * source (input) and sink (output) widgets
516 struct snd_soc_dapm_widget *sink; 517 * The union is for convenience, since it is a lot nicer to type
518 * p->source, rather than p->node[SND_SOC_DAPM_DIR_IN]
519 */
520 union {
521 struct {
522 struct snd_soc_dapm_widget *source;
523 struct snd_soc_dapm_widget *sink;
524 };
525 struct snd_soc_dapm_widget *node[2];
526 };
517 527
518 /* status */ 528 /* status */
519 u32 connect:1; /* source and sink widgets are connected */ 529 u32 connect:1; /* source and sink widgets are connected */
@@ -524,8 +534,7 @@ struct snd_soc_dapm_path {
524 int (*connected)(struct snd_soc_dapm_widget *source, 534 int (*connected)(struct snd_soc_dapm_widget *source,
525 struct snd_soc_dapm_widget *sink); 535 struct snd_soc_dapm_widget *sink);
526 536
527 struct list_head list_source; 537 struct list_head list_node[2];
528 struct list_head list_sink;
529 struct list_head list_kcontrol; 538 struct list_head list_kcontrol;
530 struct list_head list; 539 struct list_head list;
531}; 540};
@@ -559,8 +568,7 @@ struct snd_soc_dapm_widget {
559 unsigned char new_power:1; /* power from this run */ 568 unsigned char new_power:1; /* power from this run */
560 unsigned char power_checked:1; /* power checked this run */ 569 unsigned char power_checked:1; /* power checked this run */
561 unsigned char is_supply:1; /* Widget is a supply type widget */ 570 unsigned char is_supply:1; /* Widget is a supply type widget */
562 unsigned char is_sink:1; /* Widget is a sink type widget */ 571 unsigned char is_ep:2; /* Widget is a endpoint type widget */
563 unsigned char is_source:1; /* Widget is a source type widget */
564 int subseq; /* sort within widget type */ 572 int subseq; /* sort within widget type */
565 573
566 int (*power_check)(struct snd_soc_dapm_widget *w); 574 int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -575,16 +583,14 @@ struct snd_soc_dapm_widget {
575 struct snd_kcontrol **kcontrols; 583 struct snd_kcontrol **kcontrols;
576 struct snd_soc_dobj dobj; 584 struct snd_soc_dobj dobj;
577 585
578 /* widget input and outputs */ 586 /* widget input and output edges */
579 struct list_head sources; 587 struct list_head edges[2];
580 struct list_head sinks;
581 588
582 /* used during DAPM updates */ 589 /* used during DAPM updates */
583 struct list_head work_list; 590 struct list_head work_list;
584 struct list_head power_list; 591 struct list_head power_list;
585 struct list_head dirty; 592 struct list_head dirty;
586 int inputs; 593 int endpoints[2];
587 int outputs;
588 594
589 struct clk *clk; 595 struct clk *clk;
590}; 596};
@@ -672,4 +678,58 @@ static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
672 return dapm->bias_level; 678 return dapm->bias_level;
673} 679}
674 680
681enum snd_soc_dapm_direction {
682 SND_SOC_DAPM_DIR_IN,
683 SND_SOC_DAPM_DIR_OUT
684};
685
686#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
687
688#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
689#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
690
691/**
692 * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
693 * specified direction of a widget
694 * @w: The widget
695 * @dir: Whether to iterate over the paths where the specified widget is the
696 * incoming or the outgoing widget
697 * @p: The path iterator variable
698 */
699#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
700 list_for_each_entry(p, &w->edges[dir], list_node[dir])
701
702/**
703 * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
704 * specified direction of a widget
705 * @w: The widget
706 * @dir: Whether to iterate over the paths where the specified widget is the
707 * incoming or the outgoing widget
708 * @p: The path iterator variable
709 * @next_p: Temporary storage for the next path
710 *
711 * This macro works like snd_soc_dapm_widget_for_each_path, except that
712 * it is safe to remove the current path from the list while iterating.
713 */
714#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
715 list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
716
717/**
718 * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths leaving a
719 * widget
720 * @w: The widget
721 * @p: The path iterator variable
722 */
723#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
724 snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
725
726/**
727 * snd_soc_dapm_widget_for_each_source_path - Iterates over all paths leading to
728 * a widget
729 * @w: The widget
730 * @p: The path iterator variable
731 */
732#define snd_soc_dapm_widget_for_each_source_path(w, p) \
733 snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
734
675#endif 735#endif
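
A sketch of walking a widget's paths with the new direction-indexed API, mirroring how the updated trace event accesses path->node[dir]; the function name is invented.

#include <sound/soc-dapm.h>

/* Count neighbours reachable over connected paths in the given direction. */
static int example_count_connected(struct snd_soc_dapm_widget *w,
                                   enum snd_soc_dapm_direction dir)
{
        struct snd_soc_dapm_path *p;
        int n = 0;

        snd_soc_dapm_widget_for_each_path(w, dir, p) {
                /* p->node[dir] is the neighbour; p->source/p->sink still
                 * work through the union for the fixed directions. */
                if (p->connect && p->node[dir])
                        n++;
        }

        return n;
}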
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 427bc41df3ae..086cd7ff6ddc 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -89,6 +89,13 @@ struct snd_soc_tplg_kcontrol_ops {
89 struct snd_ctl_elem_info *uinfo); 89 struct snd_ctl_elem_info *uinfo);
90}; 90};
91 91
92/* Bytes ext operations, for TLV byte controls */
93struct snd_soc_tplg_bytes_ext_ops {
94 u32 id;
95 int (*get)(unsigned int __user *bytes, unsigned int size);
96 int (*put)(const unsigned int __user *bytes, unsigned int size);
97};
98
92/* 99/*
93 * DAPM widget event handlers - used to map handlers onto widgets. 100 * DAPM widget event handlers - used to map handlers onto widgets.
94 */ 101 */
@@ -136,9 +143,13 @@ struct snd_soc_tplg_ops {
136 int (*manifest)(struct snd_soc_component *, 143 int (*manifest)(struct snd_soc_component *,
137 struct snd_soc_tplg_manifest *); 144 struct snd_soc_tplg_manifest *);
138 145
139 /* bespoke kcontrol handlers available for binding */ 146 /* vendor specific kcontrol handlers available for binding */
140 const struct snd_soc_tplg_kcontrol_ops *io_ops; 147 const struct snd_soc_tplg_kcontrol_ops *io_ops;
141 int io_ops_count; 148 int io_ops_count;
149
150 /* vendor specific bytes ext handlers available for binding */
151 const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
152 int bytes_ext_ops_count;
142}; 153};
143 154
144#ifdef CONFIG_SND_SOC_TOPOLOGY 155#ifdef CONFIG_SND_SOC_TOPOLOGY
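
A sketch of binding the new bytes-ext handlers: a vendor table keyed by control ID, referenced from snd_soc_tplg_ops. The ID value and handler bodies are placeholders.

#include <linux/kernel.h>
#include <sound/soc-topology.h>

static int example_bytes_get(unsigned int __user *bytes, unsigned int size)
{
        return 0;       /* copy data to user space here */
}

static int example_bytes_put(const unsigned int __user *bytes,
                             unsigned int size)
{
        return 0;       /* consume data from user space here */
}

static const struct snd_soc_tplg_bytes_ext_ops example_bytes_ext_ops[] = {
        { .id = 1, .get = example_bytes_get, .put = example_bytes_put },
};

static struct snd_soc_tplg_ops example_tplg_ops = {
        .bytes_ext_ops       = example_bytes_ext_ops,
        .bytes_ext_ops_count = ARRAY_SIZE(example_bytes_ext_ops),
};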
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 93df8bf9d54a..884e728b09d9 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -526,7 +526,8 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
526 526
527#ifdef CONFIG_SND_SOC_AC97_BUS 527#ifdef CONFIG_SND_SOC_AC97_BUS
528struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec); 528struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
529struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); 529struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
530 unsigned int id, unsigned int id_mask);
530void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); 531void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
531 532
532int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops); 533int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
@@ -619,6 +620,7 @@ int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
619 * @pin: name of the pin to update 620 * @pin: name of the pin to update
620 * @mask: bits to check for in reported jack status 621 * @mask: bits to check for in reported jack status
621 * @invert: if non-zero then pin is enabled when status is not reported 622 * @invert: if non-zero then pin is enabled when status is not reported
623 * @list: internal list entry
622 */ 624 */
623struct snd_soc_jack_pin { 625struct snd_soc_jack_pin {
624 struct list_head list; 626 struct list_head list;
@@ -635,7 +637,7 @@ struct snd_soc_jack_pin {
635 * @jack_type: type of jack that is expected for this voltage 637 * @jack_type: type of jack that is expected for this voltage
636 * @debounce_time: debounce_time for jack, codec driver should wait for this 638 * @debounce_time: debounce_time for jack, codec driver should wait for this
637 * duration before reading the adc for voltages 639 * duration before reading the adc for voltages
638 * @:list: list container 640 * @list: internal list entry
639 */ 641 */
640struct snd_soc_jack_zone { 642struct snd_soc_jack_zone {
641 unsigned int min_mv; 643 unsigned int min_mv;
@@ -651,12 +653,12 @@ struct snd_soc_jack_zone {
651 * @gpio: legacy gpio number 653 * @gpio: legacy gpio number
652 * @idx: gpio descriptor index within the function of the GPIO 654 * @idx: gpio descriptor index within the function of the GPIO
653 * consumer device 655 * consumer device
654 * @gpiod_dev GPIO consumer device 656 * @gpiod_dev: GPIO consumer device
655 * @name: gpio name. Also as connection ID for the GPIO consumer 657 * @name: gpio name. Also as connection ID for the GPIO consumer
656 * device function name lookup 658 * device function name lookup
657 * @report: value to report when jack detected 659 * @report: value to report when jack detected
658 * @invert: report presence in low state 660 * @invert: report presence in low state
659 * @debouce_time: debouce time in ms 661 * @debounce_time: debounce time in ms
660 * @wake: enable as wake source 662 * @wake: enable as wake source
661 * @jack_status_check: callback function which overrides the detection 663 * @jack_status_check: callback function which overrides the detection
662 * to provide more complex checks (eg, reading an 664 * to provide more complex checks (eg, reading an
@@ -672,11 +674,13 @@ struct snd_soc_jack_gpio {
672 int debounce_time; 674 int debounce_time;
673 bool wake; 675 bool wake;
674 676
677 /* private: */
675 struct snd_soc_jack *jack; 678 struct snd_soc_jack *jack;
676 struct delayed_work work; 679 struct delayed_work work;
677 struct gpio_desc *desc; 680 struct gpio_desc *desc;
678 681
679 void *data; 682 void *data;
683 /* public: */
680 int (*jack_status_check)(void *data); 684 int (*jack_status_check)(void *data);
681}; 685};
682 686
@@ -758,7 +762,6 @@ struct snd_soc_component {
758 762
759 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */ 763 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
760 unsigned int registered_as_component:1; 764 unsigned int registered_as_component:1;
761 unsigned int probed:1;
762 765
763 struct list_head list; 766 struct list_head list;
764 767
@@ -792,7 +795,6 @@ struct snd_soc_component {
792 795
793 /* Don't use these, use snd_soc_component_get_dapm() */ 796 /* Don't use these, use snd_soc_component_get_dapm() */
794 struct snd_soc_dapm_context dapm; 797 struct snd_soc_dapm_context dapm;
795 struct snd_soc_dapm_context *dapm_ptr;
796 798
797 const struct snd_kcontrol_new *controls; 799 const struct snd_kcontrol_new *controls;
798 unsigned int num_controls; 800 unsigned int num_controls;
@@ -832,9 +834,6 @@ struct snd_soc_codec {
832 /* component */ 834 /* component */
833 struct snd_soc_component component; 835 struct snd_soc_component component;
834 836
835 /* Don't access this directly, use snd_soc_codec_get_dapm() */
836 struct snd_soc_dapm_context dapm;
837
838#ifdef CONFIG_DEBUG_FS 837#ifdef CONFIG_DEBUG_FS
839 struct dentry *debugfs_reg; 838 struct dentry *debugfs_reg;
840#endif 839#endif
@@ -1277,7 +1276,7 @@ static inline struct snd_soc_component *snd_soc_dapm_to_component(
1277static inline struct snd_soc_codec *snd_soc_dapm_to_codec( 1276static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
1278 struct snd_soc_dapm_context *dapm) 1277 struct snd_soc_dapm_context *dapm)
1279{ 1278{
1280 return container_of(dapm, struct snd_soc_codec, dapm); 1279 return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
1281} 1280}
1282 1281
1283/** 1282/**
@@ -1302,7 +1301,7 @@ static inline struct snd_soc_platform *snd_soc_dapm_to_platform(
1302static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm( 1301static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
1303 struct snd_soc_component *component) 1302 struct snd_soc_component *component)
1304{ 1303{
1305 return component->dapm_ptr; 1304 return &component->dapm;
1306} 1305}
1307 1306
1308/** 1307/**
@@ -1314,12 +1313,12 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
1314static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm( 1313static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
1315 struct snd_soc_codec *codec) 1314 struct snd_soc_codec *codec)
1316{ 1315{
1317 return &codec->dapm; 1316 return snd_soc_component_get_dapm(&codec->component);
1318} 1317}
1319 1318
1320/** 1319/**
1321 * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level 1320 * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
1322 * @dapm: The CODEC for which to initialize the DAPM bias level 1321 * @codec: The CODEC for which to initialize the DAPM bias level
1323 * @level: The DAPM level to initialize to 1322 * @level: The DAPM level to initialize to
1324 * 1323 *
1325 * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level(). 1324 * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
@@ -1604,6 +1603,10 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
1604int snd_soc_of_parse_tdm_slot(struct device_node *np, 1603int snd_soc_of_parse_tdm_slot(struct device_node *np,
1605 unsigned int *slots, 1604 unsigned int *slots,
1606 unsigned int *slot_width); 1605 unsigned int *slot_width);
1606void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
1607 struct snd_soc_codec_conf *codec_conf,
1608 struct device_node *of_node,
1609 const char *propname);
1607int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, 1610int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
1608 const char *propname); 1611 const char *propname);
1609unsigned int snd_soc_of_parse_daifmt(struct device_node *np, 1612unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
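
A machine-driver sketch of the new snd_soc_of_parse_audio_prefix() helper; the codec_conf slot and the "example,prefix" DT property name are assumptions for illustration.

#include <sound/soc.h>

static struct snd_soc_codec_conf example_codec_conf;

static void example_parse_prefix(struct snd_soc_card *card)
{
        snd_soc_of_parse_audio_prefix(card, &example_codec_conf,
                                      card->dev->of_node,
                                      "example,prefix");
}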
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 88cf39d96d0f..317a1ed2f4ac 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10#define DAPM_DIRECT "(direct)" 10#define DAPM_DIRECT "(direct)"
11#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
11 12
12struct snd_soc_jack; 13struct snd_soc_jack;
13struct snd_soc_codec; 14struct snd_soc_codec;
@@ -152,62 +153,38 @@ TRACE_EVENT(snd_soc_dapm_walk_done,
152 (int)__entry->path_checks, (int)__entry->neighbour_checks) 153 (int)__entry->path_checks, (int)__entry->neighbour_checks)
153); 154);
154 155
155TRACE_EVENT(snd_soc_dapm_output_path, 156TRACE_EVENT(snd_soc_dapm_path,
156 157
157 TP_PROTO(struct snd_soc_dapm_widget *widget, 158 TP_PROTO(struct snd_soc_dapm_widget *widget,
159 enum snd_soc_dapm_direction dir,
158 struct snd_soc_dapm_path *path), 160 struct snd_soc_dapm_path *path),
159 161
160 TP_ARGS(widget, path), 162 TP_ARGS(widget, dir, path),
161 163
162 TP_STRUCT__entry( 164 TP_STRUCT__entry(
163 __string( wname, widget->name ) 165 __string( wname, widget->name )
164 __string( pname, path->name ? path->name : DAPM_DIRECT) 166 __string( pname, path->name ? path->name : DAPM_DIRECT)
165 __string( psname, path->sink->name ) 167 __string( pnname, path->node[dir]->name )
166 __field( int, path_sink ) 168 __field( int, path_node )
167 __field( int, path_connect ) 169 __field( int, path_connect )
170 __field( int, path_dir )
168 ), 171 ),
169 172
170 TP_fast_assign( 173 TP_fast_assign(
171 __assign_str(wname, widget->name); 174 __assign_str(wname, widget->name);
172 __assign_str(pname, path->name ? path->name : DAPM_DIRECT); 175 __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
173 __assign_str(psname, path->sink->name); 176 __assign_str(pnname, path->node[dir]->name);
174 __entry->path_connect = path->connect; 177 __entry->path_connect = path->connect;
175 __entry->path_sink = (long)path->sink; 178 __entry->path_node = (long)path->node[dir];
179 __entry->path_dir = dir;
176 ), 180 ),
177 181
178 TP_printk("%c%s -> %s -> %s", 182 TP_printk("%c%s %s %s %s %s",
179 (int) __entry->path_sink && 183 (int) __entry->path_node &&
180 (int) __entry->path_connect ? '*' : ' ', 184 (int) __entry->path_connect ? '*' : ' ',
181 __get_str(wname), __get_str(pname), __get_str(psname)) 185 __get_str(wname), DAPM_ARROW(__entry->path_dir),
182); 186 __get_str(pname), DAPM_ARROW(__entry->path_dir),
183 187 __get_str(pnname))
184TRACE_EVENT(snd_soc_dapm_input_path,
185
186 TP_PROTO(struct snd_soc_dapm_widget *widget,
187 struct snd_soc_dapm_path *path),
188
189 TP_ARGS(widget, path),
190
191 TP_STRUCT__entry(
192 __string( wname, widget->name )
193 __string( pname, path->name ? path->name : DAPM_DIRECT)
194 __string( psname, path->source->name )
195 __field( int, path_source )
196 __field( int, path_connect )
197 ),
198
199 TP_fast_assign(
200 __assign_str(wname, widget->name);
201 __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
202 __assign_str(psname, path->source->name);
203 __entry->path_connect = path->connect;
204 __entry->path_source = (long)path->source;
205 ),
206
207 TP_printk("%c%s <- %s <- %s",
208 (int) __entry->path_source &&
209 (int) __entry->path_connect ? '*' : ' ',
210 __get_str(wname), __get_str(pname), __get_str(psname))
211); 188);
212 189
213TRACE_EVENT(snd_soc_dapm_connected, 190TRACE_EVENT(snd_soc_dapm_connected,
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
deleted file mode 100644
index fc733d28117a..000000000000
--- a/include/trace/events/ext3.h
+++ /dev/null
@@ -1,866 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM ext3
3
4#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_EXT3_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(ext3_free_inode,
10 TP_PROTO(struct inode *inode),
11
12 TP_ARGS(inode),
13
14 TP_STRUCT__entry(
15 __field( dev_t, dev )
16 __field( ino_t, ino )
17 __field( umode_t, mode )
18 __field( uid_t, uid )
19 __field( gid_t, gid )
20 __field( blkcnt_t, blocks )
21 ),
22
23 TP_fast_assign(
24 __entry->dev = inode->i_sb->s_dev;
25 __entry->ino = inode->i_ino;
26 __entry->mode = inode->i_mode;
27 __entry->uid = i_uid_read(inode);
28 __entry->gid = i_gid_read(inode);
29 __entry->blocks = inode->i_blocks;
30 ),
31
32 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
33 MAJOR(__entry->dev), MINOR(__entry->dev),
34 (unsigned long) __entry->ino,
35 __entry->mode, __entry->uid, __entry->gid,
36 (unsigned long) __entry->blocks)
37);
38
39TRACE_EVENT(ext3_request_inode,
40 TP_PROTO(struct inode *dir, int mode),
41
42 TP_ARGS(dir, mode),
43
44 TP_STRUCT__entry(
45 __field( dev_t, dev )
46 __field( ino_t, dir )
47 __field( umode_t, mode )
48 ),
49
50 TP_fast_assign(
51 __entry->dev = dir->i_sb->s_dev;
52 __entry->dir = dir->i_ino;
53 __entry->mode = mode;
54 ),
55
56 TP_printk("dev %d,%d dir %lu mode 0%o",
57 MAJOR(__entry->dev), MINOR(__entry->dev),
58 (unsigned long) __entry->dir, __entry->mode)
59);
60
61TRACE_EVENT(ext3_allocate_inode,
62 TP_PROTO(struct inode *inode, struct inode *dir, int mode),
63
64 TP_ARGS(inode, dir, mode),
65
66 TP_STRUCT__entry(
67 __field( dev_t, dev )
68 __field( ino_t, ino )
69 __field( ino_t, dir )
70 __field( umode_t, mode )
71 ),
72
73 TP_fast_assign(
74 __entry->dev = inode->i_sb->s_dev;
75 __entry->ino = inode->i_ino;
76 __entry->dir = dir->i_ino;
77 __entry->mode = mode;
78 ),
79
80 TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
81 MAJOR(__entry->dev), MINOR(__entry->dev),
82 (unsigned long) __entry->ino,
83 (unsigned long) __entry->dir, __entry->mode)
84);
85
86TRACE_EVENT(ext3_evict_inode,
87 TP_PROTO(struct inode *inode),
88
89 TP_ARGS(inode),
90
91 TP_STRUCT__entry(
92 __field( dev_t, dev )
93 __field( ino_t, ino )
94 __field( int, nlink )
95 ),
96
97 TP_fast_assign(
98 __entry->dev = inode->i_sb->s_dev;
99 __entry->ino = inode->i_ino;
100 __entry->nlink = inode->i_nlink;
101 ),
102
103 TP_printk("dev %d,%d ino %lu nlink %d",
104 MAJOR(__entry->dev), MINOR(__entry->dev),
105 (unsigned long) __entry->ino, __entry->nlink)
106);
107
108TRACE_EVENT(ext3_drop_inode,
109 TP_PROTO(struct inode *inode, int drop),
110
111 TP_ARGS(inode, drop),
112
113 TP_STRUCT__entry(
114 __field( dev_t, dev )
115 __field( ino_t, ino )
116 __field( int, drop )
117 ),
118
119 TP_fast_assign(
120 __entry->dev = inode->i_sb->s_dev;
121 __entry->ino = inode->i_ino;
122 __entry->drop = drop;
123 ),
124
125 TP_printk("dev %d,%d ino %lu drop %d",
126 MAJOR(__entry->dev), MINOR(__entry->dev),
127 (unsigned long) __entry->ino, __entry->drop)
128);
129
130TRACE_EVENT(ext3_mark_inode_dirty,
131 TP_PROTO(struct inode *inode, unsigned long IP),
132
133 TP_ARGS(inode, IP),
134
135 TP_STRUCT__entry(
136 __field( dev_t, dev )
137 __field( ino_t, ino )
138 __field(unsigned long, ip )
139 ),
140
141 TP_fast_assign(
142 __entry->dev = inode->i_sb->s_dev;
143 __entry->ino = inode->i_ino;
144 __entry->ip = IP;
145 ),
146
147 TP_printk("dev %d,%d ino %lu caller %pS",
148 MAJOR(__entry->dev), MINOR(__entry->dev),
149 (unsigned long) __entry->ino, (void *)__entry->ip)
150);
151
152TRACE_EVENT(ext3_write_begin,
153 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
154 unsigned int flags),
155
156 TP_ARGS(inode, pos, len, flags),
157
158 TP_STRUCT__entry(
159 __field( dev_t, dev )
160 __field( ino_t, ino )
161 __field( loff_t, pos )
162 __field( unsigned int, len )
163 __field( unsigned int, flags )
164 ),
165
166 TP_fast_assign(
167 __entry->dev = inode->i_sb->s_dev;
168 __entry->ino = inode->i_ino;
169 __entry->pos = pos;
170 __entry->len = len;
171 __entry->flags = flags;
172 ),
173
174 TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
175 MAJOR(__entry->dev), MINOR(__entry->dev),
176 (unsigned long) __entry->ino,
177 (unsigned long long) __entry->pos, __entry->len,
178 __entry->flags)
179);
180
181DECLARE_EVENT_CLASS(ext3__write_end,
182 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
183 unsigned int copied),
184
185 TP_ARGS(inode, pos, len, copied),
186
187 TP_STRUCT__entry(
188 __field( dev_t, dev )
189 __field( ino_t, ino )
190 __field( loff_t, pos )
191 __field( unsigned int, len )
192 __field( unsigned int, copied )
193 ),
194
195 TP_fast_assign(
196 __entry->dev = inode->i_sb->s_dev;
197 __entry->ino = inode->i_ino;
198 __entry->pos = pos;
199 __entry->len = len;
200 __entry->copied = copied;
201 ),
202
203 TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
204 MAJOR(__entry->dev), MINOR(__entry->dev),
205 (unsigned long) __entry->ino,
206 (unsigned long long) __entry->pos, __entry->len,
207 __entry->copied)
208);
209
210DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
211
212 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
213 unsigned int copied),
214
215 TP_ARGS(inode, pos, len, copied)
216);
217
218DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
219
220 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
221 unsigned int copied),
222
223 TP_ARGS(inode, pos, len, copied)
224);
225
226DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
227
228 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
229 unsigned int copied),
230
231 TP_ARGS(inode, pos, len, copied)
232);
233
234DECLARE_EVENT_CLASS(ext3__page_op,
235 TP_PROTO(struct page *page),
236
237 TP_ARGS(page),
238
239 TP_STRUCT__entry(
240 __field( dev_t, dev )
241 __field( ino_t, ino )
242 __field( pgoff_t, index )
243
244 ),
245
246 TP_fast_assign(
247 __entry->index = page->index;
248 __entry->ino = page->mapping->host->i_ino;
249 __entry->dev = page->mapping->host->i_sb->s_dev;
250 ),
251
252 TP_printk("dev %d,%d ino %lu page_index %lu",
253 MAJOR(__entry->dev), MINOR(__entry->dev),
254 (unsigned long) __entry->ino, __entry->index)
255);
256
257DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
258
259 TP_PROTO(struct page *page),
260
261 TP_ARGS(page)
262);
263
264DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
265
266 TP_PROTO(struct page *page),
267
268 TP_ARGS(page)
269);
270
271DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
272
273 TP_PROTO(struct page *page),
274
275 TP_ARGS(page)
276);
277
278DEFINE_EVENT(ext3__page_op, ext3_readpage,
279
280 TP_PROTO(struct page *page),
281
282 TP_ARGS(page)
283);
284
285DEFINE_EVENT(ext3__page_op, ext3_releasepage,
286
287 TP_PROTO(struct page *page),
288
289 TP_ARGS(page)
290);
291
292TRACE_EVENT(ext3_invalidatepage,
293 TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
294
295 TP_ARGS(page, offset, length),
296
297 TP_STRUCT__entry(
298 __field( pgoff_t, index )
299 __field( unsigned int, offset )
300 __field( unsigned int, length )
301 __field( ino_t, ino )
302 __field( dev_t, dev )
303
304 ),
305
306 TP_fast_assign(
307 __entry->index = page->index;
308 __entry->offset = offset;
309 __entry->length = length;
310 __entry->ino = page->mapping->host->i_ino;
311 __entry->dev = page->mapping->host->i_sb->s_dev;
312 ),
313
314 TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
315 MAJOR(__entry->dev), MINOR(__entry->dev),
316 (unsigned long) __entry->ino,
317 __entry->index, __entry->offset, __entry->length)
318);
319
320TRACE_EVENT(ext3_discard_blocks,
321 TP_PROTO(struct super_block *sb, unsigned long blk,
322 unsigned long count),
323
324 TP_ARGS(sb, blk, count),
325
326 TP_STRUCT__entry(
327 __field( dev_t, dev )
328 __field( unsigned long, blk )
329 __field( unsigned long, count )
330
331 ),
332
333 TP_fast_assign(
334 __entry->dev = sb->s_dev;
335 __entry->blk = blk;
336 __entry->count = count;
337 ),
338
339 TP_printk("dev %d,%d blk %lu count %lu",
340 MAJOR(__entry->dev), MINOR(__entry->dev),
341 __entry->blk, __entry->count)
342);
343
344TRACE_EVENT(ext3_request_blocks,
345 TP_PROTO(struct inode *inode, unsigned long goal,
346 unsigned long count),
347
348 TP_ARGS(inode, goal, count),
349
350 TP_STRUCT__entry(
351 __field( dev_t, dev )
352 __field( ino_t, ino )
353 __field( unsigned long, count )
354 __field( unsigned long, goal )
355 ),
356
357 TP_fast_assign(
358 __entry->dev = inode->i_sb->s_dev;
359 __entry->ino = inode->i_ino;
360 __entry->count = count;
361 __entry->goal = goal;
362 ),
363
364 TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
365 MAJOR(__entry->dev), MINOR(__entry->dev),
366 (unsigned long) __entry->ino,
367 __entry->count, __entry->goal)
368);
369
370TRACE_EVENT(ext3_allocate_blocks,
371 TP_PROTO(struct inode *inode, unsigned long goal,
372 unsigned long count, unsigned long block),
373
374 TP_ARGS(inode, goal, count, block),
375
376 TP_STRUCT__entry(
377 __field( dev_t, dev )
378 __field( ino_t, ino )
379 __field( unsigned long, block )
380 __field( unsigned long, count )
381 __field( unsigned long, goal )
382 ),
383
384 TP_fast_assign(
385 __entry->dev = inode->i_sb->s_dev;
386 __entry->ino = inode->i_ino;
387 __entry->block = block;
388 __entry->count = count;
389 __entry->goal = goal;
390 ),
391
392 TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
393 MAJOR(__entry->dev), MINOR(__entry->dev),
394 (unsigned long) __entry->ino,
395 __entry->count, __entry->block,
396 __entry->goal)
397);
398
399TRACE_EVENT(ext3_free_blocks,
400 TP_PROTO(struct inode *inode, unsigned long block,
401 unsigned long count),
402
403 TP_ARGS(inode, block, count),
404
405 TP_STRUCT__entry(
406 __field( dev_t, dev )
407 __field( ino_t, ino )
408 __field( umode_t, mode )
409 __field( unsigned long, block )
410 __field( unsigned long, count )
411 ),
412
413 TP_fast_assign(
414 __entry->dev = inode->i_sb->s_dev;
415 __entry->ino = inode->i_ino;
416 __entry->mode = inode->i_mode;
417 __entry->block = block;
418 __entry->count = count;
419 ),
420
421 TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
422 MAJOR(__entry->dev), MINOR(__entry->dev),
423 (unsigned long) __entry->ino,
424 __entry->mode, __entry->block, __entry->count)
425);
426
427TRACE_EVENT(ext3_sync_file_enter,
428 TP_PROTO(struct file *file, int datasync),
429
430 TP_ARGS(file, datasync),
431
432 TP_STRUCT__entry(
433 __field( dev_t, dev )
434 __field( ino_t, ino )
435 __field( ino_t, parent )
436 __field( int, datasync )
437 ),
438
439 TP_fast_assign(
440 struct dentry *dentry = file->f_path.dentry;
441
442 __entry->dev = d_inode(dentry)->i_sb->s_dev;
443 __entry->ino = d_inode(dentry)->i_ino;
444 __entry->datasync = datasync;
445 __entry->parent = d_inode(dentry->d_parent)->i_ino;
446 ),
447
448 TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
449 MAJOR(__entry->dev), MINOR(__entry->dev),
450 (unsigned long) __entry->ino,
451 (unsigned long) __entry->parent, __entry->datasync)
452);
453
454TRACE_EVENT(ext3_sync_file_exit,
455 TP_PROTO(struct inode *inode, int ret),
456
457 TP_ARGS(inode, ret),
458
459 TP_STRUCT__entry(
460 __field( int, ret )
461 __field( ino_t, ino )
462 __field( dev_t, dev )
463 ),
464
465 TP_fast_assign(
466 __entry->ret = ret;
467 __entry->ino = inode->i_ino;
468 __entry->dev = inode->i_sb->s_dev;
469 ),
470
471 TP_printk("dev %d,%d ino %lu ret %d",
472 MAJOR(__entry->dev), MINOR(__entry->dev),
473 (unsigned long) __entry->ino,
474 __entry->ret)
475);
476
477TRACE_EVENT(ext3_sync_fs,
478 TP_PROTO(struct super_block *sb, int wait),
479
480 TP_ARGS(sb, wait),
481
482 TP_STRUCT__entry(
483 __field( dev_t, dev )
484 __field( int, wait )
485
486 ),
487
488 TP_fast_assign(
489 __entry->dev = sb->s_dev;
490 __entry->wait = wait;
491 ),
492
493 TP_printk("dev %d,%d wait %d",
494 MAJOR(__entry->dev), MINOR(__entry->dev),
495 __entry->wait)
496);
497
498TRACE_EVENT(ext3_rsv_window_add,
499 TP_PROTO(struct super_block *sb,
500 struct ext3_reserve_window_node *rsv_node),
501
502 TP_ARGS(sb, rsv_node),
503
504 TP_STRUCT__entry(
505 __field( unsigned long, start )
506 __field( unsigned long, end )
507 __field( dev_t, dev )
508 ),
509
510 TP_fast_assign(
511 __entry->dev = sb->s_dev;
512 __entry->start = rsv_node->rsv_window._rsv_start;
513 __entry->end = rsv_node->rsv_window._rsv_end;
514 ),
515
516 TP_printk("dev %d,%d start %lu end %lu",
517 MAJOR(__entry->dev), MINOR(__entry->dev),
518 __entry->start, __entry->end)
519);
520
521TRACE_EVENT(ext3_discard_reservation,
522 TP_PROTO(struct inode *inode,
523 struct ext3_reserve_window_node *rsv_node),
524
525 TP_ARGS(inode, rsv_node),
526
527 TP_STRUCT__entry(
528 __field( unsigned long, start )
529 __field( unsigned long, end )
530 __field( ino_t, ino )
531 __field( dev_t, dev )
532 ),
533
534 TP_fast_assign(
535 __entry->start = rsv_node->rsv_window._rsv_start;
536 __entry->end = rsv_node->rsv_window._rsv_end;
537 __entry->ino = inode->i_ino;
538 __entry->dev = inode->i_sb->s_dev;
539 ),
540
541 TP_printk("dev %d,%d ino %lu start %lu end %lu",
542 MAJOR(__entry->dev), MINOR(__entry->dev),
543 (unsigned long)__entry->ino, __entry->start,
544 __entry->end)
545);
546
547TRACE_EVENT(ext3_alloc_new_reservation,
548 TP_PROTO(struct super_block *sb, unsigned long goal),
549
550 TP_ARGS(sb, goal),
551
552 TP_STRUCT__entry(
553 __field( dev_t, dev )
554 __field( unsigned long, goal )
555 ),
556
557 TP_fast_assign(
558 __entry->dev = sb->s_dev;
559 __entry->goal = goal;
560 ),
561
562 TP_printk("dev %d,%d goal %lu",
563 MAJOR(__entry->dev), MINOR(__entry->dev),
564 __entry->goal)
565);
566
567TRACE_EVENT(ext3_reserved,
568 TP_PROTO(struct super_block *sb, unsigned long block,
569 struct ext3_reserve_window_node *rsv_node),
570
571 TP_ARGS(sb, block, rsv_node),
572
573 TP_STRUCT__entry(
574 __field( unsigned long, block )
575 __field( unsigned long, start )
576 __field( unsigned long, end )
577 __field( dev_t, dev )
578 ),
579
580 TP_fast_assign(
581 __entry->block = block;
582 __entry->start = rsv_node->rsv_window._rsv_start;
583 __entry->end = rsv_node->rsv_window._rsv_end;
584 __entry->dev = sb->s_dev;
585 ),
586
587 TP_printk("dev %d,%d block %lu, start %lu end %lu",
588 MAJOR(__entry->dev), MINOR(__entry->dev),
589 __entry->block, __entry->start, __entry->end)
590);
591
592TRACE_EVENT(ext3_forget,
593 TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
594
595 TP_ARGS(inode, is_metadata, block),
596
597 TP_STRUCT__entry(
598 __field( dev_t, dev )
599 __field( ino_t, ino )
600 __field( umode_t, mode )
601 __field( int, is_metadata )
602 __field( unsigned long, block )
603 ),
604
605 TP_fast_assign(
606 __entry->dev = inode->i_sb->s_dev;
607 __entry->ino = inode->i_ino;
608 __entry->mode = inode->i_mode;
609 __entry->is_metadata = is_metadata;
610 __entry->block = block;
611 ),
612
613 TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
614 MAJOR(__entry->dev), MINOR(__entry->dev),
615 (unsigned long) __entry->ino,
616 __entry->mode, __entry->is_metadata, __entry->block)
617);
618
619TRACE_EVENT(ext3_read_block_bitmap,
620 TP_PROTO(struct super_block *sb, unsigned int group),
621
622 TP_ARGS(sb, group),
623
624 TP_STRUCT__entry(
625 __field( dev_t, dev )
626 __field( __u32, group )
627
628 ),
629
630 TP_fast_assign(
631 __entry->dev = sb->s_dev;
632 __entry->group = group;
633 ),
634
635 TP_printk("dev %d,%d group %u",
636 MAJOR(__entry->dev), MINOR(__entry->dev),
637 __entry->group)
638);
639
640TRACE_EVENT(ext3_direct_IO_enter,
641 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
642
643 TP_ARGS(inode, offset, len, rw),
644
645 TP_STRUCT__entry(
646 __field( ino_t, ino )
647 __field( dev_t, dev )
648 __field( loff_t, pos )
649 __field( unsigned long, len )
650 __field( int, rw )
651 ),
652
653 TP_fast_assign(
654 __entry->ino = inode->i_ino;
655 __entry->dev = inode->i_sb->s_dev;
656 __entry->pos = offset;
657 __entry->len = len;
658 __entry->rw = rw;
659 ),
660
661 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
662 MAJOR(__entry->dev), MINOR(__entry->dev),
663 (unsigned long) __entry->ino,
664 (unsigned long long) __entry->pos, __entry->len,
665 __entry->rw)
666);
667
668TRACE_EVENT(ext3_direct_IO_exit,
669 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
670 int rw, int ret),
671
672 TP_ARGS(inode, offset, len, rw, ret),
673
674 TP_STRUCT__entry(
675 __field( ino_t, ino )
676 __field( dev_t, dev )
677 __field( loff_t, pos )
678 __field( unsigned long, len )
679 __field( int, rw )
680 __field( int, ret )
681 ),
682
683 TP_fast_assign(
684 __entry->ino = inode->i_ino;
685 __entry->dev = inode->i_sb->s_dev;
686 __entry->pos = offset;
687 __entry->len = len;
688 __entry->rw = rw;
689 __entry->ret = ret;
690 ),
691
692 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
693 MAJOR(__entry->dev), MINOR(__entry->dev),
694 (unsigned long) __entry->ino,
695 (unsigned long long) __entry->pos, __entry->len,
696 __entry->rw, __entry->ret)
697);
698
699TRACE_EVENT(ext3_unlink_enter,
700 TP_PROTO(struct inode *parent, struct dentry *dentry),
701
702 TP_ARGS(parent, dentry),
703
704 TP_STRUCT__entry(
705 __field( ino_t, parent )
706 __field( ino_t, ino )
707 __field( loff_t, size )
708 __field( dev_t, dev )
709 ),
710
711 TP_fast_assign(
712 __entry->parent = parent->i_ino;
713 __entry->ino = d_inode(dentry)->i_ino;
714 __entry->size = d_inode(dentry)->i_size;
715 __entry->dev = d_inode(dentry)->i_sb->s_dev;
716 ),
717
718 TP_printk("dev %d,%d ino %lu size %lld parent %ld",
719 MAJOR(__entry->dev), MINOR(__entry->dev),
720 (unsigned long) __entry->ino,
721 (unsigned long long)__entry->size,
722 (unsigned long) __entry->parent)
723);
724
725TRACE_EVENT(ext3_unlink_exit,
726 TP_PROTO(struct dentry *dentry, int ret),
727
728 TP_ARGS(dentry, ret),
729
730 TP_STRUCT__entry(
731 __field( ino_t, ino )
732 __field( dev_t, dev )
733 __field( int, ret )
734 ),
735
736 TP_fast_assign(
737 __entry->ino = d_inode(dentry)->i_ino;
738 __entry->dev = d_inode(dentry)->i_sb->s_dev;
739 __entry->ret = ret;
740 ),
741
742 TP_printk("dev %d,%d ino %lu ret %d",
743 MAJOR(__entry->dev), MINOR(__entry->dev),
744 (unsigned long) __entry->ino,
745 __entry->ret)
746);
747
748DECLARE_EVENT_CLASS(ext3__truncate,
749 TP_PROTO(struct inode *inode),
750
751 TP_ARGS(inode),
752
753 TP_STRUCT__entry(
754 __field( ino_t, ino )
755 __field( dev_t, dev )
756 __field( blkcnt_t, blocks )
757 ),
758
759 TP_fast_assign(
760 __entry->ino = inode->i_ino;
761 __entry->dev = inode->i_sb->s_dev;
762 __entry->blocks = inode->i_blocks;
763 ),
764
765 TP_printk("dev %d,%d ino %lu blocks %lu",
766 MAJOR(__entry->dev), MINOR(__entry->dev),
767 (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
768);
769
770DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
771
772 TP_PROTO(struct inode *inode),
773
774 TP_ARGS(inode)
775);
776
777DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
778
779 TP_PROTO(struct inode *inode),
780
781 TP_ARGS(inode)
782);
783
784TRACE_EVENT(ext3_get_blocks_enter,
785 TP_PROTO(struct inode *inode, unsigned long lblk,
786 unsigned long len, int create),
787
788 TP_ARGS(inode, lblk, len, create),
789
790 TP_STRUCT__entry(
791 __field( ino_t, ino )
792 __field( dev_t, dev )
793 __field( unsigned long, lblk )
794 __field( unsigned long, len )
795 __field( int, create )
796 ),
797
798 TP_fast_assign(
799 __entry->ino = inode->i_ino;
800 __entry->dev = inode->i_sb->s_dev;
801 __entry->lblk = lblk;
802 __entry->len = len;
803 __entry->create = create;
804 ),
805
806 TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
807 MAJOR(__entry->dev), MINOR(__entry->dev),
808 (unsigned long) __entry->ino,
809 __entry->lblk, __entry->len, __entry->create)
810);
811
812TRACE_EVENT(ext3_get_blocks_exit,
813 TP_PROTO(struct inode *inode, unsigned long lblk,
814 unsigned long pblk, unsigned long len, int ret),
815
816 TP_ARGS(inode, lblk, pblk, len, ret),
817
818 TP_STRUCT__entry(
819 __field( ino_t, ino )
820 __field( dev_t, dev )
821 __field( unsigned long, lblk )
822 __field( unsigned long, pblk )
823 __field( unsigned long, len )
824 __field( int, ret )
825 ),
826
827 TP_fast_assign(
828 __entry->ino = inode->i_ino;
829 __entry->dev = inode->i_sb->s_dev;
830 __entry->lblk = lblk;
831 __entry->pblk = pblk;
832 __entry->len = len;
833 __entry->ret = ret;
834 ),
835
836 TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
837 MAJOR(__entry->dev), MINOR(__entry->dev),
838 (unsigned long) __entry->ino,
839 __entry->lblk, __entry->pblk,
840 __entry->len, __entry->ret)
841);
842
843TRACE_EVENT(ext3_load_inode,
844 TP_PROTO(struct inode *inode),
845
846 TP_ARGS(inode),
847
848 TP_STRUCT__entry(
849 __field( ino_t, ino )
850 __field( dev_t, dev )
851 ),
852
853 TP_fast_assign(
854 __entry->ino = inode->i_ino;
855 __entry->dev = inode->i_sb->s_dev;
856 ),
857
858 TP_printk("dev %d,%d ino %lu",
859 MAJOR(__entry->dev), MINOR(__entry->dev),
860 (unsigned long) __entry->ino)
861);
862
863#endif /* _TRACE_EXT3_H */
864
865/* This part must be outside protection */
866#include <trace/define_trace.h>
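For readers less familiar with the TRACE_EVENT machinery, here is a minimal sketch of how a header like the one removed above was consumed (illustrative only; the function name below is hypothetical, and ext3 itself is going away with this series): exactly one .c file defines CREATE_TRACE_POINTS before including the header, and call sites invoke the generated trace_<event>() stubs.

/* Sketch, not from this patch: instantiate the tracepoints in one unit ... */
#define CREATE_TRACE_POINTS
#include <trace/events/ext3.h>

/* ... and emit an event from a call site; the arguments are only copied
 * into the ring buffer when the event is enabled. */
static void example_free_path(struct inode *inode, unsigned long block,
			      unsigned long count)
{
	trace_ext3_free_blocks(inode, block, count);
}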
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 04856a2d8c82..a01946514b5a 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1099,11 +1099,11 @@ TRACE_EVENT(f2fs_lookup_extent_tree_start,
1099TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, 1099TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
1100 1100
1101 TP_PROTO(struct inode *inode, unsigned int pgofs, 1101 TP_PROTO(struct inode *inode, unsigned int pgofs,
1102 struct extent_node *en), 1102 struct extent_info *ei),
1103 1103
1104 TP_ARGS(inode, pgofs, en), 1104 TP_ARGS(inode, pgofs, ei),
1105 1105
1106 TP_CONDITION(en), 1106 TP_CONDITION(ei),
1107 1107
1108 TP_STRUCT__entry( 1108 TP_STRUCT__entry(
1109 __field(dev_t, dev) 1109 __field(dev_t, dev)
@@ -1118,9 +1118,9 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
1118 __entry->dev = inode->i_sb->s_dev; 1118 __entry->dev = inode->i_sb->s_dev;
1119 __entry->ino = inode->i_ino; 1119 __entry->ino = inode->i_ino;
1120 __entry->pgofs = pgofs; 1120 __entry->pgofs = pgofs;
1121 __entry->fofs = en->ei.fofs; 1121 __entry->fofs = ei->fofs;
1122 __entry->blk = en->ei.blk; 1122 __entry->blk = ei->blk;
1123 __entry->len = en->ei.len; 1123 __entry->len = ei->len;
1124 ), 1124 ),
1125 1125
1126 TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " 1126 TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
new file mode 100644
index 000000000000..833cfcb6750d
--- /dev/null
+++ b/include/trace/events/fib.h
@@ -0,0 +1,113 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM fib
3
4#if !defined(_TRACE_FIB_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_FIB_H
6
7#include <linux/skbuff.h>
8#include <linux/netdevice.h>
9#include <net/ip_fib.h>
10#include <linux/tracepoint.h>
11
12TRACE_EVENT(fib_table_lookup,
13
14 TP_PROTO(u32 tb_id, const struct flowi4 *flp),
15
16 TP_ARGS(tb_id, flp),
17
18 TP_STRUCT__entry(
19 __field( u32, tb_id )
20 __field( int, oif )
21 __field( int, iif )
22 __field( __u8, tos )
23 __field( __u8, scope )
24 __field( __u8, flags )
25 __array( __u8, src, 4 )
26 __array( __u8, dst, 4 )
27 ),
28
29 TP_fast_assign(
30 __be32 *p32;
31
32 __entry->tb_id = tb_id;
33 __entry->oif = flp->flowi4_oif;
34 __entry->iif = flp->flowi4_iif;
35 __entry->tos = flp->flowi4_tos;
36 __entry->scope = flp->flowi4_scope;
37 __entry->flags = flp->flowi4_flags;
38
39 p32 = (__be32 *) __entry->src;
40 *p32 = flp->saddr;
41
42 p32 = (__be32 *) __entry->dst;
43 *p32 = flp->daddr;
44 ),
45
46 TP_printk("table %u oif %d iif %d src %pI4 dst %pI4 tos %d scope %d flags %x",
47 __entry->tb_id, __entry->oif, __entry->iif,
48 __entry->src, __entry->dst, __entry->tos, __entry->scope,
49 __entry->flags)
50);
51
52TRACE_EVENT(fib_table_lookup_nh,
53
54 TP_PROTO(const struct fib_nh *nh),
55
56 TP_ARGS(nh),
57
58 TP_STRUCT__entry(
59 __string( name, nh->nh_dev->name)
60 __field( int, oif )
61 __array( __u8, src, 4 )
62 ),
63
64 TP_fast_assign(
65 __be32 *p32 = (__be32 *) __entry->src;
66
67 __assign_str(name, nh->nh_dev ? nh->nh_dev->name : "not set");
68 __entry->oif = nh->nh_oif;
69 *p32 = nh->nh_saddr;
70 ),
71
72 TP_printk("nexthop dev %s oif %d src %pI4",
73 __get_str(name), __entry->oif, __entry->src)
74);
75
76TRACE_EVENT(fib_validate_source,
77
78 TP_PROTO(const struct net_device *dev, const struct flowi4 *flp),
79
80 TP_ARGS(dev, flp),
81
82 TP_STRUCT__entry(
83 __string( name, dev->name )
84 __field( int, oif )
85 __field( int, iif )
86 __field( __u8, tos )
87 __array( __u8, src, 4 )
88 __array( __u8, dst, 4 )
89 ),
90
91 TP_fast_assign(
92 __be32 *p32;
93
94 __assign_str(name, dev ? dev->name : "not set");
95 __entry->oif = flp->flowi4_oif;
96 __entry->iif = flp->flowi4_iif;
97 __entry->tos = flp->flowi4_tos;
98
99 p32 = (__be32 *) __entry->src;
100 *p32 = flp->saddr;
101
102 p32 = (__be32 *) __entry->dst;
103 *p32 = flp->daddr;
104 ),
105
106 TP_printk("dev %s oif %d iif %d tos %d src %pI4 dst %pI4",
107 __get_str(name), __entry->oif, __entry->iif, __entry->tos,
108 __entry->src, __entry->dst)
109);
110#endif /* _TRACE_FIB_H */
111
112/* This part must be outside protection */
113#include <trace/define_trace.h>
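A hedged sketch of how the new fib events might be emitted from the IPv4 lookup path (the function below is illustrative, not taken from this patch): the header generates trace_fib_table_lookup(), trace_fib_table_lookup_nh() and trace_fib_validate_source() stubs matching the TP_PROTO signatures above.

#include <trace/events/fib.h>

/* Hypothetical call site; the real hooks live in net/ipv4. */
static int example_fib_lookup(struct fib_table *tb, const struct flowi4 *flp)
{
	trace_fib_table_lookup(tb->tb_id, flp);
	/* ... perform the trie lookup, then trace the chosen nexthop ... */
	return 0;
}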
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
deleted file mode 100644
index da6f2591c25e..000000000000
--- a/include/trace/events/jbd.h
+++ /dev/null
@@ -1,194 +0,0 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM jbd
3
4#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_JBD_H
6
7#include <linux/jbd.h>
8#include <linux/tracepoint.h>
9
10TRACE_EVENT(jbd_checkpoint,
11
12 TP_PROTO(journal_t *journal, int result),
13
14 TP_ARGS(journal, result),
15
16 TP_STRUCT__entry(
17 __field( dev_t, dev )
18 __field( int, result )
19 ),
20
21 TP_fast_assign(
22 __entry->dev = journal->j_fs_dev->bd_dev;
23 __entry->result = result;
24 ),
25
26 TP_printk("dev %d,%d result %d",
27 MAJOR(__entry->dev), MINOR(__entry->dev),
28 __entry->result)
29);
30
31DECLARE_EVENT_CLASS(jbd_commit,
32
33 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
34
35 TP_ARGS(journal, commit_transaction),
36
37 TP_STRUCT__entry(
38 __field( dev_t, dev )
39 __field( int, transaction )
40 ),
41
42 TP_fast_assign(
43 __entry->dev = journal->j_fs_dev->bd_dev;
44 __entry->transaction = commit_transaction->t_tid;
45 ),
46
47 TP_printk("dev %d,%d transaction %d",
48 MAJOR(__entry->dev), MINOR(__entry->dev),
49 __entry->transaction)
50);
51
52DEFINE_EVENT(jbd_commit, jbd_start_commit,
53
54 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
55
56 TP_ARGS(journal, commit_transaction)
57);
58
59DEFINE_EVENT(jbd_commit, jbd_commit_locking,
60
61 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
62
63 TP_ARGS(journal, commit_transaction)
64);
65
66DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
67
68 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
69
70 TP_ARGS(journal, commit_transaction)
71);
72
73DEFINE_EVENT(jbd_commit, jbd_commit_logging,
74
75 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
76
77 TP_ARGS(journal, commit_transaction)
78);
79
80TRACE_EVENT(jbd_drop_transaction,
81
82 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
83
84 TP_ARGS(journal, commit_transaction),
85
86 TP_STRUCT__entry(
87 __field( dev_t, dev )
88 __field( int, transaction )
89 ),
90
91 TP_fast_assign(
92 __entry->dev = journal->j_fs_dev->bd_dev;
93 __entry->transaction = commit_transaction->t_tid;
94 ),
95
96 TP_printk("dev %d,%d transaction %d",
97 MAJOR(__entry->dev), MINOR(__entry->dev),
98 __entry->transaction)
99);
100
101TRACE_EVENT(jbd_end_commit,
102 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
103
104 TP_ARGS(journal, commit_transaction),
105
106 TP_STRUCT__entry(
107 __field( dev_t, dev )
108 __field( int, transaction )
109 __field( int, head )
110 ),
111
112 TP_fast_assign(
113 __entry->dev = journal->j_fs_dev->bd_dev;
114 __entry->transaction = commit_transaction->t_tid;
115 __entry->head = journal->j_tail_sequence;
116 ),
117
118 TP_printk("dev %d,%d transaction %d head %d",
119 MAJOR(__entry->dev), MINOR(__entry->dev),
120 __entry->transaction, __entry->head)
121);
122
123TRACE_EVENT(jbd_do_submit_data,
124 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
125
126 TP_ARGS(journal, commit_transaction),
127
128 TP_STRUCT__entry(
129 __field( dev_t, dev )
130 __field( int, transaction )
131 ),
132
133 TP_fast_assign(
134 __entry->dev = journal->j_fs_dev->bd_dev;
135 __entry->transaction = commit_transaction->t_tid;
136 ),
137
138 TP_printk("dev %d,%d transaction %d",
139 MAJOR(__entry->dev), MINOR(__entry->dev),
140 __entry->transaction)
141);
142
143TRACE_EVENT(jbd_cleanup_journal_tail,
144
145 TP_PROTO(journal_t *journal, tid_t first_tid,
146 unsigned long block_nr, unsigned long freed),
147
148 TP_ARGS(journal, first_tid, block_nr, freed),
149
150 TP_STRUCT__entry(
151 __field( dev_t, dev )
152 __field( tid_t, tail_sequence )
153 __field( tid_t, first_tid )
154 __field(unsigned long, block_nr )
155 __field(unsigned long, freed )
156 ),
157
158 TP_fast_assign(
159 __entry->dev = journal->j_fs_dev->bd_dev;
160 __entry->tail_sequence = journal->j_tail_sequence;
161 __entry->first_tid = first_tid;
162 __entry->block_nr = block_nr;
163 __entry->freed = freed;
164 ),
165
166 TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
167 MAJOR(__entry->dev), MINOR(__entry->dev),
168 __entry->tail_sequence, __entry->first_tid,
169 __entry->block_nr, __entry->freed)
170);
171
172TRACE_EVENT(journal_write_superblock,
173 TP_PROTO(journal_t *journal, int write_op),
174
175 TP_ARGS(journal, write_op),
176
177 TP_STRUCT__entry(
178 __field( dev_t, dev )
179 __field( int, write_op )
180 ),
181
182 TP_fast_assign(
183 __entry->dev = journal->j_fs_dev->bd_dev;
184 __entry->write_op = write_op;
185 ),
186
187 TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
188 MINOR(__entry->dev), __entry->write_op)
189);
190
191#endif /* _TRACE_JBD_H */
192
193/* This part must be outside protection */
194#include <trace/define_trace.h>
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index a44062da684b..d6f83222a6a1 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -358,6 +358,36 @@ TRACE_EVENT(
358 358
359#endif 359#endif
360 360
361TRACE_EVENT(kvm_halt_poll_ns,
362 TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
363 TP_ARGS(grow, vcpu_id, new, old),
364
365 TP_STRUCT__entry(
366 __field(bool, grow)
367 __field(unsigned int, vcpu_id)
368 __field(int, new)
369 __field(int, old)
370 ),
371
372 TP_fast_assign(
373 __entry->grow = grow;
374 __entry->vcpu_id = vcpu_id;
375 __entry->new = new;
376 __entry->old = old;
377 ),
378
379 TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
380 __entry->vcpu_id,
381 __entry->new,
382 __entry->grow ? "grow" : "shrink",
383 __entry->old)
384);
385
386#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
387 trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
388#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
389 trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
390
361#endif /* _TRACE_KVM_MAIN_H */ 391#endif /* _TRACE_KVM_MAIN_H */
362 392
363/* This part must be outside protection */ 393/* This part must be outside protection */
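A sketch of how the grow/shrink wrappers above might be used by the halt-polling code (illustrative; the function and the vcpu->halt_poll_ns field are assumptions, not taken from this hunk):

static void example_grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
	int old = vcpu->halt_poll_ns;	/* assumed per-vcpu field */
	int new = old ? old * 2 : 10000;

	vcpu->halt_poll_ns = new;
	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, new, old);
}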
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index c78e88ce5ea3..ef72c4aada56 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -661,7 +661,6 @@ TRACE_EVENT(rcu_torture_read,
661 * Tracepoint for _rcu_barrier() execution. The string "s" describes 661 * Tracepoint for _rcu_barrier() execution. The string "s" describes
662 * the _rcu_barrier phase: 662 * the _rcu_barrier phase:
663 * "Begin": _rcu_barrier() started. 663 * "Begin": _rcu_barrier() started.
664 * "Check": _rcu_barrier() checking for piggybacking.
665 * "EarlyExit": _rcu_barrier() piggybacked, thus early exit. 664 * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
666 * "Inc1": _rcu_barrier() piggyback check counter incremented. 665 * "Inc1": _rcu_barrier() piggyback check counter incremented.
667 * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU 666 * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index d57a575fe31f..539d6bc3216a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -55,9 +55,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
55 */ 55 */
56DECLARE_EVENT_CLASS(sched_wakeup_template, 56DECLARE_EVENT_CLASS(sched_wakeup_template,
57 57
58 TP_PROTO(struct task_struct *p, int success), 58 TP_PROTO(struct task_struct *p),
59 59
60 TP_ARGS(__perf_task(p), success), 60 TP_ARGS(__perf_task(p)),
61 61
62 TP_STRUCT__entry( 62 TP_STRUCT__entry(
63 __array( char, comm, TASK_COMM_LEN ) 63 __array( char, comm, TASK_COMM_LEN )
@@ -71,25 +71,37 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
71 memcpy(__entry->comm, p->comm, TASK_COMM_LEN); 71 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
72 __entry->pid = p->pid; 72 __entry->pid = p->pid;
73 __entry->prio = p->prio; 73 __entry->prio = p->prio;
74 __entry->success = success; 74 __entry->success = 1; /* rudiment, kill when possible */
75 __entry->target_cpu = task_cpu(p); 75 __entry->target_cpu = task_cpu(p);
76 ), 76 ),
77 77
78 TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", 78 TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
79 __entry->comm, __entry->pid, __entry->prio, 79 __entry->comm, __entry->pid, __entry->prio,
80 __entry->success, __entry->target_cpu) 80 __entry->target_cpu)
81); 81);
82 82
83/*
84 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
85 * called from the waking context.
86 */
87DEFINE_EVENT(sched_wakeup_template, sched_waking,
88 TP_PROTO(struct task_struct *p),
89 TP_ARGS(p));
90
91/*
 92 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 93 * It is not always called from the waking context.
94 */
83DEFINE_EVENT(sched_wakeup_template, sched_wakeup, 95DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
84 TP_PROTO(struct task_struct *p, int success), 96 TP_PROTO(struct task_struct *p),
85 TP_ARGS(p, success)); 97 TP_ARGS(p));
86 98
87/* 99/*
88 * Tracepoint for waking up a new task: 100 * Tracepoint for waking up a new task:
89 */ 101 */
90DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, 102DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
91 TP_PROTO(struct task_struct *p, int success), 103 TP_PROTO(struct task_struct *p),
92 TP_ARGS(p, success)); 104 TP_ARGS(p));
93 105
94#ifdef CREATE_TRACE_POINTS 106#ifdef CREATE_TRACE_POINTS
95static inline long __trace_sched_switch_state(struct task_struct *p) 107static inline long __trace_sched_switch_state(struct task_struct *p)
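After this change the wakeup-family tracepoints take only the task, and the new sched_waking event fires in the waking context before the task is enqueued. A call-site sketch (illustrative; the real callers are in kernel/sched/core.c):

static void example_wakeup_path(struct task_struct *p)
{
	trace_sched_waking(p);
	/* ... select a CPU, enqueue the task, set p->state to TASK_RUNNING ... */
	trace_sched_wakeup(p);
}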
diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h
new file mode 100644
index 000000000000..62f005ef4c7e
--- /dev/null
+++ b/include/trace/events/spmi.h
@@ -0,0 +1,135 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM spmi
3
4#if !defined(_TRACE_SPMI_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SPMI_H
6
7#include <linux/spmi.h>
8#include <linux/tracepoint.h>
9
10/*
11 * drivers/spmi/spmi.c
12 */
13
14TRACE_EVENT(spmi_write_begin,
15 TP_PROTO(u8 opcode, u8 sid, u16 addr, u8 len, const u8 *buf),
16 TP_ARGS(opcode, sid, addr, len, buf),
17
18 TP_STRUCT__entry(
19 __field ( u8, opcode )
20 __field ( u8, sid )
21 __field ( u16, addr )
22 __field ( u8, len )
23 __dynamic_array ( u8, buf, len + 1 )
24 ),
25
26 TP_fast_assign(
27 __entry->opcode = opcode;
28 __entry->sid = sid;
29 __entry->addr = addr;
30 __entry->len = len + 1;
31 memcpy(__get_dynamic_array(buf), buf, len + 1);
32 ),
33
34 TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]",
35 (int)__entry->opcode, (int)__entry->sid,
36 (int)__entry->addr, (int)__entry->len,
37 (int)__entry->len, __get_dynamic_array(buf))
38);
39
40TRACE_EVENT(spmi_write_end,
41 TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret),
42 TP_ARGS(opcode, sid, addr, ret),
43
44 TP_STRUCT__entry(
45 __field ( u8, opcode )
46 __field ( u8, sid )
47 __field ( u16, addr )
48 __field ( int, ret )
49 ),
50
51 TP_fast_assign(
52 __entry->opcode = opcode;
53 __entry->sid = sid;
54 __entry->addr = addr;
55 __entry->ret = ret;
56 ),
57
58 TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d",
59 (int)__entry->opcode, (int)__entry->sid,
60 (int)__entry->addr, __entry->ret)
61);
62
63TRACE_EVENT(spmi_read_begin,
64 TP_PROTO(u8 opcode, u8 sid, u16 addr),
65 TP_ARGS(opcode, sid, addr),
66
67 TP_STRUCT__entry(
68 __field ( u8, opcode )
69 __field ( u8, sid )
70 __field ( u16, addr )
71 ),
72
73 TP_fast_assign(
74 __entry->opcode = opcode;
75 __entry->sid = sid;
76 __entry->addr = addr;
77 ),
78
79 TP_printk("opc=%d sid=%02d addr=0x%04x",
80 (int)__entry->opcode, (int)__entry->sid,
81 (int)__entry->addr)
82);
83
84TRACE_EVENT(spmi_read_end,
85 TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret, u8 len, const u8 *buf),
86 TP_ARGS(opcode, sid, addr, ret, len, buf),
87
88 TP_STRUCT__entry(
89 __field ( u8, opcode )
90 __field ( u8, sid )
91 __field ( u16, addr )
92 __field ( int, ret )
93 __field ( u8, len )
94 __dynamic_array ( u8, buf, len + 1 )
95 ),
96
97 TP_fast_assign(
98 __entry->opcode = opcode;
99 __entry->sid = sid;
100 __entry->addr = addr;
101 __entry->ret = ret;
102 __entry->len = len + 1;
103 memcpy(__get_dynamic_array(buf), buf, len + 1);
104 ),
105
106 TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]",
107 (int)__entry->opcode, (int)__entry->sid,
108 (int)__entry->addr, __entry->ret, (int)__entry->len,
109 (int)__entry->len, __get_dynamic_array(buf))
110);
111
112TRACE_EVENT(spmi_cmd,
113 TP_PROTO(u8 opcode, u8 sid, int ret),
114 TP_ARGS(opcode, sid, ret),
115
116 TP_STRUCT__entry(
117 __field ( u8, opcode )
118 __field ( u8, sid )
119 __field ( int, ret )
120 ),
121
122 TP_fast_assign(
123 __entry->opcode = opcode;
124 __entry->sid = sid;
125 __entry->ret = ret;
126 ),
127
128 TP_printk("opc=%d sid=%02d ret=%d", (int)__entry->opcode,
 129 (int)__entry->sid, __entry->ret)
130);
131
132#endif /* _TRACE_SPMI_H */
133
134/* This part must be outside protection */
135#include <trace/define_trace.h>
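A hedged sketch of how a bus write might be bracketed with the new spmi events (the function is hypothetical; only the trace_* stubs come from the header above). Note that TP_fast_assign records len + 1 bytes, so the trace argument follows the SPMI count-minus-one encoding.

static int example_spmi_write(u8 opc, u8 sid, u16 addr,
			      const u8 *buf, size_t len)
{
	int ret;

	/* pass byte count minus one; the event copies len + 1 bytes */
	trace_spmi_write_begin(opc, sid, addr, len - 1, buf);
	ret = 0;	/* ... perform the bus transaction here ... */
	trace_spmi_write_end(opc, sid, addr, ret);
	return ret;
}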
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index fd1a02cb3c82..003dca933803 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue,
529 529
530 TP_STRUCT__entry( 530 TP_STRUCT__entry(
531 __field(struct svc_xprt *, xprt) 531 __field(struct svc_xprt *, xprt)
532 __field(struct svc_rqst *, rqst) 532 __field_struct(struct sockaddr_storage, ss)
533 __field(int, pid)
534 __field(unsigned long, flags)
533 ), 535 ),
534 536
535 TP_fast_assign( 537 TP_fast_assign(
536 __entry->xprt = xprt; 538 __entry->xprt = xprt;
537 __entry->rqst = rqst; 539 xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
540 __entry->pid = rqst? rqst->rq_task->pid : 0;
541 __entry->flags = xprt ? xprt->xpt_flags : 0;
538 ), 542 ),
539 543
540 TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, 544 TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
541 (struct sockaddr *)&__entry->xprt->xpt_remote, 545 (struct sockaddr *)&__entry->ss,
542 __entry->rqst ? __entry->rqst->rq_task->pid : 0, 546 __entry->pid, show_svc_xprt_flags(__entry->flags))
543 show_svc_xprt_flags(__entry->xprt->xpt_flags))
544); 547);
545 548
546TRACE_EVENT(svc_xprt_dequeue, 549TRACE_EVENT(svc_xprt_dequeue,
@@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt,
589 TP_STRUCT__entry( 592 TP_STRUCT__entry(
590 __field(struct svc_xprt *, xprt) 593 __field(struct svc_xprt *, xprt)
591 __field(int, len) 594 __field(int, len)
595 __field_struct(struct sockaddr_storage, ss)
596 __field(unsigned long, flags)
592 ), 597 ),
593 598
594 TP_fast_assign( 599 TP_fast_assign(
595 __entry->xprt = xprt; 600 __entry->xprt = xprt;
601 xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
596 __entry->len = len; 602 __entry->len = len;
603 __entry->flags = xprt ? xprt->xpt_flags : 0;
597 ), 604 ),
598 605
599 TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, 606 TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
600 (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len, 607 (struct sockaddr *)&__entry->ss,
601 show_svc_xprt_flags(__entry->xprt->xpt_flags)) 608 __entry->len, show_svc_xprt_flags(__entry->flags))
602); 609);
603#endif /* _TRACE_SUNRPC_H */ 610#endif /* _TRACE_SUNRPC_H */
604 611
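The two hunks above follow one pattern: snapshot everything TP_printk needs into __entry at TP_fast_assign time, instead of dereferencing xprt/rqst when the ring buffer is read, possibly long after those objects are gone. A generic header-style sketch of that pattern (illustrative, not from this patch):

TRACE_EVENT(example_snapshot,
	TP_PROTO(struct sockaddr_storage *remote, unsigned long flags),
	TP_ARGS(remote, flags),
	TP_STRUCT__entry(
		__field_struct(struct sockaddr_storage, ss)
		__field(unsigned long, flags)
	),
	TP_fast_assign(
		memcpy(&__entry->ss, remote, sizeof(__entry->ss));
		__entry->flags = flags;
	),
	TP_printk("addr=%pIScp flags=0x%lx",
		  (struct sockaddr *)&__entry->ss, __entry->flags)
);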
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index dee3bb1d5a6b..2cca6cd342d8 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -46,7 +46,7 @@ TRACE_EVENT(task_rename,
46 TP_fast_assign( 46 TP_fast_assign(
47 __entry->pid = task->pid; 47 __entry->pid = task->pid;
48 memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN); 48 memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
49 memcpy(entry->newcomm, comm, TASK_COMM_LEN); 49 strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
50 __entry->oom_score_adj = task->signal->oom_score_adj; 50 __entry->oom_score_adj = task->signal->oom_score_adj;
51 ), 51 ),
52 52
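The switch to strlcpy() matters because comm may be shorter than TASK_COMM_LEN: memcpy() reads a fixed 16 bytes from the source regardless, while strlcpy() stops at the NUL and always terminates the destination. A minimal illustration (hypothetical helper):

static void example_copy_comm(char *dst, const char *src)
{
	/* memcpy(dst, src, TASK_COMM_LEN) would read TASK_COMM_LEN bytes from
	 * src even when the string is shorter; strlcpy() stops at the NUL and
	 * guarantees dst is NUL-terminated. */
	strlcpy(dst, src, TASK_COMM_LEN);
}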
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 4250f364a6ca..bc8815f45f3b 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -11,7 +11,8 @@
11 EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \ 11 EM( TLB_FLUSH_ON_TASK_SWITCH, "flush on task switch" ) \
12 EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \ 12 EM( TLB_REMOTE_SHOOTDOWN, "remote shootdown" ) \
13 EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \ 13 EM( TLB_LOCAL_SHOOTDOWN, "local shootdown" ) \
14 EMe( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" ) 14 EM( TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" ) \
15 EMe( TLB_REMOTE_SEND_IPI, "remote ipi send" )
15 16
16/* 17/*
17 * First define the enums in TLB_FLUSH_REASON to be exported to userspace 18 * First define the enums in TLB_FLUSH_REASON to be exported to userspace
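For context, the EM()/EMe() list is typically consumed twice further down such a header (a sketch of the usual pattern, not part of this hunk): once as TRACE_DEFINE_ENUM() so the values are visible to userspace, then as { value, "name" } pairs for __print_symbolic().

#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

TLB_FLUSH_REASON

#undef EM
#undef EMe
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }
/* later: __print_symbolic(__entry->reason, TLB_FLUSH_REASON) */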
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 89d0497c058a..dbf017bfddd9 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -93,90 +93,183 @@ SHOW_FIELD
93 { V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \ 93 { V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \
94 { V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" }) 94 { V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" })
95 95
96#define V4L2_TRACE_EVENT(event_name) \ 96DECLARE_EVENT_CLASS(v4l2_event_class,
97 TRACE_EVENT(event_name, \ 97 TP_PROTO(int minor, struct v4l2_buffer *buf),
98 TP_PROTO(int minor, struct v4l2_buffer *buf), \ 98
99 \ 99 TP_ARGS(minor, buf),
100 TP_ARGS(minor, buf), \ 100
101 \ 101 TP_STRUCT__entry(
102 TP_STRUCT__entry( \ 102 __field(int, minor)
103 __field(int, minor) \ 103 __field(u32, index)
104 __field(u32, index) \ 104 __field(u32, type)
105 __field(u32, type) \ 105 __field(u32, bytesused)
106 __field(u32, bytesused) \ 106 __field(u32, flags)
107 __field(u32, flags) \ 107 __field(u32, field)
108 __field(u32, field) \ 108 __field(s64, timestamp)
109 __field(s64, timestamp) \ 109 __field(u32, timecode_type)
110 __field(u32, timecode_type) \ 110 __field(u32, timecode_flags)
111 __field(u32, timecode_flags) \ 111 __field(u8, timecode_frames)
112 __field(u8, timecode_frames) \ 112 __field(u8, timecode_seconds)
113 __field(u8, timecode_seconds) \ 113 __field(u8, timecode_minutes)
114 __field(u8, timecode_minutes) \ 114 __field(u8, timecode_hours)
115 __field(u8, timecode_hours) \ 115 __field(u8, timecode_userbits0)
116 __field(u8, timecode_userbits0) \ 116 __field(u8, timecode_userbits1)
117 __field(u8, timecode_userbits1) \ 117 __field(u8, timecode_userbits2)
118 __field(u8, timecode_userbits2) \ 118 __field(u8, timecode_userbits3)
119 __field(u8, timecode_userbits3) \ 119 __field(u32, sequence)
120 __field(u32, sequence) \ 120 ),
121 ), \ 121
122 \ 122 TP_fast_assign(
123 TP_fast_assign( \ 123 __entry->minor = minor;
124 __entry->minor = minor; \ 124 __entry->index = buf->index;
125 __entry->index = buf->index; \ 125 __entry->type = buf->type;
126 __entry->type = buf->type; \ 126 __entry->bytesused = buf->bytesused;
127 __entry->bytesused = buf->bytesused; \ 127 __entry->flags = buf->flags;
128 __entry->flags = buf->flags; \ 128 __entry->field = buf->field;
129 __entry->field = buf->field; \ 129 __entry->timestamp = timeval_to_ns(&buf->timestamp);
130 __entry->timestamp = \ 130 __entry->timecode_type = buf->timecode.type;
131 timeval_to_ns(&buf->timestamp); \ 131 __entry->timecode_flags = buf->timecode.flags;
132 __entry->timecode_type = buf->timecode.type; \ 132 __entry->timecode_frames = buf->timecode.frames;
133 __entry->timecode_flags = buf->timecode.flags; \ 133 __entry->timecode_seconds = buf->timecode.seconds;
134 __entry->timecode_frames = \ 134 __entry->timecode_minutes = buf->timecode.minutes;
135 buf->timecode.frames; \ 135 __entry->timecode_hours = buf->timecode.hours;
136 __entry->timecode_seconds = \ 136 __entry->timecode_userbits0 = buf->timecode.userbits[0];
137 buf->timecode.seconds; \ 137 __entry->timecode_userbits1 = buf->timecode.userbits[1];
138 __entry->timecode_minutes = \ 138 __entry->timecode_userbits2 = buf->timecode.userbits[2];
139 buf->timecode.minutes; \ 139 __entry->timecode_userbits3 = buf->timecode.userbits[3];
140 __entry->timecode_hours = buf->timecode.hours; \ 140 __entry->sequence = buf->sequence;
141 __entry->timecode_userbits0 = \ 141 ),
142 buf->timecode.userbits[0]; \ 142
143 __entry->timecode_userbits1 = \ 143 TP_printk("minor = %d, index = %u, type = %s, bytesused = %u, "
144 buf->timecode.userbits[1]; \ 144 "flags = %s, field = %s, timestamp = %llu, "
145 __entry->timecode_userbits2 = \ 145 "timecode = { type = %s, flags = %s, frames = %u, "
146 buf->timecode.userbits[2]; \ 146 "seconds = %u, minutes = %u, hours = %u, "
147 __entry->timecode_userbits3 = \ 147 "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
148 buf->timecode.userbits[3]; \ 148 __entry->index, show_type(__entry->type),
149 __entry->sequence = buf->sequence; \ 149 __entry->bytesused,
150 ), \ 150 show_flags(__entry->flags),
151 \ 151 show_field(__entry->field),
152 TP_printk("minor = %d, index = %u, type = %s, " \ 152 __entry->timestamp,
153 "bytesused = %u, flags = %s, " \ 153 show_timecode_type(__entry->timecode_type),
154 "field = %s, timestamp = %llu, timecode = { " \ 154 show_timecode_flags(__entry->timecode_flags),
155 "type = %s, flags = %s, frames = %u, " \ 155 __entry->timecode_frames,
156 "seconds = %u, minutes = %u, hours = %u, " \ 156 __entry->timecode_seconds,
157 "userbits = { %u %u %u %u } }, " \ 157 __entry->timecode_minutes,
158 "sequence = %u", __entry->minor, \ 158 __entry->timecode_hours,
159 __entry->index, show_type(__entry->type), \ 159 __entry->timecode_userbits0,
160 __entry->bytesused, \ 160 __entry->timecode_userbits1,
161 show_flags(__entry->flags), \ 161 __entry->timecode_userbits2,
162 show_field(__entry->field), \ 162 __entry->timecode_userbits3,
163 __entry->timestamp, \ 163 __entry->sequence
164 show_timecode_type(__entry->timecode_type), \ 164 )
165 show_timecode_flags(__entry->timecode_flags), \ 165)
166 __entry->timecode_frames, \ 166
167 __entry->timecode_seconds, \ 167DEFINE_EVENT(v4l2_event_class, v4l2_dqbuf,
168 __entry->timecode_minutes, \ 168 TP_PROTO(int minor, struct v4l2_buffer *buf),
169 __entry->timecode_hours, \ 169 TP_ARGS(minor, buf)
170 __entry->timecode_userbits0, \ 170);
171 __entry->timecode_userbits1, \ 171
172 __entry->timecode_userbits2, \ 172DEFINE_EVENT(v4l2_event_class, v4l2_qbuf,
173 __entry->timecode_userbits3, \ 173 TP_PROTO(int minor, struct v4l2_buffer *buf),
174 __entry->sequence \ 174 TP_ARGS(minor, buf)
175 ) \ 175);
176
177DECLARE_EVENT_CLASS(vb2_event_class,
178 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
179 TP_ARGS(q, vb),
180
181 TP_STRUCT__entry(
182 __field(int, minor)
183 __field(u32, queued_count)
184 __field(int, owned_by_drv_count)
185 __field(u32, index)
186 __field(u32, type)
187 __field(u32, bytesused)
188 __field(u32, flags)
189 __field(u32, field)
190 __field(s64, timestamp)
191 __field(u32, timecode_type)
192 __field(u32, timecode_flags)
193 __field(u8, timecode_frames)
194 __field(u8, timecode_seconds)
195 __field(u8, timecode_minutes)
196 __field(u8, timecode_hours)
197 __field(u8, timecode_userbits0)
198 __field(u8, timecode_userbits1)
199 __field(u8, timecode_userbits2)
200 __field(u8, timecode_userbits3)
201 __field(u32, sequence)
202 ),
203
204 TP_fast_assign(
205 __entry->minor = q->owner ? q->owner->vdev->minor : -1;
206 __entry->queued_count = q->queued_count;
207 __entry->owned_by_drv_count =
208 atomic_read(&q->owned_by_drv_count);
209 __entry->index = vb->v4l2_buf.index;
210 __entry->type = vb->v4l2_buf.type;
211 __entry->bytesused = vb->v4l2_planes[0].bytesused;
212 __entry->flags = vb->v4l2_buf.flags;
213 __entry->field = vb->v4l2_buf.field;
214 __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp);
215 __entry->timecode_type = vb->v4l2_buf.timecode.type;
216 __entry->timecode_flags = vb->v4l2_buf.timecode.flags;
217 __entry->timecode_frames = vb->v4l2_buf.timecode.frames;
218 __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds;
219 __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes;
220 __entry->timecode_hours = vb->v4l2_buf.timecode.hours;
221 __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0];
222 __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1];
223 __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2];
224 __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3];
225 __entry->sequence = vb->v4l2_buf.sequence;
226 ),
227
228 TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, "
229 "type = %s, bytesused = %u, flags = %s, field = %s, "
230 "timestamp = %llu, timecode = { type = %s, flags = %s, "
231 "frames = %u, seconds = %u, minutes = %u, hours = %u, "
232 "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor,
233 __entry->queued_count,
234 __entry->owned_by_drv_count,
235 __entry->index, show_type(__entry->type),
236 __entry->bytesused,
237 show_flags(__entry->flags),
238 show_field(__entry->field),
239 __entry->timestamp,
240 show_timecode_type(__entry->timecode_type),
241 show_timecode_flags(__entry->timecode_flags),
242 __entry->timecode_frames,
243 __entry->timecode_seconds,
244 __entry->timecode_minutes,
245 __entry->timecode_hours,
246 __entry->timecode_userbits0,
247 __entry->timecode_userbits1,
248 __entry->timecode_userbits2,
249 __entry->timecode_userbits3,
250 __entry->sequence
176 ) 251 )
252)
253
254DEFINE_EVENT(vb2_event_class, vb2_buf_done,
255 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
256 TP_ARGS(q, vb)
257);
258
259DEFINE_EVENT(vb2_event_class, vb2_buf_queue,
260 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
261 TP_ARGS(q, vb)
262);
263
264DEFINE_EVENT(vb2_event_class, vb2_dqbuf,
265 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
266 TP_ARGS(q, vb)
267);
177 268
178V4L2_TRACE_EVENT(v4l2_dqbuf); 269DEFINE_EVENT(vb2_event_class, vb2_qbuf,
179V4L2_TRACE_EVENT(v4l2_qbuf); 270 TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
271 TP_ARGS(q, vb)
272);
180 273
181#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */ 274#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */
182 275
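Each DEFINE_EVENT above generates a stub with the class's TP_PROTO, so the videobuf2 core can emit the new events directly. A caller sketch (illustrative; not from this patch):

static void example_buf_done(struct vb2_queue *q, struct vb2_buffer *vb)
{
	trace_vb2_buf_done(q, vb);
}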
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index a7aa607a4c55..fff846b512e6 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -131,6 +131,66 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
131 TP_ARGS(inode, flags) 131 TP_ARGS(inode, flags)
132); 132);
133 133
134#ifdef CREATE_TRACE_POINTS
135#ifdef CONFIG_CGROUP_WRITEBACK
136
137static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
138{
139 return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
140}
141
142static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
143{
144 struct cgroup *cgrp = wb->memcg_css->cgroup;
145 char *path;
146
147 path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
148 WARN_ON_ONCE(path != buf);
149}
150
151static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
152{
153 if (wbc->wb)
154 return __trace_wb_cgroup_size(wbc->wb);
155 else
156 return 2;
157}
158
159static inline void __trace_wbc_assign_cgroup(char *buf,
160 struct writeback_control *wbc)
161{
162 if (wbc->wb)
163 __trace_wb_assign_cgroup(buf, wbc->wb);
164 else
165 strcpy(buf, "/");
166}
167
168#else /* CONFIG_CGROUP_WRITEBACK */
169
170static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
171{
172 return 2;
173}
174
175static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
176{
177 strcpy(buf, "/");
178}
179
180static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
181{
182 return 2;
183}
184
185static inline void __trace_wbc_assign_cgroup(char *buf,
186 struct writeback_control *wbc)
187{
188 strcpy(buf, "/");
189}
190
191#endif /* CONFIG_CGROUP_WRITEBACK */
192#endif /* CREATE_TRACE_POINTS */
193
134DECLARE_EVENT_CLASS(writeback_write_inode_template, 194DECLARE_EVENT_CLASS(writeback_write_inode_template,
135 195
136 TP_PROTO(struct inode *inode, struct writeback_control *wbc), 196 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
@@ -141,6 +201,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
141 __array(char, name, 32) 201 __array(char, name, 32)
142 __field(unsigned long, ino) 202 __field(unsigned long, ino)
143 __field(int, sync_mode) 203 __field(int, sync_mode)
204 __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
144 ), 205 ),
145 206
146 TP_fast_assign( 207 TP_fast_assign(
@@ -148,12 +209,14 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
148 dev_name(inode_to_bdi(inode)->dev), 32); 209 dev_name(inode_to_bdi(inode)->dev), 32);
149 __entry->ino = inode->i_ino; 210 __entry->ino = inode->i_ino;
150 __entry->sync_mode = wbc->sync_mode; 211 __entry->sync_mode = wbc->sync_mode;
212 __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
151 ), 213 ),
152 214
153 TP_printk("bdi %s: ino=%lu sync_mode=%d", 215 TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
154 __entry->name, 216 __entry->name,
155 __entry->ino, 217 __entry->ino,
156 __entry->sync_mode 218 __entry->sync_mode,
219 __get_str(cgroup)
157 ) 220 )
158); 221);
159 222
@@ -172,8 +235,8 @@ DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
172); 235);
173 236
174DECLARE_EVENT_CLASS(writeback_work_class, 237DECLARE_EVENT_CLASS(writeback_work_class,
175 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), 238 TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
176 TP_ARGS(bdi, work), 239 TP_ARGS(wb, work),
177 TP_STRUCT__entry( 240 TP_STRUCT__entry(
178 __array(char, name, 32) 241 __array(char, name, 32)
179 __field(long, nr_pages) 242 __field(long, nr_pages)
@@ -183,10 +246,11 @@ DECLARE_EVENT_CLASS(writeback_work_class,
183 __field(int, range_cyclic) 246 __field(int, range_cyclic)
184 __field(int, for_background) 247 __field(int, for_background)
185 __field(int, reason) 248 __field(int, reason)
249 __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
186 ), 250 ),
187 TP_fast_assign( 251 TP_fast_assign(
188 strncpy(__entry->name, 252 strncpy(__entry->name,
189 bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); 253 wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
190 __entry->nr_pages = work->nr_pages; 254 __entry->nr_pages = work->nr_pages;
191 __entry->sb_dev = work->sb ? work->sb->s_dev : 0; 255 __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
192 __entry->sync_mode = work->sync_mode; 256 __entry->sync_mode = work->sync_mode;
@@ -194,9 +258,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
194 __entry->range_cyclic = work->range_cyclic; 258 __entry->range_cyclic = work->range_cyclic;
195 __entry->for_background = work->for_background; 259 __entry->for_background = work->for_background;
196 __entry->reason = work->reason; 260 __entry->reason = work->reason;
261 __trace_wb_assign_cgroup(__get_str(cgroup), wb);
197 ), 262 ),
198 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " 263 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
199 "kupdate=%d range_cyclic=%d background=%d reason=%s", 264 "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
200 __entry->name, 265 __entry->name,
201 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), 266 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
202 __entry->nr_pages, 267 __entry->nr_pages,
@@ -204,13 +269,14 @@ DECLARE_EVENT_CLASS(writeback_work_class,
204 __entry->for_kupdate, 269 __entry->for_kupdate,
205 __entry->range_cyclic, 270 __entry->range_cyclic,
206 __entry->for_background, 271 __entry->for_background,
207 __print_symbolic(__entry->reason, WB_WORK_REASON) 272 __print_symbolic(__entry->reason, WB_WORK_REASON),
273 __get_str(cgroup)
208 ) 274 )
209); 275);
210#define DEFINE_WRITEBACK_WORK_EVENT(name) \ 276#define DEFINE_WRITEBACK_WORK_EVENT(name) \
211DEFINE_EVENT(writeback_work_class, name, \ 277DEFINE_EVENT(writeback_work_class, name, \
212 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \ 278 TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
213 TP_ARGS(bdi, work)) 279 TP_ARGS(wb, work))
214DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); 280DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
215DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); 281DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
216DEFINE_WRITEBACK_WORK_EVENT(writeback_start); 282DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
@@ -230,26 +296,42 @@ TRACE_EVENT(writeback_pages_written,
230); 296);
231 297
232DECLARE_EVENT_CLASS(writeback_class, 298DECLARE_EVENT_CLASS(writeback_class,
233 TP_PROTO(struct backing_dev_info *bdi), 299 TP_PROTO(struct bdi_writeback *wb),
234 TP_ARGS(bdi), 300 TP_ARGS(wb),
235 TP_STRUCT__entry( 301 TP_STRUCT__entry(
236 __array(char, name, 32) 302 __array(char, name, 32)
303 __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
237 ), 304 ),
238 TP_fast_assign( 305 TP_fast_assign(
239 strncpy(__entry->name, dev_name(bdi->dev), 32); 306 strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
307 __trace_wb_assign_cgroup(__get_str(cgroup), wb);
240 ), 308 ),
241 TP_printk("bdi %s", 309 TP_printk("bdi %s: cgroup=%s",
242 __entry->name 310 __entry->name,
311 __get_str(cgroup)
243 ) 312 )
244); 313);
245#define DEFINE_WRITEBACK_EVENT(name) \ 314#define DEFINE_WRITEBACK_EVENT(name) \
246DEFINE_EVENT(writeback_class, name, \ 315DEFINE_EVENT(writeback_class, name, \
247 TP_PROTO(struct backing_dev_info *bdi), \ 316 TP_PROTO(struct bdi_writeback *wb), \
248 TP_ARGS(bdi)) 317 TP_ARGS(wb))
249 318
250DEFINE_WRITEBACK_EVENT(writeback_nowork); 319DEFINE_WRITEBACK_EVENT(writeback_nowork);
251DEFINE_WRITEBACK_EVENT(writeback_wake_background); 320DEFINE_WRITEBACK_EVENT(writeback_wake_background);
252DEFINE_WRITEBACK_EVENT(writeback_bdi_register); 321
322TRACE_EVENT(writeback_bdi_register,
323 TP_PROTO(struct backing_dev_info *bdi),
324 TP_ARGS(bdi),
325 TP_STRUCT__entry(
326 __array(char, name, 32)
327 ),
328 TP_fast_assign(
329 strncpy(__entry->name, dev_name(bdi->dev), 32);
330 ),
331 TP_printk("bdi %s",
332 __entry->name
333 )
334);
253 335
254DECLARE_EVENT_CLASS(wbc_class, 336DECLARE_EVENT_CLASS(wbc_class,
255 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), 337 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -265,6 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
265 __field(int, range_cyclic) 347 __field(int, range_cyclic)
266 __field(long, range_start) 348 __field(long, range_start)
267 __field(long, range_end) 349 __field(long, range_end)
350 __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
268 ), 351 ),
269 352
270 TP_fast_assign( 353 TP_fast_assign(
@@ -278,11 +361,12 @@ DECLARE_EVENT_CLASS(wbc_class,
278 __entry->range_cyclic = wbc->range_cyclic; 361 __entry->range_cyclic = wbc->range_cyclic;
279 __entry->range_start = (long)wbc->range_start; 362 __entry->range_start = (long)wbc->range_start;
280 __entry->range_end = (long)wbc->range_end; 363 __entry->range_end = (long)wbc->range_end;
364 __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
281 ), 365 ),
282 366
283 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " 367 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
284 "bgrd=%d reclm=%d cyclic=%d " 368 "bgrd=%d reclm=%d cyclic=%d "
285 "start=0x%lx end=0x%lx", 369 "start=0x%lx end=0x%lx cgroup=%s",
286 __entry->name, 370 __entry->name,
287 __entry->nr_to_write, 371 __entry->nr_to_write,
288 __entry->pages_skipped, 372 __entry->pages_skipped,
@@ -292,7 +376,9 @@ DECLARE_EVENT_CLASS(wbc_class,
292 __entry->for_reclaim, 376 __entry->for_reclaim,
293 __entry->range_cyclic, 377 __entry->range_cyclic,
294 __entry->range_start, 378 __entry->range_start,
295 __entry->range_end) 379 __entry->range_end,
380 __get_str(cgroup)
381 )
296) 382)
297 383
298#define DEFINE_WBC_EVENT(name) \ 384#define DEFINE_WBC_EVENT(name) \
@@ -312,6 +398,7 @@ TRACE_EVENT(writeback_queue_io,
312 __field(long, age) 398 __field(long, age)
313 __field(int, moved) 399 __field(int, moved)
314 __field(int, reason) 400 __field(int, reason)
401 __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
315 ), 402 ),
316 TP_fast_assign( 403 TP_fast_assign(
317 unsigned long *older_than_this = work->older_than_this; 404 unsigned long *older_than_this = work->older_than_this;
@@ -321,13 +408,15 @@ TRACE_EVENT(writeback_queue_io,
321 (jiffies - *older_than_this) * 1000 / HZ : -1; 408 (jiffies - *older_than_this) * 1000 / HZ : -1;
322 __entry->moved = moved; 409 __entry->moved = moved;
323 __entry->reason = work->reason; 410 __entry->reason = work->reason;
411 __trace_wb_assign_cgroup(__get_str(cgroup), wb);
324 ), 412 ),
325 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s", 413 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
326 __entry->name, 414 __entry->name,
327 __entry->older, /* older_than_this in jiffies */ 415 __entry->older, /* older_than_this in jiffies */
328 __entry->age, /* older_than_this in relative milliseconds */ 416 __entry->age, /* older_than_this in relative milliseconds */
329 __entry->moved, 417 __entry->moved,
330 __print_symbolic(__entry->reason, WB_WORK_REASON) 418 __print_symbolic(__entry->reason, WB_WORK_REASON),
419 __get_str(cgroup)
331 ) 420 )
332); 421);
333 422
@@ -381,11 +470,11 @@ TRACE_EVENT(global_dirty_state,
381 470
382TRACE_EVENT(bdi_dirty_ratelimit, 471TRACE_EVENT(bdi_dirty_ratelimit,
383 472
384 TP_PROTO(struct backing_dev_info *bdi, 473 TP_PROTO(struct bdi_writeback *wb,
385 unsigned long dirty_rate, 474 unsigned long dirty_rate,
386 unsigned long task_ratelimit), 475 unsigned long task_ratelimit),
387 476
388 TP_ARGS(bdi, dirty_rate, task_ratelimit), 477 TP_ARGS(wb, dirty_rate, task_ratelimit),
389 478
390 TP_STRUCT__entry( 479 TP_STRUCT__entry(
391 __array(char, bdi, 32) 480 __array(char, bdi, 32)
@@ -395,36 +484,39 @@ TRACE_EVENT(bdi_dirty_ratelimit,
395 __field(unsigned long, dirty_ratelimit) 484 __field(unsigned long, dirty_ratelimit)
396 __field(unsigned long, task_ratelimit) 485 __field(unsigned long, task_ratelimit)
397 __field(unsigned long, balanced_dirty_ratelimit) 486 __field(unsigned long, balanced_dirty_ratelimit)
487 __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
398 ), 488 ),
399 489
400 TP_fast_assign( 490 TP_fast_assign(
401 strlcpy(__entry->bdi, dev_name(bdi->dev), 32); 491 strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
402 __entry->write_bw = KBps(bdi->wb.write_bandwidth); 492 __entry->write_bw = KBps(wb->write_bandwidth);
403 __entry->avg_write_bw = KBps(bdi->wb.avg_write_bandwidth); 493 __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
404 __entry->dirty_rate = KBps(dirty_rate); 494 __entry->dirty_rate = KBps(dirty_rate);
405 __entry->dirty_ratelimit = KBps(bdi->wb.dirty_ratelimit); 495 __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
406 __entry->task_ratelimit = KBps(task_ratelimit); 496 __entry->task_ratelimit = KBps(task_ratelimit);
407 __entry->balanced_dirty_ratelimit = 497 __entry->balanced_dirty_ratelimit =
408 KBps(bdi->wb.balanced_dirty_ratelimit); 498 KBps(wb->balanced_dirty_ratelimit);
499 __trace_wb_assign_cgroup(__get_str(cgroup), wb);
409 ), 500 ),
410 501
411 TP_printk("bdi %s: " 502 TP_printk("bdi %s: "
412 "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " 503 "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
413 "dirty_ratelimit=%lu task_ratelimit=%lu " 504 "dirty_ratelimit=%lu task_ratelimit=%lu "
414 "balanced_dirty_ratelimit=%lu", 505 "balanced_dirty_ratelimit=%lu cgroup=%s",
415 __entry->bdi, 506 __entry->bdi,
416 __entry->write_bw, /* write bandwidth */ 507 __entry->write_bw, /* write bandwidth */
417 __entry->avg_write_bw, /* avg write bandwidth */ 508 __entry->avg_write_bw, /* avg write bandwidth */
418 __entry->dirty_rate, /* bdi dirty rate */ 509 __entry->dirty_rate, /* bdi dirty rate */
419 __entry->dirty_ratelimit, /* base ratelimit */ 510 __entry->dirty_ratelimit, /* base ratelimit */
420 __entry->task_ratelimit, /* ratelimit with position control */ 511 __entry->task_ratelimit, /* ratelimit with position control */
421 __entry->balanced_dirty_ratelimit /* the balanced ratelimit */ 512 __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
513 __get_str(cgroup)
422 ) 514 )
423); 515);
424 516
425TRACE_EVENT(balance_dirty_pages, 517TRACE_EVENT(balance_dirty_pages,
426 518
427 TP_PROTO(struct backing_dev_info *bdi, 519 TP_PROTO(struct bdi_writeback *wb,
428 unsigned long thresh, 520 unsigned long thresh,
429 unsigned long bg_thresh, 521 unsigned long bg_thresh,
430 unsigned long dirty, 522 unsigned long dirty,
@@ -437,7 +529,7 @@ TRACE_EVENT(balance_dirty_pages,
437 long pause, 529 long pause,
438 unsigned long start_time), 530 unsigned long start_time),
439 531
440 TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty, 532 TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
441 dirty_ratelimit, task_ratelimit, 533 dirty_ratelimit, task_ratelimit,
442 dirtied, period, pause, start_time), 534 dirtied, period, pause, start_time),
443 535
@@ -456,11 +548,12 @@ TRACE_EVENT(balance_dirty_pages,
456 __field( long, pause) 548 __field( long, pause)
457 __field(unsigned long, period) 549 __field(unsigned long, period)
458 __field( long, think) 550 __field( long, think)
551 __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
459 ), 552 ),
460 553
461 TP_fast_assign( 554 TP_fast_assign(
462 unsigned long freerun = (thresh + bg_thresh) / 2; 555 unsigned long freerun = (thresh + bg_thresh) / 2;
463 strlcpy(__entry->bdi, dev_name(bdi->dev), 32); 556 strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
464 557
465 __entry->limit = global_wb_domain.dirty_limit; 558 __entry->limit = global_wb_domain.dirty_limit;
466 __entry->setpoint = (global_wb_domain.dirty_limit + 559 __entry->setpoint = (global_wb_domain.dirty_limit +
@@ -478,6 +571,7 @@ TRACE_EVENT(balance_dirty_pages,
478 __entry->period = period * 1000 / HZ; 571 __entry->period = period * 1000 / HZ;
479 __entry->pause = pause * 1000 / HZ; 572 __entry->pause = pause * 1000 / HZ;
480 __entry->paused = (jiffies - start_time) * 1000 / HZ; 573 __entry->paused = (jiffies - start_time) * 1000 / HZ;
574 __trace_wb_assign_cgroup(__get_str(cgroup), wb);
481 ), 575 ),
482 576
483 577
@@ -486,7 +580,7 @@ TRACE_EVENT(balance_dirty_pages,
486 "bdi_setpoint=%lu bdi_dirty=%lu " 580 "bdi_setpoint=%lu bdi_dirty=%lu "
487 "dirty_ratelimit=%lu task_ratelimit=%lu " 581 "dirty_ratelimit=%lu task_ratelimit=%lu "
488 "dirtied=%u dirtied_pause=%u " 582 "dirtied=%u dirtied_pause=%u "
489 "paused=%lu pause=%ld period=%lu think=%ld", 583 "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
490 __entry->bdi, 584 __entry->bdi,
491 __entry->limit, 585 __entry->limit,
492 __entry->setpoint, 586 __entry->setpoint,
@@ -500,7 +594,8 @@ TRACE_EVENT(balance_dirty_pages,
500 __entry->paused, /* ms */ 594 __entry->paused, /* ms */
501 __entry->pause, /* ms */ 595 __entry->pause, /* ms */
502 __entry->period, /* ms */ 596 __entry->period, /* ms */
503 __entry->think /* ms */ 597 __entry->think, /* ms */
598 __get_str(cgroup)
504 ) 599 )
505); 600);
506 601
@@ -514,6 +609,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
514 __field(unsigned long, ino) 609 __field(unsigned long, ino)
515 __field(unsigned long, state) 610 __field(unsigned long, state)
516 __field(unsigned long, dirtied_when) 611 __field(unsigned long, dirtied_when)
612 __dynamic_array(char, cgroup,
613 __trace_wb_cgroup_size(inode_to_wb(inode)))
517 ), 614 ),
518 615
519 TP_fast_assign( 616 TP_fast_assign(
@@ -522,14 +619,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
522 __entry->ino = inode->i_ino; 619 __entry->ino = inode->i_ino;
523 __entry->state = inode->i_state; 620 __entry->state = inode->i_state;
524 __entry->dirtied_when = inode->dirtied_when; 621 __entry->dirtied_when = inode->dirtied_when;
622 __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
525 ), 623 ),
526 624
527 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu", 625 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
528 __entry->name, 626 __entry->name,
529 __entry->ino, 627 __entry->ino,
530 show_inode_state(__entry->state), 628 show_inode_state(__entry->state),
531 __entry->dirtied_when, 629 __entry->dirtied_when,
532 (jiffies - __entry->dirtied_when) / HZ 630 (jiffies - __entry->dirtied_when) / HZ,
631 __get_str(cgroup)
533 ) 632 )
534); 633);
535 634
@@ -585,6 +684,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
585 __field(unsigned long, writeback_index) 684 __field(unsigned long, writeback_index)
586 __field(long, nr_to_write) 685 __field(long, nr_to_write)
587 __field(unsigned long, wrote) 686 __field(unsigned long, wrote)
687 __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
588 ), 688 ),
589 689
590 TP_fast_assign( 690 TP_fast_assign(
@@ -596,10 +696,11 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
596 __entry->writeback_index = inode->i_mapping->writeback_index; 696 __entry->writeback_index = inode->i_mapping->writeback_index;
597 __entry->nr_to_write = nr_to_write; 697 __entry->nr_to_write = nr_to_write;
598 __entry->wrote = nr_to_write - wbc->nr_to_write; 698 __entry->wrote = nr_to_write - wbc->nr_to_write;
699 __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
599 ), 700 ),
600 701
601 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " 702 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
602 "index=%lu to_write=%ld wrote=%lu", 703 "index=%lu to_write=%ld wrote=%lu cgroup=%s",
603 __entry->name, 704 __entry->name,
604 __entry->ino, 705 __entry->ino,
605 show_inode_state(__entry->state), 706 show_inode_state(__entry->state),
@@ -607,7 +708,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
607 (jiffies - __entry->dirtied_when) / HZ, 708 (jiffies - __entry->dirtied_when) / HZ,
608 __entry->writeback_index, 709 __entry->writeback_index,
609 __entry->nr_to_write, 710 __entry->nr_to_write,
610 __entry->wrote 711 __entry->wrote,
712 __get_str(cgroup)
611 ) 713 )
612); 714);
613 715
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2f295cde657e..8c5e8b91a3cb 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -34,6 +34,13 @@
34/* color index */ 34/* color index */
35#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ 35#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
36 36
37/* 8 bpp Red */
38#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
39
40/* 16 bpp RG */
41#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
42#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
43
37/* 8 bpp RGB */ 44/* 8 bpp RGB */
38#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ 45#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
39#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ 46#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index db809b722985..fd5aa47bd689 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -354,9 +354,15 @@ typedef struct drm_i915_irq_wait {
354#define I915_PARAM_REVISION 32 354#define I915_PARAM_REVISION 32
355#define I915_PARAM_SUBSLICE_TOTAL 33 355#define I915_PARAM_SUBSLICE_TOTAL 33
356#define I915_PARAM_EU_TOTAL 34 356#define I915_PARAM_EU_TOTAL 34
357#define I915_PARAM_HAS_GPU_RESET 35
358#define I915_PARAM_HAS_RESOURCE_STREAMER 36
357 359
358typedef struct drm_i915_getparam { 360typedef struct drm_i915_getparam {
359 int param; 361 __s32 param;
362 /*
363 * WARNING: Using pointers instead of fixed-size u64 means we need to write
364 * compat32 code. Don't repeat this mistake.
365 */
360 int __user *value; 366 int __user *value;
361} drm_i915_getparam_t; 367} drm_i915_getparam_t;
362 368
@@ -764,7 +770,12 @@ struct drm_i915_gem_execbuffer2 {
764#define I915_EXEC_BSD_RING1 (1<<13) 770#define I915_EXEC_BSD_RING1 (1<<13)
765#define I915_EXEC_BSD_RING2 (2<<13) 771#define I915_EXEC_BSD_RING2 (2<<13)
766 772
767#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15) 773/** Tell the kernel that the batchbuffer is processed by
774 * the resource streamer.
775 */
776#define I915_EXEC_RESOURCE_STREAMER (1<<15)
777
778#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
768 779
769#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 780#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
770#define i915_execbuffer2_set_context_id(eb2, context) \ 781#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1114,6 +1125,7 @@ struct drm_i915_gem_context_param {
1114 __u32 size; 1125 __u32 size;
1115 __u64 param; 1126 __u64 param;
1116#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1127#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1128#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1117 __u64 value; 1129 __u64 value;
1118}; 1130};
1119 1131
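
The widened drm_i915_getparam above is exercised through the existing getparam ioctl. A minimal sketch of querying the new I915_PARAM_HAS_RESOURCE_STREAMER flag follows; DRM_IOCTL_I915_GETPARAM and the /dev/dri/card0 path come from the pre-existing i915/DRM UAPI, not from this hunk.

/* Query one i915 parameter; prints 1 if the resource streamer is present. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* device path is an assumption */
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_RESOURCE_STREAMER,
		.value = &value,
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("%d\n", value);
	return 0;
}
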
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedbe38e..05b204954d16 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
64#define DRM_VMW_GB_SURFACE_CREATE 23 64#define DRM_VMW_GB_SURFACE_CREATE 23
65#define DRM_VMW_GB_SURFACE_REF 24 65#define DRM_VMW_GB_SURFACE_REF 24
66#define DRM_VMW_SYNCCPU 25 66#define DRM_VMW_SYNCCPU 25
67#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
67 68
68/*************************************************************************/ 69/*************************************************************************/
69/** 70/**
@@ -88,6 +89,8 @@
88#define DRM_VMW_PARAM_3D_CAPS_SIZE 8 89#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
89#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 90#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
90#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 91#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
92#define DRM_VMW_PARAM_SCREEN_TARGET 11
93#define DRM_VMW_PARAM_DX 12
91 94
92/** 95/**
93 * enum drm_vmw_handle_type - handle type for ref ioctls 96 * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
296 * Argument to the DRM_VMW_EXECBUF Ioctl. 299 * Argument to the DRM_VMW_EXECBUF Ioctl.
297 */ 300 */
298 301
299#define DRM_VMW_EXECBUF_VERSION 1 302#define DRM_VMW_EXECBUF_VERSION 2
300 303
301struct drm_vmw_execbuf_arg { 304struct drm_vmw_execbuf_arg {
302 uint64_t commands; 305 uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
305 uint64_t fence_rep; 308 uint64_t fence_rep;
306 uint32_t version; 309 uint32_t version;
307 uint32_t flags; 310 uint32_t flags;
311 uint32_t context_handle;
312 uint32_t pad64;
308}; 313};
309 314
310/** 315/**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
825enum drm_vmw_shader_type { 830enum drm_vmw_shader_type {
826 drm_vmw_shader_type_vs = 0, 831 drm_vmw_shader_type_vs = 0,
827 drm_vmw_shader_type_ps, 832 drm_vmw_shader_type_ps,
828 drm_vmw_shader_type_gs
829}; 833};
830 834
831 835
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
907 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID 911 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
908 * if none. 912 * if none.
909 * @base_size Size of the base mip level for all faces. 913 * @base_size Size of the base mip level for all faces.
914 * @array_size Must be zero for non-DX hardware, and if non-zero
915 * svga3d_flags must have proper bind flags setup.
910 * 916 *
911 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. 917 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
912 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. 918 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
919 uint32_t multisample_count; 925 uint32_t multisample_count;
920 uint32_t autogen_filter; 926 uint32_t autogen_filter;
921 uint32_t buffer_handle; 927 uint32_t buffer_handle;
922 uint32_t pad64; 928 uint32_t array_size;
923 struct drm_vmw_size base_size; 929 struct drm_vmw_size base_size;
924}; 930};
925 931
@@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
1059 uint32_t pad64; 1065 uint32_t pad64;
1060}; 1066};
1061 1067
1068/*************************************************************************/
1069/**
1070 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
1071 *
1072 * Allocates a device unique context id, and queues a create context command
1073 * for the host. Does not wait for host completion.
1074 */
1075enum drm_vmw_extended_context {
1076 drm_vmw_context_legacy,
1077 drm_vmw_context_dx
1078};
1079
1080/**
1081 * union drm_vmw_extended_context_arg
1082 *
1083 * @req: Context type.
1084 * @rep: Context identifier.
1085 *
1086 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
1087 */
1088union drm_vmw_extended_context_arg {
1089 enum drm_vmw_extended_context req;
1090 struct drm_vmw_context_arg rep;
1091};
1062#endif 1092#endif
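
A hedged sketch of driving the new DRM_VMW_CREATE_EXTENDED_CONTEXT ioctl through libdrm's drmCommandWriteRead(); the ioctl index and the argument union come from this file, while the cid field of drm_vmw_context_arg is assumed from the older DRM_VMW_CREATE_CONTEXT interface.

#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Ask the host for a DX-capable context; stores the context id on success. */
static int vmw_create_dx_context(int fd, int *out_cid)
{
	union drm_vmw_extended_context_arg arg = {
		.req = drm_vmw_context_dx,
	};
	int ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
				      &arg, sizeof(arg));

	if (ret == 0)
		*out_cid = arg.rep.cid;		/* field name is an assumption */
	return ret;
}
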
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 1ff9942718fe..70ff1d9abf0d 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -243,6 +243,7 @@ header-y += limits.h
243header-y += llc.h 243header-y += llc.h
244header-y += loop.h 244header-y += loop.h
245header-y += lp.h 245header-y += lp.h
246header-y += lwtunnel.h
246header-y += magic.h 247header-y += magic.h
247header-y += major.h 248header-y += major.h
248header-y += map_to_7segment.h 249header-y += map_to_7segment.h
@@ -455,3 +456,4 @@ header-y += xfrm.h
455header-y += xilinx-v4l2-controls.h 456header-y += xilinx-v4l2-controls.h
456header-y += zorro.h 457header-y += zorro.h
457header-y += zorro_ids.h 458header-y += zorro_ids.h
459header-y += userfaultfd.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d3475e1f15ec..843540c398eb 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -266,6 +266,7 @@
266#define AUDIT_OBJ_UID 109 266#define AUDIT_OBJ_UID 109
267#define AUDIT_OBJ_GID 110 267#define AUDIT_OBJ_GID 110
268#define AUDIT_FIELD_COMPARE 111 268#define AUDIT_FIELD_COMPARE 111
269#define AUDIT_EXE 112
269 270
270#define AUDIT_ARG0 200 271#define AUDIT_ARG0 200
271#define AUDIT_ARG1 (AUDIT_ARG0+1) 272#define AUDIT_ARG1 (AUDIT_ARG0+1)
@@ -324,8 +325,10 @@ enum {
324 325
325#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001 326#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
326#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002 327#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
328#define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH 0x00000004
327#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \ 329#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
328 AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME) 330 AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
331 AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH)
329 332
330/* deprecated: AUDIT_VERSION_* */ 333/* deprecated: AUDIT_VERSION_* */
331#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL 334#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
@@ -382,6 +385,9 @@ enum {
382#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 385#define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
383#define AUDIT_ARCH_SPARC (EM_SPARC) 386#define AUDIT_ARCH_SPARC (EM_SPARC)
384#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT) 387#define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT)
388#define AUDIT_ARCH_TILEGX (EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
389#define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE)
390#define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE)
385#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 391#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
386 392
387#define AUDIT_PERM_EXEC 1 393#define AUDIT_PERM_EXEC 1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 29ef6f99e43d..92a48e2d5461 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -114,6 +114,7 @@ enum bpf_map_type {
114 BPF_MAP_TYPE_HASH, 114 BPF_MAP_TYPE_HASH,
115 BPF_MAP_TYPE_ARRAY, 115 BPF_MAP_TYPE_ARRAY,
116 BPF_MAP_TYPE_PROG_ARRAY, 116 BPF_MAP_TYPE_PROG_ARRAY,
117 BPF_MAP_TYPE_PERF_EVENT_ARRAY,
117}; 118};
118 119
119enum bpf_prog_type { 120enum bpf_prog_type {
@@ -249,6 +250,28 @@ enum bpf_func_id {
249 * Return: 0 on success 250 * Return: 0 on success
250 */ 251 */
251 BPF_FUNC_get_current_comm, 252 BPF_FUNC_get_current_comm,
253
254 /**
255 * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
256 * @skb: pointer to skb
257 * Return: classid if != 0
258 */
259 BPF_FUNC_get_cgroup_classid,
260 BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
261 BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */
262
263 /**
264 * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
265 * retrieve or populate tunnel metadata
266 * @skb: pointer to skb
267 * @key: pointer to 'struct bpf_tunnel_key'
268 * @size: size of 'struct bpf_tunnel_key'
269 * @flags: room for future extensions
 270	 * Return: 0 on success
271 */
272 BPF_FUNC_skb_get_tunnel_key,
273 BPF_FUNC_skb_set_tunnel_key,
274 BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */
252 __BPF_FUNC_MAX_ID, 275 __BPF_FUNC_MAX_ID,
253}; 276};
254 277
@@ -269,6 +292,12 @@ struct __sk_buff {
269 __u32 ifindex; 292 __u32 ifindex;
270 __u32 tc_index; 293 __u32 tc_index;
271 __u32 cb[5]; 294 __u32 cb[5];
295 __u32 hash;
296};
297
298struct bpf_tunnel_key {
299 __u32 tunnel_id;
300 __u32 remote_ipv4;
272}; 301};
273 302
274#endif /* _UAPI__LINUX_BPF_H__ */ 303#endif /* _UAPI__LINUX_BPF_H__ */
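
The new helpers are called from restricted-C programs loaded through the bpf(2) syscall. A sketch of a tc classifier that matches on the decapsulated tunnel id follows; the function-pointer declaration mirrors the samples/bpf helper convention and is an assumption here, while the helper id, its argument list and struct bpf_tunnel_key are taken from this hunk.

#include <linux/bpf.h>

/* Helper stub in the samples/bpf style: the call is resolved by id at load time. */
static int (*bpf_skb_get_tunnel_key)(void *skb, struct bpf_tunnel_key *key,
				     int size, int flags) =
	(void *) BPF_FUNC_skb_get_tunnel_key;

__attribute__((section("classifier"), used))
int match_tunnel_42(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return 0;
	return key.tunnel_id == 42;	/* example VNI, arbitrary; return value is left to cls_bpf */
}
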
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index 3060783c4191..df56c8ff0769 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -26,7 +26,7 @@
26/* Version of the device interface */ 26/* Version of the device interface */
27#define DLM_DEVICE_VERSION_MAJOR 6 27#define DLM_DEVICE_VERSION_MAJOR 6
28#define DLM_DEVICE_VERSION_MINOR 0 28#define DLM_DEVICE_VERSION_MINOR 0
29#define DLM_DEVICE_VERSION_PATCH 1 29#define DLM_DEVICE_VERSION_PATCH 2
30 30
31/* struct passed to the lock write */ 31/* struct passed to the lock write */
32struct dlm_lock_params { 32struct dlm_lock_params {
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 061aca3a962d..d34611e35a30 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
267#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 267#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
268 268
269#define DM_VERSION_MAJOR 4 269#define DM_VERSION_MAJOR 4
270#define DM_VERSION_MINOR 32 270#define DM_VERSION_MINOR 33
271#define DM_VERSION_PATCHLEVEL 0 271#define DM_VERSION_PATCHLEVEL 0
272#define DM_VERSION_EXTRA "-ioctl (2015-6-26)" 272#define DM_VERSION_EXTRA "-ioctl (2015-8-18)"
273 273
274/* Status bits */ 274/* Status bits */
275#define DM_READONLY_FLAG (1 << 0) /* In/Out */ 275#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index b08829667ed7..b56dfcfe922a 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -38,6 +38,9 @@
38#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ 38#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
39#define EM_TI_C6000 140 /* TI C6X DSPs */ 39#define EM_TI_C6000 140 /* TI C6X DSPs */
40#define EM_AARCH64 183 /* ARM 64 bit */ 40#define EM_AARCH64 183 /* ARM 64 bit */
41#define EM_TILEPRO 188 /* Tilera TILEPro */
42#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
43#define EM_TILEGX 191 /* Tilera TILE-Gx */
41#define EM_FRV 0x5441 /* Fujitsu FR-V */ 44#define EM_FRV 0x5441 /* Fujitsu FR-V */
42#define EM_AVR32 0x18ad /* Atmel AVR32 */ 45#define EM_AVR32 0x18ad /* Atmel AVR32 */
43 46
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cd67aec187d9..cd1629170103 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1093,6 +1093,11 @@ struct ethtool_sfeatures {
1093 * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values, 1093 * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
1094 * respectively. For example, if the device supports HWTSTAMP_TX_ON, 1094 * respectively. For example, if the device supports HWTSTAMP_TX_ON,
1095 * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set. 1095 * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
1096 *
1097 * Drivers should only report the filters they actually support without
 1098 * upscaling in the SIOCSHWTSTAMP ioctl. If the SIOCSHWTSTAMP request for
1099 * HWTSTAMP_FILTER_V1_SYNC is supported by HWTSTAMP_FILTER_V1_EVENT, then the
1100 * driver should only report HWTSTAMP_FILTER_V1_EVENT in this op.
1096 */ 1101 */
1097struct ethtool_ts_info { 1102struct ethtool_ts_info {
1098 __u32 cmd; 1103 __u32 cmd;
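
Because drivers are asked to report only the filters they genuinely implement, userspace can take the returned rx_filters bitmap at face value. A minimal sketch of reading it via the existing ETHTOOL_GET_TS_INFO command; the interface name is only an example.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an example */
	ifr.ifr_data = (char *)&info;
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx_filters bitmap: 0x%x\n", info.rx_filters);
	return 0;
}
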
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 2b82d7e30974..96161b8202b5 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -43,7 +43,7 @@ enum {
43 FRA_UNUSED5, 43 FRA_UNUSED5,
44 FRA_FWMARK, /* mark */ 44 FRA_FWMARK, /* mark */
45 FRA_FLOW, /* flow/class id */ 45 FRA_FLOW, /* flow/class id */
46 FRA_UNUSED6, 46 FRA_TUN_ID,
47 FRA_SUPPRESS_IFGROUP, 47 FRA_SUPPRESS_IFGROUP,
48 FRA_SUPPRESS_PREFIXLEN, 48 FRA_SUPPRESS_PREFIXLEN,
49 FRA_TABLE, /* Extended table id */ 49 FRA_TABLE, /* Extended table id */
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index c06742d52856..ab055d8cddef 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/if.h> 4#include <linux/if.h>
5#include <linux/ioctl.h> 5#include <linux/ioctl.h>
6#include <linux/types.h>
6 7
7struct gsm_config 8struct gsm_config
8{ 9{
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index eaaea6208b42..3635b7797508 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -182,6 +182,7 @@ struct br_mdb_entry {
182#define MDB_TEMPORARY 0 182#define MDB_TEMPORARY 0
183#define MDB_PERMANENT 1 183#define MDB_PERMANENT 1
184 __u8 state; 184 __u8 state;
185 __u16 vid;
185 struct { 186 struct {
186 union { 187 union {
187 __be32 ip4; 188 __be32 ip4;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index aa63ed023c2b..ea9221b0331a 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -42,6 +42,7 @@
42#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */ 42#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
43#define ETH_P_PUP 0x0200 /* Xerox PUP packet */ 43#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
44#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */ 44#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
45#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */
45#define ETH_P_IP 0x0800 /* Internet Protocol packet */ 46#define ETH_P_IP 0x0800 /* Internet Protocol packet */
46#define ETH_P_X25 0x0805 /* CCITT X.25 */ 47#define ETH_P_X25 0x0805 /* CCITT X.25 */
47#define ETH_P_ARP 0x0806 /* Address Resolution packet */ 48#define ETH_P_ARP 0x0806 /* Address Resolution packet */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 2c7e8e3d3981..3a5f263cfc2f 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -148,6 +148,7 @@ enum {
148 IFLA_PHYS_SWITCH_ID, 148 IFLA_PHYS_SWITCH_ID,
149 IFLA_LINK_NETNSID, 149 IFLA_LINK_NETNSID,
150 IFLA_PHYS_PORT_NAME, 150 IFLA_PHYS_PORT_NAME,
151 IFLA_PROTO_DOWN,
151 __IFLA_MAX 152 __IFLA_MAX
152}; 153};
153 154
@@ -229,6 +230,8 @@ enum {
229 IFLA_BR_AGEING_TIME, 230 IFLA_BR_AGEING_TIME,
230 IFLA_BR_STP_STATE, 231 IFLA_BR_STP_STATE,
231 IFLA_BR_PRIORITY, 232 IFLA_BR_PRIORITY,
233 IFLA_BR_VLAN_FILTERING,
234 IFLA_BR_VLAN_PROTOCOL,
232 __IFLA_BR_MAX, 235 __IFLA_BR_MAX,
233}; 236};
234 237
@@ -339,6 +342,15 @@ enum macvlan_macaddr_mode {
339 342
340#define MACVLAN_FLAG_NOPROMISC 1 343#define MACVLAN_FLAG_NOPROMISC 1
341 344
345/* VRF section */
346enum {
347 IFLA_VRF_UNSPEC,
348 IFLA_VRF_TABLE,
349 __IFLA_VRF_MAX
350};
351
352#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
353
342/* IPVLAN section */ 354/* IPVLAN section */
343enum { 355enum {
344 IFLA_IPVLAN_UNSPEC, 356 IFLA_IPVLAN_UNSPEC,
@@ -381,6 +393,7 @@ enum {
381 IFLA_VXLAN_REMCSUM_RX, 393 IFLA_VXLAN_REMCSUM_RX,
382 IFLA_VXLAN_GBP, 394 IFLA_VXLAN_GBP,
383 IFLA_VXLAN_REMCSUM_NOPARTIAL, 395 IFLA_VXLAN_REMCSUM_NOPARTIAL,
396 IFLA_VXLAN_COLLECT_METADATA,
384 __IFLA_VXLAN_MAX 397 __IFLA_VXLAN_MAX
385}; 398};
386#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) 399#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -397,6 +410,8 @@ enum {
397 IFLA_GENEVE_REMOTE, 410 IFLA_GENEVE_REMOTE,
398 IFLA_GENEVE_TTL, 411 IFLA_GENEVE_TTL,
399 IFLA_GENEVE_TOS, 412 IFLA_GENEVE_TOS,
413 IFLA_GENEVE_PORT, /* destination port */
414 IFLA_GENEVE_COLLECT_METADATA,
400 __IFLA_GENEVE_MAX 415 __IFLA_GENEVE_MAX
401}; 416};
402#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) 417#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
@@ -431,6 +446,7 @@ enum {
431 IFLA_BOND_AD_ACTOR_SYS_PRIO, 446 IFLA_BOND_AD_ACTOR_SYS_PRIO,
432 IFLA_BOND_AD_USER_PORT_KEY, 447 IFLA_BOND_AD_USER_PORT_KEY,
433 IFLA_BOND_AD_ACTOR_SYSTEM, 448 IFLA_BOND_AD_ACTOR_SYSTEM,
449 IFLA_BOND_TLB_DYNAMIC_LB,
434 __IFLA_BOND_MAX, 450 __IFLA_BOND_MAX,
435}; 451};
436 452
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index d3d715f8c88f..9e7edfd8141e 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -55,6 +55,7 @@ struct sockaddr_ll {
55#define PACKET_TX_HAS_OFF 19 55#define PACKET_TX_HAS_OFF 19
56#define PACKET_QDISC_BYPASS 20 56#define PACKET_QDISC_BYPASS 20
57#define PACKET_ROLLOVER_STATS 21 57#define PACKET_ROLLOVER_STATS 21
58#define PACKET_FANOUT_DATA 22
58 59
59#define PACKET_FANOUT_HASH 0 60#define PACKET_FANOUT_HASH 0
60#define PACKET_FANOUT_LB 1 61#define PACKET_FANOUT_LB 1
@@ -62,6 +63,8 @@ struct sockaddr_ll {
62#define PACKET_FANOUT_ROLLOVER 3 63#define PACKET_FANOUT_ROLLOVER 3
63#define PACKET_FANOUT_RND 4 64#define PACKET_FANOUT_RND 4
64#define PACKET_FANOUT_QM 5 65#define PACKET_FANOUT_QM 5
66#define PACKET_FANOUT_CBPF 6
67#define PACKET_FANOUT_EBPF 7
65#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000 68#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
66#define PACKET_FANOUT_FLAG_DEFRAG 0x8000 69#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
67 70
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index bd3cc11a431f..af4de90ba27d 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -112,6 +112,7 @@ enum {
112 IFLA_GRE_ENCAP_FLAGS, 112 IFLA_GRE_ENCAP_FLAGS,
113 IFLA_GRE_ENCAP_SPORT, 113 IFLA_GRE_ENCAP_SPORT,
114 IFLA_GRE_ENCAP_DPORT, 114 IFLA_GRE_ENCAP_DPORT,
115 IFLA_GRE_COLLECT_METADATA,
115 __IFLA_GRE_MAX, 116 __IFLA_GRE_MAX,
116}; 117};
117 118
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
new file mode 100644
index 000000000000..7ed9e670814e
--- /dev/null
+++ b/include/uapi/linux/ila.h
@@ -0,0 +1,15 @@
1/* ila.h - ILA Interface */
2
3#ifndef _UAPI_LINUX_ILA_H
4#define _UAPI_LINUX_ILA_H
5
6enum {
7 ILA_ATTR_UNSPEC,
8 ILA_ATTR_LOCATOR, /* u64 */
9
10 __ILA_ATTR_MAX,
11};
12
13#define ILA_ATTR_MAX (__ILA_ATTR_MAX - 1)
14
15#endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 3199243f2028..391395c06c7e 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -406,6 +406,11 @@ enum {
406 IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */ 406 IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */
407 IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */ 407 IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */
408 IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */ 408 IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */
409 IPVS_DAEMON_ATTR_SYNC_MAXLEN, /* UDP Payload Size */
410 IPVS_DAEMON_ATTR_MCAST_GROUP, /* IPv4 Multicast Address */
411 IPVS_DAEMON_ATTR_MCAST_GROUP6, /* IPv6 Multicast Address */
412 IPVS_DAEMON_ATTR_MCAST_PORT, /* Multicast Port (base) */
413 IPVS_DAEMON_ATTR_MCAST_TTL, /* Multicast TTL */
409 __IPVS_DAEMON_ATTR_MAX, 414 __IPVS_DAEMON_ATTR_MAX,
410}; 415};
411 416
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 5efa54ae567c..38b4fef20219 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -171,6 +171,9 @@ enum {
171 DEVCONF_USE_OPTIMISTIC, 171 DEVCONF_USE_OPTIMISTIC,
172 DEVCONF_ACCEPT_RA_MTU, 172 DEVCONF_ACCEPT_RA_MTU,
173 DEVCONF_STABLE_SECRET, 173 DEVCONF_STABLE_SECRET,
174 DEVCONF_USE_OIF_ADDRS_ONLY,
175 DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
176 DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
174 DEVCONF_MAX 177 DEVCONF_MAX
175}; 178};
176 179
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index a6c4962e5d46..5da5f8751ce7 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -33,6 +33,7 @@
33#define KPF_THP 22 33#define KPF_THP 22
34#define KPF_BALLOON 23 34#define KPF_BALLOON 23
35#define KPF_ZERO_PAGE 24 35#define KPF_ZERO_PAGE 24
36#define KPF_IDLE 25
36 37
37 38
38#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */ 39#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 716ad4ae4d4b..a9256f0331ae 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -237,6 +237,7 @@ struct kvm_run {
237 __u32 count; 237 __u32 count;
238 __u64 data_offset; /* relative to kvm_run start */ 238 __u64 data_offset; /* relative to kvm_run start */
239 } io; 239 } io;
240 /* KVM_EXIT_DEBUG */
240 struct { 241 struct {
241 struct kvm_debug_exit_arch arch; 242 struct kvm_debug_exit_arch arch;
242 } debug; 243 } debug;
@@ -285,6 +286,7 @@ struct kvm_run {
285 __u32 data; 286 __u32 data;
286 __u8 is_write; 287 __u8 is_write;
287 } dcr; 288 } dcr;
289 /* KVM_EXIT_INTERNAL_ERROR */
288 struct { 290 struct {
289 __u32 suberror; 291 __u32 suberror;
290 /* Available with KVM_CAP_INTERNAL_ERROR_DATA: */ 292 /* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
@@ -295,6 +297,7 @@ struct kvm_run {
295 struct { 297 struct {
296 __u64 gprs[32]; 298 __u64 gprs[32];
297 } osi; 299 } osi;
300 /* KVM_EXIT_PAPR_HCALL */
298 struct { 301 struct {
299 __u64 nr; 302 __u64 nr;
300 __u64 ret; 303 __u64 ret;
@@ -317,6 +320,7 @@ struct kvm_run {
317 struct { 320 struct {
318#define KVM_SYSTEM_EVENT_SHUTDOWN 1 321#define KVM_SYSTEM_EVENT_SHUTDOWN 1
319#define KVM_SYSTEM_EVENT_RESET 2 322#define KVM_SYSTEM_EVENT_RESET 2
323#define KVM_SYSTEM_EVENT_CRASH 3
320 __u32 type; 324 __u32 type;
321 __u64 flags; 325 __u64 flags;
322 } system_event; 326 } system_event;
@@ -481,6 +485,7 @@ struct kvm_s390_psw {
481 ((ai) << 26)) 485 ((ai) << 26))
482#define KVM_S390_INT_IO_MIN 0x00000000u 486#define KVM_S390_INT_IO_MIN 0x00000000u
483#define KVM_S390_INT_IO_MAX 0xfffdffffu 487#define KVM_S390_INT_IO_MAX 0xfffdffffu
488#define KVM_S390_INT_IO_AI_MASK 0x04000000u
484 489
485 490
486struct kvm_s390_interrupt { 491struct kvm_s390_interrupt {
@@ -817,6 +822,8 @@ struct kvm_ppc_smmu_info {
817#define KVM_CAP_DISABLE_QUIRKS 116 822#define KVM_CAP_DISABLE_QUIRKS 116
818#define KVM_CAP_X86_SMM 117 823#define KVM_CAP_X86_SMM 117
819#define KVM_CAP_MULTI_ADDRESS_SPACE 118 824#define KVM_CAP_MULTI_ADDRESS_SPACE 118
825#define KVM_CAP_GUEST_DEBUG_HW_BPS 119
826#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
820 827
821#ifdef KVM_CAP_IRQ_ROUTING 828#ifdef KVM_CAP_IRQ_ROUTING
822 829
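
The annotations above tie each union member to its exit reason; the new KVM_SYSTEM_EVENT_CRASH arrives as a KVM_EXIT_SYSTEM_EVENT. A VMM-side sketch of recognizing it, with the surrounding KVM_RUN loop elided; run points at the vcpu's mmap'ed struct kvm_run.

#include <linux/kvm.h>

/* Returns non-zero when the guest reported a crash via the new event type. */
static int guest_crashed(const struct kvm_run *run)
{
	return run->exit_reason == KVM_EXIT_SYSTEM_EVENT &&
	       run->system_event.type == KVM_SYSTEM_EVENT_CRASH;
}
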
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644
index 000000000000..34141a5dfe74
--- /dev/null
+++ b/include/uapi/linux/lwtunnel.h
@@ -0,0 +1,47 @@
1#ifndef _UAPI_LWTUNNEL_H_
2#define _UAPI_LWTUNNEL_H_
3
4#include <linux/types.h>
5
6enum lwtunnel_encap_types {
7 LWTUNNEL_ENCAP_NONE,
8 LWTUNNEL_ENCAP_MPLS,
9 LWTUNNEL_ENCAP_IP,
10 LWTUNNEL_ENCAP_ILA,
11 LWTUNNEL_ENCAP_IP6,
12 __LWTUNNEL_ENCAP_MAX,
13};
14
15#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
16
17enum lwtunnel_ip_t {
18 LWTUNNEL_IP_UNSPEC,
19 LWTUNNEL_IP_ID,
20 LWTUNNEL_IP_DST,
21 LWTUNNEL_IP_SRC,
22 LWTUNNEL_IP_TTL,
23 LWTUNNEL_IP_TOS,
24 LWTUNNEL_IP_SPORT,
25 LWTUNNEL_IP_DPORT,
26 LWTUNNEL_IP_FLAGS,
27 __LWTUNNEL_IP_MAX,
28};
29
30#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
31
32enum lwtunnel_ip6_t {
33 LWTUNNEL_IP6_UNSPEC,
34 LWTUNNEL_IP6_ID,
35 LWTUNNEL_IP6_DST,
36 LWTUNNEL_IP6_SRC,
37 LWTUNNEL_IP6_HOPLIMIT,
38 LWTUNNEL_IP6_TC,
39 LWTUNNEL_IP6_SPORT,
40 LWTUNNEL_IP6_DPORT,
41 LWTUNNEL_IP6_FLAGS,
42 __LWTUNNEL_IP6_MAX,
43};
44
45#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
46
47#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index bc0d8b69c49e..7c3b64f6a215 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -107,4 +107,23 @@ struct mei_connect_client_data {
107 }; 107 };
108}; 108};
109 109
110/**
111 * DOC: set and unset event notification for a connected client
112 *
113 * The IOCTL argument is 1 for enabling event notification and 0 for
114 * disabling the service
 115 * Return: -EOPNOTSUPP if the device doesn't support the feature
116 */
117#define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32)
118
119/**
120 * DOC: retrieve notification
121 *
 122 * The IOCTL output argument is 1 if an event is pending and 0 otherwise;
 123 * the ioctl has to be called in order to acknowledge the pending event
124 *
 125 * Return: -EOPNOTSUPP if the device doesn't support the feature
126 */
127#define IOCTL_MEI_NOTIFY_GET _IOR('H', 0x03, __u32)
128
110#endif /* _LINUX_MEI_H */ 129#endif /* _LINUX_MEI_H */
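
A minimal sketch of arming and polling the new notification service; IOCTL_MEI_NOTIFY_SET/GET come from this hunk, while the /dev/mei0 path and the preceding IOCTL_MEI_CONNECT_CLIENT step are assumptions about the existing MEI character-device flow.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/mei.h>

int main(void)
{
	int fd = open("/dev/mei0", O_RDWR);	/* device path is an assumption */
	__u32 enable = 1, pending = 0;

	if (fd < 0)
		return 1;
	/* ... IOCTL_MEI_CONNECT_CLIENT would normally be issued here ... */
	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable))
		perror("NOTIFY_SET");
	if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) == 0)
		printf("event pending: %u\n", pending);
	return 0;
}
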
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index 139d4dd1cab8..24a6cb1aec86 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -41,4 +41,6 @@ struct mpls_label {
41#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */ 41#define MPLS_LABEL_OAMALERT 14 /* RFC3429 */
42#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */ 42#define MPLS_LABEL_EXTENSION 15 /* RFC7274 */
43 43
44#define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */
45
44#endif /* _UAPI_MPLS_H */ 46#endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644
index 000000000000..d80a0498f77e
--- /dev/null
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -0,0 +1,28 @@
1/*
2 * mpls tunnel api
3 *
4 * Authors:
5 * Roopa Prabhu <roopa@cumulusnetworks.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
14#define _UAPI_LINUX_MPLS_IPTUNNEL_H
15
16/* MPLS tunnel attributes
17 * [RTA_ENCAP] = {
18 * [MPLS_IPTUNNEL_DST]
19 * }
20 */
21enum {
22 MPLS_IPTUNNEL_UNSPEC,
23 MPLS_IPTUNNEL_DST,
24 __MPLS_IPTUNNEL_MAX,
25};
26#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
27
28#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 2b94ea2287bb..5b4a4be06e2b 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -87,7 +87,7 @@ struct nd_cmd_ars_status {
87 __u32 handle; 87 __u32 handle;
88 __u32 flags; 88 __u32 flags;
89 __u64 err_address; 89 __u64 err_address;
90 __u64 mask; 90 __u64 length;
91 } __packed records[0]; 91 } __packed records[0];
92} __packed; 92} __packed;
93 93
@@ -111,6 +111,11 @@ enum {
111 ND_CMD_VENDOR = 9, 111 ND_CMD_VENDOR = 9,
112}; 112};
113 113
114enum {
115 ND_ARS_VOLATILE = 1,
116 ND_ARS_PERSISTENT = 2,
117};
118
114static inline const char *nvdimm_bus_cmd_name(unsigned cmd) 119static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
115{ 120{
116 static const char * const names[] = { 121 static const char * const names[] = {
@@ -194,4 +199,9 @@ enum nd_driver_flags {
194enum { 199enum {
195 ND_MIN_NAMESPACE_SIZE = 0x00400000, 200 ND_MIN_NAMESPACE_SIZE = 0x00400000,
196}; 201};
202
203enum ars_masks {
204 ARS_STATUS_MASK = 0x0000FFFF,
205 ARS_EXT_STATUS_SHIFT = 16,
206};
197#endif /* __NDCTL_H__ */ 207#endif /* __NDCTL_H__ */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 2e35c61bbdd1..788655bfa0f3 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -106,6 +106,7 @@ struct ndt_stats {
106 __u64 ndts_rcv_probes_ucast; 106 __u64 ndts_rcv_probes_ucast;
107 __u64 ndts_periodic_gc_runs; 107 __u64 ndts_periodic_gc_runs;
108 __u64 ndts_forced_gc_runs; 108 __u64 ndts_forced_gc_runs;
109 __u64 ndts_table_fulls;
109}; 110};
110 111
111enum { 112enum {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index ceeefe6681b5..ed4e776e1242 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -13,6 +13,8 @@ enum sctp_conntrack {
13 SCTP_CONNTRACK_SHUTDOWN_SENT, 13 SCTP_CONNTRACK_SHUTDOWN_SENT,
14 SCTP_CONNTRACK_SHUTDOWN_RECD, 14 SCTP_CONNTRACK_SHUTDOWN_RECD,
15 SCTP_CONNTRACK_SHUTDOWN_ACK_SENT, 15 SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
16 SCTP_CONNTRACK_HEARTBEAT_SENT,
17 SCTP_CONNTRACK_HEARTBEAT_ACKED,
16 SCTP_CONNTRACK_MAX 18 SCTP_CONNTRACK_MAX
17}; 19};
18 20
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a99e6a997140..d8c8a7c9d88a 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -756,16 +756,25 @@ enum nft_ct_attributes {
756}; 756};
757#define NFTA_CT_MAX (__NFTA_CT_MAX - 1) 757#define NFTA_CT_MAX (__NFTA_CT_MAX - 1)
758 758
759enum nft_limit_type {
760 NFT_LIMIT_PKTS,
761 NFT_LIMIT_PKT_BYTES
762};
763
759/** 764/**
760 * enum nft_limit_attributes - nf_tables limit expression netlink attributes 765 * enum nft_limit_attributes - nf_tables limit expression netlink attributes
761 * 766 *
762 * @NFTA_LIMIT_RATE: refill rate (NLA_U64) 767 * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
763 * @NFTA_LIMIT_UNIT: refill unit (NLA_U64) 768 * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
769 * @NFTA_LIMIT_BURST: burst (NLA_U32)
770 * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
764 */ 771 */
765enum nft_limit_attributes { 772enum nft_limit_attributes {
766 NFTA_LIMIT_UNSPEC, 773 NFTA_LIMIT_UNSPEC,
767 NFTA_LIMIT_RATE, 774 NFTA_LIMIT_RATE,
768 NFTA_LIMIT_UNIT, 775 NFTA_LIMIT_UNIT,
776 NFTA_LIMIT_BURST,
777 NFTA_LIMIT_TYPE,
769 __NFTA_LIMIT_MAX 778 __NFTA_LIMIT_MAX
770}; 779};
771#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1) 780#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
@@ -936,6 +945,20 @@ enum nft_redir_attributes {
936#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1) 945#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1)
937 946
938/** 947/**
948 * enum nft_dup_attributes - nf_tables dup expression netlink attributes
949 *
950 * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
951 * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_register)
952 */
953enum nft_dup_attributes {
954 NFTA_DUP_UNSPEC,
955 NFTA_DUP_SREG_ADDR,
956 NFTA_DUP_SREG_DEV,
957 __NFTA_DUP_MAX
958};
959#define NFTA_DUP_MAX (__NFTA_DUP_MAX - 1)
960
961/**
939 * enum nft_gen_attributes - nf_tables ruleset generation attributes 962 * enum nft_gen_attributes - nf_tables ruleset generation attributes
940 * 963 *
941 * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32) 964 * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index acad6c52a652..c1a4e1441a25 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -61,6 +61,7 @@ enum ctattr_tuple {
61 CTA_TUPLE_UNSPEC, 61 CTA_TUPLE_UNSPEC,
62 CTA_TUPLE_IP, 62 CTA_TUPLE_IP,
63 CTA_TUPLE_PROTO, 63 CTA_TUPLE_PROTO,
64 CTA_TUPLE_ZONE,
64 __CTA_TUPLE_MAX 65 __CTA_TUPLE_MAX
65}; 66};
66#define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1) 67#define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 1ab0b97b3a1e..f2c10dc140d6 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -92,6 +92,8 @@ enum ctattr_timeout_sctp {
92 CTA_TIMEOUT_SCTP_SHUTDOWN_SENT, 92 CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
93 CTA_TIMEOUT_SCTP_SHUTDOWN_RECD, 93 CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
94 CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, 94 CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
95 CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
96 CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
95 __CTA_TIMEOUT_SCTP_MAX 97 __CTA_TIMEOUT_SCTP_MAX
96}; 98};
97#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1) 99#define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_CT.h b/include/uapi/linux/netfilter/xt_CT.h
index 5a688c1ca4d7..9e520418b858 100644
--- a/include/uapi/linux/netfilter/xt_CT.h
+++ b/include/uapi/linux/netfilter/xt_CT.h
@@ -6,7 +6,13 @@
6enum { 6enum {
7 XT_CT_NOTRACK = 1 << 0, 7 XT_CT_NOTRACK = 1 << 0,
8 XT_CT_NOTRACK_ALIAS = 1 << 1, 8 XT_CT_NOTRACK_ALIAS = 1 << 1,
9 XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS, 9 XT_CT_ZONE_DIR_ORIG = 1 << 2,
10 XT_CT_ZONE_DIR_REPL = 1 << 3,
11 XT_CT_ZONE_MARK = 1 << 4,
12
13 XT_CT_MASK = XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
14 XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
15 XT_CT_ZONE_MARK,
10}; 16};
11 17
12struct xt_ct_target_info { 18struct xt_ct_target_info {
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
index 205ed62e4605..cd2e940c8bf5 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -10,7 +10,9 @@ enum ip6t_reject_with {
10 IP6T_ICMP6_ADDR_UNREACH, 10 IP6T_ICMP6_ADDR_UNREACH,
11 IP6T_ICMP6_PORT_UNREACH, 11 IP6T_ICMP6_PORT_UNREACH,
12 IP6T_ICMP6_ECHOREPLY, 12 IP6T_ICMP6_ECHOREPLY,
13 IP6T_TCP_RESET 13 IP6T_TCP_RESET,
14 IP6T_ICMP6_POLICY_FAIL,
15 IP6T_ICMP6_REJECT_ROUTE
14}; 16};
15 17
16struct ip6t_reject_info { 18struct ip6t_reject_info {
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index cf6a65cccbdf..6f3fe16cd22a 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -110,6 +110,7 @@ struct nlmsgerr {
110#define NETLINK_TX_RING 7 110#define NETLINK_TX_RING 7
111#define NETLINK_LISTEN_ALL_NSID 8 111#define NETLINK_LISTEN_ALL_NSID 8
112#define NETLINK_LIST_MEMBERSHIPS 9 112#define NETLINK_LIST_MEMBERSHIPS 9
113#define NETLINK_CAP_ACK 10
113 114
114struct nl_pktinfo { 115struct nl_pktinfo {
115 __u32 group; 116 __u32 group;
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 2119c7c274d7..2b871e0858d9 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -15,7 +15,7 @@
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17 17
18#define NFS4_BITMAP_SIZE 2 18#define NFS4_BITMAP_SIZE 3
19#define NFS4_VERIFIER_SIZE 8 19#define NFS4_VERIFIER_SIZE 8
20#define NFS4_STATEID_SEQID_SIZE 4 20#define NFS4_STATEID_SEQID_SIZE 4
21#define NFS4_STATEID_OTHER_SIZE 12 21#define NFS4_STATEID_OTHER_SIZE 12
diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h
index 9bb9771a107f..552726631162 100644
--- a/include/uapi/linux/nfsacl.h
+++ b/include/uapi/linux/nfsacl.h
@@ -22,6 +22,7 @@
22#define NFS_ACLCNT 0x0002 22#define NFS_ACLCNT 0x0002
23#define NFS_DFACL 0x0004 23#define NFS_DFACL 0x0004
24#define NFS_DFACLCNT 0x0008 24#define NFS_DFACLCNT 0x0008
25#define NFS_ACL_MASK 0x000f
25 26
26/* Flag for Default ACL entries */ 27/* Flag for Default ACL entries */
27#define NFS_ACL_DEFAULT 0x1000 28#define NFS_ACL_DEFAULT 0x1000
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 732b32e92b02..8864194a4151 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -584,5 +584,6 @@ struct nvme_passthru_cmd {
584#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) 584#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
585#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) 585#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
586#define NVME_IOCTL_RESET _IO('N', 0x44) 586#define NVME_IOCTL_RESET _IO('N', 0x44)
587#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
587 588
588#endif /* _UAPI_LINUX_NVME_H */ 589#endif /* _UAPI_LINUX_NVME_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1dab77601c21..32e07d8cbaf4 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -164,6 +164,9 @@ enum ovs_packet_cmd {
164 * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the 164 * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
165 * output port is actually a tunnel port. Contains the output tunnel key 165 * output port is actually a tunnel port. Contains the output tunnel key
166 * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes. 166 * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
 167 * @OVS_PACKET_ATTR_MRU: Present for an %OVS_PACKET_CMD_ACTION and an
 168 * %OVS_PACKET_ATTR_USERSPACE action to specify the maximum received fragment
 169 * size.
167 * 170 *
168 * These attributes follow the &struct ovs_header within the Generic Netlink 171 * These attributes follow the &struct ovs_header within the Generic Netlink
169 * payload for %OVS_PACKET_* commands. 172 * payload for %OVS_PACKET_* commands.
@@ -180,6 +183,7 @@ enum ovs_packet_attr {
180 OVS_PACKET_ATTR_UNUSED2, 183 OVS_PACKET_ATTR_UNUSED2,
181 OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe, 184 OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
182 error logging should be suppressed. */ 185 error logging should be suppressed. */
186 OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
183 __OVS_PACKET_ATTR_MAX 187 __OVS_PACKET_ATTR_MAX
184}; 188};
185 189
@@ -319,9 +323,13 @@ enum ovs_key_attr {
319 OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. 323 OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls.
320 * The implementation may restrict 324 * The implementation may restrict
321 * the accepted length of the array. */ 325 * the accepted length of the array. */
326 OVS_KEY_ATTR_CT_STATE, /* u8 bitmask of OVS_CS_F_* */
327 OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */
328 OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */
329 OVS_KEY_ATTR_CT_LABEL, /* 16-octet connection tracking label */
322 330
323#ifdef __KERNEL__ 331#ifdef __KERNEL__
324 OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ 332 OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */
325#endif 333#endif
326 __OVS_KEY_ATTR_MAX 334 __OVS_KEY_ATTR_MAX
327}; 335};
@@ -431,6 +439,20 @@ struct ovs_key_nd {
431 __u8 nd_tll[ETH_ALEN]; 439 __u8 nd_tll[ETH_ALEN];
432}; 440};
433 441
442#define OVS_CT_LABEL_LEN 16
443struct ovs_key_ct_label {
444 __u8 ct_label[OVS_CT_LABEL_LEN];
445};
446
447/* OVS_KEY_ATTR_CT_STATE flags */
448#define OVS_CS_F_NEW 0x01 /* Beginning of a new connection. */
449#define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */
450#define OVS_CS_F_RELATED 0x04 /* Related to an established
451 * connection. */
452#define OVS_CS_F_INVALID 0x20 /* Could not track connection. */
453#define OVS_CS_F_REPLY_DIR 0x40 /* Flow is in the reply direction. */
454#define OVS_CS_F_TRACKED 0x80 /* Conntrack has occurred. */
455
434/** 456/**
435 * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. 457 * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
436 * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow 458 * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
@@ -595,6 +617,39 @@ struct ovs_action_hash {
595}; 617};
596 618
597/** 619/**
620 * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
621 * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
622 * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
623 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
624 * mask, the corresponding bit in the value is copied to the connection
625 * tracking mark field in the connection.
626 * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
627 * mask. For each bit set in the mask, the corresponding bit in the value is
628 * copied to the connection tracking label field in the connection.
629 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
630 */
631enum ovs_ct_attr {
632 OVS_CT_ATTR_UNSPEC,
633 OVS_CT_ATTR_FLAGS, /* u8 bitmask of OVS_CT_F_*. */
634 OVS_CT_ATTR_ZONE, /* u16 zone id. */
635 OVS_CT_ATTR_MARK, /* mark to associate with this connection. */
636 OVS_CT_ATTR_LABEL, /* label to associate with this connection. */
637 OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of
638 related connections. */
639 __OVS_CT_ATTR_MAX
640};
641
642#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
643
644/*
645 * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
646 * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
647 * future packets for the same connection to be identified as 'established'
648 * or 'related'.
649 */
650#define OVS_CT_F_COMMIT 0x01
651
652/**
598 * enum ovs_action_attr - Action types. 653 * enum ovs_action_attr - Action types.
599 * 654 *
600 * @OVS_ACTION_ATTR_OUTPUT: Output packet to port. 655 * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
@@ -623,6 +678,8 @@ struct ovs_action_hash {
623 * indicate the new packet contents. This could potentially still be 678 * indicate the new packet contents. This could potentially still be
624 * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there 679 * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there
625 * is no MPLS label stack, as determined by ethertype, no action is taken. 680 * is no MPLS label stack, as determined by ethertype, no action is taken.
681 * @OVS_ACTION_ATTR_CT: Track the connection. Populate the conntrack-related
682 * entries in the flow key.
626 * 683 *
627 * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all 684 * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
628 * fields within a header are modifiable, e.g. the IPv4 protocol and fragment 685 * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -648,6 +705,7 @@ enum ovs_action_attr {
648 * data immediately followed by a mask. 705 * data immediately followed by a mask.
649 * The data must be zero for the unmasked 706 * The data must be zero for the unmasked
650 * bits. */ 707 * bits. */
708 OVS_ACTION_ATTR_CT, /* One nested OVS_CT_ATTR_* . */
651 709
652 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted 710 __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
653 * from userspace. */ 711 * from userspace. */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index d97f84c080da..2881145cda86 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -330,7 +330,8 @@ struct perf_event_attr {
330 mmap2 : 1, /* include mmap with inode data */ 330 mmap2 : 1, /* include mmap with inode data */
331 comm_exec : 1, /* flag comm events that are due to an exec */ 331 comm_exec : 1, /* flag comm events that are due to an exec */
332 use_clockid : 1, /* use @clockid for time fields */ 332 use_clockid : 1, /* use @clockid for time fields */
333 __reserved_1 : 38; 333 context_switch : 1, /* context switch data */
334 __reserved_1 : 37;
334 335
335 union { 336 union {
336 __u32 wakeup_events; /* wakeup every n events */ 337 __u32 wakeup_events; /* wakeup every n events */
@@ -572,9 +573,11 @@ struct perf_event_mmap_page {
572/* 573/*
573 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on 574 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
574 * different events so can reuse the same bit position. 575 * different events so can reuse the same bit position.
576 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
575 */ 577 */
576#define PERF_RECORD_MISC_MMAP_DATA (1 << 13) 578#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
577#define PERF_RECORD_MISC_COMM_EXEC (1 << 13) 579#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
580#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
578/* 581/*
579 * Indicates that the content of PERF_SAMPLE_IP points to 582 * Indicates that the content of PERF_SAMPLE_IP points to
580 * the actual instruction that triggered the event. See also 583 * the actual instruction that triggered the event. See also
@@ -818,6 +821,32 @@ enum perf_event_type {
818 */ 821 */
819 PERF_RECORD_LOST_SAMPLES = 13, 822 PERF_RECORD_LOST_SAMPLES = 13,
820 823
824 /*
825 * Records a context switch in or out (flagged by
826 * PERF_RECORD_MISC_SWITCH_OUT). See also
827 * PERF_RECORD_SWITCH_CPU_WIDE.
828 *
829 * struct {
830 * struct perf_event_header header;
831 * struct sample_id sample_id;
832 * };
833 */
834 PERF_RECORD_SWITCH = 14,
835
836 /*
837 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
838 * next_prev_tid that are the next (switching out) or previous
839 * (switching in) pid/tid.
840 *
841 * struct {
842 * struct perf_event_header header;
843 * u32 next_prev_pid;
844 * u32 next_prev_tid;
845 * struct sample_id sample_id;
846 * };
847 */
848 PERF_RECORD_SWITCH_CPU_WIDE = 15,
849
821 PERF_RECORD_MAX, /* non-ABI */ 850 PERF_RECORD_MAX, /* non-ABI */
822}; 851};
823 852
@@ -922,6 +951,7 @@ union perf_mem_data_src {
922 * 951 *
923 * in_tx: running in a hardware transaction 952 * in_tx: running in a hardware transaction
924 * abort: aborting a hardware transaction 953 * abort: aborting a hardware transaction
954 * cycles: cycles from last branch (or 0 if not supported)
925 */ 955 */
926struct perf_branch_entry { 956struct perf_branch_entry {
927 __u64 from; 957 __u64 from;
@@ -930,7 +960,8 @@ struct perf_branch_entry {
930 predicted:1,/* target predicted */ 960 predicted:1,/* target predicted */
931 in_tx:1, /* in transaction */ 961 in_tx:1, /* in transaction */
932 abort:1, /* transaction abort */ 962 abort:1, /* transaction abort */
933 reserved:60; 963 cycles:16, /* cycle count to last branch */
964 reserved:44;
934}; 965};
935 966
936#endif /* _UAPI_LINUX_PERF_EVENT_H */ 967#endif /* _UAPI_LINUX_PERF_EVENT_H */
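
A sketch of requesting the new context-switch side-band records through perf_event_open(); the dummy software event and sample_id_all setup follow the usual perf-tools pattern, and reading the mmap ring buffer is left out.

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a dummy event on 'pid' that only produces PERF_RECORD_SWITCH records. */
static int open_switch_events(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.sample_id_all = 1;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr.context_switch = 1;	/* new attribute bit from this hunk */

	return syscall(SYS_perf_event_open, &attr, pid, -1, -1, 0);
}
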
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 31891d9535e2..a8d0759a9e40 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -190,4 +190,11 @@ struct prctl_mm_map {
190# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */ 190# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
191# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */ 191# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
192 192
193/* Control the ambient capability set */
194#define PR_CAP_AMBIENT 47
195# define PR_CAP_AMBIENT_IS_SET 1
196# define PR_CAP_AMBIENT_RAISE 2
197# define PR_CAP_AMBIENT_LOWER 3
198# define PR_CAP_AMBIENT_CLEAR_ALL 4
199
193#endif /* _LINUX_PRCTL_H */ 200#endif /* _LINUX_PRCTL_H */
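
A hedged sketch of raising an ambient capability with the new prctl. The argument layout (operation in arg2, capability number in arg3, remaining args zero) is an assumption about the interface, and the capability must already be in the caller's permitted and inheritable sets.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/capability.h>

int main(void)
{
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
		  CAP_NET_BIND_SERVICE, 0, 0))
		perror("PR_CAP_AMBIENT_RAISE");
	else if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET,
		       CAP_NET_BIND_SERVICE, 0, 0) == 1)
		printf("ambient CAP_NET_BIND_SERVICE is set\n");
	return 0;
}
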
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index cf1019e15f5b..a7a697986614 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -89,9 +89,11 @@ struct ptrace_peeksiginfo_args {
89#define PTRACE_O_TRACESECCOMP (1 << PTRACE_EVENT_SECCOMP) 89#define PTRACE_O_TRACESECCOMP (1 << PTRACE_EVENT_SECCOMP)
90 90
91/* eventless options */ 91/* eventless options */
92#define PTRACE_O_EXITKILL (1 << 20) 92#define PTRACE_O_EXITKILL (1 << 20)
93#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)
93 94
94#define PTRACE_O_MASK (0x000000ff | PTRACE_O_EXITKILL) 95#define PTRACE_O_MASK (\
96 0x000000ff | PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP)
95 97
96#include <asm/ptrace.h> 98#include <asm/ptrace.h>
97 99
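
A sketch of how a checkpoint/restore style tracer might set the new option; PTRACE_ATTACH and PTRACE_SETOPTIONS are the long-standing requests, error handling and the privilege requirements are glossed over, and the option value is defined locally in case the installed headers predate this change.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_O_SUSPEND_SECCOMP
#define PTRACE_O_SUSPEND_SECCOMP (1 << 21)	/* value from this hunk */
#endif

/* Attach to 'pid' and suspend its seccomp filtering while traced. */
static int suspend_seccomp(pid_t pid)
{
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL))
		return -1;
	waitpid(pid, NULL, 0);			/* wait for the attach stop */
	return ptrace(PTRACE_SETOPTIONS, pid, NULL,
		      (void *)(PTRACE_O_EXITKILL | PTRACE_O_SUSPEND_SECCOMP));
}
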
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fdd8f07f1d34..702024769c74 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -308,6 +308,8 @@ enum rtattr_type_t {
308 RTA_VIA, 308 RTA_VIA,
309 RTA_NEWDST, 309 RTA_NEWDST,
310 RTA_PREF, 310 RTA_PREF,
311 RTA_ENCAP_TYPE,
312 RTA_ENCAP,
311 __RTA_MAX 313 __RTA_MAX
312}; 314};
313 315
@@ -416,10 +418,13 @@ enum {
416 418
417#define RTAX_MAX (__RTAX_MAX - 1) 419#define RTAX_MAX (__RTAX_MAX - 1)
418 420
419#define RTAX_FEATURE_ECN 0x00000001 421#define RTAX_FEATURE_ECN (1 << 0)
420#define RTAX_FEATURE_SACK 0x00000002 422#define RTAX_FEATURE_SACK (1 << 1)
421#define RTAX_FEATURE_TIMESTAMP 0x00000004 423#define RTAX_FEATURE_TIMESTAMP (1 << 2)
422#define RTAX_FEATURE_ALLFRAG 0x00000008 424#define RTAX_FEATURE_ALLFRAG (1 << 3)
425
426#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
427 RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
423 428
424struct rta_session { 429struct rta_session {
425 __u8 proto; 430 __u8 proto;
diff --git a/include/uapi/linux/securebits.h b/include/uapi/linux/securebits.h
index 985aac9e6bf8..35ac35cef217 100644
--- a/include/uapi/linux/securebits.h
+++ b/include/uapi/linux/securebits.h
@@ -43,9 +43,18 @@
43#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS)) 43#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS))
44#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED)) 44#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
45 45
46/* When set, a process cannot add new capabilities to its ambient set. */
47#define SECURE_NO_CAP_AMBIENT_RAISE 6
48#define SECURE_NO_CAP_AMBIENT_RAISE_LOCKED 7 /* make bit-6 immutable */
49
50#define SECBIT_NO_CAP_AMBIENT_RAISE (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
51#define SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED \
52 (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED))
53
46#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ 54#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
47 issecure_mask(SECURE_NO_SETUID_FIXUP) | \ 55 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
48 issecure_mask(SECURE_KEEP_CAPS)) 56 issecure_mask(SECURE_KEEP_CAPS) | \
57 issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
49#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1) 58#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1)
50 59
51#endif /* _UAPI_LINUX_SECUREBITS_H */ 60#endif /* _UAPI_LINUX_SECUREBITS_H */
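
A minimal sketch of turning on the new ambient-raise lockout and making it immutable; PR_GET_SECUREBITS / PR_SET_SECUREBITS are the existing prctl commands, and changing securebits normally requires CAP_SETPCAP.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/securebits.h>

int main(void)
{
	int bits = prctl(PR_GET_SECUREBITS);

	if (bits < 0)
		return 1;
	bits |= SECBIT_NO_CAP_AMBIENT_RAISE | SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED;
	if (prctl(PR_SET_SECUREBITS, bits))
		perror("PR_SET_SECUREBITS");
	return 0;
}
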
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index eee8968407f0..25a9ad8bcef1 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,8 @@ enum
278 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */ 278 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
279 LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */ 279 LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
280 LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */ 280 LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
281 LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
282 LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
281 __LINUX_MIB_MAX 283 __LINUX_MIB_MAX
282}; 284};
283 285
diff --git a/include/uapi/linux/toshiba.h b/include/uapi/linux/toshiba.h
index e9bef5b2f91e..c58bf4b5bb26 100644
--- a/include/uapi/linux/toshiba.h
+++ b/include/uapi/linux/toshiba.h
@@ -1,6 +1,7 @@
1/* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops 1/* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops
2 * 2 *
3 * Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk) 3 * Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk)
4 * Copyright (c) 2015 Azael Avalos <coproscefalo@gmail.com>
4 * 5 *
5 * Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers 6 * Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers
6 * on making sure the structure is aligned and packed. 7 * on making sure the structure is aligned and packed.
@@ -20,9 +21,18 @@
20#ifndef _UAPI_LINUX_TOSHIBA_H 21#ifndef _UAPI_LINUX_TOSHIBA_H
21#define _UAPI_LINUX_TOSHIBA_H 22#define _UAPI_LINUX_TOSHIBA_H
22 23
23#define TOSH_PROC "/proc/toshiba" 24/*
24#define TOSH_DEVICE "/dev/toshiba" 25 * Toshiba modules paths
25#define TOSH_SMM _IOWR('t', 0x90, int) /* broken: meant 24 bytes */ 26 */
27
28#define TOSH_PROC "/proc/toshiba"
29#define TOSH_DEVICE "/dev/toshiba"
30#define TOSHIBA_ACPI_PROC "/proc/acpi/toshiba"
31#define TOSHIBA_ACPI_DEVICE "/dev/toshiba_acpi"
32
33/*
34 * Toshiba SMM structure
35 */
26 36
27typedef struct { 37typedef struct {
28 unsigned int eax; 38 unsigned int eax;
@@ -33,5 +43,21 @@ typedef struct {
33 unsigned int edi __attribute__ ((packed)); 43 unsigned int edi __attribute__ ((packed));
34} SMMRegisters; 44} SMMRegisters;
35 45
46/*
47 * IOCTLs (0x90 - 0x91)
48 */
49
50#define TOSH_SMM _IOWR('t', 0x90, SMMRegisters)
51/*
52 * Convenience toshiba_acpi command.
53 *
54 * The System Configuration Interface (SCI) is opened/closed internally
 55 * to shield userspace from buggy BIOSes.
56 *
57 * The toshiba_acpi module checks whether the eax register is set with
58 * SCI_GET (0xf300) or SCI_SET (0xf400), returning -EINVAL if not.
59 */
60#define TOSHIBA_ACPI_SCI _IOWR('t', 0x91, SMMRegisters)
61
36 62
37#endif /* _UAPI_LINUX_TOSHIBA_H */ 63#endif /* _UAPI_LINUX_TOSHIBA_H */
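A hedged userspace sketch of the new TOSHIBA_ACPI_SCI ioctl: eax carries SCI_GET (0xf300) as the comment above requires, while the SCI register selected via ebx is purely a placeholder here, since the meaningful values are model-specific:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/toshiba.h>

int main(void)
{
    SMMRegisters regs = { 0 };
    int fd = open(TOSHIBA_ACPI_DEVICE, O_RDWR);
    if (fd < 0) {
        perror(TOSHIBA_ACPI_DEVICE);
        return 1;
    }
    regs.eax = 0xf300;      /* SCI_GET; SCI_SET would be 0xf400 */
    regs.ebx = 0x0000;      /* which SCI register to query -- model specific */
    if (ioctl(fd, TOSHIBA_ACPI_SCI, &regs) < 0)
        perror("TOSHIBA_ACPI_SCI");
    else
        printf("eax=0x%x ecx=0x%x\n", regs.eax, regs.ecx);
    close(fd);
    return 0;
}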
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index aa33fd1b2d4f..f7adc6e01f9e 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -674,9 +674,21 @@ struct usb_otg_descriptor {
674 __u8 bmAttributes; /* support for HNP, SRP, etc */ 674 __u8 bmAttributes; /* support for HNP, SRP, etc */
675} __attribute__ ((packed)); 675} __attribute__ ((packed));
676 676
677/* USB_DT_OTG (from OTG 2.0 supplement) */
678struct usb_otg20_descriptor {
679 __u8 bLength;
680 __u8 bDescriptorType;
681
682 __u8 bmAttributes; /* support for HNP, SRP and ADP, etc */
683 __le16 bcdOTG; /* OTG and EH supplement release number
684 * in binary-coded decimal(i.e. 2.0 is 0200H)
685 */
686} __attribute__ ((packed));
687
677/* from usb_otg_descriptor.bmAttributes */ 688/* from usb_otg_descriptor.bmAttributes */
678#define USB_OTG_SRP (1 << 0) 689#define USB_OTG_SRP (1 << 0)
679#define USB_OTG_HNP (1 << 1) /* swap host/device roles */ 690#define USB_OTG_HNP (1 << 1) /* swap host/device roles */
691#define USB_OTG_ADP (1 << 2) /* support ADP */
680 692
681/*-------------------------------------------------------------------------*/ 693/*-------------------------------------------------------------------------*/
682 694
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
new file mode 100644
index 000000000000..df0e09bb7dd5
--- /dev/null
+++ b/include/uapi/linux/userfaultfd.h
@@ -0,0 +1,169 @@
1/*
2 * include/linux/userfaultfd.h
3 *
4 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
5 * Copyright (C) 2015 Red Hat, Inc.
6 *
7 */
8
9#ifndef _LINUX_USERFAULTFD_H
10#define _LINUX_USERFAULTFD_H
11
12#include <linux/types.h>
13
14#include <linux/compiler.h>
15
16#define UFFD_API ((__u64)0xAA)
17/*
18 * After implementing the respective features it will become:
19 * #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
20 * UFFD_FEATURE_EVENT_FORK)
21 */
22#define UFFD_API_FEATURES (0)
23#define UFFD_API_IOCTLS \
24 ((__u64)1 << _UFFDIO_REGISTER | \
25 (__u64)1 << _UFFDIO_UNREGISTER | \
26 (__u64)1 << _UFFDIO_API)
27#define UFFD_API_RANGE_IOCTLS \
28 ((__u64)1 << _UFFDIO_WAKE | \
29 (__u64)1 << _UFFDIO_COPY | \
30 (__u64)1 << _UFFDIO_ZEROPAGE)
31
32/*
33 * Valid ioctl command number range with this API is from 0x00 to
34 * 0x3F. UFFDIO_API is the fixed number, everything else can be
35 * changed by implementing a different UFFD_API. If sticking to the
 36 * same UFFD_API more ioctls can be added and userland will be aware of
37 * which ioctl the running kernel implements through the ioctl command
38 * bitmask written by the UFFDIO_API.
39 */
40#define _UFFDIO_REGISTER (0x00)
41#define _UFFDIO_UNREGISTER (0x01)
42#define _UFFDIO_WAKE (0x02)
43#define _UFFDIO_COPY (0x03)
44#define _UFFDIO_ZEROPAGE (0x04)
45#define _UFFDIO_API (0x3F)
46
47/* userfaultfd ioctl ids */
48#define UFFDIO 0xAA
49#define UFFDIO_API _IOWR(UFFDIO, _UFFDIO_API, \
50 struct uffdio_api)
51#define UFFDIO_REGISTER _IOWR(UFFDIO, _UFFDIO_REGISTER, \
52 struct uffdio_register)
53#define UFFDIO_UNREGISTER _IOR(UFFDIO, _UFFDIO_UNREGISTER, \
54 struct uffdio_range)
55#define UFFDIO_WAKE _IOR(UFFDIO, _UFFDIO_WAKE, \
56 struct uffdio_range)
57#define UFFDIO_COPY _IOWR(UFFDIO, _UFFDIO_COPY, \
58 struct uffdio_copy)
59#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
60 struct uffdio_zeropage)
61
62/* read() structure */
63struct uffd_msg {
64 __u8 event;
65
66 __u8 reserved1;
67 __u16 reserved2;
68 __u32 reserved3;
69
70 union {
71 struct {
72 __u64 flags;
73 __u64 address;
74 } pagefault;
75
76 struct {
77 /* unused reserved fields */
78 __u64 reserved1;
79 __u64 reserved2;
80 __u64 reserved3;
81 } reserved;
82 } arg;
83} __packed;
84
85/*
86 * Start at 0x12 and not at 0 to be more strict against bugs.
87 */
88#define UFFD_EVENT_PAGEFAULT 0x12
89#if 0 /* not available yet */
90#define UFFD_EVENT_FORK 0x13
91#endif
92
93/* flags for UFFD_EVENT_PAGEFAULT */
94#define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */
95#define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */
96
97struct uffdio_api {
98 /* userland asks for an API number and the features to enable */
99 __u64 api;
100 /*
 101 * Kernel answers below with all the available features for
102 * the API, this notifies userland of which events and/or
103 * which flags for each event are enabled in the current
104 * kernel.
105 *
106 * Note: UFFD_EVENT_PAGEFAULT and UFFD_PAGEFAULT_FLAG_WRITE
107 * are to be considered implicitly always enabled in all kernels as
108 * long as the uffdio_api.api requested matches UFFD_API.
109 */
110#if 0 /* not available yet */
111#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
112#define UFFD_FEATURE_EVENT_FORK (1<<1)
113#endif
114 __u64 features;
115
116 __u64 ioctls;
117};
118
119struct uffdio_range {
120 __u64 start;
121 __u64 len;
122};
123
124struct uffdio_register {
125 struct uffdio_range range;
126#define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0)
127#define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1)
128 __u64 mode;
129
130 /*
131 * kernel answers which ioctl commands are available for the
132 * range, keep at the end as the last 8 bytes aren't read.
133 */
134 __u64 ioctls;
135};
136
137struct uffdio_copy {
138 __u64 dst;
139 __u64 src;
140 __u64 len;
141 /*
 142 * There will be a wrprotection flag later that allows pages to be
 143 * mapped wrprotected on the fly. Such a flag will be
 144 * available if the wrprotection ioctls are implemented for the
145 * range according to the uffdio_register.ioctls.
146 */
147#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0)
148 __u64 mode;
149
150 /*
151 * "copy" is written by the ioctl and must be at the end: the
152 * copy_from_user will not read the last 8 bytes.
153 */
154 __s64 copy;
155};
156
157struct uffdio_zeropage {
158 struct uffdio_range range;
159#define UFFDIO_ZEROPAGE_MODE_DONTWAKE ((__u64)1<<0)
160 __u64 mode;
161
162 /*
163 * "zeropage" is written by the ioctl and must be at the end:
164 * the copy_from_user will not read the last 8 bytes.
165 */
166 __s64 zeropage;
167};
168
169#endif /* _LINUX_USERFAULTFD_H */
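The pieces above compose into the following flow; a hedged userspace sketch assuming <sys/syscall.h> already defines __NR_userfaultfd, with the guarded #define supplied because the header spells the packing attribute as the kernel-internal __packed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#include <linux/userfaultfd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (uffd < 0) { perror("userfaultfd"); return 1; }

    /* 1. Handshake: ask for UFFD_API, learn the supported ioctl bitmask. */
    struct uffdio_api api = { .api = UFFD_API, .features = 0 };
    if (ioctl(uffd, UFFDIO_API, &api) < 0) { perror("UFFDIO_API"); return 1; }

    /* 2. Register an anonymous region for missing-page tracking. */
    char *region = mmap(NULL, page, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (region == MAP_FAILED) { perror("mmap"); return 1; }
    struct uffdio_register reg = {
        .range = { .start = (unsigned long)region, .len = page },
        .mode  = UFFDIO_REGISTER_MODE_MISSING,
    };
    if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) { perror("UFFDIO_REGISTER"); return 1; }

    /* 3. Resolve a missing page with UFFDIO_COPY. A real monitor would do
     *    this from a thread that read()s struct uffd_msg events instead. */
    char *src = mmap(NULL, page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (src == MAP_FAILED) { perror("mmap"); return 1; }
    memset(src, 0x41, page);
    struct uffdio_copy copy = {
        .dst = (unsigned long)region, .src = (unsigned long)src,
        .len = page, .mode = 0,
    };
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) { perror("UFFDIO_COPY"); return 1; }

    printf("copied %lld bytes, first byte 0x%02x\n",
           (long long)copy.copy, region[0]);
    return 0;
}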
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 9f6e108ff4a0..d448c536b49d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -174,6 +174,10 @@ enum v4l2_colorfx {
174 * We reserve 16 controls for this driver. */ 174 * We reserve 16 controls for this driver. */
175#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070) 175#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070)
176 176
177/* The base for the tc358743 driver controls.
178 * We reserve 16 controls for this driver. */
179#define V4L2_CID_USER_TC358743_BASE (V4L2_CID_USER_BASE + 0x1080)
180
177/* MPEG-class control IDs */ 181/* MPEG-class control IDs */
178/* The MPEG controls are applicable to all codec controls 182/* The MPEG controls are applicable to all codec controls
179 * and the 'MPEG' part of the define is historical */ 183 * and the 'MPEG' part of the define is historical */
diff --git a/include/uapi/linux/vsp1.h b/include/uapi/linux/vsp1.h
index e18858f6e865..9a823696d816 100644
--- a/include/uapi/linux/vsp1.h
+++ b/include/uapi/linux/vsp1.h
@@ -28,7 +28,7 @@
28 _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct vsp1_lut_config) 28 _IOWR('V', BASE_VIDIOC_PRIVATE + 1, struct vsp1_lut_config)
29 29
30struct vsp1_lut_config { 30struct vsp1_lut_config {
31 u32 lut[256]; 31 __u32 lut[256];
32}; 32};
33 33
34#endif /* __VSP1_USER_H__ */ 34#endif /* __VSP1_USER_H__ */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 99a8ca15fe64..1e889aa8a36e 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -29,8 +29,10 @@ struct cxl_ioctl_start_work {
29 29
30#define CXL_START_WORK_AMR 0x0000000000000001ULL 30#define CXL_START_WORK_AMR 0x0000000000000001ULL
31#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL 31#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
32#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL
32#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\ 33#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
33 CXL_START_WORK_NUM_IRQS) 34 CXL_START_WORK_NUM_IRQS |\
35 CXL_START_WORK_ERR_FF)
34 36
35 37
36/* Possible modes that an afu can be in */ 38/* Possible modes that an afu can be in */
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 687ae332200f..231901b08f6c 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -5,3 +5,4 @@ header-y += ib_user_sa.h
5header-y += ib_user_verbs.h 5header-y += ib_user_verbs.h
6header-y += rdma_netlink.h 6header-y += rdma_netlink.h
7header-y += rdma_user_cm.h 7header-y += rdma_user_cm.h
8header-y += hfi/
diff --git a/include/uapi/rdma/hfi/Kbuild b/include/uapi/rdma/hfi/Kbuild
new file mode 100644
index 000000000000..ef23c294fc71
--- /dev/null
+++ b/include/uapi/rdma/hfi/Kbuild
@@ -0,0 +1,2 @@
1# UAPI Header export list
2header-y += hfi1_user.h
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
new file mode 100644
index 000000000000..78c442fbf263
--- /dev/null
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -0,0 +1,427 @@
1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains defines, structures, etc. that are used
53 * to communicate between kernel and user code.
54 */
55
56#ifndef _LINUX__HFI1_USER_H
57#define _LINUX__HFI1_USER_H
58
59#include <linux/types.h>
60
61/*
62 * This version number is given to the driver by the user code during
63 * initialization in the spu_userversion field of hfi1_user_info, so
64 * the driver can check for compatibility with user code.
65 *
66 * The major version changes when data structures change in an incompatible
67 * way. The driver must be the same for initialization to succeed.
68 */
69#define HFI1_USER_SWMAJOR 4
70
71/*
72 * Minor version differences are always compatible
 73 * within a major version; however, if user software is newer
74 * than driver software, some new features and/or structure fields
75 * may not be implemented; the user code must deal with this if it
76 * cares, or it must abort after initialization reports the difference.
77 */
78#define HFI1_USER_SWMINOR 0
79
80/*
81 * Set of HW and driver capability/feature bits.
82 * These bit values are used to configure enabled/disabled HW and
83 * driver features. The same set of bits are communicated to user
84 * space.
85 */
86#define HFI1_CAP_DMA_RTAIL (1UL << 0) /* Use DMA'ed RTail value */
87#define HFI1_CAP_SDMA (1UL << 1) /* Enable SDMA support */
88#define HFI1_CAP_SDMA_AHG (1UL << 2) /* Enable SDMA AHG support */
89#define HFI1_CAP_EXTENDED_PSN (1UL << 3) /* Enable Extended PSN support */
90#define HFI1_CAP_HDRSUPP (1UL << 4) /* Enable Header Suppression */
91/* 1UL << 5 reserved */
92#define HFI1_CAP_USE_SDMA_HEAD (1UL << 6) /* DMA Hdr Q tail vs. use CSR */
93#define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/
94#define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */
95#define HFI1_CAP_NODROP_EGR_FULL (1UL << 9) /* Don't drop on EGR buffs full */
96#define HFI1_CAP_TID_UNMAP (1UL << 10) /* Enable Expected TID caching */
97#define HFI1_CAP_PRINT_UNIMPL (1UL << 11) /* Show for unimplemented feats */
98#define HFI1_CAP_ALLOW_PERM_JKEY (1UL << 12) /* Allow use of permissive JKEY */
99#define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */
100#define HFI1_CAP_PKEY_CHECK (1UL << 14) /* Enable ctxt PKey checking */
101#define HFI1_CAP_STATIC_RATE_CTRL (1UL << 15) /* Allow PBC.StaticRateControl */
102#define HFI1_CAP_QSFP_ENABLED (1UL << 16) /* Enable QSFP check during LNI */
103#define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */
104#define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */
105
106#define HFI1_RCVHDR_ENTSIZE_2 (1UL << 0)
107#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
108#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2)
109
110/*
111 * If the unit is specified via open, HFI choice is fixed. If port is
112 * specified, it's also fixed. Otherwise we try to spread contexts
113 * across ports and HFIs, using different algorithms. WITHIN is
114 * the old default, prior to this mechanism.
115 */
116#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
117 * ports; this is the default */
118#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
119 * active ports within), then next HFI */
120#define HFI1_ALG_COUNT 2 /* number of algorithm choices */
121
122
123/* User commands. */
124#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */
125#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */
126#define HFI1_CMD_USER_INFO 3 /* set up userspace */
127#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */
128#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */
129#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */
130#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */
131
132#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */
133#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */
134#define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */
135#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
136#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
137/* separate EPROM commands from normal PSM commands */
138#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */
139#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */
140#define HFI1_CMD_EP_ERASE_P0 66 /* erase EPROM partition 0 */
141#define HFI1_CMD_EP_ERASE_P1 67 /* erase EPROM partition 1 */
142#define HFI1_CMD_EP_READ_P0 68 /* read EPROM partition 0 */
143#define HFI1_CMD_EP_READ_P1 69 /* read EPROM partition 1 */
144#define HFI1_CMD_EP_WRITE_P0 70 /* write EPROM partition 0 */
145#define HFI1_CMD_EP_WRITE_P1 71 /* write EPROM partition 1 */
146
147#define _HFI1_EVENT_FROZEN_BIT 0
148#define _HFI1_EVENT_LINKDOWN_BIT 1
149#define _HFI1_EVENT_LID_CHANGE_BIT 2
150#define _HFI1_EVENT_LMC_CHANGE_BIT 3
151#define _HFI1_EVENT_SL2VL_CHANGE_BIT 4
152#define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_SL2VL_CHANGE_BIT
153
154#define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT)
155#define HFI1_EVENT_LINKDOWN_BIT (1UL << _HFI1_EVENT_LINKDOWN_BIT)
156#define HFI1_EVENT_LID_CHANGE_BIT (1UL << _HFI1_EVENT_LID_CHANGE_BIT)
157#define HFI1_EVENT_LMC_CHANGE_BIT (1UL << _HFI1_EVENT_LMC_CHANGE_BIT)
158#define HFI1_EVENT_SL2VL_CHANGE_BIT (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT)
159
160/*
161 * These are the status bits readable (in ASCII form, 64bit value)
162 * from the "status" sysfs file. For binary compatibility, values
163 * must remain as is; removed states can be reused for different
164 * purposes.
165 */
166#define HFI1_STATUS_INITTED 0x1 /* basic initialization done */
167/* Chip has been found and initialized */
168#define HFI1_STATUS_CHIP_PRESENT 0x20
169/* IB link is at ACTIVE, usable for data traffic */
170#define HFI1_STATUS_IB_READY 0x40
171/* link is configured, LID, MTU, etc. have been set */
172#define HFI1_STATUS_IB_CONF 0x80
173/* A Fatal hardware error has occurred. */
174#define HFI1_STATUS_HWERROR 0x200
175
176/*
177 * Number of supported shared contexts.
178 * This is the maximum number of software contexts that can share
179 * a hardware send/receive context.
180 */
181#define HFI1_MAX_SHARED_CTXTS 8
182
183/*
184 * Poll types
185 */
186#define HFI1_POLL_TYPE_ANYRCV 0x0
187#define HFI1_POLL_TYPE_URGENT 0x1
188
189/*
190 * This structure is passed to the driver to tell it where
191 * user code buffers are, sizes, etc. The offsets and sizes of the
192 * fields must remain unchanged, for binary compatibility. It can
 193 * be extended if userversion is changed, so user code can tell, if needed
194 */
195struct hfi1_user_info {
196 /*
197 * version of user software, to detect compatibility issues.
198 * Should be set to HFI1_USER_SWVERSION.
199 */
200 __u32 userversion;
201 __u16 pad;
202 /* HFI selection algorithm, if unit has not selected */
203 __u16 hfi1_alg;
204 /*
205 * If two or more processes wish to share a context, each process
206 * must set the subcontext_cnt and subcontext_id to the same
207 * values. The only restriction on the subcontext_id is that
208 * it be unique for a given node.
209 */
210 __u16 subctxt_cnt;
211 __u16 subctxt_id;
212 /* 128bit UUID passed in by PSM. */
213 __u8 uuid[16];
214};
215
216struct hfi1_ctxt_info {
217 __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
218 __u32 rcvegr_size; /* size of each eager buffer */
219 __u16 num_active; /* number of active units */
220 __u16 unit; /* unit (chip) assigned to caller */
221 __u16 ctxt; /* ctxt on unit assigned to caller */
222 __u16 subctxt; /* subctxt on unit assigned to caller */
223 __u16 rcvtids; /* number of Rcv TIDs for this context */
224 __u16 credits; /* number of PIO credits for this context */
225 __u16 numa_node; /* NUMA node of the assigned device */
226 __u16 rec_cpu; /* cpu # for affinity (0xffff if none) */
227 __u16 send_ctxt; /* send context in use by this user context */
228 __u16 egrtids; /* number of RcvArray entries for Eager Rcvs */
229 __u16 rcvhdrq_cnt; /* number of RcvHdrQ entries */
230 __u16 rcvhdrq_entsize; /* size (in bytes) for each RcvHdrQ entry */
231 __u16 sdma_ring_size; /* number of entries in SDMA request ring */
232};
233
234struct hfi1_tid_info {
235 /* virtual address of first page in transfer */
236 __u64 vaddr;
237 /* pointer to tid array. this array is big enough */
238 __u64 tidlist;
239 /* number of tids programmed by this request */
240 __u32 tidcnt;
241 /* length of transfer buffer programmed by this request */
242 __u32 length;
243 /*
244 * pointer to bitmap of TIDs used for this call;
245 * checked for being large enough at open
246 */
247 __u64 tidmap;
248};
249
250struct hfi1_cmd {
251 __u32 type; /* command type */
252 __u32 len; /* length of struct pointed to by add */
253 __u64 addr; /* pointer to user structure */
254};
255
256enum hfi1_sdma_comp_state {
257 FREE = 0,
258 QUEUED,
259 COMPLETE,
260 ERROR
261};
262
263/*
264 * SDMA completion ring entry
265 */
266struct hfi1_sdma_comp_entry {
267 __u32 status;
268 __u32 errcode;
269};
270
271/*
272 * Device status and notifications from driver to user-space.
273 */
274struct hfi1_status {
275 __u64 dev; /* device/hw status bits */
276 __u64 port; /* port state and status bits */
277 char freezemsg[0];
278};
279
280/*
281 * This structure is returned by the driver immediately after
282 * open to get implementation-specific info, and info specific to this
283 * instance.
284 *
285 * This struct must have explicit pad fields where type sizes
286 * may result in different alignments between 32 and 64 bit
 287 * programs, since the 64 bit kernel requires the user code
288 * to have matching offsets
289 */
290struct hfi1_base_info {
291 /* version of hardware, for feature checking. */
292 __u32 hw_version;
293 /* version of software, for feature checking. */
294 __u32 sw_version;
295 /* Job key */
296 __u16 jkey;
297 __u16 padding1;
298 /*
299 * The special QP (queue pair) value that identifies PSM
300 * protocol packet from standard IB packets.
301 */
302 __u32 bthqp;
303 /* PIO credit return address, */
304 __u64 sc_credits_addr;
305 /*
306 * Base address of write-only pio buffers for this process.
307 * Each buffer has sendpio_credits*64 bytes.
308 */
309 __u64 pio_bufbase_sop;
310 /*
311 * Base address of write-only pio buffers for this process.
312 * Each buffer has sendpio_credits*64 bytes.
313 */
314 __u64 pio_bufbase;
315 /* address where receive buffer queue is mapped into */
316 __u64 rcvhdr_bufbase;
317 /* base address of Eager receive buffers. */
318 __u64 rcvegr_bufbase;
319 /* base address of SDMA completion ring */
320 __u64 sdma_comp_bufbase;
321 /*
322 * User register base for init code, not to be used directly by
323 * protocol or applications. Always maps real chip register space.
324 * the register addresses are:
325 * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
326 * ur_rcvtidflow
327 */
328 __u64 user_regbase;
329 /* notification events */
330 __u64 events_bufbase;
331 /* status page */
332 __u64 status_bufbase;
333 /* rcvhdrtail update */
334 __u64 rcvhdrtail_base;
335 /*
336 * shared memory pages for subctxts if ctxt is shared; these cover
337 * all the processes in the group sharing a single context.
338 * all have enough space for the num_subcontexts value on this job.
339 */
340 __u64 subctxt_uregbase;
341 __u64 subctxt_rcvegrbuf;
342 __u64 subctxt_rcvhdrbuf;
343};
344
345enum sdma_req_opcode {
346 EXPECTED = 0,
347 EAGER
348};
349
350#define HFI1_SDMA_REQ_VERSION_MASK 0xF
351#define HFI1_SDMA_REQ_VERSION_SHIFT 0x0
352#define HFI1_SDMA_REQ_OPCODE_MASK 0xF
353#define HFI1_SDMA_REQ_OPCODE_SHIFT 0x4
354#define HFI1_SDMA_REQ_IOVCNT_MASK 0xFF
355#define HFI1_SDMA_REQ_IOVCNT_SHIFT 0x8
356
357struct sdma_req_info {
358 /*
359 * bits 0-3 - version (currently unused)
360 * bits 4-7 - opcode (enum sdma_req_opcode)
361 * bits 8-15 - io vector count
362 */
363 __u16 ctrl;
364 /*
365 * Number of fragments contained in this request.
366 * User-space has already computed how many
367 * fragment-sized packet the user buffer will be
368 * split into.
369 */
370 __u16 npkts;
371 /*
372 * Size of each fragment the user buffer will be
373 * split into.
374 */
375 __u16 fragsize;
376 /*
377 * Index of the slot in the SDMA completion ring
378 * this request should be using. User-space is
379 * in charge of managing its own ring.
380 */
381 __u16 comp_idx;
382} __packed;
383
384/*
385 * SW KDETH header.
386 * swdata is SW defined portion.
387 */
388struct hfi1_kdeth_header {
389 __le32 ver_tid_offset;
390 __le16 jkey;
391 __le16 hcrc;
392 __le32 swdata[7];
393} __packed;
394
395/*
396 * Structure describing the headers that User space uses. The
397 * structure above is a subset of this one.
398 */
399struct hfi1_pkt_header {
400 __le16 pbc[4];
401 __be16 lrh[4];
402 __be32 bth[3];
403 struct hfi1_kdeth_header kdeth;
404} __packed;
405
406
407/*
408 * The list of usermode accessible registers.
409 */
410enum hfi1_ureg {
411 /* (RO) DMA RcvHdr to be used next. */
412 ur_rcvhdrtail = 0,
413 /* (RW) RcvHdr entry to be processed next by host. */
414 ur_rcvhdrhead = 1,
415 /* (RO) Index of next Eager index to use. */
416 ur_rcvegrindextail = 2,
417 /* (RW) Eager TID to be processed next */
418 ur_rcvegrindexhead = 3,
419 /* (RO) Receive Eager Offset Tail */
420 ur_rcvegroffsettail = 4,
421 /* For internal use only; max register number. */
422 ur_maxreg,
423 /* (RW) Receive TID flow table */
424 ur_rcvtidflowtable = 256
425};
426
 427#endif /* _LINUX__HFI1_USER_H */
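The ctrl bit-packing documented in sdma_req_info is easy to get wrong, so here is a small userspace sketch; it assumes the header lands as <rdma/hfi/hfi1_user.h> per the Kbuild additions above, and supplies the kernel-style __packed spelling the header relies on:

#include <stdio.h>

#ifndef __packed
#define __packed __attribute__((packed))
#endif
#include <rdma/hfi/hfi1_user.h>

/* Pack sdma_req_info.ctrl: version in bits 0-3, opcode in bits 4-7,
 * io vector count in bits 8-15. */
static __u16 make_ctrl(__u16 version, enum sdma_req_opcode op, __u16 iovcnt)
{
    return ((version & HFI1_SDMA_REQ_VERSION_MASK) << HFI1_SDMA_REQ_VERSION_SHIFT) |
           (((__u16)op & HFI1_SDMA_REQ_OPCODE_MASK) << HFI1_SDMA_REQ_OPCODE_SHIFT) |
           ((iovcnt & HFI1_SDMA_REQ_IOVCNT_MASK) << HFI1_SDMA_REQ_IOVCNT_SHIFT);
}

int main(void)
{
    struct sdma_req_info info = {
        .ctrl = make_ctrl(0, EXPECTED, 3),  /* version 0, expected TID, 3 iovecs */
        .npkts = 8,
        .fragsize = 4096,
        .comp_idx = 0,
    };

    printf("ctrl = 0x%04x\n", info.ctrl);   /* 0x0300 for this example */
    return 0;
}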
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 6e4bb4270ca2..c19a5dc1531a 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -7,12 +7,14 @@ enum {
7 RDMA_NL_RDMA_CM = 1, 7 RDMA_NL_RDMA_CM = 1,
8 RDMA_NL_NES, 8 RDMA_NL_NES,
9 RDMA_NL_C4IW, 9 RDMA_NL_C4IW,
10 RDMA_NL_LS, /* RDMA Local Services */
10 RDMA_NL_NUM_CLIENTS 11 RDMA_NL_NUM_CLIENTS
11}; 12};
12 13
13enum { 14enum {
14 RDMA_NL_GROUP_CM = 1, 15 RDMA_NL_GROUP_CM = 1,
15 RDMA_NL_GROUP_IWPM, 16 RDMA_NL_GROUP_IWPM,
17 RDMA_NL_GROUP_LS,
16 RDMA_NL_NUM_GROUPS 18 RDMA_NL_NUM_GROUPS
17}; 19};
18 20
@@ -128,5 +130,85 @@ enum {
128 IWPM_NLA_ERR_MAX 130 IWPM_NLA_ERR_MAX
129}; 131};
130 132
133/*
134 * Local service operations:
135 * RESOLVE - The client requests the local service to resolve a path.
136 * SET_TIMEOUT - The local service requests the client to set the timeout.
137 */
138enum {
139 RDMA_NL_LS_OP_RESOLVE = 0,
140 RDMA_NL_LS_OP_SET_TIMEOUT,
141 RDMA_NL_LS_NUM_OPS
142};
143
144/* Local service netlink message flags */
145#define RDMA_NL_LS_F_ERR 0x0100 /* Failed response */
146
147/*
148 * Local service resolve operation family header.
149 * The layout for the resolve operation:
150 * nlmsg header
151 * family header
152 * attributes
153 */
154
155/*
156 * Local service path use:
157 * Specify how the path(s) will be used.
158 * ALL - For connected CM operation (6 pathrecords)
159 * UNIDIRECTIONAL - For unidirectional UD (1 pathrecord)
160 * GMP - For miscellaneous GMP like operation (at least 1 reversible
161 * pathrecord)
162 */
163enum {
164 LS_RESOLVE_PATH_USE_ALL = 0,
165 LS_RESOLVE_PATH_USE_UNIDIRECTIONAL,
166 LS_RESOLVE_PATH_USE_GMP,
167 LS_RESOLVE_PATH_USE_MAX
168};
169
170#define LS_DEVICE_NAME_MAX 64
171
172struct rdma_ls_resolve_header {
173 __u8 device_name[LS_DEVICE_NAME_MAX];
174 __u8 port_num;
175 __u8 path_use;
176};
177
178/* Local service attribute type */
179#define RDMA_NLA_F_MANDATORY (1 << 13)
180#define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
181 RDMA_NLA_F_MANDATORY))
182
183/*
184 * Local service attributes:
185 * Attr Name Size Byte order
186 * -----------------------------------------------------
187 * PATH_RECORD struct ib_path_rec_data
188 * TIMEOUT u32 cpu
189 * SERVICE_ID u64 cpu
190 * DGID u8[16] BE
191 * SGID u8[16] BE
192 * TCLASS u8
193 * PKEY u16 cpu
194 * QOS_CLASS u16 cpu
195 */
196enum {
197 LS_NLA_TYPE_UNSPEC = 0,
198 LS_NLA_TYPE_PATH_RECORD,
199 LS_NLA_TYPE_TIMEOUT,
200 LS_NLA_TYPE_SERVICE_ID,
201 LS_NLA_TYPE_DGID,
202 LS_NLA_TYPE_SGID,
203 LS_NLA_TYPE_TCLASS,
204 LS_NLA_TYPE_PKEY,
205 LS_NLA_TYPE_QOS_CLASS,
206 LS_NLA_TYPE_MAX
207};
208
209/* Local service DGID/SGID attribute: big endian */
210struct rdma_nla_ls_gid {
211 __u8 gid[16];
212};
131 213
132#endif /* _UAPI_RDMA_NETLINK_H */ 214#endif /* _UAPI_RDMA_NETLINK_H */
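A short sketch of how a local-service consumer separates the base attribute type from the new RDMA_NLA_F_MANDATORY flag; NLA_F_NESTED and NLA_F_NET_BYTEORDER in the mask come from <linux/netlink.h>:

#include <stdio.h>
#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>

/* Split an attribute type into its base value plus the mandatory flag. */
static void classify(unsigned short nla_type)
{
    unsigned int base = nla_type & RDMA_NLA_TYPE_MASK;

    printf("attr type %u%s\n", base,
           (nla_type & RDMA_NLA_F_MANDATORY) ? " (mandatory)" : "");
}

int main(void)
{
    classify(LS_NLA_TYPE_DGID | RDMA_NLA_F_MANDATORY);
    classify(LS_NLA_TYPE_TIMEOUT);
    return 0;
}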
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index 75746d52f208..d791e0ad509d 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -3,3 +3,4 @@ header-y += fc/
3header-y += scsi_bsg_fc.h 3header-y += scsi_bsg_fc.h
4header-y += scsi_netlink.h 4header-y += scsi_netlink.h
5header-y += scsi_netlink_fc.h 5header-y += scsi_netlink_fc.h
6header-y += cxlflash_ioctl.h
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
new file mode 100644
index 000000000000..831351b2e660
--- /dev/null
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -0,0 +1,174 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef _CXLFLASH_IOCTL_H
16#define _CXLFLASH_IOCTL_H
17
18#include <linux/types.h>
19
20/*
21 * Structure and flag definitions CXL Flash superpipe ioctls
22 */
23
24#define DK_CXLFLASH_VERSION_0 0
25
26struct dk_cxlflash_hdr {
27 __u16 version; /* Version data */
28 __u16 rsvd[3]; /* Reserved for future use */
29 __u64 flags; /* Input flags */
30 __u64 return_flags; /* Returned flags */
31};
32
33/*
34 * Notes:
35 * -----
36 * The 'context_id' field of all ioctl structures contains the context
37 * identifier for a context in the lower 32-bits (upper 32-bits are not
38 * to be used when identifying a context to the AFU). That said, the value
39 * in its entirety (all 64-bits) is to be treated as an opaque cookie and
40 * should be presented as such when issuing ioctls.
41 *
42 * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
43 * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
44 * the fcntl.h header file.
45 */
46#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT 0x8000000000000000ULL
47
48struct dk_cxlflash_attach {
49 struct dk_cxlflash_hdr hdr; /* Common fields */
50 __u64 num_interrupts; /* Requested number of interrupts */
51 __u64 context_id; /* Returned context */
52 __u64 mmio_size; /* Returned size of MMIO area */
53 __u64 block_size; /* Returned block size, in bytes */
54 __u64 adap_fd; /* Returned adapter file descriptor */
55 __u64 last_lba; /* Returned last LBA on the device */
56 __u64 max_xfer; /* Returned max transfer size, blocks */
57 __u64 reserved[8]; /* Reserved for future use */
58};
59
60struct dk_cxlflash_detach {
61 struct dk_cxlflash_hdr hdr; /* Common fields */
62 __u64 context_id; /* Context to detach */
63 __u64 reserved[8]; /* Reserved for future use */
64};
65
66struct dk_cxlflash_udirect {
67 struct dk_cxlflash_hdr hdr; /* Common fields */
68 __u64 context_id; /* Context to own physical resources */
69 __u64 rsrc_handle; /* Returned resource handle */
70 __u64 last_lba; /* Returned last LBA on the device */
71 __u64 reserved[8]; /* Reserved for future use */
72};
73
74#define DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME 0x8000000000000000ULL
75
76struct dk_cxlflash_uvirtual {
77 struct dk_cxlflash_hdr hdr; /* Common fields */
78 __u64 context_id; /* Context to own virtual resources */
79 __u64 lun_size; /* Requested size, in 4K blocks */
80 __u64 rsrc_handle; /* Returned resource handle */
81 __u64 last_lba; /* Returned last LBA of LUN */
82 __u64 reserved[8]; /* Reserved for future use */
83};
84
85struct dk_cxlflash_release {
86 struct dk_cxlflash_hdr hdr; /* Common fields */
87 __u64 context_id; /* Context owning resources */
88 __u64 rsrc_handle; /* Resource handle to release */
89 __u64 reserved[8]; /* Reserved for future use */
90};
91
92struct dk_cxlflash_resize {
93 struct dk_cxlflash_hdr hdr; /* Common fields */
94 __u64 context_id; /* Context owning resources */
95 __u64 rsrc_handle; /* Resource handle of LUN to resize */
96 __u64 req_size; /* New requested size, in 4K blocks */
97 __u64 last_lba; /* Returned last LBA of LUN */
98 __u64 reserved[8]; /* Reserved for future use */
99};
100
101struct dk_cxlflash_clone {
102 struct dk_cxlflash_hdr hdr; /* Common fields */
103 __u64 context_id_src; /* Context to clone from */
104 __u64 context_id_dst; /* Context to clone to */
105 __u64 adap_fd_src; /* Source context adapter fd */
106 __u64 reserved[8]; /* Reserved for future use */
107};
108
109#define DK_CXLFLASH_VERIFY_SENSE_LEN 18
110#define DK_CXLFLASH_VERIFY_HINT_SENSE 0x8000000000000000ULL
111
112struct dk_cxlflash_verify {
113 struct dk_cxlflash_hdr hdr; /* Common fields */
114 __u64 context_id; /* Context owning resources to verify */
115 __u64 rsrc_handle; /* Resource handle of LUN */
116 __u64 hint; /* Reasons for verify */
117 __u64 last_lba; /* Returned last LBA of device */
118 __u8 sense_data[DK_CXLFLASH_VERIFY_SENSE_LEN]; /* SCSI sense data */
119 __u8 pad[6]; /* Pad to next 8-byte boundary */
120 __u64 reserved[8]; /* Reserved for future use */
121};
122
123#define DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET 0x8000000000000000ULL
124
125struct dk_cxlflash_recover_afu {
126 struct dk_cxlflash_hdr hdr; /* Common fields */
127 __u64 reason; /* Reason for recovery request */
128 __u64 context_id; /* Context to recover / updated ID */
129 __u64 mmio_size; /* Returned size of MMIO area */
130 __u64 adap_fd; /* Returned adapter file descriptor */
131 __u64 reserved[8]; /* Reserved for future use */
132};
133
134#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN 16
135#define DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE 0x8000000000000000ULL
136#define DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE 0x4000000000000000ULL
137#define DK_CXLFLASH_MANAGE_LUN_ALL_PORTS_ACCESSIBLE 0x2000000000000000ULL
138
139struct dk_cxlflash_manage_lun {
140 struct dk_cxlflash_hdr hdr; /* Common fields */
141 __u8 wwid[DK_CXLFLASH_MANAGE_LUN_WWID_LEN]; /* Page83 WWID, NAA-6 */
142 __u64 reserved[8]; /* Rsvd, future use */
143};
144
145union cxlflash_ioctls {
146 struct dk_cxlflash_attach attach;
147 struct dk_cxlflash_detach detach;
148 struct dk_cxlflash_udirect udirect;
149 struct dk_cxlflash_uvirtual uvirtual;
150 struct dk_cxlflash_release release;
151 struct dk_cxlflash_resize resize;
152 struct dk_cxlflash_clone clone;
153 struct dk_cxlflash_verify verify;
154 struct dk_cxlflash_recover_afu recover_afu;
155 struct dk_cxlflash_manage_lun manage_lun;
156};
157
158#define MAX_CXLFLASH_IOCTL_SZ (sizeof(union cxlflash_ioctls))
159
160#define CXL_MAGIC 0xCA
161#define CXL_IOWR(_n, _s) _IOWR(CXL_MAGIC, _n, struct _s)
162
163#define DK_CXLFLASH_ATTACH CXL_IOWR(0x80, dk_cxlflash_attach)
164#define DK_CXLFLASH_USER_DIRECT CXL_IOWR(0x81, dk_cxlflash_udirect)
165#define DK_CXLFLASH_RELEASE CXL_IOWR(0x82, dk_cxlflash_release)
166#define DK_CXLFLASH_DETACH CXL_IOWR(0x83, dk_cxlflash_detach)
167#define DK_CXLFLASH_VERIFY CXL_IOWR(0x84, dk_cxlflash_verify)
168#define DK_CXLFLASH_RECOVER_AFU CXL_IOWR(0x85, dk_cxlflash_recover_afu)
169#define DK_CXLFLASH_MANAGE_LUN CXL_IOWR(0x86, dk_cxlflash_manage_lun)
170#define DK_CXLFLASH_USER_VIRTUAL CXL_IOWR(0x87, dk_cxlflash_uvirtual)
171#define DK_CXLFLASH_VLUN_RESIZE CXL_IOWR(0x88, dk_cxlflash_resize)
172#define DK_CXLFLASH_VLUN_CLONE CXL_IOWR(0x89, dk_cxlflash_clone)
173
174#endif /* ifndef _CXLFLASH_IOCTL_H */
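A hedged userspace sketch of the attach step that starts a superpipe session; the device path and interrupt count are placeholders, and the header is assumed to install as <scsi/cxlflash_ioctl.h> per the Kbuild change above:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>

int main(int argc, char **argv)
{
    struct dk_cxlflash_attach attach;
    const char *dev = argc > 1 ? argv[1] : "/dev/sdX";  /* placeholder path */
    int fd = open(dev, O_RDWR);

    if (fd < 0) {
        perror(dev);
        return 1;
    }
    memset(&attach, 0, sizeof(attach));
    attach.hdr.version = DK_CXLFLASH_VERSION_0;
    attach.num_interrupts = 4;          /* illustrative request */
    if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) < 0) {
        perror("DK_CXLFLASH_ATTACH");
        close(fd);
        return 1;
    }
    printf("context 0x%llx, block size %llu, last LBA %llu\n",
           (unsigned long long)attach.context_id,
           (unsigned long long)attach.block_size,
           (unsigned long long)attach.last_lba);
    close(fd);
    return 0;
}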
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index a85316811d79..7ddeeda93809 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -44,6 +44,10 @@ struct privcmd_hypercall {
44 44
45struct privcmd_mmap_entry { 45struct privcmd_mmap_entry {
46 __u64 va; 46 __u64 va;
47 /*
48 * This should be a GFN. It's not possible to change the name because
49 * it's exposed to the user-space.
50 */
47 __u64 mfn; 51 __u64 mfn;
48 __u64 npages; 52 __u64 npages;
49}; 53};
diff --git a/include/video/kyro.h b/include/video/kyro.h
index c563968e926c..b958c2e9c915 100644
--- a/include/video/kyro.h
+++ b/include/video/kyro.h
@@ -35,9 +35,7 @@ struct kyrofb_info {
35 /* Useful to hold depth here for Linux */ 35 /* Useful to hold depth here for Linux */
36 u8 PIXDEPTH; 36 u8 PIXDEPTH;
37 37
38#ifdef CONFIG_MTRR 38 int wc_cookie;
39 int mtrr_handle;
40#endif
41}; 39};
42 40
43extern int kyro_dev_init(void); 41extern int kyro_dev_init(void);
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index 0530e5a4c6b1..d8fc96ed11e9 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -296,6 +296,7 @@
296 296
297/* Video buffer addresses */ 297/* Video buffer addresses */
298#define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8)) 298#define VIDW_BUF_START(_buff) (0xA0 + ((_buff) * 8))
299#define VIDW_BUF_START_S(_buff) (0x40A0 + ((_buff) * 8))
299#define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8)) 300#define VIDW_BUF_START1(_buff) (0xA4 + ((_buff) * 8))
300#define VIDW_BUF_END(_buff) (0xD0 + ((_buff) * 8)) 301#define VIDW_BUF_END(_buff) (0xD0 + ((_buff) * 8))
301#define VIDW_BUF_END1(_buff) (0xD4 + ((_buff) * 8)) 302#define VIDW_BUF_END1(_buff) (0xD4 + ((_buff) * 8))
diff --git a/include/video/vga.h b/include/video/vga.h
index cac567f22e62..d334e64c1c19 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -18,7 +18,7 @@
18#define __linux_video_vga_h__ 18#define __linux_video_vga_h__
19 19
20#include <linux/types.h> 20#include <linux/types.h>
21#include <asm/io.h> 21#include <linux/io.h>
22#include <asm/vga.h> 22#include <asm/vga.h>
23#include <asm/byteorder.h> 23#include <asm/byteorder.h>
24 24
diff --git a/include/xen/events.h b/include/xen/events.h
index 7d95fdf9cf3e..88da2abaf535 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -92,7 +92,6 @@ void xen_hvm_callback_vector(void);
92#ifdef CONFIG_TRACING 92#ifdef CONFIG_TRACING
93#define trace_xen_hvm_callback_vector xen_hvm_callback_vector 93#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
94#endif 94#endif
95extern int xen_have_vector_callback;
96int xen_set_callback_via(uint64_t via); 95int xen_set_callback_via(uint64_t via);
97void xen_evtchn_do_upcall(struct pt_regs *regs); 96void xen_evtchn_do_upcall(struct pt_regs *regs);
98void xen_hvm_evtchn_do_upcall(void); 97void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 70054cc0708d..252ffd4801ef 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -156,7 +156,9 @@ struct xen_netif_tx_request {
156/* Types of xen_netif_extra_info descriptors. */ 156/* Types of xen_netif_extra_info descriptors. */
157#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ 157#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
158#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ 158#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
159#define XEN_NETIF_EXTRA_TYPE_MAX (2) 159#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
160#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
161#define XEN_NETIF_EXTRA_TYPE_MAX (4)
160 162
161/* xen_netif_extra_info flags. */ 163/* xen_netif_extra_info flags. */
162#define _XEN_NETIF_EXTRA_FLAG_MORE (0) 164#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@@ -201,6 +203,10 @@ struct xen_netif_extra_info {
201 uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ 203 uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
202 } gso; 204 } gso;
203 205
206 struct {
207 uint8_t addr[6]; /* Address to add/remove. */
208 } mcast;
209
204 uint16_t pad[3]; 210 uint16_t pad[3];
205 } u; 211 } u;
206}; 212};
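For the new multicast extras, a frontend fills an extra-info slot along these lines; a kernel-side sketch only (claiming the TX ring slot and flagging XEN_NETTXF_extra_info on the preceding request are left out):

#include <linux/string.h>
#include <xen/interface/io/netif.h>

/* Describe one multicast-add request in an extra-info slot. */
static void fill_mcast_add(struct xen_netif_extra_info *extra,
                           const unsigned char addr[6])
{
    memset(extra, 0, sizeof(*extra));
    extra->type = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
    extra->flags = 0;           /* or XEN_NETIF_EXTRA_FLAG_MORE if chained */
    memcpy(extra->u.mcast.addr, addr, 6);
}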
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 5cc49ea8d840..8e035871360e 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -474,6 +474,23 @@ struct xenpf_core_parking {
474}; 474};
475DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking); 475DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking);
476 476
477#define XENPF_get_symbol 63
478struct xenpf_symdata {
479 /* IN/OUT variables */
480 uint32_t namelen; /* size of 'name' buffer */
481
482 /* IN/OUT variables */
483 uint32_t symnum; /* IN: Symbol to read */
484 /* OUT: Next available symbol. If same as IN */
485 /* then we reached the end */
486
487 /* OUT variables */
488 GUEST_HANDLE(char) name;
489 uint64_t address;
490 char type;
491};
492DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
493
477struct xen_platform_op { 494struct xen_platform_op {
478 uint32_t cmd; 495 uint32_t cmd;
479 uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ 496 uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -495,6 +512,7 @@ struct xen_platform_op {
495 struct xenpf_cpu_hotadd cpu_add; 512 struct xenpf_cpu_hotadd cpu_add;
496 struct xenpf_mem_hotadd mem_add; 513 struct xenpf_mem_hotadd mem_add;
497 struct xenpf_core_parking core_parking; 514 struct xenpf_core_parking core_parking;
515 struct xenpf_symdata symdata;
498 uint8_t pad[128]; 516 uint8_t pad[128];
499 } u; 517 } u;
500}; 518};
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index a48378958062..167071c290b3 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -80,6 +80,7 @@
80#define __HYPERVISOR_kexec_op 37 80#define __HYPERVISOR_kexec_op 37
81#define __HYPERVISOR_tmem_op 38 81#define __HYPERVISOR_tmem_op 38
82#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */ 82#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
83#define __HYPERVISOR_xenpmu_op 40
83 84
84/* Architecture-specific hypercall definitions. */ 85/* Architecture-specific hypercall definitions. */
85#define __HYPERVISOR_arch_0 48 86#define __HYPERVISOR_arch_0 48
@@ -112,6 +113,7 @@
 112#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */ 113#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
113#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */ 114#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
114#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */ 115#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
116#define VIRQ_XENPMU 13 /* PMC interrupt */
115 117
116/* Architecture-specific VIRQ definitions. */ 118/* Architecture-specific VIRQ definitions. */
117#define VIRQ_ARCH_0 16 119#define VIRQ_ARCH_0 16
@@ -585,26 +587,29 @@ struct shared_info {
585}; 587};
586 588
587/* 589/*
588 * Start-of-day memory layout for the initial domain (DOM0): 590 * Start-of-day memory layout
591 *
589 * 1. The domain is started within contiguous virtual-memory region. 592 * 1. The domain is started within contiguous virtual-memory region.
590 * 2. The contiguous region begins and ends on an aligned 4MB boundary. 593 * 2. The contiguous region begins and ends on an aligned 4MB boundary.
 591 * 3. The region start corresponds to the load address of the OS image. 594 * 3. This is the order of bootstrap elements in the initial virtual region:
592 * If the load address is not 4MB aligned then the address is rounded down.
593 * 4. This the order of bootstrap elements in the initial virtual region:
594 * a. relocated kernel image 595 * a. relocated kernel image
595 * b. initial ram disk [mod_start, mod_len] 596 * b. initial ram disk [mod_start, mod_len]
597 * (may be omitted)
596 * c. list of allocated page frames [mfn_list, nr_pages] 598 * c. list of allocated page frames [mfn_list, nr_pages]
599 * (unless relocated due to XEN_ELFNOTE_INIT_P2M)
597 * d. start_info_t structure [register ESI (x86)] 600 * d. start_info_t structure [register ESI (x86)]
598 * e. bootstrap page tables [pt_base, CR3 (x86)] 601 * in case of dom0 this page contains the console info, too
599 * f. bootstrap stack [register ESP (x86)] 602 * e. unless dom0: xenstore ring page
600 * 5. Bootstrap elements are packed together, but each is 4kB-aligned. 603 * f. unless dom0: console ring page
601 * 6. The initial ram disk may be omitted. 604 * g. bootstrap page tables [pt_base, CR3 (x86)]
602 * 7. The list of page frames forms a contiguous 'pseudo-physical' memory 605 * h. bootstrap stack [register ESP (x86)]
606 * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
607 * 5. The list of page frames forms a contiguous 'pseudo-physical' memory
603 * layout for the domain. In particular, the bootstrap virtual-memory 608 * layout for the domain. In particular, the bootstrap virtual-memory
604 * region is a 1:1 mapping to the first section of the pseudo-physical map. 609 * region is a 1:1 mapping to the first section of the pseudo-physical map.
605 * 8. All bootstrap elements are mapped read-writable for the guest OS. The 610 * 6. All bootstrap elements are mapped read-writable for the guest OS. The
606 * only exception is the bootstrap page table, which is mapped read-only. 611 * only exception is the bootstrap page table, which is mapped read-only.
607 * 9. There is guaranteed to be at least 512kB padding after the final 612 * 7. There is guaranteed to be at least 512kB padding after the final
608 * bootstrap element. If necessary, the bootstrap virtual region is 613 * bootstrap element. If necessary, the bootstrap virtual region is
609 * extended by an extra 4MB to ensure this. 614 * extended by an extra 4MB to ensure this.
610 */ 615 */
@@ -641,10 +646,12 @@ struct start_info {
641}; 646};
642 647
643/* These flags are passed in the 'flags' field of start_info_t. */ 648/* These flags are passed in the 'flags' field of start_info_t. */
644#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ 649#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
645#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ 650#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
646#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */ 651#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
647#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */ 652#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
653#define SIF_VIRT_P2M_4TOOLS (1<<4) /* Do Xen tools understand a virt. mapped */
654 /* P->M making the 3 level tree obsolete? */
648#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */ 655#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
649 656
650/* 657/*
diff --git a/include/xen/interface/xenpmu.h b/include/xen/interface/xenpmu.h
new file mode 100644
index 000000000000..139efc91bceb
--- /dev/null
+++ b/include/xen/interface/xenpmu.h
@@ -0,0 +1,94 @@
1#ifndef __XEN_PUBLIC_XENPMU_H__
2#define __XEN_PUBLIC_XENPMU_H__
3
4#include "xen.h"
5
6#define XENPMU_VER_MAJ 0
7#define XENPMU_VER_MIN 1
8
9/*
10 * ` enum neg_errnoval
11 * ` HYPERVISOR_xenpmu_op(enum xenpmu_op cmd, struct xenpmu_params *args);
12 *
13 * @cmd == XENPMU_* (PMU operation)
14 * @args == struct xenpmu_params
15 */
16/* ` enum xenpmu_op { */
17#define XENPMU_mode_get 0 /* Also used for getting PMU version */
18#define XENPMU_mode_set 1
19#define XENPMU_feature_get 2
20#define XENPMU_feature_set 3
21#define XENPMU_init 4
22#define XENPMU_finish 5
23#define XENPMU_lvtpc_set 6
24#define XENPMU_flush 7
25
26/* ` } */
27
28/* Parameters structure for HYPERVISOR_xenpmu_op call */
29struct xen_pmu_params {
30 /* IN/OUT parameters */
31 struct {
32 uint32_t maj;
33 uint32_t min;
34 } version;
35 uint64_t val;
36
37 /* IN parameters */
38 uint32_t vcpu;
39 uint32_t pad;
40};
41
42/* PMU modes:
43 * - XENPMU_MODE_OFF: No PMU virtualization
44 * - XENPMU_MODE_SELF: Guests can profile themselves
45 * - XENPMU_MODE_HV: Guests can profile themselves, dom0 profiles
46 * itself and Xen
47 * - XENPMU_MODE_ALL: Only dom0 has access to VPMU and it profiles
48 * everyone: itself, the hypervisor and the guests.
49 */
50#define XENPMU_MODE_OFF 0
51#define XENPMU_MODE_SELF (1<<0)
52#define XENPMU_MODE_HV (1<<1)
53#define XENPMU_MODE_ALL (1<<2)
54
55/*
56 * PMU features:
57 * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
58 */
59#define XENPMU_FEATURE_INTEL_BTS 1
60
61/*
62 * Shared PMU data between hypervisor and PV(H) domains.
63 *
64 * The hypervisor fills out this structure during PMU interrupt and sends an
65 * interrupt to appropriate VCPU.
66 * Architecture-independent fields of xen_pmu_data are WO for the hypervisor
67 * and RO for the guest but some fields in xen_pmu_arch can be writable
68 * by both the hypervisor and the guest (see arch-$arch/pmu.h).
69 */
70struct xen_pmu_data {
71 /* Interrupted VCPU */
72 uint32_t vcpu_id;
73
74 /*
75 * Physical processor on which the interrupt occurred. On non-privileged
 76 * guests this is set to vcpu_id.
77 */
78 uint32_t pcpu_id;
79
80 /*
81 * Domain that was interrupted. On non-privileged guests set to
82 * DOMID_SELF.
83 * On privileged guests can be DOMID_SELF, DOMID_XEN, or, when in
84 * XENPMU_MODE_ALL mode, domain ID of another domain.
85 */
86 domid_t domain_id;
87
88 uint8_t pad[6];
89
90 /* Architecture-specific information */
91 struct xen_pmu_arch pmu;
92};
93
94#endif /* __XEN_PUBLIC_XENPMU_H__ */
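A kernel-side sketch of switching the vPMU into self-profiling mode; HYPERVISOR_xenpmu_op is assumed here to be the Linux wrapper that the companion arch patches add for __HYPERVISOR_xenpmu_op, so treat the call name as illustrative:

#include <xen/interface/xenpmu.h>
#include <asm/xen/hypercall.h>

/* Request XENPMU_MODE_SELF: the guest profiles only itself. */
static int xen_pmu_self_mode(void)
{
    struct xen_pmu_params xp = {
        .version = { .maj = XENPMU_VER_MAJ, .min = XENPMU_VER_MIN },
        .val     = XENPMU_MODE_SELF,
    };

    return HYPERVISOR_xenpmu_op(XENPMU_mode_set, &xp);
}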
diff --git a/include/xen/page.h b/include/xen/page.h
index c5ed20bb3fe9..1daae485e336 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,14 +3,14 @@
3 3
4#include <asm/xen/page.h> 4#include <asm/xen/page.h>
5 5
6static inline unsigned long page_to_mfn(struct page *page) 6static inline unsigned long xen_page_to_gfn(struct page *page)
7{ 7{
8 return pfn_to_mfn(page_to_pfn(page)); 8 return pfn_to_gfn(page_to_pfn(page));
9} 9}
10 10
11struct xen_memory_region { 11struct xen_memory_region {
12 phys_addr_t start; 12 unsigned long start_pfn;
13 phys_addr_t size; 13 unsigned long n_pfns;
14}; 14};
15 15
16#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */ 16#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 0ce4f32017ea..e4e214a5abd5 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
30struct vm_area_struct; 30struct vm_area_struct;
31 31
32/* 32/*
33 * xen_remap_domain_mfn_array() - map an array of foreign frames 33 * xen_remap_domain_gfn_array() - map an array of foreign frames
34 * @vma: VMA to map the pages into 34 * @vma: VMA to map the pages into
35 * @addr: Address at which to map the pages 35 * @addr: Address at which to map the pages
36 * @gfn: Array of GFNs to map 36 * @gfn: Array of GFNs to map
@@ -46,14 +46,14 @@ struct vm_area_struct;
46 * Returns the number of successfully mapped frames, or a -ve error 46 * Returns the number of successfully mapped frames, or a -ve error
47 * code. 47 * code.
48 */ 48 */
49int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 49int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
50 unsigned long addr, 50 unsigned long addr,
51 xen_pfn_t *gfn, int nr, 51 xen_pfn_t *gfn, int nr,
52 int *err_ptr, pgprot_t prot, 52 int *err_ptr, pgprot_t prot,
53 unsigned domid, 53 unsigned domid,
54 struct page **pages); 54 struct page **pages);
55 55
56/* xen_remap_domain_mfn_range() - map a range of foreign frames 56/* xen_remap_domain_gfn_range() - map a range of foreign frames
57 * @vma: VMA to map the pages into 57 * @vma: VMA to map the pages into
58 * @addr: Address at which to map the pages 58 * @addr: Address at which to map the pages
59 * @gfn: First GFN to map. 59 * @gfn: First GFN to map.
@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
65 * Returns the number of successfully mapped frames, or a -ve error 65 * Returns the number of successfully mapped frames, or a -ve error
66 * code. 66 * code.
67 */ 67 */
68int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 68int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
69 unsigned long addr, 69 unsigned long addr,
70 xen_pfn_t gfn, int nr, 70 xen_pfn_t gfn, int nr,
71 pgprot_t prot, unsigned domid, 71 pgprot_t prot, unsigned domid,
72 struct page **pages); 72 struct page **pages);
73int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 73int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
74 int numpgs, struct page **pages); 74 int numpgs, struct page **pages);
75int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 75int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
76 unsigned long addr, 76 unsigned long addr,